/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright © 2023 Dmitry Salychev
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * QBMan channel to process ingress traffic (Rx, Tx confirmation, Rx error).
 *
 * NOTE: Several WQs are organized into a single channel.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/taskqueue.h>
#include <sys/buf_ring.h>
#include <sys/smp.h>
#include <sys/proc.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/atomic.h>
#include <machine/vmparam.h>

#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include "dpaa2_types.h"
#include "dpaa2_channel.h"
#include "dpaa2_ni.h"
#include "dpaa2_mc.h"
#include "dpaa2_mc_if.h"
#include "dpaa2_mcp.h"
#include "dpaa2_io.h"
#include "dpaa2_con.h"
#include "dpaa2_buf.h"
#include "dpaa2_swp.h"
#include "dpaa2_swp_if.h"
#include "dpaa2_bp.h"
#include "dpaa2_cmd_if.h"

MALLOC_DEFINE(M_DPAA2_CH, "dpaa2_ch", "DPAA2 QBMan Channel");

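/*
 * Segment sizes below are rounded up to a multiple of PAGE_SIZE using the
 * ((x - 1) / PAGE_SIZE + 1) * PAGE_SIZE idiom. For example, with 4 KiB pages
 * MJUM9BYTES (9 KiB) rounds up to three pages, i.e. 12288 bytes.
 */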
#define RX_SEG_N		 (1u)
#define RX_SEG_SZ		 (((MJUM9BYTES - 1) / PAGE_SIZE + 1) * PAGE_SIZE)
#define RX_SEG_MAXSZ		 (((MJUM9BYTES - 1) / PAGE_SIZE + 1) * PAGE_SIZE)
CTASSERT(RX_SEG_SZ % PAGE_SIZE == 0);
CTASSERT(RX_SEG_MAXSZ % PAGE_SIZE == 0);

#define TX_SEG_N		 (16u) /* XXX-DSL: does a DPAA2 limit exist? */
#define TX_SEG_SZ		 (PAGE_SIZE)
#define TX_SEG_MAXSZ		 (TX_SEG_N * TX_SEG_SZ)
CTASSERT(TX_SEG_SZ % PAGE_SIZE == 0);
CTASSERT(TX_SEG_MAXSZ % PAGE_SIZE == 0);

#define SGT_SEG_N		 (1u)
#define SGT_SEG_SZ		 (PAGE_SIZE)
#define SGT_SEG_MAXSZ		 (PAGE_SIZE)
CTASSERT(SGT_SEG_SZ % PAGE_SIZE == 0);
CTASSERT(SGT_SEG_MAXSZ % PAGE_SIZE == 0);

static int dpaa2_chan_setup_dma(device_t, struct dpaa2_channel *, bus_size_t);
static int dpaa2_chan_alloc_storage(device_t, struct dpaa2_channel *, bus_size_t,
    int, bus_size_t);
static void dpaa2_chan_bp_task(void *, int);

/**
 * @brief Configures a QBMan channel and registers data availability
 * notifications.
 */
int
dpaa2_chan_setup(device_t dev, device_t iodev, device_t condev, device_t bpdev,
    struct dpaa2_channel **channel, uint32_t flowid, task_fn_t cleanup_task_fn)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_io_softc *iosc = device_get_softc(iodev);
	struct dpaa2_con_softc *consc = device_get_softc(condev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *ioinfo = device_get_ivars(iodev);
	struct dpaa2_devinfo *coninfo = device_get_ivars(condev);
	struct dpaa2_con_notif_cfg notif_cfg;
	struct dpaa2_io_notif_ctx *ctx;
	struct dpaa2_channel *ch = NULL;
	struct dpaa2_cmd cmd;
	uint16_t rctk, contk;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rctk);
	if (error) {
		device_printf(dev, "%s: failed to open DPRC: id=%d, error=%d\n",
		    __func__, rcinfo->id, error);
		goto fail_rc_open;
	}
	error = DPAA2_CMD_CON_OPEN(dev, child, &cmd, coninfo->id, &contk);
	if (error) {
		device_printf(dev, "%s: failed to open DPCON: id=%d, error=%d\n",
		    __func__, coninfo->id, error);
		goto fail_con_open;
	}

	error = DPAA2_CMD_CON_ENABLE(dev, child, &cmd);
	if (error) {
		device_printf(dev, "%s: failed to enable channel: dpcon_id=%d, "
		    "chan_id=%d\n", __func__, coninfo->id, consc->attr.chan_id);
		goto fail_con_enable;
	}

	ch = malloc(sizeof(struct dpaa2_channel), M_DPAA2_CH, M_WAITOK | M_ZERO);
	ch->ni_dev = dev;
	ch->io_dev = iodev;
	ch->con_dev = condev;
	ch->id = consc->attr.chan_id;
	ch->flowid = flowid;
	ch->tx_frames = 0; /* for debug purposes */
	ch->tx_dropped = 0; /* for debug purposes */
	ch->store_sz = 0;
	ch->store_idx = 0;
	ch->recycled_n = 0;
	ch->rxq_n = 0;

	NET_TASK_INIT(&ch->cleanup_task, 0, cleanup_task_fn, ch);
	NET_TASK_INIT(&ch->bp_task, 0, dpaa2_chan_bp_task, ch);

	ch->cleanup_tq = taskqueue_create("dpaa2_ch cleanup", M_WAITOK,
	    taskqueue_thread_enqueue, &ch->cleanup_tq);
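	/*
	 * Bind the cleanup thread to the CPU set of the channel's DPIO
	 * (iosc->cpu_mask), which keeps cleanup on the same CPU(s) that
	 * service this channel's notifications.
	 */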
	taskqueue_start_threads_cpuset(&ch->cleanup_tq, 1, PI_NET,
	    &iosc->cpu_mask, "dpaa2_ch%d cleanup", ch->id);

	error = dpaa2_chan_setup_dma(dev, ch, sc->buf_align);
	if (error != 0) {
		device_printf(dev, "%s: failed to setup DMA\n", __func__);
		goto fail_dma_setup;
	}

	mtx_init(&ch->xmit_mtx, "dpaa2_ch_xmit", NULL, MTX_DEF);

	ch->xmit_br = buf_ring_alloc(DPAA2_TX_BUFRING_SZ, M_DEVBUF, M_NOWAIT,
	    &ch->xmit_mtx);
	if (ch->xmit_br == NULL) {
		device_printf(dev, "%s: buf_ring_alloc() failed\n", __func__);
		error = ENOMEM;
		goto fail_buf_ring;
	}

	DPAA2_BUF_INIT(&ch->store);

	/* Register the new notification context */
	ctx = &ch->ctx;
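	/*
	 * The QMan context is the pointer to this structure itself, encoded
	 * as a 64-bit value: QBMan hands it back with each data availability
	 * notification (CDAN), so the channel can be recovered from it.
	 */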
	ctx->qman_ctx = (uint64_t)ctx;
	ctx->cdan_en = true;
	ctx->fq_chan_id = ch->id;
	ctx->io_dev = ch->io_dev;
	ctx->channel = ch;
	error = DPAA2_SWP_CONF_WQ_CHANNEL(ch->io_dev, ctx);
	if (error) {
		device_printf(dev, "%s: failed to register CDAN context\n",
		    __func__);
		goto fail_dpcon_notif;
	}

	/* Register the DPCON notification with the Management Complex */
	notif_cfg.dpio_id = ioinfo->id;
	notif_cfg.prior = 0;
	notif_cfg.qman_ctx = ctx->qman_ctx;
	error = DPAA2_CMD_CON_SET_NOTIF(dev, child, &cmd, &notif_cfg);
	if (error) {
		device_printf(dev, "%s: failed to register DPCON "
		    "notifications: dpcon_id=%d, chan_id=%d\n", __func__,
		    coninfo->id, consc->attr.chan_id);
		goto fail_dpcon_notif;
	}

	/* Allocate an initial number of Rx buffers and the channel storage */
	error = dpaa2_buf_seed_pool(dev, bpdev, ch, DPAA2_NI_BUFS_INIT,
	    DPAA2_RX_BUF_SIZE, NULL);
	if (error) {
		device_printf(dev, "%s: failed to seed buffer pool\n",
		    __func__);
		goto fail_dpcon_notif;
	}
	error = dpaa2_chan_alloc_storage(dev, ch, DPAA2_ETH_STORE_SIZE,
	    BUS_DMA_NOWAIT, sc->buf_align);
	if (error != 0) {
		device_printf(dev, "%s: failed to allocate channel storage\n",
		    __func__);
		goto fail_dpcon_notif;
	} else {
		ch->store_sz = DPAA2_ETH_STORE_FRAMES;
	}

	/* Prepare queues for the channel */
	error = dpaa2_chan_setup_fq(dev, ch, DPAA2_NI_QUEUE_TX_CONF);
	if (error) {
		device_printf(dev, "%s: failed to prepare TxConf queue: "
		    "error=%d\n", __func__, error);
		goto fail_fq_setup;
	}
	error = dpaa2_chan_setup_fq(dev, ch, DPAA2_NI_QUEUE_RX);
	if (error) {
		device_printf(dev, "%s: failed to prepare Rx queue: error=%d\n",
		    __func__, error);
		goto fail_fq_setup;
	}

	if (bootverbose) {
		device_printf(dev, "channel: dpio_id=%d dpcon_id=%d chan_id=%d, "
		    "priorities=%d\n", ioinfo->id, coninfo->id, ch->id,
		    consc->attr.prior_num);
	}

	*channel = ch;

	(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rctk));

	return (0);

fail_fq_setup:
	if (ch->store.vaddr != NULL) {
		bus_dmamem_free(ch->store.dmat, ch->store.vaddr, ch->store.dmap);
	}
	if (ch->store.dmat != NULL) {
		bus_dma_tag_destroy(ch->store.dmat);
	}
	ch->store.dmat = NULL;
	ch->store.vaddr = NULL;
	ch->store.paddr = 0;
	ch->store.nseg = 0;
fail_dpcon_notif:
	buf_ring_free(ch->xmit_br, M_DEVBUF);
fail_buf_ring:
	mtx_destroy(&ch->xmit_mtx);
fail_dma_setup:
	/* while (taskqueue_cancel(ch->cleanup_tq, &ch->cleanup_task, NULL)) { */
	/* 	taskqueue_drain(ch->cleanup_tq, &ch->cleanup_task); */
	/* } */
	/* taskqueue_free(ch->cleanup_tq); */
	(void)DPAA2_CMD_CON_DISABLE(dev, child, DPAA2_CMD_TK(&cmd, contk));
fail_con_enable:
	(void)DPAA2_CMD_CON_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, contk));
fail_con_open:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rctk));
fail_rc_open:
	return (error);
}
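
/*
 * Usage sketch (illustrative only; the caller-side names below are
 * assumptions, not part of this file). A network interface driver that owns
 * DPIO, DPCON and DPBP devices could bring a channel up like this:
 *
 *	struct dpaa2_channel *ch;
 *	int error;
 *
 *	error = dpaa2_chan_setup(ni_dev, io_dev, con_dev, bp_dev, &ch,
 *	    flowid, cleanup_task_fn);
 *	if (error != 0)
 *		return (error);
 */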

/**
 * @brief Performs an initial configuration of the frame queue.
 */
int
dpaa2_chan_setup_fq(device_t dev, struct dpaa2_channel *ch,
    enum dpaa2_ni_queue_type queue_type)
{
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_ni_fq *fq;

	switch (queue_type) {
	case DPAA2_NI_QUEUE_TX_CONF:
		/* One queue per channel */
		fq = &ch->txc_queue;
		fq->chan = ch;
		fq->flowid = ch->flowid;
		fq->tc = 0; /* ignored */
		fq->type = queue_type;
		break;
	case DPAA2_NI_QUEUE_RX:
		KASSERT(sc->attr.num.rx_tcs <= DPAA2_MAX_TCS,
		    ("too many Rx traffic classes: rx_tcs=%d\n",
		    sc->attr.num.rx_tcs));

		/* One queue per Rx traffic class within a channel */
		for (int i = 0; i < sc->attr.num.rx_tcs; i++) {
			fq = &ch->rx_queues[i];
			fq->chan = ch;
			fq->flowid = ch->flowid;
			fq->tc = (uint8_t) i;
			fq->type = queue_type;

			ch->rxq_n++;
		}
		break;
	case DPAA2_NI_QUEUE_RX_ERR:
		/* One queue per network interface */
		fq = &sc->rxe_queue;
		fq->chan = ch;
		fq->flowid = 0; /* ignored */
		fq->tc = 0; /* ignored */
		fq->type = queue_type;
		break;
	default:
		device_printf(dev, "%s: unexpected frame queue type: %d\n",
		    __func__, queue_type);
		return (EINVAL);
	}

	return (0);
}
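
/*
 * For example (illustrative): with rx_tcs == 2, calling this function for
 * both queue types leaves the channel with three frame queues, i.e.
 * ch->txc_queue, ch->rx_queues[0] and ch->rx_queues[1], while the Rx error
 * queue (sc->rxe_queue) exists once per network interface.
 */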

/**
 * @brief Obtain the next dequeue response from the channel storage.
 */
int
dpaa2_chan_next_frame(struct dpaa2_channel *ch, struct dpaa2_dq **dq)
{
	struct dpaa2_buf *buf = &ch->store;
	struct dpaa2_dq *msgs = (struct dpaa2_dq *)buf->vaddr;
	struct dpaa2_dq *msg = &msgs[ch->store_idx];
	int rc = EINPROGRESS;

	ch->store_idx++;

	if (msg->fdr.desc.stat & DPAA2_DQ_STAT_EXPIRED) {
		rc = EALREADY; /* VDQ command is expired */
		ch->store_idx = 0;
		if (!(msg->fdr.desc.stat & DPAA2_DQ_STAT_VALIDFRAME)) {
			msg = NULL; /* Null response, FD is invalid */
		}
	}
	if (msg != NULL && (msg->fdr.desc.stat & DPAA2_DQ_STAT_FQEMPTY)) {
		rc = ENOENT; /* FQ is empty */
		ch->store_idx = 0;
	}

	if (dq != NULL) {
		*dq = msg;
	}

	return (rc);
}
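
/*
 * Consumption sketch (illustrative; assumes the storage has already been
 * filled by a volatile dequeue (VDQ) command and that consume_fd() is a
 * hypothetical frame handler):
 *
 *	struct dpaa2_dq *dq;
 *	int error;
 *
 *	do {
 *		error = dpaa2_chan_next_frame(ch, &dq);
 *		if (dq != NULL)
 *			consume_fd(ch, &dq->fdr.fd);
 *	} while (error == EINPROGRESS);
 *
 * EALREADY (expired VDQ command, possibly carrying one last valid frame) and
 * ENOENT (empty FQ) both terminate the loop.
 */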

static int
dpaa2_chan_setup_dma(device_t dev, struct dpaa2_channel *ch,
    bus_size_t alignment)
{
	int error;

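	/*
	 * dma_mtx serializes the channel's DMA-related operations, e.g.,
	 * reseeding of the buffer pool in dpaa2_chan_bp_task().
	 */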
	mtx_init(&ch->dma_mtx, "dpaa2_ch_dma_mtx", NULL, MTX_DEF);

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* parent */
	    alignment, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* low restricted addr */
	    BUS_SPACE_MAXADDR,		/* high restricted addr */
	    NULL, NULL,			/* filter, filterarg */
	    RX_SEG_MAXSZ,		/* maxsize */
	    RX_SEG_N,			/* nsegments */
	    RX_SEG_SZ,			/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &ch->rx_dmat);
	if (error) {
		device_printf(dev, "%s: failed to create rx_dmat\n", __func__);
		goto fail_rx_tag;
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* parent */
	    alignment, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* low restricted addr */
	    BUS_SPACE_MAXADDR,		/* high restricted addr */
	    NULL, NULL,			/* filter, filterarg */
	    TX_SEG_MAXSZ,		/* maxsize */
	    TX_SEG_N,			/* nsegments */
	    TX_SEG_SZ,			/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &ch->tx_dmat);
	if (error) {
		device_printf(dev, "%s: failed to create tx_dmat\n", __func__);
		goto fail_tx_tag;
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* parent */
	    alignment, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* low restricted addr */
	    BUS_SPACE_MAXADDR,		/* high restricted addr */
	    NULL, NULL,			/* filter, filterarg */
	    SGT_SEG_MAXSZ,		/* maxsize */
	    SGT_SEG_N,			/* nsegments */
	    SGT_SEG_SZ,			/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &ch->sgt_dmat);
	if (error) {
		device_printf(dev, "%s: failed to create sgt_dmat\n", __func__);
		goto fail_sgt_tag;
	}

	return (0);

fail_sgt_tag:
	bus_dma_tag_destroy(ch->tx_dmat);
fail_tx_tag:
	bus_dma_tag_destroy(ch->rx_dmat);
fail_rx_tag:
	mtx_destroy(&ch->dma_mtx);
	ch->rx_dmat = NULL;
	ch->tx_dmat = NULL;
	ch->sgt_dmat = NULL;

	return (error);
}

/**
 * @brief Allocate DMA-mapped storage to keep responses from the VDQ command.
 */
static int
dpaa2_chan_alloc_storage(device_t dev, struct dpaa2_channel *ch, bus_size_t size,
    int mapflags, bus_size_t alignment)
{
	struct dpaa2_buf *buf = &ch->store;
	uint32_t maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE;
	int error;

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* parent */
	    alignment, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* low restricted addr */
	    BUS_SPACE_MAXADDR,		/* high restricted addr */
	    NULL, NULL,			/* filter, filterarg */
	    maxsize,			/* maxsize */
	    1,				/* nsegments */
	    maxsize,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &buf->dmat);
	if (error != 0) {
		device_printf(dev, "%s: failed to create DMA tag\n", __func__);
		goto fail_tag;
	}

	error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr,
	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap);
	if (error != 0) {
		device_printf(dev, "%s: failed to allocate storage memory\n",
		    __func__);
		goto fail_map_create;
	}

	buf->paddr = 0;
	error = bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr, size,
	    dpaa2_dmamap_oneseg_cb, &buf->paddr, mapflags);
	if (error != 0) {
		device_printf(dev, "%s: failed to map storage memory\n",
		    __func__);
		goto fail_map_load;
	}

	bus_dmamap_sync(buf->dmat, buf->dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	buf->nseg = 1;

	return (0);

fail_map_load:
	bus_dmamem_free(buf->dmat, buf->vaddr, buf->dmap);
fail_map_create:
	bus_dma_tag_destroy(buf->dmat);
fail_tag:
	buf->dmat = NULL;
	buf->vaddr = NULL;
	buf->paddr = 0;
	buf->nseg = 0;

	return (error);
}

/**
 * @brief Release new buffers to the buffer pool if necessary.
 */
static void
dpaa2_chan_bp_task(void *arg, int count)
{
	struct dpaa2_channel *ch = (struct dpaa2_channel *)arg;
	struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev);
	struct dpaa2_bp_softc *bpsc;
	struct dpaa2_bp_conf bpconf;
	const int buf_num = DPAA2_ATOMIC_READ(&sc->buf_num);
	device_t bpdev;
	int error;

	/* There's only one buffer pool for now */
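	/* The DPBP device_t is stashed as the start address of this resource */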
	bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
	bpsc = device_get_softc(bpdev);

	/* Get state of the buffer pool */
	error = DPAA2_SWP_QUERY_BP(ch->io_dev, bpsc->attr.bpid, &bpconf);
	if (error) {
		device_printf(sc->dev, "%s: DPAA2_SWP_QUERY_BP() failed: "
		    "error=%d\n", __func__, error);
		return;
	}

	/* Double the allocated Rx buffers if fewer than 25% of them are free */
	if (bpconf.free_bufn < (buf_num >> 2)) {
		mtx_assert(&ch->dma_mtx, MA_NOTOWNED);
		mtx_lock(&ch->dma_mtx);
		(void)dpaa2_buf_seed_pool(ch->ni_dev, bpdev, ch, buf_num,
		    DPAA2_RX_BUF_SIZE, &ch->dma_mtx);
		mtx_unlock(&ch->dma_mtx);

		DPAA2_ATOMIC_XCHG(&sc->buf_free, bpconf.free_bufn);
	}
}
551