xref: /freebsd/sys/dev/dpaa2/dpaa2_channel.c (revision 36ef39831fe0e89f0b1672340a44c4ac1183158e)
1*58983e4bSDmitry Salychev /*-
2*58983e4bSDmitry Salychev  * SPDX-License-Identifier: BSD-2-Clause
3*58983e4bSDmitry Salychev  *
4*58983e4bSDmitry Salychev  * Copyright © 2023 Dmitry Salychev
5*58983e4bSDmitry Salychev  *
6*58983e4bSDmitry Salychev  * Redistribution and use in source and binary forms, with or without
7*58983e4bSDmitry Salychev  * modification, are permitted provided that the following conditions
8*58983e4bSDmitry Salychev  * are met:
9*58983e4bSDmitry Salychev  * 1. Redistributions of source code must retain the above copyright
10*58983e4bSDmitry Salychev  *    notice, this list of conditions and the following disclaimer.
11*58983e4bSDmitry Salychev  * 2. Redistributions in binary form must reproduce the above copyright
12*58983e4bSDmitry Salychev  *    notice, this list of conditions and the following disclaimer in the
13*58983e4bSDmitry Salychev  *    documentation and/or other materials provided with the distribution.
14*58983e4bSDmitry Salychev  *
15*58983e4bSDmitry Salychev  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16*58983e4bSDmitry Salychev  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17*58983e4bSDmitry Salychev  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18*58983e4bSDmitry Salychev  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19*58983e4bSDmitry Salychev  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20*58983e4bSDmitry Salychev  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21*58983e4bSDmitry Salychev  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22*58983e4bSDmitry Salychev  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23*58983e4bSDmitry Salychev  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24*58983e4bSDmitry Salychev  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25*58983e4bSDmitry Salychev  * SUCH DAMAGE.
26*58983e4bSDmitry Salychev  */
27*58983e4bSDmitry Salychev 
28*58983e4bSDmitry Salychev /*
29*58983e4bSDmitry Salychev  * QBMan channel to process ingress traffic (Rx, Tx confirmation, Rx error).
30*58983e4bSDmitry Salychev  *
31*58983e4bSDmitry Salychev  * NOTE: Several WQs are organized into a single channel.
32*58983e4bSDmitry Salychev  */
33*58983e4bSDmitry Salychev 
34*58983e4bSDmitry Salychev #include <sys/param.h>
35*58983e4bSDmitry Salychev #include <sys/systm.h>
36*58983e4bSDmitry Salychev #include <sys/kernel.h>
37*58983e4bSDmitry Salychev #include <sys/bus.h>
38*58983e4bSDmitry Salychev #include <sys/rman.h>
39*58983e4bSDmitry Salychev #include <sys/module.h>
40*58983e4bSDmitry Salychev #include <sys/malloc.h>
41*58983e4bSDmitry Salychev #include <sys/mutex.h>
42*58983e4bSDmitry Salychev #include <sys/socket.h>
43*58983e4bSDmitry Salychev #include <sys/sockio.h>
44*58983e4bSDmitry Salychev #include <sys/sysctl.h>
45*58983e4bSDmitry Salychev #include <sys/mbuf.h>
46*58983e4bSDmitry Salychev #include <sys/taskqueue.h>
47*58983e4bSDmitry Salychev #include <sys/sysctl.h>
48*58983e4bSDmitry Salychev #include <sys/buf_ring.h>
49*58983e4bSDmitry Salychev #include <sys/smp.h>
50*58983e4bSDmitry Salychev #include <sys/proc.h>
51*58983e4bSDmitry Salychev 
52*58983e4bSDmitry Salychev #include <machine/bus.h>
53*58983e4bSDmitry Salychev #include <machine/resource.h>
54*58983e4bSDmitry Salychev #include <machine/atomic.h>
55*58983e4bSDmitry Salychev #include <machine/vmparam.h>
56*58983e4bSDmitry Salychev 
57*58983e4bSDmitry Salychev #include <net/ethernet.h>
58*58983e4bSDmitry Salychev #include <net/bpf.h>
59*58983e4bSDmitry Salychev #include <net/if.h>
60*58983e4bSDmitry Salychev #include <net/if_dl.h>
61*58983e4bSDmitry Salychev #include <net/if_media.h>
62*58983e4bSDmitry Salychev #include <net/if_types.h>
63*58983e4bSDmitry Salychev #include <net/if_var.h>
64*58983e4bSDmitry Salychev 
65*58983e4bSDmitry Salychev #include "dpaa2_types.h"
66*58983e4bSDmitry Salychev #include "dpaa2_channel.h"
67*58983e4bSDmitry Salychev #include "dpaa2_ni.h"
68*58983e4bSDmitry Salychev #include "dpaa2_mc.h"
69*58983e4bSDmitry Salychev #include "dpaa2_mc_if.h"
70*58983e4bSDmitry Salychev #include "dpaa2_mcp.h"
71*58983e4bSDmitry Salychev #include "dpaa2_io.h"
72*58983e4bSDmitry Salychev #include "dpaa2_con.h"
73*58983e4bSDmitry Salychev #include "dpaa2_buf.h"
74*58983e4bSDmitry Salychev #include "dpaa2_swp.h"
75*58983e4bSDmitry Salychev #include "dpaa2_swp_if.h"
76*58983e4bSDmitry Salychev #include "dpaa2_bp.h"
77*58983e4bSDmitry Salychev #include "dpaa2_cmd_if.h"
78*58983e4bSDmitry Salychev 
79*58983e4bSDmitry Salychev MALLOC_DEFINE(M_DPAA2_CH, "dpaa2_ch", "DPAA2 QBMan Channel");
80*58983e4bSDmitry Salychev 
81*58983e4bSDmitry Salychev #define RX_SEG_N		 (1u)
82*58983e4bSDmitry Salychev #define RX_SEG_SZ		 (((MJUM9BYTES - 1) / PAGE_SIZE + 1) * PAGE_SIZE)
83*58983e4bSDmitry Salychev #define RX_SEG_MAXSZ	 	 (((MJUM9BYTES - 1) / PAGE_SIZE + 1) * PAGE_SIZE)
84*58983e4bSDmitry Salychev CTASSERT(RX_SEG_SZ % PAGE_SIZE == 0);
85*58983e4bSDmitry Salychev CTASSERT(RX_SEG_MAXSZ % PAGE_SIZE == 0);
86*58983e4bSDmitry Salychev 
87*58983e4bSDmitry Salychev #define TX_SEG_N		 (16u) /* XXX-DSL: does DPAA2 limit exist? */
88*58983e4bSDmitry Salychev #define TX_SEG_SZ		 (PAGE_SIZE)
89*58983e4bSDmitry Salychev #define TX_SEG_MAXSZ	 	 (TX_SEG_N * TX_SEG_SZ)
90*58983e4bSDmitry Salychev CTASSERT(TX_SEG_SZ % PAGE_SIZE == 0);
91*58983e4bSDmitry Salychev CTASSERT(TX_SEG_MAXSZ % PAGE_SIZE == 0);
92*58983e4bSDmitry Salychev 
93*58983e4bSDmitry Salychev #define SGT_SEG_N		 (1u)
94*58983e4bSDmitry Salychev #define SGT_SEG_SZ		 (PAGE_SIZE)
95*58983e4bSDmitry Salychev #define SGT_SEG_MAXSZ	 	 (PAGE_SIZE)
96*58983e4bSDmitry Salychev CTASSERT(SGT_SEG_SZ % PAGE_SIZE == 0);
97*58983e4bSDmitry Salychev CTASSERT(SGT_SEG_MAXSZ % PAGE_SIZE == 0);
98*58983e4bSDmitry Salychev 
99*58983e4bSDmitry Salychev static int dpaa2_chan_setup_dma(device_t, struct dpaa2_channel *, bus_size_t);
100*58983e4bSDmitry Salychev static int dpaa2_chan_alloc_storage(device_t, struct dpaa2_channel *, bus_size_t,
101*58983e4bSDmitry Salychev     int, bus_size_t);
102*58983e4bSDmitry Salychev static void dpaa2_chan_bp_task(void *, int);
103*58983e4bSDmitry Salychev 
/**
 * @brief Configures QBMan channel and registers data availability notifications.
 *
 * Opens the parent DPRC and the DPCON object, enables the channel, allocates
 * and initializes a dpaa2_channel, wires up its cleanup taskqueue and DMA
 * resources, registers the CDAN notification context with both the software
 * portal and the Management Complex, seeds the buffer pool, allocates the
 * dequeue storage and prepares the TxConf/Rx frame queues.
 *
 * @param dev             Network interface (DPNI) device.
 * @param iodev           DPIO device whose software portal serves this channel.
 * @param condev          DPCON device backing the channel.
 * @param bpdev           Buffer pool (DPBP) device to seed Rx buffers from.
 * @param channel         Out: the newly allocated channel on success.
 * @param flowid          Flow ID assigned to this channel's frame queues.
 * @param cleanup_task_fn Task function run by the per-channel cleanup taskqueue.
 *
 * @return 0 on success, or an error from the failed MC command / allocation.
 */
int
dpaa2_chan_setup(device_t dev, device_t iodev, device_t condev, device_t bpdev,
    struct dpaa2_channel **channel, uint32_t flowid, task_fn_t cleanup_task_fn)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_io_softc *iosc = device_get_softc(iodev);
	struct dpaa2_con_softc *consc = device_get_softc(condev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *ioinfo = device_get_ivars(iodev);
	struct dpaa2_devinfo *coninfo = device_get_ivars(condev);
	struct dpaa2_con_notif_cfg notif_cfg;
	struct dpaa2_io_notif_ctx *ctx;
	struct dpaa2_channel *ch = NULL;
	struct dpaa2_cmd cmd;
	uint16_t rctk, contk;
	int error;

	DPAA2_CMD_INIT(&cmd);

	/* Open the resource container and the DPCON object to obtain tokens */
	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rctk);
	if (error) {
		device_printf(dev, "%s: failed to open DPRC: id=%d, error=%d\n",
		    __func__, rcinfo->id, error);
		goto fail_rc_open;
	}
	error = DPAA2_CMD_CON_OPEN(dev, child, &cmd, coninfo->id, &contk);
	if (error) {
		device_printf(dev, "%s: failed to open DPCON: id=%d, error=%d\n",
		    __func__, coninfo->id, error);
		goto fail_con_open;
	}

	error = DPAA2_CMD_CON_ENABLE(dev, child, &cmd);
	if (error) {
		device_printf(dev, "%s: failed to enable channel: dpcon_id=%d, "
		    "chan_id=%d\n", __func__, coninfo->id, consc->attr.chan_id);
		goto fail_con_enable;
	}

	/* M_WAITOK: allocation cannot fail, no NULL check needed */
	ch = malloc(sizeof(struct dpaa2_channel), M_DPAA2_CH, M_WAITOK | M_ZERO);
	ch->ni_dev = dev;
	ch->io_dev = iodev;
	ch->con_dev = condev;
	ch->id = consc->attr.chan_id;
	ch->flowid = flowid;
	ch->tx_frames = 0; /* for debug purposes */
	ch->tx_dropped = 0; /* for debug purposes */
	ch->store_sz = 0;
	ch->store_idx = 0;
	ch->recycled_n = 0;
	ch->rxq_n = 0;

	NET_TASK_INIT(&ch->cleanup_task, 0, cleanup_task_fn, ch);
	NET_TASK_INIT(&ch->bp_task, 0, dpaa2_chan_bp_task, ch);

	/*
	 * Dedicated cleanup thread, pinned to the same CPU set as the DPIO so
	 * the cleanup runs close to the portal that raised the notification.
	 */
	ch->cleanup_tq = taskqueue_create("dpaa2_ch cleanup", M_WAITOK,
	    taskqueue_thread_enqueue, &ch->cleanup_tq);
	taskqueue_start_threads_cpuset(&ch->cleanup_tq, 1, PI_NET,
	    &iosc->cpu_mask, "dpaa2_ch%d cleanup", ch->id);

	error = dpaa2_chan_setup_dma(dev, ch, sc->buf_align);
	if (error != 0) {
		device_printf(dev, "%s: failed to setup DMA\n", __func__);
		goto fail_dma_setup;
	}

	mtx_init(&ch->xmit_mtx, "dpaa2_ch_xmit", NULL, MTX_DEF);

	ch->xmit_br = buf_ring_alloc(DPAA2_TX_BUFRING_SZ, M_DEVBUF, M_NOWAIT,
	    &ch->xmit_mtx);
	if (ch->xmit_br == NULL) {
		device_printf(dev, "%s: buf_ring_alloc() failed\n", __func__);
		error = ENOMEM;
		goto fail_buf_ring;
	}

	DPAA2_BUF_INIT(&ch->store);

	/* Register the new notification context */
	ctx = &ch->ctx;
	/* QMan context carries the pointer back to us in the CDAN message */
	ctx->qman_ctx = (uint64_t)ctx;
	ctx->cdan_en = true;
	ctx->fq_chan_id = ch->id;
	ctx->io_dev = ch->io_dev;
	ctx->channel = ch;
	error = DPAA2_SWP_CONF_WQ_CHANNEL(ch->io_dev, ctx);
	if (error) {
		device_printf(dev, "%s: failed to register CDAN context\n",
		    __func__);
		goto fail_dpcon_notif;
	}

	/* Register DPCON notification within Management Complex */
	notif_cfg.dpio_id = ioinfo->id;
	notif_cfg.prior = 0;
	notif_cfg.qman_ctx = ctx->qman_ctx;
	error = DPAA2_CMD_CON_SET_NOTIF(dev, child, &cmd, &notif_cfg);
	if (error) {
		device_printf(dev, "%s: failed to register DPCON "
		    "notifications: dpcon_id=%d, chan_id=%d\n", __func__,
		    coninfo->id, consc->attr.chan_id);
		goto fail_dpcon_notif;
	}

	/* Allocate initial # of Rx buffers and a channel storage */
	error = dpaa2_buf_seed_pool(dev, bpdev, ch, DPAA2_NI_BUFS_INIT,
	    DPAA2_RX_BUF_SIZE, NULL);
	if (error) {
		device_printf(dev, "%s: failed to seed buffer pool\n",
		    __func__);
		goto fail_dpcon_notif;
	}
	error = dpaa2_chan_alloc_storage(dev, ch, DPAA2_ETH_STORE_SIZE,
	    BUS_DMA_NOWAIT, sc->buf_align);
	if (error != 0) {
		device_printf(dev, "%s: failed to allocate channel storage\n",
		    __func__);
		goto fail_dpcon_notif;
	} else {
		ch->store_sz = DPAA2_ETH_STORE_FRAMES;
	}

	/* Prepare queues for the channel */
	error = dpaa2_chan_setup_fq(dev, ch, DPAA2_NI_QUEUE_TX_CONF);
	if (error) {
		device_printf(dev, "%s: failed to prepare TxConf queue: "
		    "error=%d\n", __func__, error);
		goto fail_fq_setup;
	}
	error = dpaa2_chan_setup_fq(dev, ch, DPAA2_NI_QUEUE_RX);
	if (error) {
		device_printf(dev, "%s: failed to prepare Rx queue: error=%d\n",
		    __func__, error);
		goto fail_fq_setup;
	}

	if (bootverbose) {
		device_printf(dev, "channel: dpio_id=%d dpcon_id=%d chan_id=%d, "
		    "priorities=%d\n", ioinfo->id, coninfo->id, ch->id,
		    consc->attr.prior_num);
	}

	*channel = ch;

	/* Tokens are only needed during setup; close both objects */
	(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rctk));

	return (0);

fail_fq_setup:
	/* Release the dequeue storage allocated above */
	if (ch->store.vaddr != NULL) {
		bus_dmamem_free(ch->store.dmat, ch->store.vaddr, ch->store.dmap);
	}
	if (ch->store.dmat != NULL) {
		bus_dma_tag_destroy(ch->store.dmat);
	}
	ch->store.dmat = NULL;
	ch->store.vaddr = NULL;
	ch->store.paddr = 0;
	ch->store.nseg = 0;
fail_dpcon_notif:
	buf_ring_free(ch->xmit_br, M_DEVBUF);
fail_buf_ring:
	mtx_destroy(&ch->xmit_mtx);
fail_dma_setup:
	/*
	 * NOTE(review): 'ch', its cleanup taskqueue and the DMA tags created
	 * by dpaa2_chan_setup_dma() are not released on this path.  The
	 * taskqueue threads already reference 'ch' (enqueue arg is
	 * &ch->cleanup_tq), so freeing it here without the drain below would
	 * be unsafe — confirm whether this leak on error is acceptable.
	 */
	/* while (taskqueue_cancel(ch->cleanup_tq, &ch->cleanup_task, NULL)) { */
	/* 	taskqueue_drain(ch->cleanup_tq, &ch->cleanup_task); */
	/* } */
	/* taskqueue_free(ch->cleanup_tq); */
	(void)DPAA2_CMD_CON_DISABLE(dev, child, DPAA2_CMD_TK(&cmd, contk));
fail_con_enable:
	(void)DPAA2_CMD_CON_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, contk));
fail_con_open:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rctk));
fail_rc_open:
	return (error);
}
286*58983e4bSDmitry Salychev 
287*58983e4bSDmitry Salychev /**
288*58983e4bSDmitry Salychev  * @brief Performs an initial configuration of the frame queue.
289*58983e4bSDmitry Salychev  */
290*58983e4bSDmitry Salychev int
dpaa2_chan_setup_fq(device_t dev,struct dpaa2_channel * ch,enum dpaa2_ni_queue_type queue_type)291*58983e4bSDmitry Salychev dpaa2_chan_setup_fq(device_t dev, struct dpaa2_channel *ch,
292*58983e4bSDmitry Salychev     enum dpaa2_ni_queue_type queue_type)
293*58983e4bSDmitry Salychev {
294*58983e4bSDmitry Salychev 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
295*58983e4bSDmitry Salychev 	struct dpaa2_ni_fq *fq;
296*58983e4bSDmitry Salychev 
297*58983e4bSDmitry Salychev 	switch (queue_type) {
298*58983e4bSDmitry Salychev 	case DPAA2_NI_QUEUE_TX_CONF:
299*58983e4bSDmitry Salychev 		/* One queue per channel */
300*58983e4bSDmitry Salychev 		fq = &ch->txc_queue;
301*58983e4bSDmitry Salychev 		fq->chan = ch;
302*58983e4bSDmitry Salychev 		fq->flowid = ch->flowid;
303*58983e4bSDmitry Salychev 		fq->tc = 0; /* ignored */
304*58983e4bSDmitry Salychev 		fq->type = queue_type;
305*58983e4bSDmitry Salychev 		break;
306*58983e4bSDmitry Salychev 	case DPAA2_NI_QUEUE_RX:
307*58983e4bSDmitry Salychev 		KASSERT(sc->attr.num.rx_tcs <= DPAA2_MAX_TCS,
308*58983e4bSDmitry Salychev 		    ("too many Rx traffic classes: rx_tcs=%d\n",
309*58983e4bSDmitry Salychev 		    sc->attr.num.rx_tcs));
310*58983e4bSDmitry Salychev 
311*58983e4bSDmitry Salychev 		/* One queue per Rx traffic class within a channel */
312*58983e4bSDmitry Salychev 		for (int i = 0; i < sc->attr.num.rx_tcs; i++) {
313*58983e4bSDmitry Salychev 			fq = &ch->rx_queues[i];
314*58983e4bSDmitry Salychev 			fq->chan = ch;
315*58983e4bSDmitry Salychev 			fq->flowid = ch->flowid;
316*58983e4bSDmitry Salychev 			fq->tc = (uint8_t) i;
317*58983e4bSDmitry Salychev 			fq->type = queue_type;
318*58983e4bSDmitry Salychev 
319*58983e4bSDmitry Salychev 			ch->rxq_n++;
320*58983e4bSDmitry Salychev 		}
321*58983e4bSDmitry Salychev 		break;
322*58983e4bSDmitry Salychev 	case DPAA2_NI_QUEUE_RX_ERR:
323*58983e4bSDmitry Salychev 		/* One queue per network interface */
324*58983e4bSDmitry Salychev 		fq = &sc->rxe_queue;
325*58983e4bSDmitry Salychev 		fq->chan = ch;
326*58983e4bSDmitry Salychev 		fq->flowid = 0; /* ignored */
327*58983e4bSDmitry Salychev 		fq->tc = 0; /* ignored */
328*58983e4bSDmitry Salychev 		fq->type = queue_type;
329*58983e4bSDmitry Salychev 		break;
330*58983e4bSDmitry Salychev 	default:
331*58983e4bSDmitry Salychev 		device_printf(dev, "%s: unexpected frame queue type: %d\n",
332*58983e4bSDmitry Salychev 		    __func__, queue_type);
333*58983e4bSDmitry Salychev 		return (EINVAL);
334*58983e4bSDmitry Salychev 	}
335*58983e4bSDmitry Salychev 
336*58983e4bSDmitry Salychev 	return (0);
337*58983e4bSDmitry Salychev }
338*58983e4bSDmitry Salychev 
339*58983e4bSDmitry Salychev /**
340*58983e4bSDmitry Salychev  * @brief Obtain the next dequeue response from the channel storage.
341*58983e4bSDmitry Salychev  */
342*58983e4bSDmitry Salychev int
dpaa2_chan_next_frame(struct dpaa2_channel * ch,struct dpaa2_dq ** dq)343*58983e4bSDmitry Salychev dpaa2_chan_next_frame(struct dpaa2_channel *ch, struct dpaa2_dq **dq)
344*58983e4bSDmitry Salychev {
345*58983e4bSDmitry Salychev 	struct dpaa2_buf *buf = &ch->store;
346*58983e4bSDmitry Salychev 	struct dpaa2_dq *msgs = (struct dpaa2_dq *)buf->vaddr;
347*58983e4bSDmitry Salychev 	struct dpaa2_dq *msg = &msgs[ch->store_idx];
348*58983e4bSDmitry Salychev 	int rc = EINPROGRESS;
349*58983e4bSDmitry Salychev 
350*58983e4bSDmitry Salychev 	ch->store_idx++;
351*58983e4bSDmitry Salychev 
352*58983e4bSDmitry Salychev 	if (msg->fdr.desc.stat & DPAA2_DQ_STAT_EXPIRED) {
353*58983e4bSDmitry Salychev 		rc = EALREADY; /* VDQ command is expired */
354*58983e4bSDmitry Salychev 		ch->store_idx = 0;
355*58983e4bSDmitry Salychev 		if (!(msg->fdr.desc.stat & DPAA2_DQ_STAT_VALIDFRAME)) {
356*58983e4bSDmitry Salychev 			msg = NULL; /* Null response, FD is invalid */
357*58983e4bSDmitry Salychev 		}
358*58983e4bSDmitry Salychev 	}
359*58983e4bSDmitry Salychev 	if (msg != NULL && (msg->fdr.desc.stat & DPAA2_DQ_STAT_FQEMPTY)) {
360*58983e4bSDmitry Salychev 		rc = ENOENT; /* FQ is empty */
361*58983e4bSDmitry Salychev 		ch->store_idx = 0;
362*58983e4bSDmitry Salychev 	}
363*58983e4bSDmitry Salychev 
364*58983e4bSDmitry Salychev 	if (dq != NULL) {
365*58983e4bSDmitry Salychev 		*dq = msg;
366*58983e4bSDmitry Salychev 	}
367*58983e4bSDmitry Salychev 
368*58983e4bSDmitry Salychev 	return (rc);
369*58983e4bSDmitry Salychev }
370*58983e4bSDmitry Salychev 
371*58983e4bSDmitry Salychev static int
dpaa2_chan_setup_dma(device_t dev,struct dpaa2_channel * ch,bus_size_t alignment)372*58983e4bSDmitry Salychev dpaa2_chan_setup_dma(device_t dev, struct dpaa2_channel *ch,
373*58983e4bSDmitry Salychev     bus_size_t alignment)
374*58983e4bSDmitry Salychev {
375*58983e4bSDmitry Salychev 	int error;
376*58983e4bSDmitry Salychev 
377*58983e4bSDmitry Salychev 	mtx_init(&ch->dma_mtx, "dpaa2_ch_dma_mtx", NULL, MTX_DEF);
378*58983e4bSDmitry Salychev 
379*58983e4bSDmitry Salychev 	error = bus_dma_tag_create(
380*58983e4bSDmitry Salychev 	    bus_get_dma_tag(dev),	/* parent */
381*58983e4bSDmitry Salychev 	    alignment, 0,		/* alignment, boundary */
382*58983e4bSDmitry Salychev 	    BUS_SPACE_MAXADDR,		/* low restricted addr */
383*58983e4bSDmitry Salychev 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
384*58983e4bSDmitry Salychev 	    NULL, NULL,			/* filter, filterarg */
385*58983e4bSDmitry Salychev 	    RX_SEG_MAXSZ,		/* maxsize */
386*58983e4bSDmitry Salychev 	    RX_SEG_N,			/* nsegments */
387*58983e4bSDmitry Salychev 	    RX_SEG_SZ,			/* maxsegsize */
388*58983e4bSDmitry Salychev 	    0,				/* flags */
389*58983e4bSDmitry Salychev 	    NULL,			/* lockfunc */
390*58983e4bSDmitry Salychev 	    NULL,			/* lockarg */
391*58983e4bSDmitry Salychev 	    &ch->rx_dmat);
392*58983e4bSDmitry Salychev 	if (error) {
393*58983e4bSDmitry Salychev 		device_printf(dev, "%s: failed to create rx_dmat\n", __func__);
394*58983e4bSDmitry Salychev 		goto fail_rx_tag;
395*58983e4bSDmitry Salychev 	}
396*58983e4bSDmitry Salychev 
397*58983e4bSDmitry Salychev 	error = bus_dma_tag_create(
398*58983e4bSDmitry Salychev 	    bus_get_dma_tag(dev),	/* parent */
399*58983e4bSDmitry Salychev 	    alignment, 0,		/* alignment, boundary */
400*58983e4bSDmitry Salychev 	    BUS_SPACE_MAXADDR,		/* low restricted addr */
401*58983e4bSDmitry Salychev 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
402*58983e4bSDmitry Salychev 	    NULL, NULL,			/* filter, filterarg */
403*58983e4bSDmitry Salychev 	    TX_SEG_MAXSZ,		/* maxsize */
404*58983e4bSDmitry Salychev 	    TX_SEG_N,			/* nsegments */
405*58983e4bSDmitry Salychev 	    TX_SEG_SZ,			/* maxsegsize */
406*58983e4bSDmitry Salychev 	    0,				/* flags */
407*58983e4bSDmitry Salychev 	    NULL,			/* lockfunc */
408*58983e4bSDmitry Salychev 	    NULL,			/* lockarg */
409*58983e4bSDmitry Salychev 	    &ch->tx_dmat);
410*58983e4bSDmitry Salychev 	if (error) {
411*58983e4bSDmitry Salychev 		device_printf(dev, "%s: failed to create tx_dmat\n", __func__);
412*58983e4bSDmitry Salychev 		goto fail_tx_tag;
413*58983e4bSDmitry Salychev 	}
414*58983e4bSDmitry Salychev 
415*58983e4bSDmitry Salychev 	error = bus_dma_tag_create(
416*58983e4bSDmitry Salychev 	    bus_get_dma_tag(dev),	/* parent */
417*58983e4bSDmitry Salychev 	    alignment, 0,		/* alignment, boundary */
418*58983e4bSDmitry Salychev 	    BUS_SPACE_MAXADDR,		/* low restricted addr */
419*58983e4bSDmitry Salychev 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
420*58983e4bSDmitry Salychev 	    NULL, NULL,			/* filter, filterarg */
421*58983e4bSDmitry Salychev 	    SGT_SEG_MAXSZ,		/* maxsize */
422*58983e4bSDmitry Salychev 	    SGT_SEG_N,			/* nsegments */
423*58983e4bSDmitry Salychev 	    SGT_SEG_SZ,			/* maxsegsize */
424*58983e4bSDmitry Salychev 	    0,				/* flags */
425*58983e4bSDmitry Salychev 	    NULL,			/* lockfunc */
426*58983e4bSDmitry Salychev 	    NULL,			/* lockarg */
427*58983e4bSDmitry Salychev 	    &ch->sgt_dmat);
428*58983e4bSDmitry Salychev 	if (error) {
429*58983e4bSDmitry Salychev 		device_printf(dev, "%s: failed to create sgt_dmat\n", __func__);
430*58983e4bSDmitry Salychev 		goto fail_sgt_tag;
431*58983e4bSDmitry Salychev 	}
432*58983e4bSDmitry Salychev 
433*58983e4bSDmitry Salychev 	return (0);
434*58983e4bSDmitry Salychev 
435*58983e4bSDmitry Salychev fail_sgt_tag:
436*58983e4bSDmitry Salychev 	bus_dma_tag_destroy(ch->tx_dmat);
437*58983e4bSDmitry Salychev fail_tx_tag:
438*58983e4bSDmitry Salychev 	bus_dma_tag_destroy(ch->rx_dmat);
439*58983e4bSDmitry Salychev fail_rx_tag:
440*58983e4bSDmitry Salychev 	mtx_destroy(&ch->dma_mtx);
441*58983e4bSDmitry Salychev 	ch->rx_dmat = NULL;
442*58983e4bSDmitry Salychev 	ch->tx_dmat = NULL;
443*58983e4bSDmitry Salychev 	ch->sgt_dmat = NULL;
444*58983e4bSDmitry Salychev 
445*58983e4bSDmitry Salychev 	return (error);
446*58983e4bSDmitry Salychev }
447*58983e4bSDmitry Salychev 
/**
 * @brief Allocate a DMA-mapped storage to keep responses from VDQ command.
 *
 * Creates a single-segment DMA tag (size rounded up to whole pages),
 * allocates zeroed coherent memory for it, loads the map to obtain the
 * physical address in ch->store.paddr and pre-syncs the buffer.  On failure
 * everything acquired so far is released and the store fields are reset.
 *
 * @param dev       Device used as the parent for the DMA tag (for messages).
 * @param ch        Channel whose ch->store buffer is being set up.
 * @param size      Requested storage size in bytes.
 * @param mapflags  Flags passed to bus_dmamap_load() (e.g. BUS_DMA_NOWAIT).
 * @param alignment Required alignment of the storage buffer.
 *
 * @return 0 on success, otherwise an error from the failed busdma call.
 */
static int
dpaa2_chan_alloc_storage(device_t dev, struct dpaa2_channel *ch, bus_size_t size,
    int mapflags, bus_size_t alignment)
{
	struct dpaa2_buf *buf = &ch->store;
	/* Round the requested size up to a whole number of pages */
	uint32_t maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE;
	int error;

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* parent */
	    alignment, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* low restricted addr */
	    BUS_SPACE_MAXADDR,		/* high restricted addr */
	    NULL, NULL,			/* filter, filterarg */
	    maxsize,			/* maxsize */
	    1,				/* nsegments */
	    maxsize,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &buf->dmat);
	if (error != 0) {
		device_printf(dev, "%s: failed to create DMA tag\n", __func__);
		goto fail_tag;
	}

	error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr,
	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap);
	if (error != 0) {
		device_printf(dev, "%s: failed to allocate storage memory\n",
		    __func__);
		goto fail_map_create;
	}

	buf->paddr = 0;
	/* Callback stores the single segment's physical addr in buf->paddr */
	error = bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr, size,
	    dpaa2_dmamap_oneseg_cb, &buf->paddr, mapflags);
	if (error != 0) {
		device_printf(dev, "%s: failed to map storage memory\n",
		    __func__);
		goto fail_map_load;
	}

	bus_dmamap_sync(buf->dmat, buf->dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	buf->nseg = 1;

	return (0);

fail_map_load:
	bus_dmamem_free(buf->dmat, buf->vaddr, buf->dmap);
fail_map_create:
	bus_dma_tag_destroy(buf->dmat);
fail_tag:
	buf->dmat = NULL;
	buf->vaddr = NULL;
	buf->paddr = 0;
	buf->nseg = 0;

	return (error);
}
513*58983e4bSDmitry Salychev 
514*58983e4bSDmitry Salychev /**
515*58983e4bSDmitry Salychev  * @brief Release new buffers to the buffer pool if necessary.
516*58983e4bSDmitry Salychev  */
517*58983e4bSDmitry Salychev static void
dpaa2_chan_bp_task(void * arg,int count)518*58983e4bSDmitry Salychev dpaa2_chan_bp_task(void *arg, int count)
519*58983e4bSDmitry Salychev {
520*58983e4bSDmitry Salychev 	struct dpaa2_channel *ch = (struct dpaa2_channel *)arg;
521*58983e4bSDmitry Salychev 	struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev);
522*58983e4bSDmitry Salychev 	struct dpaa2_bp_softc *bpsc;
523*58983e4bSDmitry Salychev 	struct dpaa2_bp_conf bpconf;
524*58983e4bSDmitry Salychev 	const int buf_num = DPAA2_ATOMIC_READ(&sc->buf_num);
525*58983e4bSDmitry Salychev 	device_t bpdev;
526*58983e4bSDmitry Salychev 	int error;
527*58983e4bSDmitry Salychev 
528*58983e4bSDmitry Salychev 	/* There's only one buffer pool for now */
529*58983e4bSDmitry Salychev 	bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
530*58983e4bSDmitry Salychev 	bpsc = device_get_softc(bpdev);
531*58983e4bSDmitry Salychev 
532*58983e4bSDmitry Salychev 	/* Get state of the buffer pool */
533*58983e4bSDmitry Salychev 	error = DPAA2_SWP_QUERY_BP(ch->io_dev, bpsc->attr.bpid, &bpconf);
534*58983e4bSDmitry Salychev 	if (error) {
535*58983e4bSDmitry Salychev 		device_printf(sc->dev, "%s: DPAA2_SWP_QUERY_BP() failed: "
536*58983e4bSDmitry Salychev 		    "error=%d\n", __func__, error);
537*58983e4bSDmitry Salychev 		return;
538*58983e4bSDmitry Salychev 	}
539*58983e4bSDmitry Salychev 
540*58983e4bSDmitry Salychev 	/* Double allocated Rx buffers if amount of free buffers is < 25% */
541*58983e4bSDmitry Salychev 	if (bpconf.free_bufn < (buf_num >> 2)) {
542*58983e4bSDmitry Salychev 		mtx_assert(&ch->dma_mtx, MA_NOTOWNED);
543*58983e4bSDmitry Salychev 		mtx_lock(&ch->dma_mtx);
544*58983e4bSDmitry Salychev 		(void)dpaa2_buf_seed_pool(ch->ni_dev, bpdev, ch, buf_num,
545*58983e4bSDmitry Salychev 		    DPAA2_RX_BUF_SIZE, &ch->dma_mtx);
546*58983e4bSDmitry Salychev 		mtx_unlock(&ch->dma_mtx);
547*58983e4bSDmitry Salychev 
548*58983e4bSDmitry Salychev 		DPAA2_ATOMIC_XCHG(&sc->buf_free, bpconf.free_bufn);
549*58983e4bSDmitry Salychev 	}
550*58983e4bSDmitry Salychev }
551