xref: /freebsd/sys/dev/dpaa2/dpaa2_ni.c (revision d0b2dbfa0ecf2bbc9709efc5e20baf8e4b44bbbf)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright © 2021-2023 Dmitry Salychev
5  * Copyright © 2022 Mathew McBride
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 /*
31  * The DPAA2 Network Interface (DPNI) driver.
32  *
33  * The DPNI object is a network interface that is configurable to support a wide
34  * range of features from a very basic Ethernet interface up to a
35  * high-functioning network interface. The DPNI supports features that are
36  * expected by standard network stacks, from basic features to offloads.
37  *
38  * DPNIs work with Ethernet traffic, starting with the L2 header. Additional
39  * functions are provided for standard network protocols (L2, L3, L4, etc.).
40  */
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/bus.h>
46 #include <sys/rman.h>
47 #include <sys/module.h>
48 #include <sys/malloc.h>
49 #include <sys/mutex.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <sys/mbuf.h>
54 #include <sys/taskqueue.h>
55 #include <sys/sysctl.h>
56 #include <sys/buf_ring.h>
57 #include <sys/smp.h>
58 #include <sys/proc.h>
59 
60 #include <vm/vm.h>
61 #include <vm/pmap.h>
62 
63 #include <machine/bus.h>
64 #include <machine/resource.h>
65 #include <machine/atomic.h>
66 #include <machine/vmparam.h>
67 
68 #include <net/ethernet.h>
69 #include <net/bpf.h>
70 #include <net/if.h>
71 #include <net/if_dl.h>
72 #include <net/if_media.h>
73 #include <net/if_types.h>
74 #include <net/if_var.h>
75 
76 #include <dev/pci/pcivar.h>
77 #include <dev/mii/mii.h>
78 #include <dev/mii/miivar.h>
79 #include <dev/mdio/mdio.h>
80 
81 #include "opt_acpi.h"
82 #include "opt_platform.h"
83 
84 #include "pcib_if.h"
85 #include "pci_if.h"
86 #include "miibus_if.h"
87 #include "memac_mdio_if.h"
88 
89 #include "dpaa2_types.h"
90 #include "dpaa2_mc.h"
91 #include "dpaa2_mc_if.h"
92 #include "dpaa2_mcp.h"
93 #include "dpaa2_swp.h"
94 #include "dpaa2_swp_if.h"
95 #include "dpaa2_cmd_if.h"
96 #include "dpaa2_ni.h"
97 
98 #define BIT(x)			(1ul << (x))
99 #define WRIOP_VERSION(x, y, z)	((x) << 10 | (y) << 5 | (z) << 0)
100 #define ARRAY_SIZE(a)		(sizeof(a) / sizeof((a)[0]))
101 
102 /* Frame Dequeue Response status bits. */
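/*
 * Bit 4 of the dequeue response status is the QBMan "valid frame" flag; it
 * stays unset when the response carries no frame descriptor.
 */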
103 #define IS_NULL_RESPONSE(stat)	((((stat) >> 4) & 1) == 0)
104 
105 #define	ALIGN_UP(x, y)		roundup2((x), (y))
106 #define	ALIGN_DOWN(x, y)	rounddown2((x), (y))
107 #define CACHE_LINE_ALIGN(x)	ALIGN_UP((x), CACHE_LINE_SIZE)
108 
109 #define DPNI_LOCK(__sc) do {			\
110 	mtx_assert(&(__sc)->lock, MA_NOTOWNED);	\
111 	mtx_lock(&(__sc)->lock);		\
112 } while (0)
113 #define	DPNI_UNLOCK(__sc) do {			\
114 	mtx_assert(&(__sc)->lock, MA_OWNED);	\
115 	mtx_unlock(&(__sc)->lock);		\
116 } while (0)
117 
118 #define TX_LOCK(__tx) do {			\
119 	mtx_assert(&(__tx)->lock, MA_NOTOWNED);	\
120 	mtx_lock(&(__tx)->lock);		\
121 } while (0)
122 #define	TX_UNLOCK(__tx) do {			\
123 	mtx_assert(&(__tx)->lock, MA_OWNED);	\
124 	mtx_unlock(&(__tx)->lock);		\
125 } while (0)
126 
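/* Shorthand to access the Tx ring of traffic class "tc" on channel "chan". */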
127 #define DPAA2_TX_RING(sc, chan, tc)				\
128 	(&(sc)->channels[(chan)]->txc_queue.tx_rings[(tc)])
129 
130 #define DPNI_IRQ_INDEX		0 /* Index of the only DPNI IRQ. */
131 #define DPNI_IRQ_LINK_CHANGED	1 /* Link state changed */
132 #define DPNI_IRQ_EP_CHANGED	2 /* DPAA2 endpoint dis/connected */
133 
134 /* Default maximum frame length. */
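/* I.e., ETHER_MAX_LEN (1518) - ETHER_CRC_LEN (4) = 1514 bytes. */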
135 #define DPAA2_ETH_MFL		(ETHER_MAX_LEN - ETHER_CRC_LEN)
136 
137 /* Minimally supported version of the DPNI API. */
138 #define DPNI_VER_MAJOR		7
139 #define DPNI_VER_MINOR		0
140 
141 /* Rx/Tx buffers configuration. */
142 #define BUF_ALIGN_V1		256 /* WRIOP v1.0.0 limitation */
143 #define BUF_ALIGN		64
144 #define BUF_SWA_SIZE		64  /* SW annotation size */
145 #define BUF_RX_HWA_SIZE		64  /* HW annotation size */
146 #define BUF_TX_HWA_SIZE		128 /* HW annotation size */
147 #define BUF_SIZE		(MJUM9BYTES)
148 
149 #define DPAA2_TX_BUFRING_SZ	(4096u)
150 #define DPAA2_TX_SEGLIMIT	(16u) /* arbitrary number */
151 #define DPAA2_TX_SEG_SZ		(4096u)
152 #define DPAA2_TX_SEGS_MAXSZ	(DPAA2_TX_SEGLIMIT * DPAA2_TX_SEG_SZ)
153 #define DPAA2_TX_SGT_SZ		(PAGE_SIZE) /* bytes */
154 
155 /* Size of a buffer to keep a QoS table key configuration. */
156 #define ETH_QOS_KCFG_BUF_SIZE	256
157 
158 /* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */
159 #define DPAA2_CLASSIFIER_DMA_SIZE 256
160 
161 /* Channel storage buffer configuration. */
162 #define ETH_STORE_FRAMES	16u
163 #define ETH_STORE_SIZE		((ETH_STORE_FRAMES + 1) * sizeof(struct dpaa2_dq))
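/* I.e., room for 16 + 1 dequeue entries; struct dpaa2_dq is expected to be
 * 64 bytes, matching a QBMan dequeue result. */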
164 #define ETH_STORE_ALIGN		64u
165 
166 /* Buffers layout options. */
167 #define BUF_LOPT_TIMESTAMP	0x1
168 #define BUF_LOPT_PARSER_RESULT	0x2
169 #define BUF_LOPT_FRAME_STATUS	0x4
170 #define BUF_LOPT_PRIV_DATA_SZ	0x8
171 #define BUF_LOPT_DATA_ALIGN	0x10
172 #define BUF_LOPT_DATA_HEAD_ROOM	0x20
173 #define BUF_LOPT_DATA_TAIL_ROOM	0x40
174 
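/*
 * NOTE: Only the lower 49 bits of a frame descriptor address carry the
 *	 buffer's physical address; the bits above are reused to encode the
 *	 originating channel and buffer (or Tx ring) indices via the masks
 *	 and shifts below.
 */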
175 #define DPAA2_NI_BUF_ADDR_MASK	(0x1FFFFFFFFFFFFul) /* 49-bit addresses max. */
176 #define DPAA2_NI_BUF_CHAN_MASK	(0xFu)
177 #define DPAA2_NI_BUF_CHAN_SHIFT	(60)
178 #define DPAA2_NI_BUF_IDX_MASK	(0x7FFFu)
179 #define DPAA2_NI_BUF_IDX_SHIFT	(49)
180 #define DPAA2_NI_TX_IDX_MASK	(0x7u)
181 #define DPAA2_NI_TX_IDX_SHIFT	(57)
182 #define DPAA2_NI_TXBUF_IDX_MASK	(0xFFu)
183 #define DPAA2_NI_TXBUF_IDX_SHIFT (49)
184 
185 #define DPAA2_NI_FD_FMT_MASK	(0x3u)
186 #define DPAA2_NI_FD_FMT_SHIFT	(12)
187 #define DPAA2_NI_FD_ERR_MASK	(0xFFu)
188 #define DPAA2_NI_FD_ERR_SHIFT	(0)
189 #define DPAA2_NI_FD_SL_MASK	(0x1u)
190 #define DPAA2_NI_FD_SL_SHIFT	(14)
191 #define DPAA2_NI_FD_LEN_MASK	(0x3FFFFu)
192 #define DPAA2_NI_FD_OFFSET_MASK (0x0FFFu)
193 
194 /* Enables TCAM for Flow Steering and QoS look-ups. */
195 #define DPNI_OPT_HAS_KEY_MASKING 0x10
196 
197 /* Unique IDs for the supported Rx classification header fields. */
198 #define DPAA2_ETH_DIST_ETHDST	BIT(0)
199 #define DPAA2_ETH_DIST_ETHSRC	BIT(1)
200 #define DPAA2_ETH_DIST_ETHTYPE	BIT(2)
201 #define DPAA2_ETH_DIST_VLAN	BIT(3)
202 #define DPAA2_ETH_DIST_IPSRC	BIT(4)
203 #define DPAA2_ETH_DIST_IPDST	BIT(5)
204 #define DPAA2_ETH_DIST_IPPROTO	BIT(6)
205 #define DPAA2_ETH_DIST_L4SRC	BIT(7)
206 #define DPAA2_ETH_DIST_L4DST	BIT(8)
207 #define DPAA2_ETH_DIST_ALL	(~0ULL)
208 
209 /* L3-L4 network traffic flow hash options. */
210 #define	RXH_L2DA		(1 << 1)
211 #define	RXH_VLAN		(1 << 2)
212 #define	RXH_L3_PROTO		(1 << 3)
213 #define	RXH_IP_SRC		(1 << 4)
214 #define	RXH_IP_DST		(1 << 5)
215 #define	RXH_L4_B_0_1		(1 << 6) /* src port in case of TCP/UDP/SCTP */
216 #define	RXH_L4_B_2_3		(1 << 7) /* dst port in case of TCP/UDP/SCTP */
217 #define	RXH_DISCARD		(1 << 31)
218 
219 /* Default Rx hash options, set during attaching. */
220 #define DPAA2_RXH_DEFAULT	(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
221 
222 MALLOC_DEFINE(M_DPAA2_NI, "dpaa2_ni", "DPAA2 Network Interface");
223 
224 /* DPAA2 Network Interface resource specification. */
225 struct resource_spec dpaa2_ni_spec[] = {
226 	/*
227 	 * DPMCP resources.
228 	 *
229 	 * NOTE: MC command portals (MCPs) are used to send commands to, and
230 	 *	 receive responses from, the MC firmware. One portal per DPNI.
231 	 */
232 #define MCP_RES_NUM	(1u)
233 #define MCP_RID_OFF	(0u)
234 #define MCP_RID(rid)	((rid) + MCP_RID_OFF)
235 	/* --- */
236 	{ DPAA2_DEV_MCP, MCP_RID(0),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
237 	/*
238 	 * DPIO resources (software portals).
239 	 *
240 	 * NOTE: One per running core. While DPIOs are the source of data
241 	 *	 availability interrupts, the DPCONs are used to identify the
242 	 *	 network interface that has produced ingress data to that core.
243 	 */
244 #define IO_RES_NUM	(16u)
245 #define IO_RID_OFF	(MCP_RID_OFF + MCP_RES_NUM)
246 #define IO_RID(rid)	((rid) + IO_RID_OFF)
247 	/* --- */
248 	{ DPAA2_DEV_IO,  IO_RID(0),    RF_ACTIVE | RF_SHAREABLE },
249 	{ DPAA2_DEV_IO,  IO_RID(1),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
250 	{ DPAA2_DEV_IO,  IO_RID(2),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
251 	{ DPAA2_DEV_IO,  IO_RID(3),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
252 	{ DPAA2_DEV_IO,  IO_RID(4),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
253 	{ DPAA2_DEV_IO,  IO_RID(5),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
254 	{ DPAA2_DEV_IO,  IO_RID(6),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
255 	{ DPAA2_DEV_IO,  IO_RID(7),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
256 	{ DPAA2_DEV_IO,  IO_RID(8),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
257 	{ DPAA2_DEV_IO,  IO_RID(9),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
258 	{ DPAA2_DEV_IO,  IO_RID(10),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
259 	{ DPAA2_DEV_IO,  IO_RID(11),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
260 	{ DPAA2_DEV_IO,  IO_RID(12),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
261 	{ DPAA2_DEV_IO,  IO_RID(13),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
262 	{ DPAA2_DEV_IO,  IO_RID(14),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
263 	{ DPAA2_DEV_IO,  IO_RID(15),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
264 	/*
265 	 * DPBP resources (buffer pools).
266 	 *
267 	 * NOTE: One per network interface.
268 	 */
269 #define BP_RES_NUM	(1u)
270 #define BP_RID_OFF	(IO_RID_OFF + IO_RES_NUM)
271 #define BP_RID(rid)	((rid) + BP_RID_OFF)
272 	/* --- */
273 	{ DPAA2_DEV_BP,  BP_RID(0),   RF_ACTIVE },
274 	/*
275 	 * DPCON resources (channels).
276 	 *
277 	 * NOTE: One DPCON per core to which Rx or Tx confirmation traffic
278 	 *	 is distributed.
279 	 * NOTE: Since it is necessary to distinguish between traffic from
280 	 *	 different network interfaces arriving on the same core, the
281 	 *	 DPCONs must be private to the DPNIs.
282 	 */
283 #define CON_RES_NUM	(16u)
284 #define CON_RID_OFF	(BP_RID_OFF + BP_RES_NUM)
285 #define CON_RID(rid)	((rid) + CON_RID_OFF)
286 	/* --- */
287 	{ DPAA2_DEV_CON, CON_RID(0),   RF_ACTIVE },
288 	{ DPAA2_DEV_CON, CON_RID(1),   RF_ACTIVE | RF_OPTIONAL },
289 	{ DPAA2_DEV_CON, CON_RID(2),   RF_ACTIVE | RF_OPTIONAL },
290 	{ DPAA2_DEV_CON, CON_RID(3),   RF_ACTIVE | RF_OPTIONAL },
291 	{ DPAA2_DEV_CON, CON_RID(4),   RF_ACTIVE | RF_OPTIONAL },
292 	{ DPAA2_DEV_CON, CON_RID(5),   RF_ACTIVE | RF_OPTIONAL },
293 	{ DPAA2_DEV_CON, CON_RID(6),   RF_ACTIVE | RF_OPTIONAL },
294 	{ DPAA2_DEV_CON, CON_RID(7),   RF_ACTIVE | RF_OPTIONAL },
295 	{ DPAA2_DEV_CON, CON_RID(8),   RF_ACTIVE | RF_OPTIONAL },
296 	{ DPAA2_DEV_CON, CON_RID(9),   RF_ACTIVE | RF_OPTIONAL },
297 	{ DPAA2_DEV_CON, CON_RID(10),  RF_ACTIVE | RF_OPTIONAL },
298 	{ DPAA2_DEV_CON, CON_RID(11),  RF_ACTIVE | RF_OPTIONAL },
299 	{ DPAA2_DEV_CON, CON_RID(12),  RF_ACTIVE | RF_OPTIONAL },
300 	{ DPAA2_DEV_CON, CON_RID(13),  RF_ACTIVE | RF_OPTIONAL },
301 	{ DPAA2_DEV_CON, CON_RID(14),  RF_ACTIVE | RF_OPTIONAL },
302 	{ DPAA2_DEV_CON, CON_RID(15),  RF_ACTIVE | RF_OPTIONAL },
303 	/* --- */
304 	RESOURCE_SPEC_END
305 };
306 
307 /* Supported header fields for Rx hash distribution key */
308 static const struct dpaa2_eth_dist_fields dist_fields[] = {
309 	{
310 		/* L2 header */
311 		.rxnfc_field = RXH_L2DA,
312 		.cls_prot = NET_PROT_ETH,
313 		.cls_field = NH_FLD_ETH_DA,
314 		.id = DPAA2_ETH_DIST_ETHDST,
315 		.size = 6,
316 	}, {
317 		.cls_prot = NET_PROT_ETH,
318 		.cls_field = NH_FLD_ETH_SA,
319 		.id = DPAA2_ETH_DIST_ETHSRC,
320 		.size = 6,
321 	}, {
322 		/* This is the last ethertype field parsed:
323 		 * depending on frame format, it can be the MAC ethertype
324 		 * or the VLAN etype.
325 		 */
326 		.cls_prot = NET_PROT_ETH,
327 		.cls_field = NH_FLD_ETH_TYPE,
328 		.id = DPAA2_ETH_DIST_ETHTYPE,
329 		.size = 2,
330 	}, {
331 		/* VLAN header */
332 		.rxnfc_field = RXH_VLAN,
333 		.cls_prot = NET_PROT_VLAN,
334 		.cls_field = NH_FLD_VLAN_TCI,
335 		.id = DPAA2_ETH_DIST_VLAN,
336 		.size = 2,
337 	}, {
338 		/* IP header */
339 		.rxnfc_field = RXH_IP_SRC,
340 		.cls_prot = NET_PROT_IP,
341 		.cls_field = NH_FLD_IP_SRC,
342 		.id = DPAA2_ETH_DIST_IPSRC,
343 		.size = 4,
344 	}, {
345 		.rxnfc_field = RXH_IP_DST,
346 		.cls_prot = NET_PROT_IP,
347 		.cls_field = NH_FLD_IP_DST,
348 		.id = DPAA2_ETH_DIST_IPDST,
349 		.size = 4,
350 	}, {
351 		.rxnfc_field = RXH_L3_PROTO,
352 		.cls_prot = NET_PROT_IP,
353 		.cls_field = NH_FLD_IP_PROTO,
354 		.id = DPAA2_ETH_DIST_IPPROTO,
355 		.size = 1,
356 	}, {
357 		/* Using UDP ports, this is functionally equivalent to raw
358 		 * byte pairs from L4 header.
359 		 */
360 		.rxnfc_field = RXH_L4_B_0_1,
361 		.cls_prot = NET_PROT_UDP,
362 		.cls_field = NH_FLD_UDP_PORT_SRC,
363 		.id = DPAA2_ETH_DIST_L4SRC,
364 		.size = 2,
365 	}, {
366 		.rxnfc_field = RXH_L4_B_2_3,
367 		.cls_prot = NET_PROT_UDP,
368 		.cls_field = NH_FLD_UDP_PORT_DST,
369 		.id = DPAA2_ETH_DIST_L4DST,
370 		.size = 2,
371 	},
372 };
373 
374 static struct dpni_stat {
375 	int	 page;
376 	int	 cnt;
377 	char	*name;
378 	char	*desc;
379 } dpni_stat_sysctls[DPAA2_NI_STAT_SYSCTLS] = {
380 	/* PAGE, COUNTER, NAME, DESCRIPTION */
381 	{  0, 0, "in_all_frames",	"All accepted ingress frames" },
382 	{  0, 1, "in_all_bytes",	"Bytes in all accepted ingress frames" },
383 	{  0, 2, "in_multi_frames",	"Multicast accepted ingress frames" },
384 	{  1, 0, "eg_all_frames",	"All egress frames transmitted" },
385 	{  1, 1, "eg_all_bytes",	"Bytes in all frames transmitted" },
386 	{  1, 2, "eg_multi_frames",	"Multicast egress frames transmitted" },
387 	{  2, 0, "in_filtered_frames",	"All ingress frames discarded due to "
388 	   				"filtering" },
389 	{  2, 1, "in_discarded_frames",	"All frames discarded due to errors" },
390 	{  2, 2, "in_nobuf_discards",	"Discards on ingress side due to buffer "
391 	   				"depletion in DPNI buffer pools" },
392 };
393 
394 /* Device interface */
395 static int dpaa2_ni_probe(device_t);
396 static int dpaa2_ni_attach(device_t);
397 static int dpaa2_ni_detach(device_t);
398 
399 /* DPAA2 network interface setup and configuration */
400 static int dpaa2_ni_setup(device_t);
401 static int dpaa2_ni_setup_channels(device_t);
402 static int dpaa2_ni_setup_fq(device_t, struct dpaa2_ni_channel *,
403     enum dpaa2_ni_queue_type);
404 static int dpaa2_ni_bind(device_t);
405 static int dpaa2_ni_setup_rx_dist(device_t);
406 static int dpaa2_ni_setup_irqs(device_t);
407 static int dpaa2_ni_setup_msi(struct dpaa2_ni_softc *);
408 static int dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *);
409 static int dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *);
410 static int dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *);
411 static int dpaa2_ni_setup_dma(struct dpaa2_ni_softc *);
412 
413 /* Tx/Rx flow configuration */
414 static int dpaa2_ni_setup_rx_flow(device_t, struct dpaa2_ni_fq *);
415 static int dpaa2_ni_setup_tx_flow(device_t, struct dpaa2_ni_fq *);
416 static int dpaa2_ni_setup_rx_err_flow(device_t, struct dpaa2_ni_fq *);
417 
418 /* Configuration subroutines */
419 static int dpaa2_ni_set_buf_layout(device_t);
420 static int dpaa2_ni_set_pause_frame(device_t);
421 static int dpaa2_ni_set_qos_table(device_t);
422 static int dpaa2_ni_set_mac_addr(device_t);
423 static int dpaa2_ni_set_hash(device_t, uint64_t);
424 static int dpaa2_ni_set_dist_key(device_t, enum dpaa2_ni_dist_mode, uint64_t);
425 
426 /* Buffers and buffer pools */
427 static int dpaa2_ni_seed_buf_pool(struct dpaa2_ni_softc *, uint32_t);
428 static int dpaa2_ni_seed_rxbuf(struct dpaa2_ni_softc *, struct dpaa2_buf *);
429 static int dpaa2_ni_seed_txbuf(struct dpaa2_ni_softc *, struct dpaa2_buf *);
430 static int dpaa2_ni_seed_chan_storage(struct dpaa2_ni_softc *,
431     struct dpaa2_ni_channel *);
432 
433 /* Frame descriptor routines */
434 static int dpaa2_ni_build_fd(struct dpaa2_ni_softc *, struct dpaa2_ni_tx_ring *,
435     struct dpaa2_buf *, bus_dma_segment_t *, int, struct dpaa2_fd *);
436 static int dpaa2_ni_fd_err(struct dpaa2_fd *);
437 static uint32_t dpaa2_ni_fd_data_len(struct dpaa2_fd *);
438 static int dpaa2_ni_fd_format(struct dpaa2_fd *);
439 static bool dpaa2_ni_fd_short_len(struct dpaa2_fd *);
440 static int dpaa2_ni_fd_offset(struct dpaa2_fd *);
441 
442 /* Various subroutines */
443 static int dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *, uint16_t, uint16_t);
444 static int dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *, uint8_t *);
445 static int dpaa2_ni_chan_storage_next(struct dpaa2_ni_channel *,
446     struct dpaa2_dq **);
447 
448 /* Network interface routines */
449 static void dpaa2_ni_init(void *);
450 static int  dpaa2_ni_transmit(if_t, struct mbuf *);
451 static void dpaa2_ni_qflush(if_t);
452 static int  dpaa2_ni_ioctl(if_t, u_long, caddr_t);
453 static int  dpaa2_ni_update_mac_filters(if_t);
454 static u_int dpaa2_ni_add_maddr(void *, struct sockaddr_dl *, u_int);
455 
456 /* Interrupt handlers */
457 static void dpaa2_ni_intr(void *);
458 
459 /* MII handlers */
460 static void dpaa2_ni_miibus_statchg(device_t);
461 static int  dpaa2_ni_media_change(if_t);
462 static void dpaa2_ni_media_status(if_t, struct ifmediareq *);
463 static void dpaa2_ni_media_tick(void *);
464 
465 /* DMA mapping callback */
466 static void dpaa2_ni_dmamap_cb(void *, bus_dma_segment_t *, int, int);
467 
468 /* Tx/Rx routines. */
469 static void dpaa2_ni_poll(void *);
470 static void dpaa2_ni_tx_locked(struct dpaa2_ni_softc *,
471     struct dpaa2_ni_tx_ring *, struct mbuf *);
472 static void dpaa2_ni_bp_task(void *, int);
473 
474 /* Tx/Rx subroutines */
475 static int  dpaa2_ni_consume_frames(struct dpaa2_ni_channel *,
476     struct dpaa2_ni_fq **, uint32_t *);
477 static int  dpaa2_ni_rx(struct dpaa2_ni_channel *, struct dpaa2_ni_fq *,
478     struct dpaa2_fd *);
479 static int  dpaa2_ni_rx_err(struct dpaa2_ni_channel *, struct dpaa2_ni_fq *,
480     struct dpaa2_fd *);
481 static int  dpaa2_ni_tx_conf(struct dpaa2_ni_channel *, struct dpaa2_ni_fq *,
482     struct dpaa2_fd *);
483 
484 /* sysctl(9) */
485 static int dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS);
486 static int dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS);
487 static int dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS);
488 
489 static int
490 dpaa2_ni_probe(device_t dev)
491 {
492 	/* The DPNI device is added by the parent resource container itself. */
493 	device_set_desc(dev, "DPAA2 Network Interface");
494 	return (BUS_PROBE_DEFAULT);
495 }
496 
497 static int
498 dpaa2_ni_attach(device_t dev)
499 {
500 	device_t pdev = device_get_parent(dev);
501 	device_t child = dev;
502 	device_t mcp_dev;
503 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
504 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
505 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
506 	struct dpaa2_devinfo *mcp_dinfo;
507 	struct dpaa2_cmd cmd;
508 	uint16_t rc_token, ni_token;
509 	if_t ifp;
510 	char tq_name[32];
511 	int error;
512 
513 	sc->dev = dev;
514 	sc->ifp = NULL;
515 	sc->miibus = NULL;
516 	sc->mii = NULL;
517 	sc->media_status = 0;
518 	sc->if_flags = 0;
519 	sc->link_state = LINK_STATE_UNKNOWN;
520 	sc->buf_align = 0;
521 
522 	/* For debug purposes only! */
523 	sc->rx_anomaly_frames = 0;
524 	sc->rx_single_buf_frames = 0;
525 	sc->rx_sg_buf_frames = 0;
526 	sc->rx_enq_rej_frames = 0;
527 	sc->rx_ieoi_err_frames = 0;
528 	sc->tx_single_buf_frames = 0;
529 	sc->tx_sg_frames = 0;
530 
531 	DPAA2_ATOMIC_XCHG(&sc->buf_num, 0);
532 	DPAA2_ATOMIC_XCHG(&sc->buf_free, 0);
533 
534 	sc->bp_dmat = NULL;
535 	sc->st_dmat = NULL;
536 	sc->rxd_dmat = NULL;
537 	sc->qos_dmat = NULL;
538 
539 	sc->qos_kcfg.type = DPAA2_BUF_STORE;
540 	sc->qos_kcfg.store.dmap = NULL;
541 	sc->qos_kcfg.store.paddr = 0;
542 	sc->qos_kcfg.store.vaddr = NULL;
543 
544 	sc->rxd_kcfg.type = DPAA2_BUF_STORE;
545 	sc->rxd_kcfg.store.dmap = NULL;
546 	sc->rxd_kcfg.store.paddr = 0;
547 	sc->rxd_kcfg.store.vaddr = NULL;
548 
549 	sc->mac.dpmac_id = 0;
550 	sc->mac.phy_dev = NULL;
551 	memset(sc->mac.addr, 0, ETHER_ADDR_LEN);
552 
553 	error = bus_alloc_resources(sc->dev, dpaa2_ni_spec, sc->res);
554 	if (error) {
555 		device_printf(dev, "%s: failed to allocate resources: "
556 		    "error=%d\n", __func__, error);
557 		goto err_exit;
558 	}
559 
560 	/* Obtain MC portal. */
561 	mcp_dev = (device_t) rman_get_start(sc->res[MCP_RID(0)]);
562 	mcp_dinfo = device_get_ivars(mcp_dev);
563 	dinfo->portal = mcp_dinfo->portal;
564 
565 	mtx_init(&sc->lock, device_get_nameunit(dev), "dpaa2_ni", MTX_DEF);
566 
567 	/* Allocate network interface */
568 	ifp = if_alloc(IFT_ETHER);
569 	if (ifp == NULL) {
570 		device_printf(dev, "%s: failed to allocate network interface\n",
571 		    __func__);
572 		goto err_exit;
573 	}
574 	sc->ifp = ifp;
575 	if_initname(ifp, DPAA2_NI_IFNAME, device_get_unit(sc->dev));
576 
577 	if_setsoftc(ifp, sc);
578 	if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST);
579 	if_setinitfn(ifp, dpaa2_ni_init);
580 	if_setioctlfn(ifp, dpaa2_ni_ioctl);
581 	if_settransmitfn(ifp, dpaa2_ni_transmit);
582 	if_setqflushfn(ifp, dpaa2_ni_qflush);
583 
584 	if_setcapabilities(ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU);
585 	if_setcapenable(ifp, if_getcapabilities(ifp));
586 
587 	DPAA2_CMD_INIT(&cmd);
588 
589 	/* Open resource container and network interface object. */
590 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
591 	if (error) {
592 		device_printf(dev, "%s: failed to open resource container: "
593 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
594 		goto err_exit;
595 	}
596 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
597 	if (error) {
598 		device_printf(dev, "%s: failed to open network interface: "
599 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
600 		goto close_rc;
601 	}
602 
603 	/*
604 	 * XXX-DSL: Release new buffers on Buffer Pool State Change Notification
605 	 *          (BPSCN) returned as a result of the VDQ command instead.
606 	 *          It is similar to CDAN processed in dpaa2_io_intr().
607 	 */
608 	/* Create a taskqueue thread to release new buffers to the pool. */
609 	TASK_INIT(&sc->bp_task, 0, dpaa2_ni_bp_task, sc);
610 	bzero(tq_name, sizeof(tq_name));
611 	snprintf(tq_name, sizeof(tq_name), "%s_tqbp",
612 	    device_get_nameunit(dev));
613 	sc->bp_taskq = taskqueue_create(tq_name, M_WAITOK,
614 	    taskqueue_thread_enqueue, &sc->bp_taskq);
615 	if (sc->bp_taskq == NULL) {
616 		device_printf(dev, "%s: failed to allocate task queue: %s\n",
617 		    __func__, tq_name);
618 		goto close_ni;
619 	}
620 	taskqueue_start_threads(&sc->bp_taskq, 1, PI_NET, "%s", tq_name);
621 
622 	error = dpaa2_ni_setup(dev);
623 	if (error) {
624 		device_printf(dev, "%s: failed to setup DPNI: error=%d\n",
625 		    __func__, error);
626 		goto close_ni;
627 	}
628 	error = dpaa2_ni_setup_channels(dev);
629 	if (error) {
630 		device_printf(dev, "%s: failed to setup QBMan channels: "
631 		    "error=%d\n", __func__, error);
632 		goto close_ni;
633 	}
634 
635 	error = dpaa2_ni_bind(dev);
636 	if (error) {
637 		device_printf(dev, "%s: failed to bind DPNI: error=%d\n",
638 		    __func__, error);
639 		goto close_ni;
640 	}
641 	error = dpaa2_ni_setup_irqs(dev);
642 	if (error) {
643 		device_printf(dev, "%s: failed to setup IRQs: error=%d\n",
644 		    __func__, error);
645 		goto close_ni;
646 	}
647 	error = dpaa2_ni_setup_sysctls(sc);
648 	if (error) {
649 		device_printf(dev, "%s: failed to setup sysctls: error=%d\n",
650 		    __func__, error);
651 		goto close_ni;
652 	}
653 
654 	ether_ifattach(sc->ifp, sc->mac.addr);
655 	callout_init(&sc->mii_callout, 0);
656 
657 	return (0);
658 
659 close_ni:
660 	DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
661 close_rc:
662 	DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
663 err_exit:
664 	return (ENXIO);
665 }
666 
667 static void
668 dpaa2_ni_fixed_media_status(if_t ifp, struct ifmediareq *ifmr)
669 {
670 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
671 
672 	DPNI_LOCK(sc);
673 	ifmr->ifm_count = 0;
674 	ifmr->ifm_mask = 0;
675 	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
676 	ifmr->ifm_current = ifmr->ifm_active =
677 	    sc->fixed_ifmedia.ifm_cur->ifm_media;
678 
679 	/*
680 	 * In non-PHY use cases, we need to signal link state up, otherwise
681 	 * certain things requiring a link event (e.g., an async DHCP client)
682 	 * from devd do not happen.
683 	 */
684 	if (if_getlinkstate(ifp) == LINK_STATE_UNKNOWN) {
685 		if_link_state_change(ifp, LINK_STATE_UP);
686 	}
687 
688 	/*
689 	 * TODO: Check the status of the link partner (DPMAC, DPNI or other) and
690 	 * reset if down. This is different from DPAA2_MAC_LINK_TYPE_PHY, as
691 	 * the MC firmware sets the status, instead of us telling the MC what
692 	 * it is.
693 	 */
694 	DPNI_UNLOCK(sc);
695 
696 	return;
697 }
698 
699 static void
700 dpaa2_ni_setup_fixed_link(struct dpaa2_ni_softc *sc)
701 {
702 	/*
703 	 * FIXME: When the DPNI is connected to a DPMAC, we can get the
704 	 * 'apparent' speed from it.
705 	 */
706 	sc->fixed_link = true;
707 
708 	ifmedia_init(&sc->fixed_ifmedia, 0, dpaa2_ni_media_change,
709 		     dpaa2_ni_fixed_media_status);
710 	ifmedia_add(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL);
711 	ifmedia_set(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T);
712 }
713 
714 static int
715 dpaa2_ni_detach(device_t dev)
716 {
717 	/* TBD */
718 	return (0);
719 }
720 
721 /**
722  * @brief Configure DPAA2 network interface object.
723  */
724 static int
725 dpaa2_ni_setup(device_t dev)
726 {
727 	device_t pdev = device_get_parent(dev);
728 	device_t child = dev;
729 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
730 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
731 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
732 	struct dpaa2_ep_desc ep1_desc, ep2_desc; /* endpoint descriptors */
733 	struct dpaa2_cmd cmd;
734 	uint8_t eth_bca[ETHER_ADDR_LEN]; /* broadcast physical address */
735 	uint16_t rc_token, ni_token, mac_token;
736 	struct dpaa2_mac_attr attr;
737 	enum dpaa2_mac_link_type link_type;
738 	uint32_t link;
739 	int error;
740 
741 	DPAA2_CMD_INIT(&cmd);
742 
743 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
744 	if (error) {
745 		device_printf(dev, "%s: failed to open resource container: "
746 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
747 		goto err_exit;
748 	}
749 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
750 	if (error) {
751 		device_printf(dev, "%s: failed to open network interface: "
752 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
753 		goto close_rc;
754 	}
755 
756 	/* Check if we can work with this DPNI object. */
757 	error = DPAA2_CMD_NI_GET_API_VERSION(dev, child, &cmd, &sc->api_major,
758 	    &sc->api_minor);
759 	if (error) {
760 		device_printf(dev, "%s: failed to get DPNI API version\n",
761 		    __func__);
762 		goto close_ni;
763 	}
764 	if (dpaa2_ni_cmp_api_version(sc, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
765 		device_printf(dev, "%s: DPNI API version %u.%u not supported, "
766 		    "need >= %u.%u\n", __func__, sc->api_major, sc->api_minor,
767 		    DPNI_VER_MAJOR, DPNI_VER_MINOR);
768 		error = ENODEV;
769 		goto close_ni;
770 	}
771 
772 	/* Reset the DPNI object. */
773 	error = DPAA2_CMD_NI_RESET(dev, child, &cmd);
774 	if (error) {
775 		device_printf(dev, "%s: failed to reset DPNI: id=%d\n",
776 		    __func__, dinfo->id);
777 		goto close_ni;
778 	}
779 
780 	/* Obtain attributes of the DPNI object. */
781 	error = DPAA2_CMD_NI_GET_ATTRIBUTES(dev, child, &cmd, &sc->attr);
782 	if (error) {
783 		device_printf(dev, "%s: failed to obtain DPNI attributes: "
784 		    "id=%d\n", __func__, dinfo->id);
785 		goto close_ni;
786 	}
787 	if (bootverbose) {
788 		device_printf(dev, "\toptions=%#x queues=%d tx_channels=%d "
789 		    "wriop_version=%#x\n", sc->attr.options, sc->attr.num.queues,
790 		    sc->attr.num.channels, sc->attr.wriop_ver);
791 		device_printf(dev, "\ttraffic classes: rx=%d tx=%d "
792 		    "cgs_groups=%d\n", sc->attr.num.rx_tcs, sc->attr.num.tx_tcs,
793 		    sc->attr.num.cgs);
794 		device_printf(dev, "\ttable entries: mac=%d vlan=%d qos=%d "
795 		    "fs=%d\n", sc->attr.entries.mac, sc->attr.entries.vlan,
796 		    sc->attr.entries.qos, sc->attr.entries.fs);
797 		device_printf(dev, "\tkey sizes: qos=%d fs=%d\n",
798 		    sc->attr.key_size.qos, sc->attr.key_size.fs);
799 	}
800 
801 	/* Configure buffer layouts of the DPNI queues. */
802 	error = dpaa2_ni_set_buf_layout(dev);
803 	if (error) {
804 		device_printf(dev, "%s: failed to configure buffer layout\n",
805 		    __func__);
806 		goto close_ni;
807 	}
808 
809 	/* Configure DMA resources. */
810 	error = dpaa2_ni_setup_dma(sc);
811 	if (error) {
812 		device_printf(dev, "%s: failed to setup DMA\n", __func__);
813 		goto close_ni;
814 	}
815 
816 	/* Setup link between DPNI and an object it's connected to. */
817 	ep1_desc.obj_id = dinfo->id;
818 	ep1_desc.if_id = 0; /* DPNI has only one endpoint */
819 	ep1_desc.type = dinfo->dtype;
820 
821 	error = DPAA2_CMD_RC_GET_CONN(dev, child, DPAA2_CMD_TK(&cmd, rc_token),
822 	    &ep1_desc, &ep2_desc, &link);
823 	if (error) {
824 		device_printf(dev, "%s: failed to obtain an object DPNI is "
825 		    "connected to: error=%d\n", __func__, error);
826 	} else {
827 		device_printf(dev, "connected to %s (id=%d)\n",
828 		    dpaa2_ttos(ep2_desc.type), ep2_desc.obj_id);
829 
830 		error = dpaa2_ni_set_mac_addr(dev);
831 		if (error) {
832 			device_printf(dev, "%s: failed to set MAC address: "
833 			    "error=%d\n", __func__, error);
834 		}
835 
836 		if (ep2_desc.type == DPAA2_DEV_MAC) {
837 			/*
838 			 * This is the simplest case when DPNI is connected to
839 			 * DPMAC directly.
840 			 */
841 			sc->mac.dpmac_id = ep2_desc.obj_id;
842 
843 			link_type = DPAA2_MAC_LINK_TYPE_NONE;
844 
845 			/*
846 			 * Need to determine if DPMAC type is PHY (attached to
847 			 * conventional MII PHY) or FIXED (usually SFP/SerDes,
848 			 * link state managed by MC firmware).
849 			 */
850 			error = DPAA2_CMD_MAC_OPEN(sc->dev, child,
851 			    DPAA2_CMD_TK(&cmd, rc_token), sc->mac.dpmac_id,
852 			    &mac_token);
853 			/*
854 			 * Under VFIO, the DPMAC might be sitting in another
855 			 * container (DPRC) we don't have access to.
856 			 * Assume DPAA2_MAC_LINK_TYPE_FIXED if this is
857 			 * the case.
858 			 */
859 			if (error) {
860 				device_printf(dev, "%s: failed to open "
861 				    "connected DPMAC: %d (assuming in other DPRC)\n", __func__,
862 				    sc->mac.dpmac_id);
863 				link_type = DPAA2_MAC_LINK_TYPE_FIXED;
864 			} else {
865 				error = DPAA2_CMD_MAC_GET_ATTRIBUTES(dev, child,
866 				    &cmd, &attr);
867 				if (error) {
868 					device_printf(dev, "%s: failed to get "
869 					    "DPMAC attributes: id=%d, "
870 					    "error=%d\n", __func__, dinfo->id,
871 					    error);
872 				} else {
873 					link_type = attr.link_type;
874 				}
875 			}
876 			DPAA2_CMD_MAC_CLOSE(dev, child, &cmd);
877 
878 			if (link_type == DPAA2_MAC_LINK_TYPE_FIXED) {
879 				device_printf(dev, "connected DPMAC is in FIXED "
880 				    "mode\n");
881 				dpaa2_ni_setup_fixed_link(sc);
882 			} else if (link_type == DPAA2_MAC_LINK_TYPE_PHY) {
883 				device_printf(dev, "connected DPMAC is in PHY "
884 				    "mode\n");
885 				error = DPAA2_MC_GET_PHY_DEV(dev,
886 				    &sc->mac.phy_dev, sc->mac.dpmac_id);
887 				if (error == 0) {
888 					error = MEMAC_MDIO_SET_NI_DEV(
889 					    sc->mac.phy_dev, dev);
890 					if (error != 0) {
891 						device_printf(dev, "%s: failed "
892 						    "to set dpni dev on memac "
893 						    "mdio dev %s: error=%d\n",
894 						    __func__,
895 						    device_get_nameunit(
896 						    sc->mac.phy_dev), error);
897 					}
898 				}
899 				if (error == 0) {
900 					error = MEMAC_MDIO_GET_PHY_LOC(
901 					    sc->mac.phy_dev, &sc->mac.phy_loc);
902 					if (error == ENODEV) {
903 						error = 0;
904 					}
905 					if (error != 0) {
906 						device_printf(dev, "%s: failed "
907 						    "to get phy location from "
908 						    "memac mdio dev %s: error=%d\n",
909 						    __func__, device_get_nameunit(
910 						    sc->mac.phy_dev), error);
911 					}
912 				}
913 				if (error == 0) {
914 					error = mii_attach(sc->mac.phy_dev,
915 					    &sc->miibus, sc->ifp,
916 					    dpaa2_ni_media_change,
917 					    dpaa2_ni_media_status,
918 					    BMSR_DEFCAPMASK, sc->mac.phy_loc,
919 					    MII_OFFSET_ANY, 0);
920 					if (error != 0) {
921 						device_printf(dev, "%s: failed "
922 						    "to attach to miibus: "
923 						    "error=%d\n",
924 						    __func__, error);
925 					}
926 				}
927 				if (error == 0) {
928 					sc->mii = device_get_softc(sc->miibus);
929 				}
930 			} else {
931 				device_printf(dev, "%s: DPMAC link type is not "
932 				    "supported\n", __func__);
933 			}
934 		} else if (ep2_desc.type == DPAA2_DEV_NI ||
935 			   ep2_desc.type == DPAA2_DEV_MUX ||
936 			   ep2_desc.type == DPAA2_DEV_SW) {
937 			dpaa2_ni_setup_fixed_link(sc);
938 		}
939 	}
940 
941 	/* Select mode to enqueue frames. */
942 	/* ... TBD ... */
943 
944 	/*
945 	 * Update link configuration to enable Rx/Tx pause frames support.
946 	 *
947 	 * NOTE: MC may generate an interrupt to the DPMAC and request changes
948 	 *       in link configuration. It might be necessary to attach miibus
949 	 *       and PHY before this point.
950 	 */
951 	error = dpaa2_ni_set_pause_frame(dev);
952 	if (error) {
953 		device_printf(dev, "%s: failed to configure Rx/Tx pause "
954 		    "frames\n", __func__);
955 		goto close_ni;
956 	}
957 
958 	/* Configure ingress traffic classification. */
959 	error = dpaa2_ni_set_qos_table(dev);
960 	if (error) {
961 		device_printf(dev, "%s: failed to configure QoS table: "
962 		    "error=%d\n", __func__, error);
963 		goto close_ni;
964 	}
965 
966 	/* Add broadcast physical address to the MAC filtering table. */
967 	memset(eth_bca, 0xff, ETHER_ADDR_LEN);
968 	error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, DPAA2_CMD_TK(&cmd,
969 	    ni_token), eth_bca);
970 	if (error) {
971 		device_printf(dev, "%s: failed to add broadcast physical "
972 		    "address to the MAC filtering table\n", __func__);
973 		goto close_ni;
974 	}
975 
976 	/* Set the maximum allowed length for received frames. */
977 	error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd, DPAA2_ETH_MFL);
978 	if (error) {
979 		device_printf(dev, "%s: failed to set maximum length for "
980 		    "received frames\n", __func__);
981 		goto close_ni;
982 	}
983 
984 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
985 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
986 	return (0);
987 
988 close_ni:
989 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
990 close_rc:
991 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
992 err_exit:
993 	return (error);
994 }
995 
996 /**
997  * @brief Сonfigure QBMan channels and register data availability notifications.
998  */
999 static int
1000 dpaa2_ni_setup_channels(device_t dev)
1001 {
1002 	device_t pdev = device_get_parent(dev);
1003 	device_t child = dev;
1004 	device_t io_dev, con_dev;
1005 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
1006 	struct dpaa2_ni_channel *channel;
1007 	struct dpaa2_con_softc *consc;
1008 	struct dpaa2_con_notif_cfg notif_cfg;
1009 	struct dpaa2_devinfo *rc_info = device_get_ivars(pdev);
1010 	struct dpaa2_devinfo *io_info;
1011 	struct dpaa2_devinfo *con_info;
1012 	struct dpaa2_io_notif_ctx *ctx;
1013 	struct dpaa2_buf *buf;
1014 	struct dpaa2_cmd cmd;
1015 	struct sysctl_ctx_list *sysctl_ctx;
1016 	struct sysctl_oid *node;
1017 	struct sysctl_oid_list *parent;
1018 	uint32_t i, num_chan;
1019 	uint16_t rc_token, con_token;
1020 	int error;
1021 
1022 	/* Calculate the number of channels based on the allocated resources. */
1023 	for (i = 0; i < IO_RES_NUM; i++) {
1024 		if (!sc->res[IO_RID(i)]) {
1025 			break;
1026 		}
1027 	}
1028 	num_chan = i;
1029 	for (i = 0; i < CON_RES_NUM; i++) {
1030 		if (!sc->res[CON_RID(i)]) {
1031 			break;
1032 		}
1033 	}
1034 	num_chan = i < num_chan ? i : num_chan;
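	/* Cap the result by the driver maximum and the number of DPNI queues. */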
1035 	sc->chan_n = num_chan > DPAA2_NI_MAX_CHANNELS
1036 	    ? DPAA2_NI_MAX_CHANNELS : num_chan;
1037 	sc->chan_n = sc->chan_n > sc->attr.num.queues
1038 	    ? sc->attr.num.queues : sc->chan_n;
1039 
1040 	device_printf(dev, "channels=%d\n", sc->chan_n);
1041 
1042 	sysctl_ctx = device_get_sysctl_ctx(sc->dev);
1043 	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1044 	node = SYSCTL_ADD_NODE(sysctl_ctx, parent, OID_AUTO, "channels",
1045 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channels");
1046 	parent = SYSCTL_CHILDREN(node);
1047 
1048 	/* Setup channels for the portal. */
1049 	for (uint32_t i = 0; i < sc->chan_n; i++) {
1050 		io_dev = (device_t) rman_get_start(sc->res[IO_RID(i)]);
1051 		io_info = device_get_ivars(io_dev);
1052 
1053 		con_dev = (device_t) rman_get_start(sc->res[CON_RID(i)]);
1054 		consc = device_get_softc(con_dev);
1055 		con_info = device_get_ivars(con_dev);
1056 
1057 		DPAA2_CMD_INIT(&cmd);
1058 
1059 		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rc_info->id,
1060 		    &rc_token);
1061 		if (error) {
1062 			device_printf(dev, "%s: failed to open resource "
1063 			    "container: id=%d, error=%d\n", __func__,
1064 			    rc_info->id, error);
1065 			return (error);
1066 		}
1067 		error = DPAA2_CMD_CON_OPEN(dev, child, &cmd, con_info->id,
1068 		    &con_token);
1069 		if (error) {
1070 			device_printf(dev, "%s: failed to open DPCON: id=%d, "
1071 			    "error=%d\n", __func__, con_info->id, error);
1072 			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
1073 			    rc_token));
1074 			return (error);
1075 		}
1076 
1077 		error = DPAA2_CMD_CON_ENABLE(dev, child, &cmd);
1078 		if (error) {
1079 			device_printf(dev, "%s: failed to enable channel: "
1080 			    "dpcon_id=%d, chan_id=%d\n", __func__, con_info->id,
1081 			    consc->attr.chan_id);
1082 			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
1083 			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
1084 			    rc_token));
1085 			return (error);
1086 		}
1087 
1088 		channel = malloc(sizeof(struct dpaa2_ni_channel), M_DPAA2_NI,
1089 		    M_WAITOK | M_ZERO);
1090 		if (!channel) {
1091 			device_printf(dev, "%s: failed to allocate a channel\n",
1092 			    __func__);
1093 			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
1094 			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
1095 			    rc_token));
1096 			return (ENOMEM);
1097 		}
1098 
1099 		sc->channels[i] = channel;
1100 
1101 		channel->id = consc->attr.chan_id;
1102 		channel->flowid = i;
1103 		channel->ni_dev = dev;
1104 		channel->io_dev = io_dev;
1105 		channel->con_dev = con_dev;
1106 		channel->recycled_n = 0;
1107 		channel->tx_frames = 0; /* for debug purposes */
1108 		channel->tx_dropped = 0; /* for debug purposes */
1109 		channel->rxq_n = 0;
1110 
1111 		buf = &channel->store;
1112 		buf->type = DPAA2_BUF_STORE;
1113 		buf->store.dmat = NULL;
1114 		buf->store.dmap = NULL;
1115 		buf->store.paddr = 0;
1116 		buf->store.vaddr = NULL;
1117 
1118 		/* Setup WQ channel notification context. */
1119 		ctx = &channel->ctx;
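		/*
		 * The raw pointer to the context round-trips through QMan, so
		 * the CDAN handler can recover this channel's context later.
		 */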
1120 		ctx->qman_ctx = (uint64_t) ctx;
1121 		ctx->cdan_en = true;
1122 		ctx->fq_chan_id = channel->id;
1123 		ctx->io_dev = channel->io_dev;
1124 		ctx->channel = channel;
1125 		ctx->poll = dpaa2_ni_poll;
1126 
1127 		/* Register the new notification context. */
1128 		error = DPAA2_SWP_CONF_WQ_CHANNEL(channel->io_dev, ctx);
1129 		if (error) {
1130 			device_printf(dev, "%s: failed to register notification "
1131 			    "context\n", __func__);
1132 			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
1133 			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
1134 			    rc_token));
1135 			return (error);
1136 		}
1137 
1138 		/* Register DPCON notification with Management Complex. */
1139 		notif_cfg.dpio_id = io_info->id;
1140 		notif_cfg.prior = 0;
1141 		notif_cfg.qman_ctx = ctx->qman_ctx;
1142 		error = DPAA2_CMD_CON_SET_NOTIF(dev, child, &cmd, &notif_cfg);
1143 		if (error) {
1144 			device_printf(dev, "%s: failed to set DPCON "
1145 			    "notification: dpcon_id=%d, chan_id=%d\n", __func__,
1146 			    con_info->id, consc->attr.chan_id);
1147 			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
1148 			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
1149 			    rc_token));
1150 			return (error);
1151 		}
1152 
1153 		/* Allocate an initial number of Rx buffers and the channel storage. */
1154 		error = dpaa2_ni_seed_buf_pool(sc, DPAA2_NI_BUFS_INIT);
1155 		if (error) {
1156 			device_printf(dev, "%s: failed to seed buffer pool\n",
1157 			    __func__);
1158 			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
1159 			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
1160 			    rc_token));
1161 			return (error);
1162 		}
1163 		error = dpaa2_ni_seed_chan_storage(sc, channel);
1164 		if (error) {
1165 			device_printf(dev, "%s: failed to seed channel "
1166 			    "storage\n", __func__);
1167 			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
1168 			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
1169 			    rc_token));
1170 			return (error);
1171 		}
1172 
1173 		/* Prepare queues for this channel. */
1174 		error = dpaa2_ni_setup_fq(dev, channel, DPAA2_NI_QUEUE_TX_CONF);
1175 		if (error) {
1176 			device_printf(dev, "%s: failed to prepare TxConf "
1177 			    "queue: error=%d\n", __func__, error);
1178 			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
1179 			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
1180 			    rc_token));
1181 			return (error);
1182 		}
1183 		error = dpaa2_ni_setup_fq(dev, channel, DPAA2_NI_QUEUE_RX);
1184 		if (error) {
1185 			device_printf(dev, "%s: failed to prepare Rx queue: "
1186 			    "error=%d\n", __func__, error);
1187 			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
1188 			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
1189 			    rc_token));
1190 			return (error);
1191 		}
1192 
1193 		if (bootverbose) {
1194 			device_printf(dev, "channel: dpio_id=%d "
1195 			    "dpcon_id=%d chan_id=%d, priorities=%d\n",
1196 			    io_info->id, con_info->id, channel->id,
1197 			    consc->attr.prior_num);
1198 		}
1199 
1200 		(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
1201 		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
1202 		    rc_token));
1203 	}
1204 
1205 	/* There is exactly one Rx error queue per DPNI. */
1206 	error = dpaa2_ni_setup_fq(dev, sc->channels[0], DPAA2_NI_QUEUE_RX_ERR);
1207 	if (error) {
1208 		device_printf(dev, "%s: failed to prepare RxError queue: "
1209 		    "error=%d\n", __func__, error);
1210 		return (error);
1211 	}
1212 
1213 	return (0);
1214 }
1215 
1216 /**
1217  * @brief Performs an initial configuration of the frame queues.
1218  */
1219 static int
1220 dpaa2_ni_setup_fq(device_t dev, struct dpaa2_ni_channel *chan,
1221     enum dpaa2_ni_queue_type queue_type)
1222 {
1223 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
1224 	struct dpaa2_ni_fq *fq;
1225 
1226 	switch (queue_type) {
1227 	case DPAA2_NI_QUEUE_TX_CONF:
1228 		/* One queue per channel. */
1229 		fq = &chan->txc_queue;
1230 
1231 		fq->consume = dpaa2_ni_tx_conf;
1232 		fq->chan = chan;
1233 		fq->flowid = chan->flowid;
1234 		fq->tc = 0; /* ignored */
1235 		fq->type = queue_type;
1236 
1237 		break;
1238 	case DPAA2_NI_QUEUE_RX:
1239 		KASSERT(sc->attr.num.rx_tcs <= DPAA2_NI_MAX_TCS,
1240 		    ("too many Rx traffic classes: rx_tcs=%d\n",
1241 		    sc->attr.num.rx_tcs));
1242 
1243 		/* One queue per Rx traffic class within a channel. */
1244 		for (int i = 0; i < sc->attr.num.rx_tcs; i++) {
1245 			fq = &chan->rx_queues[i];
1246 
1247 			fq->consume = dpaa2_ni_rx;
1248 			fq->chan = chan;
1249 			fq->flowid = chan->flowid;
1250 			fq->tc = (uint8_t) i;
1251 			fq->type = queue_type;
1252 
1253 			chan->rxq_n++;
1254 		}
1255 		break;
1256 	case DPAA2_NI_QUEUE_RX_ERR:
1257 		/* One queue per network interface. */
1258 		fq = &sc->rxe_queue;
1259 
1260 		fq->consume = dpaa2_ni_rx_err;
1261 		fq->chan = chan;
1262 		fq->flowid = 0; /* ignored */
1263 		fq->tc = 0; /* ignored */
1264 		fq->type = queue_type;
1265 		break;
1266 	default:
1267 		device_printf(dev, "%s: unexpected frame queue type: %d\n",
1268 		    __func__, queue_type);
1269 		return (EINVAL);
1270 	}
1271 
1272 	return (0);
1273 }
1274 
1275 /**
1276  * @brief Bind DPNI to DPBPs, DPIOs, frame queues and channels.
1277  */
1278 static int
1279 dpaa2_ni_bind(device_t dev)
1280 {
1281 	device_t pdev = device_get_parent(dev);
1282 	device_t child = dev;
1283 	device_t bp_dev;
1284 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
1285 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1286 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1287 	struct dpaa2_devinfo *bp_info;
1288 	struct dpaa2_cmd cmd;
1289 	struct dpaa2_ni_pools_cfg pools_cfg;
1290 	struct dpaa2_ni_err_cfg err_cfg;
1291 	struct dpaa2_ni_channel *chan;
1292 	uint16_t rc_token, ni_token;
1293 	int error;
1294 
1295 	DPAA2_CMD_INIT(&cmd);
1296 
1297 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1298 	if (error) {
1299 		device_printf(dev, "%s: failed to open resource container: "
1300 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
1301 		goto err_exit;
1302 	}
1303 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1304 	if (error) {
1305 		device_printf(dev, "%s: failed to open network interface: "
1306 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
1307 		goto close_rc;
1308 	}
1309 
1310 	/* Select buffer pool (only one available at the moment). */
1311 	bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
1312 	bp_info = device_get_ivars(bp_dev);
1313 
1314 	/* Configure the buffer pool. */
1315 	pools_cfg.pools_num = 1;
1316 	pools_cfg.pools[0].bp_obj_id = bp_info->id;
1317 	pools_cfg.pools[0].backup_flag = 0;
1318 	pools_cfg.pools[0].buf_sz = sc->buf_sz;
1319 	error = DPAA2_CMD_NI_SET_POOLS(dev, child, &cmd, &pools_cfg);
1320 	if (error) {
1321 		device_printf(dev, "%s: failed to set buffer pools\n", __func__);
1322 		goto close_ni;
1323 	}
1324 
1325 	/* Setup ingress traffic distribution. */
1326 	error = dpaa2_ni_setup_rx_dist(dev);
1327 	if (error && error != EOPNOTSUPP) {
1328 		device_printf(dev, "%s: failed to setup ingress traffic "
1329 		    "distribution\n", __func__);
1330 		goto close_ni;
1331 	}
1332 	if (bootverbose && error == EOPNOTSUPP) {
1333 		device_printf(dev, "Ingress traffic distribution not "
1334 		    "supported\n");
1335 	}
1336 
1337 	/* Configure handling of error frames. */
1338 	err_cfg.err_mask = DPAA2_NI_FAS_RX_ERR_MASK;
1339 	err_cfg.set_err_fas = false;
1340 	err_cfg.action = DPAA2_NI_ERR_DISCARD;
1341 	error = DPAA2_CMD_NI_SET_ERR_BEHAVIOR(dev, child, &cmd, &err_cfg);
1342 	if (error) {
1343 		device_printf(dev, "%s: failed to set errors behavior\n",
1344 		    __func__);
1345 		goto close_ni;
1346 	}
1347 
1348 	/* Configure channel queues to generate CDANs. */
1349 	for (uint32_t i = 0; i < sc->chan_n; i++) {
1350 		chan = sc->channels[i];
1351 
1352 		/* Setup Rx flows. */
1353 		for (uint32_t j = 0; j < chan->rxq_n; j++) {
1354 			error = dpaa2_ni_setup_rx_flow(dev, &chan->rx_queues[j]);
1355 			if (error) {
1356 				device_printf(dev, "%s: failed to setup Rx "
1357 				    "flow: error=%d\n", __func__, error);
1358 				goto close_ni;
1359 			}
1360 		}
1361 
1362 		/* Setup Tx flow. */
1363 		error = dpaa2_ni_setup_tx_flow(dev, &chan->txc_queue);
1364 		if (error) {
1365 			device_printf(dev, "%s: failed to setup Tx "
1366 			    "flow: error=%d\n", __func__, error);
1367 			goto close_ni;
1368 		}
1369 	}
1370 
1371 	/* Configure RxError queue to generate CDAN. */
1372 	error = dpaa2_ni_setup_rx_err_flow(dev, &sc->rxe_queue);
1373 	if (error) {
1374 		device_printf(dev, "%s: failed to setup RxError flow: "
1375 		    "error=%d\n", __func__, error);
1376 		goto close_ni;
1377 	}
1378 
1379 	/*
1380 	 * Get the Queuing Destination ID (QDID) that should be used for frame
1381 	 * enqueue operations.
1382 	 */
1383 	error = DPAA2_CMD_NI_GET_QDID(dev, child, &cmd, DPAA2_NI_QUEUE_TX,
1384 	    &sc->tx_qdid);
1385 	if (error) {
1386 		device_printf(dev, "%s: failed to get Tx queuing destination "
1387 		    "ID\n", __func__);
1388 		goto close_ni;
1389 	}
1390 
1391 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1392 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1393 	return (0);
1394 
1395 close_ni:
1396 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1397 close_rc:
1398 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1399 err_exit:
1400 	return (error);
1401 }
1402 
1403 /**
1404  * @brief Setup ingress traffic distribution.
1405  *
1406  * NOTE: Ingress traffic distribution is valid only when the DPNI_OPT_NO_FS
1407  *	 option hasn't been set for the DPNI and the number of DPNI queues is > 1.
1408  */
1409 static int
1410 dpaa2_ni_setup_rx_dist(device_t dev)
1411 {
1412 	/*
1413 	 * Have the interface implicitly distribute traffic based on the default
1414 	 * hash key.
1415 	 */
1416 	return (dpaa2_ni_set_hash(dev, DPAA2_RXH_DEFAULT));
1417 }
1418 
1419 static int
1420 dpaa2_ni_setup_rx_flow(device_t dev, struct dpaa2_ni_fq *fq)
1421 {
1422 	device_t pdev = device_get_parent(dev);
1423 	device_t child = dev;
1424 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1425 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1426 	struct dpaa2_devinfo *con_info;
1427 	struct dpaa2_cmd cmd;
1428 	struct dpaa2_ni_queue_cfg queue_cfg = {0};
1429 	uint16_t rc_token, ni_token;
1430 	int error;
1431 
1432 	DPAA2_CMD_INIT(&cmd);
1433 
1434 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1435 	if (error) {
1436 		device_printf(dev, "%s: failed to open resource container: "
1437 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
1438 		goto err_exit;
1439 	}
1440 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1441 	if (error) {
1442 		device_printf(dev, "%s: failed to open network interface: "
1443 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
1444 		goto close_rc;
1445 	}
1446 
1447 	/* Obtain DPCON associated with the FQ's channel. */
1448 	con_info = device_get_ivars(fq->chan->con_dev);
1449 
1450 	queue_cfg.type = DPAA2_NI_QUEUE_RX;
1451 	queue_cfg.tc = fq->tc;
1452 	queue_cfg.idx = fq->flowid;
1453 	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
1454 	if (error) {
1455 		device_printf(dev, "%s: failed to obtain Rx queue "
1456 		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
1457 		    queue_cfg.idx);
1458 		goto close_ni;
1459 	}
1460 
1461 	fq->fqid = queue_cfg.fqid;
1462 
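	/*
	 * Stash the FQ pointer in the queue's user context so that frames
	 * dequeued from this FQID can be routed back to their frame queue.
	 */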
1463 	queue_cfg.dest_id = con_info->id;
1464 	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
1465 	queue_cfg.priority = 1;
1466 	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
1467 	queue_cfg.options =
1468 	    DPAA2_NI_QUEUE_OPT_USER_CTX |
1469 	    DPAA2_NI_QUEUE_OPT_DEST;
1470 	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
1471 	if (error) {
1472 		device_printf(dev, "%s: failed to update Rx queue "
1473 		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
1474 		    queue_cfg.idx);
1475 		goto close_ni;
1476 	}
1477 
1478 	if (bootverbose) {
1479 		device_printf(dev, "RX queue idx=%d, tc=%d, chan=%d, fqid=%d, "
1480 		    "user_ctx=%#jx\n", fq->flowid, fq->tc, fq->chan->id,
1481 		    fq->fqid, (uintmax_t) fq);
1482 	}
1483 
1484 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1485 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1486 	return (0);
1487 
1488 close_ni:
1489 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1490 close_rc:
1491 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1492 err_exit:
1493 	return (error);
1494 }
1495 
1496 static int
1497 dpaa2_ni_setup_tx_flow(device_t dev, struct dpaa2_ni_fq *fq)
1498 {
1499 	device_t pdev = device_get_parent(dev);
1500 	device_t child = dev;
1501 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
1502 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1503 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1504 	struct dpaa2_devinfo *con_info;
1505 	struct dpaa2_ni_queue_cfg queue_cfg = {0};
1506 	struct dpaa2_ni_tx_ring *tx;
1507 	struct dpaa2_buf *buf;
1508 	struct dpaa2_cmd cmd;
1509 	uint32_t tx_rings_n = 0;
1510 	uint16_t rc_token, ni_token;
1511 	int error;
1512 
1513 	DPAA2_CMD_INIT(&cmd);
1514 
1515 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1516 	if (error) {
1517 		device_printf(dev, "%s: failed to open resource container: "
1518 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
1519 		goto err_exit;
1520 	}
1521 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1522 	if (error) {
1523 		device_printf(dev, "%s: failed to open network interface: "
1524 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
1525 		goto close_rc;
1526 	}
1527 
1528 	/* Obtain DPCON associated with the FQ's channel. */
1529 	con_info = device_get_ivars(fq->chan->con_dev);
1530 
1531 	KASSERT(sc->attr.num.tx_tcs <= DPAA2_NI_MAX_TCS,
1532 	    ("%s: too many Tx traffic classes: tx_tcs=%d\n", __func__,
1533 	    sc->attr.num.tx_tcs));
1534 	KASSERT(DPAA2_NI_BUFS_PER_TX <= DPAA2_NI_MAX_BPTX,
1535 	    ("%s: too many Tx buffers (%d): max=%d\n", __func__,
1536 	    DPAA2_NI_BUFS_PER_TX, DPAA2_NI_MAX_BPTX));
1537 
1538 	/* Setup Tx rings. */
1539 	for (int i = 0; i < sc->attr.num.tx_tcs; i++) {
1540 		queue_cfg.type = DPAA2_NI_QUEUE_TX;
1541 		queue_cfg.tc = i;
1542 		queue_cfg.idx = fq->flowid;
1543 		queue_cfg.chan_id = fq->chan->id;
1544 
1545 		error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
1546 		if (error) {
1547 			device_printf(dev, "%s: failed to obtain Tx queue "
1548 			    "configuration: tc=%d, flowid=%d\n", __func__,
1549 			    queue_cfg.tc, queue_cfg.idx);
1550 			goto close_ni;
1551 		}
1552 
1553 		tx = &fq->tx_rings[i];
1554 		tx->fq = fq;
1555 		tx->fqid = queue_cfg.fqid;
1556 		tx->txid = tx_rings_n;
1557 
1558 		if (bootverbose) {
1559 			device_printf(dev, "TX queue idx=%d, tc=%d, chan=%d, "
1560 			    "fqid=%d\n", fq->flowid, i, fq->chan->id,
1561 			    queue_cfg.fqid);
1562 		}
1563 
1564 		mtx_init(&tx->lock, "dpaa2_tx_ring", NULL, MTX_DEF);
1565 
1566 		/* Allocate Tx ring buffer. */
1567 		tx->idx_br = buf_ring_alloc(DPAA2_TX_BUFRING_SZ, M_DEVBUF,
1568 		    M_NOWAIT, &tx->lock);
1569 		if (tx->idx_br == NULL) {
1570 			device_printf(dev, "%s: failed to setup Tx ring buffer: "
1571 			    "fqid=%d\n", __func__, tx->fqid);
1572 			goto close_ni;
1573 		}
1574 
1575 		/* Configure Tx buffers. */
1576 		for (uint64_t j = 0; j < DPAA2_NI_BUFS_PER_TX; j++) {
1577 			buf = &tx->buf[j];
1578 			buf->type = DPAA2_BUF_TX;
1579 			buf->tx.dmat = buf->tx.sgt_dmat = NULL;
1580 			buf->tx.dmap = buf->tx.sgt_dmap = NULL;
1581 			buf->tx.paddr = buf->tx.sgt_paddr = 0;
1582 			buf->tx.vaddr = buf->tx.sgt_vaddr = NULL;
1583 			buf->tx.m = NULL;
1584 			buf->tx.idx = j;
1585 
1586 			error = dpaa2_ni_seed_txbuf(sc, buf);
1587 
1588 			/* Add index of the Tx buffer to the ring. */
1589 			buf_ring_enqueue(tx->idx_br, (void *) j);
1590 		}
1591 
1592 		tx_rings_n++;
1593 	}
1594 
1595 	/* All Tx queues which belong to the same flowid have the same qdbin. */
1596 	fq->tx_qdbin = queue_cfg.qdbin;
1597 
1598 	queue_cfg.type = DPAA2_NI_QUEUE_TX_CONF;
1599 	queue_cfg.tc = 0; /* ignored for TxConf queue */
1600 	queue_cfg.idx = fq->flowid;
1601 	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
1602 	if (error) {
1603 		device_printf(dev, "%s: failed to obtain TxConf queue "
1604 		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
1605 		    queue_cfg.idx);
1606 		goto close_ni;
1607 	}
1608 
1609 	fq->fqid = queue_cfg.fqid;
1610 
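	/*
	 * Bind the TxConf frame queue to the channel's DPCON, so that Tx
	 * confirmations arrive via the same DPIO notification path as Rx
	 * frames.
	 */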
1611 	queue_cfg.dest_id = con_info->id;
1612 	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
1613 	queue_cfg.priority = 0;
1614 	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
1615 	queue_cfg.options =
1616 	    DPAA2_NI_QUEUE_OPT_USER_CTX |
1617 	    DPAA2_NI_QUEUE_OPT_DEST;
1618 	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
1619 	if (error) {
1620 		device_printf(dev, "%s: failed to update TxConf queue "
1621 		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
1622 		    queue_cfg.idx);
1623 		goto close_ni;
1624 	}
1625 
1626 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1627 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1628 	return (0);
1629 
1630 close_ni:
1631 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1632 close_rc:
1633 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1634 err_exit:
1635 	return (error);
1636 }
1637 
1638 static int
1639 dpaa2_ni_setup_rx_err_flow(device_t dev, struct dpaa2_ni_fq *fq)
1640 {
1641 	device_t pdev = device_get_parent(dev);
1642 	device_t child = dev;
1643 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1644 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1645 	struct dpaa2_devinfo *con_info;
1646 	struct dpaa2_ni_queue_cfg queue_cfg = {0};
1647 	struct dpaa2_cmd cmd;
1648 	uint16_t rc_token, ni_token;
1649 	int error;
1650 
1651 	DPAA2_CMD_INIT(&cmd);
1652 
1653 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1654 	if (error) {
1655 		device_printf(dev, "%s: failed to open resource container: "
1656 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
1657 		goto err_exit;
1658 	}
1659 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1660 	if (error) {
1661 		device_printf(dev, "%s: failed to open network interface: "
1662 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
1663 		goto close_rc;
1664 	}
1665 
1666 	/* Obtain DPCON associated with the FQ's channel. */
1667 	con_info = device_get_ivars(fq->chan->con_dev);
1668 
1669 	queue_cfg.type = DPAA2_NI_QUEUE_RX_ERR;
1670 	queue_cfg.tc = fq->tc; /* ignored */
1671 	queue_cfg.idx = fq->flowid; /* ignored */
1672 	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
1673 	if (error) {
1674 		device_printf(dev, "%s: failed to obtain RxErr queue "
1675 		    "configuration\n", __func__);
1676 		goto close_ni;
1677 	}
1678 
1679 	fq->fqid = queue_cfg.fqid;
1680 
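	/*
	 * Deliver Rx errors via the channel's DPCON as well; a separate
	 * priority (1) keeps them apart from the regular frame queues.
	 */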
1681 	queue_cfg.dest_id = con_info->id;
1682 	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
1683 	queue_cfg.priority = 1;
1684 	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
1685 	queue_cfg.options =
1686 	    DPAA2_NI_QUEUE_OPT_USER_CTX |
1687 	    DPAA2_NI_QUEUE_OPT_DEST;
1688 	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
1689 	if (error) {
1690 		device_printf(dev, "%s: failed to update RxErr queue "
1691 		    "configuration\n", __func__);
1692 		goto close_ni;
1693 	}
1694 
1695 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1696 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1697 	return (0);
1698 
1699 close_ni:
1700 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1701 close_rc:
1702 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1703 err_exit:
1704 	return (error);
1705 }
1706 
1707 /**
1708  * @brief Configure DPNI object to generate interrupts.
1709  */
1710 static int
1711 dpaa2_ni_setup_irqs(device_t dev)
1712 {
1713 	device_t pdev = device_get_parent(dev);
1714 	device_t child = dev;
1715 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
1716 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1717 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1718 	struct dpaa2_cmd cmd;
1719 	uint16_t rc_token, ni_token;
1720 	int error;
1721 
1722 	DPAA2_CMD_INIT(&cmd);
1723 
1724 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1725 	if (error) {
1726 		device_printf(dev, "%s: failed to open resource container: "
1727 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
1728 		goto err_exit;
1729 	}
1730 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1731 	if (error) {
1732 		device_printf(dev, "%s: failed to open network interface: "
1733 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
1734 		goto close_rc;
1735 	}
1736 
1737 	/* Configure IRQs. */
1738 	error = dpaa2_ni_setup_msi(sc);
1739 	if (error) {
1740 		device_printf(dev, "%s: failed to allocate MSI\n", __func__);
1741 		goto close_ni;
1742 	}
	if ((sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) {
		device_printf(dev, "%s: failed to allocate IRQ resource\n",
		    __func__);
		error = ENXIO;
		goto close_ni;
	}
	if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, dpaa2_ni_intr, sc, &sc->intr)) {
		device_printf(dev, "%s: failed to setup IRQ resource\n",
		    __func__);
		error = ENXIO;
		goto close_ni;
	}
1755 
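	/* Let the DPNI raise its IRQ on link and endpoint changes only. */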
1756 	error = DPAA2_CMD_NI_SET_IRQ_MASK(dev, child, &cmd, DPNI_IRQ_INDEX,
1757 	    DPNI_IRQ_LINK_CHANGED | DPNI_IRQ_EP_CHANGED);
1758 	if (error) {
1759 		device_printf(dev, "%s: failed to set DPNI IRQ mask\n",
1760 		    __func__);
1761 		goto close_ni;
1762 	}
1763 
1764 	error = DPAA2_CMD_NI_SET_IRQ_ENABLE(dev, child, &cmd, DPNI_IRQ_INDEX,
1765 	    true);
1766 	if (error) {
1767 		device_printf(dev, "%s: failed to enable DPNI IRQ\n", __func__);
1768 		goto close_ni;
1769 	}
1770 
1771 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1772 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1773 	return (0);
1774 
1775 close_ni:
1776 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1777 close_rc:
1778 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1779 err_exit:
1780 	return (error);
1781 }
1782 
1783 /**
1784  * @brief Allocate MSI interrupts for DPNI.
1785  */
1786 static int
1787 dpaa2_ni_setup_msi(struct dpaa2_ni_softc *sc)
1788 {
1789 	int val;
1790 
1791 	val = pci_msi_count(sc->dev);
	if (val < DPAA2_NI_MSI_COUNT)
		device_printf(sc->dev, "MSI: actual=%d, expected=%d\n", val,
		    DPAA2_NI_MSI_COUNT);
1795 	val = MIN(val, DPAA2_NI_MSI_COUNT);
1796 
1797 	if (pci_alloc_msi(sc->dev, &val) != 0)
1798 		return (EINVAL);
1799 
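	/* SYS_RES_IRQ resource IDs for allocated MSI vectors are 1-based. */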
1800 	for (int i = 0; i < val; i++)
1801 		sc->irq_rid[i] = i + 1;
1802 
1803 	return (0);
1804 }
1805 
1806 /**
1807  * @brief Update DPNI according to the updated interface capabilities.
1808  */
1809 static int
1810 dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *sc)
1811 {
1812 	const bool en_rxcsum = if_getcapenable(sc->ifp) & IFCAP_RXCSUM;
1813 	const bool en_txcsum = if_getcapenable(sc->ifp) & IFCAP_TXCSUM;
1814 	device_t pdev = device_get_parent(sc->dev);
1815 	device_t dev = sc->dev;
1816 	device_t child = dev;
1817 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1818 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1819 	struct dpaa2_cmd cmd;
1820 	uint16_t rc_token, ni_token;
1821 	int error;
1822 
1823 	DPAA2_CMD_INIT(&cmd);
1824 
1825 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1826 	if (error) {
1827 		device_printf(dev, "%s: failed to open resource container: "
1828 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
1829 		goto err_exit;
1830 	}
1831 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1832 	if (error) {
1833 		device_printf(dev, "%s: failed to open network interface: "
1834 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
1835 		goto close_rc;
1836 	}
1837 
1838 	/* Setup checksums validation. */
1839 	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1840 	    DPAA2_NI_OFL_RX_L3_CSUM, en_rxcsum);
1841 	if (error) {
1842 		device_printf(dev, "%s: failed to %s L3 checksum validation\n",
1843 		    __func__, en_rxcsum ? "enable" : "disable");
1844 		goto close_ni;
1845 	}
1846 	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1847 	    DPAA2_NI_OFL_RX_L4_CSUM, en_rxcsum);
1848 	if (error) {
1849 		device_printf(dev, "%s: failed to %s L4 checksum validation\n",
1850 		    __func__, en_rxcsum ? "enable" : "disable");
1851 		goto close_ni;
1852 	}
1853 
1854 	/* Setup checksums generation. */
1855 	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1856 	    DPAA2_NI_OFL_TX_L3_CSUM, en_txcsum);
1857 	if (error) {
1858 		device_printf(dev, "%s: failed to %s L3 checksum generation\n",
1859 		    __func__, en_txcsum ? "enable" : "disable");
1860 		goto close_ni;
1861 	}
1862 	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1863 	    DPAA2_NI_OFL_TX_L4_CSUM, en_txcsum);
1864 	if (error) {
1865 		device_printf(dev, "%s: failed to %s L4 checksum generation\n",
1866 		    __func__, en_txcsum ? "enable" : "disable");
1867 		goto close_ni;
1868 	}
1869 
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1871 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1872 	return (0);
1873 
1874 close_ni:
1875 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1876 close_rc:
1877 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1878 err_exit:
1879 	return (error);
1880 }
1881 
1882 /**
1883  * @brief Update DPNI according to the updated interface flags.
1884  */
1885 static int
1886 dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *sc)
1887 {
1888 	const bool en_promisc = if_getflags(sc->ifp) & IFF_PROMISC;
1889 	const bool en_allmulti = if_getflags(sc->ifp) & IFF_ALLMULTI;
1890 	device_t pdev = device_get_parent(sc->dev);
1891 	device_t dev = sc->dev;
1892 	device_t child = dev;
1893 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1894 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1895 	struct dpaa2_cmd cmd;
1896 	uint16_t rc_token, ni_token;
1897 	int error;
1898 
1899 	DPAA2_CMD_INIT(&cmd);
1900 
1901 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1902 	if (error) {
1903 		device_printf(dev, "%s: failed to open resource container: "
1904 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
1905 		goto err_exit;
1906 	}
1907 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1908 	if (error) {
1909 		device_printf(dev, "%s: failed to open network interface: "
1910 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
1911 		goto close_rc;
1912 	}
1913 
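	/* Promiscuous mode implies receiving all multicast traffic as well. */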
	error = DPAA2_CMD_NI_SET_MULTI_PROMISC(dev, child, &cmd,
	    en_promisc || en_allmulti);
	if (error) {
		device_printf(dev, "%s: failed to %s multicast promiscuous "
		    "mode\n", __func__,
		    (en_promisc || en_allmulti) ? "enable" : "disable");
		goto close_ni;
	}
1921 
1922 	error = DPAA2_CMD_NI_SET_UNI_PROMISC(dev, child, &cmd, en_promisc);
1923 	if (error) {
1924 		device_printf(dev, "%s: failed to %s unicast promiscuous mode\n",
1925 		    __func__, en_promisc ? "enable" : "disable");
1926 		goto close_ni;
1927 	}
1928 
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1930 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1931 	return (0);
1932 
1933 close_ni:
1934 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1935 close_rc:
1936 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1937 err_exit:
1938 	return (error);
1939 }
1940 
1941 static int
1942 dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *sc)
1943 {
1944 	struct sysctl_ctx_list *ctx;
1945 	struct sysctl_oid *node, *node2;
1946 	struct sysctl_oid_list *parent, *parent2;
1947 	char cbuf[128];
1948 	int i;
1949 
1950 	ctx = device_get_sysctl_ctx(sc->dev);
1951 	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1952 
1953 	/* Add DPNI statistics. */
1954 	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
1955 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Statistics");
1956 	parent = SYSCTL_CHILDREN(node);
1957 	for (i = 0; i < DPAA2_NI_STAT_SYSCTLS; ++i) {
1958 		SYSCTL_ADD_PROC(ctx, parent, i, dpni_stat_sysctls[i].name,
1959 		    CTLTYPE_U64 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_stats,
1960 		    "IU", dpni_stat_sysctls[i].desc);
1961 	}
1962 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_anomaly_frames",
1963 	    CTLFLAG_RD, &sc->rx_anomaly_frames,
1964 	    "Rx frames in the buffers outside of the buffer pools");
1965 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_single_buf_frames",
1966 	    CTLFLAG_RD, &sc->rx_single_buf_frames,
1967 	    "Rx frames in single buffers");
1968 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_sg_buf_frames",
1969 	    CTLFLAG_RD, &sc->rx_sg_buf_frames,
1970 	    "Rx frames in scatter/gather list");
1971 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_enq_rej_frames",
1972 	    CTLFLAG_RD, &sc->rx_enq_rej_frames,
1973 	    "Enqueue rejected by QMan");
1974 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_ieoi_err_frames",
1975 	    CTLFLAG_RD, &sc->rx_ieoi_err_frames,
1976 	    "QMan IEOI error");
1977 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_single_buf_frames",
1978 	    CTLFLAG_RD, &sc->tx_single_buf_frames,
1979 	    "Tx single buffer frames");
1980 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_sg_frames",
1981 	    CTLFLAG_RD, &sc->tx_sg_frames,
1982 	    "Tx S/G frames");
1983 
1984 	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_num",
1985 	    CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_num,
1986 	    "IU", "number of Rx buffers in the buffer pool");
1987 	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_free",
1988 	    CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_free,
1989 	    "IU", "number of free Rx buffers in the buffer pool");
1990 
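	/* Return to the device's sysctl tree before adding the channels
	 * subtree. */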
	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1992 
1993 	/* Add channels statistics. */
1994 	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "channels",
1995 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channels");
1996 	parent = SYSCTL_CHILDREN(node);
1997 	for (int i = 0; i < sc->chan_n; i++) {
1998 		snprintf(cbuf, sizeof(cbuf), "%d", i);
1999 
2000 		node2 = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, cbuf,
2001 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channel");
2002 		parent2 = SYSCTL_CHILDREN(node2);
2003 
2004 		SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_frames",
2005 		    CTLFLAG_RD, &sc->channels[i]->tx_frames,
2006 		    "Tx frames counter");
2007 		SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_dropped",
2008 		    CTLFLAG_RD, &sc->channels[i]->tx_dropped,
2009 		    "Tx dropped counter");
2010 	}
2011 
2012 	return (0);
2013 }
2014 
2015 static int
2016 dpaa2_ni_setup_dma(struct dpaa2_ni_softc *sc)
2017 {
2018 	device_t dev = sc->dev;
2019 	int error;
2020 
2021 	KASSERT((sc->buf_align == BUF_ALIGN) || (sc->buf_align == BUF_ALIGN_V1),
2022 	    ("unexpected buffer alignment: %d\n", sc->buf_align));
2023 
2024 	/* DMA tag to allocate buffers for Rx buffer pool. */
2025 	error = bus_dma_tag_create(
2026 	    bus_get_dma_tag(dev),
2027 	    sc->buf_align, 0,		/* alignment, boundary */
2028 	    BUS_SPACE_MAXADDR,		/* low restricted addr */
2029 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
2030 	    NULL, NULL,			/* filter, filterarg */
2031 	    BUF_SIZE, 1,		/* maxsize, nsegments */
2032 	    BUF_SIZE, 0,		/* maxsegsize, flags */
2033 	    NULL, NULL,			/* lockfunc, lockarg */
2034 	    &sc->bp_dmat);
2035 	if (error) {
2036 		device_printf(dev, "%s: failed to create DMA tag for buffer "
2037 		    "pool\n", __func__);
2038 		return (error);
2039 	}
2040 
2041 	/* DMA tag to map Tx mbufs. */
2042 	error = bus_dma_tag_create(
2043 	    bus_get_dma_tag(dev),
2044 	    sc->buf_align, 0,		/* alignment, boundary */
2045 	    BUS_SPACE_MAXADDR,		/* low restricted addr */
2046 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
2047 	    NULL, NULL,			/* filter, filterarg */
2048 	    DPAA2_TX_SEGS_MAXSZ,	/* maxsize */
2049 	    DPAA2_TX_SEGLIMIT,		/* nsegments */
2050 	    DPAA2_TX_SEG_SZ, 0,		/* maxsegsize, flags */
2051 	    NULL, NULL,			/* lockfunc, lockarg */
2052 	    &sc->tx_dmat);
2053 	if (error) {
2054 		device_printf(dev, "%s: failed to create DMA tag for Tx "
2055 		    "buffers\n", __func__);
2056 		return (error);
2057 	}
2058 
2059 	/* DMA tag to allocate channel storage. */
2060 	error = bus_dma_tag_create(
2061 	    bus_get_dma_tag(dev),
2062 	    ETH_STORE_ALIGN, 0,		/* alignment, boundary */
2063 	    BUS_SPACE_MAXADDR,		/* low restricted addr */
2064 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
2065 	    NULL, NULL,			/* filter, filterarg */
2066 	    ETH_STORE_SIZE, 1,		/* maxsize, nsegments */
2067 	    ETH_STORE_SIZE, 0,		/* maxsegsize, flags */
2068 	    NULL, NULL,			/* lockfunc, lockarg */
2069 	    &sc->st_dmat);
2070 	if (error) {
2071 		device_printf(dev, "%s: failed to create DMA tag for channel "
2072 		    "storage\n", __func__);
2073 		return (error);
2074 	}
2075 
2076 	/* DMA tag for Rx distribution key. */
2077 	error = bus_dma_tag_create(
2078 	    bus_get_dma_tag(dev),
2079 	    PAGE_SIZE, 0,		/* alignment, boundary */
2080 	    BUS_SPACE_MAXADDR,		/* low restricted addr */
2081 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
2082 	    NULL, NULL,			/* filter, filterarg */
2083 	    DPAA2_CLASSIFIER_DMA_SIZE, 1, /* maxsize, nsegments */
2084 	    DPAA2_CLASSIFIER_DMA_SIZE, 0, /* maxsegsize, flags */
2085 	    NULL, NULL,			/* lockfunc, lockarg */
2086 	    &sc->rxd_dmat);
2087 	if (error) {
2088 		device_printf(dev, "%s: failed to create DMA tag for Rx "
2089 		    "distribution key\n", __func__);
2090 		return (error);
2091 	}
2092 
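	/* DMA tag for the QoS key configuration buffer. */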
2093 	error = bus_dma_tag_create(
2094 	    bus_get_dma_tag(dev),
2095 	    PAGE_SIZE, 0,		/* alignment, boundary */
2096 	    BUS_SPACE_MAXADDR,		/* low restricted addr */
2097 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
2098 	    NULL, NULL,			/* filter, filterarg */
2099 	    ETH_QOS_KCFG_BUF_SIZE, 1,	/* maxsize, nsegments */
2100 	    ETH_QOS_KCFG_BUF_SIZE, 0,	/* maxsegsize, flags */
2101 	    NULL, NULL,			/* lockfunc, lockarg */
2102 	    &sc->qos_dmat);
2103 	if (error) {
2104 		device_printf(dev, "%s: failed to create DMA tag for QoS key\n",
2105 		    __func__);
2106 		return (error);
2107 	}
2108 
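	/* DMA tag for Tx scatter/gather tables. */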
2109 	error = bus_dma_tag_create(
2110 	    bus_get_dma_tag(dev),
2111 	    PAGE_SIZE, 0,		/* alignment, boundary */
2112 	    BUS_SPACE_MAXADDR,		/* low restricted addr */
2113 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
2114 	    NULL, NULL,			/* filter, filterarg */
2115 	    DPAA2_TX_SGT_SZ, 1,		/* maxsize, nsegments */
2116 	    DPAA2_TX_SGT_SZ, 0,		/* maxsegsize, flags */
2117 	    NULL, NULL,			/* lockfunc, lockarg */
2118 	    &sc->sgt_dmat);
2119 	if (error) {
2120 		device_printf(dev, "%s: failed to create DMA tag for S/G "
2121 		    "tables\n", __func__);
2122 		return (error);
2123 	}
2124 
2125 	return (0);
2126 }
2127 
2128 /**
2129  * @brief Configure buffer layouts of the different DPNI queues.
2130  */
2131 static int
2132 dpaa2_ni_set_buf_layout(device_t dev)
2133 {
2134 	device_t pdev = device_get_parent(dev);
2135 	device_t child = dev;
2136 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2137 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2138 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2139 	struct dpaa2_ni_buf_layout buf_layout = {0};
2140 	struct dpaa2_cmd cmd;
2141 	uint16_t rc_token, ni_token;
2142 	int error;
2143 
2144 	DPAA2_CMD_INIT(&cmd);
2145 
2146 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2147 	if (error) {
2148 		device_printf(dev, "%s: failed to open resource container: "
2149 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2150 		goto err_exit;
2151 	}
2152 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2153 	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2156 		goto close_rc;
2157 	}
2158 
2159 	/*
2160 	 * Select Rx/Tx buffer alignment. It's necessary to ensure that the
2161 	 * buffer size seen by WRIOP is a multiple of 64 or 256 bytes depending
2162 	 * on the WRIOP version.
2163 	 */
2164 	sc->buf_align = (sc->attr.wriop_ver == WRIOP_VERSION(0, 0, 0) ||
2165 	    sc->attr.wriop_ver == WRIOP_VERSION(1, 0, 0))
2166 	    ? BUF_ALIGN_V1 : BUF_ALIGN;
2167 
	/* Round the buffer size down to a multiple of the chosen alignment. */
	sc->buf_sz = ALIGN_DOWN(BUF_SIZE, sc->buf_align);
2173 
2174 	if (bootverbose) {
2175 		device_printf(dev, "Rx/Tx buffers: size=%d, alignment=%d\n",
2176 		    sc->buf_sz, sc->buf_align);
2177 	}
2178 
2179 	/*
2180 	 *    Frame Descriptor       Tx buffer layout
2181 	 *
2182 	 *                ADDR -> |---------------------|
2183 	 *                        | SW FRAME ANNOTATION | BUF_SWA_SIZE bytes
2184 	 *                        |---------------------|
2185 	 *                        | HW FRAME ANNOTATION | BUF_TX_HWA_SIZE bytes
2186 	 *                        |---------------------|
2187 	 *                        |    DATA HEADROOM    |
2188 	 *       ADDR + OFFSET -> |---------------------|
2189 	 *                        |                     |
2190 	 *                        |                     |
2191 	 *                        |     FRAME DATA      |
2192 	 *                        |                     |
2193 	 *                        |                     |
2194 	 *                        |---------------------|
2195 	 *                        |    DATA TAILROOM    |
2196 	 *                        |---------------------|
2197 	 *
	 * NOTE: This layout applies to single-buffer frames only.
2199 	 */
2200 	buf_layout.queue_type = DPAA2_NI_QUEUE_TX;
2201 	buf_layout.pd_size = BUF_SWA_SIZE;
2202 	buf_layout.pass_timestamp = true;
2203 	buf_layout.pass_frame_status = true;
2204 	buf_layout.options =
2205 	    BUF_LOPT_PRIV_DATA_SZ |
2206 	    BUF_LOPT_TIMESTAMP | /* requires 128 bytes in HWA */
2207 	    BUF_LOPT_FRAME_STATUS;
2208 	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
2209 	if (error) {
2210 		device_printf(dev, "%s: failed to set Tx buffer layout\n",
2211 		    __func__);
2212 		goto close_ni;
2213 	}
2214 
2215 	/* Tx-confirmation buffer layout */
2216 	buf_layout.queue_type = DPAA2_NI_QUEUE_TX_CONF;
2217 	buf_layout.options =
2218 	    BUF_LOPT_TIMESTAMP |
2219 	    BUF_LOPT_FRAME_STATUS;
2220 	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
2221 	if (error) {
2222 		device_printf(dev, "%s: failed to set TxConf buffer layout\n",
2223 		    __func__);
2224 		goto close_ni;
2225 	}
2226 
2227 	/*
2228 	 * Driver should reserve the amount of space indicated by this command
2229 	 * as headroom in all Tx frames.
2230 	 */
2231 	error = DPAA2_CMD_NI_GET_TX_DATA_OFF(dev, child, &cmd, &sc->tx_data_off);
2232 	if (error) {
2233 		device_printf(dev, "%s: failed to obtain Tx data offset\n",
2234 		    __func__);
2235 		goto close_ni;
2236 	}
2237 
2238 	if (bootverbose) {
2239 		device_printf(dev, "Tx data offset=%d\n", sc->tx_data_off);
2240 	}
2241 	if ((sc->tx_data_off % 64) != 0) {
		device_printf(dev, "Tx data offset (%d) is not a multiple "
		    "of 64 bytes\n", sc->tx_data_off);
2244 	}
2245 
2246 	/*
2247 	 *    Frame Descriptor       Rx buffer layout
2248 	 *
2249 	 *                ADDR -> |---------------------|
2250 	 *                        | SW FRAME ANNOTATION | BUF_SWA_SIZE bytes
2251 	 *                        |---------------------|
2252 	 *                        | HW FRAME ANNOTATION | BUF_RX_HWA_SIZE bytes
2253 	 *                        |---------------------|
2254 	 *                        |    DATA HEADROOM    | OFFSET-BUF_RX_HWA_SIZE
2255 	 *       ADDR + OFFSET -> |---------------------|
2256 	 *                        |                     |
2257 	 *                        |                     |
2258 	 *                        |     FRAME DATA      |
2259 	 *                        |                     |
2260 	 *                        |                     |
2261 	 *                        |---------------------|
2262 	 *                        |    DATA TAILROOM    | 0 bytes
2263 	 *                        |---------------------|
2264 	 *
	 * NOTE: This layout applies to single-buffer frames only.
2266 	 */
2267 	buf_layout.queue_type = DPAA2_NI_QUEUE_RX;
2268 	buf_layout.pd_size = BUF_SWA_SIZE;
2269 	buf_layout.fd_align = sc->buf_align;
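	/*
	 * The headroom is chosen such that the resulting Rx data offset
	 * matches the Tx data offset obtained above.
	 */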
2270 	buf_layout.head_size = sc->tx_data_off - BUF_RX_HWA_SIZE - BUF_SWA_SIZE;
2271 	buf_layout.tail_size = 0;
2272 	buf_layout.pass_frame_status = true;
2273 	buf_layout.pass_parser_result = true;
2274 	buf_layout.pass_timestamp = true;
2275 	buf_layout.options =
2276 	    BUF_LOPT_PRIV_DATA_SZ |
2277 	    BUF_LOPT_DATA_ALIGN |
2278 	    BUF_LOPT_DATA_HEAD_ROOM |
2279 	    BUF_LOPT_DATA_TAIL_ROOM |
2280 	    BUF_LOPT_FRAME_STATUS |
2281 	    BUF_LOPT_PARSER_RESULT |
2282 	    BUF_LOPT_TIMESTAMP;
2283 	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
2284 	if (error) {
2285 		device_printf(dev, "%s: failed to set Rx buffer layout\n",
2286 		    __func__);
2287 		goto close_ni;
2288 	}
2289 
2290 	error = 0;
2291 close_ni:
2292 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2293 close_rc:
2294 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2295 err_exit:
2296 	return (error);
2297 }
2298 
2299 /**
2300  * @brief Enable Rx/Tx pause frames.
2301  *
2302  * NOTE: DPNI stops sending when a pause frame is received (Rx frame) or DPNI
2303  *       itself generates pause frames (Tx frame).
2304  */
2305 static int
2306 dpaa2_ni_set_pause_frame(device_t dev)
2307 {
2308 	device_t pdev = device_get_parent(dev);
2309 	device_t child = dev;
2310 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2311 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2312 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2313 	struct dpaa2_ni_link_cfg link_cfg = {0};
2314 	struct dpaa2_cmd cmd;
2315 	uint16_t rc_token, ni_token;
2316 	int error;
2317 
2318 	DPAA2_CMD_INIT(&cmd);
2319 
2320 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2321 	if (error) {
2322 		device_printf(dev, "%s: failed to open resource container: "
2323 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2324 		goto err_exit;
2325 	}
2326 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2327 	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2330 		goto close_rc;
2331 	}
2332 
2333 	error = DPAA2_CMD_NI_GET_LINK_CFG(dev, child, &cmd, &link_cfg);
2334 	if (error) {
2335 		device_printf(dev, "%s: failed to obtain link configuration: "
2336 		    "error=%d\n", __func__, error);
2337 		goto close_ni;
2338 	}
2339 
2340 	/* Enable both Rx and Tx pause frames by default. */
2341 	link_cfg.options |= DPAA2_NI_LINK_OPT_PAUSE;
2342 	link_cfg.options &= ~DPAA2_NI_LINK_OPT_ASYM_PAUSE;
2343 
2344 	error = DPAA2_CMD_NI_SET_LINK_CFG(dev, child, &cmd, &link_cfg);
2345 	if (error) {
2346 		device_printf(dev, "%s: failed to set link configuration: "
2347 		    "error=%d\n", __func__, error);
2348 		goto close_ni;
2349 	}
2350 
2351 	sc->link_options = link_cfg.options;
2352 	error = 0;
2353 close_ni:
2354 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2355 close_rc:
2356 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2357 err_exit:
2358 	return (error);
2359 }
2360 
2361 /**
2362  * @brief Configure QoS table to determine the traffic class for the received
2363  * frame.
2364  */
2365 static int
2366 dpaa2_ni_set_qos_table(device_t dev)
2367 {
2368 	device_t pdev = device_get_parent(dev);
2369 	device_t child = dev;
2370 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2371 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2372 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2373 	struct dpaa2_ni_qos_table tbl;
2374 	struct dpaa2_buf *buf = &sc->qos_kcfg;
2375 	struct dpaa2_cmd cmd;
2376 	uint16_t rc_token, ni_token;
2377 	int error;
2378 
2379 	if (sc->attr.num.rx_tcs == 1 ||
2380 	    !(sc->attr.options & DPNI_OPT_HAS_KEY_MASKING)) {
2381 		if (bootverbose) {
2382 			device_printf(dev, "Ingress traffic classification is "
2383 			    "not supported\n");
2384 		}
2385 		return (0);
2386 	}
2387 
2388 	/*
2389 	 * Allocate a buffer visible to the device to hold the QoS table key
2390 	 * configuration.
2391 	 */
2392 	KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not storage buffer",
2393 	    __func__));
2394 	if (__predict_true(buf->store.dmat == NULL)) {
2395 		buf->store.dmat = sc->qos_dmat;
2396 	}
2397 
2398 	error = bus_dmamem_alloc(buf->store.dmat, &buf->store.vaddr,
2399 	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->store.dmap);
2400 	if (error) {
2401 		device_printf(dev, "%s: failed to allocate a buffer for QoS key "
2402 		    "configuration\n", __func__);
2403 		goto err_exit;
2404 	}
2405 
2406 	error = bus_dmamap_load(buf->store.dmat, buf->store.dmap,
2407 	    buf->store.vaddr, ETH_QOS_KCFG_BUF_SIZE, dpaa2_ni_dmamap_cb,
2408 	    &buf->store.paddr, BUS_DMA_NOWAIT);
2409 	if (error) {
2410 		device_printf(dev, "%s: failed to map QoS key configuration "
2411 		    "buffer into bus space\n", __func__);
2412 		goto err_exit;
2413 	}
2414 
2415 	DPAA2_CMD_INIT(&cmd);
2416 
2417 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2418 	if (error) {
2419 		device_printf(dev, "%s: failed to open resource container: "
2420 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2421 		goto err_exit;
2422 	}
2423 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2424 	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2427 		goto close_rc;
2428 	}
2429 
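	/* Until QoS entries are installed, all traffic falls into TC 0. */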
2430 	tbl.default_tc = 0;
2431 	tbl.discard_on_miss = false;
2432 	tbl.keep_entries = false;
2433 	tbl.kcfg_busaddr = buf->store.paddr;
2434 	error = DPAA2_CMD_NI_SET_QOS_TABLE(dev, child, &cmd, &tbl);
2435 	if (error) {
2436 		device_printf(dev, "%s: failed to set QoS table\n", __func__);
2437 		goto close_ni;
2438 	}
2439 
2440 	error = DPAA2_CMD_NI_CLEAR_QOS_TABLE(dev, child, &cmd);
2441 	if (error) {
2442 		device_printf(dev, "%s: failed to clear QoS table\n", __func__);
2443 		goto close_ni;
2444 	}
2445 
2446 	error = 0;
2447 close_ni:
2448 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2449 close_rc:
2450 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2451 err_exit:
2452 	return (error);
2453 }
2454 
2455 static int
2456 dpaa2_ni_set_mac_addr(device_t dev)
2457 {
2458 	device_t pdev = device_get_parent(dev);
2459 	device_t child = dev;
2460 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2461 	if_t ifp = sc->ifp;
2462 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2463 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2464 	struct dpaa2_cmd cmd;
2465 	struct ether_addr rnd_mac_addr;
2466 	uint16_t rc_token, ni_token;
2467 	uint8_t mac_addr[ETHER_ADDR_LEN];
2468 	uint8_t dpni_mac_addr[ETHER_ADDR_LEN];
2469 	int error;
2470 
2471 	DPAA2_CMD_INIT(&cmd);
2472 
2473 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2474 	if (error) {
2475 		device_printf(dev, "%s: failed to open resource container: "
2476 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2477 		goto err_exit;
2478 	}
2479 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2480 	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2483 		goto close_rc;
2484 	}
2485 
2486 	/*
2487 	 * Get the MAC address associated with the physical port, if the DPNI is
2488 	 * connected to a DPMAC directly associated with one of the physical
2489 	 * ports.
2490 	 */
2491 	error = DPAA2_CMD_NI_GET_PORT_MAC_ADDR(dev, child, &cmd, mac_addr);
2492 	if (error) {
2493 		device_printf(dev, "%s: failed to obtain the MAC address "
2494 		    "associated with the physical port\n", __func__);
2495 		goto close_ni;
2496 	}
2497 
2498 	/* Get primary MAC address from the DPNI attributes. */
2499 	error = DPAA2_CMD_NI_GET_PRIM_MAC_ADDR(dev, child, &cmd, dpni_mac_addr);
2500 	if (error) {
2501 		device_printf(dev, "%s: failed to obtain primary MAC address\n",
2502 		    __func__);
2503 		goto close_ni;
2504 	}
2505 
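	/*
	 * Prefer the MAC address of the physical port. Otherwise, keep the
	 * DPNI's current primary MAC address or, if it is all-zero, generate
	 * a random one.
	 */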
2506 	if (!ETHER_IS_ZERO(mac_addr)) {
2507 		/* Set MAC address of the physical port as DPNI's primary one. */
2508 		error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd,
2509 		    mac_addr);
2510 		if (error) {
2511 			device_printf(dev, "%s: failed to set primary MAC "
2512 			    "address\n", __func__);
2513 			goto close_ni;
2514 		}
2515 		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2516 			sc->mac.addr[i] = mac_addr[i];
2517 		}
2518 	} else if (ETHER_IS_ZERO(dpni_mac_addr)) {
2519 		/* Generate random MAC address as DPNI's primary one. */
2520 		ether_gen_addr(ifp, &rnd_mac_addr);
2521 		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2522 			mac_addr[i] = rnd_mac_addr.octet[i];
2523 		}
2524 
2525 		error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd,
2526 		    mac_addr);
2527 		if (error) {
2528 			device_printf(dev, "%s: failed to set random primary "
2529 			    "MAC address\n", __func__);
2530 			goto close_ni;
2531 		}
2532 		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2533 			sc->mac.addr[i] = mac_addr[i];
2534 		}
2535 	} else {
2536 		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2537 			sc->mac.addr[i] = dpni_mac_addr[i];
2538 		}
2539 	}
2540 
2541 	error = 0;
2542 close_ni:
2543 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2544 close_rc:
2545 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2546 err_exit:
2547 	return (error);
2548 }
2549 
2550 static void
2551 dpaa2_ni_miibus_statchg(device_t dev)
2552 {
2553 	device_t pdev = device_get_parent(dev);
2554 	device_t child = dev;
2555 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2556 	struct dpaa2_mac_link_state mac_link = { 0 };
2557 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2558 	struct dpaa2_cmd cmd;
2559 	uint16_t rc_token, mac_token;
2560 	int error, link_state;
2561 
2562 	if (sc->fixed_link || sc->mii == NULL) {
2563 		return;
2564 	}
2565 
2566 	/*
2567 	 * Note: ifp link state will only be changed AFTER we are called so we
2568 	 * cannot rely on ifp->if_linkstate here.
2569 	 */
2570 	if (sc->mii->mii_media_status & IFM_AVALID) {
2571 		if (sc->mii->mii_media_status & IFM_ACTIVE) {
2572 			link_state = LINK_STATE_UP;
2573 		} else {
2574 			link_state = LINK_STATE_DOWN;
2575 		}
2576 	} else {
2577 		link_state = LINK_STATE_UNKNOWN;
2578 	}
2579 
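	/* Propagate the new link state to the DPMAC only when it changes. */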
2580 	if (link_state != sc->link_state) {
2581 		sc->link_state = link_state;
2582 
2583 		DPAA2_CMD_INIT(&cmd);
2584 
2585 		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
2586 		    &rc_token);
2587 		if (error) {
2588 			device_printf(dev, "%s: failed to open resource "
2589 			    "container: id=%d, error=%d\n", __func__, rcinfo->id,
2590 			    error);
2591 			goto err_exit;
2592 		}
2593 		error = DPAA2_CMD_MAC_OPEN(dev, child, &cmd, sc->mac.dpmac_id,
2594 		    &mac_token);
2595 		if (error) {
2596 			device_printf(sc->dev, "%s: failed to open DPMAC: "
2597 			    "id=%d, error=%d\n", __func__, sc->mac.dpmac_id,
2598 			    error);
2599 			goto close_rc;
2600 		}
2601 
2602 		if (link_state == LINK_STATE_UP ||
2603 		    link_state == LINK_STATE_DOWN) {
2604 			/* Update DPMAC link state. */
2605 			mac_link.supported = sc->mii->mii_media.ifm_media;
2606 			mac_link.advert = sc->mii->mii_media.ifm_media;
			mac_link.rate = 1000; /* TODO: obtain via ifmedia_baudrate()? */
2608 			mac_link.options =
2609 			    DPAA2_MAC_LINK_OPT_AUTONEG |
2610 			    DPAA2_MAC_LINK_OPT_PAUSE;
2611 			mac_link.up = (link_state == LINK_STATE_UP) ? true : false;
2612 			mac_link.state_valid = true;
2613 
2614 			/* Inform DPMAC about link state. */
2615 			error = DPAA2_CMD_MAC_SET_LINK_STATE(dev, child, &cmd,
2616 			    &mac_link);
2617 			if (error) {
2618 				device_printf(sc->dev, "%s: failed to set DPMAC "
2619 				    "link state: id=%d, error=%d\n", __func__,
2620 				    sc->mac.dpmac_id, error);
2621 			}
2622 		}
2623 		(void)DPAA2_CMD_MAC_CLOSE(dev, child, &cmd);
2624 		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
2625 		    rc_token));
2626 	}
2627 
2628 	return;
2629 
2630 close_rc:
2631 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2632 err_exit:
2633 	return;
2634 }
2635 
2636 /**
2637  * @brief Callback function to process media change request.
2638  */
2639 static int
2640 dpaa2_ni_media_change(if_t ifp)
2641 {
2642 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2643 
2644 	DPNI_LOCK(sc);
2645 	if (sc->mii) {
2646 		mii_mediachg(sc->mii);
2647 		sc->media_status = sc->mii->mii_media.ifm_media;
2648 	} else if (sc->fixed_link) {
2649 		if_printf(ifp, "%s: can't change media in fixed mode\n",
2650 		    __func__);
2651 	}
2652 	DPNI_UNLOCK(sc);
2653 
2654 	return (0);
2655 }
2656 
2657 /**
2658  * @brief Callback function to process media status request.
2659  */
2660 static void
2661 dpaa2_ni_media_status(if_t ifp, struct ifmediareq *ifmr)
2662 {
2663 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2664 
2665 	DPNI_LOCK(sc);
2666 	if (sc->mii) {
2667 		mii_pollstat(sc->mii);
2668 		ifmr->ifm_active = sc->mii->mii_media_active;
2669 		ifmr->ifm_status = sc->mii->mii_media_status;
2670 	}
2671 	DPNI_UNLOCK(sc);
2672 }
2673 
2674 /**
2675  * @brief Callout function to check and update media status.
2676  */
2677 static void
2678 dpaa2_ni_media_tick(void *arg)
2679 {
2680 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2681 
2682 	/* Check for media type change */
2683 	if (sc->mii) {
2684 		mii_tick(sc->mii);
2685 		if (sc->media_status != sc->mii->mii_media.ifm_media) {
			device_printf(sc->dev, "%s: media type changed "
			    "(ifm_media=%x)\n", __func__,
			    sc->mii->mii_media.ifm_media);
2688 			dpaa2_ni_media_change(sc->ifp);
2689 		}
2690 	}
2691 
2692 	/* Schedule another timeout one second from now */
2693 	callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);
2694 }
2695 
2696 static void
2697 dpaa2_ni_init(void *arg)
2698 {
2699 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2700 	if_t ifp = sc->ifp;
2701 	device_t pdev = device_get_parent(sc->dev);
2702 	device_t dev = sc->dev;
2703 	device_t child = dev;
2704 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2705 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2706 	struct dpaa2_cmd cmd;
2707 	uint16_t rc_token, ni_token;
2708 	int error;
2709 
2710 	DPNI_LOCK(sc);
2711 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2712 		DPNI_UNLOCK(sc);
2713 		return;
2714 	}
2715 	DPNI_UNLOCK(sc);
2716 
2717 	DPAA2_CMD_INIT(&cmd);
2718 
2719 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2720 	if (error) {
2721 		device_printf(dev, "%s: failed to open resource container: "
2722 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2723 		goto err_exit;
2724 	}
2725 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2726 	if (error) {
2727 		device_printf(dev, "%s: failed to open network interface: "
2728 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2729 		goto close_rc;
2730 	}
2731 
2732 	error = DPAA2_CMD_NI_ENABLE(dev, child, &cmd);
2733 	if (error) {
2734 		device_printf(dev, "%s: failed to enable DPNI: error=%d\n",
2735 		    __func__, error);
2736 	}
2737 
2738 	DPNI_LOCK(sc);
2739 	if (sc->mii) {
2740 		mii_mediachg(sc->mii);
2741 	}
2742 	callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);
2743 
2744 	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
2745 	DPNI_UNLOCK(sc);
2746 
	/* Force link-state update to initialize things. */
2748 	dpaa2_ni_miibus_statchg(dev);
2749 
2750 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2751 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2752 	return;
2753 
2754 close_rc:
2755 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2756 err_exit:
2757 	return;
2758 }
2759 
2760 static int
2761 dpaa2_ni_transmit(if_t ifp, struct mbuf *m)
2762 {
2763 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2764 	struct dpaa2_ni_channel	*chan;
2765 	struct dpaa2_ni_tx_ring *tx;
2766 	uint32_t fqid;
2767 	bool found = false;
2768 	int chan_n = 0;
2769 
2770 	if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)))
2771 		return (0);
2772 
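	/*
	 * The flowid of a received mbuf is set to an Rx FQID (see
	 * dpaa2_ni_rx()), so map it back to its channel in order to transmit
	 * on the same channel the flow was received on.
	 */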
2773 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
2774 		fqid = m->m_pkthdr.flowid;
2775 		for (int i = 0; i < sc->chan_n; i++) {
2776 			chan = sc->channels[i];
2777 			for (int j = 0; j < chan->rxq_n; j++) {
2778 				if (fqid == chan->rx_queues[j].fqid) {
2779 					chan_n = chan->flowid;
2780 					found = true;
2781 					break;
2782 				}
2783 			}
2784 			if (found) {
2785 				break;
2786 			}
2787 		}
2788 	}
2789 	tx = DPAA2_TX_RING(sc, chan_n, 0);
2790 
2791 	TX_LOCK(tx);
2792 	dpaa2_ni_tx_locked(sc, tx, m);
2793 	TX_UNLOCK(tx);
2794 
2795 	return (0);
2796 }
2797 
2798 static void
2799 dpaa2_ni_qflush(if_t ifp)
2800 {
2801 	/* TODO: Find a way to drain Tx queues in QBMan. */
2802 	if_qflush(ifp);
2803 }
2804 
2805 static int
2806 dpaa2_ni_ioctl(if_t ifp, u_long c, caddr_t data)
2807 {
2808 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2809 	struct ifreq *ifr = (struct ifreq *) data;
2810 	device_t pdev = device_get_parent(sc->dev);
2811 	device_t dev = sc->dev;
2812 	device_t child = dev;
2813 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2814 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2815 	struct dpaa2_cmd cmd;
2816 	uint32_t changed = 0;
2817 	uint16_t rc_token, ni_token;
2818 	int mtu, error, rc = 0;
2819 
2820 	DPAA2_CMD_INIT(&cmd);
2821 
2822 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2823 	if (error) {
2824 		device_printf(dev, "%s: failed to open resource container: "
2825 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2826 		goto err_exit;
2827 	}
2828 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2829 	if (error) {
2830 		device_printf(dev, "%s: failed to open network interface: "
2831 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2832 		goto close_rc;
2833 	}
2834 
2835 	switch (c) {
2836 	case SIOCSIFMTU:
2837 		DPNI_LOCK(sc);
2838 		mtu = ifr->ifr_mtu;
2839 		if (mtu < ETHERMIN || mtu > ETHERMTU_JUMBO) {
2840 			DPNI_UNLOCK(sc);
2841 			error = EINVAL;
2842 			goto close_ni;
2843 		}
2844 		if_setmtu(ifp, mtu);
2845 		DPNI_UNLOCK(sc);
2846 
2847 		/* Update maximum frame length. */
2848 		error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd,
2849 		    mtu + ETHER_HDR_LEN);
2850 		if (error) {
2851 			device_printf(dev, "%s: failed to update maximum frame "
2852 			    "length: error=%d\n", __func__, error);
2853 			goto close_ni;
2854 		}
2855 		break;
2856 	case SIOCSIFCAP:
2857 		changed = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
2858 		if (changed & IFCAP_HWCSUM) {
2859 			if ((ifr->ifr_reqcap & changed) & IFCAP_HWCSUM) {
2860 				if_setcapenablebit(ifp, IFCAP_HWCSUM, 0);
2861 			} else {
2862 				if_setcapenablebit(ifp, 0, IFCAP_HWCSUM);
2863 			}
2864 		}
2865 		rc = dpaa2_ni_setup_if_caps(sc);
2866 		if (rc) {
			device_printf(dev, "%s: failed to update iface "
			    "capabilities: error=%d\n", __func__, rc);
2869 			rc = ENXIO;
2870 		}
2871 		break;
2872 	case SIOCSIFFLAGS:
2873 		DPNI_LOCK(sc);
2874 		if (if_getflags(ifp) & IFF_UP) {
2875 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2876 				changed = if_getflags(ifp) ^ sc->if_flags;
2877 				if (changed & IFF_PROMISC ||
2878 				    changed & IFF_ALLMULTI) {
2879 					rc = dpaa2_ni_setup_if_flags(sc);
2880 				}
2881 			} else {
2882 				DPNI_UNLOCK(sc);
2883 				dpaa2_ni_init(sc);
2884 				DPNI_LOCK(sc);
2885 			}
2886 		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2887 			/* FIXME: Disable DPNI. See dpaa2_ni_init(). */
2888 		}
2889 
2890 		sc->if_flags = if_getflags(ifp);
2891 		DPNI_UNLOCK(sc);
2892 		break;
2893 	case SIOCADDMULTI:
2894 	case SIOCDELMULTI:
2895 		DPNI_LOCK(sc);
2896 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2897 			DPNI_UNLOCK(sc);
2898 			rc = dpaa2_ni_update_mac_filters(ifp);
2899 			if (rc) {
2900 				device_printf(dev, "%s: failed to update MAC "
2901 				    "filters: error=%d\n", __func__, rc);
2902 			}
2903 			DPNI_LOCK(sc);
2904 		}
2905 		DPNI_UNLOCK(sc);
2906 		break;
2907 	case SIOCGIFMEDIA:
2908 	case SIOCSIFMEDIA:
		if (sc->mii) {
			rc = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, c);
		} else if (sc->fixed_link) {
			rc = ifmedia_ioctl(ifp, ifr, &sc->fixed_ifmedia, c);
		}
2914 		break;
2915 	default:
2916 		rc = ether_ioctl(ifp, c, data);
2917 		break;
2918 	}
2919 
2920 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2921 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2922 	return (rc);
2923 
2924 close_ni:
2925 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2926 close_rc:
2927 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2928 err_exit:
2929 	return (error);
2930 }
2931 
2932 static int
2933 dpaa2_ni_update_mac_filters(if_t ifp)
2934 {
2935 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2936 	struct dpaa2_ni_mcaddr_ctx ctx;
2937 	device_t pdev = device_get_parent(sc->dev);
2938 	device_t dev = sc->dev;
2939 	device_t child = dev;
2940 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2941 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2942 	struct dpaa2_cmd cmd;
2943 	uint16_t rc_token, ni_token;
2944 	int error;
2945 
2946 	DPAA2_CMD_INIT(&cmd);
2947 
2948 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2949 	if (error) {
2950 		device_printf(dev, "%s: failed to open resource container: "
2951 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2952 		goto err_exit;
2953 	}
2954 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2955 	if (error) {
2956 		device_printf(dev, "%s: failed to open network interface: "
2957 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2958 		goto close_rc;
2959 	}
2960 
2961 	/* Remove all multicast MAC filters. */
2962 	error = DPAA2_CMD_NI_CLEAR_MAC_FILTERS(dev, child, &cmd, false, true);
2963 	if (error) {
2964 		device_printf(dev, "%s: failed to clear multicast MAC filters: "
2965 		    "error=%d\n", __func__, error);
2966 		goto close_ni;
2967 	}
2968 
2969 	ctx.ifp = ifp;
2970 	ctx.error = 0;
2971 	ctx.nent = 0;
2972 
2973 	if_foreach_llmaddr(ifp, dpaa2_ni_add_maddr, &ctx);
2974 
2975 	error = ctx.error;
2976 close_ni:
2977 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2978 close_rc:
2979 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2980 err_exit:
2981 	return (error);
2982 }
2983 
2984 static u_int
2985 dpaa2_ni_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2986 {
2987 	struct dpaa2_ni_mcaddr_ctx *ctx = arg;
2988 	struct dpaa2_ni_softc *sc = if_getsoftc(ctx->ifp);
2989 	device_t pdev = device_get_parent(sc->dev);
2990 	device_t dev = sc->dev;
2991 	device_t child = dev;
2992 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2993 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2994 	struct dpaa2_cmd cmd;
2995 	uint16_t rc_token, ni_token;
2996 	int error;
2997 
2998 	if (ctx->error != 0) {
2999 		return (0);
3000 	}
3001 
3002 	if (ETHER_IS_MULTICAST(LLADDR(sdl))) {
3003 		DPAA2_CMD_INIT(&cmd);
3004 
3005 		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
3006 		    &rc_token);
3007 		if (error) {
3008 			device_printf(dev, "%s: failed to open resource "
3009 			    "container: id=%d, error=%d\n", __func__, rcinfo->id,
3010 			    error);
3011 			return (0);
3012 		}
3013 		error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id,
3014 		    &ni_token);
3015 		if (error) {
3016 			device_printf(dev, "%s: failed to open network interface: "
3017 			    "id=%d, error=%d\n", __func__, dinfo->id, error);
3018 			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
3019 			    rc_token));
3020 			return (0);
3021 		}
3022 
3023 		ctx->error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, &cmd,
3024 		    LLADDR(sdl));
3025 
3026 		(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
3027 		    ni_token));
3028 		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
3029 		    rc_token));
3030 
3031 		if (ctx->error != 0) {
			device_printf(dev, "%s: can't add more than %d MAC "
3033 			    "addresses, switching to the multicast promiscuous "
3034 			    "mode\n", __func__, ctx->nent);
3035 
3036 			/* Enable multicast promiscuous mode. */
3037 			DPNI_LOCK(sc);
3038 			if_setflagbits(ctx->ifp, IFF_ALLMULTI, 0);
3039 			sc->if_flags |= IFF_ALLMULTI;
3040 			ctx->error = dpaa2_ni_setup_if_flags(sc);
3041 			DPNI_UNLOCK(sc);
3042 
3043 			return (0);
3044 		}
3045 		ctx->nent++;
3046 	}
3047 
3048 	return (1);
3049 }
3050 
3051 static void
3052 dpaa2_ni_intr(void *arg)
3053 {
3054 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
3055 	device_t pdev = device_get_parent(sc->dev);
3056 	device_t dev = sc->dev;
3057 	device_t child = dev;
3058 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
3059 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
3060 	struct dpaa2_cmd cmd;
3061 	uint32_t status = ~0u; /* clear all IRQ status bits */
3062 	uint16_t rc_token, ni_token;
3063 	int error;
3064 
3065 	DPAA2_CMD_INIT(&cmd);
3066 
3067 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
3068 	if (error) {
3069 		device_printf(dev, "%s: failed to open resource container: "
3070 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
3071 		goto err_exit;
3072 	}
3073 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
3074 	if (error) {
3075 		device_printf(dev, "%s: failed to open network interface: "
3076 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
3077 		goto close_rc;
3078 	}
3079 
3080 	error = DPAA2_CMD_NI_GET_IRQ_STATUS(dev, child, &cmd, DPNI_IRQ_INDEX,
3081 	    &status);
3082 	if (error) {
3083 		device_printf(sc->dev, "%s: failed to obtain IRQ status: "
3084 		    "error=%d\n", __func__, error);
3085 	}
3086 
3087 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
3088 close_rc:
3089 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
3090 err_exit:
3091 	return;
3092 }
3093 
3094 /**
3095  * @brief Callback to obtain a physical address of the only DMA segment mapped.
3096  */
3097 static void
3098 dpaa2_ni_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3099 {
3100 	if (error == 0) {
3101 		KASSERT(nseg == 1, ("too many segments: nseg=%d\n", nseg));
3102 		*(bus_addr_t *) arg = segs[0].ds_addr;
3103 	}
3104 }
3105 
3106 /**
3107  * @brief Release new buffers to the buffer pool if necessary.
3108  */
3109 static void
3110 dpaa2_ni_bp_task(void *arg, int count)
3111 {
3112 	device_t bp_dev;
3113 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
3114 	struct dpaa2_bp_softc *bpsc;
3115 	struct dpaa2_bp_conf bp_conf;
3116 	const int buf_num = DPAA2_ATOMIC_READ(&sc->buf_num);
3117 	int error;
3118 
3119 	/* There's only one buffer pool for now. */
3120 	bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
3121 	bpsc = device_get_softc(bp_dev);
3122 
3123 	/* Get state of the buffer pool. */
3124 	error = DPAA2_SWP_QUERY_BP(sc->channels[0]->io_dev, bpsc->attr.bpid,
3125 	    &bp_conf);
3126 	if (error) {
3127 		device_printf(sc->dev, "%s: failed to query buffer pool "
3128 		    "configuration: error=%d\n", __func__, error);
3129 		return;
3130 	}
3131 
	/* Double the number of allocated buffers if fewer than 25% are free. */
3133 	if (bp_conf.free_bufn < (buf_num >> 2)) {
3134 		(void)dpaa2_ni_seed_buf_pool(sc, buf_num);
3135 		DPAA2_ATOMIC_XCHG(&sc->buf_free, bp_conf.free_bufn);
3136 	}
3137 }
3138 
3139 /**
3140  * @brief Poll frames from a specific channel when CDAN is received.
3141  *
3142  * NOTE: To be called from the DPIO interrupt handler.
3143  */
3144 static void
3145 dpaa2_ni_poll(void *arg)
3146 {
3147 	struct dpaa2_ni_channel *chan = (struct dpaa2_ni_channel *) arg;
3148 	struct dpaa2_io_softc *iosc;
3149 	struct dpaa2_swp *swp;
3150 	struct dpaa2_ni_fq *fq;
3151 	int error, consumed = 0;
3152 
3153 	KASSERT(chan != NULL, ("%s: channel is NULL", __func__));
3154 
3155 	iosc = device_get_softc(chan->io_dev);
3156 	swp = iosc->swp;
3157 
3158 	do {
3159 		error = dpaa2_swp_pull(swp, chan->id, &chan->store,
3160 		    ETH_STORE_FRAMES);
3161 		if (error) {
3162 			device_printf(chan->ni_dev, "%s: failed to pull frames: "
3163 			    "chan_id=%d, error=%d\n", __func__, chan->id, error);
3164 			break;
3165 		}
3166 
		/*
		 * TODO: Combine frames from the same Rx queue returned by
		 * the current VDQ command into a chain (linked with
		 * m_nextpkt) to amortize the FQ lock.
		 */
3172 		error = dpaa2_ni_consume_frames(chan, &fq, &consumed);
3173 		if (error == ENOENT) {
3174 			break;
3175 		}
3176 		if (error == ETIMEDOUT) {
3177 			device_printf(chan->ni_dev, "%s: timeout to consume "
3178 			    "frames: chan_id=%d\n", __func__, chan->id);
3179 		}
3180 	} while (true);
3181 
3182 	/* Re-arm channel to generate CDAN. */
3183 	error = DPAA2_SWP_CONF_WQ_CHANNEL(chan->io_dev, &chan->ctx);
3184 	if (error) {
3185 		device_printf(chan->ni_dev, "%s: failed to rearm: chan_id=%d, "
3186 		    "error=%d\n", __func__, chan->id, error);
3187 	}
3188 }
3189 
3190 /**
3191  * @brief Transmit mbufs.
3192  */
3193 static void
3194 dpaa2_ni_tx_locked(struct dpaa2_ni_softc *sc, struct dpaa2_ni_tx_ring *tx,
3195     struct mbuf *m)
3196 {
3197 	struct dpaa2_ni_fq *fq = tx->fq;
3198 	struct dpaa2_buf *buf;
3199 	struct dpaa2_fd fd;
3200 	struct mbuf *m_d;
3201 	bus_dma_segment_t txsegs[DPAA2_TX_SEGLIMIT];
3202 	uint64_t idx;
3203 	void *pidx;
3204 	int error, rc, txnsegs;
3205 
3206 	/* Obtain an index of a Tx buffer. */
3207 	pidx = buf_ring_dequeue_sc(tx->idx_br);
3208 	if (__predict_false(pidx == NULL)) {
3209 		/* TODO: Do not give up easily. */
3210 		m_freem(m);
3211 		return;
3212 	} else {
3213 		idx = (uint64_t) pidx;
3214 		buf = &tx->buf[idx];
3215 		buf->tx.m = m;
3216 		buf->tx.sgt_paddr = 0;
3217 	}
3218 
3219 	/* Load mbuf to transmit. */
3220 	error = bus_dmamap_load_mbuf_sg(buf->tx.dmat, buf->tx.dmap, m,
3221 	    txsegs, &txnsegs, BUS_DMA_NOWAIT);
3222 	if (__predict_false(error != 0)) {
3223 		/* Too many fragments, trying to defragment... */
3224 		m_d = m_collapse(m, M_NOWAIT, DPAA2_TX_SEGLIMIT);
3225 		if (m_d == NULL) {
3226 			device_printf(sc->dev, "%s: mbuf "
3227 			    "defragmentation failed\n", __func__);
3228 			fq->chan->tx_dropped++;
3229 			goto err;
3230 		}
3231 
3232 		buf->tx.m = m = m_d;
3233 		error = bus_dmamap_load_mbuf_sg(buf->tx.dmat,
3234 		    buf->tx.dmap, m, txsegs, &txnsegs, BUS_DMA_NOWAIT);
3235 		if (__predict_false(error != 0)) {
3236 			device_printf(sc->dev, "%s: failed to load "
3237 			    "mbuf: error=%d\n", __func__, error);
3238 			fq->chan->tx_dropped++;
3239 			goto err;
3240 		}
3241 	}
3242 
3243 	/* Build frame descriptor. */
3244 	error = dpaa2_ni_build_fd(sc, tx, buf, txsegs, txnsegs, &fd);
3245 	if (__predict_false(error != 0)) {
3246 		device_printf(sc->dev, "%s: failed to build frame "
3247 		    "descriptor: error=%d\n", __func__, error);
3248 		fq->chan->tx_dropped++;
3249 		goto err_unload;
3250 	}
3251 
	/*
	 * Make the frame data and the S/G table visible to the device before
	 * the frame descriptor is handed over to QBMan.
	 */
	bus_dmamap_sync(buf->tx.dmat, buf->tx.dmap, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(buf->tx.sgt_dmat, buf->tx.sgt_dmap, BUS_DMASYNC_PREWRITE);

	/* TODO: Enqueue several frames in a single command. */
	for (int i = 0; i < DPAA2_NI_ENQUEUE_RETRIES; i++) {
		/* TODO: Return error codes instead of # of frames. */
		rc = DPAA2_SWP_ENQ_MULTIPLE_FQ(fq->chan->io_dev, tx->fqid,
		    &fd, 1);
		if (rc == 1) {
			break;
		}
	}
3264 
3265 	if (rc != 1) {
3266 		fq->chan->tx_dropped++;
3267 		goto err_unload;
3268 	} else {
3269 		fq->chan->tx_frames++;
3270 	}
3271 	return;
3272 
3273 err_unload:
3274 	bus_dmamap_unload(buf->tx.dmat, buf->tx.dmap);
3275 	if (buf->tx.sgt_paddr != 0) {
3276 		bus_dmamap_unload(buf->tx.sgt_dmat, buf->tx.sgt_dmap);
3277 	}
3278 err:
3279 	m_freem(buf->tx.m);
3280 	buf_ring_enqueue(tx->idx_br, pidx);
3281 }
3282 
3283 static int
3284 dpaa2_ni_consume_frames(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq **src,
3285     uint32_t *consumed)
3286 {
3287 	struct dpaa2_ni_fq *fq = NULL;
3288 	struct dpaa2_dq *dq;
3289 	struct dpaa2_fd *fd;
3290 	int rc, frames = 0;
3291 
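	/*
	 * dpaa2_ni_chan_storage_next() returns EINPROGRESS while more
	 * responses may follow, and EALREADY (VDQ command expired) or
	 * ENOENT (FQ empty) once the dequeue has finished.
	 */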
	do {
		rc = dpaa2_ni_chan_storage_next(chan, &dq);
		if (rc != EINPROGRESS && rc != EALREADY && rc != ENOENT) {
			/* Unexpected response: do not spin forever. */
			KASSERT(false, ("%s: should not reach here", __func__));
			break;
		}
		if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
			fd = &dq->fdr.fd;
			fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;
			fq->consume(chan, fq, fd);
			frames++;
		}
		/* The dequeue is complete on EALREADY or ENOENT. */
		if (rc != EINPROGRESS) {
			break;
		}
	} while (true);
3313 
3314 	KASSERT(chan->store_idx < chan->store_sz,
3315 	    ("channel store idx >= size: store_idx=%d, store_sz=%d",
3316 	    chan->store_idx, chan->store_sz));
3317 
3318 	/*
3319 	 * A dequeue operation pulls frames from a single queue into the store.
	 * Return the frame queue and the number of consumed frames as an output.
3321 	 */
3322 	if (src != NULL)
3323 		*src = fq;
3324 	if (consumed != NULL)
3325 		*consumed = frames;
3326 
3327 	return (rc);
3328 }
3329 
3330 /**
3331  * @brief Receive frames.
3332  */
3333 static int
3334 dpaa2_ni_rx(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq,
3335     struct dpaa2_fd *fd)
3336 {
3337 	struct dpaa2_ni_softc *sc = device_get_softc(chan->ni_dev);
3338 	struct dpaa2_bp_softc *bpsc;
3339 	struct dpaa2_buf *buf;
3340 	struct dpaa2_fa *fa;
3341 	if_t ifp = sc->ifp;
3342 	struct mbuf *m;
3343 	device_t bp_dev;
3344 	bus_addr_t paddr = (bus_addr_t) fd->addr;
3345 	bus_addr_t released[DPAA2_SWP_BUFS_PER_CMD];
3346 	void *buf_data;
3347 	int buf_len, error, released_n = 0;
3348 
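	/*
	 * Recover the driver's buffer descriptor from the frame annotation
	 * written at the head of the buffer by dpaa2_ni_seed_rxbuf().
	 */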
3349 	fa = (struct dpaa2_fa *) PHYS_TO_DMAP(paddr);
3350 	buf = fa->buf;
3351 
3352 	KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
3353 	KASSERT(buf->type == DPAA2_BUF_RX, ("%s: not Rx buffer", __func__));
3354 	if (__predict_false(paddr != buf->rx.paddr)) {
3355 		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
3356 		    __func__, paddr, buf->rx.paddr);
3357 	}
3358 
3359 	/* Update statistics. */
3360 	switch (dpaa2_ni_fd_err(fd)) {
3361 	case 1: /* Enqueue rejected by QMan */
3362 		sc->rx_enq_rej_frames++;
3363 		break;
3364 	case 2: /* QMan IEOI error */
3365 		sc->rx_ieoi_err_frames++;
3366 		break;
3367 	default:
3368 		break;
3369 	}
3370 	switch (dpaa2_ni_fd_format(fd)) {
3371 	case DPAA2_FD_SINGLE:
3372 		sc->rx_single_buf_frames++;
3373 		break;
3374 	case DPAA2_FD_SG:
3375 		sc->rx_sg_buf_frames++;
3376 		break;
3377 	default:
3378 		break;
3379 	}
3380 
3381 	m = buf->rx.m;
3382 	buf->rx.m = NULL;
3383 	bus_dmamap_sync(buf->rx.dmat, buf->rx.dmap, BUS_DMASYNC_POSTREAD);
3384 	bus_dmamap_unload(buf->rx.dmat, buf->rx.dmap);
3385 
3386 	buf_len = dpaa2_ni_fd_data_len(fd);
3387 	buf_data = (uint8_t *)buf->rx.vaddr + dpaa2_ni_fd_offset(fd);
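	/*
	 * The frame data starts "fd_offset" bytes into the buffer; the
	 * headroom in front of it carries the frame annotation.
	 */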
3388 
3389 	/* Prefetch mbuf data. */
3390 	__builtin_prefetch(buf_data);
3391 
3392 	/* Write value to mbuf (avoid reading). */
3393 	m->m_flags |= M_PKTHDR;
3394 	m->m_data = buf_data;
3395 	m->m_len = buf_len;
3396 	m->m_pkthdr.len = buf_len;
3397 	m->m_pkthdr.rcvif = ifp;
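	/*
	 * Tag the packet with its frame queue ID as an opaque flow ID, so
	 * the stack keeps frames from the same queue together.
	 */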
3398 	m->m_pkthdr.flowid = fq->fqid;
3399 	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
3400 
3401 	if_input(ifp, m);
3402 
3403 	/* Keep the buffer to be recycled. */
3404 	chan->recycled[chan->recycled_n++] = buf;
3405 	KASSERT(chan->recycled_n <= DPAA2_SWP_BUFS_PER_CMD,
3406 	    ("%s: too many buffers to recycle", __func__));
3407 
3408 	/* Re-seed and release recycled buffers back to the pool. */
3409 	if (chan->recycled_n == DPAA2_SWP_BUFS_PER_CMD) {
3410 		/* Release new buffers to the pool if needed. */
3411 		taskqueue_enqueue(sc->bp_taskq, &sc->bp_task);
3412 
3413 		for (int i = 0; i < chan->recycled_n; i++) {
3414 			buf = chan->recycled[i];
3415 
3416 			/* Seed recycled buffer. */
3417 			error = dpaa2_ni_seed_rxbuf(sc, buf);
3418 			KASSERT(error == 0, ("%s: failed to seed recycled "
3419 			    "buffer: error=%d", __func__, error));
3420 			if (__predict_false(error != 0)) {
3421 				device_printf(sc->dev, "%s: failed to seed "
3422 				    "recycled buffer: error=%d\n", __func__,
3423 				    error);
3424 				continue;
3425 			}
3426 
3427 			/* Prepare buffer to be released in a single command. */
3428 			released[released_n++] = buf->rx.paddr;
3429 		}
3430 
3431 		/* There's only one buffer pool for now. */
3432 		bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
3433 		bpsc = device_get_softc(bp_dev);
3434 
3435 		error = DPAA2_SWP_RELEASE_BUFS(chan->io_dev, bpsc->attr.bpid,
3436 		    released, released_n);
3437 		if (__predict_false(error != 0)) {
3438 			device_printf(sc->dev, "%s: failed to release buffers "
3439 			    "to the pool: error=%d\n", __func__, error);
3440 			return (error);
3441 		}
3442 
		/* Be ready to recycle the next batch of buffers. */
3444 		chan->recycled_n = 0;
3445 	}
3446 
3447 	return (0);
3448 }
3449 
3450 /**
3451  * @brief Receive Rx error frames.
3452  */
3453 static int
3454 dpaa2_ni_rx_err(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq,
3455     struct dpaa2_fd *fd)
3456 {
3457 	device_t bp_dev;
3458 	struct dpaa2_ni_softc *sc = device_get_softc(chan->ni_dev);
3459 	struct dpaa2_bp_softc *bpsc;
3460 	struct dpaa2_buf *buf;
3461 	struct dpaa2_fa *fa;
3462 	bus_addr_t paddr = (bus_addr_t) fd->addr;
3463 	int error;
3464 
3465 	fa = (struct dpaa2_fa *) PHYS_TO_DMAP(paddr);
3466 	buf = fa->buf;
3467 
3468 	KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
3469 	KASSERT(buf->type == DPAA2_BUF_RX, ("%s: not Rx buffer", __func__));
3470 	if (__predict_false(paddr != buf->rx.paddr)) {
3471 		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
3472 		    __func__, paddr, buf->rx.paddr);
3473 	}
3474 
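	/*
	 * Drop the errored frame: unlike dpaa2_ni_rx(), the mbuf is not
	 * handed to the stack, but stays attached to the buffer so that the
	 * buffer can be reused once QBMan hands it out again.
	 */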
3475 	/* There's only one buffer pool for now. */
3476 	bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
3477 	bpsc = device_get_softc(bp_dev);
3478 
3479 	/* Release buffer to QBMan buffer pool. */
3480 	error = DPAA2_SWP_RELEASE_BUFS(chan->io_dev, bpsc->attr.bpid, &paddr, 1);
3481 	if (error != 0) {
3482 		device_printf(sc->dev, "%s: failed to release frame buffer to "
3483 		    "the pool: error=%d\n", __func__, error);
3484 		return (error);
3485 	}
3486 
3487 	return (0);
3488 }
3489 
3490 /**
3491  * @brief Receive Tx confirmation frames.
3492  */
3493 static int
3494 dpaa2_ni_tx_conf(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq,
3495     struct dpaa2_fd *fd)
3496 {
3497 	struct dpaa2_ni_tx_ring *tx;
3498 	struct dpaa2_buf *buf;
3499 	struct dpaa2_fa *fa;
3500 	bus_addr_t paddr = (bus_addr_t) fd->addr;
3501 
3502 	fa = (struct dpaa2_fa *) PHYS_TO_DMAP(paddr);
3503 	buf = fa->buf;
3504 	tx = fa->tx;
3505 
3506 	KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
3507 	KASSERT(buf->type == DPAA2_BUF_TX, ("%s: not Tx buffer", __func__));
3508 	if (paddr != buf->tx.paddr) {
3509 		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
3510 		    __func__, paddr, buf->tx.paddr);
3511 	}
3512 
3513 	bus_dmamap_sync(buf->tx.dmat, buf->tx.dmap, BUS_DMASYNC_POSTWRITE);
3514 	bus_dmamap_sync(buf->tx.sgt_dmat, buf->tx.sgt_dmap, BUS_DMASYNC_POSTWRITE);
3515 	bus_dmamap_unload(buf->tx.dmat, buf->tx.dmap);
3516 	bus_dmamap_unload(buf->tx.sgt_dmat, buf->tx.sgt_dmap);
3517 	m_freem(buf->tx.m);
3518 
3519 	/* Return Tx buffer index back to the ring. */
3520 	buf_ring_enqueue(tx->idx_br, (void *) buf->tx.idx);
3521 
3522 	return (0);
3523 }
3524 
3525 /**
3526  * @brief Compare versions of the DPAA2 network interface API.
3527  */
3528 static int
3529 dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *sc, uint16_t major,
3530     uint16_t minor)
3531 {
	if (sc->api_major == major)
		return (sc->api_minor - minor);
	return (sc->api_major - major);
3535 }
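
/*
 * Example (hypothetical version numbers): a caller that requires DPNI API
 * 7.8 or newer could check
 *
 *	if (dpaa2_ni_cmp_api_version(sc, 7, 8) < 0)
 *		return (ENODEV);
 */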
3536 
3537 /**
3538  * @brief Allocate Rx buffers visible to QBMan and release them to the pool.
3539  */
3540 static int
3541 dpaa2_ni_seed_buf_pool(struct dpaa2_ni_softc *sc, uint32_t seedn)
3542 {
3543 	device_t bp_dev;
3544 	struct dpaa2_bp_softc *bpsc;
3545 	struct dpaa2_buf *buf;
3546 	bus_addr_t paddr[DPAA2_SWP_BUFS_PER_CMD];
3547 	const int allocated = DPAA2_ATOMIC_READ(&sc->buf_num);
3548 	int i, error, bufn = 0;
3549 
3550 	KASSERT(sc->bp_dmat != NULL, ("%s: DMA tag for buffer pool not "
3551 	    "created?", __func__));
3552 
3553 	/* There's only one buffer pool for now. */
3554 	bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
3555 	bpsc = device_get_softc(bp_dev);
3556 
3557 	/* Limit # of buffers released to the pool. */
3558 	if (allocated + seedn > DPAA2_NI_BUFS_MAX)
3559 		seedn = DPAA2_NI_BUFS_MAX - allocated;
3560 
3561 	/* Release "seedn" buffers to the pool. */
3562 	for (i = allocated; i < (allocated + seedn); i++) {
3563 		/* Enough buffers were allocated for a single command. */
3564 		if (bufn == DPAA2_SWP_BUFS_PER_CMD) {
3565 			error = DPAA2_SWP_RELEASE_BUFS(sc->channels[0]->io_dev,
3566 			    bpsc->attr.bpid, paddr, bufn);
3567 			if (error) {
3568 				device_printf(sc->dev, "%s: failed to release "
3569 				    "buffers to the pool (1)\n", __func__);
3570 				return (error);
3571 			}
3572 			DPAA2_ATOMIC_ADD(&sc->buf_num, bufn);
3573 			bufn = 0;
3574 		}
3575 
3576 		buf = &sc->buf[i];
3577 		buf->type = DPAA2_BUF_RX;
3578 		buf->rx.m = NULL;
3579 		buf->rx.dmap = NULL;
3580 		buf->rx.paddr = 0;
3581 		buf->rx.vaddr = NULL;
3582 		error = dpaa2_ni_seed_rxbuf(sc, buf);
3583 		if (error != 0) {
3584 			break;
3585 		}
3586 		paddr[bufn] = buf->rx.paddr;
3587 		bufn++;
3588 	}
3589 
3590 	/* Release if there are buffers left. */
3591 	if (bufn > 0) {
3592 		error = DPAA2_SWP_RELEASE_BUFS(sc->channels[0]->io_dev,
3593 		    bpsc->attr.bpid, paddr, bufn);
3594 		if (error) {
3595 			device_printf(sc->dev, "%s: failed to release "
3596 			    "buffers to the pool (2)\n", __func__);
3597 			return (error);
3598 		}
3599 		DPAA2_ATOMIC_ADD(&sc->buf_num, bufn);
3600 	}
3601 
3602 	return (0);
3603 }
3604 
3605 /**
3606  * @brief Prepare Rx buffer to be released to the buffer pool.
3607  */
3608 static int
3609 dpaa2_ni_seed_rxbuf(struct dpaa2_ni_softc *sc, struct dpaa2_buf *buf)
3610 {
3611 	struct mbuf *m;
3612 	struct dpaa2_fa *fa;
3613 	bus_dmamap_t dmap;
3614 	bus_dma_segment_t segs;
3615 	int error, nsegs;
3616 
3617 	KASSERT(sc->bp_dmat != NULL, ("%s: Buffer pool DMA tag is not "
3618 	    "allocated?", __func__));
3619 	KASSERT(buf->type == DPAA2_BUF_RX, ("%s: not Rx buffer", __func__));
3620 
3621 	/* Keep DMA tag for this buffer. */
3622 	if (__predict_false(buf->rx.dmat == NULL))
3623 		buf->rx.dmat = sc->bp_dmat;
3624 
	/* Create a DMA map for the given buffer if it doesn't exist yet. */
3626 	if (__predict_false(buf->rx.dmap == NULL)) {
3627 		error = bus_dmamap_create(buf->rx.dmat, 0, &dmap);
3628 		if (error) {
3629 			device_printf(sc->dev, "%s: failed to create DMA map "
3630 			    "for buffer: error=%d\n", __func__, error);
3631 			return (error);
3632 		}
3633 		buf->rx.dmap = dmap;
3634 	}
3635 
3636 	/* Allocate mbuf if needed. */
3637 	if (__predict_false(buf->rx.m == NULL)) {
3638 		m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, BUF_SIZE);
3639 		if (__predict_false(m == NULL)) {
3640 			device_printf(sc->dev, "%s: failed to allocate mbuf for "
3641 			    "buffer\n", __func__);
3642 			return (ENOMEM);
3643 		}
3644 		m->m_len = m->m_ext.ext_size;
3645 		m->m_pkthdr.len = m->m_ext.ext_size;
3646 		buf->rx.m = m;
3647 	} else
3648 		m = buf->rx.m;
3649 
3650 	error = bus_dmamap_load_mbuf_sg(buf->rx.dmat, buf->rx.dmap,
3651 	    m, &segs, &nsegs, BUS_DMA_NOWAIT);
3652 	KASSERT(nsegs == 1, ("one segment expected: nsegs=%d", nsegs));
3653 	KASSERT(error == 0, ("failed to map mbuf: error=%d", error));
3654 	if (__predict_false(error != 0 || nsegs != 1)) {
3655 		device_printf(sc->dev, "%s: failed to map mbuf: error=%d, "
3656 		    "nsegs=%d\n", __func__, error, nsegs);
3657 		bus_dmamap_unload(buf->rx.dmat, buf->rx.dmap);
3658 		m_freem(m);
3659 		return (error);
3660 	}
3661 	buf->rx.paddr = segs.ds_addr;
3662 	buf->rx.vaddr = m->m_data;
3663 
3664 	/* Populate frame annotation for future use. */
3665 	fa = (struct dpaa2_fa *) m->m_data;
3666 	fa->magic = DPAA2_MAGIC;
3667 	fa->buf = buf;
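	/* The Rx paths rely on this annotation to recover "buf". */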
3668 
3669 	bus_dmamap_sync(buf->rx.dmat, buf->rx.dmap, BUS_DMASYNC_PREREAD);
3670 
3671 	return (0);
3672 }
3673 
3674 /**
3675  * @brief Prepare Tx buffer to be added to the Tx ring.
3676  */
3677 static int
3678 dpaa2_ni_seed_txbuf(struct dpaa2_ni_softc *sc, struct dpaa2_buf *buf)
3679 {
3680 	bus_dmamap_t dmap;
3681 	int error;
3682 
3683 	KASSERT(sc->tx_dmat != NULL, ("%s: Tx DMA tag is not allocated?",
3684 	    __func__));
3685 	KASSERT(sc->sgt_dmat != NULL, ("%s: S/G DMA tag not allocated?",
3686 	    __func__));
3687 	KASSERT(buf->type == DPAA2_BUF_TX, ("%s: not Tx buffer", __func__));
3688 
3689 	/* Keep DMA tags for this buffer. */
3690 	if (__predict_true(buf->tx.dmat == NULL))
3691 		buf->tx.dmat = sc->tx_dmat;
3692 	if (__predict_true(buf->tx.sgt_dmat == NULL))
3693 		buf->tx.sgt_dmat = sc->sgt_dmat;
3694 
	/* Create a DMA map for the given buffer if it doesn't exist yet. */
3696 	if (__predict_true(buf->tx.dmap == NULL)) {
3697 		error = bus_dmamap_create(buf->tx.dmat, 0, &dmap);
3698 		if (error != 0) {
3699 			device_printf(sc->dev, "%s: failed to create "
3700 			    "Tx DMA map: error=%d\n", __func__, error);
3701 			return (error);
3702 		}
3703 		buf->tx.dmap = dmap;
3704 	}
3705 
	/* Allocate a buffer to store the scatter/gather table. */
3707 	if (__predict_true(buf->tx.sgt_vaddr == NULL)) {
3708 		error = bus_dmamem_alloc(buf->tx.sgt_dmat,
3709 		    &buf->tx.sgt_vaddr, BUS_DMA_ZERO | BUS_DMA_COHERENT,
3710 		    &buf->tx.sgt_dmap);
3711 		if (error != 0) {
3712 			device_printf(sc->dev, "%s: failed to allocate "
3713 			    "S/G table: error=%d\n", __func__, error);
3714 			return (error);
3715 		}
3716 	}
3717 
3718 	return (0);
3719 }
3720 
3721 /**
3722  * @brief Allocate channel storage visible to QBMan.
3723  */
3724 static int
3725 dpaa2_ni_seed_chan_storage(struct dpaa2_ni_softc *sc,
3726     struct dpaa2_ni_channel *chan)
3727 {
3728 	struct dpaa2_buf *buf = &chan->store;
3729 	int error;
3730 
3731 	KASSERT(sc->st_dmat != NULL, ("%s: channel storage DMA tag is not "
3732 	    "allocated?", __func__));
3733 	KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not channel storage buffer",
3734 	    __func__));
3735 
3736 	/* Keep DMA tag for this buffer. */
3737 	if (__predict_false(buf->store.dmat == NULL)) {
3738 		buf->store.dmat = sc->st_dmat;
3739 	}
3740 
3741 	if (__predict_false(buf->store.vaddr == NULL)) {
3742 		error = bus_dmamem_alloc(buf->store.dmat, &buf->store.vaddr,
3743 		    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->store.dmap);
3744 		if (error) {
3745 			device_printf(sc->dev, "%s: failed to allocate channel "
3746 			    "storage\n", __func__);
3747 			return (error);
3748 		}
3749 	}
3750 
3751 	if (__predict_false(buf->store.paddr == 0)) {
3752 		error = bus_dmamap_load(buf->store.dmat, buf->store.dmap,
3753 		    buf->store.vaddr, ETH_STORE_SIZE, dpaa2_ni_dmamap_cb,
3754 		    &buf->store.paddr, BUS_DMA_NOWAIT);
3755 		if (error) {
3756 			device_printf(sc->dev, "%s: failed to map channel "
3757 			    "storage\n", __func__);
3758 			return (error);
3759 		}
3760 	}
3761 
3762 	chan->store_sz = ETH_STORE_FRAMES;
3763 	chan->store_idx = 0;
3764 
3765 	return (0);
3766 }
3767 
3768 /**
3769  * @brief Build a DPAA2 frame descriptor.
3770  */
3771 static int
3772 dpaa2_ni_build_fd(struct dpaa2_ni_softc *sc, struct dpaa2_ni_tx_ring *tx,
3773     struct dpaa2_buf *buf, bus_dma_segment_t *txsegs, int txnsegs,
3774     struct dpaa2_fd *fd)
3775 {
3776 	struct dpaa2_sg_entry *sgt;
3777 	struct dpaa2_fa *fa;
3778 	int i, error;
3779 
3780 	KASSERT(txnsegs <= DPAA2_TX_SEGLIMIT, ("%s: too many segments, "
3781 	    "txnsegs (%d) > %d", __func__, txnsegs, DPAA2_TX_SEGLIMIT));
3782 	KASSERT(buf->type == DPAA2_BUF_TX, ("%s: not Tx buffer", __func__));
3783 	KASSERT(buf->tx.sgt_vaddr != NULL, ("%s: S/G table not allocated?",
3784 	    __func__));
3785 
3786 	/* Reset frame descriptor fields. */
3787 	memset(fd, 0, sizeof(*fd));
3788 
3789 	if (__predict_true(txnsegs <= DPAA2_TX_SEGLIMIT)) {
		/*
		 * Populate the S/G table at "tx_data_off" bytes into the
		 * buffer (a byte offset, hence the uint8_t pointer), matching
		 * the offset programmed into the frame descriptor below.
		 */
		sgt = (struct dpaa2_sg_entry *)((uint8_t *)buf->tx.sgt_vaddr +
		    sc->tx_data_off);
3793 		for (i = 0; i < txnsegs; i++) {
3794 			sgt[i].addr = (uint64_t) txsegs[i].ds_addr;
3795 			sgt[i].len = (uint32_t) txsegs[i].ds_len;
3796 			sgt[i].offset_fmt = 0u;
3797 		}
3798 		sgt[i-1].offset_fmt |= 0x8000u; /* set final entry flag */
3799 
3800 		KASSERT(buf->tx.sgt_paddr == 0, ("%s: sgt_paddr(%#jx) != 0",
3801 		    __func__, buf->tx.sgt_paddr));
3802 
3803 		/* Load S/G table. */
3804 		error = bus_dmamap_load(buf->tx.sgt_dmat, buf->tx.sgt_dmap,
3805 		    buf->tx.sgt_vaddr, DPAA2_TX_SGT_SZ, dpaa2_ni_dmamap_cb,
3806 		    &buf->tx.sgt_paddr, BUS_DMA_NOWAIT);
3807 		if (__predict_false(error != 0)) {
3808 			device_printf(sc->dev, "%s: failed to map S/G table: "
3809 			    "error=%d\n", __func__, error);
3810 			return (error);
3811 		}
3812 
3813 		buf->tx.paddr = buf->tx.sgt_paddr;
3814 		buf->tx.vaddr = buf->tx.sgt_vaddr;
3815 		sc->tx_sg_frames++; /* for sysctl(9) */
3816 	} else {
3817 		return (EINVAL);
3818 	}
3819 
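	/*
	 * Write the frame annotation at the head of the S/G buffer (in front
	 * of "tx_data_off") so that dpaa2_ni_tx_conf() can recover the buffer
	 * and its Tx ring from the confirmed frame.
	 */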
3820 	fa = (struct dpaa2_fa *) buf->tx.sgt_vaddr;
3821 	fa->magic = DPAA2_MAGIC;
3822 	fa->buf = buf;
3823 	fa->tx = tx;
3824 
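	/*
	 * Note: 0x2000u is assumed to select the scatter/gather frame format
	 * and 0x00800000u the annotation pass-through (PTA) control bit of
	 * the frame descriptor; see the DPAA2_NI_FD_* masks for the layout.
	 */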
3825 	fd->addr = buf->tx.paddr;
3826 	fd->data_length = (uint32_t) buf->tx.m->m_pkthdr.len;
3827 	fd->bpid_ivp_bmt = 0;
3828 	fd->offset_fmt_sl = 0x2000u | sc->tx_data_off;
3829 	fd->ctrl = 0x00800000u;
3830 
3831 	return (0);
3832 }
3833 
3834 static int
3835 dpaa2_ni_fd_err(struct dpaa2_fd *fd)
3836 {
3837 	return ((fd->ctrl >> DPAA2_NI_FD_ERR_SHIFT) & DPAA2_NI_FD_ERR_MASK);
3838 }
3839 
3840 static uint32_t
3841 dpaa2_ni_fd_data_len(struct dpaa2_fd *fd)
3842 {
3843 	if (dpaa2_ni_fd_short_len(fd))
3844 		return (fd->data_length & DPAA2_NI_FD_LEN_MASK);
3845 
3846 	return (fd->data_length);
3847 }
3848 
3849 static int
3850 dpaa2_ni_fd_format(struct dpaa2_fd *fd)
3851 {
3852 	return ((enum dpaa2_fd_format)((fd->offset_fmt_sl >>
3853 	    DPAA2_NI_FD_FMT_SHIFT) & DPAA2_NI_FD_FMT_MASK));
3854 }
3855 
3856 static bool
3857 dpaa2_ni_fd_short_len(struct dpaa2_fd *fd)
3858 {
3859 	return (((fd->offset_fmt_sl >> DPAA2_NI_FD_SL_SHIFT)
3860 	    & DPAA2_NI_FD_SL_MASK) == 1);
3861 }
3862 
3863 static int
3864 dpaa2_ni_fd_offset(struct dpaa2_fd *fd)
3865 {
3866 	return (fd->offset_fmt_sl & DPAA2_NI_FD_OFFSET_MASK);
3867 }
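/*
 * The accessors above unpack fields of the frame descriptor's "ctrl" and
 * "offset_fmt_sl" words: hardware error bits, the data length (optionally in
 * "short length" form), the buffer format (single vs. scatter/gather) and
 * the offset of the frame data within the buffer.
 */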
3868 
3869 /**
3870  * @brief Collect statistics of the network interface.
3871  */
3872 static int
3873 dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS)
3874 {
3875 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
3876 	struct dpni_stat *stat = &dpni_stat_sysctls[oidp->oid_number];
3877 	device_t pdev = device_get_parent(sc->dev);
3878 	device_t dev = sc->dev;
3879 	device_t child = dev;
3880 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
3881 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
3882 	struct dpaa2_cmd cmd;
3883 	uint64_t cnt[DPAA2_NI_STAT_COUNTERS];
3884 	uint64_t result = 0;
3885 	uint16_t rc_token, ni_token;
3886 	int error;
3887 
3888 	DPAA2_CMD_INIT(&cmd);
3889 
3890 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
3891 	if (error) {
3892 		device_printf(dev, "%s: failed to open resource container: "
3893 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
3894 		goto exit;
3895 	}
3896 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
3897 	if (error) {
3898 		device_printf(dev, "%s: failed to open network interface: "
3899 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
3900 		goto close_rc;
3901 	}
3902 
3903 	error = DPAA2_CMD_NI_GET_STATISTICS(dev, child, &cmd, stat->page, 0, cnt);
3904 	if (!error) {
3905 		result = cnt[stat->cnt];
3906 	}
3907 
3908 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
3909 close_rc:
3910 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
3911 exit:
3912 	return (sysctl_handle_64(oidp, &result, 0, req));
3913 }
3914 
3915 static int
3916 dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS)
3917 {
3918 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
3919 	uint32_t buf_num = DPAA2_ATOMIC_READ(&sc->buf_num);
3920 
3921 	return (sysctl_handle_32(oidp, &buf_num, 0, req));
3922 }
3923 
3924 static int
3925 dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS)
3926 {
3927 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
3928 	uint32_t buf_free = DPAA2_ATOMIC_READ(&sc->buf_free);
3929 
3930 	return (sysctl_handle_32(oidp, &buf_free, 0, req));
3931 }
3932 
3933 static int
3934 dpaa2_ni_set_hash(device_t dev, uint64_t flags)
3935 {
3936 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
3937 	uint64_t key = 0;
3938 	int i;
3939 
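	/* Rx distribution makes sense only with more than one Rx queue. */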
	if (sc->attr.num.queues <= 1) {
3941 		return (EOPNOTSUPP);
3942 	}
3943 
3944 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3945 		if (dist_fields[i].rxnfc_field & flags) {
3946 			key |= dist_fields[i].id;
3947 		}
3948 	}
3949 
3950 	return (dpaa2_ni_set_dist_key(dev, DPAA2_NI_DIST_MODE_HASH, key));
3951 }
3952 
3953 /**
 * @brief Set the Rx distribution (hash or flow classification) key.
 *
 * "flags" is a combination of RXH_ bits.
3956  */
3957 static int
3958 dpaa2_ni_set_dist_key(device_t dev, enum dpaa2_ni_dist_mode type, uint64_t flags)
3959 {
3960 	device_t pdev = device_get_parent(dev);
3961 	device_t child = dev;
3962 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
3963 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
3964 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
3965 	struct dpkg_profile_cfg cls_cfg;
3966 	struct dpkg_extract *key;
3967 	struct dpaa2_buf *buf = &sc->rxd_kcfg;
3968 	struct dpaa2_cmd cmd;
3969 	uint16_t rc_token, ni_token;
3970 	int i, error = 0;
3971 
3972 	KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not storage buffer",
3973 	    __func__));
3974 	if (__predict_true(buf->store.dmat == NULL)) {
3975 		buf->store.dmat = sc->rxd_dmat;
3976 	}
3977 
3978 	memset(&cls_cfg, 0, sizeof(cls_cfg));
3979 
3980 	/* Configure extracts according to the given flags. */
3981 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3982 		key = &cls_cfg.extracts[cls_cfg.num_extracts];
3983 
3984 		if (!(flags & dist_fields[i].id)) {
3985 			continue;
3986 		}
3987 
3988 		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
3989 			device_printf(dev, "%s: failed to add key extraction "
3990 			    "rule\n", __func__);
3991 			return (E2BIG);
3992 		}
3993 
3994 		key->type = DPKG_EXTRACT_FROM_HDR;
3995 		key->extract.from_hdr.prot = dist_fields[i].cls_prot;
3996 		key->extract.from_hdr.type = DPKG_FULL_FIELD;
3997 		key->extract.from_hdr.field = dist_fields[i].cls_field;
3998 		cls_cfg.num_extracts++;
3999 	}
4000 
4001 	error = bus_dmamem_alloc(buf->store.dmat, &buf->store.vaddr,
4002 	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->store.dmap);
4003 	if (error != 0) {
4004 		device_printf(dev, "%s: failed to allocate a buffer for Rx "
4005 		    "traffic distribution key configuration\n", __func__);
4006 		return (error);
4007 	}
4008 
4009 	error = dpaa2_ni_prepare_key_cfg(&cls_cfg, (uint8_t *) buf->store.vaddr);
4010 	if (error != 0) {
4011 		device_printf(dev, "%s: failed to prepare key configuration: "
4012 		    "error=%d\n", __func__, error);
4013 		return (error);
4014 	}
4015 
4016 	/* Prepare for setting the Rx dist. */
4017 	error = bus_dmamap_load(buf->store.dmat, buf->store.dmap,
4018 	    buf->store.vaddr, DPAA2_CLASSIFIER_DMA_SIZE, dpaa2_ni_dmamap_cb,
4019 	    &buf->store.paddr, BUS_DMA_NOWAIT);
4020 	if (error != 0) {
4021 		device_printf(sc->dev, "%s: failed to map a buffer for Rx "
4022 		    "traffic distribution key configuration\n", __func__);
4023 		return (error);
4024 	}
4025 
4026 	if (type == DPAA2_NI_DIST_MODE_HASH) {
4027 		DPAA2_CMD_INIT(&cmd);
4028 
4029 		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
4030 		    &rc_token);
4031 		if (error) {
4032 			device_printf(dev, "%s: failed to open resource "
4033 			    "container: id=%d, error=%d\n", __func__, rcinfo->id,
4034 			    error);
4035 			goto err_exit;
4036 		}
4037 		error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id,
4038 		    &ni_token);
4039 		if (error) {
4040 			device_printf(dev, "%s: failed to open network "
4041 			    "interface: id=%d, error=%d\n", __func__, dinfo->id,
4042 			    error);
4043 			goto close_rc;
4044 		}
4045 
4046 		error = DPAA2_CMD_NI_SET_RX_TC_DIST(dev, child, &cmd,
4047 		    sc->attr.num.queues, 0, DPAA2_NI_DIST_MODE_HASH,
4048 		    buf->store.paddr);
4049 		if (error != 0) {
4050 			device_printf(dev, "%s: failed to set distribution mode "
4051 			    "and size for the traffic class\n", __func__);
4052 		}
4053 
4054 		(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
4055 		    ni_token));
4056 close_rc:
4057 		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
4058 		    rc_token));
4059 	}
4060 
4061 err_exit:
4062 	return (error);
4063 }
4064 
4065 /**
 * @brief Prepare extract parameters.
 *
 * cfg:		Definition of a full Key Generation profile.
 * key_cfg_buf:	Zeroed 256-byte buffer to serialize the profile into before
 *		it is mapped for DMA.
4070  */
4071 static int
4072 dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *cfg, uint8_t *key_cfg_buf)
4073 {
4074 	struct dpni_ext_set_rx_tc_dist *dpni_ext;
4075 	struct dpni_dist_extract *extr;
4076 	int i, j;
4077 
4078 	if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
4079 		return (EINVAL);
4080 
4081 	dpni_ext = (struct dpni_ext_set_rx_tc_dist *) key_cfg_buf;
4082 	dpni_ext->num_extracts = cfg->num_extracts;
4083 
4084 	for (i = 0; i < cfg->num_extracts; i++) {
4085 		extr = &dpni_ext->extracts[i];
4086 
4087 		switch (cfg->extracts[i].type) {
4088 		case DPKG_EXTRACT_FROM_HDR:
4089 			extr->prot = cfg->extracts[i].extract.from_hdr.prot;
4090 			extr->efh_type =
4091 			    cfg->extracts[i].extract.from_hdr.type & 0x0Fu;
4092 			extr->size = cfg->extracts[i].extract.from_hdr.size;
4093 			extr->offset = cfg->extracts[i].extract.from_hdr.offset;
4094 			extr->field = cfg->extracts[i].extract.from_hdr.field;
4095 			extr->hdr_index =
4096 				cfg->extracts[i].extract.from_hdr.hdr_index;
4097 			break;
4098 		case DPKG_EXTRACT_FROM_DATA:
4099 			extr->size = cfg->extracts[i].extract.from_data.size;
4100 			extr->offset =
4101 				cfg->extracts[i].extract.from_data.offset;
4102 			break;
4103 		case DPKG_EXTRACT_FROM_PARSE:
4104 			extr->size = cfg->extracts[i].extract.from_parse.size;
4105 			extr->offset =
4106 				cfg->extracts[i].extract.from_parse.offset;
4107 			break;
4108 		default:
4109 			return (EINVAL);
4110 		}
4111 
4112 		extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
4113 		extr->extract_type = cfg->extracts[i].type & 0x0Fu;
4114 
4115 		for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
4116 			extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
4117 			extr->masks[j].offset =
4118 				cfg->extracts[i].masks[j].offset;
4119 		}
4120 	}
4121 
4122 	return (0);
4123 }
4124 
4125 /**
 * @brief Obtain the next dequeue response from the channel storage.
 *
 * Returns EINPROGRESS while more responses may follow, EALREADY once the
 * VDQ command has expired, and ENOENT when the frame queue is empty.
 */
4128 static int
4129 dpaa2_ni_chan_storage_next(struct dpaa2_ni_channel *chan, struct dpaa2_dq **dq)
4130 {
4131 	struct dpaa2_buf *buf = &chan->store;
4132 	struct dpaa2_dq *msgs = buf->store.vaddr;
4133 	struct dpaa2_dq *msg = &msgs[chan->store_idx];
4134 	int rc = EINPROGRESS;
4135 
4136 	chan->store_idx++;
4137 
	if (msg->fdr.desc.stat & DPAA2_DQ_STAT_EXPIRED) {
		rc = EALREADY; /* VDQ command is expired */
		chan->store_idx = 0;
		if (!(msg->fdr.desc.stat & DPAA2_DQ_STAT_VALIDFRAME))
			msg = NULL; /* Null response, FD is invalid */
	}
	/* "msg" may have been invalidated above: do not dereference NULL. */
	if (msg != NULL && (msg->fdr.desc.stat & DPAA2_DQ_STAT_FQEMPTY)) {
		rc = ENOENT; /* FQ is empty */
		chan->store_idx = 0;
	}
4148 
4149 	if (dq != NULL)
4150 		*dq = msg;
4151 
4152 	return (rc);
4153 }
4154 
4155 static device_method_t dpaa2_ni_methods[] = {
4156 	/* Device interface */
4157 	DEVMETHOD(device_probe,		dpaa2_ni_probe),
4158 	DEVMETHOD(device_attach,	dpaa2_ni_attach),
4159 	DEVMETHOD(device_detach,	dpaa2_ni_detach),
4160 
4161 	/* mii via memac_mdio */
4162 	DEVMETHOD(miibus_statchg,	dpaa2_ni_miibus_statchg),
4163 
4164 	DEVMETHOD_END
4165 };
4166 
4167 static driver_t dpaa2_ni_driver = {
4168 	"dpaa2_ni",
4169 	dpaa2_ni_methods,
4170 	sizeof(struct dpaa2_ni_softc),
4171 };
4172 
4173 DRIVER_MODULE(miibus, dpaa2_ni, miibus_driver, 0, 0);
4174 DRIVER_MODULE(dpaa2_ni, dpaa2_rc, dpaa2_ni_driver, 0, 0);
4175 
4176 MODULE_DEPEND(dpaa2_ni, miibus, 1, 1, 1);
4177 #ifdef DEV_ACPI
4178 MODULE_DEPEND(dpaa2_ni, memac_mdio_acpi, 1, 1, 1);
4179 #endif
4180 #ifdef FDT
4181 MODULE_DEPEND(dpaa2_ni, memac_mdio_fdt, 1, 1, 1);
4182 #endif
4183