/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright © 2021-2022 Dmitry Salychev
 * Copyright © 2022 Mathew McBride
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * The DPAA2 Network Interface (DPNI) driver.
 *
 * The DPNI object is a network interface that is configurable to support a wide
 * range of features from a very basic Ethernet interface up to a
 * high-functioning network interface. The DPNI supports features that are
 * expected by standard network stacks, from basic features to offloads.
 *
 * DPNIs work with Ethernet traffic, starting with the L2 header. Additional
 * functions are provided for standard network protocols (L2, L3, L4, etc.).
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/taskqueue.h>
#include <sys/buf_ring.h>
#include <sys/smp.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/atomic.h>

#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <dev/pci/pcivar.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mdio/mdio.h>

#include "opt_acpi.h"
#include "opt_platform.h"

#include "pcib_if.h"
#include "pci_if.h"
#include "miibus_if.h"
#include "memac_mdio_if.h"

#include "dpaa2_types.h"
#include "dpaa2_mc.h"
#include "dpaa2_mc_if.h"
#include "dpaa2_mcp.h"
#include "dpaa2_swp.h"
#include "dpaa2_swp_if.h"
#include "dpaa2_cmd_if.h"
#include "dpaa2_ni.h"

#define BIT(x)			(1ul << (x))
#define WRIOP_VERSION(x, y, z)	((x) << 10 | (y) << 5 | (z) << 0)
#define ARRAY_SIZE(a)		(sizeof(a) / sizeof((a)[0]))

/* Frame Dequeue Response status bits. */
#define IS_NULL_RESPONSE(stat)	((((stat) >> 4) & 1) == 0)
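/* A response is "null" (carries no frame) when bit 4 of its status is clear. */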

#define	ALIGN_UP(x, y)		roundup2((x), (y))
#define	ALIGN_DOWN(x, y)	rounddown2((x), (y))
#define CACHE_LINE_ALIGN(x)	ALIGN_UP((x), CACHE_LINE_SIZE)

#define DPNI_LOCK(__sc) do {			\
	mtx_assert(&(__sc)->lock, MA_NOTOWNED);	\
	mtx_lock(&(__sc)->lock);		\
} while (0)
#define	DPNI_UNLOCK(__sc) do {			\
	mtx_assert(&(__sc)->lock, MA_OWNED);	\
	mtx_unlock(&(__sc)->lock);		\
} while (0)

#define TX_LOCK(__tx) do {			\
	mtx_assert(&(__tx)->lock, MA_NOTOWNED);	\
	mtx_lock(&(__tx)->lock);		\
} while (0)
#define	TX_UNLOCK(__tx) do {			\
	mtx_assert(&(__tx)->lock, MA_OWNED);	\
	mtx_unlock(&(__tx)->lock);		\
} while (0)

#define DPAA2_TX_RING(sc, chan, tc)				\
	(&(sc)->channels[(chan)]->txc_queue.tx_rings[(tc)])

#define DPNI_IRQ_INDEX		0 /* Index of the only DPNI IRQ. */
#define DPNI_IRQ_LINK_CHANGED	1 /* Link state changed */
#define DPNI_IRQ_EP_CHANGED	2 /* DPAA2 endpoint dis/connected */

/* Default maximum frame length. */
#define DPAA2_ETH_MFL		(ETHER_MAX_LEN - ETHER_CRC_LEN)
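/* I.e. 1514 bytes: ETHER_MAX_LEN without the 4-byte Ethernet CRC. */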

/* Minimally supported version of the DPNI API. */
#define DPNI_VER_MAJOR		7
#define DPNI_VER_MINOR		0

/* Rx/Tx buffers configuration. */
#define BUF_ALIGN_V1		256 /* WRIOP v1.0.0 limitation */
#define BUF_ALIGN		64
#define BUF_SWA_SIZE		64  /* SW annotation size */
#define BUF_RX_HWA_SIZE		64  /* HW annotation size */
#define BUF_TX_HWA_SIZE		128 /* HW annotation size */
#define BUF_SIZE		(MJUM9BYTES)
#define	BUF_MAXADDR_49BIT	0x1FFFFFFFFFFFFul
#define	BUF_MAXADDR		(BUS_SPACE_MAXADDR)

#define DPAA2_TX_BUFRING_SZ	(4096u)
#define DPAA2_TX_SEGLIMIT	(16u) /* arbitrary number */
#define DPAA2_TX_SEG_SZ		(4096u)
#define DPAA2_TX_SEGS_MAXSZ	(DPAA2_TX_SEGLIMIT * DPAA2_TX_SEG_SZ)
#define DPAA2_TX_SGT_SZ		(512u) /* bytes */
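/*
 * NOTE: With these limits a single Tx frame may span up to DPAA2_TX_SEGLIMIT
 * scatter/gather segments of 4 KiB each, i.e. 64 KiB in total.
 */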

/* Size of a buffer to keep a QoS table key configuration. */
#define ETH_QOS_KCFG_BUF_SIZE	256

/* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */
#define DPAA2_CLASSIFIER_DMA_SIZE 256

/* Channel storage buffer configuration. */
#define ETH_STORE_FRAMES	16u
#define ETH_STORE_SIZE		((ETH_STORE_FRAMES + 1) * sizeof(struct dpaa2_dq))
#define ETH_STORE_ALIGN		64u

/* Buffers layout options. */
#define BUF_LOPT_TIMESTAMP	0x1
#define BUF_LOPT_PARSER_RESULT	0x2
#define BUF_LOPT_FRAME_STATUS	0x4
#define BUF_LOPT_PRIV_DATA_SZ	0x8
#define BUF_LOPT_DATA_ALIGN	0x10
#define BUF_LOPT_DATA_HEAD_ROOM	0x20
#define BUF_LOPT_DATA_TAIL_ROOM	0x40

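/*
 * NOTE: Buffer physical addresses fit into 49 bits, so the driver reuses the
 * upper bits of the address stored in a frame descriptor to stash channel,
 * buffer and Tx ring indices, e.g. chan = (paddr >> DPAA2_NI_BUF_CHAN_SHIFT) &
 * DPAA2_NI_BUF_CHAN_MASK. They are recovered on Rx/Tx confirmation via the
 * dpaa2_ni_fd_*_idx() helpers.
 */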
#define DPAA2_NI_BUF_ADDR_MASK	(0x1FFFFFFFFFFFFul) /* 49-bit addresses max. */
#define DPAA2_NI_BUF_CHAN_MASK	(0xFu)
#define DPAA2_NI_BUF_CHAN_SHIFT	(60)
#define DPAA2_NI_BUF_IDX_MASK	(0x7FFFu)
#define DPAA2_NI_BUF_IDX_SHIFT	(49)
#define DPAA2_NI_TX_IDX_MASK	(0x7u)
#define DPAA2_NI_TX_IDX_SHIFT	(57)
#define DPAA2_NI_TXBUF_IDX_MASK	(0xFFu)
#define DPAA2_NI_TXBUF_IDX_SHIFT (49)

#define DPAA2_NI_FD_FMT_MASK	(0x3u)
#define DPAA2_NI_FD_FMT_SHIFT	(12)
#define DPAA2_NI_FD_ERR_MASK	(0xFFu)
#define DPAA2_NI_FD_ERR_SHIFT	(0)
#define DPAA2_NI_FD_SL_MASK	(0x1u)
#define DPAA2_NI_FD_SL_SHIFT	(14)
#define DPAA2_NI_FD_LEN_MASK	(0x3FFFFu)
#define DPAA2_NI_FD_OFFSET_MASK (0x0FFFu)

/* Enables TCAM for Flow Steering and QoS look-ups. */
#define DPNI_OPT_HAS_KEY_MASKING 0x10

/* Unique IDs for the supported Rx classification header fields. */
#define DPAA2_ETH_DIST_ETHDST	BIT(0)
#define DPAA2_ETH_DIST_ETHSRC	BIT(1)
#define DPAA2_ETH_DIST_ETHTYPE	BIT(2)
#define DPAA2_ETH_DIST_VLAN	BIT(3)
#define DPAA2_ETH_DIST_IPSRC	BIT(4)
#define DPAA2_ETH_DIST_IPDST	BIT(5)
#define DPAA2_ETH_DIST_IPPROTO	BIT(6)
#define DPAA2_ETH_DIST_L4SRC	BIT(7)
#define DPAA2_ETH_DIST_L4DST	BIT(8)
#define DPAA2_ETH_DIST_ALL	(~0ULL)

/* L3-L4 network traffic flow hash options. */
#define	RXH_L2DA		(1 << 1)
#define	RXH_VLAN		(1 << 2)
#define	RXH_L3_PROTO		(1 << 3)
#define	RXH_IP_SRC		(1 << 4)
#define	RXH_IP_DST		(1 << 5)
#define	RXH_L4_B_0_1		(1 << 6) /* src port in case of TCP/UDP/SCTP */
#define	RXH_L4_B_2_3		(1 << 7) /* dst port in case of TCP/UDP/SCTP */
#define	RXH_DISCARD		(1 << 31)

/* Default Rx hash options, set during attaching. */
#define DPAA2_RXH_DEFAULT	(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
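/* I.e. hash on the classic 4-tuple: IP src/dst address and L4 src/dst port. */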

MALLOC_DEFINE(M_DPAA2_NI, "dpaa2_ni", "DPAA2 Network Interface");

/* DPAA2 Network Interface resource specification. */
struct resource_spec dpaa2_ni_spec[] = {
	/*
	 * DPMCP resources.
	 *
	 * NOTE: MC command portals (MCPs) are used to send commands to, and
	 *	 receive responses from, the MC firmware. One portal per DPNI.
	 */
#define MCP_RES_NUM	(1u)
#define MCP_RID_OFF	(0u)
#define MCP_RID(rid)	((rid) + MCP_RID_OFF)
	/* --- */
	{ DPAA2_DEV_MCP, MCP_RID(0),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	/*
	 * DPIO resources (software portals).
	 *
	 * NOTE: One per running core. While DPIOs are the source of data
	 *	 availability interrupts, the DPCONs are used to identify the
	 *	 network interface that has produced ingress data to that core.
	 */
#define IO_RES_NUM	(16u)
#define IO_RID_OFF	(MCP_RID_OFF + MCP_RES_NUM)
#define IO_RID(rid)	((rid) + IO_RID_OFF)
	/* --- */
	{ DPAA2_DEV_IO,  IO_RID(0),    RF_ACTIVE | RF_SHAREABLE },
	{ DPAA2_DEV_IO,  IO_RID(1),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(2),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(3),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(4),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(5),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(6),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(7),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(8),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(9),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(10),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(11),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(12),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(13),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(14),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(15),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	/*
	 * DPBP resources (buffer pools).
	 *
	 * NOTE: One per network interface.
	 */
#define BP_RES_NUM	(1u)
#define BP_RID_OFF	(IO_RID_OFF + IO_RES_NUM)
#define BP_RID(rid)	((rid) + BP_RID_OFF)
	/* --- */
	{ DPAA2_DEV_BP,  BP_RID(0),   RF_ACTIVE },
	/*
	 * DPCON resources (channels).
	 *
	 * NOTE: One DPCON per core to which Rx or Tx confirmation traffic is
	 *	 distributed.
	 * NOTE: Since it is necessary to distinguish between traffic from
	 *	 different network interfaces arriving on the same core, the
	 *	 DPCONs must be private to the DPNIs.
	 */
#define CON_RES_NUM	(16u)
#define CON_RID_OFF	(BP_RID_OFF + BP_RES_NUM)
#define CON_RID(rid)	((rid) + CON_RID_OFF)
	/* --- */
	{ DPAA2_DEV_CON, CON_RID(0),   RF_ACTIVE },
	{ DPAA2_DEV_CON, CON_RID(1),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(2),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(3),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(4),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(5),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(6),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(7),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(8),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(9),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(10),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(11),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(12),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(13),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(14),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(15),  RF_ACTIVE | RF_OPTIONAL },
	/* --- */
	RESOURCE_SPEC_END
};
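/* 34 resources in total: 1 DPMCP + 16 DPIOs + 1 DPBP + 16 DPCONs. */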

/* Supported header fields for Rx hash distribution key */
static const struct dpaa2_eth_dist_fields dist_fields[] = {
	{
		/* L2 header */
		.rxnfc_field = RXH_L2DA,
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_DA,
		.id = DPAA2_ETH_DIST_ETHDST,
		.size = 6,
	}, {
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_SA,
		.id = DPAA2_ETH_DIST_ETHSRC,
		.size = 6,
	}, {
		/*
		 * This is the last ethertype field parsed: depending on frame
		 * format, it can be the MAC ethertype or the VLAN etype.
		 */
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_TYPE,
		.id = DPAA2_ETH_DIST_ETHTYPE,
		.size = 2,
	}, {
		/* VLAN header */
		.rxnfc_field = RXH_VLAN,
		.cls_prot = NET_PROT_VLAN,
		.cls_field = NH_FLD_VLAN_TCI,
		.id = DPAA2_ETH_DIST_VLAN,
		.size = 2,
	}, {
		/* IP header */
		.rxnfc_field = RXH_IP_SRC,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_SRC,
		.id = DPAA2_ETH_DIST_IPSRC,
		.size = 4,
	}, {
		.rxnfc_field = RXH_IP_DST,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_DST,
		.id = DPAA2_ETH_DIST_IPDST,
		.size = 4,
	}, {
		.rxnfc_field = RXH_L3_PROTO,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_PROTO,
		.id = DPAA2_ETH_DIST_IPPROTO,
		.size = 1,
	}, {
		/*
		 * Using UDP ports, this is functionally equivalent to raw
		 * byte pairs from the L4 header.
		 */
		.rxnfc_field = RXH_L4_B_0_1,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_SRC,
		.id = DPAA2_ETH_DIST_L4SRC,
		.size = 2,
	}, {
		.rxnfc_field = RXH_L4_B_2_3,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_DST,
		.id = DPAA2_ETH_DIST_L4DST,
		.size = 2,
	},
};

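/*
 * The "page" and "cnt" fields below select which counter of which DPNI
 * statistics page is exported; dpaa2_ni_collect_stats() is expected to fetch
 * them from the MC firmware.
 */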
static struct dpni_stat {
	int	 page;
	int	 cnt;
	char	*name;
	char	*desc;
} dpni_stat_sysctls[DPAA2_NI_STAT_SYSCTLS] = {
	/* PAGE, COUNTER, NAME, DESCRIPTION */
	{  0, 0, "in_all_frames",	"All accepted ingress frames" },
	{  0, 1, "in_all_bytes",	"Bytes in all accepted ingress frames" },
	{  0, 2, "in_multi_frames",	"Multicast accepted ingress frames" },
	{  1, 0, "eg_all_frames",	"All egress frames transmitted" },
	{  1, 1, "eg_all_bytes",	"Bytes in all frames transmitted" },
	{  1, 2, "eg_multi_frames",	"Multicast egress frames transmitted" },
	{  2, 0, "in_filtered_frames",	"All ingress frames discarded due to "
					"filtering" },
	{  2, 1, "in_discarded_frames",	"All frames discarded due to errors" },
	{  2, 2, "in_nobuf_discards",	"Discards on ingress side due to buffer "
					"depletion in DPNI buffer pools" },
};

/* Device interface */
static int dpaa2_ni_probe(device_t);
static int dpaa2_ni_attach(device_t);
static int dpaa2_ni_detach(device_t);

/* DPAA2 network interface setup and configuration */
static int dpaa2_ni_setup(device_t);
static int dpaa2_ni_setup_channels(device_t);
static int dpaa2_ni_setup_fq(device_t, struct dpaa2_ni_channel *,
    enum dpaa2_ni_queue_type);
static int dpaa2_ni_bind(device_t);
static int dpaa2_ni_setup_rx_dist(device_t);
static int dpaa2_ni_setup_irqs(device_t);
static int dpaa2_ni_setup_msi(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_dma(struct dpaa2_ni_softc *);

/* Tx/Rx flow configuration */
static int dpaa2_ni_setup_rx_flow(device_t, struct dpaa2_ni_fq *);
static int dpaa2_ni_setup_tx_flow(device_t, struct dpaa2_ni_fq *);
static int dpaa2_ni_setup_rx_err_flow(device_t, struct dpaa2_ni_fq *);

/* Configuration subroutines */
static int dpaa2_ni_set_buf_layout(device_t);
static int dpaa2_ni_set_pause_frame(device_t);
static int dpaa2_ni_set_qos_table(device_t);
static int dpaa2_ni_set_mac_addr(device_t);
static int dpaa2_ni_set_hash(device_t, uint64_t);
static int dpaa2_ni_set_dist_key(device_t, enum dpaa2_ni_dist_mode, uint64_t);

/* Buffers and buffer pools */
static int dpaa2_ni_seed_buf_pool(struct dpaa2_ni_softc *, uint32_t);
static int dpaa2_ni_seed_rxbuf(struct dpaa2_ni_softc *, struct dpaa2_buf *, int);
static int dpaa2_ni_seed_txbuf(struct dpaa2_ni_softc *, struct dpaa2_buf *, int);
static int dpaa2_ni_seed_chan_storage(struct dpaa2_ni_softc *,
    struct dpaa2_ni_channel *);

/* Frame descriptor routines */
static int dpaa2_ni_build_fd(struct dpaa2_ni_softc *, struct dpaa2_ni_tx_ring *,
    struct dpaa2_buf *, bus_dma_segment_t *, int, struct dpaa2_fd *);
static int dpaa2_ni_fd_err(struct dpaa2_fd *);
static uint32_t dpaa2_ni_fd_data_len(struct dpaa2_fd *);
static int dpaa2_ni_fd_chan_idx(struct dpaa2_fd *);
static int dpaa2_ni_fd_buf_idx(struct dpaa2_fd *);
static int dpaa2_ni_fd_tx_idx(struct dpaa2_fd *);
static int dpaa2_ni_fd_txbuf_idx(struct dpaa2_fd *);
static int dpaa2_ni_fd_format(struct dpaa2_fd *);
static bool dpaa2_ni_fd_short_len(struct dpaa2_fd *);
static int dpaa2_ni_fd_offset(struct dpaa2_fd *);

/* Various subroutines */
static int dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *, uint16_t, uint16_t);
static int dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *, uint8_t *);
static int dpaa2_ni_chan_storage_next(struct dpaa2_ni_channel *,
    struct dpaa2_dq **);

/* Network interface routines */
static void dpaa2_ni_init(void *);
static int  dpaa2_ni_transmit(if_t, struct mbuf *);
static void dpaa2_ni_qflush(if_t);
static int  dpaa2_ni_ioctl(if_t, u_long, caddr_t);
static int  dpaa2_ni_update_mac_filters(if_t);
static u_int dpaa2_ni_add_maddr(void *, struct sockaddr_dl *, u_int);

/* Interrupt handlers */
static void dpaa2_ni_intr(void *);

/* MII handlers */
static void dpaa2_ni_miibus_statchg(device_t);
static int  dpaa2_ni_media_change(if_t);
static void dpaa2_ni_media_status(if_t, struct ifmediareq *);
static void dpaa2_ni_media_tick(void *);

/* DMA mapping callback */
static void dpaa2_ni_dmamap_cb(void *, bus_dma_segment_t *, int, int);

/* Tx/Rx routines. */
static void dpaa2_ni_poll(void *);
static void dpaa2_ni_tx_locked(struct dpaa2_ni_softc *,
    struct dpaa2_ni_tx_ring *, struct mbuf *);
static void dpaa2_ni_bp_task(void *, int);

/* Tx/Rx subroutines */
static int  dpaa2_ni_consume_frames(struct dpaa2_ni_channel *,
    struct dpaa2_ni_fq **, uint32_t *);
static int  dpaa2_ni_rx(struct dpaa2_ni_channel *, struct dpaa2_ni_fq *,
    struct dpaa2_fd *);
static int  dpaa2_ni_rx_err(struct dpaa2_ni_channel *, struct dpaa2_ni_fq *,
    struct dpaa2_fd *);
static int  dpaa2_ni_tx_conf(struct dpaa2_ni_channel *, struct dpaa2_ni_fq *,
    struct dpaa2_fd *);

/* sysctl(9) */
static int dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS);
static int dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS);
static int dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS);

static int
dpaa2_ni_probe(device_t dev)
{
	/* DPNI device will be added by a parent resource container itself. */
	device_set_desc(dev, "DPAA2 Network Interface");
	return (BUS_PROBE_DEFAULT);
}

static int
dpaa2_ni_attach(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	device_t mcp_dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *mcp_dinfo;
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	if_t ifp;
	char tq_name[32];
	int error;

	sc->dev = dev;
	sc->ifp = NULL;
	sc->miibus = NULL;
	sc->mii = NULL;
	sc->media_status = 0;
	sc->if_flags = 0;
	sc->link_state = LINK_STATE_UNKNOWN;
	sc->buf_align = 0;

	/* For debug purposes only! */
	sc->rx_anomaly_frames = 0;
	sc->rx_single_buf_frames = 0;
	sc->rx_sg_buf_frames = 0;
	sc->rx_enq_rej_frames = 0;
	sc->rx_ieoi_err_frames = 0;
	sc->tx_single_buf_frames = 0;
	sc->tx_sg_frames = 0;

	DPAA2_ATOMIC_XCHG(&sc->buf_num, 0);
	DPAA2_ATOMIC_XCHG(&sc->buf_free, 0);

	sc->bp_dmat = NULL;
	sc->st_dmat = NULL;
	sc->rxd_dmat = NULL;
	sc->qos_dmat = NULL;

	sc->qos_kcfg.type = DPAA2_BUF_STORE;
	sc->qos_kcfg.store.dmap = NULL;
	sc->qos_kcfg.store.paddr = 0;
	sc->qos_kcfg.store.vaddr = NULL;

	sc->rxd_kcfg.type = DPAA2_BUF_STORE;
	sc->rxd_kcfg.store.dmap = NULL;
	sc->rxd_kcfg.store.paddr = 0;
	sc->rxd_kcfg.store.vaddr = NULL;

	sc->mac.dpmac_id = 0;
	sc->mac.phy_dev = NULL;
	memset(sc->mac.addr, 0, ETHER_ADDR_LEN);

	error = bus_alloc_resources(sc->dev, dpaa2_ni_spec, sc->res);
	if (error) {
		device_printf(dev, "%s: failed to allocate resources: "
		    "error=%d\n", __func__, error);
		goto err_exit;
	}

	/* Obtain MC portal. */
	mcp_dev = (device_t) rman_get_start(sc->res[MCP_RID(0)]);
	mcp_dinfo = device_get_ivars(mcp_dev);
	dinfo->portal = mcp_dinfo->portal;

	mtx_init(&sc->lock, device_get_nameunit(dev), "dpaa2_ni", MTX_DEF);

	/* Allocate network interface */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "%s: failed to allocate network interface\n",
		    __func__);
		goto err_exit;
	}
	sc->ifp = ifp;
	if_initname(ifp, DPAA2_NI_IFNAME, device_get_unit(sc->dev));

	if_setsoftc(ifp, sc);
	if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST);
	if_setinitfn(ifp, dpaa2_ni_init);
	if_setioctlfn(ifp, dpaa2_ni_ioctl);
	if_settransmitfn(ifp, dpaa2_ni_transmit);
	if_setqflushfn(ifp, dpaa2_ni_qflush);

	if_setcapabilities(ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU);
	if_setcapenable(ifp, if_getcapabilities(ifp));

	DPAA2_CMD_INIT(&cmd);

	/* Open resource container and network interface object. */
	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/*
	 * XXX-DSL: Release new buffers on Buffer Pool State Change Notification
	 *          (BPSCN) returned as a result to the VDQ command instead.
	 *          It is similar to CDAN processed in dpaa2_io_intr().
	 */
	/* Create a taskqueue thread to release new buffers to the pool. */
	TASK_INIT(&sc->bp_task, 0, dpaa2_ni_bp_task, sc);
	bzero(tq_name, sizeof (tq_name));
	snprintf(tq_name, sizeof (tq_name), "%s_tqbp",
	    device_get_nameunit(dev));
	sc->bp_taskq = taskqueue_create(tq_name, M_WAITOK,
	    taskqueue_thread_enqueue, &sc->bp_taskq);
	if (sc->bp_taskq == NULL) {
		device_printf(dev, "%s: failed to allocate task queue: %s\n",
		    __func__, tq_name);
		goto close_ni;
	}
	taskqueue_start_threads(&sc->bp_taskq, 1, PI_NET, "%s", tq_name);

	error = dpaa2_ni_setup(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup DPNI: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_channels(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup QBMan channels: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	error = dpaa2_ni_bind(dev);
	if (error) {
		device_printf(dev, "%s: failed to bind DPNI: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_irqs(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup IRQs: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_sysctls(sc);
	if (error) {
		device_printf(dev, "%s: failed to setup sysctls: error=%d\n",
		    __func__, error);
		goto close_ni;
	}

	ether_ifattach(sc->ifp, sc->mac.addr);
	callout_init(&sc->mii_callout, 0);

	return (0);

close_ni:
	DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (ENXIO);
}

static void
dpaa2_ni_fixed_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);

	DPNI_LOCK(sc);
	ifmr->ifm_count = 0;
	ifmr->ifm_mask = 0;
	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifmr->ifm_current = ifmr->ifm_active =
	    sc->fixed_ifmedia.ifm_cur->ifm_media;

	/*
	 * In non-PHY use cases, we need to signal link state up; otherwise
	 * certain things requiring a link event (e.g. an async DHCP client
	 * started by devd) do not happen.
	 */
	if (if_getlinkstate(ifp) == LINK_STATE_UNKNOWN) {
		if_link_state_change(ifp, LINK_STATE_UP);
	}

	/*
	 * TODO: Check the status of the link partner (DPMAC, DPNI or other) and
	 * reset if down. This differs from the DPAA2_MAC_LINK_TYPE_PHY case, as
	 * the MC firmware sets the status, instead of us telling the MC what
	 * it is.
	 */
	DPNI_UNLOCK(sc);

	return;
}

static void
dpaa2_ni_setup_fixed_link(struct dpaa2_ni_softc *sc)
{
	/*
	 * FIXME: When the DPNI is connected to a DPMAC, we can get the
	 * 'apparent' speed from it.
	 */
	sc->fixed_link = true;

	ifmedia_init(&sc->fixed_ifmedia, 0, dpaa2_ni_media_change,
	    dpaa2_ni_fixed_media_status);
	ifmedia_add(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL);
	ifmedia_set(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T);
}

static int
dpaa2_ni_detach(device_t dev)
{
	/* TBD */
	return (0);
}

/**
 * @brief Configure DPAA2 network interface object.
 */
static int
dpaa2_ni_setup(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_ep_desc ep1_desc, ep2_desc; /* endpoint descriptors */
	struct dpaa2_cmd cmd;
	uint8_t eth_bca[ETHER_ADDR_LEN]; /* broadcast physical address */
	uint16_t rc_token, ni_token, mac_token;
	struct dpaa2_mac_attr attr;
	enum dpaa2_mac_link_type link_type;
	uint32_t link;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Check if we can work with this DPNI object. */
	error = DPAA2_CMD_NI_GET_API_VERSION(dev, child, &cmd, &sc->api_major,
	    &sc->api_minor);
	if (error) {
		device_printf(dev, "%s: failed to get DPNI API version\n",
		    __func__);
		goto close_ni;
	}
	if (dpaa2_ni_cmp_api_version(sc, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
		device_printf(dev, "%s: DPNI API version %u.%u not supported, "
		    "need >= %u.%u\n", __func__, sc->api_major, sc->api_minor,
		    DPNI_VER_MAJOR, DPNI_VER_MINOR);
		error = ENODEV;
		goto close_ni;
	}

	/* Reset the DPNI object. */
	error = DPAA2_CMD_NI_RESET(dev, child, &cmd);
	if (error) {
		device_printf(dev, "%s: failed to reset DPNI: id=%d\n",
		    __func__, dinfo->id);
		goto close_ni;
	}

	/* Obtain attributes of the DPNI object. */
	error = DPAA2_CMD_NI_GET_ATTRIBUTES(dev, child, &cmd, &sc->attr);
	if (error) {
		device_printf(dev, "%s: failed to obtain DPNI attributes: "
		    "id=%d\n", __func__, dinfo->id);
		goto close_ni;
	}
	if (bootverbose) {
		device_printf(dev, "\toptions=%#x queues=%d tx_channels=%d "
		    "wriop_version=%#x\n", sc->attr.options, sc->attr.num.queues,
		    sc->attr.num.channels, sc->attr.wriop_ver);
		device_printf(dev, "\ttraffic classes: rx=%d tx=%d "
		    "cgs_groups=%d\n", sc->attr.num.rx_tcs, sc->attr.num.tx_tcs,
		    sc->attr.num.cgs);
		device_printf(dev, "\ttable entries: mac=%d vlan=%d qos=%d "
		    "fs=%d\n", sc->attr.entries.mac, sc->attr.entries.vlan,
		    sc->attr.entries.qos, sc->attr.entries.fs);
		device_printf(dev, "\tkey sizes: qos=%d fs=%d\n",
		    sc->attr.key_size.qos, sc->attr.key_size.fs);
	}

	/* Configure buffer layouts of the DPNI queues. */
	error = dpaa2_ni_set_buf_layout(dev);
	if (error) {
		device_printf(dev, "%s: failed to configure buffer layout\n",
		    __func__);
		goto close_ni;
	}

	/* Configure DMA resources. */
	error = dpaa2_ni_setup_dma(sc);
	if (error) {
		device_printf(dev, "%s: failed to setup DMA\n", __func__);
		goto close_ni;
	}

	/* Setup link between DPNI and an object it's connected to. */
	ep1_desc.obj_id = dinfo->id;
	ep1_desc.if_id = 0; /* DPNI has the only endpoint */
	ep1_desc.type = dinfo->dtype;

	error = DPAA2_CMD_RC_GET_CONN(dev, child, DPAA2_CMD_TK(&cmd, rc_token),
	    &ep1_desc, &ep2_desc, &link);
	if (error) {
		device_printf(dev, "%s: failed to obtain an object DPNI is "
		    "connected to: error=%d\n", __func__, error);
	} else {
		device_printf(dev, "connected to %s (id=%d)\n",
		    dpaa2_ttos(ep2_desc.type), ep2_desc.obj_id);

		error = dpaa2_ni_set_mac_addr(dev);
		if (error) {
			device_printf(dev, "%s: failed to set MAC address: "
			    "error=%d\n", __func__, error);
		}

		if (ep2_desc.type == DPAA2_DEV_MAC) {
			/*
			 * This is the simplest case when DPNI is connected to
			 * DPMAC directly.
			 */
			sc->mac.dpmac_id = ep2_desc.obj_id;

			link_type = DPAA2_MAC_LINK_TYPE_NONE;

			/*
			 * Need to determine if DPMAC type is PHY (attached to
			 * conventional MII PHY) or FIXED (usually SFP/SerDes,
			 * link state managed by MC firmware).
			 */
			error = DPAA2_CMD_MAC_OPEN(sc->dev, child,
			    DPAA2_CMD_TK(&cmd, rc_token), sc->mac.dpmac_id,
			    &mac_token);
			/*
			 * Under VFIO, the DPMAC might be sitting in another
			 * container (DPRC) we don't have access to. Assume
			 * DPAA2_MAC_LINK_TYPE_FIXED if this is the case.
			 */
			if (error) {
				device_printf(dev, "%s: failed to open "
				    "connected DPMAC: %d (assuming it is in "
				    "another DPRC)\n", __func__,
				    sc->mac.dpmac_id);
				link_type = DPAA2_MAC_LINK_TYPE_FIXED;
			} else {
				error = DPAA2_CMD_MAC_GET_ATTRIBUTES(dev, child,
				    &cmd, &attr);
				if (error) {
					device_printf(dev, "%s: failed to get "
					    "DPMAC attributes: id=%d, "
					    "error=%d\n", __func__, dinfo->id,
					    error);
				} else {
					link_type = attr.link_type;
				}
			}
			DPAA2_CMD_MAC_CLOSE(dev, child, &cmd);

			if (link_type == DPAA2_MAC_LINK_TYPE_FIXED) {
				device_printf(dev, "connected DPMAC is in FIXED "
				    "mode\n");
				dpaa2_ni_setup_fixed_link(sc);
			} else if (link_type == DPAA2_MAC_LINK_TYPE_PHY) {
				device_printf(dev, "connected DPMAC is in PHY "
				    "mode\n");
				error = DPAA2_MC_GET_PHY_DEV(dev,
				    &sc->mac.phy_dev, sc->mac.dpmac_id);
				if (error == 0) {
					error = MEMAC_MDIO_SET_NI_DEV(
					    sc->mac.phy_dev, dev);
					if (error != 0) {
						device_printf(dev, "%s: failed "
						    "to set dpni dev on memac "
						    "mdio dev %s: error=%d\n",
						    __func__,
						    device_get_nameunit(
						    sc->mac.phy_dev), error);
					}
				}
				if (error == 0) {
					error = MEMAC_MDIO_GET_PHY_LOC(
					    sc->mac.phy_dev, &sc->mac.phy_loc);
					if (error == ENODEV) {
						error = 0;
					}
					if (error != 0) {
						device_printf(dev, "%s: failed "
						    "to get phy location from "
						    "memac mdio dev %s: error=%d\n",
						    __func__, device_get_nameunit(
						    sc->mac.phy_dev), error);
					}
				}
				if (error == 0) {
					error = mii_attach(sc->mac.phy_dev,
					    &sc->miibus, sc->ifp,
					    dpaa2_ni_media_change,
					    dpaa2_ni_media_status,
					    BMSR_DEFCAPMASK, sc->mac.phy_loc,
					    MII_OFFSET_ANY, 0);
					if (error != 0) {
						device_printf(dev, "%s: failed "
						    "to attach to miibus: "
						    "error=%d\n",
						    __func__, error);
					}
				}
				if (error == 0) {
					sc->mii = device_get_softc(sc->miibus);
				}
			} else {
				device_printf(dev, "%s: DPMAC link type is not "
				    "supported\n", __func__);
			}
		} else if (ep2_desc.type == DPAA2_DEV_NI ||
			   ep2_desc.type == DPAA2_DEV_MUX ||
			   ep2_desc.type == DPAA2_DEV_SW) {
			dpaa2_ni_setup_fixed_link(sc);
		}
	}

	/* Select mode to enqueue frames. */
	/* ... TBD ... */

	/*
	 * Update link configuration to enable Rx/Tx pause frames support.
	 *
	 * NOTE: MC may generate an interrupt to the DPMAC and request changes
	 *       in link configuration. It might be necessary to attach miibus
	 *       and PHY before this point.
	 */
	error = dpaa2_ni_set_pause_frame(dev);
	if (error) {
		device_printf(dev, "%s: failed to configure Rx/Tx pause "
		    "frames\n", __func__);
		goto close_ni;
	}

	/* Configure ingress traffic classification. */
	error = dpaa2_ni_set_qos_table(dev);
	if (error) {
		device_printf(dev, "%s: failed to configure QoS table: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	/* Add broadcast physical address to the MAC filtering table. */
	memset(eth_bca, 0xff, ETHER_ADDR_LEN);
	error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, DPAA2_CMD_TK(&cmd,
	    ni_token), eth_bca);
	if (error) {
		device_printf(dev, "%s: failed to add broadcast physical "
		    "address to the MAC filtering table\n", __func__);
		goto close_ni;
	}

	/* Set the maximum allowed length for received frames. */
	error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd, DPAA2_ETH_MFL);
	if (error) {
		device_printf(dev, "%s: failed to set maximum length for "
		    "received frames\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

/**
 * @brief Configure QBMan channels and register data availability notifications.
 */
static int
dpaa2_ni_setup_channels(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	device_t io_dev, con_dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_ni_channel *channel;
	struct dpaa2_con_softc *consc;
	struct dpaa2_con_notif_cfg notif_cfg;
	struct dpaa2_devinfo *rc_info = device_get_ivars(pdev);
	struct dpaa2_devinfo *io_info;
	struct dpaa2_devinfo *con_info;
	struct dpaa2_io_notif_ctx *ctx;
	struct dpaa2_buf *buf;
	struct dpaa2_cmd cmd;
	struct sysctl_ctx_list *sysctl_ctx;
	struct sysctl_oid *node;
	struct sysctl_oid_list *parent;
	uint32_t i, num_chan;
	uint16_t rc_token, con_token;
	int error;

	/* Calculate number of the channels based on the allocated resources. */
	for (i = 0; i < IO_RES_NUM; i++) {
		if (!sc->res[IO_RID(i)]) {
			break;
		}
	}
	num_chan = i;
	for (i = 0; i < CON_RES_NUM; i++) {
		if (!sc->res[CON_RID(i)]) {
			break;
		}
	}
	num_chan = i < num_chan ? i : num_chan;
	sc->chan_n = num_chan > DPAA2_NI_MAX_CHANNELS
	    ? DPAA2_NI_MAX_CHANNELS : num_chan;
	sc->chan_n = sc->chan_n > sc->attr.num.queues
	    ? sc->attr.num.queues : sc->chan_n;
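	/* I.e. chan_n = min(#DPIOs, #DPCONs, DPAA2_NI_MAX_CHANNELS, queues). */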

	device_printf(dev, "channels=%d\n", sc->chan_n);

	sysctl_ctx = device_get_sysctl_ctx(sc->dev);
	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
	node = SYSCTL_ADD_NODE(sysctl_ctx, parent, OID_AUTO, "channels",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channels");
	parent = SYSCTL_CHILDREN(node);

	/* Setup channels for the portal. */
	for (uint32_t i = 0; i < sc->chan_n; i++) {
		io_dev = (device_t) rman_get_start(sc->res[IO_RID(i)]);
		io_info = device_get_ivars(io_dev);

		con_dev = (device_t) rman_get_start(sc->res[CON_RID(i)]);
		consc = device_get_softc(con_dev);
		con_info = device_get_ivars(con_dev);

		DPAA2_CMD_INIT(&cmd);

		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rc_info->id,
		    &rc_token);
		if (error) {
			device_printf(dev, "%s: failed to open resource "
			    "container: id=%d, error=%d\n", __func__,
			    rc_info->id, error);
			return (error);
		}
		error = DPAA2_CMD_CON_OPEN(dev, child, &cmd, con_info->id,
		    &con_token);
		if (error) {
			device_printf(dev, "%s: failed to open DPCON: id=%d, "
			    "error=%d\n", __func__, con_info->id, error);
			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
			    rc_token));
			return (error);
		}

		error = DPAA2_CMD_CON_ENABLE(dev, child, &cmd);
		if (error) {
			device_printf(dev, "%s: failed to enable channel: "
			    "dpcon_id=%d, chan_id=%d\n", __func__, con_info->id,
			    consc->attr.chan_id);
			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
			    rc_token));
			return (error);
		}

		channel = malloc(sizeof(struct dpaa2_ni_channel), M_DPAA2_NI,
		    M_WAITOK | M_ZERO);
		if (!channel) {
			device_printf(dev, "%s: failed to allocate a channel\n",
			    __func__);
			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
			    rc_token));
			return (ENOMEM);
		}

		sc->channels[i] = channel;

		channel->id = consc->attr.chan_id;
		channel->flowid = i;
		channel->ni_dev = dev;
		channel->io_dev = io_dev;
		channel->con_dev = con_dev;
		channel->recycled_n = 0;
		channel->tx_frames = 0; /* for debug purposes */
		channel->tx_dropped = 0; /* for debug purposes */
		channel->rxq_n = 0;

		buf = &channel->store;
		buf->type = DPAA2_BUF_STORE;
		buf->store.dmat = NULL;
		buf->store.dmap = NULL;
		buf->store.paddr = 0;
		buf->store.vaddr = NULL;

		/* Setup WQ channel notification context. */
		ctx = &channel->ctx;
		ctx->qman_ctx = (uint64_t) ctx;
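		/*
		 * NOTE: The context pointer itself is used as the QMan context
		 * token, so it can be recovered from a CDAN later on.
		 */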
		ctx->cdan_en = true;
		ctx->fq_chan_id = channel->id;
		ctx->io_dev = channel->io_dev;
		ctx->channel = channel;
		ctx->poll = dpaa2_ni_poll;

		/* Register the new notification context. */
		error = DPAA2_SWP_CONF_WQ_CHANNEL(channel->io_dev, ctx);
		if (error) {
			device_printf(dev, "%s: failed to register notification "
			    "context\n", __func__);
			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
			    rc_token));
			return (error);
		}

		/* Register DPCON notification with Management Complex. */
		notif_cfg.dpio_id = io_info->id;
		notif_cfg.prior = 0;
		notif_cfg.qman_ctx = ctx->qman_ctx;
		error = DPAA2_CMD_CON_SET_NOTIF(dev, child, &cmd, &notif_cfg);
		if (error) {
			device_printf(dev, "%s: failed to set DPCON "
			    "notification: dpcon_id=%d, chan_id=%d\n", __func__,
			    con_info->id, consc->attr.chan_id);
			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
			    rc_token));
			return (error);
		}

		/* Allocate initial # of Rx buffers and a channel storage. */
		error = dpaa2_ni_seed_buf_pool(sc, DPAA2_NI_BUFS_INIT);
		if (error) {
			device_printf(dev, "%s: failed to seed buffer pool\n",
			    __func__);
			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
			    rc_token));
			return (error);
		}
		error = dpaa2_ni_seed_chan_storage(sc, channel);
		if (error) {
			device_printf(dev, "%s: failed to seed channel "
			    "storage\n", __func__);
			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
			    rc_token));
			return (error);
		}

		/* Prepare queues for this channel. */
		error = dpaa2_ni_setup_fq(dev, channel, DPAA2_NI_QUEUE_TX_CONF);
		if (error) {
			device_printf(dev, "%s: failed to prepare TxConf "
			    "queue: error=%d\n", __func__, error);
			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
			    rc_token));
			return (error);
		}
		error = dpaa2_ni_setup_fq(dev, channel, DPAA2_NI_QUEUE_RX);
		if (error) {
			device_printf(dev, "%s: failed to prepare Rx queue: "
			    "error=%d\n", __func__, error);
			(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
			    rc_token));
			return (error);
		}

		if (bootverbose) {
			device_printf(dev, "channel: dpio_id=%d "
			    "dpcon_id=%d chan_id=%d, priorities=%d\n",
			    io_info->id, con_info->id, channel->id,
			    consc->attr.prior_num);
		}

		(void)DPAA2_CMD_CON_CLOSE(dev, child, &cmd);
		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
		    rc_token));
	}

	/* There is exactly one Rx error queue per DPNI. */
	error = dpaa2_ni_setup_fq(dev, sc->channels[0], DPAA2_NI_QUEUE_RX_ERR);
	if (error) {
		device_printf(dev, "%s: failed to prepare RxError queue: "
		    "error=%d\n", __func__, error);
		return (error);
	}

	return (0);
}

/**
 * @brief Performs an initial configuration of the frame queues.
 */
static int
dpaa2_ni_setup_fq(device_t dev, struct dpaa2_ni_channel *chan,
    enum dpaa2_ni_queue_type queue_type)
{
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_ni_fq *fq;

	switch (queue_type) {
	case DPAA2_NI_QUEUE_TX_CONF:
		/* One queue per channel. */
		fq = &chan->txc_queue;

		fq->consume = dpaa2_ni_tx_conf;
		fq->chan = chan;
		fq->flowid = chan->flowid;
		fq->tc = 0; /* ignored */
		fq->type = queue_type;

		break;
	case DPAA2_NI_QUEUE_RX:
		KASSERT(sc->attr.num.rx_tcs <= DPAA2_NI_MAX_TCS,
		    ("too many Rx traffic classes: rx_tcs=%d\n",
		    sc->attr.num.rx_tcs));

		/* One queue per Rx traffic class within a channel. */
		for (int i = 0; i < sc->attr.num.rx_tcs; i++) {
			fq = &chan->rx_queues[i];

			fq->consume = dpaa2_ni_rx;
			fq->chan = chan;
			fq->flowid = chan->flowid;
			fq->tc = (uint8_t) i;
			fq->type = queue_type;

			chan->rxq_n++;
		}
		break;
	case DPAA2_NI_QUEUE_RX_ERR:
		/* One queue per network interface. */
		fq = &sc->rxe_queue;

		fq->consume = dpaa2_ni_rx_err;
		fq->chan = chan;
		fq->flowid = 0; /* ignored */
		fq->tc = 0; /* ignored */
		fq->type = queue_type;
		break;
	default:
		device_printf(dev, "%s: unexpected frame queue type: %d\n",
		    __func__, queue_type);
		return (EINVAL);
	}

	return (0);
}

/**
 * @brief Bind DPNI to DPBPs, DPIOs, frame queues and channels.
 */
static int
dpaa2_ni_bind(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	device_t bp_dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *bp_info;
	struct dpaa2_cmd cmd;
	struct dpaa2_ni_pools_cfg pools_cfg;
	struct dpaa2_ni_err_cfg err_cfg;
	struct dpaa2_ni_channel *chan;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Select buffer pool (only one available at the moment). */
	bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
	bp_info = device_get_ivars(bp_dev);

	/* Configure buffers pool. */
	pools_cfg.pools_num = 1;
	pools_cfg.pools[0].bp_obj_id = bp_info->id;
	pools_cfg.pools[0].backup_flag = 0;
	pools_cfg.pools[0].buf_sz = sc->buf_sz;
	error = DPAA2_CMD_NI_SET_POOLS(dev, child, &cmd, &pools_cfg);
	if (error) {
		device_printf(dev, "%s: failed to set buffer pools\n", __func__);
		goto close_ni;
	}

	/* Setup ingress traffic distribution. */
	error = dpaa2_ni_setup_rx_dist(dev);
	if (error && error != EOPNOTSUPP) {
		device_printf(dev, "%s: failed to setup ingress traffic "
		    "distribution\n", __func__);
		goto close_ni;
	}
	if (bootverbose && error == EOPNOTSUPP) {
		device_printf(dev, "Ingress traffic distribution not "
		    "supported\n");
	}

	/* Configure handling of error frames. */
	err_cfg.err_mask = DPAA2_NI_FAS_RX_ERR_MASK;
	err_cfg.set_err_fas = false;
	err_cfg.action = DPAA2_NI_ERR_DISCARD;
	error = DPAA2_CMD_NI_SET_ERR_BEHAVIOR(dev, child, &cmd, &err_cfg);
	if (error) {
		device_printf(dev, "%s: failed to set errors behavior\n",
		    __func__);
		goto close_ni;
	}

	/* Configure channel queues to generate CDANs. */
	for (uint32_t i = 0; i < sc->chan_n; i++) {
		chan = sc->channels[i];

		/* Setup Rx flows. */
		for (uint32_t j = 0; j < chan->rxq_n; j++) {
			error = dpaa2_ni_setup_rx_flow(dev, &chan->rx_queues[j]);
			if (error) {
				device_printf(dev, "%s: failed to setup Rx "
				    "flow: error=%d\n", __func__, error);
				goto close_ni;
			}
		}

		/* Setup Tx flow. */
		error = dpaa2_ni_setup_tx_flow(dev, &chan->txc_queue);
		if (error) {
			device_printf(dev, "%s: failed to setup Tx "
			    "flow: error=%d\n", __func__, error);
			goto close_ni;
		}
	}

	/* Configure RxError queue to generate CDAN. */
	error = dpaa2_ni_setup_rx_err_flow(dev, &sc->rxe_queue);
	if (error) {
		device_printf(dev, "%s: failed to setup RxError flow: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	/*
	 * Get the Queuing Destination ID (QDID) that should be used for frame
	 * enqueue operations.
	 */
	error = DPAA2_CMD_NI_GET_QDID(dev, child, &cmd, DPAA2_NI_QUEUE_TX,
	    &sc->tx_qdid);
	if (error) {
		device_printf(dev, "%s: failed to get Tx queuing destination "
		    "ID\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

/**
 * @brief Setup ingress traffic distribution.
 *
 * NOTE: Ingress traffic distribution is valid only when the DPNI_OPT_NO_FS
 *	 option hasn't been set for the DPNI and the number of DPNI queues is
 *	 greater than 1.
 */
static int
dpaa2_ni_setup_rx_dist(device_t dev)
{
	/*
	 * Have the interface implicitly distribute traffic based on the default
	 * hash key.
	 */
	return (dpaa2_ni_set_hash(dev, DPAA2_RXH_DEFAULT));
}

static int
dpaa2_ni_setup_rx_flow(device_t dev, struct dpaa2_ni_fq *fq)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *con_info;
	struct dpaa2_cmd cmd;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Obtain DPCON associated with the FQ's channel. */
	con_info = device_get_ivars(fq->chan->con_dev);

	queue_cfg.type = DPAA2_NI_QUEUE_RX;
	queue_cfg.tc = fq->tc;
	queue_cfg.idx = fq->flowid;
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain Rx queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	fq->fqid = queue_cfg.fqid;

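	/*
	 * Deliver ingress frames to our DPCON channel and stash the FQ pointer
	 * in user_ctx, so dequeue responses can be mapped back to this queue.
	 */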
	queue_cfg.dest_id = con_info->id;
	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
	queue_cfg.priority = 1;
	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
	queue_cfg.options =
	    DPAA2_NI_QUEUE_OPT_USER_CTX |
	    DPAA2_NI_QUEUE_OPT_DEST;
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to update Rx queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	if (bootverbose) {
		device_printf(dev, "RX queue idx=%d, tc=%d, chan=%d, fqid=%d, "
		    "user_ctx=%#jx\n", fq->flowid, fq->tc, fq->chan->id,
		    fq->fqid, (uintmax_t) fq);
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, &cmd);
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

static int
dpaa2_ni_setup_tx_flow(device_t dev, struct dpaa2_ni_fq *fq)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *con_info;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	struct dpaa2_ni_tx_ring *tx;
	struct dpaa2_buf *buf;
	struct dpaa2_cmd cmd;
	uint32_t tx_rings_n = 0;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Obtain DPCON associated with the FQ's channel. */
	con_info = device_get_ivars(fq->chan->con_dev);

	KASSERT(sc->attr.num.tx_tcs <= DPAA2_NI_MAX_TCS,
	    ("%s: too many Tx traffic classes: tx_tcs=%d\n", __func__,
	    sc->attr.num.tx_tcs));
	KASSERT(DPAA2_NI_BUFS_PER_TX <= DPAA2_NI_MAX_BPTX,
	    ("%s: too many Tx buffers (%d): max=%d\n", __func__,
	    DPAA2_NI_BUFS_PER_TX, DPAA2_NI_MAX_BPTX));

	/* Setup Tx rings. */
	for (int i = 0; i < sc->attr.num.tx_tcs; i++) {
		queue_cfg.type = DPAA2_NI_QUEUE_TX;
		queue_cfg.tc = i;
		queue_cfg.idx = fq->flowid;
		queue_cfg.chan_id = fq->chan->id;

		error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
		if (error) {
			device_printf(dev, "%s: failed to obtain Tx queue "
			    "configuration: tc=%d, flowid=%d\n", __func__,
			    queue_cfg.tc, queue_cfg.idx);
			goto close_ni;
		}

		tx = &fq->tx_rings[i];
		tx->fq = fq;
		tx->fqid = queue_cfg.fqid;
		tx->txid = tx_rings_n;

		if (bootverbose) {
			device_printf(dev, "TX queue idx=%d, tc=%d, chan=%d, "
			    "fqid=%d\n", fq->flowid, i, fq->chan->id,
			    queue_cfg.fqid);
		}

		mtx_init(&tx->lock, "dpaa2_tx_ring", NULL, MTX_DEF);

		/*
		 * Allocate a ring to track free Tx buffers: transmit dequeues
		 * a buffer index from it and Tx confirmation puts it back.
		 */
		tx->idx_br = buf_ring_alloc(DPAA2_TX_BUFRING_SZ, M_DEVBUF,
		    M_NOWAIT, &tx->lock);
		if (tx->idx_br == NULL) {
			device_printf(dev, "%s: failed to setup Tx ring "
			    "buffer: fqid=%d\n", __func__, tx->fqid);
			error = ENOMEM;
			goto close_ni;
		}

		/* Configure Tx buffers. */
		for (uint64_t j = 0; j < DPAA2_NI_BUFS_PER_TX; j++) {
			buf = &tx->buf[j];
			buf->type = DPAA2_BUF_TX;
			buf->tx.dmat = buf->tx.sgt_dmat = NULL;
			buf->tx.dmap = buf->tx.sgt_dmap = NULL;
			buf->tx.paddr = buf->tx.sgt_paddr = 0;
			buf->tx.vaddr = buf->tx.sgt_vaddr = NULL;
			buf->tx.m = NULL;
			buf->tx.idx = 0;

			error = dpaa2_ni_seed_txbuf(sc, buf, j);
			if (error != 0) {
				device_printf(dev, "%s: failed to seed Tx "
				    "buffer: error=%d\n", __func__, error);
				goto close_ni;
			}

			/* Add index of the Tx buffer to the ring. */
			buf_ring_enqueue(tx->idx_br, (void *) j);
		}
1598 
1599 		tx_rings_n++;
1600 	}
1601 
1602 	/* All Tx queues which belong to the same flowid have the same qdbin. */
1603 	fq->tx_qdbin = queue_cfg.qdbin;
1604 
1605 	queue_cfg.type = DPAA2_NI_QUEUE_TX_CONF;
1606 	queue_cfg.tc = 0; /* ignored for TxConf queue */
1607 	queue_cfg.idx = fq->flowid;
1608 	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
1609 	if (error) {
1610 		device_printf(dev, "%s: failed to obtain TxConf queue "
1611 		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
1612 		    queue_cfg.idx);
1613 		goto close_ni;
1614 	}
1615 
1616 	fq->fqid = queue_cfg.fqid;
1617 
1618 	queue_cfg.dest_id = con_info->id;
1619 	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
1620 	queue_cfg.priority = 0;
1621 	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
1622 	queue_cfg.options =
1623 	    DPAA2_NI_QUEUE_OPT_USER_CTX |
1624 	    DPAA2_NI_QUEUE_OPT_DEST;
1625 	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
1626 	if (error) {
1627 		device_printf(dev, "%s: failed to update TxConf queue "
1628 		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
1629 		    queue_cfg.idx);
1630 		goto close_ni;
1631 	}
1632 
1633 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1634 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1635 	return (0);
1636 
1637 close_ni:
1638 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1639 close_rc:
1640 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1641 err_exit:
1642 	return (error);
1643 }
1644 
1645 static int
1646 dpaa2_ni_setup_rx_err_flow(device_t dev, struct dpaa2_ni_fq *fq)
1647 {
1648 	device_t pdev = device_get_parent(dev);
1649 	device_t child = dev;
1650 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1651 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1652 	struct dpaa2_devinfo *con_info;
1653 	struct dpaa2_ni_queue_cfg queue_cfg = {0};
1654 	struct dpaa2_cmd cmd;
1655 	uint16_t rc_token, ni_token;
1656 	int error;
1657 
1658 	DPAA2_CMD_INIT(&cmd);
1659 
1660 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1661 	if (error) {
1662 		device_printf(dev, "%s: failed to open resource container: "
1663 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
1664 		goto err_exit;
1665 	}
1666 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1667 	if (error) {
1668 		device_printf(dev, "%s: failed to open network interface: "
1669 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
1670 		goto close_rc;
1671 	}
1672 
1673 	/* Obtain DPCON associated with the FQ's channel. */
1674 	con_info = device_get_ivars(fq->chan->con_dev);
1675 
1676 	queue_cfg.type = DPAA2_NI_QUEUE_RX_ERR;
1677 	queue_cfg.tc = fq->tc; /* ignored */
1678 	queue_cfg.idx = fq->flowid; /* ignored */
1679 	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
1680 	if (error) {
1681 		device_printf(dev, "%s: failed to obtain RxErr queue "
1682 		    "configuration\n", __func__);
1683 		goto close_ni;
1684 	}
1685 
1686 	fq->fqid = queue_cfg.fqid;
1687 
1688 	queue_cfg.dest_id = con_info->id;
1689 	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
1690 	queue_cfg.priority = 1;
1691 	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
1692 	queue_cfg.options =
1693 	    DPAA2_NI_QUEUE_OPT_USER_CTX |
1694 	    DPAA2_NI_QUEUE_OPT_DEST;
1695 	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
1696 	if (error) {
1697 		device_printf(dev, "%s: failed to update RxErr queue "
1698 		    "configuration\n", __func__);
1699 		goto close_ni;
1700 	}
1701 
1702 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1703 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1704 	return (0);
1705 
1706 close_ni:
1707 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1708 close_rc:
1709 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1710 err_exit:
1711 	return (error);
1712 }
1713 
1714 /**
1715  * @brief Configure DPNI object to generate interrupts.
1716  */
1717 static int
1718 dpaa2_ni_setup_irqs(device_t dev)
1719 {
1720 	device_t pdev = device_get_parent(dev);
1721 	device_t child = dev;
1722 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
1723 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1724 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1725 	struct dpaa2_cmd cmd;
1726 	uint16_t rc_token, ni_token;
1727 	int error;
1728 
1729 	DPAA2_CMD_INIT(&cmd);
1730 
1731 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1732 	if (error) {
1733 		device_printf(dev, "%s: failed to open resource container: "
1734 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
1735 		goto err_exit;
1736 	}
1737 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1738 	if (error) {
1739 		device_printf(dev, "%s: failed to open network interface: "
1740 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
1741 		goto close_rc;
1742 	}
1743 
1744 	/* Configure IRQs. */
1745 	error = dpaa2_ni_setup_msi(sc);
1746 	if (error) {
1747 		device_printf(dev, "%s: failed to allocate MSI\n", __func__);
1748 		goto close_ni;
1749 	}
1750 	if ((sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1751 	    &sc->irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) {
1752 		device_printf(dev, "%s: failed to allocate IRQ resource\n",
1753 		    __func__);
1754 		error = ENXIO;
1755 		goto close_ni;
1756 	}
1757 	error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
1758 	    NULL, dpaa2_ni_intr, sc, &sc->intr);
1759 	if (error != 0) {
1760 		device_printf(dev, "%s: failed to set up IRQ handler\n",
1761 		    __func__);
1762 		goto close_ni;
1763 	}
1762 
1763 	error = DPAA2_CMD_NI_SET_IRQ_MASK(dev, child, &cmd, DPNI_IRQ_INDEX,
1764 	    DPNI_IRQ_LINK_CHANGED | DPNI_IRQ_EP_CHANGED);
1765 	if (error) {
1766 		device_printf(dev, "%s: failed to set DPNI IRQ mask\n",
1767 		    __func__);
1768 		goto close_ni;
1769 	}
1770 
1771 	error = DPAA2_CMD_NI_SET_IRQ_ENABLE(dev, child, &cmd, DPNI_IRQ_INDEX,
1772 	    true);
1773 	if (error) {
1774 		device_printf(dev, "%s: failed to enable DPNI IRQ\n", __func__);
1775 		goto close_ni;
1776 	}
1777 
1778 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1779 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1780 	return (0);
1781 
1782 close_ni:
1783 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1784 close_rc:
1785 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1786 err_exit:
1787 	return (error);
1788 }
1789 
1790 /**
1791  * @brief Allocate MSI interrupts for DPNI.
1792  */
1793 static int
1794 dpaa2_ni_setup_msi(struct dpaa2_ni_softc *sc)
1795 {
1796 	int val;
1797 
1798 	val = pci_msi_count(sc->dev);
1799 	if (val < DPAA2_NI_MSI_COUNT)
1800 		device_printf(sc->dev, "MSI: actual=%d, expected=%d\n", val,
1801 		    DPAA2_NI_MSI_COUNT);
1802 	val = MIN(val, DPAA2_NI_MSI_COUNT);
1803 
1804 	if (pci_alloc_msi(sc->dev, &val) != 0)
1805 		return (EINVAL);
1806 
1807 	for (int i = 0; i < val; i++)
1808 		sc->irq_rid[i] = i + 1;
1809 
1810 	return (0);
1811 }
1812 
1813 /**
1814  * @brief Update DPNI according to the updated interface capabilities.
1815  */
1816 static int
1817 dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *sc)
1818 {
1819 	const bool en_rxcsum = if_getcapenable(sc->ifp) & IFCAP_RXCSUM;
1820 	const bool en_txcsum = if_getcapenable(sc->ifp) & IFCAP_TXCSUM;
1821 	device_t pdev = device_get_parent(sc->dev);
1822 	device_t dev = sc->dev;
1823 	device_t child = dev;
1824 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1825 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1826 	struct dpaa2_cmd cmd;
1827 	uint16_t rc_token, ni_token;
1828 	int error;
1829 
1830 	DPAA2_CMD_INIT(&cmd);
1831 
1832 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1833 	if (error) {
1834 		device_printf(dev, "%s: failed to open resource container: "
1835 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
1836 		goto err_exit;
1837 	}
1838 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1839 	if (error) {
1840 		device_printf(dev, "%s: failed to open network interface: "
1841 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
1842 		goto close_rc;
1843 	}
1844 
1845 	/* Setup checksums validation. */
1846 	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1847 	    DPAA2_NI_OFL_RX_L3_CSUM, en_rxcsum);
1848 	if (error) {
1849 		device_printf(dev, "%s: failed to %s L3 checksum validation\n",
1850 		    __func__, en_rxcsum ? "enable" : "disable");
1851 		goto close_ni;
1852 	}
1853 	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1854 	    DPAA2_NI_OFL_RX_L4_CSUM, en_rxcsum);
1855 	if (error) {
1856 		device_printf(dev, "%s: failed to %s L4 checksum validation\n",
1857 		    __func__, en_rxcsum ? "enable" : "disable");
1858 		goto close_ni;
1859 	}
1860 
1861 	/* Setup checksums generation. */
1862 	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1863 	    DPAA2_NI_OFL_TX_L3_CSUM, en_txcsum);
1864 	if (error) {
1865 		device_printf(dev, "%s: failed to %s L3 checksum generation\n",
1866 		    __func__, en_txcsum ? "enable" : "disable");
1867 		goto close_ni;
1868 	}
1869 	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1870 	    DPAA2_NI_OFL_TX_L4_CSUM, en_txcsum);
1871 	if (error) {
1872 		device_printf(dev, "%s: failed to %s L4 checksum generation\n",
1873 		    __func__, en_txcsum ? "enable" : "disable");
1874 		goto close_ni;
1875 	}
1876 
1877 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1878 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1879 	return (0);
1880 
1881 close_ni:
1882 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1883 close_rc:
1884 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1885 err_exit:
1886 	return (error);
1887 }
1888 
1889 /**
1890  * @brief Update DPNI according to the updated interface flags.
1891  */
1892 static int
1893 dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *sc)
1894 {
1895 	const bool en_promisc = if_getflags(sc->ifp) & IFF_PROMISC;
1896 	const bool en_allmulti = if_getflags(sc->ifp) & IFF_ALLMULTI;
1897 	device_t pdev = device_get_parent(sc->dev);
1898 	device_t dev = sc->dev;
1899 	device_t child = dev;
1900 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1901 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1902 	struct dpaa2_cmd cmd;
1903 	uint16_t rc_token, ni_token;
1904 	int error;
1905 
1906 	DPAA2_CMD_INIT(&cmd);
1907 
1908 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1909 	if (error) {
1910 		device_printf(dev, "%s: failed to open resource container: "
1911 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
1912 		goto err_exit;
1913 	}
1914 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1915 	if (error) {
1916 		device_printf(dev, "%s: failed to open network interface: "
1917 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
1918 		goto close_rc;
1919 	}
1920 
1921 	error = DPAA2_CMD_NI_SET_MULTI_PROMISC(dev, child, &cmd,
1922 	    en_promisc || en_allmulti);
1923 	if (error) {
1924 		device_printf(dev, "%s: failed to %s multicast promiscuous "
1925 		    "mode\n", __func__,
1926 		    (en_promisc || en_allmulti) ? "enable" : "disable");
1926 		goto close_ni;
1927 	}
1928 
1929 	error = DPAA2_CMD_NI_SET_UNI_PROMISC(dev, child, &cmd, en_promisc);
1930 	if (error) {
1931 		device_printf(dev, "%s: failed to %s unicast promiscuous mode\n",
1932 		    __func__, en_promisc ? "enable" : "disable");
1933 		goto close_ni;
1934 	}
1935 
1936 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1937 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1938 	return (0);
1939 
1940 close_ni:
1941 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1942 close_rc:
1943 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1944 err_exit:
1945 	return (error);
1946 }
1947 
1948 static int
1949 dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *sc)
1950 {
1951 	struct sysctl_ctx_list *ctx;
1952 	struct sysctl_oid *node, *node2;
1953 	struct sysctl_oid_list *parent, *parent2;
1954 	char cbuf[128];
1955 	int i;
1956 
1957 	ctx = device_get_sysctl_ctx(sc->dev);
1958 	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1959 
1960 	/* Add DPNI statistics. */
1961 	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
1962 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Statistics");
1963 	parent = SYSCTL_CHILDREN(node);
1964 	for (i = 0; i < DPAA2_NI_STAT_SYSCTLS; ++i) {
1965 		SYSCTL_ADD_PROC(ctx, parent, i, dpni_stat_sysctls[i].name,
1966 		    CTLTYPE_U64 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_stats,
1967 		    "IU", dpni_stat_sysctls[i].desc);
1968 	}
1969 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_anomaly_frames",
1970 	    CTLFLAG_RD, &sc->rx_anomaly_frames,
1971 	    "Rx frames in the buffers outside of the buffer pools");
1972 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_single_buf_frames",
1973 	    CTLFLAG_RD, &sc->rx_single_buf_frames,
1974 	    "Rx frames in single buffers");
1975 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_sg_buf_frames",
1976 	    CTLFLAG_RD, &sc->rx_sg_buf_frames,
1977 	    "Rx frames in scatter/gather list");
1978 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_enq_rej_frames",
1979 	    CTLFLAG_RD, &sc->rx_enq_rej_frames,
1980 	    "Enqueue rejected by QMan");
1981 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_ieoi_err_frames",
1982 	    CTLFLAG_RD, &sc->rx_ieoi_err_frames,
1983 	    "QMan IEOI error");
1984 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_single_buf_frames",
1985 	    CTLFLAG_RD, &sc->tx_single_buf_frames,
1986 	    "Tx single buffer frames");
1987 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_sg_frames",
1988 	    CTLFLAG_RD, &sc->tx_sg_frames,
1989 	    "Tx S/G frames");
1990 
1991 	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_num",
1992 	    CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_num,
1993 	    "IU", "number of Rx buffers in the buffer pool");
1994 	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_free",
1995 	    CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_free,
1996 	    "IU", "number of free Rx buffers in the buffer pool");
1997 
1998 	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1999 
2000 	/* Add channels statistics. */
2001 	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "channels",
2002 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channels");
2003 	parent = SYSCTL_CHILDREN(node);
2004 	for (int i = 0; i < sc->chan_n; i++) {
2005 		snprintf(cbuf, sizeof(cbuf), "%d", i);
2006 
2007 		node2 = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, cbuf,
2008 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channel");
2009 		parent2 = SYSCTL_CHILDREN(node2);
2010 
2011 		SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_frames",
2012 		    CTLFLAG_RD, &sc->channels[i]->tx_frames,
2013 		    "Tx frames counter");
2014 		SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_dropped",
2015 		    CTLFLAG_RD, &sc->channels[i]->tx_dropped,
2016 		    "Tx dropped counter");
2017 	}
2018 
2019 	return (0);
2020 }
2021 
2022 static int
2023 dpaa2_ni_setup_dma(struct dpaa2_ni_softc *sc)
2024 {
2025 	device_t dev = sc->dev;
2026 	int error;
2027 
2028 	KASSERT((sc->buf_align == BUF_ALIGN) || (sc->buf_align == BUF_ALIGN_V1),
2029 	    ("unexpected buffer alignment: %d\n", sc->buf_align));
2030 
2031 	/*
2032 	 * DMA tag to allocate buffers for buffer pool.
2033 	 *
2034 	 * NOTE: QBMan supports DMA addresses of at most 49 bits;
2035 	 *	 bits 63-49 are not used by QBMan.
2036 	 */
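	/*
	 * Since QBMan ignores bits 63-49 of a frame address, a driver may
	 * stash a small token, e.g. a buffer index, in those bits and recover
	 * it on the Rx path (see the ADDR_TOK note in dpaa2_ni_rx()). A
	 * minimal sketch of the idea, with hypothetical shift/mask names
	 * rather than this driver's macros:
	 *
	 *	#define EX_TOK_SHIFT	49
	 *	#define EX_TOK_MASK	0x7FFFul	(bits 63-49, 15 bits)
	 *
	 *	static inline uint64_t
	 *	ex_tag_addr(bus_addr_t paddr, uint64_t idx)
	 *	{
	 *		return ((uint64_t)paddr |
	 *		    ((idx & EX_TOK_MASK) << EX_TOK_SHIFT));
	 *	}
	 *
	 *	static inline int
	 *	ex_addr_token(uint64_t addr)
	 *	{
	 *		return ((int)((addr >> EX_TOK_SHIFT) & EX_TOK_MASK));
	 *	}
	 */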
2037 	error = bus_dma_tag_create(
2038 	    bus_get_dma_tag(dev),
2039 	    sc->buf_align, 0,		/* alignment, boundary */
2040 	    BUF_MAXADDR_49BIT,		/* low restricted addr */
2041 	    BUF_MAXADDR,		/* high restricted addr */
2042 	    NULL, NULL,			/* filter, filterarg */
2043 	    BUF_SIZE, 1,		/* maxsize, nsegments */
2044 	    BUF_SIZE, 0,		/* maxsegsize, flags */
2045 	    NULL, NULL,			/* lockfunc, lockarg */
2046 	    &sc->bp_dmat);
2047 	if (error) {
2048 		device_printf(dev, "%s: failed to create DMA tag for buffer "
2049 		    "pool\n", __func__);
2050 		return (error);
2051 	}
2052 
2053 	/* DMA tag to map Tx mbufs. */
2054 	error = bus_dma_tag_create(
2055 	    bus_get_dma_tag(dev),
2056 	    sc->buf_align, 0,		/* alignment, boundary */
2057 	    BUF_MAXADDR_49BIT,		/* low restricted addr */
2058 	    BUF_MAXADDR,		/* high restricted addr */
2059 	    NULL, NULL,			/* filter, filterarg */
2060 	    DPAA2_TX_SEGS_MAXSZ,	/* maxsize */
2061 	    DPAA2_TX_SEGLIMIT,		/* nsegments */
2062 	    DPAA2_TX_SEG_SZ, 0,		/* maxsegsize, flags */
2063 	    NULL, NULL,			/* lockfunc, lockarg */
2064 	    &sc->tx_dmat);
2065 	if (error) {
2066 		device_printf(dev, "%s: failed to create DMA tag for Tx "
2067 		    "buffers\n", __func__);
2068 		return (error);
2069 	}
2070 
2071 	/* DMA tag to allocate channel storage. */
2072 	error = bus_dma_tag_create(
2073 	    bus_get_dma_tag(dev),
2074 	    ETH_STORE_ALIGN, 0,		/* alignment, boundary */
2075 	    BUS_SPACE_MAXADDR_32BIT,	/* low restricted addr */
2076 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
2077 	    NULL, NULL,			/* filter, filterarg */
2078 	    ETH_STORE_SIZE, 1,		/* maxsize, nsegments */
2079 	    ETH_STORE_SIZE, 0,		/* maxsegsize, flags */
2080 	    NULL, NULL,			/* lockfunc, lockarg */
2081 	    &sc->st_dmat);
2082 	if (error) {
2083 		device_printf(dev, "%s: failed to create DMA tag for channel "
2084 		    "storage\n", __func__);
2085 		return (error);
2086 	}
2087 
2088 	/* DMA tag for Rx distribution key. */
2089 	error = bus_dma_tag_create(
2090 	    bus_get_dma_tag(dev),
2091 	    PAGE_SIZE, 0,		/* alignment, boundary */
2092 	    BUS_SPACE_MAXADDR_32BIT,	/* low restricted addr */
2093 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
2094 	    NULL, NULL,			/* filter, filterarg */
2095 	    DPAA2_CLASSIFIER_DMA_SIZE, 1, /* maxsize, nsegments */
2096 	    DPAA2_CLASSIFIER_DMA_SIZE, 0, /* maxsegsize, flags */
2097 	    NULL, NULL,			/* lockfunc, lockarg */
2098 	    &sc->rxd_dmat);
2099 	if (error) {
2100 		device_printf(dev, "%s: failed to create DMA tag for Rx "
2101 		    "distribution key\n", __func__);
2102 		return (error);
2103 	}
2104 
2105 	error = bus_dma_tag_create(
2106 	    bus_get_dma_tag(dev),
2107 	    PAGE_SIZE, 0,		/* alignment, boundary */
2108 	    BUS_SPACE_MAXADDR_32BIT,	/* low restricted addr */
2109 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
2110 	    NULL, NULL,			/* filter, filterarg */
2111 	    ETH_QOS_KCFG_BUF_SIZE, 1,	/* maxsize, nsegments */
2112 	    ETH_QOS_KCFG_BUF_SIZE, 0,	/* maxsegsize, flags */
2113 	    NULL, NULL,			/* lockfunc, lockarg */
2114 	    &sc->qos_dmat);
2115 	if (error) {
2116 		device_printf(dev, "%s: failed to create DMA tag for QoS key\n",
2117 		    __func__);
2118 		return (error);
2119 	}
2120 
2121 	error = bus_dma_tag_create(
2122 	    bus_get_dma_tag(dev),
2123 	    PAGE_SIZE, 0,		/* alignment, boundary */
2124 	    BUS_SPACE_MAXADDR_32BIT,	/* low restricted addr */
2125 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
2126 	    NULL, NULL,			/* filter, filterarg */
2127 	    DPAA2_TX_SGT_SZ, 1,		/* maxsize, nsegments */
2128 	    DPAA2_TX_SGT_SZ, 0,		/* maxsegsize, flags */
2129 	    NULL, NULL,			/* lockfunc, lockarg */
2130 	    &sc->sgt_dmat);
2131 	if (error) {
2132 		device_printf(dev, "%s: failed to create DMA tag for S/G "
2133 		    "tables\n", __func__);
2134 		return (error);
2135 	}
2136 
2137 	return (0);
2138 }
2139 
2140 /**
2141  * @brief Configure buffer layouts of the different DPNI queues.
2142  */
2143 static int
2144 dpaa2_ni_set_buf_layout(device_t dev)
2145 {
2146 	device_t pdev = device_get_parent(dev);
2147 	device_t child = dev;
2148 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2149 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2150 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2151 	struct dpaa2_ni_buf_layout buf_layout = {0};
2152 	struct dpaa2_cmd cmd;
2153 	uint16_t rc_token, ni_token;
2154 	int error;
2155 
2156 	DPAA2_CMD_INIT(&cmd);
2157 
2158 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2159 	if (error) {
2160 		device_printf(dev, "%s: failed to open resource container: "
2161 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2162 		goto err_exit;
2163 	}
2164 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2165 	if (error) {
2166 		device_printf(dev, "%s: failed to open network interface: "
2167 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2168 		goto close_rc;
2169 	}
2170 
2171 	/*
2172 	 * Select Rx/Tx buffer alignment. It's necessary to ensure that the
2173 	 * buffer size seen by WRIOP is a multiple of 64 or 256 bytes depending
2174 	 * on the WRIOP version.
2175 	 */
2176 	sc->buf_align = (sc->attr.wriop_ver == WRIOP_VERSION(0, 0, 0) ||
2177 	    sc->attr.wriop_ver == WRIOP_VERSION(1, 0, 0))
2178 	    ? BUF_ALIGN_V1 : BUF_ALIGN;
2179 
2180 	/*
2181 	 * Round the buffer size down to the selected alignment so that the
2182 	 * size seen by WRIOP remains a multiple of it.
2183 	 */
2184 	sc->buf_sz = ALIGN_DOWN(BUF_SIZE, sc->buf_align);
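	/*
	 * For example, assuming BUF_SIZE = 9000 (an illustrative value only):
	 * ALIGN_DOWN(9000, 64) = 8960 and ALIGN_DOWN(9000, 256) = 8960, while
	 * ALIGN_DOWN(9100, 64) = 9088 but ALIGN_DOWN(9100, 256) = 8960.
	 */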
2185 
2186 	if (bootverbose) {
2187 		device_printf(dev, "Rx/Tx buffers: size=%d, alignment=%d\n",
2188 		    sc->buf_sz, sc->buf_align);
2189 	}
2190 
2191 	/*
2192 	 *    Frame Descriptor       Tx buffer layout
2193 	 *
2194 	 *                ADDR -> |---------------------|
2195 	 *                        | SW FRAME ANNOTATION | BUF_SWA_SIZE bytes
2196 	 *                        |---------------------|
2197 	 *                        | HW FRAME ANNOTATION | BUF_TX_HWA_SIZE bytes
2198 	 *                        |---------------------|
2199 	 *                        |    DATA HEADROOM    |
2200 	 *       ADDR + OFFSET -> |---------------------|
2201 	 *                        |                     |
2202 	 *                        |                     |
2203 	 *                        |     FRAME DATA      |
2204 	 *                        |                     |
2205 	 *                        |                     |
2206 	 *                        |---------------------|
2207 	 *                        |    DATA TAILROOM    |
2208 	 *                        |---------------------|
2209 	 *
2210 	 * NOTE: This layout applies to single-buffer frames only.
2211 	 */
2212 	buf_layout.queue_type = DPAA2_NI_QUEUE_TX;
2213 	buf_layout.pd_size = BUF_SWA_SIZE;
2214 	buf_layout.pass_timestamp = true;
2215 	buf_layout.pass_frame_status = true;
2216 	buf_layout.options =
2217 	    BUF_LOPT_PRIV_DATA_SZ |
2218 	    BUF_LOPT_TIMESTAMP | /* requires 128 bytes in HWA */
2219 	    BUF_LOPT_FRAME_STATUS;
2220 	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
2221 	if (error) {
2222 		device_printf(dev, "%s: failed to set Tx buffer layout\n",
2223 		    __func__);
2224 		goto close_ni;
2225 	}
2226 
2227 	/* Tx-confirmation buffer layout */
2228 	buf_layout.queue_type = DPAA2_NI_QUEUE_TX_CONF;
2229 	buf_layout.options =
2230 	    BUF_LOPT_TIMESTAMP |
2231 	    BUF_LOPT_FRAME_STATUS;
2232 	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
2233 	if (error) {
2234 		device_printf(dev, "%s: failed to set TxConf buffer layout\n",
2235 		    __func__);
2236 		goto close_ni;
2237 	}
2238 
2239 	/*
2240 	 * Driver should reserve the amount of space indicated by this command
2241 	 * as headroom in all Tx frames.
2242 	 */
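	/*
	 * In other words, the payload of every Tx frame must start
	 * tx_data_off bytes into its buffer. An illustrative sketch (not the
	 * driver's actual FD accessors):
	 *
	 *	memcpy((uint8_t *)buf->tx.vaddr + sc->tx_data_off, data, len);
	 *
	 * with the frame descriptor's address pointing at buf->tx.paddr and
	 * its offset field set to sc->tx_data_off.
	 */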
2243 	error = DPAA2_CMD_NI_GET_TX_DATA_OFF(dev, child, &cmd, &sc->tx_data_off);
2244 	if (error) {
2245 		device_printf(dev, "%s: failed to obtain Tx data offset\n",
2246 		    __func__);
2247 		goto close_ni;
2248 	}
2249 
2250 	if (bootverbose) {
2251 		device_printf(dev, "Tx data offset=%d\n", sc->tx_data_off);
2252 	}
2253 	if ((sc->tx_data_off % 64) != 0) {
2254 		device_printf(dev, "Tx data offset (%d) is not a multiple of "
2255 		    "64 bytes\n", sc->tx_data_off);
2256 	}
2257 
2258 	/*
2259 	 *    Frame Descriptor       Rx buffer layout
2260 	 *
2261 	 *                ADDR -> |---------------------|
2262 	 *                        | SW FRAME ANNOTATION | 0 bytes
2263 	 *                        |---------------------|
2264 	 *                        | HW FRAME ANNOTATION | BUF_RX_HWA_SIZE bytes
2265 	 *                        |---------------------|
2266 	 *                        |    DATA HEADROOM    | OFFSET-BUF_RX_HWA_SIZE
2267 	 *       ADDR + OFFSET -> |---------------------|
2268 	 *                        |                     |
2269 	 *                        |                     |
2270 	 *                        |     FRAME DATA      |
2271 	 *                        |                     |
2272 	 *                        |                     |
2273 	 *                        |---------------------|
2274 	 *                        |    DATA TAILROOM    | 0 bytes
2275 	 *                        |---------------------|
2276 	 *
2277 	 * NOTE: This layout applies to single-buffer frames only.
2278 	 */
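	/*
	 * For example, with tx_data_off = 192 and BUF_RX_HWA_SIZE = 64 (both
	 * values illustrative), head_size below becomes 192 - 64 = 128 bytes
	 * of data headroom, so frame data starts at ADDR + tx_data_off on the
	 * Rx path as well.
	 */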
2279 	buf_layout.queue_type = DPAA2_NI_QUEUE_RX;
2280 	buf_layout.pd_size = 0;
2281 	buf_layout.fd_align = sc->buf_align;
2282 	buf_layout.head_size = sc->tx_data_off - BUF_RX_HWA_SIZE;
2283 	buf_layout.tail_size = 0;
2284 	buf_layout.pass_frame_status = true;
2285 	buf_layout.pass_parser_result = true;
2286 	buf_layout.pass_timestamp = true;
2287 	buf_layout.options =
2288 	    BUF_LOPT_PRIV_DATA_SZ |
2289 	    BUF_LOPT_DATA_ALIGN |
2290 	    BUF_LOPT_DATA_HEAD_ROOM |
2291 	    BUF_LOPT_DATA_TAIL_ROOM |
2292 	    BUF_LOPT_FRAME_STATUS |
2293 	    BUF_LOPT_PARSER_RESULT |
2294 	    BUF_LOPT_TIMESTAMP;
2295 	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
2296 	if (error) {
2297 		device_printf(dev, "%s: failed to set Rx buffer layout\n",
2298 		    __func__);
2299 		goto close_ni;
2300 	}
2301 
2302 	error = 0;
2303 close_ni:
2304 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2305 close_rc:
2306 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2307 err_exit:
2308 	return (error);
2309 }
2310 
2311 /**
2312  * @brief Enable Rx/Tx pause frames.
2313  *
2314  * NOTE: DPNI stops sending when a pause frame is received (Rx frame) or DPNI
2315  *       itself generates pause frames (Tx frame).
2316  */
2317 static int
2318 dpaa2_ni_set_pause_frame(device_t dev)
2319 {
2320 	device_t pdev = device_get_parent(dev);
2321 	device_t child = dev;
2322 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2323 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2324 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2325 	struct dpaa2_ni_link_cfg link_cfg = {0};
2326 	struct dpaa2_cmd cmd;
2327 	uint16_t rc_token, ni_token;
2328 	int error;
2329 
2330 	DPAA2_CMD_INIT(&cmd);
2331 
2332 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2333 	if (error) {
2334 		device_printf(dev, "%s: failed to open resource container: "
2335 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2336 		goto err_exit;
2337 	}
2338 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2339 	if (error) {
2340 		device_printf(dev, "%s: failed to open network interface: "
2341 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2342 		goto close_rc;
2343 	}
2344 
2345 	error = DPAA2_CMD_NI_GET_LINK_CFG(dev, child, &cmd, &link_cfg);
2346 	if (error) {
2347 		device_printf(dev, "%s: failed to obtain link configuration: "
2348 		    "error=%d\n", __func__, error);
2349 		goto close_ni;
2350 	}
2351 
2352 	/* Enable both Rx and Tx pause frames by default. */
2353 	link_cfg.options |= DPAA2_NI_LINK_OPT_PAUSE;
2354 	link_cfg.options &= ~DPAA2_NI_LINK_OPT_ASYM_PAUSE;
2355 
2356 	error = DPAA2_CMD_NI_SET_LINK_CFG(dev, child, &cmd, &link_cfg);
2357 	if (error) {
2358 		device_printf(dev, "%s: failed to set link configuration: "
2359 		    "error=%d\n", __func__, error);
2360 		goto close_ni;
2361 	}
2362 
2363 	sc->link_options = link_cfg.options;
2364 	error = 0;
2365 close_ni:
2366 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2367 close_rc:
2368 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2369 err_exit:
2370 	return (error);
2371 }
2372 
2373 /**
2374  * @brief Configure QoS table to determine the traffic class for the received
2375  * frame.
2376  */
2377 static int
2378 dpaa2_ni_set_qos_table(device_t dev)
2379 {
2380 	device_t pdev = device_get_parent(dev);
2381 	device_t child = dev;
2382 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2383 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2384 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2385 	struct dpaa2_ni_qos_table tbl;
2386 	struct dpaa2_buf *buf = &sc->qos_kcfg;
2387 	struct dpaa2_cmd cmd;
2388 	uint16_t rc_token, ni_token;
2389 	int error;
2390 
2391 	if (sc->attr.num.rx_tcs == 1 ||
2392 	    !(sc->attr.options & DPNI_OPT_HAS_KEY_MASKING)) {
2393 		if (bootverbose) {
2394 			device_printf(dev, "Ingress traffic classification is "
2395 			    "not supported\n");
2396 		}
2397 		return (0);
2398 	}
2399 
2400 	/*
2401 	 * Allocate a buffer visible to the device to hold the QoS table key
2402 	 * configuration.
2403 	 */
2404 	KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not storage buffer",
2405 	    __func__));
2406 	if (__predict_true(buf->store.dmat == NULL)) {
2407 		buf->store.dmat = sc->qos_dmat;
2408 	}
2409 
2410 	error = bus_dmamem_alloc(buf->store.dmat, &buf->store.vaddr,
2411 	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->store.dmap);
2412 	if (error) {
2413 		device_printf(dev, "%s: failed to allocate a buffer for QoS key "
2414 		    "configuration\n", __func__);
2415 		goto err_exit;
2416 	}
2417 
2418 	error = bus_dmamap_load(buf->store.dmat, buf->store.dmap,
2419 	    buf->store.vaddr, ETH_QOS_KCFG_BUF_SIZE, dpaa2_ni_dmamap_cb,
2420 	    &buf->store.paddr, BUS_DMA_NOWAIT);
2421 	if (error) {
2422 		device_printf(dev, "%s: failed to map QoS key configuration "
2423 		    "buffer into bus space\n", __func__);
2424 		goto err_exit;
2425 	}
2426 
2427 	DPAA2_CMD_INIT(&cmd);
2428 
2429 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2430 	if (error) {
2431 		device_printf(dev, "%s: failed to open resource container: "
2432 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2433 		goto err_exit;
2434 	}
2435 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2436 	if (error) {
2437 		device_printf(dev, "%s: failed to open network interface: "
2438 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2439 		goto close_rc;
2440 	}
2441 
2442 	tbl.default_tc = 0;
2443 	tbl.discard_on_miss = false;
2444 	tbl.keep_entries = false;
2445 	tbl.kcfg_busaddr = buf->store.paddr;
2446 	error = DPAA2_CMD_NI_SET_QOS_TABLE(dev, child, &cmd, &tbl);
2447 	if (error) {
2448 		device_printf(dev, "%s: failed to set QoS table\n", __func__);
2449 		goto close_ni;
2450 	}
2451 
2452 	error = DPAA2_CMD_NI_CLEAR_QOS_TABLE(dev, child, &cmd);
2453 	if (error) {
2454 		device_printf(dev, "%s: failed to clear QoS table\n", __func__);
2455 		goto close_ni;
2456 	}
2457 
2458 	error = 0;
2459 close_ni:
2460 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2461 close_rc:
2462 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2463 err_exit:
2464 	return (error);
2465 }
2466 
2467 static int
2468 dpaa2_ni_set_mac_addr(device_t dev)
2469 {
2470 	device_t pdev = device_get_parent(dev);
2471 	device_t child = dev;
2472 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2473 	if_t ifp = sc->ifp;
2474 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2475 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2476 	struct dpaa2_cmd cmd;
2477 	struct ether_addr rnd_mac_addr;
2478 	uint16_t rc_token, ni_token;
2479 	uint8_t mac_addr[ETHER_ADDR_LEN];
2480 	uint8_t dpni_mac_addr[ETHER_ADDR_LEN];
2481 	int error;
2482 
2483 	DPAA2_CMD_INIT(&cmd);
2484 
2485 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2486 	if (error) {
2487 		device_printf(dev, "%s: failed to open resource container: "
2488 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2489 		goto err_exit;
2490 	}
2491 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2492 	if (error) {
2493 		device_printf(dev, "%s: failed to open network interface: "
2494 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2495 		goto close_rc;
2496 	}
2497 
2498 	/*
2499 	 * Get the MAC address associated with the physical port, if the DPNI is
2500 	 * connected to a DPMAC directly associated with one of the physical
2501 	 * ports.
2502 	 */
2503 	error = DPAA2_CMD_NI_GET_PORT_MAC_ADDR(dev, child, &cmd, mac_addr);
2504 	if (error) {
2505 		device_printf(dev, "%s: failed to obtain the MAC address "
2506 		    "associated with the physical port\n", __func__);
2507 		goto close_ni;
2508 	}
2509 
2510 	/* Get primary MAC address from the DPNI attributes. */
2511 	error = DPAA2_CMD_NI_GET_PRIM_MAC_ADDR(dev, child, &cmd, dpni_mac_addr);
2512 	if (error) {
2513 		device_printf(dev, "%s: failed to obtain primary MAC address\n",
2514 		    __func__);
2515 		goto close_ni;
2516 	}
2517 
2518 	if (!ETHER_IS_ZERO(mac_addr)) {
2519 		/* Set MAC address of the physical port as DPNI's primary one. */
2520 		error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd,
2521 		    mac_addr);
2522 		if (error) {
2523 			device_printf(dev, "%s: failed to set primary MAC "
2524 			    "address\n", __func__);
2525 			goto close_ni;
2526 		}
2527 		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2528 			sc->mac.addr[i] = mac_addr[i];
2529 		}
2530 	} else if (ETHER_IS_ZERO(dpni_mac_addr)) {
2531 		/* Generate random MAC address as DPNI's primary one. */
2532 		ether_gen_addr(ifp, &rnd_mac_addr);
2533 		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2534 			mac_addr[i] = rnd_mac_addr.octet[i];
2535 		}
2536 
2537 		error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd,
2538 		    mac_addr);
2539 		if (error) {
2540 			device_printf(dev, "%s: failed to set random primary "
2541 			    "MAC address\n", __func__);
2542 			goto close_ni;
2543 		}
2544 		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2545 			sc->mac.addr[i] = mac_addr[i];
2546 		}
2547 	} else {
2548 		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2549 			sc->mac.addr[i] = dpni_mac_addr[i];
2550 		}
2551 	}
2552 
2553 	error = 0;
2554 close_ni:
2555 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2556 close_rc:
2557 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2558 err_exit:
2559 	return (error);
2560 }
2561 
2562 static void
2563 dpaa2_ni_miibus_statchg(device_t dev)
2564 {
2565 	device_t pdev = device_get_parent(dev);
2566 	device_t child = dev;
2567 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2568 	struct dpaa2_mac_link_state mac_link = { 0 };
2569 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2570 	struct dpaa2_cmd cmd;
2571 	uint16_t rc_token, mac_token;
2572 	int error, link_state;
2573 
2574 	if (sc->fixed_link || sc->mii == NULL) {
2575 		return;
2576 	}
2577 
2578 	/*
2579 	 * Note: ifp link state will only be changed AFTER we are called so we
2580 	 * cannot rely on ifp->if_linkstate here.
2581 	 */
2582 	if (sc->mii->mii_media_status & IFM_AVALID) {
2583 		if (sc->mii->mii_media_status & IFM_ACTIVE) {
2584 			link_state = LINK_STATE_UP;
2585 		} else {
2586 			link_state = LINK_STATE_DOWN;
2587 		}
2588 	} else {
2589 		link_state = LINK_STATE_UNKNOWN;
2590 	}
2591 
2592 	if (link_state != sc->link_state) {
2593 		sc->link_state = link_state;
2594 
2595 		DPAA2_CMD_INIT(&cmd);
2596 
2597 		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
2598 		    &rc_token);
2599 		if (error) {
2600 			device_printf(dev, "%s: failed to open resource "
2601 			    "container: id=%d, error=%d\n", __func__, rcinfo->id,
2602 			    error);
2603 			goto err_exit;
2604 		}
2605 		error = DPAA2_CMD_MAC_OPEN(dev, child, &cmd, sc->mac.dpmac_id,
2606 		    &mac_token);
2607 		if (error) {
2608 			device_printf(sc->dev, "%s: failed to open DPMAC: "
2609 			    "id=%d, error=%d\n", __func__, sc->mac.dpmac_id,
2610 			    error);
2611 			goto close_rc;
2612 		}
2613 
2614 		if (link_state == LINK_STATE_UP ||
2615 		    link_state == LINK_STATE_DOWN) {
2616 			/* Update DPMAC link state. */
2617 			mac_link.supported = sc->mii->mii_media.ifm_media;
2618 			mac_link.advert = sc->mii->mii_media.ifm_media;
2619 			mac_link.rate = 1000; /* TODO: derive from the negotiated media (ifmedia_baudrate()?) */
2620 			mac_link.options =
2621 			    DPAA2_MAC_LINK_OPT_AUTONEG |
2622 			    DPAA2_MAC_LINK_OPT_PAUSE;
2623 			mac_link.up = (link_state == LINK_STATE_UP) ? true : false;
2624 			mac_link.state_valid = true;
2625 
2626 			/* Inform DPMAC about link state. */
2627 			error = DPAA2_CMD_MAC_SET_LINK_STATE(dev, child, &cmd,
2628 			    &mac_link);
2629 			if (error) {
2630 				device_printf(sc->dev, "%s: failed to set DPMAC "
2631 				    "link state: id=%d, error=%d\n", __func__,
2632 				    sc->mac.dpmac_id, error);
2633 			}
2634 		}
2635 		(void)DPAA2_CMD_MAC_CLOSE(dev, child, &cmd);
2636 		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
2637 		    rc_token));
2638 	}
2639 
2640 	return;
2641 
2642 close_rc:
2643 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2644 err_exit:
2645 	return;
2646 }
2647 
2648 /**
2649  * @brief Callback function to process media change request.
2650  */
2651 static int
2652 dpaa2_ni_media_change(if_t ifp)
2653 {
2654 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2655 
2656 	DPNI_LOCK(sc);
2657 	if (sc->mii) {
2658 		mii_mediachg(sc->mii);
2659 		sc->media_status = sc->mii->mii_media.ifm_media;
2660 	} else if (sc->fixed_link) {
2661 		if_printf(ifp, "%s: can't change media in fixed mode\n",
2662 		    __func__);
2663 	}
2664 	DPNI_UNLOCK(sc);
2665 
2666 	return (0);
2667 }
2668 
2669 /**
2670  * @brief Callback function to process media status request.
2671  */
2672 static void
2673 dpaa2_ni_media_status(if_t ifp, struct ifmediareq *ifmr)
2674 {
2675 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2676 
2677 	DPNI_LOCK(sc);
2678 	if (sc->mii) {
2679 		mii_pollstat(sc->mii);
2680 		ifmr->ifm_active = sc->mii->mii_media_active;
2681 		ifmr->ifm_status = sc->mii->mii_media_status;
2682 	}
2683 	DPNI_UNLOCK(sc);
2684 }
2685 
2686 /**
2687  * @brief Callout function to check and update media status.
2688  */
2689 static void
2690 dpaa2_ni_media_tick(void *arg)
2691 {
2692 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2693 
2694 	/* Check for media type change */
2695 	if (sc->mii) {
2696 		mii_tick(sc->mii);
2697 		if (sc->media_status != sc->mii->mii_media.ifm_media) {
2698 			device_printf(sc->dev, "%s: media type changed "
2699 			    "(ifm_media=%x)\n", __func__,
2700 			    sc->mii->mii_media.ifm_media);
2700 			dpaa2_ni_media_change(sc->ifp);
2701 		}
2702 	}
2703 
2704 	/* Schedule another timeout one second from now */
2705 	callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);
2706 }
2707 
2708 static void
2709 dpaa2_ni_init(void *arg)
2710 {
2711 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2712 	if_t ifp = sc->ifp;
2713 	device_t pdev = device_get_parent(sc->dev);
2714 	device_t dev = sc->dev;
2715 	device_t child = dev;
2716 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2717 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2718 	struct dpaa2_cmd cmd;
2719 	uint16_t rc_token, ni_token;
2720 	int error;
2721 
2722 	DPNI_LOCK(sc);
2723 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2724 		DPNI_UNLOCK(sc);
2725 		return;
2726 	}
2727 	DPNI_UNLOCK(sc);
2728 
2729 	DPAA2_CMD_INIT(&cmd);
2730 
2731 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2732 	if (error) {
2733 		device_printf(dev, "%s: failed to open resource container: "
2734 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2735 		goto err_exit;
2736 	}
2737 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2738 	if (error) {
2739 		device_printf(dev, "%s: failed to open network interface: "
2740 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2741 		goto close_rc;
2742 	}
2743 
2744 	error = DPAA2_CMD_NI_ENABLE(dev, child, &cmd);
2745 	if (error) {
2746 		device_printf(dev, "%s: failed to enable DPNI: error=%d\n",
2747 		    __func__, error);
2748 	}
2749 
2750 	DPNI_LOCK(sc);
2751 	if (sc->mii) {
2752 		mii_mediachg(sc->mii);
2753 	}
2754 	callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);
2755 
2756 	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
2757 	DPNI_UNLOCK(sc);
2758 
2759 	/* Force link-state update to initialize things. */
2760 	dpaa2_ni_miibus_statchg(dev);
2761 
2762 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2763 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2764 	return;
2765 
2766 close_rc:
2767 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2768 err_exit:
2769 	return;
2770 }
2771 
2772 static int
2773 dpaa2_ni_transmit(if_t ifp, struct mbuf *m)
2774 {
2775 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2776 	struct dpaa2_ni_channel	*chan;
2777 	struct dpaa2_ni_tx_ring *tx;
2778 	uint32_t fqid;
2779 	bool found = false;
2780 	int chan_n = 0;
2781 
2782 	if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) {
2783 		m_freem(m);
2784 		return (0);
2785 	}
2784 
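	/*
	 * The flow ID, when present, is matched against the FQIDs of our Rx
	 * queues below so that a flow keeps using the channel it was
	 * received on.
	 */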
2785 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
2786 		fqid = m->m_pkthdr.flowid;
2787 		for (int i = 0; i < sc->chan_n; i++) {
2788 			chan = sc->channels[i];
2789 			for (int j = 0; j < chan->rxq_n; j++) {
2790 				if (fqid == chan->rx_queues[j].fqid) {
2791 					chan_n = chan->flowid;
2792 					found = true;
2793 					break;
2794 				}
2795 			}
2796 			if (found) {
2797 				break;
2798 			}
2799 		}
2800 	}
2801 	tx = DPAA2_TX_RING(sc, chan_n, 0);
2802 
2803 	TX_LOCK(tx);
2804 	dpaa2_ni_tx_locked(sc, tx, m);
2805 	TX_UNLOCK(tx);
2806 
2807 	return (0);
2808 }
2809 
2810 static void
2811 dpaa2_ni_qflush(if_t ifp)
2812 {
2813 	/* TODO: Find a way to drain Tx queues in QBMan. */
2814 	if_qflush(ifp);
2815 }
2816 
2817 static int
2818 dpaa2_ni_ioctl(if_t ifp, u_long c, caddr_t data)
2819 {
2820 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2821 	struct ifreq *ifr = (struct ifreq *) data;
2822 	device_t pdev = device_get_parent(sc->dev);
2823 	device_t dev = sc->dev;
2824 	device_t child = dev;
2825 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2826 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2827 	struct dpaa2_cmd cmd;
2828 	uint32_t changed = 0;
2829 	uint16_t rc_token, ni_token;
2830 	int mtu, error, rc = 0;
2831 
2832 	DPAA2_CMD_INIT(&cmd);
2833 
2834 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2835 	if (error) {
2836 		device_printf(dev, "%s: failed to open resource container: "
2837 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2838 		goto err_exit;
2839 	}
2840 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2841 	if (error) {
2842 		device_printf(dev, "%s: failed to open network interface: "
2843 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2844 		goto close_rc;
2845 	}
2846 
2847 	switch (c) {
2848 	case SIOCSIFMTU:
2849 		DPNI_LOCK(sc);
2850 		mtu = ifr->ifr_mtu;
2851 		if (mtu < ETHERMIN || mtu > ETHERMTU_JUMBO) {
2852 			DPNI_UNLOCK(sc);
2853 			error = EINVAL;
2854 			goto close_ni;
2855 		}
2856 		if_setmtu(ifp, mtu);
2857 		DPNI_UNLOCK(sc);
2858 
2859 		/* Update maximum frame length. */
2860 		error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd,
2861 		    mtu + ETHER_HDR_LEN);
2862 		if (error) {
2863 			device_printf(dev, "%s: failed to update maximum frame "
2864 			    "length: error=%d\n", __func__, error);
2865 			goto close_ni;
2866 		}
2867 		break;
2868 	case SIOCSIFCAP:
2869 		changed = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
2870 		if (changed & IFCAP_HWCSUM) {
2871 			if ((ifr->ifr_reqcap & changed) & IFCAP_HWCSUM) {
2872 				if_setcapenablebit(ifp, IFCAP_HWCSUM, 0);
2873 			} else {
2874 				if_setcapenablebit(ifp, 0, IFCAP_HWCSUM);
2875 			}
2876 		}
2877 		rc = dpaa2_ni_setup_if_caps(sc);
2878 		if (rc) {
2879 			printf("%s: failed to update iface capabilities: "
2880 			    "error=%d\n", __func__, rc);
2881 			rc = ENXIO;
2882 		}
2883 		break;
2884 	case SIOCSIFFLAGS:
2885 		DPNI_LOCK(sc);
2886 		if (if_getflags(ifp) & IFF_UP) {
2887 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2888 				changed = if_getflags(ifp) ^ sc->if_flags;
2889 				if (changed & IFF_PROMISC ||
2890 				    changed & IFF_ALLMULTI) {
2891 					rc = dpaa2_ni_setup_if_flags(sc);
2892 				}
2893 			} else {
2894 				DPNI_UNLOCK(sc);
2895 				dpaa2_ni_init(sc);
2896 				DPNI_LOCK(sc);
2897 			}
2898 		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2899 			/* FIXME: Disable DPNI. See dpaa2_ni_init(). */
2900 		}
2901 
2902 		sc->if_flags = if_getflags(ifp);
2903 		DPNI_UNLOCK(sc);
2904 		break;
2905 	case SIOCADDMULTI:
2906 	case SIOCDELMULTI:
2907 		DPNI_LOCK(sc);
2908 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2909 			DPNI_UNLOCK(sc);
2910 			rc = dpaa2_ni_update_mac_filters(ifp);
2911 			if (rc) {
2912 				device_printf(dev, "%s: failed to update MAC "
2913 				    "filters: error=%d\n", __func__, rc);
2914 			}
2915 			DPNI_LOCK(sc);
2916 		}
2917 		DPNI_UNLOCK(sc);
2918 		break;
2919 	case SIOCGIFMEDIA:
2920 	case SIOCSIFMEDIA:
2921 		if (sc->mii)
2922 			rc = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, c);
2923 		else if (sc->fixed_link) {
2924 			rc = ifmedia_ioctl(ifp, ifr, &sc->fixed_ifmedia, c);
2925 		}
2926 		break;
2927 	default:
2928 		rc = ether_ioctl(ifp, c, data);
2929 		break;
2930 	}
2931 
2932 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2933 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2934 	return (rc);
2935 
2936 close_ni:
2937 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2938 close_rc:
2939 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2940 err_exit:
2941 	return (error);
2942 }
2943 
2944 static int
2945 dpaa2_ni_update_mac_filters(if_t ifp)
2946 {
2947 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2948 	struct dpaa2_ni_mcaddr_ctx ctx;
2949 	device_t pdev = device_get_parent(sc->dev);
2950 	device_t dev = sc->dev;
2951 	device_t child = dev;
2952 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2953 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2954 	struct dpaa2_cmd cmd;
2955 	uint16_t rc_token, ni_token;
2956 	int error;
2957 
2958 	DPAA2_CMD_INIT(&cmd);
2959 
2960 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2961 	if (error) {
2962 		device_printf(dev, "%s: failed to open resource container: "
2963 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2964 		goto err_exit;
2965 	}
2966 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2967 	if (error) {
2968 		device_printf(dev, "%s: failed to open network interface: "
2969 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2970 		goto close_rc;
2971 	}
2972 
2973 	/* Remove all multicast MAC filters. */
2974 	error = DPAA2_CMD_NI_CLEAR_MAC_FILTERS(dev, child, &cmd, false, true);
2975 	if (error) {
2976 		device_printf(dev, "%s: failed to clear multicast MAC filters: "
2977 		    "error=%d\n", __func__, error);
2978 		goto close_ni;
2979 	}
2980 
2981 	ctx.ifp = ifp;
2982 	ctx.error = 0;
2983 	ctx.nent = 0;
2984 
2985 	if_foreach_llmaddr(ifp, dpaa2_ni_add_maddr, &ctx);
2986 
2987 	error = ctx.error;
2988 close_ni:
2989 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2990 close_rc:
2991 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2992 err_exit:
2993 	return (error);
2994 }
2995 
2996 static u_int
2997 dpaa2_ni_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2998 {
2999 	struct dpaa2_ni_mcaddr_ctx *ctx = arg;
3000 	struct dpaa2_ni_softc *sc = if_getsoftc(ctx->ifp);
3001 	device_t pdev = device_get_parent(sc->dev);
3002 	device_t dev = sc->dev;
3003 	device_t child = dev;
3004 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
3005 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
3006 	struct dpaa2_cmd cmd;
3007 	uint16_t rc_token, ni_token;
3008 	int error;
3009 
3010 	if (ctx->error != 0) {
3011 		return (0);
3012 	}
3013 
3014 	if (ETHER_IS_MULTICAST(LLADDR(sdl))) {
3015 		DPAA2_CMD_INIT(&cmd);
3016 
3017 		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
3018 		    &rc_token);
3019 		if (error) {
3020 			device_printf(dev, "%s: failed to open resource "
3021 			    "container: id=%d, error=%d\n", __func__, rcinfo->id,
3022 			    error);
3023 			return (0);
3024 		}
3025 		error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id,
3026 		    &ni_token);
3027 		if (error) {
3028 			device_printf(dev, "%s: failed to open network interface: "
3029 			    "id=%d, error=%d\n", __func__, dinfo->id, error);
3030 			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
3031 			    rc_token));
3032 			return (0);
3033 		}
3034 
3035 		ctx->error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, &cmd,
3036 		    LLADDR(sdl));
3037 
3038 		(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
3039 		    ni_token));
3040 		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
3041 		    rc_token));
3042 
3043 		if (ctx->error != 0) {
3044 			device_printf(dev, "%s: can't add more than %d MAC "
3045 			    "addresses, switching to multicast promiscuous "
3046 			    "mode\n", __func__, ctx->nent);
3047 
3048 			/* Enable multicast promiscuous mode. */
3049 			DPNI_LOCK(sc);
3050 			if_setflagbits(ctx->ifp, IFF_ALLMULTI, 0);
3051 			sc->if_flags |= IFF_ALLMULTI;
3052 			ctx->error = dpaa2_ni_setup_if_flags(sc);
3053 			DPNI_UNLOCK(sc);
3054 
3055 			return (0);
3056 		}
3057 		ctx->nent++;
3058 	}
3059 
3060 	return (1);
3061 }
3062 
3063 static void
3064 dpaa2_ni_intr(void *arg)
3065 {
3066 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
3067 	device_t pdev = device_get_parent(sc->dev);
3068 	device_t dev = sc->dev;
3069 	device_t child = dev;
3070 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
3071 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
3072 	struct dpaa2_cmd cmd;
3073 	uint32_t status = ~0u; /* clear all IRQ status bits */
3074 	uint16_t rc_token, ni_token;
3075 	int error;
3076 
3077 	DPAA2_CMD_INIT(&cmd);
3078 
3079 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
3080 	if (error) {
3081 		device_printf(dev, "%s: failed to open resource container: "
3082 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
3083 		goto err_exit;
3084 	}
3085 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
3086 	if (error) {
3087 		device_printf(dev, "%s: failed to open network interface: "
3088 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
3089 		goto close_rc;
3090 	}
3091 
3092 	error = DPAA2_CMD_NI_GET_IRQ_STATUS(dev, child, &cmd, DPNI_IRQ_INDEX,
3093 	    &status);
3094 	if (error) {
3095 		device_printf(sc->dev, "%s: failed to obtain IRQ status: "
3096 		    "error=%d\n", __func__, error);
3097 	}
3098 
3099 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
3100 close_rc:
3101 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
3102 err_exit:
3103 	return;
3104 }
3105 
3106 /**
3107  * @brief Callback to obtain a physical address of the only DMA segment mapped.
3108  */
3109 static void
3110 dpaa2_ni_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3111 {
3112 	if (error == 0) {
3113 		KASSERT(nseg == 1, ("too many segments: nseg=%d\n", nseg));
3114 		*(bus_addr_t *) arg = segs[0].ds_addr;
3115 	}
3116 }
3117 
3118 /**
3119  * @brief Release new buffers to the buffer pool if necessary.
3120  */
3121 static void
3122 dpaa2_ni_bp_task(void *arg, int count)
3123 {
3124 	device_t bp_dev;
3125 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
3126 	struct dpaa2_bp_softc *bpsc;
3127 	struct dpaa2_bp_conf bp_conf;
3128 	const int buf_num = DPAA2_ATOMIC_READ(&sc->buf_num);
3129 	int error;
3130 
3131 	/* There's only one buffer pool for now. */
3132 	bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
3133 	bpsc = device_get_softc(bp_dev);
3134 
3135 	/* Get state of the buffer pool. */
3136 	error = DPAA2_SWP_QUERY_BP(sc->channels[0]->io_dev, bpsc->attr.bpid,
3137 	    &bp_conf);
3138 	if (error) {
3139 		device_printf(sc->dev, "%s: failed to query buffer pool "
3140 		    "configuration: error=%d\n", __func__, error);
3141 		return;
3142 	}
3143 
3144 	/* Seed another buf_num buffers if fewer than 25% (buf_num >> 2) are free. */
3145 	if (bp_conf.free_bufn < (buf_num >> 2)) {
3146 		(void)dpaa2_ni_seed_buf_pool(sc, buf_num);
3147 		DPAA2_ATOMIC_XCHG(&sc->buf_free, bp_conf.free_bufn);
3148 	}
3149 }
3150 
3151 /**
3152  * @brief Poll frames from a specific channel when CDAN is received.
3153  *
3154  * NOTE: To be called from the DPIO interrupt handler.
3155  */
3156 static void
3157 dpaa2_ni_poll(void *arg)
3158 {
3159 	struct dpaa2_ni_channel *chan = (struct dpaa2_ni_channel *) arg;
3160 	struct dpaa2_io_softc *iosc;
3161 	struct dpaa2_swp *swp;
3162 	struct dpaa2_ni_fq *fq;
3163 	int error, consumed = 0;
3164 
3165 	KASSERT(chan != NULL, ("%s: channel is NULL", __func__));
3166 
3167 	iosc = device_get_softc(chan->io_dev);
3168 	swp = iosc->swp;
3169 
3170 	do {
3171 		error = dpaa2_swp_pull(swp, chan->id, &chan->store,
3172 		    ETH_STORE_FRAMES);
3173 		if (error) {
3174 			device_printf(chan->ni_dev, "%s: failed to pull frames: "
3175 			    "chan_id=%d, error=%d\n", __func__, chan->id, error);
3176 			break;
3177 		}
3178 
3179 		/*
3180 		 * TODO: Combine frames from the same Rx queue returned as
3181 		 * a result to the current VDQ command into a chain (linked
3182 		 * with m_nextpkt) to ammortize the FQ lock.
3183 		 */
3184 		error = dpaa2_ni_consume_frames(chan, &fq, &consumed);
3185 		if (error == ENOENT) {
3186 			break;
3187 		}
3188 		if (error == ETIMEDOUT) {
3189 			device_printf(chan->ni_dev, "%s: timed out while "
3190 			    "consuming frames: chan_id=%d\n", __func__, chan->id);
3191 		}
3192 	} while (true);
3193 
3194 	/* Re-arm channel to generate CDAN. */
3195 	error = DPAA2_SWP_CONF_WQ_CHANNEL(chan->io_dev, &chan->ctx);
3196 	if (error) {
3197 		device_printf(chan->ni_dev, "%s: failed to rearm: chan_id=%d, "
3198 		    "error=%d\n", __func__, chan->id, error);
3199 	}
3200 }
3201 
3202 /**
3203  * @brief Transmit mbufs.
3204  */
3205 static void
3206 dpaa2_ni_tx_locked(struct dpaa2_ni_softc *sc, struct dpaa2_ni_tx_ring *tx,
3207     struct mbuf *m)
3208 {
3209 	struct dpaa2_ni_fq *fq = tx->fq;
3210 	struct dpaa2_buf *buf;
3211 	struct dpaa2_fd fd;
3212 	struct mbuf *m_d;
3213 	bus_dma_segment_t txsegs[DPAA2_TX_SEGLIMIT];
3214 	uint64_t idx;
3215 	void *pidx;
3216 	int error, rc, txnsegs;
3217 
3218 	/* Obtain an index of a Tx buffer. */
3219 	pidx = buf_ring_dequeue_sc(tx->idx_br);
3220 	if (__predict_false(pidx == NULL)) {
3221 		/* TODO: Do not give up easily. */
3222 		m_freem(m);
3223 		return;
3224 	} else {
3225 		idx = (uint64_t) pidx;
3226 		buf = &tx->buf[idx];
3227 		buf->tx.m = m;
3228 		buf->tx.idx = idx;
3229 		buf->tx.sgt_paddr = 0;
3230 	}
3231 
3232 	/* Load mbuf to transmit. */
3233 	error = bus_dmamap_load_mbuf_sg(buf->tx.dmat, buf->tx.dmap, m,
3234 	    txsegs, &txnsegs, BUS_DMA_NOWAIT);
3235 	if (__predict_false(error != 0)) {
3236 		/* Load failed, likely too many fragments; try to collapse the chain. */
3237 		m_d = m_collapse(m, M_NOWAIT, DPAA2_TX_SEGLIMIT);
3238 		if (m_d == NULL) {
3239 			device_printf(sc->dev, "%s: mbuf "
3240 			    "defragmentation failed\n", __func__);
3241 			fq->chan->tx_dropped++;
3242 			goto err;
3243 		}
3244 
3245 		buf->tx.m = m = m_d;
3246 		error = bus_dmamap_load_mbuf_sg(buf->tx.dmat,
3247 		    buf->tx.dmap, m, txsegs, &txnsegs, BUS_DMA_NOWAIT);
3248 		if (__predict_false(error != 0)) {
3249 			device_printf(sc->dev, "%s: failed to load "
3250 			    "mbuf: error=%d\n", __func__, error);
3251 			fq->chan->tx_dropped++;
3252 			goto err;
3253 		}
3254 	}
3255 
3256 	/* Build frame descriptor. */
3257 	error = dpaa2_ni_build_fd(sc, tx, buf, txsegs, txnsegs, &fd);
3258 	if (__predict_false(error != 0)) {
3259 		device_printf(sc->dev, "%s: failed to build frame "
3260 		    "descriptor: error=%d\n", __func__, error);
3261 		fq->chan->tx_dropped++;
3262 		goto err_unload;
3263 	}
3264 
3265 	/* TODO: Enqueue several frames in a single command. */
3266 	for (int i = 0; i < DPAA2_NI_ENQUEUE_RETRIES; i++) {
3267 		/* TODO: Return error codes instead of # of frames. */
3268 		rc = DPAA2_SWP_ENQ_MULTIPLE_FQ(fq->chan->io_dev, tx->fqid,
3269 		    &fd, 1);
3270 		if (rc == 1) {
3271 			break;
3272 		}
3273 	}
3274 
3275 	bus_dmamap_sync(buf->tx.dmat, buf->tx.dmap,
3276 	    BUS_DMASYNC_PREWRITE);
3277 	bus_dmamap_sync(buf->tx.sgt_dmat, buf->tx.sgt_dmap,
3278 	    BUS_DMASYNC_PREWRITE);
3279 
3280 	if (rc != 1) {
3281 		fq->chan->tx_dropped++;
3282 		goto err_unload;
3283 	} else {
3284 		fq->chan->tx_frames++;
3285 	}
3286 	return;
3287 
3288 err_unload:
3289 	bus_dmamap_unload(buf->tx.dmat, buf->tx.dmap);
3290 	if (buf->tx.sgt_paddr != 0) {
3291 		bus_dmamap_unload(buf->tx.sgt_dmat, buf->tx.sgt_dmap);
3292 	}
3293 err:
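	/* Drop the frame and return its index to the free index ring. */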
3294 	m_freem(buf->tx.m);
3295 	buf_ring_enqueue(tx->idx_br, pidx);
3296 }
3297 
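/**
 * @brief Consume dequeued frames from the channel storage.
 *
 * Interprets dpaa2_ni_chan_storage_next() results: EINPROGRESS means more
 * responses may follow, EALREADY means the current VDQ command has expired
 * and ENOENT means the frame queue is empty; any other code is unexpected.
 */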
3298 static int
3299 dpaa2_ni_consume_frames(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq **src,
3300     uint32_t *consumed)
3301 {
3302 	struct dpaa2_ni_fq *fq = NULL;
3303 	struct dpaa2_dq *dq;
3304 	struct dpaa2_fd *fd;
3305 	int rc, frames = 0;
3306 
3307 	do {
3308 		rc = dpaa2_ni_chan_storage_next(chan, &dq);
3309 		if (rc == EINPROGRESS) {
3310 			if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
3311 				fd = &dq->fdr.fd;
3312 				fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;
3313 				fq->consume(chan, fq, fd);
3314 				frames++;
3315 			}
3316 		} else if (rc == EALREADY || rc == ENOENT) {
3317 			if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
3318 				fd = &dq->fdr.fd;
3319 				fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;
3320 				fq->consume(chan, fq, fd);
3321 				frames++;
3322 			}
3323 			break;
3324 		} else {
3325 			KASSERT(false, ("%s: should not reach here", __func__));
3326 		}
3327 	} while (true);
3328 
3329 	KASSERT(chan->store_idx < chan->store_sz,
3330 	    ("channel store idx >= size: store_idx=%d, store_sz=%d",
3331 	    chan->store_idx, chan->store_sz));
3332 
3333 	/*
3334 	 * A dequeue operation pulls frames from a single queue into the store.
3335 	 * Return the source frame queue and the number of consumed frames.
3336 	 */
3337 	if (src != NULL)
3338 		*src = fq;
3339 	if (consumed != NULL)
3340 		*consumed = frames;
3341 
3342 	return (rc);
3343 }
3344 
3345 /**
3346  * @brief Receive a frame and pass it up the network stack.
3347  */
3348 static int
3349 dpaa2_ni_rx(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq,
3350     struct dpaa2_fd *fd)
3351 {
3352 	struct dpaa2_ni_softc *sc = device_get_softc(chan->ni_dev);
3353 	struct dpaa2_bp_softc *bpsc;
3354 	struct dpaa2_buf *buf;
3355 	if_t ifp = sc->ifp;
3356 	struct mbuf *m;
3357 	device_t bp_dev;
3358 	bus_addr_t paddr = (bus_addr_t) fd->addr;
3359 	bus_addr_t released[DPAA2_SWP_BUFS_PER_CMD];
3360 	void *buf_data;
3361 	int buf_idx, buf_len;
3362 	int error, released_n = 0;
3363 
3364 	/*
3365 	 * Get buffer index from the ADDR_TOK (not used by QBMan) bits of the
3366 	 * physical address.
3367 	 */
3368 	buf_idx = dpaa2_ni_fd_buf_idx(fd);
3369 	buf = &sc->buf[buf_idx];
3370 
3371 	KASSERT(buf->type == DPAA2_BUF_RX, ("%s: not Rx buffer", __func__));
3372 	if (paddr != buf->rx.paddr) {
3373 		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
3374 		    __func__, paddr, buf->rx.paddr);
3375 	}
3376 
3377 	/* Update statistics. */
3378 	switch (dpaa2_ni_fd_err(fd)) {
3379 	case 1: /* Enqueue rejected by QMan */
3380 		sc->rx_enq_rej_frames++;
3381 		break;
3382 	case 2: /* QMan IEOI error */
3383 		sc->rx_ieoi_err_frames++;
3384 		break;
3385 	default:
3386 		break;
3387 	}
3388 	switch (dpaa2_ni_fd_format(fd)) {
3389 	case DPAA2_FD_SINGLE:
3390 		sc->rx_single_buf_frames++;
3391 		break;
3392 	case DPAA2_FD_SG:
3393 		sc->rx_sg_buf_frames++;
3394 		break;
3395 	default:
3396 		break;
3397 	}
3398 
3399 	m = buf->rx.m;
3400 	buf->rx.m = NULL;
3401 	bus_dmamap_sync(buf->rx.dmat, buf->rx.dmap,
3402 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3403 	bus_dmamap_unload(buf->rx.dmat, buf->rx.dmap);
3404 
3405 	buf_len = dpaa2_ni_fd_data_len(fd);
3406 	buf_data = (uint8_t *)buf->rx.vaddr + dpaa2_ni_fd_offset(fd);
3407 
3408 	/* Prefetch mbuf data. */
3409 	__builtin_prefetch(buf_data);
3410 
3411 	/* Only write mbuf fields; avoid reading (cold) cache lines. */
3412 	m->m_flags |= M_PKTHDR;
3413 	m->m_data = buf_data;
3414 	m->m_len = buf_len;
3415 	m->m_pkthdr.len = buf_len;
3416 	m->m_pkthdr.rcvif = ifp;
3417 	m->m_pkthdr.flowid = fq->fqid;
3418 	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
3419 
3420 	if_input(ifp, m);
3421 
3422 	/* Keep the buffer to be recycled. */
3423 	chan->recycled[chan->recycled_n++] = paddr;
3424 	KASSERT(chan->recycled_n <= DPAA2_SWP_BUFS_PER_CMD,
3425 	    ("%s: too many buffers to recycle", __func__));
3426 
3427 	/* Re-seed and release recycled buffers back to the pool. */
3428 	if (chan->recycled_n == DPAA2_SWP_BUFS_PER_CMD) {
3429 		/* Release new buffers to the pool if needed. */
3430 		taskqueue_enqueue(sc->bp_taskq, &sc->bp_task);
3431 
3432 		for (int i = 0; i < chan->recycled_n; i++) {
3433 			paddr = chan->recycled[i];
3434 
3435 			/* Parse ADDR_TOK of the recycled buffer. */
3436 			buf_idx = (paddr >> DPAA2_NI_BUF_IDX_SHIFT)
3437 			    & DPAA2_NI_BUF_IDX_MASK;
3438 			buf = &sc->buf[buf_idx];
3439 
3440 			/* Seed recycled buffer. */
3441 			error = dpaa2_ni_seed_rxbuf(sc, buf, buf_idx);
3442 			KASSERT(error == 0, ("%s: failed to seed recycled "
3443 			    "buffer: error=%d", __func__, error));
3444 			if (__predict_false(error != 0)) {
3445 				device_printf(sc->dev, "%s: failed to seed "
3446 				    "recycled buffer: error=%d\n", __func__,
3447 				    error);
3448 				continue;
3449 			}
3450 
3451 			/* Prepare buffer to be released in a single command. */
3452 			released[released_n++] = buf->rx.paddr;
3453 		}
3454 
3455 		/* There's only one buffer pool for now. */
3456 		bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
3457 		bpsc = device_get_softc(bp_dev);
3458 
3459 		error = DPAA2_SWP_RELEASE_BUFS(chan->io_dev, bpsc->attr.bpid,
3460 		    released, released_n);
3461 		if (__predict_false(error != 0)) {
3462 			device_printf(sc->dev, "%s: failed to release buffers "
3463 			    "to the pool: error=%d\n", __func__, error);
3464 			return (error);
3465 		}
3466 
3467 		/* Be ready to recycle the next portion of the buffers. */
3468 		chan->recycled_n = 0;
3469 	}
3470 
3471 	return (0);
3472 }
3473 
3474 /**
3475  * @brief Receive Rx error frames.
3476  */
3477 static int
3478 dpaa2_ni_rx_err(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq,
3479     struct dpaa2_fd *fd)
3480 {
3481 	device_t bp_dev;
3482 	struct dpaa2_ni_softc *sc = device_get_softc(chan->ni_dev);
3483 	struct dpaa2_bp_softc *bpsc;
3484 	struct dpaa2_buf *buf;
3485 	bus_addr_t paddr = (bus_addr_t) fd->addr;
3486 	int buf_idx, error;
3487 
3488 	/*
3489 	 * Get buffer index from the ADDR_TOK (not used by QBMan) bits of the
3490 	 * physical address.
3491 	 */
3492 	buf_idx = dpaa2_ni_fd_buf_idx(fd);
3493 	buf = &sc->buf[buf_idx];
3494 
3495 	KASSERT(buf->type == DPAA2_BUF_RX, ("%s: not Rx buffer", __func__));
3496 	if (paddr != buf->rx.paddr) {
3497 		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
3498 		    __func__, paddr, buf->rx.paddr);
3499 	}
3500 
3501 	/* There's only one buffer pool for now. */
3502 	bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
3503 	bpsc = device_get_softc(bp_dev);
3504 
3505 	/* Release buffer to QBMan buffer pool. */
3506 	error = DPAA2_SWP_RELEASE_BUFS(chan->io_dev, bpsc->attr.bpid, &paddr, 1);
3507 	if (error != 0) {
3508 		device_printf(sc->dev, "%s: failed to release frame buffer to "
3509 		    "the pool: error=%d\n", __func__, error);
3510 		return (error);
3511 	}
3512 
3513 	return (0);
3514 }
3515 
3516 /**
3517  * @brief Receive Tx confirmation frames.
3518  */
3519 static int
3520 dpaa2_ni_tx_conf(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq,
3521     struct dpaa2_fd *fd)
3522 {
3523 	struct dpaa2_ni_softc *sc = device_get_softc(chan->ni_dev);
3524 	struct dpaa2_ni_channel	*buf_chan;
3525 	struct dpaa2_ni_tx_ring *tx;
3526 	struct dpaa2_buf *buf;
3527 	bus_addr_t paddr = (bus_addr_t) (fd->addr & BUF_MAXADDR_49BIT);
3528 	uint64_t buf_idx;
3529 	int chan_idx, tx_idx;
3530 
3531 	/*
3532 	 * Get channel, Tx ring and buffer indexes from the ADDR_TOK bits
3533 	 * (not used by QBMan) of the physical address.
3534 	 */
3535 	chan_idx = dpaa2_ni_fd_chan_idx(fd);
3536 	tx_idx = dpaa2_ni_fd_tx_idx(fd);
3537 	buf_idx = (uint64_t) dpaa2_ni_fd_txbuf_idx(fd);
3538 
3539 	KASSERT(tx_idx < DPAA2_NI_MAX_TCS, ("%s: invalid Tx ring index",
3540 	    __func__));
3541 	KASSERT(buf_idx < DPAA2_NI_BUFS_PER_TX, ("%s: invalid Tx buffer index",
3542 	    __func__));
3543 
3544 	buf_chan = sc->channels[chan_idx];
3545 	tx = &buf_chan->txc_queue.tx_rings[tx_idx];
3546 	buf = &tx->buf[buf_idx];
3547 
3548 	KASSERT(buf->type == DPAA2_BUF_TX, ("%s: not Tx buffer", __func__));
3549 	if (paddr != buf->tx.paddr) {
3550 		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
3551 		    __func__, paddr, buf->tx.paddr);
3552 	}
3553 
3555 	bus_dmamap_unload(buf->tx.dmat, buf->tx.dmap);
3556 	if (buf->tx.sgt_paddr != 0)
3557 		bus_dmamap_unload(buf->tx.sgt_dmat, buf->tx.sgt_dmap);
3558 	m_freem(buf->tx.m);
3559 
3560 	/* Return Tx buffer index back to the ring. */
3561 	buf_ring_enqueue(tx->idx_br, (void *) buf_idx);
3562 
3563 	return (0);
3564 }
3565 
3566 /**
3567  * @brief Compare versions of the DPAA2 network interface API.
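 *
 * Returns a positive value, zero or a negative value when the API version
 * reported by the firmware is newer than, equal to or older than the given
 * major.minor pair, strcmp(3)-style. For example, a non-negative result for
 * (7, 6) means the firmware implements DPNI API 7.6 or later.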
3568  */
3569 static int
3570 dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *sc, uint16_t major,
3571     uint16_t minor)
3572 {
3573 	if (sc->api_major == major)
3574 		return (sc->api_minor - minor);
3575 	return (sc->api_major - major);
3576 }
3577 
3578 /**
3579  * @brief Allocate Rx buffers visible to QBMan and release them to the pool.
3580  */
3581 static int
3582 dpaa2_ni_seed_buf_pool(struct dpaa2_ni_softc *sc, uint32_t seedn)
3583 {
3584 	device_t bp_dev;
3585 	struct dpaa2_bp_softc *bpsc;
3586 	struct dpaa2_buf *buf;
3587 	bus_addr_t paddr[DPAA2_SWP_BUFS_PER_CMD];
3588 	const int allocated = DPAA2_ATOMIC_READ(&sc->buf_num);
3589 	int i, error, bufn = 0;
3590 
3591 	KASSERT(sc->bp_dmat != NULL, ("%s: DMA tag for buffer pool not "
3592 	    "created?", __func__));
3593 
3594 	/* There's only one buffer pool for now. */
3595 	bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
3596 	bpsc = device_get_softc(bp_dev);
3597 
3598 	/* Limit # of buffers released to the pool. */
3599 	if (allocated + seedn > DPAA2_NI_BUFS_MAX)
3600 		seedn = DPAA2_NI_BUFS_MAX - allocated;
3601 
3602 	/* Release "seedn" buffers to the pool. */
3603 	for (i = allocated; i < (allocated + seedn); i++) {
3604 		/* A full batch is ready: release it to the pool in one command. */
3605 		if (bufn == DPAA2_SWP_BUFS_PER_CMD) {
3606 			error = DPAA2_SWP_RELEASE_BUFS(sc->channels[0]->io_dev,
3607 			    bpsc->attr.bpid, paddr, bufn);
3608 			if (error) {
3609 				device_printf(sc->dev, "%s: failed to release "
3610 				    "buffers to the pool (1)\n", __func__);
3611 				return (error);
3612 			}
3613 			DPAA2_ATOMIC_ADD(&sc->buf_num, bufn);
3614 			bufn = 0;
3615 		}
3616 
3617 		buf = &sc->buf[i];
3618 		buf->type = DPAA2_BUF_RX;
3619 		buf->rx.m = NULL;
3620 		buf->rx.dmap = NULL;
3621 		buf->rx.paddr = 0;
3622 		buf->rx.vaddr = NULL;
3623 		error = dpaa2_ni_seed_rxbuf(sc, buf, i);
3624 		if (error)
3625 			break;
3626 		paddr[bufn] = buf->rx.paddr;
3627 		bufn++;
3628 	}
3629 
3630 	/* Release if there are buffers left. */
3631 	if (bufn > 0) {
3632 		error = DPAA2_SWP_RELEASE_BUFS(sc->channels[0]->io_dev,
3633 		    bpsc->attr.bpid, paddr, bufn);
3634 		if (error) {
3635 			device_printf(sc->dev, "%s: failed to release "
3636 			    "buffers to the pool (2)\n", __func__);
3637 			return (error);
3638 		}
3639 		DPAA2_ATOMIC_ADD(&sc->buf_num, bufn);
3640 	}
3641 
3642 	return (0);
3643 }
3644 
3645 /**
3646  * @brief Prepare Rx buffer to be released to the buffer pool.
3647  */
3648 static int
3649 dpaa2_ni_seed_rxbuf(struct dpaa2_ni_softc *sc, struct dpaa2_buf *buf, int idx)
3650 {
3651 	struct mbuf *m;
3652 	bus_dmamap_t dmap;
3653 	bus_dma_segment_t segs;
3654 	int error, nsegs;
3655 
3656 	KASSERT(sc->bp_dmat != NULL, ("%s: Buffer pool DMA tag is not "
3657 	    "allocated?", __func__));
3658 	KASSERT(buf->type == DPAA2_BUF_RX, ("%s: not Rx buffer", __func__));
3659 
3660 	/* Keep DMA tag for this buffer. */
3661 	if (__predict_false(buf->rx.dmat == NULL))
3662 		buf->rx.dmat = sc->bp_dmat;
3663 
3664 	/* Create a DMA map for the given buffer if it doesn't exist yet. */
3665 	if (__predict_false(buf->rx.dmap == NULL)) {
3666 		error = bus_dmamap_create(buf->rx.dmat, 0, &dmap);
3667 		if (error) {
3668 			device_printf(sc->dev, "%s: failed to create DMA map "
3669 			    "for buffer: buf_idx=%d, error=%d\n", __func__,
3670 			    idx, error);
3671 			return (error);
3672 		}
3673 		buf->rx.dmap = dmap;
3674 	}
3675 
3676 	/* Allocate mbuf if needed. */
3677 	if (__predict_false(buf->rx.m == NULL)) {
3678 		m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, BUF_SIZE);
3679 		if (__predict_false(m == NULL)) {
3680 			device_printf(sc->dev, "%s: failed to allocate mbuf for "
3681 			    "buffer\n", __func__);
3682 			return (ENOMEM);
3683 		}
3684 		m->m_len = m->m_ext.ext_size;
3685 		m->m_pkthdr.len = m->m_ext.ext_size;
3686 		buf->rx.m = m;
3687 	} else
3688 		m = buf->rx.m;
3689 
3690 	error = bus_dmamap_load_mbuf_sg(buf->rx.dmat, buf->rx.dmap,
3691 	    m, &segs, &nsegs, BUS_DMA_NOWAIT);
3692 	KASSERT(nsegs == 1, ("one segment expected: nsegs=%d", nsegs));
3693 	KASSERT(error == 0, ("failed to map mbuf: error=%d", error));
3694 	if (__predict_false(error != 0 || nsegs != 1)) {
3695 		device_printf(sc->dev, "%s: failed to map mbuf: error=%d, "
3696 		    "nsegs=%d\n", __func__, error, nsegs);
3697 		bus_dmamap_unload(buf->rx.dmat, buf->rx.dmap);
3698 		m_freem(m);
3699 		return (error);
3700 	}
3701 	buf->rx.paddr = segs.ds_addr;
3702 	buf->rx.vaddr = m->m_data;
3703 
3704 	/*
3705 	 * Write buffer index to the ADDR_TOK (bits 63-49) which is not used by
3706 	 * QBMan and is supposed to assist in physical to virtual address
3707 	 * translation.
3708 	 *
3709 	 * NOTE: "lowaddr" and "highaddr" of the window which cannot be accessed
3710 	 * 	 by QBMan must be configured in the DMA tag accordingly.
3711 	 */
3712 	buf->rx.paddr =
3713 	    ((uint64_t)(idx & DPAA2_NI_BUF_IDX_MASK) <<
3714 		DPAA2_NI_BUF_IDX_SHIFT) |
3715 	    (buf->rx.paddr & DPAA2_NI_BUF_ADDR_MASK);
3716 
3717 	return (0);
3718 }
3719 
3720 /**
3721  * @brief Prepare Tx buffer to be added to the Tx ring.
3722  */
3723 static int
3724 dpaa2_ni_seed_txbuf(struct dpaa2_ni_softc *sc, struct dpaa2_buf *buf, int idx)
3725 {
3726 	bus_dmamap_t dmap;
3727 	int error;
3728 
3729 	KASSERT(sc->tx_dmat != NULL, ("%s: Tx DMA tag is not allocated?",
3730 	    __func__));
3731 	KASSERT(sc->sgt_dmat != NULL, ("%s: S/G DMA tag not allocated?",
3732 	    __func__));
3733 	KASSERT(buf->type == DPAA2_BUF_TX, ("%s: not Tx buffer", __func__));
3734 
3735 	/* Keep DMA tags for this buffer. */
3736 	if (__predict_true(buf->tx.dmat == NULL))
3737 		buf->tx.dmat = sc->tx_dmat;
3738 	if (__predict_true(buf->tx.sgt_dmat == NULL))
3739 		buf->tx.sgt_dmat = sc->sgt_dmat;
3740 
3741 	/* Create a DMA map for the given buffer if it doesn't exist yet. */
3742 	if (__predict_true(buf->tx.dmap == NULL)) {
3743 		error = bus_dmamap_create(buf->tx.dmat, 0, &dmap);
3744 		if (error != 0) {
3745 			device_printf(sc->dev, "%s: failed to create "
3746 			    "Tx DMA map: error=%d\n", __func__, error);
3747 			return (error);
3748 		}
3749 		buf->tx.dmap = dmap;
3750 	}
3751 
3752 	/* Allocate a buffer to store scatter/gather table. */
3753 	if (__predict_true(buf->tx.sgt_vaddr == NULL)) {
3754 		error = bus_dmamem_alloc(buf->tx.sgt_dmat,
3755 		    &buf->tx.sgt_vaddr, BUS_DMA_ZERO | BUS_DMA_COHERENT,
3756 		    &buf->tx.sgt_dmap);
3757 		if (error != 0) {
3758 			device_printf(sc->dev, "%s: failed to allocate "
3759 			    "S/G table: error=%d\n", __func__, error);
3760 			return (error);
3761 		}
3762 	}
3763 
3764 	return (0);
3765 }
3766 
3767 /**
3768  * @brief Allocate channel storage visible to QBMan.
3769  */
3770 static int
3771 dpaa2_ni_seed_chan_storage(struct dpaa2_ni_softc *sc,
3772     struct dpaa2_ni_channel *chan)
3773 {
3774 	struct dpaa2_buf *buf = &chan->store;
3775 	int error;
3776 
3777 	KASSERT(sc->st_dmat != NULL, ("%s: channel storage DMA tag is not "
3778 	    "allocated?", __func__));
3779 	KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not channel storage buffer",
3780 	    __func__));
3781 
3782 	/* Keep DMA tag for this buffer. */
3783 	if (__predict_false(buf->store.dmat == NULL)) {
3784 		buf->store.dmat = sc->st_dmat;
3785 	}
3786 
3787 	if (__predict_false(buf->store.vaddr == NULL)) {
3788 		error = bus_dmamem_alloc(buf->store.dmat, &buf->store.vaddr,
3789 		    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->store.dmap);
3790 		if (error) {
3791 			device_printf(sc->dev, "%s: failed to allocate channel "
3792 			    "storage\n", __func__);
3793 			return (error);
3794 		}
3795 	}
3796 
3797 	if (__predict_false(buf->store.paddr == 0)) {
3798 		error = bus_dmamap_load(buf->store.dmat, buf->store.dmap,
3799 		    buf->store.vaddr, ETH_STORE_SIZE, dpaa2_ni_dmamap_cb,
3800 		    &buf->store.paddr, BUS_DMA_NOWAIT);
3801 		if (error) {
3802 			device_printf(sc->dev, "%s: failed to map channel "
3803 			    "storage\n", __func__);
3804 			return (error);
3805 		}
3806 	}
3807 
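	/* The storage accommodates up to ETH_STORE_FRAMES dequeue responses. */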
3808 	chan->store_sz = ETH_STORE_FRAMES;
3809 	chan->store_idx = 0;
3810 
3811 	return (0);
3812 }
3813 
3814 /**
3815  * @brief Build a DPAA2 frame descriptor.
3816  */
3817 static int
3818 dpaa2_ni_build_fd(struct dpaa2_ni_softc *sc, struct dpaa2_ni_tx_ring *tx,
3819     struct dpaa2_buf *buf, bus_dma_segment_t *txsegs, int txnsegs,
3820     struct dpaa2_fd *fd)
3821 {
3822 	struct dpaa2_ni_channel	*chan = tx->fq->chan;
3823 	struct dpaa2_sg_entry *sgt;
3824 	int i, error;
3825 
3826 	KASSERT(txnsegs <= DPAA2_TX_SEGLIMIT, ("%s: too many segments, "
3827 	    "txnsegs (%d) > %d", __func__, txnsegs, DPAA2_TX_SEGLIMIT));
3828 	KASSERT(buf->type == DPAA2_BUF_TX, ("%s: not Tx buffer", __func__));
3829 	KASSERT(buf->tx.sgt_vaddr != NULL, ("%s: S/G table not allocated?",
3830 	    __func__));
3831 
3832 	/* Reset frame descriptor fields. */
3833 	memset(fd, 0, sizeof(*fd));
3834 
3835 	if (__predict_true(txnsegs <= DPAA2_TX_SEGLIMIT)) {
3836 		/* Populate S/G table "tx_data_off" bytes into the buffer. */
3837 		sgt = (struct dpaa2_sg_entry *)((uint8_t *) buf->tx.sgt_vaddr +
3838 		    sc->tx_data_off);
3839 		for (i = 0; i < txnsegs; i++) {
3840 			sgt[i].addr = (uint64_t) txsegs[i].ds_addr;
3841 			sgt[i].len = (uint32_t) txsegs[i].ds_len;
3842 			sgt[i].offset_fmt = 0u;
3843 		}
3844 		sgt[i-1].offset_fmt |= 0x8000u; /* set final entry flag */
3845 
3846 		KASSERT(buf->tx.sgt_paddr == 0, ("%s: sgt_paddr(%#jx) != 0",
3847 		    __func__, buf->tx.sgt_paddr));
3848 
3849 		/* Load S/G table. */
3850 		error = bus_dmamap_load(buf->tx.sgt_dmat, buf->tx.sgt_dmap,
3851 		    buf->tx.sgt_vaddr, DPAA2_TX_SGT_SZ, dpaa2_ni_dmamap_cb,
3852 		    &buf->tx.sgt_paddr, BUS_DMA_NOWAIT);
3853 		if (__predict_false(error != 0)) {
3854 			device_printf(sc->dev, "%s: failed to map S/G table: "
3855 			    "error=%d\n", __func__, error);
3856 			return (error);
3857 		}
3858 		buf->tx.paddr = buf->tx.sgt_paddr;
3859 		buf->tx.vaddr = buf->tx.sgt_vaddr;
3860 		sc->tx_sg_frames++; /* for sysctl(9) */
3861 	} else {
3862 		return (EINVAL);
3863 	}
3864 
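	/*
	 * Encode the channel, Tx ring and Tx buffer indexes into the high
	 * (ADDR_TOK) bits of the FD address ignored by QBMan; the Tx
	 * confirmation handler decodes them to locate this buffer again.
	 */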
3865 	fd->addr =
3866 	    ((uint64_t)(chan->flowid & DPAA2_NI_BUF_CHAN_MASK) <<
3867 		DPAA2_NI_BUF_CHAN_SHIFT) |
3868 	    ((uint64_t)(tx->txid & DPAA2_NI_TX_IDX_MASK) <<
3869 		DPAA2_NI_TX_IDX_SHIFT) |
3870 	    ((uint64_t)(buf->tx.idx & DPAA2_NI_TXBUF_IDX_MASK) <<
3871 		DPAA2_NI_TXBUF_IDX_SHIFT) |
3872 	    (buf->tx.paddr & DPAA2_NI_BUF_ADDR_MASK);
3873 
3874 	fd->data_length = (uint32_t) buf->tx.m->m_pkthdr.len;
3875 	fd->bpid_ivp_bmt = 0;
3876 	fd->offset_fmt_sl = 0x2000u | sc->tx_data_off; /* S/G format, offset */
3877 	fd->ctrl = 0x00800000u;
3878 
3879 	return (0);
3880 }
3881 
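/*
 * Frame descriptor accessors below extract the hardware error bits, data
 * length, frame format and data offset, as well as the software-defined
 * ADDR_TOK indexes (channel, Tx ring, Tx buffer), from an FD.
 */
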
3882 static int
3883 dpaa2_ni_fd_err(struct dpaa2_fd *fd)
3884 {
3885 	return ((fd->ctrl >> DPAA2_NI_FD_ERR_SHIFT) & DPAA2_NI_FD_ERR_MASK);
3886 }
3887 
3888 static uint32_t
3889 dpaa2_ni_fd_data_len(struct dpaa2_fd *fd)
3890 {
3891 	if (dpaa2_ni_fd_short_len(fd))
3892 		return (fd->data_length & DPAA2_NI_FD_LEN_MASK);
3893 
3894 	return (fd->data_length);
3895 }
3896 
3897 static int
3898 dpaa2_ni_fd_chan_idx(struct dpaa2_fd *fd)
3899 {
3900 	return ((((bus_addr_t) fd->addr) >> DPAA2_NI_BUF_CHAN_SHIFT) &
3901 	    DPAA2_NI_BUF_CHAN_MASK);
3902 }
3903 
3904 static int
3905 dpaa2_ni_fd_buf_idx(struct dpaa2_fd *fd)
3906 {
3907 	return ((((bus_addr_t) fd->addr) >> DPAA2_NI_BUF_IDX_SHIFT) &
3908 	    DPAA2_NI_BUF_IDX_MASK);
3909 }
3910 
3911 static int
3912 dpaa2_ni_fd_tx_idx(struct dpaa2_fd *fd)
3913 {
3914 	return ((((bus_addr_t) fd->addr) >> DPAA2_NI_TX_IDX_SHIFT) &
3915 	    DPAA2_NI_TX_IDX_MASK);
3916 }
3917 
3918 static int
3919 dpaa2_ni_fd_txbuf_idx(struct dpaa2_fd *fd)
3920 {
3921 	return ((((bus_addr_t) fd->addr) >> DPAA2_NI_TXBUF_IDX_SHIFT) &
3922 	    DPAA2_NI_TXBUF_IDX_MASK);
3923 }
3924 
3925 static int
3926 dpaa2_ni_fd_format(struct dpaa2_fd *fd)
3927 {
3928 	return ((enum dpaa2_fd_format)((fd->offset_fmt_sl >>
3929 	    DPAA2_NI_FD_FMT_SHIFT) & DPAA2_NI_FD_FMT_MASK));
3930 }
3931 
3932 static bool
3933 dpaa2_ni_fd_short_len(struct dpaa2_fd *fd)
3934 {
3935 	return (((fd->offset_fmt_sl >> DPAA2_NI_FD_SL_SHIFT)
3936 	    & DPAA2_NI_FD_SL_MASK) == 1);
3937 }
3938 
3939 static int
3940 dpaa2_ni_fd_offset(struct dpaa2_fd *fd)
3941 {
3942 	return (fd->offset_fmt_sl & DPAA2_NI_FD_OFFSET_MASK);
3943 }
3944 
3945 /**
3946  * @brief Collect statistics of the network interface.
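 *
 * Each sysctl(9) read opens the resource container and the DPNI object,
 * queries the statistics page associated with the requested OID and reports
 * a single 64-bit counter from it.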
3947  */
3948 static int
3949 dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS)
3950 {
3951 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
3952 	struct dpni_stat *stat = &dpni_stat_sysctls[oidp->oid_number];
3953 	device_t pdev = device_get_parent(sc->dev);
3954 	device_t dev = sc->dev;
3955 	device_t child = dev;
3956 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
3957 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
3958 	struct dpaa2_cmd cmd;
3959 	uint64_t cnt[DPAA2_NI_STAT_COUNTERS];
3960 	uint64_t result = 0;
3961 	uint16_t rc_token, ni_token;
3962 	int error;
3963 
3964 	DPAA2_CMD_INIT(&cmd);
3965 
3966 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
3967 	if (error) {
3968 		device_printf(dev, "%s: failed to open resource container: "
3969 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
3970 		goto exit;
3971 	}
3972 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
3973 	if (error) {
3974 		device_printf(dev, "%s: failed to open network interface: "
3975 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
3976 		goto close_rc;
3977 	}
3978 
3979 	error = DPAA2_CMD_NI_GET_STATISTICS(dev, child, &cmd, stat->page, 0, cnt);
3980 	if (!error) {
3981 		result = cnt[stat->cnt];
3982 	}
3983 
3984 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
3985 close_rc:
3986 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
3987 exit:
3988 	return (sysctl_handle_64(oidp, &result, 0, req));
3989 }
3990 
3991 static int
3992 dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS)
3993 {
3994 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
3995 	uint32_t buf_num = DPAA2_ATOMIC_READ(&sc->buf_num);
3996 
3997 	return (sysctl_handle_32(oidp, &buf_num, 0, req));
3998 }
3999 
4000 static int
4001 dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS)
4002 {
4003 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
4004 	uint32_t buf_free = DPAA2_ATOMIC_READ(&sc->buf_free);
4005 
4006 	return (sysctl_handle_32(oidp, &buf_free, 0, req));
4007 }
4008 
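/**
 * @brief Configure the Rx hash key from a combination of RXH_ flags.
 *
 * Flow distribution requires more than one Rx queue, so EOPNOTSUPP is
 * returned for single-queue configurations.
 */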
4009 static int
4010 dpaa2_ni_set_hash(device_t dev, uint64_t flags)
4011 {
4012 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
4013 	uint64_t key = 0;
4014 	int i;
4015 
4016 	if (sc->attr.num.queues <= 1) {
4017 		return (EOPNOTSUPP);
4018 	}
4019 
4020 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
4021 		if (dist_fields[i].rxnfc_field & flags) {
4022 			key |= dist_fields[i].id;
4023 		}
4024 	}
4025 
4026 	return (dpaa2_ni_set_dist_key(dev, DPAA2_NI_DIST_MODE_HASH, key));
4027 }
4028 
4029 /**
4030  * @brief Set the Rx distribution (hash or flow classification) key;
4031  * "flags" is a combination of RXH_ bits.
4032  */
4033 static int
4034 dpaa2_ni_set_dist_key(device_t dev, enum dpaa2_ni_dist_mode type, uint64_t flags)
4035 {
4036 	device_t pdev = device_get_parent(dev);
4037 	device_t child = dev;
4038 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
4039 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
4040 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
4041 	struct dpkg_profile_cfg cls_cfg;
4042 	struct dpkg_extract *key;
4043 	struct dpaa2_buf *buf = &sc->rxd_kcfg;
4044 	struct dpaa2_cmd cmd;
4045 	uint16_t rc_token, ni_token;
4046 	int i, error = 0;
4047 
4048 	KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not storage buffer",
4049 	    __func__));
4050 	if (__predict_true(buf->store.dmat == NULL)) {
4051 		buf->store.dmat = sc->rxd_dmat;
4052 	}
4053 
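	/*
	 * A key generation profile is assembled in "cls_cfg" first and then
	 * serialized into a DMA-able buffer to be consumed by the firmware.
	 */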
4054 	memset(&cls_cfg, 0, sizeof(cls_cfg));
4055 
4056 	/* Configure extracts according to the given flags. */
4057 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
4058 		key = &cls_cfg.extracts[cls_cfg.num_extracts];
4059 
4060 		if (!(flags & dist_fields[i].id)) {
4061 			continue;
4062 		}
4063 
4064 		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
4065 			device_printf(dev, "%s: failed to add key extraction "
4066 			    "rule\n", __func__);
4067 			return (E2BIG);
4068 		}
4069 
4070 		key->type = DPKG_EXTRACT_FROM_HDR;
4071 		key->extract.from_hdr.prot = dist_fields[i].cls_prot;
4072 		key->extract.from_hdr.type = DPKG_FULL_FIELD;
4073 		key->extract.from_hdr.field = dist_fields[i].cls_field;
4074 		cls_cfg.num_extracts++;
4075 	}
4076 
4077 	error = bus_dmamem_alloc(buf->store.dmat, &buf->store.vaddr,
4078 	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->store.dmap);
4079 	if (error != 0) {
4080 		device_printf(dev, "%s: failed to allocate a buffer for Rx "
4081 		    "traffic distribution key configuration\n", __func__);
4082 		return (error);
4083 	}
4084 
4085 	error = dpaa2_ni_prepare_key_cfg(&cls_cfg, (uint8_t *) buf->store.vaddr);
4086 	if (error != 0) {
4087 		device_printf(dev, "%s: failed to prepare key configuration: "
4088 		    "error=%d\n", __func__, error);
4089 		return (error);
4090 	}
4091 
4092 	/* Prepare for setting the Rx dist. */
4093 	error = bus_dmamap_load(buf->store.dmat, buf->store.dmap,
4094 	    buf->store.vaddr, DPAA2_CLASSIFIER_DMA_SIZE, dpaa2_ni_dmamap_cb,
4095 	    &buf->store.paddr, BUS_DMA_NOWAIT);
4096 	if (error != 0) {
4097 		device_printf(sc->dev, "%s: failed to map a buffer for Rx "
4098 		    "traffic distribution key configuration\n", __func__);
4099 		return (error);
4100 	}
4101 
4102 	if (type == DPAA2_NI_DIST_MODE_HASH) {
4103 		DPAA2_CMD_INIT(&cmd);
4104 
4105 		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
4106 		    &rc_token);
4107 		if (error) {
4108 			device_printf(dev, "%s: failed to open resource "
4109 			    "container: id=%d, error=%d\n", __func__, rcinfo->id,
4110 			    error);
4111 			goto err_exit;
4112 		}
4113 		error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id,
4114 		    &ni_token);
4115 		if (error) {
4116 			device_printf(dev, "%s: failed to open network "
4117 			    "interface: id=%d, error=%d\n", __func__, dinfo->id,
4118 			    error);
4119 			goto close_rc;
4120 		}
4121 
4122 		error = DPAA2_CMD_NI_SET_RX_TC_DIST(dev, child, &cmd,
4123 		    sc->attr.num.queues, 0, DPAA2_NI_DIST_MODE_HASH,
4124 		    buf->store.paddr);
4125 		if (error != 0) {
4126 			device_printf(dev, "%s: failed to set distribution mode "
4127 			    "and size for the traffic class\n", __func__);
4128 		}
4129 
4130 		(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
4131 		    ni_token));
4132 close_rc:
4133 		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
4134 		    rc_token));
4135 	}
4136 
4137 err_exit:
4138 	return (error);
4139 }
4140 
4141 /**
4142  * @brief Prepares extract parameters.
4143  *
4144  * cfg:		The full Key Generation profile to serialize.
4145  * key_cfg_buf:	256 bytes of zeroed memory to be mapped for DMA.
4146  */
4147 static int
4148 dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *cfg, uint8_t *key_cfg_buf)
4149 {
4150 	struct dpni_ext_set_rx_tc_dist *dpni_ext;
4151 	struct dpni_dist_extract *extr;
4152 	int i, j;
4153 
4154 	if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
4155 		return (EINVAL);
4156 
4157 	dpni_ext = (struct dpni_ext_set_rx_tc_dist *) key_cfg_buf;
4158 	dpni_ext->num_extracts = cfg->num_extracts;
4159 
4160 	for (i = 0; i < cfg->num_extracts; i++) {
4161 		extr = &dpni_ext->extracts[i];
4162 
4163 		switch (cfg->extracts[i].type) {
4164 		case DPKG_EXTRACT_FROM_HDR:
4165 			extr->prot = cfg->extracts[i].extract.from_hdr.prot;
4166 			extr->efh_type =
4167 			    cfg->extracts[i].extract.from_hdr.type & 0x0Fu;
4168 			extr->size = cfg->extracts[i].extract.from_hdr.size;
4169 			extr->offset = cfg->extracts[i].extract.from_hdr.offset;
4170 			extr->field = cfg->extracts[i].extract.from_hdr.field;
4171 			extr->hdr_index =
4172 				cfg->extracts[i].extract.from_hdr.hdr_index;
4173 			break;
4174 		case DPKG_EXTRACT_FROM_DATA:
4175 			extr->size = cfg->extracts[i].extract.from_data.size;
4176 			extr->offset =
4177 				cfg->extracts[i].extract.from_data.offset;
4178 			break;
4179 		case DPKG_EXTRACT_FROM_PARSE:
4180 			extr->size = cfg->extracts[i].extract.from_parse.size;
4181 			extr->offset =
4182 				cfg->extracts[i].extract.from_parse.offset;
4183 			break;
4184 		default:
4185 			return (EINVAL);
4186 		}
4187 
4188 		extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
4189 		extr->extract_type = cfg->extracts[i].type & 0x0Fu;
4190 
4191 		for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
4192 			extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
4193 			extr->masks[j].offset =
4194 				cfg->extracts[i].masks[j].offset;
4195 		}
4196 	}
4197 
4198 	return (0);
4199 }
4200 
4201 /**
4202  * @brief Obtain the next dequeue response from the channel storage.
4203  */
4204 static int
4205 dpaa2_ni_chan_storage_next(struct dpaa2_ni_channel *chan, struct dpaa2_dq **dq)
4206 {
4207 	struct dpaa2_buf *buf = &chan->store;
4208 	struct dpaa2_dq *msgs = buf->store.vaddr;
4209 	struct dpaa2_dq *msg = &msgs[chan->store_idx];
4210 	int rc = EINPROGRESS;
4211 
4212 	chan->store_idx++;
4213 
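	/*
	 * Both the EXPIRED and FQEMPTY statuses terminate the current VDQ
	 * command, so the storage index is rewound for the next pull.
	 */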
4214 	if (msg->fdr.desc.stat & DPAA2_DQ_STAT_EXPIRED) {
4215 		rc = EALREADY; /* VDQ command has expired */
4216 		chan->store_idx = 0;
4217 		if (!(msg->fdr.desc.stat & DPAA2_DQ_STAT_VALIDFRAME))
4218 			msg = NULL; /* Null response, FD is invalid */
4219 	}
4220 	if (msg != NULL && (msg->fdr.desc.stat & DPAA2_DQ_STAT_FQEMPTY)) {
4221 		rc = ENOENT; /* FQ is empty */
4222 		chan->store_idx = 0;
4223 	}
4224 
4225 	if (dq != NULL)
4226 		*dq = msg;
4227 
4228 	return (rc);
4229 }
4230 
4231 static device_method_t dpaa2_ni_methods[] = {
4232 	/* Device interface */
4233 	DEVMETHOD(device_probe,		dpaa2_ni_probe),
4234 	DEVMETHOD(device_attach,	dpaa2_ni_attach),
4235 	DEVMETHOD(device_detach,	dpaa2_ni_detach),
4236 
4237 	/* mii via memac_mdio */
4238 	DEVMETHOD(miibus_statchg,	dpaa2_ni_miibus_statchg),
4239 
4240 	DEVMETHOD_END
4241 };
4242 
4243 static driver_t dpaa2_ni_driver = {
4244 	"dpaa2_ni",
4245 	dpaa2_ni_methods,
4246 	sizeof(struct dpaa2_ni_softc),
4247 };
4248 
4249 DRIVER_MODULE(miibus, dpaa2_ni, miibus_driver, 0, 0);
4250 DRIVER_MODULE(dpaa2_ni, dpaa2_rc, dpaa2_ni_driver, 0, 0);
4251 
4252 MODULE_DEPEND(dpaa2_ni, miibus, 1, 1, 1);
4253 #ifdef DEV_ACPI
4254 MODULE_DEPEND(dpaa2_ni, memac_mdio_acpi, 1, 1, 1);
4255 #endif
4256 #ifdef FDT
4257 MODULE_DEPEND(dpaa2_ni, memac_mdio_fdt, 1, 1, 1);
4258 #endif
4259