xref: /freebsd/sys/dev/dpaa2/dpaa2_ni.c (revision b1bebaaba9b9c0ddfe503c43ca8e9e3917ee2c57)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright © 2021-2023 Dmitry Salychev
5  * Copyright © 2022 Mathew McBride
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 /*
31  * The DPAA2 Network Interface (DPNI) driver.
32  *
33  * The DPNI object is a network interface that is configurable to support a wide
34  * range of features from a very basic Ethernet interface up to a
35  * high-functioning network interface. The DPNI supports features that are
36  * expected by standard network stacks, from basic features to offloads.
37  *
38  * DPNIs work with Ethernet traffic, starting with the L2 header. Additional
39  * functions are provided for standard network protocols (L2, L3, L4, etc.).
40  */
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/bus.h>
46 #include <sys/rman.h>
47 #include <sys/module.h>
48 #include <sys/malloc.h>
49 #include <sys/mutex.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <sys/mbuf.h>
54 #include <sys/taskqueue.h>
55 #include <sys/sysctl.h>
56 #include <sys/buf_ring.h>
57 #include <sys/smp.h>
58 #include <sys/proc.h>
59 
60 #include <vm/vm.h>
61 #include <vm/pmap.h>
62 
63 #include <machine/bus.h>
64 #include <machine/resource.h>
65 #include <machine/atomic.h>
66 #include <machine/vmparam.h>
67 
68 #include <net/ethernet.h>
69 #include <net/bpf.h>
70 #include <net/if.h>
71 #include <net/if_dl.h>
72 #include <net/if_media.h>
73 #include <net/if_types.h>
74 #include <net/if_var.h>
75 
76 #include <dev/pci/pcivar.h>
77 #include <dev/mii/mii.h>
78 #include <dev/mii/miivar.h>
79 #include <dev/mdio/mdio.h>
80 
81 #include "opt_acpi.h"
82 #include "opt_platform.h"
83 
84 #include "pcib_if.h"
85 #include "pci_if.h"
86 #include "miibus_if.h"
87 #include "memac_mdio_if.h"
88 
89 #include "dpaa2_types.h"
90 #include "dpaa2_mc.h"
91 #include "dpaa2_mc_if.h"
92 #include "dpaa2_mcp.h"
93 #include "dpaa2_swp.h"
94 #include "dpaa2_swp_if.h"
95 #include "dpaa2_cmd_if.h"
96 #include "dpaa2_ni.h"
97 #include "dpaa2_channel.h"
98 #include "dpaa2_buf.h"
99 
100 #define BIT(x)			(1ul << (x))
101 #define WRIOP_VERSION(x, y, z)	((x) << 10 | (y) << 5 | (z) << 0)
102 #define ARRAY_SIZE(a)		(sizeof(a) / sizeof((a)[0]))
103 
104 /* Frame Dequeue Response status bits. */
105 #define IS_NULL_RESPONSE(stat)	((((stat) >> 4) & 1) == 0)
106 
107 #define	ALIGN_UP(x, y)		roundup2((x), (y))
108 #define	ALIGN_DOWN(x, y)	rounddown2((x), (y))
109 #define CACHE_LINE_ALIGN(x)	ALIGN_UP((x), CACHE_LINE_SIZE)
110 
111 #define DPNI_LOCK(__sc) do {			\
112 	mtx_assert(&(__sc)->lock, MA_NOTOWNED);	\
113 	mtx_lock(&(__sc)->lock);		\
114 } while (0)
115 #define	DPNI_UNLOCK(__sc) do {			\
116 	mtx_assert(&(__sc)->lock, MA_OWNED);	\
117 	mtx_unlock(&(__sc)->lock);		\
118 } while (0)
119 #define	DPNI_LOCK_ASSERT(__sc) do {		\
120 	mtx_assert(&(__sc)->lock, MA_OWNED);	\
121 } while (0)
122 
123 #define DPAA2_TX_RING(sc, chan, tc) \
124 	(&(sc)->channels[(chan)]->txc_queue.tx_rings[(tc)])
125 
126 MALLOC_DEFINE(M_DPAA2_TXB, "dpaa2_txb", "DPAA2 DMA-mapped buffer (Tx)");
127 
128 /*
129  * How many times channel cleanup routine will be repeated if the RX or TX
130  * budget was depleted.
131  */
132 #define DPAA2_CLEAN_BUDGET	64 /* sysctl(9)? */
133 /* TX/RX budget for the channel cleanup task */
134 #define DPAA2_TX_BUDGET		128 /* sysctl(9)? */
135 #define DPAA2_RX_BUDGET		256 /* sysctl(9)? */
136 
137 #define DPNI_IRQ_INDEX		0 /* Index of the only DPNI IRQ. */
138 #define DPNI_IRQ_LINK_CHANGED	1 /* Link state changed */
139 #define DPNI_IRQ_EP_CHANGED	2 /* DPAA2 endpoint dis/connected */
140 
141 /* Default maximum RX frame length w/o CRC. */
142 #define	DPAA2_ETH_MFL		(ETHER_MAX_LEN_JUMBO + ETHER_VLAN_ENCAP_LEN - \
143     ETHER_CRC_LEN)
144 
145 /* Minimally supported version of the DPNI API. */
146 #define DPNI_VER_MAJOR		7
147 #define DPNI_VER_MINOR		0
148 
149 /* Rx/Tx buffers configuration. */
150 #define BUF_ALIGN_V1		256 /* WRIOP v1.0.0 limitation */
151 #define BUF_ALIGN		64
152 #define BUF_SWA_SIZE		64  /* SW annotation size */
153 #define BUF_RX_HWA_SIZE		64  /* HW annotation size */
154 #define BUF_TX_HWA_SIZE		128 /* HW annotation size */
155 
156 #define DPAA2_RX_BUFRING_SZ	(4096u)
157 #define DPAA2_RXE_BUFRING_SZ	(1024u)
158 #define DPAA2_TXC_BUFRING_SZ	(4096u)
159 #define DPAA2_TX_SEGLIMIT	(16u) /* arbitrary number */
160 #define DPAA2_TX_SEG_SZ		(PAGE_SIZE)
161 #define DPAA2_TX_SEGS_MAXSZ	(DPAA2_TX_SEGLIMIT * DPAA2_TX_SEG_SZ)
162 #define DPAA2_TX_SGT_SZ		(PAGE_SIZE) /* bytes */
163 
164 /* Size of a buffer to keep a QoS table key configuration. */
165 #define ETH_QOS_KCFG_BUF_SIZE	(PAGE_SIZE)
166 
167 /* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */
168 #define DPAA2_CLASSIFIER_DMA_SIZE (PAGE_SIZE)
169 
170 /* Buffers layout options. */
171 #define BUF_LOPT_TIMESTAMP	0x1
172 #define BUF_LOPT_PARSER_RESULT	0x2
173 #define BUF_LOPT_FRAME_STATUS	0x4
174 #define BUF_LOPT_PRIV_DATA_SZ	0x8
175 #define BUF_LOPT_DATA_ALIGN	0x10
176 #define BUF_LOPT_DATA_HEAD_ROOM	0x20
177 #define BUF_LOPT_DATA_TAIL_ROOM	0x40
178 
179 #define DPAA2_NI_BUF_ADDR_MASK	(0x1FFFFFFFFFFFFul) /* 49-bit addresses max. */
180 #define DPAA2_NI_BUF_CHAN_MASK	(0xFu)
181 #define DPAA2_NI_BUF_CHAN_SHIFT	(60)
182 #define DPAA2_NI_BUF_IDX_MASK	(0x7FFFu)
183 #define DPAA2_NI_BUF_IDX_SHIFT	(49)
184 #define DPAA2_NI_TX_IDX_MASK	(0x7u)
185 #define DPAA2_NI_TX_IDX_SHIFT	(57)
186 #define DPAA2_NI_TXBUF_IDX_MASK	(0xFFu)
187 #define DPAA2_NI_TXBUF_IDX_SHIFT (49)
188 
189 #define DPAA2_NI_FD_FMT_MASK	(0x3u)
190 #define DPAA2_NI_FD_FMT_SHIFT	(12)
191 #define DPAA2_NI_FD_ERR_MASK	(0xFFu)
192 #define DPAA2_NI_FD_ERR_SHIFT	(0)
193 #define DPAA2_NI_FD_SL_MASK	(0x1u)
194 #define DPAA2_NI_FD_SL_SHIFT	(14)
195 #define DPAA2_NI_FD_LEN_MASK	(0x3FFFFu)
196 #define DPAA2_NI_FD_OFFSET_MASK (0x0FFFu)
197 
198 /* Enables TCAM for Flow Steering and QoS look-ups. */
199 #define DPNI_OPT_HAS_KEY_MASKING 0x10
200 
201 /* Unique IDs for the supported Rx classification header fields. */
202 #define DPAA2_ETH_DIST_ETHDST	BIT(0)
203 #define DPAA2_ETH_DIST_ETHSRC	BIT(1)
204 #define DPAA2_ETH_DIST_ETHTYPE	BIT(2)
205 #define DPAA2_ETH_DIST_VLAN	BIT(3)
206 #define DPAA2_ETH_DIST_IPSRC	BIT(4)
207 #define DPAA2_ETH_DIST_IPDST	BIT(5)
208 #define DPAA2_ETH_DIST_IPPROTO	BIT(6)
209 #define DPAA2_ETH_DIST_L4SRC	BIT(7)
210 #define DPAA2_ETH_DIST_L4DST	BIT(8)
211 #define DPAA2_ETH_DIST_ALL	(~0ULL)
212 
213 /* L3-L4 network traffic flow hash options. */
214 #define	RXH_L2DA		(1 << 1)
215 #define	RXH_VLAN		(1 << 2)
216 #define	RXH_L3_PROTO		(1 << 3)
217 #define	RXH_IP_SRC		(1 << 4)
218 #define	RXH_IP_DST		(1 << 5)
219 #define	RXH_L4_B_0_1		(1 << 6) /* src port in case of TCP/UDP/SCTP */
220 #define	RXH_L4_B_2_3		(1 << 7) /* dst port in case of TCP/UDP/SCTP */
221 #define	RXH_DISCARD		(1 << 31)
222 
223 /* Transmit checksum offload */
224 #define DPAA2_CSUM_TX_OFFLOAD	(CSUM_IP | CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6)
225 
226 /* Default Rx hash options, set during attaching. */
227 #define DPAA2_RXH_DEFAULT	(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
228 
229 MALLOC_DEFINE(M_DPAA2_NI, "dpaa2_ni", "DPAA2 Network Interface");
230 
231 /*
232  * DPAA2 Network Interface resource specification.
233  *
234  * NOTE: Don't forget to update macros in dpaa2_ni.h in case of any changes in
235  *       the specification!
236  */
struct resource_spec dpaa2_ni_spec[] = {
	/*
	 * DPMCP resources.
	 *
	 * NOTE: MC command portals (MCPs) are used to send commands to, and
	 *	 receive responses from, the MC firmware. One portal per DPNI.
	 */
	{ DPAA2_DEV_MCP, DPAA2_NI_MCP_RID(0),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	/*
	 * DPIO resources (software portals).
	 *
	 * NOTE: One per running core. While DPIOs are the source of data
	 *	 availability interrupts, the DPCONs are used to identify the
	 *	 network interface that has produced ingress data to that core.
	 *
	 * Only the first DPIO is mandatory; the rest are claimed when present
	 * (RF_OPTIONAL), up to 16 cores.
	 */
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(0),    RF_ACTIVE | RF_SHAREABLE },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(1),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(2),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(3),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(4),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(5),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(6),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(7),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(8),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(9),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(10),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(11),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(12),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(13),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(14),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(15),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	/*
	 * DPBP resources (buffer pools).
	 *
	 * NOTE: One per network interface.
	 */
	{ DPAA2_DEV_BP,  DPAA2_NI_BP_RID(0),   RF_ACTIVE },
	/*
	 * DPCON resources (channels).
	 *
	 * NOTE: One DPCON per core where Rx or Tx confirmation traffic to be
	 *	 distributed to.
	 * NOTE: Since it is necessary to distinguish between traffic from
	 *	 different network interfaces arriving on the same core, the
	 *	 DPCONs must be private to the DPNIs.
	 *
	 * Only the first DPCON is mandatory; additional ones are optional,
	 * mirroring the per-core DPIO entries above.
	 */
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(0),   RF_ACTIVE },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(1),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(2),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(3),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(4),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(5),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(6),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(7),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(8),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(9),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(10),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(11),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(12),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(13),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(14),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(15),  RF_ACTIVE | RF_OPTIONAL },

	RESOURCE_SPEC_END
};
302 
303 /* Supported header fields for Rx hash distribution key */
/*
 * Supported header fields for the Rx hash distribution key.
 *
 * Each entry maps an RXH_* flow hash flag (rxnfc_field) to the network
 * protocol/field pair understood by the key generator, with .size giving the
 * extracted field length in bytes.  Entries without an .rxnfc_field (ETH
 * source address, ethertype) default it to 0, i.e. they are not selectable
 * through the RXH_* flags.
 */
static const struct dpaa2_eth_dist_fields dist_fields[] = {
	{
		/* L2 header: destination MAC address (6 bytes) */
		.rxnfc_field = RXH_L2DA,
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_DA,
		.id = DPAA2_ETH_DIST_ETHDST,
		.size = 6,
	}, {
		/* L2 header: source MAC address (6 bytes) */
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_SA,
		.id = DPAA2_ETH_DIST_ETHSRC,
		.size = 6,
	}, {
		/* This is the last ethertype field parsed:
		 * depending on frame format, it can be the MAC ethertype
		 * or the VLAN etype.
		 */
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_TYPE,
		.id = DPAA2_ETH_DIST_ETHTYPE,
		.size = 2,
	}, {
		/* VLAN header: TCI (2 bytes) */
		.rxnfc_field = RXH_VLAN,
		.cls_prot = NET_PROT_VLAN,
		.cls_field = NH_FLD_VLAN_TCI,
		.id = DPAA2_ETH_DIST_VLAN,
		.size = 2,
	}, {
		/* IP header: source address (4 bytes) */
		.rxnfc_field = RXH_IP_SRC,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_SRC,
		.id = DPAA2_ETH_DIST_IPSRC,
		.size = 4,
	}, {
		/* IP header: destination address (4 bytes) */
		.rxnfc_field = RXH_IP_DST,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_DST,
		.id = DPAA2_ETH_DIST_IPDST,
		.size = 4,
	}, {
		/* IP header: protocol number (1 byte) */
		.rxnfc_field = RXH_L3_PROTO,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_PROTO,
		.id = DPAA2_ETH_DIST_IPPROTO,
		.size = 1,
	}, {
		/* Using UDP ports, this is functionally equivalent to raw
		 * byte pairs from L4 header.
		 */
		.rxnfc_field = RXH_L4_B_0_1,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_SRC,
		.id = DPAA2_ETH_DIST_L4SRC,
		.size = 2,
	}, {
		/* L4 destination port (2 bytes) */
		.rxnfc_field = RXH_L4_B_2_3,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_DST,
		.id = DPAA2_ETH_DIST_L4DST,
		.size = 2,
	},
};
369 
/*
 * Descriptors for the DPNI statistics exported via sysctl(9).
 *
 * NOTE(review): .page/.cnt presumably select a statistics page and a counter
 * slot within it when querying the MC firmware — confirm against the
 * dpaa2_ni_collect_stats() handler (not visible in this chunk).
 */
static struct dpni_stat {
	int	 page;	/* statistics page index */
	int	 cnt;	/* counter index within the page */
	char	*name;	/* sysctl node name */
	char	*desc;	/* human-readable sysctl description */
} dpni_stat_sysctls[DPAA2_NI_STAT_SYSCTLS] = {
	/* PAGE, COUNTER, NAME, DESCRIPTION */
	{  0, 0, "in_all_frames",	"All accepted ingress frames" },
	{  0, 1, "in_all_bytes",	"Bytes in all accepted ingress frames" },
	{  0, 2, "in_multi_frames",	"Multicast accepted ingress frames" },
	{  1, 0, "eg_all_frames",	"All egress frames transmitted" },
	{  1, 1, "eg_all_bytes",	"Bytes in all frames transmitted" },
	{  1, 2, "eg_multi_frames",	"Multicast egress frames transmitted" },
	{  2, 0, "in_filtered_frames",	"All ingress frames discarded due to "
	   				"filtering" },
	{  2, 1, "in_discarded_frames",	"All frames discarded due to errors" },
	{  2, 2, "in_nobuf_discards",	"Discards on ingress side due to buffer "
	   				"depletion in DPNI buffer pools" },
};
389 
/*
 * Per-call context used while assembling received frames.
 *
 * NOTE(review): based on the field names, head/tail appear to track an mbuf
 * chain under construction across dpaa2_ni_rx() calls — confirm against the
 * Rx path (dpaa2_ni_rx() is not visible in this chunk).
 */
struct dpaa2_ni_rx_ctx {
	struct mbuf	*head;	/* first mbuf of the accumulated chain */
	struct mbuf	*tail;	/* last mbuf of the accumulated chain */
	int		 cnt;	/* number of frames accumulated so far */
	bool		 last;	/* no more frames expected in this batch */
};
396 
397 /* Device interface */
398 static int dpaa2_ni_probe(device_t);
399 static int dpaa2_ni_attach(device_t);
400 static int dpaa2_ni_detach(device_t);
401 
402 /* DPAA2 network interface setup and configuration */
403 static int dpaa2_ni_setup(device_t);
404 static int dpaa2_ni_setup_channels(device_t);
405 static int dpaa2_ni_bind(device_t);
406 static int dpaa2_ni_setup_rx_dist(device_t);
407 static int dpaa2_ni_setup_irqs(device_t);
408 static int dpaa2_ni_setup_msi(struct dpaa2_ni_softc *);
409 static int dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *);
410 static int dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *);
411 static int dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *);
412 static int dpaa2_ni_setup_dma(struct dpaa2_ni_softc *);
413 
414 /* Tx/Rx flow configuration */
415 static int dpaa2_ni_setup_rx_flow(device_t, struct dpaa2_ni_fq *);
416 static int dpaa2_ni_setup_tx_flow(device_t, struct dpaa2_ni_fq *);
417 static int dpaa2_ni_setup_rx_err_flow(device_t, struct dpaa2_ni_fq *);
418 
419 /* Configuration subroutines */
420 static int dpaa2_ni_set_buf_layout(device_t);
421 static int dpaa2_ni_set_pause_frame(device_t);
422 static int dpaa2_ni_set_qos_table(device_t);
423 static int dpaa2_ni_set_mac_addr(device_t);
424 static int dpaa2_ni_set_hash(device_t, uint64_t);
425 static int dpaa2_ni_set_dist_key(device_t, enum dpaa2_ni_dist_mode, uint64_t);
426 
427 /* Frame descriptor routines */
428 static int dpaa2_ni_build_fd(struct dpaa2_ni_softc *, struct dpaa2_ni_tx_ring *,
429     struct dpaa2_buf *, bus_dma_segment_t *, int, struct dpaa2_fd *);
430 static int dpaa2_ni_fd_err(struct dpaa2_fd *);
431 static uint32_t dpaa2_ni_fd_data_len(struct dpaa2_fd *);
432 static int dpaa2_ni_fd_format(struct dpaa2_fd *);
433 static bool dpaa2_ni_fd_short_len(struct dpaa2_fd *);
434 static int dpaa2_ni_fd_offset(struct dpaa2_fd *);
435 
436 /* Various subroutines */
437 static int dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *, uint16_t, uint16_t);
438 static int dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *, uint8_t *);
439 
440 /* Network interface routines */
441 static void dpaa2_ni_init(void *);
442 static int  dpaa2_ni_transmit(if_t , struct mbuf *);
443 static void dpaa2_ni_qflush(if_t );
444 static int  dpaa2_ni_ioctl(if_t , u_long, caddr_t);
445 static int  dpaa2_ni_update_mac_filters(if_t );
446 static u_int dpaa2_ni_add_maddr(void *, struct sockaddr_dl *, u_int);
447 
448 /* Interrupt handlers */
449 static void dpaa2_ni_intr(void *);
450 
451 /* MII handlers */
452 static void dpaa2_ni_miibus_statchg(device_t);
453 static int  dpaa2_ni_media_change(if_t );
454 static void dpaa2_ni_media_status(if_t , struct ifmediareq *);
455 static void dpaa2_ni_media_tick(void *);
456 
457 /* Tx/Rx routines. */
458 static int dpaa2_ni_rx_cleanup(struct dpaa2_channel *);
459 static int dpaa2_ni_tx_cleanup(struct dpaa2_channel *);
460 static void dpaa2_ni_tx(struct dpaa2_ni_softc *, struct dpaa2_channel *,
461     struct dpaa2_ni_tx_ring *, struct mbuf *);
462 static void dpaa2_ni_cleanup_task(void *, int);
463 
464 /* Tx/Rx subroutines */
465 static int dpaa2_ni_consume_frames(struct dpaa2_channel *, struct dpaa2_ni_fq **,
466     uint32_t *);
467 static int dpaa2_ni_rx(struct dpaa2_channel *, struct dpaa2_ni_fq *,
468     struct dpaa2_fd *, struct dpaa2_ni_rx_ctx *);
469 static int dpaa2_ni_rx_err(struct dpaa2_channel *, struct dpaa2_ni_fq *,
470     struct dpaa2_fd *);
471 static int dpaa2_ni_tx_conf(struct dpaa2_channel *, struct dpaa2_ni_fq *,
472     struct dpaa2_fd *);
473 
474 /* sysctl(9) */
475 static int dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS);
476 static int dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS);
477 static int dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS);
478 
479 static int
480 dpaa2_ni_probe(device_t dev)
481 {
482 	/* DPNI device will be added by a parent resource container itself. */
483 	device_set_desc(dev, "DPAA2 Network Interface");
484 	return (BUS_PROBE_DEFAULT);
485 }
486 
/**
 * @brief Attach the DPNI device.
 *
 * Allocates DPAA2 resources (MCP, DPIOs, DPBP, DPCONs), creates and
 * configures the network interface, opens the resource container and the
 * DPNI object via MC commands, and performs the channel/flow/IRQ/sysctl
 * setup before attaching to ether(4).
 *
 * Returns 0 on success or ENXIO on any failure (the specific error code is
 * logged but not propagated to the caller).
 *
 * NOTE(review): the error paths (close_ni/close_rc/err_exit) only close the
 * MC objects; the ifp, the mutex, the bus resources and the taskqueue
 * allocated earlier are not released here — verify this is handled (or
 * intentionally leaked) given dpaa2_ni_detach() is a stub.
 */
static int
dpaa2_ni_attach(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	device_t mcp_dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *mcp_dinfo;
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	if_t ifp;
	char tq_name[32];
	int error;

	/* Initialize softc state before anything can fail. */
	sc->dev = dev;
	sc->ifp = NULL;
	sc->miibus = NULL;
	sc->mii = NULL;
	sc->media_status = 0;
	sc->if_flags = 0;
	sc->link_state = LINK_STATE_UNKNOWN;
	sc->buf_align = 0;

	/* For debug purposes only! */
	sc->rx_anomaly_frames = 0;
	sc->rx_single_buf_frames = 0;
	sc->rx_sg_buf_frames = 0;
	sc->rx_enq_rej_frames = 0;
	sc->rx_ieoi_err_frames = 0;
	sc->tx_single_buf_frames = 0;
	sc->tx_sg_frames = 0;

	DPAA2_ATOMIC_XCHG(&sc->buf_num, 0);
	DPAA2_ATOMIC_XCHG(&sc->buf_free, 0);

	sc->rxd_dmat = NULL;
	sc->qos_dmat = NULL;

	sc->qos_kcfg.dmap = NULL;
	sc->qos_kcfg.paddr = 0;
	sc->qos_kcfg.vaddr = NULL;

	sc->rxd_kcfg.dmap = NULL;
	sc->rxd_kcfg.paddr = 0;
	sc->rxd_kcfg.vaddr = NULL;

	sc->mac.dpmac_id = 0;
	sc->mac.phy_dev = NULL;
	memset(sc->mac.addr, 0, ETHER_ADDR_LEN);

	/* Allocate MCP/DPIO/DPBP/DPCON resources per dpaa2_ni_spec. */
	error = bus_alloc_resources(sc->dev, dpaa2_ni_spec, sc->res);
	if (error) {
		device_printf(dev, "%s: failed to allocate resources: "
		    "error=%d\n", __func__, error);
		goto err_exit;
	}

	/* Obtain MC portal. */
	mcp_dev = (device_t) rman_get_start(sc->res[DPAA2_NI_MCP_RID(0)]);
	mcp_dinfo = device_get_ivars(mcp_dev);
	dinfo->portal = mcp_dinfo->portal;

	mtx_init(&sc->lock, device_get_nameunit(dev), "dpaa2_ni", MTX_DEF);

	/* Allocate network interface */
	ifp = if_alloc(IFT_ETHER);
	sc->ifp = ifp;
	if_initname(ifp, DPAA2_NI_IFNAME, device_get_unit(sc->dev));

	if_setsoftc(ifp, sc);
	if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST);
	if_setinitfn(ifp, dpaa2_ni_init);
	if_setioctlfn(ifp, dpaa2_ni_ioctl);
	if_settransmitfn(ifp, dpaa2_ni_transmit);
	if_setqflushfn(ifp, dpaa2_ni_qflush);

	/* Advertise IPv4/IPv6 checksum offload and jumbo/VLAN MTU support. */
	if_sethwassist(sc->ifp, DPAA2_CSUM_TX_OFFLOAD);
	if_setcapabilities(ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM |
	    IFCAP_HWCSUM_IPV6 | IFCAP_JUMBO_MTU);
	if_setcapenable(ifp, if_getcapabilities(ifp));

	DPAA2_CMD_INIT(&cmd);

	/* Open resource container and network interface object. */
	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	bzero(tq_name, sizeof(tq_name));
	snprintf(tq_name, sizeof(tq_name), "%s_tqbp", device_get_nameunit(dev));

	/*
	 * XXX-DSL: Release new buffers on Buffer Pool State Change Notification
	 *          (BPSCN) returned as a result to the VDQ command instead.
	 *          It is similar to CDAN processed in dpaa2_io_intr().
	 */
	/* Create a taskqueue thread to release new buffers to the pool. */
	sc->bp_taskq = taskqueue_create(tq_name, M_WAITOK,
	    taskqueue_thread_enqueue, &sc->bp_taskq);
	taskqueue_start_threads(&sc->bp_taskq, 1, PI_NET, "%s", tq_name);

	/* sc->cleanup_taskq = taskqueue_create("dpaa2_ch cleanup", M_WAITOK, */
	/*     taskqueue_thread_enqueue, &sc->cleanup_taskq); */
	/* taskqueue_start_threads(&sc->cleanup_taskq, 1, PI_NET, */
	/*     "dpaa2_ch cleanup"); */

	/* Configure the DPNI object and its queues/channels/IRQs/sysctls. */
	error = dpaa2_ni_setup(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup DPNI: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_channels(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup QBMan channels: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	error = dpaa2_ni_bind(dev);
	if (error) {
		device_printf(dev, "%s: failed to bind DPNI: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_irqs(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup IRQs: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_sysctls(sc);
	if (error) {
		device_printf(dev, "%s: failed to setup sysctls: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_if_caps(sc);
	if (error) {
		device_printf(dev, "%s: failed to setup interface capabilities: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	/* MAC address was obtained during dpaa2_ni_setup(). */
	ether_ifattach(sc->ifp, sc->mac.addr);
	callout_init(&sc->mii_callout, 0);

	return (0);

close_ni:
	DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (ENXIO);
}
654 
655 static void
656 dpaa2_ni_fixed_media_status(if_t ifp, struct ifmediareq* ifmr)
657 {
658 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
659 
660 	DPNI_LOCK(sc);
661 	ifmr->ifm_count = 0;
662 	ifmr->ifm_mask = 0;
663 	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
664 	ifmr->ifm_current = ifmr->ifm_active =
665 	    sc->fixed_ifmedia.ifm_cur->ifm_media;
666 
667 	/*
668 	 * In non-PHY usecases, we need to signal link state up, otherwise
669 	 * certain things requiring a link event (e.g async DHCP client) from
670 	 * devd do not happen.
671 	 */
672 	if (if_getlinkstate(ifp) == LINK_STATE_UNKNOWN) {
673 		if_link_state_change(ifp, LINK_STATE_UP);
674 	}
675 
676 	/*
677 	 * TODO: Check the status of the link partner (DPMAC, DPNI or other) and
678 	 * reset if down. This is different to the DPAA2_MAC_LINK_TYPE_PHY as
679 	 * the MC firmware sets the status, instead of us telling the MC what
680 	 * it is.
681 	 */
682 	DPNI_UNLOCK(sc);
683 
684 	return;
685 }
686 
687 static void
688 dpaa2_ni_setup_fixed_link(struct dpaa2_ni_softc *sc)
689 {
690 	/*
691 	 * FIXME: When the DPNI is connected to a DPMAC, we can get the
692 	 * 'apparent' speed from it.
693 	 */
694 	sc->fixed_link = true;
695 
696 	ifmedia_init(&sc->fixed_ifmedia, 0, dpaa2_ni_media_change,
697 		     dpaa2_ni_fixed_media_status);
698 	ifmedia_add(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL);
699 	ifmedia_set(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T);
700 }
701 
static int
dpaa2_ni_detach(device_t dev)
{
	/*
	 * TBD: no teardown is performed yet. Everything created in
	 * dpaa2_ni_attach() (bus resources, ifp, mutex, bp_taskq, open MC
	 * objects) remains allocated, so detaching the driver currently
	 * leaks them.
	 */
	return (0);
}
708 
709 /**
710  * @brief Configure DPAA2 network interface object.
711  */
712 static int
713 dpaa2_ni_setup(device_t dev)
714 {
715 	device_t pdev = device_get_parent(dev);
716 	device_t child = dev;
717 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
718 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
719 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
720 	struct dpaa2_ep_desc ep1_desc, ep2_desc; /* endpoint descriptors */
721 	struct dpaa2_cmd cmd;
722 	uint8_t eth_bca[ETHER_ADDR_LEN]; /* broadcast physical address */
723 	uint16_t rc_token, ni_token, mac_token;
724 	struct dpaa2_mac_attr attr;
725 	enum dpaa2_mac_link_type link_type;
726 	uint32_t link;
727 	int error;
728 
729 	DPAA2_CMD_INIT(&cmd);
730 
731 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
732 	if (error) {
733 		device_printf(dev, "%s: failed to open resource container: "
734 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
735 		goto err_exit;
736 	}
737 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
738 	if (error) {
739 		device_printf(dev, "%s: failed to open network interface: "
740 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
741 		goto close_rc;
742 	}
743 
744 	/* Check if we can work with this DPNI object. */
745 	error = DPAA2_CMD_NI_GET_API_VERSION(dev, child, &cmd, &sc->api_major,
746 	    &sc->api_minor);
747 	if (error) {
748 		device_printf(dev, "%s: failed to get DPNI API version\n",
749 		    __func__);
750 		goto close_ni;
751 	}
752 	if (dpaa2_ni_cmp_api_version(sc, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
753 		device_printf(dev, "%s: DPNI API version %u.%u not supported, "
754 		    "need >= %u.%u\n", __func__, sc->api_major, sc->api_minor,
755 		    DPNI_VER_MAJOR, DPNI_VER_MINOR);
756 		error = ENODEV;
757 		goto close_ni;
758 	}
759 
760 	/* Reset the DPNI object. */
761 	error = DPAA2_CMD_NI_RESET(dev, child, &cmd);
762 	if (error) {
763 		device_printf(dev, "%s: failed to reset DPNI: id=%d\n",
764 		    __func__, dinfo->id);
765 		goto close_ni;
766 	}
767 
768 	/* Obtain attributes of the DPNI object. */
769 	error = DPAA2_CMD_NI_GET_ATTRIBUTES(dev, child, &cmd, &sc->attr);
770 	if (error) {
771 		device_printf(dev, "%s: failed to obtain DPNI attributes: "
772 		    "id=%d\n", __func__, dinfo->id);
773 		goto close_ni;
774 	}
775 	if (bootverbose) {
776 		device_printf(dev, "\toptions=0x%#x queues=%d tx_channels=%d "
777 		    "wriop_version=%#x\n", sc->attr.options, sc->attr.num.queues,
778 		    sc->attr.num.channels, sc->attr.wriop_ver);
779 		device_printf(dev, "\ttraffic classes: rx=%d tx=%d "
780 		    "cgs_groups=%d\n", sc->attr.num.rx_tcs, sc->attr.num.tx_tcs,
781 		    sc->attr.num.cgs);
782 		device_printf(dev, "\ttable entries: mac=%d vlan=%d qos=%d "
783 		    "fs=%d\n", sc->attr.entries.mac, sc->attr.entries.vlan,
784 		    sc->attr.entries.qos, sc->attr.entries.fs);
785 		device_printf(dev, "\tkey sizes: qos=%d fs=%d\n",
786 		    sc->attr.key_size.qos, sc->attr.key_size.fs);
787 	}
788 
789 	/* Configure buffer layouts of the DPNI queues. */
790 	error = dpaa2_ni_set_buf_layout(dev);
791 	if (error) {
792 		device_printf(dev, "%s: failed to configure buffer layout\n",
793 		    __func__);
794 		goto close_ni;
795 	}
796 
797 	/* Configure DMA resources. */
798 	error = dpaa2_ni_setup_dma(sc);
799 	if (error) {
800 		device_printf(dev, "%s: failed to setup DMA\n", __func__);
801 		goto close_ni;
802 	}
803 
804 	/* Setup link between DPNI and an object it's connected to. */
805 	ep1_desc.obj_id = dinfo->id;
806 	ep1_desc.if_id = 0; /* DPNI has the only endpoint */
807 	ep1_desc.type = dinfo->dtype;
808 
809 	error = DPAA2_CMD_RC_GET_CONN(dev, child, DPAA2_CMD_TK(&cmd, rc_token),
810 	    &ep1_desc, &ep2_desc, &link);
811 	if (error) {
812 		device_printf(dev, "%s: failed to obtain an object DPNI is "
813 		    "connected to: error=%d\n", __func__, error);
814 	} else {
815 		device_printf(dev, "connected to %s (id=%d)\n",
816 		    dpaa2_ttos(ep2_desc.type), ep2_desc.obj_id);
817 
818 		error = dpaa2_ni_set_mac_addr(dev);
819 		if (error) {
820 			device_printf(dev, "%s: failed to set MAC address: "
821 			    "error=%d\n", __func__, error);
822 		}
823 
824 		if (ep2_desc.type == DPAA2_DEV_MAC) {
825 			/*
826 			 * This is the simplest case when DPNI is connected to
827 			 * DPMAC directly.
828 			 */
829 			sc->mac.dpmac_id = ep2_desc.obj_id;
830 
831 			link_type = DPAA2_MAC_LINK_TYPE_NONE;
832 
833 			/*
834 			 * Need to determine if DPMAC type is PHY (attached to
835 			 * conventional MII PHY) or FIXED (usually SFP/SerDes,
836 			 * link state managed by MC firmware).
837 			 */
838 			error = DPAA2_CMD_MAC_OPEN(sc->dev, child,
839 			    DPAA2_CMD_TK(&cmd, rc_token), sc->mac.dpmac_id,
840 			    &mac_token);
841 			/*
842 			 * Under VFIO, the DPMAC might be sitting in another
843 			 * container (DPRC) we don't have access to.
844 			 * Assume DPAA2_MAC_LINK_TYPE_FIXED if this is
845 			 * the case.
846 			 */
847 			if (error) {
848 				device_printf(dev, "%s: failed to open "
849 				    "connected DPMAC: %d (assuming in other DPRC)\n", __func__,
850 				    sc->mac.dpmac_id);
851 				link_type = DPAA2_MAC_LINK_TYPE_FIXED;
852 			} else {
853 				error = DPAA2_CMD_MAC_GET_ATTRIBUTES(dev, child,
854 				    &cmd, &attr);
855 				if (error) {
856 					device_printf(dev, "%s: failed to get "
857 					    "DPMAC attributes: id=%d, "
858 					    "error=%d\n", __func__, dinfo->id,
859 					    error);
860 				} else {
861 					link_type = attr.link_type;
862 				}
863 			}
864 			DPAA2_CMD_MAC_CLOSE(dev, child, &cmd);
865 
866 			if (link_type == DPAA2_MAC_LINK_TYPE_FIXED) {
867 				device_printf(dev, "connected DPMAC is in FIXED "
868 				    "mode\n");
869 				dpaa2_ni_setup_fixed_link(sc);
870 			} else if (link_type == DPAA2_MAC_LINK_TYPE_PHY) {
871 				device_printf(dev, "connected DPMAC is in PHY "
872 				    "mode\n");
873 				error = DPAA2_MC_GET_PHY_DEV(dev,
874 				    &sc->mac.phy_dev, sc->mac.dpmac_id);
875 				if (error == 0) {
876 					error = MEMAC_MDIO_SET_NI_DEV(
877 					    sc->mac.phy_dev, dev);
878 					if (error != 0) {
879 						device_printf(dev, "%s: failed "
880 						    "to set dpni dev on memac "
881 						    "mdio dev %s: error=%d\n",
882 						    __func__,
883 						    device_get_nameunit(
884 						    sc->mac.phy_dev), error);
885 					}
886 				}
887 				if (error == 0) {
888 					error = MEMAC_MDIO_GET_PHY_LOC(
889 					    sc->mac.phy_dev, &sc->mac.phy_loc);
890 					if (error == ENODEV) {
891 						error = 0;
892 					}
893 					if (error != 0) {
894 						device_printf(dev, "%s: failed "
895 						    "to get phy location from "
896 						    "memac mdio dev %s: error=%d\n",
897 						    __func__, device_get_nameunit(
898 						    sc->mac.phy_dev), error);
899 					}
900 				}
901 				if (error == 0) {
902 					error = mii_attach(sc->mac.phy_dev,
903 					    &sc->miibus, sc->ifp,
904 					    dpaa2_ni_media_change,
905 					    dpaa2_ni_media_status,
906 					    BMSR_DEFCAPMASK, sc->mac.phy_loc,
907 					    MII_OFFSET_ANY, 0);
908 					if (error != 0) {
909 						device_printf(dev, "%s: failed "
910 						    "to attach to miibus: "
911 						    "error=%d\n",
912 						    __func__, error);
913 					}
914 				}
915 				if (error == 0) {
916 					sc->mii = device_get_softc(sc->miibus);
917 				}
918 			} else {
919 				device_printf(dev, "%s: DPMAC link type is not "
920 				    "supported\n", __func__);
921 			}
922 		} else if (ep2_desc.type == DPAA2_DEV_NI ||
923 			   ep2_desc.type == DPAA2_DEV_MUX ||
924 			   ep2_desc.type == DPAA2_DEV_SW) {
925 			dpaa2_ni_setup_fixed_link(sc);
926 		}
927 	}
928 
929 	/* Select mode to enqueue frames. */
930 	/* ... TBD ... */
931 
932 	/*
933 	 * Update link configuration to enable Rx/Tx pause frames support.
934 	 *
935 	 * NOTE: MC may generate an interrupt to the DPMAC and request changes
936 	 *       in link configuration. It might be necessary to attach miibus
937 	 *       and PHY before this point.
938 	 */
939 	error = dpaa2_ni_set_pause_frame(dev);
940 	if (error) {
941 		device_printf(dev, "%s: failed to configure Rx/Tx pause "
942 		    "frames\n", __func__);
943 		goto close_ni;
944 	}
945 
946 	/* Configure ingress traffic classification. */
947 	error = dpaa2_ni_set_qos_table(dev);
948 	if (error) {
949 		device_printf(dev, "%s: failed to configure QoS table: "
950 		    "error=%d\n", __func__, error);
951 		goto close_ni;
952 	}
953 
954 	/* Add broadcast physical address to the MAC filtering table. */
955 	memset(eth_bca, 0xff, ETHER_ADDR_LEN);
956 	error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, DPAA2_CMD_TK(&cmd,
957 	    ni_token), eth_bca);
958 	if (error) {
959 		device_printf(dev, "%s: failed to add broadcast physical "
960 		    "address to the MAC filtering table\n", __func__);
961 		goto close_ni;
962 	}
963 
964 	/* Set the maximum allowed length for received frames. */
965 	error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd, DPAA2_ETH_MFL);
966 	if (error) {
967 		device_printf(dev, "%s: failed to set maximum length for "
968 		    "received frames\n", __func__);
969 		goto close_ni;
970 	}
971 
972 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
973 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
974 	return (0);
975 
976 close_ni:
977 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
978 close_rc:
979 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
980 err_exit:
981 	return (error);
982 }
983 
984 /**
 * @brief Configure QBMan channels and register data availability notifications.
986  */
987 static int
988 dpaa2_ni_setup_channels(device_t dev)
989 {
990 	device_t iodev, condev, bpdev;
991 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
992 	uint32_t i, num_chan;
993 	int error;
994 
995 	/* Calculate number of the channels based on the allocated resources */
996 	for (i = 0; i < DPAA2_NI_IO_RES_NUM; i++) {
997 		if (!sc->res[DPAA2_NI_IO_RID(i)]) {
998 			break;
999 		}
1000 	}
1001 	num_chan = i;
1002 	for (i = 0; i < DPAA2_NI_CON_RES_NUM; i++) {
1003 		if (!sc->res[DPAA2_NI_CON_RID(i)]) {
1004 			break;
1005 		}
1006 	}
1007 	num_chan = i < num_chan ? i : num_chan;
1008 	sc->chan_n = num_chan > DPAA2_MAX_CHANNELS
1009 	    ? DPAA2_MAX_CHANNELS : num_chan;
1010 	sc->chan_n = sc->chan_n > sc->attr.num.queues
1011 	    ? sc->attr.num.queues : sc->chan_n;
1012 
1013 	KASSERT(sc->chan_n > 0u, ("%s: positive number of channels expected: "
1014 	    "chan_n=%d", __func__, sc->chan_n));
1015 
1016 	device_printf(dev, "channels=%d\n", sc->chan_n);
1017 
1018 	for (i = 0; i < sc->chan_n; i++) {
1019 		iodev = (device_t)rman_get_start(sc->res[DPAA2_NI_IO_RID(i)]);
1020 		condev = (device_t)rman_get_start(sc->res[DPAA2_NI_CON_RID(i)]);
1021 		/* Only one buffer pool available at the moment */
1022 		bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
1023 
1024 		error = dpaa2_chan_setup(dev, iodev, condev, bpdev,
1025 		    &sc->channels[i], i, dpaa2_ni_cleanup_task);
1026 		if (error != 0) {
1027 			device_printf(dev, "%s: dpaa2_chan_setup() failed: "
1028 			    "error=%d, chan_id=%d\n", __func__, error, i);
1029 			return (error);
1030 		}
1031 	}
1032 
1033 	/* There is exactly one Rx error queue per network interface */
1034 	error = dpaa2_chan_setup_fq(dev, sc->channels[0], DPAA2_NI_QUEUE_RX_ERR);
1035 	if (error != 0) {
1036 		device_printf(dev, "%s: failed to prepare RxError queue: "
1037 		    "error=%d\n", __func__, error);
1038 		return (error);
1039 	}
1040 
1041 	return (0);
1042 }
1043 
1044 /**
1045  * @brief Bind DPNI to DPBPs, DPIOs, frame queues and channels.
1046  */
static int
dpaa2_ni_bind(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	device_t bp_dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *bp_info;
	struct dpaa2_cmd cmd;
	struct dpaa2_ni_pools_cfg pools_cfg;
	struct dpaa2_ni_err_cfg err_cfg;
	struct dpaa2_channel *chan;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	/* Open resource container and network interface for MC commands. */
	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Select buffer pool (only one available at the moment). */
	bp_dev = (device_t) rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
	bp_info = device_get_ivars(bp_dev);

	/* Configure buffers pool. */
	pools_cfg.pools_num = 1;
	pools_cfg.pools[0].bp_obj_id = bp_info->id;
	pools_cfg.pools[0].backup_flag = 0;
	pools_cfg.pools[0].buf_sz = sc->buf_sz;
	error = DPAA2_CMD_NI_SET_POOLS(dev, child, &cmd, &pools_cfg);
	if (error) {
		device_printf(dev, "%s: failed to set buffer pools\n", __func__);
		goto close_ni;
	}

	/*
	 * Setup ingress traffic distribution.
	 *
	 * EOPNOTSUPP is tolerated: distribution is not available for every
	 * DPNI configuration (see dpaa2_ni_setup_rx_dist() note).
	 */
	error = dpaa2_ni_setup_rx_dist(dev);
	if (error && error != EOPNOTSUPP) {
		device_printf(dev, "%s: failed to setup ingress traffic "
		    "distribution\n", __func__);
		goto close_ni;
	}
	if (bootverbose && error == EOPNOTSUPP) {
		device_printf(dev, "Ingress traffic distribution not "
		    "supported\n");
	}

	/*
	 * Configure handling of error frames: discard them without writing
	 * the frame annotation status (FAS).
	 */
	err_cfg.err_mask = DPAA2_NI_FAS_RX_ERR_MASK;
	err_cfg.set_err_fas = false;
	err_cfg.action = DPAA2_NI_ERR_DISCARD;
	error = DPAA2_CMD_NI_SET_ERR_BEHAVIOR(dev, child, &cmd, &err_cfg);
	if (error) {
		device_printf(dev, "%s: failed to set errors behavior\n",
		    __func__);
		goto close_ni;
	}

	/* Configure channel queues to generate CDANs. */
	for (uint32_t i = 0; i < sc->chan_n; i++) {
		chan = sc->channels[i];

		/* Setup Rx flows. */
		for (uint32_t j = 0; j < chan->rxq_n; j++) {
			error = dpaa2_ni_setup_rx_flow(dev, &chan->rx_queues[j]);
			if (error) {
				device_printf(dev, "%s: failed to setup Rx "
				    "flow: error=%d\n", __func__, error);
				goto close_ni;
			}
		}

		/* Setup Tx flow. */
		error = dpaa2_ni_setup_tx_flow(dev, &chan->txc_queue);
		if (error) {
			device_printf(dev, "%s: failed to setup Tx "
			    "flow: error=%d\n", __func__, error);
			goto close_ni;
		}
	}

	/* Configure RxError queue to generate CDAN. */
	error = dpaa2_ni_setup_rx_err_flow(dev, &sc->rxe_queue);
	if (error) {
		device_printf(dev, "%s: failed to setup RxError flow: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	/*
	 * Get the Queuing Destination ID (QDID) that should be used for frame
	 * enqueue operations.
	 */
	error = DPAA2_CMD_NI_GET_QDID(dev, child, &cmd, DPAA2_NI_QUEUE_TX,
	    &sc->tx_qdid);
	if (error) {
		device_printf(dev, "%s: failed to get Tx queuing destination "
		    "ID\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
1171 
1172 /**
1173  * @brief Setup ingress traffic distribution.
1174  *
1175  * NOTE: Ingress traffic distribution is valid only when DPNI_OPT_NO_FS option
1176  *	 hasn't been set for DPNI and a number of DPNI queues > 1.
1177  */
1178 static int
1179 dpaa2_ni_setup_rx_dist(device_t dev)
1180 {
1181 	/*
1182 	 * Have the interface implicitly distribute traffic based on the default
1183 	 * hash key.
1184 	 */
1185 	return (dpaa2_ni_set_hash(dev, DPAA2_RXH_DEFAULT));
1186 }
1187 
1188 static int
1189 dpaa2_ni_setup_rx_flow(device_t dev, struct dpaa2_ni_fq *fq)
1190 {
1191 	device_t pdev = device_get_parent(dev);
1192 	device_t child = dev;
1193 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1194 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1195 	struct dpaa2_devinfo *con_info;
1196 	struct dpaa2_cmd cmd;
1197 	struct dpaa2_ni_queue_cfg queue_cfg = {0};
1198 	uint16_t rc_token, ni_token;
1199 	int error;
1200 
1201 	DPAA2_CMD_INIT(&cmd);
1202 
1203 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1204 	if (error) {
1205 		device_printf(dev, "%s: failed to open resource container: "
1206 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
1207 		goto err_exit;
1208 	}
1209 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1210 	if (error) {
1211 		device_printf(dev, "%s: failed to open network interface: "
1212 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
1213 		goto close_rc;
1214 	}
1215 
1216 	/* Obtain DPCON associated with the FQ's channel. */
1217 	con_info = device_get_ivars(fq->chan->con_dev);
1218 
1219 	queue_cfg.type = DPAA2_NI_QUEUE_RX;
1220 	queue_cfg.tc = fq->tc;
1221 	queue_cfg.idx = fq->flowid;
1222 	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
1223 	if (error) {
1224 		device_printf(dev, "%s: failed to obtain Rx queue "
1225 		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
1226 		    queue_cfg.idx);
1227 		goto close_ni;
1228 	}
1229 
1230 	fq->fqid = queue_cfg.fqid;
1231 
1232 	queue_cfg.dest_id = con_info->id;
1233 	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
1234 	queue_cfg.priority = 1;
1235 	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
1236 	queue_cfg.options =
1237 	    DPAA2_NI_QUEUE_OPT_USER_CTX |
1238 	    DPAA2_NI_QUEUE_OPT_DEST;
1239 	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
1240 	if (error) {
1241 		device_printf(dev, "%s: failed to update Rx queue "
1242 		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
1243 		    queue_cfg.idx);
1244 		goto close_ni;
1245 	}
1246 
1247 	if (bootverbose) {
1248 		device_printf(dev, "RX queue idx=%d, tc=%d, chan=%d, fqid=%d, "
1249 		    "user_ctx=%#jx\n", fq->flowid, fq->tc, fq->chan->id,
1250 		    fq->fqid, (uint64_t) fq);
1251 	}
1252 
1253 	(void)DPAA2_CMD_NI_CLOSE(dev, child, &cmd);
1254 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1255 	return (0);
1256 
1257 close_ni:
1258 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1259 close_rc:
1260 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1261 err_exit:
1262 	return (error);
1263 }
1264 
1265 static int
1266 dpaa2_ni_setup_tx_flow(device_t dev, struct dpaa2_ni_fq *fq)
1267 {
1268 	device_t pdev = device_get_parent(dev);
1269 	device_t child = dev;
1270 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
1271 	struct dpaa2_channel *ch = fq->chan;
1272 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1273 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1274 	struct dpaa2_devinfo *con_info;
1275 	struct dpaa2_ni_queue_cfg queue_cfg = {0};
1276 	struct dpaa2_ni_tx_ring *tx;
1277 	struct dpaa2_buf *buf;
1278 	struct dpaa2_cmd cmd;
1279 	uint32_t tx_rings_n = 0;
1280 	uint16_t rc_token, ni_token;
1281 	int error;
1282 
1283 	DPAA2_CMD_INIT(&cmd);
1284 
1285 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1286 	if (error) {
1287 		device_printf(dev, "%s: failed to open resource container: "
1288 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
1289 		goto err_exit;
1290 	}
1291 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1292 	if (error) {
1293 		device_printf(dev, "%s: failed to open network interface: "
1294 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
1295 		goto close_rc;
1296 	}
1297 
1298 	/* Obtain DPCON associated with the FQ's channel. */
1299 	con_info = device_get_ivars(fq->chan->con_dev);
1300 
1301 	KASSERT(sc->attr.num.tx_tcs <= DPAA2_MAX_TCS,
1302 	    ("%s: too many Tx traffic classes: tx_tcs=%d\n", __func__,
1303 	    sc->attr.num.tx_tcs));
1304 	KASSERT(DPAA2_NI_BUFS_PER_TX <= DPAA2_NI_MAX_BPTX,
1305 	    ("%s: too many Tx buffers (%d): max=%d\n", __func__,
1306 	    DPAA2_NI_BUFS_PER_TX, DPAA2_NI_MAX_BPTX));
1307 
1308 	/* Setup Tx rings. */
1309 	for (int i = 0; i < sc->attr.num.tx_tcs; i++) {
1310 		queue_cfg.type = DPAA2_NI_QUEUE_TX;
1311 		queue_cfg.tc = i;
1312 		queue_cfg.idx = fq->flowid;
1313 		queue_cfg.chan_id = fq->chan->id;
1314 
1315 		error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
1316 		if (error) {
1317 			device_printf(dev, "%s: failed to obtain Tx queue "
1318 			    "configuration: tc=%d, flowid=%d\n", __func__,
1319 			    queue_cfg.tc, queue_cfg.idx);
1320 			goto close_ni;
1321 		}
1322 
1323 		tx = &fq->tx_rings[i];
1324 		tx->fq = fq;
1325 		tx->fqid = queue_cfg.fqid;
1326 		tx->txid = tx_rings_n;
1327 
1328 		if (bootverbose) {
1329 			device_printf(dev, "TX queue idx=%d, tc=%d, chan=%d, "
1330 			    "fqid=%d\n", fq->flowid, i, fq->chan->id,
1331 			    queue_cfg.fqid);
1332 		}
1333 
1334 		mtx_init(&tx->lock, "dpaa2_tx_ring", NULL, MTX_DEF);
1335 
1336 		/* Allocate Tx ring buffer. */
1337 		tx->br = buf_ring_alloc(DPAA2_TX_BUFRING_SZ, M_DEVBUF, M_NOWAIT,
1338 		    &tx->lock);
1339 		if (tx->br == NULL) {
1340 			device_printf(dev, "%s: failed to setup Tx ring buffer"
1341 			    " (2) fqid=%d\n", __func__, tx->fqid);
1342 			goto close_ni;
1343 		}
1344 
1345 		/* Configure Tx buffers */
1346 		for (uint64_t j = 0; j < DPAA2_NI_BUFS_PER_TX; j++) {
1347 			buf = malloc(sizeof(struct dpaa2_buf), M_DPAA2_TXB,
1348 			    M_WAITOK);
1349 			/* Keep DMA tag and Tx ring linked to the buffer */
1350 			DPAA2_BUF_INIT_TAGOPT(buf, ch->tx_dmat, tx);
1351 
1352 			buf->sgt = malloc(sizeof(struct dpaa2_buf), M_DPAA2_TXB,
1353 			    M_WAITOK);
1354 			/* Link SGT to DMA tag and back to its Tx buffer */
1355 			DPAA2_BUF_INIT_TAGOPT(buf->sgt, ch->sgt_dmat, buf);
1356 
1357 			error = dpaa2_buf_seed_txb(dev, buf);
1358 
1359 			/* Add Tx buffer to the ring */
1360 			buf_ring_enqueue(tx->br, buf);
1361 		}
1362 
1363 		tx_rings_n++;
1364 	}
1365 
1366 	/* All Tx queues which belong to the same flowid have the same qdbin. */
1367 	fq->tx_qdbin = queue_cfg.qdbin;
1368 
1369 	queue_cfg.type = DPAA2_NI_QUEUE_TX_CONF;
1370 	queue_cfg.tc = 0; /* ignored for TxConf queue */
1371 	queue_cfg.idx = fq->flowid;
1372 	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
1373 	if (error) {
1374 		device_printf(dev, "%s: failed to obtain TxConf queue "
1375 		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
1376 		    queue_cfg.idx);
1377 		goto close_ni;
1378 	}
1379 
1380 	fq->fqid = queue_cfg.fqid;
1381 
1382 	queue_cfg.dest_id = con_info->id;
1383 	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
1384 	queue_cfg.priority = 0;
1385 	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
1386 	queue_cfg.options =
1387 	    DPAA2_NI_QUEUE_OPT_USER_CTX |
1388 	    DPAA2_NI_QUEUE_OPT_DEST;
1389 	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
1390 	if (error) {
1391 		device_printf(dev, "%s: failed to update TxConf queue "
1392 		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
1393 		    queue_cfg.idx);
1394 		goto close_ni;
1395 	}
1396 
1397 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1398 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1399 	return (0);
1400 
1401 close_ni:
1402 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1403 close_rc:
1404 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1405 err_exit:
1406 	return (error);
1407 }
1408 
/**
 * @brief Configure the Rx error queue to notify the channel's DPCON (CDAN).
 *
 * Same pattern as dpaa2_ni_setup_rx_flow(): learn the FQID, then attach the
 * queue to the DPCON with the frame queue pointer as user context.
 */
static int
dpaa2_ni_setup_rx_err_flow(device_t dev, struct dpaa2_ni_fq *fq)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *con_info;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Obtain DPCON associated with the FQ's channel. */
	con_info = device_get_ivars(fq->chan->con_dev);

	queue_cfg.type = DPAA2_NI_QUEUE_RX_ERR;
	queue_cfg.tc = fq->tc; /* ignored */
	queue_cfg.idx = fq->flowid; /* ignored */
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain RxErr queue "
		    "configuration\n", __func__);
		goto close_ni;
	}

	fq->fqid = queue_cfg.fqid;

	/* Deliver notifications for this queue to the channel's DPCON. */
	queue_cfg.dest_id = con_info->id;
	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
	queue_cfg.priority = 1;
	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
	queue_cfg.options =
	    DPAA2_NI_QUEUE_OPT_USER_CTX |
	    DPAA2_NI_QUEUE_OPT_DEST;
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to update RxErr queue "
		    "configuration\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
1477 
1478 /**
1479  * @brief Configure DPNI object to generate interrupts.
1480  */
1481 static int
1482 dpaa2_ni_setup_irqs(device_t dev)
1483 {
1484 	device_t pdev = device_get_parent(dev);
1485 	device_t child = dev;
1486 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
1487 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1488 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1489 	struct dpaa2_cmd cmd;
1490 	uint16_t rc_token, ni_token;
1491 	int error;
1492 
1493 	DPAA2_CMD_INIT(&cmd);
1494 
1495 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1496 	if (error) {
1497 		device_printf(dev, "%s: failed to open resource container: "
1498 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
1499 		goto err_exit;
1500 	}
1501 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1502 	if (error) {
1503 		device_printf(dev, "%s: failed to open network interface: "
1504 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
1505 		goto close_rc;
1506 	}
1507 
1508 	/* Configure IRQs. */
1509 	error = dpaa2_ni_setup_msi(sc);
1510 	if (error) {
1511 		device_printf(dev, "%s: failed to allocate MSI\n", __func__);
1512 		goto close_ni;
1513 	}
1514 	if ((sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1515 	    &sc->irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) {
1516 		device_printf(dev, "%s: failed to allocate IRQ resource\n",
1517 		    __func__);
1518 		goto close_ni;
1519 	}
1520 	if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
1521 	    NULL, dpaa2_ni_intr, sc, &sc->intr)) {
1522 		device_printf(dev, "%s: failed to setup IRQ resource\n",
1523 		    __func__);
1524 		goto close_ni;
1525 	}
1526 
1527 	error = DPAA2_CMD_NI_SET_IRQ_MASK(dev, child, &cmd, DPNI_IRQ_INDEX,
1528 	    DPNI_IRQ_LINK_CHANGED | DPNI_IRQ_EP_CHANGED);
1529 	if (error) {
1530 		device_printf(dev, "%s: failed to set DPNI IRQ mask\n",
1531 		    __func__);
1532 		goto close_ni;
1533 	}
1534 
1535 	error = DPAA2_CMD_NI_SET_IRQ_ENABLE(dev, child, &cmd, DPNI_IRQ_INDEX,
1536 	    true);
1537 	if (error) {
1538 		device_printf(dev, "%s: failed to enable DPNI IRQ\n", __func__);
1539 		goto close_ni;
1540 	}
1541 
1542 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1543 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1544 	return (0);
1545 
1546 close_ni:
1547 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1548 close_rc:
1549 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1550 err_exit:
1551 	return (error);
1552 }
1553 
1554 /**
1555  * @brief Allocate MSI interrupts for DPNI.
1556  */
1557 static int
1558 dpaa2_ni_setup_msi(struct dpaa2_ni_softc *sc)
1559 {
1560 	int val;
1561 
1562 	val = pci_msi_count(sc->dev);
1563 	if (val < DPAA2_NI_MSI_COUNT)
1564 		device_printf(sc->dev, "MSI: actual=%d, expected=%d\n", val,
1565 		    DPAA2_IO_MSI_COUNT);
1566 	val = MIN(val, DPAA2_NI_MSI_COUNT);
1567 
1568 	if (pci_alloc_msi(sc->dev, &val) != 0)
1569 		return (EINVAL);
1570 
1571 	for (int i = 0; i < val; i++)
1572 		sc->irq_rid[i] = i + 1;
1573 
1574 	return (0);
1575 }
1576 
1577 /**
1578  * @brief Update DPNI according to the updated interface capabilities.
1579  */
static int
dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *sc)
{
	bool en_rxcsum, en_txcsum;
	device_t pdev = device_get_parent(sc->dev);
	device_t dev = sc->dev;
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	/*
	 * XXX-DSL: DPAA2 allows to validate L3/L4 checksums on reception and/or
	 *          generate L3/L4 checksums on transmission without
	 *          differentiating between IPv4/v6, i.e. enable for both
	 *          protocols if requested.
	 */
	en_rxcsum = if_getcapenable(sc->ifp) &
	    (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
	en_txcsum = if_getcapenable(sc->ifp) &
	    (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Setup checksums validation. */
	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
	    DPAA2_NI_OFL_RX_L3_CSUM, en_rxcsum);
	if (error) {
		device_printf(dev, "%s: failed to %s L3 checksum validation\n",
		    __func__, en_rxcsum ? "enable" : "disable");
		goto close_ni;
	}
	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
	    DPAA2_NI_OFL_RX_L4_CSUM, en_rxcsum);
	if (error) {
		device_printf(dev, "%s: failed to %s L4 checksum validation\n",
		    __func__, en_rxcsum ? "enable" : "disable");
		goto close_ni;
	}

	/* Setup checksums generation. */
	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
	    DPAA2_NI_OFL_TX_L3_CSUM, en_txcsum);
	if (error) {
		device_printf(dev, "%s: failed to %s L3 checksum generation\n",
		    __func__, en_txcsum ? "enable" : "disable");
		goto close_ni;
	}
	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
	    DPAA2_NI_OFL_TX_L4_CSUM, en_txcsum);
	if (error) {
		device_printf(dev, "%s: failed to %s L4 checksum generation\n",
		    __func__, en_txcsum ? "enable" : "disable");
		goto close_ni;
	}

	if (bootverbose) {
		device_printf(dev, "%s: L3/L4 checksum validation %s\n",
		    __func__, en_rxcsum ? "enabled" : "disabled");
		device_printf(dev, "%s: L3/L4 checksum generation %s\n",
		    __func__, en_txcsum ? "enabled" : "disabled");
	}

	/* NOTE(review): cmd presumably still carries ni_token here. */
	(void)DPAA2_CMD_NI_CLOSE(dev, child, &cmd);
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
1669 
1670 /**
1671  * @brief Update DPNI according to the updated interface flags.
1672  */
static int
dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *sc)
{
	const bool en_promisc = if_getflags(sc->ifp) & IFF_PROMISC;
	const bool en_allmulti = if_getflags(sc->ifp) & IFF_ALLMULTI;
	device_t pdev = device_get_parent(sc->dev);
	device_t dev = sc->dev;
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* IFF_PROMISC implies receiving all multicast frames as well. */
	error = DPAA2_CMD_NI_SET_MULTI_PROMISC(dev, child, &cmd,
	    en_promisc ? true : en_allmulti);
	if (error) {
		device_printf(dev, "%s: failed to %s multicast promiscuous "
		    "mode\n", __func__, en_allmulti ? "enable" : "disable");
		goto close_ni;
	}

	error = DPAA2_CMD_NI_SET_UNI_PROMISC(dev, child, &cmd, en_promisc);
	if (error) {
		device_printf(dev, "%s: failed to %s unicast promiscuous mode\n",
		    __func__, en_promisc ? "enable" : "disable");
		goto close_ni;
	}

	/* NOTE(review): cmd presumably still carries ni_token here. */
	(void)DPAA2_CMD_NI_CLOSE(dev, child, &cmd);
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
1728 
/**
 * @brief Create sysctl nodes for DPNI and per-channel statistics.
 *
 * Adds a "stats" subtree (DPNI counters plus driver-maintained frame
 * counters) and a "channels" subtree (per-channel Tx counters) under the
 * device's sysctl tree.  Always returns 0.
 */
static int
dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *node, *node2;
	struct sysctl_oid_list *parent, *parent2;
	char cbuf[128];
	int i;

	ctx = device_get_sysctl_ctx(sc->dev);
	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	/* Add DPNI statistics. */
	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Statistics");
	parent = SYSCTL_CHILDREN(node);
	/* The OID number doubles as the index into dpni_stat_sysctls[]. */
	for (i = 0; i < DPAA2_NI_STAT_SYSCTLS; ++i) {
		SYSCTL_ADD_PROC(ctx, parent, i, dpni_stat_sysctls[i].name,
		    CTLTYPE_U64 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_stats,
		    "IU", dpni_stat_sysctls[i].desc);
	}
	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_anomaly_frames",
	    CTLFLAG_RD, &sc->rx_anomaly_frames,
	    "Rx frames in the buffers outside of the buffer pools");
	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_single_buf_frames",
	    CTLFLAG_RD, &sc->rx_single_buf_frames,
	    "Rx frames in single buffers");
	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_sg_buf_frames",
	    CTLFLAG_RD, &sc->rx_sg_buf_frames,
	    "Rx frames in scatter/gather list");
	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_enq_rej_frames",
	    CTLFLAG_RD, &sc->rx_enq_rej_frames,
	    "Enqueue rejected by QMan");
	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_ieoi_err_frames",
	    CTLFLAG_RD, &sc->rx_ieoi_err_frames,
	    "QMan IEOI error");
	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_single_buf_frames",
	    CTLFLAG_RD, &sc->tx_single_buf_frames,
	    "Tx single buffer frames");
	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_sg_frames",
	    CTLFLAG_RD, &sc->tx_sg_frames,
	    "Tx S/G frames");

	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_num",
	    CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_num,
	    "IU", "number of Rx buffers in the buffer pool");
	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_free",
	    CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_free,
	    "IU", "number of free Rx buffers in the buffer pool");

	/* Rewind to the device's root so "channels" is a sibling of "stats". */
	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	/* Add channels statistics. */
	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "channels",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channels");
	parent = SYSCTL_CHILDREN(node);
	/* NOTE(review): this loop's "i" shadows the function-scope "i". */
	for (int i = 0; i < sc->chan_n; i++) {
		snprintf(cbuf, sizeof(cbuf), "%d", i);

		node2 = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, cbuf,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channel");
		parent2 = SYSCTL_CHILDREN(node2);

		SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_frames",
		    CTLFLAG_RD, &sc->channels[i]->tx_frames,
		    "Tx frames counter");
		SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_dropped",
		    CTLFLAG_RD, &sc->channels[i]->tx_dropped,
		    "Tx dropped counter");
	}

	return (0);
}
1802 
1803 static int
1804 dpaa2_ni_setup_dma(struct dpaa2_ni_softc *sc)
1805 {
1806 	device_t dev = sc->dev;
1807 	int error;
1808 
1809 	KASSERT((sc->buf_align == BUF_ALIGN) || (sc->buf_align == BUF_ALIGN_V1),
1810 	    ("unexpected buffer alignment: %d\n", sc->buf_align));
1811 
1812 	/* DMA tag for Rx distribution key. */
1813 	error = bus_dma_tag_create(
1814 	    bus_get_dma_tag(dev),
1815 	    PAGE_SIZE, 0,		/* alignment, boundary */
1816 	    BUS_SPACE_MAXADDR,		/* low restricted addr */
1817 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
1818 	    NULL, NULL,			/* filter, filterarg */
1819 	    DPAA2_CLASSIFIER_DMA_SIZE, 1, /* maxsize, nsegments */
1820 	    DPAA2_CLASSIFIER_DMA_SIZE, 0, /* maxsegsize, flags */
1821 	    NULL, NULL,			/* lockfunc, lockarg */
1822 	    &sc->rxd_dmat);
1823 	if (error) {
1824 		device_printf(dev, "%s: failed to create DMA tag for Rx "
1825 		    "distribution key\n", __func__);
1826 		return (error);
1827 	}
1828 
1829 	error = bus_dma_tag_create(
1830 	    bus_get_dma_tag(dev),
1831 	    PAGE_SIZE, 0,		/* alignment, boundary */
1832 	    BUS_SPACE_MAXADDR,		/* low restricted addr */
1833 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
1834 	    NULL, NULL,			/* filter, filterarg */
1835 	    ETH_QOS_KCFG_BUF_SIZE, 1,	/* maxsize, nsegments */
1836 	    ETH_QOS_KCFG_BUF_SIZE, 0,	/* maxsegsize, flags */
1837 	    NULL, NULL,			/* lockfunc, lockarg */
1838 	    &sc->qos_dmat);
1839 	if (error) {
1840 		device_printf(dev, "%s: failed to create DMA tag for QoS key\n",
1841 		    __func__);
1842 		return (error);
1843 	}
1844 
1845 	return (0);
1846 }
1847 
1848 /**
1849  * @brief Configure buffer layouts of the different DPNI queues.
1850  */
1851 static int
1852 dpaa2_ni_set_buf_layout(device_t dev)
1853 {
1854 	device_t pdev = device_get_parent(dev);
1855 	device_t child = dev;
1856 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1857 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1858 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
1859 	struct dpaa2_ni_buf_layout buf_layout = {0};
1860 	struct dpaa2_cmd cmd;
1861 	uint16_t rc_token, ni_token;
1862 	int error;
1863 
1864 	DPAA2_CMD_INIT(&cmd);
1865 
1866 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1867 	if (error) {
1868 		device_printf(dev, "%s: failed to open resource container: "
1869 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
1870 		goto err_exit;
1871 	}
1872 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1873 	if (error) {
1874 		device_printf(sc->dev, "%s: failed to open DPMAC: id=%d, "
1875 		    "error=%d\n", __func__, dinfo->id, error);
1876 		goto close_rc;
1877 	}
1878 
1879 	/*
1880 	 * Select Rx/Tx buffer alignment. It's necessary to ensure that the
1881 	 * buffer size seen by WRIOP is a multiple of 64 or 256 bytes depending
1882 	 * on the WRIOP version.
1883 	 */
1884 	sc->buf_align = (sc->attr.wriop_ver == WRIOP_VERSION(0, 0, 0) ||
1885 	    sc->attr.wriop_ver == WRIOP_VERSION(1, 0, 0))
1886 	    ? BUF_ALIGN_V1 : BUF_ALIGN;
1887 
1888 	/*
1889 	 * We need to ensure that the buffer size seen by WRIOP is a multiple
1890 	 * of 64 or 256 bytes depending on the WRIOP version.
1891 	 */
1892 	sc->buf_sz = ALIGN_DOWN(DPAA2_RX_BUF_SIZE, sc->buf_align);
1893 
1894 	if (bootverbose) {
1895 		device_printf(dev, "Rx/Tx buffers: size=%d, alignment=%d\n",
1896 		    sc->buf_sz, sc->buf_align);
1897 	}
1898 
1899 	/*
1900 	 *    Frame Descriptor       Tx buffer layout
1901 	 *
1902 	 *                ADDR -> |---------------------|
1903 	 *                        | SW FRAME ANNOTATION | BUF_SWA_SIZE bytes
1904 	 *                        |---------------------|
1905 	 *                        | HW FRAME ANNOTATION | BUF_TX_HWA_SIZE bytes
1906 	 *                        |---------------------|
1907 	 *                        |    DATA HEADROOM    |
1908 	 *       ADDR + OFFSET -> |---------------------|
1909 	 *                        |                     |
1910 	 *                        |                     |
1911 	 *                        |     FRAME DATA      |
1912 	 *                        |                     |
1913 	 *                        |                     |
1914 	 *                        |---------------------|
1915 	 *                        |    DATA TAILROOM    |
1916 	 *                        |---------------------|
1917 	 *
1918 	 * NOTE: It's for a single buffer frame only.
1919 	 */
1920 	buf_layout.queue_type = DPAA2_NI_QUEUE_TX;
1921 	buf_layout.pd_size = BUF_SWA_SIZE;
1922 	buf_layout.pass_timestamp = true;
1923 	buf_layout.pass_frame_status = true;
1924 	buf_layout.options =
1925 	    BUF_LOPT_PRIV_DATA_SZ |
1926 	    BUF_LOPT_TIMESTAMP | /* requires 128 bytes in HWA */
1927 	    BUF_LOPT_FRAME_STATUS;
1928 	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
1929 	if (error) {
1930 		device_printf(dev, "%s: failed to set Tx buffer layout\n",
1931 		    __func__);
1932 		goto close_ni;
1933 	}
1934 
1935 	/* Tx-confirmation buffer layout */
1936 	buf_layout.queue_type = DPAA2_NI_QUEUE_TX_CONF;
1937 	buf_layout.options =
1938 	    BUF_LOPT_TIMESTAMP |
1939 	    BUF_LOPT_FRAME_STATUS;
1940 	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
1941 	if (error) {
1942 		device_printf(dev, "%s: failed to set TxConf buffer layout\n",
1943 		    __func__);
1944 		goto close_ni;
1945 	}
1946 
1947 	/*
1948 	 * Driver should reserve the amount of space indicated by this command
1949 	 * as headroom in all Tx frames.
1950 	 */
1951 	error = DPAA2_CMD_NI_GET_TX_DATA_OFF(dev, child, &cmd, &sc->tx_data_off);
1952 	if (error) {
1953 		device_printf(dev, "%s: failed to obtain Tx data offset\n",
1954 		    __func__);
1955 		goto close_ni;
1956 	}
1957 
1958 	if (bootverbose) {
1959 		device_printf(dev, "Tx data offset=%d\n", sc->tx_data_off);
1960 	}
1961 	if ((sc->tx_data_off % 64) != 0) {
1962 		device_printf(dev, "Tx data offset (%d) is not a multiplication "
1963 		    "of 64 bytes\n", sc->tx_data_off);
1964 	}
1965 
1966 	/*
1967 	 *    Frame Descriptor       Rx buffer layout
1968 	 *
1969 	 *                ADDR -> |---------------------|
1970 	 *                        | SW FRAME ANNOTATION | BUF_SWA_SIZE bytes
1971 	 *                        |---------------------|
1972 	 *                        | HW FRAME ANNOTATION | BUF_RX_HWA_SIZE bytes
1973 	 *                        |---------------------|
1974 	 *                        |    DATA HEADROOM    | OFFSET-BUF_RX_HWA_SIZE
1975 	 *       ADDR + OFFSET -> |---------------------|
1976 	 *                        |                     |
1977 	 *                        |                     |
1978 	 *                        |     FRAME DATA      |
1979 	 *                        |                     |
1980 	 *                        |                     |
1981 	 *                        |---------------------|
1982 	 *                        |    DATA TAILROOM    | 0 bytes
1983 	 *                        |---------------------|
1984 	 *
1985 	 * NOTE: It's for a single buffer frame only.
1986 	 */
1987 	buf_layout.queue_type = DPAA2_NI_QUEUE_RX;
1988 	buf_layout.pd_size = BUF_SWA_SIZE;
1989 	buf_layout.fd_align = sc->buf_align;
1990 	buf_layout.head_size = sc->tx_data_off - BUF_RX_HWA_SIZE - BUF_SWA_SIZE;
1991 	buf_layout.tail_size = 0;
1992 	buf_layout.pass_frame_status = true;
1993 	buf_layout.pass_parser_result = true;
1994 	buf_layout.pass_timestamp = true;
1995 	buf_layout.options =
1996 	    BUF_LOPT_PRIV_DATA_SZ |
1997 	    BUF_LOPT_DATA_ALIGN |
1998 	    BUF_LOPT_DATA_HEAD_ROOM |
1999 	    BUF_LOPT_DATA_TAIL_ROOM |
2000 	    BUF_LOPT_FRAME_STATUS |
2001 	    BUF_LOPT_PARSER_RESULT |
2002 	    BUF_LOPT_TIMESTAMP;
2003 	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
2004 	if (error) {
2005 		device_printf(dev, "%s: failed to set Rx buffer layout\n",
2006 		    __func__);
2007 		goto close_ni;
2008 	}
2009 
2010 	error = 0;
2011 close_ni:
2012 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2013 close_rc:
2014 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2015 err_exit:
2016 	return (error);
2017 }
2018 
2019 /**
2020  * @brief Enable Rx/Tx pause frames.
2021  *
2022  * NOTE: DPNI stops sending when a pause frame is received (Rx frame) or DPNI
2023  *       itself generates pause frames (Tx frame).
2024  */
2025 static int
2026 dpaa2_ni_set_pause_frame(device_t dev)
2027 {
2028 	device_t pdev = device_get_parent(dev);
2029 	device_t child = dev;
2030 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2031 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2032 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2033 	struct dpaa2_ni_link_cfg link_cfg = {0};
2034 	struct dpaa2_cmd cmd;
2035 	uint16_t rc_token, ni_token;
2036 	int error;
2037 
2038 	DPAA2_CMD_INIT(&cmd);
2039 
2040 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2041 	if (error) {
2042 		device_printf(dev, "%s: failed to open resource container: "
2043 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2044 		goto err_exit;
2045 	}
2046 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2047 	if (error) {
2048 		device_printf(sc->dev, "%s: failed to open DPMAC: id=%d, "
2049 		    "error=%d\n", __func__, dinfo->id, error);
2050 		goto close_rc;
2051 	}
2052 
2053 	error = DPAA2_CMD_NI_GET_LINK_CFG(dev, child, &cmd, &link_cfg);
2054 	if (error) {
2055 		device_printf(dev, "%s: failed to obtain link configuration: "
2056 		    "error=%d\n", __func__, error);
2057 		goto close_ni;
2058 	}
2059 
2060 	/* Enable both Rx and Tx pause frames by default. */
2061 	link_cfg.options |= DPAA2_NI_LINK_OPT_PAUSE;
2062 	link_cfg.options &= ~DPAA2_NI_LINK_OPT_ASYM_PAUSE;
2063 
2064 	error = DPAA2_CMD_NI_SET_LINK_CFG(dev, child, &cmd, &link_cfg);
2065 	if (error) {
2066 		device_printf(dev, "%s: failed to set link configuration: "
2067 		    "error=%d\n", __func__, error);
2068 		goto close_ni;
2069 	}
2070 
2071 	sc->link_options = link_cfg.options;
2072 	error = 0;
2073 close_ni:
2074 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2075 close_rc:
2076 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2077 err_exit:
2078 	return (error);
2079 }
2080 
2081 /**
2082  * @brief Configure QoS table to determine the traffic class for the received
2083  * frame.
2084  */
2085 static int
2086 dpaa2_ni_set_qos_table(device_t dev)
2087 {
2088 	device_t pdev = device_get_parent(dev);
2089 	device_t child = dev;
2090 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2091 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2092 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2093 	struct dpaa2_ni_qos_table tbl;
2094 	struct dpaa2_buf *buf = &sc->qos_kcfg;
2095 	struct dpaa2_cmd cmd;
2096 	uint16_t rc_token, ni_token;
2097 	int error;
2098 
2099 	if (sc->attr.num.rx_tcs == 1 ||
2100 	    !(sc->attr.options & DPNI_OPT_HAS_KEY_MASKING)) {
2101 		if (bootverbose) {
2102 			device_printf(dev, "Ingress traffic classification is "
2103 			    "not supported\n");
2104 		}
2105 		return (0);
2106 	}
2107 
2108 	/*
2109 	 * Allocate a buffer visible to the device to hold the QoS table key
2110 	 * configuration.
2111 	 */
2112 
2113 	if (__predict_true(buf->dmat == NULL)) {
2114 		buf->dmat = sc->qos_dmat;
2115 	}
2116 
2117 	error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr,
2118 	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap);
2119 	if (error) {
2120 		device_printf(dev, "%s: failed to allocate a buffer for QoS key "
2121 		    "configuration\n", __func__);
2122 		goto err_exit;
2123 	}
2124 
2125 	error = bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr,
2126 	    ETH_QOS_KCFG_BUF_SIZE, dpaa2_dmamap_oneseg_cb, &buf->paddr,
2127 	    BUS_DMA_NOWAIT);
2128 	if (error) {
2129 		device_printf(dev, "%s: failed to map QoS key configuration "
2130 		    "buffer into bus space\n", __func__);
2131 		goto err_exit;
2132 	}
2133 
2134 	DPAA2_CMD_INIT(&cmd);
2135 
2136 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2137 	if (error) {
2138 		device_printf(dev, "%s: failed to open resource container: "
2139 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2140 		goto err_exit;
2141 	}
2142 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2143 	if (error) {
2144 		device_printf(sc->dev, "%s: failed to open DPMAC: id=%d, "
2145 		    "error=%d\n", __func__, dinfo->id, error);
2146 		goto close_rc;
2147 	}
2148 
2149 	tbl.default_tc = 0;
2150 	tbl.discard_on_miss = false;
2151 	tbl.keep_entries = false;
2152 	tbl.kcfg_busaddr = buf->paddr;
2153 	error = DPAA2_CMD_NI_SET_QOS_TABLE(dev, child, &cmd, &tbl);
2154 	if (error) {
2155 		device_printf(dev, "%s: failed to set QoS table\n", __func__);
2156 		goto close_ni;
2157 	}
2158 
2159 	error = DPAA2_CMD_NI_CLEAR_QOS_TABLE(dev, child, &cmd);
2160 	if (error) {
2161 		device_printf(dev, "%s: failed to clear QoS table\n", __func__);
2162 		goto close_ni;
2163 	}
2164 
2165 	error = 0;
2166 close_ni:
2167 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2168 close_rc:
2169 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2170 err_exit:
2171 	return (error);
2172 }
2173 
2174 static int
2175 dpaa2_ni_set_mac_addr(device_t dev)
2176 {
2177 	device_t pdev = device_get_parent(dev);
2178 	device_t child = dev;
2179 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2180 	if_t ifp = sc->ifp;
2181 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2182 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2183 	struct dpaa2_cmd cmd;
2184 	struct ether_addr rnd_mac_addr;
2185 	uint16_t rc_token, ni_token;
2186 	uint8_t mac_addr[ETHER_ADDR_LEN];
2187 	uint8_t dpni_mac_addr[ETHER_ADDR_LEN];
2188 	int error;
2189 
2190 	DPAA2_CMD_INIT(&cmd);
2191 
2192 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2193 	if (error) {
2194 		device_printf(dev, "%s: failed to open resource container: "
2195 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2196 		goto err_exit;
2197 	}
2198 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2199 	if (error) {
2200 		device_printf(sc->dev, "%s: failed to open DPMAC: id=%d, "
2201 		    "error=%d\n", __func__, dinfo->id, error);
2202 		goto close_rc;
2203 	}
2204 
2205 	/*
2206 	 * Get the MAC address associated with the physical port, if the DPNI is
2207 	 * connected to a DPMAC directly associated with one of the physical
2208 	 * ports.
2209 	 */
2210 	error = DPAA2_CMD_NI_GET_PORT_MAC_ADDR(dev, child, &cmd, mac_addr);
2211 	if (error) {
2212 		device_printf(dev, "%s: failed to obtain the MAC address "
2213 		    "associated with the physical port\n", __func__);
2214 		goto close_ni;
2215 	}
2216 
2217 	/* Get primary MAC address from the DPNI attributes. */
2218 	error = DPAA2_CMD_NI_GET_PRIM_MAC_ADDR(dev, child, &cmd, dpni_mac_addr);
2219 	if (error) {
2220 		device_printf(dev, "%s: failed to obtain primary MAC address\n",
2221 		    __func__);
2222 		goto close_ni;
2223 	}
2224 
2225 	if (!ETHER_IS_ZERO(mac_addr)) {
2226 		/* Set MAC address of the physical port as DPNI's primary one. */
2227 		error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd,
2228 		    mac_addr);
2229 		if (error) {
2230 			device_printf(dev, "%s: failed to set primary MAC "
2231 			    "address\n", __func__);
2232 			goto close_ni;
2233 		}
2234 		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2235 			sc->mac.addr[i] = mac_addr[i];
2236 		}
2237 	} else if (ETHER_IS_ZERO(dpni_mac_addr)) {
2238 		/* Generate random MAC address as DPNI's primary one. */
2239 		ether_gen_addr(ifp, &rnd_mac_addr);
2240 		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2241 			mac_addr[i] = rnd_mac_addr.octet[i];
2242 		}
2243 
2244 		error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd,
2245 		    mac_addr);
2246 		if (error) {
2247 			device_printf(dev, "%s: failed to set random primary "
2248 			    "MAC address\n", __func__);
2249 			goto close_ni;
2250 		}
2251 		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2252 			sc->mac.addr[i] = mac_addr[i];
2253 		}
2254 	} else {
2255 		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2256 			sc->mac.addr[i] = dpni_mac_addr[i];
2257 		}
2258 	}
2259 
2260 	error = 0;
2261 close_ni:
2262 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2263 close_rc:
2264 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2265 err_exit:
2266 	return (error);
2267 }
2268 
/**
 * @brief miibus status-change callback: propagate PHY link state to the DPMAC.
 *
 * Called by the MII layer when media status may have changed. If the derived
 * link state differs from the cached sc->link_state, the new state is pushed
 * to the DPMAC via the resource container command interface.
 */
static void
dpaa2_ni_miibus_statchg(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_mac_link_state mac_link = { 0 };
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_cmd cmd;
	uint16_t rc_token, mac_token;
	int error, link_state;

	/* Nothing to propagate for a fixed link or before miibus is attached. */
	if (sc->fixed_link || sc->mii == NULL) {
		return;
	}
	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0) {
		/*
		 * We will receive calls and adjust the changes but
		 * not have setup everything (called before dpaa2_ni_init()
		 * really).  This will then setup the link and internal
		 * sc->link_state and not trigger the update once needed,
		 * so basically dpmac never knows about it.
		 */
		return;
	}

	/*
	 * Note: ifp link state will only be changed AFTER we are called so we
	 * cannot rely on ifp->if_linkstate here.
	 */
	if (sc->mii->mii_media_status & IFM_AVALID) {
		if (sc->mii->mii_media_status & IFM_ACTIVE) {
			link_state = LINK_STATE_UP;
		} else {
			link_state = LINK_STATE_DOWN;
		}
	} else {
		link_state = LINK_STATE_UNKNOWN;
	}

	/* Only talk to the DPMAC when the state actually changed. */
	if (link_state != sc->link_state) {
		sc->link_state = link_state;

		DPAA2_CMD_INIT(&cmd);

		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
		    &rc_token);
		if (error) {
			device_printf(dev, "%s: failed to open resource "
			    "container: id=%d, error=%d\n", __func__, rcinfo->id,
			    error);
			goto err_exit;
		}
		error = DPAA2_CMD_MAC_OPEN(dev, child, &cmd, sc->mac.dpmac_id,
		    &mac_token);
		if (error) {
			device_printf(sc->dev, "%s: failed to open DPMAC: "
			    "id=%d, error=%d\n", __func__, sc->mac.dpmac_id,
			    error);
			goto close_rc;
		}

		/* UNKNOWN state is intentionally not forwarded to the DPMAC. */
		if (link_state == LINK_STATE_UP ||
		    link_state == LINK_STATE_DOWN) {
			/* Update DPMAC link state. */
			mac_link.supported = sc->mii->mii_media.ifm_media;
			mac_link.advert = sc->mii->mii_media.ifm_media;
			mac_link.rate = 1000; /* TODO: Where to get from? */	/* ifmedia_baudrate? */
			mac_link.options =
			    DPAA2_MAC_LINK_OPT_AUTONEG |
			    DPAA2_MAC_LINK_OPT_PAUSE;
			mac_link.up = (link_state == LINK_STATE_UP) ? true : false;
			mac_link.state_valid = true;

			/* Inform DPMAC about link state. */
			error = DPAA2_CMD_MAC_SET_LINK_STATE(dev, child, &cmd,
			    &mac_link);
			if (error) {
				device_printf(sc->dev, "%s: failed to set DPMAC "
				    "link state: id=%d, error=%d\n", __func__,
				    sc->mac.dpmac_id, error);
			}
		}
		(void)DPAA2_CMD_MAC_CLOSE(dev, child, &cmd);
		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
		    rc_token));
	}

	return;

close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return;
}
2364 
2365 /**
2366  * @brief Callback function to process media change request.
2367  */
2368 static int
2369 dpaa2_ni_media_change_locked(struct dpaa2_ni_softc *sc)
2370 {
2371 
2372 	DPNI_LOCK_ASSERT(sc);
2373 	if (sc->mii) {
2374 		mii_mediachg(sc->mii);
2375 		sc->media_status = sc->mii->mii_media.ifm_media;
2376 	} else if (sc->fixed_link) {
2377 		if_printf(sc->ifp, "%s: can't change media in fixed mode\n",
2378 		    __func__);
2379 	}
2380 
2381 	return (0);
2382 }
2383 
2384 static int
2385 dpaa2_ni_media_change(if_t ifp)
2386 {
2387 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2388 	int error;
2389 
2390 	DPNI_LOCK(sc);
2391 	error = dpaa2_ni_media_change_locked(sc);
2392 	DPNI_UNLOCK(sc);
2393 	return (error);
2394 }
2395 
2396 /**
2397  * @brief Callback function to process media status request.
2398  */
2399 static void
2400 dpaa2_ni_media_status(if_t ifp, struct ifmediareq *ifmr)
2401 {
2402 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2403 
2404 	DPNI_LOCK(sc);
2405 	if (sc->mii) {
2406 		mii_pollstat(sc->mii);
2407 		ifmr->ifm_active = sc->mii->mii_media_active;
2408 		ifmr->ifm_status = sc->mii->mii_media_status;
2409 	}
2410 	DPNI_UNLOCK(sc);
2411 }
2412 
2413 /**
2414  * @brief Callout function to check and update media status.
2415  */
2416 static void
2417 dpaa2_ni_media_tick(void *arg)
2418 {
2419 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2420 
2421 	/* Check for media type change */
2422 	if (sc->mii) {
2423 		mii_tick(sc->mii);
2424 		if (sc->media_status != sc->mii->mii_media.ifm_media) {
2425 			printf("%s: media type changed (ifm_media=%x)\n",
2426 			    __func__, sc->mii->mii_media.ifm_media);
2427 			dpaa2_ni_media_change(sc->ifp);
2428 		}
2429 	}
2430 
2431 	/* Schedule another timeout one second from now */
2432 	callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);
2433 }
2434 
/**
 * @brief Bring the network interface up: enable the DPNI and start the media
 * callout.
 *
 * Called with the DPNI lock NOT held; the lock is taken only for the short
 * sections that touch the driver flags and MII state.
 */
static void
dpaa2_ni_init(void *arg)
{
	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
	if_t ifp = sc->ifp;
	device_t pdev = device_get_parent(sc->dev);
	device_t dev = sc->dev;
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	/* Nothing to do if the interface is already running. */
	DPNI_LOCK(sc);
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
		DPNI_UNLOCK(sc);
		return;
	}
	DPNI_UNLOCK(sc);

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Enable the DPNI; a failure is logged but init continues. */
	error = DPAA2_CMD_NI_ENABLE(dev, child, &cmd);
	if (error) {
		device_printf(dev, "%s: failed to enable DPNI: error=%d\n",
		    __func__, error);
	}

	DPNI_LOCK(sc);
	/* Announce we are up and running and can queue packets. */
	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	if (sc->mii) {
		/*
		 * mii_mediachg() will trigger a call into
		 * dpaa2_ni_miibus_statchg() to setup link state.
		 */
		dpaa2_ni_media_change_locked(sc);
	}
	callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);

	DPNI_UNLOCK(sc);

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return;

close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return;
}
2501 
2502 static int
2503 dpaa2_ni_transmit(if_t ifp, struct mbuf *m)
2504 {
2505 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2506 	struct dpaa2_channel *ch;
2507 	uint32_t fqid;
2508 	bool found = false;
2509 	int chidx = 0, error;
2510 
2511 	if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) {
2512 		return (0);
2513 	}
2514 
2515 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
2516 		fqid = m->m_pkthdr.flowid;
2517 		for (int i = 0; i < sc->chan_n; i++) {
2518 			ch = sc->channels[i];
2519 			for (int j = 0; j < ch->rxq_n; j++) {
2520 				if (fqid == ch->rx_queues[j].fqid) {
2521 					chidx = ch->flowid;
2522 					found = true;
2523 					break;
2524 				}
2525 			}
2526 			if (found) {
2527 				break;
2528 			}
2529 		}
2530 	}
2531 
2532 	ch = sc->channels[chidx];
2533 	error = buf_ring_enqueue(ch->xmit_br, m);
2534 	if (__predict_false(error != 0)) {
2535 		m_freem(m);
2536 	} else {
2537 		taskqueue_enqueue(ch->cleanup_tq, &ch->cleanup_task);
2538 	}
2539 
2540 	return (error);
2541 }
2542 
/**
 * @brief if_qflush callback: flush frames queued in software.
 */
static void
dpaa2_ni_qflush(if_t ifp)
{
	/* TODO: Find a way to drain Tx queues in QBMan. */
	if_qflush(ifp);
}
2549 
/**
 * @brief ifnet ioctl handler for the DPNI.
 *
 * Opens the resource container and the DPNI once up front so that the
 * individual ioctl cases can issue commands, and closes them on every exit
 * path. Returns rc for handled ioctls, or error when the command interface
 * itself fails.
 */
static int
dpaa2_ni_ioctl(if_t ifp, u_long c, caddr_t data)
{
	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *) data;
	device_t pdev = device_get_parent(sc->dev);
	device_t dev = sc->dev;
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_cmd cmd;
	uint32_t changed = 0;
	uint16_t rc_token, ni_token;
	int mtu, error, rc = 0;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	switch (c) {
	case SIOCSIFMTU:
		/* Validate and set the MTU, then program the max frame length. */
		DPNI_LOCK(sc);
		mtu = ifr->ifr_mtu;
		if (mtu < ETHERMIN || mtu > ETHERMTU_JUMBO) {
			DPNI_UNLOCK(sc);
			error = EINVAL;
			goto close_ni;
		}
		if_setmtu(ifp, mtu);
		DPNI_UNLOCK(sc);

		/* Update maximum frame length. */
		mtu += ETHER_HDR_LEN;
		if (if_getcapenable(ifp) & IFCAP_VLAN_MTU)
			mtu += ETHER_VLAN_ENCAP_LEN;
		error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd, mtu);
		if (error) {
			device_printf(dev, "%s: failed to update maximum frame "
			    "length: error=%d\n", __func__, error);
			goto close_ni;
		}
		break;
	case SIOCSIFCAP:
		/* Toggle the requested checksum offload capabilities. */
		changed = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
		if ((changed & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) != 0)
			if_togglecapenable(ifp, IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
		if ((changed & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) != 0) {
			if_togglecapenable(ifp, IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6);
			if_togglehwassist(ifp, DPAA2_CSUM_TX_OFFLOAD);
		}

		rc = dpaa2_ni_setup_if_caps(sc);
		if (rc) {
			printf("%s: failed to update iface capabilities: "
			    "error=%d\n", __func__, rc);
			rc = ENXIO;
		}
		break;
	case SIOCSIFFLAGS:
		DPNI_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				/* Re-apply promisc/allmulti if they changed. */
				changed = if_getflags(ifp) ^ sc->if_flags;
				if (changed & IFF_PROMISC ||
				    changed & IFF_ALLMULTI) {
					rc = dpaa2_ni_setup_if_flags(sc);
				}
			} else {
				/* Not running yet: bring the interface up. */
				DPNI_UNLOCK(sc);
				dpaa2_ni_init(sc);
				DPNI_LOCK(sc);
			}
		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			/* FIXME: Disable DPNI. See dpaa2_ni_init(). */
		}

		sc->if_flags = if_getflags(ifp);
		DPNI_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Rebuild the multicast MAC filter table. */
		DPNI_LOCK(sc);
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			DPNI_UNLOCK(sc);
			rc = dpaa2_ni_update_mac_filters(ifp);
			if (rc) {
				device_printf(dev, "%s: failed to update MAC "
				    "filters: error=%d\n", __func__, rc);
			}
			DPNI_LOCK(sc);
		}
		DPNI_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* Delegate to MII if present, otherwise to the fixed media. */
		if (sc->mii)
			rc = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, c);
		else if(sc->fixed_link) {
			rc = ifmedia_ioctl(ifp, ifr, &sc->fixed_ifmedia, c);
		}
		break;
	default:
		rc = ether_ioctl(ifp, c, data);
		break;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (rc);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
2678 
/**
 * @brief Rebuild the DPNI multicast MAC filter table from the interface's
 * current link-level multicast address list.
 *
 * Clears all existing multicast filters first, then re-adds every address via
 * dpaa2_ni_add_maddr(). Returns 0 on success or the first error encountered.
 */
static int
dpaa2_ni_update_mac_filters(if_t ifp)
{
	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
	struct dpaa2_ni_mcaddr_ctx ctx;
	device_t pdev = device_get_parent(sc->dev);
	device_t dev = sc->dev;
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Remove all multicast MAC filters. */
	error = DPAA2_CMD_NI_CLEAR_MAC_FILTERS(dev, child, &cmd, false, true);
	if (error) {
		device_printf(dev, "%s: failed to clear multicast MAC filters: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	/* Re-add every multicast address; ctx collects the first error. */
	ctx.ifp = ifp;
	ctx.error = 0;
	ctx.nent = 0;

	if_foreach_llmaddr(ifp, dpaa2_ni_add_maddr, &ctx);

	error = ctx.error;
close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
2730 
2731 static u_int
2732 dpaa2_ni_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2733 {
2734 	struct dpaa2_ni_mcaddr_ctx *ctx = arg;
2735 	struct dpaa2_ni_softc *sc = if_getsoftc(ctx->ifp);
2736 	device_t pdev = device_get_parent(sc->dev);
2737 	device_t dev = sc->dev;
2738 	device_t child = dev;
2739 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2740 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2741 	struct dpaa2_cmd cmd;
2742 	uint16_t rc_token, ni_token;
2743 	int error;
2744 
2745 	if (ctx->error != 0) {
2746 		return (0);
2747 	}
2748 
2749 	if (ETHER_IS_MULTICAST(LLADDR(sdl))) {
2750 		DPAA2_CMD_INIT(&cmd);
2751 
2752 		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
2753 		    &rc_token);
2754 		if (error) {
2755 			device_printf(dev, "%s: failed to open resource "
2756 			    "container: id=%d, error=%d\n", __func__, rcinfo->id,
2757 			    error);
2758 			return (0);
2759 		}
2760 		error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id,
2761 		    &ni_token);
2762 		if (error) {
2763 			device_printf(dev, "%s: failed to open network interface: "
2764 			    "id=%d, error=%d\n", __func__, dinfo->id, error);
2765 			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
2766 			    rc_token));
2767 			return (0);
2768 		}
2769 
2770 		ctx->error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, &cmd,
2771 		    LLADDR(sdl));
2772 
2773 		(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
2774 		    ni_token));
2775 		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
2776 		    rc_token));
2777 
2778 		if (ctx->error != 0) {
2779 			device_printf(dev, "%s: can't add more then %d MAC "
2780 			    "addresses, switching to the multicast promiscuous "
2781 			    "mode\n", __func__, ctx->nent);
2782 
2783 			/* Enable multicast promiscuous mode. */
2784 			DPNI_LOCK(sc);
2785 			if_setflagbits(ctx->ifp, IFF_ALLMULTI, 0);
2786 			sc->if_flags |= IFF_ALLMULTI;
2787 			ctx->error = dpaa2_ni_setup_if_flags(sc);
2788 			DPNI_UNLOCK(sc);
2789 
2790 			return (0);
2791 		}
2792 		ctx->nent++;
2793 	}
2794 
2795 	return (1);
2796 }
2797 
2798 static void
2799 dpaa2_ni_intr(void *arg)
2800 {
2801 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2802 	device_t pdev = device_get_parent(sc->dev);
2803 	device_t dev = sc->dev;
2804 	device_t child = dev;
2805 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2806 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2807 	struct dpaa2_cmd cmd;
2808 	uint32_t status = ~0u; /* clear all IRQ status bits */
2809 	uint16_t rc_token, ni_token;
2810 	int error;
2811 
2812 	DPAA2_CMD_INIT(&cmd);
2813 
2814 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2815 	if (error) {
2816 		device_printf(dev, "%s: failed to open resource container: "
2817 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2818 		goto err_exit;
2819 	}
2820 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2821 	if (error) {
2822 		device_printf(dev, "%s: failed to open network interface: "
2823 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2824 		goto close_rc;
2825 	}
2826 
2827 	error = DPAA2_CMD_NI_GET_IRQ_STATUS(dev, child, &cmd, DPNI_IRQ_INDEX,
2828 	    &status);
2829 	if (error) {
2830 		device_printf(sc->dev, "%s: failed to obtain IRQ status: "
2831 		    "error=%d\n", __func__, error);
2832 	}
2833 
2834 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2835 close_rc:
2836 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2837 err_exit:
2838 	return;
2839 }
2840 
2841 /**
2842  * @brief Execute channel's Rx/Tx routines.
2843  *
2844  * NOTE: Should not be re-entrant for the same channel. It is achieved by
2845  *       enqueuing the cleanup routine on a single-threaded taskqueue.
2846  */
2847 static void
2848 dpaa2_ni_cleanup_task(void *arg, int count)
2849 {
2850 	struct dpaa2_channel *ch = (struct dpaa2_channel *)arg;
2851 	struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev);
2852 	int error, rxc, txc;
2853 
2854 	for (int i = 0; i < DPAA2_CLEAN_BUDGET; i++) {
2855 		rxc  = dpaa2_ni_rx_cleanup(ch);
2856 		txc  = dpaa2_ni_tx_cleanup(ch);
2857 
2858 		if (__predict_false((if_getdrvflags(sc->ifp) &
2859 		    IFF_DRV_RUNNING) == 0)) {
2860 			return;
2861 		}
2862 
2863 		if ((txc != DPAA2_TX_BUDGET) && (rxc != DPAA2_RX_BUDGET)) {
2864 			break;
2865 		}
2866 	}
2867 
2868 	/* Re-arm channel to generate CDAN */
2869 	error = DPAA2_SWP_CONF_WQ_CHANNEL(ch->io_dev, &ch->ctx);
2870 	if (error != 0) {
2871 		panic("%s: failed to rearm channel: chan_id=%d, error=%d\n",
2872 		    __func__, ch->id, error);
2873 	}
2874 }
2875 
2876 /**
2877  * @brief Poll frames from a specific channel when CDAN is received.
2878  */
2879 static int
2880 dpaa2_ni_rx_cleanup(struct dpaa2_channel *ch)
2881 {
2882 	struct dpaa2_io_softc *iosc = device_get_softc(ch->io_dev);
2883 	struct dpaa2_swp *swp = iosc->swp;
2884 	struct dpaa2_ni_fq *fq;
2885 	struct dpaa2_buf *buf = &ch->store;
2886 	int budget = DPAA2_RX_BUDGET;
2887 	int error, consumed = 0;
2888 
2889 	do {
2890 		error = dpaa2_swp_pull(swp, ch->id, buf, DPAA2_ETH_STORE_FRAMES);
2891 		if (error) {
2892 			device_printf(ch->ni_dev, "%s: failed to pull frames: "
2893 			    "chan_id=%d, error=%d\n", __func__, ch->id, error);
2894 			break;
2895 		}
2896 		error = dpaa2_ni_consume_frames(ch, &fq, &consumed);
2897 		if (error == ENOENT || error == EALREADY) {
2898 			break;
2899 		}
2900 		if (error == ETIMEDOUT) {
2901 			device_printf(ch->ni_dev, "%s: timeout to consume "
2902 			    "frames: chan_id=%d\n", __func__, ch->id);
2903 		}
2904 	} while (--budget);
2905 
2906 	return (DPAA2_RX_BUDGET - budget);
2907 }
2908 
2909 static int
2910 dpaa2_ni_tx_cleanup(struct dpaa2_channel *ch)
2911 {
2912 	struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev);
2913 	struct dpaa2_ni_tx_ring *tx = &ch->txc_queue.tx_rings[0];
2914 	struct mbuf *m = NULL;
2915 	int budget = DPAA2_TX_BUDGET;
2916 
2917 	do {
2918 		mtx_assert(&ch->xmit_mtx, MA_NOTOWNED);
2919 		mtx_lock(&ch->xmit_mtx);
2920 		m = buf_ring_dequeue_sc(ch->xmit_br);
2921 		mtx_unlock(&ch->xmit_mtx);
2922 
2923 		if (__predict_false(m == NULL)) {
2924 			/* TODO: Do not give up easily */
2925 			break;
2926 		} else {
2927 			dpaa2_ni_tx(sc, ch, tx, m);
2928 		}
2929 	} while (--budget);
2930 
2931 	return (DPAA2_TX_BUDGET - budget);
2932 }
2933 
/**
 * @brief Transmit a single mbuf through the given Tx ring.
 *
 * Dequeues a Tx buffer descriptor from the ring, DMA-maps the mbuf
 * (defragmenting it once via m_collapse() when it exceeds
 * DPAA2_TX_SEGLIMIT segments), builds a frame descriptor and enqueues it
 * to QBMan, retrying up to DPAA2_NI_ENQUEUE_RETRIES times. On failure the
 * mbuf is freed, tx_dropped is bumped and the buffer descriptor is
 * returned to the ring.
 */
static void
dpaa2_ni_tx(struct dpaa2_ni_softc *sc, struct dpaa2_channel *ch,
    struct dpaa2_ni_tx_ring *tx, struct mbuf *m)
{
	device_t dev = sc->dev;
	struct dpaa2_ni_fq *fq = tx->fq;
	struct dpaa2_buf *buf, *sgt;
	struct dpaa2_fd fd;
	struct mbuf *md;
	bus_dma_segment_t segs[DPAA2_TX_SEGLIMIT];
	int rc, nsegs;
	int error;

	/* Grab a free Tx buffer descriptor; drop the packet if none left. */
	mtx_assert(&tx->lock, MA_NOTOWNED);
	mtx_lock(&tx->lock);
	buf = buf_ring_dequeue_sc(tx->br);
	mtx_unlock(&tx->lock);
	if (__predict_false(buf == NULL)) {
		/* TODO: Do not give up easily */
		m_freem(m);
		return;
	} else {
		DPAA2_BUF_ASSERT_TXREADY(buf);
		buf->m = m;
		sgt = buf->sgt;
	}

#if defined(INVARIANTS)
	struct dpaa2_ni_tx_ring *btx = (struct dpaa2_ni_tx_ring *)buf->opt;
	KASSERT(buf->opt == tx, ("%s: unexpected Tx ring", __func__));
	KASSERT(btx->fq->chan == ch, ("%s: unexpected channel", __func__));
#endif /* INVARIANTS */

	BPF_MTAP(sc->ifp, m);

	error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, m, segs, &nsegs,
	    BUS_DMA_NOWAIT);
	if (__predict_false(error != 0)) {
		/* Too many fragments, trying to defragment... */
		md = m_collapse(m, M_NOWAIT, DPAA2_TX_SEGLIMIT);
		if (md == NULL) {
			device_printf(dev, "%s: m_collapse() failed\n", __func__);
			fq->chan->tx_dropped++;
			goto err;
		}

		/* Retry the DMA load with the collapsed mbuf chain. */
		buf->m = m = md;
		error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, m, segs,
		    &nsegs, BUS_DMA_NOWAIT);
		if (__predict_false(error != 0)) {
			device_printf(dev, "%s: bus_dmamap_load_mbuf_sg() "
			    "failed: error=%d\n", __func__, error);
			fq->chan->tx_dropped++;
			goto err;
		}
	}

	error = dpaa2_ni_build_fd(sc, tx, buf, segs, nsegs, &fd);
	if (__predict_false(error != 0)) {
		device_printf(dev, "%s: failed to build frame descriptor: "
		    "error=%d\n", __func__, error);
		fq->chan->tx_dropped++;
		goto err_unload;
	}

	/* TODO: Enqueue several frames in a single command */
	for (int i = 0; i < DPAA2_NI_ENQUEUE_RETRIES; i++) {
		/* TODO: Return error codes instead of # of frames */
		rc = DPAA2_SWP_ENQ_MULTIPLE_FQ(fq->chan->io_dev, tx->fqid, &fd, 1);
		if (rc == 1) {
			break;
		}
	}

	/*
	 * NOTE(review): these PREWRITE syncs are issued after the enqueue
	 * command above — confirm QBMan cannot read the frame before the
	 * syncs complete on non-coherent platforms.
	 */
	bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sgt->dmat, sgt->dmap, BUS_DMASYNC_PREWRITE);

	if (rc != 1) {
		fq->chan->tx_dropped++;
		goto err_unload;
	} else {
		fq->chan->tx_frames++;
	}
	return;

err_unload:
	bus_dmamap_unload(buf->dmat, buf->dmap);
	if (sgt->paddr != 0) {
		bus_dmamap_unload(sgt->dmat, sgt->dmap);
	}
err:
	m_freem(buf->m);
	buf_ring_enqueue(tx->br, buf);
}
3028 
/*
 * Consume all frames pulled into the channel's store by the last VDQ
 * command, dispatching each one to the Rx, Rx-error or Tx-confirmation
 * handler based on the frame queue type stored in the dequeue context.
 *
 * Returns the code of the terminating dpaa2_chan_next_frame() call
 * (EALREADY or ENOENT). On return, *src holds the frame queue the store
 * was pulled from and *consumed the number of frames dispatched.
 */
static int
dpaa2_ni_consume_frames(struct dpaa2_channel *chan, struct dpaa2_ni_fq **src,
    uint32_t *consumed)
{
	struct dpaa2_ni_fq *fq = NULL;
	struct dpaa2_dq *dq;
	struct dpaa2_fd *fd;
	struct dpaa2_ni_rx_ctx ctx = {
		.head = NULL,
		.tail = NULL,
		.cnt = 0,
		.last = false
	};
	int rc, frames = 0;

	do {
		rc = dpaa2_chan_next_frame(chan, &dq);
		if (rc == EINPROGRESS) {
			/* More responses follow this one in the store. */
			if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
				fd = &dq->fdr.fd;
				fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;

				switch (fq->type) {
				case DPAA2_NI_QUEUE_RX:
					(void)dpaa2_ni_rx(chan, fq, fd, &ctx);
					break;
				case DPAA2_NI_QUEUE_RX_ERR:
					(void)dpaa2_ni_rx_err(chan, fq, fd);
					break;
				case DPAA2_NI_QUEUE_TX_CONF:
					(void)dpaa2_ni_tx_conf(chan, fq, fd);
					break;
				default:
					panic("%s: unknown queue type (1)",
					    __func__);
				}
				frames++;
			}
		} else if (rc == EALREADY || rc == ENOENT) {
			/* Final response of this VDQ: dispatch and stop. */
			if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
				fd = &dq->fdr.fd;
				fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;

				switch (fq->type) {
				case DPAA2_NI_QUEUE_RX:
					/*
					 * Last VDQ response (mbuf) in a chain
					 * obtained from the Rx queue.
					 */
					ctx.last = true;
					(void)dpaa2_ni_rx(chan, fq, fd, &ctx);
					break;
				case DPAA2_NI_QUEUE_RX_ERR:
					(void)dpaa2_ni_rx_err(chan, fq, fd);
					break;
				case DPAA2_NI_QUEUE_TX_CONF:
					(void)dpaa2_ni_tx_conf(chan, fq, fd);
					break;
				default:
					panic("%s: unknown queue type (2)",
					    __func__);
				}
				frames++;
			}
			break;
		} else {
			panic("%s: should not reach here: rc=%d", __func__, rc);
		}
	} while (true);

	KASSERT(chan->store_idx < chan->store_sz, ("%s: store_idx(%d) >= "
	    "store_sz(%d)", __func__, chan->store_idx, chan->store_sz));

	/*
	 * VDQ operation pulls frames from a single queue into the store.
	 * Return the frame queue and a number of consumed frames as an output.
	 */
	if (src != NULL) {
		*src = fq;
	}
	if (consumed != NULL) {
		*consumed = frames;
	}

	return (rc);
}
3115 
3116 /**
3117  * @brief Receive frames.
3118  */
3119 static int
3120 dpaa2_ni_rx(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq, struct dpaa2_fd *fd,
3121     struct dpaa2_ni_rx_ctx *ctx)
3122 {
3123 	bus_addr_t paddr = (bus_addr_t)fd->addr;
3124 	struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr);
3125 	struct dpaa2_buf *buf = fa->buf;
3126 	struct dpaa2_channel *bch = (struct dpaa2_channel *)buf->opt;
3127 	struct dpaa2_ni_softc *sc = device_get_softc(bch->ni_dev);
3128 	struct dpaa2_bp_softc *bpsc;
3129 	struct mbuf *m;
3130 	device_t bpdev;
3131 	bus_addr_t released[DPAA2_SWP_BUFS_PER_CMD];
3132 	void *buf_data;
3133 	int buf_len, error, released_n = 0;
3134 
3135 	KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
3136 	/*
3137 	 * NOTE: Current channel might not be the same as the "buffer" channel
3138 	 * and it's fine. It must not be NULL though.
3139 	 */
3140 	KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__));
3141 
3142 	if (__predict_false(paddr != buf->paddr)) {
3143 		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
3144 		    __func__, paddr, buf->paddr);
3145 	}
3146 
3147 	switch (dpaa2_ni_fd_err(fd)) {
3148 	case 1: /* Enqueue rejected by QMan */
3149 		sc->rx_enq_rej_frames++;
3150 		break;
3151 	case 2: /* QMan IEOI error */
3152 		sc->rx_ieoi_err_frames++;
3153 		break;
3154 	default:
3155 		break;
3156 	}
3157 	switch (dpaa2_ni_fd_format(fd)) {
3158 	case DPAA2_FD_SINGLE:
3159 		sc->rx_single_buf_frames++;
3160 		break;
3161 	case DPAA2_FD_SG:
3162 		sc->rx_sg_buf_frames++;
3163 		break;
3164 	default:
3165 		break;
3166 	}
3167 
3168 	mtx_assert(&bch->dma_mtx, MA_NOTOWNED);
3169 	mtx_lock(&bch->dma_mtx);
3170 
3171 	bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_POSTREAD);
3172 	bus_dmamap_unload(buf->dmat, buf->dmap);
3173 	m = buf->m;
3174 	buf_len = dpaa2_ni_fd_data_len(fd);
3175 	buf_data = (uint8_t *)buf->vaddr + dpaa2_ni_fd_offset(fd);
3176 	/* Prepare buffer to be re-cycled */
3177 	buf->m = NULL;
3178 	buf->paddr = 0;
3179 	buf->vaddr = NULL;
3180 	buf->seg.ds_addr = 0;
3181 	buf->seg.ds_len = 0;
3182 	buf->nseg = 0;
3183 
3184 	mtx_unlock(&bch->dma_mtx);
3185 
3186 	m->m_flags |= M_PKTHDR;
3187 	m->m_data = buf_data;
3188 	m->m_len = buf_len;
3189 	m->m_pkthdr.len = buf_len;
3190 	m->m_pkthdr.rcvif = sc->ifp;
3191 	m->m_pkthdr.flowid = fq->fqid;
3192 	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
3193 
3194 	if (ctx->head == NULL) {
3195 		KASSERT(ctx->tail == NULL, ("%s: tail already given?", __func__));
3196 		ctx->head = m;
3197 		ctx->tail = m;
3198 	} else {
3199 		KASSERT(ctx->head != NULL, ("%s: head is NULL", __func__));
3200 		ctx->tail->m_nextpkt = m;
3201 		ctx->tail = m;
3202 	}
3203 	ctx->cnt++;
3204 
3205 	if (ctx->last) {
3206 		ctx->tail->m_nextpkt = NULL;
3207 		if_input(sc->ifp, ctx->head);
3208 	}
3209 
3210 	/* Keep the buffer to be recycled */
3211 	ch->recycled[ch->recycled_n++] = buf;
3212 
3213 	/* Re-seed and release recycled buffers back to the pool */
3214 	if (ch->recycled_n == DPAA2_SWP_BUFS_PER_CMD) {
3215 		/* Release new buffers to the pool if needed */
3216 		taskqueue_enqueue(sc->bp_taskq, &ch->bp_task);
3217 
3218 		for (int i = 0; i < ch->recycled_n; i++) {
3219 			buf = ch->recycled[i];
3220 			bch = (struct dpaa2_channel *)buf->opt;
3221 
3222 			mtx_assert(&bch->dma_mtx, MA_NOTOWNED);
3223 			mtx_lock(&bch->dma_mtx);
3224 			error = dpaa2_buf_seed_rxb(sc->dev, buf,
3225 			    DPAA2_RX_BUF_SIZE, &bch->dma_mtx);
3226 			mtx_unlock(&bch->dma_mtx);
3227 
3228 			if (__predict_false(error != 0)) {
3229 				/* TODO: What else to do with the buffer? */
3230 				panic("%s: failed to recycle buffer: error=%d",
3231 				    __func__, error);
3232 			}
3233 
3234 			/* Prepare buffer to be released in a single command */
3235 			released[released_n++] = buf->paddr;
3236 		}
3237 
3238 		/* There's only one buffer pool for now */
3239 		bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
3240 		bpsc = device_get_softc(bpdev);
3241 
3242 		error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpsc->attr.bpid,
3243 		    released, released_n);
3244 		if (__predict_false(error != 0)) {
3245 			device_printf(sc->dev, "%s: failed to release buffers "
3246 			    "to the pool: error=%d\n", __func__, error);
3247 			return (error);
3248 		}
3249 		ch->recycled_n = 0;
3250 	}
3251 
3252 	return (0);
3253 }
3254 
3255 /**
3256  * @brief Receive Rx error frames.
3257  */
3258 static int
3259 dpaa2_ni_rx_err(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq,
3260     struct dpaa2_fd *fd)
3261 {
3262 	bus_addr_t paddr = (bus_addr_t)fd->addr;
3263 	struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr);
3264 	struct dpaa2_buf *buf = fa->buf;
3265 	struct dpaa2_channel *bch = (struct dpaa2_channel *)buf->opt;
3266 	struct dpaa2_ni_softc *sc = device_get_softc(bch->ni_dev);
3267 	device_t bpdev;
3268 	struct dpaa2_bp_softc *bpsc;
3269 	int error;
3270 
3271 	KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
3272 	/*
3273 	 * NOTE: Current channel might not be the same as the "buffer" channel
3274 	 * and it's fine. It must not be NULL though.
3275 	 */
3276 	KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__));
3277 
3278 	if (__predict_false(paddr != buf->paddr)) {
3279 		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
3280 		    __func__, paddr, buf->paddr);
3281 	}
3282 
3283 	/* There's only one buffer pool for now */
3284 	bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
3285 	bpsc = device_get_softc(bpdev);
3286 
3287 	/* Release buffer to QBMan buffer pool */
3288 	error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpsc->attr.bpid, &paddr, 1);
3289 	if (error != 0) {
3290 		device_printf(sc->dev, "%s: failed to release frame buffer to "
3291 		    "the pool: error=%d\n", __func__, error);
3292 		return (error);
3293 	}
3294 
3295 	return (0);
3296 }
3297 
3298 /**
3299  * @brief Receive Tx confirmation frames.
3300  */
3301 static int
3302 dpaa2_ni_tx_conf(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq,
3303     struct dpaa2_fd *fd)
3304 {
3305 	bus_addr_t paddr = (bus_addr_t)fd->addr;
3306 	struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr);
3307 	struct dpaa2_buf *buf = fa->buf;
3308 	struct dpaa2_buf *sgt = buf->sgt;
3309 	struct dpaa2_ni_tx_ring *tx = (struct dpaa2_ni_tx_ring *)buf->opt;
3310 	struct dpaa2_channel *bch = tx->fq->chan;
3311 
3312 	KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
3313 	KASSERT(tx != NULL, ("%s: Tx ring is NULL", __func__));
3314 	KASSERT(sgt != NULL, ("%s: S/G table is NULL", __func__));
3315 	/*
3316 	 * NOTE: Current channel might not be the same as the "buffer" channel
3317 	 * and it's fine. It must not be NULL though.
3318 	 */
3319 	KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__));
3320 
3321 	if (paddr != buf->paddr) {
3322 		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
3323 		    __func__, paddr, buf->paddr);
3324 	}
3325 
3326 	mtx_assert(&bch->dma_mtx, MA_NOTOWNED);
3327 	mtx_lock(&bch->dma_mtx);
3328 
3329 	bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_POSTWRITE);
3330 	bus_dmamap_sync(sgt->dmat, sgt->dmap, BUS_DMASYNC_POSTWRITE);
3331 	bus_dmamap_unload(buf->dmat, buf->dmap);
3332 	bus_dmamap_unload(sgt->dmat, sgt->dmap);
3333 	m_freem(buf->m);
3334 	buf->m = NULL;
3335 	buf->paddr = 0;
3336 	buf->vaddr = NULL;
3337 	sgt->paddr = 0;
3338 
3339 	mtx_unlock(&bch->dma_mtx);
3340 
3341 	/* Return Tx buffer back to the ring */
3342 	buf_ring_enqueue(tx->br, buf);
3343 
3344 	return (0);
3345 }
3346 
3347 /**
3348  * @brief Compare versions of the DPAA2 network interface API.
3349  */
3350 static int
3351 dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *sc, uint16_t major,
3352     uint16_t minor)
3353 {
3354 	if (sc->api_major == major) {
3355 		return sc->api_minor - minor;
3356 	}
3357 	return sc->api_major - major;
3358 }
3359 
3360 /**
3361  * @brief Build a DPAA2 frame descriptor.
3362  */
3363 static int
3364 dpaa2_ni_build_fd(struct dpaa2_ni_softc *sc, struct dpaa2_ni_tx_ring *tx,
3365     struct dpaa2_buf *buf, bus_dma_segment_t *segs, int nsegs, struct dpaa2_fd *fd)
3366 {
3367 	struct dpaa2_buf *sgt = buf->sgt;
3368 	struct dpaa2_sg_entry *sge;
3369 	struct dpaa2_fa *fa;
3370 	int i, error;
3371 
3372 	KASSERT(nsegs <= DPAA2_TX_SEGLIMIT, ("%s: too many segments", __func__));
3373 	KASSERT(buf->opt != NULL, ("%s: no Tx ring?", __func__));
3374 	KASSERT(sgt != NULL, ("%s: no S/G table?", __func__));
3375 	KASSERT(sgt->vaddr != NULL, ("%s: no S/G vaddr?", __func__));
3376 
3377 	memset(fd, 0, sizeof(*fd));
3378 
3379 	/* Populate and map S/G table */
3380 	if (__predict_true(nsegs <= DPAA2_TX_SEGLIMIT)) {
3381 		sge = (struct dpaa2_sg_entry *)sgt->vaddr + sc->tx_data_off;
3382 		for (i = 0; i < nsegs; i++) {
3383 			sge[i].addr = (uint64_t)segs[i].ds_addr;
3384 			sge[i].len = (uint32_t)segs[i].ds_len;
3385 			sge[i].offset_fmt = 0u;
3386 		}
3387 		sge[i-1].offset_fmt |= 0x8000u; /* set final entry flag */
3388 
3389 		KASSERT(sgt->paddr == 0, ("%s: paddr(%#jx) != 0", __func__,
3390 		    sgt->paddr));
3391 
3392 		error = bus_dmamap_load(sgt->dmat, sgt->dmap, sgt->vaddr,
3393 		    DPAA2_TX_SGT_SZ, dpaa2_dmamap_oneseg_cb, &sgt->paddr,
3394 		    BUS_DMA_NOWAIT);
3395 		if (__predict_false(error != 0)) {
3396 			device_printf(sc->dev, "%s: bus_dmamap_load() failed: "
3397 			    "error=%d\n", __func__, error);
3398 			return (error);
3399 		}
3400 
3401 		buf->paddr = sgt->paddr;
3402 		buf->vaddr = sgt->vaddr;
3403 		sc->tx_sg_frames++; /* for sysctl(9) */
3404 	} else {
3405 		return (EINVAL);
3406 	}
3407 
3408 	fa = (struct dpaa2_fa *)sgt->vaddr;
3409 	fa->magic = DPAA2_MAGIC;
3410 	fa->buf = buf;
3411 
3412 	fd->addr = buf->paddr;
3413 	fd->data_length = (uint32_t)buf->m->m_pkthdr.len;
3414 	fd->bpid_ivp_bmt = 0;
3415 	fd->offset_fmt_sl = 0x2000u | sc->tx_data_off;
3416 	fd->ctrl = 0x00800000u;
3417 
3418 	return (0);
3419 }
3420 
3421 static int
3422 dpaa2_ni_fd_err(struct dpaa2_fd *fd)
3423 {
3424 	return ((fd->ctrl >> DPAA2_NI_FD_ERR_SHIFT) & DPAA2_NI_FD_ERR_MASK);
3425 }
3426 
3427 static uint32_t
3428 dpaa2_ni_fd_data_len(struct dpaa2_fd *fd)
3429 {
3430 	if (dpaa2_ni_fd_short_len(fd)) {
3431 		return (fd->data_length & DPAA2_NI_FD_LEN_MASK);
3432 	}
3433 	return (fd->data_length);
3434 }
3435 
3436 static int
3437 dpaa2_ni_fd_format(struct dpaa2_fd *fd)
3438 {
3439 	return ((enum dpaa2_fd_format)((fd->offset_fmt_sl >>
3440 	    DPAA2_NI_FD_FMT_SHIFT) & DPAA2_NI_FD_FMT_MASK));
3441 }
3442 
3443 static bool
3444 dpaa2_ni_fd_short_len(struct dpaa2_fd *fd)
3445 {
3446 	return (((fd->offset_fmt_sl >> DPAA2_NI_FD_SL_SHIFT)
3447 	    & DPAA2_NI_FD_SL_MASK) == 1);
3448 }
3449 
3450 static int
3451 dpaa2_ni_fd_offset(struct dpaa2_fd *fd)
3452 {
3453 	return (fd->offset_fmt_sl & DPAA2_NI_FD_OFFSET_MASK);
3454 }
3455 
3456 /**
3457  * @brief Collect statistics of the network interface.
3458  */
3459 static int
3460 dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS)
3461 {
3462 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
3463 	struct dpni_stat *stat = &dpni_stat_sysctls[oidp->oid_number];
3464 	device_t pdev = device_get_parent(sc->dev);
3465 	device_t dev = sc->dev;
3466 	device_t child = dev;
3467 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
3468 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
3469 	struct dpaa2_cmd cmd;
3470 	uint64_t cnt[DPAA2_NI_STAT_COUNTERS];
3471 	uint64_t result = 0;
3472 	uint16_t rc_token, ni_token;
3473 	int error;
3474 
3475 	DPAA2_CMD_INIT(&cmd);
3476 
3477 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
3478 	if (error) {
3479 		device_printf(dev, "%s: failed to open resource container: "
3480 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
3481 		goto exit;
3482 	}
3483 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
3484 	if (error) {
3485 		device_printf(dev, "%s: failed to open network interface: "
3486 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
3487 		goto close_rc;
3488 	}
3489 
3490 	error = DPAA2_CMD_NI_GET_STATISTICS(dev, child, &cmd, stat->page, 0, cnt);
3491 	if (!error) {
3492 		result = cnt[stat->cnt];
3493 	}
3494 
3495 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
3496 close_rc:
3497 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
3498 exit:
3499 	return (sysctl_handle_64(oidp, &result, 0, req));
3500 }
3501 
3502 static int
3503 dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS)
3504 {
3505 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
3506 	uint32_t buf_num = DPAA2_ATOMIC_READ(&sc->buf_num);
3507 
3508 	return (sysctl_handle_32(oidp, &buf_num, 0, req));
3509 }
3510 
3511 static int
3512 dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS)
3513 {
3514 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
3515 	uint32_t buf_free = DPAA2_ATOMIC_READ(&sc->buf_free);
3516 
3517 	return (sysctl_handle_32(oidp, &buf_free, 0, req));
3518 }
3519 
3520 static int
3521 dpaa2_ni_set_hash(device_t dev, uint64_t flags)
3522 {
3523 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
3524 	uint64_t key = 0;
3525 	int i;
3526 
3527 	if (!(sc->attr.num.queues > 1)) {
3528 		return (EOPNOTSUPP);
3529 	}
3530 
3531 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3532 		if (dist_fields[i].rxnfc_field & flags) {
3533 			key |= dist_fields[i].id;
3534 		}
3535 	}
3536 
3537 	return (dpaa2_ni_set_dist_key(dev, DPAA2_NI_DIST_MODE_HASH, key));
3538 }
3539 
3540 /**
3541  * @brief Set Rx distribution (hash or flow classification) key flags is a
3542  * combination of RXH_ bits.
3543  */
3544 static int
3545 dpaa2_ni_set_dist_key(device_t dev, enum dpaa2_ni_dist_mode type, uint64_t flags)
3546 {
3547 	device_t pdev = device_get_parent(dev);
3548 	device_t child = dev;
3549 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
3550 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
3551 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
3552 	struct dpkg_profile_cfg cls_cfg;
3553 	struct dpkg_extract *key;
3554 	struct dpaa2_buf *buf = &sc->rxd_kcfg;
3555 	struct dpaa2_cmd cmd;
3556 	uint16_t rc_token, ni_token;
3557 	int i, error = 0;
3558 
3559 	if (__predict_true(buf->dmat == NULL)) {
3560 		buf->dmat = sc->rxd_dmat;
3561 	}
3562 
3563 	memset(&cls_cfg, 0, sizeof(cls_cfg));
3564 
3565 	/* Configure extracts according to the given flags. */
3566 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3567 		key = &cls_cfg.extracts[cls_cfg.num_extracts];
3568 
3569 		if (!(flags & dist_fields[i].id)) {
3570 			continue;
3571 		}
3572 
3573 		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
3574 			device_printf(dev, "%s: failed to add key extraction "
3575 			    "rule\n", __func__);
3576 			return (E2BIG);
3577 		}
3578 
3579 		key->type = DPKG_EXTRACT_FROM_HDR;
3580 		key->extract.from_hdr.prot = dist_fields[i].cls_prot;
3581 		key->extract.from_hdr.type = DPKG_FULL_FIELD;
3582 		key->extract.from_hdr.field = dist_fields[i].cls_field;
3583 		cls_cfg.num_extracts++;
3584 	}
3585 
3586 	error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr,
3587 	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap);
3588 	if (error != 0) {
3589 		device_printf(dev, "%s: failed to allocate a buffer for Rx "
3590 		    "traffic distribution key configuration\n", __func__);
3591 		return (error);
3592 	}
3593 
3594 	error = dpaa2_ni_prepare_key_cfg(&cls_cfg, (uint8_t *)buf->vaddr);
3595 	if (error != 0) {
3596 		device_printf(dev, "%s: failed to prepare key configuration: "
3597 		    "error=%d\n", __func__, error);
3598 		return (error);
3599 	}
3600 
3601 	/* Prepare for setting the Rx dist. */
3602 	error = bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr,
3603 	    DPAA2_CLASSIFIER_DMA_SIZE, dpaa2_dmamap_oneseg_cb, &buf->paddr,
3604 	    BUS_DMA_NOWAIT);
3605 	if (error != 0) {
3606 		device_printf(sc->dev, "%s: failed to map a buffer for Rx "
3607 		    "traffic distribution key configuration\n", __func__);
3608 		return (error);
3609 	}
3610 
3611 	if (type == DPAA2_NI_DIST_MODE_HASH) {
3612 		DPAA2_CMD_INIT(&cmd);
3613 
3614 		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
3615 		    &rc_token);
3616 		if (error) {
3617 			device_printf(dev, "%s: failed to open resource "
3618 			    "container: id=%d, error=%d\n", __func__, rcinfo->id,
3619 			    error);
3620 			goto err_exit;
3621 		}
3622 		error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id,
3623 		    &ni_token);
3624 		if (error) {
3625 			device_printf(dev, "%s: failed to open network "
3626 			    "interface: id=%d, error=%d\n", __func__, dinfo->id,
3627 			    error);
3628 			goto close_rc;
3629 		}
3630 
3631 		error = DPAA2_CMD_NI_SET_RX_TC_DIST(dev, child, &cmd,
3632 		    sc->attr.num.queues, 0, DPAA2_NI_DIST_MODE_HASH, buf->paddr);
3633 		if (error != 0) {
3634 			device_printf(dev, "%s: failed to set distribution mode "
3635 			    "and size for the traffic class\n", __func__);
3636 		}
3637 
3638 		(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
3639 		    ni_token));
3640 close_rc:
3641 		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
3642 		    rc_token));
3643 	}
3644 
3645 err_exit:
3646 	return (error);
3647 }
3648 
3649 /**
3650  * @brief Prepares extract parameters.
3651  *
3652  * cfg:		Defining a full Key Generation profile.
3653  * key_cfg_buf:	Zeroed 256 bytes of memory before mapping it to DMA.
3654  */
3655 static int
3656 dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *cfg, uint8_t *key_cfg_buf)
3657 {
3658 	struct dpni_ext_set_rx_tc_dist *dpni_ext;
3659 	struct dpni_dist_extract *extr;
3660 	int i, j;
3661 
3662 	if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
3663 		return (EINVAL);
3664 
3665 	dpni_ext = (struct dpni_ext_set_rx_tc_dist *) key_cfg_buf;
3666 	dpni_ext->num_extracts = cfg->num_extracts;
3667 
3668 	for (i = 0; i < cfg->num_extracts; i++) {
3669 		extr = &dpni_ext->extracts[i];
3670 
3671 		switch (cfg->extracts[i].type) {
3672 		case DPKG_EXTRACT_FROM_HDR:
3673 			extr->prot = cfg->extracts[i].extract.from_hdr.prot;
3674 			extr->efh_type =
3675 			    cfg->extracts[i].extract.from_hdr.type & 0x0Fu;
3676 			extr->size = cfg->extracts[i].extract.from_hdr.size;
3677 			extr->offset = cfg->extracts[i].extract.from_hdr.offset;
3678 			extr->field = cfg->extracts[i].extract.from_hdr.field;
3679 			extr->hdr_index =
3680 				cfg->extracts[i].extract.from_hdr.hdr_index;
3681 			break;
3682 		case DPKG_EXTRACT_FROM_DATA:
3683 			extr->size = cfg->extracts[i].extract.from_data.size;
3684 			extr->offset =
3685 				cfg->extracts[i].extract.from_data.offset;
3686 			break;
3687 		case DPKG_EXTRACT_FROM_PARSE:
3688 			extr->size = cfg->extracts[i].extract.from_parse.size;
3689 			extr->offset =
3690 				cfg->extracts[i].extract.from_parse.offset;
3691 			break;
3692 		default:
3693 			return (EINVAL);
3694 		}
3695 
3696 		extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
3697 		extr->extract_type = cfg->extracts[i].type & 0x0Fu;
3698 
3699 		for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
3700 			extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
3701 			extr->masks[j].offset =
3702 				cfg->extracts[i].masks[j].offset;
3703 		}
3704 	}
3705 
3706 	return (0);
3707 }
3708 
/* newbus(9) glue: method table, driver declaration and module metadata. */
static device_method_t dpaa2_ni_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		dpaa2_ni_probe),
	DEVMETHOD(device_attach,	dpaa2_ni_attach),
	DEVMETHOD(device_detach,	dpaa2_ni_detach),

	/* mii via memac_mdio */
	DEVMETHOD(miibus_statchg,	dpaa2_ni_miibus_statchg),

	DEVMETHOD_END
};

static driver_t dpaa2_ni_driver = {
	"dpaa2_ni",
	dpaa2_ni_methods,
	sizeof(struct dpaa2_ni_softc),
};

/* miibus attaches under dpaa2_ni; dpaa2_ni under the resource container. */
DRIVER_MODULE(miibus, dpaa2_ni, miibus_driver, 0, 0);
DRIVER_MODULE(dpaa2_ni, dpaa2_rc, dpaa2_ni_driver, 0, 0);

MODULE_DEPEND(dpaa2_ni, miibus, 1, 1, 1);
/* MEMAC MDIO attachment comes from either the ACPI or the FDT flavor. */
#ifdef DEV_ACPI
MODULE_DEPEND(dpaa2_ni, memac_mdio_acpi, 1, 1, 1);
#endif
#ifdef FDT
MODULE_DEPEND(dpaa2_ni, memac_mdio_fdt, 1, 1, 1);
#endif
3737