xref: /freebsd/sys/dev/dpaa2/dpaa2_ni.c (revision 17aab35a77a1b1bf02fc85bb8ffadccb0ca5006d)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright © 2021-2023 Dmitry Salychev
5  * Copyright © 2022 Mathew McBride
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 /*
31  * The DPAA2 Network Interface (DPNI) driver.
32  *
33  * The DPNI object is a network interface that is configurable to support a wide
34  * range of features from a very basic Ethernet interface up to a
35  * high-functioning network interface. The DPNI supports features that are
36  * expected by standard network stacks, from basic features to offloads.
37  *
38  * DPNIs work with Ethernet traffic, starting with the L2 header. Additional
39  * functions are provided for standard network protocols (L2, L3, L4, etc.).
40  */
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/bus.h>
46 #include <sys/rman.h>
47 #include <sys/module.h>
48 #include <sys/malloc.h>
49 #include <sys/mutex.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/sysctl.h>
53 #include <sys/mbuf.h>
54 #include <sys/taskqueue.h>
55 #include <sys/sysctl.h>
56 #include <sys/buf_ring.h>
57 #include <sys/smp.h>
58 #include <sys/proc.h>
59 
60 #include <vm/vm.h>
61 #include <vm/pmap.h>
62 
63 #include <machine/bus.h>
64 #include <machine/resource.h>
65 #include <machine/atomic.h>
66 #include <machine/vmparam.h>
67 
68 #include <net/ethernet.h>
69 #include <net/bpf.h>
70 #include <net/if.h>
71 #include <net/if_dl.h>
72 #include <net/if_media.h>
73 #include <net/if_types.h>
74 #include <net/if_var.h>
75 
76 #include <dev/pci/pcivar.h>
77 #include <dev/mii/mii.h>
78 #include <dev/mii/miivar.h>
79 #include <dev/mdio/mdio.h>
80 
81 #include "opt_acpi.h"
82 #include "opt_platform.h"
83 
84 #include "pcib_if.h"
85 #include "pci_if.h"
86 #include "miibus_if.h"
87 #include "memac_mdio_if.h"
88 
89 #include "dpaa2_types.h"
90 #include "dpaa2_mc.h"
91 #include "dpaa2_mc_if.h"
92 #include "dpaa2_mcp.h"
93 #include "dpaa2_swp.h"
94 #include "dpaa2_swp_if.h"
95 #include "dpaa2_cmd_if.h"
96 #include "dpaa2_ni.h"
97 #include "dpaa2_channel.h"
98 #include "dpaa2_buf.h"
99 
100 #define BIT(x)			(1ul << (x))
101 #define WRIOP_VERSION(x, y, z)	((x) << 10 | (y) << 5 | (z) << 0)
102 #define ARRAY_SIZE(a)		(sizeof(a) / sizeof((a)[0]))
103 
104 /* Frame Dequeue Response status bits. */
105 #define IS_NULL_RESPONSE(stat)	((((stat) >> 4) & 1) == 0)
106 
107 #define	ALIGN_UP(x, y)		roundup2((x), (y))
108 #define	ALIGN_DOWN(x, y)	rounddown2((x), (y))
109 #define CACHE_LINE_ALIGN(x)	ALIGN_UP((x), CACHE_LINE_SIZE)
110 
111 #define DPNI_LOCK(__sc) do {			\
112 	mtx_assert(&(__sc)->lock, MA_NOTOWNED);	\
113 	mtx_lock(&(__sc)->lock);		\
114 } while (0)
115 #define	DPNI_UNLOCK(__sc) do {			\
116 	mtx_assert(&(__sc)->lock, MA_OWNED);	\
117 	mtx_unlock(&(__sc)->lock);		\
118 } while (0)
119 #define	DPNI_LOCK_ASSERT(__sc) do {		\
120 	mtx_assert(&(__sc)->lock, MA_OWNED);	\
121 } while (0)
122 
123 #define DPAA2_TX_RING(sc, chan, tc) \
124 	(&(sc)->channels[(chan)]->txc_queue.tx_rings[(tc)])
125 
126 MALLOC_DEFINE(M_DPAA2_TXB, "dpaa2_txb", "DPAA2 DMA-mapped buffer (Tx)");
127 
128 /*
129  * How many times channel cleanup routine will be repeated if the RX or TX
130  * budget was depleted.
131  */
132 #define DPAA2_CLEAN_BUDGET	64 /* sysctl(9)? */
133 /* TX/RX budget for the channel cleanup task */
134 #define DPAA2_TX_BUDGET		128 /* sysctl(9)? */
135 #define DPAA2_RX_BUDGET		256 /* sysctl(9)? */
136 
137 #define DPNI_IRQ_INDEX		0 /* Index of the only DPNI IRQ. */
138 #define DPNI_IRQ_LINK_CHANGED	1 /* Link state changed */
139 #define DPNI_IRQ_EP_CHANGED	2 /* DPAA2 endpoint dis/connected */
140 
141 /* Default maximum RX frame length w/o CRC. */
142 #define	DPAA2_ETH_MFL		(ETHER_MAX_LEN_JUMBO + ETHER_VLAN_ENCAP_LEN - \
143     ETHER_CRC_LEN)
144 
145 /* Minimally supported version of the DPNI API. */
146 #define DPNI_VER_MAJOR		7
147 #define DPNI_VER_MINOR		0
148 
149 /* Rx/Tx buffers configuration. */
150 #define BUF_ALIGN_V1		256 /* WRIOP v1.0.0 limitation */
151 #define BUF_ALIGN		64
152 #define BUF_SWA_SIZE		64  /* SW annotation size */
153 #define BUF_RX_HWA_SIZE		64  /* HW annotation size */
154 #define BUF_TX_HWA_SIZE		128 /* HW annotation size */
155 
156 #define DPAA2_RX_BUFRING_SZ	(4096u)
157 #define DPAA2_RXE_BUFRING_SZ	(1024u)
158 #define DPAA2_TXC_BUFRING_SZ	(4096u)
159 #define DPAA2_TX_SEGLIMIT	(16u) /* arbitrary number */
160 #define DPAA2_TX_SEG_SZ		(PAGE_SIZE)
161 #define DPAA2_TX_SEGS_MAXSZ	(DPAA2_TX_SEGLIMIT * DPAA2_TX_SEG_SZ)
162 #define DPAA2_TX_SGT_SZ		(PAGE_SIZE) /* bytes */
163 
164 /* Size of a buffer to keep a QoS table key configuration. */
165 #define ETH_QOS_KCFG_BUF_SIZE	(PAGE_SIZE)
166 
167 /* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */
168 #define DPAA2_CLASSIFIER_DMA_SIZE (PAGE_SIZE)
169 
170 /* Buffers layout options. */
171 #define BUF_LOPT_TIMESTAMP	0x1
172 #define BUF_LOPT_PARSER_RESULT	0x2
173 #define BUF_LOPT_FRAME_STATUS	0x4
174 #define BUF_LOPT_PRIV_DATA_SZ	0x8
175 #define BUF_LOPT_DATA_ALIGN	0x10
176 #define BUF_LOPT_DATA_HEAD_ROOM	0x20
177 #define BUF_LOPT_DATA_TAIL_ROOM	0x40
178 
179 #define DPAA2_NI_BUF_ADDR_MASK	(0x1FFFFFFFFFFFFul) /* 49-bit addresses max. */
180 #define DPAA2_NI_BUF_CHAN_MASK	(0xFu)
181 #define DPAA2_NI_BUF_CHAN_SHIFT	(60)
182 #define DPAA2_NI_BUF_IDX_MASK	(0x7FFFu)
183 #define DPAA2_NI_BUF_IDX_SHIFT	(49)
184 #define DPAA2_NI_TX_IDX_MASK	(0x7u)
185 #define DPAA2_NI_TX_IDX_SHIFT	(57)
186 #define DPAA2_NI_TXBUF_IDX_MASK	(0xFFu)
187 #define DPAA2_NI_TXBUF_IDX_SHIFT (49)
188 
189 #define DPAA2_NI_FD_FMT_MASK	(0x3u)
190 #define DPAA2_NI_FD_FMT_SHIFT	(12)
191 #define DPAA2_NI_FD_ERR_MASK	(0xFFu)
192 #define DPAA2_NI_FD_ERR_SHIFT	(0)
193 #define DPAA2_NI_FD_SL_MASK	(0x1u)
194 #define DPAA2_NI_FD_SL_SHIFT	(14)
195 #define DPAA2_NI_FD_LEN_MASK	(0x3FFFFu)
196 #define DPAA2_NI_FD_OFFSET_MASK (0x0FFFu)
197 
198 /* Enables TCAM for Flow Steering and QoS look-ups. */
199 #define DPNI_OPT_HAS_KEY_MASKING 0x10
200 
201 /* Unique IDs for the supported Rx classification header fields. */
202 #define DPAA2_ETH_DIST_ETHDST	BIT(0)
203 #define DPAA2_ETH_DIST_ETHSRC	BIT(1)
204 #define DPAA2_ETH_DIST_ETHTYPE	BIT(2)
205 #define DPAA2_ETH_DIST_VLAN	BIT(3)
206 #define DPAA2_ETH_DIST_IPSRC	BIT(4)
207 #define DPAA2_ETH_DIST_IPDST	BIT(5)
208 #define DPAA2_ETH_DIST_IPPROTO	BIT(6)
209 #define DPAA2_ETH_DIST_L4SRC	BIT(7)
210 #define DPAA2_ETH_DIST_L4DST	BIT(8)
211 #define DPAA2_ETH_DIST_ALL	(~0ULL)
212 
213 /* L3-L4 network traffic flow hash options. */
214 #define	RXH_L2DA		(1 << 1)
215 #define	RXH_VLAN		(1 << 2)
216 #define	RXH_L3_PROTO		(1 << 3)
217 #define	RXH_IP_SRC		(1 << 4)
218 #define	RXH_IP_DST		(1 << 5)
219 #define	RXH_L4_B_0_1		(1 << 6) /* src port in case of TCP/UDP/SCTP */
220 #define	RXH_L4_B_2_3		(1 << 7) /* dst port in case of TCP/UDP/SCTP */
221 #define	RXH_DISCARD		(1 << 31)
222 
223 /* Default Rx hash options, set during attaching. */
224 #define DPAA2_RXH_DEFAULT	(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
225 
226 MALLOC_DEFINE(M_DPAA2_NI, "dpaa2_ni", "DPAA2 Network Interface");
227 
228 /*
229  * DPAA2 Network Interface resource specification.
230  *
231  * NOTE: Don't forget to update macros in dpaa2_ni.h in case of any changes in
232  *       the specification!
233  */
/*
 * Bus resources required by a DPNI instance, in the order expected by
 * bus_alloc_resources(9).  Entries marked RF_OPTIONAL may be absent on
 * systems with fewer cores or a smaller resource container.
 *
 * NOTE: Don't forget to update macros in dpaa2_ni.h in case of any changes in
 *       the specification!
 */
struct resource_spec dpaa2_ni_spec[] = {
	/*
	 * DPMCP resources.
	 *
	 * NOTE: MC command portals (MCPs) are used to send commands to, and
	 *	 receive responses from, the MC firmware. One portal per DPNI.
	 */
	{ DPAA2_DEV_MCP, DPAA2_NI_MCP_RID(0),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	/*
	 * DPIO resources (software portals).
	 *
	 * NOTE: One per running core. While DPIOs are the source of data
	 *	 availability interrupts, the DPCONs are used to identify the
	 *	 network interface that has produced ingress data to that core.
	 */
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(0),    RF_ACTIVE | RF_SHAREABLE },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(1),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(2),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(3),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(4),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(5),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(6),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(7),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(8),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(9),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(10),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(11),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(12),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(13),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(14),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(15),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	/*
	 * DPBP resources (buffer pools).
	 *
	 * NOTE: One per network interface.
	 */
	{ DPAA2_DEV_BP,  DPAA2_NI_BP_RID(0),   RF_ACTIVE },
	/*
	 * DPCON resources (channels).
	 *
	 * NOTE: One DPCON per core where Rx or Tx confirmation traffic to be
	 *	 distributed to.
	 * NOTE: Since it is necessary to distinguish between traffic from
	 *	 different network interfaces arriving on the same core, the
	 *	 DPCONs must be private to the DPNIs.
	 */
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(0),   RF_ACTIVE },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(1),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(2),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(3),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(4),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(5),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(6),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(7),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(8),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(9),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(10),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(11),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(12),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(13),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(14),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(15),  RF_ACTIVE | RF_OPTIONAL },

	RESOURCE_SPEC_END
};
299 
300 /* Supported header fields for Rx hash distribution key */
/*
 * Supported header fields for Rx hash distribution key.
 *
 * Each entry maps an RXH_* flow-hash option (rxnfc_field) onto the network
 * protocol/field pair understood by the MC key-generation profile, along
 * with a unique DPAA2_ETH_DIST_* id and the field's size in bytes.  Entries
 * without an explicit .rxnfc_field default to 0 (no RXH_* equivalent).
 */
static const struct dpaa2_eth_dist_fields dist_fields[] = {
	{
		/* L2 header: destination MAC (6 bytes) */
		.rxnfc_field = RXH_L2DA,
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_DA,
		.id = DPAA2_ETH_DIST_ETHDST,
		.size = 6,
	}, {
		/* L2 header: source MAC (no RXH_* counterpart) */
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_SA,
		.id = DPAA2_ETH_DIST_ETHSRC,
		.size = 6,
	}, {
		/* This is the last ethertype field parsed:
		 * depending on frame format, it can be the MAC ethertype
		 * or the VLAN etype.
		 */
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_TYPE,
		.id = DPAA2_ETH_DIST_ETHTYPE,
		.size = 2,
	}, {
		/* VLAN header: TCI (priority + VLAN id) */
		.rxnfc_field = RXH_VLAN,
		.cls_prot = NET_PROT_VLAN,
		.cls_field = NH_FLD_VLAN_TCI,
		.id = DPAA2_ETH_DIST_VLAN,
		.size = 2,
	}, {
		/* IP header: source address */
		.rxnfc_field = RXH_IP_SRC,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_SRC,
		.id = DPAA2_ETH_DIST_IPSRC,
		.size = 4,
	}, {
		/* IP header: destination address */
		.rxnfc_field = RXH_IP_DST,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_DST,
		.id = DPAA2_ETH_DIST_IPDST,
		.size = 4,
	}, {
		/* IP header: protocol number */
		.rxnfc_field = RXH_L3_PROTO,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_PROTO,
		.id = DPAA2_ETH_DIST_IPPROTO,
		.size = 1,
	}, {
		/* Using UDP ports, this is functionally equivalent to raw
		 * byte pairs from L4 header.
		 */
		.rxnfc_field = RXH_L4_B_0_1,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_SRC,
		.id = DPAA2_ETH_DIST_L4SRC,
		.size = 2,
	}, {
		/* L4 destination port (TCP/UDP/SCTP share the offset) */
		.rxnfc_field = RXH_L4_B_2_3,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_DST,
		.id = DPAA2_ETH_DIST_L4DST,
		.size = 2,
	},
};
366 
/*
 * DPNI statistics exported via sysctl(9).
 *
 * Each entry names one hardware counter: 'page' selects the statistics page
 * and 'cnt' the counter index within that page, as laid out by the MC
 * firmware's "get statistics" command.  The array size must stay equal to
 * DPAA2_NI_STAT_SYSCTLS.
 */
static struct dpni_stat {
	int	 page;	/* statistics page number */
	int	 cnt;	/* counter index within the page */
	char	*name;	/* sysctl leaf name */
	char	*desc;	/* sysctl description string */
} dpni_stat_sysctls[DPAA2_NI_STAT_SYSCTLS] = {
	/* PAGE, COUNTER, NAME, DESCRIPTION */
	{  0, 0, "in_all_frames",	"All accepted ingress frames" },
	{  0, 1, "in_all_bytes",	"Bytes in all accepted ingress frames" },
	{  0, 2, "in_multi_frames",	"Multicast accepted ingress frames" },
	{  1, 0, "eg_all_frames",	"All egress frames transmitted" },
	{  1, 1, "eg_all_bytes",	"Bytes in all frames transmitted" },
	{  1, 2, "eg_multi_frames",	"Multicast egress frames transmitted" },
	{  2, 0, "in_filtered_frames",	"All ingress frames discarded due to "
	   				"filtering" },
	{  2, 1, "in_discarded_frames",	"All frames discarded due to errors" },
	{  2, 2, "in_nobuf_discards",	"Discards on ingress side due to buffer "
	   				"depletion in DPNI buffer pools" },
};
386 
/*
 * Per-cleanup-pass Rx context: accumulates a chain of received mbufs so they
 * can be handed to the network stack in a batch.
 */
struct dpaa2_ni_rx_ctx {
	struct mbuf	*head;	/* first mbuf of the accumulated chain */
	struct mbuf	*tail;	/* last mbuf, for O(1) append */
	int		 cnt;	/* number of mbufs in the chain */
	bool		 last;	/* true once the final frame has been seen */
};
393 
394 /* Device interface */
395 static int dpaa2_ni_probe(device_t);
396 static int dpaa2_ni_attach(device_t);
397 static int dpaa2_ni_detach(device_t);
398 
399 /* DPAA2 network interface setup and configuration */
400 static int dpaa2_ni_setup(device_t);
401 static int dpaa2_ni_setup_channels(device_t);
402 static int dpaa2_ni_bind(device_t);
403 static int dpaa2_ni_setup_rx_dist(device_t);
404 static int dpaa2_ni_setup_irqs(device_t);
405 static int dpaa2_ni_setup_msi(struct dpaa2_ni_softc *);
406 static int dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *);
407 static int dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *);
408 static int dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *);
409 static int dpaa2_ni_setup_dma(struct dpaa2_ni_softc *);
410 
411 /* Tx/Rx flow configuration */
412 static int dpaa2_ni_setup_rx_flow(device_t, struct dpaa2_ni_fq *);
413 static int dpaa2_ni_setup_tx_flow(device_t, struct dpaa2_ni_fq *);
414 static int dpaa2_ni_setup_rx_err_flow(device_t, struct dpaa2_ni_fq *);
415 
416 /* Configuration subroutines */
417 static int dpaa2_ni_set_buf_layout(device_t);
418 static int dpaa2_ni_set_pause_frame(device_t);
419 static int dpaa2_ni_set_qos_table(device_t);
420 static int dpaa2_ni_set_mac_addr(device_t);
421 static int dpaa2_ni_set_hash(device_t, uint64_t);
422 static int dpaa2_ni_set_dist_key(device_t, enum dpaa2_ni_dist_mode, uint64_t);
423 
424 /* Frame descriptor routines */
425 static int dpaa2_ni_build_fd(struct dpaa2_ni_softc *, struct dpaa2_ni_tx_ring *,
426     struct dpaa2_buf *, bus_dma_segment_t *, int, struct dpaa2_fd *);
427 static int dpaa2_ni_fd_err(struct dpaa2_fd *);
428 static uint32_t dpaa2_ni_fd_data_len(struct dpaa2_fd *);
429 static int dpaa2_ni_fd_format(struct dpaa2_fd *);
430 static bool dpaa2_ni_fd_short_len(struct dpaa2_fd *);
431 static int dpaa2_ni_fd_offset(struct dpaa2_fd *);
432 
433 /* Various subroutines */
434 static int dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *, uint16_t, uint16_t);
435 static int dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *, uint8_t *);
436 
437 /* Network interface routines */
438 static void dpaa2_ni_init(void *);
439 static int  dpaa2_ni_transmit(if_t , struct mbuf *);
440 static void dpaa2_ni_qflush(if_t );
441 static int  dpaa2_ni_ioctl(if_t , u_long, caddr_t);
442 static int  dpaa2_ni_update_mac_filters(if_t );
443 static u_int dpaa2_ni_add_maddr(void *, struct sockaddr_dl *, u_int);
444 
445 /* Interrupt handlers */
446 static void dpaa2_ni_intr(void *);
447 
448 /* MII handlers */
449 static void dpaa2_ni_miibus_statchg(device_t);
450 static int  dpaa2_ni_media_change(if_t );
451 static void dpaa2_ni_media_status(if_t , struct ifmediareq *);
452 static void dpaa2_ni_media_tick(void *);
453 
454 /* Tx/Rx routines. */
455 static int dpaa2_ni_rx_cleanup(struct dpaa2_channel *);
456 static int dpaa2_ni_tx_cleanup(struct dpaa2_channel *);
457 static void dpaa2_ni_tx(struct dpaa2_ni_softc *, struct dpaa2_channel *,
458     struct dpaa2_ni_tx_ring *, struct mbuf *);
459 static void dpaa2_ni_cleanup_task(void *, int);
460 
461 /* Tx/Rx subroutines */
462 static int dpaa2_ni_consume_frames(struct dpaa2_channel *, struct dpaa2_ni_fq **,
463     uint32_t *);
464 static int dpaa2_ni_rx(struct dpaa2_channel *, struct dpaa2_ni_fq *,
465     struct dpaa2_fd *, struct dpaa2_ni_rx_ctx *);
466 static int dpaa2_ni_rx_err(struct dpaa2_channel *, struct dpaa2_ni_fq *,
467     struct dpaa2_fd *);
468 static int dpaa2_ni_tx_conf(struct dpaa2_channel *, struct dpaa2_ni_fq *,
469     struct dpaa2_fd *);
470 
471 /* sysctl(9) */
472 static int dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS);
473 static int dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS);
474 static int dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS);
475 
476 static int
477 dpaa2_ni_probe(device_t dev)
478 {
479 	/* DPNI device will be added by a parent resource container itself. */
480 	device_set_desc(dev, "DPAA2 Network Interface");
481 	return (BUS_PROBE_DEFAULT);
482 }
483 
/**
 * @brief Attach routine for a DPNI device.
 *
 * Initializes the softc, allocates bus resources (MC portal, DPIOs, DPBP and
 * DPCONs) per dpaa2_ni_spec, creates the ifnet, opens the resource container
 * and the DPNI object via MC commands, sets up channels/bindings/IRQs/sysctls
 * and finally attaches the Ethernet interface.
 *
 * Returns 0 on success or ENXIO on any failure (the MC-level 'error' code is
 * only reported via device_printf(), not propagated).
 *
 * NOTE(review): on the close_ni/close_rc/err_exit paths the ifnet, mutex,
 * bus resources and bp_taskq acquired earlier are not released here — and
 * dpaa2_ni_detach() is still a stub — so a failed attach leaks them; confirm
 * before relying on repeated attach attempts.
 */
static int
dpaa2_ni_attach(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	device_t mcp_dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *mcp_dinfo;
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	if_t ifp;
	char tq_name[32];
	int error;

	/* Start from a known-clean softc state. */
	sc->dev = dev;
	sc->ifp = NULL;
	sc->miibus = NULL;
	sc->mii = NULL;
	sc->media_status = 0;
	sc->if_flags = 0;
	sc->link_state = LINK_STATE_UNKNOWN;
	sc->buf_align = 0;

	/* For debug purposes only! */
	sc->rx_anomaly_frames = 0;
	sc->rx_single_buf_frames = 0;
	sc->rx_sg_buf_frames = 0;
	sc->rx_enq_rej_frames = 0;
	sc->rx_ieoi_err_frames = 0;
	sc->tx_single_buf_frames = 0;
	sc->tx_sg_frames = 0;

	/* Buffer counters shared with the cleanup/seeding paths. */
	DPAA2_ATOMIC_XCHG(&sc->buf_num, 0);
	DPAA2_ATOMIC_XCHG(&sc->buf_free, 0);

	sc->rxd_dmat = NULL;
	sc->qos_dmat = NULL;

	/* QoS table key configuration buffer (not DMA-mapped yet). */
	sc->qos_kcfg.dmap = NULL;
	sc->qos_kcfg.paddr = 0;
	sc->qos_kcfg.vaddr = NULL;

	/* Rx distribution key configuration buffer (not DMA-mapped yet). */
	sc->rxd_kcfg.dmap = NULL;
	sc->rxd_kcfg.paddr = 0;
	sc->rxd_kcfg.vaddr = NULL;

	sc->mac.dpmac_id = 0;
	sc->mac.phy_dev = NULL;
	memset(sc->mac.addr, 0, ETHER_ADDR_LEN);

	error = bus_alloc_resources(sc->dev, dpaa2_ni_spec, sc->res);
	if (error) {
		device_printf(dev, "%s: failed to allocate resources: "
		    "error=%d\n", __func__, error);
		goto err_exit;
	}

	/* Obtain MC portal. */
	mcp_dev = (device_t) rman_get_start(sc->res[DPAA2_NI_MCP_RID(0)]);
	mcp_dinfo = device_get_ivars(mcp_dev);
	dinfo->portal = mcp_dinfo->portal;

	mtx_init(&sc->lock, device_get_nameunit(dev), "dpaa2_ni", MTX_DEF);

	/* Allocate network interface */
	ifp = if_alloc(IFT_ETHER);
	sc->ifp = ifp;
	if_initname(ifp, DPAA2_NI_IFNAME, device_get_unit(sc->dev));

	if_setsoftc(ifp, sc);
	if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST);
	if_setinitfn(ifp, dpaa2_ni_init);
	if_setioctlfn(ifp, dpaa2_ni_ioctl);
	if_settransmitfn(ifp, dpaa2_ni_transmit);
	if_setqflushfn(ifp, dpaa2_ni_qflush);

	if_setcapabilities(ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU);
	if_setcapenable(ifp, if_getcapabilities(ifp));

	DPAA2_CMD_INIT(&cmd);

	/* Open resource container and network interface object. */
	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	bzero(tq_name, sizeof(tq_name));
	snprintf(tq_name, sizeof(tq_name), "%s_tqbp", device_get_nameunit(dev));

	/*
	 * XXX-DSL: Release new buffers on Buffer Pool State Change Notification
	 *          (BPSCN) returned as a result to the VDQ command instead.
	 *          It is similar to CDAN processed in dpaa2_io_intr().
	 */
	/* Create a taskqueue thread to release new buffers to the pool. */
	sc->bp_taskq = taskqueue_create(tq_name, M_WAITOK,
	    taskqueue_thread_enqueue, &sc->bp_taskq);
	taskqueue_start_threads(&sc->bp_taskq, 1, PI_NET, "%s", tq_name);

	/* sc->cleanup_taskq = taskqueue_create("dpaa2_ch cleanup", M_WAITOK, */
	/*     taskqueue_thread_enqueue, &sc->cleanup_taskq); */
	/* taskqueue_start_threads(&sc->cleanup_taskq, 1, PI_NET, */
	/*     "dpaa2_ch cleanup"); */

	/* Configure the DPNI object itself (attributes, buffer layout, ...). */
	error = dpaa2_ni_setup(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup DPNI: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	/* Create QBMan channels (one per DPCON/DPIO pair). */
	error = dpaa2_ni_setup_channels(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup QBMan channels: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	/* Bind DPNI queues to the channels and the buffer pool. */
	error = dpaa2_ni_bind(dev);
	if (error) {
		device_printf(dev, "%s: failed to bind DPNI: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_irqs(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup IRQs: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_sysctls(sc);
	if (error) {
		device_printf(dev, "%s: failed to setup sysctls: error=%d\n",
		    __func__, error);
		goto close_ni;
	}

	/* sc->mac.addr was filled in by dpaa2_ni_set_mac_addr() via setup. */
	ether_ifattach(sc->ifp, sc->mac.addr);
	callout_init(&sc->mii_callout, 0);

	return (0);

close_ni:
	DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (ENXIO);
}
643 
644 static void
645 dpaa2_ni_fixed_media_status(if_t ifp, struct ifmediareq* ifmr)
646 {
647 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
648 
649 	DPNI_LOCK(sc);
650 	ifmr->ifm_count = 0;
651 	ifmr->ifm_mask = 0;
652 	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
653 	ifmr->ifm_current = ifmr->ifm_active =
654 	    sc->fixed_ifmedia.ifm_cur->ifm_media;
655 
656 	/*
657 	 * In non-PHY usecases, we need to signal link state up, otherwise
658 	 * certain things requiring a link event (e.g async DHCP client) from
659 	 * devd do not happen.
660 	 */
661 	if (if_getlinkstate(ifp) == LINK_STATE_UNKNOWN) {
662 		if_link_state_change(ifp, LINK_STATE_UP);
663 	}
664 
665 	/*
666 	 * TODO: Check the status of the link partner (DPMAC, DPNI or other) and
667 	 * reset if down. This is different to the DPAA2_MAC_LINK_TYPE_PHY as
668 	 * the MC firmware sets the status, instead of us telling the MC what
669 	 * it is.
670 	 */
671 	DPNI_UNLOCK(sc);
672 
673 	return;
674 }
675 
676 static void
677 dpaa2_ni_setup_fixed_link(struct dpaa2_ni_softc *sc)
678 {
679 	/*
680 	 * FIXME: When the DPNI is connected to a DPMAC, we can get the
681 	 * 'apparent' speed from it.
682 	 */
683 	sc->fixed_link = true;
684 
685 	ifmedia_init(&sc->fixed_ifmedia, 0, dpaa2_ni_media_change,
686 		     dpaa2_ni_fixed_media_status);
687 	ifmedia_add(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL);
688 	ifmedia_set(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T);
689 }
690 
static int
dpaa2_ni_detach(device_t dev)
{
	/*
	 * TBD: detach is not implemented yet. Resources acquired in
	 * dpaa2_ni_attach() (ifnet, mutex, bus resources, bp_taskq, MC object
	 * handles) are intentionally left in place for now, so detaching the
	 * driver leaks them.
	 */
	return (0);
}
697 
698 /**
699  * @brief Configure DPAA2 network interface object.
700  */
701 static int
702 dpaa2_ni_setup(device_t dev)
703 {
704 	device_t pdev = device_get_parent(dev);
705 	device_t child = dev;
706 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
707 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
708 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
709 	struct dpaa2_ep_desc ep1_desc, ep2_desc; /* endpoint descriptors */
710 	struct dpaa2_cmd cmd;
711 	uint8_t eth_bca[ETHER_ADDR_LEN]; /* broadcast physical address */
712 	uint16_t rc_token, ni_token, mac_token;
713 	struct dpaa2_mac_attr attr;
714 	enum dpaa2_mac_link_type link_type;
715 	uint32_t link;
716 	int error;
717 
718 	DPAA2_CMD_INIT(&cmd);
719 
720 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
721 	if (error) {
722 		device_printf(dev, "%s: failed to open resource container: "
723 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
724 		goto err_exit;
725 	}
726 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
727 	if (error) {
728 		device_printf(dev, "%s: failed to open network interface: "
729 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
730 		goto close_rc;
731 	}
732 
733 	/* Check if we can work with this DPNI object. */
734 	error = DPAA2_CMD_NI_GET_API_VERSION(dev, child, &cmd, &sc->api_major,
735 	    &sc->api_minor);
736 	if (error) {
737 		device_printf(dev, "%s: failed to get DPNI API version\n",
738 		    __func__);
739 		goto close_ni;
740 	}
741 	if (dpaa2_ni_cmp_api_version(sc, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
742 		device_printf(dev, "%s: DPNI API version %u.%u not supported, "
743 		    "need >= %u.%u\n", __func__, sc->api_major, sc->api_minor,
744 		    DPNI_VER_MAJOR, DPNI_VER_MINOR);
745 		error = ENODEV;
746 		goto close_ni;
747 	}
748 
749 	/* Reset the DPNI object. */
750 	error = DPAA2_CMD_NI_RESET(dev, child, &cmd);
751 	if (error) {
752 		device_printf(dev, "%s: failed to reset DPNI: id=%d\n",
753 		    __func__, dinfo->id);
754 		goto close_ni;
755 	}
756 
757 	/* Obtain attributes of the DPNI object. */
758 	error = DPAA2_CMD_NI_GET_ATTRIBUTES(dev, child, &cmd, &sc->attr);
759 	if (error) {
760 		device_printf(dev, "%s: failed to obtain DPNI attributes: "
761 		    "id=%d\n", __func__, dinfo->id);
762 		goto close_ni;
763 	}
764 	if (bootverbose) {
765 		device_printf(dev, "\toptions=0x%#x queues=%d tx_channels=%d "
766 		    "wriop_version=%#x\n", sc->attr.options, sc->attr.num.queues,
767 		    sc->attr.num.channels, sc->attr.wriop_ver);
768 		device_printf(dev, "\ttraffic classes: rx=%d tx=%d "
769 		    "cgs_groups=%d\n", sc->attr.num.rx_tcs, sc->attr.num.tx_tcs,
770 		    sc->attr.num.cgs);
771 		device_printf(dev, "\ttable entries: mac=%d vlan=%d qos=%d "
772 		    "fs=%d\n", sc->attr.entries.mac, sc->attr.entries.vlan,
773 		    sc->attr.entries.qos, sc->attr.entries.fs);
774 		device_printf(dev, "\tkey sizes: qos=%d fs=%d\n",
775 		    sc->attr.key_size.qos, sc->attr.key_size.fs);
776 	}
777 
778 	/* Configure buffer layouts of the DPNI queues. */
779 	error = dpaa2_ni_set_buf_layout(dev);
780 	if (error) {
781 		device_printf(dev, "%s: failed to configure buffer layout\n",
782 		    __func__);
783 		goto close_ni;
784 	}
785 
786 	/* Configure DMA resources. */
787 	error = dpaa2_ni_setup_dma(sc);
788 	if (error) {
789 		device_printf(dev, "%s: failed to setup DMA\n", __func__);
790 		goto close_ni;
791 	}
792 
793 	/* Setup link between DPNI and an object it's connected to. */
794 	ep1_desc.obj_id = dinfo->id;
795 	ep1_desc.if_id = 0; /* DPNI has the only endpoint */
796 	ep1_desc.type = dinfo->dtype;
797 
798 	error = DPAA2_CMD_RC_GET_CONN(dev, child, DPAA2_CMD_TK(&cmd, rc_token),
799 	    &ep1_desc, &ep2_desc, &link);
800 	if (error) {
801 		device_printf(dev, "%s: failed to obtain an object DPNI is "
802 		    "connected to: error=%d\n", __func__, error);
803 	} else {
804 		device_printf(dev, "connected to %s (id=%d)\n",
805 		    dpaa2_ttos(ep2_desc.type), ep2_desc.obj_id);
806 
807 		error = dpaa2_ni_set_mac_addr(dev);
808 		if (error) {
809 			device_printf(dev, "%s: failed to set MAC address: "
810 			    "error=%d\n", __func__, error);
811 		}
812 
813 		if (ep2_desc.type == DPAA2_DEV_MAC) {
814 			/*
815 			 * This is the simplest case when DPNI is connected to
816 			 * DPMAC directly.
817 			 */
818 			sc->mac.dpmac_id = ep2_desc.obj_id;
819 
820 			link_type = DPAA2_MAC_LINK_TYPE_NONE;
821 
822 			/*
823 			 * Need to determine if DPMAC type is PHY (attached to
824 			 * conventional MII PHY) or FIXED (usually SFP/SerDes,
825 			 * link state managed by MC firmware).
826 			 */
827 			error = DPAA2_CMD_MAC_OPEN(sc->dev, child,
828 			    DPAA2_CMD_TK(&cmd, rc_token), sc->mac.dpmac_id,
829 			    &mac_token);
830 			/*
831 			 * Under VFIO, the DPMAC might be sitting in another
832 			 * container (DPRC) we don't have access to.
833 			 * Assume DPAA2_MAC_LINK_TYPE_FIXED if this is
834 			 * the case.
835 			 */
836 			if (error) {
837 				device_printf(dev, "%s: failed to open "
838 				    "connected DPMAC: %d (assuming in other DPRC)\n", __func__,
839 				    sc->mac.dpmac_id);
840 				link_type = DPAA2_MAC_LINK_TYPE_FIXED;
841 			} else {
842 				error = DPAA2_CMD_MAC_GET_ATTRIBUTES(dev, child,
843 				    &cmd, &attr);
844 				if (error) {
845 					device_printf(dev, "%s: failed to get "
846 					    "DPMAC attributes: id=%d, "
847 					    "error=%d\n", __func__, dinfo->id,
848 					    error);
849 				} else {
850 					link_type = attr.link_type;
851 				}
852 			}
853 			DPAA2_CMD_MAC_CLOSE(dev, child, &cmd);
854 
855 			if (link_type == DPAA2_MAC_LINK_TYPE_FIXED) {
856 				device_printf(dev, "connected DPMAC is in FIXED "
857 				    "mode\n");
858 				dpaa2_ni_setup_fixed_link(sc);
859 			} else if (link_type == DPAA2_MAC_LINK_TYPE_PHY) {
860 				device_printf(dev, "connected DPMAC is in PHY "
861 				    "mode\n");
862 				error = DPAA2_MC_GET_PHY_DEV(dev,
863 				    &sc->mac.phy_dev, sc->mac.dpmac_id);
864 				if (error == 0) {
865 					error = MEMAC_MDIO_SET_NI_DEV(
866 					    sc->mac.phy_dev, dev);
867 					if (error != 0) {
868 						device_printf(dev, "%s: failed "
869 						    "to set dpni dev on memac "
870 						    "mdio dev %s: error=%d\n",
871 						    __func__,
872 						    device_get_nameunit(
873 						    sc->mac.phy_dev), error);
874 					}
875 				}
876 				if (error == 0) {
877 					error = MEMAC_MDIO_GET_PHY_LOC(
878 					    sc->mac.phy_dev, &sc->mac.phy_loc);
879 					if (error == ENODEV) {
880 						error = 0;
881 					}
882 					if (error != 0) {
883 						device_printf(dev, "%s: failed "
884 						    "to get phy location from "
885 						    "memac mdio dev %s: error=%d\n",
886 						    __func__, device_get_nameunit(
887 						    sc->mac.phy_dev), error);
888 					}
889 				}
890 				if (error == 0) {
891 					error = mii_attach(sc->mac.phy_dev,
892 					    &sc->miibus, sc->ifp,
893 					    dpaa2_ni_media_change,
894 					    dpaa2_ni_media_status,
895 					    BMSR_DEFCAPMASK, sc->mac.phy_loc,
896 					    MII_OFFSET_ANY, 0);
897 					if (error != 0) {
898 						device_printf(dev, "%s: failed "
899 						    "to attach to miibus: "
900 						    "error=%d\n",
901 						    __func__, error);
902 					}
903 				}
904 				if (error == 0) {
905 					sc->mii = device_get_softc(sc->miibus);
906 				}
907 			} else {
908 				device_printf(dev, "%s: DPMAC link type is not "
909 				    "supported\n", __func__);
910 			}
911 		} else if (ep2_desc.type == DPAA2_DEV_NI ||
912 			   ep2_desc.type == DPAA2_DEV_MUX ||
913 			   ep2_desc.type == DPAA2_DEV_SW) {
914 			dpaa2_ni_setup_fixed_link(sc);
915 		}
916 	}
917 
918 	/* Select mode to enqueue frames. */
919 	/* ... TBD ... */
920 
921 	/*
922 	 * Update link configuration to enable Rx/Tx pause frames support.
923 	 *
924 	 * NOTE: MC may generate an interrupt to the DPMAC and request changes
925 	 *       in link configuration. It might be necessary to attach miibus
926 	 *       and PHY before this point.
927 	 */
928 	error = dpaa2_ni_set_pause_frame(dev);
929 	if (error) {
930 		device_printf(dev, "%s: failed to configure Rx/Tx pause "
931 		    "frames\n", __func__);
932 		goto close_ni;
933 	}
934 
935 	/* Configure ingress traffic classification. */
936 	error = dpaa2_ni_set_qos_table(dev);
937 	if (error) {
938 		device_printf(dev, "%s: failed to configure QoS table: "
939 		    "error=%d\n", __func__, error);
940 		goto close_ni;
941 	}
942 
943 	/* Add broadcast physical address to the MAC filtering table. */
944 	memset(eth_bca, 0xff, ETHER_ADDR_LEN);
945 	error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, DPAA2_CMD_TK(&cmd,
946 	    ni_token), eth_bca);
947 	if (error) {
948 		device_printf(dev, "%s: failed to add broadcast physical "
949 		    "address to the MAC filtering table\n", __func__);
950 		goto close_ni;
951 	}
952 
953 	/* Set the maximum allowed length for received frames. */
954 	error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd, DPAA2_ETH_MFL);
955 	if (error) {
956 		device_printf(dev, "%s: failed to set maximum length for "
957 		    "received frames\n", __func__);
958 		goto close_ni;
959 	}
960 
961 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
962 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
963 	return (0);
964 
965 close_ni:
966 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
967 close_rc:
968 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
969 err_exit:
970 	return (error);
971 }
972 
973 /**
974  * @brief Сonfigure QBMan channels and register data availability notifications.
975  */
976 static int
977 dpaa2_ni_setup_channels(device_t dev)
978 {
979 	device_t iodev, condev, bpdev;
980 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
981 	uint32_t i, num_chan;
982 	int error;
983 
984 	/* Calculate number of the channels based on the allocated resources */
985 	for (i = 0; i < DPAA2_NI_IO_RES_NUM; i++) {
986 		if (!sc->res[DPAA2_NI_IO_RID(i)]) {
987 			break;
988 		}
989 	}
990 	num_chan = i;
991 	for (i = 0; i < DPAA2_NI_CON_RES_NUM; i++) {
992 		if (!sc->res[DPAA2_NI_CON_RID(i)]) {
993 			break;
994 		}
995 	}
996 	num_chan = i < num_chan ? i : num_chan;
997 	sc->chan_n = num_chan > DPAA2_MAX_CHANNELS
998 	    ? DPAA2_MAX_CHANNELS : num_chan;
999 	sc->chan_n = sc->chan_n > sc->attr.num.queues
1000 	    ? sc->attr.num.queues : sc->chan_n;
1001 
1002 	KASSERT(sc->chan_n > 0u, ("%s: positive number of channels expected: "
1003 	    "chan_n=%d", __func__, sc->chan_n));
1004 
1005 	device_printf(dev, "channels=%d\n", sc->chan_n);
1006 
1007 	for (i = 0; i < sc->chan_n; i++) {
1008 		iodev = (device_t)rman_get_start(sc->res[DPAA2_NI_IO_RID(i)]);
1009 		condev = (device_t)rman_get_start(sc->res[DPAA2_NI_CON_RID(i)]);
1010 		/* Only one buffer pool available at the moment */
1011 		bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
1012 
1013 		error = dpaa2_chan_setup(dev, iodev, condev, bpdev,
1014 		    &sc->channels[i], i, dpaa2_ni_cleanup_task);
1015 		if (error != 0) {
1016 			device_printf(dev, "%s: dpaa2_chan_setup() failed: "
1017 			    "error=%d, chan_id=%d\n", __func__, error, i);
1018 			return (error);
1019 		}
1020 	}
1021 
1022 	/* There is exactly one Rx error queue per network interface */
1023 	error = dpaa2_chan_setup_fq(dev, sc->channels[0], DPAA2_NI_QUEUE_RX_ERR);
1024 	if (error != 0) {
1025 		device_printf(dev, "%s: failed to prepare RxError queue: "
1026 		    "error=%d\n", __func__, error);
1027 		return (error);
1028 	}
1029 
1030 	return (0);
1031 }
1032 
1033 /**
1034  * @brief Bind DPNI to DPBPs, DPIOs, frame queues and channels.
1035  */
1036 static int
1037 dpaa2_ni_bind(device_t dev)
1038 {
1039 	device_t pdev = device_get_parent(dev);
1040 	device_t child = dev;
1041 	device_t bp_dev;
1042 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
1043 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1044 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1045 	struct dpaa2_devinfo *bp_info;
1046 	struct dpaa2_cmd cmd;
1047 	struct dpaa2_ni_pools_cfg pools_cfg;
1048 	struct dpaa2_ni_err_cfg err_cfg;
1049 	struct dpaa2_channel *chan;
1050 	uint16_t rc_token, ni_token;
1051 	int error;
1052 
1053 	DPAA2_CMD_INIT(&cmd);
1054 
1055 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1056 	if (error) {
1057 		device_printf(dev, "%s: failed to open resource container: "
1058 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
1059 		goto err_exit;
1060 	}
1061 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1062 	if (error) {
1063 		device_printf(dev, "%s: failed to open network interface: "
1064 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
1065 		goto close_rc;
1066 	}
1067 
1068 	/* Select buffer pool (only one available at the moment). */
1069 	bp_dev = (device_t) rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
1070 	bp_info = device_get_ivars(bp_dev);
1071 
1072 	/* Configure buffers pool. */
1073 	pools_cfg.pools_num = 1;
1074 	pools_cfg.pools[0].bp_obj_id = bp_info->id;
1075 	pools_cfg.pools[0].backup_flag = 0;
1076 	pools_cfg.pools[0].buf_sz = sc->buf_sz;
1077 	error = DPAA2_CMD_NI_SET_POOLS(dev, child, &cmd, &pools_cfg);
1078 	if (error) {
1079 		device_printf(dev, "%s: failed to set buffer pools\n", __func__);
1080 		goto close_ni;
1081 	}
1082 
1083 	/* Setup ingress traffic distribution. */
1084 	error = dpaa2_ni_setup_rx_dist(dev);
1085 	if (error && error != EOPNOTSUPP) {
1086 		device_printf(dev, "%s: failed to setup ingress traffic "
1087 		    "distribution\n", __func__);
1088 		goto close_ni;
1089 	}
1090 	if (bootverbose && error == EOPNOTSUPP) {
1091 		device_printf(dev, "Ingress traffic distribution not "
1092 		    "supported\n");
1093 	}
1094 
1095 	/* Configure handling of error frames. */
1096 	err_cfg.err_mask = DPAA2_NI_FAS_RX_ERR_MASK;
1097 	err_cfg.set_err_fas = false;
1098 	err_cfg.action = DPAA2_NI_ERR_DISCARD;
1099 	error = DPAA2_CMD_NI_SET_ERR_BEHAVIOR(dev, child, &cmd, &err_cfg);
1100 	if (error) {
1101 		device_printf(dev, "%s: failed to set errors behavior\n",
1102 		    __func__);
1103 		goto close_ni;
1104 	}
1105 
1106 	/* Configure channel queues to generate CDANs. */
1107 	for (uint32_t i = 0; i < sc->chan_n; i++) {
1108 		chan = sc->channels[i];
1109 
1110 		/* Setup Rx flows. */
1111 		for (uint32_t j = 0; j < chan->rxq_n; j++) {
1112 			error = dpaa2_ni_setup_rx_flow(dev, &chan->rx_queues[j]);
1113 			if (error) {
1114 				device_printf(dev, "%s: failed to setup Rx "
1115 				    "flow: error=%d\n", __func__, error);
1116 				goto close_ni;
1117 			}
1118 		}
1119 
1120 		/* Setup Tx flow. */
1121 		error = dpaa2_ni_setup_tx_flow(dev, &chan->txc_queue);
1122 		if (error) {
1123 			device_printf(dev, "%s: failed to setup Tx "
1124 			    "flow: error=%d\n", __func__, error);
1125 			goto close_ni;
1126 		}
1127 	}
1128 
1129 	/* Configure RxError queue to generate CDAN. */
1130 	error = dpaa2_ni_setup_rx_err_flow(dev, &sc->rxe_queue);
1131 	if (error) {
1132 		device_printf(dev, "%s: failed to setup RxError flow: "
1133 		    "error=%d\n", __func__, error);
1134 		goto close_ni;
1135 	}
1136 
1137 	/*
1138 	 * Get the Queuing Destination ID (QDID) that should be used for frame
1139 	 * enqueue operations.
1140 	 */
1141 	error = DPAA2_CMD_NI_GET_QDID(dev, child, &cmd, DPAA2_NI_QUEUE_TX,
1142 	    &sc->tx_qdid);
1143 	if (error) {
1144 		device_printf(dev, "%s: failed to get Tx queuing destination "
1145 		    "ID\n", __func__);
1146 		goto close_ni;
1147 	}
1148 
1149 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1150 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1151 	return (0);
1152 
1153 close_ni:
1154 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1155 close_rc:
1156 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1157 err_exit:
1158 	return (error);
1159 }
1160 
1161 /**
1162  * @brief Setup ingress traffic distribution.
1163  *
1164  * NOTE: Ingress traffic distribution is valid only when DPNI_OPT_NO_FS option
1165  *	 hasn't been set for DPNI and a number of DPNI queues > 1.
1166  */
1167 static int
1168 dpaa2_ni_setup_rx_dist(device_t dev)
1169 {
1170 	/*
1171 	 * Have the interface implicitly distribute traffic based on the default
1172 	 * hash key.
1173 	 */
1174 	return (dpaa2_ni_set_hash(dev, DPAA2_RXH_DEFAULT));
1175 }
1176 
1177 static int
1178 dpaa2_ni_setup_rx_flow(device_t dev, struct dpaa2_ni_fq *fq)
1179 {
1180 	device_t pdev = device_get_parent(dev);
1181 	device_t child = dev;
1182 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1183 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1184 	struct dpaa2_devinfo *con_info;
1185 	struct dpaa2_cmd cmd;
1186 	struct dpaa2_ni_queue_cfg queue_cfg = {0};
1187 	uint16_t rc_token, ni_token;
1188 	int error;
1189 
1190 	DPAA2_CMD_INIT(&cmd);
1191 
1192 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1193 	if (error) {
1194 		device_printf(dev, "%s: failed to open resource container: "
1195 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
1196 		goto err_exit;
1197 	}
1198 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1199 	if (error) {
1200 		device_printf(dev, "%s: failed to open network interface: "
1201 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
1202 		goto close_rc;
1203 	}
1204 
1205 	/* Obtain DPCON associated with the FQ's channel. */
1206 	con_info = device_get_ivars(fq->chan->con_dev);
1207 
1208 	queue_cfg.type = DPAA2_NI_QUEUE_RX;
1209 	queue_cfg.tc = fq->tc;
1210 	queue_cfg.idx = fq->flowid;
1211 	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
1212 	if (error) {
1213 		device_printf(dev, "%s: failed to obtain Rx queue "
1214 		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
1215 		    queue_cfg.idx);
1216 		goto close_ni;
1217 	}
1218 
1219 	fq->fqid = queue_cfg.fqid;
1220 
1221 	queue_cfg.dest_id = con_info->id;
1222 	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
1223 	queue_cfg.priority = 1;
1224 	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
1225 	queue_cfg.options =
1226 	    DPAA2_NI_QUEUE_OPT_USER_CTX |
1227 	    DPAA2_NI_QUEUE_OPT_DEST;
1228 	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
1229 	if (error) {
1230 		device_printf(dev, "%s: failed to update Rx queue "
1231 		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
1232 		    queue_cfg.idx);
1233 		goto close_ni;
1234 	}
1235 
1236 	if (bootverbose) {
1237 		device_printf(dev, "RX queue idx=%d, tc=%d, chan=%d, fqid=%d, "
1238 		    "user_ctx=%#jx\n", fq->flowid, fq->tc, fq->chan->id,
1239 		    fq->fqid, (uint64_t) fq);
1240 	}
1241 
1242 	(void)DPAA2_CMD_NI_CLOSE(dev, child, &cmd);
1243 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1244 	return (0);
1245 
1246 close_ni:
1247 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1248 close_rc:
1249 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1250 err_exit:
1251 	return (error);
1252 }
1253 
1254 static int
1255 dpaa2_ni_setup_tx_flow(device_t dev, struct dpaa2_ni_fq *fq)
1256 {
1257 	device_t pdev = device_get_parent(dev);
1258 	device_t child = dev;
1259 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
1260 	struct dpaa2_channel *ch = fq->chan;
1261 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1262 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1263 	struct dpaa2_devinfo *con_info;
1264 	struct dpaa2_ni_queue_cfg queue_cfg = {0};
1265 	struct dpaa2_ni_tx_ring *tx;
1266 	struct dpaa2_buf *buf;
1267 	struct dpaa2_cmd cmd;
1268 	uint32_t tx_rings_n = 0;
1269 	uint16_t rc_token, ni_token;
1270 	int error;
1271 
1272 	DPAA2_CMD_INIT(&cmd);
1273 
1274 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1275 	if (error) {
1276 		device_printf(dev, "%s: failed to open resource container: "
1277 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
1278 		goto err_exit;
1279 	}
1280 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1281 	if (error) {
1282 		device_printf(dev, "%s: failed to open network interface: "
1283 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
1284 		goto close_rc;
1285 	}
1286 
1287 	/* Obtain DPCON associated with the FQ's channel. */
1288 	con_info = device_get_ivars(fq->chan->con_dev);
1289 
1290 	KASSERT(sc->attr.num.tx_tcs <= DPAA2_MAX_TCS,
1291 	    ("%s: too many Tx traffic classes: tx_tcs=%d\n", __func__,
1292 	    sc->attr.num.tx_tcs));
1293 	KASSERT(DPAA2_NI_BUFS_PER_TX <= DPAA2_NI_MAX_BPTX,
1294 	    ("%s: too many Tx buffers (%d): max=%d\n", __func__,
1295 	    DPAA2_NI_BUFS_PER_TX, DPAA2_NI_MAX_BPTX));
1296 
1297 	/* Setup Tx rings. */
1298 	for (int i = 0; i < sc->attr.num.tx_tcs; i++) {
1299 		queue_cfg.type = DPAA2_NI_QUEUE_TX;
1300 		queue_cfg.tc = i;
1301 		queue_cfg.idx = fq->flowid;
1302 		queue_cfg.chan_id = fq->chan->id;
1303 
1304 		error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
1305 		if (error) {
1306 			device_printf(dev, "%s: failed to obtain Tx queue "
1307 			    "configuration: tc=%d, flowid=%d\n", __func__,
1308 			    queue_cfg.tc, queue_cfg.idx);
1309 			goto close_ni;
1310 		}
1311 
1312 		tx = &fq->tx_rings[i];
1313 		tx->fq = fq;
1314 		tx->fqid = queue_cfg.fqid;
1315 		tx->txid = tx_rings_n;
1316 
1317 		if (bootverbose) {
1318 			device_printf(dev, "TX queue idx=%d, tc=%d, chan=%d, "
1319 			    "fqid=%d\n", fq->flowid, i, fq->chan->id,
1320 			    queue_cfg.fqid);
1321 		}
1322 
1323 		mtx_init(&tx->lock, "dpaa2_tx_ring", NULL, MTX_DEF);
1324 
1325 		/* Allocate Tx ring buffer. */
1326 		tx->br = buf_ring_alloc(DPAA2_TX_BUFRING_SZ, M_DEVBUF, M_NOWAIT,
1327 		    &tx->lock);
1328 		if (tx->br == NULL) {
1329 			device_printf(dev, "%s: failed to setup Tx ring buffer"
1330 			    " (2) fqid=%d\n", __func__, tx->fqid);
1331 			goto close_ni;
1332 		}
1333 
1334 		/* Configure Tx buffers */
1335 		for (uint64_t j = 0; j < DPAA2_NI_BUFS_PER_TX; j++) {
1336 			buf = malloc(sizeof(struct dpaa2_buf), M_DPAA2_TXB,
1337 			    M_WAITOK);
1338 			/* Keep DMA tag and Tx ring linked to the buffer */
1339 			DPAA2_BUF_INIT_TAGOPT(buf, ch->tx_dmat, tx);
1340 
1341 			buf->sgt = malloc(sizeof(struct dpaa2_buf), M_DPAA2_TXB,
1342 			    M_WAITOK);
1343 			/* Link SGT to DMA tag and back to its Tx buffer */
1344 			DPAA2_BUF_INIT_TAGOPT(buf->sgt, ch->sgt_dmat, buf);
1345 
1346 			error = dpaa2_buf_seed_txb(dev, buf);
1347 
1348 			/* Add Tx buffer to the ring */
1349 			buf_ring_enqueue(tx->br, buf);
1350 		}
1351 
1352 		tx_rings_n++;
1353 	}
1354 
1355 	/* All Tx queues which belong to the same flowid have the same qdbin. */
1356 	fq->tx_qdbin = queue_cfg.qdbin;
1357 
1358 	queue_cfg.type = DPAA2_NI_QUEUE_TX_CONF;
1359 	queue_cfg.tc = 0; /* ignored for TxConf queue */
1360 	queue_cfg.idx = fq->flowid;
1361 	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
1362 	if (error) {
1363 		device_printf(dev, "%s: failed to obtain TxConf queue "
1364 		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
1365 		    queue_cfg.idx);
1366 		goto close_ni;
1367 	}
1368 
1369 	fq->fqid = queue_cfg.fqid;
1370 
1371 	queue_cfg.dest_id = con_info->id;
1372 	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
1373 	queue_cfg.priority = 0;
1374 	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
1375 	queue_cfg.options =
1376 	    DPAA2_NI_QUEUE_OPT_USER_CTX |
1377 	    DPAA2_NI_QUEUE_OPT_DEST;
1378 	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
1379 	if (error) {
1380 		device_printf(dev, "%s: failed to update TxConf queue "
1381 		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
1382 		    queue_cfg.idx);
1383 		goto close_ni;
1384 	}
1385 
1386 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1387 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1388 	return (0);
1389 
1390 close_ni:
1391 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1392 close_rc:
1393 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1394 err_exit:
1395 	return (error);
1396 }
1397 
/**
 * @brief Configure the RxError queue to notify its channel via CDAN.
 *
 * Same pattern as dpaa2_ni_setup_rx_flow(): obtain the FQID, then point the
 * queue at the channel's DPCON with the FQ pointer in the user context.
 */
static int
dpaa2_ni_setup_rx_err_flow(device_t dev, struct dpaa2_ni_fq *fq)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *con_info;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Obtain DPCON associated with the FQ's channel. */
	con_info = device_get_ivars(fq->chan->con_dev);

	/* tc/idx are ignored by the firmware for the RxError queue type. */
	queue_cfg.type = DPAA2_NI_QUEUE_RX_ERR;
	queue_cfg.tc = fq->tc; /* ignored */
	queue_cfg.idx = fq->flowid; /* ignored */
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain RxErr queue "
		    "configuration\n", __func__);
		goto close_ni;
	}

	fq->fqid = queue_cfg.fqid;

	/* Route notifications to the channel's DPCON; the FQ pointer is
	 * carried back in the user context on dequeue. */
	queue_cfg.dest_id = con_info->id;
	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
	queue_cfg.priority = 1;
	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
	queue_cfg.options =
	    DPAA2_NI_QUEUE_OPT_USER_CTX |
	    DPAA2_NI_QUEUE_OPT_DEST;
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to update RxErr queue "
		    "configuration\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
1466 
1467 /**
1468  * @brief Configure DPNI object to generate interrupts.
1469  */
1470 static int
1471 dpaa2_ni_setup_irqs(device_t dev)
1472 {
1473 	device_t pdev = device_get_parent(dev);
1474 	device_t child = dev;
1475 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
1476 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1477 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1478 	struct dpaa2_cmd cmd;
1479 	uint16_t rc_token, ni_token;
1480 	int error;
1481 
1482 	DPAA2_CMD_INIT(&cmd);
1483 
1484 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1485 	if (error) {
1486 		device_printf(dev, "%s: failed to open resource container: "
1487 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
1488 		goto err_exit;
1489 	}
1490 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1491 	if (error) {
1492 		device_printf(dev, "%s: failed to open network interface: "
1493 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
1494 		goto close_rc;
1495 	}
1496 
1497 	/* Configure IRQs. */
1498 	error = dpaa2_ni_setup_msi(sc);
1499 	if (error) {
1500 		device_printf(dev, "%s: failed to allocate MSI\n", __func__);
1501 		goto close_ni;
1502 	}
1503 	if ((sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1504 	    &sc->irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) {
1505 		device_printf(dev, "%s: failed to allocate IRQ resource\n",
1506 		    __func__);
1507 		goto close_ni;
1508 	}
1509 	if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
1510 	    NULL, dpaa2_ni_intr, sc, &sc->intr)) {
1511 		device_printf(dev, "%s: failed to setup IRQ resource\n",
1512 		    __func__);
1513 		goto close_ni;
1514 	}
1515 
1516 	error = DPAA2_CMD_NI_SET_IRQ_MASK(dev, child, &cmd, DPNI_IRQ_INDEX,
1517 	    DPNI_IRQ_LINK_CHANGED | DPNI_IRQ_EP_CHANGED);
1518 	if (error) {
1519 		device_printf(dev, "%s: failed to set DPNI IRQ mask\n",
1520 		    __func__);
1521 		goto close_ni;
1522 	}
1523 
1524 	error = DPAA2_CMD_NI_SET_IRQ_ENABLE(dev, child, &cmd, DPNI_IRQ_INDEX,
1525 	    true);
1526 	if (error) {
1527 		device_printf(dev, "%s: failed to enable DPNI IRQ\n", __func__);
1528 		goto close_ni;
1529 	}
1530 
1531 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1532 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1533 	return (0);
1534 
1535 close_ni:
1536 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1537 close_rc:
1538 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1539 err_exit:
1540 	return (error);
1541 }
1542 
1543 /**
1544  * @brief Allocate MSI interrupts for DPNI.
1545  */
1546 static int
1547 dpaa2_ni_setup_msi(struct dpaa2_ni_softc *sc)
1548 {
1549 	int val;
1550 
1551 	val = pci_msi_count(sc->dev);
1552 	if (val < DPAA2_NI_MSI_COUNT)
1553 		device_printf(sc->dev, "MSI: actual=%d, expected=%d\n", val,
1554 		    DPAA2_IO_MSI_COUNT);
1555 	val = MIN(val, DPAA2_NI_MSI_COUNT);
1556 
1557 	if (pci_alloc_msi(sc->dev, &val) != 0)
1558 		return (EINVAL);
1559 
1560 	for (int i = 0; i < val; i++)
1561 		sc->irq_rid[i] = i + 1;
1562 
1563 	return (0);
1564 }
1565 
1566 /**
1567  * @brief Update DPNI according to the updated interface capabilities.
1568  */
1569 static int
1570 dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *sc)
1571 {
1572 	const bool en_rxcsum = if_getcapenable(sc->ifp) & IFCAP_RXCSUM;
1573 	const bool en_txcsum = if_getcapenable(sc->ifp) & IFCAP_TXCSUM;
1574 	device_t pdev = device_get_parent(sc->dev);
1575 	device_t dev = sc->dev;
1576 	device_t child = dev;
1577 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1578 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1579 	struct dpaa2_cmd cmd;
1580 	uint16_t rc_token, ni_token;
1581 	int error;
1582 
1583 	DPAA2_CMD_INIT(&cmd);
1584 
1585 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1586 	if (error) {
1587 		device_printf(dev, "%s: failed to open resource container: "
1588 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
1589 		goto err_exit;
1590 	}
1591 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1592 	if (error) {
1593 		device_printf(dev, "%s: failed to open network interface: "
1594 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
1595 		goto close_rc;
1596 	}
1597 
1598 	/* Setup checksums validation. */
1599 	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1600 	    DPAA2_NI_OFL_RX_L3_CSUM, en_rxcsum);
1601 	if (error) {
1602 		device_printf(dev, "%s: failed to %s L3 checksum validation\n",
1603 		    __func__, en_rxcsum ? "enable" : "disable");
1604 		goto close_ni;
1605 	}
1606 	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1607 	    DPAA2_NI_OFL_RX_L4_CSUM, en_rxcsum);
1608 	if (error) {
1609 		device_printf(dev, "%s: failed to %s L4 checksum validation\n",
1610 		    __func__, en_rxcsum ? "enable" : "disable");
1611 		goto close_ni;
1612 	}
1613 
1614 	/* Setup checksums generation. */
1615 	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1616 	    DPAA2_NI_OFL_TX_L3_CSUM, en_txcsum);
1617 	if (error) {
1618 		device_printf(dev, "%s: failed to %s L3 checksum generation\n",
1619 		    __func__, en_txcsum ? "enable" : "disable");
1620 		goto close_ni;
1621 	}
1622 	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1623 	    DPAA2_NI_OFL_TX_L4_CSUM, en_txcsum);
1624 	if (error) {
1625 		device_printf(dev, "%s: failed to %s L4 checksum generation\n",
1626 		    __func__, en_txcsum ? "enable" : "disable");
1627 		goto close_ni;
1628 	}
1629 
1630 	(void)DPAA2_CMD_NI_CLOSE(dev, child, &cmd);
1631 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1632 	return (0);
1633 
1634 close_ni:
1635 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1636 close_rc:
1637 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1638 err_exit:
1639 	return (error);
1640 }
1641 
1642 /**
1643  * @brief Update DPNI according to the updated interface flags.
1644  */
1645 static int
1646 dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *sc)
1647 {
1648 	const bool en_promisc = if_getflags(sc->ifp) & IFF_PROMISC;
1649 	const bool en_allmulti = if_getflags(sc->ifp) & IFF_ALLMULTI;
1650 	device_t pdev = device_get_parent(sc->dev);
1651 	device_t dev = sc->dev;
1652 	device_t child = dev;
1653 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1654 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1655 	struct dpaa2_cmd cmd;
1656 	uint16_t rc_token, ni_token;
1657 	int error;
1658 
1659 	DPAA2_CMD_INIT(&cmd);
1660 
1661 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1662 	if (error) {
1663 		device_printf(dev, "%s: failed to open resource container: "
1664 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
1665 		goto err_exit;
1666 	}
1667 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1668 	if (error) {
1669 		device_printf(dev, "%s: failed to open network interface: "
1670 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
1671 		goto close_rc;
1672 	}
1673 
1674 	error = DPAA2_CMD_NI_SET_MULTI_PROMISC(dev, child, &cmd,
1675 	    en_promisc ? true : en_allmulti);
1676 	if (error) {
1677 		device_printf(dev, "%s: failed to %s multicast promiscuous "
1678 		    "mode\n", __func__, en_allmulti ? "enable" : "disable");
1679 		goto close_ni;
1680 	}
1681 
1682 	error = DPAA2_CMD_NI_SET_UNI_PROMISC(dev, child, &cmd, en_promisc);
1683 	if (error) {
1684 		device_printf(dev, "%s: failed to %s unicast promiscuous mode\n",
1685 		    __func__, en_promisc ? "enable" : "disable");
1686 		goto close_ni;
1687 	}
1688 
1689 	(void)DPAA2_CMD_NI_CLOSE(dev, child, &cmd);
1690 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1691 	return (0);
1692 
1693 close_ni:
1694 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1695 close_rc:
1696 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1697 err_exit:
1698 	return (error);
1699 }
1700 
1701 static int
1702 dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *sc)
1703 {
1704 	struct sysctl_ctx_list *ctx;
1705 	struct sysctl_oid *node, *node2;
1706 	struct sysctl_oid_list *parent, *parent2;
1707 	char cbuf[128];
1708 	int i;
1709 
1710 	ctx = device_get_sysctl_ctx(sc->dev);
1711 	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1712 
1713 	/* Add DPNI statistics. */
1714 	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
1715 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Statistics");
1716 	parent = SYSCTL_CHILDREN(node);
1717 	for (i = 0; i < DPAA2_NI_STAT_SYSCTLS; ++i) {
1718 		SYSCTL_ADD_PROC(ctx, parent, i, dpni_stat_sysctls[i].name,
1719 		    CTLTYPE_U64 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_stats,
1720 		    "IU", dpni_stat_sysctls[i].desc);
1721 	}
1722 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_anomaly_frames",
1723 	    CTLFLAG_RD, &sc->rx_anomaly_frames,
1724 	    "Rx frames in the buffers outside of the buffer pools");
1725 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_single_buf_frames",
1726 	    CTLFLAG_RD, &sc->rx_single_buf_frames,
1727 	    "Rx frames in single buffers");
1728 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_sg_buf_frames",
1729 	    CTLFLAG_RD, &sc->rx_sg_buf_frames,
1730 	    "Rx frames in scatter/gather list");
1731 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_enq_rej_frames",
1732 	    CTLFLAG_RD, &sc->rx_enq_rej_frames,
1733 	    "Enqueue rejected by QMan");
1734 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_ieoi_err_frames",
1735 	    CTLFLAG_RD, &sc->rx_ieoi_err_frames,
1736 	    "QMan IEOI error");
1737 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_single_buf_frames",
1738 	    CTLFLAG_RD, &sc->tx_single_buf_frames,
1739 	    "Tx single buffer frames");
1740 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_sg_frames",
1741 	    CTLFLAG_RD, &sc->tx_sg_frames,
1742 	    "Tx S/G frames");
1743 
1744 	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_num",
1745 	    CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_num,
1746 	    "IU", "number of Rx buffers in the buffer pool");
1747 	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_free",
1748 	    CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_free,
1749 	    "IU", "number of free Rx buffers in the buffer pool");
1750 
1751  	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1752 
1753 	/* Add channels statistics. */
1754 	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "channels",
1755 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channels");
1756 	parent = SYSCTL_CHILDREN(node);
1757 	for (int i = 0; i < sc->chan_n; i++) {
1758 		snprintf(cbuf, sizeof(cbuf), "%d", i);
1759 
1760 		node2 = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, cbuf,
1761 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channel");
1762 		parent2 = SYSCTL_CHILDREN(node2);
1763 
1764 		SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_frames",
1765 		    CTLFLAG_RD, &sc->channels[i]->tx_frames,
1766 		    "Tx frames counter");
1767 		SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_dropped",
1768 		    CTLFLAG_RD, &sc->channels[i]->tx_dropped,
1769 		    "Tx dropped counter");
1770 	}
1771 
1772 	return (0);
1773 }
1774 
/**
 * @brief Create DMA tags for the Rx distribution key and the QoS key
 * configuration buffers.
 *
 * @return 0 on success, or the bus_dma_tag_create() error otherwise.
 */
static int
dpaa2_ni_setup_dma(struct dpaa2_ni_softc *sc)
{
	device_t dev = sc->dev;
	int error;

	/* Buffer alignment must have been chosen earlier (per-HW-revision). */
	KASSERT((sc->buf_align == BUF_ALIGN) || (sc->buf_align == BUF_ALIGN_V1),
	    ("unexpected buffer alignment: %d\n", sc->buf_align));

	/* DMA tag for Rx distribution key. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),
	    PAGE_SIZE, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* low restricted addr */
	    BUS_SPACE_MAXADDR,		/* high restricted addr */
	    NULL, NULL,			/* filter, filterarg */
	    DPAA2_CLASSIFIER_DMA_SIZE, 1, /* maxsize, nsegments */
	    DPAA2_CLASSIFIER_DMA_SIZE, 0, /* maxsegsize, flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rxd_dmat);
	if (error) {
		device_printf(dev, "%s: failed to create DMA tag for Rx "
		    "distribution key\n", __func__);
		return (error);
	}

	/* DMA tag for the QoS key configuration buffer. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),
	    PAGE_SIZE, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* low restricted addr */
	    BUS_SPACE_MAXADDR,		/* high restricted addr */
	    NULL, NULL,			/* filter, filterarg */
	    ETH_QOS_KCFG_BUF_SIZE, 1,	/* maxsize, nsegments */
	    ETH_QOS_KCFG_BUF_SIZE, 0,	/* maxsegsize, flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->qos_dmat);
	if (error) {
		device_printf(dev, "%s: failed to create DMA tag for QoS key\n",
		    __func__);
		return (error);
	}

	return (0);
}
1819 
1820 /**
1821  * @brief Configure buffer layouts of the different DPNI queues.
1822  */
1823 static int
1824 dpaa2_ni_set_buf_layout(device_t dev)
1825 {
1826 	device_t pdev = device_get_parent(dev);
1827 	device_t child = dev;
1828 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1829 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1830 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
1831 	struct dpaa2_ni_buf_layout buf_layout = {0};
1832 	struct dpaa2_cmd cmd;
1833 	uint16_t rc_token, ni_token;
1834 	int error;
1835 
1836 	DPAA2_CMD_INIT(&cmd);
1837 
1838 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1839 	if (error) {
1840 		device_printf(dev, "%s: failed to open resource container: "
1841 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
1842 		goto err_exit;
1843 	}
1844 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1845 	if (error) {
1846 		device_printf(sc->dev, "%s: failed to open DPMAC: id=%d, "
1847 		    "error=%d\n", __func__, dinfo->id, error);
1848 		goto close_rc;
1849 	}
1850 
1851 	/*
1852 	 * Select Rx/Tx buffer alignment. It's necessary to ensure that the
1853 	 * buffer size seen by WRIOP is a multiple of 64 or 256 bytes depending
1854 	 * on the WRIOP version.
1855 	 */
1856 	sc->buf_align = (sc->attr.wriop_ver == WRIOP_VERSION(0, 0, 0) ||
1857 	    sc->attr.wriop_ver == WRIOP_VERSION(1, 0, 0))
1858 	    ? BUF_ALIGN_V1 : BUF_ALIGN;
1859 
1860 	/*
1861 	 * We need to ensure that the buffer size seen by WRIOP is a multiple
1862 	 * of 64 or 256 bytes depending on the WRIOP version.
1863 	 */
1864 	sc->buf_sz = ALIGN_DOWN(DPAA2_RX_BUF_SIZE, sc->buf_align);
1865 
1866 	if (bootverbose) {
1867 		device_printf(dev, "Rx/Tx buffers: size=%d, alignment=%d\n",
1868 		    sc->buf_sz, sc->buf_align);
1869 	}
1870 
1871 	/*
1872 	 *    Frame Descriptor       Tx buffer layout
1873 	 *
1874 	 *                ADDR -> |---------------------|
1875 	 *                        | SW FRAME ANNOTATION | BUF_SWA_SIZE bytes
1876 	 *                        |---------------------|
1877 	 *                        | HW FRAME ANNOTATION | BUF_TX_HWA_SIZE bytes
1878 	 *                        |---------------------|
1879 	 *                        |    DATA HEADROOM    |
1880 	 *       ADDR + OFFSET -> |---------------------|
1881 	 *                        |                     |
1882 	 *                        |                     |
1883 	 *                        |     FRAME DATA      |
1884 	 *                        |                     |
1885 	 *                        |                     |
1886 	 *                        |---------------------|
1887 	 *                        |    DATA TAILROOM    |
1888 	 *                        |---------------------|
1889 	 *
1890 	 * NOTE: It's for a single buffer frame only.
1891 	 */
1892 	buf_layout.queue_type = DPAA2_NI_QUEUE_TX;
1893 	buf_layout.pd_size = BUF_SWA_SIZE;
1894 	buf_layout.pass_timestamp = true;
1895 	buf_layout.pass_frame_status = true;
1896 	buf_layout.options =
1897 	    BUF_LOPT_PRIV_DATA_SZ |
1898 	    BUF_LOPT_TIMESTAMP | /* requires 128 bytes in HWA */
1899 	    BUF_LOPT_FRAME_STATUS;
1900 	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
1901 	if (error) {
1902 		device_printf(dev, "%s: failed to set Tx buffer layout\n",
1903 		    __func__);
1904 		goto close_ni;
1905 	}
1906 
1907 	/* Tx-confirmation buffer layout */
1908 	buf_layout.queue_type = DPAA2_NI_QUEUE_TX_CONF;
1909 	buf_layout.options =
1910 	    BUF_LOPT_TIMESTAMP |
1911 	    BUF_LOPT_FRAME_STATUS;
1912 	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
1913 	if (error) {
1914 		device_printf(dev, "%s: failed to set TxConf buffer layout\n",
1915 		    __func__);
1916 		goto close_ni;
1917 	}
1918 
1919 	/*
1920 	 * Driver should reserve the amount of space indicated by this command
1921 	 * as headroom in all Tx frames.
1922 	 */
1923 	error = DPAA2_CMD_NI_GET_TX_DATA_OFF(dev, child, &cmd, &sc->tx_data_off);
1924 	if (error) {
1925 		device_printf(dev, "%s: failed to obtain Tx data offset\n",
1926 		    __func__);
1927 		goto close_ni;
1928 	}
1929 
1930 	if (bootverbose) {
1931 		device_printf(dev, "Tx data offset=%d\n", sc->tx_data_off);
1932 	}
1933 	if ((sc->tx_data_off % 64) != 0) {
1934 		device_printf(dev, "Tx data offset (%d) is not a multiplication "
1935 		    "of 64 bytes\n", sc->tx_data_off);
1936 	}
1937 
1938 	/*
1939 	 *    Frame Descriptor       Rx buffer layout
1940 	 *
1941 	 *                ADDR -> |---------------------|
1942 	 *                        | SW FRAME ANNOTATION | BUF_SWA_SIZE bytes
1943 	 *                        |---------------------|
1944 	 *                        | HW FRAME ANNOTATION | BUF_RX_HWA_SIZE bytes
1945 	 *                        |---------------------|
1946 	 *                        |    DATA HEADROOM    | OFFSET-BUF_RX_HWA_SIZE
1947 	 *       ADDR + OFFSET -> |---------------------|
1948 	 *                        |                     |
1949 	 *                        |                     |
1950 	 *                        |     FRAME DATA      |
1951 	 *                        |                     |
1952 	 *                        |                     |
1953 	 *                        |---------------------|
1954 	 *                        |    DATA TAILROOM    | 0 bytes
1955 	 *                        |---------------------|
1956 	 *
1957 	 * NOTE: It's for a single buffer frame only.
1958 	 */
1959 	buf_layout.queue_type = DPAA2_NI_QUEUE_RX;
1960 	buf_layout.pd_size = BUF_SWA_SIZE;
1961 	buf_layout.fd_align = sc->buf_align;
1962 	buf_layout.head_size = sc->tx_data_off - BUF_RX_HWA_SIZE - BUF_SWA_SIZE;
1963 	buf_layout.tail_size = 0;
1964 	buf_layout.pass_frame_status = true;
1965 	buf_layout.pass_parser_result = true;
1966 	buf_layout.pass_timestamp = true;
1967 	buf_layout.options =
1968 	    BUF_LOPT_PRIV_DATA_SZ |
1969 	    BUF_LOPT_DATA_ALIGN |
1970 	    BUF_LOPT_DATA_HEAD_ROOM |
1971 	    BUF_LOPT_DATA_TAIL_ROOM |
1972 	    BUF_LOPT_FRAME_STATUS |
1973 	    BUF_LOPT_PARSER_RESULT |
1974 	    BUF_LOPT_TIMESTAMP;
1975 	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
1976 	if (error) {
1977 		device_printf(dev, "%s: failed to set Rx buffer layout\n",
1978 		    __func__);
1979 		goto close_ni;
1980 	}
1981 
1982 	error = 0;
1983 close_ni:
1984 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1985 close_rc:
1986 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1987 err_exit:
1988 	return (error);
1989 }
1990 
1991 /**
1992  * @brief Enable Rx/Tx pause frames.
1993  *
1994  * NOTE: DPNI stops sending when a pause frame is received (Rx frame) or DPNI
1995  *       itself generates pause frames (Tx frame).
1996  */
1997 static int
1998 dpaa2_ni_set_pause_frame(device_t dev)
1999 {
2000 	device_t pdev = device_get_parent(dev);
2001 	device_t child = dev;
2002 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2003 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2004 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2005 	struct dpaa2_ni_link_cfg link_cfg = {0};
2006 	struct dpaa2_cmd cmd;
2007 	uint16_t rc_token, ni_token;
2008 	int error;
2009 
2010 	DPAA2_CMD_INIT(&cmd);
2011 
2012 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2013 	if (error) {
2014 		device_printf(dev, "%s: failed to open resource container: "
2015 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2016 		goto err_exit;
2017 	}
2018 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2019 	if (error) {
2020 		device_printf(sc->dev, "%s: failed to open DPMAC: id=%d, "
2021 		    "error=%d\n", __func__, dinfo->id, error);
2022 		goto close_rc;
2023 	}
2024 
2025 	error = DPAA2_CMD_NI_GET_LINK_CFG(dev, child, &cmd, &link_cfg);
2026 	if (error) {
2027 		device_printf(dev, "%s: failed to obtain link configuration: "
2028 		    "error=%d\n", __func__, error);
2029 		goto close_ni;
2030 	}
2031 
2032 	/* Enable both Rx and Tx pause frames by default. */
2033 	link_cfg.options |= DPAA2_NI_LINK_OPT_PAUSE;
2034 	link_cfg.options &= ~DPAA2_NI_LINK_OPT_ASYM_PAUSE;
2035 
2036 	error = DPAA2_CMD_NI_SET_LINK_CFG(dev, child, &cmd, &link_cfg);
2037 	if (error) {
2038 		device_printf(dev, "%s: failed to set link configuration: "
2039 		    "error=%d\n", __func__, error);
2040 		goto close_ni;
2041 	}
2042 
2043 	sc->link_options = link_cfg.options;
2044 	error = 0;
2045 close_ni:
2046 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2047 close_rc:
2048 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2049 err_exit:
2050 	return (error);
2051 }
2052 
2053 /**
2054  * @brief Configure QoS table to determine the traffic class for the received
2055  * frame.
2056  */
2057 static int
2058 dpaa2_ni_set_qos_table(device_t dev)
2059 {
2060 	device_t pdev = device_get_parent(dev);
2061 	device_t child = dev;
2062 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2063 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2064 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2065 	struct dpaa2_ni_qos_table tbl;
2066 	struct dpaa2_buf *buf = &sc->qos_kcfg;
2067 	struct dpaa2_cmd cmd;
2068 	uint16_t rc_token, ni_token;
2069 	int error;
2070 
2071 	if (sc->attr.num.rx_tcs == 1 ||
2072 	    !(sc->attr.options & DPNI_OPT_HAS_KEY_MASKING)) {
2073 		if (bootverbose) {
2074 			device_printf(dev, "Ingress traffic classification is "
2075 			    "not supported\n");
2076 		}
2077 		return (0);
2078 	}
2079 
2080 	/*
2081 	 * Allocate a buffer visible to the device to hold the QoS table key
2082 	 * configuration.
2083 	 */
2084 
2085 	if (__predict_true(buf->dmat == NULL)) {
2086 		buf->dmat = sc->qos_dmat;
2087 	}
2088 
2089 	error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr,
2090 	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap);
2091 	if (error) {
2092 		device_printf(dev, "%s: failed to allocate a buffer for QoS key "
2093 		    "configuration\n", __func__);
2094 		goto err_exit;
2095 	}
2096 
2097 	error = bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr,
2098 	    ETH_QOS_KCFG_BUF_SIZE, dpaa2_dmamap_oneseg_cb, &buf->paddr,
2099 	    BUS_DMA_NOWAIT);
2100 	if (error) {
2101 		device_printf(dev, "%s: failed to map QoS key configuration "
2102 		    "buffer into bus space\n", __func__);
2103 		goto err_exit;
2104 	}
2105 
2106 	DPAA2_CMD_INIT(&cmd);
2107 
2108 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2109 	if (error) {
2110 		device_printf(dev, "%s: failed to open resource container: "
2111 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2112 		goto err_exit;
2113 	}
2114 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2115 	if (error) {
2116 		device_printf(sc->dev, "%s: failed to open DPMAC: id=%d, "
2117 		    "error=%d\n", __func__, dinfo->id, error);
2118 		goto close_rc;
2119 	}
2120 
2121 	tbl.default_tc = 0;
2122 	tbl.discard_on_miss = false;
2123 	tbl.keep_entries = false;
2124 	tbl.kcfg_busaddr = buf->paddr;
2125 	error = DPAA2_CMD_NI_SET_QOS_TABLE(dev, child, &cmd, &tbl);
2126 	if (error) {
2127 		device_printf(dev, "%s: failed to set QoS table\n", __func__);
2128 		goto close_ni;
2129 	}
2130 
2131 	error = DPAA2_CMD_NI_CLEAR_QOS_TABLE(dev, child, &cmd);
2132 	if (error) {
2133 		device_printf(dev, "%s: failed to clear QoS table\n", __func__);
2134 		goto close_ni;
2135 	}
2136 
2137 	error = 0;
2138 close_ni:
2139 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2140 close_rc:
2141 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2142 err_exit:
2143 	return (error);
2144 }
2145 
2146 static int
2147 dpaa2_ni_set_mac_addr(device_t dev)
2148 {
2149 	device_t pdev = device_get_parent(dev);
2150 	device_t child = dev;
2151 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2152 	if_t ifp = sc->ifp;
2153 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2154 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2155 	struct dpaa2_cmd cmd;
2156 	struct ether_addr rnd_mac_addr;
2157 	uint16_t rc_token, ni_token;
2158 	uint8_t mac_addr[ETHER_ADDR_LEN];
2159 	uint8_t dpni_mac_addr[ETHER_ADDR_LEN];
2160 	int error;
2161 
2162 	DPAA2_CMD_INIT(&cmd);
2163 
2164 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2165 	if (error) {
2166 		device_printf(dev, "%s: failed to open resource container: "
2167 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2168 		goto err_exit;
2169 	}
2170 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2171 	if (error) {
2172 		device_printf(sc->dev, "%s: failed to open DPMAC: id=%d, "
2173 		    "error=%d\n", __func__, dinfo->id, error);
2174 		goto close_rc;
2175 	}
2176 
2177 	/*
2178 	 * Get the MAC address associated with the physical port, if the DPNI is
2179 	 * connected to a DPMAC directly associated with one of the physical
2180 	 * ports.
2181 	 */
2182 	error = DPAA2_CMD_NI_GET_PORT_MAC_ADDR(dev, child, &cmd, mac_addr);
2183 	if (error) {
2184 		device_printf(dev, "%s: failed to obtain the MAC address "
2185 		    "associated with the physical port\n", __func__);
2186 		goto close_ni;
2187 	}
2188 
2189 	/* Get primary MAC address from the DPNI attributes. */
2190 	error = DPAA2_CMD_NI_GET_PRIM_MAC_ADDR(dev, child, &cmd, dpni_mac_addr);
2191 	if (error) {
2192 		device_printf(dev, "%s: failed to obtain primary MAC address\n",
2193 		    __func__);
2194 		goto close_ni;
2195 	}
2196 
2197 	if (!ETHER_IS_ZERO(mac_addr)) {
2198 		/* Set MAC address of the physical port as DPNI's primary one. */
2199 		error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd,
2200 		    mac_addr);
2201 		if (error) {
2202 			device_printf(dev, "%s: failed to set primary MAC "
2203 			    "address\n", __func__);
2204 			goto close_ni;
2205 		}
2206 		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2207 			sc->mac.addr[i] = mac_addr[i];
2208 		}
2209 	} else if (ETHER_IS_ZERO(dpni_mac_addr)) {
2210 		/* Generate random MAC address as DPNI's primary one. */
2211 		ether_gen_addr(ifp, &rnd_mac_addr);
2212 		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2213 			mac_addr[i] = rnd_mac_addr.octet[i];
2214 		}
2215 
2216 		error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd,
2217 		    mac_addr);
2218 		if (error) {
2219 			device_printf(dev, "%s: failed to set random primary "
2220 			    "MAC address\n", __func__);
2221 			goto close_ni;
2222 		}
2223 		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2224 			sc->mac.addr[i] = mac_addr[i];
2225 		}
2226 	} else {
2227 		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2228 			sc->mac.addr[i] = dpni_mac_addr[i];
2229 		}
2230 	}
2231 
2232 	error = 0;
2233 close_ni:
2234 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2235 close_rc:
2236 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2237 err_exit:
2238 	return (error);
2239 }
2240 
/**
 * @brief MII bus status-change callback.
 *
 * Derives the new link state from the MII media status and, when it has
 * changed, propagates it to the associated DPMAC object via MC commands.
 * A no-op for fixed links, without an attached PHY, or before the interface
 * is running.
 */
static void
dpaa2_ni_miibus_statchg(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_mac_link_state mac_link = { 0 };
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_cmd cmd;
	uint16_t rc_token, mac_token;
	int error, link_state;

	/* Nothing to report for a fixed link or when no PHY is attached. */
	if (sc->fixed_link || sc->mii == NULL) {
		return;
	}
	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0) {
		/*
		 * We will receive calls and adjust the changes but
		 * not have setup everything (called before dpaa2_ni_init()
		 * really).  This will then setup the link and internal
		 * sc->link_state and not trigger the update once needed,
		 * so basically dpmac never knows about it.
		 */
		return;
	}

	/*
	 * Note: ifp link state will only be changed AFTER we are called so we
	 * cannot rely on ifp->if_linkstate here.
	 */
	if (sc->mii->mii_media_status & IFM_AVALID) {
		if (sc->mii->mii_media_status & IFM_ACTIVE) {
			link_state = LINK_STATE_UP;
		} else {
			link_state = LINK_STATE_DOWN;
		}
	} else {
		link_state = LINK_STATE_UNKNOWN;
	}

	if (link_state != sc->link_state) {
		sc->link_state = link_state;

		DPAA2_CMD_INIT(&cmd);

		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
		    &rc_token);
		if (error) {
			device_printf(dev, "%s: failed to open resource "
			    "container: id=%d, error=%d\n", __func__, rcinfo->id,
			    error);
			goto err_exit;
		}
		error = DPAA2_CMD_MAC_OPEN(dev, child, &cmd, sc->mac.dpmac_id,
		    &mac_token);
		if (error) {
			device_printf(sc->dev, "%s: failed to open DPMAC: "
			    "id=%d, error=%d\n", __func__, sc->mac.dpmac_id,
			    error);
			goto close_rc;
		}

		/* UNKNOWN state is cached above but not forwarded to DPMAC. */
		if (link_state == LINK_STATE_UP ||
		    link_state == LINK_STATE_DOWN) {
			/* Update DPMAC link state. */
			mac_link.supported = sc->mii->mii_media.ifm_media;
			mac_link.advert = sc->mii->mii_media.ifm_media;
			mac_link.rate = 1000; /* TODO: Where to get from? */	/* ifmedia_baudrate? */
			mac_link.options =
			    DPAA2_MAC_LINK_OPT_AUTONEG |
			    DPAA2_MAC_LINK_OPT_PAUSE;
			mac_link.up = (link_state == LINK_STATE_UP) ? true : false;
			mac_link.state_valid = true;

			/* Inform DPMAC about link state. */
			error = DPAA2_CMD_MAC_SET_LINK_STATE(dev, child, &cmd,
			    &mac_link);
			if (error) {
				device_printf(sc->dev, "%s: failed to set DPMAC "
				    "link state: id=%d, error=%d\n", __func__,
				    sc->mac.dpmac_id, error);
			}
		}
		(void)DPAA2_CMD_MAC_CLOSE(dev, child, &cmd);
		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
		    rc_token));
	}

	return;

close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return;
}
2336 
2337 /**
2338  * @brief Callback function to process media change request.
2339  */
2340 static int
2341 dpaa2_ni_media_change_locked(struct dpaa2_ni_softc *sc)
2342 {
2343 
2344 	DPNI_LOCK_ASSERT(sc);
2345 	if (sc->mii) {
2346 		mii_mediachg(sc->mii);
2347 		sc->media_status = sc->mii->mii_media.ifm_media;
2348 	} else if (sc->fixed_link) {
2349 		if_printf(sc->ifp, "%s: can't change media in fixed mode\n",
2350 		    __func__);
2351 	}
2352 
2353 	return (0);
2354 }
2355 
2356 static int
2357 dpaa2_ni_media_change(if_t ifp)
2358 {
2359 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2360 	int error;
2361 
2362 	DPNI_LOCK(sc);
2363 	error = dpaa2_ni_media_change_locked(sc);
2364 	DPNI_UNLOCK(sc);
2365 	return (error);
2366 }
2367 
2368 /**
2369  * @brief Callback function to process media status request.
2370  */
2371 static void
2372 dpaa2_ni_media_status(if_t ifp, struct ifmediareq *ifmr)
2373 {
2374 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2375 
2376 	DPNI_LOCK(sc);
2377 	if (sc->mii) {
2378 		mii_pollstat(sc->mii);
2379 		ifmr->ifm_active = sc->mii->mii_media_active;
2380 		ifmr->ifm_status = sc->mii->mii_media_status;
2381 	}
2382 	DPNI_UNLOCK(sc);
2383 }
2384 
2385 /**
2386  * @brief Callout function to check and update media status.
2387  */
2388 static void
2389 dpaa2_ni_media_tick(void *arg)
2390 {
2391 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2392 
2393 	/* Check for media type change */
2394 	if (sc->mii) {
2395 		mii_tick(sc->mii);
2396 		if (sc->media_status != sc->mii->mii_media.ifm_media) {
2397 			printf("%s: media type changed (ifm_media=%x)\n",
2398 			    __func__, sc->mii->mii_media.ifm_media);
2399 			dpaa2_ni_media_change(sc->ifp);
2400 		}
2401 	}
2402 
2403 	/* Schedule another timeout one second from now */
2404 	callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);
2405 }
2406 
/**
 * @brief Bring the interface up (if_init callback).
 *
 * Enables the DPNI object via the MC command portal, marks the interface as
 * running, applies the current media selection and starts the media-polling
 * callout. A no-op if the interface is already running.
 */
static void
dpaa2_ni_init(void *arg)
{
	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
	if_t ifp = sc->ifp;
	device_t pdev = device_get_parent(sc->dev);
	device_t dev = sc->dev;
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	/* Bail out early if the interface is already up and running. */
	DPNI_LOCK(sc);
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
		DPNI_UNLOCK(sc);
		return;
	}
	DPNI_UNLOCK(sc);

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Failure to enable is reported but does not abort the bring-up. */
	error = DPAA2_CMD_NI_ENABLE(dev, child, &cmd);
	if (error) {
		device_printf(dev, "%s: failed to enable DPNI: error=%d\n",
		    __func__, error);
	}

	DPNI_LOCK(sc);
	/* Announce we are up and running and can queue packets. */
	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	if (sc->mii) {
		/*
		 * mii_mediachg() will trigger a call into
		 * dpaa2_ni_miibus_statchg() to setup link state.
		 */
		dpaa2_ni_media_change_locked(sc);
	}
	callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);

	DPNI_UNLOCK(sc);

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return;

close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return;
}
2473 
2474 static int
2475 dpaa2_ni_transmit(if_t ifp, struct mbuf *m)
2476 {
2477 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2478 	struct dpaa2_channel *ch;
2479 	uint32_t fqid;
2480 	bool found = false;
2481 	int chidx = 0, error;
2482 
2483 	if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) {
2484 		return (0);
2485 	}
2486 
2487 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
2488 		fqid = m->m_pkthdr.flowid;
2489 		for (int i = 0; i < sc->chan_n; i++) {
2490 			ch = sc->channels[i];
2491 			for (int j = 0; j < ch->rxq_n; j++) {
2492 				if (fqid == ch->rx_queues[j].fqid) {
2493 					chidx = ch->flowid;
2494 					found = true;
2495 					break;
2496 				}
2497 			}
2498 			if (found) {
2499 				break;
2500 			}
2501 		}
2502 	}
2503 
2504 	ch = sc->channels[chidx];
2505 	error = buf_ring_enqueue(ch->xmit_br, m);
2506 	if (__predict_false(error != 0)) {
2507 		m_freem(m);
2508 	} else {
2509 		taskqueue_enqueue(ch->cleanup_tq, &ch->cleanup_task);
2510 	}
2511 
2512 	return (error);
2513 }
2514 
/**
 * @brief Flush Tx queues of the interface (if_qflush callback).
 *
 * Only flushes the generic software send queue for now.
 */
static void
dpaa2_ni_qflush(if_t ifp)
{
	/* TODO: Find a way to drain Tx queues in QBMan. */
	if_qflush(ifp);
}
2521 
/**
 * @brief Ioctl handler of the network interface (if_ioctl callback).
 *
 * Opens the resource container and the DPNI object up front since several
 * requests are translated into MC commands; both are closed again on every
 * return path.
 */
static int
dpaa2_ni_ioctl(if_t ifp, u_long c, caddr_t data)
{
	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *) data;
	device_t pdev = device_get_parent(sc->dev);
	device_t dev = sc->dev;
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_cmd cmd;
	uint32_t changed = 0;
	uint16_t rc_token, ni_token;
	int mtu, error, rc = 0;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	switch (c) {
	case SIOCSIFMTU:
		DPNI_LOCK(sc);
		mtu = ifr->ifr_mtu;
		if (mtu < ETHERMIN || mtu > ETHERMTU_JUMBO) {
			DPNI_UNLOCK(sc);
			error = EINVAL;
			goto close_ni;
		}
		if_setmtu(ifp, mtu);
		DPNI_UNLOCK(sc);

		/* Update maximum frame length. */
		mtu += ETHER_HDR_LEN;
		if (if_getcapenable(ifp) & IFCAP_VLAN_MTU)
			mtu += ETHER_VLAN_ENCAP_LEN;
		error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd, mtu);
		if (error) {
			device_printf(dev, "%s: failed to update maximum frame "
			    "length: error=%d\n", __func__, error);
			goto close_ni;
		}
		break;
	case SIOCSIFCAP:
		/* Toggle hardware checksum offload on/off as requested. */
		changed = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
		if (changed & IFCAP_HWCSUM) {
			if ((ifr->ifr_reqcap & changed) & IFCAP_HWCSUM) {
				if_setcapenablebit(ifp, IFCAP_HWCSUM, 0);
			} else {
				if_setcapenablebit(ifp, 0, IFCAP_HWCSUM);
			}
		}
		rc = dpaa2_ni_setup_if_caps(sc);
		if (rc) {
			printf("%s: failed to update iface capabilities: "
			    "error=%d\n", __func__, rc);
			rc = ENXIO;
		}
		break;
	case SIOCSIFFLAGS:
		DPNI_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				/* Re-apply promisc/allmulti changes only. */
				changed = if_getflags(ifp) ^ sc->if_flags;
				if (changed & IFF_PROMISC ||
				    changed & IFF_ALLMULTI) {
					rc = dpaa2_ni_setup_if_flags(sc);
				}
			} else {
				/* Drop the lock: dpaa2_ni_init() takes it. */
				DPNI_UNLOCK(sc);
				dpaa2_ni_init(sc);
				DPNI_LOCK(sc);
			}
		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			/* FIXME: Disable DPNI. See dpaa2_ni_init(). */
		}

		sc->if_flags = if_getflags(ifp);
		DPNI_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		DPNI_LOCK(sc);
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			/* Drop the lock around the MC command sequence. */
			DPNI_UNLOCK(sc);
			rc = dpaa2_ni_update_mac_filters(ifp);
			if (rc) {
				device_printf(dev, "%s: failed to update MAC "
				    "filters: error=%d\n", __func__, rc);
			}
			DPNI_LOCK(sc);
		}
		DPNI_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		if (sc->mii)
			rc = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, c);
		else if(sc->fixed_link) {
			rc = ifmedia_ioctl(ifp, ifr, &sc->fixed_ifmedia, c);
		}
		break;
	default:
		rc = ether_ioctl(ifp, c, data);
		break;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (rc);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
2650 
/**
 * @brief Re-program multicast MAC filters of the DPNI.
 *
 * Clears all multicast filters first and re-adds every link-level multicast
 * address currently assigned to the interface via dpaa2_ni_add_maddr().
 * Returns the first error recorded by the per-address callback (if any).
 */
static int
dpaa2_ni_update_mac_filters(if_t ifp)
{
	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
	struct dpaa2_ni_mcaddr_ctx ctx;
	device_t pdev = device_get_parent(sc->dev);
	device_t dev = sc->dev;
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Remove all multicast MAC filters. */
	error = DPAA2_CMD_NI_CLEAR_MAC_FILTERS(dev, child, &cmd, false, true);
	if (error) {
		device_printf(dev, "%s: failed to clear multicast MAC filters: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	ctx.ifp = ifp;
	ctx.error = 0;
	ctx.nent = 0;

	/* Re-add each multicast address; errors accumulate in ctx.error. */
	if_foreach_llmaddr(ifp, dpaa2_ni_add_maddr, &ctx);

	error = ctx.error;
close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}
2702 
2703 static u_int
2704 dpaa2_ni_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2705 {
2706 	struct dpaa2_ni_mcaddr_ctx *ctx = arg;
2707 	struct dpaa2_ni_softc *sc = if_getsoftc(ctx->ifp);
2708 	device_t pdev = device_get_parent(sc->dev);
2709 	device_t dev = sc->dev;
2710 	device_t child = dev;
2711 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2712 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2713 	struct dpaa2_cmd cmd;
2714 	uint16_t rc_token, ni_token;
2715 	int error;
2716 
2717 	if (ctx->error != 0) {
2718 		return (0);
2719 	}
2720 
2721 	if (ETHER_IS_MULTICAST(LLADDR(sdl))) {
2722 		DPAA2_CMD_INIT(&cmd);
2723 
2724 		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
2725 		    &rc_token);
2726 		if (error) {
2727 			device_printf(dev, "%s: failed to open resource "
2728 			    "container: id=%d, error=%d\n", __func__, rcinfo->id,
2729 			    error);
2730 			return (0);
2731 		}
2732 		error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id,
2733 		    &ni_token);
2734 		if (error) {
2735 			device_printf(dev, "%s: failed to open network interface: "
2736 			    "id=%d, error=%d\n", __func__, dinfo->id, error);
2737 			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
2738 			    rc_token));
2739 			return (0);
2740 		}
2741 
2742 		ctx->error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, &cmd,
2743 		    LLADDR(sdl));
2744 
2745 		(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
2746 		    ni_token));
2747 		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
2748 		    rc_token));
2749 
2750 		if (ctx->error != 0) {
2751 			device_printf(dev, "%s: can't add more then %d MAC "
2752 			    "addresses, switching to the multicast promiscuous "
2753 			    "mode\n", __func__, ctx->nent);
2754 
2755 			/* Enable multicast promiscuous mode. */
2756 			DPNI_LOCK(sc);
2757 			if_setflagbits(ctx->ifp, IFF_ALLMULTI, 0);
2758 			sc->if_flags |= IFF_ALLMULTI;
2759 			ctx->error = dpaa2_ni_setup_if_flags(sc);
2760 			DPNI_UNLOCK(sc);
2761 
2762 			return (0);
2763 		}
2764 		ctx->nent++;
2765 	}
2766 
2767 	return (1);
2768 }
2769 
2770 static void
2771 dpaa2_ni_intr(void *arg)
2772 {
2773 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2774 	device_t pdev = device_get_parent(sc->dev);
2775 	device_t dev = sc->dev;
2776 	device_t child = dev;
2777 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2778 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2779 	struct dpaa2_cmd cmd;
2780 	uint32_t status = ~0u; /* clear all IRQ status bits */
2781 	uint16_t rc_token, ni_token;
2782 	int error;
2783 
2784 	DPAA2_CMD_INIT(&cmd);
2785 
2786 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2787 	if (error) {
2788 		device_printf(dev, "%s: failed to open resource container: "
2789 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2790 		goto err_exit;
2791 	}
2792 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2793 	if (error) {
2794 		device_printf(dev, "%s: failed to open network interface: "
2795 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2796 		goto close_rc;
2797 	}
2798 
2799 	error = DPAA2_CMD_NI_GET_IRQ_STATUS(dev, child, &cmd, DPNI_IRQ_INDEX,
2800 	    &status);
2801 	if (error) {
2802 		device_printf(sc->dev, "%s: failed to obtain IRQ status: "
2803 		    "error=%d\n", __func__, error);
2804 	}
2805 
2806 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2807 close_rc:
2808 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2809 err_exit:
2810 	return;
2811 }
2812 
2813 /**
2814  * @brief Execute channel's Rx/Tx routines.
2815  *
2816  * NOTE: Should not be re-entrant for the same channel. It is achieved by
2817  *       enqueuing the cleanup routine on a single-threaded taskqueue.
2818  */
2819 static void
2820 dpaa2_ni_cleanup_task(void *arg, int count)
2821 {
2822 	struct dpaa2_channel *ch = (struct dpaa2_channel *)arg;
2823 	struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev);
2824 	int error, rxc, txc;
2825 
2826 	for (int i = 0; i < DPAA2_CLEAN_BUDGET; i++) {
2827 		rxc  = dpaa2_ni_rx_cleanup(ch);
2828 		txc  = dpaa2_ni_tx_cleanup(ch);
2829 
2830 		if (__predict_false((if_getdrvflags(sc->ifp) &
2831 		    IFF_DRV_RUNNING) == 0)) {
2832 			return;
2833 		}
2834 
2835 		if ((txc != DPAA2_TX_BUDGET) && (rxc != DPAA2_RX_BUDGET)) {
2836 			break;
2837 		}
2838 	}
2839 
2840 	/* Re-arm channel to generate CDAN */
2841 	error = DPAA2_SWP_CONF_WQ_CHANNEL(ch->io_dev, &ch->ctx);
2842 	if (error != 0) {
2843 		panic("%s: failed to rearm channel: chan_id=%d, error=%d\n",
2844 		    __func__, ch->id, error);
2845 	}
2846 }
2847 
2848 /**
2849  * @brief Poll frames from a specific channel when CDAN is received.
2850  */
2851 static int
2852 dpaa2_ni_rx_cleanup(struct dpaa2_channel *ch)
2853 {
2854 	struct dpaa2_io_softc *iosc = device_get_softc(ch->io_dev);
2855 	struct dpaa2_swp *swp = iosc->swp;
2856 	struct dpaa2_ni_fq *fq;
2857 	struct dpaa2_buf *buf = &ch->store;
2858 	int budget = DPAA2_RX_BUDGET;
2859 	int error, consumed = 0;
2860 
2861 	do {
2862 		error = dpaa2_swp_pull(swp, ch->id, buf, DPAA2_ETH_STORE_FRAMES);
2863 		if (error) {
2864 			device_printf(ch->ni_dev, "%s: failed to pull frames: "
2865 			    "chan_id=%d, error=%d\n", __func__, ch->id, error);
2866 			break;
2867 		}
2868 		error = dpaa2_ni_consume_frames(ch, &fq, &consumed);
2869 		if (error == ENOENT || error == EALREADY) {
2870 			break;
2871 		}
2872 		if (error == ETIMEDOUT) {
2873 			device_printf(ch->ni_dev, "%s: timeout to consume "
2874 			    "frames: chan_id=%d\n", __func__, ch->id);
2875 		}
2876 	} while (--budget);
2877 
2878 	return (DPAA2_RX_BUDGET - budget);
2879 }
2880 
2881 static int
2882 dpaa2_ni_tx_cleanup(struct dpaa2_channel *ch)
2883 {
2884 	struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev);
2885 	struct dpaa2_ni_tx_ring *tx = &ch->txc_queue.tx_rings[0];
2886 	struct mbuf *m = NULL;
2887 	int budget = DPAA2_TX_BUDGET;
2888 
2889 	do {
2890 		mtx_assert(&ch->xmit_mtx, MA_NOTOWNED);
2891 		mtx_lock(&ch->xmit_mtx);
2892 		m = buf_ring_dequeue_sc(ch->xmit_br);
2893 		mtx_unlock(&ch->xmit_mtx);
2894 
2895 		if (__predict_false(m == NULL)) {
2896 			/* TODO: Do not give up easily */
2897 			break;
2898 		} else {
2899 			dpaa2_ni_tx(sc, ch, tx, m);
2900 		}
2901 	} while (--budget);
2902 
2903 	return (DPAA2_TX_BUDGET - budget);
2904 }
2905 
/**
 * @brief Transmit a single mbuf on the given Tx ring.
 *
 * Consumes "m" in every case: on success its buffer is reclaimed later via
 * the Tx confirmation queue; on failure the mbuf is freed and the Tx buffer
 * is returned to the ring.
 */
static void
dpaa2_ni_tx(struct dpaa2_ni_softc *sc, struct dpaa2_channel *ch,
    struct dpaa2_ni_tx_ring *tx, struct mbuf *m)
{
	device_t dev = sc->dev;
	struct dpaa2_ni_fq *fq = tx->fq;
	struct dpaa2_buf *buf, *sgt;
	struct dpaa2_fd fd;
	struct mbuf *md;
	bus_dma_segment_t segs[DPAA2_TX_SEGLIMIT];
	int rc, nsegs;
	int error;

	/* Take a spare Tx buffer from the ring under its lock. */
	mtx_assert(&tx->lock, MA_NOTOWNED);
	mtx_lock(&tx->lock);
	buf = buf_ring_dequeue_sc(tx->br);
	mtx_unlock(&tx->lock);
	if (__predict_false(buf == NULL)) {
		/* TODO: Do not give up easily */
		m_freem(m);
		return;
	} else {
		DPAA2_BUF_ASSERT_TXREADY(buf);
		buf->m = m;
		sgt = buf->sgt;
	}

#if defined(INVARIANTS)
	struct dpaa2_ni_tx_ring *btx = (struct dpaa2_ni_tx_ring *)buf->opt;
	KASSERT(buf->opt == tx, ("%s: unexpected Tx ring", __func__));
	KASSERT(btx->fq->chan == ch, ("%s: unexpected channel", __func__));
#endif /* INVARIANTS */

	/* Let BPF listeners see the outgoing frame. */
	BPF_MTAP(sc->ifp, m);

	error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, m, segs, &nsegs,
	    BUS_DMA_NOWAIT);
	if (__predict_false(error != 0)) {
		/* Too many fragments, trying to defragment... */
		md = m_collapse(m, M_NOWAIT, DPAA2_TX_SEGLIMIT);
		if (md == NULL) {
			device_printf(dev, "%s: m_collapse() failed\n", __func__);
			fq->chan->tx_dropped++;
			goto err;
		}

		/* Retry the DMA load with the collapsed mbuf chain. */
		buf->m = m = md;
		error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, m, segs,
		    &nsegs, BUS_DMA_NOWAIT);
		if (__predict_false(error != 0)) {
			device_printf(dev, "%s: bus_dmamap_load_mbuf_sg() "
			    "failed: error=%d\n", __func__, error);
			fq->chan->tx_dropped++;
			goto err;
		}
	}

	/* Describe the frame and its S/G table to the hardware. */
	error = dpaa2_ni_build_fd(sc, tx, buf, segs, nsegs, &fd);
	if (__predict_false(error != 0)) {
		device_printf(dev, "%s: failed to build frame descriptor: "
		    "error=%d\n", __func__, error);
		fq->chan->tx_dropped++;
		goto err_unload;
	}

	/* TODO: Enqueue several frames in a single command */
	for (int i = 0; i < DPAA2_NI_ENQUEUE_RETRIES; i++) {
		/* TODO: Return error codes instead of # of frames */
		rc = DPAA2_SWP_ENQ_MULTIPLE_FQ(fq->chan->io_dev, tx->fqid, &fd, 1);
		if (rc == 1) {
			break;
		}
	}

	/*
	 * NOTE(review): these PREWRITE syncs are issued after the enqueue
	 * attempts above — confirm this ordering is intentional.
	 */
	bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sgt->dmat, sgt->dmap, BUS_DMASYNC_PREWRITE);

	if (rc != 1) {
		fq->chan->tx_dropped++;
		goto err_unload;
	} else {
		fq->chan->tx_frames++;
	}
	return;

err_unload:
	bus_dmamap_unload(buf->dmat, buf->dmap);
	if (sgt->paddr != 0) {
		bus_dmamap_unload(sgt->dmat, sgt->dmap);
	}
err:
	m_freem(buf->m);
	buf_ring_enqueue(tx->br, buf);
}
3000 
/**
 * @brief Consume all frames pulled into the channel's store by the last VDQ.
 *
 * Dispatches each dequeued frame to the Rx, Rx-error or Tx-confirmation
 * handler according to the frame queue type recorded in the dequeue context.
 * Returns the last code from dpaa2_chan_next_frame() (EALREADY/ENOENT when
 * the store is exhausted). "src" (the frame queue) and "consumed" (number of
 * frames processed) are optional outputs.
 */
static int
dpaa2_ni_consume_frames(struct dpaa2_channel *chan, struct dpaa2_ni_fq **src,
    uint32_t *consumed)
{
	struct dpaa2_ni_fq *fq = NULL;
	struct dpaa2_dq *dq;
	struct dpaa2_fd *fd;
	/* Accumulates an mbuf chain handed to if_input() on the last frame. */
	struct dpaa2_ni_rx_ctx ctx = {
		.head = NULL,
		.tail = NULL,
		.cnt = 0,
		.last = false
	};
	int rc, frames = 0;

	do {
		rc = dpaa2_chan_next_frame(chan, &dq);
		if (rc == EINPROGRESS) {
			/* More responses follow this one in the store. */
			if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
				fd = &dq->fdr.fd;
				fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;

				switch (fq->type) {
				case DPAA2_NI_QUEUE_RX:
					(void)dpaa2_ni_rx(chan, fq, fd, &ctx);
					break;
				case DPAA2_NI_QUEUE_RX_ERR:
					(void)dpaa2_ni_rx_err(chan, fq, fd);
					break;
				case DPAA2_NI_QUEUE_TX_CONF:
					(void)dpaa2_ni_tx_conf(chan, fq, fd);
					break;
				default:
					panic("%s: unknown queue type (1)",
					    __func__);
				}
				frames++;
			}
		} else if (rc == EALREADY || rc == ENOENT) {
			/* Final response of this VDQ: process it and stop. */
			if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
				fd = &dq->fdr.fd;
				fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;

				switch (fq->type) {
				case DPAA2_NI_QUEUE_RX:
					/*
					 * Last VDQ response (mbuf) in a chain
					 * obtained from the Rx queue.
					 */
					ctx.last = true;
					(void)dpaa2_ni_rx(chan, fq, fd, &ctx);
					break;
				case DPAA2_NI_QUEUE_RX_ERR:
					(void)dpaa2_ni_rx_err(chan, fq, fd);
					break;
				case DPAA2_NI_QUEUE_TX_CONF:
					(void)dpaa2_ni_tx_conf(chan, fq, fd);
					break;
				default:
					panic("%s: unknown queue type (2)",
					    __func__);
				}
				frames++;
			}
			break;
		} else {
			panic("%s: should not reach here: rc=%d", __func__, rc);
		}
	} while (true);

	KASSERT(chan->store_idx < chan->store_sz, ("%s: store_idx(%d) >= "
	    "store_sz(%d)", __func__, chan->store_idx, chan->store_sz));

	/*
	 * VDQ operation pulls frames from a single queue into the store.
	 * Return the frame queue and a number of consumed frames as an output.
	 */
	if (src != NULL) {
		*src = fq;
	}
	if (consumed != NULL) {
		*consumed = frames;
	}

	return (rc);
}
3087 
3088 /**
3089  * @brief Receive frames.
3090  */
static int
dpaa2_ni_rx(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq, struct dpaa2_fd *fd,
    struct dpaa2_ni_rx_ctx *ctx)
{
	/* The frame annotation lives at the buffer's physical address. */
	bus_addr_t paddr = (bus_addr_t)fd->addr;
	struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr);
	struct dpaa2_buf *buf = fa->buf;
	struct dpaa2_channel *bch = (struct dpaa2_channel *)buf->opt;
	struct dpaa2_ni_softc *sc = device_get_softc(bch->ni_dev);
	struct dpaa2_bp_softc *bpsc;
	struct mbuf *m;
	device_t bpdev;
	bus_addr_t released[DPAA2_SWP_BUFS_PER_CMD];
	void *buf_data;
	int buf_len, error, released_n = 0;

	KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
	/*
	 * NOTE: Current channel might not be the same as the "buffer" channel
	 * and it's fine. It must not be NULL though.
	 */
	KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__));

	if (__predict_false(paddr != buf->paddr)) {
		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
		    __func__, paddr, buf->paddr);
	}

	/* Account frame-level errors reported in the descriptor (sysctl). */
	switch (dpaa2_ni_fd_err(fd)) {
	case 1: /* Enqueue rejected by QMan */
		sc->rx_enq_rej_frames++;
		break;
	case 2: /* QMan IEOI error */
		sc->rx_ieoi_err_frames++;
		break;
	default:
		break;
	}
	/* Account single-buffer vs scatter/gather frames (sysctl). */
	switch (dpaa2_ni_fd_format(fd)) {
	case DPAA2_FD_SINGLE:
		sc->rx_single_buf_frames++;
		break;
	case DPAA2_FD_SG:
		sc->rx_sg_buf_frames++;
		break;
	default:
		break;
	}

	mtx_assert(&bch->dma_mtx, MA_NOTOWNED);
	mtx_lock(&bch->dma_mtx);

	/* Detach the mbuf from its DMA mapping before handing it upstream. */
	bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(buf->dmat, buf->dmap);
	m = buf->m;
	buf_len = dpaa2_ni_fd_data_len(fd);
	buf_data = (uint8_t *)buf->vaddr + dpaa2_ni_fd_offset(fd);
	/* Prepare buffer to be re-cycled */
	buf->m = NULL;
	buf->paddr = 0;
	buf->vaddr = NULL;
	buf->seg.ds_addr = 0;
	buf->seg.ds_len = 0;
	buf->nseg = 0;

	mtx_unlock(&bch->dma_mtx);

	/* Fill in packet-header fields expected by the network stack. */
	m->m_flags |= M_PKTHDR;
	m->m_data = buf_data;
	m->m_len = buf_len;
	m->m_pkthdr.len = buf_len;
	m->m_pkthdr.rcvif = sc->ifp;
	m->m_pkthdr.flowid = fq->fqid;
	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);

	/* Append the mbuf to the chain accumulated across this VDQ pull. */
	if (ctx->head == NULL) {
		KASSERT(ctx->tail == NULL, ("%s: tail already given?", __func__));
		ctx->head = m;
		ctx->tail = m;
	} else {
		KASSERT(ctx->head != NULL, ("%s: head is NULL", __func__));
		ctx->tail->m_nextpkt = m;
		ctx->tail = m;
	}
	ctx->cnt++;

	/* Pass the whole chain up the stack on the last frame of the pull. */
	if (ctx->last) {
		ctx->tail->m_nextpkt = NULL;
		if_input(sc->ifp, ctx->head);
	}

	/* Keep the buffer to be recycled */
	ch->recycled[ch->recycled_n++] = buf;

	/* Re-seed and release recycled buffers back to the pool */
	if (ch->recycled_n == DPAA2_SWP_BUFS_PER_CMD) {
		/* Release new buffers to the pool if needed */
		taskqueue_enqueue(sc->bp_taskq, &ch->bp_task);

		for (int i = 0; i < ch->recycled_n; i++) {
			buf = ch->recycled[i];
			/* Each recycled buffer may belong to another channel. */
			bch = (struct dpaa2_channel *)buf->opt;

			mtx_assert(&bch->dma_mtx, MA_NOTOWNED);
			mtx_lock(&bch->dma_mtx);
			error = dpaa2_buf_seed_rxb(sc->dev, buf,
			    DPAA2_RX_BUF_SIZE, &bch->dma_mtx);
			mtx_unlock(&bch->dma_mtx);

			if (__predict_false(error != 0)) {
				/* TODO: What else to do with the buffer? */
				panic("%s: failed to recycle buffer: error=%d",
				    __func__, error);
			}

			/* Prepare buffer to be released in a single command */
			released[released_n++] = buf->paddr;
		}

		/* There's only one buffer pool for now */
		bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
		bpsc = device_get_softc(bpdev);

		error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpsc->attr.bpid,
		    released, released_n);
		if (__predict_false(error != 0)) {
			device_printf(sc->dev, "%s: failed to release buffers "
			    "to the pool: error=%d\n", __func__, error);
			return (error);
		}
		ch->recycled_n = 0;
	}

	return (0);
}
3226 
3227 /**
3228  * @brief Receive Rx error frames.
3229  */
3230 static int
3231 dpaa2_ni_rx_err(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq,
3232     struct dpaa2_fd *fd)
3233 {
3234 	bus_addr_t paddr = (bus_addr_t)fd->addr;
3235 	struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr);
3236 	struct dpaa2_buf *buf = fa->buf;
3237 	struct dpaa2_channel *bch = (struct dpaa2_channel *)buf->opt;
3238 	struct dpaa2_ni_softc *sc = device_get_softc(bch->ni_dev);
3239 	device_t bpdev;
3240 	struct dpaa2_bp_softc *bpsc;
3241 	int error;
3242 
3243 	KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
3244 	/*
3245 	 * NOTE: Current channel might not be the same as the "buffer" channel
3246 	 * and it's fine. It must not be NULL though.
3247 	 */
3248 	KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__));
3249 
3250 	if (__predict_false(paddr != buf->paddr)) {
3251 		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
3252 		    __func__, paddr, buf->paddr);
3253 	}
3254 
3255 	/* There's only one buffer pool for now */
3256 	bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
3257 	bpsc = device_get_softc(bpdev);
3258 
3259 	/* Release buffer to QBMan buffer pool */
3260 	error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpsc->attr.bpid, &paddr, 1);
3261 	if (error != 0) {
3262 		device_printf(sc->dev, "%s: failed to release frame buffer to "
3263 		    "the pool: error=%d\n", __func__, error);
3264 		return (error);
3265 	}
3266 
3267 	return (0);
3268 }
3269 
3270 /**
3271  * @brief Receive Tx confirmation frames.
3272  */
static int
dpaa2_ni_tx_conf(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq,
    struct dpaa2_fd *fd)
{
	/* Recover the Tx buffer from the frame annotation. */
	bus_addr_t paddr = (bus_addr_t)fd->addr;
	struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr);
	struct dpaa2_buf *buf = fa->buf;
	struct dpaa2_buf *sgt = buf->sgt;
	struct dpaa2_ni_tx_ring *tx = (struct dpaa2_ni_tx_ring *)buf->opt;
	struct dpaa2_channel *bch = tx->fq->chan;

	KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
	KASSERT(tx != NULL, ("%s: Tx ring is NULL", __func__));
	KASSERT(sgt != NULL, ("%s: S/G table is NULL", __func__));
	/*
	 * NOTE: Current channel might not be the same as the "buffer" channel
	 * and it's fine. It must not be NULL though.
	 */
	KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__));

	if (paddr != buf->paddr) {
		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
		    __func__, paddr, buf->paddr);
	}

	mtx_assert(&bch->dma_mtx, MA_NOTOWNED);
	mtx_lock(&bch->dma_mtx);

	/* The frame was sent: unmap, free the mbuf and reset the buffer. */
	bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sgt->dmat, sgt->dmap, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(buf->dmat, buf->dmap);
	bus_dmamap_unload(sgt->dmat, sgt->dmap);
	m_freem(buf->m);
	buf->m = NULL;
	buf->paddr = 0;
	buf->vaddr = NULL;
	sgt->paddr = 0;

	mtx_unlock(&bch->dma_mtx);

	/* Return Tx buffer back to the ring */
	buf_ring_enqueue(tx->br, buf);

	return (0);
}
3318 
3319 /**
3320  * @brief Compare versions of the DPAA2 network interface API.
3321  */
3322 static int
3323 dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *sc, uint16_t major,
3324     uint16_t minor)
3325 {
3326 	if (sc->api_major == major) {
3327 		return sc->api_minor - minor;
3328 	}
3329 	return sc->api_major - major;
3330 }
3331 
3332 /**
3333  * @brief Build a DPAA2 frame descriptor.
3334  */
static int
dpaa2_ni_build_fd(struct dpaa2_ni_softc *sc, struct dpaa2_ni_tx_ring *tx,
    struct dpaa2_buf *buf, bus_dma_segment_t *segs, int nsegs, struct dpaa2_fd *fd)
{
	struct dpaa2_buf *sgt = buf->sgt;
	struct dpaa2_sg_entry *sge;
	struct dpaa2_fa *fa;
	int i, error;

	KASSERT(nsegs <= DPAA2_TX_SEGLIMIT, ("%s: too many segments", __func__));
	KASSERT(buf->opt != NULL, ("%s: no Tx ring?", __func__));
	KASSERT(sgt != NULL, ("%s: no S/G table?", __func__));
	KASSERT(sgt->vaddr != NULL, ("%s: no S/G vaddr?", __func__));

	memset(fd, 0, sizeof(*fd));

	/* Populate and map S/G table */
	if (__predict_true(nsegs <= DPAA2_TX_SEGLIMIT)) {
		/*
		 * NOTE(review): tx_data_off here is scaled by
		 * sizeof(struct dpaa2_sg_entry) due to typed pointer
		 * arithmetic — confirm this is the intended offset (vs a
		 * byte offset into sgt->vaddr).
		 */
		sge = (struct dpaa2_sg_entry *)sgt->vaddr + sc->tx_data_off;
		for (i = 0; i < nsegs; i++) {
			sge[i].addr = (uint64_t)segs[i].ds_addr;
			sge[i].len = (uint32_t)segs[i].ds_len;
			sge[i].offset_fmt = 0u;
		}
		sge[i-1].offset_fmt |= 0x8000u; /* set final entry flag */

		KASSERT(sgt->paddr == 0, ("%s: paddr(%#jx) != 0", __func__,
		    sgt->paddr));

		/* Map the S/G table itself for the hardware. */
		error = bus_dmamap_load(sgt->dmat, sgt->dmap, sgt->vaddr,
		    DPAA2_TX_SGT_SZ, dpaa2_dmamap_oneseg_cb, &sgt->paddr,
		    BUS_DMA_NOWAIT);
		if (__predict_false(error != 0)) {
			device_printf(sc->dev, "%s: bus_dmamap_load() failed: "
			    "error=%d\n", __func__, error);
			return (error);
		}

		buf->paddr = sgt->paddr;
		buf->vaddr = sgt->vaddr;
		sc->tx_sg_frames++; /* for sysctl(9) */
	} else {
		return (EINVAL);
	}

	/* Frame annotation lets the Tx confirmation path find "buf" again. */
	fa = (struct dpaa2_fa *)sgt->vaddr;
	fa->magic = DPAA2_MAGIC;
	fa->buf = buf;

	/* The descriptor points at the S/G table, not the data directly. */
	fd->addr = buf->paddr;
	fd->data_length = (uint32_t)buf->m->m_pkthdr.len;
	fd->bpid_ivp_bmt = 0;
	fd->offset_fmt_sl = 0x2000u | sc->tx_data_off;
	fd->ctrl = 0x00800000u;

	return (0);
}
3392 
3393 static int
3394 dpaa2_ni_fd_err(struct dpaa2_fd *fd)
3395 {
3396 	return ((fd->ctrl >> DPAA2_NI_FD_ERR_SHIFT) & DPAA2_NI_FD_ERR_MASK);
3397 }
3398 
3399 static uint32_t
3400 dpaa2_ni_fd_data_len(struct dpaa2_fd *fd)
3401 {
3402 	if (dpaa2_ni_fd_short_len(fd)) {
3403 		return (fd->data_length & DPAA2_NI_FD_LEN_MASK);
3404 	}
3405 	return (fd->data_length);
3406 }
3407 
3408 static int
3409 dpaa2_ni_fd_format(struct dpaa2_fd *fd)
3410 {
3411 	return ((enum dpaa2_fd_format)((fd->offset_fmt_sl >>
3412 	    DPAA2_NI_FD_FMT_SHIFT) & DPAA2_NI_FD_FMT_MASK));
3413 }
3414 
3415 static bool
3416 dpaa2_ni_fd_short_len(struct dpaa2_fd *fd)
3417 {
3418 	return (((fd->offset_fmt_sl >> DPAA2_NI_FD_SL_SHIFT)
3419 	    & DPAA2_NI_FD_SL_MASK) == 1);
3420 }
3421 
3422 static int
3423 dpaa2_ni_fd_offset(struct dpaa2_fd *fd)
3424 {
3425 	return (fd->offset_fmt_sl & DPAA2_NI_FD_OFFSET_MASK);
3426 }
3427 
3428 /**
3429  * @brief Collect statistics of the network interface.
3430  */
static int
dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS)
{
	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
	/* The sysctl OID number selects the counter to report. */
	struct dpni_stat *stat = &dpni_stat_sysctls[oidp->oid_number];
	device_t pdev = device_get_parent(sc->dev);
	device_t dev = sc->dev;
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_cmd cmd;
	uint64_t cnt[DPAA2_NI_STAT_COUNTERS];
	uint64_t result = 0;	/* reported as 0 on any failure below */
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Fetch the statistics page and pick the requested counter. */
	error = DPAA2_CMD_NI_GET_STATISTICS(dev, child, &cmd, stat->page, 0, cnt);
	if (!error) {
		result = cnt[stat->cnt];
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
exit:
	return (sysctl_handle_64(oidp, &result, 0, req));
}
3473 
3474 static int
3475 dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS)
3476 {
3477 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
3478 	uint32_t buf_num = DPAA2_ATOMIC_READ(&sc->buf_num);
3479 
3480 	return (sysctl_handle_32(oidp, &buf_num, 0, req));
3481 }
3482 
3483 static int
3484 dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS)
3485 {
3486 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
3487 	uint32_t buf_free = DPAA2_ATOMIC_READ(&sc->buf_free);
3488 
3489 	return (sysctl_handle_32(oidp, &buf_free, 0, req));
3490 }
3491 
3492 static int
3493 dpaa2_ni_set_hash(device_t dev, uint64_t flags)
3494 {
3495 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
3496 	uint64_t key = 0;
3497 	int i;
3498 
3499 	if (!(sc->attr.num.queues > 1)) {
3500 		return (EOPNOTSUPP);
3501 	}
3502 
3503 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3504 		if (dist_fields[i].rxnfc_field & flags) {
3505 			key |= dist_fields[i].id;
3506 		}
3507 	}
3508 
3509 	return (dpaa2_ni_set_dist_key(dev, DPAA2_NI_DIST_MODE_HASH, key));
3510 }
3511 
3512 /**
3513  * @brief Set Rx distribution (hash or flow classification) key flags is a
3514  * combination of RXH_ bits.
3515  */
static int
dpaa2_ni_set_dist_key(device_t dev, enum dpaa2_ni_dist_mode type, uint64_t flags)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpkg_profile_cfg cls_cfg;
	struct dpkg_extract *key;
	struct dpaa2_buf *buf = &sc->rxd_kcfg;
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int i, error = 0;

	/* Lazily bind the key-config buffer to its DMA tag on first use. */
	if (__predict_true(buf->dmat == NULL)) {
		buf->dmat = sc->rxd_dmat;
	}

	memset(&cls_cfg, 0, sizeof(cls_cfg));

	/* Configure extracts according to the given flags. */
	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		key = &cls_cfg.extracts[cls_cfg.num_extracts];

		if (!(flags & dist_fields[i].id)) {
			continue;
		}

		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
			device_printf(dev, "%s: failed to add key extraction "
			    "rule\n", __func__);
			return (E2BIG);
		}

		/* One full-field header extraction per requested field. */
		key->type = DPKG_EXTRACT_FROM_HDR;
		key->extract.from_hdr.prot = dist_fields[i].cls_prot;
		key->extract.from_hdr.type = DPKG_FULL_FIELD;
		key->extract.from_hdr.field = dist_fields[i].cls_field;
		cls_cfg.num_extracts++;
	}

	/*
	 * NOTE(review): buf->vaddr allocated here is not released on the
	 * error paths below — presumably this function runs once during
	 * setup, but confirm repeated calls cannot leak the DMA memory.
	 */
	error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr,
	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap);
	if (error != 0) {
		device_printf(dev, "%s: failed to allocate a buffer for Rx "
		    "traffic distribution key configuration\n", __func__);
		return (error);
	}

	/* Serialize the profile into the MC's expected binary layout. */
	error = dpaa2_ni_prepare_key_cfg(&cls_cfg, (uint8_t *)buf->vaddr);
	if (error != 0) {
		device_printf(dev, "%s: failed to prepare key configuration: "
		    "error=%d\n", __func__, error);
		return (error);
	}

	/* Prepare for setting the Rx dist. */
	error = bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr,
	    DPAA2_CLASSIFIER_DMA_SIZE, dpaa2_dmamap_oneseg_cb, &buf->paddr,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->dev, "%s: failed to map a buffer for Rx "
		    "traffic distribution key configuration\n", __func__);
		return (error);
	}

	/* Only hash mode is programmed; other modes are a silent no-op. */
	if (type == DPAA2_NI_DIST_MODE_HASH) {
		DPAA2_CMD_INIT(&cmd);

		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
		    &rc_token);
		if (error) {
			device_printf(dev, "%s: failed to open resource "
			    "container: id=%d, error=%d\n", __func__, rcinfo->id,
			    error);
			goto err_exit;
		}
		error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id,
		    &ni_token);
		if (error) {
			device_printf(dev, "%s: failed to open network "
			    "interface: id=%d, error=%d\n", __func__, dinfo->id,
			    error);
			goto close_rc;
		}

		error = DPAA2_CMD_NI_SET_RX_TC_DIST(dev, child, &cmd,
		    sc->attr.num.queues, 0, DPAA2_NI_DIST_MODE_HASH, buf->paddr);
		if (error != 0) {
			device_printf(dev, "%s: failed to set distribution mode "
			    "and size for the traffic class\n", __func__);
		}

		(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
		    ni_token));
/* NOTE: label is only reachable from within the enclosing if-block. */
close_rc:
		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
		    rc_token));
	}

err_exit:
	return (error);
}
3620 
3621 /**
3622  * @brief Prepares extract parameters.
3623  *
3624  * cfg:		Defining a full Key Generation profile.
3625  * key_cfg_buf:	Zeroed 256 bytes of memory before mapping it to DMA.
3626  */
3627 static int
3628 dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *cfg, uint8_t *key_cfg_buf)
3629 {
3630 	struct dpni_ext_set_rx_tc_dist *dpni_ext;
3631 	struct dpni_dist_extract *extr;
3632 	int i, j;
3633 
3634 	if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
3635 		return (EINVAL);
3636 
3637 	dpni_ext = (struct dpni_ext_set_rx_tc_dist *) key_cfg_buf;
3638 	dpni_ext->num_extracts = cfg->num_extracts;
3639 
3640 	for (i = 0; i < cfg->num_extracts; i++) {
3641 		extr = &dpni_ext->extracts[i];
3642 
3643 		switch (cfg->extracts[i].type) {
3644 		case DPKG_EXTRACT_FROM_HDR:
3645 			extr->prot = cfg->extracts[i].extract.from_hdr.prot;
3646 			extr->efh_type =
3647 			    cfg->extracts[i].extract.from_hdr.type & 0x0Fu;
3648 			extr->size = cfg->extracts[i].extract.from_hdr.size;
3649 			extr->offset = cfg->extracts[i].extract.from_hdr.offset;
3650 			extr->field = cfg->extracts[i].extract.from_hdr.field;
3651 			extr->hdr_index =
3652 				cfg->extracts[i].extract.from_hdr.hdr_index;
3653 			break;
3654 		case DPKG_EXTRACT_FROM_DATA:
3655 			extr->size = cfg->extracts[i].extract.from_data.size;
3656 			extr->offset =
3657 				cfg->extracts[i].extract.from_data.offset;
3658 			break;
3659 		case DPKG_EXTRACT_FROM_PARSE:
3660 			extr->size = cfg->extracts[i].extract.from_parse.size;
3661 			extr->offset =
3662 				cfg->extracts[i].extract.from_parse.offset;
3663 			break;
3664 		default:
3665 			return (EINVAL);
3666 		}
3667 
3668 		extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
3669 		extr->extract_type = cfg->extracts[i].type & 0x0Fu;
3670 
3671 		for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
3672 			extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
3673 			extr->masks[j].offset =
3674 				cfg->extracts[i].masks[j].offset;
3675 		}
3676 	}
3677 
3678 	return (0);
3679 }
3680 
/* newbus(9) method table for the DPNI driver. */
static device_method_t dpaa2_ni_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		dpaa2_ni_probe),
	DEVMETHOD(device_attach,	dpaa2_ni_attach),
	DEVMETHOD(device_detach,	dpaa2_ni_detach),

	/* mii via memac_mdio */
	DEVMETHOD(miibus_statchg,	dpaa2_ni_miibus_statchg),

	DEVMETHOD_END
};
3692 
static driver_t dpaa2_ni_driver = {
	"dpaa2_ni",
	dpaa2_ni_methods,
	sizeof(struct dpaa2_ni_softc),
};

/* Attach miibus below dpaa2_ni; dpaa2_ni below the resource container. */
DRIVER_MODULE(miibus, dpaa2_ni, miibus_driver, 0, 0);
DRIVER_MODULE(dpaa2_ni, dpaa2_rc, dpaa2_ni_driver, 0, 0);

MODULE_DEPEND(dpaa2_ni, miibus, 1, 1, 1);
/* The MEMAC MDIO backend differs between ACPI and FDT platforms. */
#ifdef DEV_ACPI
MODULE_DEPEND(dpaa2_ni, memac_mdio_acpi, 1, 1, 1);
#endif
#ifdef FDT
MODULE_DEPEND(dpaa2_ni, memac_mdio_fdt, 1, 1, 1);
#endif
3709