/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright © 2021-2023 Dmitry Salychev
 * Copyright © 2022 Mathew McBride
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 * The DPAA2 Network Interface (DPNI) driver.
 *
 * The DPNI object is a network interface that is configurable to support a wide
 * range of features from a very basic Ethernet interface up to a
 * high-functioning network interface. The DPNI supports features that are
 * expected by standard network stacks, from basic features to offloads.
 *
 * DPNIs work with Ethernet traffic, starting with the L2 header. Additional
 * functions are provided for standard network protocols (L2, L3, L4, etc.).
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/taskqueue.h>
#include <sys/buf_ring.h>
#include <sys/smp.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/atomic.h>
#include <machine/vmparam.h>

#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <dev/pci/pcivar.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mdio/mdio.h>

#include "opt_acpi.h"
#include "opt_platform.h"

#include "pcib_if.h"
#include "pci_if.h"
#include "miibus_if.h"
#include "memac_mdio_if.h"

#include "dpaa2_types.h"
#include "dpaa2_mc.h"
#include "dpaa2_mc_if.h"
#include "dpaa2_mcp.h"
#include "dpaa2_swp.h"
#include "dpaa2_swp_if.h"
#include "dpaa2_cmd_if.h"
#include "dpaa2_ni.h"
#include "dpaa2_channel.h"
#include "dpaa2_buf.h"

#define BIT(x)			(1ul << (x))
#define WRIOP_VERSION(x, y, z)	((x) << 10 | (y) << 5 | (z) << 0)
#define ARRAY_SIZE(a)		(sizeof(a) / sizeof((a)[0]))
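/*
 * E.g. WRIOP_VERSION(1, 0, 0) packs to 0x400; values built this way can be
 * matched against the WRIOP revision reported in the DPNI attributes (see
 * the BUF_ALIGN_V1 note below for a WRIOP v1.0.0 limitation).
 */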

/* Frame Dequeue Response status bits. */
#define IS_NULL_RESPONSE(stat)	((((stat) >> 4) & 1) == 0)

#define	ALIGN_UP(x, y)		roundup2((x), (y))
#define	ALIGN_DOWN(x, y)	rounddown2((x), (y))
#define CACHE_LINE_ALIGN(x)	ALIGN_UP((x), CACHE_LINE_SIZE)
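/*
 * E.g. ALIGN_UP(100, 64) == 128 and ALIGN_DOWN(100, 64) == 64; with a
 * 64-byte cache line, CACHE_LINE_ALIGN(100) == 128. Note that roundup2()
 * and rounddown2() require the alignment to be a power of two.
 */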

#define DPNI_LOCK(__sc) do {			\
	mtx_assert(&(__sc)->lock, MA_NOTOWNED);	\
	mtx_lock(&(__sc)->lock);		\
} while (0)
#define	DPNI_UNLOCK(__sc) do {			\
	mtx_assert(&(__sc)->lock, MA_OWNED);	\
	mtx_unlock(&(__sc)->lock);		\
} while (0)
#define	DPNI_LOCK_ASSERT(__sc) do {		\
	mtx_assert(&(__sc)->lock, MA_OWNED);	\
} while (0)

#define DPAA2_TX_RING(sc, chan, tc) \
	(&(sc)->channels[(chan)]->txc_queue.tx_rings[(tc)])
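/*
 * E.g. DPAA2_TX_RING(sc, 0, 1) expands to
 * &sc->channels[0]->txc_queue.tx_rings[1], i.e. the Tx ring of traffic
 * class 1 on the first channel.
 */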

MALLOC_DEFINE(M_DPAA2_TXB, "dpaa2_txb", "DPAA2 DMA-mapped buffer (Tx)");

/*
 * How many times the channel cleanup routine will be repeated if the Rx or Tx
 * budget was depleted.
 */
#define DPAA2_CLEAN_BUDGET	64 /* sysctl(9)? */
/* Tx/Rx budget for the channel cleanup task */
#define DPAA2_TX_BUDGET		128 /* sysctl(9)? */
#define DPAA2_RX_BUDGET		256 /* sysctl(9)? */

#define DPNI_IRQ_INDEX		0 /* Index of the only DPNI IRQ. */
#define DPNI_IRQ_LINK_CHANGED	1 /* Link state changed */
#define DPNI_IRQ_EP_CHANGED	2 /* DPAA2 endpoint dis/connected */

/* Default maximum frame length. */
#define DPAA2_ETH_MFL		(ETHER_MAX_LEN - ETHER_CRC_LEN)
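/* I.e. 1514 bytes: ETHER_MAX_LEN (1518) minus the 4-byte Ethernet CRC. */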

/* Minimally supported version of the DPNI API. */
#define DPNI_VER_MAJOR		7
#define DPNI_VER_MINOR		0

/* Rx/Tx buffers configuration. */
#define BUF_ALIGN_V1		256 /* WRIOP v1.0.0 limitation */
#define BUF_ALIGN		64
#define BUF_SWA_SIZE		64  /* SW annotation size */
#define BUF_RX_HWA_SIZE		64  /* HW annotation size */
#define BUF_TX_HWA_SIZE		128 /* HW annotation size */

#define DPAA2_RX_BUFRING_SZ	(4096u)
#define DPAA2_RXE_BUFRING_SZ	(1024u)
#define DPAA2_TXC_BUFRING_SZ	(4096u)
#define DPAA2_TX_SEGLIMIT	(16u) /* arbitrary number */
#define DPAA2_TX_SEG_SZ		(PAGE_SIZE)
#define DPAA2_TX_SEGS_MAXSZ	(DPAA2_TX_SEGLIMIT * DPAA2_TX_SEG_SZ)
#define DPAA2_TX_SGT_SZ		(PAGE_SIZE) /* bytes */
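/*
 * With 4 KiB pages, a single Tx frame is thus limited to 16 segments of up
 * to 4 KiB each (DPAA2_TX_SEGS_MAXSZ = 64 KiB), and a scatter/gather table
 * occupies one page.
 */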

/* Size of a buffer to keep a QoS table key configuration. */
#define ETH_QOS_KCFG_BUF_SIZE	(PAGE_SIZE)

/* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */
#define DPAA2_CLASSIFIER_DMA_SIZE (PAGE_SIZE)

/* Buffer layout options. */
#define BUF_LOPT_TIMESTAMP	0x1
#define BUF_LOPT_PARSER_RESULT	0x2
#define BUF_LOPT_FRAME_STATUS	0x4
#define BUF_LOPT_PRIV_DATA_SZ	0x8
#define BUF_LOPT_DATA_ALIGN	0x10
#define BUF_LOPT_DATA_HEAD_ROOM	0x20
#define BUF_LOPT_DATA_TAIL_ROOM	0x40

#define DPAA2_NI_BUF_ADDR_MASK	(0x1FFFFFFFFFFFFul) /* 49-bit addresses max. */
#define DPAA2_NI_BUF_CHAN_MASK	(0xFu)
#define DPAA2_NI_BUF_CHAN_SHIFT	(60)
#define DPAA2_NI_BUF_IDX_MASK	(0x7FFFu)
#define DPAA2_NI_BUF_IDX_SHIFT	(49)
#define DPAA2_NI_TX_IDX_MASK	(0x7u)
#define DPAA2_NI_TX_IDX_SHIFT	(57)
#define DPAA2_NI_TXBUF_IDX_MASK	(0xFFu)
#define DPAA2_NI_TXBUF_IDX_SHIFT (49)
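/*
 * Judging by the masks and shifts above, a Tx buffer is identified within
 * the 64-bit FD address field roughly as follows: channel index in bits
 * 63:60, Tx ring index in bits 59:57, Tx buffer index in bits 56:49, and
 * the (at most 49-bit) physical address in bits 48:0.
 */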

#define DPAA2_NI_FD_FMT_MASK	(0x3u)
#define DPAA2_NI_FD_FMT_SHIFT	(12)
#define DPAA2_NI_FD_ERR_MASK	(0xFFu)
#define DPAA2_NI_FD_ERR_SHIFT	(0)
#define DPAA2_NI_FD_SL_MASK	(0x1u)
#define DPAA2_NI_FD_SL_SHIFT	(14)
#define DPAA2_NI_FD_LEN_MASK	(0x3FFFFu)
#define DPAA2_NI_FD_OFFSET_MASK (0x0FFFu)
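/*
 * Judging by the masks above: the FD frame format occupies bits 13:12 and
 * the short-length (SL) flag bit 14 of their field, the error bits are 8
 * bits wide, and the data length and offset are 18 and 12 bits respectively
 * (see the dpaa2_ni_fd_*() helpers declared below).
 */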

/* Enables TCAM for Flow Steering and QoS look-ups. */
#define DPNI_OPT_HAS_KEY_MASKING 0x10

/* Unique IDs for the supported Rx classification header fields. */
#define DPAA2_ETH_DIST_ETHDST	BIT(0)
#define DPAA2_ETH_DIST_ETHSRC	BIT(1)
#define DPAA2_ETH_DIST_ETHTYPE	BIT(2)
#define DPAA2_ETH_DIST_VLAN	BIT(3)
#define DPAA2_ETH_DIST_IPSRC	BIT(4)
#define DPAA2_ETH_DIST_IPDST	BIT(5)
#define DPAA2_ETH_DIST_IPPROTO	BIT(6)
#define DPAA2_ETH_DIST_L4SRC	BIT(7)
#define DPAA2_ETH_DIST_L4DST	BIT(8)
#define DPAA2_ETH_DIST_ALL	(~0ULL)

/* L3-L4 network traffic flow hash options. */
#define	RXH_L2DA		(1 << 1)
#define	RXH_VLAN		(1 << 2)
#define	RXH_L3_PROTO		(1 << 3)
#define	RXH_IP_SRC		(1 << 4)
#define	RXH_IP_DST		(1 << 5)
#define	RXH_L4_B_0_1		(1 << 6) /* src port in case of TCP/UDP/SCTP */
#define	RXH_L4_B_2_3		(1 << 7) /* dst port in case of TCP/UDP/SCTP */
#define	RXH_DISCARD		(1u << 31)

/* Default Rx hash options, set during attach. */
#define DPAA2_RXH_DEFAULT	(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
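/*
 * I.e. by default the ingress traffic is distributed based on the IP
 * source/destination addresses together with the L4 (TCP/UDP/SCTP)
 * source/destination ports.
 */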

MALLOC_DEFINE(M_DPAA2_NI, "dpaa2_ni", "DPAA2 Network Interface");

/*
 * DPAA2 Network Interface resource specification.
 *
 * NOTE: Don't forget to update macros in dpaa2_ni.h in case of any changes in
 *       the specification!
 */
struct resource_spec dpaa2_ni_spec[] = {
	/*
	 * DPMCP resources.
	 *
	 * NOTE: MC command portals (MCPs) are used to send commands to, and
	 *	 receive responses from, the MC firmware. One portal per DPNI.
	 */
	{ DPAA2_DEV_MCP, DPAA2_NI_MCP_RID(0),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	/*
	 * DPIO resources (software portals).
	 *
	 * NOTE: One per running core. While DPIOs are the source of data
	 *	 availability interrupts, the DPCONs are used to identify the
	 *	 network interface that has produced ingress data to that core.
	 */
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(0),    RF_ACTIVE | RF_SHAREABLE },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(1),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(2),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(3),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(4),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(5),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(6),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(7),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(8),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(9),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(10),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(11),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(12),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(13),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(14),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(15),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	/*
	 * DPBP resources (buffer pools).
	 *
	 * NOTE: One per network interface.
	 */
	{ DPAA2_DEV_BP,  DPAA2_NI_BP_RID(0),   RF_ACTIVE },
	/*
	 * DPCON resources (channels).
	 *
	 * NOTE: One DPCON per core to which Rx or Tx confirmation traffic is to
	 *	 be distributed.
	 * NOTE: Since it is necessary to distinguish between traffic from
	 *	 different network interfaces arriving on the same core, the
	 *	 DPCONs must be private to the DPNIs.
	 */
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(0),   RF_ACTIVE },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(1),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(2),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(3),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(4),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(5),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(6),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(7),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(8),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(9),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(10),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(11),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(12),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(13),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(14),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(15),  RF_ACTIVE | RF_OPTIONAL },

	RESOURCE_SPEC_END
};

/* Supported header fields for Rx hash distribution key */
static const struct dpaa2_eth_dist_fields dist_fields[] = {
	{
		/* L2 header */
		.rxnfc_field = RXH_L2DA,
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_DA,
		.id = DPAA2_ETH_DIST_ETHDST,
		.size = 6,
	}, {
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_SA,
		.id = DPAA2_ETH_DIST_ETHSRC,
		.size = 6,
	}, {
		/* This is the last ethertype field parsed:
		 * depending on frame format, it can be the MAC ethertype
		 * or the VLAN etype.
		 */
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_TYPE,
		.id = DPAA2_ETH_DIST_ETHTYPE,
		.size = 2,
	}, {
		/* VLAN header */
		.rxnfc_field = RXH_VLAN,
		.cls_prot = NET_PROT_VLAN,
		.cls_field = NH_FLD_VLAN_TCI,
		.id = DPAA2_ETH_DIST_VLAN,
		.size = 2,
	}, {
		/* IP header */
		.rxnfc_field = RXH_IP_SRC,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_SRC,
		.id = DPAA2_ETH_DIST_IPSRC,
		.size = 4,
	}, {
		.rxnfc_field = RXH_IP_DST,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_DST,
		.id = DPAA2_ETH_DIST_IPDST,
		.size = 4,
	}, {
		.rxnfc_field = RXH_L3_PROTO,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_PROTO,
		.id = DPAA2_ETH_DIST_IPPROTO,
		.size = 1,
	}, {
		/* Using UDP ports, this is functionally equivalent to raw
		 * byte pairs from L4 header.
		 */
		.rxnfc_field = RXH_L4_B_0_1,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_SRC,
		.id = DPAA2_ETH_DIST_L4SRC,
		.size = 2,
	}, {
		.rxnfc_field = RXH_L4_B_2_3,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_DST,
		.id = DPAA2_ETH_DIST_L4DST,
		.size = 2,
	},
};

static struct dpni_stat {
	int	 page;
	int	 cnt;
	char	*name;
	char	*desc;
} dpni_stat_sysctls[DPAA2_NI_STAT_SYSCTLS] = {
	/* PAGE, COUNTER, NAME, DESCRIPTION */
	{  0, 0, "in_all_frames",	"All accepted ingress frames" },
	{  0, 1, "in_all_bytes",	"Bytes in all accepted ingress frames" },
	{  0, 2, "in_multi_frames",	"Multicast accepted ingress frames" },
	{  1, 0, "eg_all_frames",	"All egress frames transmitted" },
	{  1, 1, "eg_all_bytes",	"Bytes in all frames transmitted" },
	{  1, 2, "eg_multi_frames",	"Multicast egress frames transmitted" },
	{  2, 0, "in_filtered_frames",	"All ingress frames discarded due to "
	   				"filtering" },
	{  2, 1, "in_discarded_frames",	"All frames discarded due to errors" },
	{  2, 2, "in_nobuf_discards",	"Discards on ingress side due to buffer "
	   				"depletion in DPNI buffer pools" },
};

struct dpaa2_ni_rx_ctx {
	struct mbuf	*head;
	struct mbuf	*tail;
	int		 cnt;
	bool		 last;
};

/* Device interface */
static int dpaa2_ni_probe(device_t);
static int dpaa2_ni_attach(device_t);
static int dpaa2_ni_detach(device_t);

/* DPAA2 network interface setup and configuration */
static int dpaa2_ni_setup(device_t);
static int dpaa2_ni_setup_channels(device_t);
static int dpaa2_ni_bind(device_t);
static int dpaa2_ni_setup_rx_dist(device_t);
static int dpaa2_ni_setup_irqs(device_t);
static int dpaa2_ni_setup_msi(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_dma(struct dpaa2_ni_softc *);

/* Tx/Rx flow configuration */
static int dpaa2_ni_setup_rx_flow(device_t, struct dpaa2_ni_fq *);
static int dpaa2_ni_setup_tx_flow(device_t, struct dpaa2_ni_fq *);
static int dpaa2_ni_setup_rx_err_flow(device_t, struct dpaa2_ni_fq *);

/* Configuration subroutines */
static int dpaa2_ni_set_buf_layout(device_t);
static int dpaa2_ni_set_pause_frame(device_t);
static int dpaa2_ni_set_qos_table(device_t);
static int dpaa2_ni_set_mac_addr(device_t);
static int dpaa2_ni_set_hash(device_t, uint64_t);
static int dpaa2_ni_set_dist_key(device_t, enum dpaa2_ni_dist_mode, uint64_t);

/* Frame descriptor routines */
static int dpaa2_ni_build_fd(struct dpaa2_ni_softc *, struct dpaa2_ni_tx_ring *,
    struct dpaa2_buf *, bus_dma_segment_t *, int, struct dpaa2_fd *);
static int dpaa2_ni_fd_err(struct dpaa2_fd *);
static uint32_t dpaa2_ni_fd_data_len(struct dpaa2_fd *);
static int dpaa2_ni_fd_format(struct dpaa2_fd *);
static bool dpaa2_ni_fd_short_len(struct dpaa2_fd *);
static int dpaa2_ni_fd_offset(struct dpaa2_fd *);

/* Various subroutines */
static int dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *, uint16_t, uint16_t);
static int dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *, uint8_t *);

/* Network interface routines */
static void dpaa2_ni_init(void *);
static int  dpaa2_ni_transmit(if_t, struct mbuf *);
static void dpaa2_ni_qflush(if_t);
static int  dpaa2_ni_ioctl(if_t, u_long, caddr_t);
static int  dpaa2_ni_update_mac_filters(if_t);
static u_int dpaa2_ni_add_maddr(void *, struct sockaddr_dl *, u_int);

/* Interrupt handlers */
static void dpaa2_ni_intr(void *);

/* MII handlers */
static void dpaa2_ni_miibus_statchg(device_t);
static int  dpaa2_ni_media_change(if_t);
static void dpaa2_ni_media_status(if_t, struct ifmediareq *);
static void dpaa2_ni_media_tick(void *);

/* Tx/Rx routines. */
static int dpaa2_ni_rx_cleanup(struct dpaa2_channel *);
static int dpaa2_ni_tx_cleanup(struct dpaa2_channel *);
static void dpaa2_ni_tx(struct dpaa2_ni_softc *, struct dpaa2_channel *,
    struct dpaa2_ni_tx_ring *, struct mbuf *);
static void dpaa2_ni_cleanup_task(void *, int);

/* Tx/Rx subroutines */
static int dpaa2_ni_consume_frames(struct dpaa2_channel *, struct dpaa2_ni_fq **,
    uint32_t *);
static int dpaa2_ni_rx(struct dpaa2_channel *, struct dpaa2_ni_fq *,
    struct dpaa2_fd *, struct dpaa2_ni_rx_ctx *);
static int dpaa2_ni_rx_err(struct dpaa2_channel *, struct dpaa2_ni_fq *,
    struct dpaa2_fd *);
static int dpaa2_ni_tx_conf(struct dpaa2_channel *, struct dpaa2_ni_fq *,
    struct dpaa2_fd *);

/* sysctl(9) */
static int dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS);
static int dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS);
static int dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS);

static int
dpaa2_ni_probe(device_t dev)
{
	/* The DPNI device is added by the parent resource container itself. */
	device_set_desc(dev, "DPAA2 Network Interface");
	return (BUS_PROBE_DEFAULT);
}

static int
dpaa2_ni_attach(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	device_t mcp_dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *mcp_dinfo;
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	if_t ifp;
	char tq_name[32];
	int error;

	sc->dev = dev;
	sc->ifp = NULL;
	sc->miibus = NULL;
	sc->mii = NULL;
	sc->media_status = 0;
	sc->if_flags = 0;
	sc->link_state = LINK_STATE_UNKNOWN;
	sc->buf_align = 0;

	/* For debug purposes only! */
	sc->rx_anomaly_frames = 0;
	sc->rx_single_buf_frames = 0;
	sc->rx_sg_buf_frames = 0;
	sc->rx_enq_rej_frames = 0;
	sc->rx_ieoi_err_frames = 0;
	sc->tx_single_buf_frames = 0;
	sc->tx_sg_frames = 0;

	DPAA2_ATOMIC_XCHG(&sc->buf_num, 0);
	DPAA2_ATOMIC_XCHG(&sc->buf_free, 0);

	sc->rxd_dmat = NULL;
	sc->qos_dmat = NULL;

	sc->qos_kcfg.dmap = NULL;
	sc->qos_kcfg.paddr = 0;
	sc->qos_kcfg.vaddr = NULL;

	sc->rxd_kcfg.dmap = NULL;
	sc->rxd_kcfg.paddr = 0;
	sc->rxd_kcfg.vaddr = NULL;

	sc->mac.dpmac_id = 0;
	sc->mac.phy_dev = NULL;
	memset(sc->mac.addr, 0, ETHER_ADDR_LEN);

	error = bus_alloc_resources(sc->dev, dpaa2_ni_spec, sc->res);
	if (error) {
		device_printf(dev, "%s: failed to allocate resources: "
		    "error=%d\n", __func__, error);
		goto err_exit;
	}

	/* Obtain MC portal. */
	mcp_dev = (device_t) rman_get_start(sc->res[DPAA2_NI_MCP_RID(0)]);
	mcp_dinfo = device_get_ivars(mcp_dev);
	dinfo->portal = mcp_dinfo->portal;

	mtx_init(&sc->lock, device_get_nameunit(dev), "dpaa2_ni", MTX_DEF);

	/* Allocate network interface */
	ifp = if_alloc(IFT_ETHER);
	sc->ifp = ifp;
	if_initname(ifp, DPAA2_NI_IFNAME, device_get_unit(sc->dev));

	if_setsoftc(ifp, sc);
	if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST);
	if_setinitfn(ifp, dpaa2_ni_init);
	if_setioctlfn(ifp, dpaa2_ni_ioctl);
	if_settransmitfn(ifp, dpaa2_ni_transmit);
	if_setqflushfn(ifp, dpaa2_ni_qflush);

	if_setcapabilities(ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU);
	if_setcapenable(ifp, if_getcapabilities(ifp));

	DPAA2_CMD_INIT(&cmd);

	/* Open resource container and network interface object. */
	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	bzero(tq_name, sizeof(tq_name));
	snprintf(tq_name, sizeof(tq_name), "%s_tqbp", device_get_nameunit(dev));

	/*
	 * XXX-DSL: Release new buffers on Buffer Pool State Change Notification
	 *          (BPSCN) returned as a result to the VDQ command instead.
	 *          It is similar to CDAN processed in dpaa2_io_intr().
	 */
	/* Create a taskqueue thread to release new buffers to the pool. */
	sc->bp_taskq = taskqueue_create(tq_name, M_WAITOK,
	    taskqueue_thread_enqueue, &sc->bp_taskq);
	taskqueue_start_threads(&sc->bp_taskq, 1, PI_NET, "%s", tq_name);

	/* sc->cleanup_taskq = taskqueue_create("dpaa2_ch cleanup", M_WAITOK, */
	/*     taskqueue_thread_enqueue, &sc->cleanup_taskq); */
	/* taskqueue_start_threads(&sc->cleanup_taskq, 1, PI_NET, */
	/*     "dpaa2_ch cleanup"); */

	error = dpaa2_ni_setup(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup DPNI: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_channels(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup QBMan channels: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	error = dpaa2_ni_bind(dev);
	if (error) {
		device_printf(dev, "%s: failed to bind DPNI: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_irqs(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup IRQs: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_sysctls(sc);
	if (error) {
		device_printf(dev, "%s: failed to setup sysctls: error=%d\n",
		    __func__, error);
		goto close_ni;
	}

	ether_ifattach(sc->ifp, sc->mac.addr);
	callout_init(&sc->mii_callout, 0);

	return (0);

close_ni:
	DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (ENXIO);
}

static void
dpaa2_ni_fixed_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);

	DPNI_LOCK(sc);
	ifmr->ifm_count = 0;
	ifmr->ifm_mask = 0;
	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifmr->ifm_current = ifmr->ifm_active =
	    sc->fixed_ifmedia.ifm_cur->ifm_media;

	/*
	 * In non-PHY use cases, we need to signal link state up; otherwise
	 * certain things requiring a link event from devd (e.g. an async DHCP
	 * client) do not happen.
	 */
	if (if_getlinkstate(ifp) == LINK_STATE_UNKNOWN) {
		if_link_state_change(ifp, LINK_STATE_UP);
	}

	/*
	 * TODO: Check the status of the link partner (DPMAC, DPNI or other) and
	 * reset if down. This is different from DPAA2_MAC_LINK_TYPE_PHY, as
	 * the MC firmware sets the status, instead of us telling the MC what
	 * it is.
	 */
	DPNI_UNLOCK(sc);
}

static void
dpaa2_ni_setup_fixed_link(struct dpaa2_ni_softc *sc)
{
	/*
	 * FIXME: When the DPNI is connected to a DPMAC, we can get the
	 * 'apparent' speed from it.
	 */
	sc->fixed_link = true;

	ifmedia_init(&sc->fixed_ifmedia, 0, dpaa2_ni_media_change,
		     dpaa2_ni_fixed_media_status);
	ifmedia_add(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL);
	ifmedia_set(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T);
}

static int
dpaa2_ni_detach(device_t dev)
{
	/* TBD */
	return (0);
}
/**
 * @brief Configure DPAA2 network interface object.
 */
static int
dpaa2_ni_setup(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_ep_desc ep1_desc, ep2_desc; /* endpoint descriptors */
	struct dpaa2_cmd cmd;
	uint8_t eth_bca[ETHER_ADDR_LEN]; /* broadcast physical address */
	uint16_t rc_token, ni_token, mac_token;
	struct dpaa2_mac_attr attr;
	enum dpaa2_mac_link_type link_type;
	uint32_t link;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Check if we can work with this DPNI object. */
	error = DPAA2_CMD_NI_GET_API_VERSION(dev, child, &cmd, &sc->api_major,
	    &sc->api_minor);
	if (error) {
		device_printf(dev, "%s: failed to get DPNI API version\n",
		    __func__);
		goto close_ni;
	}
	if (dpaa2_ni_cmp_api_version(sc, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
		device_printf(dev, "%s: DPNI API version %u.%u not supported, "
		    "need >= %u.%u\n", __func__, sc->api_major, sc->api_minor,
		    DPNI_VER_MAJOR, DPNI_VER_MINOR);
		error = ENODEV;
		goto close_ni;
	}

	/* Reset the DPNI object. */
	error = DPAA2_CMD_NI_RESET(dev, child, &cmd);
	if (error) {
		device_printf(dev, "%s: failed to reset DPNI: id=%d\n",
		    __func__, dinfo->id);
		goto close_ni;
	}

	/* Obtain attributes of the DPNI object. */
	error = DPAA2_CMD_NI_GET_ATTRIBUTES(dev, child, &cmd, &sc->attr);
	if (error) {
		device_printf(dev, "%s: failed to obtain DPNI attributes: "
		    "id=%d\n", __func__, dinfo->id);
		goto close_ni;
	}
	if (bootverbose) {
		device_printf(dev, "\toptions=%#x queues=%d tx_channels=%d "
		    "wriop_version=%#x\n", sc->attr.options, sc->attr.num.queues,
		    sc->attr.num.channels, sc->attr.wriop_ver);
		device_printf(dev, "\ttraffic classes: rx=%d tx=%d "
		    "cgs_groups=%d\n", sc->attr.num.rx_tcs, sc->attr.num.tx_tcs,
		    sc->attr.num.cgs);
		device_printf(dev, "\ttable entries: mac=%d vlan=%d qos=%d "
		    "fs=%d\n", sc->attr.entries.mac, sc->attr.entries.vlan,
		    sc->attr.entries.qos, sc->attr.entries.fs);
		device_printf(dev, "\tkey sizes: qos=%d fs=%d\n",
		    sc->attr.key_size.qos, sc->attr.key_size.fs);
	}

	/* Configure buffer layouts of the DPNI queues. */
	error = dpaa2_ni_set_buf_layout(dev);
	if (error) {
		device_printf(dev, "%s: failed to configure buffer layout\n",
		    __func__);
		goto close_ni;
	}

	/* Configure DMA resources. */
	error = dpaa2_ni_setup_dma(sc);
	if (error) {
		device_printf(dev, "%s: failed to setup DMA\n", __func__);
		goto close_ni;
	}

	/* Setup link between DPNI and an object it's connected to. */
	ep1_desc.obj_id = dinfo->id;
	ep1_desc.if_id = 0; /* DPNI has the only endpoint */
	ep1_desc.type = dinfo->dtype;

	error = DPAA2_CMD_RC_GET_CONN(dev, child, DPAA2_CMD_TK(&cmd, rc_token),
	    &ep1_desc, &ep2_desc, &link);
	if (error) {
		device_printf(dev, "%s: failed to obtain an object DPNI is "
		    "connected to: error=%d\n", __func__, error);
	} else {
		device_printf(dev, "connected to %s (id=%d)\n",
		    dpaa2_ttos(ep2_desc.type), ep2_desc.obj_id);

		error = dpaa2_ni_set_mac_addr(dev);
		if (error) {
			device_printf(dev, "%s: failed to set MAC address: "
			    "error=%d\n", __func__, error);
		}

		if (ep2_desc.type == DPAA2_DEV_MAC) {
			/*
			 * This is the simplest case when DPNI is connected to
			 * DPMAC directly.
			 */
			sc->mac.dpmac_id = ep2_desc.obj_id;

			link_type = DPAA2_MAC_LINK_TYPE_NONE;

			/*
			 * Need to determine if DPMAC type is PHY (attached to
			 * conventional MII PHY) or FIXED (usually SFP/SerDes,
			 * link state managed by MC firmware).
			 */
			error = DPAA2_CMD_MAC_OPEN(sc->dev, child,
			    DPAA2_CMD_TK(&cmd, rc_token), sc->mac.dpmac_id,
			    &mac_token);
			/*
			 * Under VFIO, the DPMAC might be sitting in another
			 * container (DPRC) we don't have access to.
			 * Assume DPAA2_MAC_LINK_TYPE_FIXED if this is
			 * the case.
			 */
			if (error) {
				device_printf(dev, "%s: failed to open "
				    "connected DPMAC: %d (assuming in other DPRC)\n", __func__,
				    sc->mac.dpmac_id);
				link_type = DPAA2_MAC_LINK_TYPE_FIXED;
			} else {
				error = DPAA2_CMD_MAC_GET_ATTRIBUTES(dev, child,
				    &cmd, &attr);
				if (error) {
					device_printf(dev, "%s: failed to get "
					    "DPMAC attributes: id=%d, "
					    "error=%d\n", __func__, dinfo->id,
					    error);
				} else {
					link_type = attr.link_type;
				}
			}
			DPAA2_CMD_MAC_CLOSE(dev, child, &cmd);

			if (link_type == DPAA2_MAC_LINK_TYPE_FIXED) {
				device_printf(dev, "connected DPMAC is in FIXED "
				    "mode\n");
				dpaa2_ni_setup_fixed_link(sc);
			} else if (link_type == DPAA2_MAC_LINK_TYPE_PHY) {
				device_printf(dev, "connected DPMAC is in PHY "
				    "mode\n");
				error = DPAA2_MC_GET_PHY_DEV(dev,
				    &sc->mac.phy_dev, sc->mac.dpmac_id);
				if (error == 0) {
					error = MEMAC_MDIO_SET_NI_DEV(
					    sc->mac.phy_dev, dev);
					if (error != 0) {
						device_printf(dev, "%s: failed "
						    "to set dpni dev on memac "
						    "mdio dev %s: error=%d\n",
						    __func__,
						    device_get_nameunit(
						    sc->mac.phy_dev), error);
					}
				}
				if (error == 0) {
					error = MEMAC_MDIO_GET_PHY_LOC(
					    sc->mac.phy_dev, &sc->mac.phy_loc);
					if (error == ENODEV) {
						error = 0;
					}
					if (error != 0) {
						device_printf(dev, "%s: failed "
						    "to get phy location from "
						    "memac mdio dev %s: error=%d\n",
						    __func__, device_get_nameunit(
						    sc->mac.phy_dev), error);
					}
				}
				if (error == 0) {
					error = mii_attach(sc->mac.phy_dev,
					    &sc->miibus, sc->ifp,
					    dpaa2_ni_media_change,
					    dpaa2_ni_media_status,
					    BMSR_DEFCAPMASK, sc->mac.phy_loc,
					    MII_OFFSET_ANY, 0);
					if (error != 0) {
						device_printf(dev, "%s: failed "
						    "to attach to miibus: "
						    "error=%d\n",
						    __func__, error);
					}
				}
				if (error == 0) {
					sc->mii = device_get_softc(sc->miibus);
				}
			} else {
				device_printf(dev, "%s: DPMAC link type is not "
				    "supported\n", __func__);
			}
		} else if (ep2_desc.type == DPAA2_DEV_NI ||
			   ep2_desc.type == DPAA2_DEV_MUX ||
			   ep2_desc.type == DPAA2_DEV_SW) {
			dpaa2_ni_setup_fixed_link(sc);
		}
	}

	/* Select mode to enqueue frames. */
	/* ... TBD ... */

	/*
	 * Update link configuration to enable Rx/Tx pause frames support.
	 *
	 * NOTE: MC may generate an interrupt to the DPMAC and request changes
	 *       in link configuration. It might be necessary to attach miibus
	 *       and PHY before this point.
	 */
	error = dpaa2_ni_set_pause_frame(dev);
	if (error) {
		device_printf(dev, "%s: failed to configure Rx/Tx pause "
		    "frames\n", __func__);
		goto close_ni;
	}

	/* Configure ingress traffic classification. */
	error = dpaa2_ni_set_qos_table(dev);
	if (error) {
		device_printf(dev, "%s: failed to configure QoS table: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	/* Add broadcast physical address to the MAC filtering table. */
	memset(eth_bca, 0xff, ETHER_ADDR_LEN);
	error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, DPAA2_CMD_TK(&cmd,
	    ni_token), eth_bca);
	if (error) {
		device_printf(dev, "%s: failed to add broadcast physical "
		    "address to the MAC filtering table\n", __func__);
		goto close_ni;
	}

	/* Set the maximum allowed length for received frames. */
	error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd, DPAA2_ETH_MFL);
	if (error) {
		device_printf(dev, "%s: failed to set maximum length for "
		    "received frames\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

/**
 * @brief Configure QBMan channels and register data availability notifications.
 */
static int
dpaa2_ni_setup_channels(device_t dev)
{
	device_t iodev, condev, bpdev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	uint32_t i, num_chan;
	int error;

	/* Calculate the number of channels based on the allocated resources */
	for (i = 0; i < DPAA2_NI_IO_RES_NUM; i++) {
		if (!sc->res[DPAA2_NI_IO_RID(i)]) {
			break;
		}
	}
	num_chan = i;
	for (i = 0; i < DPAA2_NI_CON_RES_NUM; i++) {
		if (!sc->res[DPAA2_NI_CON_RID(i)]) {
			break;
		}
	}
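	/*
	 * Each channel needs both a DPIO and a DPCON, so use the smaller of
	 * the two counts, additionally capped by DPAA2_MAX_CHANNELS and by
	 * the number of queues reported in the DPNI attributes.
	 */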
	num_chan = MIN(i, num_chan);
	sc->chan_n = MIN(num_chan, DPAA2_MAX_CHANNELS);
	sc->chan_n = MIN(sc->chan_n, sc->attr.num.queues);

	KASSERT(sc->chan_n > 0u, ("%s: positive number of channels expected: "
	    "chan_n=%d", __func__, sc->chan_n));

	device_printf(dev, "channels=%d\n", sc->chan_n);

	for (i = 0; i < sc->chan_n; i++) {
		iodev = (device_t)rman_get_start(sc->res[DPAA2_NI_IO_RID(i)]);
		condev = (device_t)rman_get_start(sc->res[DPAA2_NI_CON_RID(i)]);
		/* Only one buffer pool available at the moment */
		bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);

		error = dpaa2_chan_setup(dev, iodev, condev, bpdev,
		    &sc->channels[i], i, dpaa2_ni_cleanup_task);
		if (error != 0) {
			device_printf(dev, "%s: dpaa2_chan_setup() failed: "
			    "error=%d, chan_id=%d\n", __func__, error, i);
			return (error);
		}
	}

	/* There is exactly one Rx error queue per network interface */
	error = dpaa2_chan_setup_fq(dev, sc->channels[0], DPAA2_NI_QUEUE_RX_ERR);
	if (error != 0) {
		device_printf(dev, "%s: failed to prepare RxError queue: "
		    "error=%d\n", __func__, error);
		return (error);
	}

	return (0);
}

/**
 * @brief Bind DPNI to DPBPs, DPIOs, frame queues and channels.
 */
static int
dpaa2_ni_bind(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	device_t bp_dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *bp_info;
	struct dpaa2_cmd cmd;
	struct dpaa2_ni_pools_cfg pools_cfg;
	struct dpaa2_ni_err_cfg err_cfg;
	struct dpaa2_channel *chan;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Select buffer pool (only one available at the moment). */
	bp_dev = (device_t) rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
	bp_info = device_get_ivars(bp_dev);

	/* Configure the buffer pool. */
	pools_cfg.pools_num = 1;
	pools_cfg.pools[0].bp_obj_id = bp_info->id;
	pools_cfg.pools[0].backup_flag = 0;
	pools_cfg.pools[0].buf_sz = sc->buf_sz;
	error = DPAA2_CMD_NI_SET_POOLS(dev, child, &cmd, &pools_cfg);
	if (error) {
		device_printf(dev, "%s: failed to set buffer pools\n", __func__);
		goto close_ni;
	}

	/* Setup ingress traffic distribution. */
	error = dpaa2_ni_setup_rx_dist(dev);
	if (error && error != EOPNOTSUPP) {
		device_printf(dev, "%s: failed to setup ingress traffic "
		    "distribution\n", __func__);
		goto close_ni;
	}
	if (bootverbose && error == EOPNOTSUPP) {
		device_printf(dev, "Ingress traffic distribution not "
		    "supported\n");
	}

	/* Configure handling of error frames. */
	err_cfg.err_mask = DPAA2_NI_FAS_RX_ERR_MASK;
	err_cfg.set_err_fas = false;
	err_cfg.action = DPAA2_NI_ERR_DISCARD;
	error = DPAA2_CMD_NI_SET_ERR_BEHAVIOR(dev, child, &cmd, &err_cfg);
	if (error) {
		device_printf(dev, "%s: failed to set errors behavior\n",
		    __func__);
		goto close_ni;
	}

	/* Configure channel queues to generate CDANs. */
	for (uint32_t i = 0; i < sc->chan_n; i++) {
		chan = sc->channels[i];

		/* Setup Rx flows. */
		for (uint32_t j = 0; j < chan->rxq_n; j++) {
			error = dpaa2_ni_setup_rx_flow(dev, &chan->rx_queues[j]);
			if (error) {
				device_printf(dev, "%s: failed to setup Rx "
				    "flow: error=%d\n", __func__, error);
				goto close_ni;
			}
		}

		/* Setup Tx flow. */
		error = dpaa2_ni_setup_tx_flow(dev, &chan->txc_queue);
		if (error) {
			device_printf(dev, "%s: failed to setup Tx "
			    "flow: error=%d\n", __func__, error);
			goto close_ni;
		}
	}

	/* Configure RxError queue to generate CDAN. */
	error = dpaa2_ni_setup_rx_err_flow(dev, &sc->rxe_queue);
	if (error) {
		device_printf(dev, "%s: failed to setup RxError flow: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	/*
	 * Get the Queuing Destination ID (QDID) that should be used for frame
	 * enqueue operations.
	 */
	error = DPAA2_CMD_NI_GET_QDID(dev, child, &cmd, DPAA2_NI_QUEUE_TX,
	    &sc->tx_qdid);
	if (error) {
		device_printf(dev, "%s: failed to get Tx queuing destination "
		    "ID\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

/**
 * @brief Setup ingress traffic distribution.
 *
 * NOTE: Ingress traffic distribution is valid only when the DPNI_OPT_NO_FS
 *	 option hasn't been set for the DPNI and the number of DPNI queues is
 *	 greater than 1.
 */
static int
dpaa2_ni_setup_rx_dist(device_t dev)
{
	/*
	 * Have the interface implicitly distribute traffic based on the default
	 * hash key.
	 */
	return (dpaa2_ni_set_hash(dev, DPAA2_RXH_DEFAULT));
}

static int
dpaa2_ni_setup_rx_flow(device_t dev, struct dpaa2_ni_fq *fq)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *con_info;
	struct dpaa2_cmd cmd;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Obtain DPCON associated with the FQ's channel. */
	con_info = device_get_ivars(fq->chan->con_dev);

	queue_cfg.type = DPAA2_NI_QUEUE_RX;
	queue_cfg.tc = fq->tc;
	queue_cfg.idx = fq->flowid;
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain Rx queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	fq->fqid = queue_cfg.fqid;

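	/*
	 * Store the FQ pointer as the queue's user context so that frames
	 * dequeued from this FQID can be mapped back to their frame queue
	 * (it is also reported in the bootverbose message below).
	 */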
	queue_cfg.dest_id = con_info->id;
	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
	queue_cfg.priority = 1;
	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
	queue_cfg.options =
	    DPAA2_NI_QUEUE_OPT_USER_CTX |
	    DPAA2_NI_QUEUE_OPT_DEST;
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to update Rx queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	if (bootverbose) {
		device_printf(dev, "RX queue idx=%d, tc=%d, chan=%d, fqid=%d, "
		    "user_ctx=%#jx\n", fq->flowid, fq->tc, fq->chan->id,
		    fq->fqid, (uintmax_t)fq);
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, &cmd);
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

static int
dpaa2_ni_setup_tx_flow(device_t dev, struct dpaa2_ni_fq *fq)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_channel *ch = fq->chan;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *con_info;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	struct dpaa2_ni_tx_ring *tx;
	struct dpaa2_buf *buf;
	struct dpaa2_cmd cmd;
	uint32_t tx_rings_n = 0;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Obtain DPCON associated with the FQ's channel. */
	con_info = device_get_ivars(fq->chan->con_dev);

	KASSERT(sc->attr.num.tx_tcs <= DPAA2_MAX_TCS,
	    ("%s: too many Tx traffic classes: tx_tcs=%d\n", __func__,
	    sc->attr.num.tx_tcs));
	KASSERT(DPAA2_NI_BUFS_PER_TX <= DPAA2_NI_MAX_BPTX,
	    ("%s: too many Tx buffers (%d): max=%d\n", __func__,
	    DPAA2_NI_BUFS_PER_TX, DPAA2_NI_MAX_BPTX));

	/* Setup Tx rings. */
	for (int i = 0; i < sc->attr.num.tx_tcs; i++) {
		queue_cfg.type = DPAA2_NI_QUEUE_TX;
		queue_cfg.tc = i;
		queue_cfg.idx = fq->flowid;
		queue_cfg.chan_id = fq->chan->id;

		error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
		if (error) {
			device_printf(dev, "%s: failed to obtain Tx queue "
			    "configuration: tc=%d, flowid=%d\n", __func__,
			    queue_cfg.tc, queue_cfg.idx);
			goto close_ni;
		}

		tx = &fq->tx_rings[i];
		tx->fq = fq;
		tx->fqid = queue_cfg.fqid;
		tx->txid = tx_rings_n;

		if (bootverbose) {
			device_printf(dev, "TX queue idx=%d, tc=%d, chan=%d, "
			    "fqid=%d\n", fq->flowid, i, fq->chan->id,
			    queue_cfg.fqid);
		}

		mtx_init(&tx->lock, "dpaa2_tx_ring", NULL, MTX_DEF);

		/* Allocate Tx ring buffer. */
		tx->br = buf_ring_alloc(DPAA2_TX_BUFRING_SZ, M_DEVBUF, M_NOWAIT,
		    &tx->lock);
		if (tx->br == NULL) {
			device_printf(dev, "%s: failed to setup Tx ring buffer"
			    " (2) fqid=%d\n", __func__, tx->fqid);
			goto close_ni;
		}

		/* Configure Tx buffers */
		for (uint64_t j = 0; j < DPAA2_NI_BUFS_PER_TX; j++) {
			buf = malloc(sizeof(struct dpaa2_buf), M_DPAA2_TXB,
			    M_WAITOK);
			/* Keep DMA tag and Tx ring linked to the buffer */
			DPAA2_BUF_INIT_TAGOPT(buf, ch->tx_dmat, tx);

			buf->sgt = malloc(sizeof(struct dpaa2_buf), M_DPAA2_TXB,
			    M_WAITOK);
			/* Link SGT to DMA tag and back to its Tx buffer */
			DPAA2_BUF_INIT_TAGOPT(buf->sgt, ch->sgt_dmat, buf);

			error = dpaa2_buf_seed_txb(dev, buf);

			/* Add Tx buffer to the ring */
			buf_ring_enqueue(tx->br, buf);
		}

		tx_rings_n++;
	}

	/* All Tx queues which belong to the same flowid have the same qdbin. */
	fq->tx_qdbin = queue_cfg.qdbin;

	queue_cfg.type = DPAA2_NI_QUEUE_TX_CONF;
	queue_cfg.tc = 0; /* ignored for TxConf queue */
	queue_cfg.idx = fq->flowid;
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain TxConf queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	fq->fqid = queue_cfg.fqid;

	queue_cfg.dest_id = con_info->id;
	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
	queue_cfg.priority = 0;
	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
	queue_cfg.options =
	    DPAA2_NI_QUEUE_OPT_USER_CTX |
	    DPAA2_NI_QUEUE_OPT_DEST;
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to update TxConf queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

static int
dpaa2_ni_setup_rx_err_flow(device_t dev, struct dpaa2_ni_fq *fq)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *con_info;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Obtain DPCON associated with the FQ's channel. */
	con_info = device_get_ivars(fq->chan->con_dev);

	queue_cfg.type = DPAA2_NI_QUEUE_RX_ERR;
	queue_cfg.tc = fq->tc; /* ignored */
	queue_cfg.idx = fq->flowid; /* ignored */
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain RxErr queue "
		    "configuration\n", __func__);
		goto close_ni;
	}

	fq->fqid = queue_cfg.fqid;

	queue_cfg.dest_id = con_info->id;
	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
	queue_cfg.priority = 1;
	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
	queue_cfg.options =
	    DPAA2_NI_QUEUE_OPT_USER_CTX |
	    DPAA2_NI_QUEUE_OPT_DEST;
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to update RxErr queue "
		    "configuration\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

/**
 * @brief Configure DPNI object to generate interrupts.
 */
static int
dpaa2_ni_setup_irqs(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Configure IRQs. */
	error = dpaa2_ni_setup_msi(sc);
	if (error) {
		device_printf(dev, "%s: failed to allocate MSI\n", __func__);
		goto close_ni;
	}
	if ((sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) {
		device_printf(dev, "%s: failed to allocate IRQ resource\n",
		    __func__);
		error = ENXIO;
		goto close_ni;
	}
	if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, dpaa2_ni_intr, sc, &sc->intr)) {
		device_printf(dev, "%s: failed to setup IRQ resource\n",
		    __func__);
		error = ENXIO;
		goto close_ni;
	}

	error = DPAA2_CMD_NI_SET_IRQ_MASK(dev, child, &cmd, DPNI_IRQ_INDEX,
	    DPNI_IRQ_LINK_CHANGED | DPNI_IRQ_EP_CHANGED);
	if (error) {
		device_printf(dev, "%s: failed to set DPNI IRQ mask\n",
		    __func__);
		goto close_ni;
	}

	error = DPAA2_CMD_NI_SET_IRQ_ENABLE(dev, child, &cmd, DPNI_IRQ_INDEX,
	    true);
	if (error) {
		device_printf(dev, "%s: failed to enable DPNI IRQ\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

/**
 * @brief Allocate MSI interrupts for DPNI.
 */
static int
dpaa2_ni_setup_msi(struct dpaa2_ni_softc *sc)
{
	int val;

	val = pci_msi_count(sc->dev);
	if (val < DPAA2_NI_MSI_COUNT)
		device_printf(sc->dev, "MSI: actual=%d, expected=%d\n", val,
		    DPAA2_NI_MSI_COUNT);
	val = MIN(val, DPAA2_NI_MSI_COUNT);

	if (pci_alloc_msi(sc->dev, &val) != 0)
		return (EINVAL);

	for (int i = 0; i < val; i++)
		sc->irq_rid[i] = i + 1;

	return (0);
}
1564 
1565 /**
1566  * @brief Update DPNI according to the updated interface capabilities.
1567  */
1568 static int
1569 dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *sc)
1570 {
1571 	const bool en_rxcsum = if_getcapenable(sc->ifp) & IFCAP_RXCSUM;
1572 	const bool en_txcsum = if_getcapenable(sc->ifp) & IFCAP_TXCSUM;
1573 	device_t pdev = device_get_parent(sc->dev);
1574 	device_t dev = sc->dev;
1575 	device_t child = dev;
1576 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1577 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1578 	struct dpaa2_cmd cmd;
1579 	uint16_t rc_token, ni_token;
1580 	int error;
1581 
1582 	DPAA2_CMD_INIT(&cmd);
1583 
1584 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1585 	if (error) {
1586 		device_printf(dev, "%s: failed to open resource container: "
1587 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
1588 		goto err_exit;
1589 	}
1590 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1591 	if (error) {
1592 		device_printf(dev, "%s: failed to open network interface: "
1593 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
1594 		goto close_rc;
1595 	}
1596 
	/* Set up checksum validation. */
1598 	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1599 	    DPAA2_NI_OFL_RX_L3_CSUM, en_rxcsum);
1600 	if (error) {
1601 		device_printf(dev, "%s: failed to %s L3 checksum validation\n",
1602 		    __func__, en_rxcsum ? "enable" : "disable");
1603 		goto close_ni;
1604 	}
1605 	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1606 	    DPAA2_NI_OFL_RX_L4_CSUM, en_rxcsum);
1607 	if (error) {
1608 		device_printf(dev, "%s: failed to %s L4 checksum validation\n",
1609 		    __func__, en_rxcsum ? "enable" : "disable");
1610 		goto close_ni;
1611 	}
1612 
	/* Set up checksum generation. */
1614 	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1615 	    DPAA2_NI_OFL_TX_L3_CSUM, en_txcsum);
1616 	if (error) {
1617 		device_printf(dev, "%s: failed to %s L3 checksum generation\n",
1618 		    __func__, en_txcsum ? "enable" : "disable");
1619 		goto close_ni;
1620 	}
1621 	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1622 	    DPAA2_NI_OFL_TX_L4_CSUM, en_txcsum);
1623 	if (error) {
1624 		device_printf(dev, "%s: failed to %s L4 checksum generation\n",
1625 		    __func__, en_txcsum ? "enable" : "disable");
1626 		goto close_ni;
1627 	}
1628 
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1630 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1631 	return (0);
1632 
1633 close_ni:
1634 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1635 close_rc:
1636 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1637 err_exit:
1638 	return (error);
1639 }
1640 
1641 /**
 * @brief Update the DPNI to match the current interface flags.
1643  */
1644 static int
1645 dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *sc)
1646 {
1647 	const bool en_promisc = if_getflags(sc->ifp) & IFF_PROMISC;
1648 	const bool en_allmulti = if_getflags(sc->ifp) & IFF_ALLMULTI;
1649 	device_t pdev = device_get_parent(sc->dev);
1650 	device_t dev = sc->dev;
1651 	device_t child = dev;
1652 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1653 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1654 	struct dpaa2_cmd cmd;
1655 	uint16_t rc_token, ni_token;
1656 	int error;
1657 
1658 	DPAA2_CMD_INIT(&cmd);
1659 
1660 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1661 	if (error) {
1662 		device_printf(dev, "%s: failed to open resource container: "
1663 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
1664 		goto err_exit;
1665 	}
1666 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1667 	if (error) {
1668 		device_printf(dev, "%s: failed to open network interface: "
1669 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
1670 		goto close_rc;
1671 	}
1672 
	error = DPAA2_CMD_NI_SET_MULTI_PROMISC(dev, child, &cmd,
	    en_promisc || en_allmulti);
	if (error) {
		device_printf(dev, "%s: failed to %s multicast promiscuous "
		    "mode\n", __func__,
		    (en_promisc || en_allmulti) ? "enable" : "disable");
1678 		goto close_ni;
1679 	}
1680 
1681 	error = DPAA2_CMD_NI_SET_UNI_PROMISC(dev, child, &cmd, en_promisc);
1682 	if (error) {
1683 		device_printf(dev, "%s: failed to %s unicast promiscuous mode\n",
1684 		    __func__, en_promisc ? "enable" : "disable");
1685 		goto close_ni;
1686 	}
1687 
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1689 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1690 	return (0);
1691 
1692 close_ni:
1693 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1694 close_rc:
1695 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1696 err_exit:
1697 	return (error);
1698 }
1699 
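/**
 * @brief Expose DPNI and per-channel statistics via sysctl(9).
 */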
1700 static int
1701 dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *sc)
1702 {
1703 	struct sysctl_ctx_list *ctx;
1704 	struct sysctl_oid *node, *node2;
1705 	struct sysctl_oid_list *parent, *parent2;
1706 	char cbuf[128];
1707 	int i;
1708 
1709 	ctx = device_get_sysctl_ctx(sc->dev);
1710 	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1711 
1712 	/* Add DPNI statistics. */
1713 	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
1714 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Statistics");
1715 	parent = SYSCTL_CHILDREN(node);
1716 	for (i = 0; i < DPAA2_NI_STAT_SYSCTLS; ++i) {
1717 		SYSCTL_ADD_PROC(ctx, parent, i, dpni_stat_sysctls[i].name,
1718 		    CTLTYPE_U64 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_stats,
1719 		    "IU", dpni_stat_sysctls[i].desc);
1720 	}
1721 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_anomaly_frames",
1722 	    CTLFLAG_RD, &sc->rx_anomaly_frames,
1723 	    "Rx frames in the buffers outside of the buffer pools");
1724 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_single_buf_frames",
1725 	    CTLFLAG_RD, &sc->rx_single_buf_frames,
1726 	    "Rx frames in single buffers");
1727 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_sg_buf_frames",
1728 	    CTLFLAG_RD, &sc->rx_sg_buf_frames,
1729 	    "Rx frames in scatter/gather list");
1730 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_enq_rej_frames",
1731 	    CTLFLAG_RD, &sc->rx_enq_rej_frames,
1732 	    "Enqueue rejected by QMan");
1733 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_ieoi_err_frames",
1734 	    CTLFLAG_RD, &sc->rx_ieoi_err_frames,
1735 	    "QMan IEOI error");
1736 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_single_buf_frames",
1737 	    CTLFLAG_RD, &sc->tx_single_buf_frames,
1738 	    "Tx single buffer frames");
1739 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_sg_frames",
1740 	    CTLFLAG_RD, &sc->tx_sg_frames,
1741 	    "Tx S/G frames");
1742 
1743 	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_num",
1744 	    CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_num,
1745 	    "IU", "number of Rx buffers in the buffer pool");
1746 	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_free",
1747 	    CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_free,
1748 	    "IU", "number of free Rx buffers in the buffer pool");
1749 
	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1751 
1752 	/* Add channels statistics. */
1753 	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "channels",
1754 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channels");
1755 	parent = SYSCTL_CHILDREN(node);
1756 	for (int i = 0; i < sc->chan_n; i++) {
1757 		snprintf(cbuf, sizeof(cbuf), "%d", i);
1758 
1759 		node2 = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, cbuf,
1760 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channel");
1761 		parent2 = SYSCTL_CHILDREN(node2);
1762 
1763 		SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_frames",
1764 		    CTLFLAG_RD, &sc->channels[i]->tx_frames,
1765 		    "Tx frames counter");
1766 		SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_dropped",
1767 		    CTLFLAG_RD, &sc->channels[i]->tx_dropped,
1768 		    "Tx dropped counter");
1769 	}
1770 
1771 	return (0);
1772 }
1773 
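/**
 * @brief Create DMA tags for the Rx distribution key and the QoS key
 * configuration buffers.
 */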
1774 static int
1775 dpaa2_ni_setup_dma(struct dpaa2_ni_softc *sc)
1776 {
1777 	device_t dev = sc->dev;
1778 	int error;
1779 
1780 	KASSERT((sc->buf_align == BUF_ALIGN) || (sc->buf_align == BUF_ALIGN_V1),
1781 	    ("unexpected buffer alignment: %d\n", sc->buf_align));
1782 
1783 	/* DMA tag for Rx distribution key. */
1784 	error = bus_dma_tag_create(
1785 	    bus_get_dma_tag(dev),
1786 	    PAGE_SIZE, 0,		/* alignment, boundary */
1787 	    BUS_SPACE_MAXADDR,		/* low restricted addr */
1788 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
1789 	    NULL, NULL,			/* filter, filterarg */
1790 	    DPAA2_CLASSIFIER_DMA_SIZE, 1, /* maxsize, nsegments */
1791 	    DPAA2_CLASSIFIER_DMA_SIZE, 0, /* maxsegsize, flags */
1792 	    NULL, NULL,			/* lockfunc, lockarg */
1793 	    &sc->rxd_dmat);
1794 	if (error) {
1795 		device_printf(dev, "%s: failed to create DMA tag for Rx "
1796 		    "distribution key\n", __func__);
1797 		return (error);
1798 	}
1799 
1800 	error = bus_dma_tag_create(
1801 	    bus_get_dma_tag(dev),
1802 	    PAGE_SIZE, 0,		/* alignment, boundary */
1803 	    BUS_SPACE_MAXADDR,		/* low restricted addr */
1804 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
1805 	    NULL, NULL,			/* filter, filterarg */
1806 	    ETH_QOS_KCFG_BUF_SIZE, 1,	/* maxsize, nsegments */
1807 	    ETH_QOS_KCFG_BUF_SIZE, 0,	/* maxsegsize, flags */
1808 	    NULL, NULL,			/* lockfunc, lockarg */
1809 	    &sc->qos_dmat);
1810 	if (error) {
1811 		device_printf(dev, "%s: failed to create DMA tag for QoS key\n",
1812 		    __func__);
1813 		return (error);
1814 	}
1815 
1816 	return (0);
1817 }
1818 
1819 /**
1820  * @brief Configure buffer layouts of the different DPNI queues.
1821  */
1822 static int
1823 dpaa2_ni_set_buf_layout(device_t dev)
1824 {
1825 	device_t pdev = device_get_parent(dev);
1826 	device_t child = dev;
1827 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1828 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1829 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
1830 	struct dpaa2_ni_buf_layout buf_layout = {0};
1831 	struct dpaa2_cmd cmd;
1832 	uint16_t rc_token, ni_token;
1833 	int error;
1834 
1835 	DPAA2_CMD_INIT(&cmd);
1836 
1837 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1838 	if (error) {
1839 		device_printf(dev, "%s: failed to open resource container: "
1840 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
1841 		goto err_exit;
1842 	}
1843 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1844 	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
1847 		goto close_rc;
1848 	}
1849 
1850 	/*
1851 	 * Select Rx/Tx buffer alignment. It's necessary to ensure that the
1852 	 * buffer size seen by WRIOP is a multiple of 64 or 256 bytes depending
1853 	 * on the WRIOP version.
1854 	 */
1855 	sc->buf_align = (sc->attr.wriop_ver == WRIOP_VERSION(0, 0, 0) ||
1856 	    sc->attr.wriop_ver == WRIOP_VERSION(1, 0, 0))
1857 	    ? BUF_ALIGN_V1 : BUF_ALIGN;
1858 
	/*
	 * Round the buffer size down to a multiple of the selected alignment,
	 * so that the buffer size seen by WRIOP stays aligned.
	 */
1863 	sc->buf_sz = ALIGN_DOWN(DPAA2_RX_BUF_SIZE, sc->buf_align);
1864 
1865 	if (bootverbose) {
1866 		device_printf(dev, "Rx/Tx buffers: size=%d, alignment=%d\n",
1867 		    sc->buf_sz, sc->buf_align);
1868 	}
1869 
1870 	/*
1871 	 *    Frame Descriptor       Tx buffer layout
1872 	 *
1873 	 *                ADDR -> |---------------------|
1874 	 *                        | SW FRAME ANNOTATION | BUF_SWA_SIZE bytes
1875 	 *                        |---------------------|
1876 	 *                        | HW FRAME ANNOTATION | BUF_TX_HWA_SIZE bytes
1877 	 *                        |---------------------|
1878 	 *                        |    DATA HEADROOM    |
1879 	 *       ADDR + OFFSET -> |---------------------|
1880 	 *                        |                     |
1881 	 *                        |                     |
1882 	 *                        |     FRAME DATA      |
1883 	 *                        |                     |
1884 	 *                        |                     |
1885 	 *                        |---------------------|
1886 	 *                        |    DATA TAILROOM    |
1887 	 *                        |---------------------|
1888 	 *
	 * NOTE: This layout applies to single-buffer frames only.
1890 	 */
1891 	buf_layout.queue_type = DPAA2_NI_QUEUE_TX;
1892 	buf_layout.pd_size = BUF_SWA_SIZE;
1893 	buf_layout.pass_timestamp = true;
1894 	buf_layout.pass_frame_status = true;
1895 	buf_layout.options =
1896 	    BUF_LOPT_PRIV_DATA_SZ |
1897 	    BUF_LOPT_TIMESTAMP | /* requires 128 bytes in HWA */
1898 	    BUF_LOPT_FRAME_STATUS;
1899 	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
1900 	if (error) {
1901 		device_printf(dev, "%s: failed to set Tx buffer layout\n",
1902 		    __func__);
1903 		goto close_ni;
1904 	}
1905 
1906 	/* Tx-confirmation buffer layout */
1907 	buf_layout.queue_type = DPAA2_NI_QUEUE_TX_CONF;
1908 	buf_layout.options =
1909 	    BUF_LOPT_TIMESTAMP |
1910 	    BUF_LOPT_FRAME_STATUS;
1911 	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
1912 	if (error) {
1913 		device_printf(dev, "%s: failed to set TxConf buffer layout\n",
1914 		    __func__);
1915 		goto close_ni;
1916 	}
1917 
	/*
	 * The driver must reserve the amount of space reported by this command
	 * as headroom in all Tx frames.
	 */
1922 	error = DPAA2_CMD_NI_GET_TX_DATA_OFF(dev, child, &cmd, &sc->tx_data_off);
1923 	if (error) {
1924 		device_printf(dev, "%s: failed to obtain Tx data offset\n",
1925 		    __func__);
1926 		goto close_ni;
1927 	}
1928 
1929 	if (bootverbose) {
1930 		device_printf(dev, "Tx data offset=%d\n", sc->tx_data_off);
1931 	}
1932 	if ((sc->tx_data_off % 64) != 0) {
		device_printf(dev, "Tx data offset (%d) is not a multiple of "
		    "64 bytes\n", sc->tx_data_off);
1935 	}
1936 
1937 	/*
1938 	 *    Frame Descriptor       Rx buffer layout
1939 	 *
1940 	 *                ADDR -> |---------------------|
1941 	 *                        | SW FRAME ANNOTATION | BUF_SWA_SIZE bytes
1942 	 *                        |---------------------|
1943 	 *                        | HW FRAME ANNOTATION | BUF_RX_HWA_SIZE bytes
1944 	 *                        |---------------------|
1945 	 *                        |    DATA HEADROOM    | OFFSET-BUF_RX_HWA_SIZE
1946 	 *       ADDR + OFFSET -> |---------------------|
1947 	 *                        |                     |
1948 	 *                        |                     |
1949 	 *                        |     FRAME DATA      |
1950 	 *                        |                     |
1951 	 *                        |                     |
1952 	 *                        |---------------------|
1953 	 *                        |    DATA TAILROOM    | 0 bytes
1954 	 *                        |---------------------|
1955 	 *
	 * NOTE: This layout applies to single-buffer frames only.
1957 	 */
1958 	buf_layout.queue_type = DPAA2_NI_QUEUE_RX;
1959 	buf_layout.pd_size = BUF_SWA_SIZE;
1960 	buf_layout.fd_align = sc->buf_align;
1961 	buf_layout.head_size = sc->tx_data_off - BUF_RX_HWA_SIZE - BUF_SWA_SIZE;
1962 	buf_layout.tail_size = 0;
1963 	buf_layout.pass_frame_status = true;
1964 	buf_layout.pass_parser_result = true;
1965 	buf_layout.pass_timestamp = true;
1966 	buf_layout.options =
1967 	    BUF_LOPT_PRIV_DATA_SZ |
1968 	    BUF_LOPT_DATA_ALIGN |
1969 	    BUF_LOPT_DATA_HEAD_ROOM |
1970 	    BUF_LOPT_DATA_TAIL_ROOM |
1971 	    BUF_LOPT_FRAME_STATUS |
1972 	    BUF_LOPT_PARSER_RESULT |
1973 	    BUF_LOPT_TIMESTAMP;
1974 	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
1975 	if (error) {
1976 		device_printf(dev, "%s: failed to set Rx buffer layout\n",
1977 		    __func__);
1978 		goto close_ni;
1979 	}
1980 
1981 	error = 0;
1982 close_ni:
1983 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1984 close_rc:
1985 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1986 err_exit:
1987 	return (error);
1988 }
1989 
1990 /**
1991  * @brief Enable Rx/Tx pause frames.
1992  *
 * NOTE: Rx pause means the DPNI stops sending when it receives a pause frame;
 *       Tx pause means the DPNI itself generates pause frames.
1995  */
1996 static int
1997 dpaa2_ni_set_pause_frame(device_t dev)
1998 {
1999 	device_t pdev = device_get_parent(dev);
2000 	device_t child = dev;
2001 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2002 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2003 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2004 	struct dpaa2_ni_link_cfg link_cfg = {0};
2005 	struct dpaa2_cmd cmd;
2006 	uint16_t rc_token, ni_token;
2007 	int error;
2008 
2009 	DPAA2_CMD_INIT(&cmd);
2010 
2011 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2012 	if (error) {
2013 		device_printf(dev, "%s: failed to open resource container: "
2014 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2015 		goto err_exit;
2016 	}
2017 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2018 	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2021 		goto close_rc;
2022 	}
2023 
2024 	error = DPAA2_CMD_NI_GET_LINK_CFG(dev, child, &cmd, &link_cfg);
2025 	if (error) {
2026 		device_printf(dev, "%s: failed to obtain link configuration: "
2027 		    "error=%d\n", __func__, error);
2028 		goto close_ni;
2029 	}
2030 
2031 	/* Enable both Rx and Tx pause frames by default. */
2032 	link_cfg.options |= DPAA2_NI_LINK_OPT_PAUSE;
2033 	link_cfg.options &= ~DPAA2_NI_LINK_OPT_ASYM_PAUSE;
2034 
2035 	error = DPAA2_CMD_NI_SET_LINK_CFG(dev, child, &cmd, &link_cfg);
2036 	if (error) {
2037 		device_printf(dev, "%s: failed to set link configuration: "
2038 		    "error=%d\n", __func__, error);
2039 		goto close_ni;
2040 	}
2041 
2042 	sc->link_options = link_cfg.options;
2043 	error = 0;
2044 close_ni:
2045 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2046 close_rc:
2047 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2048 err_exit:
2049 	return (error);
2050 }
2051 
2052 /**
2053  * @brief Configure QoS table to determine the traffic class for the received
2054  * frame.
2055  */
2056 static int
2057 dpaa2_ni_set_qos_table(device_t dev)
2058 {
2059 	device_t pdev = device_get_parent(dev);
2060 	device_t child = dev;
2061 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2062 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2063 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2064 	struct dpaa2_ni_qos_table tbl;
2065 	struct dpaa2_buf *buf = &sc->qos_kcfg;
2066 	struct dpaa2_cmd cmd;
2067 	uint16_t rc_token, ni_token;
2068 	int error;
2069 
2070 	if (sc->attr.num.rx_tcs == 1 ||
2071 	    !(sc->attr.options & DPNI_OPT_HAS_KEY_MASKING)) {
2072 		if (bootverbose) {
2073 			device_printf(dev, "Ingress traffic classification is "
2074 			    "not supported\n");
2075 		}
2076 		return (0);
2077 	}
2078 
2079 	/*
2080 	 * Allocate a buffer visible to the device to hold the QoS table key
2081 	 * configuration.
2082 	 */
2083 
2084 	if (__predict_true(buf->dmat == NULL)) {
2085 		buf->dmat = sc->qos_dmat;
2086 	}
2087 
2088 	error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr,
2089 	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap);
2090 	if (error) {
2091 		device_printf(dev, "%s: failed to allocate a buffer for QoS key "
2092 		    "configuration\n", __func__);
2093 		goto err_exit;
2094 	}
2095 
2096 	error = bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr,
2097 	    ETH_QOS_KCFG_BUF_SIZE, dpaa2_dmamap_oneseg_cb, &buf->paddr,
2098 	    BUS_DMA_NOWAIT);
2099 	if (error) {
2100 		device_printf(dev, "%s: failed to map QoS key configuration "
2101 		    "buffer into bus space\n", __func__);
2102 		goto err_exit;
2103 	}
2104 
2105 	DPAA2_CMD_INIT(&cmd);
2106 
2107 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2108 	if (error) {
2109 		device_printf(dev, "%s: failed to open resource container: "
2110 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2111 		goto err_exit;
2112 	}
2113 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2114 	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2117 		goto close_rc;
2118 	}
2119 
2120 	tbl.default_tc = 0;
2121 	tbl.discard_on_miss = false;
2122 	tbl.keep_entries = false;
2123 	tbl.kcfg_busaddr = buf->paddr;
2124 	error = DPAA2_CMD_NI_SET_QOS_TABLE(dev, child, &cmd, &tbl);
2125 	if (error) {
2126 		device_printf(dev, "%s: failed to set QoS table\n", __func__);
2127 		goto close_ni;
2128 	}
2129 
2130 	error = DPAA2_CMD_NI_CLEAR_QOS_TABLE(dev, child, &cmd);
2131 	if (error) {
2132 		device_printf(dev, "%s: failed to clear QoS table\n", __func__);
2133 		goto close_ni;
2134 	}
2135 
2136 	error = 0;
2137 close_ni:
2138 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2139 close_rc:
2140 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2141 err_exit:
2142 	return (error);
2143 }
2144 
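/**
 * @brief Select and program the DPNI's primary MAC address: prefer the MAC
 * address of the attached physical port, fall back to the address already
 * configured in the DPNI, and generate a random one if neither is set.
 */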
2145 static int
2146 dpaa2_ni_set_mac_addr(device_t dev)
2147 {
2148 	device_t pdev = device_get_parent(dev);
2149 	device_t child = dev;
2150 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2151 	if_t ifp = sc->ifp;
2152 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2153 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2154 	struct dpaa2_cmd cmd;
2155 	struct ether_addr rnd_mac_addr;
2156 	uint16_t rc_token, ni_token;
2157 	uint8_t mac_addr[ETHER_ADDR_LEN];
2158 	uint8_t dpni_mac_addr[ETHER_ADDR_LEN];
2159 	int error;
2160 
2161 	DPAA2_CMD_INIT(&cmd);
2162 
2163 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2164 	if (error) {
2165 		device_printf(dev, "%s: failed to open resource container: "
2166 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2167 		goto err_exit;
2168 	}
2169 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2170 	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2173 		goto close_rc;
2174 	}
2175 
2176 	/*
2177 	 * Get the MAC address associated with the physical port, if the DPNI is
2178 	 * connected to a DPMAC directly associated with one of the physical
2179 	 * ports.
2180 	 */
2181 	error = DPAA2_CMD_NI_GET_PORT_MAC_ADDR(dev, child, &cmd, mac_addr);
2182 	if (error) {
2183 		device_printf(dev, "%s: failed to obtain the MAC address "
2184 		    "associated with the physical port\n", __func__);
2185 		goto close_ni;
2186 	}
2187 
2188 	/* Get primary MAC address from the DPNI attributes. */
2189 	error = DPAA2_CMD_NI_GET_PRIM_MAC_ADDR(dev, child, &cmd, dpni_mac_addr);
2190 	if (error) {
2191 		device_printf(dev, "%s: failed to obtain primary MAC address\n",
2192 		    __func__);
2193 		goto close_ni;
2194 	}
2195 
2196 	if (!ETHER_IS_ZERO(mac_addr)) {
2197 		/* Set MAC address of the physical port as DPNI's primary one. */
2198 		error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd,
2199 		    mac_addr);
2200 		if (error) {
2201 			device_printf(dev, "%s: failed to set primary MAC "
2202 			    "address\n", __func__);
2203 			goto close_ni;
2204 		}
2205 		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2206 			sc->mac.addr[i] = mac_addr[i];
2207 		}
2208 	} else if (ETHER_IS_ZERO(dpni_mac_addr)) {
2209 		/* Generate random MAC address as DPNI's primary one. */
2210 		ether_gen_addr(ifp, &rnd_mac_addr);
2211 		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2212 			mac_addr[i] = rnd_mac_addr.octet[i];
2213 		}
2214 
2215 		error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd,
2216 		    mac_addr);
2217 		if (error) {
2218 			device_printf(dev, "%s: failed to set random primary "
2219 			    "MAC address\n", __func__);
2220 			goto close_ni;
2221 		}
2222 		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2223 			sc->mac.addr[i] = mac_addr[i];
2224 		}
2225 	} else {
2226 		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2227 			sc->mac.addr[i] = dpni_mac_addr[i];
2228 		}
2229 	}
2230 
2231 	error = 0;
2232 close_ni:
2233 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2234 close_rc:
2235 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2236 err_exit:
2237 	return (error);
2238 }
2239 
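/**
 * @brief miibus(4) status change callback: propagate the PHY link state to
 * the DPMAC.
 */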
2240 static void
2241 dpaa2_ni_miibus_statchg(device_t dev)
2242 {
2243 	device_t pdev = device_get_parent(dev);
2244 	device_t child = dev;
2245 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2246 	struct dpaa2_mac_link_state mac_link = { 0 };
2247 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2248 	struct dpaa2_cmd cmd;
2249 	uint16_t rc_token, mac_token;
2250 	int error, link_state;
2251 
2252 	if (sc->fixed_link || sc->mii == NULL) {
2253 		return;
2254 	}
2255 	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0) {
		/*
		 * We may be called before the interface is fully set up, i.e.
		 * before dpaa2_ni_init() has run. Adjusting the link now would
		 * set up sc->link_state without informing the DPMAC, and the
		 * update would not be triggered again once needed, so the
		 * DPMAC would never learn about the link.
		 */
2263 		return;
2264 	}
2265 
2266 	/*
2267 	 * Note: ifp link state will only be changed AFTER we are called so we
2268 	 * cannot rely on ifp->if_linkstate here.
2269 	 */
2270 	if (sc->mii->mii_media_status & IFM_AVALID) {
2271 		if (sc->mii->mii_media_status & IFM_ACTIVE) {
2272 			link_state = LINK_STATE_UP;
2273 		} else {
2274 			link_state = LINK_STATE_DOWN;
2275 		}
2276 	} else {
2277 		link_state = LINK_STATE_UNKNOWN;
2278 	}
2279 
2280 	if (link_state != sc->link_state) {
2281 		sc->link_state = link_state;
2282 
2283 		DPAA2_CMD_INIT(&cmd);
2284 
2285 		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
2286 		    &rc_token);
2287 		if (error) {
2288 			device_printf(dev, "%s: failed to open resource "
2289 			    "container: id=%d, error=%d\n", __func__, rcinfo->id,
2290 			    error);
2291 			goto err_exit;
2292 		}
2293 		error = DPAA2_CMD_MAC_OPEN(dev, child, &cmd, sc->mac.dpmac_id,
2294 		    &mac_token);
2295 		if (error) {
2296 			device_printf(sc->dev, "%s: failed to open DPMAC: "
2297 			    "id=%d, error=%d\n", __func__, sc->mac.dpmac_id,
2298 			    error);
2299 			goto close_rc;
2300 		}
2301 
2302 		if (link_state == LINK_STATE_UP ||
2303 		    link_state == LINK_STATE_DOWN) {
2304 			/* Update DPMAC link state. */
2305 			mac_link.supported = sc->mii->mii_media.ifm_media;
2306 			mac_link.advert = sc->mii->mii_media.ifm_media;
			mac_link.rate = 1000; /* TODO: Derive from media, e.g. ifmedia_baudrate()? */
2308 			mac_link.options =
2309 			    DPAA2_MAC_LINK_OPT_AUTONEG |
2310 			    DPAA2_MAC_LINK_OPT_PAUSE;
			mac_link.up = (link_state == LINK_STATE_UP);
2312 			mac_link.state_valid = true;
2313 
2314 			/* Inform DPMAC about link state. */
2315 			error = DPAA2_CMD_MAC_SET_LINK_STATE(dev, child, &cmd,
2316 			    &mac_link);
2317 			if (error) {
2318 				device_printf(sc->dev, "%s: failed to set DPMAC "
2319 				    "link state: id=%d, error=%d\n", __func__,
2320 				    sc->mac.dpmac_id, error);
2321 			}
2322 		}
2323 		(void)DPAA2_CMD_MAC_CLOSE(dev, child, &cmd);
2324 		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
2325 		    rc_token));
2326 	}
2327 
2328 	return;
2329 
2330 close_rc:
2331 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2332 err_exit:
2333 	return;
2334 }
2335 
2336 /**
2337  * @brief Callback function to process media change request.
2338  */
2339 static int
2340 dpaa2_ni_media_change_locked(struct dpaa2_ni_softc *sc)
2341 {
2342 
2343 	DPNI_LOCK_ASSERT(sc);
2344 	if (sc->mii) {
2345 		mii_mediachg(sc->mii);
2346 		sc->media_status = sc->mii->mii_media.ifm_media;
2347 	} else if (sc->fixed_link) {
2348 		if_printf(sc->ifp, "%s: can't change media in fixed mode\n",
2349 		    __func__);
2350 	}
2351 
2352 	return (0);
2353 }
2354 
2355 static int
2356 dpaa2_ni_media_change(if_t ifp)
2357 {
2358 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2359 	int error;
2360 
2361 	DPNI_LOCK(sc);
2362 	error = dpaa2_ni_media_change_locked(sc);
2363 	DPNI_UNLOCK(sc);
2364 	return (error);
2365 }
2366 
2367 /**
2368  * @brief Callback function to process media status request.
2369  */
2370 static void
2371 dpaa2_ni_media_status(if_t ifp, struct ifmediareq *ifmr)
2372 {
2373 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2374 
2375 	DPNI_LOCK(sc);
2376 	if (sc->mii) {
2377 		mii_pollstat(sc->mii);
2378 		ifmr->ifm_active = sc->mii->mii_media_active;
2379 		ifmr->ifm_status = sc->mii->mii_media_status;
2380 	}
2381 	DPNI_UNLOCK(sc);
2382 }
2383 
2384 /**
2385  * @brief Callout function to check and update media status.
2386  */
2387 static void
2388 dpaa2_ni_media_tick(void *arg)
2389 {
2390 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2391 
2392 	/* Check for media type change */
2393 	if (sc->mii) {
2394 		mii_tick(sc->mii);
2395 		if (sc->media_status != sc->mii->mii_media.ifm_media) {
2396 			printf("%s: media type changed (ifm_media=%x)\n",
2397 			    __func__, sc->mii->mii_media.ifm_media);
2398 			dpaa2_ni_media_change(sc->ifp);
2399 		}
2400 	}
2401 
2402 	/* Schedule another timeout one second from now */
2403 	callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);
2404 }
2405 
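/**
 * @brief Enable the DPNI and mark the interface as running.
 */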
2406 static void
2407 dpaa2_ni_init(void *arg)
2408 {
2409 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2410 	if_t ifp = sc->ifp;
2411 	device_t pdev = device_get_parent(sc->dev);
2412 	device_t dev = sc->dev;
2413 	device_t child = dev;
2414 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2415 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2416 	struct dpaa2_cmd cmd;
2417 	uint16_t rc_token, ni_token;
2418 	int error;
2419 
2420 	DPNI_LOCK(sc);
2421 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2422 		DPNI_UNLOCK(sc);
2423 		return;
2424 	}
2425 	DPNI_UNLOCK(sc);
2426 
2427 	DPAA2_CMD_INIT(&cmd);
2428 
2429 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2430 	if (error) {
2431 		device_printf(dev, "%s: failed to open resource container: "
2432 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2433 		goto err_exit;
2434 	}
2435 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2436 	if (error) {
2437 		device_printf(dev, "%s: failed to open network interface: "
2438 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2439 		goto close_rc;
2440 	}
2441 
2442 	error = DPAA2_CMD_NI_ENABLE(dev, child, &cmd);
2443 	if (error) {
2444 		device_printf(dev, "%s: failed to enable DPNI: error=%d\n",
2445 		    __func__, error);
2446 	}
2447 
2448 	DPNI_LOCK(sc);
2449 	/* Announce we are up and running and can queue packets. */
2450 	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
2451 
2452 	if (sc->mii) {
2453 		/*
2454 		 * mii_mediachg() will trigger a call into
2455 		 * dpaa2_ni_miibus_statchg() to setup link state.
2456 		 */
2457 		dpaa2_ni_media_change_locked(sc);
2458 	}
2459 	callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);
2460 
2461 	DPNI_UNLOCK(sc);
2462 
2463 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2464 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2465 	return;
2466 
2467 close_rc:
2468 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2469 err_exit:
2470 	return;
2471 }
2472 
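/**
 * @brief if_transmit callback: pick a channel based on the mbuf's flow ID and
 * enqueue the mbuf on that channel's transmit ring.
 */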
2473 static int
2474 dpaa2_ni_transmit(if_t ifp, struct mbuf *m)
2475 {
2476 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2477 	struct dpaa2_channel *ch;
2478 	uint32_t fqid;
2479 	bool found = false;
2480 	int chidx = 0, error;
2481 
	if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) {
		m_freem(m); /* not running: consume the mbuf to avoid a leak */
		return (0);
	}
2485 
2486 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
2487 		fqid = m->m_pkthdr.flowid;
2488 		for (int i = 0; i < sc->chan_n; i++) {
2489 			ch = sc->channels[i];
2490 			for (int j = 0; j < ch->rxq_n; j++) {
2491 				if (fqid == ch->rx_queues[j].fqid) {
2492 					chidx = ch->flowid;
2493 					found = true;
2494 					break;
2495 				}
2496 			}
2497 			if (found) {
2498 				break;
2499 			}
2500 		}
2501 	}
2502 
2503 	ch = sc->channels[chidx];
2504 	error = buf_ring_enqueue(ch->xmit_br, m);
2505 	if (__predict_false(error != 0)) {
2506 		m_freem(m);
2507 	} else {
2508 		taskqueue_enqueue(ch->cleanup_tq, &ch->cleanup_task);
2509 	}
2510 
2511 	return (error);
2512 }
2513 
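/**
 * @brief Flush the interface's software transmit queues.
 */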
2514 static void
2515 dpaa2_ni_qflush(if_t ifp)
2516 {
2517 	/* TODO: Find a way to drain Tx queues in QBMan. */
2518 	if_qflush(ifp);
2519 }
2520 
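/**
 * @brief Handle ioctl(2) requests to the network interface.
 */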
2521 static int
2522 dpaa2_ni_ioctl(if_t ifp, u_long c, caddr_t data)
2523 {
2524 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2525 	struct ifreq *ifr = (struct ifreq *) data;
2526 	device_t pdev = device_get_parent(sc->dev);
2527 	device_t dev = sc->dev;
2528 	device_t child = dev;
2529 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2530 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2531 	struct dpaa2_cmd cmd;
2532 	uint32_t changed = 0;
2533 	uint16_t rc_token, ni_token;
2534 	int mtu, error, rc = 0;
2535 
2536 	DPAA2_CMD_INIT(&cmd);
2537 
2538 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2539 	if (error) {
2540 		device_printf(dev, "%s: failed to open resource container: "
2541 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2542 		goto err_exit;
2543 	}
2544 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2545 	if (error) {
2546 		device_printf(dev, "%s: failed to open network interface: "
2547 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2548 		goto close_rc;
2549 	}
2550 
2551 	switch (c) {
2552 	case SIOCSIFMTU:
2553 		DPNI_LOCK(sc);
2554 		mtu = ifr->ifr_mtu;
2555 		if (mtu < ETHERMIN || mtu > ETHERMTU_JUMBO) {
2556 			DPNI_UNLOCK(sc);
2557 			error = EINVAL;
2558 			goto close_ni;
2559 		}
2560 		if_setmtu(ifp, mtu);
2561 		DPNI_UNLOCK(sc);
2562 
2563 		/* Update maximum frame length. */
2564 		error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd,
2565 		    mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2566 		if (error) {
2567 			device_printf(dev, "%s: failed to update maximum frame "
2568 			    "length: error=%d\n", __func__, error);
2569 			goto close_ni;
2570 		}
2571 		break;
2572 	case SIOCSIFCAP:
2573 		changed = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
2574 		if (changed & IFCAP_HWCSUM) {
2575 			if ((ifr->ifr_reqcap & changed) & IFCAP_HWCSUM) {
2576 				if_setcapenablebit(ifp, IFCAP_HWCSUM, 0);
2577 			} else {
2578 				if_setcapenablebit(ifp, 0, IFCAP_HWCSUM);
2579 			}
2580 		}
2581 		rc = dpaa2_ni_setup_if_caps(sc);
2582 		if (rc) {
			printf("%s: failed to update interface capabilities: "
			    "error=%d\n", __func__, rc);
2585 			rc = ENXIO;
2586 		}
2587 		break;
2588 	case SIOCSIFFLAGS:
2589 		DPNI_LOCK(sc);
2590 		if (if_getflags(ifp) & IFF_UP) {
2591 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2592 				changed = if_getflags(ifp) ^ sc->if_flags;
2593 				if (changed & IFF_PROMISC ||
2594 				    changed & IFF_ALLMULTI) {
2595 					rc = dpaa2_ni_setup_if_flags(sc);
2596 				}
2597 			} else {
2598 				DPNI_UNLOCK(sc);
2599 				dpaa2_ni_init(sc);
2600 				DPNI_LOCK(sc);
2601 			}
2602 		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2603 			/* FIXME: Disable DPNI. See dpaa2_ni_init(). */
2604 		}
2605 
2606 		sc->if_flags = if_getflags(ifp);
2607 		DPNI_UNLOCK(sc);
2608 		break;
2609 	case SIOCADDMULTI:
2610 	case SIOCDELMULTI:
2611 		DPNI_LOCK(sc);
2612 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2613 			DPNI_UNLOCK(sc);
2614 			rc = dpaa2_ni_update_mac_filters(ifp);
2615 			if (rc) {
2616 				device_printf(dev, "%s: failed to update MAC "
2617 				    "filters: error=%d\n", __func__, rc);
2618 			}
2619 			DPNI_LOCK(sc);
2620 		}
2621 		DPNI_UNLOCK(sc);
2622 		break;
2623 	case SIOCGIFMEDIA:
2624 	case SIOCSIFMEDIA:
		if (sc->mii) {
			rc = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, c);
		} else if (sc->fixed_link) {
			rc = ifmedia_ioctl(ifp, ifr, &sc->fixed_ifmedia, c);
		}
2630 		break;
2631 	default:
2632 		rc = ether_ioctl(ifp, c, data);
2633 		break;
2634 	}
2635 
2636 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2637 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2638 	return (rc);
2639 
2640 close_ni:
2641 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2642 close_rc:
2643 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2644 err_exit:
2645 	return (error);
2646 }
2647 
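/**
 * @brief Re-program the DPNI's multicast MAC filters from the interface's
 * link-level address list.
 */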
2648 static int
2649 dpaa2_ni_update_mac_filters(if_t ifp)
2650 {
2651 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2652 	struct dpaa2_ni_mcaddr_ctx ctx;
2653 	device_t pdev = device_get_parent(sc->dev);
2654 	device_t dev = sc->dev;
2655 	device_t child = dev;
2656 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2657 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2658 	struct dpaa2_cmd cmd;
2659 	uint16_t rc_token, ni_token;
2660 	int error;
2661 
2662 	DPAA2_CMD_INIT(&cmd);
2663 
2664 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2665 	if (error) {
2666 		device_printf(dev, "%s: failed to open resource container: "
2667 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2668 		goto err_exit;
2669 	}
2670 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2671 	if (error) {
2672 		device_printf(dev, "%s: failed to open network interface: "
2673 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2674 		goto close_rc;
2675 	}
2676 
2677 	/* Remove all multicast MAC filters. */
2678 	error = DPAA2_CMD_NI_CLEAR_MAC_FILTERS(dev, child, &cmd, false, true);
2679 	if (error) {
2680 		device_printf(dev, "%s: failed to clear multicast MAC filters: "
2681 		    "error=%d\n", __func__, error);
2682 		goto close_ni;
2683 	}
2684 
2685 	ctx.ifp = ifp;
2686 	ctx.error = 0;
2687 	ctx.nent = 0;
2688 
2689 	if_foreach_llmaddr(ifp, dpaa2_ni_add_maddr, &ctx);
2690 
2691 	error = ctx.error;
2692 close_ni:
2693 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2694 close_rc:
2695 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2696 err_exit:
2697 	return (error);
2698 }
2699 
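/**
 * @brief if_foreach_llmaddr() callback: add a single multicast MAC address to
 * the DPNI filter table, falling back to multicast promiscuous mode when the
 * table is full.
 */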
2700 static u_int
2701 dpaa2_ni_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2702 {
2703 	struct dpaa2_ni_mcaddr_ctx *ctx = arg;
2704 	struct dpaa2_ni_softc *sc = if_getsoftc(ctx->ifp);
2705 	device_t pdev = device_get_parent(sc->dev);
2706 	device_t dev = sc->dev;
2707 	device_t child = dev;
2708 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2709 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2710 	struct dpaa2_cmd cmd;
2711 	uint16_t rc_token, ni_token;
2712 	int error;
2713 
2714 	if (ctx->error != 0) {
2715 		return (0);
2716 	}
2717 
2718 	if (ETHER_IS_MULTICAST(LLADDR(sdl))) {
2719 		DPAA2_CMD_INIT(&cmd);
2720 
2721 		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
2722 		    &rc_token);
2723 		if (error) {
2724 			device_printf(dev, "%s: failed to open resource "
2725 			    "container: id=%d, error=%d\n", __func__, rcinfo->id,
2726 			    error);
2727 			return (0);
2728 		}
2729 		error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id,
2730 		    &ni_token);
2731 		if (error) {
2732 			device_printf(dev, "%s: failed to open network interface: "
2733 			    "id=%d, error=%d\n", __func__, dinfo->id, error);
2734 			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
2735 			    rc_token));
2736 			return (0);
2737 		}
2738 
2739 		ctx->error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, &cmd,
2740 		    LLADDR(sdl));
2741 
2742 		(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
2743 		    ni_token));
2744 		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
2745 		    rc_token));
2746 
2747 		if (ctx->error != 0) {
			device_printf(dev, "%s: can't add more than %d MAC "
			    "addresses; switching to multicast promiscuous "
			    "mode\n", __func__, ctx->nent);
2751 
2752 			/* Enable multicast promiscuous mode. */
2753 			DPNI_LOCK(sc);
2754 			if_setflagbits(ctx->ifp, IFF_ALLMULTI, 0);
2755 			sc->if_flags |= IFF_ALLMULTI;
2756 			ctx->error = dpaa2_ni_setup_if_flags(sc);
2757 			DPNI_UNLOCK(sc);
2758 
2759 			return (0);
2760 		}
2761 		ctx->nent++;
2762 	}
2763 
2764 	return (1);
2765 }
2766 
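/**
 * @brief DPNI interrupt handler: read (and thereby acknowledge) the IRQ
 * status.
 */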
2767 static void
2768 dpaa2_ni_intr(void *arg)
2769 {
2770 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2771 	device_t pdev = device_get_parent(sc->dev);
2772 	device_t dev = sc->dev;
2773 	device_t child = dev;
2774 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2775 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2776 	struct dpaa2_cmd cmd;
2777 	uint32_t status = ~0u; /* clear all IRQ status bits */
2778 	uint16_t rc_token, ni_token;
2779 	int error;
2780 
2781 	DPAA2_CMD_INIT(&cmd);
2782 
2783 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2784 	if (error) {
2785 		device_printf(dev, "%s: failed to open resource container: "
2786 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2787 		goto err_exit;
2788 	}
2789 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2790 	if (error) {
2791 		device_printf(dev, "%s: failed to open network interface: "
2792 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2793 		goto close_rc;
2794 	}
2795 
2796 	error = DPAA2_CMD_NI_GET_IRQ_STATUS(dev, child, &cmd, DPNI_IRQ_INDEX,
2797 	    &status);
2798 	if (error) {
2799 		device_printf(sc->dev, "%s: failed to obtain IRQ status: "
2800 		    "error=%d\n", __func__, error);
2801 	}
2802 
2803 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2804 close_rc:
2805 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2806 err_exit:
2807 	return;
2808 }
2809 
2810 /**
2811  * @brief Execute channel's Rx/Tx routines.
2812  *
 * NOTE: Must not be re-entered for the same channel; this is guaranteed by
 *       enqueuing the cleanup routine on a single-threaded taskqueue.
2815  */
2816 static void
2817 dpaa2_ni_cleanup_task(void *arg, int count)
2818 {
2819 	struct dpaa2_channel *ch = (struct dpaa2_channel *)arg;
2820 	struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev);
2821 	int error, rxc, txc;
2822 
2823 	for (int i = 0; i < DPAA2_CLEAN_BUDGET; i++) {
2824 		rxc  = dpaa2_ni_rx_cleanup(ch);
2825 		txc  = dpaa2_ni_tx_cleanup(ch);
2826 
2827 		if (__predict_false((if_getdrvflags(sc->ifp) &
2828 		    IFF_DRV_RUNNING) == 0)) {
2829 			return;
2830 		}
2831 
2832 		if ((txc != DPAA2_TX_BUDGET) && (rxc != DPAA2_RX_BUDGET)) {
2833 			break;
2834 		}
2835 	}
2836 
2837 	/* Re-arm channel to generate CDAN */
2838 	error = DPAA2_SWP_CONF_WQ_CHANNEL(ch->io_dev, &ch->ctx);
2839 	if (error != 0) {
2840 		panic("%s: failed to rearm channel: chan_id=%d, error=%d\n",
2841 		    __func__, ch->id, error);
2842 	}
2843 }
2844 
2845 /**
2846  * @brief Poll frames from a specific channel when CDAN is received.
2847  */
2848 static int
2849 dpaa2_ni_rx_cleanup(struct dpaa2_channel *ch)
2850 {
2851 	struct dpaa2_io_softc *iosc = device_get_softc(ch->io_dev);
2852 	struct dpaa2_swp *swp = iosc->swp;
2853 	struct dpaa2_ni_fq *fq;
2854 	struct dpaa2_buf *buf = &ch->store;
	int budget = DPAA2_RX_BUDGET;
	uint32_t consumed = 0;
	int error;
2857 
2858 	do {
2859 		error = dpaa2_swp_pull(swp, ch->id, buf, DPAA2_ETH_STORE_FRAMES);
2860 		if (error) {
2861 			device_printf(ch->ni_dev, "%s: failed to pull frames: "
2862 			    "chan_id=%d, error=%d\n", __func__, ch->id, error);
2863 			break;
2864 		}
2865 		error = dpaa2_ni_consume_frames(ch, &fq, &consumed);
2866 		if (error == ENOENT || error == EALREADY) {
2867 			break;
2868 		}
2869 		if (error == ETIMEDOUT) {
			device_printf(ch->ni_dev, "%s: timed out while "
			    "consuming frames: chan_id=%d\n", __func__, ch->id);
2872 		}
2873 	} while (--budget);
2874 
2875 	return (DPAA2_RX_BUDGET - budget);
2876 }
2877 
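/**
 * @brief Dequeue mbufs from the channel's transmit ring and send them, up to
 * DPAA2_TX_BUDGET frames at a time.
 */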
2878 static int
2879 dpaa2_ni_tx_cleanup(struct dpaa2_channel *ch)
2880 {
2881 	struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev);
2882 	struct dpaa2_ni_tx_ring *tx = &ch->txc_queue.tx_rings[0];
2883 	struct mbuf *m = NULL;
2884 	int budget = DPAA2_TX_BUDGET;
2885 
2886 	do {
2887 		mtx_assert(&ch->xmit_mtx, MA_NOTOWNED);
2888 		mtx_lock(&ch->xmit_mtx);
2889 		m = buf_ring_dequeue_sc(ch->xmit_br);
2890 		mtx_unlock(&ch->xmit_mtx);
2891 
2892 		if (__predict_false(m == NULL)) {
2893 			/* TODO: Do not give up easily */
2894 			break;
2895 		} else {
2896 			dpaa2_ni_tx(sc, ch, tx, m);
2897 		}
2898 	} while (--budget);
2899 
2900 	return (DPAA2_TX_BUDGET - budget);
2901 }
2902 
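/**
 * @brief Transmit a single mbuf: load it into DMA segments, build a frame
 * descriptor and enqueue it to QBMan.
 */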
2903 static void
2904 dpaa2_ni_tx(struct dpaa2_ni_softc *sc, struct dpaa2_channel *ch,
2905     struct dpaa2_ni_tx_ring *tx, struct mbuf *m)
2906 {
2907 	device_t dev = sc->dev;
2908 	struct dpaa2_ni_fq *fq = tx->fq;
2909 	struct dpaa2_buf *buf, *sgt;
2910 	struct dpaa2_fd fd;
2911 	struct mbuf *md;
2912 	bus_dma_segment_t segs[DPAA2_TX_SEGLIMIT];
2913 	int rc, nsegs;
2914 	int error;
2915 
2916 	mtx_assert(&tx->lock, MA_NOTOWNED);
2917 	mtx_lock(&tx->lock);
2918 	buf = buf_ring_dequeue_sc(tx->br);
2919 	mtx_unlock(&tx->lock);
2920 	if (__predict_false(buf == NULL)) {
2921 		/* TODO: Do not give up easily */
2922 		m_freem(m);
2923 		return;
2924 	} else {
2925 		DPAA2_BUF_ASSERT_TXREADY(buf);
2926 		buf->m = m;
2927 		sgt = buf->sgt;
2928 	}
2929 
2930 #if defined(INVARIANTS)
2931 	struct dpaa2_ni_tx_ring *btx = (struct dpaa2_ni_tx_ring *)buf->opt;
2932 	KASSERT(buf->opt == tx, ("%s: unexpected Tx ring", __func__));
2933 	KASSERT(btx->fq->chan == ch, ("%s: unexpected channel", __func__));
2934 #endif /* INVARIANTS */
2935 
2936 	error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, m, segs, &nsegs,
2937 	    BUS_DMA_NOWAIT);
2938 	if (__predict_false(error != 0)) {
2939 		/* Too many fragments, trying to defragment... */
2940 		md = m_collapse(m, M_NOWAIT, DPAA2_TX_SEGLIMIT);
2941 		if (md == NULL) {
2942 			device_printf(dev, "%s: m_collapse() failed\n", __func__);
2943 			fq->chan->tx_dropped++;
2944 			goto err;
2945 		}
2946 
2947 		buf->m = m = md;
2948 		error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, m, segs,
2949 		    &nsegs, BUS_DMA_NOWAIT);
2950 		if (__predict_false(error != 0)) {
2951 			device_printf(dev, "%s: bus_dmamap_load_mbuf_sg() "
2952 			    "failed: error=%d\n", __func__, error);
2953 			fq->chan->tx_dropped++;
2954 			goto err;
2955 		}
2956 	}
2957 
2958 	error = dpaa2_ni_build_fd(sc, tx, buf, segs, nsegs, &fd);
2959 	if (__predict_false(error != 0)) {
2960 		device_printf(dev, "%s: failed to build frame descriptor: "
2961 		    "error=%d\n", __func__, error);
2962 		fq->chan->tx_dropped++;
2963 		goto err_unload;
2964 	}
2965 
2966 	/* TODO: Enqueue several frames in a single command */
2967 	for (int i = 0; i < DPAA2_NI_ENQUEUE_RETRIES; i++) {
2968 		/* TODO: Return error codes instead of # of frames */
2969 		rc = DPAA2_SWP_ENQ_MULTIPLE_FQ(fq->chan->io_dev, tx->fqid, &fd, 1);
2970 		if (rc == 1) {
2971 			break;
2972 		}
2973 	}
2974 
2975 	bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_PREWRITE);
2976 	bus_dmamap_sync(sgt->dmat, sgt->dmap, BUS_DMASYNC_PREWRITE);
2977 
2978 	if (rc != 1) {
2979 		fq->chan->tx_dropped++;
2980 		goto err_unload;
2981 	} else {
2982 		fq->chan->tx_frames++;
2983 	}
2984 	return;
2985 
2986 err_unload:
2987 	bus_dmamap_unload(buf->dmat, buf->dmap);
2988 	if (sgt->paddr != 0) {
2989 		bus_dmamap_unload(sgt->dmat, sgt->dmap);
2990 	}
2991 err:
2992 	m_freem(buf->m);
2993 	buf_ring_enqueue(tx->br, buf);
2994 }
2995 
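/**
 * @brief Consume dequeued frames from the channel store and dispatch them to
 * the Rx, Rx error or Tx confirmation handlers.
 */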
2996 static int
2997 dpaa2_ni_consume_frames(struct dpaa2_channel *chan, struct dpaa2_ni_fq **src,
2998     uint32_t *consumed)
2999 {
3000 	struct dpaa2_ni_fq *fq = NULL;
3001 	struct dpaa2_dq *dq;
3002 	struct dpaa2_fd *fd;
3003 	struct dpaa2_ni_rx_ctx ctx = {
3004 		.head = NULL,
3005 		.tail = NULL,
3006 		.cnt = 0,
3007 		.last = false
3008 	};
3009 	int rc, frames = 0;
3010 
3011 	do {
3012 		rc = dpaa2_chan_next_frame(chan, &dq);
3013 		if (rc == EINPROGRESS) {
3014 			if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
3015 				fd = &dq->fdr.fd;
3016 				fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;
3017 
3018 				switch (fq->type) {
3019 				case DPAA2_NI_QUEUE_RX:
3020 					(void)dpaa2_ni_rx(chan, fq, fd, &ctx);
3021 					break;
3022 				case DPAA2_NI_QUEUE_RX_ERR:
3023 					(void)dpaa2_ni_rx_err(chan, fq, fd);
3024 					break;
3025 				case DPAA2_NI_QUEUE_TX_CONF:
3026 					(void)dpaa2_ni_tx_conf(chan, fq, fd);
3027 					break;
3028 				default:
3029 					panic("%s: unknown queue type (1)",
3030 					    __func__);
3031 				}
3032 				frames++;
3033 			}
3034 		} else if (rc == EALREADY || rc == ENOENT) {
3035 			if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
3036 				fd = &dq->fdr.fd;
3037 				fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;
3038 
3039 				switch (fq->type) {
3040 				case DPAA2_NI_QUEUE_RX:
3041 					/*
3042 					 * Last VDQ response (mbuf) in a chain
3043 					 * obtained from the Rx queue.
3044 					 */
3045 					ctx.last = true;
3046 					(void)dpaa2_ni_rx(chan, fq, fd, &ctx);
3047 					break;
3048 				case DPAA2_NI_QUEUE_RX_ERR:
3049 					(void)dpaa2_ni_rx_err(chan, fq, fd);
3050 					break;
3051 				case DPAA2_NI_QUEUE_TX_CONF:
3052 					(void)dpaa2_ni_tx_conf(chan, fq, fd);
3053 					break;
3054 				default:
3055 					panic("%s: unknown queue type (2)",
3056 					    __func__);
3057 				}
3058 				frames++;
3059 			}
3060 			break;
3061 		} else {
3062 			panic("%s: should not reach here: rc=%d", __func__, rc);
3063 		}
3064 	} while (true);
3065 
3066 	KASSERT(chan->store_idx < chan->store_sz, ("%s: store_idx(%d) >= "
3067 	    "store_sz(%d)", __func__, chan->store_idx, chan->store_sz));
3068 
	/*
	 * A VDQ operation pulls frames from a single queue into the store.
	 * Return the frame queue and the number of consumed frames as outputs.
	 */
3073 	if (src != NULL) {
3074 		*src = fq;
3075 	}
3076 	if (consumed != NULL) {
3077 		*consumed = frames;
3078 	}
3079 
3080 	return (rc);
3081 }
3082 
3083 /**
3084  * @brief Receive frames.
3085  */
3086 static int
3087 dpaa2_ni_rx(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq, struct dpaa2_fd *fd,
3088     struct dpaa2_ni_rx_ctx *ctx)
3089 {
3090 	bus_addr_t paddr = (bus_addr_t)fd->addr;
3091 	struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr);
3092 	struct dpaa2_buf *buf = fa->buf;
3093 	struct dpaa2_channel *bch = (struct dpaa2_channel *)buf->opt;
3094 	struct dpaa2_ni_softc *sc = device_get_softc(bch->ni_dev);
3095 	struct dpaa2_bp_softc *bpsc;
3096 	struct mbuf *m;
3097 	device_t bpdev;
3098 	bus_addr_t released[DPAA2_SWP_BUFS_PER_CMD];
3099 	void *buf_data;
3100 	int buf_len, error, released_n = 0;
3101 
3102 	KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
3103 	/*
3104 	 * NOTE: Current channel might not be the same as the "buffer" channel
3105 	 * and it's fine. It must not be NULL though.
3106 	 */
3107 	KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__));
3108 
3109 	if (__predict_false(paddr != buf->paddr)) {
3110 		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
3111 		    __func__, paddr, buf->paddr);
3112 	}
3113 
3114 	switch (dpaa2_ni_fd_err(fd)) {
3115 	case 1: /* Enqueue rejected by QMan */
3116 		sc->rx_enq_rej_frames++;
3117 		break;
3118 	case 2: /* QMan IEOI error */
3119 		sc->rx_ieoi_err_frames++;
3120 		break;
3121 	default:
3122 		break;
3123 	}
3124 	switch (dpaa2_ni_fd_format(fd)) {
3125 	case DPAA2_FD_SINGLE:
3126 		sc->rx_single_buf_frames++;
3127 		break;
3128 	case DPAA2_FD_SG:
3129 		sc->rx_sg_buf_frames++;
3130 		break;
3131 	default:
3132 		break;
3133 	}
3134 
3135 	mtx_assert(&bch->dma_mtx, MA_NOTOWNED);
3136 	mtx_lock(&bch->dma_mtx);
3137 
3138 	bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_POSTREAD);
3139 	bus_dmamap_unload(buf->dmat, buf->dmap);
3140 	m = buf->m;
3141 	buf_len = dpaa2_ni_fd_data_len(fd);
3142 	buf_data = (uint8_t *)buf->vaddr + dpaa2_ni_fd_offset(fd);
3143 	/* Prepare buffer to be re-cycled */
3144 	buf->m = NULL;
3145 	buf->paddr = 0;
3146 	buf->vaddr = NULL;
3147 	buf->seg.ds_addr = 0;
3148 	buf->seg.ds_len = 0;
3149 	buf->nseg = 0;
3150 
3151 	mtx_unlock(&bch->dma_mtx);
3152 
3153 	m->m_flags |= M_PKTHDR;
3154 	m->m_data = buf_data;
3155 	m->m_len = buf_len;
3156 	m->m_pkthdr.len = buf_len;
3157 	m->m_pkthdr.rcvif = sc->ifp;
3158 	m->m_pkthdr.flowid = fq->fqid;
3159 	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
3160 
3161 	if (ctx->head == NULL) {
3162 		KASSERT(ctx->tail == NULL, ("%s: tail already given?", __func__));
3163 		ctx->head = m;
3164 		ctx->tail = m;
3165 	} else {
3166 		KASSERT(ctx->head != NULL, ("%s: head is NULL", __func__));
3167 		ctx->tail->m_nextpkt = m;
3168 		ctx->tail = m;
3169 	}
3170 	ctx->cnt++;
3171 
3172 	if (ctx->last) {
3173 		ctx->tail->m_nextpkt = NULL;
3174 		if_input(sc->ifp, ctx->head);
3175 	}
3176 
3177 	/* Keep the buffer to be recycled */
3178 	ch->recycled[ch->recycled_n++] = buf;
3179 
3180 	/* Re-seed and release recycled buffers back to the pool */
3181 	if (ch->recycled_n == DPAA2_SWP_BUFS_PER_CMD) {
3182 		/* Release new buffers to the pool if needed */
3183 		taskqueue_enqueue(sc->bp_taskq, &ch->bp_task);
3184 
3185 		for (int i = 0; i < ch->recycled_n; i++) {
3186 			buf = ch->recycled[i];
3187 			bch = (struct dpaa2_channel *)buf->opt;
3188 
3189 			mtx_assert(&bch->dma_mtx, MA_NOTOWNED);
3190 			mtx_lock(&bch->dma_mtx);
3191 			error = dpaa2_buf_seed_rxb(sc->dev, buf,
3192 			    DPAA2_RX_BUF_SIZE, &bch->dma_mtx);
3193 			mtx_unlock(&bch->dma_mtx);
3194 
3195 			if (__predict_false(error != 0)) {
3196 				/* TODO: What else to do with the buffer? */
3197 				panic("%s: failed to recycle buffer: error=%d",
3198 				    __func__, error);
3199 			}
3200 
3201 			/* Prepare buffer to be released in a single command */
3202 			released[released_n++] = buf->paddr;
3203 		}
3204 
3205 		/* There's only one buffer pool for now */
3206 		bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
3207 		bpsc = device_get_softc(bpdev);
3208 
3209 		error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpsc->attr.bpid,
3210 		    released, released_n);
3211 		if (__predict_false(error != 0)) {
3212 			device_printf(sc->dev, "%s: failed to release buffers "
3213 			    "to the pool: error=%d\n", __func__, error);
3214 			return (error);
3215 		}
3216 		ch->recycled_n = 0;
3217 	}
3218 
3219 	return (0);
3220 }
3221 
3222 /**
3223  * @brief Receive Rx error frames.
3224  */
3225 static int
3226 dpaa2_ni_rx_err(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq,
3227     struct dpaa2_fd *fd)
3228 {
3229 	bus_addr_t paddr = (bus_addr_t)fd->addr;
3230 	struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr);
3231 	struct dpaa2_buf *buf = fa->buf;
3232 	struct dpaa2_channel *bch = (struct dpaa2_channel *)buf->opt;
3233 	struct dpaa2_ni_softc *sc = device_get_softc(bch->ni_dev);
3234 	device_t bpdev;
3235 	struct dpaa2_bp_softc *bpsc;
3236 	int error;
3237 
3238 	KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
3239 	/*
3240 	 * NOTE: Current channel might not be the same as the "buffer" channel
3241 	 * and it's fine. It must not be NULL though.
3242 	 */
3243 	KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__));
3244 
3245 	if (__predict_false(paddr != buf->paddr)) {
3246 		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
3247 		    __func__, paddr, buf->paddr);
3248 	}
3249 
3250 	/* There's only one buffer pool for now */
3251 	bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
3252 	bpsc = device_get_softc(bpdev);
3253 
3254 	/* Release buffer to QBMan buffer pool */
3255 	error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpsc->attr.bpid, &paddr, 1);
3256 	if (error != 0) {
3257 		device_printf(sc->dev, "%s: failed to release frame buffer to "
3258 		    "the pool: error=%d\n", __func__, error);
3259 		return (error);
3260 	}
3261 
3262 	return (0);
3263 }
3264 
3265 /**
3266  * @brief Receive Tx confirmation frames.
3267  */
3268 static int
3269 dpaa2_ni_tx_conf(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq,
3270     struct dpaa2_fd *fd)
3271 {
3272 	bus_addr_t paddr = (bus_addr_t)fd->addr;
3273 	struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr);
3274 	struct dpaa2_buf *buf = fa->buf;
3275 	struct dpaa2_buf *sgt = buf->sgt;
3276 	struct dpaa2_ni_tx_ring *tx = (struct dpaa2_ni_tx_ring *)buf->opt;
3277 	struct dpaa2_channel *bch = tx->fq->chan;
3278 
3279 	KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
3280 	KASSERT(tx != NULL, ("%s: Tx ring is NULL", __func__));
3281 	KASSERT(sgt != NULL, ("%s: S/G table is NULL", __func__));
3282 	/*
3283 	 * NOTE: Current channel might not be the same as the "buffer" channel
3284 	 * and it's fine. It must not be NULL though.
3285 	 */
3286 	KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__));
3287 
3288 	if (paddr != buf->paddr) {
3289 		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
3290 		    __func__, paddr, buf->paddr);
3291 	}
3292 
3293 	mtx_assert(&bch->dma_mtx, MA_NOTOWNED);
3294 	mtx_lock(&bch->dma_mtx);
3295 
3296 	bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_POSTWRITE);
3297 	bus_dmamap_sync(sgt->dmat, sgt->dmap, BUS_DMASYNC_POSTWRITE);
3298 	bus_dmamap_unload(buf->dmat, buf->dmap);
3299 	bus_dmamap_unload(sgt->dmat, sgt->dmap);
3300 	m_freem(buf->m);
3301 	buf->m = NULL;
3302 	buf->paddr = 0;
3303 	buf->vaddr = NULL;
3304 	sgt->paddr = 0;
3305 
3306 	mtx_unlock(&bch->dma_mtx);
3307 
3308 	/* Return Tx buffer back to the ring */
3309 	buf_ring_enqueue(tx->br, buf);
3310 
3311 	return (0);
3312 }
3313 
3314 /**
3315  * @brief Compare versions of the DPAA2 network interface API.
3316  */
3317 static int
3318 dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *sc, uint16_t major,
3319     uint16_t minor)
3320 {
	if (sc->api_major == major) {
		return (sc->api_minor - minor);
	}
	return (sc->api_major - major);
3325 }
3326 
3327 /**
3328  * @brief Build a DPAA2 frame descriptor.
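 *
 * The frame is described by a scatter/gather (SG) table built in the small
 * "sgt" companion buffer. Assumed layout of that buffer, consistent with the
 * offset programmed into the FD below:
 *
 *	byte 0                          byte tx_data_off
 *	| struct dpaa2_fa | ...padding... | struct dpaa2_sg_entry[nsegs] |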
3329  */
3330 static int
3331 dpaa2_ni_build_fd(struct dpaa2_ni_softc *sc, struct dpaa2_ni_tx_ring *tx,
3332     struct dpaa2_buf *buf, bus_dma_segment_t *segs, int nsegs, struct dpaa2_fd *fd)
3333 {
3334 	struct dpaa2_buf *sgt = buf->sgt;
3335 	struct dpaa2_sg_entry *sge;
3336 	struct dpaa2_fa *fa;
3337 	int i, error;
3338 
3339 	KASSERT(nsegs <= DPAA2_TX_SEGLIMIT, ("%s: too many segments", __func__));
3340 	KASSERT(buf->opt != NULL, ("%s: no Tx ring?", __func__));
3341 	KASSERT(sgt != NULL, ("%s: no S/G table?", __func__));
3342 	KASSERT(sgt->vaddr != NULL, ("%s: no S/G vaddr?", __func__));
3343 
3344 	memset(fd, 0, sizeof(*fd));
3345 
3346 	/* Populate and map S/G table */
3347 	if (__predict_true(nsegs <= DPAA2_TX_SEGLIMIT)) {
		/*
		 * The SG table is assumed to start tx_data_off *bytes* into
		 * the buffer (the same offset programmed into the FD below),
		 * so advance by bytes before casting to an entry pointer.
		 */
		sge = (struct dpaa2_sg_entry *)((uint8_t *)sgt->vaddr +
		    sc->tx_data_off);
3349 		for (i = 0; i < nsegs; i++) {
3350 			sge[i].addr = (uint64_t)segs[i].ds_addr;
3351 			sge[i].len = (uint32_t)segs[i].ds_len;
3352 			sge[i].offset_fmt = 0u;
3353 		}
		sge[i - 1].offset_fmt |= 0x8000u; /* set final entry flag */
3355 
3356 		KASSERT(sgt->paddr == 0, ("%s: paddr(%#jx) != 0", __func__,
3357 		    sgt->paddr));
3358 
3359 		error = bus_dmamap_load(sgt->dmat, sgt->dmap, sgt->vaddr,
3360 		    DPAA2_TX_SGT_SZ, dpaa2_dmamap_oneseg_cb, &sgt->paddr,
3361 		    BUS_DMA_NOWAIT);
3362 		if (__predict_false(error != 0)) {
3363 			device_printf(sc->dev, "%s: bus_dmamap_load() failed: "
3364 			    "error=%d\n", __func__, error);
3365 			return (error);
3366 		}
3367 
3368 		buf->paddr = sgt->paddr;
3369 		buf->vaddr = sgt->vaddr;
3370 		sc->tx_sg_frames++; /* for sysctl(9) */
3371 	} else {
3372 		return (EINVAL);
3373 	}
3374 
3375 	fa = (struct dpaa2_fa *)sgt->vaddr;
3376 	fa->magic = DPAA2_MAGIC;
3377 	fa->buf = buf;
3378 
3379 	fd->addr = buf->paddr;
3380 	fd->data_length = (uint32_t)buf->m->m_pkthdr.len;
3381 	fd->bpid_ivp_bmt = 0;
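	/* FMT = scatter/gather; the SGT/data offset equals tx_data_off */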
3382 	fd->offset_fmt_sl = 0x2000u | sc->tx_data_off;
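	/* Presumably the PTA (pass-through annotation) control bit */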
3383 	fd->ctrl = 0x00800000u;
3384 
3385 	return (0);
3386 }
3387 
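/*
 * The accessors below decode fields of the frame descriptor words using the
 * DPAA2_NI_FD_* shift/mask macros. A rough sketch of the assumed layout of
 * the combined offset/format/short-length word:
 *
 *	bit 15 | 14 | 13..12 | 11..0
 *	  --   | SL |  FMT   | OFFSET
 */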
3388 static int
3389 dpaa2_ni_fd_err(struct dpaa2_fd *fd)
3390 {
3391 	return ((fd->ctrl >> DPAA2_NI_FD_ERR_SHIFT) & DPAA2_NI_FD_ERR_MASK);
3392 }
3393 
3394 static uint32_t
3395 dpaa2_ni_fd_data_len(struct dpaa2_fd *fd)
3396 {
3397 	if (dpaa2_ni_fd_short_len(fd)) {
3398 		return (fd->data_length & DPAA2_NI_FD_LEN_MASK);
3399 	}
3400 	return (fd->data_length);
3401 }
3402 
3403 static int
3404 dpaa2_ni_fd_format(struct dpaa2_fd *fd)
3405 {
3406 	return ((enum dpaa2_fd_format)((fd->offset_fmt_sl >>
3407 	    DPAA2_NI_FD_FMT_SHIFT) & DPAA2_NI_FD_FMT_MASK));
3408 }
3409 
3410 static bool
3411 dpaa2_ni_fd_short_len(struct dpaa2_fd *fd)
3412 {
3413 	return (((fd->offset_fmt_sl >> DPAA2_NI_FD_SL_SHIFT)
3414 	    & DPAA2_NI_FD_SL_MASK) == 1);
3415 }
3416 
3417 static int
3418 dpaa2_ni_fd_offset(struct dpaa2_fd *fd)
3419 {
3420 	return (fd->offset_fmt_sl & DPAA2_NI_FD_OFFSET_MASK);
3421 }
3422 
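/*
 * Illustrative Rx-side use of the accessors above (simplified; not the
 * actual Rx path):
 *
 *	if (dpaa2_ni_fd_err(fd) != 0)
 *		return (EIO);	// count and drop the frame
 *	len  = dpaa2_ni_fd_data_len(fd);
 *	data = (uint8_t *)buf->vaddr + dpaa2_ni_fd_offset(fd);
 */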
3423 /**
3424  * @brief Collect statistics of the network interface.
3425  */
3426 static int
3427 dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS)
3428 {
3429 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
3430 	struct dpni_stat *stat = &dpni_stat_sysctls[oidp->oid_number];
3431 	device_t pdev = device_get_parent(sc->dev);
3432 	device_t dev = sc->dev;
3433 	device_t child = dev;
3434 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
3435 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
3436 	struct dpaa2_cmd cmd;
3437 	uint64_t cnt[DPAA2_NI_STAT_COUNTERS];
3438 	uint64_t result = 0;
3439 	uint16_t rc_token, ni_token;
3440 	int error;
3441 
3442 	DPAA2_CMD_INIT(&cmd);
3443 
3444 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
3445 	if (error) {
3446 		device_printf(dev, "%s: failed to open resource container: "
3447 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
3448 		goto exit;
3449 	}
3450 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
3451 	if (error) {
3452 		device_printf(dev, "%s: failed to open network interface: "
3453 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
3454 		goto close_rc;
3455 	}
3456 
3457 	error = DPAA2_CMD_NI_GET_STATISTICS(dev, child, &cmd, stat->page, 0, cnt);
3458 	if (!error) {
3459 		result = cnt[stat->cnt];
3460 	}
3461 
3462 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
3463 close_rc:
3464 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
3465 exit:
3466 	return (sysctl_handle_64(oidp, &result, 0, req));
3467 }
3468 
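/*
 * The handlers below export buffer accounting through sysctl(9). Node names
 * depend on the sysctl tree created at attach time; a hypothetical query:
 *
 *	# sysctl dev.dpaa2_ni.0.buf_num
 */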
3469 static int
3470 dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS)
3471 {
3472 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
3473 	uint32_t buf_num = DPAA2_ATOMIC_READ(&sc->buf_num);
3474 
3475 	return (sysctl_handle_32(oidp, &buf_num, 0, req));
3476 }
3477 
3478 static int
3479 dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS)
3480 {
3481 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
3482 	uint32_t buf_free = DPAA2_ATOMIC_READ(&sc->buf_free);
3483 
3484 	return (sysctl_handle_32(oidp, &buf_free, 0, req));
3485 }
3486 
3487 static int
3488 dpaa2_ni_set_hash(device_t dev, uint64_t flags)
3489 {
3490 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
3491 	uint64_t key = 0;
3492 	int i;
3493 
	if (sc->attr.num.queues <= 1) {
3495 		return (EOPNOTSUPP);
3496 	}
3497 
3498 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3499 		if (dist_fields[i].rxnfc_field & flags) {
3500 			key |= dist_fields[i].id;
3501 		}
3502 	}
3503 
3504 	return (dpaa2_ni_set_dist_key(dev, DPAA2_NI_DIST_MODE_HASH, key));
3505 }
3506 
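/*
 * Illustrative call enabling IP src/dst and L4 port hashing (flag names from
 * the dist_fields table; the exact set shown is hypothetical):
 *
 *	error = dpaa2_ni_set_hash(sc->dev,
 *	    RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3);
 */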
3507 /**
 * @brief Set the Rx distribution (hash or flow classification) key.
 *
 * "flags" is a combination of RXH_ bits.
3510  */
3511 static int
3512 dpaa2_ni_set_dist_key(device_t dev, enum dpaa2_ni_dist_mode type, uint64_t flags)
3513 {
3514 	device_t pdev = device_get_parent(dev);
3515 	device_t child = dev;
3516 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
3517 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
3518 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
3519 	struct dpkg_profile_cfg cls_cfg;
3520 	struct dpkg_extract *key;
3521 	struct dpaa2_buf *buf = &sc->rxd_kcfg;
3522 	struct dpaa2_cmd cmd;
3523 	uint16_t rc_token, ni_token;
3524 	int i, error = 0;
3525 
3526 	if (__predict_true(buf->dmat == NULL)) {
3527 		buf->dmat = sc->rxd_dmat;
3528 	}
3529 
3530 	memset(&cls_cfg, 0, sizeof(cls_cfg));
3531 
3532 	/* Configure extracts according to the given flags. */
3533 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3534 		key = &cls_cfg.extracts[cls_cfg.num_extracts];
3535 
3536 		if (!(flags & dist_fields[i].id)) {
3537 			continue;
3538 		}
3539 
3540 		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
3541 			device_printf(dev, "%s: failed to add key extraction "
3542 			    "rule\n", __func__);
3543 			return (E2BIG);
3544 		}
3545 
3546 		key->type = DPKG_EXTRACT_FROM_HDR;
3547 		key->extract.from_hdr.prot = dist_fields[i].cls_prot;
3548 		key->extract.from_hdr.type = DPKG_FULL_FIELD;
3549 		key->extract.from_hdr.field = dist_fields[i].cls_field;
3550 		cls_cfg.num_extracts++;
3551 	}
3552 
3553 	error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr,
3554 	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap);
3555 	if (error != 0) {
3556 		device_printf(dev, "%s: failed to allocate a buffer for Rx "
3557 		    "traffic distribution key configuration\n", __func__);
3558 		return (error);
3559 	}
3560 
	error = dpaa2_ni_prepare_key_cfg(&cls_cfg, (uint8_t *)buf->vaddr);
	if (error != 0) {
		device_printf(dev, "%s: failed to prepare key configuration: "
		    "error=%d\n", __func__, error);
		/* Don't leak the key configuration buffer on failure */
		bus_dmamem_free(buf->dmat, buf->vaddr, buf->dmap);
		buf->vaddr = NULL;
		return (error);
	}

	/* Map the key configuration buffer for the upcoming MC command */
	error = bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr,
	    DPAA2_CLASSIFIER_DMA_SIZE, dpaa2_dmamap_oneseg_cb, &buf->paddr,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->dev, "%s: failed to map a buffer for Rx "
		    "traffic distribution key configuration\n", __func__);
		bus_dmamem_free(buf->dmat, buf->vaddr, buf->dmap);
		buf->vaddr = NULL;
		return (error);
	}
3577 
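	/* Only hash-based Rx distribution is configured here so far */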
3578 	if (type == DPAA2_NI_DIST_MODE_HASH) {
3579 		DPAA2_CMD_INIT(&cmd);
3580 
3581 		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
3582 		    &rc_token);
3583 		if (error) {
3584 			device_printf(dev, "%s: failed to open resource "
3585 			    "container: id=%d, error=%d\n", __func__, rcinfo->id,
3586 			    error);
3587 			goto err_exit;
3588 		}
3589 		error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id,
3590 		    &ni_token);
3591 		if (error) {
3592 			device_printf(dev, "%s: failed to open network "
3593 			    "interface: id=%d, error=%d\n", __func__, dinfo->id,
3594 			    error);
3595 			goto close_rc;
3596 		}
3597 
3598 		error = DPAA2_CMD_NI_SET_RX_TC_DIST(dev, child, &cmd,
3599 		    sc->attr.num.queues, 0, DPAA2_NI_DIST_MODE_HASH, buf->paddr);
3600 		if (error != 0) {
3601 			device_printf(dev, "%s: failed to set distribution mode "
3602 			    "and size for the traffic class\n", __func__);
3603 		}
3604 
3605 		(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
3606 		    ni_token));
3607 close_rc:
3608 		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
3609 		    rc_token));
3610 	}
3611 
3612 err_exit:
3613 	return (error);
3614 }
3615 
3616 /**
 * @brief Prepare (serialize) extract parameters for the MC firmware.
 *
 * cfg:		Key Generation profile to serialize.
 * key_cfg_buf:	Zeroed 256-byte buffer that receives the serialized profile
 *		before being mapped for DMA.
3621  */
3622 static int
3623 dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *cfg, uint8_t *key_cfg_buf)
3624 {
3625 	struct dpni_ext_set_rx_tc_dist *dpni_ext;
3626 	struct dpni_dist_extract *extr;
3627 	int i, j;
3628 
3629 	if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
3630 		return (EINVAL);
3631 
3632 	dpni_ext = (struct dpni_ext_set_rx_tc_dist *) key_cfg_buf;
3633 	dpni_ext->num_extracts = cfg->num_extracts;
3634 
3635 	for (i = 0; i < cfg->num_extracts; i++) {
3636 		extr = &dpni_ext->extracts[i];
3637 
3638 		switch (cfg->extracts[i].type) {
3639 		case DPKG_EXTRACT_FROM_HDR:
3640 			extr->prot = cfg->extracts[i].extract.from_hdr.prot;
3641 			extr->efh_type =
3642 			    cfg->extracts[i].extract.from_hdr.type & 0x0Fu;
3643 			extr->size = cfg->extracts[i].extract.from_hdr.size;
3644 			extr->offset = cfg->extracts[i].extract.from_hdr.offset;
3645 			extr->field = cfg->extracts[i].extract.from_hdr.field;
3646 			extr->hdr_index =
3647 				cfg->extracts[i].extract.from_hdr.hdr_index;
3648 			break;
3649 		case DPKG_EXTRACT_FROM_DATA:
3650 			extr->size = cfg->extracts[i].extract.from_data.size;
3651 			extr->offset =
3652 				cfg->extracts[i].extract.from_data.offset;
3653 			break;
3654 		case DPKG_EXTRACT_FROM_PARSE:
3655 			extr->size = cfg->extracts[i].extract.from_parse.size;
3656 			extr->offset =
3657 				cfg->extracts[i].extract.from_parse.offset;
3658 			break;
3659 		default:
3660 			return (EINVAL);
3661 		}
3662 
3663 		extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
3664 		extr->extract_type = cfg->extracts[i].type & 0x0Fu;
3665 
3666 		for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
3667 			extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
3668 			extr->masks[j].offset =
3669 				cfg->extracts[i].masks[j].offset;
3670 		}
3671 	}
3672 
3673 	return (0);
3674 }
3675 
3676 static device_method_t dpaa2_ni_methods[] = {
3677 	/* Device interface */
3678 	DEVMETHOD(device_probe,		dpaa2_ni_probe),
3679 	DEVMETHOD(device_attach,	dpaa2_ni_attach),
3680 	DEVMETHOD(device_detach,	dpaa2_ni_detach),
3681 
3682 	/* mii via memac_mdio */
3683 	DEVMETHOD(miibus_statchg,	dpaa2_ni_miibus_statchg),
3684 
3685 	DEVMETHOD_END
3686 };
3687 
3688 static driver_t dpaa2_ni_driver = {
3689 	"dpaa2_ni",
3690 	dpaa2_ni_methods,
3691 	sizeof(struct dpaa2_ni_softc),
3692 };
3693 
3694 DRIVER_MODULE(miibus, dpaa2_ni, miibus_driver, 0, 0);
3695 DRIVER_MODULE(dpaa2_ni, dpaa2_rc, dpaa2_ni_driver, 0, 0);
3696 
3697 MODULE_DEPEND(dpaa2_ni, miibus, 1, 1, 1);
3698 #ifdef DEV_ACPI
3699 MODULE_DEPEND(dpaa2_ni, memac_mdio_acpi, 1, 1, 1);
3700 #endif
3701 #ifdef FDT
3702 MODULE_DEPEND(dpaa2_ni, memac_mdio_fdt, 1, 1, 1);
3703 #endif
3704