/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright © 2021-2023 Dmitry Salychev
 * Copyright © 2022 Mathew McBride
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 * The DPAA2 Network Interface (DPNI) driver.
 *
 * The DPNI object is a network interface that is configurable to support a wide
 * range of features from a very basic Ethernet interface up to a
 * high-functioning network interface. The DPNI supports features that are
 * expected by standard network stacks, from basic features to offloads.
 *
 * DPNIs work with Ethernet traffic, starting with the L2 header. Additional
 * functions are provided for standard network protocols (L2, L3, L4, etc.).
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/taskqueue.h>
#include <sys/buf_ring.h>
#include <sys/smp.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/atomic.h>
#include <machine/vmparam.h>

#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <dev/pci/pcivar.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mdio/mdio.h>

#include "opt_acpi.h"
#include "opt_platform.h"

#include "pcib_if.h"
#include "pci_if.h"
#include "miibus_if.h"
#include "memac_mdio_if.h"

#include "dpaa2_types.h"
#include "dpaa2_mc.h"
#include "dpaa2_mc_if.h"
#include "dpaa2_mcp.h"
#include "dpaa2_swp.h"
#include "dpaa2_swp_if.h"
#include "dpaa2_cmd_if.h"
#include "dpaa2_ni.h"
#include "dpaa2_channel.h"
#include "dpaa2_buf.h"

#define BIT(x)			(1ul << (x))
#define WRIOP_VERSION(x, y, z)	((x) << 10 | (y) << 5 | (z) << 0)
#define ARRAY_SIZE(a)		(sizeof(a) / sizeof((a)[0]))

/* Frame Dequeue Response status bits. */
#define IS_NULL_RESPONSE(stat)	((((stat) >> 4) & 1) == 0)
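/*
 * Usage sketch (the "stat" field name below is illustrative, not taken from
 * this file): bit 4 of a frame-dequeue response status byte is set only when
 * the entry carries a valid frame, so an empty response can be skipped with:
 *
 *	if (IS_NULL_RESPONSE(dq->stat))
 *		continue; // nothing was dequeued for this entry
 */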

#define	ALIGN_UP(x, y)		roundup2((x), (y))
#define	ALIGN_DOWN(x, y)	rounddown2((x), (y))
#define CACHE_LINE_ALIGN(x)	ALIGN_UP((x), CACHE_LINE_SIZE)
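/*
 * For example, with 64-byte cache lines: ALIGN_UP(100, 64) == 128,
 * ALIGN_DOWN(100, 64) == 64 and, therefore, CACHE_LINE_ALIGN(100) == 128.
 */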

#define DPNI_LOCK(__sc) do {			\
	mtx_assert(&(__sc)->lock, MA_NOTOWNED);	\
	mtx_lock(&(__sc)->lock);		\
} while (0)
#define	DPNI_UNLOCK(__sc) do {			\
	mtx_assert(&(__sc)->lock, MA_OWNED);	\
	mtx_unlock(&(__sc)->lock);		\
} while (0)
#define	DPNI_LOCK_ASSERT(__sc) do {		\
	mtx_assert(&(__sc)->lock, MA_OWNED);	\
} while (0)
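/*
 * Usage sketch: the softc mutex guards interface state such as if_flags and
 * the link state, e.g.:
 *
 *	DPNI_LOCK(sc);
 *	sc->if_flags = if_getflags(sc->ifp);
 *	DPNI_UNLOCK(sc);
 *
 * The MA_NOTOWNED assertion in DPNI_LOCK() makes recursive locking panic
 * early in debug kernels instead of deadlocking.
 */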

#define DPAA2_TX_RING(sc, chan, tc) \
	(&(sc)->channels[(chan)]->txc_queue.tx_rings[(tc)])
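/*
 * For instance, DPAA2_TX_RING(sc, 0, 0) resolves to the ring of the first Tx
 * traffic class on the first channel; the transmit path is expected to derive
 * (chan, tc) from the mbuf flow ID in a similar fashion.
 */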

MALLOC_DEFINE(M_DPAA2_TXB, "dpaa2_txb", "DPAA2 DMA-mapped buffer (Tx)");

/*
 * How many times the channel cleanup routine will be repeated if the Rx or Tx
 * budget was depleted.
 */
#define DPAA2_CLEAN_BUDGET	64 /* sysctl(9)? */
/* Tx/Rx budget for the channel cleanup task */
#define DPAA2_TX_BUDGET		128 /* sysctl(9)? */
#define DPAA2_RX_BUDGET		256 /* sysctl(9)? */
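/*
 * In other words (a sketch of the intent behind these constants): one pass of
 * the cleanup task may consume up to DPAA2_RX_BUDGET Rx frames and
 * DPAA2_TX_BUDGET Tx confirmations, and the pass is repeated up to
 * DPAA2_CLEAN_BUDGET times while either budget keeps being exhausted.
 */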

#define DPNI_IRQ_INDEX		0 /* Index of the only DPNI IRQ. */
#define DPNI_IRQ_LINK_CHANGED	1 /* Link state changed */
#define DPNI_IRQ_EP_CHANGED	2 /* DPAA2 endpoint dis/connected */

/* Default maximum frame length. */
#define DPAA2_ETH_MFL		(ETHER_MAX_LEN - ETHER_CRC_LEN)

/* Minimally supported version of the DPNI API. */
#define DPNI_VER_MAJOR		7
#define DPNI_VER_MINOR		0

/* Rx/Tx buffers configuration. */
#define BUF_ALIGN_V1		256 /* WRIOP v1.0.0 limitation */
#define BUF_ALIGN		64
#define BUF_SWA_SIZE		64  /* SW annotation size */
#define BUF_RX_HWA_SIZE		64  /* HW annotation size */
#define BUF_TX_HWA_SIZE		128 /* HW annotation size */
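/*
 * A sketch of the resulting Rx buffer layout (the exact placement is what
 * dpaa2_ni_set_buf_layout() requests from the MC firmware; the ordering here
 * is an assumption for illustration):
 *
 *	[ SW annotation (64) | HW annotation (64) | frame data ... ]
 *
 * with the buffer itself aligned to BUF_ALIGN, or BUF_ALIGN_V1 on WRIOP
 * v1.0.0 hardware.
 */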

#define DPAA2_RX_BUFRING_SZ	(4096u)
#define DPAA2_RXE_BUFRING_SZ	(1024u)
#define DPAA2_TXC_BUFRING_SZ	(4096u)
#define DPAA2_TX_SEGLIMIT	(16u) /* arbitrary number */
#define DPAA2_TX_SEG_SZ		(PAGE_SIZE)
#define DPAA2_TX_SEGS_MAXSZ	(DPAA2_TX_SEGLIMIT * DPAA2_TX_SEG_SZ)
#define DPAA2_TX_SGT_SZ		(PAGE_SIZE) /* bytes */
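/*
 * With 4 KiB pages this caps a single Tx frame at 16 segments of 4096 bytes
 * each, i.e. DPAA2_TX_SEGS_MAXSZ = 64 KiB described by one scatter/gather
 * table.
 */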

/* Size of a buffer to keep a QoS table key configuration. */
#define ETH_QOS_KCFG_BUF_SIZE	(PAGE_SIZE)

/* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */
#define DPAA2_CLASSIFIER_DMA_SIZE (PAGE_SIZE)

/* Buffers layout options. */
#define BUF_LOPT_TIMESTAMP	0x1
#define BUF_LOPT_PARSER_RESULT	0x2
#define BUF_LOPT_FRAME_STATUS	0x4
#define BUF_LOPT_PRIV_DATA_SZ	0x8
#define BUF_LOPT_DATA_ALIGN	0x10
#define BUF_LOPT_DATA_HEAD_ROOM	0x20
#define BUF_LOPT_DATA_TAIL_ROOM	0x40

#define DPAA2_NI_BUF_ADDR_MASK	(0x1FFFFFFFFFFFFul) /* 49-bit addresses max. */
#define DPAA2_NI_BUF_CHAN_MASK	(0xFu)
#define DPAA2_NI_BUF_CHAN_SHIFT	(60)
#define DPAA2_NI_BUF_IDX_MASK	(0x7FFFu)
#define DPAA2_NI_BUF_IDX_SHIFT	(49)
#define DPAA2_NI_TX_IDX_MASK	(0x7u)
#define DPAA2_NI_TX_IDX_SHIFT	(57)
#define DPAA2_NI_TXBUF_IDX_MASK	(0xFFu)
#define DPAA2_NI_TXBUF_IDX_SHIFT (49)

#define DPAA2_NI_FD_FMT_MASK	(0x3u)
#define DPAA2_NI_FD_FMT_SHIFT	(12)
#define DPAA2_NI_FD_ERR_MASK	(0xFFu)
#define DPAA2_NI_FD_ERR_SHIFT	(0)
#define DPAA2_NI_FD_SL_MASK	(0x1u)
#define DPAA2_NI_FD_SL_SHIFT	(14)
#define DPAA2_NI_FD_LEN_MASK	(0x3FFFFu)
#define DPAA2_NI_FD_OFFSET_MASK (0x0FFFu)
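/*
 * A sketch of how these masks are meant to be applied (the actual word names
 * depend on struct dpaa2_fd): the format, the short-length flag and the data
 * offset share one frame descriptor control word, e.g.
 *
 *	fmt = (fd_word >> DPAA2_NI_FD_FMT_SHIFT) & DPAA2_NI_FD_FMT_MASK;
 *	off = fd_word & DPAA2_NI_FD_OFFSET_MASK;
 */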

/* Enables TCAM for Flow Steering and QoS look-ups. */
#define DPNI_OPT_HAS_KEY_MASKING 0x10

/* Unique IDs for the supported Rx classification header fields. */
#define DPAA2_ETH_DIST_ETHDST	BIT(0)
#define DPAA2_ETH_DIST_ETHSRC	BIT(1)
#define DPAA2_ETH_DIST_ETHTYPE	BIT(2)
#define DPAA2_ETH_DIST_VLAN	BIT(3)
#define DPAA2_ETH_DIST_IPSRC	BIT(4)
#define DPAA2_ETH_DIST_IPDST	BIT(5)
#define DPAA2_ETH_DIST_IPPROTO	BIT(6)
#define DPAA2_ETH_DIST_L4SRC	BIT(7)
#define DPAA2_ETH_DIST_L4DST	BIT(8)
#define DPAA2_ETH_DIST_ALL	(~0ULL)

/* L3-L4 network traffic flow hash options. */
#define	RXH_L2DA		(1 << 1)
#define	RXH_VLAN		(1 << 2)
#define	RXH_L3_PROTO		(1 << 3)
#define	RXH_IP_SRC		(1 << 4)
#define	RXH_IP_DST		(1 << 5)
#define	RXH_L4_B_0_1		(1 << 6) /* src port in case of TCP/UDP/SCTP */
#define	RXH_L4_B_2_3		(1 << 7) /* dst port in case of TCP/UDP/SCTP */
#define	RXH_DISCARD		(1U << 31)

/* Default Rx hash options, set during attach. */
#define DPAA2_RXH_DEFAULT	(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
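/*
 * That is, by default ingress traffic is spread over the Rx queues by a
 * 4-tuple hash: IP source/destination address plus L4 (TCP/UDP) source/
 * destination port.
 */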

MALLOC_DEFINE(M_DPAA2_NI, "dpaa2_ni", "DPAA2 Network Interface");

/*
 * DPAA2 Network Interface resource specification.
 *
 * NOTE: Don't forget to update macros in dpaa2_ni.h in case of any changes in
 *       the specification!
 */
struct resource_spec dpaa2_ni_spec[] = {
	/*
	 * DPMCP resources.
	 *
	 * NOTE: MC command portals (MCPs) are used to send commands to, and
	 *	 receive responses from, the MC firmware. One portal per DPNI.
	 */
	{ DPAA2_DEV_MCP, DPAA2_NI_MCP_RID(0),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	/*
	 * DPIO resources (software portals).
	 *
	 * NOTE: One per running core. While DPIOs are the source of data
	 *	 availability interrupts, the DPCONs are used to identify the
	 *	 network interface that has produced ingress data to that core.
	 */
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(0),    RF_ACTIVE | RF_SHAREABLE },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(1),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(2),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(3),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(4),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(5),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(6),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(7),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(8),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(9),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(10),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(11),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(12),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(13),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(14),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  DPAA2_NI_IO_RID(15),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	/*
	 * DPBP resources (buffer pools).
	 *
	 * NOTE: One per network interface.
	 */
	{ DPAA2_DEV_BP,  DPAA2_NI_BP_RID(0),   RF_ACTIVE },
	/*
	 * DPCON resources (channels).
	 *
	 * NOTE: One DPCON per core to which Rx or Tx confirmation traffic is
	 *	 distributed.
	 * NOTE: Since it is necessary to distinguish between traffic from
	 *	 different network interfaces arriving on the same core, the
	 *	 DPCONs must be private to the DPNIs.
	 */
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(0),   RF_ACTIVE },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(1),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(2),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(3),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(4),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(5),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(6),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(7),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(8),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(9),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(10),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(11),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(12),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(13),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(14),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(15),  RF_ACTIVE | RF_OPTIONAL },

	RESOURCE_SPEC_END
};

/* Supported header fields for Rx hash distribution key */
static const struct dpaa2_eth_dist_fields dist_fields[] = {
	{
		/* L2 header */
		.rxnfc_field = RXH_L2DA,
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_DA,
		.id = DPAA2_ETH_DIST_ETHDST,
		.size = 6,
	}, {
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_SA,
		.id = DPAA2_ETH_DIST_ETHSRC,
		.size = 6,
	}, {
		/* This is the last ethertype field parsed:
		 * depending on frame format, it can be the MAC ethertype
		 * or the VLAN etype.
		 */
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_TYPE,
		.id = DPAA2_ETH_DIST_ETHTYPE,
		.size = 2,
	}, {
		/* VLAN header */
		.rxnfc_field = RXH_VLAN,
		.cls_prot = NET_PROT_VLAN,
		.cls_field = NH_FLD_VLAN_TCI,
		.id = DPAA2_ETH_DIST_VLAN,
		.size = 2,
	}, {
		/* IP header */
		.rxnfc_field = RXH_IP_SRC,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_SRC,
		.id = DPAA2_ETH_DIST_IPSRC,
		.size = 4,
	}, {
		.rxnfc_field = RXH_IP_DST,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_DST,
		.id = DPAA2_ETH_DIST_IPDST,
		.size = 4,
	}, {
		.rxnfc_field = RXH_L3_PROTO,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_PROTO,
		.id = DPAA2_ETH_DIST_IPPROTO,
		.size = 1,
	}, {
		/* Using UDP ports, this is functionally equivalent to raw
		 * byte pairs from L4 header.
		 */
		.rxnfc_field = RXH_L4_B_0_1,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_SRC,
		.id = DPAA2_ETH_DIST_L4SRC,
		.size = 2,
	}, {
		.rxnfc_field = RXH_L4_B_2_3,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_DST,
		.id = DPAA2_ETH_DIST_L4DST,
		.size = 2,
	},
};
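/*
 * A worked example (a sketch of how the distribution key is assumed to be
 * composed): DPAA2_RXH_DEFAULT selects IPSRC (4) + IPDST (4) + L4SRC (2) +
 * L4DST (2) from the table above, so the extracted hash key would be 12
 * bytes, built by OR-ing the .id flags and concatenating the selected
 * fields in table order.
 */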

static struct dpni_stat {
	int	 page;
	int	 cnt;
	char	*name;
	char	*desc;
} dpni_stat_sysctls[DPAA2_NI_STAT_SYSCTLS] = {
	/* PAGE, COUNTER, NAME, DESCRIPTION */
	{  0, 0, "in_all_frames",	"All accepted ingress frames" },
	{  0, 1, "in_all_bytes",	"Bytes in all accepted ingress frames" },
	{  0, 2, "in_multi_frames",	"Multicast accepted ingress frames" },
	{  1, 0, "eg_all_frames",	"All egress frames transmitted" },
	{  1, 1, "eg_all_bytes",	"Bytes in all frames transmitted" },
	{  1, 2, "eg_multi_frames",	"Multicast egress frames transmitted" },
	{  2, 0, "in_filtered_frames",	"All ingress frames discarded due to "
	   				"filtering" },
	{  2, 1, "in_discarded_frames",	"All frames discarded due to errors" },
	{  2, 2, "in_nobuf_discards",	"Discards on ingress side due to buffer "
	   				"depletion in DPNI buffer pools" },
};

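/*
 * Rx context shared between dpaa2_ni_consume_frames() and dpaa2_ni_rx():
 * head/tail track an mbuf chain assembled over one dequeue batch, cnt counts
 * the frames in it, and last marks the final frame of the batch (field roles
 * inferred from their use in the Rx path).
 */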
struct dpaa2_ni_rx_ctx {
	struct mbuf	*head;
	struct mbuf	*tail;
	int		 cnt;
	bool		 last;
};

/* Device interface */
static int dpaa2_ni_probe(device_t);
static int dpaa2_ni_attach(device_t);
static int dpaa2_ni_detach(device_t);

/* DPAA2 network interface setup and configuration */
static int dpaa2_ni_setup(device_t);
static int dpaa2_ni_setup_channels(device_t);
static int dpaa2_ni_bind(device_t);
static int dpaa2_ni_setup_rx_dist(device_t);
static int dpaa2_ni_setup_irqs(device_t);
static int dpaa2_ni_setup_msi(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_dma(struct dpaa2_ni_softc *);

/* Tx/Rx flow configuration */
static int dpaa2_ni_setup_rx_flow(device_t, struct dpaa2_ni_fq *);
static int dpaa2_ni_setup_tx_flow(device_t, struct dpaa2_ni_fq *);
static int dpaa2_ni_setup_rx_err_flow(device_t, struct dpaa2_ni_fq *);

/* Configuration subroutines */
static int dpaa2_ni_set_buf_layout(device_t);
static int dpaa2_ni_set_pause_frame(device_t);
static int dpaa2_ni_set_qos_table(device_t);
static int dpaa2_ni_set_mac_addr(device_t);
static int dpaa2_ni_set_hash(device_t, uint64_t);
static int dpaa2_ni_set_dist_key(device_t, enum dpaa2_ni_dist_mode, uint64_t);

/* Frame descriptor routines */
static int dpaa2_ni_build_fd(struct dpaa2_ni_softc *, struct dpaa2_ni_tx_ring *,
    struct dpaa2_buf *, bus_dma_segment_t *, int, struct dpaa2_fd *);
static int dpaa2_ni_fd_err(struct dpaa2_fd *);
static uint32_t dpaa2_ni_fd_data_len(struct dpaa2_fd *);
static int dpaa2_ni_fd_format(struct dpaa2_fd *);
static bool dpaa2_ni_fd_short_len(struct dpaa2_fd *);
static int dpaa2_ni_fd_offset(struct dpaa2_fd *);

/* Various subroutines */
static int dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *, uint16_t, uint16_t);
static int dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *, uint8_t *);

/* Network interface routines */
static void dpaa2_ni_init(void *);
static int  dpaa2_ni_transmit(if_t, struct mbuf *);
static void dpaa2_ni_qflush(if_t);
static int  dpaa2_ni_ioctl(if_t, u_long, caddr_t);
static int  dpaa2_ni_update_mac_filters(if_t);
static u_int dpaa2_ni_add_maddr(void *, struct sockaddr_dl *, u_int);

/* Interrupt handlers */
static void dpaa2_ni_intr(void *);

/* MII handlers */
static void dpaa2_ni_miibus_statchg(device_t);
static int  dpaa2_ni_media_change(if_t);
static void dpaa2_ni_media_status(if_t, struct ifmediareq *);
static void dpaa2_ni_media_tick(void *);

/* Tx/Rx routines. */
static int dpaa2_ni_rx_cleanup(struct dpaa2_channel *);
static int dpaa2_ni_tx_cleanup(struct dpaa2_channel *);
static void dpaa2_ni_tx(struct dpaa2_ni_softc *, struct dpaa2_channel *,
    struct dpaa2_ni_tx_ring *, struct mbuf *);
static void dpaa2_ni_cleanup_task(void *, int);

/* Tx/Rx subroutines */
static int dpaa2_ni_consume_frames(struct dpaa2_channel *, struct dpaa2_ni_fq **,
    uint32_t *);
static int dpaa2_ni_rx(struct dpaa2_channel *, struct dpaa2_ni_fq *,
    struct dpaa2_fd *, struct dpaa2_ni_rx_ctx *);
static int dpaa2_ni_rx_err(struct dpaa2_channel *, struct dpaa2_ni_fq *,
    struct dpaa2_fd *);
static int dpaa2_ni_tx_conf(struct dpaa2_channel *, struct dpaa2_ni_fq *,
    struct dpaa2_fd *);

/* sysctl(9) */
static int dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS);
static int dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS);
static int dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS);

static int
dpaa2_ni_probe(device_t dev)
{
	/* DPNI device will be added by a parent resource container itself. */
	device_set_desc(dev, "DPAA2 Network Interface");
	return (BUS_PROBE_DEFAULT);
}

static int
dpaa2_ni_attach(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	device_t mcp_dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *mcp_dinfo;
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	if_t ifp;
	char tq_name[32];
	int error;

	sc->dev = dev;
	sc->ifp = NULL;
	sc->miibus = NULL;
	sc->mii = NULL;
	sc->media_status = 0;
	sc->if_flags = 0;
	sc->link_state = LINK_STATE_UNKNOWN;
	sc->buf_align = 0;

	/* For debug purposes only! */
	sc->rx_anomaly_frames = 0;
	sc->rx_single_buf_frames = 0;
	sc->rx_sg_buf_frames = 0;
	sc->rx_enq_rej_frames = 0;
	sc->rx_ieoi_err_frames = 0;
	sc->tx_single_buf_frames = 0;
	sc->tx_sg_frames = 0;

	DPAA2_ATOMIC_XCHG(&sc->buf_num, 0);
	DPAA2_ATOMIC_XCHG(&sc->buf_free, 0);

	sc->rxd_dmat = NULL;
	sc->qos_dmat = NULL;

	sc->qos_kcfg.dmap = NULL;
	sc->qos_kcfg.paddr = 0;
	sc->qos_kcfg.vaddr = NULL;

	sc->rxd_kcfg.dmap = NULL;
	sc->rxd_kcfg.paddr = 0;
	sc->rxd_kcfg.vaddr = NULL;

	sc->mac.dpmac_id = 0;
	sc->mac.phy_dev = NULL;
	memset(sc->mac.addr, 0, ETHER_ADDR_LEN);

	error = bus_alloc_resources(sc->dev, dpaa2_ni_spec, sc->res);
	if (error) {
		device_printf(dev, "%s: failed to allocate resources: "
		    "error=%d\n", __func__, error);
		goto err_exit;
	}

	/* Obtain MC portal. */
	mcp_dev = (device_t) rman_get_start(sc->res[DPAA2_NI_MCP_RID(0)]);
	mcp_dinfo = device_get_ivars(mcp_dev);
	dinfo->portal = mcp_dinfo->portal;

	mtx_init(&sc->lock, device_get_nameunit(dev), "dpaa2_ni", MTX_DEF);

	/* Allocate network interface */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "%s: failed to allocate network interface\n",
		    __func__);
		goto err_exit;
	}
	sc->ifp = ifp;
	if_initname(ifp, DPAA2_NI_IFNAME, device_get_unit(sc->dev));

	if_setsoftc(ifp, sc);
	if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST);
	if_setinitfn(ifp, dpaa2_ni_init);
	if_setioctlfn(ifp, dpaa2_ni_ioctl);
	if_settransmitfn(ifp, dpaa2_ni_transmit);
	if_setqflushfn(ifp, dpaa2_ni_qflush);

	if_setcapabilities(ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU);
	if_setcapenable(ifp, if_getcapabilities(ifp));

	DPAA2_CMD_INIT(&cmd);

	/* Open resource container and network interface object. */
	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	bzero(tq_name, sizeof(tq_name));
	snprintf(tq_name, sizeof(tq_name), "%s_tqbp", device_get_nameunit(dev));

	/*
	 * XXX-DSL: Release new buffers on Buffer Pool State Change Notification
	 *          (BPSCN) returned as a result to the VDQ command instead.
	 *          It is similar to CDAN processed in dpaa2_io_intr().
	 */
	/* Create a taskqueue thread to release new buffers to the pool. */
	sc->bp_taskq = taskqueue_create(tq_name, M_WAITOK,
	    taskqueue_thread_enqueue, &sc->bp_taskq);
	if (sc->bp_taskq == NULL) {
		device_printf(dev, "%s: failed to allocate task queue: %s\n",
		    __func__, tq_name);
		goto close_ni;
	}
	taskqueue_start_threads(&sc->bp_taskq, 1, PI_NET, "%s", tq_name);

	/* sc->cleanup_taskq = taskqueue_create("dpaa2_ch cleanup", M_WAITOK, */
	/*     taskqueue_thread_enqueue, &sc->cleanup_taskq); */
	/* taskqueue_start_threads(&sc->cleanup_taskq, 1, PI_NET, */
	/*     "dpaa2_ch cleanup"); */

	error = dpaa2_ni_setup(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup DPNI: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_channels(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup QBMan channels: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	error = dpaa2_ni_bind(dev);
	if (error) {
		device_printf(dev, "%s: failed to bind DPNI: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_irqs(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup IRQs: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_sysctls(sc);
	if (error) {
		device_printf(dev, "%s: failed to setup sysctls: error=%d\n",
		    __func__, error);
		goto close_ni;
	}

	ether_ifattach(sc->ifp, sc->mac.addr);
	callout_init(&sc->mii_callout, 0);

	return (0);

close_ni:
	DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (ENXIO);
}

static void
dpaa2_ni_fixed_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);

	DPNI_LOCK(sc);
	ifmr->ifm_count = 0;
	ifmr->ifm_mask = 0;
	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifmr->ifm_current = ifmr->ifm_active =
	    sc->fixed_ifmedia.ifm_cur->ifm_media;

	/*
	 * In non-PHY usecases, we need to signal link state up; otherwise
	 * certain things that require a link event (e.g. an async DHCP client
	 * started by devd) do not happen.
	 */
	if (if_getlinkstate(ifp) == LINK_STATE_UNKNOWN) {
		if_link_state_change(ifp, LINK_STATE_UP);
	}

	/*
	 * TODO: Check the status of the link partner (DPMAC, DPNI or other)
	 * and reset if down. This is different from DPAA2_MAC_LINK_TYPE_PHY,
	 * as the MC firmware sets the status there, instead of us telling the
	 * MC what it is.
	 */
	DPNI_UNLOCK(sc);
}

static void
dpaa2_ni_setup_fixed_link(struct dpaa2_ni_softc *sc)
{
	/*
	 * FIXME: When the DPNI is connected to a DPMAC, we can get the
	 * 'apparent' speed from it.
	 */
	sc->fixed_link = true;

	ifmedia_init(&sc->fixed_ifmedia, 0, dpaa2_ni_media_change,
		     dpaa2_ni_fixed_media_status);
	ifmedia_add(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL);
	ifmedia_set(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T);
}

static int
dpaa2_ni_detach(device_t dev)
{
	/* TBD */
	return (0);
}

/**
 * @brief Configure DPAA2 network interface object.
 */
static int
dpaa2_ni_setup(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_ep_desc ep1_desc, ep2_desc; /* endpoint descriptors */
	struct dpaa2_cmd cmd;
	uint8_t eth_bca[ETHER_ADDR_LEN]; /* broadcast physical address */
	uint16_t rc_token, ni_token, mac_token;
	struct dpaa2_mac_attr attr;
	enum dpaa2_mac_link_type link_type;
	uint32_t link;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Check if we can work with this DPNI object. */
	error = DPAA2_CMD_NI_GET_API_VERSION(dev, child, &cmd, &sc->api_major,
	    &sc->api_minor);
	if (error) {
		device_printf(dev, "%s: failed to get DPNI API version\n",
		    __func__);
		goto close_ni;
	}
	if (dpaa2_ni_cmp_api_version(sc, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
		device_printf(dev, "%s: DPNI API version %u.%u not supported, "
		    "need >= %u.%u\n", __func__, sc->api_major, sc->api_minor,
		    DPNI_VER_MAJOR, DPNI_VER_MINOR);
		error = ENODEV;
		goto close_ni;
	}

	/* Reset the DPNI object. */
	error = DPAA2_CMD_NI_RESET(dev, child, &cmd);
	if (error) {
		device_printf(dev, "%s: failed to reset DPNI: id=%d\n",
		    __func__, dinfo->id);
		goto close_ni;
	}

	/* Obtain attributes of the DPNI object. */
	error = DPAA2_CMD_NI_GET_ATTRIBUTES(dev, child, &cmd, &sc->attr);
	if (error) {
		device_printf(dev, "%s: failed to obtain DPNI attributes: "
		    "id=%d\n", __func__, dinfo->id);
		goto close_ni;
	}
	if (bootverbose) {
		device_printf(dev, "\toptions=%#x queues=%d tx_channels=%d "
		    "wriop_version=%#x\n", sc->attr.options, sc->attr.num.queues,
		    sc->attr.num.channels, sc->attr.wriop_ver);
		device_printf(dev, "\ttraffic classes: rx=%d tx=%d "
		    "cgs_groups=%d\n", sc->attr.num.rx_tcs, sc->attr.num.tx_tcs,
		    sc->attr.num.cgs);
		device_printf(dev, "\ttable entries: mac=%d vlan=%d qos=%d "
		    "fs=%d\n", sc->attr.entries.mac, sc->attr.entries.vlan,
		    sc->attr.entries.qos, sc->attr.entries.fs);
		device_printf(dev, "\tkey sizes: qos=%d fs=%d\n",
		    sc->attr.key_size.qos, sc->attr.key_size.fs);
	}

	/* Configure buffer layouts of the DPNI queues. */
	error = dpaa2_ni_set_buf_layout(dev);
	if (error) {
		device_printf(dev, "%s: failed to configure buffer layout\n",
		    __func__);
		goto close_ni;
	}

	/* Configure DMA resources. */
	error = dpaa2_ni_setup_dma(sc);
	if (error) {
		device_printf(dev, "%s: failed to setup DMA\n", __func__);
		goto close_ni;
	}

	/* Setup link between DPNI and an object it's connected to. */
	ep1_desc.obj_id = dinfo->id;
	ep1_desc.if_id = 0; /* the DPNI has only one endpoint */
	ep1_desc.type = dinfo->dtype;

	error = DPAA2_CMD_RC_GET_CONN(dev, child, DPAA2_CMD_TK(&cmd, rc_token),
	    &ep1_desc, &ep2_desc, &link);
	if (error) {
		device_printf(dev, "%s: failed to obtain an object DPNI is "
		    "connected to: error=%d\n", __func__, error);
	} else {
		device_printf(dev, "connected to %s (id=%d)\n",
		    dpaa2_ttos(ep2_desc.type), ep2_desc.obj_id);

		error = dpaa2_ni_set_mac_addr(dev);
		if (error) {
			device_printf(dev, "%s: failed to set MAC address: "
			    "error=%d\n", __func__, error);
		}

		if (ep2_desc.type == DPAA2_DEV_MAC) {
			/*
			 * This is the simplest case when DPNI is connected to
			 * DPMAC directly.
			 */
			sc->mac.dpmac_id = ep2_desc.obj_id;

			link_type = DPAA2_MAC_LINK_TYPE_NONE;

			/*
			 * Need to determine if DPMAC type is PHY (attached to
			 * conventional MII PHY) or FIXED (usually SFP/SerDes,
			 * link state managed by MC firmware).
			 */
			error = DPAA2_CMD_MAC_OPEN(sc->dev, child,
			    DPAA2_CMD_TK(&cmd, rc_token), sc->mac.dpmac_id,
			    &mac_token);
			/*
			 * Under VFIO, the DPMAC might be sitting in another
			 * container (DPRC) we don't have access to.
			 * Assume DPAA2_MAC_LINK_TYPE_FIXED if this is
			 * the case.
			 */
			if (error) {
				device_printf(dev, "%s: failed to open "
				    "connected DPMAC: %d (assuming it is in "
				    "another DPRC)\n", __func__,
				    sc->mac.dpmac_id);
				link_type = DPAA2_MAC_LINK_TYPE_FIXED;
			} else {
				error = DPAA2_CMD_MAC_GET_ATTRIBUTES(dev, child,
				    &cmd, &attr);
				if (error) {
					device_printf(dev, "%s: failed to get "
					    "DPMAC attributes: id=%d, "
					    "error=%d\n", __func__, dinfo->id,
					    error);
				} else {
					link_type = attr.link_type;
				}
			}
			DPAA2_CMD_MAC_CLOSE(dev, child, &cmd);

			if (link_type == DPAA2_MAC_LINK_TYPE_FIXED) {
				device_printf(dev, "connected DPMAC is in FIXED "
				    "mode\n");
				dpaa2_ni_setup_fixed_link(sc);
			} else if (link_type == DPAA2_MAC_LINK_TYPE_PHY) {
				device_printf(dev, "connected DPMAC is in PHY "
				    "mode\n");
				error = DPAA2_MC_GET_PHY_DEV(dev,
				    &sc->mac.phy_dev, sc->mac.dpmac_id);
				if (error == 0) {
					error = MEMAC_MDIO_SET_NI_DEV(
					    sc->mac.phy_dev, dev);
					if (error != 0) {
						device_printf(dev, "%s: failed "
						    "to set dpni dev on memac "
						    "mdio dev %s: error=%d\n",
						    __func__,
						    device_get_nameunit(
						    sc->mac.phy_dev), error);
					}
				}
				if (error == 0) {
					error = MEMAC_MDIO_GET_PHY_LOC(
					    sc->mac.phy_dev, &sc->mac.phy_loc);
					if (error == ENODEV) {
						error = 0;
					}
					if (error != 0) {
						device_printf(dev, "%s: failed "
						    "to get phy location from "
						    "memac mdio dev %s: error=%d\n",
						    __func__, device_get_nameunit(
						    sc->mac.phy_dev), error);
					}
				}
				if (error == 0) {
					error = mii_attach(sc->mac.phy_dev,
					    &sc->miibus, sc->ifp,
					    dpaa2_ni_media_change,
					    dpaa2_ni_media_status,
					    BMSR_DEFCAPMASK, sc->mac.phy_loc,
					    MII_OFFSET_ANY, 0);
					if (error != 0) {
						device_printf(dev, "%s: failed "
						    "to attach to miibus: "
						    "error=%d\n",
						    __func__, error);
					}
				}
				if (error == 0) {
					sc->mii = device_get_softc(sc->miibus);
				}
			} else {
				device_printf(dev, "%s: DPMAC link type is not "
				    "supported\n", __func__);
			}
		} else if (ep2_desc.type == DPAA2_DEV_NI ||
			   ep2_desc.type == DPAA2_DEV_MUX ||
			   ep2_desc.type == DPAA2_DEV_SW) {
			dpaa2_ni_setup_fixed_link(sc);
		}
	}

	/* Select mode to enqueue frames. */
	/* ... TBD ... */

	/*
	 * Update link configuration to enable Rx/Tx pause frames support.
	 *
	 * NOTE: MC may generate an interrupt to the DPMAC and request changes
	 *       in link configuration. It might be necessary to attach miibus
	 *       and PHY before this point.
	 */
	error = dpaa2_ni_set_pause_frame(dev);
	if (error) {
		device_printf(dev, "%s: failed to configure Rx/Tx pause "
		    "frames\n", __func__);
		goto close_ni;
	}

	/* Configure ingress traffic classification. */
	error = dpaa2_ni_set_qos_table(dev);
	if (error) {
		device_printf(dev, "%s: failed to configure QoS table: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	/* Add broadcast physical address to the MAC filtering table. */
	memset(eth_bca, 0xff, ETHER_ADDR_LEN);
	error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, DPAA2_CMD_TK(&cmd,
	    ni_token), eth_bca);
	if (error) {
		device_printf(dev, "%s: failed to add broadcast physical "
		    "address to the MAC filtering table\n", __func__);
		goto close_ni;
	}

	/* Set the maximum allowed length for received frames. */
	error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd, DPAA2_ETH_MFL);
	if (error) {
		device_printf(dev, "%s: failed to set maximum length for "
		    "received frames\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

/**
 * @brief Configure QBMan channels and register data availability notifications.
 */
static int
dpaa2_ni_setup_channels(device_t dev)
{
	device_t iodev, condev, bpdev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	uint32_t i, num_chan;
	int error;

	/* Calculate the number of channels based on the allocated resources */
	for (i = 0; i < DPAA2_NI_IO_RES_NUM; i++) {
		if (!sc->res[DPAA2_NI_IO_RID(i)]) {
			break;
		}
	}
	num_chan = i;
	for (i = 0; i < DPAA2_NI_CON_RES_NUM; i++) {
		if (!sc->res[DPAA2_NI_CON_RID(i)]) {
			break;
		}
	}
	num_chan = i < num_chan ? i : num_chan;
	sc->chan_n = num_chan > DPAA2_MAX_CHANNELS
	    ? DPAA2_MAX_CHANNELS : num_chan;
	sc->chan_n = sc->chan_n > sc->attr.num.queues
	    ? sc->attr.num.queues : sc->chan_n;
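	/*
	 * For example: with 8 DPIOs, 4 DPCONs and 16 DPNI queues available,
	 * chan_n = min(8, 4, DPAA2_MAX_CHANNELS, 16) = 4.
	 */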

	KASSERT(sc->chan_n > 0u, ("%s: positive number of channels expected: "
	    "chan_n=%d", __func__, sc->chan_n));

	device_printf(dev, "channels=%d\n", sc->chan_n);

	for (i = 0; i < sc->chan_n; i++) {
		iodev = (device_t)rman_get_start(sc->res[DPAA2_NI_IO_RID(i)]);
		condev = (device_t)rman_get_start(sc->res[DPAA2_NI_CON_RID(i)]);
		/* Only one buffer pool available at the moment */
		bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);

		error = dpaa2_chan_setup(dev, iodev, condev, bpdev,
		    &sc->channels[i], i, dpaa2_ni_cleanup_task);
		if (error != 0) {
			device_printf(dev, "%s: dpaa2_chan_setup() failed: "
			    "error=%d, chan_id=%d\n", __func__, error, i);
			return (error);
		}
	}

	/* There is exactly one Rx error queue per network interface */
	error = dpaa2_chan_setup_fq(dev, sc->channels[0], DPAA2_NI_QUEUE_RX_ERR);
	if (error != 0) {
		device_printf(dev, "%s: failed to prepare RxError queue: "
		    "error=%d\n", __func__, error);
		return (error);
	}

	return (0);
}

/**
 * @brief Bind DPNI to DPBPs, DPIOs, frame queues and channels.
 */
static int
dpaa2_ni_bind(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	device_t bp_dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *bp_info;
	struct dpaa2_cmd cmd;
	struct dpaa2_ni_pools_cfg pools_cfg;
	struct dpaa2_ni_err_cfg err_cfg;
	struct dpaa2_channel *chan;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Select the buffer pool (only one available at the moment). */
	bp_dev = (device_t) rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
	bp_info = device_get_ivars(bp_dev);

	/* Configure the buffer pool. */
	pools_cfg.pools_num = 1;
	pools_cfg.pools[0].bp_obj_id = bp_info->id;
	pools_cfg.pools[0].backup_flag = 0;
	pools_cfg.pools[0].buf_sz = sc->buf_sz;
	error = DPAA2_CMD_NI_SET_POOLS(dev, child, &cmd, &pools_cfg);
	if (error) {
		device_printf(dev, "%s: failed to set buffer pools\n", __func__);
		goto close_ni;
	}

	/* Setup ingress traffic distribution. */
	error = dpaa2_ni_setup_rx_dist(dev);
	if (error && error != EOPNOTSUPP) {
		device_printf(dev, "%s: failed to setup ingress traffic "
		    "distribution\n", __func__);
		goto close_ni;
	}
	if (bootverbose && error == EOPNOTSUPP) {
		device_printf(dev, "Ingress traffic distribution not "
		    "supported\n");
	}

	/* Configure handling of error frames. */
	err_cfg.err_mask = DPAA2_NI_FAS_RX_ERR_MASK;
	err_cfg.set_err_fas = false;
	err_cfg.action = DPAA2_NI_ERR_DISCARD;
	error = DPAA2_CMD_NI_SET_ERR_BEHAVIOR(dev, child, &cmd, &err_cfg);
	if (error) {
		device_printf(dev, "%s: failed to set errors behavior\n",
		    __func__);
		goto close_ni;
	}

	/* Configure channel queues to generate CDANs. */
	for (uint32_t i = 0; i < sc->chan_n; i++) {
		chan = sc->channels[i];

		/* Setup Rx flows. */
		for (uint32_t j = 0; j < chan->rxq_n; j++) {
			error = dpaa2_ni_setup_rx_flow(dev, &chan->rx_queues[j]);
			if (error) {
				device_printf(dev, "%s: failed to setup Rx "
				    "flow: error=%d\n", __func__, error);
				goto close_ni;
			}
		}

		/* Setup Tx flow. */
		error = dpaa2_ni_setup_tx_flow(dev, &chan->txc_queue);
		if (error) {
			device_printf(dev, "%s: failed to setup Tx "
			    "flow: error=%d\n", __func__, error);
			goto close_ni;
		}
	}

	/* Configure RxError queue to generate CDAN. */
	error = dpaa2_ni_setup_rx_err_flow(dev, &sc->rxe_queue);
	if (error) {
		device_printf(dev, "%s: failed to setup RxError flow: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	/*
	 * Get the Queuing Destination ID (QDID) that should be used for frame
	 * enqueue operations.
	 */
	error = DPAA2_CMD_NI_GET_QDID(dev, child, &cmd, DPAA2_NI_QUEUE_TX,
	    &sc->tx_qdid);
	if (error) {
		device_printf(dev, "%s: failed to get Tx queuing destination "
		    "ID\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

/**
 * @brief Setup ingress traffic distribution.
 *
 * NOTE: Ingress traffic distribution is valid only when the DPNI_OPT_NO_FS
 *	 option hasn't been set for the DPNI and the number of DPNI queues is
 *	 greater than 1.
 */
static int
dpaa2_ni_setup_rx_dist(device_t dev)
{
	/*
	 * Have the interface implicitly distribute traffic based on the default
	 * hash key.
	 */
	return (dpaa2_ni_set_hash(dev, DPAA2_RXH_DEFAULT));
}

static int
dpaa2_ni_setup_rx_flow(device_t dev, struct dpaa2_ni_fq *fq)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *con_info;
	struct dpaa2_cmd cmd;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Obtain DPCON associated with the FQ's channel. */
	con_info = device_get_ivars(fq->chan->con_dev);

	queue_cfg.type = DPAA2_NI_QUEUE_RX;
	queue_cfg.tc = fq->tc;
	queue_cfg.idx = fq->flowid;
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain Rx queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	fq->fqid = queue_cfg.fqid;

	queue_cfg.dest_id = con_info->id;
	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
	queue_cfg.priority = 1;
	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
	queue_cfg.options =
	    DPAA2_NI_QUEUE_OPT_USER_CTX |
	    DPAA2_NI_QUEUE_OPT_DEST;
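	/*
	 * The user_ctx stored here is assumed to come back verbatim with
	 * every dequeue result for this FQ, which is how the Rx path recovers
	 * the struct dpaa2_ni_fq pointer without any lookup.
	 */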
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to update Rx queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	if (bootverbose) {
		device_printf(dev, "RX queue idx=%d, tc=%d, chan=%d, fqid=%d, "
		    "user_ctx=%#jx\n", fq->flowid, fq->tc, fq->chan->id,
		    fq->fqid, (uintmax_t)(uintptr_t)fq);
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

static int
dpaa2_ni_setup_tx_flow(device_t dev, struct dpaa2_ni_fq *fq)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_channel *ch = fq->chan;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *con_info;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	struct dpaa2_ni_tx_ring *tx;
	struct dpaa2_buf *buf;
	struct dpaa2_cmd cmd;
	uint32_t tx_rings_n = 0;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Obtain DPCON associated with the FQ's channel. */
	con_info = device_get_ivars(fq->chan->con_dev);

	KASSERT(sc->attr.num.tx_tcs <= DPAA2_MAX_TCS,
	    ("%s: too many Tx traffic classes: tx_tcs=%d\n", __func__,
	    sc->attr.num.tx_tcs));
	KASSERT(DPAA2_NI_BUFS_PER_TX <= DPAA2_NI_MAX_BPTX,
	    ("%s: too many Tx buffers (%d): max=%d\n", __func__,
	    DPAA2_NI_BUFS_PER_TX, DPAA2_NI_MAX_BPTX));

	/* Setup Tx rings. */
	for (int i = 0; i < sc->attr.num.tx_tcs; i++) {
		queue_cfg.type = DPAA2_NI_QUEUE_TX;
		queue_cfg.tc = i;
		queue_cfg.idx = fq->flowid;
		queue_cfg.chan_id = fq->chan->id;

		error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
		if (error) {
			device_printf(dev, "%s: failed to obtain Tx queue "
			    "configuration: tc=%d, flowid=%d\n", __func__,
			    queue_cfg.tc, queue_cfg.idx);
			goto close_ni;
		}

		tx = &fq->tx_rings[i];
		tx->fq = fq;
		tx->fqid = queue_cfg.fqid;
		tx->txid = tx_rings_n;

		if (bootverbose) {
			device_printf(dev, "TX queue idx=%d, tc=%d, chan=%d, "
			    "fqid=%d\n", fq->flowid, i, fq->chan->id,
			    queue_cfg.fqid);
		}

		mtx_init(&tx->lock, "dpaa2_tx_ring", NULL, MTX_DEF);

		/* Allocate Tx ring buffer. */
		tx->br = buf_ring_alloc(DPAA2_TX_BUFRING_SZ, M_DEVBUF, M_NOWAIT,
		    &tx->lock);
		if (tx->br == NULL) {
			device_printf(dev, "%s: failed to setup Tx ring buffer:"
			    " fqid=%d\n", __func__, tx->fqid);
			goto close_ni;
		}

		/* Configure Tx buffers */
		for (uint64_t j = 0; j < DPAA2_NI_BUFS_PER_TX; j++) {
			buf = malloc(sizeof(struct dpaa2_buf), M_DPAA2_TXB,
			    M_WAITOK); /* M_WAITOK never returns NULL */
			/* Keep DMA tag and Tx ring linked to the buffer */
			DPAA2_BUF_INIT_TAGOPT(buf, ch->tx_dmat, tx);

			buf->sgt = malloc(sizeof(struct dpaa2_buf), M_DPAA2_TXB,
			    M_WAITOK); /* M_WAITOK never returns NULL */
			/* Link SGT to DMA tag and back to its Tx buffer */
			DPAA2_BUF_INIT_TAGOPT(buf->sgt, ch->sgt_dmat, buf);

			error = dpaa2_buf_seed_txb(dev, buf);

			/* Add Tx buffer to the ring */
			buf_ring_enqueue(tx->br, buf);
		}

		tx_rings_n++;
	}

	/* All Tx queues which belong to the same flowid have the same qdbin. */
	fq->tx_qdbin = queue_cfg.qdbin;

	queue_cfg.type = DPAA2_NI_QUEUE_TX_CONF;
	queue_cfg.tc = 0; /* ignored for TxConf queue */
	queue_cfg.idx = fq->flowid;
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain TxConf queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	fq->fqid = queue_cfg.fqid;

	queue_cfg.dest_id = con_info->id;
	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
	queue_cfg.priority = 0;
	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
	queue_cfg.options =
	    DPAA2_NI_QUEUE_OPT_USER_CTX |
	    DPAA2_NI_QUEUE_OPT_DEST;
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to update TxConf queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

static int
dpaa2_ni_setup_rx_err_flow(device_t dev, struct dpaa2_ni_fq *fq)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *con_info;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Obtain DPCON associated with the FQ's channel. */
	con_info = device_get_ivars(fq->chan->con_dev);

	queue_cfg.type = DPAA2_NI_QUEUE_RX_ERR;
	queue_cfg.tc = fq->tc; /* ignored */
	queue_cfg.idx = fq->flowid; /* ignored */
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain RxErr queue "
		    "configuration\n", __func__);
		goto close_ni;
	}

	fq->fqid = queue_cfg.fqid;

	queue_cfg.dest_id = con_info->id;
	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
	queue_cfg.priority = 1;
	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
	queue_cfg.options =
	    DPAA2_NI_QUEUE_OPT_USER_CTX |
	    DPAA2_NI_QUEUE_OPT_DEST;
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to update RxErr queue "
		    "configuration\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

/**
 * @brief Configure DPNI object to generate interrupts.
 */
static int
dpaa2_ni_setup_irqs(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Configure IRQs. */
	error = dpaa2_ni_setup_msi(sc);
	if (error) {
		device_printf(dev, "%s: failed to allocate MSI\n", __func__);
		goto close_ni;
	}
	if ((sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) {
		device_printf(dev, "%s: failed to allocate IRQ resource\n",
		    __func__);
		goto close_ni;
	}
	if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, dpaa2_ni_intr, sc, &sc->intr)) {
		device_printf(dev, "%s: failed to setup IRQ resource\n",
		    __func__);
		goto close_ni;
	}

	error = DPAA2_CMD_NI_SET_IRQ_MASK(dev, child, &cmd, DPNI_IRQ_INDEX,
	    DPNI_IRQ_LINK_CHANGED | DPNI_IRQ_EP_CHANGED);
	if (error) {
		device_printf(dev, "%s: failed to set DPNI IRQ mask\n",
		    __func__);
		goto close_ni;
	}

	error = DPAA2_CMD_NI_SET_IRQ_ENABLE(dev, child, &cmd, DPNI_IRQ_INDEX,
	    true);
	if (error) {
		device_printf(dev, "%s: failed to enable DPNI IRQ\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

/**
 * @brief Allocate MSI interrupts for DPNI.
 */
static int
dpaa2_ni_setup_msi(struct dpaa2_ni_softc *sc)
{
	int val;

	val = pci_msi_count(sc->dev);
	if (val < DPAA2_NI_MSI_COUNT)
		device_printf(sc->dev, "MSI: actual=%d, expected=%d\n", val,
		    DPAA2_NI_MSI_COUNT);
	val = MIN(val, DPAA2_NI_MSI_COUNT);

	if (pci_alloc_msi(sc->dev, &val) != 0)
		return (EINVAL);

	for (int i = 0; i < val; i++)
		sc->irq_rid[i] = i + 1;

	return (0);
}
1584 
1585 /**
1586  * @brief Update DPNI according to the updated interface capabilities.
1587  */
1588 static int
1589 dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *sc)
1590 {
1591 	const bool en_rxcsum = if_getcapenable(sc->ifp) & IFCAP_RXCSUM;
1592 	const bool en_txcsum = if_getcapenable(sc->ifp) & IFCAP_TXCSUM;
1593 	device_t pdev = device_get_parent(sc->dev);
1594 	device_t dev = sc->dev;
1595 	device_t child = dev;
1596 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1597 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1598 	struct dpaa2_cmd cmd;
1599 	uint16_t rc_token, ni_token;
1600 	int error;
1601 
1602 	DPAA2_CMD_INIT(&cmd);
1603 
1604 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1605 	if (error) {
1606 		device_printf(dev, "%s: failed to open resource container: "
1607 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
1608 		goto err_exit;
1609 	}
1610 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1611 	if (error) {
1612 		device_printf(dev, "%s: failed to open network interface: "
1613 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
1614 		goto close_rc;
1615 	}
1616 
1617 	/* Setup checksums validation. */
1618 	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1619 	    DPAA2_NI_OFL_RX_L3_CSUM, en_rxcsum);
1620 	if (error) {
1621 		device_printf(dev, "%s: failed to %s L3 checksum validation\n",
1622 		    __func__, en_rxcsum ? "enable" : "disable");
1623 		goto close_ni;
1624 	}
1625 	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1626 	    DPAA2_NI_OFL_RX_L4_CSUM, en_rxcsum);
1627 	if (error) {
1628 		device_printf(dev, "%s: failed to %s L4 checksum validation\n",
1629 		    __func__, en_rxcsum ? "enable" : "disable");
1630 		goto close_ni;
1631 	}
1632 
1633 	/* Setup checksums generation. */
1634 	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1635 	    DPAA2_NI_OFL_TX_L3_CSUM, en_txcsum);
1636 	if (error) {
1637 		device_printf(dev, "%s: failed to %s L3 checksum generation\n",
1638 		    __func__, en_txcsum ? "enable" : "disable");
1639 		goto close_ni;
1640 	}
1641 	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1642 	    DPAA2_NI_OFL_TX_L4_CSUM, en_txcsum);
1643 	if (error) {
1644 		device_printf(dev, "%s: failed to %s L4 checksum generation\n",
1645 		    __func__, en_txcsum ? "enable" : "disable");
1646 		goto close_ni;
1647 	}
1648 
1649 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1650 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1651 	return (0);
1652 
1653 close_ni:
1654 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1655 close_rc:
1656 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1657 err_exit:
1658 	return (error);
1659 }
1660 
1661 /**
1662  * @brief Update DPNI according to the updated interface flags.
1663  */
1664 static int
1665 dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *sc)
1666 {
1667 	const bool en_promisc = if_getflags(sc->ifp) & IFF_PROMISC;
1668 	const bool en_allmulti = if_getflags(sc->ifp) & IFF_ALLMULTI;
1669 	device_t pdev = device_get_parent(sc->dev);
1670 	device_t dev = sc->dev;
1671 	device_t child = dev;
1672 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1673 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1674 	struct dpaa2_cmd cmd;
1675 	uint16_t rc_token, ni_token;
1676 	int error;
1677 
1678 	DPAA2_CMD_INIT(&cmd);
1679 
1680 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1681 	if (error) {
1682 		device_printf(dev, "%s: failed to open resource container: "
1683 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
1684 		goto err_exit;
1685 	}
1686 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1687 	if (error) {
1688 		device_printf(dev, "%s: failed to open network interface: "
1689 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
1690 		goto close_rc;
1691 	}
1692 
1693 	error = DPAA2_CMD_NI_SET_MULTI_PROMISC(dev, child, &cmd,
1694 	    en_promisc || en_allmulti);
1695 	if (error) {
1696 		device_printf(dev, "%s: failed to %s multicast promiscuous "
1697 		    "mode\n", __func__,
 		    (en_promisc || en_allmulti) ? "enable" : "disable");
1698 		goto close_ni;
1699 	}
1700 
1701 	error = DPAA2_CMD_NI_SET_UNI_PROMISC(dev, child, &cmd, en_promisc);
1702 	if (error) {
1703 		device_printf(dev, "%s: failed to %s unicast promiscuous mode\n",
1704 		    __func__, en_promisc ? "enable" : "disable");
1705 		goto close_ni;
1706 	}
1707 
1708 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1709 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1710 	return (0);
1711 
1712 close_ni:
1713 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1714 close_rc:
1715 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1716 err_exit:
1717 	return (error);
1718 }
1719 
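/**
 * @brief Set up sysctl(9) nodes to expose DPNI statistics and per-channel
 * counters.
 *
 * Assuming the standard device sysctl tree layout, the counters should be
 * reachable as, e.g.:
 *
 *	# sysctl dev.dpaa2_ni.0.stats
 *	# sysctl dev.dpaa2_ni.0.channels.0.tx_frames
 *
 * (the "dpaa2_ni" node name is an assumption here, derived from the device
 * name).
 */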
1720 static int
1721 dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *sc)
1722 {
1723 	struct sysctl_ctx_list *ctx;
1724 	struct sysctl_oid *node, *node2;
1725 	struct sysctl_oid_list *parent, *parent2;
1726 	char cbuf[128];
1727 	int i;
1728 
1729 	ctx = device_get_sysctl_ctx(sc->dev);
1730 	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1731 
1732 	/* Add DPNI statistics. */
1733 	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
1734 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Statistics");
1735 	parent = SYSCTL_CHILDREN(node);
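	/*
	 * NOTE: The loop index is used as the OID number (instead of
	 * OID_AUTO), presumably so that the statistics handler can recover
	 * which counter to collect from oidp->oid_number.
	 */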
1736 	for (i = 0; i < DPAA2_NI_STAT_SYSCTLS; ++i) {
1737 		SYSCTL_ADD_PROC(ctx, parent, i, dpni_stat_sysctls[i].name,
1738 		    CTLTYPE_U64 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_stats,
1739 		    "IU", dpni_stat_sysctls[i].desc);
1740 	}
1741 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_anomaly_frames",
1742 	    CTLFLAG_RD, &sc->rx_anomaly_frames,
1743 	    "Rx frames in the buffers outside of the buffer pools");
1744 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_single_buf_frames",
1745 	    CTLFLAG_RD, &sc->rx_single_buf_frames,
1746 	    "Rx frames in single buffers");
1747 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_sg_buf_frames",
1748 	    CTLFLAG_RD, &sc->rx_sg_buf_frames,
1749 	    "Rx frames in scatter/gather list");
1750 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_enq_rej_frames",
1751 	    CTLFLAG_RD, &sc->rx_enq_rej_frames,
1752 	    "Enqueue rejected by QMan");
1753 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_ieoi_err_frames",
1754 	    CTLFLAG_RD, &sc->rx_ieoi_err_frames,
1755 	    "QMan IEOI error");
1756 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_single_buf_frames",
1757 	    CTLFLAG_RD, &sc->tx_single_buf_frames,
1758 	    "Tx single buffer frames");
1759 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_sg_frames",
1760 	    CTLFLAG_RD, &sc->tx_sg_frames,
1761 	    "Tx S/G frames");
1762 
1763 	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_num",
1764 	    CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_num,
1765 	    "IU", "number of Rx buffers in the buffer pool");
1766 	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_free",
1767 	    CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_free,
1768 	    "IU", "number of free Rx buffers in the buffer pool");
1769 
1770 	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1771 
1772 	/* Add channels statistics. */
1773 	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "channels",
1774 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channels");
1775 	parent = SYSCTL_CHILDREN(node);
1776 	for (int i = 0; i < sc->chan_n; i++) {
1777 		snprintf(cbuf, sizeof(cbuf), "%d", i);
1778 
1779 		node2 = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, cbuf,
1780 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channel");
1781 		parent2 = SYSCTL_CHILDREN(node2);
1782 
1783 		SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_frames",
1784 		    CTLFLAG_RD, &sc->channels[i]->tx_frames,
1785 		    "Tx frames counter");
1786 		SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_dropped",
1787 		    CTLFLAG_RD, &sc->channels[i]->tx_dropped,
1788 		    "Tx dropped counter");
1789 	}
1790 
1791 	return (0);
1792 }
1793 
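/**
 * @brief Create DMA tags for the Rx distribution key and the QoS key
 * configuration buffers.
 */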
1794 static int
1795 dpaa2_ni_setup_dma(struct dpaa2_ni_softc *sc)
1796 {
1797 	device_t dev = sc->dev;
1798 	int error;
1799 
1800 	KASSERT((sc->buf_align == BUF_ALIGN) || (sc->buf_align == BUF_ALIGN_V1),
1801 	    ("unexpected buffer alignment: %d\n", sc->buf_align));
1802 
1803 	/* DMA tag for Rx distribution key. */
1804 	error = bus_dma_tag_create(
1805 	    bus_get_dma_tag(dev),
1806 	    PAGE_SIZE, 0,		/* alignment, boundary */
1807 	    BUS_SPACE_MAXADDR,		/* low restricted addr */
1808 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
1809 	    NULL, NULL,			/* filter, filterarg */
1810 	    DPAA2_CLASSIFIER_DMA_SIZE, 1, /* maxsize, nsegments */
1811 	    DPAA2_CLASSIFIER_DMA_SIZE, 0, /* maxsegsize, flags */
1812 	    NULL, NULL,			/* lockfunc, lockarg */
1813 	    &sc->rxd_dmat);
1814 	if (error) {
1815 		device_printf(dev, "%s: failed to create DMA tag for Rx "
1816 		    "distribution key\n", __func__);
1817 		return (error);
1818 	}
1819 
1820 	error = bus_dma_tag_create(
1821 	    bus_get_dma_tag(dev),
1822 	    PAGE_SIZE, 0,		/* alignment, boundary */
1823 	    BUS_SPACE_MAXADDR,		/* low restricted addr */
1824 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
1825 	    NULL, NULL,			/* filter, filterarg */
1826 	    ETH_QOS_KCFG_BUF_SIZE, 1,	/* maxsize, nsegments */
1827 	    ETH_QOS_KCFG_BUF_SIZE, 0,	/* maxsegsize, flags */
1828 	    NULL, NULL,			/* lockfunc, lockarg */
1829 	    &sc->qos_dmat);
1830 	if (error) {
1831 		device_printf(dev, "%s: failed to create DMA tag for QoS key\n",
1832 		    __func__);
1833 		return (error);
1834 	}
1835 
1836 	return (0);
1837 }
1838 
1839 /**
1840  * @brief Configure buffer layouts of the different DPNI queues.
1841  */
1842 static int
1843 dpaa2_ni_set_buf_layout(device_t dev)
1844 {
1845 	device_t pdev = device_get_parent(dev);
1846 	device_t child = dev;
1847 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1848 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1849 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
1850 	struct dpaa2_ni_buf_layout buf_layout = {0};
1851 	struct dpaa2_cmd cmd;
1852 	uint16_t rc_token, ni_token;
1853 	int error;
1854 
1855 	DPAA2_CMD_INIT(&cmd);
1856 
1857 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1858 	if (error) {
1859 		device_printf(dev, "%s: failed to open resource container: "
1860 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
1861 		goto err_exit;
1862 	}
1863 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1864 	if (error) {
1865 		device_printf(sc->dev, "%s: failed to open network interface: "
1866 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
1867 		goto close_rc;
1868 	}
1869 
1870 	/*
1871 	 * Select Rx/Tx buffer alignment. It's necessary to ensure that the
1872 	 * buffer size seen by WRIOP is a multiple of 64 or 256 bytes depending
1873 	 * on the WRIOP version.
1874 	 */
1875 	sc->buf_align = (sc->attr.wriop_ver == WRIOP_VERSION(0, 0, 0) ||
1876 	    sc->attr.wriop_ver == WRIOP_VERSION(1, 0, 0))
1877 	    ? BUF_ALIGN_V1 : BUF_ALIGN;
1878 
1879 	/*
1880 	 * Round the buffer size down so that the size seen by WRIOP is a
1881 	 * whole multiple of the alignment selected above.
1882 	 */
1883 	sc->buf_sz = ALIGN_DOWN(DPAA2_RX_BUF_SIZE, sc->buf_align);
1884 
1885 	if (bootverbose) {
1886 		device_printf(dev, "Rx/Tx buffers: size=%d, alignment=%d\n",
1887 		    sc->buf_sz, sc->buf_align);
1888 	}
1889 
1890 	/*
1891 	 *    Frame Descriptor       Tx buffer layout
1892 	 *
1893 	 *                ADDR -> |---------------------|
1894 	 *                        | SW FRAME ANNOTATION | BUF_SWA_SIZE bytes
1895 	 *                        |---------------------|
1896 	 *                        | HW FRAME ANNOTATION | BUF_TX_HWA_SIZE bytes
1897 	 *                        |---------------------|
1898 	 *                        |    DATA HEADROOM    |
1899 	 *       ADDR + OFFSET -> |---------------------|
1900 	 *                        |                     |
1901 	 *                        |                     |
1902 	 *                        |     FRAME DATA      |
1903 	 *                        |                     |
1904 	 *                        |                     |
1905 	 *                        |---------------------|
1906 	 *                        |    DATA TAILROOM    |
1907 	 *                        |---------------------|
1908 	 *
1909 	 * NOTE: It's for a single buffer frame only.
1910 	 */
1911 	buf_layout.queue_type = DPAA2_NI_QUEUE_TX;
1912 	buf_layout.pd_size = BUF_SWA_SIZE;
1913 	buf_layout.pass_timestamp = true;
1914 	buf_layout.pass_frame_status = true;
1915 	buf_layout.options =
1916 	    BUF_LOPT_PRIV_DATA_SZ |
1917 	    BUF_LOPT_TIMESTAMP | /* requires 128 bytes in HWA */
1918 	    BUF_LOPT_FRAME_STATUS;
1919 	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
1920 	if (error) {
1921 		device_printf(dev, "%s: failed to set Tx buffer layout\n",
1922 		    __func__);
1923 		goto close_ni;
1924 	}
1925 
1926 	/* Tx-confirmation buffer layout */
1927 	buf_layout.queue_type = DPAA2_NI_QUEUE_TX_CONF;
1928 	buf_layout.options =
1929 	    BUF_LOPT_TIMESTAMP |
1930 	    BUF_LOPT_FRAME_STATUS;
1931 	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
1932 	if (error) {
1933 		device_printf(dev, "%s: failed to set TxConf buffer layout\n",
1934 		    __func__);
1935 		goto close_ni;
1936 	}
1937 
1938 	/*
1939 	 * Driver should reserve the amount of space indicated by this command
1940 	 * as headroom in all Tx frames.
1941 	 */
1942 	error = DPAA2_CMD_NI_GET_TX_DATA_OFF(dev, child, &cmd, &sc->tx_data_off);
1943 	if (error) {
1944 		device_printf(dev, "%s: failed to obtain Tx data offset\n",
1945 		    __func__);
1946 		goto close_ni;
1947 	}
1948 
1949 	if (bootverbose) {
1950 		device_printf(dev, "Tx data offset=%d\n", sc->tx_data_off);
1951 	}
1952 	if ((sc->tx_data_off % 64) != 0) {
1953 		device_printf(dev, "Tx data offset (%d) is not a multiple of "
1954 		    "64 bytes\n", sc->tx_data_off);
1955 	}
1956 
1957 	/*
1958 	 *    Frame Descriptor       Rx buffer layout
1959 	 *
1960 	 *                ADDR -> |---------------------|
1961 	 *                        | SW FRAME ANNOTATION | BUF_SWA_SIZE bytes
1962 	 *                        |---------------------|
1963 	 *                        | HW FRAME ANNOTATION | BUF_RX_HWA_SIZE bytes
1964 	 *                        |---------------------|
1965 	 *                        |    DATA HEADROOM    | OFFSET-BUF_RX_HWA_SIZE
1966 	 *       ADDR + OFFSET -> |---------------------|
1967 	 *                        |                     |
1968 	 *                        |                     |
1969 	 *                        |     FRAME DATA      |
1970 	 *                        |                     |
1971 	 *                        |                     |
1972 	 *                        |---------------------|
1973 	 *                        |    DATA TAILROOM    | 0 bytes
1974 	 *                        |---------------------|
1975 	 *
1976 	 * NOTE: It's for a single buffer frame only.
1977 	 */
1978 	buf_layout.queue_type = DPAA2_NI_QUEUE_RX;
1979 	buf_layout.pd_size = BUF_SWA_SIZE;
1980 	buf_layout.fd_align = sc->buf_align;
1981 	buf_layout.head_size = sc->tx_data_off - BUF_RX_HWA_SIZE - BUF_SWA_SIZE;
1982 	buf_layout.tail_size = 0;
1983 	buf_layout.pass_frame_status = true;
1984 	buf_layout.pass_parser_result = true;
1985 	buf_layout.pass_timestamp = true;
1986 	buf_layout.options =
1987 	    BUF_LOPT_PRIV_DATA_SZ |
1988 	    BUF_LOPT_DATA_ALIGN |
1989 	    BUF_LOPT_DATA_HEAD_ROOM |
1990 	    BUF_LOPT_DATA_TAIL_ROOM |
1991 	    BUF_LOPT_FRAME_STATUS |
1992 	    BUF_LOPT_PARSER_RESULT |
1993 	    BUF_LOPT_TIMESTAMP;
1994 	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
1995 	if (error) {
1996 		device_printf(dev, "%s: failed to set Rx buffer layout\n",
1997 		    __func__);
1998 		goto close_ni;
1999 	}
2000 
2001 	error = 0;
2002 close_ni:
2003 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2004 close_rc:
2005 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2006 err_exit:
2007 	return (error);
2008 }
2009 
2010 /**
2011  * @brief Enable Rx/Tx pause frames.
2012  *
2013  * NOTE: DPNI stops sending when a pause frame is received (Rx frame) or DPNI
2014  *       itself generates pause frames (Tx frame).
2015  */
2016 static int
2017 dpaa2_ni_set_pause_frame(device_t dev)
2018 {
2019 	device_t pdev = device_get_parent(dev);
2020 	device_t child = dev;
2021 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2022 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2023 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2024 	struct dpaa2_ni_link_cfg link_cfg = {0};
2025 	struct dpaa2_cmd cmd;
2026 	uint16_t rc_token, ni_token;
2027 	int error;
2028 
2029 	DPAA2_CMD_INIT(&cmd);
2030 
2031 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2032 	if (error) {
2033 		device_printf(dev, "%s: failed to open resource container: "
2034 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2035 		goto err_exit;
2036 	}
2037 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2038 	if (error) {
2039 		device_printf(sc->dev, "%s: failed to open network interface: "
2040 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2041 		goto close_rc;
2042 	}
2043 
2044 	error = DPAA2_CMD_NI_GET_LINK_CFG(dev, child, &cmd, &link_cfg);
2045 	if (error) {
2046 		device_printf(dev, "%s: failed to obtain link configuration: "
2047 		    "error=%d\n", __func__, error);
2048 		goto close_ni;
2049 	}
2050 
2051 	/* Enable both Rx and Tx pause frames by default. */
2052 	link_cfg.options |= DPAA2_NI_LINK_OPT_PAUSE;
2053 	link_cfg.options &= ~DPAA2_NI_LINK_OPT_ASYM_PAUSE;
2054 
2055 	error = DPAA2_CMD_NI_SET_LINK_CFG(dev, child, &cmd, &link_cfg);
2056 	if (error) {
2057 		device_printf(dev, "%s: failed to set link configuration: "
2058 		    "error=%d\n", __func__, error);
2059 		goto close_ni;
2060 	}
2061 
2062 	sc->link_options = link_cfg.options;
2063 	error = 0;
2064 close_ni:
2065 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2066 close_rc:
2067 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2068 err_exit:
2069 	return (error);
2070 }
2071 
2072 /**
2073  * @brief Configure QoS table to determine the traffic class for the received
2074  * frame.
2075  */
2076 static int
2077 dpaa2_ni_set_qos_table(device_t dev)
2078 {
2079 	device_t pdev = device_get_parent(dev);
2080 	device_t child = dev;
2081 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2082 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2083 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2084 	struct dpaa2_ni_qos_table tbl;
2085 	struct dpaa2_buf *buf = &sc->qos_kcfg;
2086 	struct dpaa2_cmd cmd;
2087 	uint16_t rc_token, ni_token;
2088 	int error;
2089 
2090 	if (sc->attr.num.rx_tcs == 1 ||
2091 	    !(sc->attr.options & DPNI_OPT_HAS_KEY_MASKING)) {
2092 		if (bootverbose) {
2093 			device_printf(dev, "Ingress traffic classification is "
2094 			    "not supported\n");
2095 		}
2096 		return (0);
2097 	}
2098 
2099 	/*
2100 	 * Allocate a buffer visible to the device to hold the QoS table key
2101 	 * configuration.
2102 	 */
2103 
2104 	if (__predict_true(buf->dmat == NULL)) {
2105 		buf->dmat = sc->qos_dmat;
2106 	}
2107 
2108 	error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr,
2109 	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap);
2110 	if (error) {
2111 		device_printf(dev, "%s: failed to allocate a buffer for QoS key "
2112 		    "configuration\n", __func__);
2113 		goto err_exit;
2114 	}
2115 
2116 	error = bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr,
2117 	    ETH_QOS_KCFG_BUF_SIZE, dpaa2_dmamap_oneseg_cb, &buf->paddr,
2118 	    BUS_DMA_NOWAIT);
2119 	if (error) {
2120 		device_printf(dev, "%s: failed to map QoS key configuration "
2121 		    "buffer into bus space\n", __func__);
2122 		goto err_exit;
2123 	}
2124 
2125 	DPAA2_CMD_INIT(&cmd);
2126 
2127 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2128 	if (error) {
2129 		device_printf(dev, "%s: failed to open resource container: "
2130 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2131 		goto err_exit;
2132 	}
2133 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2134 	if (error) {
2135 		device_printf(sc->dev, "%s: failed to open network interface: "
2136 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2137 		goto close_rc;
2138 	}
2139 
2140 	tbl.default_tc = 0;
2141 	tbl.discard_on_miss = false;
2142 	tbl.keep_entries = false;
2143 	tbl.kcfg_busaddr = buf->paddr;
2144 	error = DPAA2_CMD_NI_SET_QOS_TABLE(dev, child, &cmd, &tbl);
2145 	if (error) {
2146 		device_printf(dev, "%s: failed to set QoS table\n", __func__);
2147 		goto close_ni;
2148 	}
2149 
2150 	error = DPAA2_CMD_NI_CLEAR_QOS_TABLE(dev, child, &cmd);
2151 	if (error) {
2152 		device_printf(dev, "%s: failed to clear QoS table\n", __func__);
2153 		goto close_ni;
2154 	}
2155 
2156 	error = 0;
2157 close_ni:
2158 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2159 close_rc:
2160 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2161 err_exit:
2162 	return (error);
2163 }
2164 
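/**
 * @brief Select and program the DPNI's primary MAC address.
 *
 * Preference order: the MAC address of the physical port the DPNI is
 * connected to, then a primary MAC address already configured on the DPNI,
 * and finally a randomly generated address.
 */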
2165 static int
2166 dpaa2_ni_set_mac_addr(device_t dev)
2167 {
2168 	device_t pdev = device_get_parent(dev);
2169 	device_t child = dev;
2170 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2171 	if_t ifp = sc->ifp;
2172 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2173 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2174 	struct dpaa2_cmd cmd;
2175 	struct ether_addr rnd_mac_addr;
2176 	uint16_t rc_token, ni_token;
2177 	uint8_t mac_addr[ETHER_ADDR_LEN];
2178 	uint8_t dpni_mac_addr[ETHER_ADDR_LEN];
2179 	int error;
2180 
2181 	DPAA2_CMD_INIT(&cmd);
2182 
2183 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2184 	if (error) {
2185 		device_printf(dev, "%s: failed to open resource container: "
2186 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2187 		goto err_exit;
2188 	}
2189 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2190 	if (error) {
2191 		device_printf(sc->dev, "%s: failed to open network interface: "
2192 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2193 		goto close_rc;
2194 	}
2195 
2196 	/*
2197 	 * Get the MAC address associated with the physical port, if the DPNI is
2198 	 * connected to a DPMAC directly associated with one of the physical
2199 	 * ports.
2200 	 */
2201 	error = DPAA2_CMD_NI_GET_PORT_MAC_ADDR(dev, child, &cmd, mac_addr);
2202 	if (error) {
2203 		device_printf(dev, "%s: failed to obtain the MAC address "
2204 		    "associated with the physical port\n", __func__);
2205 		goto close_ni;
2206 	}
2207 
2208 	/* Get primary MAC address from the DPNI attributes. */
2209 	error = DPAA2_CMD_NI_GET_PRIM_MAC_ADDR(dev, child, &cmd, dpni_mac_addr);
2210 	if (error) {
2211 		device_printf(dev, "%s: failed to obtain primary MAC address\n",
2212 		    __func__);
2213 		goto close_ni;
2214 	}
2215 
2216 	if (!ETHER_IS_ZERO(mac_addr)) {
2217 		/* Set MAC address of the physical port as DPNI's primary one. */
2218 		error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd,
2219 		    mac_addr);
2220 		if (error) {
2221 			device_printf(dev, "%s: failed to set primary MAC "
2222 			    "address\n", __func__);
2223 			goto close_ni;
2224 		}
2225 		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2226 			sc->mac.addr[i] = mac_addr[i];
2227 		}
2228 	} else if (ETHER_IS_ZERO(dpni_mac_addr)) {
2229 		/* Generate random MAC address as DPNI's primary one. */
2230 		ether_gen_addr(ifp, &rnd_mac_addr);
2231 		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2232 			mac_addr[i] = rnd_mac_addr.octet[i];
2233 		}
2234 
2235 		error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd,
2236 		    mac_addr);
2237 		if (error) {
2238 			device_printf(dev, "%s: failed to set random primary "
2239 			    "MAC address\n", __func__);
2240 			goto close_ni;
2241 		}
2242 		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2243 			sc->mac.addr[i] = mac_addr[i];
2244 		}
2245 	} else {
2246 		for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2247 			sc->mac.addr[i] = dpni_mac_addr[i];
2248 		}
2249 	}
2250 
2251 	error = 0;
2252 close_ni:
2253 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2254 close_rc:
2255 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2256 err_exit:
2257 	return (error);
2258 }
2259 
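/**
 * @brief miibus(4) status-change callback: propagate the PHY link state to
 * the DPMAC.
 */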
2260 static void
2261 dpaa2_ni_miibus_statchg(device_t dev)
2262 {
2263 	device_t pdev = device_get_parent(dev);
2264 	device_t child = dev;
2265 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2266 	struct dpaa2_mac_link_state mac_link = { 0 };
2267 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2268 	struct dpaa2_cmd cmd;
2269 	uint16_t rc_token, mac_token;
2270 	int error, link_state;
2271 
2272 	if (sc->fixed_link || sc->mii == NULL) {
2273 		return;
2274 	}
2275 	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0) {
2276 		/*
2277 		 * We may be called before everything has been set up, i.e.
2278 		 * before dpaa2_ni_init() has run.  Adjusting the link and the
2279 		 * internal sc->link_state at this point would suppress the
2280 		 * update later, when it is actually needed, so the DPMAC would
2281 		 * never learn about the change.
2282 		 */
2283 		return;
2284 	}
2285 
2286 	/*
2287 	 * Note: ifp link state will only be changed AFTER we are called so we
2288 	 * cannot rely on ifp->if_linkstate here.
2289 	 */
2290 	if (sc->mii->mii_media_status & IFM_AVALID) {
2291 		if (sc->mii->mii_media_status & IFM_ACTIVE) {
2292 			link_state = LINK_STATE_UP;
2293 		} else {
2294 			link_state = LINK_STATE_DOWN;
2295 		}
2296 	} else {
2297 		link_state = LINK_STATE_UNKNOWN;
2298 	}
2299 
2300 	if (link_state != sc->link_state) {
2301 		sc->link_state = link_state;
2302 
2303 		DPAA2_CMD_INIT(&cmd);
2304 
2305 		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
2306 		    &rc_token);
2307 		if (error) {
2308 			device_printf(dev, "%s: failed to open resource "
2309 			    "container: id=%d, error=%d\n", __func__, rcinfo->id,
2310 			    error);
2311 			goto err_exit;
2312 		}
2313 		error = DPAA2_CMD_MAC_OPEN(dev, child, &cmd, sc->mac.dpmac_id,
2314 		    &mac_token);
2315 		if (error) {
2316 			device_printf(sc->dev, "%s: failed to open DPMAC: "
2317 			    "id=%d, error=%d\n", __func__, sc->mac.dpmac_id,
2318 			    error);
2319 			goto close_rc;
2320 		}
2321 
2322 		if (link_state == LINK_STATE_UP ||
2323 		    link_state == LINK_STATE_DOWN) {
2324 			/* Update DPMAC link state. */
2325 			mac_link.supported = sc->mii->mii_media.ifm_media;
2326 			mac_link.advert = sc->mii->mii_media.ifm_media;
2327 			mac_link.rate = 1000; /* TODO: Where to get from? ifmedia_baudrate(9)? */
2328 			mac_link.options =
2329 			    DPAA2_MAC_LINK_OPT_AUTONEG |
2330 			    DPAA2_MAC_LINK_OPT_PAUSE;
2331 			mac_link.up = (link_state == LINK_STATE_UP) ? true : false;
2332 			mac_link.state_valid = true;
2333 
2334 			/* Inform DPMAC about link state. */
2335 			error = DPAA2_CMD_MAC_SET_LINK_STATE(dev, child, &cmd,
2336 			    &mac_link);
2337 			if (error) {
2338 				device_printf(sc->dev, "%s: failed to set DPMAC "
2339 				    "link state: id=%d, error=%d\n", __func__,
2340 				    sc->mac.dpmac_id, error);
2341 			}
2342 		}
2343 		(void)DPAA2_CMD_MAC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
 		    mac_token));
2344 		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
2345 		    rc_token));
2346 	}
2347 
2348 	return;
2349 
2350 close_rc:
2351 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2352 err_exit:
2353 	return;
2354 }
2355 
2356 /**
2357  * @brief Callback function to process media change request.
2358  */
2359 static int
2360 dpaa2_ni_media_change_locked(struct dpaa2_ni_softc *sc)
2361 {
2362 
2363 	DPNI_LOCK_ASSERT(sc);
2364 	if (sc->mii) {
2365 		mii_mediachg(sc->mii);
2366 		sc->media_status = sc->mii->mii_media.ifm_media;
2367 	} else if (sc->fixed_link) {
2368 		if_printf(sc->ifp, "%s: can't change media in fixed mode\n",
2369 		    __func__);
2370 	}
2371 
2372 	return (0);
2373 }
2374 
2375 static int
2376 dpaa2_ni_media_change(if_t ifp)
2377 {
2378 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2379 	int error;
2380 
2381 	DPNI_LOCK(sc);
2382 	error = dpaa2_ni_media_change_locked(sc);
2383 	DPNI_UNLOCK(sc);
2384 	return (error);
2385 }
2386 
2387 /**
2388  * @brief Callback function to process media status request.
2389  */
2390 static void
2391 dpaa2_ni_media_status(if_t ifp, struct ifmediareq *ifmr)
2392 {
2393 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2394 
2395 	DPNI_LOCK(sc);
2396 	if (sc->mii) {
2397 		mii_pollstat(sc->mii);
2398 		ifmr->ifm_active = sc->mii->mii_media_active;
2399 		ifmr->ifm_status = sc->mii->mii_media_status;
2400 	}
2401 	DPNI_UNLOCK(sc);
2402 }
2403 
2404 /**
2405  * @brief Callout function to check and update media status.
2406  */
2407 static void
2408 dpaa2_ni_media_tick(void *arg)
2409 {
2410 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2411 
2412 	/* Check for media type change */
2413 	if (sc->mii) {
2414 		mii_tick(sc->mii);
2415 		if (sc->media_status != sc->mii->mii_media.ifm_media) {
2416 			printf("%s: media type changed (ifm_media=%x)\n",
2417 			    __func__, sc->mii->mii_media.ifm_media);
2418 			dpaa2_ni_media_change(sc->ifp);
2419 		}
2420 	}
2421 
2422 	/* Schedule another timeout one second from now */
2423 	callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);
2424 }
2425 
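/**
 * @brief Bring the interface up: enable the DPNI and start the media
 * callout.
 */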
2426 static void
2427 dpaa2_ni_init(void *arg)
2428 {
2429 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2430 	if_t ifp = sc->ifp;
2431 	device_t pdev = device_get_parent(sc->dev);
2432 	device_t dev = sc->dev;
2433 	device_t child = dev;
2434 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2435 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2436 	struct dpaa2_cmd cmd;
2437 	uint16_t rc_token, ni_token;
2438 	int error;
2439 
2440 	DPNI_LOCK(sc);
2441 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2442 		DPNI_UNLOCK(sc);
2443 		return;
2444 	}
2445 	DPNI_UNLOCK(sc);
2446 
2447 	DPAA2_CMD_INIT(&cmd);
2448 
2449 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2450 	if (error) {
2451 		device_printf(dev, "%s: failed to open resource container: "
2452 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2453 		goto err_exit;
2454 	}
2455 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2456 	if (error) {
2457 		device_printf(dev, "%s: failed to open network interface: "
2458 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2459 		goto close_rc;
2460 	}
2461 
2462 	error = DPAA2_CMD_NI_ENABLE(dev, child, &cmd);
2463 	if (error) {
2464 		device_printf(dev, "%s: failed to enable DPNI: error=%d\n",
2465 		    __func__, error);
2466 	}
2467 
2468 	DPNI_LOCK(sc);
2469 	/* Announce we are up and running and can queue packets. */
2470 	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
2471 
2472 	if (sc->mii) {
2473 		/*
2474 		 * mii_mediachg() will trigger a call into
2475 		 * dpaa2_ni_miibus_statchg() to setup link state.
2476 		 */
2477 		dpaa2_ni_media_change_locked(sc);
2478 	}
2479 	callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);
2480 
2481 	DPNI_UNLOCK(sc);
2482 
2483 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2484 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2485 	return;
2486 
2487 close_rc:
2488 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2489 err_exit:
2490 	return;
2491 }
2492 
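/**
 * @brief if_transmit(9) callback.
 *
 * Maps the mbuf's flowid (an Rx frame queue ID assigned on receive) back to
 * its channel and enqueues the mbuf onto that channel's software Tx ring;
 * the cleanup task performs the actual enqueue to the hardware.
 */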
2493 static int
2494 dpaa2_ni_transmit(if_t ifp, struct mbuf *m)
2495 {
2496 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2497 	struct dpaa2_channel *ch;
2498 	uint32_t fqid;
2499 	bool found = false;
2500 	int chidx = 0, error;
2501 
2502 	if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) {
 		m_freem(m); /* nobody else will free the chain */
2503 		return (0);
2504 	}
2505 
2506 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
2507 		fqid = m->m_pkthdr.flowid;
2508 		for (int i = 0; i < sc->chan_n; i++) {
2509 			ch = sc->channels[i];
2510 			for (int j = 0; j < ch->rxq_n; j++) {
2511 				if (fqid == ch->rx_queues[j].fqid) {
2512 					chidx = ch->flowid;
2513 					found = true;
2514 					break;
2515 				}
2516 			}
2517 			if (found) {
2518 				break;
2519 			}
2520 		}
2521 	}
2522 
2523 	ch = sc->channels[chidx];
2524 	error = buf_ring_enqueue(ch->xmit_br, m);
2525 	if (__predict_false(error != 0)) {
2526 		m_freem(m);
2527 	} else {
2528 		taskqueue_enqueue(ch->cleanup_tq, &ch->cleanup_task);
2529 	}
2530 
2531 	return (error);
2532 }
2533 
2534 static void
2535 dpaa2_ni_qflush(if_t ifp)
2536 {
2537 	/* TODO: Find a way to drain Tx queues in QBMan. */
2538 	if_qflush(ifp);
2539 }
2540 
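/**
 * @brief ioctl(2) callback for the network interface.
 */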
2541 static int
2542 dpaa2_ni_ioctl(if_t ifp, u_long c, caddr_t data)
2543 {
2544 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2545 	struct ifreq *ifr = (struct ifreq *) data;
2546 	device_t pdev = device_get_parent(sc->dev);
2547 	device_t dev = sc->dev;
2548 	device_t child = dev;
2549 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2550 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2551 	struct dpaa2_cmd cmd;
2552 	uint32_t changed = 0;
2553 	uint16_t rc_token, ni_token;
2554 	int mtu, error, rc = 0;
2555 
2556 	DPAA2_CMD_INIT(&cmd);
2557 
2558 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2559 	if (error) {
2560 		device_printf(dev, "%s: failed to open resource container: "
2561 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2562 		goto err_exit;
2563 	}
2564 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2565 	if (error) {
2566 		device_printf(dev, "%s: failed to open network interface: "
2567 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2568 		goto close_rc;
2569 	}
2570 
2571 	switch (c) {
2572 	case SIOCSIFMTU:
2573 		DPNI_LOCK(sc);
2574 		mtu = ifr->ifr_mtu;
2575 		if (mtu < ETHERMIN || mtu > ETHERMTU_JUMBO) {
2576 			DPNI_UNLOCK(sc);
2577 			error = EINVAL;
2578 			goto close_ni;
2579 		}
2580 		if_setmtu(ifp, mtu);
2581 		DPNI_UNLOCK(sc);
2582 
2583 		/* Update maximum frame length. */
2584 		error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd,
2585 		    mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2586 		if (error) {
2587 			device_printf(dev, "%s: failed to update maximum frame "
2588 			    "length: error=%d\n", __func__, error);
2589 			goto close_ni;
2590 		}
2591 		break;
2592 	case SIOCSIFCAP:
2593 		changed = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
2594 		if (changed & IFCAP_HWCSUM) {
2595 			if ((ifr->ifr_reqcap & changed) & IFCAP_HWCSUM) {
2596 				if_setcapenablebit(ifp, IFCAP_HWCSUM, 0);
2597 			} else {
2598 				if_setcapenablebit(ifp, 0, IFCAP_HWCSUM);
2599 			}
2600 		}
2601 		rc = dpaa2_ni_setup_if_caps(sc);
2602 		if (rc) {
2603 			device_printf(dev, "%s: failed to update interface "
2604 			    "capabilities: error=%d\n", __func__, rc);
2605 			rc = ENXIO;
2606 		}
2607 		break;
2608 	case SIOCSIFFLAGS:
2609 		DPNI_LOCK(sc);
2610 		if (if_getflags(ifp) & IFF_UP) {
2611 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2612 				changed = if_getflags(ifp) ^ sc->if_flags;
2613 				if (changed & IFF_PROMISC ||
2614 				    changed & IFF_ALLMULTI) {
2615 					rc = dpaa2_ni_setup_if_flags(sc);
2616 				}
2617 			} else {
2618 				DPNI_UNLOCK(sc);
2619 				dpaa2_ni_init(sc);
2620 				DPNI_LOCK(sc);
2621 			}
2622 		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2623 			/* FIXME: Disable DPNI. See dpaa2_ni_init(). */
2624 		}
2625 
2626 		sc->if_flags = if_getflags(ifp);
2627 		DPNI_UNLOCK(sc);
2628 		break;
2629 	case SIOCADDMULTI:
2630 	case SIOCDELMULTI:
2631 		DPNI_LOCK(sc);
2632 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2633 			DPNI_UNLOCK(sc);
2634 			rc = dpaa2_ni_update_mac_filters(ifp);
2635 			if (rc) {
2636 				device_printf(dev, "%s: failed to update MAC "
2637 				    "filters: error=%d\n", __func__, rc);
2638 			}
2639 			DPNI_LOCK(sc);
2640 		}
2641 		DPNI_UNLOCK(sc);
2642 		break;
2643 	case SIOCGIFMEDIA:
2644 	case SIOCSIFMEDIA:
2645 		if (sc->mii) {
2646 			rc = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, c);
2647 		} else if (sc->fixed_link) {
2648 			rc = ifmedia_ioctl(ifp, ifr, &sc->fixed_ifmedia, c);
2649 		}
2650 		break;
2651 	default:
2652 		rc = ether_ioctl(ifp, c, data);
2653 		break;
2654 	}
2655 
2656 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2657 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2658 	return (rc);
2659 
2660 close_ni:
2661 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2662 close_rc:
2663 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2664 err_exit:
2665 	return (error);
2666 }
2667 
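/**
 * @brief Rebuild the multicast MAC filters from the interface's link-level
 * address list.
 */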
2668 static int
2669 dpaa2_ni_update_mac_filters(if_t ifp)
2670 {
2671 	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2672 	struct dpaa2_ni_mcaddr_ctx ctx;
2673 	device_t pdev = device_get_parent(sc->dev);
2674 	device_t dev = sc->dev;
2675 	device_t child = dev;
2676 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2677 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2678 	struct dpaa2_cmd cmd;
2679 	uint16_t rc_token, ni_token;
2680 	int error;
2681 
2682 	DPAA2_CMD_INIT(&cmd);
2683 
2684 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2685 	if (error) {
2686 		device_printf(dev, "%s: failed to open resource container: "
2687 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2688 		goto err_exit;
2689 	}
2690 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2691 	if (error) {
2692 		device_printf(dev, "%s: failed to open network interface: "
2693 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2694 		goto close_rc;
2695 	}
2696 
2697 	/* Remove all multicast MAC filters. */
2698 	error = DPAA2_CMD_NI_CLEAR_MAC_FILTERS(dev, child, &cmd, false, true);
2699 	if (error) {
2700 		device_printf(dev, "%s: failed to clear multicast MAC filters: "
2701 		    "error=%d\n", __func__, error);
2702 		goto close_ni;
2703 	}
2704 
2705 	ctx.ifp = ifp;
2706 	ctx.error = 0;
2707 	ctx.nent = 0;
2708 
2709 	if_foreach_llmaddr(ifp, dpaa2_ni_add_maddr, &ctx);
2710 
2711 	error = ctx.error;
2712 close_ni:
2713 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2714 close_rc:
2715 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2716 err_exit:
2717 	return (error);
2718 }
2719 
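/**
 * @brief if_foreach_llmaddr() callback to add a single multicast MAC filter.
 *
 * Switches the interface to multicast promiscuous mode once the hardware
 * filter table fills up.
 */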
2720 static u_int
2721 dpaa2_ni_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2722 {
2723 	struct dpaa2_ni_mcaddr_ctx *ctx = arg;
2724 	struct dpaa2_ni_softc *sc = if_getsoftc(ctx->ifp);
2725 	device_t pdev = device_get_parent(sc->dev);
2726 	device_t dev = sc->dev;
2727 	device_t child = dev;
2728 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2729 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2730 	struct dpaa2_cmd cmd;
2731 	uint16_t rc_token, ni_token;
2732 	int error;
2733 
2734 	if (ctx->error != 0) {
2735 		return (0);
2736 	}
2737 
2738 	if (ETHER_IS_MULTICAST(LLADDR(sdl))) {
2739 		DPAA2_CMD_INIT(&cmd);
2740 
2741 		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
2742 		    &rc_token);
2743 		if (error) {
2744 			device_printf(dev, "%s: failed to open resource "
2745 			    "container: id=%d, error=%d\n", __func__, rcinfo->id,
2746 			    error);
2747 			return (0);
2748 		}
2749 		error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id,
2750 		    &ni_token);
2751 		if (error) {
2752 			device_printf(dev, "%s: failed to open network interface: "
2753 			    "id=%d, error=%d\n", __func__, dinfo->id, error);
2754 			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
2755 			    rc_token));
2756 			return (0);
2757 		}
2758 
2759 		ctx->error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, &cmd,
2760 		    LLADDR(sdl));
2761 
2762 		(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
2763 		    ni_token));
2764 		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
2765 		    rc_token));
2766 
2767 		if (ctx->error != 0) {
2768 			device_printf(dev, "%s: can't add more than %d MAC "
2769 			    "addresses, switching to the multicast promiscuous "
2770 			    "mode\n", __func__, ctx->nent);
2771 
2772 			/* Enable multicast promiscuous mode. */
2773 			DPNI_LOCK(sc);
2774 			if_setflagbits(ctx->ifp, IFF_ALLMULTI, 0);
2775 			sc->if_flags |= IFF_ALLMULTI;
2776 			ctx->error = dpaa2_ni_setup_if_flags(sc);
2777 			DPNI_UNLOCK(sc);
2778 
2779 			return (0);
2780 		}
2781 		ctx->nent++;
2782 	}
2783 
2784 	return (1);
2785 }
2786 
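/**
 * @brief DPNI interrupt handler.
 *
 * Reads the IRQ status over the MC command portal; passing ~0 as the initial
 * status requests that all pending status bits be cleared.
 */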
2787 static void
2788 dpaa2_ni_intr(void *arg)
2789 {
2790 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2791 	device_t pdev = device_get_parent(sc->dev);
2792 	device_t dev = sc->dev;
2793 	device_t child = dev;
2794 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2795 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2796 	struct dpaa2_cmd cmd;
2797 	uint32_t status = ~0u; /* clear all IRQ status bits */
2798 	uint16_t rc_token, ni_token;
2799 	int error;
2800 
2801 	DPAA2_CMD_INIT(&cmd);
2802 
2803 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2804 	if (error) {
2805 		device_printf(dev, "%s: failed to open resource container: "
2806 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
2807 		goto err_exit;
2808 	}
2809 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2810 	if (error) {
2811 		device_printf(dev, "%s: failed to open network interface: "
2812 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
2813 		goto close_rc;
2814 	}
2815 
2816 	error = DPAA2_CMD_NI_GET_IRQ_STATUS(dev, child, &cmd, DPNI_IRQ_INDEX,
2817 	    &status);
2818 	if (error) {
2819 		device_printf(sc->dev, "%s: failed to obtain IRQ status: "
2820 		    "error=%d\n", __func__, error);
2821 	}
2822 
2823 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2824 close_rc:
2825 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2826 err_exit:
2827 	return;
2828 }
2829 
2830 /**
2831  * @brief Execute channel's Rx/Tx routines.
2832  *
2833  * NOTE: Should not be re-entrant for the same channel. It is achieved by
2834  *       enqueuing the cleanup routine on a single-threaded taskqueue.
2835  */
2836 static void
2837 dpaa2_ni_cleanup_task(void *arg, int count)
2838 {
2839 	struct dpaa2_channel *ch = (struct dpaa2_channel *)arg;
2840 	struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev);
2841 	int error, rxc, txc;
2842 
2843 	for (int i = 0; i < DPAA2_CLEAN_BUDGET; i++) {
2844 		rxc  = dpaa2_ni_rx_cleanup(ch);
2845 		txc  = dpaa2_ni_tx_cleanup(ch);
2846 
2847 		if (__predict_false((if_getdrvflags(sc->ifp) &
2848 		    IFF_DRV_RUNNING) == 0)) {
2849 			return;
2850 		}
2851 
2852 		if ((txc != DPAA2_TX_BUDGET) && (rxc != DPAA2_RX_BUDGET)) {
2853 			break;
2854 		}
2855 	}
2856 
2857 	/* Re-arm channel to generate CDAN */
2858 	error = DPAA2_SWP_CONF_WQ_CHANNEL(ch->io_dev, &ch->ctx);
2859 	if (error != 0) {
2860 		panic("%s: failed to rearm channel: chan_id=%d, error=%d\n",
2861 		    __func__, ch->id, error);
2862 	}
2863 }
2864 
2865 /**
2866  * @brief Poll frames from a specific channel when CDAN is received.
2867  */
2868 static int
2869 dpaa2_ni_rx_cleanup(struct dpaa2_channel *ch)
2870 {
2871 	struct dpaa2_io_softc *iosc = device_get_softc(ch->io_dev);
2872 	struct dpaa2_swp *swp = iosc->swp;
2873 	struct dpaa2_ni_fq *fq;
2874 	struct dpaa2_buf *buf = &ch->store;
2875 	int budget = DPAA2_RX_BUDGET;
2876 	int error;
 	uint32_t consumed = 0;	/* matches dpaa2_ni_consume_frames() */
2877 
2878 	do {
2879 		error = dpaa2_swp_pull(swp, ch->id, buf, DPAA2_ETH_STORE_FRAMES);
2880 		if (error) {
2881 			device_printf(ch->ni_dev, "%s: failed to pull frames: "
2882 			    "chan_id=%d, error=%d\n", __func__, ch->id, error);
2883 			break;
2884 		}
2885 		error = dpaa2_ni_consume_frames(ch, &fq, &consumed);
2886 		if (error == ENOENT || error == EALREADY) {
2887 			break;
2888 		}
2889 		if (error == ETIMEDOUT) {
2890 			device_printf(ch->ni_dev, "%s: timed out while "
2891 			    "consuming frames: chan_id=%d\n", __func__, ch->id);
2892 		}
2893 	} while (--budget);
2894 
2895 	return (DPAA2_RX_BUDGET - budget);
2896 }
2897 
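/**
 * @brief Dequeue mbufs from the channel's software Tx ring and pass them to
 * the hardware, up to DPAA2_TX_BUDGET frames at a time.
 */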
2898 static int
2899 dpaa2_ni_tx_cleanup(struct dpaa2_channel *ch)
2900 {
2901 	struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev);
2902 	struct dpaa2_ni_tx_ring *tx = &ch->txc_queue.tx_rings[0];
2903 	struct mbuf *m = NULL;
2904 	int budget = DPAA2_TX_BUDGET;
2905 
2906 	do {
2907 		mtx_assert(&ch->xmit_mtx, MA_NOTOWNED);
2908 		mtx_lock(&ch->xmit_mtx);
2909 		m = buf_ring_dequeue_sc(ch->xmit_br);
2910 		mtx_unlock(&ch->xmit_mtx);
2911 
2912 		if (__predict_false(m == NULL)) {
2913 			/* TODO: Do not give up easily */
2914 			break;
2915 		} else {
2916 			dpaa2_ni_tx(sc, ch, tx, m);
2917 		}
2918 	} while (--budget);
2919 
2920 	return (DPAA2_TX_BUDGET - budget);
2921 }
2922 
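/**
 * @brief Transmit a single mbuf: take a Tx buffer from the ring, map the
 * mbuf (defragmenting it if it exceeds DPAA2_TX_SEGLIMIT segments), build a
 * frame descriptor and enqueue it to QBMan.
 */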
2923 static void
2924 dpaa2_ni_tx(struct dpaa2_ni_softc *sc, struct dpaa2_channel *ch,
2925     struct dpaa2_ni_tx_ring *tx, struct mbuf *m)
2926 {
2927 	device_t dev = sc->dev;
2928 	struct dpaa2_ni_fq *fq = tx->fq;
2929 	struct dpaa2_buf *buf, *sgt;
2930 	struct dpaa2_fd fd;
2931 	struct mbuf *md;
2932 	bus_dma_segment_t segs[DPAA2_TX_SEGLIMIT];
2933 	int rc, nsegs;
2934 	int error;
2935 
2936 	mtx_assert(&tx->lock, MA_NOTOWNED);
2937 	mtx_lock(&tx->lock);
2938 	buf = buf_ring_dequeue_sc(tx->br);
2939 	mtx_unlock(&tx->lock);
2940 	if (__predict_false(buf == NULL)) {
2941 		/* TODO: Do not give up easily */
2942 		m_freem(m);
2943 		return;
2944 	} else {
2945 		DPAA2_BUF_ASSERT_TXREADY(buf);
2946 		buf->m = m;
2947 		sgt = buf->sgt;
2948 	}
2949 
2950 #if defined(INVARIANTS)
2951 	struct dpaa2_ni_tx_ring *btx = (struct dpaa2_ni_tx_ring *)buf->opt;
2952 	KASSERT(buf->opt == tx, ("%s: unexpected Tx ring", __func__));
2953 	KASSERT(btx->fq->chan == ch, ("%s: unexpected channel", __func__));
2954 #endif /* INVARIANTS */
2955 
2956 	error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, m, segs, &nsegs,
2957 	    BUS_DMA_NOWAIT);
2958 	if (__predict_false(error != 0)) {
2959 		/* Too many fragments, trying to defragment... */
2960 		md = m_collapse(m, M_NOWAIT, DPAA2_TX_SEGLIMIT);
2961 		if (md == NULL) {
2962 			device_printf(dev, "%s: m_collapse() failed\n", __func__);
2963 			fq->chan->tx_dropped++;
2964 			goto err;
2965 		}
2966 
2967 		buf->m = m = md;
2968 		error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, m, segs,
2969 		    &nsegs, BUS_DMA_NOWAIT);
2970 		if (__predict_false(error != 0)) {
2971 			device_printf(dev, "%s: bus_dmamap_load_mbuf_sg() "
2972 			    "failed: error=%d\n", __func__, error);
2973 			fq->chan->tx_dropped++;
2974 			goto err;
2975 		}
2976 	}
2977 
2978 	error = dpaa2_ni_build_fd(sc, tx, buf, segs, nsegs, &fd);
2979 	if (__predict_false(error != 0)) {
2980 		device_printf(dev, "%s: failed to build frame descriptor: "
2981 		    "error=%d\n", __func__, error);
2982 		fq->chan->tx_dropped++;
2983 		goto err_unload;
2984 	}
2985 
 	/* Flush the frame data and SGT before handing them to the hardware. */
2986 	bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_PREWRITE);
2987 	bus_dmamap_sync(sgt->dmat, sgt->dmap, BUS_DMASYNC_PREWRITE);
2988 
2989 	/* TODO: Enqueue several frames in a single command */
2990 	for (int i = 0; i < DPAA2_NI_ENQUEUE_RETRIES; i++) {
2991 		/* TODO: Return error codes instead of # of frames */
2992 		rc = DPAA2_SWP_ENQ_MULTIPLE_FQ(fq->chan->io_dev, tx->fqid, &fd, 1);
2993 		if (rc == 1) {
2994 			break;
2995 		}
2996 	}
2997 
2998 	if (rc != 1) {
2999 		fq->chan->tx_dropped++;
3000 		goto err_unload;
3001 	} else {
3002 		fq->chan->tx_frames++;
3003 	}
3004 	return;
3005 
3006 err_unload:
3007 	bus_dmamap_unload(buf->dmat, buf->dmap);
3008 	if (sgt->paddr != 0) {
3009 		bus_dmamap_unload(sgt->dmat, sgt->dmap);
3010 	}
3011 err:
3012 	m_freem(buf->m);
3013 	buf_ring_enqueue(tx->br, buf);
3014 }
3015 
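/**
 * @brief Consume frames pulled into the channel store and dispatch each one
 * to the Rx, Rx error or Tx confirmation handler based on its queue type.
 */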
3016 static int
3017 dpaa2_ni_consume_frames(struct dpaa2_channel *chan, struct dpaa2_ni_fq **src,
3018     uint32_t *consumed)
3019 {
3020 	struct dpaa2_ni_fq *fq = NULL;
3021 	struct dpaa2_dq *dq;
3022 	struct dpaa2_fd *fd;
3023 	struct dpaa2_ni_rx_ctx ctx = {
3024 		.head = NULL,
3025 		.tail = NULL,
3026 		.cnt = 0,
3027 		.last = false
3028 	};
3029 	int rc, frames = 0;
3030 
3031 	do {
3032 		rc = dpaa2_chan_next_frame(chan, &dq);
3033 		if (rc == EINPROGRESS) {
3034 			if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
3035 				fd = &dq->fdr.fd;
3036 				fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;
3037 
3038 				switch (fq->type) {
3039 				case DPAA2_NI_QUEUE_RX:
3040 					(void)dpaa2_ni_rx(chan, fq, fd, &ctx);
3041 					break;
3042 				case DPAA2_NI_QUEUE_RX_ERR:
3043 					(void)dpaa2_ni_rx_err(chan, fq, fd);
3044 					break;
3045 				case DPAA2_NI_QUEUE_TX_CONF:
3046 					(void)dpaa2_ni_tx_conf(chan, fq, fd);
3047 					break;
3048 				default:
3049 					panic("%s: unknown queue type (1)",
3050 					    __func__);
3051 				}
3052 				frames++;
3053 			}
3054 		} else if (rc == EALREADY || rc == ENOENT) {
3055 			if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
3056 				fd = &dq->fdr.fd;
3057 				fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;
3058 
3059 				switch (fq->type) {
3060 				case DPAA2_NI_QUEUE_RX:
3061 					/*
3062 					 * Last VDQ response (mbuf) in a chain
3063 					 * obtained from the Rx queue.
3064 					 */
3065 					ctx.last = true;
3066 					(void)dpaa2_ni_rx(chan, fq, fd, &ctx);
3067 					break;
3068 				case DPAA2_NI_QUEUE_RX_ERR:
3069 					(void)dpaa2_ni_rx_err(chan, fq, fd);
3070 					break;
3071 				case DPAA2_NI_QUEUE_TX_CONF:
3072 					(void)dpaa2_ni_tx_conf(chan, fq, fd);
3073 					break;
3074 				default:
3075 					panic("%s: unknown queue type (2)",
3076 					    __func__);
3077 				}
3078 				frames++;
3079 			}
3080 			break;
3081 		} else {
3082 			panic("%s: should not reach here: rc=%d", __func__, rc);
3083 		}
3084 	} while (true);
3085 
3086 	KASSERT(chan->store_idx < chan->store_sz, ("%s: store_idx(%d) >= "
3087 	    "store_sz(%d)", __func__, chan->store_idx, chan->store_sz));
3088 
3089 	/*
3090 	 * VDQ operation pulls frames from a single queue into the store.
3091 	 * Return the frame queue and a number of consumed frames as an output.
3092 	 */
3093 	if (src != NULL) {
3094 		*src = fq;
3095 	}
3096 	if (consumed != NULL) {
3097 		*consumed = frames;
3098 	}
3099 
3100 	return (rc);
3101 }
3102 
3103 /**
3104  * @brief Receive frames.
3105  */
3106 static int
3107 dpaa2_ni_rx(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq, struct dpaa2_fd *fd,
3108     struct dpaa2_ni_rx_ctx *ctx)
3109 {
3110 	bus_addr_t paddr = (bus_addr_t)fd->addr;
3111 	struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr);
3112 	struct dpaa2_buf *buf = fa->buf;
3113 	struct dpaa2_channel *bch = (struct dpaa2_channel *)buf->opt;
3114 	struct dpaa2_ni_softc *sc = device_get_softc(bch->ni_dev);
3115 	struct dpaa2_bp_softc *bpsc;
3116 	struct mbuf *m;
3117 	device_t bpdev;
3118 	bus_addr_t released[DPAA2_SWP_BUFS_PER_CMD];
3119 	void *buf_data;
3120 	int buf_len, error, released_n = 0;
3121 
3122 	KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
3123 	/*
3124 	 * NOTE: Current channel might not be the same as the "buffer" channel
3125 	 * and it's fine. It must not be NULL though.
3126 	 */
3127 	KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__));
3128 
3129 	if (__predict_false(paddr != buf->paddr)) {
3130 		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
3131 		    __func__, paddr, buf->paddr);
3132 	}
3133 
3134 	switch (dpaa2_ni_fd_err(fd)) {
3135 	case 1: /* Enqueue rejected by QMan */
3136 		sc->rx_enq_rej_frames++;
3137 		break;
3138 	case 2: /* QMan IEOI error */
3139 		sc->rx_ieoi_err_frames++;
3140 		break;
3141 	default:
3142 		break;
3143 	}
3144 	switch (dpaa2_ni_fd_format(fd)) {
3145 	case DPAA2_FD_SINGLE:
3146 		sc->rx_single_buf_frames++;
3147 		break;
3148 	case DPAA2_FD_SG:
3149 		sc->rx_sg_buf_frames++;
3150 		break;
3151 	default:
3152 		break;
3153 	}
3154 
3155 	mtx_assert(&bch->dma_mtx, MA_NOTOWNED);
3156 	mtx_lock(&bch->dma_mtx);
3157 
3158 	bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_POSTREAD);
3159 	bus_dmamap_unload(buf->dmat, buf->dmap);
3160 	m = buf->m;
3161 	buf_len = dpaa2_ni_fd_data_len(fd);
3162 	buf_data = (uint8_t *)buf->vaddr + dpaa2_ni_fd_offset(fd);
3163 	/* Prepare buffer to be re-cycled */
3164 	buf->m = NULL;
3165 	buf->paddr = 0;
3166 	buf->vaddr = NULL;
3167 	buf->seg.ds_addr = 0;
3168 	buf->seg.ds_len = 0;
3169 	buf->nseg = 0;
3170 
3171 	mtx_unlock(&bch->dma_mtx);
3172 
3173 	m->m_flags |= M_PKTHDR;
3174 	m->m_data = buf_data;
3175 	m->m_len = buf_len;
3176 	m->m_pkthdr.len = buf_len;
3177 	m->m_pkthdr.rcvif = sc->ifp;
3178 	m->m_pkthdr.flowid = fq->fqid;
3179 	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
3180 
3181 	if (ctx->head == NULL) {
3182 		KASSERT(ctx->tail == NULL, ("%s: tail already given?", __func__));
3183 		ctx->head = m;
3184 		ctx->tail = m;
3185 	} else {
3186 		KASSERT(ctx->head != NULL, ("%s: head is NULL", __func__));
3187 		ctx->tail->m_nextpkt = m;
3188 		ctx->tail = m;
3189 	}
3190 	ctx->cnt++;
3191 
3192 	if (ctx->last) {
3193 		ctx->tail->m_nextpkt = NULL;
3194 		if_input(sc->ifp, ctx->head);
3195 	}
3196 
3197 	/* Keep the buffer to be recycled */
3198 	ch->recycled[ch->recycled_n++] = buf;
3199 
3200 	/* Re-seed and release recycled buffers back to the pool */
3201 	if (ch->recycled_n == DPAA2_SWP_BUFS_PER_CMD) {
3202 		/* Release new buffers to the pool if needed */
3203 		taskqueue_enqueue(sc->bp_taskq, &ch->bp_task);
3204 
3205 		for (int i = 0; i < ch->recycled_n; i++) {
3206 			buf = ch->recycled[i];
3207 			bch = (struct dpaa2_channel *)buf->opt;
3208 
3209 			mtx_assert(&bch->dma_mtx, MA_NOTOWNED);
3210 			mtx_lock(&bch->dma_mtx);
3211 			error = dpaa2_buf_seed_rxb(sc->dev, buf,
3212 			    DPAA2_RX_BUF_SIZE, &bch->dma_mtx);
3213 			mtx_unlock(&bch->dma_mtx);
3214 
3215 			if (__predict_false(error != 0)) {
3216 				/* TODO: What else to do with the buffer? */
3217 				panic("%s: failed to recycle buffer: error=%d",
3218 				    __func__, error);
3219 			}
3220 
3221 			/* Prepare buffer to be released in a single command */
3222 			released[released_n++] = buf->paddr;
3223 		}
3224 
3225 		/* There's only one buffer pool for now */
3226 		bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
3227 		bpsc = device_get_softc(bpdev);
3228 
3229 		error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpsc->attr.bpid,
3230 		    released, released_n);
3231 		if (__predict_false(error != 0)) {
3232 			device_printf(sc->dev, "%s: failed to release buffers "
3233 			    "to the pool: error=%d\n", __func__, error);
3234 			return (error);
3235 		}
3236 		ch->recycled_n = 0;
3237 	}
3238 
3239 	return (0);
3240 }
3241 
3242 /**
3243  * @brief Receive Rx error frames.
3244  */
3245 static int
3246 dpaa2_ni_rx_err(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq,
3247     struct dpaa2_fd *fd)
3248 {
3249 	bus_addr_t paddr = (bus_addr_t)fd->addr;
3250 	struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr);
3251 	struct dpaa2_buf *buf = fa->buf;
3252 	struct dpaa2_channel *bch = (struct dpaa2_channel *)buf->opt;
3253 	struct dpaa2_ni_softc *sc = device_get_softc(bch->ni_dev);
3254 	device_t bpdev;
3255 	struct dpaa2_bp_softc *bpsc;
3256 	int error;
3257 
3258 	KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
3259 	/*
3260 	 * NOTE: Current channel might not be the same as the "buffer" channel
3261 	 * and it's fine. It must not be NULL though.
3262 	 */
3263 	KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__));
3264 
3265 	if (__predict_false(paddr != buf->paddr)) {
3266 		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
3267 		    __func__, paddr, buf->paddr);
3268 	}
3269 
3270 	/* There's only one buffer pool for now */
3271 	bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
3272 	bpsc = device_get_softc(bpdev);
3273 
3274 	/* Release buffer to QBMan buffer pool */
3275 	error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpsc->attr.bpid, &paddr, 1);
3276 	if (error != 0) {
3277 		device_printf(sc->dev, "%s: failed to release frame buffer to "
3278 		    "the pool: error=%d\n", __func__, error);
3279 		return (error);
3280 	}
3281 
3282 	return (0);
3283 }
3284 
3285 /**
3286  * @brief Receive Tx confirmation frames.
3287  */
3288 static int
3289 dpaa2_ni_tx_conf(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq,
3290     struct dpaa2_fd *fd)
3291 {
3292 	bus_addr_t paddr = (bus_addr_t)fd->addr;
3293 	struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr);
3294 	struct dpaa2_buf *buf = fa->buf;
3295 	struct dpaa2_buf *sgt = buf->sgt;
3296 	struct dpaa2_ni_tx_ring *tx = (struct dpaa2_ni_tx_ring *)buf->opt;
3297 	struct dpaa2_channel *bch = tx->fq->chan;
3298 
3299 	KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
3300 	KASSERT(tx != NULL, ("%s: Tx ring is NULL", __func__));
3301 	KASSERT(sgt != NULL, ("%s: S/G table is NULL", __func__));
3302 	/*
3303 	 * NOTE: Current channel might not be the same as the "buffer" channel
3304 	 * and it's fine. It must not be NULL though.
3305 	 */
3306 	KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__));
3307 
3308 	if (paddr != buf->paddr) {
3309 		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
3310 		    __func__, paddr, buf->paddr);
3311 	}
3312 
3313 	mtx_assert(&bch->dma_mtx, MA_NOTOWNED);
3314 	mtx_lock(&bch->dma_mtx);
3315 
3316 	bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_POSTWRITE);
3317 	bus_dmamap_sync(sgt->dmat, sgt->dmap, BUS_DMASYNC_POSTWRITE);
3318 	bus_dmamap_unload(buf->dmat, buf->dmap);
3319 	bus_dmamap_unload(sgt->dmat, sgt->dmap);
3320 	m_freem(buf->m);
3321 	buf->m = NULL;
3322 	buf->paddr = 0;
3323 	buf->vaddr = NULL;
3324 	sgt->paddr = 0;
3325 
3326 	mtx_unlock(&bch->dma_mtx);
3327 
3328 	/* Return the Tx buffer to its ring */
3329 	buf_ring_enqueue(tx->br, buf);
3330 
3331 	return (0);
3332 }
3333 
3334 /**
3335  * @brief Compare versions of the DPAA2 network interface API.
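 *
 * Returns <0, 0, or >0 when the DPNI API version is earlier than, equal to,
 * or later than the given major.minor version, respectively. For example
 * (hypothetical version numbers):
 *
 *	if (dpaa2_ni_cmp_api_version(sc, 7, 10) >= 0)
 *		... a command introduced in API 7.10 may be used ...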
3336  */
3337 static int
3338 dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *sc, uint16_t major,
3339     uint16_t minor)
3340 {
3341 	if (sc->api_major == major) {
3342 		return (sc->api_minor - minor);
3343 	}
3344 	return (sc->api_major - major);
3345 }
3346 
3347 /**
3348  * @brief Build a DPAA2 frame descriptor.
3349  */
3350 static int
3351 dpaa2_ni_build_fd(struct dpaa2_ni_softc *sc, struct dpaa2_ni_tx_ring *tx,
3352     struct dpaa2_buf *buf, bus_dma_segment_t *segs, int nsegs, struct dpaa2_fd *fd)
3353 {
3354 	struct dpaa2_buf *sgt = buf->sgt;
3355 	struct dpaa2_sg_entry *sge;
3356 	struct dpaa2_fa *fa;
3357 	int i, error;
3358 
3359 	KASSERT(nsegs <= DPAA2_TX_SEGLIMIT, ("%s: too many segments", __func__));
3360 	KASSERT(buf->opt != NULL, ("%s: no Tx ring?", __func__));
3361 	KASSERT(sgt != NULL, ("%s: no S/G table?", __func__));
3362 	KASSERT(sgt->vaddr != NULL, ("%s: no S/G vaddr?", __func__));
3363 
3364 	memset(fd, 0, sizeof(*fd));
3365 
3366 	/* Populate and map S/G table */
3367 	if (__predict_true(nsegs <= DPAA2_TX_SEGLIMIT)) {
3368 		sge = (struct dpaa2_sg_entry *)sgt->vaddr + sc->tx_data_off;
3369 		for (i = 0; i < nsegs; i++) {
3370 			sge[i].addr = (uint64_t)segs[i].ds_addr;
3371 			sge[i].len = (uint32_t)segs[i].ds_len;
3372 			sge[i].offset_fmt = 0u;
3373 		}
3374 		sge[i-1].offset_fmt |= 0x8000u; /* set final entry flag */
3375 
3376 		KASSERT(sgt->paddr == 0, ("%s: paddr(%#jx) != 0", __func__,
3377 		    sgt->paddr));
3378 
3379 		error = bus_dmamap_load(sgt->dmat, sgt->dmap, sgt->vaddr,
3380 		    DPAA2_TX_SGT_SZ, dpaa2_dmamap_oneseg_cb, &sgt->paddr,
3381 		    BUS_DMA_NOWAIT);
3382 		if (__predict_false(error != 0)) {
3383 			device_printf(sc->dev, "%s: bus_dmamap_load() failed: "
3384 			    "error=%d\n", __func__, error);
3385 			return (error);
3386 		}
3387 
3388 		buf->paddr = sgt->paddr;
3389 		buf->vaddr = sgt->vaddr;
3390 		sc->tx_sg_frames++; /* for sysctl(9) */
3391 	} else {
3392 		return (EINVAL);
3393 	}
3394 
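	/*
	 * The frame annotation is written at the start of the S/G table so
	 * that dpaa2_ni_tx_conf() can recover the buffer from the FD address.
	 */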
3395 	fa = (struct dpaa2_fa *)sgt->vaddr;
3396 	fa->magic = DPAA2_MAGIC;
3397 	fa->buf = buf;
3398 
3399 	fd->addr = buf->paddr;
3400 	fd->data_length = (uint32_t)buf->m->m_pkthdr.len;
3401 	fd->bpid_ivp_bmt = 0;
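	/*
	 * FMT bits (13:12) of offset_fmt_sl = 2 mark a scatter/gather frame;
	 * the low 12 bits carry the data offset. The 0x00800000 control bit
	 * is assumed to be PTA (pass-through annotation); the constant is
	 * not named here.
	 */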
3402 	fd->offset_fmt_sl = 0x2000u | sc->tx_data_off;
3403 	fd->ctrl = 0x00800000u;
3404 
3405 	return (0);
3406 }
3407 
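/**
 * @brief Extract the error bits from the frame descriptor's control field.
 */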
3408 static int
3409 dpaa2_ni_fd_err(struct dpaa2_fd *fd)
3410 {
3411 	return ((fd->ctrl >> DPAA2_NI_FD_ERR_SHIFT) & DPAA2_NI_FD_ERR_MASK);
3412 }
3413 
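/**
 * @brief Return the frame data length, taking the short-length (SL) format
 * into account.
 */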
3414 static uint32_t
3415 dpaa2_ni_fd_data_len(struct dpaa2_fd *fd)
3416 {
3417 	if (dpaa2_ni_fd_short_len(fd)) {
3418 		return (fd->data_length & DPAA2_NI_FD_LEN_MASK);
3419 	}
3420 	return (fd->data_length);
3421 }
3422 
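/**
 * @brief Return the format (e.g. single buffer or scatter/gather) of the
 * frame descriptor.
 */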
3423 static int
3424 dpaa2_ni_fd_format(struct dpaa2_fd *fd)
3425 {
3426 	return ((enum dpaa2_fd_format)((fd->offset_fmt_sl >>
3427 	    DPAA2_NI_FD_FMT_SHIFT) & DPAA2_NI_FD_FMT_MASK));
3428 }
3429 
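/**
 * @brief Check whether the short-length (SL) bit is set in the frame
 * descriptor.
 */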
3430 static bool
3431 dpaa2_ni_fd_short_len(struct dpaa2_fd *fd)
3432 {
3433 	return (((fd->offset_fmt_sl >> DPAA2_NI_FD_SL_SHIFT)
3434 	    & DPAA2_NI_FD_SL_MASK) == 1);
3435 }
3436 
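/**
 * @brief Return the frame data offset from the frame descriptor.
 */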
3437 static int
3438 dpaa2_ni_fd_offset(struct dpaa2_fd *fd)
3439 {
3440 	return (fd->offset_fmt_sl & DPAA2_NI_FD_OFFSET_MASK);
3441 }
3442 
3443 /**
3444  * @brief Collect statistics of the network interface.
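 *
 * A sysctl(9) handler: reads the counter page for this oid over the MC
 * command interface and reports a single 64-bit counter.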
3445  */
3446 static int
3447 dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS)
3448 {
3449 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
3450 	struct dpni_stat *stat = &dpni_stat_sysctls[oidp->oid_number];
3451 	device_t pdev = device_get_parent(sc->dev);
3452 	device_t dev = sc->dev;
3453 	device_t child = dev;
3454 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
3455 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
3456 	struct dpaa2_cmd cmd;
3457 	uint64_t cnt[DPAA2_NI_STAT_COUNTERS];
3458 	uint64_t result = 0;
3459 	uint16_t rc_token, ni_token;
3460 	int error;
3461 
3462 	DPAA2_CMD_INIT(&cmd);
3463 
3464 	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
3465 	if (error) {
3466 		device_printf(dev, "%s: failed to open resource container: "
3467 		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
3468 		goto exit;
3469 	}
3470 	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
3471 	if (error) {
3472 		device_printf(dev, "%s: failed to open network interface: "
3473 		    "id=%d, error=%d\n", __func__, dinfo->id, error);
3474 		goto close_rc;
3475 	}
3476 
3477 	error = DPAA2_CMD_NI_GET_STATISTICS(dev, child, &cmd, stat->page, 0, cnt);
3478 	if (!error) {
3479 		result = cnt[stat->cnt];
3480 	}
3481 
3482 	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
3483 close_rc:
3484 	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
3485 exit:
3486 	return (sysctl_handle_64(oidp, &result, 0, req));
3487 }
3488 
3489 static int
3490 dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS)
3491 {
3492 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
3493 	uint32_t buf_num = DPAA2_ATOMIC_READ(&sc->buf_num);
3494 
3495 	return (sysctl_handle_32(oidp, &buf_num, 0, req));
3496 }
3497 
3498 static int
3499 dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS)
3500 {
3501 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
3502 	uint32_t buf_free = DPAA2_ATOMIC_READ(&sc->buf_free);
3503 
3504 	return (sysctl_handle_32(oidp, &buf_free, 0, req));
3505 }
3506 
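/**
 * @brief Configure the Rx hash key from a combination of RXH_ flags.
 */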
3507 static int
3508 dpaa2_ni_set_hash(device_t dev, uint64_t flags)
3509 {
3510 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
3511 	uint64_t key = 0;
3512 	int i;
3513 
3514 	if (sc->attr.num.queues <= 1) {
3515 		return (EOPNOTSUPP);
3516 	}
3517 
3518 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3519 		if (dist_fields[i].rxnfc_field & flags) {
3520 			key |= dist_fields[i].id;
3521 		}
3522 	}
3523 
3524 	return (dpaa2_ni_set_dist_key(dev, DPAA2_NI_DIST_MODE_HASH, key));
3525 }
3526 
3527 /**
3528  * @brief Set Rx distribution (hash or flow classification) key, where
3529  * "flags" is a combination of RXH_ bits.
3530  */
3531 static int
3532 dpaa2_ni_set_dist_key(device_t dev, enum dpaa2_ni_dist_mode type, uint64_t flags)
3533 {
3534 	device_t pdev = device_get_parent(dev);
3535 	device_t child = dev;
3536 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
3537 	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
3538 	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
3539 	struct dpkg_profile_cfg cls_cfg;
3540 	struct dpkg_extract *key;
3541 	struct dpaa2_buf *buf = &sc->rxd_kcfg;
3542 	struct dpaa2_cmd cmd;
3543 	uint16_t rc_token, ni_token;
3544 	int i, error = 0;
3545 
3546 	if (__predict_true(buf->dmat == NULL)) {
3547 		buf->dmat = sc->rxd_dmat;
3548 	}
3549 
3550 	memset(&cls_cfg, 0, sizeof(cls_cfg));
3551 
3552 	/* Configure extracts according to the given flags. */
3553 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3554 		key = &cls_cfg.extracts[cls_cfg.num_extracts];
3555 
3556 		if (!(flags & dist_fields[i].id)) {
3557 			continue;
3558 		}
3559 
3560 		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
3561 			device_printf(dev, "%s: failed to add key extraction "
3562 			    "rule\n", __func__);
3563 			return (E2BIG);
3564 		}
3565 
3566 		key->type = DPKG_EXTRACT_FROM_HDR;
3567 		key->extract.from_hdr.prot = dist_fields[i].cls_prot;
3568 		key->extract.from_hdr.type = DPKG_FULL_FIELD;
3569 		key->extract.from_hdr.field = dist_fields[i].cls_field;
3570 		cls_cfg.num_extracts++;
3571 	}
3572 
3573 	error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr,
3574 	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap);
3575 	if (error != 0) {
3576 		device_printf(dev, "%s: failed to allocate a buffer for Rx "
3577 		    "traffic distribution key configuration\n", __func__);
3578 		return (error);
3579 	}
3580 
3581 	error = dpaa2_ni_prepare_key_cfg(&cls_cfg, (uint8_t *)buf->vaddr);
3582 	if (error != 0) {
3583 		device_printf(dev, "%s: failed to prepare key configuration: "
3584 		    "error=%d\n", __func__, error);
3585 		return (error);
3586 	}
3587 
3588 	/* Prepare for setting the Rx dist. */
3589 	error = bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr,
3590 	    DPAA2_CLASSIFIER_DMA_SIZE, dpaa2_dmamap_oneseg_cb, &buf->paddr,
3591 	    BUS_DMA_NOWAIT);
3592 	if (error != 0) {
3593 		device_printf(sc->dev, "%s: failed to map a buffer for Rx "
3594 		    "traffic distribution key configuration\n", __func__);
3595 		return (error);
3596 	}
3597 
3598 	if (type == DPAA2_NI_DIST_MODE_HASH) {
3599 		DPAA2_CMD_INIT(&cmd);
3600 
3601 		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
3602 		    &rc_token);
3603 		if (error) {
3604 			device_printf(dev, "%s: failed to open resource "
3605 			    "container: id=%d, error=%d\n", __func__, rcinfo->id,
3606 			    error);
3607 			goto err_exit;
3608 		}
3609 		error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id,
3610 		    &ni_token);
3611 		if (error) {
3612 			device_printf(dev, "%s: failed to open network "
3613 			    "interface: id=%d, error=%d\n", __func__, dinfo->id,
3614 			    error);
3615 			goto close_rc;
3616 		}
3617 
3618 		error = DPAA2_CMD_NI_SET_RX_TC_DIST(dev, child, &cmd,
3619 		    sc->attr.num.queues, 0, DPAA2_NI_DIST_MODE_HASH, buf->paddr);
3620 		if (error != 0) {
3621 			device_printf(dev, "%s: failed to set distribution mode "
3622 			    "and size for the traffic class\n", __func__);
3623 		}
3624 
3625 		(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
3626 		    ni_token));
3627 close_rc:
3628 		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
3629 		    rc_token));
3630 	}
3631 
3632 err_exit:
3633 	return (error);
3634 }
3635 
3636 /**
3637  * @brief Prepare extract parameters.
3638  *
3639  * cfg:		defines a full Key Generation profile.
3640  * key_cfg_buf:	256 bytes of zeroed memory to fill in before mapping it for DMA.
3641  */
3642 static int
3643 dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *cfg, uint8_t *key_cfg_buf)
3644 {
3645 	struct dpni_ext_set_rx_tc_dist *dpni_ext;
3646 	struct dpni_dist_extract *extr;
3647 	int i, j;
3648 
3649 	if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
3650 		return (EINVAL);
3651 
3652 	dpni_ext = (struct dpni_ext_set_rx_tc_dist *) key_cfg_buf;
3653 	dpni_ext->num_extracts = cfg->num_extracts;
3654 
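	/* Serialize each extract into the MC firmware's binary layout. */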
3655 	for (i = 0; i < cfg->num_extracts; i++) {
3656 		extr = &dpni_ext->extracts[i];
3657 
3658 		switch (cfg->extracts[i].type) {
3659 		case DPKG_EXTRACT_FROM_HDR:
3660 			extr->prot = cfg->extracts[i].extract.from_hdr.prot;
3661 			extr->efh_type =
3662 			    cfg->extracts[i].extract.from_hdr.type & 0x0Fu;
3663 			extr->size = cfg->extracts[i].extract.from_hdr.size;
3664 			extr->offset = cfg->extracts[i].extract.from_hdr.offset;
3665 			extr->field = cfg->extracts[i].extract.from_hdr.field;
3666 			extr->hdr_index =
3667 				cfg->extracts[i].extract.from_hdr.hdr_index;
3668 			break;
3669 		case DPKG_EXTRACT_FROM_DATA:
3670 			extr->size = cfg->extracts[i].extract.from_data.size;
3671 			extr->offset =
3672 				cfg->extracts[i].extract.from_data.offset;
3673 			break;
3674 		case DPKG_EXTRACT_FROM_PARSE:
3675 			extr->size = cfg->extracts[i].extract.from_parse.size;
3676 			extr->offset =
3677 				cfg->extracts[i].extract.from_parse.offset;
3678 			break;
3679 		default:
3680 			return (EINVAL);
3681 		}
3682 
3683 		extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
3684 		extr->extract_type = cfg->extracts[i].type & 0x0Fu;
3685 
3686 		for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
3687 			extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
3688 			extr->masks[j].offset =
3689 				cfg->extracts[i].masks[j].offset;
3690 		}
3691 	}
3692 
3693 	return (0);
3694 }
3695 
3696 static device_method_t dpaa2_ni_methods[] = {
3697 	/* Device interface */
3698 	DEVMETHOD(device_probe,		dpaa2_ni_probe),
3699 	DEVMETHOD(device_attach,	dpaa2_ni_attach),
3700 	DEVMETHOD(device_detach,	dpaa2_ni_detach),
3701 
3702 	/* mii via memac_mdio */
3703 	DEVMETHOD(miibus_statchg,	dpaa2_ni_miibus_statchg),
3704 
3705 	DEVMETHOD_END
3706 };
3707 
3708 static driver_t dpaa2_ni_driver = {
3709 	"dpaa2_ni",
3710 	dpaa2_ni_methods,
3711 	sizeof(struct dpaa2_ni_softc),
3712 };
3713 
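/*
 * dpaa2_ni attaches to the DPAA2 resource container (dpaa2_rc); miibus
 * attaches below dpaa2_ni to manage PHYs via the memac MDIO drivers.
 */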
3714 DRIVER_MODULE(miibus, dpaa2_ni, miibus_driver, 0, 0);
3715 DRIVER_MODULE(dpaa2_ni, dpaa2_rc, dpaa2_ni_driver, 0, 0);
3716 
3717 MODULE_DEPEND(dpaa2_ni, miibus, 1, 1, 1);
3718 #ifdef DEV_ACPI
3719 MODULE_DEPEND(dpaa2_ni, memac_mdio_acpi, 1, 1, 1);
3720 #endif
3721 #ifdef FDT
3722 MODULE_DEPEND(dpaa2_ni, memac_mdio_fdt, 1, 1, 1);
3723 #endif
3724