xref: /freebsd/sys/dev/dpaa2/dpaa2_ni.c (revision 9c067b844f85a224f0416e6eb46ba3ef82aec5c4)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright © 2021-2022 Dmitry Salychev
 * Copyright © 2022 Mathew McBride
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * The DPAA2 Network Interface (DPNI) driver.
 *
 * The DPNI object is a network interface that is configurable to support a wide
 * range of features from a very basic Ethernet interface up to a
 * high-functioning network interface. The DPNI supports features that are
 * expected by standard network stacks, from basic features to offloads.
 *
 * DPNIs work with Ethernet traffic, starting with the L2 header. Additional
 * functions are provided for standard network protocols (L2, L3, L4, etc.).
 */
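
/*
 * A DPNI does not operate on its own. A sketch of how it relates to the other
 * DPAA2 objects used by this driver (see the resource specification below):
 *
 *	DPNI <--> DPMAC (or another DPNI, DPSW or DPDMUX endpoint)
 *	 |
 *	 +- DPMCP: command portal used to talk to the MC firmware
 *	 +- DPBP:  buffer pool the hardware takes Rx buffers from
 *	 +- DPIO:  software portal used to dequeue frames and to receive
 *	           data availability notifications (one per core)
 *	 +- DPCON: channel used to tell traffic of this DPNI apart from
 *	           other network interfaces on the same core
 */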

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/taskqueue.h>
#include <sys/buf_ring.h>
#include <sys/smp.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/atomic.h>

#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <dev/pci/pcivar.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mdio/mdio.h>

#include "opt_acpi.h"
#include "opt_platform.h"

#include "pcib_if.h"
#include "pci_if.h"
#include "miibus_if.h"
#include "memac_mdio_if.h"

#include "dpaa2_types.h"
#include "dpaa2_mc.h"
#include "dpaa2_mc_if.h"
#include "dpaa2_mcp.h"
#include "dpaa2_swp.h"
#include "dpaa2_swp_if.h"
#include "dpaa2_cmd_if.h"
#include "dpaa2_ni.h"

#define BIT(x)			(1ul << (x))
#define WRIOP_VERSION(x, y, z)	((x) << 10 | (y) << 5 | (z) << 0)
#define ARRAY_SIZE(a)		(sizeof(a) / sizeof((a)[0]))

/* Frame Dequeue Response status bits. */
#define IS_NULL_RESPONSE(stat)	((((stat) >> 4) & 1) == 0)

#define	ALIGN_UP(x, y)		roundup2((x), (y))
#define	ALIGN_DOWN(x, y)	rounddown2((x), (y))
#define CACHE_LINE_ALIGN(x)	ALIGN_UP((x), CACHE_LINE_SIZE)
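
/*
 * E.g., ALIGN_UP(1000, 64) == 1024, ALIGN_DOWN(1000, 64) == 960, and
 * WRIOP_VERSION(1, 0, 0) == 0x400 (cf. the BUF_ALIGN_V1 limitation below).
 */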

#define DPNI_LOCK(__sc) do {			\
	mtx_assert(&(__sc)->lock, MA_NOTOWNED);	\
	mtx_lock(&(__sc)->lock);		\
} while (0)
#define	DPNI_UNLOCK(__sc) do {			\
	mtx_assert(&(__sc)->lock, MA_OWNED);	\
	mtx_unlock(&(__sc)->lock);		\
} while (0)

#define TX_LOCK(__tx) do {			\
	mtx_assert(&(__tx)->lock, MA_NOTOWNED);	\
	mtx_lock(&(__tx)->lock);		\
} while (0)
#define	TX_UNLOCK(__tx) do {			\
	mtx_assert(&(__tx)->lock, MA_OWNED);	\
	mtx_unlock(&(__tx)->lock);		\
} while (0)
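
/*
 * The mtx_assert() calls above make lock recursion fail loudly. Intended
 * usage (a sketch):
 *
 *	DPNI_LOCK(sc);
 *	... access state shared with taskqueue/interrupt threads ...
 *	DPNI_UNLOCK(sc);
 */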

#define DPAA2_TX_RING(sc, chan, tc)				\
	(&(sc)->channels[(chan)]->txc_queue.tx_rings[(tc)])

#define DPNI_IRQ_INDEX		0 /* Index of the only DPNI IRQ. */
#define DPNI_IRQ_LINK_CHANGED	1 /* Link state changed */
#define DPNI_IRQ_EP_CHANGED	2 /* DPAA2 endpoint dis/connected */

/* Default maximum frame length. */
#define DPAA2_ETH_MFL		(ETHER_MAX_LEN - ETHER_CRC_LEN)

/* Minimally supported version of the DPNI API. */
#define DPNI_VER_MAJOR		7
#define DPNI_VER_MINOR		0

/* Rx/Tx buffers configuration. */
#define BUF_ALIGN_V1		256 /* WRIOP v1.0.0 limitation */
#define BUF_ALIGN		64
#define BUF_SWA_SIZE		64  /* SW annotation size */
#define BUF_RX_HWA_SIZE		64  /* HW annotation size */
#define BUF_TX_HWA_SIZE		128 /* HW annotation size */
#define BUF_SIZE		(MJUM9BYTES)
#define	BUF_MAXADDR_49BIT	0x1FFFFFFFFFFFFul
#define	BUF_MAXADDR		(BUS_SPACE_MAXADDR)

#define DPAA2_TX_BUFRING_SZ	(4096u)
#define DPAA2_TX_SEGLIMIT	(16u) /* arbitrary number */
#define DPAA2_TX_SEG_SZ		(4096u)
#define DPAA2_TX_SEGS_MAXSZ	(DPAA2_TX_SEGLIMIT * DPAA2_TX_SEG_SZ)
#define DPAA2_TX_SGT_SZ		(512u) /* bytes */

/* Size of a buffer to keep a QoS table key configuration. */
#define ETH_QOS_KCFG_BUF_SIZE	256

/* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */
#define DPAA2_CLASSIFIER_DMA_SIZE 256

/* Channel storage buffer configuration. */
#define ETH_STORE_FRAMES	16u
#define ETH_STORE_SIZE		((ETH_STORE_FRAMES + 1) * sizeof(struct dpaa2_dq))
#define ETH_STORE_ALIGN		64u

/* Buffers layout options. */
#define BUF_LOPT_TIMESTAMP	0x1
#define BUF_LOPT_PARSER_RESULT	0x2
#define BUF_LOPT_FRAME_STATUS	0x4
#define BUF_LOPT_PRIV_DATA_SZ	0x8
#define BUF_LOPT_DATA_ALIGN	0x10
#define BUF_LOPT_DATA_HEAD_ROOM	0x20
#define BUF_LOPT_DATA_TAIL_ROOM	0x40

#define DPAA2_NI_BUF_ADDR_MASK	(0x1FFFFFFFFFFFFul) /* 49-bit addresses max. */
#define DPAA2_NI_BUF_CHAN_MASK	(0xFu)
#define DPAA2_NI_BUF_CHAN_SHIFT	(60)
#define DPAA2_NI_BUF_IDX_MASK	(0x7FFFu)
#define DPAA2_NI_BUF_IDX_SHIFT	(49)
#define DPAA2_NI_TX_IDX_MASK	(0x7u)
#define DPAA2_NI_TX_IDX_SHIFT	(57)
#define DPAA2_NI_TXBUF_IDX_MASK	(0xFFu)
#define DPAA2_NI_TXBUF_IDX_SHIFT (49)

#define DPAA2_NI_FD_FMT_MASK	(0x3u)
#define DPAA2_NI_FD_FMT_SHIFT	(12)
#define DPAA2_NI_FD_ERR_MASK	(0xFFu)
#define DPAA2_NI_FD_ERR_SHIFT	(0)
#define DPAA2_NI_FD_SL_MASK	(0x1u)
#define DPAA2_NI_FD_SL_SHIFT	(14)
#define DPAA2_NI_FD_LEN_MASK	(0x3FFFFu)
#define DPAA2_NI_FD_OFFSET_MASK (0x0FFFu)

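/*
 * DPAA2 buffer addresses are limited to 49 bits, so the driver can stash
 * bookkeeping data in the upper bits of an address carried by the frame
 * descriptor (FD). Judging by the masks and shifts above, the expected
 * layout for Tx frames is:
 *
 *	63       60 59     57 56          49 48                     0
 *	+----------+---------+--------------+------------------------+
 *	| chan idx | tx idx  |  tx buf idx  |    physical address    |
 *	+----------+---------+--------------+------------------------+
 *
 * while Rx frames keep a channel index in bits 63-60 and a buffer index in
 * the rest of the upper bits.
 */
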
198 #define DPNI_OPT_HAS_KEY_MASKING 0x10
199 
200 /* Unique IDs for the supported Rx classification header fields. */
201 #define DPAA2_ETH_DIST_ETHDST	BIT(0)
202 #define DPAA2_ETH_DIST_ETHSRC	BIT(1)
203 #define DPAA2_ETH_DIST_ETHTYPE	BIT(2)
204 #define DPAA2_ETH_DIST_VLAN	BIT(3)
205 #define DPAA2_ETH_DIST_IPSRC	BIT(4)
206 #define DPAA2_ETH_DIST_IPDST	BIT(5)
207 #define DPAA2_ETH_DIST_IPPROTO	BIT(6)
208 #define DPAA2_ETH_DIST_L4SRC	BIT(7)
209 #define DPAA2_ETH_DIST_L4DST	BIT(8)
210 #define DPAA2_ETH_DIST_ALL	(~0ULL)
211 
212 /* L3-L4 network traffic flow hash options. */
213 #define	RXH_L2DA		(1 << 1)
214 #define	RXH_VLAN		(1 << 2)
215 #define	RXH_L3_PROTO		(1 << 3)
216 #define	RXH_IP_SRC		(1 << 4)
217 #define	RXH_IP_DST		(1 << 5)
218 #define	RXH_L4_B_0_1		(1 << 6) /* src port in case of TCP/UDP/SCTP */
219 #define	RXH_L4_B_2_3		(1 << 7) /* dst port in case of TCP/UDP/SCTP */
220 #define	RXH_DISCARD		(1 << 31)
221 
222 /* Default Rx hash options, set during attaching. */
223 #define DPAA2_RXH_DEFAULT	(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
224 
225 MALLOC_DEFINE(M_DPAA2_NI, "dpaa2_ni", "DPAA2 Network Interface");
226 
227 /* DPAA2 Network Interface resource specification. */
228 struct resource_spec dpaa2_ni_spec[] = {
229 	/*
230 	 * DPMCP resources.
231 	 *
232 	 * NOTE: MC command portals (MCPs) are used to send commands to, and
233 	 *	 receive responses from, the MC firmware. One portal per DPNI.
234 	 */
235 #define MCP_RES_NUM	(1u)
236 #define MCP_RID_OFF	(0u)
237 #define MCP_RID(rid)	((rid) + MCP_RID_OFF)
238 	/* --- */
239 	{ DPAA2_DEV_MCP, MCP_RID(0),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
240 	/*
241 	 * DPIO resources (software portals).
242 	 *
243 	 * NOTE: One per running core. While DPIOs are the source of data
244 	 *	 availability interrupts, the DPCONs are used to identify the
245 	 *	 network interface that has produced ingress data to that core.
246 	 */
247 #define IO_RES_NUM	(16u)
248 #define IO_RID_OFF	(MCP_RID_OFF + MCP_RES_NUM)
249 #define IO_RID(rid)	((rid) + IO_RID_OFF)
250 	/* --- */
251 	{ DPAA2_DEV_IO,  IO_RID(0),    RF_ACTIVE | RF_SHAREABLE },
252 	{ DPAA2_DEV_IO,  IO_RID(1),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
253 	{ DPAA2_DEV_IO,  IO_RID(2),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
254 	{ DPAA2_DEV_IO,  IO_RID(3),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
255 	{ DPAA2_DEV_IO,  IO_RID(4),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
256 	{ DPAA2_DEV_IO,  IO_RID(5),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
257 	{ DPAA2_DEV_IO,  IO_RID(6),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
258 	{ DPAA2_DEV_IO,  IO_RID(7),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
259 	{ DPAA2_DEV_IO,  IO_RID(8),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
260 	{ DPAA2_DEV_IO,  IO_RID(9),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
261 	{ DPAA2_DEV_IO,  IO_RID(10),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
262 	{ DPAA2_DEV_IO,  IO_RID(11),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
263 	{ DPAA2_DEV_IO,  IO_RID(12),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
264 	{ DPAA2_DEV_IO,  IO_RID(13),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
265 	{ DPAA2_DEV_IO,  IO_RID(14),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
266 	{ DPAA2_DEV_IO,  IO_RID(15),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
267 	/*
268 	 * DPBP resources (buffer pools).
269 	 *
270 	 * NOTE: One per network interface.
271 	 */
272 #define BP_RES_NUM	(1u)
273 #define BP_RID_OFF	(IO_RID_OFF + IO_RES_NUM)
274 #define BP_RID(rid)	((rid) + BP_RID_OFF)
275 	/* --- */
276 	{ DPAA2_DEV_BP,  BP_RID(0),   RF_ACTIVE },
277 	/*
278 	 * DPCON resources (channels).
279 	 *
280 	 * NOTE: One DPCON per core where Rx or Tx confirmation traffic to be
281 	 *	 distributed to.
282 	 * NOTE: Since it is necessary to distinguish between traffic from
283 	 *	 different network interfaces arriving on the same core, the
284 	 *	 DPCONs must be private to the DPNIs.
285 	 */
286 #define CON_RES_NUM	(16u)
287 #define CON_RID_OFF	(BP_RID_OFF + BP_RES_NUM)
288 #define CON_RID(rid)	((rid) + CON_RID_OFF)
289 	/* --- */
290 	{ DPAA2_DEV_CON, CON_RID(0),   RF_ACTIVE },
291 	{ DPAA2_DEV_CON, CON_RID(1),   RF_ACTIVE | RF_OPTIONAL },
292 	{ DPAA2_DEV_CON, CON_RID(2),   RF_ACTIVE | RF_OPTIONAL },
293  	{ DPAA2_DEV_CON, CON_RID(3),   RF_ACTIVE | RF_OPTIONAL },
294  	{ DPAA2_DEV_CON, CON_RID(4),   RF_ACTIVE | RF_OPTIONAL },
295  	{ DPAA2_DEV_CON, CON_RID(5),   RF_ACTIVE | RF_OPTIONAL },
296  	{ DPAA2_DEV_CON, CON_RID(6),   RF_ACTIVE | RF_OPTIONAL },
297  	{ DPAA2_DEV_CON, CON_RID(7),   RF_ACTIVE | RF_OPTIONAL },
298  	{ DPAA2_DEV_CON, CON_RID(8),   RF_ACTIVE | RF_OPTIONAL },
299  	{ DPAA2_DEV_CON, CON_RID(9),   RF_ACTIVE | RF_OPTIONAL },
300  	{ DPAA2_DEV_CON, CON_RID(10),  RF_ACTIVE | RF_OPTIONAL },
301  	{ DPAA2_DEV_CON, CON_RID(11),  RF_ACTIVE | RF_OPTIONAL },
302  	{ DPAA2_DEV_CON, CON_RID(12),  RF_ACTIVE | RF_OPTIONAL },
303  	{ DPAA2_DEV_CON, CON_RID(13),  RF_ACTIVE | RF_OPTIONAL },
304  	{ DPAA2_DEV_CON, CON_RID(14),  RF_ACTIVE | RF_OPTIONAL },
305  	{ DPAA2_DEV_CON, CON_RID(15),  RF_ACTIVE | RF_OPTIONAL },
306 	/* --- */
307 	RESOURCE_SPEC_END
308 };
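
/*
 * With the rid offsets above, sc->res[] ends up laid out as:
 * [0] DPMCP, [1..16] DPIO, [17] DPBP, [18..33] DPCON.
 */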

/* Supported header fields for Rx hash distribution key */
static const struct dpaa2_eth_dist_fields dist_fields[] = {
	{
		/* L2 header */
		.rxnfc_field = RXH_L2DA,
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_DA,
		.id = DPAA2_ETH_DIST_ETHDST,
		.size = 6,
	}, {
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_SA,
		.id = DPAA2_ETH_DIST_ETHSRC,
		.size = 6,
	}, {
		/*
		 * This is the last ethertype field parsed: depending on frame
		 * format, it can be the MAC ethertype or the VLAN etype.
		 */
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_TYPE,
		.id = DPAA2_ETH_DIST_ETHTYPE,
		.size = 2,
	}, {
		/* VLAN header */
		.rxnfc_field = RXH_VLAN,
		.cls_prot = NET_PROT_VLAN,
		.cls_field = NH_FLD_VLAN_TCI,
		.id = DPAA2_ETH_DIST_VLAN,
		.size = 2,
	}, {
		/* IP header */
		.rxnfc_field = RXH_IP_SRC,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_SRC,
		.id = DPAA2_ETH_DIST_IPSRC,
		.size = 4,
	}, {
		.rxnfc_field = RXH_IP_DST,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_DST,
		.id = DPAA2_ETH_DIST_IPDST,
		.size = 4,
	}, {
		.rxnfc_field = RXH_L3_PROTO,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_PROTO,
		.id = DPAA2_ETH_DIST_IPPROTO,
		.size = 1,
	}, {
		/*
		 * Using UDP ports, this is functionally equivalent to raw
		 * byte pairs from the L4 header.
		 */
		.rxnfc_field = RXH_L4_B_0_1,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_SRC,
		.id = DPAA2_ETH_DIST_L4SRC,
		.size = 2,
	}, {
		.rxnfc_field = RXH_L4_B_2_3,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_DST,
		.id = DPAA2_ETH_DIST_L4DST,
		.size = 2,
	},
};

static struct dpni_stat {
	int	 page;
	int	 cnt;
	char	*name;
	char	*desc;
} dpni_stat_sysctls[DPAA2_NI_STAT_SYSCTLS] = {
	/* PAGE, COUNTER, NAME, DESCRIPTION */
	{  0, 0, "in_all_frames",	"All accepted ingress frames" },
	{  0, 1, "in_all_bytes",	"Bytes in all accepted ingress frames" },
	{  0, 2, "in_multi_frames",	"Multicast accepted ingress frames" },
	{  1, 0, "eg_all_frames",	"All egress frames transmitted" },
	{  1, 1, "eg_all_bytes",	"Bytes in all frames transmitted" },
	{  1, 2, "eg_multi_frames",	"Multicast egress frames transmitted" },
	{  2, 0, "in_filtered_frames",	"All ingress frames discarded due to "
	   				"filtering" },
	{  2, 1, "in_discarded_frames",	"All frames discarded due to errors" },
	{  2, 2, "in_nobuf_discards",	"Discards on ingress side due to buffer "
	   				"depletion in DPNI buffer pools" },
};
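
/*
 * NOTE: Each entry above selects a (page, counter) cell of the DPNI
 *	 statistics pages kept by the MC firmware; dpaa2_ni_collect_stats()
 *	 is expected to fetch the corresponding page and report the selected
 *	 counter.
 */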

/* Device interface */
static int dpaa2_ni_probe(device_t);
static int dpaa2_ni_attach(device_t);
static int dpaa2_ni_detach(device_t);

/* DPAA2 network interface setup and configuration */
static int dpaa2_ni_setup(device_t);
static int dpaa2_ni_setup_channels(device_t);
static int dpaa2_ni_setup_fq(device_t, struct dpaa2_ni_channel *,
    enum dpaa2_ni_queue_type);
static int dpaa2_ni_bind(device_t);
static int dpaa2_ni_setup_rx_dist(device_t);
static int dpaa2_ni_setup_irqs(device_t);
static int dpaa2_ni_setup_msi(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_dma(struct dpaa2_ni_softc *);

/* Tx/Rx flow configuration */
static int dpaa2_ni_setup_rx_flow(device_t, struct dpaa2_cmd *,
    struct dpaa2_ni_fq *);
static int dpaa2_ni_setup_tx_flow(device_t, struct dpaa2_cmd *,
    struct dpaa2_ni_fq *);
static int dpaa2_ni_setup_rx_err_flow(device_t, struct dpaa2_cmd *,
    struct dpaa2_ni_fq *);

/* Configuration subroutines */
static int dpaa2_ni_set_buf_layout(device_t, struct dpaa2_cmd *);
static int dpaa2_ni_set_pause_frame(device_t, struct dpaa2_cmd *);
static int dpaa2_ni_set_qos_table(device_t, struct dpaa2_cmd *);
static int dpaa2_ni_set_mac_addr(device_t, struct dpaa2_cmd *, uint16_t,
    uint16_t);
static int dpaa2_ni_set_hash(device_t, uint64_t);
static int dpaa2_ni_set_dist_key(device_t, enum dpaa2_ni_dist_mode, uint64_t);

/* Buffers and buffer pools */
static int dpaa2_ni_seed_buf_pool(struct dpaa2_ni_softc *, uint32_t);
static int dpaa2_ni_seed_rxbuf(struct dpaa2_ni_softc *, struct dpaa2_buf *, int);
static int dpaa2_ni_seed_txbuf(struct dpaa2_ni_softc *, struct dpaa2_buf *, int);
static int dpaa2_ni_seed_chan_storage(struct dpaa2_ni_softc *,
    struct dpaa2_ni_channel *);

/* Frame descriptor routines */
static int dpaa2_ni_build_fd(struct dpaa2_ni_softc *, struct dpaa2_ni_tx_ring *,
    struct dpaa2_buf *, bus_dma_segment_t *, int, struct dpaa2_fd *);
static int dpaa2_ni_fd_err(struct dpaa2_fd *);
static uint32_t dpaa2_ni_fd_data_len(struct dpaa2_fd *);
static int dpaa2_ni_fd_chan_idx(struct dpaa2_fd *);
static int dpaa2_ni_fd_buf_idx(struct dpaa2_fd *);
static int dpaa2_ni_fd_tx_idx(struct dpaa2_fd *);
static int dpaa2_ni_fd_txbuf_idx(struct dpaa2_fd *);
static int dpaa2_ni_fd_format(struct dpaa2_fd *);
static bool dpaa2_ni_fd_short_len(struct dpaa2_fd *);
static int dpaa2_ni_fd_offset(struct dpaa2_fd *);

/* Various subroutines */
static int dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *, uint16_t, uint16_t);
static int dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *, uint8_t *);
static int dpaa2_ni_chan_storage_next(struct dpaa2_ni_channel *,
    struct dpaa2_dq **);

/* Network interface routines */
static void dpaa2_ni_init(void *);
static int  dpaa2_ni_transmit(struct ifnet *, struct mbuf *);
static void dpaa2_ni_qflush(struct ifnet *);
static int  dpaa2_ni_ioctl(struct ifnet *, u_long, caddr_t);
static int  dpaa2_ni_update_mac_filters(struct ifnet *);
static u_int dpaa2_ni_add_maddr(void *, struct sockaddr_dl *, u_int);

/* Interrupt handlers */
static void dpaa2_ni_intr(void *);

/* MII handlers */
static void dpaa2_ni_miibus_statchg(device_t);
static int  dpaa2_ni_media_change(struct ifnet *);
static void dpaa2_ni_media_status(struct ifnet *, struct ifmediareq *);
static void dpaa2_ni_media_tick(void *);

/* DMA mapping callback */
static void dpaa2_ni_dmamap_cb(void *, bus_dma_segment_t *, int, int);

/* Tx/Rx routines. */
static void dpaa2_ni_poll(void *);
static void dpaa2_ni_tx_locked(struct dpaa2_ni_softc *,
    struct dpaa2_ni_tx_ring *, struct mbuf *);
static void dpaa2_ni_bp_task(void *, int);

/* Tx/Rx subroutines */
static int  dpaa2_ni_consume_frames(struct dpaa2_ni_channel *,
    struct dpaa2_ni_fq **, uint32_t *);
static int  dpaa2_ni_rx(struct dpaa2_ni_channel *, struct dpaa2_ni_fq *,
    struct dpaa2_fd *);
static int  dpaa2_ni_rx_err(struct dpaa2_ni_channel *, struct dpaa2_ni_fq *,
    struct dpaa2_fd *);
static int  dpaa2_ni_tx_conf(struct dpaa2_ni_channel *, struct dpaa2_ni_fq *,
    struct dpaa2_fd *);

/* sysctl(9) */
static int dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS);
static int dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS);
static int dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS);

static int
dpaa2_ni_probe(device_t dev)
{
	/* DPNI device will be added by a parent resource container itself. */
	device_set_desc(dev, "DPAA2 Network Interface");
	return (BUS_PROBE_DEFAULT);
}

static int
dpaa2_ni_attach(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	device_t mcp_dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *mcp_dinfo;
	struct ifnet *ifp;
	char tq_name[32];
	int error;

	sc->dev = dev;
	sc->ifp = NULL;
	sc->miibus = NULL;
	sc->mii = NULL;
	sc->media_status = 0;
	sc->if_flags = 0;
	sc->link_state = LINK_STATE_UNKNOWN;
	sc->buf_align = 0;

	/* For debug purposes only! */
	sc->rx_anomaly_frames = 0;
	sc->rx_single_buf_frames = 0;
	sc->rx_sg_buf_frames = 0;
	sc->rx_enq_rej_frames = 0;
	sc->rx_ieoi_err_frames = 0;
	sc->tx_single_buf_frames = 0;
	sc->tx_sg_frames = 0;

	DPAA2_ATOMIC_XCHG(&sc->buf_num, 0);
	DPAA2_ATOMIC_XCHG(&sc->buf_free, 0);

	sc->bp_dmat = NULL;
	sc->st_dmat = NULL;
	sc->rxd_dmat = NULL;
	sc->qos_dmat = NULL;

	sc->qos_kcfg.type = DPAA2_BUF_STORE;
	sc->qos_kcfg.store.dmap = NULL;
	sc->qos_kcfg.store.paddr = 0;
	sc->qos_kcfg.store.vaddr = NULL;

	sc->rxd_kcfg.type = DPAA2_BUF_STORE;
	sc->rxd_kcfg.store.dmap = NULL;
	sc->rxd_kcfg.store.paddr = 0;
	sc->rxd_kcfg.store.vaddr = NULL;

	sc->mac.dpmac_id = 0;
	sc->mac.phy_dev = NULL;
	memset(sc->mac.addr, 0, ETHER_ADDR_LEN);

	error = bus_alloc_resources(sc->dev, dpaa2_ni_spec, sc->res);
	if (error) {
		device_printf(dev, "%s: failed to allocate resources: "
		    "error=%d\n", __func__, error);
		return (ENXIO);
	}

	/* Obtain MC portal. */
	mcp_dev = (device_t) rman_get_start(sc->res[MCP_RID(0)]);
	mcp_dinfo = device_get_ivars(mcp_dev);
	dinfo->portal = mcp_dinfo->portal;

	mtx_init(&sc->lock, device_get_nameunit(dev), "dpaa2_ni", MTX_DEF);

	/* Allocate network interface */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "%s: failed to allocate network interface\n",
		    __func__);
		return (ENXIO);
	}
	sc->ifp = ifp;
	if_initname(ifp, DPAA2_NI_IFNAME, device_get_unit(sc->dev));

	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
	ifp->if_init = dpaa2_ni_init;
	ifp->if_ioctl = dpaa2_ni_ioctl;
	ifp->if_transmit = dpaa2_ni_transmit;
	ifp->if_qflush = dpaa2_ni_qflush;

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU;
	ifp->if_capenable = ifp->if_capabilities;

	/* Allocate a command to send to MC hardware. */
	error = dpaa2_mcp_init_command(&sc->cmd, DPAA2_CMD_DEF);
	if (error) {
		device_printf(dev, "%s: failed to allocate dpaa2_cmd: "
		    "error=%d\n", __func__, error);
		goto err_exit;
	}

	/* Open resource container and network interface object. */
	error = DPAA2_CMD_RC_OPEN(dev, child, sc->cmd, rcinfo->id,
	    &sc->rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_free_cmd;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, dpaa2_mcp_tk(sc->cmd,
	    sc->rc_token), dinfo->id, &sc->ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto err_close_rc;
	}

	/* Create a taskqueue thread to release new buffers to the pool. */
	TASK_INIT(&sc->bp_task, 0, dpaa2_ni_bp_task, sc);
	bzero(tq_name, sizeof (tq_name));
	snprintf(tq_name, sizeof (tq_name), "%s_tqbp",
	    device_get_nameunit(dev));
	sc->bp_taskq = taskqueue_create(tq_name, M_WAITOK,
	    taskqueue_thread_enqueue, &sc->bp_taskq);
	if (sc->bp_taskq == NULL) {
		device_printf(dev, "%s: failed to allocate task queue: %s\n",
		    __func__, tq_name);
		goto err_close_ni;
	}
	taskqueue_start_threads(&sc->bp_taskq, 1, PI_NET, "%s", tq_name);

	error = dpaa2_ni_setup(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup DPNI: error=%d\n",
		    __func__, error);
		goto err_close_ni;
	}
	error = dpaa2_ni_setup_channels(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup QBMan channels: "
		    "error=%d\n", __func__, error);
		goto err_close_ni;
	}

	error = dpaa2_ni_bind(dev);
	if (error) {
		device_printf(dev, "%s: failed to bind DPNI: error=%d\n",
		    __func__, error);
		goto err_close_ni;
	}
	error = dpaa2_ni_setup_irqs(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup IRQs: error=%d\n",
		    __func__, error);
		goto err_close_ni;
	}
	error = dpaa2_ni_setup_sysctls(sc);
	if (error) {
		device_printf(dev, "%s: failed to setup sysctls: error=%d\n",
		    __func__, error);
		goto err_close_ni;
	}

	ether_ifattach(sc->ifp, sc->mac.addr);
	callout_init(&sc->mii_callout, 0);

	return (0);

err_close_ni:
	DPAA2_CMD_NI_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->ni_token));
err_close_rc:
	DPAA2_CMD_RC_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->rc_token));
err_free_cmd:
	dpaa2_mcp_free_command(sc->cmd);
err_exit:
	return (ENXIO);
}

static void
dpaa2_ni_fixed_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct dpaa2_ni_softc *sc = ifp->if_softc;

	DPNI_LOCK(sc);
	ifmr->ifm_count = 0;
	ifmr->ifm_mask = 0;
	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifmr->ifm_current = ifmr->ifm_active =
	    sc->fixed_ifmedia.ifm_cur->ifm_media;

	/*
	 * In non-PHY use cases, we need to signal link state up; otherwise
	 * things that require a link event from devd (e.g., an async DHCP
	 * client) do not happen.
	 */
	if (ifp->if_link_state == LINK_STATE_UNKNOWN) {
		if_link_state_change(ifp, LINK_STATE_UP);
	}

	/*
	 * TODO: Check the status of the link partner (DPMAC, DPNI or other)
	 * and reset if down. This is different from the
	 * DPAA2_MAC_LINK_TYPE_PHY case, where the MC firmware sets the status
	 * instead of us telling the MC what it is.
	 */
	DPNI_UNLOCK(sc);

	return;
}

static void
dpaa2_ni_setup_fixed_link(struct dpaa2_ni_softc *sc)
{
	/*
	 * FIXME: When the DPNI is connected to a DPMAC, we can get the
	 * 'apparent' speed from it.
	 */
	sc->fixed_link = true;

	ifmedia_init(&sc->fixed_ifmedia, 0, dpaa2_ni_media_change,
		     dpaa2_ni_fixed_media_status);
	ifmedia_add(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL);
	ifmedia_set(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T);
}

static int
dpaa2_ni_detach(device_t dev)
{
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);

	DPAA2_CMD_NI_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->ni_token));
	DPAA2_CMD_RC_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->rc_token));
	dpaa2_mcp_free_command(sc->cmd);

	sc->cmd = NULL;
	sc->ni_token = 0;
	sc->rc_token = 0;

	return (0);
}

/**
 * @brief Configure DPAA2 network interface object.
 */
static int
dpaa2_ni_setup(device_t dev)
{
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_ep_desc ep1_desc, ep2_desc; /* endpoint descriptors */
	struct dpaa2_cmd *cmd = sc->cmd;
	uint8_t eth_bca[ETHER_ADDR_LEN]; /* broadcast physical address */
	uint16_t rc_token = sc->rc_token;
	uint16_t ni_token = sc->ni_token;
	uint16_t mac_token;
	struct dpaa2_mac_attr attr;
	enum dpaa2_mac_link_type link_type;
	uint32_t link;
	int error;

	/* Check if we can work with this DPNI object. */
	error = DPAA2_CMD_NI_GET_API_VERSION(dev, child, dpaa2_mcp_tk(cmd,
	    ni_token), &sc->api_major, &sc->api_minor);
	if (error) {
		device_printf(dev, "%s: failed to get DPNI API version\n",
		    __func__);
		return (error);
	}
	if (dpaa2_ni_cmp_api_version(sc, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
		device_printf(dev, "%s: DPNI API version %u.%u not supported, "
		    "need >= %u.%u\n", __func__, sc->api_major, sc->api_minor,
		    DPNI_VER_MAJOR, DPNI_VER_MINOR);
		error = ENODEV;
		return (error);
	}

	/* Reset the DPNI object. */
	error = DPAA2_CMD_NI_RESET(dev, child, cmd);
	if (error) {
		device_printf(dev, "%s: failed to reset DPNI: id=%d\n",
		    __func__, dinfo->id);
		return (error);
	}

	/* Obtain attributes of the DPNI object. */
	error = DPAA2_CMD_NI_GET_ATTRIBUTES(dev, child, cmd, &sc->attr);
	if (error) {
		device_printf(dev, "%s: failed to obtain DPNI attributes: "
		    "id=%d\n", __func__, dinfo->id);
		return (error);
	}
	if (bootverbose) {
		device_printf(dev, "options=%#x queues=%d tx_channels=%d "
		    "wriop_version=%#x\n", sc->attr.options, sc->attr.num.queues,
		    sc->attr.num.channels, sc->attr.wriop_ver);
		device_printf(dev, "\ttraffic classes: rx=%d tx=%d "
		    "cgs_groups=%d\n", sc->attr.num.rx_tcs, sc->attr.num.tx_tcs,
		    sc->attr.num.cgs);
		device_printf(dev, "\ttable entries: mac=%d vlan=%d qos=%d "
		    "fs=%d\n", sc->attr.entries.mac, sc->attr.entries.vlan,
		    sc->attr.entries.qos, sc->attr.entries.fs);
		device_printf(dev, "\tkey sizes: qos=%d fs=%d\n",
		    sc->attr.key_size.qos, sc->attr.key_size.fs);
	}

	/* Configure buffer layouts of the DPNI queues. */
	error = dpaa2_ni_set_buf_layout(dev, cmd);
	if (error) {
		device_printf(dev, "%s: failed to configure buffer layout\n",
		    __func__);
		return (error);
	}

	/* Configure DMA resources. */
	error = dpaa2_ni_setup_dma(sc);
	if (error) {
		device_printf(dev, "%s: failed to setup DMA\n", __func__);
		return (error);
	}

	/* Setup link between DPNI and an object it's connected to. */
	ep1_desc.obj_id = dinfo->id;
	ep1_desc.if_id = 0; /* DPNI has the only endpoint */
	ep1_desc.type = dinfo->dtype;

	error = DPAA2_CMD_RC_GET_CONN(dev, child, dpaa2_mcp_tk(cmd, rc_token),
	    &ep1_desc, &ep2_desc, &link);
	if (error) {
		device_printf(dev, "%s: failed to obtain an object DPNI is "
		    "connected to: error=%d\n", __func__, error);
	} else {
		device_printf(dev, "connected to %s (id=%d)\n",
		    dpaa2_ttos(ep2_desc.type), ep2_desc.obj_id);

		error = dpaa2_ni_set_mac_addr(dev, cmd, rc_token, ni_token);
		if (error)
			device_printf(dev, "%s: failed to set MAC "
			    "address: error=%d\n", __func__, error);

		if (ep2_desc.type == DPAA2_DEV_MAC) {
			/*
			 * This is the simplest case when DPNI is connected to
			 * DPMAC directly.
			 */
			sc->mac.dpmac_id = ep2_desc.obj_id;

			link_type = DPAA2_MAC_LINK_TYPE_NONE;

			/*
			 * Need to determine if DPMAC type is PHY (attached to
			 * conventional MII PHY) or FIXED (usually SFP/SerDes,
			 * link state managed by MC firmware).
			 */
			error = DPAA2_CMD_MAC_OPEN(sc->dev, child,
			    dpaa2_mcp_tk(sc->cmd, sc->rc_token),
			    sc->mac.dpmac_id, &mac_token);
			/*
			 * Under VFIO, the DPMAC might be sitting in another
			 * container (DPRC) we don't have access to.
			 * Assume DPAA2_MAC_LINK_TYPE_FIXED if this is
			 * the case.
			 */
			if (error) {
				device_printf(dev, "%s: failed to open "
				    "connected DPMAC: dpmac_id=%d (assuming "
				    "it is in another DPRC)\n", __func__,
				    sc->mac.dpmac_id);
				link_type = DPAA2_MAC_LINK_TYPE_FIXED;
			} else {
				error = DPAA2_CMD_MAC_GET_ATTRIBUTES(dev, child,
				    sc->cmd, &attr);
				if (error)
					device_printf(dev, "%s: failed to get "
					    "DPMAC attributes: id=%d, "
					    "error=%d\n", __func__, dinfo->id,
					    error);
				else
					link_type = attr.link_type;
			}
			DPAA2_CMD_MAC_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd,
			    mac_token));

			if (link_type == DPAA2_MAC_LINK_TYPE_FIXED) {
				device_printf(dev, "connected DPMAC is in FIXED "
				    "mode\n");
				dpaa2_ni_setup_fixed_link(sc);
			} else if (link_type == DPAA2_MAC_LINK_TYPE_PHY) {
				device_printf(dev, "connected DPMAC is in PHY "
				    "mode\n");
				error = DPAA2_MC_GET_PHY_DEV(dev,
				    &sc->mac.phy_dev, sc->mac.dpmac_id);
				if (error == 0) {
					error = MEMAC_MDIO_SET_NI_DEV(
					    sc->mac.phy_dev, dev);
					if (error != 0)
						device_printf(dev, "%s: failed "
						    "to set dpni dev on memac "
						    "mdio dev %s: error=%d\n",
						    __func__,
						    device_get_nameunit(
						    sc->mac.phy_dev), error);
				}
				if (error == 0) {
					error = MEMAC_MDIO_GET_PHY_LOC(
					    sc->mac.phy_dev, &sc->mac.phy_loc);
					if (error == ENODEV)
						error = 0;
					if (error != 0)
						device_printf(dev, "%s: failed "
						    "to get phy location from "
						    "memac mdio dev %s: error=%d\n",
						    __func__, device_get_nameunit(
						    sc->mac.phy_dev), error);
				}
				if (error == 0) {
					error = mii_attach(sc->mac.phy_dev,
					    &sc->miibus, sc->ifp,
					    dpaa2_ni_media_change,
					    dpaa2_ni_media_status,
					    BMSR_DEFCAPMASK, sc->mac.phy_loc,
					    MII_OFFSET_ANY, 0);
					if (error != 0)
						device_printf(dev, "%s: failed "
						    "to attach to miibus: "
						    "error=%d\n",
						    __func__, error);
				}
				if (error == 0)
					sc->mii = device_get_softc(sc->miibus);
			} else {
				device_printf(dev, "%s: DPMAC link type is not "
				    "supported\n", __func__);
			}
		} else if (ep2_desc.type == DPAA2_DEV_NI ||
			   ep2_desc.type == DPAA2_DEV_MUX ||
			   ep2_desc.type == DPAA2_DEV_SW) {
			dpaa2_ni_setup_fixed_link(sc);
		}
	}

	/* Select mode to enqueue frames. */
	/* ... TBD ... */

	/*
	 * Update link configuration to enable Rx/Tx pause frames support.
	 *
	 * NOTE: MC may generate an interrupt to the DPMAC and request changes
	 *       in link configuration. It might be necessary to attach miibus
	 *       and PHY before this point.
	 */
	error = dpaa2_ni_set_pause_frame(dev, dpaa2_mcp_tk(cmd, ni_token));
	if (error) {
		device_printf(dev, "%s: failed to configure Rx/Tx pause "
		    "frames\n", __func__);
		return (error);
	}

	/* Configure ingress traffic classification. */
	error = dpaa2_ni_set_qos_table(dev, dpaa2_mcp_tk(cmd, ni_token));
	if (error)
		device_printf(dev, "%s: failed to configure QoS table: "
		    "error=%d\n", __func__, error);

	/* Add broadcast physical address to the MAC filtering table. */
	memset(eth_bca, 0xff, ETHER_ADDR_LEN);
	error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, cmd, eth_bca);
	if (error) {
		device_printf(dev, "%s: failed to add broadcast physical "
		    "address to the MAC filtering table\n", __func__);
		return (error);
	}

	/* Set the maximum allowed length for received frames. */
	error = DPAA2_CMD_NI_SET_MFL(dev, child, cmd, DPAA2_ETH_MFL);
	if (error) {
		device_printf(dev, "%s: failed to set maximum length for "
		    "received frames\n", __func__);
		return (error);
	}

	return (0);
}

/**
 * @brief Configure QBMan channels and register data availability notifications.
 */
static int
dpaa2_ni_setup_channels(device_t dev)
{
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_con_softc *consc;
	struct dpaa2_devinfo *io_info, *con_info;
	device_t io_dev, con_dev, child = dev;
	struct dpaa2_ni_channel *channel;
	struct dpaa2_io_notif_ctx *ctx;
	struct dpaa2_con_notif_cfg notif_cfg;
	struct dpaa2_buf *buf;
	int error;
	struct sysctl_ctx_list *sysctl_ctx;
	struct sysctl_oid *node;
	struct sysctl_oid_list *parent;
	uint32_t i, num_chan;

	/* Calculate number of the channels based on the allocated resources. */
	for (i = 0; i < IO_RES_NUM; i++)
		if (!sc->res[IO_RID(i)])
			break;
	num_chan = i;
	for (i = 0; i < CON_RES_NUM; i++)
		if (!sc->res[CON_RID(i)])
			break;
	num_chan = i < num_chan ? i : num_chan;
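
	/*
	 * E.g., with all 16 DPIO slots filled but only 4 DPCONs allocated,
	 * num_chan ends up being 4 at this point.
	 */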

	/* Limit maximum channels. */
	sc->chan_n = num_chan > DPAA2_NI_MAX_CHANNELS
	    ? DPAA2_NI_MAX_CHANNELS : num_chan;

	/* Limit channels by number of the queues. */
	sc->chan_n = sc->chan_n > sc->attr.num.queues
	    ? sc->attr.num.queues : sc->chan_n;

	device_printf(dev, "channels=%d\n", sc->chan_n);

	sysctl_ctx = device_get_sysctl_ctx(sc->dev);
	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	node = SYSCTL_ADD_NODE(sysctl_ctx, parent, OID_AUTO, "channels",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channels");
	parent = SYSCTL_CHILDREN(node);

	/* Setup channels for the portal. */
	for (i = 0; i < sc->chan_n; i++) {
		/* Select software portal. */
		io_dev = (device_t) rman_get_start(sc->res[IO_RID(i)]);
		io_info = device_get_ivars(io_dev);

		/* Select DPCON (channel). */
		con_dev = (device_t) rman_get_start(sc->res[CON_RID(i)]);
		consc = device_get_softc(con_dev);
		con_info = device_get_ivars(con_dev);

		/* Enable selected channel. */
		error = DPAA2_CMD_CON_ENABLE(dev, child, dpaa2_mcp_tk(consc->cmd,
		    consc->con_token));
		if (error) {
			device_printf(dev, "%s: failed to enable channel: "
			    "dpcon_id=%d, chan_id=%d\n", __func__, con_info->id,
			    consc->attr.chan_id);
			return (error);
		}

		channel = malloc(sizeof(struct dpaa2_ni_channel), M_DPAA2_NI,
		    M_WAITOK | M_ZERO);
		if (!channel) {
			device_printf(dev, "%s: failed to allocate a channel\n",
			    __func__);
			return (ENOMEM);
		}

		sc->channels[i] = channel;

		channel->id = consc->attr.chan_id;
		channel->flowid = i;
		channel->ni_dev = dev;
		channel->io_dev = io_dev;
		channel->con_dev = con_dev;
		channel->recycled_n = 0;

		buf = &channel->store;
		buf->type = DPAA2_BUF_STORE;
		buf->store.dmat = NULL;
		buf->store.dmap = NULL;
		buf->store.paddr = 0;
		buf->store.vaddr = NULL;

		/* For debug purposes only! */
		channel->tx_frames = 0;
		channel->tx_dropped = 0;

		/* None of the frame queues for this channel configured yet. */
		channel->rxq_n = 0;

		/* Setup WQ channel notification context. */
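		/*
		 * NOTE: The 64-bit qman_ctx is echoed back in the CDAN
		 *	 message, which presumably lets the notification
		 *	 handler recover this context (and the channel) from
		 *	 the hardware notification alone.
		 */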
		ctx = &channel->ctx;
		ctx->qman_ctx = (uint64_t) ctx;
		ctx->cdan_en = true;
		ctx->fq_chan_id = channel->id;
		ctx->io_dev = channel->io_dev;
		ctx->channel = channel;
		ctx->poll = dpaa2_ni_poll;

		/* Register the new notification context. */
		error = DPAA2_SWP_CONF_WQ_CHANNEL(channel->io_dev, ctx);
		if (error) {
			device_printf(dev, "%s: failed to register notification "
			    "context\n", __func__);
			return (error);
		}

		/* Register DPCON notification with Management Complex. */
		notif_cfg.dpio_id = io_info->id;
		notif_cfg.prior = 0;
		notif_cfg.qman_ctx = ctx->qman_ctx;
		error = DPAA2_CMD_CON_SET_NOTIF(dev, child, dpaa2_mcp_tk(
		    consc->cmd, consc->con_token), &notif_cfg);
		if (error) {
			device_printf(dev, "%s: failed to set DPCON "
			    "notification: dpcon_id=%d, chan_id=%d\n", __func__,
			    con_info->id, consc->attr.chan_id);
			return (error);
		}

		/* Allocate initial # of Rx buffers and a channel storage. */
		error = dpaa2_ni_seed_buf_pool(sc, DPAA2_NI_BUFS_INIT);
		if (error) {
			device_printf(dev, "%s: failed to seed buffer pool\n",
			    __func__);
			return (error);
		}
		error = dpaa2_ni_seed_chan_storage(sc, channel);
		if (error) {
			device_printf(dev, "%s: failed to seed channel "
			    "storage\n", __func__);
			return (error);
		}

		/* Prepare queues for this channel. */
		error = dpaa2_ni_setup_fq(dev, channel, DPAA2_NI_QUEUE_TX_CONF);
		if (error) {
			device_printf(dev, "%s: failed to prepare TxConf "
			    "queue: error=%d\n", __func__, error);
			return (error);
		}
		error = dpaa2_ni_setup_fq(dev, channel, DPAA2_NI_QUEUE_RX);
		if (error) {
			device_printf(dev, "%s: failed to prepare Rx queue: "
			    "error=%d\n", __func__, error);
			return (error);
		}

		if (bootverbose)
			device_printf(dev, "channel: dpio_id=%d "
			    "dpcon_id=%d chan_id=%d, priorities=%d\n",
			    io_info->id, con_info->id, channel->id,
			    consc->attr.prior_num);
	}

	/* There is exactly one Rx error queue per DPNI. */
	error = dpaa2_ni_setup_fq(dev, sc->channels[0], DPAA2_NI_QUEUE_RX_ERR);
	if (error) {
		device_printf(dev, "%s: failed to prepare RxError queue: "
		    "error=%d\n", __func__, error);
		return (error);
	}

	return (0);
}

/**
 * @brief Perform an initial configuration of the frame queues.
 */
static int
dpaa2_ni_setup_fq(device_t dev, struct dpaa2_ni_channel *chan,
    enum dpaa2_ni_queue_type queue_type)
{
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_ni_fq *fq;

	switch (queue_type) {
	case DPAA2_NI_QUEUE_TX_CONF:
		/* One queue per channel. */
		fq = &chan->txc_queue;

		fq->consume = dpaa2_ni_tx_conf;
		fq->chan = chan;
		fq->flowid = chan->flowid;
		fq->tc = 0; /* ignored */
		fq->type = queue_type;

		break;
	case DPAA2_NI_QUEUE_RX:
		KASSERT(sc->attr.num.rx_tcs <= DPAA2_NI_MAX_TCS,
		    ("too many Rx traffic classes: rx_tcs=%d\n",
		    sc->attr.num.rx_tcs));

		/* One queue per Rx traffic class within a channel. */
		for (int i = 0; i < sc->attr.num.rx_tcs; i++) {
			fq = &chan->rx_queues[i];

			fq->consume = dpaa2_ni_rx;
			fq->chan = chan;
			fq->flowid = chan->flowid;
			fq->tc = (uint8_t) i;
			fq->type = queue_type;

			chan->rxq_n++;
		}
		break;
	case DPAA2_NI_QUEUE_RX_ERR:
		/* One queue per network interface. */
		fq = &sc->rxe_queue;

		fq->consume = dpaa2_ni_rx_err;
		fq->chan = chan;
		fq->flowid = 0; /* ignored */
		fq->tc = 0; /* ignored */
		fq->type = queue_type;
		break;
	default:
		device_printf(dev, "%s: unexpected frame queue type: %d\n",
		    __func__, queue_type);
		return (EINVAL);
	}

	return (0);
}

/**
 * @brief Bind DPNI to DPBPs, DPIOs, frame queues and channels.
 */
static int
dpaa2_ni_bind(device_t dev)
{
	device_t bp_dev, child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *bp_info;
	struct dpaa2_cmd *cmd = sc->cmd;
	struct dpaa2_ni_pools_cfg pools_cfg;
	struct dpaa2_ni_err_cfg err_cfg;
	struct dpaa2_ni_channel *chan;
	uint16_t ni_token = sc->ni_token;
	int error;

	/* Select buffer pool (only one available at the moment). */
	bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
	bp_info = device_get_ivars(bp_dev);

	/* Configure the buffer pool. */
	pools_cfg.pools_num = 1;
	pools_cfg.pools[0].bp_obj_id = bp_info->id;
	pools_cfg.pools[0].backup_flag = 0;
	pools_cfg.pools[0].buf_sz = sc->buf_sz;
	error = DPAA2_CMD_NI_SET_POOLS(dev, child, dpaa2_mcp_tk(cmd, ni_token),
	    &pools_cfg);
	if (error) {
		device_printf(dev, "%s: failed to set buffer pools\n", __func__);
		return (error);
	}

	/* Setup ingress traffic distribution. */
	error = dpaa2_ni_setup_rx_dist(dev);
	if (error && error != EOPNOTSUPP) {
		device_printf(dev, "%s: failed to setup ingress traffic "
		    "distribution\n", __func__);
		return (error);
	}
	if (bootverbose && error == EOPNOTSUPP)
		device_printf(dev, "Ingress traffic distribution not "
		    "supported\n");

	/* Configure handling of error frames. */
	err_cfg.err_mask = DPAA2_NI_FAS_RX_ERR_MASK;
	err_cfg.set_err_fas = false;
	err_cfg.action = DPAA2_NI_ERR_DISCARD;
	error = DPAA2_CMD_NI_SET_ERR_BEHAVIOR(dev, child, cmd, &err_cfg);
	if (error) {
		device_printf(dev, "%s: failed to set errors behavior\n",
		    __func__);
		return (error);
	}

	/* Configure channel queues to generate CDANs. */
	for (uint32_t i = 0; i < sc->chan_n; i++) {
		chan = sc->channels[i];

		/* Setup Rx flows. */
		for (uint32_t j = 0; j < chan->rxq_n; j++) {
			error = dpaa2_ni_setup_rx_flow(dev, cmd,
			    &chan->rx_queues[j]);
			if (error) {
				device_printf(dev, "%s: failed to setup Rx "
				    "flow: error=%d\n", __func__, error);
				return (error);
			}
		}

		/* Setup Tx flow. */
		error = dpaa2_ni_setup_tx_flow(dev, cmd, &chan->txc_queue);
		if (error) {
			device_printf(dev, "%s: failed to setup Tx "
			    "flow: error=%d\n", __func__, error);
			return (error);
		}
	}

	/* Configure RxError queue to generate CDAN. */
	error = dpaa2_ni_setup_rx_err_flow(dev, cmd, &sc->rxe_queue);
	if (error) {
		device_printf(dev, "%s: failed to setup RxError flow: "
		    "error=%d\n", __func__, error);
		return (error);
	}

	/*
	 * Get the Queuing Destination ID (QDID) that should be used for frame
	 * enqueue operations.
	 */
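	/*
	 * NOTE: The Tx path is expected to enqueue frames against this QDID,
	 *	 with the per-flow qdbin (saved in dpaa2_ni_setup_tx_flow())
	 *	 selecting a particular Tx queue within the DPNI.
	 */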
	error = DPAA2_CMD_NI_GET_QDID(dev, child, cmd, DPAA2_NI_QUEUE_TX,
	    &sc->tx_qdid);
	if (error) {
		device_printf(dev, "%s: failed to get Tx queuing destination "
		    "ID\n", __func__);
		return (error);
	}

	return (0);
}

/**
 * @brief Setup ingress traffic distribution.
 *
 * NOTE: Ingress traffic distribution is valid only when the DPNI_OPT_NO_FS
 *	 option hasn't been set for the DPNI and the number of DPNI queues is
 *	 greater than 1.
 */
static int
dpaa2_ni_setup_rx_dist(device_t dev)
{
	/*
	 * Have the interface implicitly distribute traffic based on the default
	 * hash key.
	 */
	return (dpaa2_ni_set_hash(dev, DPAA2_RXH_DEFAULT));
}

static int
dpaa2_ni_setup_rx_flow(device_t dev, struct dpaa2_cmd *cmd,
    struct dpaa2_ni_fq *fq)
{
	device_t child = dev;
	struct dpaa2_devinfo *con_info;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	int error;

	/* Obtain DPCON associated with the FQ's channel. */
	con_info = device_get_ivars(fq->chan->con_dev);

	queue_cfg.type = DPAA2_NI_QUEUE_RX;
	queue_cfg.tc = fq->tc;
	queue_cfg.idx = fq->flowid;
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain Rx queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		return (error);
	}

	fq->fqid = queue_cfg.fqid;

	queue_cfg.dest_id = con_info->id;
	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
	queue_cfg.priority = 1;
	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
	queue_cfg.options =
	    DPAA2_NI_QUEUE_OPT_USER_CTX |
	    DPAA2_NI_QUEUE_OPT_DEST;
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to update Rx queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		return (error);
	}

	if (bootverbose) {
		device_printf(dev, "RX queue idx=%d, tc=%d, chan=%d, fqid=%d, "
		    "user_ctx=%#jx\n", fq->flowid, fq->tc, fq->chan->id,
		    fq->fqid, (uintmax_t) fq);
	}

	return (0);
}

static int
dpaa2_ni_setup_tx_flow(device_t dev, struct dpaa2_cmd *cmd,
    struct dpaa2_ni_fq *fq)
{
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *con_info;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	struct dpaa2_ni_tx_ring *tx;
	struct dpaa2_buf *buf;
	uint32_t tx_rings_n = 0;
	int error;

	/* Obtain DPCON associated with the FQ's channel. */
	con_info = device_get_ivars(fq->chan->con_dev);

	KASSERT(sc->attr.num.tx_tcs <= DPAA2_NI_MAX_TCS,
	    ("%s: too many Tx traffic classes: tx_tcs=%d\n", __func__,
	    sc->attr.num.tx_tcs));
	KASSERT(DPAA2_NI_BUFS_PER_TX <= DPAA2_NI_MAX_BPTX,
	    ("%s: too many Tx buffers (%d): max=%d\n", __func__,
	    DPAA2_NI_BUFS_PER_TX, DPAA2_NI_MAX_BPTX));

	/* Setup Tx rings. */
	for (int i = 0; i < sc->attr.num.tx_tcs; i++) {
		queue_cfg.type = DPAA2_NI_QUEUE_TX;
		queue_cfg.tc = i;
		queue_cfg.idx = fq->flowid;
		queue_cfg.chan_id = fq->chan->id;

		error = DPAA2_CMD_NI_GET_QUEUE(dev, child, cmd, &queue_cfg);
		if (error) {
			device_printf(dev, "%s: failed to obtain Tx queue "
			    "configuration: tc=%d, flowid=%d\n", __func__,
			    queue_cfg.tc, queue_cfg.idx);
			return (error);
		}

		tx = &fq->tx_rings[i];
		tx->fq = fq;
		tx->fqid = queue_cfg.fqid;
		tx->txid = tx_rings_n;

		if (bootverbose) {
			device_printf(dev, "TX queue idx=%d, tc=%d, chan=%d, "
			    "fqid=%d\n", fq->flowid, i, fq->chan->id,
			    queue_cfg.fqid);
		}

		mtx_init(&tx->lock, "dpaa2_tx_ring", NULL, MTX_DEF);

		/* Allocate Tx ring buffer. */
		tx->idx_br = buf_ring_alloc(DPAA2_TX_BUFRING_SZ, M_DEVBUF,
		    M_NOWAIT, &tx->lock);
		if (tx->idx_br == NULL) {
			device_printf(dev, "%s: failed to allocate Tx ring "
			    "buffer: fqid=%d\n", __func__, tx->fqid);
			return (ENOMEM);
		}

		/* Configure Tx buffers. */
		for (uint64_t j = 0; j < DPAA2_NI_BUFS_PER_TX; j++) {
			buf = &tx->buf[j];
			buf->type = DPAA2_BUF_TX;
			buf->tx.dmat = buf->tx.sgt_dmat = NULL;
			buf->tx.dmap = buf->tx.sgt_dmap = NULL;
			buf->tx.paddr = buf->tx.sgt_paddr = 0;
			buf->tx.vaddr = buf->tx.sgt_vaddr = NULL;
			buf->tx.m = NULL;
			buf->tx.idx = 0;

			error = dpaa2_ni_seed_txbuf(sc, buf, j);
			if (error != 0) {
				device_printf(dev, "%s: failed to seed Tx "
				    "buffer: error=%d\n", __func__, error);
				return (error);
			}

			/* Add index of the Tx buffer to the ring. */
			buf_ring_enqueue(tx->idx_br, (void *)(uintptr_t) j);
		}

		tx_rings_n++;
	}

	/* All Tx queues which belong to the same flowid have the same qdbin. */
	fq->tx_qdbin = queue_cfg.qdbin;

	queue_cfg.type = DPAA2_NI_QUEUE_TX_CONF;
	queue_cfg.tc = 0; /* ignored for TxConf queue */
	queue_cfg.idx = fq->flowid;
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain TxConf queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		return (error);
	}

	fq->fqid = queue_cfg.fqid;

	queue_cfg.dest_id = con_info->id;
	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
	queue_cfg.priority = 0;
	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
	queue_cfg.options =
	    DPAA2_NI_QUEUE_OPT_USER_CTX |
	    DPAA2_NI_QUEUE_OPT_DEST;
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to update TxConf queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		return (error);
	}

	return (0);
}

static int
dpaa2_ni_setup_rx_err_flow(device_t dev, struct dpaa2_cmd *cmd,
    struct dpaa2_ni_fq *fq)
{
	device_t child = dev;
	struct dpaa2_devinfo *con_info;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	int error;

	/* Obtain DPCON associated with the FQ's channel. */
	con_info = device_get_ivars(fq->chan->con_dev);

	queue_cfg.type = DPAA2_NI_QUEUE_RX_ERR;
	queue_cfg.tc = fq->tc; /* ignored */
	queue_cfg.idx = fq->flowid; /* ignored */
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain RxErr queue "
		    "configuration\n", __func__);
		return (error);
	}

	fq->fqid = queue_cfg.fqid;

	queue_cfg.dest_id = con_info->id;
	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
	queue_cfg.priority = 1;
	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
	queue_cfg.options =
	    DPAA2_NI_QUEUE_OPT_USER_CTX |
	    DPAA2_NI_QUEUE_OPT_DEST;
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to update RxErr queue "
		    "configuration\n", __func__);
		return (error);
	}

	return (0);
}

/**
 * @brief Configure DPNI object to generate interrupts.
 */
static int
dpaa2_ni_setup_irqs(device_t dev)
{
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_cmd *cmd = sc->cmd;
	uint16_t ni_token = sc->ni_token;
	int error;

	/* Configure IRQs. */
	error = dpaa2_ni_setup_msi(sc);
	if (error) {
		device_printf(dev, "%s: failed to allocate MSI\n", __func__);
		return (error);
	}
	if ((sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) {
		device_printf(dev, "%s: failed to allocate IRQ resource\n",
		    __func__);
		return (ENXIO);
	}
	if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, dpaa2_ni_intr, sc, &sc->intr)) {
		device_printf(dev, "%s: failed to setup IRQ resource\n",
		    __func__);
		return (ENXIO);
	}

	/* Configure DPNI to generate interrupts. */
	error = DPAA2_CMD_NI_SET_IRQ_MASK(dev, child, dpaa2_mcp_tk(cmd,
	    ni_token), DPNI_IRQ_INDEX,
	    DPNI_IRQ_LINK_CHANGED | DPNI_IRQ_EP_CHANGED);
	if (error) {
		device_printf(dev, "%s: failed to set DPNI IRQ mask\n",
		    __func__);
		return (error);
	}

	/* Enable IRQ. */
	error = DPAA2_CMD_NI_SET_IRQ_ENABLE(dev, child, cmd, DPNI_IRQ_INDEX,
	    true);
	if (error) {
		device_printf(dev, "%s: failed to enable DPNI IRQ\n", __func__);
		return (error);
	}

	return (0);
}

/**
 * @brief Allocate MSI interrupts for DPNI.
 */
static int
dpaa2_ni_setup_msi(struct dpaa2_ni_softc *sc)
{
	int val;

	val = pci_msi_count(sc->dev);
	if (val < DPAA2_NI_MSI_COUNT)
		device_printf(sc->dev, "MSI: actual=%d, expected=%d\n", val,
		    DPAA2_NI_MSI_COUNT);
	val = MIN(val, DPAA2_NI_MSI_COUNT);

	if (pci_alloc_msi(sc->dev, &val) != 0)
		return (EINVAL);

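	/* SYS_RES_IRQ rids for MSI start at 1; rid 0 is the legacy INTx. */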
	for (int i = 0; i < val; i++)
		sc->irq_rid[i] = i + 1;

	return (0);
}

/**
 * @brief Update DPNI according to the updated interface capabilities.
 */
static int
dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *sc)
{
	const bool en_rxcsum = sc->ifp->if_capenable & IFCAP_RXCSUM;
	const bool en_txcsum = sc->ifp->if_capenable & IFCAP_TXCSUM;
	device_t dev = sc->dev;
	device_t child = dev;
	int error;

	/* Setup checksums validation. */
	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, dpaa2_mcp_tk(sc->cmd,
	    sc->ni_token), DPAA2_NI_OFL_RX_L3_CSUM, en_rxcsum);
	if (error) {
		device_printf(dev, "%s: failed to %s L3 checksum validation\n",
		    __func__, en_rxcsum ? "enable" : "disable");
		return (error);
	}
	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, sc->cmd,
	    DPAA2_NI_OFL_RX_L4_CSUM, en_rxcsum);
	if (error) {
		device_printf(dev, "%s: failed to %s L4 checksum validation\n",
		    __func__, en_rxcsum ? "enable" : "disable");
		return (error);
	}

	/* Setup checksums generation. */
	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, sc->cmd,
	    DPAA2_NI_OFL_TX_L3_CSUM, en_txcsum);
	if (error) {
		device_printf(dev, "%s: failed to %s L3 checksum generation\n",
		    __func__, en_txcsum ? "enable" : "disable");
		return (error);
	}
	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, sc->cmd,
	    DPAA2_NI_OFL_TX_L4_CSUM, en_txcsum);
	if (error) {
		device_printf(dev, "%s: failed to %s L4 checksum generation\n",
		    __func__, en_txcsum ? "enable" : "disable");
		return (error);
	}

	return (0);
}
1663 
1664 /**
1665  * @brief Update DPNI according to the updated interface flags.
1666  */
1667 static int
1668 dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *sc)
1669 {
1670 	const bool en_promisc = sc->ifp->if_flags & IFF_PROMISC;
1671 	const bool en_allmulti = sc->ifp->if_flags & IFF_ALLMULTI;
1672 	device_t dev = sc->dev;
1673 	device_t child = dev;
1674 	int error;
1675 
	error = DPAA2_CMD_NI_SET_MULTI_PROMISC(dev, child, dpaa2_mcp_tk(sc->cmd,
	    sc->ni_token), en_promisc || en_allmulti);
	if (error) {
		device_printf(dev, "%s: failed to %s multicast promiscuous "
		    "mode\n", __func__, (en_promisc || en_allmulti)
		    ? "enable" : "disable");
1681 		return (error);
1682 	}
1683 
1684 	error = DPAA2_CMD_NI_SET_UNI_PROMISC(dev, child, sc->cmd, en_promisc);
1685 	if (error) {
1686 		device_printf(dev, "%s: failed to %s unicast promiscuous mode\n",
1687 		    __func__, en_promisc ? "enable" : "disable");
1688 		return (error);
1689 	}
1690 
1691 	return (0);
1692 }
1693 
1694 static int
1695 dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *sc)
1696 {
1697 	struct sysctl_ctx_list *ctx;
1698 	struct sysctl_oid *node, *node2;
1699 	struct sysctl_oid_list *parent, *parent2;
1700 	char cbuf[128];
1701 	int i;
1702 
1703 	ctx = device_get_sysctl_ctx(sc->dev);
1704 	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1705 
1706 	/* Add DPNI statistics. */
1707 	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
1708 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Statistics");
1709 	parent = SYSCTL_CHILDREN(node);
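	/*
	 * NOTE: The loop index is passed as the OID number (instead of
	 * OID_AUTO), so that dpaa2_ni_collect_stats() can find its entry in
	 * dpni_stat_sysctls[] via oidp->oid_number.
	 */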
1710 	for (i = 0; i < DPAA2_NI_STAT_SYSCTLS; ++i) {
1711 		SYSCTL_ADD_PROC(ctx, parent, i, dpni_stat_sysctls[i].name,
1712 		    CTLTYPE_U64 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_stats,
1713 		    "IU", dpni_stat_sysctls[i].desc);
1714 	}
1715 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_anomaly_frames",
1716 	    CTLFLAG_RD, &sc->rx_anomaly_frames,
1717 	    "Rx frames in the buffers outside of the buffer pools");
1718 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_single_buf_frames",
1719 	    CTLFLAG_RD, &sc->rx_single_buf_frames,
1720 	    "Rx frames in single buffers");
1721 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_sg_buf_frames",
1722 	    CTLFLAG_RD, &sc->rx_sg_buf_frames,
1723 	    "Rx frames in scatter/gather list");
1724 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_enq_rej_frames",
1725 	    CTLFLAG_RD, &sc->rx_enq_rej_frames,
1726 	    "Enqueue rejected by QMan");
1727 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_ieoi_err_frames",
1728 	    CTLFLAG_RD, &sc->rx_ieoi_err_frames,
1729 	    "QMan IEOI error");
1730 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_single_buf_frames",
1731 	    CTLFLAG_RD, &sc->tx_single_buf_frames,
1732 	    "Tx single buffer frames");
1733 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_sg_frames",
1734 	    CTLFLAG_RD, &sc->tx_sg_frames,
1735 	    "Tx S/G frames");
1736 
1737 	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_num",
1738 	    CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_num,
1739 	    "IU", "number of Rx buffers in the buffer pool");
1740 	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_free",
1741 	    CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_free,
1742 	    "IU", "number of free Rx buffers in the buffer pool");
1743 
	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1745 
1746 	/* Add channels statistics. */
1747 	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "channels",
1748 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channels");
1749 	parent = SYSCTL_CHILDREN(node);
1750 	for (int i = 0; i < sc->chan_n; i++) {
1751 		snprintf(cbuf, sizeof(cbuf), "%d", i);
1752 
1753 		node2 = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, cbuf,
1754 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channel");
1755 		parent2 = SYSCTL_CHILDREN(node2);
1756 
1757 		SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_frames",
1758 		    CTLFLAG_RD, &sc->channels[i]->tx_frames,
1759 		    "Tx frames counter");
1760 		SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_dropped",
1761 		    CTLFLAG_RD, &sc->channels[i]->tx_dropped,
1762 		    "Tx dropped counter");
1763 	}
1764 
1765 	return (0);
1766 }
1767 
1768 static int
1769 dpaa2_ni_setup_dma(struct dpaa2_ni_softc *sc)
1770 {
1771 	device_t dev = sc->dev;
1772 	int error;
1773 
1774 	KASSERT((sc->buf_align == BUF_ALIGN) || (sc->buf_align == BUF_ALIGN_V1),
1775 	    ("unexpected buffer alignment: %d\n", sc->buf_align));
1776 
1777 	/*
1778 	 * DMA tag to allocate buffers for buffer pool.
1779 	 *
1780 	 * NOTE: QBMan supports DMA addresses up to 49-bits maximum.
1781 	 *	 Bits 63-49 are not used by QBMan.
1782 	 */
1783 	error = bus_dma_tag_create(
1784 	    bus_get_dma_tag(dev),
1785 	    sc->buf_align, 0,		/* alignment, boundary */
1786 	    BUF_MAXADDR_49BIT,		/* low restricted addr */
1787 	    BUF_MAXADDR,		/* high restricted addr */
1788 	    NULL, NULL,			/* filter, filterarg */
1789 	    BUF_SIZE, 1,		/* maxsize, nsegments */
1790 	    BUF_SIZE, 0,		/* maxsegsize, flags */
1791 	    NULL, NULL,			/* lockfunc, lockarg */
1792 	    &sc->bp_dmat);
1793 	if (error) {
1794 		device_printf(dev, "%s: failed to create DMA tag for buffer "
1795 		    "pool\n", __func__);
1796 		return (error);
1797 	}
1798 
1799 	/* DMA tag to map Tx mbufs. */
1800 	error = bus_dma_tag_create(
1801 	    bus_get_dma_tag(dev),
1802 	    sc->buf_align, 0,		/* alignment, boundary */
1803 	    BUF_MAXADDR_49BIT,		/* low restricted addr */
1804 	    BUF_MAXADDR,		/* high restricted addr */
1805 	    NULL, NULL,			/* filter, filterarg */
1806 	    DPAA2_TX_SEGS_MAXSZ,	/* maxsize */
1807 	    DPAA2_TX_SEGLIMIT,		/* nsegments */
1808 	    DPAA2_TX_SEG_SZ, 0,		/* maxsegsize, flags */
1809 	    NULL, NULL,			/* lockfunc, lockarg */
1810 	    &sc->tx_dmat);
1811 	if (error) {
1812 		device_printf(dev, "%s: failed to create DMA tag for Tx "
1813 		    "buffers\n", __func__);
1814 		return (error);
1815 	}
1816 
1817 	/* DMA tag to allocate channel storage. */
1818 	error = bus_dma_tag_create(
1819 	    bus_get_dma_tag(dev),
1820 	    ETH_STORE_ALIGN, 0,		/* alignment, boundary */
1821 	    BUS_SPACE_MAXADDR_32BIT,	/* low restricted addr */
1822 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
1823 	    NULL, NULL,			/* filter, filterarg */
1824 	    ETH_STORE_SIZE, 1,		/* maxsize, nsegments */
1825 	    ETH_STORE_SIZE, 0,		/* maxsegsize, flags */
1826 	    NULL, NULL,			/* lockfunc, lockarg */
1827 	    &sc->st_dmat);
1828 	if (error) {
1829 		device_printf(dev, "%s: failed to create DMA tag for channel "
1830 		    "storage\n", __func__);
1831 		return (error);
1832 	}
1833 
1834 	/* DMA tag for Rx distribution key. */
1835 	error = bus_dma_tag_create(
1836 	    bus_get_dma_tag(dev),
1837 	    PAGE_SIZE, 0,		/* alignment, boundary */
1838 	    BUS_SPACE_MAXADDR_32BIT,	/* low restricted addr */
1839 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
1840 	    NULL, NULL,			/* filter, filterarg */
1841 	    DPAA2_CLASSIFIER_DMA_SIZE, 1, /* maxsize, nsegments */
1842 	    DPAA2_CLASSIFIER_DMA_SIZE, 0, /* maxsegsize, flags */
1843 	    NULL, NULL,			/* lockfunc, lockarg */
1844 	    &sc->rxd_dmat);
1845 	if (error) {
1846 		device_printf(dev, "%s: failed to create DMA tag for Rx "
1847 		    "distribution key\n", __func__);
1848 		return (error);
1849 	}
1850 
1851 	error = bus_dma_tag_create(
1852 	    bus_get_dma_tag(dev),
1853 	    PAGE_SIZE, 0,		/* alignment, boundary */
1854 	    BUS_SPACE_MAXADDR_32BIT,	/* low restricted addr */
1855 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
1856 	    NULL, NULL,			/* filter, filterarg */
1857 	    ETH_QOS_KCFG_BUF_SIZE, 1,	/* maxsize, nsegments */
1858 	    ETH_QOS_KCFG_BUF_SIZE, 0,	/* maxsegsize, flags */
1859 	    NULL, NULL,			/* lockfunc, lockarg */
1860 	    &sc->qos_dmat);
1861 	if (error) {
1862 		device_printf(dev, "%s: failed to create DMA tag for QoS key\n",
1863 		    __func__);
1864 		return (error);
1865 	}
1866 
1867 	error = bus_dma_tag_create(
1868 	    bus_get_dma_tag(dev),
1869 	    PAGE_SIZE, 0,		/* alignment, boundary */
1870 	    BUS_SPACE_MAXADDR_32BIT,	/* low restricted addr */
1871 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
1872 	    NULL, NULL,			/* filter, filterarg */
1873 	    DPAA2_TX_SGT_SZ, 1,		/* maxsize, nsegments */
1874 	    DPAA2_TX_SGT_SZ, 0,		/* maxsegsize, flags */
1875 	    NULL, NULL,			/* lockfunc, lockarg */
1876 	    &sc->sgt_dmat);
1877 	if (error) {
1878 		device_printf(dev, "%s: failed to create DMA tag for S/G "
1879 		    "tables\n", __func__);
1880 		return (error);
1881 	}
1882 
1883 	return (0);
1884 }
1885 
1886 /**
1887  * @brief Configure buffer layouts of the different DPNI queues.
1888  */
1889 static int
1890 dpaa2_ni_set_buf_layout(device_t dev, struct dpaa2_cmd *cmd)
1891 {
1892 	device_t child = dev;
1893 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
1894 	struct dpaa2_ni_buf_layout buf_layout = {0};
1895 	int error;
1896 
1897 	/*
1898 	 * Select Rx/Tx buffer alignment. It's necessary to ensure that the
1899 	 * buffer size seen by WRIOP is a multiple of 64 or 256 bytes depending
1900 	 * on the WRIOP version.
1901 	 */
1902 	sc->buf_align = (sc->attr.wriop_ver == WRIOP_VERSION(0, 0, 0) ||
1903 	    sc->attr.wriop_ver == WRIOP_VERSION(1, 0, 0))
1904 	    ? BUF_ALIGN_V1 : BUF_ALIGN;
1905 
1906 	/*
1907 	 * We need to ensure that the buffer size seen by WRIOP is a multiple
1908 	 * of 64 or 256 bytes depending on the WRIOP version.
1909 	 */
1910 	sc->buf_sz = ALIGN_DOWN(BUF_SIZE, sc->buf_align);
1911 
1912 	if (bootverbose)
1913 		device_printf(dev, "Rx/Tx buffers: size=%d, alignment=%d\n",
1914 		    sc->buf_sz, sc->buf_align);
1915 
1916 	/*
1917 	 *    Frame Descriptor       Tx buffer layout
1918 	 *
1919 	 *                ADDR -> |---------------------|
1920 	 *                        | SW FRAME ANNOTATION | BUF_SWA_SIZE bytes
1921 	 *                        |---------------------|
1922 	 *                        | HW FRAME ANNOTATION | BUF_TX_HWA_SIZE bytes
1923 	 *                        |---------------------|
1924 	 *                        |    DATA HEADROOM    |
1925 	 *       ADDR + OFFSET -> |---------------------|
1926 	 *                        |                     |
1927 	 *                        |                     |
1928 	 *                        |     FRAME DATA      |
1929 	 *                        |                     |
1930 	 *                        |                     |
1931 	 *                        |---------------------|
1932 	 *                        |    DATA TAILROOM    |
1933 	 *                        |---------------------|
1934 	 *
	 * NOTE: This layout applies to single-buffer frames only.
1936 	 */
1937 	buf_layout.queue_type = DPAA2_NI_QUEUE_TX;
1938 	buf_layout.pd_size = BUF_SWA_SIZE;
1939 	buf_layout.pass_timestamp = true;
1940 	buf_layout.pass_frame_status = true;
1941 	buf_layout.options =
1942 	    BUF_LOPT_PRIV_DATA_SZ |
1943 	    BUF_LOPT_TIMESTAMP | /* requires 128 bytes in HWA */
1944 	    BUF_LOPT_FRAME_STATUS;
1945 	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, cmd, &buf_layout);
1946 	if (error) {
1947 		device_printf(dev, "%s: failed to set Tx buffer layout\n",
1948 		    __func__);
1949 		return (error);
1950 	}
1951 
1952 	/* Tx-confirmation buffer layout */
1953 	buf_layout.queue_type = DPAA2_NI_QUEUE_TX_CONF;
1954 	buf_layout.options =
1955 	    BUF_LOPT_TIMESTAMP |
1956 	    BUF_LOPT_FRAME_STATUS;
1957 	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, cmd, &buf_layout);
1958 	if (error) {
1959 		device_printf(dev, "%s: failed to set TxConf buffer layout\n",
1960 		    __func__);
1961 		return (error);
1962 	}
1963 
1964 	/*
	 * The driver should reserve the amount of space indicated by this
	 * command as headroom in all Tx frames.
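	 *
	 * The offset obtained here is reused as the frame data offset when
	 * building Tx frame descriptors (see dpaa2_ni_build_fd()).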
1967 	 */
1968 	error = DPAA2_CMD_NI_GET_TX_DATA_OFF(dev, child, cmd, &sc->tx_data_off);
1969 	if (error) {
1970 		device_printf(dev, "%s: failed to obtain Tx data offset\n",
1971 		    __func__);
1972 		return (error);
1973 	}
1974 
1975 	if (bootverbose)
1976 		device_printf(dev, "Tx data offset=%d\n", sc->tx_data_off);
1977 	if ((sc->tx_data_off % 64) != 0)
1978 		device_printf(dev, "Tx data offset (%d) is not a multiplication "
1979 		    "of 64 bytes\n", sc->tx_data_off);
1980 
1981 	/*
1982 	 *    Frame Descriptor       Rx buffer layout
1983 	 *
1984 	 *                ADDR -> |---------------------|
1985 	 *                        | SW FRAME ANNOTATION | 0 bytes
1986 	 *                        |---------------------|
1987 	 *                        | HW FRAME ANNOTATION | BUF_RX_HWA_SIZE bytes
1988 	 *                        |---------------------|
1989 	 *                        |    DATA HEADROOM    | OFFSET-BUF_RX_HWA_SIZE
1990 	 *       ADDR + OFFSET -> |---------------------|
1991 	 *                        |                     |
1992 	 *                        |                     |
1993 	 *                        |     FRAME DATA      |
1994 	 *                        |                     |
1995 	 *                        |                     |
1996 	 *                        |---------------------|
1997 	 *                        |    DATA TAILROOM    | 0 bytes
1998 	 *                        |---------------------|
1999 	 *
	 * NOTE: This layout applies to single-buffer frames only.
2001 	 */
2002 	buf_layout.queue_type = DPAA2_NI_QUEUE_RX;
2003 	buf_layout.pd_size = 0;
2004 	buf_layout.fd_align = sc->buf_align;
2005 	buf_layout.head_size = sc->tx_data_off - BUF_RX_HWA_SIZE;
2006 	buf_layout.tail_size = 0;
2007 	buf_layout.pass_frame_status = true;
2008 	buf_layout.pass_parser_result = true;
2009 	buf_layout.pass_timestamp = true;
2010 	buf_layout.options =
2011 	    BUF_LOPT_PRIV_DATA_SZ |
2012 	    BUF_LOPT_DATA_ALIGN |
2013 	    BUF_LOPT_DATA_HEAD_ROOM |
2014 	    BUF_LOPT_DATA_TAIL_ROOM |
2015 	    BUF_LOPT_FRAME_STATUS |
2016 	    BUF_LOPT_PARSER_RESULT |
2017 	    BUF_LOPT_TIMESTAMP;
2018 	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, cmd, &buf_layout);
2019 	if (error) {
2020 		device_printf(dev, "%s: failed to set Rx buffer layout\n",
2021 		    __func__);
2022 		return (error);
2023 	}
2024 
2025 	return (0);
2026 }
2027 
2028 /**
2029  * @brief Enable Rx/Tx pause frames.
2030  *
 * NOTE: Rx pause frames cause the DPNI to stop sending, while Tx pause frames
 *       are generated by the DPNI itself to throttle the remote sender.
2033  */
2034 static int
2035 dpaa2_ni_set_pause_frame(device_t dev, struct dpaa2_cmd *cmd)
2036 {
2037 	device_t child = dev;
2038 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2039 	struct dpaa2_ni_link_cfg link_cfg = {0};
2040 	int error;
2041 
2042 	error = DPAA2_CMD_NI_GET_LINK_CFG(dev, child, cmd, &link_cfg);
2043 	if (error) {
2044 		device_printf(dev, "%s: failed to obtain link configuration: "
2045 		    "error=%d\n", __func__, error);
2046 		return (error);
2047 	}
2048 
2049 	/* Enable both Rx and Tx pause frames by default. */
2050 	link_cfg.options |= DPAA2_NI_LINK_OPT_PAUSE;
2051 	link_cfg.options &= ~DPAA2_NI_LINK_OPT_ASYM_PAUSE;
2052 
2053 	error = DPAA2_CMD_NI_SET_LINK_CFG(dev, child, cmd, &link_cfg);
2054 	if (error) {
2055 		device_printf(dev, "%s: failed to set link configuration: "
2056 		    "error=%d\n", __func__, error);
2057 		return (error);
2058 	}
2059 
2060 	sc->link_options = link_cfg.options;
2061 
2062 	return (0);
2063 }
2064 
2065 /**
2066  * @brief Configure QoS table to determine the traffic class for the received
2067  * frame.
2068  */
2069 static int
2070 dpaa2_ni_set_qos_table(device_t dev, struct dpaa2_cmd *cmd)
2071 {
2072 	device_t child = dev;
2073 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2074 	struct dpaa2_ni_qos_table tbl;
2075 	struct dpaa2_buf *buf = &sc->qos_kcfg;
2076 	int error;
2077 
2078 	if (sc->attr.num.rx_tcs == 1 ||
2079 	    !(sc->attr.options & DPNI_OPT_HAS_KEY_MASKING)) {
2080 		if (bootverbose)
2081 			device_printf(dev, "Ingress traffic classification is "
2082 			    "not supported\n");
2083 		return (0);
2084 	}
2085 
2086 	/*
2087 	 * Allocate a buffer visible to the device to hold the QoS table key
2088 	 * configuration.
2089 	 */
2090 	KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not storage buffer",
2091 	    __func__));
2092 	if (__predict_true(buf->store.dmat == NULL))
2093 		buf->store.dmat = sc->qos_dmat;
2094 
2095 	error = bus_dmamem_alloc(buf->store.dmat, &buf->store.vaddr,
2096 	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->store.dmap);
2097 	if (error) {
2098 		device_printf(dev, "%s: failed to allocate a buffer for QoS key "
2099 		    "configuration\n", __func__);
2100 		return (error);
2101 	}
2102 
2103 	error = bus_dmamap_load(buf->store.dmat, buf->store.dmap,
2104 	    buf->store.vaddr, ETH_QOS_KCFG_BUF_SIZE, dpaa2_ni_dmamap_cb,
2105 	    &buf->store.paddr, BUS_DMA_NOWAIT);
2106 	if (error) {
2107 		device_printf(dev, "%s: failed to map QoS key configuration "
2108 		    "buffer into bus space\n", __func__);
2109 		return (error);
2110 	}
2111 
2112 	tbl.default_tc = 0;
2113 	tbl.discard_on_miss = false;
2114 	tbl.keep_entries = false;
2115 	tbl.kcfg_busaddr = buf->store.paddr;
2116 	error = DPAA2_CMD_NI_SET_QOS_TABLE(dev, child, cmd, &tbl);
2117 	if (error) {
2118 		device_printf(dev, "%s: failed to set QoS table\n", __func__);
2119 		return (error);
2120 	}
2121 
2122 	error = DPAA2_CMD_NI_CLEAR_QOS_TABLE(dev, child, cmd);
2123 	if (error) {
2124 		device_printf(dev, "%s: failed to clear QoS table\n", __func__);
2125 		return (error);
2126 	}
2127 
2128 	return (0);
2129 }
2130 
2131 static int
2132 dpaa2_ni_set_mac_addr(device_t dev, struct dpaa2_cmd *cmd, uint16_t rc_token,
2133     uint16_t ni_token)
2134 {
2135 	device_t child = dev;
2136 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2137 	struct ifnet *ifp = sc->ifp;
2138 	struct ether_addr rnd_mac_addr;
2139 	uint8_t mac_addr[ETHER_ADDR_LEN];
2140 	uint8_t dpni_mac_addr[ETHER_ADDR_LEN];
2141 	int error;
2142 
2143 	/*
2144 	 * Get the MAC address associated with the physical port, if the DPNI is
2145 	 * connected to a DPMAC directly associated with one of the physical
2146 	 * ports.
2147 	 */
2148 	error = DPAA2_CMD_NI_GET_PORT_MAC_ADDR(dev, child, dpaa2_mcp_tk(cmd,
2149 	    ni_token), mac_addr);
2150 	if (error) {
2151 		device_printf(dev, "%s: failed to obtain the MAC address "
2152 		    "associated with the physical port\n", __func__);
2153 		return (error);
2154 	}
2155 
2156 	/* Get primary MAC address from the DPNI attributes. */
2157 	error = DPAA2_CMD_NI_GET_PRIM_MAC_ADDR(dev, child, cmd, dpni_mac_addr);
2158 	if (error) {
2159 		device_printf(dev, "%s: failed to obtain primary MAC address\n",
2160 		    __func__);
2161 		return (error);
2162 	}
2163 
2164 	if (!ETHER_IS_ZERO(mac_addr)) {
2165 		/* Set MAC address of the physical port as DPNI's primary one. */
2166 		error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, cmd,
2167 		    mac_addr);
2168 		if (error) {
2169 			device_printf(dev, "%s: failed to set primary MAC "
2170 			    "address\n", __func__);
2171 			return (error);
2172 		}
2173 		for (int i = 0; i < ETHER_ADDR_LEN; i++)
2174 			sc->mac.addr[i] = mac_addr[i];
2175 	} else if (ETHER_IS_ZERO(dpni_mac_addr)) {
2176 		/* Generate random MAC address as DPNI's primary one. */
2177 		ether_gen_addr(ifp, &rnd_mac_addr);
2178 		for (int i = 0; i < ETHER_ADDR_LEN; i++)
2179 			mac_addr[i] = rnd_mac_addr.octet[i];
2180 
2181 		error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, cmd,
2182 		    mac_addr);
2183 		if (error) {
2184 			device_printf(dev, "%s: failed to set random primary "
2185 			    "MAC address\n", __func__);
2186 			return (error);
2187 		}
2188 		for (int i = 0; i < ETHER_ADDR_LEN; i++)
2189 			sc->mac.addr[i] = mac_addr[i];
2190 	} else {
2191 		for (int i = 0; i < ETHER_ADDR_LEN; i++)
2192 			sc->mac.addr[i] = dpni_mac_addr[i];
2193 	}
2194 
2195 	return (0);
2196 }
2197 
2198 static void
2199 dpaa2_ni_miibus_statchg(device_t dev)
2200 {
2201 	struct dpaa2_ni_softc *sc;
2202 	device_t child;
2203 	struct dpaa2_mac_link_state mac_link = { 0 };
2204 	uint16_t mac_token;
2205 	int error, link_state;
2206 
2207 	sc = device_get_softc(dev);
2208 	if (sc->fixed_link || sc->mii == NULL)
2209 		return;
2210 
2211 	/*
2212 	 * Note: ifp link state will only be changed AFTER we are called so we
2213 	 * cannot rely on ifp->if_linkstate here.
2214 	 */
2215 	if (sc->mii->mii_media_status & IFM_AVALID) {
2216 		if (sc->mii->mii_media_status & IFM_ACTIVE)
2217 			link_state = LINK_STATE_UP;
2218 		else
2219 			link_state = LINK_STATE_DOWN;
2220 	} else
2221 		link_state = LINK_STATE_UNKNOWN;
2222 
	if (link_state != sc->link_state) {
		sc->link_state = link_state;
2226 
2227 		child = sc->dev;
2228 		error = DPAA2_CMD_MAC_OPEN(sc->dev, child, dpaa2_mcp_tk(sc->cmd,
2229 		    sc->rc_token), sc->mac.dpmac_id, &mac_token);
2230 		if (error) {
2231 			device_printf(sc->dev, "%s: failed to open DPMAC: "
2232 			    "id=%d, error=%d\n", __func__, sc->mac.dpmac_id,
2233 			    error);
2234 			return;
2235 		}
2236 
2237 		if (link_state == LINK_STATE_UP ||
2238 		    link_state == LINK_STATE_DOWN) {
2239 			/* Update DPMAC link state. */
2240 			mac_link.supported = sc->mii->mii_media.ifm_media;
2241 			mac_link.advert = sc->mii->mii_media.ifm_media;
			/* TODO: Derive rate from media, e.g. ifmedia_baudrate(). */
			mac_link.rate = 1000;
2243 			mac_link.options =
2244 			    DPAA2_MAC_LINK_OPT_AUTONEG |
2245 			    DPAA2_MAC_LINK_OPT_PAUSE;
2246 			mac_link.up = (link_state == LINK_STATE_UP) ? true : false;
2247 			mac_link.state_valid = true;
2248 
2249 			/* Inform DPMAC about link state. */
2250 			error = DPAA2_CMD_MAC_SET_LINK_STATE(sc->dev, child,
2251 			    sc->cmd, &mac_link);
2252 			if (error)
2253 				device_printf(sc->dev, "%s: failed to set DPMAC "
2254 				    "link state: id=%d, error=%d\n", __func__,
2255 				    sc->mac.dpmac_id, error);
2256 		}
2257 		DPAA2_CMD_MAC_CLOSE(sc->dev, child, dpaa2_mcp_tk(sc->cmd,
2258 		    mac_token));
2259 	}
2260 }
2261 
2262 /**
2263  * @brief Callback function to process media change request.
2264  */
2265 static int
2266 dpaa2_ni_media_change(struct ifnet *ifp)
2267 {
2268 	struct dpaa2_ni_softc *sc = ifp->if_softc;
2269 
2270 	DPNI_LOCK(sc);
2271 	if (sc->mii) {
2272 		mii_mediachg(sc->mii);
2273 		sc->media_status = sc->mii->mii_media.ifm_media;
2274 	} else if (sc->fixed_link) {
2275 		if_printf(ifp, "%s: can't change media in fixed mode\n",
2276 		    __func__);
2277 	}
2278 	DPNI_UNLOCK(sc);
2279 
2280 	return (0);
2281 }
2282 
2283 /**
2284  * @brief Callback function to process media status request.
2285  */
2286 static void
2287 dpaa2_ni_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2288 {
2289 	struct dpaa2_ni_softc *sc = ifp->if_softc;
2290 
2291 	DPNI_LOCK(sc);
2292 	if (sc->mii) {
2293 		mii_pollstat(sc->mii);
2294 		ifmr->ifm_active = sc->mii->mii_media_active;
2295 		ifmr->ifm_status = sc->mii->mii_media_status;
2296 	}
2297 	DPNI_UNLOCK(sc);
2298 }
2299 
2300 /**
2301  * @brief Callout function to check and update media status.
2302  */
2303 static void
2304 dpaa2_ni_media_tick(void *arg)
2305 {
2306 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2307 
2308 	/* Check for media type change */
2309 	if (sc->mii) {
2310 		mii_tick(sc->mii);
2311 		if (sc->media_status != sc->mii->mii_media.ifm_media) {
2312 			printf("%s: media type changed (ifm_media=%x)\n",
2313 			    __func__, sc->mii->mii_media.ifm_media);
2314 			dpaa2_ni_media_change(sc->ifp);
2315 		}
2316 	}
2317 
2318 	/* Schedule another timeout one second from now */
2319 	callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);
2320 }
2321 
2322 static void
2323 dpaa2_ni_init(void *arg)
2324 {
2325 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2326 	struct ifnet *ifp = sc->ifp;
2327 	device_t dev = sc->dev;
2328 	device_t child = dev;
2329 	int error;
2330 
2331 	DPNI_LOCK(sc);
2332 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2333 		DPNI_UNLOCK(sc);
2334 		return;
2335 	}
2336 	DPNI_UNLOCK(sc);
2337 
2338 	error = DPAA2_CMD_NI_ENABLE(dev, child, dpaa2_mcp_tk(sc->cmd,
2339 	    sc->ni_token));
2340 	if (error)
2341 		device_printf(dev, "%s: failed to enable DPNI: error=%d\n",
2342 		    __func__, error);
2343 
2344 	DPNI_LOCK(sc);
2345 	if (sc->mii)
2346 		mii_mediachg(sc->mii);
2347 	callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);
2348 
2349 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2350 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2351 	DPNI_UNLOCK(sc);
2352 
	/* Force link-state update to initialize things. */
2354 	dpaa2_ni_miibus_statchg(dev);
2355 
2356 	return;
2357 }
2358 
2359 static int
2360 dpaa2_ni_transmit(struct ifnet *ifp, struct mbuf *m)
2361 {
2362 	struct dpaa2_ni_softc *sc = ifp->if_softc;
2363 	struct dpaa2_ni_channel	*chan;
2364 	struct dpaa2_ni_tx_ring *tx;
2365 	uint32_t fqid;
2366 	boolean_t found = false;
2367 	int chan_n = 0;
2368 
2369 	if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING)))
2370 		return (0);
2371 
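	/*
	 * The flowid of an incoming packet was set to the FQID of its Rx frame
	 * queue (see dpaa2_ni_rx()), so map it back to the channel the flow
	 * was received on in order to transmit on the same channel.
	 */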
2372 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
2373 		fqid = m->m_pkthdr.flowid;
2374 		for (int i = 0; i < sc->chan_n; i++) {
2375 			chan = sc->channels[i];
2376 			for (int j = 0; j < chan->rxq_n; j++) {
2377 				if (fqid == chan->rx_queues[j].fqid) {
2378 					chan_n = chan->flowid;
2379 					found = true;
2380 					break;
2381 				}
2382 			}
2383 			if (found) {
2384 				break;
2385 			}
2386 		}
2387 	}
2388 	tx = DPAA2_TX_RING(sc, chan_n, 0);
2389 
2390 	TX_LOCK(tx);
2391 	dpaa2_ni_tx_locked(sc, tx, m);
2392 	TX_UNLOCK(tx);
2393 
2394 	return (0);
2395 }
2396 
2397 static void
2398 dpaa2_ni_qflush(struct ifnet *ifp)
2399 {
2400 	/* TODO: Find a way to drain Tx queues in QBMan. */
2401 	if_qflush(ifp);
2402 }
2403 
2404 static int
2405 dpaa2_ni_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2406 {
2407 	struct dpaa2_ni_softc *sc = ifp->if_softc;
2408 	struct ifreq *ifr = (struct ifreq *) data;
2409 	device_t dev, child;
2410 	uint32_t changed = 0;
2411 	int mtu, error, rc = 0;
2412 
2413 	dev = child = sc->dev;
2414 
2415 	switch (cmd) {
2416 	case SIOCSIFMTU:
2417 		DPNI_LOCK(sc);
2418 		mtu = ifr->ifr_mtu;
2419 		if (mtu < ETHERMIN || mtu > ETHERMTU_JUMBO) {
2420 			DPNI_UNLOCK(sc);
2421 			return (EINVAL);
2422 		}
2423 		ifp->if_mtu = mtu;
2424 		DPNI_UNLOCK(sc);
2425 
2426 		/* Update maximum frame length. */
2427 		error = DPAA2_CMD_NI_SET_MFL(dev, child, dpaa2_mcp_tk(sc->cmd,
2428 		    sc->ni_token), mtu + ETHER_HDR_LEN);
2429 		if (error) {
2430 			device_printf(dev, "%s: failed to update maximum frame "
2431 			    "length: error=%d\n", __func__, error);
2432 			return (error);
2433 		}
2434 		break;
2435 	case SIOCSIFCAP:
2436 		changed = ifp->if_capenable ^ ifr->ifr_reqcap;
2437 		if (changed & IFCAP_HWCSUM) {
2438 			if ((ifr->ifr_reqcap & changed) & IFCAP_HWCSUM)
2439 				ifp->if_capenable |= IFCAP_HWCSUM;
2440 			else
2441 				ifp->if_capenable &= ~IFCAP_HWCSUM;
2442 		}
2443 		rc = dpaa2_ni_setup_if_caps(sc);
2444 		if (rc) {
2445 			printf("%s: failed to update iface capabilities: "
2446 			    "error=%d\n", __func__, rc);
2447 			rc = ENXIO;
2448 		}
2449 		break;
2450 	case SIOCSIFFLAGS:
2451 		DPNI_LOCK(sc);
2452 		if (ifp->if_flags & IFF_UP) {
2453 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2454 				changed = ifp->if_flags ^ sc->if_flags;
2455 				if (changed & IFF_PROMISC ||
2456 				    changed & IFF_ALLMULTI) {
2457 					rc = dpaa2_ni_setup_if_flags(sc);
2458 				}
2459 			} else {
2460 				DPNI_UNLOCK(sc);
2461 				dpaa2_ni_init(sc);
2462 				DPNI_LOCK(sc);
2463 			}
2464 		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2465 			/* dpni_if_stop(sc); */
2466 		}
2467 
2468 		sc->if_flags = ifp->if_flags;
2469 		DPNI_UNLOCK(sc);
2470 		break;
2471 	case SIOCADDMULTI:
2472 	case SIOCDELMULTI:
2473 		DPNI_LOCK(sc);
2474 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2475 			DPNI_UNLOCK(sc);
2476 			rc = dpaa2_ni_update_mac_filters(ifp);
2477 			if (rc)
2478 				device_printf(dev, "%s: failed to update MAC "
2479 				    "filters: error=%d\n", __func__, rc);
2480 			DPNI_LOCK(sc);
2481 		}
2482 		DPNI_UNLOCK(sc);
2483 		break;
2484 	case SIOCGIFMEDIA:
2485 	case SIOCSIFMEDIA:
2486 		if (sc->mii)
2487 			rc = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, cmd);
		else if (sc->fixed_link) {
2489 			rc = ifmedia_ioctl(ifp, ifr, &sc->fixed_ifmedia, cmd);
2490 		}
2491 		break;
2492 	default:
2493 		rc = ether_ioctl(ifp, cmd, data);
2494 	}
2495 
2496 	return (rc);
2497 }
2498 
2499 static int
2500 dpaa2_ni_update_mac_filters(struct ifnet *ifp)
2501 {
2502 	struct dpaa2_ni_softc *sc = ifp->if_softc;
2503 	struct dpaa2_ni_mcaddr_ctx ctx;
2504 	device_t dev, child;
2505 	int error;
2506 
2507 	dev = child = sc->dev;
2508 
2509 	/* Remove all multicast MAC filters. */
2510 	error = DPAA2_CMD_NI_CLEAR_MAC_FILTERS(dev, child, dpaa2_mcp_tk(sc->cmd,
2511 	    sc->ni_token), false, true);
2512 	if (error) {
2513 		device_printf(dev, "%s: failed to clear multicast MAC filters: "
2514 		    "error=%d\n", __func__, error);
2515 		return (error);
2516 	}
2517 
2518 	ctx.ifp = ifp;
2519 	ctx.error = 0;
2520 	ctx.nent = 0;
2521 
2522 	if_foreach_llmaddr(ifp, dpaa2_ni_add_maddr, &ctx);
2523 
2524 	return (ctx.error);
2525 }
2526 
2527 static u_int
2528 dpaa2_ni_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2529 {
2530 	struct dpaa2_ni_mcaddr_ctx *ctx = arg;
2531 	struct dpaa2_ni_softc *sc = ctx->ifp->if_softc;
2532 	device_t dev, child;
2533 
2534 	dev = child = sc->dev;
2535 
2536 	if (ctx->error != 0)
2537 		return (0);
2538 
2539 	if (ETHER_IS_MULTICAST(LLADDR(sdl))) {
2540 		ctx->error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, dpaa2_mcp_tk(
2541 		    sc->cmd, sc->ni_token), LLADDR(sdl));
2542 		if (ctx->error != 0) {
2543 			device_printf(dev, "%s: can't add more then %d MAC "
2544 			    "addresses, switching to the multicast promiscuous "
2545 			    "mode\n", __func__, ctx->nent);
2546 
2547 			/* Enable multicast promiscuous mode. */
2548 			DPNI_LOCK(sc);
2549 			ctx->ifp->if_flags |= IFF_ALLMULTI;
2550 			sc->if_flags |= IFF_ALLMULTI;
2551 			ctx->error = dpaa2_ni_setup_if_flags(sc);
2552 			DPNI_UNLOCK(sc);
2553 
2554 			return (0);
2555 		}
2556 		ctx->nent++;
2557 	}
2558 
2559 	return (1);
2560 }
2561 
2562 static void
2563 dpaa2_ni_intr(void *arg)
2564 {
2565 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2566 	device_t child = sc->dev;
2567 	uint32_t status = ~0u; /* clear all IRQ status bits */
2568 	int error;
2569 
2570 	error = DPAA2_CMD_NI_GET_IRQ_STATUS(sc->dev, child, dpaa2_mcp_tk(sc->cmd,
2571 	    sc->ni_token), DPNI_IRQ_INDEX, &status);
2572 	if (error)
2573 		device_printf(sc->dev, "%s: failed to obtain IRQ status: "
2574 		    "error=%d\n", __func__, error);
2575 }
2576 
2577 /**
2578  * @brief Callback to obtain a physical address of the only DMA segment mapped.
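 *
 * NOTE: Used as a bus_dmamap_load(9) callback for single-segment mappings,
 *	 e.g. the QoS key configuration, channel storage and S/G tables.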
2579  */
2580 static void
2581 dpaa2_ni_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2582 {
2583 	if (error == 0) {
2584 		KASSERT(nseg == 1, ("too many segments: nseg=%d\n", nseg));
2585 		*(bus_addr_t *) arg = segs[0].ds_addr;
2586 	}
2587 }
2588 
2589 /**
2590  * @brief Release new buffers to the buffer pool if necessary.
2591  */
2592 static void
2593 dpaa2_ni_bp_task(void *arg, int count)
2594 {
2595 	device_t bp_dev;
2596 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2597 	struct dpaa2_bp_softc *bpsc;
2598 	struct dpaa2_bp_conf bp_conf;
2599 	const int buf_num = DPAA2_ATOMIC_READ(&sc->buf_num);
2600 	int error;
2601 
2602 	/* There's only one buffer pool for now. */
2603 	bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
2604 	bpsc = device_get_softc(bp_dev);
2605 
2606 	/* Get state of the buffer pool. */
2607 	error = DPAA2_SWP_QUERY_BP(sc->channels[0]->io_dev, bpsc->attr.bpid,
2608 	    &bp_conf);
2609 	if (error) {
2610 		device_printf(sc->dev, "%s: failed to query buffer pool "
2611 		    "configuration: error=%d\n", __func__, error);
2612 		return;
2613 	}
2614 
	/* Double the number of allocated buffers if free buffers < 25%. */
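	/* E.g. 512 buffers allocated -> re-seed when fewer than 128 are free. */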
2616 	if (bp_conf.free_bufn < (buf_num >> 2)) {
2617 		(void)dpaa2_ni_seed_buf_pool(sc, buf_num);
2618 		DPAA2_ATOMIC_XCHG(&sc->buf_free, bp_conf.free_bufn);
2619 	}
2620 }
2621 
2622 /**
2623  * @brief Poll frames from a specific channel when CDAN is received.
2624  *
2625  * NOTE: To be called from the DPIO interrupt handler.
2626  */
2627 static void
2628 dpaa2_ni_poll(void *arg)
2629 {
2630 	struct dpaa2_ni_channel *chan = (struct dpaa2_ni_channel *) arg;
2631 	struct dpaa2_io_softc *iosc;
2632 	struct dpaa2_swp *swp;
2633 	struct dpaa2_ni_fq *fq;
2634 	int error, consumed = 0;
2635 
2636 	KASSERT(chan != NULL, ("%s: channel is NULL", __func__));
2637 
2638 	iosc = device_get_softc(chan->io_dev);
2639 	swp = iosc->swp;
2640 
2641 	do {
2642 		error = dpaa2_swp_pull(swp, chan->id, &chan->store,
2643 		    ETH_STORE_FRAMES);
2644 		if (error) {
2645 			device_printf(chan->ni_dev, "%s: failed to pull frames: "
2646 			    "chan_id=%d, error=%d\n", __func__, chan->id, error);
2647 			break;
2648 		}
2649 
2650 		/*
2651 		 * TODO: Combine frames from the same Rx queue returned as
2652 		 * a result to the current VDQ command into a chain (linked
		 * with m_nextpkt) to amortize the FQ lock.
2654 		 */
2655 		error = dpaa2_ni_consume_frames(chan, &fq, &consumed);
2656 		if (error == ENOENT) {
2657 			break;
2658 		}
2659 		if (error == ETIMEDOUT) {
2660 			device_printf(chan->ni_dev, "%s: timeout to consume "
2661 			    "frames: chan_id=%d\n", __func__, chan->id);
2662 		}
2663 	} while (true);
2664 
2665 	/* Re-arm channel to generate CDAN. */
2666 	error = DPAA2_SWP_CONF_WQ_CHANNEL(chan->io_dev, &chan->ctx);
2667 	if (error) {
2668 		device_printf(chan->ni_dev, "%s: failed to rearm: chan_id=%d, "
2669 		    "error=%d\n", __func__, chan->id, error);
2670 	}
2671 }
2672 
2673 /**
2674  * @brief Transmit mbufs.
2675  */
2676 static void
2677 dpaa2_ni_tx_locked(struct dpaa2_ni_softc *sc, struct dpaa2_ni_tx_ring *tx,
2678     struct mbuf *m)
2679 {
2680 	struct dpaa2_ni_fq *fq = tx->fq;
2681 	struct dpaa2_buf *buf;
2682 	struct dpaa2_fd fd;
2683 	struct mbuf *m_d;
2684 	bus_dma_segment_t txsegs[DPAA2_TX_SEGLIMIT];
2685 	uint64_t idx;
2686 	void *pidx;
2687 	int error, rc, txnsegs;
2688 
2689 	/* Obtain an index of a Tx buffer. */
2690 	pidx = buf_ring_dequeue_sc(tx->idx_br);
2691 	if (__predict_false(pidx == NULL)) {
2692 		/* TODO: Do not give up easily. */
2693 		m_freem(m);
2694 		return;
2695 	} else {
2696 		idx = (uint64_t) pidx;
2697 		buf = &tx->buf[idx];
2698 		buf->tx.m = m;
2699 		buf->tx.idx = idx;
2700 		buf->tx.sgt_paddr = 0;
2701 	}
2702 
2703 	/* Load mbuf to transmit. */
2704 	error = bus_dmamap_load_mbuf_sg(buf->tx.dmat, buf->tx.dmap, m,
2705 	    txsegs, &txnsegs, BUS_DMA_NOWAIT);
2706 	if (__predict_false(error != 0)) {
2707 		/* Too many fragments, trying to defragment... */
2708 		m_d = m_collapse(m, M_NOWAIT, DPAA2_TX_SEGLIMIT);
2709 		if (m_d == NULL) {
2710 			device_printf(sc->dev, "%s: mbuf "
2711 			    "defragmentation failed\n", __func__);
2712 			fq->chan->tx_dropped++;
2713 			goto err;
2714 		}
2715 
2716 		buf->tx.m = m = m_d;
2717 		error = bus_dmamap_load_mbuf_sg(buf->tx.dmat,
2718 		    buf->tx.dmap, m, txsegs, &txnsegs, BUS_DMA_NOWAIT);
2719 		if (__predict_false(error != 0)) {
2720 			device_printf(sc->dev, "%s: failed to load "
2721 			    "mbuf: error=%d\n", __func__, error);
2722 			fq->chan->tx_dropped++;
2723 			goto err;
2724 		}
2725 	}
2726 
2727 	/* Build frame descriptor. */
2728 	error = dpaa2_ni_build_fd(sc, tx, buf, txsegs, txnsegs, &fd);
2729 	if (__predict_false(error != 0)) {
2730 		device_printf(sc->dev, "%s: failed to build frame "
2731 		    "descriptor: error=%d\n", __func__, error);
2732 		fq->chan->tx_dropped++;
2733 		goto err_unload;
2734 	}
2735 
	/*
	 * Sync the frame data and S/G table for the device before handing the
	 * frame descriptor to the hardware.
	 */
	bus_dmamap_sync(buf->tx.dmat, buf->tx.dmap,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(buf->tx.sgt_dmat, buf->tx.sgt_dmap,
	    BUS_DMASYNC_PREWRITE);

	/* TODO: Enqueue several frames in a single command. */
	for (int i = 0; i < DPAA2_NI_ENQUEUE_RETRIES; i++) {
		/* TODO: Return error codes instead of # of frames. */
		rc = DPAA2_SWP_ENQ_MULTIPLE_FQ(fq->chan->io_dev, tx->fqid,
		    &fd, 1);
		if (rc == 1) {
			break;
		}
	}
2750 
2751 	if (rc != 1) {
2752 		fq->chan->tx_dropped++;
2753 		goto err_unload;
2754 	} else {
2755 		fq->chan->tx_frames++;
2756 	}
2757 	return;
2758 
2759 err_unload:
2760 	bus_dmamap_unload(buf->tx.dmat, buf->tx.dmap);
2761 	if (buf->tx.sgt_paddr != 0) {
2762 		bus_dmamap_unload(buf->tx.sgt_dmat, buf->tx.sgt_dmap);
2763 	}
2764 err:
2765 	m_freem(buf->tx.m);
2766 	buf_ring_enqueue(tx->idx_br, pidx);
2767 }
2768 
2769 static int
2770 dpaa2_ni_consume_frames(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq **src,
2771     uint32_t *consumed)
2772 {
2773 	struct dpaa2_ni_fq *fq = NULL;
2774 	struct dpaa2_dq *dq;
2775 	struct dpaa2_fd *fd;
2776 	int rc, frames = 0;
2777 
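	/*
	 * Drain the channel storage: dpaa2_ni_chan_storage_next() is expected
	 * to return EINPROGRESS while more dequeue responses are pending, and
	 * EALREADY or ENOENT once the last response has been reached.
	 */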
2778 	do {
2779 		rc = dpaa2_ni_chan_storage_next(chan, &dq);
2780 		if (rc == EINPROGRESS) {
2781 			if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
2782 				fd = &dq->fdr.fd;
2783 				fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;
2784 				fq->consume(chan, fq, fd);
2785 				frames++;
2786 			}
2787 		} else if (rc == EALREADY || rc == ENOENT) {
2788 			if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
2789 				fd = &dq->fdr.fd;
2790 				fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;
2791 				fq->consume(chan, fq, fd);
2792 				frames++;
2793 			}
2794 			break;
2795 		} else {
2796 			KASSERT(1 == 0, ("%s: should not reach here", __func__));
2797 		}
2798 	} while (true);
2799 
2800 	KASSERT(chan->store_idx < chan->store_sz,
2801 	    ("channel store idx >= size: store_idx=%d, store_sz=%d",
2802 	    chan->store_idx, chan->store_sz));
2803 
2804 	/*
2805 	 * A dequeue operation pulls frames from a single queue into the store.
2806 	 * Return the frame queue and a number of consumed frames as an output.
2807 	 */
2808 	if (src != NULL)
2809 		*src = fq;
2810 	if (consumed != NULL)
2811 		*consumed = frames;
2812 
2813 	return (rc);
2814 }
2815 
2816 /**
2817  * @brief Receive frames.
2818  */
2819 static int
2820 dpaa2_ni_rx(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq,
2821     struct dpaa2_fd *fd)
2822 {
2823 	struct dpaa2_ni_softc *sc = device_get_softc(chan->ni_dev);
2824 	struct dpaa2_bp_softc *bpsc;
2825 	struct dpaa2_buf *buf;
2826 	struct ifnet *ifp = sc->ifp;
2827 	struct mbuf *m;
2828 	device_t bp_dev;
2829 	bus_addr_t paddr = (bus_addr_t) fd->addr;
2830 	bus_addr_t released[DPAA2_SWP_BUFS_PER_CMD];
2831 	void *buf_data;
2832 	int buf_idx, buf_len;
2833 	int error, released_n = 0;
2834 
2835 	/*
2836 	 * Get buffer index from the ADDR_TOK (not used by QBMan) bits of the
2837 	 * physical address.
2838 	 */
2839 	buf_idx = dpaa2_ni_fd_buf_idx(fd);
2840 	buf = &sc->buf[buf_idx];
2841 
2842 	KASSERT(buf->type == DPAA2_BUF_RX, ("%s: not Rx buffer", __func__));
2843 	if (paddr != buf->rx.paddr) {
2844 		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
2845 		    __func__, paddr, buf->rx.paddr);
2846 	}
2847 
2848 	/* Update statistics. */
2849 	switch (dpaa2_ni_fd_err(fd)) {
2850 	case 1: /* Enqueue rejected by QMan */
2851 		sc->rx_enq_rej_frames++;
2852 		break;
2853 	case 2: /* QMan IEOI error */
2854 		sc->rx_ieoi_err_frames++;
2855 		break;
2856 	default:
2857 		break;
2858 	}
2859 	switch (dpaa2_ni_fd_format(fd)) {
2860 	case DPAA2_FD_SINGLE:
2861 		sc->rx_single_buf_frames++;
2862 		break;
2863 	case DPAA2_FD_SG:
2864 		sc->rx_sg_buf_frames++;
2865 		break;
2866 	default:
2867 		break;
2868 	}
2869 
2870 	m = buf->rx.m;
2871 	buf->rx.m = NULL;
2872 	bus_dmamap_sync(buf->rx.dmat, buf->rx.dmap,
2873 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2874 	bus_dmamap_unload(buf->rx.dmat, buf->rx.dmap);
2875 
2876 	buf_len = dpaa2_ni_fd_data_len(fd);
2877 	buf_data = (uint8_t *)buf->rx.vaddr + dpaa2_ni_fd_offset(fd);
2878 
2879 	/* Prefetch mbuf data. */
2880 	__builtin_prefetch(buf_data);
2881 
2882 	/* Write value to mbuf (avoid reading). */
2883 	m->m_flags |= M_PKTHDR;
2884 	m->m_data = buf_data;
2885 	m->m_len = buf_len;
2886 	m->m_pkthdr.len = buf_len;
2887 	m->m_pkthdr.rcvif = ifp;
2888 	m->m_pkthdr.flowid = fq->fqid;
2889 	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
2890 
2891 	(*ifp->if_input)(ifp, m);
2892 
2893 	/* Keep the buffer to be recycled. */
2894 	chan->recycled[chan->recycled_n++] = paddr;
2895 	KASSERT(chan->recycled_n <= DPAA2_SWP_BUFS_PER_CMD,
2896 	    ("%s: too many buffers to recycle", __func__));
2897 
2898 	/* Re-seed and release recycled buffers back to the pool. */
2899 	if (chan->recycled_n == DPAA2_SWP_BUFS_PER_CMD) {
2900 		/* Release new buffers to the pool if needed. */
2901 		taskqueue_enqueue(sc->bp_taskq, &sc->bp_task);
2902 
2903 		for (int i = 0; i < chan->recycled_n; i++) {
2904 			paddr = chan->recycled[i];
2905 
2906 			/* Parse ADDR_TOK of the recycled buffer. */
2907 			buf_idx = (paddr >> DPAA2_NI_BUF_IDX_SHIFT)
2908 			    & DPAA2_NI_BUF_IDX_MASK;
2909 			buf = &sc->buf[buf_idx];
2910 
2911 			/* Seed recycled buffer. */
2912 			error = dpaa2_ni_seed_rxbuf(sc, buf, buf_idx);
2913 			KASSERT(error == 0, ("%s: failed to seed recycled "
2914 			    "buffer: error=%d", __func__, error));
2915 			if (__predict_false(error != 0)) {
2916 				device_printf(sc->dev, "%s: failed to seed "
2917 				    "recycled buffer: error=%d\n", __func__,
2918 				    error);
2919 				continue;
2920 			}
2921 
2922 			/* Prepare buffer to be released in a single command. */
2923 			released[released_n++] = buf->rx.paddr;
2924 		}
2925 
2926 		/* There's only one buffer pool for now. */
2927 		bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
2928 		bpsc = device_get_softc(bp_dev);
2929 
2930 		error = DPAA2_SWP_RELEASE_BUFS(chan->io_dev, bpsc->attr.bpid,
2931 		    released, released_n);
2932 		if (__predict_false(error != 0)) {
2933 			device_printf(sc->dev, "%s: failed to release buffers "
2934 			    "to the pool: error=%d\n", __func__, error);
2935 			return (error);
2936 		}
2937 
2938 		/* Be ready to recycle the next portion of the buffers. */
2939 		chan->recycled_n = 0;
2940 	}
2941 
2942 	return (0);
2943 }
2944 
2945 /**
2946  * @brief Receive Rx error frames.
2947  */
2948 static int
2949 dpaa2_ni_rx_err(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq,
2950     struct dpaa2_fd *fd)
2951 {
2952 	device_t bp_dev;
2953 	struct dpaa2_ni_softc *sc = device_get_softc(chan->ni_dev);
2954 	struct dpaa2_bp_softc *bpsc;
2955 	struct dpaa2_buf *buf;
2956 	bus_addr_t paddr = (bus_addr_t) fd->addr;
2957 	int buf_idx, error;
2958 
2959 	/*
2960 	 * Get buffer index from the ADDR_TOK (not used by QBMan) bits of the
2961 	 * physical address.
2962 	 */
2963 	buf_idx = dpaa2_ni_fd_buf_idx(fd);
2964 	buf = &sc->buf[buf_idx];
2965 
2966 	KASSERT(buf->type == DPAA2_BUF_RX, ("%s: not Rx buffer", __func__));
2967 	if (paddr != buf->rx.paddr) {
2968 		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
2969 		    __func__, paddr, buf->rx.paddr);
2970 	}
2971 
2972 	/* There's only one buffer pool for now. */
2973 	bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
2974 	bpsc = device_get_softc(bp_dev);
2975 
2976 	/* Release buffer to QBMan buffer pool. */
2977 	error = DPAA2_SWP_RELEASE_BUFS(chan->io_dev, bpsc->attr.bpid, &paddr, 1);
2978 	if (error != 0) {
2979 		device_printf(sc->dev, "%s: failed to release frame buffer to "
2980 		    "the pool: error=%d\n", __func__, error);
2981 		return (error);
2982 	}
2983 
2984 	return (0);
2985 }
2986 
2987 /**
2988  * @brief Receive Tx confirmation frames.
2989  */
2990 static int
2991 dpaa2_ni_tx_conf(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq,
2992     struct dpaa2_fd *fd)
2993 {
2994 	struct dpaa2_ni_softc *sc = device_get_softc(chan->ni_dev);
2995 	struct dpaa2_ni_channel	*buf_chan;
2996 	struct dpaa2_ni_tx_ring *tx;
2997 	struct dpaa2_buf *buf;
2998 	bus_addr_t paddr = (bus_addr_t) (fd->addr & BUF_MAXADDR_49BIT);
2999 	uint64_t buf_idx;
3000 	int chan_idx, tx_idx;
3001 
3002 	/*
3003 	 * Get channel, Tx ring and buffer indexes from the ADDR_TOK bits
3004 	 * (not used by QBMan) of the physical address.
3005 	 */
3006 	chan_idx = dpaa2_ni_fd_chan_idx(fd);
3007 	tx_idx = dpaa2_ni_fd_tx_idx(fd);
3008 	buf_idx = (uint64_t) dpaa2_ni_fd_txbuf_idx(fd);
3009 
3010 	KASSERT(tx_idx < DPAA2_NI_MAX_TCS, ("%s: invalid Tx ring index",
3011 	    __func__));
3012 	KASSERT(buf_idx < DPAA2_NI_BUFS_PER_TX, ("%s: invalid Tx buffer index",
3013 	    __func__));
3014 
3015 	buf_chan = sc->channels[chan_idx];
3016 	tx = &buf_chan->txc_queue.tx_rings[tx_idx];
3017 	buf = &tx->buf[buf_idx];
3018 
3019 	KASSERT(buf->type == DPAA2_BUF_TX, ("%s: not Tx buffer", __func__));
3020 	if (paddr != buf->tx.paddr) {
3021 		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
3022 		    __func__, paddr, buf->tx.paddr);
3023 	}
3024 
3025 
3026 	bus_dmamap_unload(buf->tx.dmat, buf->tx.dmap);
3027 	if (buf->tx.sgt_paddr != 0)
3028 		bus_dmamap_unload(buf->tx.sgt_dmat, buf->tx.sgt_dmap);
3029 	m_freem(buf->tx.m);
3030 
3031 	/* Return Tx buffer index back to the ring. */
3032 	buf_ring_enqueue(tx->idx_br, (void *) buf_idx);
3033 
3034 	return (0);
3035 }
3036 
3037 /**
3038  * @brief Compare versions of the DPAA2 network interface API.
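 *
 * Returns a negative value, zero or a positive value if the DPNI API version
 * is less than, equal to or greater than the given one, respectively.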
3039  */
3040 static int
3041 dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *sc, uint16_t major,
3042     uint16_t minor)
3043 {
	if (sc->api_major == major)
		return (sc->api_minor - minor);
	return (sc->api_major - major);
3047 }
3048 
3049 /**
3050  * @brief Allocate Rx buffers visible to QBMan and release them to the pool.
3051  */
3052 static int
3053 dpaa2_ni_seed_buf_pool(struct dpaa2_ni_softc *sc, uint32_t seedn)
3054 {
3055 	device_t bp_dev;
3056 	struct dpaa2_bp_softc *bpsc;
3057 	struct dpaa2_buf *buf;
3058 	bus_addr_t paddr[DPAA2_SWP_BUFS_PER_CMD];
3059 	const int allocated = DPAA2_ATOMIC_READ(&sc->buf_num);
3060 	int i, error, bufn = 0;
3061 
3062 	KASSERT(sc->bp_dmat != NULL, ("%s: DMA tag for buffer pool not "
3063 	    "created?", __func__));
3064 
3065 	/* There's only one buffer pool for now. */
3066 	bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
3067 	bpsc = device_get_softc(bp_dev);
3068 
3069 	/* Limit # of buffers released to the pool. */
3070 	if (allocated + seedn > DPAA2_NI_BUFS_MAX)
3071 		seedn = DPAA2_NI_BUFS_MAX - allocated;
3072 
3073 	/* Release "seedn" buffers to the pool. */
3074 	for (i = allocated; i < (allocated + seedn); i++) {
		/* Release a full batch once enough buffers are ready for one command. */
3076 		if (bufn == DPAA2_SWP_BUFS_PER_CMD) {
3077 			error = DPAA2_SWP_RELEASE_BUFS(sc->channels[0]->io_dev,
3078 			    bpsc->attr.bpid, paddr, bufn);
3079 			if (error) {
3080 				device_printf(sc->dev, "%s: failed to release "
3081 				    "buffers to the pool (1)\n", __func__);
3082 				return (error);
3083 			}
3084 			DPAA2_ATOMIC_ADD(&sc->buf_num, bufn);
3085 			bufn = 0;
3086 		}
3087 
3088 		buf = &sc->buf[i];
3089 		buf->type = DPAA2_BUF_RX;
3090 		buf->rx.m = NULL;
3091 		buf->rx.dmap = NULL;
3092 		buf->rx.paddr = 0;
3093 		buf->rx.vaddr = NULL;
3094 		error = dpaa2_ni_seed_rxbuf(sc, buf, i);
3095 		if (error)
3096 			break;
3097 		paddr[bufn] = buf->rx.paddr;
3098 		bufn++;
3099 	}
3100 
3101 	/* Release if there are buffers left. */
3102 	if (bufn > 0) {
3103 		error = DPAA2_SWP_RELEASE_BUFS(sc->channels[0]->io_dev,
3104 		    bpsc->attr.bpid, paddr, bufn);
3105 		if (error) {
3106 			device_printf(sc->dev, "%s: failed to release "
3107 			    "buffers to the pool (2)\n", __func__);
3108 			return (error);
3109 		}
3110 		DPAA2_ATOMIC_ADD(&sc->buf_num, bufn);
3111 	}
3112 
3113 	return (0);
3114 }
3115 
3116 /**
3117  * @brief Prepare Rx buffer to be released to the buffer pool.
3118  */
3119 static int
3120 dpaa2_ni_seed_rxbuf(struct dpaa2_ni_softc *sc, struct dpaa2_buf *buf, int idx)
3121 {
3122 	struct mbuf *m;
3123 	bus_dmamap_t dmap;
3124 	bus_dma_segment_t segs;
3125 	int error, nsegs;
3126 
3127 	KASSERT(sc->bp_dmat != NULL, ("%s: Buffer pool DMA tag is not "
3128 	    "allocated?", __func__));
3129 	KASSERT(buf->type == DPAA2_BUF_RX, ("%s: not Rx buffer", __func__));
3130 
3131 	/* Keep DMA tag for this buffer. */
3132 	if (__predict_false(buf->rx.dmat == NULL))
3133 		buf->rx.dmat = sc->bp_dmat;
3134 
	/* Create a DMA map for the given buffer if it doesn't exist yet. */
3136 	if (__predict_false(buf->rx.dmap == NULL)) {
3137 		error = bus_dmamap_create(buf->rx.dmat, 0, &dmap);
3138 		if (error) {
3139 			device_printf(sc->dev, "%s: failed to create DMA map "
3140 			    "for buffer: buf_idx=%d, error=%d\n", __func__,
3141 			    idx, error);
3142 			return (error);
3143 		}
3144 		buf->rx.dmap = dmap;
3145 	}
3146 
3147 	/* Allocate mbuf if needed. */
3148 	if (__predict_false(buf->rx.m == NULL)) {
3149 		m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, BUF_SIZE);
3150 		if (__predict_false(m == NULL)) {
3151 			device_printf(sc->dev, "%s: failed to allocate mbuf for "
3152 			    "buffer\n", __func__);
3153 			return (ENOMEM);
3154 		}
3155 		m->m_len = m->m_ext.ext_size;
3156 		m->m_pkthdr.len = m->m_ext.ext_size;
3157 		buf->rx.m = m;
3158 	} else
3159 		m = buf->rx.m;
3160 
3161 	error = bus_dmamap_load_mbuf_sg(buf->rx.dmat, buf->rx.dmap,
3162 	    m, &segs, &nsegs, BUS_DMA_NOWAIT);
3163 	KASSERT(nsegs == 1, ("one segment expected: nsegs=%d", nsegs));
3164 	KASSERT(error == 0, ("failed to map mbuf: error=%d", error));
3165 	if (__predict_false(error != 0 || nsegs != 1)) {
3166 		device_printf(sc->dev, "%s: failed to map mbuf: error=%d, "
3167 		    "nsegs=%d\n", __func__, error, nsegs);
3168 		bus_dmamap_unload(buf->rx.dmat, buf->rx.dmap);
3169 		m_freem(m);
3170 		return (error);
3171 	}
3172 	buf->rx.paddr = segs.ds_addr;
3173 	buf->rx.vaddr = m->m_data;
3174 
3175 	/*
3176 	 * Write buffer index to the ADDR_TOK (bits 63-49) which is not used by
3177 	 * QBMan and is supposed to assist in physical to virtual address
3178 	 * translation.
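	 *
	 * For example, with DPAA2_NI_BUF_IDX_SHIFT == 49, buffer index 5 at
	 * physical address 0x80001000 would be encoded as 0x000a000080001000,
	 * i.e. (5ULL << 49) | 0x80001000.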
3179 	 *
3180 	 * NOTE: "lowaddr" and "highaddr" of the window which cannot be accessed
3181 	 * 	 by QBMan must be configured in the DMA tag accordingly.
3182 	 */
3183 	buf->rx.paddr =
3184 	    ((uint64_t)(idx & DPAA2_NI_BUF_IDX_MASK) <<
3185 		DPAA2_NI_BUF_IDX_SHIFT) |
3186 	    (buf->rx.paddr & DPAA2_NI_BUF_ADDR_MASK);
3187 
3188 	return (0);
3189 }
3190 
3191 /**
3192  * @brief Prepare Tx buffer to be added to the Tx ring.
3193  */
3194 static int
3195 dpaa2_ni_seed_txbuf(struct dpaa2_ni_softc *sc, struct dpaa2_buf *buf, int idx)
3196 {
3197 	bus_dmamap_t dmap;
3198 	int error;
3199 
3200 	KASSERT(sc->tx_dmat != NULL, ("%s: Tx DMA tag is not allocated?",
3201 	    __func__));
3202 	KASSERT(sc->sgt_dmat != NULL, ("%s: S/G DMA tag not allocated?",
3203 	    __func__));
3204 	KASSERT(buf->type == DPAA2_BUF_TX, ("%s: not Tx buffer", __func__));
3205 
3206 	/* Keep DMA tags for this buffer. */
3207 	if (__predict_true(buf->tx.dmat == NULL))
3208 		buf->tx.dmat = sc->tx_dmat;
3209 	if (__predict_true(buf->tx.sgt_dmat == NULL))
3210 		buf->tx.sgt_dmat = sc->sgt_dmat;
3211 
	/* Create a DMA map for the given buffer if it doesn't exist yet. */
3213 	if (__predict_true(buf->tx.dmap == NULL)) {
3214 		error = bus_dmamap_create(buf->tx.dmat, 0, &dmap);
3215 		if (error != 0) {
3216 			device_printf(sc->dev, "%s: failed to create "
3217 			    "Tx DMA map: error=%d\n", __func__, error);
3218 			return (error);
3219 		}
3220 		buf->tx.dmap = dmap;
3221 	}
3222 
3223 	/* Allocate a buffer to store scatter/gather table. */
3224 	if (__predict_true(buf->tx.sgt_vaddr == NULL)) {
3225 		error = bus_dmamem_alloc(buf->tx.sgt_dmat,
3226 		    &buf->tx.sgt_vaddr, BUS_DMA_ZERO | BUS_DMA_COHERENT,
3227 		    &buf->tx.sgt_dmap);
3228 		if (error != 0) {
3229 			device_printf(sc->dev, "%s: failed to allocate "
3230 			    "S/G table: error=%d\n", __func__, error);
3231 			return (error);
3232 		}
3233 	}
3234 
3235 	return (0);
3236 }
3237 
3238 /**
3239  * @brief Allocate channel storage visible to QBMan.
3240  */
3241 static int
3242 dpaa2_ni_seed_chan_storage(struct dpaa2_ni_softc *sc,
3243     struct dpaa2_ni_channel *chan)
3244 {
3245 	struct dpaa2_buf *buf = &chan->store;
3246 	int error;
3247 
3248 	KASSERT(sc->st_dmat != NULL, ("%s: channel storage DMA tag is not "
3249 	    "allocated?", __func__));
3250 	KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not channel storage buffer",
3251 	    __func__));
3252 
3253 	/* Keep DMA tag for this buffer. */
3254 	if (__predict_false(buf->store.dmat == NULL)) {
3255 		buf->store.dmat = sc->st_dmat;
3256 	}
3257 
3258 	if (__predict_false(buf->store.vaddr == NULL)) {
3259 		error = bus_dmamem_alloc(buf->store.dmat, &buf->store.vaddr,
3260 		    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->store.dmap);
3261 		if (error) {
3262 			device_printf(sc->dev, "%s: failed to allocate channel "
3263 			    "storage\n", __func__);
3264 			return (error);
3265 		}
3266 	}
3267 
3268 	if (__predict_false(buf->store.paddr == 0)) {
3269 		error = bus_dmamap_load(buf->store.dmat, buf->store.dmap,
3270 		    buf->store.vaddr, ETH_STORE_SIZE, dpaa2_ni_dmamap_cb,
3271 		    &buf->store.paddr, BUS_DMA_NOWAIT);
3272 		if (error) {
3273 			device_printf(sc->dev, "%s: failed to map channel "
3274 			    "storage\n", __func__);
3275 			return (error);
3276 		}
3277 	}
3278 
3279 	chan->store_sz = ETH_STORE_FRAMES;
3280 	chan->store_idx = 0;
3281 
3282 	return (0);
3283 }
3284 
3285 /**
3286  * @brief Build a DPAA2 frame descriptor.
3287  */
3288 static int
3289 dpaa2_ni_build_fd(struct dpaa2_ni_softc *sc, struct dpaa2_ni_tx_ring *tx,
3290     struct dpaa2_buf *buf, bus_dma_segment_t *txsegs, int txnsegs,
3291     struct dpaa2_fd *fd)
3292 {
3293 	struct dpaa2_ni_channel	*chan = tx->fq->chan;
3294 	struct dpaa2_sg_entry *sgt;
3295 	int i, error;
3296 
3297 	KASSERT(txnsegs <= DPAA2_TX_SEGLIMIT, ("%s: too many segments, "
3298 	    "txnsegs (%d) > %d", __func__, txnsegs, DPAA2_TX_SEGLIMIT));
3299 	KASSERT(buf->type == DPAA2_BUF_TX, ("%s: not Tx buffer", __func__));
3300 	KASSERT(buf->tx.sgt_vaddr != NULL, ("%s: S/G table not allocated?",
3301 	    __func__));
3302 
3303 	/* Reset frame descriptor fields. */
3304 	memset(fd, 0, sizeof(*fd));
3305 
3306 	if (__predict_true(txnsegs <= DPAA2_TX_SEGLIMIT)) {
3307 		/* Populate S/G table. */
		/* NOTE: Tx data offset is in bytes, relative to the FD address. */
		sgt = (struct dpaa2_sg_entry *)((uint8_t *) buf->tx.sgt_vaddr +
		    sc->tx_data_off);
3310 		for (i = 0; i < txnsegs; i++) {
3311 			sgt[i].addr = (uint64_t) txsegs[i].ds_addr;
3312 			sgt[i].len = (uint32_t) txsegs[i].ds_len;
3313 			sgt[i].offset_fmt = 0u;
3314 		}
3315 		sgt[i-1].offset_fmt |= 0x8000u; /* set final entry flag */
3316 
3317 		KASSERT(buf->tx.sgt_paddr == 0, ("%s: sgt_paddr(%#jx) != 0",
3318 		    __func__, buf->tx.sgt_paddr));
3319 
3320 		/* Load S/G table. */
3321 		error = bus_dmamap_load(buf->tx.sgt_dmat, buf->tx.sgt_dmap,
3322 		    buf->tx.sgt_vaddr, DPAA2_TX_SGT_SZ, dpaa2_ni_dmamap_cb,
3323 		    &buf->tx.sgt_paddr, BUS_DMA_NOWAIT);
3324 		if (__predict_false(error != 0)) {
3325 			device_printf(sc->dev, "%s: failed to map S/G table: "
3326 			    "error=%d\n", __func__, error);
3327 			return (error);
3328 		}
3329 		buf->tx.paddr = buf->tx.sgt_paddr;
3330 		buf->tx.vaddr = buf->tx.sgt_vaddr;
3331 		sc->tx_sg_frames++; /* for sysctl(9) */
3332 	} else {
3333 		return (EINVAL);
3334 	}
3335 
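	/*
	 * Encode channel, Tx ring and buffer indexes into the ADDR_TOK bits
	 * (63-49, unused by QBMan) of the FD address; dpaa2_ni_tx_conf()
	 * decodes them back on Tx confirmation.
	 */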
	fd->addr =
	    ((uint64_t)(chan->flowid & DPAA2_NI_BUF_CHAN_MASK) <<
		DPAA2_NI_BUF_CHAN_SHIFT) |
	    ((uint64_t)(tx->txid & DPAA2_NI_TX_IDX_MASK) <<
		DPAA2_NI_TX_IDX_SHIFT) |
	    ((uint64_t)(buf->tx.idx & DPAA2_NI_TXBUF_IDX_MASK) <<
		DPAA2_NI_TXBUF_IDX_SHIFT) |
	    (buf->tx.paddr & DPAA2_NI_BUF_ADDR_MASK);

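	/*
	 * Take the frame length from the mbuf, mark the frame as
	 * scatter/gather and record the offset of the frame data.
	 */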
	fd->data_length = (uint32_t) buf->tx.m->m_pkthdr.len;
	fd->bpid_ivp_bmt = 0;
	fd->offset_fmt_sl = 0x2000u | sc->tx_data_off;
	fd->ctrl = 0x00800000u;

	return (0);
}

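/*
 * Helpers to decode individual frame descriptor fields, including the
 * software annotations packed into fd->addr by dpaa2_ni_build_fd() above.
 */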
static int
dpaa2_ni_fd_err(struct dpaa2_fd *fd)
{
	return ((fd->ctrl >> DPAA2_NI_FD_ERR_SHIFT) & DPAA2_NI_FD_ERR_MASK);
}

static uint32_t
dpaa2_ni_fd_data_len(struct dpaa2_fd *fd)
{
	if (dpaa2_ni_fd_short_len(fd))
		return (fd->data_length & DPAA2_NI_FD_LEN_MASK);

	return (fd->data_length);
}

static int
dpaa2_ni_fd_chan_idx(struct dpaa2_fd *fd)
{
	return ((((bus_addr_t) fd->addr) >> DPAA2_NI_BUF_CHAN_SHIFT) &
	    DPAA2_NI_BUF_CHAN_MASK);
}

static int
dpaa2_ni_fd_buf_idx(struct dpaa2_fd *fd)
{
	return ((((bus_addr_t) fd->addr) >> DPAA2_NI_BUF_IDX_SHIFT) &
	    DPAA2_NI_BUF_IDX_MASK);
}

static int
dpaa2_ni_fd_tx_idx(struct dpaa2_fd *fd)
{
	return ((((bus_addr_t) fd->addr) >> DPAA2_NI_TX_IDX_SHIFT) &
	    DPAA2_NI_TX_IDX_MASK);
}

static int
dpaa2_ni_fd_txbuf_idx(struct dpaa2_fd *fd)
{
	return ((((bus_addr_t) fd->addr) >> DPAA2_NI_TXBUF_IDX_SHIFT) &
	    DPAA2_NI_TXBUF_IDX_MASK);
}

static int
dpaa2_ni_fd_format(struct dpaa2_fd *fd)
{
	return ((enum dpaa2_fd_format)((fd->offset_fmt_sl >>
	    DPAA2_NI_FD_FMT_SHIFT) & DPAA2_NI_FD_FMT_MASK));
}

static bool
dpaa2_ni_fd_short_len(struct dpaa2_fd *fd)
{
	return (((fd->offset_fmt_sl >> DPAA2_NI_FD_SL_SHIFT)
	    & DPAA2_NI_FD_SL_MASK) == 1);
}

static int
dpaa2_ni_fd_offset(struct dpaa2_fd *fd)
{
	return (fd->offset_fmt_sl & DPAA2_NI_FD_OFFSET_MASK);
}

/**
 * @brief Collect statistics of the network interface.
 */
static int
dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS)
{
	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
	struct dpni_stat *stat = &dpni_stat_sysctls[oidp->oid_number];
	device_t child = sc->dev;
	uint64_t cnt[DPAA2_NI_STAT_COUNTERS];
	uint64_t result = 0;
	int error;

	error = DPAA2_CMD_NI_GET_STATISTICS(sc->dev, child,
	    dpaa2_mcp_tk(sc->cmd, sc->ni_token), stat->page, 0, cnt);
	if (!error)
		result = cnt[stat->cnt];

	return (sysctl_handle_64(oidp, &result, 0, req));
}

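/**
 * @brief Report the current number of network interface buffers via sysctl(9).
 */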
static int
dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS)
{
	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
	uint32_t buf_num = DPAA2_ATOMIC_READ(&sc->buf_num);

	return (sysctl_handle_32(oidp, &buf_num, 0, req));
}

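/**
 * @brief Report the current number of free network interface buffers via
 * sysctl(9).
 */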
static int
dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS)
{
	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
	uint32_t buf_free = DPAA2_ATOMIC_READ(&sc->buf_free);

	return (sysctl_handle_32(oidp, &buf_free, 0, req));
}

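/**
 * @brief Configure hash-based Rx traffic distribution.
 *
 * "flags" is a combination of RXH_ bits.
 */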
static int
dpaa2_ni_set_hash(device_t dev, uint64_t flags)
{
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	uint64_t key = 0;
	int i;

	if (sc->attr.num.queues <= 1) {
		return (EOPNOTSUPP);
	}

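	/* Translate RXH_ flags into distribution field IDs. */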
	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		if (dist_fields[i].rxnfc_field & flags) {
			key |= dist_fields[i].id;
		}
	}

	return (dpaa2_ni_set_dist_key(dev, DPAA2_NI_DIST_MODE_HASH, key));
}

/**
 * @brief Set the Rx distribution (hash or flow classification) key.
 *
 * "flags" is a combination of RXH_ bits.
 */
static int
dpaa2_ni_set_dist_key(device_t dev, enum dpaa2_ni_dist_mode type, uint64_t flags)
{
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpkg_profile_cfg cls_cfg;
	struct dpkg_extract *key;
	struct dpaa2_buf *buf = &sc->rxd_kcfg;
	int i, error = 0;

	KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not storage buffer",
	    __func__));
	if (__predict_true(buf->store.dmat == NULL))
		buf->store.dmat = sc->rxd_dmat;

	memset(&cls_cfg, 0, sizeof(cls_cfg));

	/* Configure extracts according to the given flags. */
	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		key = &cls_cfg.extracts[cls_cfg.num_extracts];

		if (!(flags & dist_fields[i].id))
			continue;

		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
			device_printf(dev, "%s: failed to add key extraction "
			    "rule\n", __func__);
			return (E2BIG);
		}

		key->type = DPKG_EXTRACT_FROM_HDR;
		key->extract.from_hdr.prot = dist_fields[i].cls_prot;
		key->extract.from_hdr.type = DPKG_FULL_FIELD;
		key->extract.from_hdr.field = dist_fields[i].cls_field;
		cls_cfg.num_extracts++;
	}

	error = bus_dmamem_alloc(buf->store.dmat, &buf->store.vaddr,
	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->store.dmap);
	if (error != 0) {
		device_printf(dev, "%s: failed to allocate a buffer for Rx "
		    "traffic distribution key configuration\n", __func__);
		return (error);
	}

	error = dpaa2_ni_prepare_key_cfg(&cls_cfg, (uint8_t *) buf->store.vaddr);
	if (error != 0) {
		device_printf(dev, "%s: failed to prepare key configuration: "
		    "error=%d\n", __func__, error);
		return (error);
	}

	/* Prepare for setting the Rx dist. */
	error = bus_dmamap_load(buf->store.dmat, buf->store.dmap,
	    buf->store.vaddr, DPAA2_CLASSIFIER_DMA_SIZE, dpaa2_ni_dmamap_cb,
	    &buf->store.paddr, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->dev, "%s: failed to map a buffer for Rx "
		    "traffic distribution key configuration\n", __func__);
		return (error);
	}

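	/*
	 * Hand the DMA-mapped key profile over to the MC firmware; the
	 * distribution size passed here is the number of Rx queues.
	 */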
	if (type == DPAA2_NI_DIST_MODE_HASH) {
		error = DPAA2_CMD_NI_SET_RX_TC_DIST(dev, child, dpaa2_mcp_tk(
		    sc->cmd, sc->ni_token), sc->attr.num.queues, 0,
		    DPAA2_NI_DIST_MODE_HASH, buf->store.paddr);
		if (error != 0)
			device_printf(dev, "%s: failed to set distribution mode "
			    "and size for the traffic class\n", __func__);
	}

	return (error);
}

/**
 * @brief Prepare extract parameters.
 *
 * cfg:		Defines a full Key Generation profile.
 * key_cfg_buf:	256 bytes of memory zeroed before mapping it to DMA.
 */
static int
dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *cfg, uint8_t *key_cfg_buf)
{
	struct dpni_ext_set_rx_tc_dist *dpni_ext;
	struct dpni_dist_extract *extr;
	int i, j;

	if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
		return (EINVAL);

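	/* Serialize the profile into the binary layout expected by the MC. */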
	dpni_ext = (struct dpni_ext_set_rx_tc_dist *) key_cfg_buf;
	dpni_ext->num_extracts = cfg->num_extracts;

	for (i = 0; i < cfg->num_extracts; i++) {
		extr = &dpni_ext->extracts[i];

		switch (cfg->extracts[i].type) {
		case DPKG_EXTRACT_FROM_HDR:
			extr->prot = cfg->extracts[i].extract.from_hdr.prot;
			extr->efh_type =
			    cfg->extracts[i].extract.from_hdr.type & 0x0Fu;
			extr->size = cfg->extracts[i].extract.from_hdr.size;
			extr->offset = cfg->extracts[i].extract.from_hdr.offset;
			extr->field = cfg->extracts[i].extract.from_hdr.field;
			extr->hdr_index =
				cfg->extracts[i].extract.from_hdr.hdr_index;
			break;
		case DPKG_EXTRACT_FROM_DATA:
			extr->size = cfg->extracts[i].extract.from_data.size;
			extr->offset =
				cfg->extracts[i].extract.from_data.offset;
			break;
		case DPKG_EXTRACT_FROM_PARSE:
			extr->size = cfg->extracts[i].extract.from_parse.size;
			extr->offset =
				cfg->extracts[i].extract.from_parse.offset;
			break;
		default:
			return (EINVAL);
		}

		extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
		extr->extract_type = cfg->extracts[i].type & 0x0Fu;

		for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
			extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
			extr->masks[j].offset =
				cfg->extracts[i].masks[j].offset;
		}
	}

	return (0);
}

/**
 * @brief Obtain the next dequeue response from the channel storage.
 */
static int
dpaa2_ni_chan_storage_next(struct dpaa2_ni_channel *chan, struct dpaa2_dq **dq)
{
	struct dpaa2_buf *buf = &chan->store;
	struct dpaa2_dq *msgs = buf->store.vaddr;
	struct dpaa2_dq *msg = &msgs[chan->store_idx];
	int rc = EINPROGRESS;

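	/*
	 * EINPROGRESS tells the caller that more responses may still arrive;
	 * EALREADY and ENOENT below terminate the dequeue and rewind the
	 * storage index.
	 */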
	chan->store_idx++;

	if (msg->fdr.desc.stat & DPAA2_DQ_STAT_EXPIRED) {
		rc = EALREADY; /* VDQ command is expired */
		chan->store_idx = 0;
		if (!(msg->fdr.desc.stat & DPAA2_DQ_STAT_VALIDFRAME))
			msg = NULL; /* Null response, FD is invalid */
	}
	if (msg != NULL && (msg->fdr.desc.stat & DPAA2_DQ_STAT_FQEMPTY)) {
		rc = ENOENT; /* FQ is empty */
		chan->store_idx = 0;
	}

	if (dq != NULL)
		*dq = msg;

	return (rc);
}

static device_method_t dpaa2_ni_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		dpaa2_ni_probe),
	DEVMETHOD(device_attach,	dpaa2_ni_attach),
	DEVMETHOD(device_detach,	dpaa2_ni_detach),

	/* mii via memac_mdio */
	DEVMETHOD(miibus_statchg,	dpaa2_ni_miibus_statchg),

	DEVMETHOD_END
};

static driver_t dpaa2_ni_driver = {
	"dpaa2_ni",
	dpaa2_ni_methods,
	sizeof(struct dpaa2_ni_softc),
};

DRIVER_MODULE(miibus, dpaa2_ni, miibus_driver, 0, 0);
DRIVER_MODULE(dpaa2_ni, dpaa2_rc, dpaa2_ni_driver, 0, 0);

MODULE_DEPEND(dpaa2_ni, miibus, 1, 1, 1);
#ifdef DEV_ACPI
MODULE_DEPEND(dpaa2_ni, memac_mdio_acpi, 1, 1, 1);
#endif
#ifdef FDT
MODULE_DEPEND(dpaa2_ni, memac_mdio_fdt, 1, 1, 1);
#endif