/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright © 2021-2022 Dmitry Salychev
 * Copyright © 2022 Mathew McBride
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * The DPAA2 Network Interface (DPNI) driver.
 *
 * The DPNI object is a network interface that is configurable to support a
 * wide range of features, from a very basic Ethernet interface up to a
 * high-functioning network interface. It supports the features expected by
 * standard network stacks, from basic connectivity to offloads.
 *
 * DPNIs work with Ethernet traffic, starting with the L2 header. Additional
 * functions are provided for standard network protocols (L2, L3, L4, etc.).
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/taskqueue.h>
#include <sys/buf_ring.h>
#include <sys/smp.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/atomic.h>

#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <dev/pci/pcivar.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mdio/mdio.h>

#include "opt_acpi.h"
#include "opt_platform.h"

#include "pcib_if.h"
#include "pci_if.h"
#include "miibus_if.h"
#include "mdio_if.h"
#include "memac_mdio_if.h"

#include "dpaa2_types.h"
#include "dpaa2_mc.h"
#include "dpaa2_mc_if.h"
#include "dpaa2_mcp.h"
#include "dpaa2_swp.h"
#include "dpaa2_swp_if.h"
#include "dpaa2_cmd_if.h"
#include "dpaa2_ni.h"

#define BIT(x)			(1ul << (x))
#define WRIOP_VERSION(x, y, z)	((x) << 10 | (y) << 5 | (z) << 0)
#define ARRAY_SIZE(a)		(sizeof(a) / sizeof((a)[0]))
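
/*
 * NOTE: WRIOP_VERSION() above packs a "major.minor.revision" triplet into a
 * single integer (5 bits each for minor and revision), so hardware revisions
 * can be compared numerically, e.g. against WRIOP_VERSION(1, 0, 0).
 */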

/* Frame Dequeue Response status bits. */
#define IS_NULL_RESPONSE(stat)	((((stat) >> 4) & 1) == 0)

#define	ALIGN_UP(x, y)		roundup2((x), (y))
#define	ALIGN_DOWN(x, y)	rounddown2((x), (y))
#define CACHE_LINE_ALIGN(x)	ALIGN_UP((x), CACHE_LINE_SIZE)

#define DPNI_LOCK(__sc) do {			\
	mtx_assert(&(__sc)->lock, MA_NOTOWNED);	\
	mtx_lock(&(__sc)->lock);		\
} while (0)
#define	DPNI_UNLOCK(__sc) do {			\
	mtx_assert(&(__sc)->lock, MA_OWNED);	\
	mtx_unlock(&(__sc)->lock);		\
} while (0)

#define TX_LOCK(__tx) do {			\
	mtx_assert(&(__tx)->lock, MA_NOTOWNED);	\
	mtx_lock(&(__tx)->lock);		\
} while (0)
#define	TX_UNLOCK(__tx) do {			\
	mtx_assert(&(__tx)->lock, MA_OWNED);	\
	mtx_unlock(&(__tx)->lock);		\
} while (0)

#define DPAA2_TX_RING(sc, chan, tc)				\
	(&(sc)->channels[(chan)]->txc_queue.tx_rings[(tc)])
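
/*
 * NOTE: DPAA2_TX_RING() picks the Tx ring of the given traffic class within
 * the TxConf queue of the given channel.
 */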

#define DPNI_IRQ_INDEX		0 /* Index of the only DPNI IRQ. */
#define DPNI_IRQ_LINK_CHANGED	1 /* Link state changed */
#define DPNI_IRQ_EP_CHANGED	2 /* DPAA2 endpoint dis/connected */

/* Default maximum frame length. */
#define DPAA2_ETH_MFL		(ETHER_MAX_LEN - ETHER_CRC_LEN)

/* Minimally supported version of the DPNI API. */
#define DPNI_VER_MAJOR		7
#define DPNI_VER_MINOR		0

/* Rx/Tx buffers configuration. */
#define BUF_ALIGN_V1		256 /* WRIOP v1.0.0 limitation */
#define BUF_ALIGN		64
#define BUF_SWA_SIZE		64  /* SW annotation size */
#define BUF_RX_HWA_SIZE		64  /* HW annotation size */
#define BUF_TX_HWA_SIZE		128 /* HW annotation size */
#define BUF_SIZE		(MJUM9BYTES)
#define	BUF_MAXADDR_49BIT	0x1FFFFFFFFFFFFul
#define	BUF_MAXADDR		(BUS_SPACE_MAXADDR)

#define DPAA2_TX_BUFRING_SZ	(4096u)
#define DPAA2_TX_SEGLIMIT	(16u) /* arbitrary number */
#define DPAA2_TX_SEG_SZ		(4096u)
#define DPAA2_TX_SEGS_MAXSZ	(DPAA2_TX_SEGLIMIT * DPAA2_TX_SEG_SZ)
#define DPAA2_TX_SGT_SZ		(512u) /* bytes */

/* Size of a buffer to keep a QoS table key configuration. */
#define ETH_QOS_KCFG_BUF_SIZE	256

/* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */
#define DPAA2_CLASSIFIER_DMA_SIZE 256

/* Channel storage buffer configuration. */
#define ETH_STORE_FRAMES	16u
#define ETH_STORE_SIZE		((ETH_STORE_FRAMES + 1) * sizeof(struct dpaa2_dq))
#define ETH_STORE_ALIGN		64u

/* Buffers layout options. */
#define BUF_LOPT_TIMESTAMP	0x1
#define BUF_LOPT_PARSER_RESULT	0x2
#define BUF_LOPT_FRAME_STATUS	0x4
#define BUF_LOPT_PRIV_DATA_SZ	0x8
#define BUF_LOPT_DATA_ALIGN	0x10
#define BUF_LOPT_DATA_HEAD_ROOM	0x20
#define BUF_LOPT_DATA_TAIL_ROOM	0x40

#define DPAA2_NI_BUF_ADDR_MASK	(0x1FFFFFFFFFFFFul) /* 49-bit addresses max. */
#define DPAA2_NI_BUF_CHAN_MASK	(0xFu)
#define DPAA2_NI_BUF_CHAN_SHIFT	(60)
#define DPAA2_NI_BUF_IDX_MASK	(0x7FFFu)
#define DPAA2_NI_BUF_IDX_SHIFT	(49)
#define DPAA2_NI_TX_IDX_MASK	(0x7u)
#define DPAA2_NI_TX_IDX_SHIFT	(57)
#define DPAA2_NI_TXBUF_IDX_MASK	(0xFFu)
#define DPAA2_NI_TXBUF_IDX_SHIFT (49)
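
/*
 * NOTE: Only 49 bits of a frame's physical address are significant, so the
 * driver re-uses the upper bits of the address field to stash a channel and
 * a buffer index (Rx) or a Tx ring and a Tx buffer index (Tx). This lets a
 * frame handed back by hardware be traced to the software state it came from
 * (see the dpaa2_ni_fd_*_idx() routines below).
 */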

#define DPAA2_NI_FD_FMT_MASK	(0x3u)
#define DPAA2_NI_FD_FMT_SHIFT	(12)
#define DPAA2_NI_FD_ERR_MASK	(0xFFu)
#define DPAA2_NI_FD_ERR_SHIFT	(0)
#define DPAA2_NI_FD_SL_MASK	(0x1u)
#define DPAA2_NI_FD_SL_SHIFT	(14)
#define DPAA2_NI_FD_LEN_MASK	(0x3FFFFu)
#define DPAA2_NI_FD_OFFSET_MASK (0x0FFFu)
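
/*
 * NOTE: The masks above extract fields of a frame descriptor: its format
 * (single buffer vs. scatter/gather), error bits, the short-length flag,
 * the data length and the data offset within the buffer.
 */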

/* Enables TCAM for Flow Steering and QoS look-ups. */
#define DPNI_OPT_HAS_KEY_MASKING 0x10

/* Unique IDs for the supported Rx classification header fields. */
#define DPAA2_ETH_DIST_ETHDST	BIT(0)
#define DPAA2_ETH_DIST_ETHSRC	BIT(1)
#define DPAA2_ETH_DIST_ETHTYPE	BIT(2)
#define DPAA2_ETH_DIST_VLAN	BIT(3)
#define DPAA2_ETH_DIST_IPSRC	BIT(4)
#define DPAA2_ETH_DIST_IPDST	BIT(5)
#define DPAA2_ETH_DIST_IPPROTO	BIT(6)
#define DPAA2_ETH_DIST_L4SRC	BIT(7)
#define DPAA2_ETH_DIST_L4DST	BIT(8)
#define DPAA2_ETH_DIST_ALL	(~0ULL)

/* L3-L4 network traffic flow hash options. */
#define	RXH_L2DA		(1 << 1)
#define	RXH_VLAN		(1 << 2)
#define	RXH_L3_PROTO		(1 << 3)
#define	RXH_IP_SRC		(1 << 4)
#define	RXH_IP_DST		(1 << 5)
#define	RXH_L4_B_0_1		(1 << 6) /* src port in case of TCP/UDP/SCTP */
#define	RXH_L4_B_2_3		(1 << 7) /* dst port in case of TCP/UDP/SCTP */
#define	RXH_DISCARD		(1U << 31)

/* Default Rx hash options, set during attaching. */
#define DPAA2_RXH_DEFAULT	(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
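
/* I.e. a 4-tuple hash: IP src/dst address and L4 src/dst port. */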

MALLOC_DEFINE(M_DPAA2_NI, "dpaa2_ni", "DPAA2 Network Interface");

/* DPAA2 Network Interface resource specification. */
struct resource_spec dpaa2_ni_spec[] = {
	/*
	 * DPMCP resources.
	 *
	 * NOTE: MC command portals (MCPs) are used to send commands to, and
	 *	 receive responses from, the MC firmware. One portal per DPNI.
	 */
#define MCP_RES_NUM	(1u)
#define MCP_RID_OFF	(0u)
#define MCP_RID(rid)	((rid) + MCP_RID_OFF)
	/* --- */
	{ DPAA2_DEV_MCP, MCP_RID(0),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	/*
	 * DPIO resources (software portals).
	 *
	 * NOTE: One per running core. While DPIOs are the source of data
	 *	 availability interrupts, the DPCONs are used to identify the
	 *	 network interface that has produced ingress data to that core.
	 */
#define IO_RES_NUM	(16u)
#define IO_RID_OFF	(MCP_RID_OFF + MCP_RES_NUM)
#define IO_RID(rid)	((rid) + IO_RID_OFF)
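	/*
	 * NOTE: Resource IDs of each type follow the previous section, i.e.
	 * IO_RID(i) == MCP_RID_OFF + MCP_RES_NUM + i, so the DPIO rids span
	 * 1..16 here.
	 */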
	/* --- */
	{ DPAA2_DEV_IO,  IO_RID(0),    RF_ACTIVE | RF_SHAREABLE },
	{ DPAA2_DEV_IO,  IO_RID(1),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(2),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(3),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(4),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(5),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(6),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(7),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(8),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(9),    RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(10),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(11),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(12),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(13),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(14),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO,  IO_RID(15),   RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	/*
	 * DPBP resources (buffer pools).
	 *
	 * NOTE: One per network interface.
	 */
#define BP_RES_NUM	(1u)
#define BP_RID_OFF	(IO_RID_OFF + IO_RES_NUM)
#define BP_RID(rid)	((rid) + BP_RID_OFF)
	/* --- */
	{ DPAA2_DEV_BP,  BP_RID(0),   RF_ACTIVE },
	/*
	 * DPCON resources (channels).
	 *
	 * NOTE: One DPCON per core to which Rx or Tx confirmation traffic is
	 *	 to be distributed.
	 * NOTE: Since it is necessary to distinguish between traffic from
	 *	 different network interfaces arriving on the same core, the
	 *	 DPCONs must be private to the DPNIs.
	 */
#define CON_RES_NUM	(16u)
#define CON_RID_OFF	(BP_RID_OFF + BP_RES_NUM)
#define CON_RID(rid)	((rid) + CON_RID_OFF)
	/* --- */
	{ DPAA2_DEV_CON, CON_RID(0),   RF_ACTIVE },
	{ DPAA2_DEV_CON, CON_RID(1),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(2),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(3),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(4),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(5),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(6),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(7),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(8),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(9),   RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(10),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(11),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(12),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(13),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(14),  RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, CON_RID(15),  RF_ACTIVE | RF_OPTIONAL },
	/* --- */
	RESOURCE_SPEC_END
};

/* Supported header fields for Rx hash distribution key */
static const struct dpaa2_eth_dist_fields dist_fields[] = {
	{
		/* L2 header */
		.rxnfc_field = RXH_L2DA,
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_DA,
		.id = DPAA2_ETH_DIST_ETHDST,
		.size = 6,
	}, {
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_SA,
		.id = DPAA2_ETH_DIST_ETHSRC,
		.size = 6,
	}, {
		/* This is the last ethertype field parsed:
		 * depending on frame format, it can be the MAC ethertype
		 * or the VLAN etype.
		 */
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_TYPE,
		.id = DPAA2_ETH_DIST_ETHTYPE,
		.size = 2,
	}, {
		/* VLAN header */
		.rxnfc_field = RXH_VLAN,
		.cls_prot = NET_PROT_VLAN,
		.cls_field = NH_FLD_VLAN_TCI,
		.id = DPAA2_ETH_DIST_VLAN,
		.size = 2,
	}, {
		/* IP header */
		.rxnfc_field = RXH_IP_SRC,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_SRC,
		.id = DPAA2_ETH_DIST_IPSRC,
		.size = 4,
	}, {
		.rxnfc_field = RXH_IP_DST,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_DST,
		.id = DPAA2_ETH_DIST_IPDST,
		.size = 4,
	}, {
		.rxnfc_field = RXH_L3_PROTO,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_PROTO,
		.id = DPAA2_ETH_DIST_IPPROTO,
		.size = 1,
	}, {
		/* Using UDP ports, this is functionally equivalent to raw
		 * byte pairs from the L4 header.
		 */
		.rxnfc_field = RXH_L4_B_0_1,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_SRC,
		.id = DPAA2_ETH_DIST_L4SRC,
		.size = 2,
	}, {
		.rxnfc_field = RXH_L4_B_2_3,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_DST,
		.id = DPAA2_ETH_DIST_L4DST,
		.size = 2,
	},
};

static struct dpni_stat {
	int	 page;
	int	 cnt;
	char	*name;
	char	*desc;
} dpni_stat_sysctls[DPAA2_NI_STAT_SYSCTLS] = {
	/* PAGE, COUNTER, NAME, DESCRIPTION */
	{  0, 0, "in_all_frames",	"All accepted ingress frames" },
	{  0, 1, "in_all_bytes",	"Bytes in all accepted ingress frames" },
	{  0, 2, "in_multi_frames",	"Multicast accepted ingress frames" },
	{  1, 0, "eg_all_frames",	"All egress frames transmitted" },
	{  1, 1, "eg_all_bytes",	"Bytes in all frames transmitted" },
	{  1, 2, "eg_multi_frames",	"Multicast egress frames transmitted" },
	{  2, 0, "in_filtered_frames",	"All ingress frames discarded due to "
					"filtering" },
	{  2, 1, "in_discarded_frames",	"All frames discarded due to errors" },
	{  2, 2, "in_nobuf_discards",	"Discards on ingress side due to buffer "
					"depletion in DPNI buffer pools" },
};
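
/*
 * NOTE: "page" and "cnt" above select one of the DPNI hardware statistics
 * pages and a counter within that page; dpaa2_ni_collect_stats() is expected
 * to query that page via the MC firmware and report the selected counter.
 */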

/* Device interface */
static int dpaa2_ni_probe(device_t);
static int dpaa2_ni_attach(device_t);
static int dpaa2_ni_detach(device_t);

/* DPAA2 network interface setup and configuration */
static int dpaa2_ni_setup(device_t);
static int dpaa2_ni_setup_channels(device_t);
static int dpaa2_ni_setup_fq(device_t, struct dpaa2_ni_channel *,
    enum dpaa2_ni_queue_type);
static int dpaa2_ni_bind(device_t);
static int dpaa2_ni_setup_rx_dist(device_t);
static int dpaa2_ni_setup_irqs(device_t);
static int dpaa2_ni_setup_msi(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_dma(struct dpaa2_ni_softc *);

/* Tx/Rx flow configuration */
static int dpaa2_ni_setup_rx_flow(device_t, struct dpaa2_cmd *,
    struct dpaa2_ni_fq *);
static int dpaa2_ni_setup_tx_flow(device_t, struct dpaa2_cmd *,
    struct dpaa2_ni_fq *);
static int dpaa2_ni_setup_rx_err_flow(device_t, struct dpaa2_cmd *,
    struct dpaa2_ni_fq *);

/* Configuration subroutines */
static int dpaa2_ni_set_buf_layout(device_t, struct dpaa2_cmd *);
static int dpaa2_ni_set_pause_frame(device_t, struct dpaa2_cmd *);
static int dpaa2_ni_set_qos_table(device_t, struct dpaa2_cmd *);
static int dpaa2_ni_set_mac_addr(device_t, struct dpaa2_cmd *, uint16_t,
    uint16_t);
static int dpaa2_ni_set_hash(device_t, uint64_t);
static int dpaa2_ni_set_dist_key(device_t, enum dpaa2_ni_dist_mode, uint64_t);

/* Buffers and buffer pools */
static int dpaa2_ni_seed_buf_pool(struct dpaa2_ni_softc *, uint32_t);
static int dpaa2_ni_seed_rxbuf(struct dpaa2_ni_softc *, struct dpaa2_buf *, int);
static int dpaa2_ni_seed_txbuf(struct dpaa2_ni_softc *, struct dpaa2_buf *, int);
static int dpaa2_ni_seed_chan_storage(struct dpaa2_ni_softc *,
    struct dpaa2_ni_channel *);

/* Frame descriptor routines */
static int dpaa2_ni_build_fd(struct dpaa2_ni_softc *, struct dpaa2_ni_tx_ring *,
    struct dpaa2_buf *, bus_dma_segment_t *, int, struct dpaa2_fd *);
static int dpaa2_ni_fd_err(struct dpaa2_fd *);
static uint32_t dpaa2_ni_fd_data_len(struct dpaa2_fd *);
static int dpaa2_ni_fd_chan_idx(struct dpaa2_fd *);
static int dpaa2_ni_fd_buf_idx(struct dpaa2_fd *);
static int dpaa2_ni_fd_tx_idx(struct dpaa2_fd *);
static int dpaa2_ni_fd_txbuf_idx(struct dpaa2_fd *);
static int dpaa2_ni_fd_format(struct dpaa2_fd *);
static bool dpaa2_ni_fd_short_len(struct dpaa2_fd *);
static int dpaa2_ni_fd_offset(struct dpaa2_fd *);

/* Various subroutines */
static int dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *, uint16_t, uint16_t);
static int dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *, uint8_t *);
static int dpaa2_ni_chan_storage_next(struct dpaa2_ni_channel *,
    struct dpaa2_dq **);

/* Network interface routines */
static void dpaa2_ni_init(void *);
static int  dpaa2_ni_transmit(struct ifnet *, struct mbuf *);
static void dpaa2_ni_qflush(struct ifnet *);
static int  dpaa2_ni_ioctl(struct ifnet *, u_long, caddr_t);
static int  dpaa2_ni_update_mac_filters(struct ifnet *);
static u_int dpaa2_ni_add_maddr(void *, struct sockaddr_dl *, u_int);

/* Interrupt handlers */
static void dpaa2_ni_intr(void *);

/* MII handlers */
static void dpaa2_ni_miibus_statchg(device_t);
static int  dpaa2_ni_media_change(struct ifnet *);
static void dpaa2_ni_media_status(struct ifnet *, struct ifmediareq *);
static void dpaa2_ni_media_tick(void *);

/* DMA mapping callback */
static void dpaa2_ni_dmamap_cb(void *, bus_dma_segment_t *, int, int);

/* Tx/Rx routines. */
static void dpaa2_ni_poll(void *);
static void dpaa2_ni_tx_locked(struct dpaa2_ni_softc *,
    struct dpaa2_ni_tx_ring *, struct mbuf *);
static void dpaa2_ni_bp_task(void *, int);

/* Tx/Rx subroutines */
static int  dpaa2_ni_consume_frames(struct dpaa2_ni_channel *,
    struct dpaa2_ni_fq **, uint32_t *);
static int  dpaa2_ni_rx(struct dpaa2_ni_channel *, struct dpaa2_ni_fq *,
    struct dpaa2_fd *);
static int  dpaa2_ni_rx_err(struct dpaa2_ni_channel *, struct dpaa2_ni_fq *,
    struct dpaa2_fd *);
static int  dpaa2_ni_tx_conf(struct dpaa2_ni_channel *, struct dpaa2_ni_fq *,
    struct dpaa2_fd *);

/* sysctl(9) */
static int dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS);
static int dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS);
static int dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS);

static int
dpaa2_ni_probe(device_t dev)
{
	/* The DPNI device is added by the parent resource container. */
	device_set_desc(dev, "DPAA2 Network Interface");
	return (BUS_PROBE_DEFAULT);
}

static int
dpaa2_ni_attach(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	device_t mcp_dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *mcp_dinfo;
	struct ifnet *ifp;
	char tq_name[32];
	int error;

	sc->dev = dev;
	sc->ifp = NULL;
	sc->miibus = NULL;
	sc->mii = NULL;
	sc->media_status = 0;
	sc->if_flags = 0;
	sc->link_state = LINK_STATE_UNKNOWN;
	sc->buf_align = 0;

	/* For debug purposes only! */
	sc->rx_anomaly_frames = 0;
	sc->rx_single_buf_frames = 0;
	sc->rx_sg_buf_frames = 0;
	sc->rx_enq_rej_frames = 0;
	sc->rx_ieoi_err_frames = 0;
	sc->tx_single_buf_frames = 0;
	sc->tx_sg_frames = 0;

	DPAA2_ATOMIC_XCHG(&sc->buf_num, 0);
	DPAA2_ATOMIC_XCHG(&sc->buf_free, 0);

	sc->bp_dmat = NULL;
	sc->st_dmat = NULL;
	sc->rxd_dmat = NULL;
	sc->qos_dmat = NULL;

	sc->qos_kcfg.type = DPAA2_BUF_STORE;
	sc->qos_kcfg.store.dmap = NULL;
	sc->qos_kcfg.store.paddr = 0;
	sc->qos_kcfg.store.vaddr = NULL;

	sc->rxd_kcfg.type = DPAA2_BUF_STORE;
	sc->rxd_kcfg.store.dmap = NULL;
	sc->rxd_kcfg.store.paddr = 0;
	sc->rxd_kcfg.store.vaddr = NULL;

	sc->mac.dpmac_id = 0;
	sc->mac.phy_dev = NULL;
	memset(sc->mac.addr, 0, ETHER_ADDR_LEN);

	error = bus_alloc_resources(sc->dev, dpaa2_ni_spec, sc->res);
	if (error) {
		device_printf(dev, "%s: failed to allocate resources: "
		    "error=%d\n", __func__, error);
		return (ENXIO);
	}

	/* Obtain MC portal. */
	mcp_dev = (device_t) rman_get_start(sc->res[MCP_RID(0)]);
	mcp_dinfo = device_get_ivars(mcp_dev);
	dinfo->portal = mcp_dinfo->portal;

	mtx_init(&sc->lock, device_get_nameunit(dev), "dpaa2_ni", MTX_DEF);

	/* Allocate network interface */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "%s: failed to allocate network interface\n",
		    __func__);
		return (ENXIO);
	}
	sc->ifp = ifp;
	if_initname(ifp, DPAA2_NI_IFNAME, device_get_unit(sc->dev));

	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
	ifp->if_init = dpaa2_ni_init;
	ifp->if_ioctl = dpaa2_ni_ioctl;
	ifp->if_transmit = dpaa2_ni_transmit;
	ifp->if_qflush = dpaa2_ni_qflush;

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU;
	ifp->if_capenable = ifp->if_capabilities;

	/* Allocate a command to send to MC hardware. */
	error = dpaa2_mcp_init_command(&sc->cmd, DPAA2_CMD_DEF);
	if (error) {
		device_printf(dev, "%s: failed to allocate dpaa2_cmd: "
		    "error=%d\n", __func__, error);
		goto err_exit;
	}

	/* Open resource container and network interface object. */
	error = DPAA2_CMD_RC_OPEN(dev, child, sc->cmd, rcinfo->id,
	    &sc->rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_free_cmd;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, dpaa2_mcp_tk(sc->cmd,
	    sc->rc_token), dinfo->id, &sc->ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto err_close_rc;
	}

	/* Create a taskqueue thread to release new buffers to the pool. */
	TASK_INIT(&sc->bp_task, 0, dpaa2_ni_bp_task, sc);
	bzero(tq_name, sizeof (tq_name));
	snprintf(tq_name, sizeof (tq_name), "%s_tqbp",
	    device_get_nameunit(dev));
	sc->bp_taskq = taskqueue_create(tq_name, M_WAITOK,
	    taskqueue_thread_enqueue, &sc->bp_taskq);
	if (sc->bp_taskq == NULL) {
		device_printf(dev, "%s: failed to allocate task queue: %s\n",
		    __func__, tq_name);
		goto err_close_ni;
	}
	taskqueue_start_threads(&sc->bp_taskq, 1, PI_NET, "%s", tq_name);

	error = dpaa2_ni_setup(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup DPNI: error=%d\n",
		    __func__, error);
		goto err_close_ni;
	}
	error = dpaa2_ni_setup_channels(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup QBMan channels: "
		    "error=%d\n", __func__, error);
		goto err_close_ni;
	}

	error = dpaa2_ni_bind(dev);
	if (error) {
		device_printf(dev, "%s: failed to bind DPNI: error=%d\n",
		    __func__, error);
		goto err_close_ni;
	}
	error = dpaa2_ni_setup_irqs(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup IRQs: error=%d\n",
		    __func__, error);
		goto err_close_ni;
	}
	error = dpaa2_ni_setup_sysctls(sc);
	if (error) {
		device_printf(dev, "%s: failed to setup sysctls: error=%d\n",
		    __func__, error);
		goto err_close_ni;
	}

	ether_ifattach(sc->ifp, sc->mac.addr);
	callout_init(&sc->mii_callout, 0);

	return (0);

err_close_ni:
	DPAA2_CMD_NI_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->ni_token));
err_close_rc:
	DPAA2_CMD_RC_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->rc_token));
err_free_cmd:
	dpaa2_mcp_free_command(sc->cmd);
err_exit:
	return (ENXIO);
}

static void
dpaa2_ni_fixed_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct dpaa2_ni_softc *sc = ifp->if_softc;

	DPNI_LOCK(sc);
	ifmr->ifm_count = 0;
	ifmr->ifm_mask = 0;
	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifmr->ifm_current = ifmr->ifm_active =
	    sc->fixed_ifmedia.ifm_cur->ifm_media;

	/*
	 * In non-PHY use cases, we need to signal link state up; otherwise,
	 * things that depend on a link event from devd (e.g. an async DHCP
	 * client) do not happen.
	 */
	if (ifp->if_link_state == LINK_STATE_UNKNOWN) {
		if_link_state_change(ifp, LINK_STATE_UP);
	}

	/*
	 * TODO: Check the status of the link partner (DPMAC, DPNI or other)
	 * and reset if down. This is different to the DPAA2_MAC_LINK_TYPE_PHY
	 * case, where the MC firmware sets the status, instead of us telling
	 * the MC what it is.
	 */
	DPNI_UNLOCK(sc);
}

static void
dpaa2_ni_setup_fixed_link(struct dpaa2_ni_softc *sc)
{
	/*
	 * FIXME: When the DPNI is connected to a DPMAC, we can get the
	 * 'apparent' speed from it.
	 */
	sc->fixed_link = true;

	ifmedia_init(&sc->fixed_ifmedia, 0, dpaa2_ni_media_change,
		     dpaa2_ni_fixed_media_status);
	ifmedia_add(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL);
	ifmedia_set(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T);
}

static int
dpaa2_ni_detach(device_t dev)
{
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);

	DPAA2_CMD_NI_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->ni_token));
	DPAA2_CMD_RC_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd, sc->rc_token));
	dpaa2_mcp_free_command(sc->cmd);

	sc->cmd = NULL;
	sc->ni_token = 0;
	sc->rc_token = 0;

	return (0);
}

/**
 * @brief Configure DPAA2 network interface object.
 */
static int
dpaa2_ni_setup(device_t dev)
{
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_ep_desc ep1_desc, ep2_desc; /* endpoint descriptors */
	struct dpaa2_cmd *cmd = sc->cmd;
	uint8_t eth_bca[ETHER_ADDR_LEN]; /* broadcast physical address */
	uint16_t rc_token = sc->rc_token;
	uint16_t ni_token = sc->ni_token;
	uint16_t mac_token;
	struct dpaa2_mac_attr attr;
	enum dpaa2_mac_link_type link_type;
	uint32_t link;
	int error;

	/* Check if we can work with this DPNI object. */
	error = DPAA2_CMD_NI_GET_API_VERSION(dev, child, dpaa2_mcp_tk(cmd,
	    ni_token), &sc->api_major, &sc->api_minor);
	if (error) {
		device_printf(dev, "%s: failed to get DPNI API version\n",
		    __func__);
		return (error);
	}
	if (dpaa2_ni_cmp_api_version(sc, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
		device_printf(dev, "%s: DPNI API version %u.%u not supported, "
		    "need >= %u.%u\n", __func__, sc->api_major, sc->api_minor,
		    DPNI_VER_MAJOR, DPNI_VER_MINOR);
		error = ENODEV;
		return (error);
	}

	/* Reset the DPNI object. */
	error = DPAA2_CMD_NI_RESET(dev, child, cmd);
	if (error) {
		device_printf(dev, "%s: failed to reset DPNI: id=%d\n",
		    __func__, dinfo->id);
		return (error);
	}

	/* Obtain attributes of the DPNI object. */
	error = DPAA2_CMD_NI_GET_ATTRIBUTES(dev, child, cmd, &sc->attr);
	if (error) {
		device_printf(dev, "%s: failed to obtain DPNI attributes: "
		    "id=%d\n", __func__, dinfo->id);
		return (error);
	}
	if (bootverbose) {
		device_printf(dev, "options=%#x queues=%d tx_channels=%d "
		    "wriop_version=%#x\n", sc->attr.options, sc->attr.num.queues,
		    sc->attr.num.channels, sc->attr.wriop_ver);
		device_printf(dev, "\ttraffic classes: rx=%d tx=%d "
		    "cgs_groups=%d\n", sc->attr.num.rx_tcs, sc->attr.num.tx_tcs,
		    sc->attr.num.cgs);
		device_printf(dev, "\ttable entries: mac=%d vlan=%d qos=%d "
		    "fs=%d\n", sc->attr.entries.mac, sc->attr.entries.vlan,
		    sc->attr.entries.qos, sc->attr.entries.fs);
		device_printf(dev, "\tkey sizes: qos=%d fs=%d\n",
		    sc->attr.key_size.qos, sc->attr.key_size.fs);
	}

	/* Configure buffer layouts of the DPNI queues. */
	error = dpaa2_ni_set_buf_layout(dev, cmd);
	if (error) {
		device_printf(dev, "%s: failed to configure buffer layout\n",
		    __func__);
		return (error);
	}

	/* Configure DMA resources. */
	error = dpaa2_ni_setup_dma(sc);
	if (error) {
		device_printf(dev, "%s: failed to setup DMA\n", __func__);
		return (error);
	}

	/* Setup link between DPNI and an object it's connected to. */
	ep1_desc.obj_id = dinfo->id;
	ep1_desc.if_id = 0; /* DPNI has the only endpoint */
	ep1_desc.type = dinfo->dtype;

	error = DPAA2_CMD_RC_GET_CONN(dev, child, dpaa2_mcp_tk(cmd, rc_token),
	    &ep1_desc, &ep2_desc, &link);
	if (error)
		device_printf(dev, "%s: failed to obtain an object DPNI is "
		    "connected to: error=%d\n", __func__, error);
	else {
		device_printf(dev, "connected to %s (id=%d)\n",
		    dpaa2_ttos(ep2_desc.type), ep2_desc.obj_id);

		error = dpaa2_ni_set_mac_addr(dev, cmd, rc_token, ni_token);
		if (error)
			device_printf(dev, "%s: failed to set MAC "
				      "address: error=%d\n", __func__, error);

		if (ep2_desc.type == DPAA2_DEV_MAC) {
			/*
			 * This is the simplest case when DPNI is connected to
			 * DPMAC directly.
			 */
			sc->mac.dpmac_id = ep2_desc.obj_id;

			link_type = DPAA2_MAC_LINK_TYPE_NONE;

			/*
			 * Need to determine if DPMAC type is PHY (attached to
			 * conventional MII PHY) or FIXED (usually SFP/SerDes,
			 * link state managed by MC firmware).
			 */
			error = DPAA2_CMD_MAC_OPEN(sc->dev, child,
			    dpaa2_mcp_tk(sc->cmd, sc->rc_token),
			    sc->mac.dpmac_id, &mac_token);
			/*
			 * Under VFIO, the DPMAC might be sitting in another
			 * container (DPRC) we don't have access to.
			 * Assume DPAA2_MAC_LINK_TYPE_FIXED if this is
			 * the case.
			 */
			if (error) {
				device_printf(dev, "%s: failed to open "
				    "connected DPMAC: dpmac_id=%d (assuming "
				    "it is in another DPRC)\n", __func__,
				    sc->mac.dpmac_id);
				link_type = DPAA2_MAC_LINK_TYPE_FIXED;
			} else {
				error = DPAA2_CMD_MAC_GET_ATTRIBUTES(dev, child,
				    sc->cmd, &attr);
				if (error)
					device_printf(dev, "%s: failed to get "
					    "DPMAC attributes: id=%d, "
					    "error=%d\n", __func__,
					    sc->mac.dpmac_id, error);
				else
					link_type = attr.link_type;
			}
			DPAA2_CMD_MAC_CLOSE(dev, child, dpaa2_mcp_tk(sc->cmd,
			    mac_token));

			if (link_type == DPAA2_MAC_LINK_TYPE_FIXED) {
				device_printf(dev, "connected DPMAC is in FIXED "
				    "mode\n");
				dpaa2_ni_setup_fixed_link(sc);
			} else if (link_type == DPAA2_MAC_LINK_TYPE_PHY) {
				device_printf(dev, "connected DPMAC is in PHY "
				    "mode\n");
				error = DPAA2_MC_GET_PHY_DEV(dev,
				    &sc->mac.phy_dev, sc->mac.dpmac_id);
				if (error == 0) {
					error = MEMAC_MDIO_SET_NI_DEV(
					    sc->mac.phy_dev, dev);
					if (error != 0)
						device_printf(dev, "%s: failed "
						    "to set dpni dev on memac "
						    "mdio dev %s: error=%d\n",
						    __func__,
						    device_get_nameunit(
						    sc->mac.phy_dev), error);
				}
				if (error == 0) {
					error = MEMAC_MDIO_GET_PHY_LOC(
					    sc->mac.phy_dev, &sc->mac.phy_loc);
					if (error == ENODEV)
						error = 0;
					if (error != 0)
						device_printf(dev, "%s: failed "
						    "to get phy location from "
						    "memac mdio dev %s: error=%d\n",
						    __func__, device_get_nameunit(
						    sc->mac.phy_dev), error);
				}
				if (error == 0) {
					error = mii_attach(sc->mac.phy_dev,
					    &sc->miibus, sc->ifp,
					    dpaa2_ni_media_change,
					    dpaa2_ni_media_status,
					    BMSR_DEFCAPMASK, sc->mac.phy_loc,
					    MII_OFFSET_ANY, 0);
					if (error != 0)
						device_printf(dev, "%s: failed "
						    "to attach to miibus: "
						    "error=%d\n",
						    __func__, error);
				}
				if (error == 0)
					sc->mii = device_get_softc(sc->miibus);
			} else {
				device_printf(dev, "%s: DPMAC link type is not "
				    "supported\n", __func__);
			}
		} else if (ep2_desc.type == DPAA2_DEV_NI ||
			   ep2_desc.type == DPAA2_DEV_MUX ||
			   ep2_desc.type == DPAA2_DEV_SW) {
			dpaa2_ni_setup_fixed_link(sc);
		}
	}

	/* Select mode to enqueue frames. */
	/* ... TBD ... */

	/*
	 * Update link configuration to enable Rx/Tx pause frames support.
	 *
	 * NOTE: MC may generate an interrupt to the DPMAC and request changes
	 *       in link configuration. It might be necessary to attach miibus
	 *       and PHY before this point.
	 */
	error = dpaa2_ni_set_pause_frame(dev, dpaa2_mcp_tk(cmd, ni_token));
	if (error) {
		device_printf(dev, "%s: failed to configure Rx/Tx pause "
		    "frames\n", __func__);
		return (error);
	}

	/* Configure ingress traffic classification. */
	error = dpaa2_ni_set_qos_table(dev, dpaa2_mcp_tk(cmd, ni_token));
	if (error)
		device_printf(dev, "%s: failed to configure QoS table: "
		    "error=%d\n", __func__, error);

	/* Add broadcast physical address to the MAC filtering table. */
	memset(eth_bca, 0xff, ETHER_ADDR_LEN);
	error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, cmd, eth_bca);
	if (error) {
		device_printf(dev, "%s: failed to add broadcast physical "
		    "address to the MAC filtering table\n", __func__);
		return (error);
	}

	/* Set the maximum allowed length for received frames. */
	error = DPAA2_CMD_NI_SET_MFL(dev, child, cmd, DPAA2_ETH_MFL);
	if (error) {
		device_printf(dev, "%s: failed to set maximum length for "
		    "received frames\n", __func__);
		return (error);
	}

	return (0);
}

/**
 * @brief Configure QBMan channels and register data availability notifications.
 */
static int
dpaa2_ni_setup_channels(device_t dev)
{
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_con_softc *consc;
	struct dpaa2_devinfo *io_info, *con_info;
	device_t io_dev, con_dev, child = dev;
	struct dpaa2_ni_channel *channel;
	struct dpaa2_io_notif_ctx *ctx;
	struct dpaa2_con_notif_cfg notif_cfg;
	struct dpaa2_buf *buf;
	int error;
	struct sysctl_ctx_list *sysctl_ctx;
	struct sysctl_oid *node;
	struct sysctl_oid_list *parent;
	uint32_t i, num_chan;

	/* Calculate the number of channels based on the allocated resources. */
	for (i = 0; i < IO_RES_NUM; i++)
		if (!sc->res[IO_RID(i)])
			break;
	num_chan = i;
	for (i = 0; i < CON_RES_NUM; i++)
		if (!sc->res[CON_RID(i)])
			break;
	num_chan = i < num_chan ? i : num_chan;
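
	/* NOTE: I.e. one channel per (DPIO, DPCON) pair actually allocated. */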

	/* Limit maximum channels. */
	sc->chan_n = num_chan > DPAA2_NI_MAX_CHANNELS
	    ? DPAA2_NI_MAX_CHANNELS : num_chan;

	/* Limit channels by number of the queues. */
	sc->chan_n = sc->chan_n > sc->attr.num.queues
	    ? sc->attr.num.queues : sc->chan_n;

	device_printf(dev, "channels=%d\n", sc->chan_n);

	sysctl_ctx = device_get_sysctl_ctx(sc->dev);
	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	node = SYSCTL_ADD_NODE(sysctl_ctx, parent, OID_AUTO, "channels",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channels");
	parent = SYSCTL_CHILDREN(node);

	/* Setup channels for the portal. */
	for (uint32_t i = 0; i < sc->chan_n; i++) {
		/* Select software portal. */
		io_dev = (device_t) rman_get_start(sc->res[IO_RID(i)]);
		io_info = device_get_ivars(io_dev);

		/* Select DPCON (channel). */
		con_dev = (device_t) rman_get_start(sc->res[CON_RID(i)]);
		consc = device_get_softc(con_dev);
		con_info = device_get_ivars(con_dev);

		/* Enable selected channel. */
		error = DPAA2_CMD_CON_ENABLE(dev, child, dpaa2_mcp_tk(consc->cmd,
		    consc->con_token));
		if (error) {
			device_printf(dev, "%s: failed to enable channel: "
			    "dpcon_id=%d, chan_id=%d\n", __func__, con_info->id,
			    consc->attr.chan_id);
			return (error);
		}

		/* NOTE: malloc(9) with M_WAITOK cannot return NULL. */
		channel = malloc(sizeof(struct dpaa2_ni_channel), M_DPAA2_NI,
		    M_WAITOK | M_ZERO);

		sc->channels[i] = channel;

		channel->id = consc->attr.chan_id;
		channel->flowid = i;
		channel->ni_dev = dev;
		channel->io_dev = io_dev;
		channel->con_dev = con_dev;
		channel->recycled_n = 0;

		buf = &channel->store;
		buf->type = DPAA2_BUF_STORE;
		buf->store.dmat = NULL;
		buf->store.dmap = NULL;
		buf->store.paddr = 0;
		buf->store.vaddr = NULL;

		/* For debug purposes only! */
		channel->tx_frames = 0;
		channel->tx_dropped = 0;

		/* None of the frame queues for this channel configured yet. */
		channel->rxq_n = 0;

		/* Setup WQ channel notification context. */
		ctx = &channel->ctx;
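		/*
		 * NOTE: The address of the context structure is used as the
		 * QMan context itself: it is returned verbatim in data
		 * availability notifications (CDANs), which lets the
		 * notification handler recover this channel.
		 */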
		ctx->qman_ctx = (uint64_t) ctx;
		ctx->cdan_en = true;
		ctx->fq_chan_id = channel->id;
		ctx->io_dev = channel->io_dev;
		ctx->channel = channel;
		ctx->poll = dpaa2_ni_poll;

		/* Register the new notification context. */
		error = DPAA2_SWP_CONF_WQ_CHANNEL(channel->io_dev, ctx);
		if (error) {
			device_printf(dev, "%s: failed to register notification "
			    "context\n", __func__);
			return (error);
		}

		/* Register DPCON notification with Management Complex. */
		notif_cfg.dpio_id = io_info->id;
		notif_cfg.prior = 0;
		notif_cfg.qman_ctx = ctx->qman_ctx;
		error = DPAA2_CMD_CON_SET_NOTIF(dev, child, dpaa2_mcp_tk(
		    consc->cmd, consc->con_token), &notif_cfg);
		if (error) {
			device_printf(dev, "%s: failed to set DPCON "
			    "notification: dpcon_id=%d, chan_id=%d\n", __func__,
			    con_info->id, consc->attr.chan_id);
			return (error);
		}

		/* Allocate initial # of Rx buffers and a channel storage. */
		error = dpaa2_ni_seed_buf_pool(sc, DPAA2_NI_BUFS_INIT);
		if (error) {
			device_printf(dev, "%s: failed to seed buffer pool\n",
			    __func__);
			return (error);
		}
		error = dpaa2_ni_seed_chan_storage(sc, channel);
		if (error) {
			device_printf(dev, "%s: failed to seed channel "
			    "storage\n", __func__);
			return (error);
		}

		/* Prepare queues for this channel. */
		error = dpaa2_ni_setup_fq(dev, channel, DPAA2_NI_QUEUE_TX_CONF);
		if (error) {
			device_printf(dev, "%s: failed to prepare TxConf "
			    "queue: error=%d\n", __func__, error);
			return (error);
		}
		error = dpaa2_ni_setup_fq(dev, channel, DPAA2_NI_QUEUE_RX);
		if (error) {
			device_printf(dev, "%s: failed to prepare Rx queue: "
			    "error=%d\n", __func__, error);
			return (error);
		}

		if (bootverbose)
			device_printf(dev, "channel: dpio_id=%d "
			    "dpcon_id=%d chan_id=%d, priorities=%d\n",
			    io_info->id, con_info->id, channel->id,
			    consc->attr.prior_num);
	}

	/* There is exactly one Rx error queue per DPNI. */
	error = dpaa2_ni_setup_fq(dev, sc->channels[0], DPAA2_NI_QUEUE_RX_ERR);
	if (error) {
		device_printf(dev, "%s: failed to prepare RxError queue: "
		    "error=%d\n", __func__, error);
		return (error);
	}

	return (0);
}

/**
 * @brief Performs an initial configuration of the frame queues.
 */
static int
dpaa2_ni_setup_fq(device_t dev, struct dpaa2_ni_channel *chan,
    enum dpaa2_ni_queue_type queue_type)
{
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_ni_fq *fq;

	switch (queue_type) {
	case DPAA2_NI_QUEUE_TX_CONF:
		/* One queue per channel. */
		fq = &chan->txc_queue;

		fq->consume = dpaa2_ni_tx_conf;
		fq->chan = chan;
		fq->flowid = chan->flowid;
		fq->tc = 0; /* ignored */
		fq->type = queue_type;

		break;
	case DPAA2_NI_QUEUE_RX:
		KASSERT(sc->attr.num.rx_tcs <= DPAA2_NI_MAX_TCS,
		    ("too many Rx traffic classes: rx_tcs=%d\n",
		    sc->attr.num.rx_tcs));

		/* One queue per Rx traffic class within a channel. */
		for (int i = 0; i < sc->attr.num.rx_tcs; i++) {
			fq = &chan->rx_queues[i];

			fq->consume = dpaa2_ni_rx;
			fq->chan = chan;
			fq->flowid = chan->flowid;
			fq->tc = (uint8_t) i;
			fq->type = queue_type;

			chan->rxq_n++;
		}
		break;
	case DPAA2_NI_QUEUE_RX_ERR:
		/* One queue per network interface. */
		fq = &sc->rxe_queue;

		fq->consume = dpaa2_ni_rx_err;
		fq->chan = chan;
		fq->flowid = 0; /* ignored */
		fq->tc = 0; /* ignored */
		fq->type = queue_type;
		break;
	default:
		device_printf(dev, "%s: unexpected frame queue type: %d\n",
		    __func__, queue_type);
		return (EINVAL);
	}

	return (0);
}

/**
 * @brief Bind DPNI to DPBPs, DPIOs, frame queues and channels.
 */
static int
dpaa2_ni_bind(device_t dev)
{
	device_t bp_dev, child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *bp_info;
	struct dpaa2_cmd *cmd = sc->cmd;
	struct dpaa2_ni_pools_cfg pools_cfg;
	struct dpaa2_ni_err_cfg err_cfg;
	struct dpaa2_ni_channel *chan;
	uint16_t ni_token = sc->ni_token;
	int error;

	/* Select buffer pool (only one available at the moment). */
	bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
	bp_info = device_get_ivars(bp_dev);

	/* Configure the buffer pool. */
	pools_cfg.pools_num = 1;
	pools_cfg.pools[0].bp_obj_id = bp_info->id;
	pools_cfg.pools[0].backup_flag = 0;
	pools_cfg.pools[0].buf_sz = sc->buf_sz;
	error = DPAA2_CMD_NI_SET_POOLS(dev, child, dpaa2_mcp_tk(cmd, ni_token),
	    &pools_cfg);
	if (error) {
		device_printf(dev, "%s: failed to set buffer pools\n", __func__);
		return (error);
	}

	/* Setup ingress traffic distribution. */
	error = dpaa2_ni_setup_rx_dist(dev);
	if (error && error != EOPNOTSUPP) {
		device_printf(dev, "%s: failed to setup ingress traffic "
		    "distribution\n", __func__);
		return (error);
	}
	if (bootverbose && error == EOPNOTSUPP)
		device_printf(dev, "Ingress traffic distribution not "
		    "supported\n");

	/* Configure handling of error frames. */
	err_cfg.err_mask = DPAA2_NI_FAS_RX_ERR_MASK;
	err_cfg.set_err_fas = false;
	err_cfg.action = DPAA2_NI_ERR_DISCARD;
	error = DPAA2_CMD_NI_SET_ERR_BEHAVIOR(dev, child, cmd, &err_cfg);
	if (error) {
		device_printf(dev, "%s: failed to set errors behavior\n",
		    __func__);
		return (error);
	}

	/* Configure channel queues to generate CDANs. */
	for (uint32_t i = 0; i < sc->chan_n; i++) {
		chan = sc->channels[i];

		/* Setup Rx flows. */
		for (uint32_t j = 0; j < chan->rxq_n; j++) {
			error = dpaa2_ni_setup_rx_flow(dev, cmd,
			    &chan->rx_queues[j]);
			if (error) {
				device_printf(dev, "%s: failed to setup Rx "
				    "flow: error=%d\n", __func__, error);
				return (error);
			}
		}

		/* Setup Tx flow. */
		error = dpaa2_ni_setup_tx_flow(dev, cmd, &chan->txc_queue);
		if (error) {
			device_printf(dev, "%s: failed to setup Tx "
			    "flow: error=%d\n", __func__, error);
			return (error);
		}
	}

	/* Configure RxError queue to generate CDAN. */
	error = dpaa2_ni_setup_rx_err_flow(dev, cmd, &sc->rxe_queue);
	if (error) {
		device_printf(dev, "%s: failed to setup RxError flow: "
		    "error=%d\n", __func__, error);
		return (error);
	}

	/*
	 * Get the Queuing Destination ID (QDID) that should be used for frame
	 * enqueue operations.
	 */
	error = DPAA2_CMD_NI_GET_QDID(dev, child, cmd, DPAA2_NI_QUEUE_TX,
	    &sc->tx_qdid);
	if (error) {
		device_printf(dev, "%s: failed to get Tx queuing destination "
		    "ID\n", __func__);
		return (error);
	}

	return (0);
}

/**
 * @brief Setup ingress traffic distribution.
 *
 * NOTE: Ingress traffic distribution is valid only when the DPNI_OPT_NO_FS
 *	 option hasn't been set for the DPNI and the number of DPNI queues is
 *	 greater than 1.
 */
static int
dpaa2_ni_setup_rx_dist(device_t dev)
{
	/*
	 * Have the interface implicitly distribute traffic based on the default
	 * hash key.
	 */
	return (dpaa2_ni_set_hash(dev, DPAA2_RXH_DEFAULT));
}

static int
dpaa2_ni_setup_rx_flow(device_t dev, struct dpaa2_cmd *cmd,
    struct dpaa2_ni_fq *fq)
{
	device_t child = dev;
	struct dpaa2_devinfo *con_info;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	int error;

	/* Obtain DPCON associated with the FQ's channel. */
	con_info = device_get_ivars(fq->chan->con_dev);

	queue_cfg.type = DPAA2_NI_QUEUE_RX;
	queue_cfg.tc = fq->tc;
	queue_cfg.idx = fq->flowid;
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain Rx queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		return (error);
	}

	fq->fqid = queue_cfg.fqid;

	queue_cfg.dest_id = con_info->id;
	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
	queue_cfg.priority = 1;
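	/*
	 * NOTE: The FQ pointer is stored in the queue context: frames dequeued
	 * from this FQ carry it back, so the right consumer callback can be
	 * dispatched per queue.
	 */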
	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
	queue_cfg.options =
	    DPAA2_NI_QUEUE_OPT_USER_CTX |
	    DPAA2_NI_QUEUE_OPT_DEST;
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to update Rx queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		return (error);
	}

	if (bootverbose) {
		device_printf(dev, "RX queue idx=%d, tc=%d, chan=%d, fqid=%d, "
		    "user_ctx=%#jx\n", fq->flowid, fq->tc, fq->chan->id,
		    fq->fqid, (uintmax_t)(uintptr_t) fq);
	}

	return (0);
}

static int
dpaa2_ni_setup_tx_flow(device_t dev, struct dpaa2_cmd *cmd,
    struct dpaa2_ni_fq *fq)
{
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *con_info;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	struct dpaa2_ni_tx_ring *tx;
	struct dpaa2_buf *buf;
	uint32_t tx_rings_n = 0;
	int error;

	/* Obtain DPCON associated with the FQ's channel. */
	con_info = device_get_ivars(fq->chan->con_dev);

	KASSERT(sc->attr.num.tx_tcs <= DPAA2_NI_MAX_TCS,
	    ("%s: too many Tx traffic classes: tx_tcs=%d\n", __func__,
	    sc->attr.num.tx_tcs));
	KASSERT(DPAA2_NI_BUFS_PER_TX <= DPAA2_NI_MAX_BPTX,
	    ("%s: too many Tx buffers (%d): max=%d\n", __func__,
	    DPAA2_NI_BUFS_PER_TX, DPAA2_NI_MAX_BPTX));

	/* Setup Tx rings. */
	for (int i = 0; i < sc->attr.num.tx_tcs; i++) {
		queue_cfg.type = DPAA2_NI_QUEUE_TX;
		queue_cfg.tc = i;
		queue_cfg.idx = fq->flowid;
		queue_cfg.chan_id = fq->chan->id;

		error = DPAA2_CMD_NI_GET_QUEUE(dev, child, cmd, &queue_cfg);
		if (error) {
			device_printf(dev, "%s: failed to obtain Tx queue "
			    "configuration: tc=%d, flowid=%d\n", __func__,
			    queue_cfg.tc, queue_cfg.idx);
			return (error);
		}

		tx = &fq->tx_rings[i];
		tx->fq = fq;
		tx->fqid = queue_cfg.fqid;
		tx->txid = tx_rings_n;

		if (bootverbose) {
			device_printf(dev, "TX queue idx=%d, tc=%d, chan=%d, "
			    "fqid=%d\n", fq->flowid, i, fq->chan->id,
			    queue_cfg.fqid);
		}

		mtx_init(&tx->lock, "dpaa2_tx_ring", NULL, MTX_DEF);

		/* Allocate Tx ring buffer. */
		tx->idx_br = buf_ring_alloc(DPAA2_TX_BUFRING_SZ, M_DEVBUF,
		    M_NOWAIT, &tx->lock);
		if (tx->idx_br == NULL) {
			device_printf(dev, "%s: failed to allocate Tx ring "
			    "buffer: fqid=%d\n", __func__, tx->fqid);
			return (ENOMEM);
		}

		/* Configure Tx buffers. */
		for (uint64_t j = 0; j < DPAA2_NI_BUFS_PER_TX; j++) {
			buf = &tx->buf[j];
			buf->type = DPAA2_BUF_TX;
			buf->tx.dmat = buf->tx.sgt_dmat = NULL;
			buf->tx.dmap = buf->tx.sgt_dmap = NULL;
			buf->tx.paddr = buf->tx.sgt_paddr = 0;
			buf->tx.vaddr = buf->tx.sgt_vaddr = NULL;
			buf->tx.m = NULL;
			buf->tx.idx = 0;

			error = dpaa2_ni_seed_txbuf(sc, buf, j);
			if (error != 0) {
				device_printf(dev, "%s: failed to seed Tx "
				    "buffer: error=%d\n", __func__, error);
				return (error);
			}

			/* Add index of the Tx buffer to the ring. */
			buf_ring_enqueue(tx->idx_br, (void *) j);
		}

		tx_rings_n++;
	}

	/* All Tx queues which belong to the same flowid have the same qdbin. */
	fq->tx_qdbin = queue_cfg.qdbin;

	queue_cfg.type = DPAA2_NI_QUEUE_TX_CONF;
	queue_cfg.tc = 0; /* ignored for TxConf queue */
	queue_cfg.idx = fq->flowid;
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain TxConf queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		return (error);
	}

	fq->fqid = queue_cfg.fqid;

	queue_cfg.dest_id = con_info->id;
	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
	queue_cfg.priority = 0;
	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
	queue_cfg.options =
	    DPAA2_NI_QUEUE_OPT_USER_CTX |
	    DPAA2_NI_QUEUE_OPT_DEST;
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to update TxConf queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		return (error);
	}

	return (0);
}

static int
dpaa2_ni_setup_rx_err_flow(device_t dev, struct dpaa2_cmd *cmd,
    struct dpaa2_ni_fq *fq)
{
	device_t child = dev;
	struct dpaa2_devinfo *con_info;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	int error;

	/* Obtain DPCON associated with the FQ's channel. */
	con_info = device_get_ivars(fq->chan->con_dev);

	queue_cfg.type = DPAA2_NI_QUEUE_RX_ERR;
	queue_cfg.tc = fq->tc; /* ignored */
	queue_cfg.idx = fq->flowid; /* ignored */
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain RxErr queue "
		    "configuration\n", __func__);
		return (error);
	}

	fq->fqid = queue_cfg.fqid;

	queue_cfg.dest_id = con_info->id;
	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
	queue_cfg.priority = 1;
	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
	queue_cfg.options =
	    DPAA2_NI_QUEUE_OPT_USER_CTX |
	    DPAA2_NI_QUEUE_OPT_DEST;
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to update RxErr queue "
		    "configuration\n", __func__);
		return (error);
	}

	return (0);
}

/**
 * @brief Configure DPNI object to generate interrupts.
 */
static int
dpaa2_ni_setup_irqs(device_t dev)
{
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_cmd *cmd = sc->cmd;
	uint16_t ni_token = sc->ni_token;
	int error;

	/* Configure IRQs. */
	error = dpaa2_ni_setup_msi(sc);
	if (error) {
		device_printf(dev, "%s: failed to allocate MSI\n", __func__);
		return (error);
	}
	if ((sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) {
		device_printf(dev, "%s: failed to allocate IRQ resource\n",
		    __func__);
		return (ENXIO);
	}
	if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, dpaa2_ni_intr, sc, &sc->intr)) {
		device_printf(dev, "%s: failed to setup IRQ resource\n",
		    __func__);
		return (ENXIO);
	}

	/* Configure DPNI to generate interrupts. */
	error = DPAA2_CMD_NI_SET_IRQ_MASK(dev, child, dpaa2_mcp_tk(cmd,
	    ni_token), DPNI_IRQ_INDEX,
	    DPNI_IRQ_LINK_CHANGED | DPNI_IRQ_EP_CHANGED);
	if (error) {
		device_printf(dev, "%s: failed to set DPNI IRQ mask\n",
		    __func__);
		return (error);
	}

	/* Enable IRQ. */
	error = DPAA2_CMD_NI_SET_IRQ_ENABLE(dev, child, cmd, DPNI_IRQ_INDEX,
	    true);
	if (error) {
		device_printf(dev, "%s: failed to enable DPNI IRQ\n", __func__);
		return (error);
	}

	return (0);
}

/**
 * @brief Allocate MSI interrupts for DPNI.
 */
static int
dpaa2_ni_setup_msi(struct dpaa2_ni_softc *sc)
{
	int val;

	val = pci_msi_count(sc->dev);
	if (val < DPAA2_NI_MSI_COUNT)
		device_printf(sc->dev, "MSI: actual=%d, expected=%d\n", val,
		    DPAA2_NI_MSI_COUNT);
	val = MIN(val, DPAA2_NI_MSI_COUNT);

	if (pci_alloc_msi(sc->dev, &val) != 0)
		return (EINVAL);

	for (int i = 0; i < val; i++)
		sc->irq_rid[i] = i + 1;

	return (0);
}

/**
 * @brief Update DPNI according to the updated interface capabilities.
 */
static int
dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *sc)
{
	const bool en_rxcsum = sc->ifp->if_capenable & IFCAP_RXCSUM;
	const bool en_txcsum = sc->ifp->if_capenable & IFCAP_TXCSUM;
	device_t dev = sc->dev;
	device_t child = dev;
	int error;

	/* Setup checksums validation. */
	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, dpaa2_mcp_tk(sc->cmd,
	    sc->ni_token), DPAA2_NI_OFL_RX_L3_CSUM, en_rxcsum);
	if (error) {
		device_printf(dev, "%s: failed to %s L3 checksum validation\n",
		    __func__, en_rxcsum ? "enable" : "disable");
		return (error);
	}
	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, sc->cmd,
	    DPAA2_NI_OFL_RX_L4_CSUM, en_rxcsum);
	if (error) {
		device_printf(dev, "%s: failed to %s L4 checksum validation\n",
		    __func__, en_rxcsum ? "enable" : "disable");
		return (error);
	}

	/* Setup checksums generation. */
	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, sc->cmd,
	    DPAA2_NI_OFL_TX_L3_CSUM, en_txcsum);
	if (error) {
		device_printf(dev, "%s: failed to %s L3 checksum generation\n",
		    __func__, en_txcsum ? "enable" : "disable");
		return (error);
	}
	error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, sc->cmd,
	    DPAA2_NI_OFL_TX_L4_CSUM, en_txcsum);
	if (error) {
		device_printf(dev, "%s: failed to %s L4 checksum generation\n",
		    __func__, en_txcsum ? "enable" : "disable");
		return (error);
	}

	return (0);
}
1664 
1665 /**
1666  * @brief Update DPNI according to the updated interface flags.
1667  */
1668 static int
1669 dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *sc)
1670 {
1671 	const bool en_promisc = sc->ifp->if_flags & IFF_PROMISC;
1672 	const bool en_allmulti = sc->ifp->if_flags & IFF_ALLMULTI;
1673 	device_t dev = sc->dev;
1674 	device_t child = dev;
1675 	int error;
1676 
1677 	error = DPAA2_CMD_NI_SET_MULTI_PROMISC(dev, child, dpaa2_mcp_tk(sc->cmd,
1678 	    sc->ni_token), en_promisc || en_allmulti);
1679 	if (error) {
1680 		device_printf(dev, "%s: failed to update multicast promiscuous "
1681 		    "mode\n", __func__);
1682 		return (error);
1683 	}
1684 
1685 	error = DPAA2_CMD_NI_SET_UNI_PROMISC(dev, child, sc->cmd, en_promisc);
1686 	if (error) {
1687 		device_printf(dev, "%s: failed to %s unicast promiscuous mode\n",
1688 		    __func__, en_promisc ? "enable" : "disable");
1689 		return (error);
1690 	}
1691 
1692 	return (0);
1693 }
1694 
1695 static int
1696 dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *sc)
1697 {
1698 	struct sysctl_ctx_list *ctx;
1699 	struct sysctl_oid *node, *node2;
1700 	struct sysctl_oid_list *parent, *parent2;
1701 	char cbuf[128];
1702 	int i;
1703 
1704 	ctx = device_get_sysctl_ctx(sc->dev);
1705 	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1706 
1707 	/* Add DPNI statistics. */
1708 	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
1709 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Statistics");
1710 	parent = SYSCTL_CHILDREN(node);
1711 	for (i = 0; i < DPAA2_NI_STAT_SYSCTLS; ++i) {
1712 		SYSCTL_ADD_PROC(ctx, parent, i, dpni_stat_sysctls[i].name,
1713 		    CTLTYPE_U64 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_stats,
1714 		    "IU", dpni_stat_sysctls[i].desc);
1715 	}
1716 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_anomaly_frames",
1717 	    CTLFLAG_RD, &sc->rx_anomaly_frames,
1718 	    "Rx frames in the buffers outside of the buffer pools");
1719 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_single_buf_frames",
1720 	    CTLFLAG_RD, &sc->rx_single_buf_frames,
1721 	    "Rx frames in single buffers");
1722 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_sg_buf_frames",
1723 	    CTLFLAG_RD, &sc->rx_sg_buf_frames,
1724 	    "Rx frames in scatter/gather list");
1725 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_enq_rej_frames",
1726 	    CTLFLAG_RD, &sc->rx_enq_rej_frames,
1727 	    "Enqueue rejected by QMan");
1728 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_ieoi_err_frames",
1729 	    CTLFLAG_RD, &sc->rx_ieoi_err_frames,
1730 	    "QMan IEOI error");
1731 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_single_buf_frames",
1732 	    CTLFLAG_RD, &sc->tx_single_buf_frames,
1733 	    "Tx single buffer frames");
1734 	SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_sg_frames",
1735 	    CTLFLAG_RD, &sc->tx_sg_frames,
1736 	    "Tx S/G frames");
1737 
1738 	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_num",
1739 	    CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_num,
1740 	    "IU", "number of Rx buffers in the buffer pool");
1741 	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_free",
1742 	    CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_free,
1743 	    "IU", "number of free Rx buffers in the buffer pool");
1744 
1745 	parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1746 
1747 	/* Add channels statistics. */
1748 	node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "channels",
1749 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channels");
1750 	parent = SYSCTL_CHILDREN(node);
1751 	for (int i = 0; i < sc->chan_n; i++) {
1752 		snprintf(cbuf, sizeof(cbuf), "%d", i);
1753 
1754 		node2 = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, cbuf,
1755 		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channel");
1756 		parent2 = SYSCTL_CHILDREN(node2);
1757 
1758 		SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_frames",
1759 		    CTLFLAG_RD, &sc->channels[i]->tx_frames,
1760 		    "Tx frames counter");
1761 		SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_dropped",
1762 		    CTLFLAG_RD, &sc->channels[i]->tx_dropped,
1763 		    "Tx dropped counter");
1764 	}
1765 
1766 	return (0);
1767 }
1768 
1769 static int
1770 dpaa2_ni_setup_dma(struct dpaa2_ni_softc *sc)
1771 {
1772 	device_t dev = sc->dev;
1773 	int error;
1774 
1775 	KASSERT((sc->buf_align == BUF_ALIGN) || (sc->buf_align == BUF_ALIGN_V1),
1776 	    ("unexpected buffer alignment: %d\n", sc->buf_align));
1777 
1778 	/*
1779 	 * DMA tag to allocate buffers for buffer pool.
1780 	 *
1781 	 * NOTE: QBMan supports DMA addresses up to 49-bits maximum.
1782 	 *	 Bits 63-49 are not used by QBMan.
1783 	 */
1784 	error = bus_dma_tag_create(
1785 	    bus_get_dma_tag(dev),
1786 	    sc->buf_align, 0,		/* alignment, boundary */
1787 	    BUF_MAXADDR_49BIT,		/* low restricted addr */
1788 	    BUF_MAXADDR,		/* high restricted addr */
1789 	    NULL, NULL,			/* filter, filterarg */
1790 	    BUF_SIZE, 1,		/* maxsize, nsegments */
1791 	    BUF_SIZE, 0,		/* maxsegsize, flags */
1792 	    NULL, NULL,			/* lockfunc, lockarg */
1793 	    &sc->bp_dmat);
1794 	if (error) {
1795 		device_printf(dev, "%s: failed to create DMA tag for buffer "
1796 		    "pool\n", __func__);
1797 		return (error);
1798 	}
1799 
1800 	/* DMA tag to map Tx mbufs. */
1801 	error = bus_dma_tag_create(
1802 	    bus_get_dma_tag(dev),
1803 	    sc->buf_align, 0,		/* alignment, boundary */
1804 	    BUF_MAXADDR_49BIT,		/* low restricted addr */
1805 	    BUF_MAXADDR,		/* high restricted addr */
1806 	    NULL, NULL,			/* filter, filterarg */
1807 	    DPAA2_TX_SEGS_MAXSZ,	/* maxsize */
1808 	    DPAA2_TX_SEGLIMIT,		/* nsegments */
1809 	    DPAA2_TX_SEG_SZ, 0,		/* maxsegsize, flags */
1810 	    NULL, NULL,			/* lockfunc, lockarg */
1811 	    &sc->tx_dmat);
1812 	if (error) {
1813 		device_printf(dev, "%s: failed to create DMA tag for Tx "
1814 		    "buffers\n", __func__);
1815 		return (error);
1816 	}
1817 
1818 	/* DMA tag to allocate channel storage. */
1819 	error = bus_dma_tag_create(
1820 	    bus_get_dma_tag(dev),
1821 	    ETH_STORE_ALIGN, 0,		/* alignment, boundary */
1822 	    BUS_SPACE_MAXADDR_32BIT,	/* low restricted addr */
1823 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
1824 	    NULL, NULL,			/* filter, filterarg */
1825 	    ETH_STORE_SIZE, 1,		/* maxsize, nsegments */
1826 	    ETH_STORE_SIZE, 0,		/* maxsegsize, flags */
1827 	    NULL, NULL,			/* lockfunc, lockarg */
1828 	    &sc->st_dmat);
1829 	if (error) {
1830 		device_printf(dev, "%s: failed to create DMA tag for channel "
1831 		    "storage\n", __func__);
1832 		return (error);
1833 	}
1834 
1835 	/* DMA tag for Rx distribution key. */
1836 	error = bus_dma_tag_create(
1837 	    bus_get_dma_tag(dev),
1838 	    PAGE_SIZE, 0,		/* alignment, boundary */
1839 	    BUS_SPACE_MAXADDR_32BIT,	/* low restricted addr */
1840 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
1841 	    NULL, NULL,			/* filter, filterarg */
1842 	    DPAA2_CLASSIFIER_DMA_SIZE, 1, /* maxsize, nsegments */
1843 	    DPAA2_CLASSIFIER_DMA_SIZE, 0, /* maxsegsize, flags */
1844 	    NULL, NULL,			/* lockfunc, lockarg */
1845 	    &sc->rxd_dmat);
1846 	if (error) {
1847 		device_printf(dev, "%s: failed to create DMA tag for Rx "
1848 		    "distribution key\n", __func__);
1849 		return (error);
1850 	}
1851 
1852 	error = bus_dma_tag_create(
1853 	    bus_get_dma_tag(dev),
1854 	    PAGE_SIZE, 0,		/* alignment, boundary */
1855 	    BUS_SPACE_MAXADDR_32BIT,	/* low restricted addr */
1856 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
1857 	    NULL, NULL,			/* filter, filterarg */
1858 	    ETH_QOS_KCFG_BUF_SIZE, 1,	/* maxsize, nsegments */
1859 	    ETH_QOS_KCFG_BUF_SIZE, 0,	/* maxsegsize, flags */
1860 	    NULL, NULL,			/* lockfunc, lockarg */
1861 	    &sc->qos_dmat);
1862 	if (error) {
1863 		device_printf(dev, "%s: failed to create DMA tag for QoS key\n",
1864 		    __func__);
1865 		return (error);
1866 	}
1867 
1868 	error = bus_dma_tag_create(
1869 	    bus_get_dma_tag(dev),
1870 	    PAGE_SIZE, 0,		/* alignment, boundary */
1871 	    BUS_SPACE_MAXADDR_32BIT,	/* low restricted addr */
1872 	    BUS_SPACE_MAXADDR,		/* high restricted addr */
1873 	    NULL, NULL,			/* filter, filterarg */
1874 	    DPAA2_TX_SGT_SZ, 1,		/* maxsize, nsegments */
1875 	    DPAA2_TX_SGT_SZ, 0,		/* maxsegsize, flags */
1876 	    NULL, NULL,			/* lockfunc, lockarg */
1877 	    &sc->sgt_dmat);
1878 	if (error) {
1879 		device_printf(dev, "%s: failed to create DMA tag for S/G "
1880 		    "tables\n", __func__);
1881 		return (error);
1882 	}
1883 
1884 	return (0);
1885 }
1886 
1887 /**
1888  * @brief Configure buffer layouts of the different DPNI queues.
1889  */
1890 static int
1891 dpaa2_ni_set_buf_layout(device_t dev, struct dpaa2_cmd *cmd)
1892 {
1893 	device_t child = dev;
1894 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
1895 	struct dpaa2_ni_buf_layout buf_layout = {0};
1896 	int error;
1897 
1898 	/*
1899 	 * Select Rx/Tx buffer alignment. It's necessary to ensure that the
1900 	 * buffer size seen by WRIOP is a multiple of 64 or 256 bytes depending
1901 	 * on the WRIOP version.
1902 	 */
1903 	sc->buf_align = (sc->attr.wriop_ver == WRIOP_VERSION(0, 0, 0) ||
1904 	    sc->attr.wriop_ver == WRIOP_VERSION(1, 0, 0))
1905 	    ? BUF_ALIGN_V1 : BUF_ALIGN;
1906 
1907 	/* Round the buffer size down to a multiple of the alignment. */
1911 	sc->buf_sz = ALIGN_DOWN(BUF_SIZE, sc->buf_align);
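	/* E.g. (sizes illustrative): ALIGN_DOWN(9000, 256) == 8960. */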
1912 
1913 	if (bootverbose)
1914 		device_printf(dev, "Rx/Tx buffers: size=%d, alignment=%d\n",
1915 		    sc->buf_sz, sc->buf_align);
1916 
1917 	/*
1918 	 *    Frame Descriptor       Tx buffer layout
1919 	 *
1920 	 *                ADDR -> |---------------------|
1921 	 *                        | SW FRAME ANNOTATION | BUF_SWA_SIZE bytes
1922 	 *                        |---------------------|
1923 	 *                        | HW FRAME ANNOTATION | BUF_TX_HWA_SIZE bytes
1924 	 *                        |---------------------|
1925 	 *                        |    DATA HEADROOM    |
1926 	 *       ADDR + OFFSET -> |---------------------|
1927 	 *                        |                     |
1928 	 *                        |                     |
1929 	 *                        |     FRAME DATA      |
1930 	 *                        |                     |
1931 	 *                        |                     |
1932 	 *                        |---------------------|
1933 	 *                        |    DATA TAILROOM    |
1934 	 *                        |---------------------|
1935 	 *
1936 	 * NOTE: It's for a single buffer frame only.
1937 	 */
1938 	buf_layout.queue_type = DPAA2_NI_QUEUE_TX;
1939 	buf_layout.pd_size = BUF_SWA_SIZE;
1940 	buf_layout.pass_timestamp = true;
1941 	buf_layout.pass_frame_status = true;
1942 	buf_layout.options =
1943 	    BUF_LOPT_PRIV_DATA_SZ |
1944 	    BUF_LOPT_TIMESTAMP | /* requires 128 bytes in HWA */
1945 	    BUF_LOPT_FRAME_STATUS;
1946 	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, cmd, &buf_layout);
1947 	if (error) {
1948 		device_printf(dev, "%s: failed to set Tx buffer layout\n",
1949 		    __func__);
1950 		return (error);
1951 	}
1952 
1953 	/* Tx-confirmation buffer layout */
1954 	buf_layout.queue_type = DPAA2_NI_QUEUE_TX_CONF;
1955 	buf_layout.options =
1956 	    BUF_LOPT_TIMESTAMP |
1957 	    BUF_LOPT_FRAME_STATUS;
1958 	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, cmd, &buf_layout);
1959 	if (error) {
1960 		device_printf(dev, "%s: failed to set TxConf buffer layout\n",
1961 		    __func__);
1962 		return (error);
1963 	}
1964 
1965 	/*
1966 	 * Driver should reserve the amount of space indicated by this command
1967 	 * as headroom in all Tx frames.
1968 	 */
1969 	error = DPAA2_CMD_NI_GET_TX_DATA_OFF(dev, child, cmd, &sc->tx_data_off);
1970 	if (error) {
1971 		device_printf(dev, "%s: failed to obtain Tx data offset\n",
1972 		    __func__);
1973 		return (error);
1974 	}
1975 
1976 	if (bootverbose)
1977 		device_printf(dev, "Tx data offset=%d\n", sc->tx_data_off);
1978 	if ((sc->tx_data_off % 64) != 0)
1979 		device_printf(dev, "Tx data offset (%d) is not a multiple of "
1980 		    "64 bytes\n", sc->tx_data_off);
1981 
1982 	/*
1983 	 *    Frame Descriptor       Rx buffer layout
1984 	 *
1985 	 *                ADDR -> |---------------------|
1986 	 *                        | SW FRAME ANNOTATION | 0 bytes
1987 	 *                        |---------------------|
1988 	 *                        | HW FRAME ANNOTATION | BUF_RX_HWA_SIZE bytes
1989 	 *                        |---------------------|
1990 	 *                        |    DATA HEADROOM    | OFFSET-BUF_RX_HWA_SIZE
1991 	 *       ADDR + OFFSET -> |---------------------|
1992 	 *                        |                     |
1993 	 *                        |                     |
1994 	 *                        |     FRAME DATA      |
1995 	 *                        |                     |
1996 	 *                        |                     |
1997 	 *                        |---------------------|
1998 	 *                        |    DATA TAILROOM    | 0 bytes
1999 	 *                        |---------------------|
2000 	 *
2001 	 * NOTE: It's for a single buffer frame only.
2002 	 */
2003 	buf_layout.queue_type = DPAA2_NI_QUEUE_RX;
2004 	buf_layout.pd_size = 0;
2005 	buf_layout.fd_align = sc->buf_align;
2006 	buf_layout.head_size = sc->tx_data_off - BUF_RX_HWA_SIZE;
2007 	buf_layout.tail_size = 0;
2008 	buf_layout.pass_frame_status = true;
2009 	buf_layout.pass_parser_result = true;
2010 	buf_layout.pass_timestamp = true;
2011 	buf_layout.options =
2012 	    BUF_LOPT_PRIV_DATA_SZ |
2013 	    BUF_LOPT_DATA_ALIGN |
2014 	    BUF_LOPT_DATA_HEAD_ROOM |
2015 	    BUF_LOPT_DATA_TAIL_ROOM |
2016 	    BUF_LOPT_FRAME_STATUS |
2017 	    BUF_LOPT_PARSER_RESULT |
2018 	    BUF_LOPT_TIMESTAMP;
2019 	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, cmd, &buf_layout);
2020 	if (error) {
2021 		device_printf(dev, "%s: failed to set Rx buffer layout\n",
2022 		    __func__);
2023 		return (error);
2024 	}
2025 
2026 	return (0);
2027 }
2028 
2029 /**
2030  * @brief Enable Rx/Tx pause frames.
2031  *
2032  * NOTE: DPNI stops sending when a pause frame is received (Rx frame) or DPNI
2033  *       itself generates pause frames (Tx frame).
2034  */
2035 static int
2036 dpaa2_ni_set_pause_frame(device_t dev, struct dpaa2_cmd *cmd)
2037 {
2038 	device_t child = dev;
2039 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2040 	struct dpaa2_ni_link_cfg link_cfg = {0};
2041 	int error;
2042 
2043 	error = DPAA2_CMD_NI_GET_LINK_CFG(dev, child, cmd, &link_cfg);
2044 	if (error) {
2045 		device_printf(dev, "%s: failed to obtain link configuration: "
2046 		    "error=%d\n", __func__, error);
2047 		return (error);
2048 	}
2049 
2050 	/* Enable both Rx and Tx pause frames by default. */
2051 	link_cfg.options |= DPAA2_NI_LINK_OPT_PAUSE;
2052 	link_cfg.options &= ~DPAA2_NI_LINK_OPT_ASYM_PAUSE;
2053 
2054 	error = DPAA2_CMD_NI_SET_LINK_CFG(dev, child, cmd, &link_cfg);
2055 	if (error) {
2056 		device_printf(dev, "%s: failed to set link configuration: "
2057 		    "error=%d\n", __func__, error);
2058 		return (error);
2059 	}
2060 
2061 	sc->link_options = link_cfg.options;
2062 
2063 	return (0);
2064 }
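
/*
 * An illustrative variation (sketch): asymmetric flow control would be
 * requested by setting both option bits before DPAA2_CMD_NI_SET_LINK_CFG():
 *
 *	link_cfg.options |= DPAA2_NI_LINK_OPT_PAUSE |
 *	    DPAA2_NI_LINK_OPT_ASYM_PAUSE;
 *
 * The exact Rx/Tx pause semantics of the combination are left to the MC
 * firmware.
 */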
2065 
2066 /**
2067  * @brief Configure QoS table to determine the traffic class for the received
2068  * frame.
2069  */
2070 static int
2071 dpaa2_ni_set_qos_table(device_t dev, struct dpaa2_cmd *cmd)
2072 {
2073 	device_t child = dev;
2074 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2075 	struct dpaa2_ni_qos_table tbl;
2076 	struct dpaa2_buf *buf = &sc->qos_kcfg;
2077 	int error;
2078 
2079 	if (sc->attr.num.rx_tcs == 1 ||
2080 	    !(sc->attr.options & DPNI_OPT_HAS_KEY_MASKING)) {
2081 		if (bootverbose)
2082 			device_printf(dev, "Ingress traffic classification is "
2083 			    "not supported\n");
2084 		return (0);
2085 	}
2086 
2087 	/*
2088 	 * Allocate a buffer visible to the device to hold the QoS table key
2089 	 * configuration.
2090 	 */
2091 	KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not storage buffer",
2092 	    __func__));
2093 	if (__predict_true(buf->store.dmat == NULL))
2094 		buf->store.dmat = sc->qos_dmat;
2095 
2096 	error = bus_dmamem_alloc(buf->store.dmat, &buf->store.vaddr,
2097 	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->store.dmap);
2098 	if (error) {
2099 		device_printf(dev, "%s: failed to allocate a buffer for QoS key "
2100 		    "configuration\n", __func__);
2101 		return (error);
2102 	}
2103 
2104 	error = bus_dmamap_load(buf->store.dmat, buf->store.dmap,
2105 	    buf->store.vaddr, ETH_QOS_KCFG_BUF_SIZE, dpaa2_ni_dmamap_cb,
2106 	    &buf->store.paddr, BUS_DMA_NOWAIT);
2107 	if (error) {
2108 		device_printf(dev, "%s: failed to map QoS key configuration "
2109 		    "buffer into bus space\n", __func__);
2110 		return (error);
2111 	}
2112 
2113 	tbl.default_tc = 0;
2114 	tbl.discard_on_miss = false;
2115 	tbl.keep_entries = false;
2116 	tbl.kcfg_busaddr = buf->store.paddr;
2117 	error = DPAA2_CMD_NI_SET_QOS_TABLE(dev, child, cmd, &tbl);
2118 	if (error) {
2119 		device_printf(dev, "%s: failed to set QoS table\n", __func__);
2120 		return (error);
2121 	}
2122 
2123 	error = DPAA2_CMD_NI_CLEAR_QOS_TABLE(dev, child, cmd);
2124 	if (error) {
2125 		device_printf(dev, "%s: failed to clear QoS table\n", __func__);
2126 		return (error);
2127 	}
2128 
2129 	return (0);
2130 }
2131 
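/**
 * @brief Set the DPNI's primary MAC address: prefer the physical port's
 * address, fall back to the address already configured on the DPNI, and
 * generate a random one only if both are unavailable.
 */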
2132 static int
2133 dpaa2_ni_set_mac_addr(device_t dev, struct dpaa2_cmd *cmd, uint16_t rc_token,
2134     uint16_t ni_token)
2135 {
2136 	device_t child = dev;
2137 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
2138 	struct ifnet *ifp = sc->ifp;
2139 	struct ether_addr rnd_mac_addr;
2140 	uint8_t mac_addr[ETHER_ADDR_LEN];
2141 	uint8_t dpni_mac_addr[ETHER_ADDR_LEN];
2142 	int error;
2143 
2144 	/*
2145 	 * Get the MAC address associated with the physical port, if the DPNI is
2146 	 * connected to a DPMAC directly associated with one of the physical
2147 	 * ports.
2148 	 */
2149 	error = DPAA2_CMD_NI_GET_PORT_MAC_ADDR(dev, child, dpaa2_mcp_tk(cmd,
2150 	    ni_token), mac_addr);
2151 	if (error) {
2152 		device_printf(dev, "%s: failed to obtain the MAC address "
2153 		    "associated with the physical port\n", __func__);
2154 		return (error);
2155 	}
2156 
2157 	/* Get primary MAC address from the DPNI attributes. */
2158 	error = DPAA2_CMD_NI_GET_PRIM_MAC_ADDR(dev, child, cmd, dpni_mac_addr);
2159 	if (error) {
2160 		device_printf(dev, "%s: failed to obtain primary MAC address\n",
2161 		    __func__);
2162 		return (error);
2163 	}
2164 
2165 	if (!ETHER_IS_ZERO(mac_addr)) {
2166 		/* Set MAC address of the physical port as DPNI's primary one. */
2167 		error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, cmd,
2168 		    mac_addr);
2169 		if (error) {
2170 			device_printf(dev, "%s: failed to set primary MAC "
2171 			    "address\n", __func__);
2172 			return (error);
2173 		}
2174 		for (int i = 0; i < ETHER_ADDR_LEN; i++)
2175 			sc->mac.addr[i] = mac_addr[i];
2176 	} else if (ETHER_IS_ZERO(dpni_mac_addr)) {
2177 		/* Generate random MAC address as DPNI's primary one. */
2178 		ether_gen_addr(ifp, &rnd_mac_addr);
2179 		for (int i = 0; i < ETHER_ADDR_LEN; i++)
2180 			mac_addr[i] = rnd_mac_addr.octet[i];
2181 
2182 		error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, cmd,
2183 		    mac_addr);
2184 		if (error) {
2185 			device_printf(dev, "%s: failed to set random primary "
2186 			    "MAC address\n", __func__);
2187 			return (error);
2188 		}
2189 		for (int i = 0; i < ETHER_ADDR_LEN; i++)
2190 			sc->mac.addr[i] = mac_addr[i];
2191 	} else {
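		/* Keep the MAC address already configured on the DPNI. */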
2192 		for (int i = 0; i < ETHER_ADDR_LEN; i++)
2193 			sc->mac.addr[i] = dpni_mac_addr[i];
2194 	}
2195 
2196 	return (0);
2197 }
2198 
2199 static void
2200 dpaa2_ni_miibus_statchg(device_t dev)
2201 {
2202 	struct dpaa2_ni_softc *sc;
2203 	device_t child;
2204 	struct dpaa2_mac_link_state mac_link = { 0 };
2205 	uint16_t mac_token;
2206 	int error, link_state;
2207 
2208 	sc = device_get_softc(dev);
2209 	if (sc->fixed_link || sc->mii == NULL)
2210 		return;
2211 
2212 	/*
2213 	 * Note: ifp link state will only be changed AFTER we are called, so we
2214 	 * cannot rely on ifp->if_linkstate here.
2215 	 */
2216 	if (sc->mii->mii_media_status & IFM_AVALID) {
2217 		if (sc->mii->mii_media_status & IFM_ACTIVE)
2218 			link_state = LINK_STATE_UP;
2219 		else
2220 			link_state = LINK_STATE_DOWN;
2221 	} else
2222 		link_state = LINK_STATE_UNKNOWN;
2223 
2224 	if (link_state != sc->link_state) {
2226 		sc->link_state = link_state;
2227 
2228 		child = sc->dev;
2229 		error = DPAA2_CMD_MAC_OPEN(sc->dev, child, dpaa2_mcp_tk(sc->cmd,
2230 		    sc->rc_token), sc->mac.dpmac_id, &mac_token);
2231 		if (error) {
2232 			device_printf(sc->dev, "%s: failed to open DPMAC: "
2233 			    "id=%d, error=%d\n", __func__, sc->mac.dpmac_id,
2234 			    error);
2235 			return;
2236 		}
2237 
2238 		if (link_state == LINK_STATE_UP ||
2239 		    link_state == LINK_STATE_DOWN) {
2240 			/* Update DPMAC link state. */
2241 			mac_link.supported = sc->mii->mii_media.ifm_media;
2242 			mac_link.advert = sc->mii->mii_media.ifm_media;
2243 			mac_link.rate = 1000; /* TODO: derive from ifmedia_baudrate()? */
2244 			mac_link.options =
2245 			    DPAA2_MAC_LINK_OPT_AUTONEG |
2246 			    DPAA2_MAC_LINK_OPT_PAUSE;
2247 			mac_link.up = (link_state == LINK_STATE_UP);
2248 			mac_link.state_valid = true;
2249 
2250 			/* Inform DPMAC about link state. */
2251 			error = DPAA2_CMD_MAC_SET_LINK_STATE(sc->dev, child,
2252 			    sc->cmd, &mac_link);
2253 			if (error)
2254 				device_printf(sc->dev, "%s: failed to set DPMAC "
2255 				    "link state: id=%d, error=%d\n", __func__,
2256 				    sc->mac.dpmac_id, error);
2257 		}
2258 		DPAA2_CMD_MAC_CLOSE(sc->dev, child, dpaa2_mcp_tk(sc->cmd,
2259 		    mac_token));
2260 	}
2261 }
2262 
2263 /**
2264  * @brief Callback function to process media change request.
2265  */
2266 static int
2267 dpaa2_ni_media_change(struct ifnet *ifp)
2268 {
2269 	struct dpaa2_ni_softc *sc = ifp->if_softc;
2270 
2271 	DPNI_LOCK(sc);
2272 	if (sc->mii) {
2273 		mii_mediachg(sc->mii);
2274 		sc->media_status = sc->mii->mii_media.ifm_media;
2275 	} else if (sc->fixed_link) {
2276 		if_printf(ifp, "%s: can't change media in fixed mode\n",
2277 		    __func__);
2278 	}
2279 	DPNI_UNLOCK(sc);
2280 
2281 	return (0);
2282 }
2283 
2284 /**
2285  * @brief Callback function to process media status request.
2286  */
2287 static void
2288 dpaa2_ni_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2289 {
2290 	struct dpaa2_ni_softc *sc = ifp->if_softc;
2291 
2292 	DPNI_LOCK(sc);
2293 	if (sc->mii) {
2294 		mii_pollstat(sc->mii);
2295 		ifmr->ifm_active = sc->mii->mii_media_active;
2296 		ifmr->ifm_status = sc->mii->mii_media_status;
2297 	}
2298 	DPNI_UNLOCK(sc);
2299 }
2300 
2301 /**
2302  * @brief Callout function to check and update media status.
2303  */
2304 static void
2305 dpaa2_ni_media_tick(void *arg)
2306 {
2307 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2308 
2309 	/* Check for media type change */
2310 	if (sc->mii) {
2311 		mii_tick(sc->mii);
2312 		if (sc->media_status != sc->mii->mii_media.ifm_media) {
2313 			printf("%s: media type changed (ifm_media=%x)\n",
2314 			    __func__, sc->mii->mii_media.ifm_media);
2315 			dpaa2_ni_media_change(sc->ifp);
2316 		}
2317 	}
2318 
2319 	/* Schedule another timeout one second from now */
2320 	callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);
2321 }
2322 
2323 static void
2324 dpaa2_ni_init(void *arg)
2325 {
2326 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2327 	struct ifnet *ifp = sc->ifp;
2328 	device_t dev = sc->dev;
2329 	device_t child = dev;
2330 	int error;
2331 
2332 	DPNI_LOCK(sc);
2333 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2334 		DPNI_UNLOCK(sc);
2335 		return;
2336 	}
2337 	DPNI_UNLOCK(sc);
2338 
2339 	error = DPAA2_CMD_NI_ENABLE(dev, child, dpaa2_mcp_tk(sc->cmd,
2340 	    sc->ni_token));
2341 	if (error)
2342 		device_printf(dev, "%s: failed to enable DPNI: error=%d\n",
2343 		    __func__, error);
2344 
2345 	DPNI_LOCK(sc);
2346 	if (sc->mii)
2347 		mii_mediachg(sc->mii);
2348 	callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);
2349 
2350 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2351 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2352 	DPNI_UNLOCK(sc);
2353 
2354 	/* Force link-state update to initialize things. */
2355 	dpaa2_ni_miibus_statchg(dev);
2358 }
2359 
2360 static int
2361 dpaa2_ni_transmit(struct ifnet *ifp, struct mbuf *m)
2362 {
2363 	struct dpaa2_ni_softc *sc = ifp->if_softc;
2364 	struct dpaa2_ni_channel	*chan;
2365 	struct dpaa2_ni_tx_ring *tx;
2366 	uint32_t fqid;
2367 	bool found = false;
2368 	int chan_n = 0;
2369 
2370 	if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING)))
2371 		return (0);
2372 
2373 	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
2374 		fqid = m->m_pkthdr.flowid;
2375 		for (int i = 0; i < sc->chan_n; i++) {
2376 			chan = sc->channels[i];
2377 			for (int j = 0; j < chan->rxq_n; j++) {
2378 				if (fqid == chan->rx_queues[j].fqid) {
2379 					chan_n = chan->flowid;
2380 					found = true;
2381 					break;
2382 				}
2383 			}
2384 			if (found) {
2385 				break;
2386 			}
2387 		}
2388 	}
2389 	tx = DPAA2_TX_RING(sc, chan_n, 0);
2390 
2391 	TX_LOCK(tx);
2392 	dpaa2_ni_tx_locked(sc, tx, m);
2393 	TX_UNLOCK(tx);
2394 
2395 	return (0);
2396 }
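
/*
 * NOTE: The flowid lookup in dpaa2_ni_transmit() pairs with dpaa2_ni_rx(),
 * which stamps received mbufs with their Rx frame queue's FQID, so reply
 * traffic is transmitted via the same channel its flow was received on.
 */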
2397 
2398 static void
2399 dpaa2_ni_qflush(struct ifnet *ifp)
2400 {
2401 	/* TODO: Find a way to drain Tx queues in QBMan. */
2402 	if_qflush(ifp);
2403 }
2404 
2405 static int
2406 dpaa2_ni_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2407 {
2408 	struct dpaa2_ni_softc *sc = ifp->if_softc;
2409 	struct ifreq *ifr = (struct ifreq *) data;
2410 	device_t dev, child;
2411 	uint32_t changed = 0;
2412 	int mtu, error, rc = 0;
2413 
2414 	dev = child = sc->dev;
2415 
2416 	switch (cmd) {
2417 	case SIOCSIFMTU:
2418 		DPNI_LOCK(sc);
2419 		mtu = ifr->ifr_mtu;
2420 		if (mtu < ETHERMIN || mtu > ETHERMTU_JUMBO) {
2421 			DPNI_UNLOCK(sc);
2422 			return (EINVAL);
2423 		}
2424 		ifp->if_mtu = mtu;
2425 		DPNI_UNLOCK(sc);
2426 
2427 		/* Update maximum frame length. */
2428 		error = DPAA2_CMD_NI_SET_MFL(dev, child, dpaa2_mcp_tk(sc->cmd,
2429 		    sc->ni_token), mtu + ETHER_HDR_LEN);
2430 		if (error) {
2431 			device_printf(dev, "%s: failed to update maximum frame "
2432 			    "length: error=%d\n", __func__, error);
2433 			return (error);
2434 		}
2435 		break;
2436 	case SIOCSIFCAP:
2437 		changed = ifp->if_capenable ^ ifr->ifr_reqcap;
2438 		if (changed & IFCAP_HWCSUM) {
2439 			if ((ifr->ifr_reqcap & changed) & IFCAP_HWCSUM)
2440 				ifp->if_capenable |= IFCAP_HWCSUM;
2441 			else
2442 				ifp->if_capenable &= ~IFCAP_HWCSUM;
2443 		}
2444 		rc = dpaa2_ni_setup_if_caps(sc);
2445 		if (rc) {
2446 			printf("%s: failed to update interface capabilities: "
2447 			    "error=%d\n", __func__, rc);
2448 			rc = ENXIO;
2449 		}
2450 		break;
2451 	case SIOCSIFFLAGS:
2452 		DPNI_LOCK(sc);
2453 		if (ifp->if_flags & IFF_UP) {
2454 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2455 				changed = ifp->if_flags ^ sc->if_flags;
2456 				if (changed & (IFF_PROMISC | IFF_ALLMULTI))
2457 					rc = dpaa2_ni_setup_if_flags(sc);
2460 			} else {
2461 				DPNI_UNLOCK(sc);
2462 				dpaa2_ni_init(sc);
2463 				DPNI_LOCK(sc);
2464 			}
2465 		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2466 			/* dpni_if_stop(sc); */
2467 		}
2468 
2469 		sc->if_flags = ifp->if_flags;
2470 		DPNI_UNLOCK(sc);
2471 		break;
2472 	case SIOCADDMULTI:
2473 	case SIOCDELMULTI:
2474 		DPNI_LOCK(sc);
2475 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2476 			DPNI_UNLOCK(sc);
2477 			rc = dpaa2_ni_update_mac_filters(ifp);
2478 			if (rc)
2479 				device_printf(dev, "%s: failed to update MAC "
2480 				    "filters: error=%d\n", __func__, rc);
2481 			DPNI_LOCK(sc);
2482 		}
2483 		DPNI_UNLOCK(sc);
2484 		break;
2485 	case SIOCGIFMEDIA:
2486 	case SIOCSIFMEDIA:
2487 		if (sc->mii)
2488 			rc = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, cmd);
2489 		else if (sc->fixed_link)
2490 			rc = ifmedia_ioctl(ifp, ifr, &sc->fixed_ifmedia, cmd);
2492 		break;
2493 	default:
2494 		rc = ether_ioctl(ifp, cmd, data);
2495 	}
2496 
2497 	return (rc);
2498 }
2499 
2500 static int
2501 dpaa2_ni_update_mac_filters(struct ifnet *ifp)
2502 {
2503 	struct dpaa2_ni_softc *sc = ifp->if_softc;
2504 	struct dpaa2_ni_mcaddr_ctx ctx;
2505 	device_t dev, child;
2506 	int error;
2507 
2508 	dev = child = sc->dev;
2509 
2510 	/* Remove all multicast MAC filters. */
2511 	error = DPAA2_CMD_NI_CLEAR_MAC_FILTERS(dev, child, dpaa2_mcp_tk(sc->cmd,
2512 	    sc->ni_token), false, true);
2513 	if (error) {
2514 		device_printf(dev, "%s: failed to clear multicast MAC filters: "
2515 		    "error=%d\n", __func__, error);
2516 		return (error);
2517 	}
2518 
2519 	ctx.ifp = ifp;
2520 	ctx.error = 0;
2521 	ctx.nent = 0;
2522 
2523 	if_foreach_llmaddr(ifp, dpaa2_ni_add_maddr, &ctx);
2524 
2525 	return (ctx.error);
2526 }
2527 
2528 static u_int
2529 dpaa2_ni_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2530 {
2531 	struct dpaa2_ni_mcaddr_ctx *ctx = arg;
2532 	struct dpaa2_ni_softc *sc = ctx->ifp->if_softc;
2533 	device_t dev, child;
2534 
2535 	dev = child = sc->dev;
2536 
2537 	if (ctx->error != 0)
2538 		return (0);
2539 
2540 	if (ETHER_IS_MULTICAST(LLADDR(sdl))) {
2541 		ctx->error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, dpaa2_mcp_tk(
2542 		    sc->cmd, sc->ni_token), LLADDR(sdl));
2543 		if (ctx->error != 0) {
2544 			device_printf(dev, "%s: can't add more than %d MAC "
2545 			    "addresses, switching to the multicast promiscuous "
2546 			    "mode\n", __func__, ctx->nent);
2547 
2548 			/* Enable multicast promiscuous mode. */
2549 			DPNI_LOCK(sc);
2550 			ctx->ifp->if_flags |= IFF_ALLMULTI;
2551 			sc->if_flags |= IFF_ALLMULTI;
2552 			ctx->error = dpaa2_ni_setup_if_flags(sc);
2553 			DPNI_UNLOCK(sc);
2554 
2555 			return (0);
2556 		}
2557 		ctx->nent++;
2558 	}
2559 
2560 	return (1);
2561 }
2562 
2563 static void
2564 dpaa2_ni_intr(void *arg)
2565 {
2566 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2567 	device_t child = sc->dev;
2568 	uint32_t status = ~0u; /* clear all IRQ status bits */
2569 	int error;
2570 
2571 	error = DPAA2_CMD_NI_GET_IRQ_STATUS(sc->dev, child, dpaa2_mcp_tk(sc->cmd,
2572 	    sc->ni_token), DPNI_IRQ_INDEX, &status);
2573 	if (error)
2574 		device_printf(sc->dev, "%s: failed to obtain IRQ status: "
2575 		    "error=%d\n", __func__, error);
2576 }
2577 
2578 /**
2579  * @brief Callback to obtain a physical address of the only DMA segment mapped.
2580  */
2581 static void
2582 dpaa2_ni_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2583 {
2584 	if (error == 0) {
2585 		KASSERT(nseg == 1, ("too many segments: nseg=%d\n", nseg));
2586 		*(bus_addr_t *) arg = segs[0].ds_addr;
2587 	}
2588 }
2589 
2590 /**
2591  * @brief Release new buffers to the buffer pool if necessary.
2592  */
2593 static void
2594 dpaa2_ni_bp_task(void *arg, int count)
2595 {
2596 	device_t bp_dev;
2597 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2598 	struct dpaa2_bp_softc *bpsc;
2599 	struct dpaa2_bp_conf bp_conf;
2600 	const int buf_num = DPAA2_ATOMIC_READ(&sc->buf_num);
2601 	int error;
2602 
2603 	/* There's only one buffer pool for now. */
2604 	bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
2605 	bpsc = device_get_softc(bp_dev);
2606 
2607 	/* Get state of the buffer pool. */
2608 	error = DPAA2_SWP_QUERY_BP(sc->channels[0]->io_dev, bpsc->attr.bpid,
2609 	    &bp_conf);
2610 	if (error) {
2611 		device_printf(sc->dev, "%s: failed to query buffer pool "
2612 		    "configuration: error=%d\n", __func__, error);
2613 		return;
2614 	}
2615 
2616 	/* Double the number of allocated buffers if fewer than 25% are free. */
2617 	if (bp_conf.free_bufn < (buf_num >> 2)) {
2618 		(void)dpaa2_ni_seed_buf_pool(sc, buf_num);
2619 		DPAA2_ATOMIC_XCHG(&sc->buf_free, bp_conf.free_bufn);
2620 	}
2621 }
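
/*
 * For example (numbers illustrative): with buf_num == 1024, the task above
 * re-seeds the pool with another 1024 buffers once fewer than 256 (25%)
 * remain free, bounded by DPAA2_NI_BUFS_MAX in dpaa2_ni_seed_buf_pool().
 */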
2622 
2623 /**
2624  * @brief Poll frames from a specific channel when CDAN is received.
2625  *
2626  * NOTE: To be called from the DPIO interrupt handler.
2627  */
2628 static void
2629 dpaa2_ni_poll(void *arg)
2630 {
2631 	struct dpaa2_ni_channel *chan = (struct dpaa2_ni_channel *) arg;
2632 	struct dpaa2_io_softc *iosc;
2633 	struct dpaa2_swp *swp;
2634 	struct dpaa2_ni_fq *fq;
2635 	int error, consumed = 0;
2636 
2637 	KASSERT(chan != NULL, ("%s: channel is NULL", __func__));
2638 
2639 	iosc = device_get_softc(chan->io_dev);
2640 	swp = iosc->swp;
2641 
2642 	do {
2643 		error = dpaa2_swp_pull(swp, chan->id, &chan->store,
2644 		    ETH_STORE_FRAMES);
2645 		if (error) {
2646 			device_printf(chan->ni_dev, "%s: failed to pull frames: "
2647 			    "chan_id=%d, error=%d\n", __func__, chan->id, error);
2648 			break;
2649 		}
2650 
2651 		/*
2652 		 * TODO: Combine frames from the same Rx queue, returned in
2653 		 * response to the current VDQ command, into a chain (linked
2654 		 * with m_nextpkt) to amortize the FQ lock.
2655 		 */
2656 		error = dpaa2_ni_consume_frames(chan, &fq, &consumed);
2657 		if (error == ENOENT) {
2658 			break;
2659 		}
2660 		if (error == ETIMEDOUT) {
2661 			device_printf(chan->ni_dev, "%s: timed out consuming "
2662 			    "frames: chan_id=%d\n", __func__, chan->id);
2663 		}
2664 	} while (true);
2665 
2666 	/* Re-arm channel to generate CDAN. */
2667 	error = DPAA2_SWP_CONF_WQ_CHANNEL(chan->io_dev, &chan->ctx);
2668 	if (error) {
2669 		device_printf(chan->ni_dev, "%s: failed to rearm: chan_id=%d, "
2670 		    "error=%d\n", __func__, chan->id, error);
2671 	}
2672 }
2673 
2674 /**
2675  * @brief Transmit mbufs.
2676  */
2677 static void
2678 dpaa2_ni_tx_locked(struct dpaa2_ni_softc *sc, struct dpaa2_ni_tx_ring *tx,
2679     struct mbuf *m)
2680 {
2681 	struct dpaa2_ni_fq *fq = tx->fq;
2682 	struct dpaa2_buf *buf;
2683 	struct dpaa2_fd fd;
2684 	struct mbuf *m_d;
2685 	bus_dma_segment_t txsegs[DPAA2_TX_SEGLIMIT];
2686 	uint64_t idx;
2687 	void *pidx;
2688 	int error, rc, txnsegs;
2689 
2690 	/* Obtain an index of a Tx buffer. */
2691 	pidx = buf_ring_dequeue_sc(tx->idx_br);
2692 	if (__predict_false(pidx == NULL)) {
2693 		/* TODO: Do not give up easily. */
2694 		m_freem(m);
2695 		return;
2696 	} else {
2697 		idx = (uint64_t) pidx;
2698 		buf = &tx->buf[idx];
2699 		buf->tx.m = m;
2700 		buf->tx.idx = idx;
2701 		buf->tx.sgt_paddr = 0;
2702 	}
2703 
2704 	/* Load mbuf to transmit. */
2705 	error = bus_dmamap_load_mbuf_sg(buf->tx.dmat, buf->tx.dmap, m,
2706 	    txsegs, &txnsegs, BUS_DMA_NOWAIT);
2707 	if (__predict_false(error != 0)) {
2708 		/* Too many fragments, trying to defragment... */
2709 		m_d = m_collapse(m, M_NOWAIT, DPAA2_TX_SEGLIMIT);
2710 		if (m_d == NULL) {
2711 			device_printf(sc->dev, "%s: mbuf "
2712 			    "defragmentation failed\n", __func__);
2713 			fq->chan->tx_dropped++;
2714 			goto err;
2715 		}
2716 
2717 		buf->tx.m = m = m_d;
2718 		error = bus_dmamap_load_mbuf_sg(buf->tx.dmat,
2719 		    buf->tx.dmap, m, txsegs, &txnsegs, BUS_DMA_NOWAIT);
2720 		if (__predict_false(error != 0)) {
2721 			device_printf(sc->dev, "%s: failed to load "
2722 			    "mbuf: error=%d\n", __func__, error);
2723 			fq->chan->tx_dropped++;
2724 			goto err;
2725 		}
2726 	}
2727 
2728 	/* Build frame descriptor. */
2729 	error = dpaa2_ni_build_fd(sc, tx, buf, txsegs, txnsegs, &fd);
2730 	if (__predict_false(error != 0)) {
2731 		device_printf(sc->dev, "%s: failed to build frame "
2732 		    "descriptor: error=%d\n", __func__, error);
2733 		fq->chan->tx_dropped++;
2734 		goto err_unload;
2735 	}
2736 
2737 	/* Sync the frame for device access before enqueuing it. */
2738 	bus_dmamap_sync(buf->tx.dmat, buf->tx.dmap, BUS_DMASYNC_PREWRITE);
2739 	bus_dmamap_sync(buf->tx.sgt_dmat, buf->tx.sgt_dmap,
2740 	    BUS_DMASYNC_PREWRITE);
2741 
2742 	/* TODO: Enqueue several frames in a single command. */
2743 	for (int i = 0; i < DPAA2_NI_ENQUEUE_RETRIES; i++) {
2744 		/* TODO: Return error codes instead of # of frames. */
2745 		rc = DPAA2_SWP_ENQ_MULTIPLE_FQ(fq->chan->io_dev, tx->fqid,
2746 		    &fd, 1);
2747 		if (rc == 1) {
2748 			break;
2749 		}
2750 	}
2751 
2752 	if (rc != 1) {
2753 		fq->chan->tx_dropped++;
2754 		goto err_unload;
2755 	} else {
2756 		fq->chan->tx_frames++;
2757 	}
2758 	return;
2759 
2760 err_unload:
2761 	bus_dmamap_unload(buf->tx.dmat, buf->tx.dmap);
2762 	if (buf->tx.sgt_paddr != 0) {
2763 		bus_dmamap_unload(buf->tx.sgt_dmat, buf->tx.sgt_dmap);
2764 	}
2765 err:
2766 	m_freem(buf->tx.m);
2767 	buf_ring_enqueue(tx->idx_br, pidx);
2768 }
2769 
2770 static int
2771 dpaa2_ni_consume_frames(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq **src,
2772     uint32_t *consumed)
2773 {
2774 	struct dpaa2_ni_fq *fq = NULL;
2775 	struct dpaa2_dq *dq;
2776 	struct dpaa2_fd *fd;
2777 	int rc, frames = 0;
2778 
2779 	do {
2780 		rc = dpaa2_ni_chan_storage_next(chan, &dq);
2781 		if (rc == EINPROGRESS) {
2782 			if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
2783 				fd = &dq->fdr.fd;
2784 				fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;
2785 				fq->consume(chan, fq, fd);
2786 				frames++;
2787 			}
2788 		} else if (rc == EALREADY || rc == ENOENT) {
2789 			if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
2790 				fd = &dq->fdr.fd;
2791 				fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;
2792 				fq->consume(chan, fq, fd);
2793 				frames++;
2794 			}
2795 			break;
2796 		} else {
2797 			KASSERT(1 == 0, ("%s: should not reach here", __func__));
2798 		}
2799 	} while (true);
2800 
2801 	KASSERT(chan->store_idx < chan->store_sz,
2802 	    ("channel store idx >= size: store_idx=%d, store_sz=%d",
2803 	    chan->store_idx, chan->store_sz));
2804 
2805 	/*
2806 	 * A dequeue operation pulls frames from a single queue into the store.
2807 	 * Return the frame queue and a number of consumed frames as an output.
2808 	 */
2809 	if (src != NULL)
2810 		*src = fq;
2811 	if (consumed != NULL)
2812 		*consumed = frames;
2813 
2814 	return (rc);
2815 }
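
/*
 * NOTE: Judging by its use above, dpaa2_ni_chan_storage_next() returns
 * EINPROGRESS while more dequeue responses may follow in the store, and
 * EALREADY or ENOENT once the current VDQ command has been fully consumed.
 */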
2816 
2817 /**
2818  * @brief Receive frames.
2819  */
2820 static int
2821 dpaa2_ni_rx(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq,
2822     struct dpaa2_fd *fd)
2823 {
2824 	struct dpaa2_ni_softc *sc = device_get_softc(chan->ni_dev);
2825 	struct dpaa2_bp_softc *bpsc;
2826 	struct dpaa2_buf *buf;
2827 	struct ifnet *ifp = sc->ifp;
2828 	struct mbuf *m;
2829 	device_t bp_dev;
2830 	bus_addr_t paddr = (bus_addr_t) fd->addr;
2831 	bus_addr_t released[DPAA2_SWP_BUFS_PER_CMD];
2832 	void *buf_data;
2833 	int buf_idx, buf_len;
2834 	int error, released_n = 0;
2835 
2836 	/*
2837 	 * Get buffer index from the ADDR_TOK (not used by QBMan) bits of the
2838 	 * physical address.
2839 	 */
2840 	buf_idx = dpaa2_ni_fd_buf_idx(fd);
2841 	buf = &sc->buf[buf_idx];
2842 
2843 	KASSERT(buf->type == DPAA2_BUF_RX, ("%s: not Rx buffer", __func__));
2844 	if (paddr != buf->rx.paddr) {
2845 		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
2846 		    __func__, paddr, buf->rx.paddr);
2847 	}
2848 
2849 	/* Update statistics. */
2850 	switch (dpaa2_ni_fd_err(fd)) {
2851 	case 1: /* Enqueue rejected by QMan */
2852 		sc->rx_enq_rej_frames++;
2853 		break;
2854 	case 2: /* QMan IEOI error */
2855 		sc->rx_ieoi_err_frames++;
2856 		break;
2857 	default:
2858 		break;
2859 	}
2860 	switch (dpaa2_ni_fd_format(fd)) {
2861 	case DPAA2_FD_SINGLE:
2862 		sc->rx_single_buf_frames++;
2863 		break;
2864 	case DPAA2_FD_SG:
2865 		sc->rx_sg_buf_frames++;
2866 		break;
2867 	default:
2868 		break;
2869 	}
2870 
2871 	m = buf->rx.m;
2872 	buf->rx.m = NULL;
2873 	bus_dmamap_sync(buf->rx.dmat, buf->rx.dmap,
2874 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2875 	bus_dmamap_unload(buf->rx.dmat, buf->rx.dmap);
2876 
2877 	buf_len = dpaa2_ni_fd_data_len(fd);
2878 	buf_data = (uint8_t *)buf->rx.vaddr + dpaa2_ni_fd_offset(fd);
2879 
2880 	/* Prefetch mbuf data. */
2881 	__builtin_prefetch(buf_data);
2882 
2883 	/* Populate mbuf fields without reading them first. */
2884 	m->m_flags |= M_PKTHDR;
2885 	m->m_data = buf_data;
2886 	m->m_len = buf_len;
2887 	m->m_pkthdr.len = buf_len;
2888 	m->m_pkthdr.rcvif = ifp;
2889 	m->m_pkthdr.flowid = fq->fqid;
2890 	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
2891 
2892 	(*ifp->if_input)(ifp, m);
2893 
2894 	/* Keep the buffer to be recycled. */
2895 	chan->recycled[chan->recycled_n++] = paddr;
2896 	KASSERT(chan->recycled_n <= DPAA2_SWP_BUFS_PER_CMD,
2897 	    ("%s: too many buffers to recycle", __func__));
2898 
2899 	/* Re-seed and release recycled buffers back to the pool. */
2900 	if (chan->recycled_n == DPAA2_SWP_BUFS_PER_CMD) {
2901 		/* Release new buffers to the pool if needed. */
2902 		taskqueue_enqueue(sc->bp_taskq, &sc->bp_task);
2903 
2904 		for (int i = 0; i < chan->recycled_n; i++) {
2905 			paddr = chan->recycled[i];
2906 
2907 			/* Parse ADDR_TOK of the recycled buffer. */
2908 			buf_idx = (paddr >> DPAA2_NI_BUF_IDX_SHIFT)
2909 			    & DPAA2_NI_BUF_IDX_MASK;
2910 			buf = &sc->buf[buf_idx];
2911 
2912 			/* Seed recycled buffer. */
2913 			error = dpaa2_ni_seed_rxbuf(sc, buf, buf_idx);
2914 			KASSERT(error == 0, ("%s: failed to seed recycled "
2915 			    "buffer: error=%d", __func__, error));
2916 			if (__predict_false(error != 0)) {
2917 				device_printf(sc->dev, "%s: failed to seed "
2918 				    "recycled buffer: error=%d\n", __func__,
2919 				    error);
2920 				continue;
2921 			}
2922 
2923 			/* Prepare buffer to be released in a single command. */
2924 			released[released_n++] = buf->rx.paddr;
2925 		}
2926 
2927 		/* There's only one buffer pool for now. */
2928 		bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
2929 		bpsc = device_get_softc(bp_dev);
2930 
2931 		error = DPAA2_SWP_RELEASE_BUFS(chan->io_dev, bpsc->attr.bpid,
2932 		    released, released_n);
2933 		if (__predict_false(error != 0)) {
2934 			device_printf(sc->dev, "%s: failed to release buffers "
2935 			    "to the pool: error=%d\n", __func__, error);
2936 			return (error);
2937 		}
2938 
2939 		/* Be ready to recycle the next portion of the buffers. */
2940 		chan->recycled_n = 0;
2941 	}
2942 
2943 	return (0);
2944 }
2945 
2946 /**
2947  * @brief Receive Rx error frames.
2948  */
2949 static int
2950 dpaa2_ni_rx_err(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq,
2951     struct dpaa2_fd *fd)
2952 {
2953 	device_t bp_dev;
2954 	struct dpaa2_ni_softc *sc = device_get_softc(chan->ni_dev);
2955 	struct dpaa2_bp_softc *bpsc;
2956 	struct dpaa2_buf *buf;
2957 	bus_addr_t paddr = (bus_addr_t) fd->addr;
2958 	int buf_idx, error;
2959 
2960 	/*
2961 	 * Get buffer index from the ADDR_TOK (not used by QBMan) bits of the
2962 	 * physical address.
2963 	 */
2964 	buf_idx = dpaa2_ni_fd_buf_idx(fd);
2965 	buf = &sc->buf[buf_idx];
2966 
2967 	KASSERT(buf->type == DPAA2_BUF_RX, ("%s: not Rx buffer", __func__));
2968 	if (paddr != buf->rx.paddr) {
2969 		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
2970 		    __func__, paddr, buf->rx.paddr);
2971 	}
2972 
2973 	/* There's only one buffer pool for now. */
2974 	bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
2975 	bpsc = device_get_softc(bp_dev);
2976 
2977 	/* Release buffer to QBMan buffer pool. */
2978 	error = DPAA2_SWP_RELEASE_BUFS(chan->io_dev, bpsc->attr.bpid, &paddr, 1);
2979 	if (error != 0) {
2980 		device_printf(sc->dev, "%s: failed to release frame buffer to "
2981 		    "the pool: error=%d\n", __func__, error);
2982 		return (error);
2983 	}
2984 
2985 	return (0);
2986 }
2987 
2988 /**
2989  * @brief Receive Tx confirmation frames.
2990  */
2991 static int
2992 dpaa2_ni_tx_conf(struct dpaa2_ni_channel *chan, struct dpaa2_ni_fq *fq,
2993     struct dpaa2_fd *fd)
2994 {
2995 	struct dpaa2_ni_softc *sc = device_get_softc(chan->ni_dev);
2996 	struct dpaa2_ni_channel	*buf_chan;
2997 	struct dpaa2_ni_tx_ring *tx;
2998 	struct dpaa2_buf *buf;
2999 	bus_addr_t paddr = (bus_addr_t) (fd->addr & BUF_MAXADDR_49BIT);
3000 	uint64_t buf_idx;
3001 	int chan_idx, tx_idx;
3002 
3003 	/*
3004 	 * Get channel, Tx ring and buffer indexes from the ADDR_TOK bits
3005 	 * (not used by QBMan) of the physical address.
3006 	 */
3007 	chan_idx = dpaa2_ni_fd_chan_idx(fd);
3008 	tx_idx = dpaa2_ni_fd_tx_idx(fd);
3009 	buf_idx = (uint64_t) dpaa2_ni_fd_txbuf_idx(fd);
3010 
3011 	KASSERT(tx_idx < DPAA2_NI_MAX_TCS, ("%s: invalid Tx ring index",
3012 	    __func__));
3013 	KASSERT(buf_idx < DPAA2_NI_BUFS_PER_TX, ("%s: invalid Tx buffer index",
3014 	    __func__));
3015 
3016 	buf_chan = sc->channels[chan_idx];
3017 	tx = &buf_chan->txc_queue.tx_rings[tx_idx];
3018 	buf = &tx->buf[buf_idx];
3019 
3020 	KASSERT(buf->type == DPAA2_BUF_TX, ("%s: not Tx buffer", __func__));
3021 	if (paddr != buf->tx.paddr) {
3022 		panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
3023 		    __func__, paddr, buf->tx.paddr);
3024 	}
3025 
3027 	bus_dmamap_unload(buf->tx.dmat, buf->tx.dmap);
3028 	if (buf->tx.sgt_paddr != 0)
3029 		bus_dmamap_unload(buf->tx.sgt_dmat, buf->tx.sgt_dmap);
3030 	m_freem(buf->tx.m);
3031 
3032 	/* Return Tx buffer index back to the ring. */
3033 	buf_ring_enqueue(tx->idx_br, (void *) buf_idx);
3034 
3035 	return (0);
3036 }
3037 
3038 /**
3039  * @brief Compare versions of the DPAA2 network interface API.
3040  */
3041 static int
3042 dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *sc, uint16_t major,
3043     uint16_t minor)
3044 {
3045 	if (sc->api_major == major)
3046 		return (sc->api_minor - minor);
3047 	return (sc->api_major - major);
3048 }
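
/*
 * Usage sketch (version numbers hypothetical): callers can gate optional
 * MC commands on the firmware's DPNI API version, e.g.
 *
 *	if (dpaa2_ni_cmp_api_version(sc, 7, 0) >= 0)
 *		... use a command introduced in DPNI API 7.0 ...
 */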
3049 
3050 /**
3051  * @brief Allocate Rx buffers visible to QBMan and release them to the pool.
3052  */
3053 static int
3054 dpaa2_ni_seed_buf_pool(struct dpaa2_ni_softc *sc, uint32_t seedn)
3055 {
3056 	device_t bp_dev;
3057 	struct dpaa2_bp_softc *bpsc;
3058 	struct dpaa2_buf *buf;
3059 	bus_addr_t paddr[DPAA2_SWP_BUFS_PER_CMD];
3060 	const int allocated = DPAA2_ATOMIC_READ(&sc->buf_num);
3061 	int i, error, bufn = 0;
3062 
3063 	KASSERT(sc->bp_dmat != NULL, ("%s: DMA tag for buffer pool not "
3064 	    "created?", __func__));
3065 
3066 	/* There's only one buffer pool for now. */
3067 	bp_dev = (device_t) rman_get_start(sc->res[BP_RID(0)]);
3068 	bpsc = device_get_softc(bp_dev);
3069 
3070 	/* Limit # of buffers released to the pool. */
3071 	if (allocated + seedn > DPAA2_NI_BUFS_MAX)
3072 		seedn = DPAA2_NI_BUFS_MAX - allocated;
3073 
3074 	/* Release "seedn" buffers to the pool. */
3075 	for (i = allocated; i < (allocated + seedn); i++) {
3076 		/* Release buffers to the pool once a full command is gathered. */
3077 		if (bufn == DPAA2_SWP_BUFS_PER_CMD) {
3078 			error = DPAA2_SWP_RELEASE_BUFS(sc->channels[0]->io_dev,
3079 			    bpsc->attr.bpid, paddr, bufn);
3080 			if (error) {
3081 				device_printf(sc->dev, "%s: failed to release "
3082 				    "buffers to the pool (1)\n", __func__);
3083 				return (error);
3084 			}
3085 			DPAA2_ATOMIC_ADD(&sc->buf_num, bufn);
3086 			bufn = 0;
3087 		}
3088 
3089 		buf = &sc->buf[i];
3090 		buf->type = DPAA2_BUF_RX;
3091 		buf->rx.m = NULL;
3092 		buf->rx.dmap = NULL;
3093 		buf->rx.paddr = 0;
3094 		buf->rx.vaddr = NULL;
3095 		error = dpaa2_ni_seed_rxbuf(sc, buf, i);
3096 		if (error)
3097 			break;
3098 		paddr[bufn] = buf->rx.paddr;
3099 		bufn++;
3100 	}
3101 
3102 	/* Release if there are buffers left. */
3103 	if (bufn > 0) {
3104 		error = DPAA2_SWP_RELEASE_BUFS(sc->channels[0]->io_dev,
3105 		    bpsc->attr.bpid, paddr, bufn);
3106 		if (error) {
3107 			device_printf(sc->dev, "%s: failed to release "
3108 			    "buffers to the pool (2)\n", __func__);
3109 			return (error);
3110 		}
3111 		DPAA2_ATOMIC_ADD(&sc->buf_num, bufn);
3112 	}
3113 
3114 	return (0);
3115 }
3116 
3117 /**
3118  * @brief Prepare Rx buffer to be released to the buffer pool.
3119  */
3120 static int
3121 dpaa2_ni_seed_rxbuf(struct dpaa2_ni_softc *sc, struct dpaa2_buf *buf, int idx)
3122 {
3123 	struct mbuf *m;
3124 	bus_dmamap_t dmap;
3125 	bus_dma_segment_t segs;
3126 	int error, nsegs;
3127 
3128 	KASSERT(sc->bp_dmat != NULL, ("%s: Buffer pool DMA tag is not "
3129 	    "allocated?", __func__));
3130 	KASSERT(buf->type == DPAA2_BUF_RX, ("%s: not Rx buffer", __func__));
3131 
3132 	/* Keep DMA tag for this buffer. */
3133 	if (__predict_false(buf->rx.dmat == NULL))
3134 		buf->rx.dmat = sc->bp_dmat;
3135 
3136 	/* Create a DMA map for the given buffer if it doesn't exist yet. */
3137 	if (__predict_false(buf->rx.dmap == NULL)) {
3138 		error = bus_dmamap_create(buf->rx.dmat, 0, &dmap);
3139 		if (error) {
3140 			device_printf(sc->dev, "%s: failed to create DMA map "
3141 			    "for buffer: buf_idx=%d, error=%d\n", __func__,
3142 			    idx, error);
3143 			return (error);
3144 		}
3145 		buf->rx.dmap = dmap;
3146 	}
3147 
3148 	/* Allocate mbuf if needed. */
3149 	if (__predict_false(buf->rx.m == NULL)) {
3150 		m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, BUF_SIZE);
3151 		if (__predict_false(m == NULL)) {
3152 			device_printf(sc->dev, "%s: failed to allocate mbuf for "
3153 			    "buffer\n", __func__);
3154 			return (ENOMEM);
3155 		}
3156 		m->m_len = m->m_ext.ext_size;
3157 		m->m_pkthdr.len = m->m_ext.ext_size;
3158 		buf->rx.m = m;
3159 	} else
3160 		m = buf->rx.m;
3161 
3162 	error = bus_dmamap_load_mbuf_sg(buf->rx.dmat, buf->rx.dmap,
3163 	    m, &segs, &nsegs, BUS_DMA_NOWAIT);
3164 	KASSERT(nsegs == 1, ("one segment expected: nsegs=%d", nsegs));
3165 	KASSERT(error == 0, ("failed to map mbuf: error=%d", error));
3166 	if (__predict_false(error != 0 || nsegs != 1)) {
3167 		device_printf(sc->dev, "%s: failed to map mbuf: error=%d, "
3168 		    "nsegs=%d\n", __func__, error, nsegs);
3169 		bus_dmamap_unload(buf->rx.dmat, buf->rx.dmap);
3170 		m_freem(m);
3171 		return (error);
3172 	}
3173 	buf->rx.paddr = segs.ds_addr;
3174 	buf->rx.vaddr = m->m_data;
3175 
3176 	/*
3177 	 * Write buffer index to the ADDR_TOK (bits 63-49) which is not used by
3178 	 * QBMan and is supposed to assist in physical to virtual address
3179 	 * translation.
3180 	 *
3181 	 * NOTE: "lowaddr" and "highaddr" of the window which cannot be accessed
3182 	 * 	 by QBMan must be configured in the DMA tag accordingly.
3183 	 */
3184 	buf->rx.paddr =
3185 	    ((uint64_t)(idx & DPAA2_NI_BUF_IDX_MASK) <<
3186 		DPAA2_NI_BUF_IDX_SHIFT) |
3187 	    (buf->rx.paddr & DPAA2_NI_BUF_ADDR_MASK);
3188 
3189 	return (0);
3190 }
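
/*
 * A minimal sketch of the ADDR_TOK round trip (helper names hypothetical):
 * the encode side mirrors the assignment above, and dpaa2_ni_fd_buf_idx()
 * performs the decode on the address carried in a frame descriptor.
 *
 *	static bus_addr_t
 *	encode_buf_idx(bus_addr_t paddr, int idx)
 *	{
 *		return (((uint64_t)(idx & DPAA2_NI_BUF_IDX_MASK) <<
 *		    DPAA2_NI_BUF_IDX_SHIFT) |
 *		    (paddr & DPAA2_NI_BUF_ADDR_MASK));
 *	}
 *
 *	static int
 *	decode_buf_idx(bus_addr_t paddr)
 *	{
 *		return ((paddr >> DPAA2_NI_BUF_IDX_SHIFT) &
 *		    DPAA2_NI_BUF_IDX_MASK);
 *	}
 */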
3191 
3192 /**
3193  * @brief Prepare Tx buffer to be added to the Tx ring.
3194  */
3195 static int
3196 dpaa2_ni_seed_txbuf(struct dpaa2_ni_softc *sc, struct dpaa2_buf *buf, int idx)
3197 {
3198 	bus_dmamap_t dmap;
3199 	int error;
3200 
3201 	KASSERT(sc->tx_dmat != NULL, ("%s: Tx DMA tag is not allocated?",
3202 	    __func__));
3203 	KASSERT(sc->sgt_dmat != NULL, ("%s: S/G DMA tag not allocated?",
3204 	    __func__));
3205 	KASSERT(buf->type == DPAA2_BUF_TX, ("%s: not Tx buffer", __func__));
3206 
3207 	/* Keep DMA tags for this buffer. */
3208 	if (__predict_true(buf->tx.dmat == NULL))
3209 		buf->tx.dmat = sc->tx_dmat;
3210 	if (__predict_true(buf->tx.sgt_dmat == NULL))
3211 		buf->tx.sgt_dmat = sc->sgt_dmat;
3212 
3213 	/* Create a DMA map for the given buffer if it doesn't exist yet. */
3214 	if (__predict_true(buf->tx.dmap == NULL)) {
3215 		error = bus_dmamap_create(buf->tx.dmat, 0, &dmap);
3216 		if (error != 0) {
3217 			device_printf(sc->dev, "%s: failed to create "
3218 			    "Tx DMA map: error=%d\n", __func__, error);
3219 			return (error);
3220 		}
3221 		buf->tx.dmap = dmap;
3222 	}
3223 
3224 	/* Allocate a buffer to store scatter/gather table. */
3225 	if (__predict_true(buf->tx.sgt_vaddr == NULL)) {
3226 		error = bus_dmamem_alloc(buf->tx.sgt_dmat,
3227 		    &buf->tx.sgt_vaddr, BUS_DMA_ZERO | BUS_DMA_COHERENT,
3228 		    &buf->tx.sgt_dmap);
3229 		if (error != 0) {
3230 			device_printf(sc->dev, "%s: failed to allocate "
3231 			    "S/G table: error=%d\n", __func__, error);
3232 			return (error);
3233 		}
3234 	}
3235 
3236 	return (0);
3237 }
3238 
3239 /**
3240  * @brief Allocate channel storage visible to QBMan.
3241  */
3242 static int
3243 dpaa2_ni_seed_chan_storage(struct dpaa2_ni_softc *sc,
3244     struct dpaa2_ni_channel *chan)
3245 {
3246 	struct dpaa2_buf *buf = &chan->store;
3247 	int error;
3248 
3249 	KASSERT(sc->st_dmat != NULL, ("%s: channel storage DMA tag is not "
3250 	    "allocated?", __func__));
3251 	KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not channel storage buffer",
3252 	    __func__));
3253 
3254 	/* Keep DMA tag for this buffer. */
3255 	if (__predict_false(buf->store.dmat == NULL)) {
3256 		buf->store.dmat = sc->st_dmat;
3257 	}
3258 
3259 	if (__predict_false(buf->store.vaddr == NULL)) {
3260 		error = bus_dmamem_alloc(buf->store.dmat, &buf->store.vaddr,
3261 		    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->store.dmap);
3262 		if (error) {
3263 			device_printf(sc->dev, "%s: failed to allocate channel "
3264 			    "storage\n", __func__);
3265 			return (error);
3266 		}
3267 	}
3268 
3269 	if (__predict_false(buf->store.paddr == 0)) {
3270 		error = bus_dmamap_load(buf->store.dmat, buf->store.dmap,
3271 		    buf->store.vaddr, ETH_STORE_SIZE, dpaa2_ni_dmamap_cb,
3272 		    &buf->store.paddr, BUS_DMA_NOWAIT);
3273 		if (error) {
3274 			device_printf(sc->dev, "%s: failed to map channel "
3275 			    "storage\n", __func__);
3276 			return (error);
3277 		}
3278 	}
3279 
3280 	chan->store_sz = ETH_STORE_FRAMES;
3281 	chan->store_idx = 0;
3282 
3283 	return (0);
3284 }
3285 
3286 /**
3287  * @brief Build a DPAA2 frame descriptor.
3288  */
3289 static int
3290 dpaa2_ni_build_fd(struct dpaa2_ni_softc *sc, struct dpaa2_ni_tx_ring *tx,
3291     struct dpaa2_buf *buf, bus_dma_segment_t *txsegs, int txnsegs,
3292     struct dpaa2_fd *fd)
3293 {
3294 	struct dpaa2_ni_channel	*chan = tx->fq->chan;
3295 	struct dpaa2_sg_entry *sgt;
3296 	int i, error;
3297 
3298 	KASSERT(txnsegs <= DPAA2_TX_SEGLIMIT, ("%s: too many segments, "
3299 	    "txnsegs (%d) > %d", __func__, txnsegs, DPAA2_TX_SEGLIMIT));
3300 	KASSERT(buf->type == DPAA2_BUF_TX, ("%s: not Tx buffer", __func__));
3301 	KASSERT(buf->tx.sgt_vaddr != NULL, ("%s: S/G table not allocated?",
3302 	    __func__));
3303 
3304 	/* Reset frame descriptor fields. */
3305 	memset(fd, 0, sizeof(*fd));
3306 
3307 	if (__predict_true(txnsegs > 0 && txnsegs <= DPAA2_TX_SEGLIMIT)) {
3308 		/* Populate S/G table. */
3309 		sgt = (struct dpaa2_sg_entry *)((uint8_t *)buf->tx.sgt_vaddr +
3310 		    sc->tx_data_off); /* tx_data_off is a byte offset */
3311 		for (i = 0; i < txnsegs; i++) {
3312 			sgt[i].addr = (uint64_t) txsegs[i].ds_addr;
3313 			sgt[i].len = (uint32_t) txsegs[i].ds_len;
3314 			sgt[i].offset_fmt = 0u;
3315 		}
3316 		sgt[i-1].offset_fmt |= 0x8000u; /* set final entry flag */
3317 
3318 		KASSERT(buf->tx.sgt_paddr == 0, ("%s: sgt_paddr(%#jx) != 0",
3319 		    __func__, buf->tx.sgt_paddr));
3320 
3321 		/* Load S/G table. */
3322 		error = bus_dmamap_load(buf->tx.sgt_dmat, buf->tx.sgt_dmap,
3323 		    buf->tx.sgt_vaddr, DPAA2_TX_SGT_SZ, dpaa2_ni_dmamap_cb,
3324 		    &buf->tx.sgt_paddr, BUS_DMA_NOWAIT);
3325 		if (__predict_false(error != 0)) {
3326 			device_printf(sc->dev, "%s: failed to map S/G table: "
3327 			    "error=%d\n", __func__, error);
3328 			return (error);
3329 		}
3330 		buf->tx.paddr = buf->tx.sgt_paddr;
3331 		buf->tx.vaddr = buf->tx.sgt_vaddr;
3332 		sc->tx_sg_frames++; /* for sysctl(9) */
3333 	} else {
3334 		return (EINVAL);
3335 	}
3336 
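	/*
	 * Encode the channel, Tx ring and Tx buffer indices into the unused
	 * upper bits of the frame's physical address, so they can be
	 * recovered later (see the FD accessors below).
	 */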
3337 	fd->addr =
3338 	    ((uint64_t)(chan->flowid & DPAA2_NI_BUF_CHAN_MASK) <<
3339 		DPAA2_NI_BUF_CHAN_SHIFT) |
3340 	    ((uint64_t)(tx->txid & DPAA2_NI_TX_IDX_MASK) <<
3341 		DPAA2_NI_TX_IDX_SHIFT) |
3342 	    ((uint64_t)(buf->tx.idx & DPAA2_NI_TXBUF_IDX_MASK) <<
3343 		DPAA2_NI_TXBUF_IDX_SHIFT) |
3344 	    (buf->tx.paddr & DPAA2_NI_BUF_ADDR_MASK);
3345 
	fd->data_length = (uint32_t) buf->tx.m->m_pkthdr.len;
	fd->bpid_ivp_bmt = 0; /* not used for Tx frames */
	fd->offset_fmt_sl = 0x2000u | sc->tx_data_off; /* FMT=S/G, offset=SGT */
	fd->ctrl = 0x00800000u; /* PTA: pass-through annotation is valid */
3350 
3351 	return (0);
3352 }
3353 
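/*
 * Accessors to extract the fields encoded in a frame descriptor (FD),
 * including the indices packed into the buffer address above.
 */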
3354 static int
3355 dpaa2_ni_fd_err(struct dpaa2_fd *fd)
3356 {
3357 	return ((fd->ctrl >> DPAA2_NI_FD_ERR_SHIFT) & DPAA2_NI_FD_ERR_MASK);
3358 }
3359 
3360 static uint32_t
3361 dpaa2_ni_fd_data_len(struct dpaa2_fd *fd)
3362 {
3363 	if (dpaa2_ni_fd_short_len(fd))
3364 		return (fd->data_length & DPAA2_NI_FD_LEN_MASK);
3365 
3366 	return (fd->data_length);
3367 }
3368 
3369 static int
3370 dpaa2_ni_fd_chan_idx(struct dpaa2_fd *fd)
3371 {
3372 	return ((((bus_addr_t) fd->addr) >> DPAA2_NI_BUF_CHAN_SHIFT) &
3373 	    DPAA2_NI_BUF_CHAN_MASK);
3374 }
3375 
3376 static int
3377 dpaa2_ni_fd_buf_idx(struct dpaa2_fd *fd)
3378 {
3379 	return ((((bus_addr_t) fd->addr) >> DPAA2_NI_BUF_IDX_SHIFT) &
3380 	    DPAA2_NI_BUF_IDX_MASK);
3381 }
3382 
3383 static int
3384 dpaa2_ni_fd_tx_idx(struct dpaa2_fd *fd)
3385 {
3386 	return ((((bus_addr_t) fd->addr) >> DPAA2_NI_TX_IDX_SHIFT) &
3387 	    DPAA2_NI_TX_IDX_MASK);
3388 }
3389 
3390 static int
3391 dpaa2_ni_fd_txbuf_idx(struct dpaa2_fd *fd)
3392 {
3393 	return ((((bus_addr_t) fd->addr) >> DPAA2_NI_TXBUF_IDX_SHIFT) &
3394 	    DPAA2_NI_TXBUF_IDX_MASK);
3395 }
3396 
3397 static int
3398 dpaa2_ni_fd_format(struct dpaa2_fd *fd)
3399 {
3400 	return ((enum dpaa2_fd_format)((fd->offset_fmt_sl >>
3401 	    DPAA2_NI_FD_FMT_SHIFT) & DPAA2_NI_FD_FMT_MASK));
3402 }
3403 
3404 static bool
3405 dpaa2_ni_fd_short_len(struct dpaa2_fd *fd)
3406 {
3407 	return (((fd->offset_fmt_sl >> DPAA2_NI_FD_SL_SHIFT)
3408 	    & DPAA2_NI_FD_SL_MASK) == 1);
3409 }
3410 
3411 static int
3412 dpaa2_ni_fd_offset(struct dpaa2_fd *fd)
3413 {
3414 	return (fd->offset_fmt_sl & DPAA2_NI_FD_OFFSET_MASK);
3415 }
3416 
3417 /**
3418  * @brief Collect statistics of the network interface.
3419  */
3420 static int
3421 dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS)
3422 {
3423 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
3424 	struct dpni_stat *stat = &dpni_stat_sysctls[oidp->oid_number];
3425 	device_t child = sc->dev;
3426 	uint64_t cnt[DPAA2_NI_STAT_COUNTERS];
3427 	uint64_t result = 0;
3428 	int error;
3429 
3430 	error = DPAA2_CMD_NI_GET_STATISTICS(sc->dev, child,
3431 	    dpaa2_mcp_tk(sc->cmd, sc->ni_token), stat->page, 0, cnt);
	/* Report 0 if the MC command failed. */
	if (error == 0)
		result = cnt[stat->cnt];
3434 
3435 	return (sysctl_handle_64(oidp, &result, 0, req));
3436 }
3437 
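/**
 * @brief Report the number of buffers allocated for the buffer pool.
 */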
3438 static int
3439 dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS)
3440 {
3441 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
3442 	uint32_t buf_num = DPAA2_ATOMIC_READ(&sc->buf_num);
3443 
3444 	return (sysctl_handle_32(oidp, &buf_num, 0, req));
3445 }
3446 
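/**
 * @brief Report the number of free buffers in the buffer pool.
 */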
3447 static int
3448 dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS)
3449 {
3450 	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
3451 	uint32_t buf_free = DPAA2_ATOMIC_READ(&sc->buf_free);
3452 
3453 	return (sysctl_handle_32(oidp, &buf_free, 0, req));
3454 }
3455 
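/**
 * @brief Enable Rx hash distribution for a combination of RXH_ flags.
 *
 * A minimal usage sketch (illustrative only; RXH_IP_SRC and RXH_IP_DST are
 * assumed to be among the rxnfc_field bits of the dist_fields table):
 *
 *	error = dpaa2_ni_set_hash(sc->dev, RXH_IP_SRC | RXH_IP_DST);
 */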
3456 static int
3457 dpaa2_ni_set_hash(device_t dev, uint64_t flags)
3458 {
3459 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
3460 	uint64_t key = 0;
3461 	int i;
3462 
	/* Rx traffic distribution requires more than one Rx queue. */
	if (sc->attr.num.queues <= 1) {
		return (EOPNOTSUPP);
	}
3466 
3467 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3468 		if (dist_fields[i].rxnfc_field & flags) {
3469 			key |= dist_fields[i].id;
3470 		}
3471 	}
3472 
3473 	return (dpaa2_ni_set_dist_key(dev, DPAA2_NI_DIST_MODE_HASH, key));
3474 }
3475 
/**
 * @brief Set the Rx distribution (hash or flow classification) key.
 *
 * "flags" is a combination of RXH_ bits.
 */
3480 static int
3481 dpaa2_ni_set_dist_key(device_t dev, enum dpaa2_ni_dist_mode type, uint64_t flags)
3482 {
3483 	device_t child = dev;
3484 	struct dpaa2_ni_softc *sc = device_get_softc(dev);
3485 	struct dpkg_profile_cfg cls_cfg;
3486 	struct dpkg_extract *key;
3487 	struct dpaa2_buf *buf = &sc->rxd_kcfg;
3488 	int i, error = 0;
3489 
3490 	KASSERT(buf->type == DPAA2_BUF_STORE, ("%s: not storage buffer",
3491 	    __func__));
3492 	if (__predict_true(buf->store.dmat == NULL))
3493 		buf->store.dmat = sc->rxd_dmat;
3494 
3495 	memset(&cls_cfg, 0, sizeof(cls_cfg));
3496 
3497 	/* Configure extracts according to the given flags. */
3498 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3499 		key = &cls_cfg.extracts[cls_cfg.num_extracts];
3500 
3501 		if (!(flags & dist_fields[i].id))
3502 			continue;
3503 
3504 		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
3505 			device_printf(dev, "%s: failed to add key extraction "
3506 			    "rule\n", __func__);
3507 			return (E2BIG);
3508 		}
3509 
3510 		key->type = DPKG_EXTRACT_FROM_HDR;
3511 		key->extract.from_hdr.prot = dist_fields[i].cls_prot;
3512 		key->extract.from_hdr.type = DPKG_FULL_FIELD;
3513 		key->extract.from_hdr.field = dist_fields[i].cls_field;
3514 		cls_cfg.num_extracts++;
3515 	}
3516 
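	/*
	 * Allocate a DMA-able buffer, serialize the key profile into it and
	 * hand its physical address over to the MC firmware.
	 */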
3517 	error = bus_dmamem_alloc(buf->store.dmat, &buf->store.vaddr,
3518 	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->store.dmap);
3519 	if (error != 0) {
3520 		device_printf(dev, "%s: failed to allocate a buffer for Rx "
3521 		    "traffic distribution key configuration\n", __func__);
3522 		return (error);
3523 	}
3524 
3525 	error = dpaa2_ni_prepare_key_cfg(&cls_cfg, (uint8_t *) buf->store.vaddr);
	if (error != 0) {
		device_printf(dev, "%s: failed to prepare key configuration: "
		    "error=%d\n", __func__, error);
		bus_dmamem_free(buf->store.dmat, buf->store.vaddr,
		    buf->store.dmap);
		buf->store.vaddr = NULL;
		return (error);
	}
3531 
3532 	/* Prepare for setting the Rx dist. */
3533 	error = bus_dmamap_load(buf->store.dmat, buf->store.dmap,
3534 	    buf->store.vaddr, DPAA2_CLASSIFIER_DMA_SIZE, dpaa2_ni_dmamap_cb,
3535 	    &buf->store.paddr, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->dev, "%s: failed to map a buffer for Rx "
		    "traffic distribution key configuration\n", __func__);
		bus_dmamem_free(buf->store.dmat, buf->store.vaddr,
		    buf->store.dmap);
		buf->store.vaddr = NULL;
		return (error);
	}
3541 
3542 	if (type == DPAA2_NI_DIST_MODE_HASH) {
3543 		error = DPAA2_CMD_NI_SET_RX_TC_DIST(dev, child, dpaa2_mcp_tk(
3544 		    sc->cmd, sc->ni_token), sc->attr.num.queues, 0,
3545 		    DPAA2_NI_DIST_MODE_HASH, buf->store.paddr);
3546 		if (error != 0)
3547 			device_printf(dev, "%s: failed to set distribution mode "
3548 			    "and size for the traffic class\n", __func__);
3549 	}
3550 
3551 	return (error);
3552 }
3553 
/**
 * @brief Prepare extract parameters.
 *
 * cfg:		Defines a full Key Generation profile.
 * key_cfg_buf:	Zeroed 256 bytes of memory to be mapped for DMA.
 */
3560 static int
3561 dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *cfg, uint8_t *key_cfg_buf)
3562 {
3563 	struct dpni_ext_set_rx_tc_dist *dpni_ext;
3564 	struct dpni_dist_extract *extr;
3565 	int i, j;
3566 
3567 	if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
3568 		return (EINVAL);
3569 
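	/*
	 * Serialize the profile into the layout expected by the MC firmware
	 * (struct dpni_ext_set_rx_tc_dist) inside the DMA-able buffer.
	 */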
3570 	dpni_ext = (struct dpni_ext_set_rx_tc_dist *) key_cfg_buf;
3571 	dpni_ext->num_extracts = cfg->num_extracts;
3572 
3573 	for (i = 0; i < cfg->num_extracts; i++) {
3574 		extr = &dpni_ext->extracts[i];
3575 
3576 		switch (cfg->extracts[i].type) {
3577 		case DPKG_EXTRACT_FROM_HDR:
3578 			extr->prot = cfg->extracts[i].extract.from_hdr.prot;
3579 			extr->efh_type =
3580 			    cfg->extracts[i].extract.from_hdr.type & 0x0Fu;
3581 			extr->size = cfg->extracts[i].extract.from_hdr.size;
3582 			extr->offset = cfg->extracts[i].extract.from_hdr.offset;
3583 			extr->field = cfg->extracts[i].extract.from_hdr.field;
3584 			extr->hdr_index =
3585 				cfg->extracts[i].extract.from_hdr.hdr_index;
3586 			break;
3587 		case DPKG_EXTRACT_FROM_DATA:
3588 			extr->size = cfg->extracts[i].extract.from_data.size;
3589 			extr->offset =
3590 				cfg->extracts[i].extract.from_data.offset;
3591 			break;
3592 		case DPKG_EXTRACT_FROM_PARSE:
3593 			extr->size = cfg->extracts[i].extract.from_parse.size;
3594 			extr->offset =
3595 				cfg->extracts[i].extract.from_parse.offset;
3596 			break;
3597 		default:
3598 			return (EINVAL);
3599 		}
3600 
3601 		extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
3602 		extr->extract_type = cfg->extracts[i].type & 0x0Fu;
3603 
3604 		for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
3605 			extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
3606 			extr->masks[j].offset =
3607 				cfg->extracts[i].masks[j].offset;
3608 		}
3609 	}
3610 
3611 	return (0);
3612 }
3613 
3614 /**
3615  * @brief Obtain the next dequeue response from the channel storage.
3616  */
3617 static int
3618 dpaa2_ni_chan_storage_next(struct dpaa2_ni_channel *chan, struct dpaa2_dq **dq)
3619 {
3620 	struct dpaa2_buf *buf = &chan->store;
3621 	struct dpaa2_dq *msgs = buf->store.vaddr;
3622 	struct dpaa2_dq *msg = &msgs[chan->store_idx];
	int rc = EINPROGRESS; /* VDQ command is still in progress by default */
3624 
3625 	chan->store_idx++;
3626 
	if (msg->fdr.desc.stat & DPAA2_DQ_STAT_EXPIRED) {
		rc = EALREADY; /* VDQ command is expired */
		chan->store_idx = 0;
		if (!(msg->fdr.desc.stat & DPAA2_DQ_STAT_VALIDFRAME))
			msg = NULL; /* Null response, FD is invalid */
	}
	/* Do not dereference msg if it was invalidated above. */
	if (msg != NULL && (msg->fdr.desc.stat & DPAA2_DQ_STAT_FQEMPTY)) {
		rc = ENOENT; /* FQ is empty */
		chan->store_idx = 0;
	}
3637 
3638 	if (dq != NULL)
3639 		*dq = msg;
3640 
3641 	return (rc);
3642 }
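/*
 * A minimal consumption sketch (illustrative only; the actual polling loop
 * lives in the channel task elsewhere in this file; consume() is a
 * hypothetical handler):
 *
 *	struct dpaa2_dq *dq;
 *	do {
 *		error = dpaa2_ni_chan_storage_next(chan, &dq);
 *		if (dq != NULL)
 *			consume(dq);
 *	} while (error == EINPROGRESS);
 */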
3643 
3644 static device_method_t dpaa2_ni_methods[] = {
3645 	/* Device interface */
3646 	DEVMETHOD(device_probe,		dpaa2_ni_probe),
3647 	DEVMETHOD(device_attach,	dpaa2_ni_attach),
3648 	DEVMETHOD(device_detach,	dpaa2_ni_detach),
3649 
3650 	/* mii via memac_mdio */
3651 	DEVMETHOD(miibus_statchg,	dpaa2_ni_miibus_statchg),
3652 
3653 	DEVMETHOD_END
3654 };
3655 
3656 static driver_t dpaa2_ni_driver = {
3657 	"dpaa2_ni",
3658 	dpaa2_ni_methods,
3659 	sizeof(struct dpaa2_ni_softc),
3660 };
3661 
3662 DRIVER_MODULE(miibus, dpaa2_ni, miibus_driver, 0, 0);
3663 DRIVER_MODULE(dpaa2_ni, dpaa2_rc, dpaa2_ni_driver, 0, 0);
3664 
3665 MODULE_DEPEND(dpaa2_ni, miibus, 1, 1, 1);
3666 #ifdef DEV_ACPI
3667 MODULE_DEPEND(dpaa2_ni, memac_mdio_acpi, 1, 1, 1);
3668 #endif
3669 #ifdef FDT
3670 MODULE_DEPEND(dpaa2_ni, memac_mdio_fdt, 1, 1, 1);
3671 #endif
3672