/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright © 2021-2023 Dmitry Salychev
 * Copyright © 2022 Mathew McBride
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 * The DPAA2 Network Interface (DPNI) driver.
 *
 * The DPNI object is a network interface that is configurable to support a
 * wide range of features, from a very basic Ethernet interface up to a
 * high-functioning network interface. It provides the features expected by
 * standard network stacks, from basic connectivity to hardware offloads.
 *
 * DPNIs work with Ethernet traffic, starting with the L2 header. Additional
 * functions are provided for standard network protocols (L2, L3, L4, etc.).
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/taskqueue.h>
#include <sys/buf_ring.h>
#include <sys/smp.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/atomic.h>
#include <machine/vmparam.h>

#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <dev/pci/pcivar.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mdio/mdio.h>

#include "opt_acpi.h"
#include "opt_platform.h"

#include "pcib_if.h"
#include "pci_if.h"
#include "miibus_if.h"
#include "memac_mdio_if.h"

#include "dpaa2_types.h"
#include "dpaa2_mc.h"
#include "dpaa2_mc_if.h"
#include "dpaa2_mcp.h"
#include "dpaa2_swp.h"
#include "dpaa2_swp_if.h"
#include "dpaa2_cmd_if.h"
#include "dpaa2_ni.h"
#include "dpaa2_channel.h"
#include "dpaa2_buf.h"

#define BIT(x)			(1ul << (x))
#define WRIOP_VERSION(x, y, z)	((x) << 10 | (y) << 5 | (z) << 0)
#define ARRAY_SIZE(a)		(sizeof(a) / sizeof((a)[0]))

/* Frame Dequeue Response status bits. */
#define IS_NULL_RESPONSE(stat)	((((stat) >> 4) & 1) == 0)
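/*
 * NOTE: Bit 4 corresponds to the "valid frame" flag (0x10) of the dequeue
 *	 response status, i.e. a null response carries no frame to process.
 */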

#define ALIGN_UP(x, y)		roundup2((x), (y))
#define ALIGN_DOWN(x, y)	rounddown2((x), (y))
#define CACHE_LINE_ALIGN(x)	ALIGN_UP((x), CACHE_LINE_SIZE)
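/* For example, CACHE_LINE_ALIGN(100) yields 128 on a 64-byte cache line. */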

#define DPNI_LOCK(__sc) do {			\
	mtx_assert(&(__sc)->lock, MA_NOTOWNED);	\
	mtx_lock(&(__sc)->lock);		\
} while (0)
#define DPNI_UNLOCK(__sc) do {			\
	mtx_assert(&(__sc)->lock, MA_OWNED);	\
	mtx_unlock(&(__sc)->lock);		\
} while (0)
#define DPNI_LOCK_ASSERT(__sc) do {		\
	mtx_assert(&(__sc)->lock, MA_OWNED);	\
} while (0)

#define DPAA2_TX_RING(sc, chan, tc) \
	(&(sc)->channels[(chan)]->txc_queue.tx_rings[(tc)])
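/* E.g. DPAA2_TX_RING(sc, 0, 1) is the Tx ring of traffic class 1 on channel 0. */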

MALLOC_DEFINE(M_DPAA2_TXB, "dpaa2_txb", "DPAA2 DMA-mapped buffer (Tx)");

/*
 * How many times the channel cleanup routine will be repeated if the Rx or Tx
 * budget has been depleted.
 */
#define DPAA2_CLEAN_BUDGET	64	/* sysctl(9)? */
/* TX/RX budget for the channel cleanup task */
#define DPAA2_TX_BUDGET		128	/* sysctl(9)? */
#define DPAA2_RX_BUDGET		256	/* sysctl(9)? */

#define DPNI_IRQ_INDEX		0	/* Index of the only DPNI IRQ. */
#define DPNI_IRQ_LINK_CHANGED	1	/* Link state changed */
#define DPNI_IRQ_EP_CHANGED	2	/* DPAA2 endpoint dis/connected */

/* Default maximum RX frame length w/o CRC. */
#define DPAA2_ETH_MFL		(ETHER_MAX_LEN_JUMBO + ETHER_VLAN_ENCAP_LEN - \
    ETHER_CRC_LEN)
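/*
 * With stock values (ETHER_MAX_LEN_JUMBO=9018, ETHER_VLAN_ENCAP_LEN=4,
 * ETHER_CRC_LEN=4) this works out to 9018 bytes.
 */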

/* Minimally supported version of the DPNI API. */
#define DPNI_VER_MAJOR		7
#define DPNI_VER_MINOR		0

/* Rx/Tx buffers configuration. */
#define BUF_ALIGN_V1		256	/* WRIOP v1.0.0 limitation */
#define BUF_ALIGN		64
#define BUF_SWA_SIZE		64	/* SW annotation size */
#define BUF_RX_HWA_SIZE		64	/* HW annotation size */
#define BUF_TX_HWA_SIZE		128	/* HW annotation size */

#define DPAA2_RX_BUFRING_SZ	(4096u)
#define DPAA2_RXE_BUFRING_SZ	(1024u)
#define DPAA2_TXC_BUFRING_SZ	(4096u)
#define DPAA2_TX_SEGLIMIT	(16u)	/* arbitrary number */
#define DPAA2_TX_SEG_SZ		(PAGE_SIZE)
#define DPAA2_TX_SEGS_MAXSZ	(DPAA2_TX_SEGLIMIT * DPAA2_TX_SEG_SZ)
#define DPAA2_TX_SGT_SZ		(PAGE_SIZE)	/* bytes */

/* Size of a buffer to keep a QoS table key configuration. */
#define ETH_QOS_KCFG_BUF_SIZE	(PAGE_SIZE)

/* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */
#define DPAA2_CLASSIFIER_DMA_SIZE (PAGE_SIZE)

/* Buffers layout options. */
#define BUF_LOPT_TIMESTAMP	0x1
#define BUF_LOPT_PARSER_RESULT	0x2
#define BUF_LOPT_FRAME_STATUS	0x4
#define BUF_LOPT_PRIV_DATA_SZ	0x8
#define BUF_LOPT_DATA_ALIGN	0x10
#define BUF_LOPT_DATA_HEAD_ROOM	0x20
#define BUF_LOPT_DATA_TAIL_ROOM	0x40

#define DPAA2_NI_BUF_ADDR_MASK	(0x1FFFFFFFFFFFFul) /* 49-bit addresses max. */
#define DPAA2_NI_BUF_CHAN_MASK	(0xFu)
#define DPAA2_NI_BUF_CHAN_SHIFT	(60)
#define DPAA2_NI_BUF_IDX_MASK	(0x7FFFu)
#define DPAA2_NI_BUF_IDX_SHIFT	(49)
#define DPAA2_NI_TX_IDX_MASK	(0x7u)
#define DPAA2_NI_TX_IDX_SHIFT	(57)
#define DPAA2_NI_TXBUF_IDX_MASK	(0xFFu)
#define DPAA2_NI_TXBUF_IDX_SHIFT (49)
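/*
 * NOTE: Physical addresses are limited to 49 bits, so the upper bits [63:49]
 *	 of a frame's buffer address can be borrowed to stash the channel,
 *	 Tx ring and buffer indices according to the masks/shifts above.
 */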

#define DPAA2_NI_FD_FMT_MASK	(0x3u)
#define DPAA2_NI_FD_FMT_SHIFT	(12)
#define DPAA2_NI_FD_ERR_MASK	(0xFFu)
#define DPAA2_NI_FD_ERR_SHIFT	(0)
#define DPAA2_NI_FD_SL_MASK	(0x1u)
#define DPAA2_NI_FD_SL_SHIFT	(14)
#define DPAA2_NI_FD_LEN_MASK	(0x3FFFFu)
#define DPAA2_NI_FD_OFFSET_MASK (0x0FFFu)
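/*
 * These masks imply the layout of the frame descriptor's offset word:
 * bits [11:0] hold the data offset, bits [13:12] the frame format and bit 14
 * the short-length flag; when the latter is set, only the lower 18 bits of
 * the length field are valid.
 */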

/* Enables TCAM for Flow Steering and QoS look-ups. */
#define DPNI_OPT_HAS_KEY_MASKING 0x10

/* Unique IDs for the supported Rx classification header fields. */
#define DPAA2_ETH_DIST_ETHDST	BIT(0)
#define DPAA2_ETH_DIST_ETHSRC	BIT(1)
#define DPAA2_ETH_DIST_ETHTYPE	BIT(2)
#define DPAA2_ETH_DIST_VLAN	BIT(3)
#define DPAA2_ETH_DIST_IPSRC	BIT(4)
#define DPAA2_ETH_DIST_IPDST	BIT(5)
#define DPAA2_ETH_DIST_IPPROTO	BIT(6)
#define DPAA2_ETH_DIST_L4SRC	BIT(7)
#define DPAA2_ETH_DIST_L4DST	BIT(8)
#define DPAA2_ETH_DIST_ALL	(~0ULL)

/* L3-L4 network traffic flow hash options. */
#define RXH_L2DA		(1 << 1)
#define RXH_VLAN		(1 << 2)
#define RXH_L3_PROTO		(1 << 3)
#define RXH_IP_SRC		(1 << 4)
#define RXH_IP_DST		(1 << 5)
#define RXH_L4_B_0_1		(1 << 6) /* src port in case of TCP/UDP/SCTP */
#define RXH_L4_B_2_3		(1 << 7) /* dst port in case of TCP/UDP/SCTP */
#define RXH_DISCARD		(1 << 31)

/* Default Rx hash options, set during attaching. */
#define DPAA2_RXH_DEFAULT	(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
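/* I.e. a 4-tuple hash: IP source/destination plus L4 source/destination port. */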

MALLOC_DEFINE(M_DPAA2_NI, "dpaa2_ni", "DPAA2 Network Interface");

/*
 * DPAA2 Network Interface resource specification.
 *
 * NOTE: Don't forget to update macros in dpaa2_ni.h in case of any changes in
 *	 the specification!
 */
struct resource_spec dpaa2_ni_spec[] = {
	/*
	 * DPMCP resources.
	 *
	 * NOTE: MC command portals (MCPs) are used to send commands to, and
	 *	 receive responses from, the MC firmware. One portal per DPNI.
	 */
	{ DPAA2_DEV_MCP, DPAA2_NI_MCP_RID(0), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	/*
	 * DPIO resources (software portals).
	 *
	 * NOTE: One per running core. While DPIOs are the source of data
	 *	 availability interrupts, the DPCONs are used to identify the
	 *	 network interface that has produced ingress data to that core.
	 */
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(0), RF_ACTIVE | RF_SHAREABLE },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(1), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(2), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(3), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(4), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(5), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(6), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(7), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(8), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(9), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(10), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(11), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(12), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(13), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(14), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(15), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	/*
	 * DPBP resources (buffer pools).
	 *
	 * NOTE: One per network interface.
	 */
	{ DPAA2_DEV_BP, DPAA2_NI_BP_RID(0), RF_ACTIVE },
	/*
	 * DPCON resources (channels).
	 *
	 * NOTE: One DPCON per core to which Rx or Tx confirmation traffic is
	 *	 to be distributed.
	 * NOTE: Since it is necessary to distinguish between traffic from
	 *	 different network interfaces arriving on the same core, the
	 *	 DPCONs must be private to the DPNIs.
	 */
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(0), RF_ACTIVE },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(1), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(2), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(3), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(4), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(5), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(6), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(7), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(8), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(9), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(10), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(11), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(12), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(13), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(14), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(15), RF_ACTIVE | RF_OPTIONAL },

	RESOURCE_SPEC_END
};

/* Supported header fields for Rx hash distribution key */
static const struct dpaa2_eth_dist_fields dist_fields[] = {
	{
		/* L2 header */
		.rxnfc_field = RXH_L2DA,
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_DA,
		.id = DPAA2_ETH_DIST_ETHDST,
		.size = 6,
	}, {
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_SA,
		.id = DPAA2_ETH_DIST_ETHSRC,
		.size = 6,
	}, {
		/* This is the last ethertype field parsed:
		 * depending on frame format, it can be the MAC ethertype
		 * or the VLAN etype.
		 */
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_TYPE,
		.id = DPAA2_ETH_DIST_ETHTYPE,
		.size = 2,
	}, {
		/* VLAN header */
		.rxnfc_field = RXH_VLAN,
		.cls_prot = NET_PROT_VLAN,
		.cls_field = NH_FLD_VLAN_TCI,
		.id = DPAA2_ETH_DIST_VLAN,
		.size = 2,
	}, {
		/* IP header */
		.rxnfc_field = RXH_IP_SRC,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_SRC,
		.id = DPAA2_ETH_DIST_IPSRC,
		.size = 4,
	}, {
		.rxnfc_field = RXH_IP_DST,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_DST,
		.id = DPAA2_ETH_DIST_IPDST,
		.size = 4,
	}, {
		.rxnfc_field = RXH_L3_PROTO,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_PROTO,
		.id = DPAA2_ETH_DIST_IPPROTO,
		.size = 1,
	}, {
		/* Using UDP ports, this is functionally equivalent to raw
		 * byte pairs from the L4 header.
		 */
		.rxnfc_field = RXH_L4_B_0_1,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_SRC,
		.id = DPAA2_ETH_DIST_L4SRC,
		.size = 2,
	}, {
		.rxnfc_field = RXH_L4_B_2_3,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_DST,
		.id = DPAA2_ETH_DIST_L4DST,
		.size = 2,
	},
};

static struct dpni_stat {
	int	page;
	int	cnt;
	char	*name;
	char	*desc;
} dpni_stat_sysctls[DPAA2_NI_STAT_SYSCTLS] = {
	/* PAGE, COUNTER, NAME, DESCRIPTION */
	{ 0, 0, "in_all_frames",	"All accepted ingress frames" },
	{ 0, 1, "in_all_bytes",		"Bytes in all accepted ingress frames" },
	{ 0, 2, "in_multi_frames",	"Multicast accepted ingress frames" },
	{ 1, 0, "eg_all_frames",	"All egress frames transmitted" },
	{ 1, 1, "eg_all_bytes",		"Bytes in all frames transmitted" },
	{ 1, 2, "eg_multi_frames",	"Multicast egress frames transmitted" },
	{ 2, 0, "in_filtered_frames",	"All ingress frames discarded due to "
	  "filtering" },
	{ 2, 1, "in_discarded_frames",	"All frames discarded due to errors" },
	{ 2, 2, "in_nobuf_discards",	"Discards on ingress side due to buffer "
	  "depletion in DPNI buffer pools" },
};
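
/*
 * NOTE: Each entry maps a (page, counter) pair of the DPNI statistics pages
 *	 onto a sysctl; dpaa2_ni_collect_stats() reads them from the MC
 *	 firmware.
 */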

struct dpaa2_ni_rx_ctx {
	struct mbuf	*head;
	struct mbuf	*tail;
	int		 cnt;
	bool		 last;
};

/* Device interface */
static int dpaa2_ni_probe(device_t);
static int dpaa2_ni_attach(device_t);
static int dpaa2_ni_detach(device_t);

/* DPAA2 network interface setup and configuration */
static int dpaa2_ni_setup(device_t);
static int dpaa2_ni_setup_channels(device_t);
static int dpaa2_ni_bind(device_t);
static int dpaa2_ni_setup_rx_dist(device_t);
static int dpaa2_ni_setup_irqs(device_t);
static int dpaa2_ni_setup_msi(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_dma(struct dpaa2_ni_softc *);

/* Tx/Rx flow configuration */
static int dpaa2_ni_setup_rx_flow(device_t, struct dpaa2_ni_fq *);
static int dpaa2_ni_setup_tx_flow(device_t, struct dpaa2_ni_fq *);
static int dpaa2_ni_setup_rx_err_flow(device_t, struct dpaa2_ni_fq *);

/* Configuration subroutines */
static int dpaa2_ni_set_buf_layout(device_t);
static int dpaa2_ni_set_pause_frame(device_t);
static int dpaa2_ni_set_qos_table(device_t);
static int dpaa2_ni_set_mac_addr(device_t);
static int dpaa2_ni_set_hash(device_t, uint64_t);
static int dpaa2_ni_set_dist_key(device_t, enum dpaa2_ni_dist_mode, uint64_t);

/* Frame descriptor routines */
static int dpaa2_ni_build_fd(struct dpaa2_ni_softc *, struct dpaa2_ni_tx_ring *,
    struct dpaa2_buf *, bus_dma_segment_t *, int, struct dpaa2_fd *);
static int dpaa2_ni_fd_err(struct dpaa2_fd *);
static uint32_t dpaa2_ni_fd_data_len(struct dpaa2_fd *);
static int dpaa2_ni_fd_format(struct dpaa2_fd *);
static bool dpaa2_ni_fd_short_len(struct dpaa2_fd *);
static int dpaa2_ni_fd_offset(struct dpaa2_fd *);

/* Various subroutines */
static int dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *, uint16_t, uint16_t);
static int dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *, uint8_t *);

/* Network interface routines */
static void dpaa2_ni_init(void *);
static int  dpaa2_ni_transmit(if_t, struct mbuf *);
static void dpaa2_ni_qflush(if_t);
static int  dpaa2_ni_ioctl(if_t, u_long, caddr_t);
static int  dpaa2_ni_update_mac_filters(if_t);
static u_int dpaa2_ni_add_maddr(void *, struct sockaddr_dl *, u_int);

/* Interrupt handlers */
static void dpaa2_ni_intr(void *);

/* MII handlers */
static void dpaa2_ni_miibus_statchg(device_t);
static int  dpaa2_ni_media_change(if_t);
static void dpaa2_ni_media_status(if_t, struct ifmediareq *);
static void dpaa2_ni_media_tick(void *);

/* Tx/Rx routines. */
static int dpaa2_ni_rx_cleanup(struct dpaa2_channel *);
static int dpaa2_ni_tx_cleanup(struct dpaa2_channel *);
static void dpaa2_ni_tx(struct dpaa2_ni_softc *, struct dpaa2_channel *,
    struct dpaa2_ni_tx_ring *, struct mbuf *);
static void dpaa2_ni_cleanup_task(void *, int);

/* Tx/Rx subroutines */
static int dpaa2_ni_consume_frames(struct dpaa2_channel *, struct dpaa2_ni_fq **,
    uint32_t *);
static int dpaa2_ni_rx(struct dpaa2_channel *, struct dpaa2_ni_fq *,
    struct dpaa2_fd *, struct dpaa2_ni_rx_ctx *);
static int dpaa2_ni_rx_err(struct dpaa2_channel *, struct dpaa2_ni_fq *,
    struct dpaa2_fd *);
static int dpaa2_ni_tx_conf(struct dpaa2_channel *, struct dpaa2_ni_fq *,
    struct dpaa2_fd *);

/* sysctl(9) */
static int dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS);
static int dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS);
static int dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS);

static int
dpaa2_ni_probe(device_t dev)
{
	/* The DPNI device is added by the parent resource container itself. */
	device_set_desc(dev, "DPAA2 Network Interface");
	return (BUS_PROBE_DEFAULT);
}

static int
dpaa2_ni_attach(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	device_t mcp_dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *mcp_dinfo;
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	if_t ifp;
	char tq_name[32];
	int error;

	sc->dev = dev;
	sc->ifp = NULL;
	sc->miibus = NULL;
	sc->mii = NULL;
	sc->media_status = 0;
	sc->if_flags = 0;
	sc->link_state = LINK_STATE_UNKNOWN;
	sc->buf_align = 0;

	/* For debug purposes only! */
	sc->rx_anomaly_frames = 0;
	sc->rx_single_buf_frames = 0;
	sc->rx_sg_buf_frames = 0;
	sc->rx_enq_rej_frames = 0;
	sc->rx_ieoi_err_frames = 0;
	sc->tx_single_buf_frames = 0;
	sc->tx_sg_frames = 0;

	DPAA2_ATOMIC_XCHG(&sc->buf_num, 0);
	DPAA2_ATOMIC_XCHG(&sc->buf_free, 0);

	sc->rxd_dmat = NULL;
	sc->qos_dmat = NULL;

	sc->qos_kcfg.dmap = NULL;
	sc->qos_kcfg.paddr = 0;
	sc->qos_kcfg.vaddr = NULL;

	sc->rxd_kcfg.dmap = NULL;
	sc->rxd_kcfg.paddr = 0;
	sc->rxd_kcfg.vaddr = NULL;

	sc->mac.dpmac_id = 0;
	sc->mac.phy_dev = NULL;
	memset(sc->mac.addr, 0, ETHER_ADDR_LEN);

	error = bus_alloc_resources(sc->dev, dpaa2_ni_spec, sc->res);
	if (error) {
		device_printf(dev, "%s: failed to allocate resources: "
		    "error=%d\n", __func__, error);
		goto err_exit;
	}

	/* Obtain MC portal. */
	mcp_dev = (device_t) rman_get_start(sc->res[DPAA2_NI_MCP_RID(0)]);
	mcp_dinfo = device_get_ivars(mcp_dev);
	dinfo->portal = mcp_dinfo->portal;

	mtx_init(&sc->lock, device_get_nameunit(dev), "dpaa2_ni", MTX_DEF);

	/* Allocate network interface */
	ifp = if_alloc(IFT_ETHER);
	sc->ifp = ifp;
	if_initname(ifp, DPAA2_NI_IFNAME, device_get_unit(sc->dev));

	if_setsoftc(ifp, sc);
	if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST);
	if_setinitfn(ifp, dpaa2_ni_init);
	if_setioctlfn(ifp, dpaa2_ni_ioctl);
	if_settransmitfn(ifp, dpaa2_ni_transmit);
	if_setqflushfn(ifp, dpaa2_ni_qflush);

	if_setcapabilities(ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM |
	    IFCAP_HWCSUM_IPV6 | IFCAP_JUMBO_MTU);
	if_setcapenable(ifp, if_getcapabilities(ifp));

	DPAA2_CMD_INIT(&cmd);

	/* Open resource container and network interface object. */
	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	bzero(tq_name, sizeof(tq_name));
	snprintf(tq_name, sizeof(tq_name), "%s_tqbp", device_get_nameunit(dev));

	/*
	 * XXX-DSL: Release new buffers on Buffer Pool State Change
	 *	    Notification (BPSCN) returned as a result of the VDQ
	 *	    command instead. It is similar to CDAN processed in
	 *	    dpaa2_io_intr().
	 */
	/* Create a taskqueue thread to release new buffers to the pool. */
	sc->bp_taskq = taskqueue_create(tq_name, M_WAITOK,
	    taskqueue_thread_enqueue, &sc->bp_taskq);
	taskqueue_start_threads(&sc->bp_taskq, 1, PI_NET, "%s", tq_name);

	/* sc->cleanup_taskq = taskqueue_create("dpaa2_ch cleanup", M_WAITOK, */
	/*     taskqueue_thread_enqueue, &sc->cleanup_taskq); */
	/* taskqueue_start_threads(&sc->cleanup_taskq, 1, PI_NET, */
	/*     "dpaa2_ch cleanup"); */

	error = dpaa2_ni_setup(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup DPNI: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_channels(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup QBMan channels: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	error = dpaa2_ni_bind(dev);
	if (error) {
		device_printf(dev, "%s: failed to bind DPNI: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_irqs(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup IRQs: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_sysctls(sc);
	if (error) {
		device_printf(dev, "%s: failed to setup sysctls: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_if_caps(sc);
	if (error) {
		device_printf(dev, "%s: failed to setup interface capabilities: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	ether_ifattach(sc->ifp, sc->mac.addr);
	callout_init(&sc->mii_callout, 0);

	return (0);

close_ni:
	DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (ENXIO);
}

static void
dpaa2_ni_fixed_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);

	DPNI_LOCK(sc);
	ifmr->ifm_count = 0;
	ifmr->ifm_mask = 0;
	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifmr->ifm_current = ifmr->ifm_active =
	    sc->fixed_ifmedia.ifm_cur->ifm_media;

	/*
	 * In non-PHY use cases, we need to signal link state up; otherwise
	 * certain things that require a link event (e.g. an async DHCP client
	 * run from devd) do not happen.
	 */
	if (if_getlinkstate(ifp) == LINK_STATE_UNKNOWN) {
		if_link_state_change(ifp, LINK_STATE_UP);
	}

	/*
	 * TODO: Check the status of the link partner (DPMAC, DPNI or other)
	 *	 and reset if down. This is different from the
	 *	 DPAA2_MAC_LINK_TYPE_PHY case, as the MC firmware sets the
	 *	 status, instead of us telling the MC what it is.
	 */
	DPNI_UNLOCK(sc);

	return;
}

static void
dpaa2_ni_setup_fixed_link(struct dpaa2_ni_softc *sc)
{
	/*
	 * FIXME: When the DPNI is connected to a DPMAC, we can get the
	 * 'apparent' speed from it.
	 */
	sc->fixed_link = true;

	ifmedia_init(&sc->fixed_ifmedia, 0, dpaa2_ni_media_change,
	    dpaa2_ni_fixed_media_status);
	ifmedia_add(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL);
	ifmedia_set(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T);
}

static int
dpaa2_ni_detach(device_t dev)
{
	/* TBD */
	return (0);
}

/**
 * @brief Configure DPAA2 network interface object.
 */
static int
dpaa2_ni_setup(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_ep_desc ep1_desc, ep2_desc; /* endpoint descriptors */
	struct dpaa2_cmd cmd;
	uint8_t eth_bca[ETHER_ADDR_LEN]; /* broadcast physical address */
	uint16_t rc_token, ni_token, mac_token;
	struct dpaa2_mac_attr attr;
	enum dpaa2_mac_link_type link_type;
	uint32_t link;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Check if we can work with this DPNI object. */
	error = DPAA2_CMD_NI_GET_API_VERSION(dev, child, &cmd, &sc->api_major,
	    &sc->api_minor);
	if (error) {
		device_printf(dev, "%s: failed to get DPNI API version\n",
		    __func__);
		goto close_ni;
	}
	if (dpaa2_ni_cmp_api_version(sc, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
		device_printf(dev, "%s: DPNI API version %u.%u not supported, "
		    "need >= %u.%u\n", __func__, sc->api_major, sc->api_minor,
		    DPNI_VER_MAJOR, DPNI_VER_MINOR);
		error = ENODEV;
		goto close_ni;
	}

	/* Reset the DPNI object. */
	error = DPAA2_CMD_NI_RESET(dev, child, &cmd);
	if (error) {
		device_printf(dev, "%s: failed to reset DPNI: id=%d\n",
		    __func__, dinfo->id);
		goto close_ni;
	}

	/* Obtain attributes of the DPNI object. */
	error = DPAA2_CMD_NI_GET_ATTRIBUTES(dev, child, &cmd, &sc->attr);
	if (error) {
		device_printf(dev, "%s: failed to obtain DPNI attributes: "
		    "id=%d\n", __func__, dinfo->id);
		goto close_ni;
	}
	if (bootverbose) {
		device_printf(dev, "\toptions=%#x queues=%d tx_channels=%d "
		    "wriop_version=%#x\n", sc->attr.options, sc->attr.num.queues,
		    sc->attr.num.channels, sc->attr.wriop_ver);
		device_printf(dev, "\ttraffic classes: rx=%d tx=%d "
		    "cgs_groups=%d\n", sc->attr.num.rx_tcs, sc->attr.num.tx_tcs,
		    sc->attr.num.cgs);
		device_printf(dev, "\ttable entries: mac=%d vlan=%d qos=%d "
		    "fs=%d\n", sc->attr.entries.mac, sc->attr.entries.vlan,
		    sc->attr.entries.qos, sc->attr.entries.fs);
		device_printf(dev, "\tkey sizes: qos=%d fs=%d\n",
		    sc->attr.key_size.qos, sc->attr.key_size.fs);
	}

	/* Configure buffer layouts of the DPNI queues. */
	error = dpaa2_ni_set_buf_layout(dev);
	if (error) {
		device_printf(dev, "%s: failed to configure buffer layout\n",
		    __func__);
		goto close_ni;
	}

	/* Configure DMA resources. */
	error = dpaa2_ni_setup_dma(sc);
	if (error) {
		device_printf(dev, "%s: failed to setup DMA\n", __func__);
		goto close_ni;
	}

	/* Setup link between DPNI and an object it's connected to. */
	ep1_desc.obj_id = dinfo->id;
	ep1_desc.if_id = 0; /* DPNI has the only endpoint */
	ep1_desc.type = dinfo->dtype;

	error = DPAA2_CMD_RC_GET_CONN(dev, child, DPAA2_CMD_TK(&cmd, rc_token),
	    &ep1_desc, &ep2_desc, &link);
	if (error) {
		device_printf(dev, "%s: failed to obtain an object DPNI is "
		    "connected to: error=%d\n", __func__, error);
	} else {
		device_printf(dev, "connected to %s (id=%d)\n",
		    dpaa2_ttos(ep2_desc.type), ep2_desc.obj_id);

		error = dpaa2_ni_set_mac_addr(dev);
		if (error) {
			device_printf(dev, "%s: failed to set MAC address: "
			    "error=%d\n", __func__, error);
		}

		if (ep2_desc.type == DPAA2_DEV_MAC) {
			/*
			 * This is the simplest case when DPNI is connected to
			 * DPMAC directly.
			 */
			sc->mac.dpmac_id = ep2_desc.obj_id;

			link_type = DPAA2_MAC_LINK_TYPE_NONE;

			/*
			 * Need to determine if DPMAC type is PHY (attached to
			 * conventional MII PHY) or FIXED (usually SFP/SerDes,
			 * link state managed by MC firmware).
			 */
			error = DPAA2_CMD_MAC_OPEN(sc->dev, child,
			    DPAA2_CMD_TK(&cmd, rc_token), sc->mac.dpmac_id,
			    &mac_token);
			/*
			 * Under VFIO, the DPMAC might be sitting in another
			 * container (DPRC) we don't have access to. Assume
			 * DPAA2_MAC_LINK_TYPE_FIXED if this is the case.
			 */
			if (error) {
				device_printf(dev, "%s: failed to open "
				    "connected DPMAC: %d (assuming in other "
				    "DPRC)\n", __func__, sc->mac.dpmac_id);
				link_type = DPAA2_MAC_LINK_TYPE_FIXED;
			} else {
				error = DPAA2_CMD_MAC_GET_ATTRIBUTES(dev, child,
				    &cmd, &attr);
				if (error) {
					device_printf(dev, "%s: failed to get "
					    "DPMAC attributes: id=%d, "
					    "error=%d\n", __func__, dinfo->id,
					    error);
				} else {
					link_type = attr.link_type;
				}
			}
			DPAA2_CMD_MAC_CLOSE(dev, child, &cmd);

			if (link_type == DPAA2_MAC_LINK_TYPE_FIXED) {
				device_printf(dev, "connected DPMAC is in FIXED "
				    "mode\n");
				dpaa2_ni_setup_fixed_link(sc);
			} else if (link_type == DPAA2_MAC_LINK_TYPE_PHY) {
				device_printf(dev, "connected DPMAC is in PHY "
				    "mode\n");
				error = DPAA2_MC_GET_PHY_DEV(dev,
				    &sc->mac.phy_dev, sc->mac.dpmac_id);
				if (error == 0) {
					error = MEMAC_MDIO_SET_NI_DEV(
					    sc->mac.phy_dev, dev);
					if (error != 0) {
						device_printf(dev, "%s: failed "
						    "to set dpni dev on memac "
						    "mdio dev %s: error=%d\n",
						    __func__,
						    device_get_nameunit(
						    sc->mac.phy_dev), error);
					}
				}
				if (error == 0) {
					error = MEMAC_MDIO_GET_PHY_LOC(
					    sc->mac.phy_dev, &sc->mac.phy_loc);
					if (error == ENODEV) {
						error = 0;
					}
					if (error != 0) {
						device_printf(dev, "%s: failed "
						    "to get phy location from "
						    "memac mdio dev %s: error=%d\n",
						    __func__, device_get_nameunit(
						    sc->mac.phy_dev), error);
					}
				}
				if (error == 0) {
					error = mii_attach(sc->mac.phy_dev,
					    &sc->miibus, sc->ifp,
					    dpaa2_ni_media_change,
					    dpaa2_ni_media_status,
					    BMSR_DEFCAPMASK, sc->mac.phy_loc,
					    MII_OFFSET_ANY, 0);
					if (error != 0) {
						device_printf(dev, "%s: failed "
						    "to attach to miibus: "
						    "error=%d\n",
						    __func__, error);
					}
				}
				if (error == 0) {
					sc->mii = device_get_softc(sc->miibus);
				}
			} else {
				device_printf(dev, "%s: DPMAC link type is not "
				    "supported\n", __func__);
			}
		} else if (ep2_desc.type == DPAA2_DEV_NI ||
		    ep2_desc.type == DPAA2_DEV_MUX ||
		    ep2_desc.type == DPAA2_DEV_SW) {
			dpaa2_ni_setup_fixed_link(sc);
		}
	}

	/* Select mode to enqueue frames. */
	/* ... TBD ... */

	/*
	 * Update link configuration to enable Rx/Tx pause frames support.
	 *
	 * NOTE: MC may generate an interrupt to the DPMAC and request changes
	 *	 in link configuration. It might be necessary to attach miibus
	 *	 and PHY before this point.
	 */
	error = dpaa2_ni_set_pause_frame(dev);
	if (error) {
		device_printf(dev, "%s: failed to configure Rx/Tx pause "
		    "frames\n", __func__);
		goto close_ni;
	}

	/* Configure ingress traffic classification. */
	error = dpaa2_ni_set_qos_table(dev);
	if (error) {
		device_printf(dev, "%s: failed to configure QoS table: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	/* Add broadcast physical address to the MAC filtering table. */
	memset(eth_bca, 0xff, ETHER_ADDR_LEN);
	error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, DPAA2_CMD_TK(&cmd,
	    ni_token), eth_bca);
	if (error) {
		device_printf(dev, "%s: failed to add broadcast physical "
		    "address to the MAC filtering table\n", __func__);
		goto close_ni;
	}

	/* Set the maximum allowed length for received frames. */
	error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd, DPAA2_ETH_MFL);
	if (error) {
		device_printf(dev, "%s: failed to set maximum length for "
		    "received frames\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

/**
 * @brief Configure QBMan channels and register data availability notifications.
 */
static int
dpaa2_ni_setup_channels(device_t dev)
{
	device_t iodev, condev, bpdev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	uint32_t i, num_chan;
	int error;

	/* Calculate the number of channels based on the allocated resources */
	for (i = 0; i < DPAA2_NI_IO_RES_NUM; i++) {
		if (!sc->res[DPAA2_NI_IO_RID(i)]) {
			break;
		}
	}
	num_chan = i;
	for (i = 0; i < DPAA2_NI_CON_RES_NUM; i++) {
		if (!sc->res[DPAA2_NI_CON_RID(i)]) {
			break;
		}
	}
	num_chan = i < num_chan ? i : num_chan;
	sc->chan_n = num_chan > DPAA2_MAX_CHANNELS
	    ? DPAA2_MAX_CHANNELS : num_chan;
	sc->chan_n = sc->chan_n > sc->attr.num.queues
	    ? sc->attr.num.queues : sc->chan_n;
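	/*
	 * I.e. chan_n = min(#DPIOs, #DPCONs, DPAA2_MAX_CHANNELS, #DPNI queues).
	 */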

	KASSERT(sc->chan_n > 0u, ("%s: positive number of channels expected: "
	    "chan_n=%d", __func__, sc->chan_n));

	device_printf(dev, "channels=%d\n", sc->chan_n);

	for (i = 0; i < sc->chan_n; i++) {
		iodev = (device_t)rman_get_start(sc->res[DPAA2_NI_IO_RID(i)]);
		condev = (device_t)rman_get_start(sc->res[DPAA2_NI_CON_RID(i)]);
		/* Only one buffer pool available at the moment */
		bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);

		error = dpaa2_chan_setup(dev, iodev, condev, bpdev,
		    &sc->channels[i], i, dpaa2_ni_cleanup_task);
		if (error != 0) {
			device_printf(dev, "%s: dpaa2_chan_setup() failed: "
			    "error=%d, chan_id=%d\n", __func__, error, i);
			return (error);
		}
	}

	/* There is exactly one Rx error queue per network interface */
	error = dpaa2_chan_setup_fq(dev, sc->channels[0], DPAA2_NI_QUEUE_RX_ERR);
	if (error != 0) {
		device_printf(dev, "%s: failed to prepare RxError queue: "
		    "error=%d\n", __func__, error);
		return (error);
	}

	return (0);
}

/**
 * @brief Bind DPNI to DPBPs, DPIOs, frame queues and channels.
 */
static int
dpaa2_ni_bind(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	device_t bp_dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *bp_info;
	struct dpaa2_cmd cmd;
	struct dpaa2_ni_pools_cfg pools_cfg;
	struct dpaa2_ni_err_cfg err_cfg;
	struct dpaa2_channel *chan;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Select buffer pool (only one available at the moment). */
	bp_dev = (device_t) rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
	bp_info = device_get_ivars(bp_dev);

	/* Configure buffers pool. */
	pools_cfg.pools_num = 1;
	pools_cfg.pools[0].bp_obj_id = bp_info->id;
	pools_cfg.pools[0].backup_flag = 0;
	pools_cfg.pools[0].buf_sz = sc->buf_sz;
	error = DPAA2_CMD_NI_SET_POOLS(dev, child, &cmd, &pools_cfg);
	if (error) {
		device_printf(dev, "%s: failed to set buffer pools\n", __func__);
		goto close_ni;
	}

	/* Setup ingress traffic distribution. */
	error = dpaa2_ni_setup_rx_dist(dev);
	if (error && error != EOPNOTSUPP) {
		device_printf(dev, "%s: failed to setup ingress traffic "
		    "distribution\n", __func__);
		goto close_ni;
	}
	if (bootverbose && error == EOPNOTSUPP) {
		device_printf(dev, "Ingress traffic distribution not "
		    "supported\n");
	}

	/* Configure handling of error frames. */
	err_cfg.err_mask = DPAA2_NI_FAS_RX_ERR_MASK;
	err_cfg.set_err_fas = false;
	err_cfg.action = DPAA2_NI_ERR_DISCARD;
	error = DPAA2_CMD_NI_SET_ERR_BEHAVIOR(dev, child, &cmd, &err_cfg);
	if (error) {
		device_printf(dev, "%s: failed to set errors behavior\n",
		    __func__);
		goto close_ni;
	}

	/* Configure channel queues to generate CDANs. */
	for (uint32_t i = 0; i < sc->chan_n; i++) {
		chan = sc->channels[i];

		/* Setup Rx flows. */
		for (uint32_t j = 0; j < chan->rxq_n; j++) {
			error = dpaa2_ni_setup_rx_flow(dev, &chan->rx_queues[j]);
			if (error) {
				device_printf(dev, "%s: failed to setup Rx "
				    "flow: error=%d\n", __func__, error);
				goto close_ni;
			}
		}

		/* Setup Tx flow. */
		error = dpaa2_ni_setup_tx_flow(dev, &chan->txc_queue);
		if (error) {
			device_printf(dev, "%s: failed to setup Tx "
			    "flow: error=%d\n", __func__, error);
			goto close_ni;
		}
	}

	/* Configure RxError queue to generate CDAN. */
	error = dpaa2_ni_setup_rx_err_flow(dev, &sc->rxe_queue);
	if (error) {
		device_printf(dev, "%s: failed to setup RxError flow: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	/*
	 * Get the Queuing Destination ID (QDID) that should be used for frame
	 * enqueue operations.
	 */
	error = DPAA2_CMD_NI_GET_QDID(dev, child, &cmd, DPAA2_NI_QUEUE_TX,
	    &sc->tx_qdid);
	if (error) {
		device_printf(dev, "%s: failed to get Tx queuing destination "
		    "ID\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

/**
 * @brief Setup ingress traffic distribution.
 *
 * NOTE: Ingress traffic distribution is valid only if the DPNI_OPT_NO_FS
 *	 option hasn't been set for the DPNI and the number of DPNI queues is
 *	 greater than 1.
 */
static int
dpaa2_ni_setup_rx_dist(device_t dev)
{
	/*
	 * Have the interface implicitly distribute traffic based on the default
	 * hash key.
	 */
	return (dpaa2_ni_set_hash(dev, DPAA2_RXH_DEFAULT));
}

static int
dpaa2_ni_setup_rx_flow(device_t dev, struct dpaa2_ni_fq *fq)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *con_info;
	struct dpaa2_cmd cmd;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Obtain DPCON associated with the FQ's channel. */
	con_info = device_get_ivars(fq->chan->con_dev);

	queue_cfg.type = DPAA2_NI_QUEUE_RX;
	queue_cfg.tc = fq->tc;
	queue_cfg.idx = fq->flowid;
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain Rx queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	fq->fqid = queue_cfg.fqid;

	queue_cfg.dest_id = con_info->id;
	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
	queue_cfg.priority = 1;
	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
	queue_cfg.options =
	    DPAA2_NI_QUEUE_OPT_USER_CTX |
	    DPAA2_NI_QUEUE_OPT_DEST;
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to update Rx queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	if (bootverbose) {
		device_printf(dev, "RX queue idx=%d, tc=%d, chan=%d, fqid=%d, "
		    "user_ctx=%#jx\n", fq->flowid, fq->tc, fq->chan->id,
		    fq->fqid, (uint64_t) fq);
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, &cmd);
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

static int
dpaa2_ni_setup_tx_flow(device_t dev, struct dpaa2_ni_fq *fq)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_channel *ch = fq->chan;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *con_info;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	struct dpaa2_ni_tx_ring *tx;
	struct dpaa2_buf *buf;
	struct dpaa2_cmd cmd;
	uint32_t tx_rings_n = 0;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Obtain DPCON associated with the FQ's channel. */
	con_info = device_get_ivars(fq->chan->con_dev);

	KASSERT(sc->attr.num.tx_tcs <= DPAA2_MAX_TCS,
	    ("%s: too many Tx traffic classes: tx_tcs=%d\n", __func__,
	    sc->attr.num.tx_tcs));
	KASSERT(DPAA2_NI_BUFS_PER_TX <= DPAA2_NI_MAX_BPTX,
	    ("%s: too many Tx buffers (%d): max=%d\n", __func__,
	    DPAA2_NI_BUFS_PER_TX, DPAA2_NI_MAX_BPTX));

	/* Setup Tx rings. */
	for (int i = 0; i < sc->attr.num.tx_tcs; i++) {
		queue_cfg.type = DPAA2_NI_QUEUE_TX;
		queue_cfg.tc = i;
		queue_cfg.idx = fq->flowid;
		queue_cfg.chan_id = fq->chan->id;

		error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
		if (error) {
			device_printf(dev, "%s: failed to obtain Tx queue "
			    "configuration: tc=%d, flowid=%d\n", __func__,
			    queue_cfg.tc, queue_cfg.idx);
			goto close_ni;
		}

		tx = &fq->tx_rings[i];
		tx->fq = fq;
		tx->fqid = queue_cfg.fqid;
		tx->txid = tx_rings_n;

		if (bootverbose) {
			device_printf(dev, "TX queue idx=%d, tc=%d, chan=%d, "
			    "fqid=%d\n", fq->flowid, i, fq->chan->id,
			    queue_cfg.fqid);
		}

		mtx_init(&tx->lock, "dpaa2_tx_ring", NULL, MTX_DEF);

		/* Allocate Tx ring buffer. */
		tx->br = buf_ring_alloc(DPAA2_TX_BUFRING_SZ, M_DEVBUF, M_NOWAIT,
		    &tx->lock);
		if (tx->br == NULL) {
			device_printf(dev, "%s: failed to setup Tx ring buffer"
			    " (2) fqid=%d\n", __func__, tx->fqid);
			goto close_ni;
		}

		/* Configure Tx buffers */
		for (uint64_t j = 0; j < DPAA2_NI_BUFS_PER_TX; j++) {
			buf = malloc(sizeof(struct dpaa2_buf), M_DPAA2_TXB,
			    M_WAITOK);
			/* Keep DMA tag and Tx ring linked to the buffer */
			DPAA2_BUF_INIT_TAGOPT(buf, ch->tx_dmat, tx);

			buf->sgt = malloc(sizeof(struct dpaa2_buf), M_DPAA2_TXB,
			    M_WAITOK);
			/* Link SGT to DMA tag and back to its Tx buffer */
			DPAA2_BUF_INIT_TAGOPT(buf->sgt, ch->sgt_dmat, buf);

			error = dpaa2_buf_seed_txb(dev, buf);

			/* Add Tx buffer to the ring */
			buf_ring_enqueue(tx->br, buf);
		}

		tx_rings_n++;
	}

	/* All Tx queues which belong to the same flowid have the same qdbin. */
	fq->tx_qdbin = queue_cfg.qdbin;

	queue_cfg.type = DPAA2_NI_QUEUE_TX_CONF;
	queue_cfg.tc = 0; /* ignored for TxConf queue */
	queue_cfg.idx = fq->flowid;
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain TxConf queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	fq->fqid = queue_cfg.fqid;

	queue_cfg.dest_id = con_info->id;
	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
	queue_cfg.priority = 0;
	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
	queue_cfg.options =
	    DPAA2_NI_QUEUE_OPT_USER_CTX |
	    DPAA2_NI_QUEUE_OPT_DEST;
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to update TxConf queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

static int
dpaa2_ni_setup_rx_err_flow(device_t dev, struct dpaa2_ni_fq *fq)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *con_info;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Obtain DPCON associated with the FQ's channel. */
	con_info = device_get_ivars(fq->chan->con_dev);

	queue_cfg.type = DPAA2_NI_QUEUE_RX_ERR;
	queue_cfg.tc = fq->tc; /* ignored */
	queue_cfg.idx = fq->flowid; /* ignored */
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain RxErr queue "
		    "configuration\n", __func__);
		goto close_ni;
	}

	fq->fqid = queue_cfg.fqid;

	queue_cfg.dest_id = con_info->id;
	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
	queue_cfg.priority = 1;
	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
	queue_cfg.options =
	    DPAA2_NI_QUEUE_OPT_USER_CTX |
	    DPAA2_NI_QUEUE_OPT_DEST;
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to update RxErr queue "
		    "configuration\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

/**
 * @brief Configure DPNI object to generate interrupts.
 */
static int
dpaa2_ni_setup_irqs(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Configure IRQs. */
	error = dpaa2_ni_setup_msi(sc);
	if (error) {
		device_printf(dev, "%s: failed to allocate MSI\n", __func__);
		goto close_ni;
	}
	if ((sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) {
		device_printf(dev, "%s: failed to allocate IRQ resource\n",
		    __func__);
		goto close_ni;
	}
	if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, dpaa2_ni_intr, sc, &sc->intr)) {
		device_printf(dev, "%s: failed to setup IRQ resource\n",
		    __func__);
		goto close_ni;
	}

	error = DPAA2_CMD_NI_SET_IRQ_MASK(dev, child, &cmd, DPNI_IRQ_INDEX,
	    DPNI_IRQ_LINK_CHANGED | DPNI_IRQ_EP_CHANGED);
	if (error) {
		device_printf(dev, "%s: failed to set DPNI IRQ mask\n",
		    __func__);
		goto close_ni;
	}

	error = DPAA2_CMD_NI_SET_IRQ_ENABLE(dev, child, &cmd, DPNI_IRQ_INDEX,
	    true);
	if (error) {
		device_printf(dev, "%s: failed to enable DPNI IRQ\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

/**
 * @brief Allocate MSI interrupts for DPNI.
 */
static int
dpaa2_ni_setup_msi(struct dpaa2_ni_softc *sc)
{
	int val;

	val = pci_msi_count(sc->dev);
	if (val < DPAA2_NI_MSI_COUNT)
		device_printf(sc->dev, "MSI: actual=%d, expected=%d\n", val,
		    DPAA2_NI_MSI_COUNT);
	val = MIN(val, DPAA2_NI_MSI_COUNT);

	if (pci_alloc_msi(sc->dev, &val) != 0)
		return (EINVAL);

	for (int i = 0; i < val; i++)
		sc->irq_rid[i] = i + 1;

	return (0);
}

/**
 * @brief Update DPNI according to the updated interface capabilities.
 */
static int
dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *sc)
{
	bool en_rxcsum, en_txcsum;
	device_t pdev = device_get_parent(sc->dev);
	device_t dev = sc->dev;
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	/*
	 * XXX-DSL: DPAA2 allows validating L3/L4 checksums on reception and/or
	 *	    generating L3/L4 checksums on transmission without
	 *	    differentiating between IPv4/v6, i.e. enable for both
	 *	    protocols if requested.
	 */
1597 en_rxcsum = if_getcapenable(sc->ifp) &
1598 (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
1599 en_txcsum = if_getcapenable(sc->ifp) &
1600 (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6);
1601
1602 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1603 if (error) {
1604 device_printf(dev, "%s: failed to open resource container: "
1605 "id=%d, error=%d\n", __func__, rcinfo->id, error);
1606 goto err_exit;
1607 }
1608 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1609 if (error) {
1610 device_printf(dev, "%s: failed to open network interface: "
1611 "id=%d, error=%d\n", __func__, dinfo->id, error);
1612 goto close_rc;
1613 }
1614
1615 /* Setup checksums validation. */
1616 error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1617 DPAA2_NI_OFL_RX_L3_CSUM, en_rxcsum);
1618 if (error) {
1619 device_printf(dev, "%s: failed to %s L3 checksum validation\n",
1620 __func__, en_rxcsum ? "enable" : "disable");
1621 goto close_ni;
1622 }
1623 error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1624 DPAA2_NI_OFL_RX_L4_CSUM, en_rxcsum);
1625 if (error) {
1626 device_printf(dev, "%s: failed to %s L4 checksum validation\n",
1627 __func__, en_rxcsum ? "enable" : "disable");
1628 goto close_ni;
1629 }
1630
1631 /* Setup checksums generation. */
1632 error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1633 DPAA2_NI_OFL_TX_L3_CSUM, en_txcsum);
1634 if (error) {
1635 device_printf(dev, "%s: failed to %s L3 checksum generation\n",
1636 __func__, en_txcsum ? "enable" : "disable");
1637 goto close_ni;
1638 }
1639 error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1640 DPAA2_NI_OFL_TX_L4_CSUM, en_txcsum);
1641 if (error) {
1642 device_printf(dev, "%s: failed to %s L4 checksum generation\n",
1643 __func__, en_txcsum ? "enable" : "disable");
1644 goto close_ni;
1645 }
1646
1647 if (bootverbose) {
1648 device_printf(dev, "%s: L3/L4 checksum validation %s\n",
1649 __func__, en_rxcsum ? "enabled" : "disabled");
1650 device_printf(dev, "%s: L3/L4 checksum generation %s\n",
1651 __func__, en_txcsum ? "enabled" : "disabled");
1652 }
1653
1654 (void)DPAA2_CMD_NI_CLOSE(dev, child, &cmd);
1655 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1656 return (0);
1657
1658 close_ni:
1659 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1660 close_rc:
1661 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1662 err_exit:
1663 return (error);
1664 }
1665
1666 /**
1667 * @brief Update DPNI according to the updated interface flags.
1668 */
1669 static int
1670 dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *sc)
1671 {
1672 const bool en_promisc = if_getflags(sc->ifp) & IFF_PROMISC;
1673 const bool en_allmulti = if_getflags(sc->ifp) & IFF_ALLMULTI;
1674 device_t pdev = device_get_parent(sc->dev);
1675 device_t dev = sc->dev;
1676 device_t child = dev;
1677 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1678 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1679 struct dpaa2_cmd cmd;
1680 uint16_t rc_token, ni_token;
1681 int error;
1682
1683 DPAA2_CMD_INIT(&cmd);
1684
1685 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1686 if (error) {
1687 device_printf(dev, "%s: failed to open resource container: "
1688 "id=%d, error=%d\n", __func__, rcinfo->id, error);
1689 goto err_exit;
1690 }
1691 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1692 if (error) {
1693 device_printf(dev, "%s: failed to open network interface: "
1694 "id=%d, error=%d\n", __func__, dinfo->id, error);
1695 goto close_rc;
1696 }
1697
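/*
 * Unicast promiscuous mode is treated as implying multicast
 * promiscuity as well: enable the latter whenever IFF_PROMISC or
 * IFF_ALLMULTI is set.
 */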
1698 error = DPAA2_CMD_NI_SET_MULTI_PROMISC(dev, child, &cmd,
1699 en_promisc ? true : en_allmulti);
1700 if (error) {
1701 device_printf(dev, "%s: failed to %s multicast promiscuous "
1702 "mode\n", __func__, en_allmulti ? "enable" : "disable");
1703 goto close_ni;
1704 }
1705
1706 error = DPAA2_CMD_NI_SET_UNI_PROMISC(dev, child, &cmd, en_promisc);
1707 if (error) {
1708 device_printf(dev, "%s: failed to %s unicast promiscuous mode\n",
1709 __func__, en_promisc ? "enable" : "disable");
1710 goto close_ni;
1711 }
1712
1713 (void)DPAA2_CMD_NI_CLOSE(dev, child, &cmd);
1714 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1715 return (0);
1716
1717 close_ni:
1718 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1719 close_rc:
1720 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1721 err_exit:
1722 return (error);
1723 }
1724
1725 static int
1726 dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *sc)
1727 {
1728 struct sysctl_ctx_list *ctx;
1729 struct sysctl_oid *node, *node2;
1730 struct sysctl_oid_list *parent, *parent2;
1731 char cbuf[128];
1732 int i;
1733
1734 ctx = device_get_sysctl_ctx(sc->dev);
1735 parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1736
1737 /* Add DPNI statistics. */
1738 node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
1739 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Statistics");
1740 parent = SYSCTL_CHILDREN(node);
1741 for (i = 0; i < DPAA2_NI_STAT_SYSCTLS; ++i) {
1742 SYSCTL_ADD_PROC(ctx, parent, i, dpni_stat_sysctls[i].name,
1743 CTLTYPE_U64 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_stats,
1744 "IU", dpni_stat_sysctls[i].desc);
1745 }
1746 SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_anomaly_frames",
1747 CTLFLAG_RD, &sc->rx_anomaly_frames,
1748 "Rx frames in the buffers outside of the buffer pools");
1749 SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_single_buf_frames",
1750 CTLFLAG_RD, &sc->rx_single_buf_frames,
1751 "Rx frames in single buffers");
1752 SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_sg_buf_frames",
1753 CTLFLAG_RD, &sc->rx_sg_buf_frames,
1754 "Rx frames in scatter/gather list");
1755 SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_enq_rej_frames",
1756 CTLFLAG_RD, &sc->rx_enq_rej_frames,
1757 "Enqueue rejected by QMan");
1758 SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_ieoi_err_frames",
1759 CTLFLAG_RD, &sc->rx_ieoi_err_frames,
1760 "QMan IEOI error");
1761 SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_single_buf_frames",
1762 CTLFLAG_RD, &sc->tx_single_buf_frames,
1763 "Tx single buffer frames");
1764 SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_sg_frames",
1765 CTLFLAG_RD, &sc->tx_sg_frames,
1766 "Tx S/G frames");
1767
1768 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_num",
1769 CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_num,
1770 "IU", "number of Rx buffers in the buffer pool");
1771 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_free",
1772 CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_free,
1773 "IU", "number of free Rx buffers in the buffer pool");
1774
1775 parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1776
1777 /* Add channels statistics. */
1778 node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "channels",
1779 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channels");
1780 parent = SYSCTL_CHILDREN(node);
1781 for (int i = 0; i < sc->chan_n; i++) {
1782 snprintf(cbuf, sizeof(cbuf), "%d", i);
1783
1784 node2 = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, cbuf,
1785 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channel");
1786 parent2 = SYSCTL_CHILDREN(node2);
1787
1788 SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_frames",
1789 CTLFLAG_RD, &sc->channels[i]->tx_frames,
1790 "Tx frames counter");
1791 SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_dropped",
1792 CTLFLAG_RD, &sc->channels[i]->tx_dropped,
1793 "Tx dropped counter");
1794 }
1795
1796 return (0);
1797 }
1798
1799 static int
1800 dpaa2_ni_setup_dma(struct dpaa2_ni_softc *sc)
1801 {
1802 device_t dev = sc->dev;
1803 int error;
1804
1805 KASSERT((sc->buf_align == BUF_ALIGN) || (sc->buf_align == BUF_ALIGN_V1),
1806 ("unexpected buffer alignment: %d\n", sc->buf_align));
1807
1808 /* DMA tag for Rx distribution key. */
1809 error = bus_dma_tag_create(
1810 bus_get_dma_tag(dev),
1811 PAGE_SIZE, 0, /* alignment, boundary */
1812 BUS_SPACE_MAXADDR, /* low restricted addr */
1813 BUS_SPACE_MAXADDR, /* high restricted addr */
1814 NULL, NULL, /* filter, filterarg */
1815 DPAA2_CLASSIFIER_DMA_SIZE, 1, /* maxsize, nsegments */
1816 DPAA2_CLASSIFIER_DMA_SIZE, 0, /* maxsegsize, flags */
1817 NULL, NULL, /* lockfunc, lockarg */
1818 &sc->rxd_dmat);
1819 if (error) {
1820 device_printf(dev, "%s: failed to create DMA tag for Rx "
1821 "distribution key\n", __func__);
1822 return (error);
1823 }
1824
1825 error = bus_dma_tag_create(
1826 bus_get_dma_tag(dev),
1827 PAGE_SIZE, 0, /* alignment, boundary */
1828 BUS_SPACE_MAXADDR, /* low restricted addr */
1829 BUS_SPACE_MAXADDR, /* high restricted addr */
1830 NULL, NULL, /* filter, filterarg */
1831 ETH_QOS_KCFG_BUF_SIZE, 1, /* maxsize, nsegments */
1832 ETH_QOS_KCFG_BUF_SIZE, 0, /* maxsegsize, flags */
1833 NULL, NULL, /* lockfunc, lockarg */
1834 &sc->qos_dmat);
1835 if (error) {
1836 device_printf(dev, "%s: failed to create DMA tag for QoS key\n",
1837 __func__);
1838 return (error);
1839 }
1840
1841 return (0);
1842 }
1843
1844 /**
1845 * @brief Configure buffer layouts of the different DPNI queues.
1846 */
1847 static int
1848 dpaa2_ni_set_buf_layout(device_t dev)
1849 {
1850 device_t pdev = device_get_parent(dev);
1851 device_t child = dev;
1852 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1853 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1854 struct dpaa2_ni_softc *sc = device_get_softc(dev);
1855 struct dpaa2_ni_buf_layout buf_layout = {0};
1856 struct dpaa2_cmd cmd;
1857 uint16_t rc_token, ni_token;
1858 int error;
1859
1860 DPAA2_CMD_INIT(&cmd);
1861
1862 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1863 if (error) {
1864 device_printf(dev, "%s: failed to open resource container: "
1865 "id=%d, error=%d\n", __func__, rcinfo->id, error);
1866 goto err_exit;
1867 }
1868 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1869 if (error) {
1870 device_printf(sc->dev, "%s: failed to open network interface: "
1871 "error=%d\n", __func__, dinfo->id, error);
1872 goto close_rc;
1873 }
1874
1875 /*
1876 * Select Rx/Tx buffer alignment. It's necessary to ensure that the
1877 * buffer size seen by WRIOP is a multiple of 64 or 256 bytes depending
1878 * on the WRIOP version.
1879 */
1880 sc->buf_align = (sc->attr.wriop_ver == WRIOP_VERSION(0, 0, 0) ||
1881 sc->attr.wriop_ver == WRIOP_VERSION(1, 0, 0))
1882 ? BUF_ALIGN_V1 : BUF_ALIGN;
1883
1884 /*
1885 * Round the buffer size down so that the size seen by WRIOP remains
1886 * a multiple of the alignment selected above.
1887 */
1888 sc->buf_sz = ALIGN_DOWN(DPAA2_RX_BUF_SIZE, sc->buf_align);
1889
1890 if (bootverbose) {
1891 device_printf(dev, "Rx/Tx buffers: size=%d, alignment=%d\n",
1892 sc->buf_sz, sc->buf_align);
1893 }
1894
1895 /*
1896 * Frame Descriptor Tx buffer layout
1897 *
1898 * ADDR -> |---------------------|
1899 * | SW FRAME ANNOTATION | BUF_SWA_SIZE bytes
1900 * |---------------------|
1901 * | HW FRAME ANNOTATION | BUF_TX_HWA_SIZE bytes
1902 * |---------------------|
1903 * | DATA HEADROOM |
1904 * ADDR + OFFSET -> |---------------------|
1905 * | |
1906 * | |
1907 * | FRAME DATA |
1908 * | |
1909 * | |
1910 * |---------------------|
1911 * | DATA TAILROOM |
1912 * |---------------------|
1913 *
1914 * NOTE: This layout applies to single-buffer frames only.
1915 */
1916 buf_layout.queue_type = DPAA2_NI_QUEUE_TX;
1917 buf_layout.pd_size = BUF_SWA_SIZE;
1918 buf_layout.pass_timestamp = true;
1919 buf_layout.pass_frame_status = true;
1920 buf_layout.options =
1921 BUF_LOPT_PRIV_DATA_SZ |
1922 BUF_LOPT_TIMESTAMP | /* requires 128 bytes in HWA */
1923 BUF_LOPT_FRAME_STATUS;
1924 error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
1925 if (error) {
1926 device_printf(dev, "%s: failed to set Tx buffer layout\n",
1927 __func__);
1928 goto close_ni;
1929 }
1930
1931 /* Tx-confirmation buffer layout */
1932 buf_layout.queue_type = DPAA2_NI_QUEUE_TX_CONF;
1933 buf_layout.options =
1934 BUF_LOPT_TIMESTAMP |
1935 BUF_LOPT_FRAME_STATUS;
1936 error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
1937 if (error) {
1938 device_printf(dev, "%s: failed to set TxConf buffer layout\n",
1939 __func__);
1940 goto close_ni;
1941 }
1942
1943 /*
1944 * Driver should reserve the amount of space indicated by this command
1945 * as headroom in all Tx frames.
1946 */
1947 error = DPAA2_CMD_NI_GET_TX_DATA_OFF(dev, child, &cmd, &sc->tx_data_off);
1948 if (error) {
1949 device_printf(dev, "%s: failed to obtain Tx data offset\n",
1950 __func__);
1951 goto close_ni;
1952 }
1953
1954 if (bootverbose) {
1955 device_printf(dev, "Tx data offset=%d\n", sc->tx_data_off);
1956 }
1957 if ((sc->tx_data_off % 64) != 0) {
1958 device_printf(dev, "Tx data offset (%d) is not a multiple "
1959 "of 64 bytes\n", sc->tx_data_off);
1960 }
1961
1962 /*
1963 * Frame Descriptor Rx buffer layout
1964 *
1965 * ADDR -> |---------------------|
1966 * | SW FRAME ANNOTATION | BUF_SWA_SIZE bytes
1967 * |---------------------|
1968 * | HW FRAME ANNOTATION | BUF_RX_HWA_SIZE bytes
1969 * |---------------------|
1970 * | DATA HEADROOM | OFFSET-BUF_RX_HWA_SIZE
1971 * ADDR + OFFSET -> |---------------------|
1972 * | |
1973 * | |
1974 * | FRAME DATA |
1975 * | |
1976 * | |
1977 * |---------------------|
1978 * | DATA TAILROOM | 0 bytes
1979 * |---------------------|
1980 *
1981 * NOTE: This layout applies to single-buffer frames only.
1982 */
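/*
 * NOTE: The Rx headroom below is derived from the Tx data offset so
 * that the resulting Rx data offset matches it, presumably to keep
 * Rx and Tx frames laid out identically.
 */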
1983 buf_layout.queue_type = DPAA2_NI_QUEUE_RX;
1984 buf_layout.pd_size = BUF_SWA_SIZE;
1985 buf_layout.fd_align = sc->buf_align;
1986 buf_layout.head_size = sc->tx_data_off - BUF_RX_HWA_SIZE - BUF_SWA_SIZE;
1987 buf_layout.tail_size = 0;
1988 buf_layout.pass_frame_status = true;
1989 buf_layout.pass_parser_result = true;
1990 buf_layout.pass_timestamp = true;
1991 buf_layout.options =
1992 BUF_LOPT_PRIV_DATA_SZ |
1993 BUF_LOPT_DATA_ALIGN |
1994 BUF_LOPT_DATA_HEAD_ROOM |
1995 BUF_LOPT_DATA_TAIL_ROOM |
1996 BUF_LOPT_FRAME_STATUS |
1997 BUF_LOPT_PARSER_RESULT |
1998 BUF_LOPT_TIMESTAMP;
1999 error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
2000 if (error) {
2001 device_printf(dev, "%s: failed to set Rx buffer layout\n",
2002 __func__);
2003 goto close_ni;
2004 }
2005
2006 error = 0;
2007 close_ni:
2008 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2009 close_rc:
2010 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2011 err_exit:
2012 return (error);
2013 }
2014
2015 /**
2016 * @brief Enable Rx/Tx pause frames.
2017 *
2018 * NOTE: With Rx pause enabled, the DPNI stops sending when it receives a
2019 * pause frame; with Tx pause enabled, the DPNI generates pause frames itself.
2020 */
2021 static int
2022 dpaa2_ni_set_pause_frame(device_t dev)
2023 {
2024 device_t pdev = device_get_parent(dev);
2025 device_t child = dev;
2026 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2027 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2028 struct dpaa2_ni_softc *sc = device_get_softc(dev);
2029 struct dpaa2_ni_link_cfg link_cfg = {0};
2030 struct dpaa2_cmd cmd;
2031 uint16_t rc_token, ni_token;
2032 int error;
2033
2034 DPAA2_CMD_INIT(&cmd);
2035
2036 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2037 if (error) {
2038 device_printf(dev, "%s: failed to open resource container: "
2039 "id=%d, error=%d\n", __func__, rcinfo->id, error);
2040 goto err_exit;
2041 }
2042 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2043 if (error) {
2044 device_printf(sc->dev, "%s: failed to open network interface: "
2045 "error=%d\n", __func__, dinfo->id, error);
2046 goto close_rc;
2047 }
2048
2049 error = DPAA2_CMD_NI_GET_LINK_CFG(dev, child, &cmd, &link_cfg);
2050 if (error) {
2051 device_printf(dev, "%s: failed to obtain link configuration: "
2052 "error=%d\n", __func__, error);
2053 goto close_ni;
2054 }
2055
2056 /* Enable both Rx and Tx pause frames by default. */
2057 link_cfg.options |= DPAA2_NI_LINK_OPT_PAUSE;
2058 link_cfg.options &= ~DPAA2_NI_LINK_OPT_ASYM_PAUSE;
2059
2060 error = DPAA2_CMD_NI_SET_LINK_CFG(dev, child, &cmd, &link_cfg);
2061 if (error) {
2062 device_printf(dev, "%s: failed to set link configuration: "
2063 "error=%d\n", __func__, error);
2064 goto close_ni;
2065 }
2066
2067 sc->link_options = link_cfg.options;
2068 error = 0;
2069 close_ni:
2070 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2071 close_rc:
2072 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2073 err_exit:
2074 return (error);
2075 }
2076
2077 /**
2078 * @brief Configure QoS table to determine the traffic class for the received
2079 * frame.
2080 */
2081 static int
2082 dpaa2_ni_set_qos_table(device_t dev)
2083 {
2084 device_t pdev = device_get_parent(dev);
2085 device_t child = dev;
2086 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2087 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2088 struct dpaa2_ni_softc *sc = device_get_softc(dev);
2089 struct dpaa2_ni_qos_table tbl;
2090 struct dpaa2_buf *buf = &sc->qos_kcfg;
2091 struct dpaa2_cmd cmd;
2092 uint16_t rc_token, ni_token;
2093 int error;
2094
2095 if (sc->attr.num.rx_tcs == 1 ||
2096 !(sc->attr.options & DPNI_OPT_HAS_KEY_MASKING)) {
2097 if (bootverbose) {
2098 device_printf(dev, "Ingress traffic classification is "
2099 "not supported\n");
2100 }
2101 return (0);
2102 }
2103
2104 /*
2105 * Allocate a buffer visible to the device to hold the QoS table key
2106 * configuration.
2107 */
2108
2109 if (__predict_true(buf->dmat == NULL)) {
2110 buf->dmat = sc->qos_dmat;
2111 }
2112
2113 error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr,
2114 BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap);
2115 if (error) {
2116 device_printf(dev, "%s: failed to allocate a buffer for QoS key "
2117 "configuration\n", __func__);
2118 goto err_exit;
2119 }
2120
2121 error = bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr,
2122 ETH_QOS_KCFG_BUF_SIZE, dpaa2_dmamap_oneseg_cb, &buf->paddr,
2123 BUS_DMA_NOWAIT);
2124 if (error) {
2125 device_printf(dev, "%s: failed to map QoS key configuration "
2126 "buffer into bus space\n", __func__);
2127 goto err_exit;
2128 }
2129
2130 DPAA2_CMD_INIT(&cmd);
2131
2132 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2133 if (error) {
2134 device_printf(dev, "%s: failed to open resource container: "
2135 "id=%d, error=%d\n", __func__, rcinfo->id, error);
2136 goto err_exit;
2137 }
2138 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2139 if (error) {
2140 device_printf(sc->dev, "%s: failed to open network interface: "
2141 "error=%d\n", __func__, dinfo->id, error);
2142 goto close_rc;
2143 }
2144
2145 tbl.default_tc = 0;
2146 tbl.discard_on_miss = false;
2147 tbl.keep_entries = false;
2148 tbl.kcfg_busaddr = buf->paddr;
2149 error = DPAA2_CMD_NI_SET_QOS_TABLE(dev, child, &cmd, &tbl);
2150 if (error) {
2151 device_printf(dev, "%s: failed to set QoS table\n", __func__);
2152 goto close_ni;
2153 }
2154
2155 error = DPAA2_CMD_NI_CLEAR_QOS_TABLE(dev, child, &cmd);
2156 if (error) {
2157 device_printf(dev, "%s: failed to clear QoS table\n", __func__);
2158 goto close_ni;
2159 }
2160
2161 error = 0;
2162 close_ni:
2163 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2164 close_rc:
2165 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2166 err_exit:
2167 return (error);
2168 }
2169
2170 static int
2171 dpaa2_ni_set_mac_addr(device_t dev)
2172 {
2173 device_t pdev = device_get_parent(dev);
2174 device_t child = dev;
2175 struct dpaa2_ni_softc *sc = device_get_softc(dev);
2176 if_t ifp = sc->ifp;
2177 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2178 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2179 struct dpaa2_cmd cmd;
2180 struct ether_addr rnd_mac_addr;
2181 uint16_t rc_token, ni_token;
2182 uint8_t mac_addr[ETHER_ADDR_LEN];
2183 uint8_t dpni_mac_addr[ETHER_ADDR_LEN];
2184 int error;
2185
2186 DPAA2_CMD_INIT(&cmd);
2187
2188 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2189 if (error) {
2190 device_printf(dev, "%s: failed to open resource container: "
2191 "id=%d, error=%d\n", __func__, rcinfo->id, error);
2192 goto err_exit;
2193 }
2194 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2195 if (error) {
2196 device_printf(sc->dev, "%s: failed to open network interface: "
2197 "error=%d\n", __func__, dinfo->id, error);
2198 goto close_rc;
2199 }
2200
2201 /*
2202 * Get the MAC address associated with the physical port, if the DPNI is
2203 * connected to a DPMAC directly associated with one of the physical
2204 * ports.
2205 */
2206 error = DPAA2_CMD_NI_GET_PORT_MAC_ADDR(dev, child, &cmd, mac_addr);
2207 if (error) {
2208 device_printf(dev, "%s: failed to obtain the MAC address "
2209 "associated with the physical port\n", __func__);
2210 goto close_ni;
2211 }
2212
2213 /* Get primary MAC address from the DPNI attributes. */
2214 error = DPAA2_CMD_NI_GET_PRIM_MAC_ADDR(dev, child, &cmd, dpni_mac_addr);
2215 if (error) {
2216 device_printf(dev, "%s: failed to obtain primary MAC address\n",
2217 __func__);
2218 goto close_ni;
2219 }
2220
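/*
 * Prefer the MAC address of the physical port; otherwise keep the
 * DPNI's current primary address and only generate a random one if
 * neither of them is set.
 */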
2221 if (!ETHER_IS_ZERO(mac_addr)) {
2222 /* Set MAC address of the physical port as DPNI's primary one. */
2223 error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd,
2224 mac_addr);
2225 if (error) {
2226 device_printf(dev, "%s: failed to set primary MAC "
2227 "address\n", __func__);
2228 goto close_ni;
2229 }
2230 for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2231 sc->mac.addr[i] = mac_addr[i];
2232 }
2233 } else if (ETHER_IS_ZERO(dpni_mac_addr)) {
2234 /* Generate random MAC address as DPNI's primary one. */
2235 ether_gen_addr(ifp, &rnd_mac_addr);
2236 for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2237 mac_addr[i] = rnd_mac_addr.octet[i];
2238 }
2239
2240 error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd,
2241 mac_addr);
2242 if (error) {
2243 device_printf(dev, "%s: failed to set random primary "
2244 "MAC address\n", __func__);
2245 goto close_ni;
2246 }
2247 for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2248 sc->mac.addr[i] = mac_addr[i];
2249 }
2250 } else {
2251 for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2252 sc->mac.addr[i] = dpni_mac_addr[i];
2253 }
2254 }
2255
2256 error = 0;
2257 close_ni:
2258 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2259 close_rc:
2260 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2261 err_exit:
2262 return (error);
2263 }
2264
2265 static void
2266 dpaa2_ni_miibus_statchg(device_t dev)
2267 {
2268 device_t pdev = device_get_parent(dev);
2269 device_t child = dev;
2270 struct dpaa2_ni_softc *sc = device_get_softc(dev);
2271 struct dpaa2_mac_link_state mac_link = { 0 };
2272 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2273 struct dpaa2_cmd cmd;
2274 uint16_t rc_token, mac_token;
2275 int error, link_state;
2276
2277 if (sc->fixed_link || sc->mii == NULL) {
2278 return;
2279 }
2280 if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0) {
2281 /*
2282 * We can be called before dpaa2_ni_init() has set everything up
2283 * (IFF_DRV_RUNNING is not set yet). Adjusting the link and the
2284 * internal sc->link_state at that point would suppress the update
2285 * once it is actually needed, so the DPMAC would never learn
2286 * about the link state.
2287 */
2288 return;
2289 }
2290
2291 /*
2292 * Note: ifp link state will only be changed AFTER we are called so we
2293 * cannot rely on ifp->if_linkstate here.
2294 */
2295 if (sc->mii->mii_media_status & IFM_AVALID) {
2296 if (sc->mii->mii_media_status & IFM_ACTIVE) {
2297 link_state = LINK_STATE_UP;
2298 } else {
2299 link_state = LINK_STATE_DOWN;
2300 }
2301 } else {
2302 link_state = LINK_STATE_UNKNOWN;
2303 }
2304
2305 if (link_state != sc->link_state) {
2306 sc->link_state = link_state;
2307
2308 DPAA2_CMD_INIT(&cmd);
2309
2310 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
2311 &rc_token);
2312 if (error) {
2313 device_printf(dev, "%s: failed to open resource "
2314 "container: id=%d, error=%d\n", __func__, rcinfo->id,
2315 error);
2316 goto err_exit;
2317 }
2318 error = DPAA2_CMD_MAC_OPEN(dev, child, &cmd, sc->mac.dpmac_id,
2319 &mac_token);
2320 if (error) {
2321 device_printf(sc->dev, "%s: failed to open DPMAC: "
2322 "id=%d, error=%d\n", __func__, sc->mac.dpmac_id,
2323 error);
2324 goto close_rc;
2325 }
2326
2327 if (link_state == LINK_STATE_UP ||
2328 link_state == LINK_STATE_DOWN) {
2329 /* Update DPMAC link state. */
2330 mac_link.supported = sc->mii->mii_media.ifm_media;
2331 mac_link.advert = sc->mii->mii_media.ifm_media;
2332 mac_link.rate = 1000; /* TODO: Derive from ifmedia_baudrate()? */
2333 mac_link.options =
2334 DPAA2_MAC_LINK_OPT_AUTONEG |
2335 DPAA2_MAC_LINK_OPT_PAUSE;
2336 mac_link.up = (link_state == LINK_STATE_UP) ? true : false;
2337 mac_link.state_valid = true;
2338
2339 /* Inform DPMAC about link state. */
2340 error = DPAA2_CMD_MAC_SET_LINK_STATE(dev, child, &cmd,
2341 &mac_link);
2342 if (error) {
2343 device_printf(sc->dev, "%s: failed to set DPMAC "
2344 "link state: id=%d, error=%d\n", __func__,
2345 sc->mac.dpmac_id, error);
2346 }
2347 }
2348 (void)DPAA2_CMD_MAC_CLOSE(dev, child, &cmd);
2349 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
2350 rc_token));
2351 }
2352
2353 return;
2354
2355 close_rc:
2356 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2357 err_exit:
2358 return;
2359 }
2360
2361 /**
2362 * @brief Callback function to process media change request.
2363 */
2364 static int
2365 dpaa2_ni_media_change_locked(struct dpaa2_ni_softc *sc)
2366 {
2367
2368 DPNI_LOCK_ASSERT(sc);
2369 if (sc->mii) {
2370 mii_mediachg(sc->mii);
2371 sc->media_status = sc->mii->mii_media.ifm_media;
2372 } else if (sc->fixed_link) {
2373 if_printf(sc->ifp, "%s: can't change media in fixed mode\n",
2374 __func__);
2375 }
2376
2377 return (0);
2378 }
2379
2380 static int
2381 dpaa2_ni_media_change(if_t ifp)
2382 {
2383 struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2384 int error;
2385
2386 DPNI_LOCK(sc);
2387 error = dpaa2_ni_media_change_locked(sc);
2388 DPNI_UNLOCK(sc);
2389 return (error);
2390 }
2391
2392 /**
2393 * @brief Callback function to process media status request.
2394 */
2395 static void
2396 dpaa2_ni_media_status(if_t ifp, struct ifmediareq *ifmr)
2397 {
2398 struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2399
2400 DPNI_LOCK(sc);
2401 if (sc->mii) {
2402 mii_pollstat(sc->mii);
2403 ifmr->ifm_active = sc->mii->mii_media_active;
2404 ifmr->ifm_status = sc->mii->mii_media_status;
2405 }
2406 DPNI_UNLOCK(sc);
2407 }
2408
2409 /**
2410 * @brief Callout function to check and update media status.
2411 */
2412 static void
2413 dpaa2_ni_media_tick(void *arg)
2414 {
2415 struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2416
2417 /* Check for media type change */
2418 if (sc->mii) {
2419 mii_tick(sc->mii);
2420 if (sc->media_status != sc->mii->mii_media.ifm_media) {
2421 printf("%s: media type changed (ifm_media=%x)\n",
2422 __func__, sc->mii->mii_media.ifm_media);
2423 dpaa2_ni_media_change(sc->ifp);
2424 }
2425 }
2426
2427 /* Schedule another timeout one second from now */
2428 callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);
2429 }
2430
2431 static void
2432 dpaa2_ni_init(void *arg)
2433 {
2434 struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2435 if_t ifp = sc->ifp;
2436 device_t pdev = device_get_parent(sc->dev);
2437 device_t dev = sc->dev;
2438 device_t child = dev;
2439 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2440 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2441 struct dpaa2_cmd cmd;
2442 uint16_t rc_token, ni_token;
2443 int error;
2444
2445 DPNI_LOCK(sc);
2446 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2447 DPNI_UNLOCK(sc);
2448 return;
2449 }
2450 DPNI_UNLOCK(sc);
2451
2452 DPAA2_CMD_INIT(&cmd);
2453
2454 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2455 if (error) {
2456 device_printf(dev, "%s: failed to open resource container: "
2457 "id=%d, error=%d\n", __func__, rcinfo->id, error);
2458 goto err_exit;
2459 }
2460 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2461 if (error) {
2462 device_printf(dev, "%s: failed to open network interface: "
2463 "id=%d, error=%d\n", __func__, dinfo->id, error);
2464 goto close_rc;
2465 }
2466
2467 error = DPAA2_CMD_NI_ENABLE(dev, child, &cmd);
2468 if (error) {
2469 device_printf(dev, "%s: failed to enable DPNI: error=%d\n",
2470 __func__, error);
2471 }
2472
2473 DPNI_LOCK(sc);
2474 /* Announce we are up and running and can queue packets. */
2475 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
2476
2477 if (sc->mii) {
2478 /*
2479 * mii_mediachg() will trigger a call into
2480 * dpaa2_ni_miibus_statchg() to setup link state.
2481 */
2482 dpaa2_ni_media_change_locked(sc);
2483 }
2484 callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);
2485
2486 DPNI_UNLOCK(sc);
2487
2488 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2489 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2490 return;
2491
2492 close_rc:
2493 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2494 err_exit:
2495 return;
2496 }
2497
2498 static int
2499 dpaa2_ni_transmit(if_t ifp, struct mbuf *m)
2500 {
2501 struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2502 struct dpaa2_channel *ch;
2503 uint32_t fqid;
2504 bool found = false;
2505 int chidx = 0, error;
2506
2507 if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) {
2508 return (0);
2509 }
2510
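/*
 * An mbuf received on one of our Rx queues carries the Rx FQID in
 * its flowid (see dpaa2_ni_rx()); map it back to the owning channel
 * so that the frame is sent on the channel it was received on.
 */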
2511 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
2512 fqid = m->m_pkthdr.flowid;
2513 for (int i = 0; i < sc->chan_n; i++) {
2514 ch = sc->channels[i];
2515 for (int j = 0; j < ch->rxq_n; j++) {
2516 if (fqid == ch->rx_queues[j].fqid) {
2517 chidx = ch->flowid;
2518 found = true;
2519 break;
2520 }
2521 }
2522 if (found) {
2523 break;
2524 }
2525 }
2526 }
2527
2528 ch = sc->channels[chidx];
2529 error = buf_ring_enqueue(ch->xmit_br, m);
2530 if (__predict_false(error != 0)) {
2531 m_freem(m);
2532 } else {
2533 taskqueue_enqueue(ch->cleanup_tq, &ch->cleanup_task);
2534 }
2535
2536 return (error);
2537 }
2538
2539 static void
2540 dpaa2_ni_qflush(if_t ifp)
2541 {
2542 /* TODO: Find a way to drain Tx queues in QBMan. */
2543 if_qflush(ifp);
2544 }
2545
2546 static int
2547 dpaa2_ni_ioctl(if_t ifp, u_long c, caddr_t data)
2548 {
2549 struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2550 struct ifreq *ifr = (struct ifreq *) data;
2551 device_t pdev = device_get_parent(sc->dev);
2552 device_t dev = sc->dev;
2553 device_t child = dev;
2554 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2555 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2556 struct dpaa2_cmd cmd;
2557 uint32_t changed = 0;
2558 uint16_t rc_token, ni_token;
2559 int mtu, error, rc = 0;
2560
2561 DPAA2_CMD_INIT(&cmd);
2562
2563 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2564 if (error) {
2565 device_printf(dev, "%s: failed to open resource container: "
2566 "id=%d, error=%d\n", __func__, rcinfo->id, error);
2567 goto err_exit;
2568 }
2569 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2570 if (error) {
2571 device_printf(dev, "%s: failed to open network interface: "
2572 "id=%d, error=%d\n", __func__, dinfo->id, error);
2573 goto close_rc;
2574 }
2575
2576 switch (c) {
2577 case SIOCSIFMTU:
2578 DPNI_LOCK(sc);
2579 mtu = ifr->ifr_mtu;
2580 if (mtu < ETHERMIN || mtu > ETHERMTU_JUMBO) {
2581 DPNI_UNLOCK(sc);
2582 error = EINVAL;
2583 goto close_ni;
2584 }
2585 if_setmtu(ifp, mtu);
2586 DPNI_UNLOCK(sc);
2587
2588 /* Update maximum frame length. */
2589 mtu += ETHER_HDR_LEN;
2590 if (if_getcapenable(ifp) & IFCAP_VLAN_MTU)
2591 mtu += ETHER_VLAN_ENCAP_LEN;
2592 error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd, mtu);
2593 if (error) {
2594 device_printf(dev, "%s: failed to update maximum frame "
2595 "length: error=%d\n", __func__, error);
2596 goto close_ni;
2597 }
2598 break;
2599 case SIOCSIFCAP:
2600 changed = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
2601 if (changed & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
2602 if ((ifr->ifr_reqcap & changed) &
2603 (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
2604 if_setcapenablebit(ifp,
2605 IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6, 0);
2606 } else {
2607 if_setcapenablebit(ifp, 0,
2608 IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
2609 }
2610 }
2611 if (changed & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) {
2612 if ((ifr->ifr_reqcap & changed) &
2613 (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) {
2614 if_setcapenablebit(ifp,
2615 IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6, 0);
2616 } else {
2617 if_setcapenablebit(ifp, 0,
2618 IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6);
2619 }
2620 }
2621
2622 rc = dpaa2_ni_setup_if_caps(sc);
2623 if (rc) {
2624 printf("%s: failed to update iface capabilities: "
2625 "error=%d\n", __func__, rc);
2626 rc = ENXIO;
2627 }
2628 break;
2629 case SIOCSIFFLAGS:
2630 DPNI_LOCK(sc);
2631 if (if_getflags(ifp) & IFF_UP) {
2632 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2633 changed = if_getflags(ifp) ^ sc->if_flags;
2634 if (changed & IFF_PROMISC ||
2635 changed & IFF_ALLMULTI) {
2636 rc = dpaa2_ni_setup_if_flags(sc);
2637 }
2638 } else {
2639 DPNI_UNLOCK(sc);
2640 dpaa2_ni_init(sc);
2641 DPNI_LOCK(sc);
2642 }
2643 } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2644 /* FIXME: Disable DPNI. See dpaa2_ni_init(). */
2645 }
2646
2647 sc->if_flags = if_getflags(ifp);
2648 DPNI_UNLOCK(sc);
2649 break;
2650 case SIOCADDMULTI:
2651 case SIOCDELMULTI:
2652 DPNI_LOCK(sc);
2653 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2654 DPNI_UNLOCK(sc);
2655 rc = dpaa2_ni_update_mac_filters(ifp);
2656 if (rc) {
2657 device_printf(dev, "%s: failed to update MAC "
2658 "filters: error=%d\n", __func__, rc);
2659 }
2660 DPNI_LOCK(sc);
2661 }
2662 DPNI_UNLOCK(sc);
2663 break;
2664 case SIOCGIFMEDIA:
2665 case SIOCSIFMEDIA:
2666 if (sc->mii) {
2667 rc = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, c);
2668 } else if (sc->fixed_link) {
2669 rc = ifmedia_ioctl(ifp, ifr, &sc->fixed_ifmedia, c);
2670 }
2671 break;
2672 default:
2673 rc = ether_ioctl(ifp, c, data);
2674 break;
2675 }
2676
2677 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2678 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2679 return (rc);
2680
2681 close_ni:
2682 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2683 close_rc:
2684 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2685 err_exit:
2686 return (error);
2687 }
2688
2689 static int
2690 dpaa2_ni_update_mac_filters(if_t ifp)
2691 {
2692 struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2693 struct dpaa2_ni_mcaddr_ctx ctx;
2694 device_t pdev = device_get_parent(sc->dev);
2695 device_t dev = sc->dev;
2696 device_t child = dev;
2697 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2698 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2699 struct dpaa2_cmd cmd;
2700 uint16_t rc_token, ni_token;
2701 int error;
2702
2703 DPAA2_CMD_INIT(&cmd);
2704
2705 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2706 if (error) {
2707 device_printf(dev, "%s: failed to open resource container: "
2708 "id=%d, error=%d\n", __func__, rcinfo->id, error);
2709 goto err_exit;
2710 }
2711 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2712 if (error) {
2713 device_printf(dev, "%s: failed to open network interface: "
2714 "id=%d, error=%d\n", __func__, dinfo->id, error);
2715 goto close_rc;
2716 }
2717
2718 /* Remove all multicast MAC filters. */
2719 error = DPAA2_CMD_NI_CLEAR_MAC_FILTERS(dev, child, &cmd, false, true);
2720 if (error) {
2721 device_printf(dev, "%s: failed to clear multicast MAC filters: "
2722 "error=%d\n", __func__, error);
2723 goto close_ni;
2724 }
2725
2726 ctx.ifp = ifp;
2727 ctx.error = 0;
2728 ctx.nent = 0;
2729
2730 if_foreach_llmaddr(ifp, dpaa2_ni_add_maddr, &ctx);
2731
2732 error = ctx.error;
2733 close_ni:
2734 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2735 close_rc:
2736 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2737 err_exit:
2738 return (error);
2739 }
2740
2741 static u_int
2742 dpaa2_ni_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2743 {
2744 struct dpaa2_ni_mcaddr_ctx *ctx = arg;
2745 struct dpaa2_ni_softc *sc = if_getsoftc(ctx->ifp);
2746 device_t pdev = device_get_parent(sc->dev);
2747 device_t dev = sc->dev;
2748 device_t child = dev;
2749 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2750 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2751 struct dpaa2_cmd cmd;
2752 uint16_t rc_token, ni_token;
2753 int error;
2754
2755 if (ctx->error != 0) {
2756 return (0);
2757 }
2758
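/*
 * Only multicast addresses are programmed into the DPNI MAC filter
 * table; each one takes its own open/add/close command sequence.
 */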
2759 if (ETHER_IS_MULTICAST(LLADDR(sdl))) {
2760 DPAA2_CMD_INIT(&cmd);
2761
2762 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
2763 &rc_token);
2764 if (error) {
2765 device_printf(dev, "%s: failed to open resource "
2766 "container: id=%d, error=%d\n", __func__, rcinfo->id,
2767 error);
2768 return (0);
2769 }
2770 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id,
2771 &ni_token);
2772 if (error) {
2773 device_printf(dev, "%s: failed to open network interface: "
2774 "id=%d, error=%d\n", __func__, dinfo->id, error);
2775 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
2776 rc_token));
2777 return (0);
2778 }
2779
2780 ctx->error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, &cmd,
2781 LLADDR(sdl));
2782
2783 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
2784 ni_token));
2785 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
2786 rc_token));
2787
2788 if (ctx->error != 0) {
2789 device_printf(dev, "%s: can't add more than %d MAC "
2790 "addresses, switching to the multicast promiscuous "
2791 "mode\n", __func__, ctx->nent);
2792
2793 /* Enable multicast promiscuous mode. */
2794 DPNI_LOCK(sc);
2795 if_setflagbits(ctx->ifp, IFF_ALLMULTI, 0);
2796 sc->if_flags |= IFF_ALLMULTI;
2797 ctx->error = dpaa2_ni_setup_if_flags(sc);
2798 DPNI_UNLOCK(sc);
2799
2800 return (0);
2801 }
2802 ctx->nent++;
2803 }
2804
2805 return (1);
2806 }
2807
2808 static void
2809 dpaa2_ni_intr(void *arg)
2810 {
2811 struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2812 device_t pdev = device_get_parent(sc->dev);
2813 device_t dev = sc->dev;
2814 device_t child = dev;
2815 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2816 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2817 struct dpaa2_cmd cmd;
2818 uint32_t status = ~0u; /* clear all IRQ status bits */
2819 uint16_t rc_token, ni_token;
2820 int error;
2821
2822 DPAA2_CMD_INIT(&cmd);
2823
2824 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2825 if (error) {
2826 device_printf(dev, "%s: failed to open resource container: "
2827 "id=%d, error=%d\n", __func__, rcinfo->id, error);
2828 goto err_exit;
2829 }
2830 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2831 if (error) {
2832 device_printf(dev, "%s: failed to open network interface: "
2833 "id=%d, error=%d\n", __func__, dinfo->id, error);
2834 goto close_rc;
2835 }
2836
2837 error = DPAA2_CMD_NI_GET_IRQ_STATUS(dev, child, &cmd, DPNI_IRQ_INDEX,
2838 &status);
2839 if (error) {
2840 device_printf(sc->dev, "%s: failed to obtain IRQ status: "
2841 "error=%d\n", __func__, error);
2842 }
2843
2844 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2845 close_rc:
2846 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2847 err_exit:
2848 return;
2849 }
2850
2851 /**
2852 * @brief Execute channel's Rx/Tx routines.
2853 *
2854 * NOTE: Must not be re-entered for the same channel; this is guaranteed by
2855 * enqueuing the cleanup routine on a single-threaded taskqueue.
2856 */
2857 static void
2858 dpaa2_ni_cleanup_task(void *arg, int count)
2859 {
2860 struct dpaa2_channel *ch = (struct dpaa2_channel *)arg;
2861 struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev);
2862 int error, rxc, txc;
2863
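/*
 * Drain Rx and Tx repeatedly and stop early once both routines
 * return less than their per-iteration budgets, i.e. the queues
 * have run dry.
 */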
2864 for (int i = 0; i < DPAA2_CLEAN_BUDGET; i++) {
2865 rxc = dpaa2_ni_rx_cleanup(ch);
2866 txc = dpaa2_ni_tx_cleanup(ch);
2867
2868 if (__predict_false((if_getdrvflags(sc->ifp) &
2869 IFF_DRV_RUNNING) == 0)) {
2870 return;
2871 }
2872
2873 if ((txc != DPAA2_TX_BUDGET) && (rxc != DPAA2_RX_BUDGET)) {
2874 break;
2875 }
2876 }
2877
2878 /* Re-arm channel to generate CDAN */
2879 error = DPAA2_SWP_CONF_WQ_CHANNEL(ch->io_dev, &ch->ctx);
2880 if (error != 0) {
2881 panic("%s: failed to rearm channel: chan_id=%d, error=%d\n",
2882 __func__, ch->id, error);
2883 }
2884 }
2885
2886 /**
2887 * @brief Poll frames from a specific channel when CDAN is received.
2888 */
2889 static int
2890 dpaa2_ni_rx_cleanup(struct dpaa2_channel *ch)
2891 {
2892 struct dpaa2_io_softc *iosc = device_get_softc(ch->io_dev);
2893 struct dpaa2_swp *swp = iosc->swp;
2894 struct dpaa2_ni_fq *fq;
2895 struct dpaa2_buf *buf = &ch->store;
2896 int budget = DPAA2_RX_BUDGET;
2897 int error, consumed = 0;
2898
2899 do {
2900 error = dpaa2_swp_pull(swp, ch->id, buf, DPAA2_ETH_STORE_FRAMES);
2901 if (error) {
2902 device_printf(ch->ni_dev, "%s: failed to pull frames: "
2903 "chan_id=%d, error=%d\n", __func__, ch->id, error);
2904 break;
2905 }
2906 error = dpaa2_ni_consume_frames(ch, &fq, &consumed);
2907 if (error == ENOENT || error == EALREADY) {
2908 break;
2909 }
2910 if (error == ETIMEDOUT) {
2911 device_printf(ch->ni_dev, "%s: timed out while consuming "
2912 "frames: chan_id=%d\n", __func__, ch->id);
2913 }
2914 } while (--budget);
2915
2916 return (DPAA2_RX_BUDGET - budget);
2917 }
2918
2919 static int
2920 dpaa2_ni_tx_cleanup(struct dpaa2_channel *ch)
2921 {
2922 struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev);
2923 struct dpaa2_ni_tx_ring *tx = &ch->txc_queue.tx_rings[0];
2924 struct mbuf *m = NULL;
2925 int budget = DPAA2_TX_BUDGET;
2926
2927 do {
2928 mtx_assert(&ch->xmit_mtx, MA_NOTOWNED);
2929 mtx_lock(&ch->xmit_mtx);
2930 m = buf_ring_dequeue_sc(ch->xmit_br);
2931 mtx_unlock(&ch->xmit_mtx);
2932
2933 if (__predict_false(m == NULL)) {
2934 /* TODO: Do not give up easily */
2935 break;
2936 } else {
2937 dpaa2_ni_tx(sc, ch, tx, m);
2938 }
2939 } while (--budget);
2940
2941 return (DPAA2_TX_BUDGET - budget);
2942 }
2943
2944 static void
2945 dpaa2_ni_tx(struct dpaa2_ni_softc *sc, struct dpaa2_channel *ch,
2946 struct dpaa2_ni_tx_ring *tx, struct mbuf *m)
2947 {
2948 device_t dev = sc->dev;
2949 struct dpaa2_ni_fq *fq = tx->fq;
2950 struct dpaa2_buf *buf, *sgt;
2951 struct dpaa2_fd fd;
2952 struct mbuf *md;
2953 bus_dma_segment_t segs[DPAA2_TX_SEGLIMIT];
2954 int rc, nsegs;
2955 int error;
2956
2957 mtx_assert(&tx->lock, MA_NOTOWNED);
2958 mtx_lock(&tx->lock);
2959 buf = buf_ring_dequeue_sc(tx->br);
2960 mtx_unlock(&tx->lock);
2961 if (__predict_false(buf == NULL)) {
2962 /* TODO: Do not give up easily */
2963 m_freem(m);
2964 return;
2965 } else {
2966 DPAA2_BUF_ASSERT_TXREADY(buf);
2967 buf->m = m;
2968 sgt = buf->sgt;
2969 }
2970
2971 #if defined(INVARIANTS)
2972 struct dpaa2_ni_tx_ring *btx = (struct dpaa2_ni_tx_ring *)buf->opt;
2973 KASSERT(buf->opt == tx, ("%s: unexpected Tx ring", __func__));
2974 KASSERT(btx->fq->chan == ch, ("%s: unexpected channel", __func__));
2975 #endif /* INVARIANTS */
2976
2977 BPF_MTAP(sc->ifp, m);
2978
2979 error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, m, segs, &nsegs,
2980 BUS_DMA_NOWAIT);
2981 if (__predict_false(error != 0)) {
2982 /* Too many fragments, trying to defragment... */
2983 md = m_collapse(m, M_NOWAIT, DPAA2_TX_SEGLIMIT);
2984 if (md == NULL) {
2985 device_printf(dev, "%s: m_collapse() failed\n", __func__);
2986 fq->chan->tx_dropped++;
2987 goto err;
2988 }
2989
2990 buf->m = m = md;
2991 error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, m, segs,
2992 &nsegs, BUS_DMA_NOWAIT);
2993 if (__predict_false(error != 0)) {
2994 device_printf(dev, "%s: bus_dmamap_load_mbuf_sg() "
2995 "failed: error=%d\n", __func__, error);
2996 fq->chan->tx_dropped++;
2997 goto err;
2998 }
2999 }
3000
3001 error = dpaa2_ni_build_fd(sc, tx, buf, segs, nsegs, &fd);
3002 if (__predict_false(error != 0)) {
3003 device_printf(dev, "%s: failed to build frame descriptor: "
3004 "error=%d\n", __func__, error);
3005 fq->chan->tx_dropped++;
3006 goto err_unload;
3007 }
3008
3009 /* TODO: Enqueue several frames in a single command */
3010 for (int i = 0; i < DPAA2_NI_ENQUEUE_RETRIES; i++) {
3011 /* TODO: Return error codes instead of # of frames */
3012 rc = DPAA2_SWP_ENQ_MULTIPLE_FQ(fq->chan->io_dev, tx->fqid, &fd, 1);
3013 if (rc == 1) {
3014 break;
3015 }
3016 }
3017
3018 bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_PREWRITE);
3019 bus_dmamap_sync(sgt->dmat, sgt->dmap, BUS_DMASYNC_PREWRITE);
3020
3021 if (rc != 1) {
3022 fq->chan->tx_dropped++;
3023 goto err_unload;
3024 } else {
3025 fq->chan->tx_frames++;
3026 }
3027 return;
3028
3029 err_unload:
3030 bus_dmamap_unload(buf->dmat, buf->dmap);
3031 if (sgt->paddr != 0) {
3032 bus_dmamap_unload(sgt->dmat, sgt->dmap);
3033 }
3034 err:
3035 m_freem(buf->m);
3036 buf_ring_enqueue(tx->br, buf);
3037 }
3038
3039 static int
3040 dpaa2_ni_consume_frames(struct dpaa2_channel *chan, struct dpaa2_ni_fq **src,
3041 uint32_t *consumed)
3042 {
3043 struct dpaa2_ni_fq *fq = NULL;
3044 struct dpaa2_dq *dq;
3045 struct dpaa2_fd *fd;
3046 struct dpaa2_ni_rx_ctx ctx = {
3047 .head = NULL,
3048 .tail = NULL,
3049 .cnt = 0,
3050 .last = false
3051 };
3052 int rc, frames = 0;
3053
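/*
 * Walk the dequeue responses of the current VDQ operation one by
 * one: EINPROGRESS is assumed to mean that more responses are still
 * expected, while EALREADY/ENOENT signal the final (or an empty)
 * response.
 */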
3054 do {
3055 rc = dpaa2_chan_next_frame(chan, &dq);
3056 if (rc == EINPROGRESS) {
3057 if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
3058 fd = &dq->fdr.fd;
3059 fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;
3060
3061 switch (fq->type) {
3062 case DPAA2_NI_QUEUE_RX:
3063 (void)dpaa2_ni_rx(chan, fq, fd, &ctx);
3064 break;
3065 case DPAA2_NI_QUEUE_RX_ERR:
3066 (void)dpaa2_ni_rx_err(chan, fq, fd);
3067 break;
3068 case DPAA2_NI_QUEUE_TX_CONF:
3069 (void)dpaa2_ni_tx_conf(chan, fq, fd);
3070 break;
3071 default:
3072 panic("%s: unknown queue type (1)",
3073 __func__);
3074 }
3075 frames++;
3076 }
3077 } else if (rc == EALREADY || rc == ENOENT) {
3078 if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
3079 fd = &dq->fdr.fd;
3080 fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;
3081
3082 switch (fq->type) {
3083 case DPAA2_NI_QUEUE_RX:
3084 /*
3085 * Last VDQ response (mbuf) in a chain
3086 * obtained from the Rx queue.
3087 */
3088 ctx.last = true;
3089 (void)dpaa2_ni_rx(chan, fq, fd, &ctx);
3090 break;
3091 case DPAA2_NI_QUEUE_RX_ERR:
3092 (void)dpaa2_ni_rx_err(chan, fq, fd);
3093 break;
3094 case DPAA2_NI_QUEUE_TX_CONF:
3095 (void)dpaa2_ni_tx_conf(chan, fq, fd);
3096 break;
3097 default:
3098 panic("%s: unknown queue type (2)",
3099 __func__);
3100 }
3101 frames++;
3102 }
3103 break;
3104 } else {
3105 panic("%s: should not reach here: rc=%d", __func__, rc);
3106 }
3107 } while (true);
3108
3109 KASSERT(chan->store_idx < chan->store_sz, ("%s: store_idx(%d) >= "
3110 "store_sz(%d)", __func__, chan->store_idx, chan->store_sz));
3111
3112 /*
3113 * VDQ operation pulls frames from a single queue into the store.
3114 * Return the frame queue and the number of consumed frames as output.
3115 */
3116 if (src != NULL) {
3117 *src = fq;
3118 }
3119 if (consumed != NULL) {
3120 *consumed = frames;
3121 }
3122
3123 return (rc);
3124 }
3125
3126 /**
3127 * @brief Receive frames.
3128 */
3129 static int
3130 dpaa2_ni_rx(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq, struct dpaa2_fd *fd,
3131 struct dpaa2_ni_rx_ctx *ctx)
3132 {
3133 bus_addr_t paddr = (bus_addr_t)fd->addr;
3134 struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr);
3135 struct dpaa2_buf *buf = fa->buf;
3136 struct dpaa2_channel *bch = (struct dpaa2_channel *)buf->opt;
3137 struct dpaa2_ni_softc *sc = device_get_softc(bch->ni_dev);
3138 struct dpaa2_bp_softc *bpsc;
3139 struct mbuf *m;
3140 device_t bpdev;
3141 bus_addr_t released[DPAA2_SWP_BUFS_PER_CMD];
3142 void *buf_data;
3143 int buf_len, error, released_n = 0;
3144
3145 KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
3146 /*
3147 * NOTE: Current channel might not be the same as the "buffer" channel
3148 * and it's fine. It must not be NULL though.
3149 */
3150 KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__));
3151
3152 if (__predict_false(paddr != buf->paddr)) {
3153 panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
3154 __func__, paddr, buf->paddr);
3155 }
3156
3157 switch (dpaa2_ni_fd_err(fd)) {
3158 case 1: /* Enqueue rejected by QMan */
3159 sc->rx_enq_rej_frames++;
3160 break;
3161 case 2: /* QMan IEOI error */
3162 sc->rx_ieoi_err_frames++;
3163 break;
3164 default:
3165 break;
3166 }
3167 switch (dpaa2_ni_fd_format(fd)) {
3168 case DPAA2_FD_SINGLE:
3169 sc->rx_single_buf_frames++;
3170 break;
3171 case DPAA2_FD_SG:
3172 sc->rx_sg_buf_frames++;
3173 break;
3174 default:
3175 break;
3176 }
3177
3178 mtx_assert(&bch->dma_mtx, MA_NOTOWNED);
3179 mtx_lock(&bch->dma_mtx);
3180
3181 bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_POSTREAD);
3182 bus_dmamap_unload(buf->dmat, buf->dmap);
3183 m = buf->m;
3184 buf_len = dpaa2_ni_fd_data_len(fd);
3185 buf_data = (uint8_t *)buf->vaddr + dpaa2_ni_fd_offset(fd);
3186 /* Prepare buffer to be re-cycled */
3187 buf->m = NULL;
3188 buf->paddr = 0;
3189 buf->vaddr = NULL;
3190 buf->seg.ds_addr = 0;
3191 buf->seg.ds_len = 0;
3192 buf->nseg = 0;
3193
3194 mtx_unlock(&bch->dma_mtx);
3195
3196 m->m_flags |= M_PKTHDR;
3197 m->m_data = buf_data;
3198 m->m_len = buf_len;
3199 m->m_pkthdr.len = buf_len;
3200 m->m_pkthdr.rcvif = sc->ifp;
3201 m->m_pkthdr.flowid = fq->fqid;
3202 M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
3203
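/* Link the mbuf into the chain handed to if_input() in one call. */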
3204 if (ctx->head == NULL) {
3205 KASSERT(ctx->tail == NULL, ("%s: tail already given?", __func__));
3206 ctx->head = m;
3207 ctx->tail = m;
3208 } else {
3209 KASSERT(ctx->head != NULL, ("%s: head is NULL", __func__));
3210 ctx->tail->m_nextpkt = m;
3211 ctx->tail = m;
3212 }
3213 ctx->cnt++;
3214
3215 if (ctx->last) {
3216 ctx->tail->m_nextpkt = NULL;
3217 if_input(sc->ifp, ctx->head);
3218 }
3219
3220 /* Keep the buffer to be recycled */
3221 ch->recycled[ch->recycled_n++] = buf;
3222
3223 /* Re-seed and release recycled buffers back to the pool */
3224 if (ch->recycled_n == DPAA2_SWP_BUFS_PER_CMD) {
3225 /* Release new buffers to the pool if needed */
3226 taskqueue_enqueue(sc->bp_taskq, &ch->bp_task);
3227
3228 for (int i = 0; i < ch->recycled_n; i++) {
3229 buf = ch->recycled[i];
3230 bch = (struct dpaa2_channel *)buf->opt;
3231
3232 mtx_assert(&bch->dma_mtx, MA_NOTOWNED);
3233 mtx_lock(&bch->dma_mtx);
3234 error = dpaa2_buf_seed_rxb(sc->dev, buf,
3235 DPAA2_RX_BUF_SIZE, &bch->dma_mtx);
3236 mtx_unlock(&bch->dma_mtx);
3237
3238 if (__predict_false(error != 0)) {
3239 /* TODO: What else to do with the buffer? */
3240 panic("%s: failed to recycle buffer: error=%d",
3241 __func__, error);
3242 }
3243
3244 /* Prepare buffer to be released in a single command */
3245 released[released_n++] = buf->paddr;
3246 }
3247
3248 /* There's only one buffer pool for now */
3249 bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
3250 bpsc = device_get_softc(bpdev);
3251
3252 error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpsc->attr.bpid,
3253 released, released_n);
3254 if (__predict_false(error != 0)) {
3255 device_printf(sc->dev, "%s: failed to release buffers "
3256 "to the pool: error=%d\n", __func__, error);
3257 return (error);
3258 }
3259 ch->recycled_n = 0;
3260 }
3261
3262 return (0);
3263 }
3264
3265 /**
3266 * @brief Receive Rx error frames.
3267 */
3268 static int
3269 dpaa2_ni_rx_err(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq,
3270 struct dpaa2_fd *fd)
3271 {
3272 bus_addr_t paddr = (bus_addr_t)fd->addr;
3273 struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr);
3274 struct dpaa2_buf *buf = fa->buf;
3275 struct dpaa2_channel *bch = (struct dpaa2_channel *)buf->opt;
3276 struct dpaa2_ni_softc *sc = device_get_softc(bch->ni_dev);
3277 device_t bpdev;
3278 struct dpaa2_bp_softc *bpsc;
3279 int error;
3280
3281 KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
3282 /*
3283 * NOTE: Current channel might not be the same as the "buffer" channel
3284 * and it's fine. It must not be NULL though.
3285 */
3286 KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__));
3287
3288 if (__predict_false(paddr != buf->paddr)) {
3289 panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
3290 __func__, paddr, buf->paddr);
3291 }
3292
3293 /* There's only one buffer pool for now */
3294 bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
3295 bpsc = device_get_softc(bpdev);
3296
3297 /* Release buffer to QBMan buffer pool */
3298 error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpsc->attr.bpid, &paddr, 1);
3299 if (error != 0) {
3300 device_printf(sc->dev, "%s: failed to release frame buffer to "
3301 "the pool: error=%d\n", __func__, error);
3302 return (error);
3303 }
3304
3305 return (0);
3306 }
3307
3308 /**
3309 * @brief Receive Tx confirmation frames.
3310 */
3311 static int
3312 dpaa2_ni_tx_conf(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq,
3313 struct dpaa2_fd *fd)
3314 {
3315 bus_addr_t paddr = (bus_addr_t)fd->addr;
3316 struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr);
3317 struct dpaa2_buf *buf = fa->buf;
3318 struct dpaa2_buf *sgt = buf->sgt;
3319 struct dpaa2_ni_tx_ring *tx = (struct dpaa2_ni_tx_ring *)buf->opt;
3320 struct dpaa2_channel *bch = tx->fq->chan;
3321
3322 KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
3323 KASSERT(tx != NULL, ("%s: Tx ring is NULL", __func__));
3324 KASSERT(sgt != NULL, ("%s: S/G table is NULL", __func__));
3325 /*
3326 * NOTE: Current channel might not be the same as the "buffer" channel
3327 * and it's fine. It must not be NULL though.
3328 */
3329 KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__));
3330
3331 if (paddr != buf->paddr) {
3332 panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
3333 __func__, paddr, buf->paddr);
3334 }
3335
3336 mtx_assert(&bch->dma_mtx, MA_NOTOWNED);
3337 mtx_lock(&bch->dma_mtx);
3338
3339 bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_POSTWRITE);
3340 bus_dmamap_sync(sgt->dmat, sgt->dmap, BUS_DMASYNC_POSTWRITE);
3341 bus_dmamap_unload(buf->dmat, buf->dmap);
3342 bus_dmamap_unload(sgt->dmat, sgt->dmap);
3343 m_freem(buf->m);
3344 buf->m = NULL;
3345 buf->paddr = 0;
3346 buf->vaddr = NULL;
3347 sgt->paddr = 0;
3348
3349 mtx_unlock(&bch->dma_mtx);
3350
3351 /* Return Tx buffer back to the ring */
3352 buf_ring_enqueue(tx->br, buf);
3353
3354 return (0);
3355 }
3356
3357 /**
3358 * @brief Compare versions of the DPAA2 network interface API.
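 *
 * Returns zero if the versions match, a positive value if the DPNI API
 * version is newer than major.minor, and a negative value otherwise
 * (strcmp(3)-style).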
3359 */
3360 static int
3361 dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *sc, uint16_t major,
3362 uint16_t minor)
3363 {
3364 if (sc->api_major == major) {
3365 return (sc->api_minor - minor);
3366 }
3367 return (sc->api_major - major);
3368 }
3369
3370 /**
3371 * @brief Build a DPAA2 frame descriptor.
3372 */
3373 static int
3374 dpaa2_ni_build_fd(struct dpaa2_ni_softc *sc, struct dpaa2_ni_tx_ring *tx,
3375 struct dpaa2_buf *buf, bus_dma_segment_t *segs, int nsegs, struct dpaa2_fd *fd)
3376 {
3377 struct dpaa2_buf *sgt = buf->sgt;
3378 struct dpaa2_sg_entry *sge;
3379 struct dpaa2_fa *fa;
3380 int i, error;
3381
3382 KASSERT(nsegs <= DPAA2_TX_SEGLIMIT, ("%s: too many segments", __func__));
3383 KASSERT(buf->opt != NULL, ("%s: no Tx ring?", __func__));
3384 KASSERT(sgt != NULL, ("%s: no S/G table?", __func__));
3385 KASSERT(sgt->vaddr != NULL, ("%s: no S/G vaddr?", __func__));
3386
3387 memset(fd, 0, sizeof(*fd));
3388
3389 /* Populate and map S/G table */
3390 if (__predict_true(nsegs <= DPAA2_TX_SEGLIMIT)) {
3391 sge = (struct dpaa2_sg_entry *)sgt->vaddr + sc->tx_data_off;
3392 for (i = 0; i < nsegs; i++) {
3393 sge[i].addr = (uint64_t)segs[i].ds_addr;
3394 sge[i].len = (uint32_t)segs[i].ds_len;
3395 sge[i].offset_fmt = 0u;
3396 }
3397 sge[i-1].offset_fmt |= 0x8000u; /* set final entry flag */
3398
3399 KASSERT(sgt->paddr == 0, ("%s: paddr(%#jx) != 0", __func__,
3400 sgt->paddr));
3401
3402 error = bus_dmamap_load(sgt->dmat, sgt->dmap, sgt->vaddr,
3403 DPAA2_TX_SGT_SZ, dpaa2_dmamap_oneseg_cb, &sgt->paddr,
3404 BUS_DMA_NOWAIT);
3405 if (__predict_false(error != 0)) {
3406 device_printf(sc->dev, "%s: bus_dmamap_load() failed: "
3407 "error=%d\n", __func__, error);
3408 return (error);
3409 }
3410
3411 buf->paddr = sgt->paddr;
3412 buf->vaddr = sgt->vaddr;
3413 sc->tx_sg_frames++; /* for sysctl(9) */
3414 } else {
3415 return (EINVAL);
3416 }
3417
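/*
 * Stash the frame annotation at the start of the S/G table buffer,
 * which the FD address points to: Tx confirmation recovers the
 * dpaa2_buf from it via PHYS_TO_DMAP() (see dpaa2_ni_tx_conf()).
 */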
3418 fa = (struct dpaa2_fa *)sgt->vaddr;
3419 fa->magic = DPAA2_MAGIC;
3420 fa->buf = buf;
3421
3422 fd->addr = buf->paddr;
3423 fd->data_length = (uint32_t)buf->m->m_pkthdr.len;
3424 fd->bpid_ivp_bmt = 0;
3425 fd->offset_fmt_sl = 0x2000u | sc->tx_data_off; /* FMT: S/G table */
3426 fd->ctrl = 0x00800000u;
3427
3428 return (0);
3429 }
3430
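/*
 * The helpers below extract individual fields from a frame descriptor:
 * the error bits from the control word, and the data length, format,
 * short-length flag and data offset from their packed words.
 */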
3431 static int
3432 dpaa2_ni_fd_err(struct dpaa2_fd *fd)
3433 {
3434 return ((fd->ctrl >> DPAA2_NI_FD_ERR_SHIFT) & DPAA2_NI_FD_ERR_MASK);
3435 }
3436
3437 static uint32_t
3438 dpaa2_ni_fd_data_len(struct dpaa2_fd *fd)
3439 {
3440 if (dpaa2_ni_fd_short_len(fd)) {
3441 return (fd->data_length & DPAA2_NI_FD_LEN_MASK);
3442 }
3443 return (fd->data_length);
3444 }
3445
3446 static int
3447 dpaa2_ni_fd_format(struct dpaa2_fd *fd)
3448 {
3449 return ((enum dpaa2_fd_format)((fd->offset_fmt_sl >>
3450 DPAA2_NI_FD_FMT_SHIFT) & DPAA2_NI_FD_FMT_MASK));
3451 }
3452
3453 static bool
3454 dpaa2_ni_fd_short_len(struct dpaa2_fd *fd)
3455 {
3456 return (((fd->offset_fmt_sl >> DPAA2_NI_FD_SL_SHIFT)
3457 & DPAA2_NI_FD_SL_MASK) == 1);
3458 }
3459
3460 static int
3461 dpaa2_ni_fd_offset(struct dpaa2_fd *fd)
3462 {
3463 return (fd->offset_fmt_sl & DPAA2_NI_FD_OFFSET_MASK);
3464 }
3465
3466 /**
3467 * @brief Collect statistics of the network interface.
3468 */
3469 static int
dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS)3470 dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS)
3471 {
3472 struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
3473 struct dpni_stat *stat = &dpni_stat_sysctls[oidp->oid_number];
3474 device_t pdev = device_get_parent(sc->dev);
3475 device_t dev = sc->dev;
3476 device_t child = dev;
3477 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
3478 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
3479 struct dpaa2_cmd cmd;
3480 uint64_t cnt[DPAA2_NI_STAT_COUNTERS];
3481 uint64_t result = 0;
3482 uint16_t rc_token, ni_token;
3483 int error;
3484
3485 DPAA2_CMD_INIT(&cmd);
3486
3487 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
3488 if (error) {
3489 device_printf(dev, "%s: failed to open resource container: "
3490 "id=%d, error=%d\n", __func__, rcinfo->id, error);
3491 goto exit;
3492 }
3493 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
3494 if (error) {
3495 device_printf(dev, "%s: failed to open network interface: "
3496 "id=%d, error=%d\n", __func__, dinfo->id, error);
3497 goto close_rc;
3498 }
3499
3500 error = DPAA2_CMD_NI_GET_STATISTICS(dev, child, &cmd, stat->page, 0, cnt);
3501 if (!error) {
3502 result = cnt[stat->cnt];
3503 }
3504
3505 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
3506 close_rc:
3507 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
3508 exit:
3509 return (sysctl_handle_64(oidp, &result, 0, req));
3510 }
3511
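/**
 * @brief Sysctl handler to report the current value of the buffer counter
 * (sc->buf_num).
 */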
static int
dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS)
{
	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
	uint32_t buf_num = DPAA2_ATOMIC_READ(&sc->buf_num);

	return (sysctl_handle_32(oidp, &buf_num, 0, req));
}

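/**
 * @brief Sysctl handler to report the current value of the free buffer
 * counter (sc->buf_free).
 */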
static int
dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS)
{
	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
	uint32_t buf_free = DPAA2_ATOMIC_READ(&sc->buf_free);

	return (sysctl_handle_32(oidp, &buf_free, 0, req));
}

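/**
 * @brief Configure Rx hash distribution: translate the given RXH_ flags into
 * distribution key fields and program the resulting hash key.
 *
 * Distribution is only meaningful with more than one Rx queue.
 */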
static int
dpaa2_ni_set_hash(device_t dev, uint64_t flags)
{
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	uint64_t key = 0;
	int i;

	if (sc->attr.num.queues <= 1) {
		return (EOPNOTSUPP);
	}

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		if (dist_fields[i].rxnfc_field & flags) {
			key |= dist_fields[i].id;
		}
	}

	return (dpaa2_ni_set_dist_key(dev, DPAA2_NI_DIST_MODE_HASH, key));
}

/**
 * @brief Set the Rx distribution (hash or flow classification) key.
 *
 * "flags" is a combination of RXH_ bits.
 */
static int
dpaa2_ni_set_dist_key(device_t dev, enum dpaa2_ni_dist_mode type, uint64_t flags)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpkg_profile_cfg cls_cfg;
	struct dpkg_extract *key;
	struct dpaa2_buf *buf = &sc->rxd_kcfg;
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int i, error = 0;

	if (__predict_true(buf->dmat == NULL)) {
		buf->dmat = sc->rxd_dmat;
	}

	memset(&cls_cfg, 0, sizeof(cls_cfg));

	/* Configure extracts according to the given flags. */
	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		key = &cls_cfg.extracts[cls_cfg.num_extracts];

		if (!(flags & dist_fields[i].id)) {
			continue;
		}

		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
			device_printf(dev, "%s: failed to add key extraction "
			    "rule\n", __func__);
			return (E2BIG);
		}

		key->type = DPKG_EXTRACT_FROM_HDR;
		key->extract.from_hdr.prot = dist_fields[i].cls_prot;
		key->extract.from_hdr.type = DPKG_FULL_FIELD;
		key->extract.from_hdr.field = dist_fields[i].cls_field;
		cls_cfg.num_extracts++;
	}

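	/*
	 * The key configuration is serialized into a DMA-able buffer below
	 * and later passed to the DPNI by its physical address.
	 */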
	error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr,
	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap);
	if (error != 0) {
		device_printf(dev, "%s: failed to allocate a buffer for Rx "
		    "traffic distribution key configuration\n", __func__);
		return (error);
	}

	error = dpaa2_ni_prepare_key_cfg(&cls_cfg, (uint8_t *)buf->vaddr);
	if (error != 0) {
		device_printf(dev, "%s: failed to prepare key configuration: "
		    "error=%d\n", __func__, error);
		return (error);
	}

	/* Prepare for setting the Rx dist. */
	error = bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr,
	    DPAA2_CLASSIFIER_DMA_SIZE, dpaa2_dmamap_oneseg_cb, &buf->paddr,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->dev, "%s: failed to map a buffer for Rx "
		    "traffic distribution key configuration\n", __func__);
		return (error);
	}

	if (type == DPAA2_NI_DIST_MODE_HASH) {
		DPAA2_CMD_INIT(&cmd);

		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
		    &rc_token);
		if (error) {
			device_printf(dev, "%s: failed to open resource "
			    "container: id=%d, error=%d\n", __func__, rcinfo->id,
			    error);
			goto err_exit;
		}
		error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id,
		    &ni_token);
		if (error) {
			device_printf(dev, "%s: failed to open network "
			    "interface: id=%d, error=%d\n", __func__, dinfo->id,
			    error);
			goto close_rc;
		}

		error = DPAA2_CMD_NI_SET_RX_TC_DIST(dev, child, &cmd,
		    sc->attr.num.queues, 0, DPAA2_NI_DIST_MODE_HASH, buf->paddr);
		if (error != 0) {
			device_printf(dev, "%s: failed to set distribution mode "
			    "and size for the traffic class\n", __func__);
		}

		(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
		    ni_token));
close_rc:
		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
		    rc_token));
	}

err_exit:
	return (error);
}


/**
 * @brief Prepare the extract parameters.
 *
 * cfg:         The full key generation profile to serialize.
 * key_cfg_buf: Zeroed 256 bytes of memory, to be mapped for DMA afterwards.
 */
static int
dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *cfg, uint8_t *key_cfg_buf)
{
	struct dpni_ext_set_rx_tc_dist *dpni_ext;
	struct dpni_dist_extract *extr;
	int i, j;

	if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
		return (EINVAL);

	dpni_ext = (struct dpni_ext_set_rx_tc_dist *) key_cfg_buf;
	dpni_ext->num_extracts = cfg->num_extracts;

	for (i = 0; i < cfg->num_extracts; i++) {
		extr = &dpni_ext->extracts[i];

		switch (cfg->extracts[i].type) {
		case DPKG_EXTRACT_FROM_HDR:
			extr->prot = cfg->extracts[i].extract.from_hdr.prot;
			extr->efh_type =
			    cfg->extracts[i].extract.from_hdr.type & 0x0Fu;
			extr->size = cfg->extracts[i].extract.from_hdr.size;
			extr->offset = cfg->extracts[i].extract.from_hdr.offset;
			extr->field = cfg->extracts[i].extract.from_hdr.field;
			extr->hdr_index =
			    cfg->extracts[i].extract.from_hdr.hdr_index;
			break;
		case DPKG_EXTRACT_FROM_DATA:
			extr->size = cfg->extracts[i].extract.from_data.size;
			extr->offset =
			    cfg->extracts[i].extract.from_data.offset;
			break;
		case DPKG_EXTRACT_FROM_PARSE:
			extr->size = cfg->extracts[i].extract.from_parse.size;
			extr->offset =
			    cfg->extracts[i].extract.from_parse.offset;
			break;
		default:
			return (EINVAL);
		}

		extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
		extr->extract_type = cfg->extracts[i].type & 0x0Fu;

		for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
			extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
			extr->masks[j].offset =
			    cfg->extracts[i].masks[j].offset;
		}
	}

	return (0);
}

static device_method_t dpaa2_ni_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		dpaa2_ni_probe),
	DEVMETHOD(device_attach,	dpaa2_ni_attach),
	DEVMETHOD(device_detach,	dpaa2_ni_detach),

	/* mii via memac_mdio */
	DEVMETHOD(miibus_statchg,	dpaa2_ni_miibus_statchg),

	DEVMETHOD_END
};

static driver_t dpaa2_ni_driver = {
	"dpaa2_ni",
	dpaa2_ni_methods,
	sizeof(struct dpaa2_ni_softc),
};

DRIVER_MODULE(miibus, dpaa2_ni, miibus_driver, 0, 0);
DRIVER_MODULE(dpaa2_ni, dpaa2_rc, dpaa2_ni_driver, 0, 0);

MODULE_DEPEND(dpaa2_ni, miibus, 1, 1, 1);
#ifdef DEV_ACPI
MODULE_DEPEND(dpaa2_ni, memac_mdio_acpi, 1, 1, 1);
#endif
#ifdef FDT
MODULE_DEPEND(dpaa2_ni, memac_mdio_fdt, 1, 1, 1);
#endif