/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright © 2021-2023 Dmitry Salychev
 * Copyright © 2022 Mathew McBride
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 * The DPAA2 Network Interface (DPNI) driver.
 *
 * The DPNI object is a network interface that is configurable to support a wide
 * range of features from a very basic Ethernet interface up to a
 * high-functioning network interface. The DPNI supports features that are
 * expected by standard network stacks, from basic features to offloads.
 *
 * DPNIs work with Ethernet traffic, starting with the L2 header. Additional
 * functions are provided for standard network protocols (L2, L3, L4, etc.).
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/taskqueue.h>
#include <sys/buf_ring.h>
#include <sys/smp.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/atomic.h>
#include <machine/vmparam.h>

#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <dev/pci/pcivar.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mdio/mdio.h>

#include "opt_acpi.h"
#include "opt_platform.h"

#include "pcib_if.h"
#include "pci_if.h"
#include "miibus_if.h"
#include "memac_mdio_if.h"

#include "dpaa2_types.h"
#include "dpaa2_mc.h"
#include "dpaa2_mc_if.h"
#include "dpaa2_mcp.h"
#include "dpaa2_swp.h"
#include "dpaa2_swp_if.h"
#include "dpaa2_cmd_if.h"
#include "dpaa2_ni.h"
#include "dpaa2_channel.h"
#include "dpaa2_buf.h"

#define BIT(x)			(1ul << (x))
#define WRIOP_VERSION(x, y, z)	((x) << 10 | (y) << 5 | (z) << 0)
#define ARRAY_SIZE(a)		(sizeof(a) / sizeof((a)[0]))

/* Frame Dequeue Response status bits. */
#define IS_NULL_RESPONSE(stat)	((((stat) >> 4) & 1) == 0)
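/*
 * NOTE: Bit 4 of the dequeue response status appears to act as a "frame
 * valid" flag here: IS_NULL_RESPONSE(0x00) and IS_NULL_RESPONSE(0x01) are
 * both true (nothing was dequeued), while IS_NULL_RESPONSE(0x10) is false.
 */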

#define ALIGN_UP(x, y)		roundup2((x), (y))
#define ALIGN_DOWN(x, y)	rounddown2((x), (y))
#define CACHE_LINE_ALIGN(x)	ALIGN_UP((x), CACHE_LINE_SIZE)
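/*
 * For instance, with a 64-byte cache line: ALIGN_UP(100, 64) == 128,
 * ALIGN_DOWN(100, 64) == 64 and CACHE_LINE_ALIGN(100) == 128. Note that
 * roundup2()/rounddown2() require the alignment to be a power of two.
 */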

#define DPNI_LOCK(__sc) do {			\
	mtx_assert(&(__sc)->lock, MA_NOTOWNED);	\
	mtx_lock(&(__sc)->lock);		\
} while (0)
#define DPNI_UNLOCK(__sc) do {			\
	mtx_assert(&(__sc)->lock, MA_OWNED);	\
	mtx_unlock(&(__sc)->lock);		\
} while (0)
#define DPNI_LOCK_ASSERT(__sc) do {		\
	mtx_assert(&(__sc)->lock, MA_OWNED);	\
} while (0)

#define DPAA2_TX_RING(sc, chan, tc) \
	(&(sc)->channels[(chan)]->txc_queue.tx_rings[(tc)])
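/*
 * Example (a sketch, not driver code): selecting the Tx ring for traffic
 * class "tc" on channel "chan":
 *
 *	struct dpaa2_ni_tx_ring *tx = DPAA2_TX_RING(sc, chan, tc);
 *
 * Both indices must stay within the limits negotiated at attach time, i.e.
 * chan < sc->chan_n and tc < sc->attr.num.tx_tcs.
 */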

MALLOC_DEFINE(M_DPAA2_TXB, "dpaa2_txb", "DPAA2 DMA-mapped buffer (Tx)");

/*
 * How many times the channel cleanup routine will be repeated if the Rx or Tx
 * budget was depleted.
 */
#define DPAA2_CLEAN_BUDGET	64	/* sysctl(9)? */
/* Tx/Rx budget for the channel cleanup task */
#define DPAA2_TX_BUDGET		128	/* sysctl(9)? */
#define DPAA2_RX_BUDGET		256	/* sysctl(9)? */

#define DPNI_IRQ_INDEX		0	/* Index of the only DPNI IRQ */
#define DPNI_IRQ_LINK_CHANGED	1	/* Link state changed */
#define DPNI_IRQ_EP_CHANGED	2	/* DPAA2 endpoint dis/connected */

/* Default maximum Rx frame length w/o CRC. */
#define DPAA2_ETH_MFL		(ETHER_MAX_LEN_JUMBO + ETHER_VLAN_ENCAP_LEN - \
    ETHER_CRC_LEN)
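/*
 * With the stock values (ETHER_MAX_LEN_JUMBO = 9018, ETHER_VLAN_ENCAP_LEN = 4,
 * ETHER_CRC_LEN = 4) this works out to 9018 bytes, i.e. a VLAN-tagged jumbo
 * frame without its trailing CRC.
 */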

/* Minimally supported version of the DPNI API. */
#define DPNI_VER_MAJOR		7
#define DPNI_VER_MINOR		0

/* Rx/Tx buffers configuration. */
#define BUF_ALIGN_V1		256	/* WRIOP v1.0.0 limitation */
#define BUF_ALIGN		64
#define BUF_SWA_SIZE		64	/* SW annotation size */
#define BUF_RX_HWA_SIZE		64	/* HW annotation size */
#define BUF_TX_HWA_SIZE		128	/* HW annotation size */

#define DPAA2_RX_BUFRING_SZ	(4096u)
#define DPAA2_RXE_BUFRING_SZ	(1024u)
#define DPAA2_TXC_BUFRING_SZ	(4096u)
#define DPAA2_TX_SEGLIMIT	(16u)	/* arbitrary number */
#define DPAA2_TX_SEG_SZ		(PAGE_SIZE)
#define DPAA2_TX_SEGS_MAXSZ	(DPAA2_TX_SEGLIMIT * DPAA2_TX_SEG_SZ)
#define DPAA2_TX_SGT_SZ		(PAGE_SIZE)	/* bytes */

/* Size of a buffer to keep a QoS table key configuration. */
#define ETH_QOS_KCFG_BUF_SIZE	(PAGE_SIZE)

/* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */
#define DPAA2_CLASSIFIER_DMA_SIZE (PAGE_SIZE)

/* Buffers layout options. */
#define BUF_LOPT_TIMESTAMP	0x1
#define BUF_LOPT_PARSER_RESULT	0x2
#define BUF_LOPT_FRAME_STATUS	0x4
#define BUF_LOPT_PRIV_DATA_SZ	0x8
#define BUF_LOPT_DATA_ALIGN	0x10
#define BUF_LOPT_DATA_HEAD_ROOM	0x20
#define BUF_LOPT_DATA_TAIL_ROOM	0x40

#define DPAA2_NI_BUF_ADDR_MASK	(0x1FFFFFFFFFFFFul) /* 49-bit addresses max. */
#define DPAA2_NI_BUF_CHAN_MASK	(0xFu)
#define DPAA2_NI_BUF_CHAN_SHIFT	(60)
#define DPAA2_NI_BUF_IDX_MASK	(0x7FFFu)
#define DPAA2_NI_BUF_IDX_SHIFT	(49)
#define DPAA2_NI_TX_IDX_MASK	(0x7u)
#define DPAA2_NI_TX_IDX_SHIFT	(57)
#define DPAA2_NI_TXBUF_IDX_MASK	(0xFFu)
#define DPAA2_NI_TXBUF_IDX_SHIFT (49)

#define DPAA2_NI_FD_FMT_MASK	(0x3u)
#define DPAA2_NI_FD_FMT_SHIFT	(12)
#define DPAA2_NI_FD_ERR_MASK	(0xFFu)
#define DPAA2_NI_FD_ERR_SHIFT	(0)
#define DPAA2_NI_FD_SL_MASK	(0x1u)
#define DPAA2_NI_FD_SL_SHIFT	(14)
#define DPAA2_NI_FD_LEN_MASK	(0x3FFFFu)
#define DPAA2_NI_FD_OFFSET_MASK	(0x0FFFu)
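/*
 * A sketch (not part of the driver) of how a frame descriptor address is
 * unpacked under the masks above: the low 49 bits hold the buffer physical
 * address, while the upper bits carry software context, e.g. for an Rx
 * buffer:
 *
 *	paddr   = fd->addr & DPAA2_NI_BUF_ADDR_MASK;
 *	chan_id = (fd->addr >> DPAA2_NI_BUF_CHAN_SHIFT) & DPAA2_NI_BUF_CHAN_MASK;
 *	buf_idx = (fd->addr >> DPAA2_NI_BUF_IDX_SHIFT) & DPAA2_NI_BUF_IDX_MASK;
 *
 * Tx confirmation frames use the TX_IDX/TXBUF_IDX pairs the same way.
 */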

/* Enables TCAM for Flow Steering and QoS look-ups. */
#define DPNI_OPT_HAS_KEY_MASKING 0x10

/* Unique IDs for the supported Rx classification header fields. */
#define DPAA2_ETH_DIST_ETHDST	BIT(0)
#define DPAA2_ETH_DIST_ETHSRC	BIT(1)
#define DPAA2_ETH_DIST_ETHTYPE	BIT(2)
#define DPAA2_ETH_DIST_VLAN	BIT(3)
#define DPAA2_ETH_DIST_IPSRC	BIT(4)
#define DPAA2_ETH_DIST_IPDST	BIT(5)
#define DPAA2_ETH_DIST_IPPROTO	BIT(6)
#define DPAA2_ETH_DIST_L4SRC	BIT(7)
#define DPAA2_ETH_DIST_L4DST	BIT(8)
#define DPAA2_ETH_DIST_ALL	(~0ULL)

/* L3-L4 network traffic flow hash options. */
#define RXH_L2DA		(1 << 1)
#define RXH_VLAN		(1 << 2)
#define RXH_L3_PROTO		(1 << 3)
#define RXH_IP_SRC		(1 << 4)
#define RXH_IP_DST		(1 << 5)
#define RXH_L4_B_0_1		(1 << 6) /* src port in case of TCP/UDP/SCTP */
#define RXH_L4_B_2_3		(1 << 7) /* dst port in case of TCP/UDP/SCTP */
#define RXH_DISCARD		(1 << 31)

/* Transmit checksum offload */
#define DPAA2_CSUM_TX_OFFLOAD	(CSUM_IP | CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6)

/* Default Rx hash options, set during attaching. */
#define DPAA2_RXH_DEFAULT	(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)

MALLOC_DEFINE(M_DPAA2_NI, "dpaa2_ni", "DPAA2 Network Interface");

/*
 * DPAA2 Network Interface resource specification.
 *
 * NOTE: Don't forget to update macros in dpaa2_ni.h in case of any changes in
 *	 the specification!
 */
struct resource_spec dpaa2_ni_spec[] = {
	/*
	 * DPMCP resources.
	 *
	 * NOTE: MC command portals (MCPs) are used to send commands to, and
	 *	 receive responses from, the MC firmware. One portal per DPNI.
	 */
	{ DPAA2_DEV_MCP, DPAA2_NI_MCP_RID(0), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	/*
	 * DPIO resources (software portals).
	 *
	 * NOTE: One per running core. While DPIOs are the source of data
	 *	 availability interrupts, the DPCONs are used to identify the
	 *	 network interface that has produced ingress data to that core.
	 */
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(0), RF_ACTIVE | RF_SHAREABLE },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(1), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(2), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(3), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(4), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(5), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(6), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(7), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(8), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(9), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(10), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(11), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(12), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(13), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(14), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(15), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	/*
	 * DPBP resources (buffer pools).
	 *
	 * NOTE: One per network interface.
	 */
	{ DPAA2_DEV_BP, DPAA2_NI_BP_RID(0), RF_ACTIVE },
	/*
	 * DPCON resources (channels).
	 *
	 * NOTE: One DPCON per core to which Rx or Tx confirmation traffic is
	 *	 distributed.
	 * NOTE: Since it is necessary to distinguish between traffic from
	 *	 different network interfaces arriving on the same core, the
	 *	 DPCONs must be private to the DPNIs.
	 */
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(0), RF_ACTIVE },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(1), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(2), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(3), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(4), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(5), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(6), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(7), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(8), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(9), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(10), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(11), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(12), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(13), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(14), RF_ACTIVE | RF_OPTIONAL },
	{ DPAA2_DEV_CON, DPAA2_NI_CON_RID(15), RF_ACTIVE | RF_OPTIONAL },

	RESOURCE_SPEC_END
};

/* Supported header fields for Rx hash distribution key */
static const struct dpaa2_eth_dist_fields dist_fields[] = {
	{
		/* L2 header */
		.rxnfc_field = RXH_L2DA,
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_DA,
		.id = DPAA2_ETH_DIST_ETHDST,
		.size = 6,
	}, {
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_SA,
		.id = DPAA2_ETH_DIST_ETHSRC,
		.size = 6,
	}, {
		/*
		 * This is the last ethertype field parsed: depending on frame
		 * format, it can be the MAC ethertype or the VLAN etype.
		 */
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_TYPE,
		.id = DPAA2_ETH_DIST_ETHTYPE,
		.size = 2,
	}, {
		/* VLAN header */
		.rxnfc_field = RXH_VLAN,
		.cls_prot = NET_PROT_VLAN,
		.cls_field = NH_FLD_VLAN_TCI,
		.id = DPAA2_ETH_DIST_VLAN,
		.size = 2,
	}, {
		/* IP header */
		.rxnfc_field = RXH_IP_SRC,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_SRC,
		.id = DPAA2_ETH_DIST_IPSRC,
		.size = 4,
	}, {
		.rxnfc_field = RXH_IP_DST,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_DST,
		.id = DPAA2_ETH_DIST_IPDST,
		.size = 4,
	}, {
		.rxnfc_field = RXH_L3_PROTO,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_PROTO,
		.id = DPAA2_ETH_DIST_IPPROTO,
		.size = 1,
	}, {
		/*
		 * Using UDP ports: this is functionally equivalent to raw
		 * byte pairs from the L4 header.
		 */
		.rxnfc_field = RXH_L4_B_0_1,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_SRC,
		.id = DPAA2_ETH_DIST_L4SRC,
		.size = 2,
	}, {
		.rxnfc_field = RXH_L4_B_2_3,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_DST,
		.id = DPAA2_ETH_DIST_L4DST,
		.size = 2,
	},
};
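/*
 * The distribution key is a concatenation of the selected fields above, so
 * the default options (IP src/dst plus L4 src/dst ports) produce a
 * 4 + 4 + 2 + 2 = 12 byte key extracted from every ingress frame; see
 * dpaa2_ni_set_dist_key() for how these entries are translated into a
 * struct dpkg_profile_cfg.
 */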

static struct dpni_stat {
	int	page;
	int	cnt;
	char	*name;
	char	*desc;
} dpni_stat_sysctls[DPAA2_NI_STAT_SYSCTLS] = {
	/* PAGE, COUNTER, NAME, DESCRIPTION */
	{ 0, 0, "in_all_frames",	"All accepted ingress frames" },
	{ 0, 1, "in_all_bytes",		"Bytes in all accepted ingress frames" },
	{ 0, 2, "in_multi_frames",	"Multicast accepted ingress frames" },
	{ 1, 0, "eg_all_frames",	"All egress frames transmitted" },
	{ 1, 1, "eg_all_bytes",		"Bytes in all frames transmitted" },
	{ 1, 2, "eg_multi_frames",	"Multicast egress frames transmitted" },
	{ 2, 0, "in_filtered_frames",	"All ingress frames discarded due to "
	    "filtering" },
	{ 2, 1, "in_discarded_frames",	"All frames discarded due to errors" },
	{ 2, 2, "in_nobuf_discards",	"Discards on ingress side due to buffer "
	    "depletion in DPNI buffer pools" },
};

struct dpaa2_ni_rx_ctx {
	struct mbuf	*head;
	struct mbuf	*tail;
	int		 cnt;
	bool		 last;
};

/* Device interface */
static int dpaa2_ni_probe(device_t);
static int dpaa2_ni_attach(device_t);
static int dpaa2_ni_detach(device_t);

/* DPAA2 network interface setup and configuration */
static int dpaa2_ni_setup(device_t);
static int dpaa2_ni_setup_channels(device_t);
static int dpaa2_ni_bind(device_t);
static int dpaa2_ni_setup_rx_dist(device_t);
static int dpaa2_ni_setup_irqs(device_t);
static int dpaa2_ni_setup_msi(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_dma(struct dpaa2_ni_softc *);

/* Tx/Rx flow configuration */
static int dpaa2_ni_setup_rx_flow(device_t, struct dpaa2_ni_fq *);
static int dpaa2_ni_setup_tx_flow(device_t, struct dpaa2_ni_fq *);
static int dpaa2_ni_setup_rx_err_flow(device_t, struct dpaa2_ni_fq *);

/* Configuration subroutines */
static int dpaa2_ni_set_buf_layout(device_t);
static int dpaa2_ni_set_pause_frame(device_t);
static int dpaa2_ni_set_qos_table(device_t);
static int dpaa2_ni_set_mac_addr(device_t);
static int dpaa2_ni_set_hash(device_t, uint64_t);
static int dpaa2_ni_set_dist_key(device_t, enum dpaa2_ni_dist_mode, uint64_t);

/* Frame descriptor routines */
static int dpaa2_ni_build_fd(struct dpaa2_ni_softc *, struct dpaa2_ni_tx_ring *,
    struct dpaa2_buf *, bus_dma_segment_t *, int, struct dpaa2_fd *);
static int dpaa2_ni_fd_err(struct dpaa2_fd *);
static uint32_t dpaa2_ni_fd_data_len(struct dpaa2_fd *);
static int dpaa2_ni_fd_format(struct dpaa2_fd *);
static bool dpaa2_ni_fd_short_len(struct dpaa2_fd *);
static int dpaa2_ni_fd_offset(struct dpaa2_fd *);

/* Various subroutines */
static int dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *, uint16_t, uint16_t);
static int dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *, uint8_t *);

/* Network interface routines */
static void dpaa2_ni_init(void *);
static int  dpaa2_ni_transmit(if_t, struct mbuf *);
static void dpaa2_ni_qflush(if_t);
static int  dpaa2_ni_ioctl(if_t, u_long, caddr_t);
static int  dpaa2_ni_update_mac_filters(if_t);
static u_int dpaa2_ni_add_maddr(void *, struct sockaddr_dl *, u_int);

/* Interrupt handlers */
static void dpaa2_ni_intr(void *);

/* MII handlers */
static void dpaa2_ni_miibus_statchg(device_t);
static int  dpaa2_ni_media_change(if_t);
static void dpaa2_ni_media_status(if_t, struct ifmediareq *);
static void dpaa2_ni_media_tick(void *);

/* Tx/Rx routines. */
static int dpaa2_ni_rx_cleanup(struct dpaa2_channel *);
static int dpaa2_ni_tx_cleanup(struct dpaa2_channel *);
static void dpaa2_ni_tx(struct dpaa2_ni_softc *, struct dpaa2_channel *,
    struct dpaa2_ni_tx_ring *, struct mbuf *);
static void dpaa2_ni_cleanup_task(void *, int);

/* Tx/Rx subroutines */
static int dpaa2_ni_consume_frames(struct dpaa2_channel *, struct dpaa2_ni_fq **,
    uint32_t *);
static int dpaa2_ni_rx(struct dpaa2_channel *, struct dpaa2_ni_fq *,
    struct dpaa2_fd *, struct dpaa2_ni_rx_ctx *);
static int dpaa2_ni_rx_err(struct dpaa2_channel *, struct dpaa2_ni_fq *,
    struct dpaa2_fd *);
static int dpaa2_ni_tx_conf(struct dpaa2_channel *, struct dpaa2_ni_fq *,
    struct dpaa2_fd *);

/* sysctl(9) */
static int dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS);
static int dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS);
static int dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS);

static int
dpaa2_ni_probe(device_t dev)
{
	/* DPNI device will be added by a parent resource container itself. */
	device_set_desc(dev, "DPAA2 Network Interface");
	return (BUS_PROBE_DEFAULT);
}

static int
dpaa2_ni_attach(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	device_t mcp_dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *mcp_dinfo;
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	if_t ifp;
	char tq_name[32];
	int error;

	sc->dev = dev;
	sc->ifp = NULL;
	sc->miibus = NULL;
	sc->mii = NULL;
	sc->media_status = 0;
	sc->if_flags = 0;
	sc->link_state = LINK_STATE_UNKNOWN;
	sc->buf_align = 0;

	/* For debug purposes only! */
	sc->rx_anomaly_frames = 0;
	sc->rx_single_buf_frames = 0;
	sc->rx_sg_buf_frames = 0;
	sc->rx_enq_rej_frames = 0;
	sc->rx_ieoi_err_frames = 0;
	sc->tx_single_buf_frames = 0;
	sc->tx_sg_frames = 0;

	DPAA2_ATOMIC_XCHG(&sc->buf_num, 0);
	DPAA2_ATOMIC_XCHG(&sc->buf_free, 0);

	sc->rxd_dmat = NULL;
	sc->qos_dmat = NULL;

	sc->qos_kcfg.dmap = NULL;
	sc->qos_kcfg.paddr = 0;
	sc->qos_kcfg.vaddr = NULL;

	sc->rxd_kcfg.dmap = NULL;
	sc->rxd_kcfg.paddr = 0;
	sc->rxd_kcfg.vaddr = NULL;

	sc->mac.dpmac_id = 0;
	sc->mac.phy_dev = NULL;
	memset(sc->mac.addr, 0, ETHER_ADDR_LEN);

	error = bus_alloc_resources(sc->dev, dpaa2_ni_spec, sc->res);
	if (error) {
		device_printf(dev, "%s: failed to allocate resources: "
		    "error=%d\n", __func__, error);
		goto err_exit;
	}

	/* Obtain MC portal. */
	mcp_dev = (device_t) rman_get_start(sc->res[DPAA2_NI_MCP_RID(0)]);
	mcp_dinfo = device_get_ivars(mcp_dev);
	dinfo->portal = mcp_dinfo->portal;

	mtx_init(&sc->lock, device_get_nameunit(dev), "dpaa2_ni", MTX_DEF);

	/* Allocate network interface */
	ifp = if_alloc(IFT_ETHER);
	sc->ifp = ifp;
	if_initname(ifp, DPAA2_NI_IFNAME, device_get_unit(sc->dev));

	if_setsoftc(ifp, sc);
	if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST);
	if_setinitfn(ifp, dpaa2_ni_init);
	if_setioctlfn(ifp, dpaa2_ni_ioctl);
	if_settransmitfn(ifp, dpaa2_ni_transmit);
	if_setqflushfn(ifp, dpaa2_ni_qflush);

	if_sethwassist(sc->ifp, DPAA2_CSUM_TX_OFFLOAD);
	if_setcapabilities(ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM |
	    IFCAP_HWCSUM_IPV6 | IFCAP_JUMBO_MTU);
	if_setcapenable(ifp, if_getcapabilities(ifp));

	DPAA2_CMD_INIT(&cmd);

	/* Open resource container and network interface object. */
	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	bzero(tq_name, sizeof(tq_name));
	snprintf(tq_name, sizeof(tq_name), "%s_tqbp", device_get_nameunit(dev));

	/*
	 * XXX-DSL: Release new buffers on Buffer Pool State Change Notification
	 *          (BPSCN) returned as a result to the VDQ command instead.
	 *          It is similar to CDAN processed in dpaa2_io_intr().
	 */
	/* Create a taskqueue thread to release new buffers to the pool. */
	sc->bp_taskq = taskqueue_create(tq_name, M_WAITOK,
	    taskqueue_thread_enqueue, &sc->bp_taskq);
	taskqueue_start_threads(&sc->bp_taskq, 1, PI_NET, "%s", tq_name);

	/* sc->cleanup_taskq = taskqueue_create("dpaa2_ch cleanup", M_WAITOK, */
	/*     taskqueue_thread_enqueue, &sc->cleanup_taskq); */
	/* taskqueue_start_threads(&sc->cleanup_taskq, 1, PI_NET, */
	/*     "dpaa2_ch cleanup"); */

	error = dpaa2_ni_setup(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup DPNI: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_channels(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup QBMan channels: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	error = dpaa2_ni_bind(dev);
	if (error) {
		device_printf(dev, "%s: failed to bind DPNI: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_irqs(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup IRQs: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_sysctls(sc);
	if (error) {
		device_printf(dev, "%s: failed to setup sysctls: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_if_caps(sc);
	if (error) {
		device_printf(dev, "%s: failed to setup interface capabilities: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	ether_ifattach(sc->ifp, sc->mac.addr);
	callout_init(&sc->mii_callout, 0);

	return (0);

close_ni:
	DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (ENXIO);
}

static void
dpaa2_ni_fixed_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);

	DPNI_LOCK(sc);
	ifmr->ifm_count = 0;
	ifmr->ifm_mask = 0;
	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifmr->ifm_current = ifmr->ifm_active =
	    sc->fixed_ifmedia.ifm_cur->ifm_media;

	/*
	 * In non-PHY use cases we need to signal link state up; otherwise
	 * actions that require a link event from devd (e.g. an async DHCP
	 * client) do not happen.
	 */
	if (if_getlinkstate(ifp) == LINK_STATE_UNKNOWN) {
		if_link_state_change(ifp, LINK_STATE_UP);
	}

	/*
	 * TODO: Check the status of the link partner (DPMAC, DPNI or other)
	 * and reset if down. This differs from the DPAA2_MAC_LINK_TYPE_PHY
	 * case, where the MC firmware sets the status, instead of us telling
	 * the MC what it is.
	 */
	DPNI_UNLOCK(sc);

	return;
}

static void
dpaa2_ni_setup_fixed_link(struct dpaa2_ni_softc *sc)
{
	/*
	 * FIXME: When the DPNI is connected to a DPMAC, we can get the
	 * 'apparent' speed from it.
	 */
	sc->fixed_link = true;

	ifmedia_init(&sc->fixed_ifmedia, 0, dpaa2_ni_media_change,
	    dpaa2_ni_fixed_media_status);
	ifmedia_add(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL);
	ifmedia_set(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T);
}

static int
dpaa2_ni_detach(device_t dev)
{
	/* TBD */
	return (0);
}

/**
 * @brief Configure DPAA2 network interface object.
 */
static int
dpaa2_ni_setup(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_ep_desc ep1_desc, ep2_desc; /* endpoint descriptors */
	struct dpaa2_cmd cmd;
	uint8_t eth_bca[ETHER_ADDR_LEN]; /* broadcast physical address */
	uint16_t rc_token, ni_token, mac_token;
	struct dpaa2_mac_attr attr;
	enum dpaa2_mac_link_type link_type;
	uint32_t link;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Check if we can work with this DPNI object. */
	error = DPAA2_CMD_NI_GET_API_VERSION(dev, child, &cmd, &sc->api_major,
	    &sc->api_minor);
	if (error) {
		device_printf(dev, "%s: failed to get DPNI API version\n",
		    __func__);
		goto close_ni;
	}
	if (dpaa2_ni_cmp_api_version(sc, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
		device_printf(dev, "%s: DPNI API version %u.%u not supported, "
		    "need >= %u.%u\n", __func__, sc->api_major, sc->api_minor,
		    DPNI_VER_MAJOR, DPNI_VER_MINOR);
		error = ENODEV;
		goto close_ni;
	}

	/* Reset the DPNI object. */
	error = DPAA2_CMD_NI_RESET(dev, child, &cmd);
	if (error) {
		device_printf(dev, "%s: failed to reset DPNI: id=%d\n",
		    __func__, dinfo->id);
		goto close_ni;
	}

	/* Obtain attributes of the DPNI object. */
	error = DPAA2_CMD_NI_GET_ATTRIBUTES(dev, child, &cmd, &sc->attr);
	if (error) {
		device_printf(dev, "%s: failed to obtain DPNI attributes: "
		    "id=%d\n", __func__, dinfo->id);
		goto close_ni;
	}
	if (bootverbose) {
		device_printf(dev, "\toptions=%#x queues=%d tx_channels=%d "
		    "wriop_version=%#x\n", sc->attr.options, sc->attr.num.queues,
		    sc->attr.num.channels, sc->attr.wriop_ver);
		device_printf(dev, "\ttraffic classes: rx=%d tx=%d "
		    "cgs_groups=%d\n", sc->attr.num.rx_tcs, sc->attr.num.tx_tcs,
		    sc->attr.num.cgs);
		device_printf(dev, "\ttable entries: mac=%d vlan=%d qos=%d "
		    "fs=%d\n", sc->attr.entries.mac, sc->attr.entries.vlan,
		    sc->attr.entries.qos, sc->attr.entries.fs);
		device_printf(dev, "\tkey sizes: qos=%d fs=%d\n",
		    sc->attr.key_size.qos, sc->attr.key_size.fs);
	}

	/* Configure buffer layouts of the DPNI queues. */
	error = dpaa2_ni_set_buf_layout(dev);
	if (error) {
		device_printf(dev, "%s: failed to configure buffer layout\n",
		    __func__);
		goto close_ni;
	}

	/* Configure DMA resources. */
	error = dpaa2_ni_setup_dma(sc);
	if (error) {
		device_printf(dev, "%s: failed to setup DMA\n", __func__);
		goto close_ni;
	}

	/* Setup link between DPNI and an object it's connected to. */
	ep1_desc.obj_id = dinfo->id;
	ep1_desc.if_id = 0; /* DPNI has only one endpoint */
	ep1_desc.type = dinfo->dtype;

	error = DPAA2_CMD_RC_GET_CONN(dev, child, DPAA2_CMD_TK(&cmd, rc_token),
	    &ep1_desc, &ep2_desc, &link);
	if (error) {
		device_printf(dev, "%s: failed to obtain an object DPNI is "
		    "connected to: error=%d\n", __func__, error);
	} else {
		device_printf(dev, "connected to %s (id=%d)\n",
		    dpaa2_ttos(ep2_desc.type), ep2_desc.obj_id);

		error = dpaa2_ni_set_mac_addr(dev);
		if (error) {
			device_printf(dev, "%s: failed to set MAC address: "
			    "error=%d\n", __func__, error);
		}

		if (ep2_desc.type == DPAA2_DEV_MAC) {
			/*
			 * This is the simplest case when DPNI is connected to
			 * DPMAC directly.
			 */
			sc->mac.dpmac_id = ep2_desc.obj_id;

			link_type = DPAA2_MAC_LINK_TYPE_NONE;

			/*
			 * Need to determine if DPMAC type is PHY (attached to
			 * conventional MII PHY) or FIXED (usually SFP/SerDes,
			 * link state managed by MC firmware).
			 */
			error = DPAA2_CMD_MAC_OPEN(sc->dev, child,
			    DPAA2_CMD_TK(&cmd, rc_token), sc->mac.dpmac_id,
			    &mac_token);
			/*
			 * Under VFIO, the DPMAC might be sitting in another
			 * container (DPRC) we don't have access to. Assume
			 * DPAA2_MAC_LINK_TYPE_FIXED if this is the case.
			 */
			if (error) {
				device_printf(dev, "%s: failed to open "
				    "connected DPMAC: %d (assuming in other "
				    "DPRC)\n", __func__, sc->mac.dpmac_id);
				link_type = DPAA2_MAC_LINK_TYPE_FIXED;
			} else {
				error = DPAA2_CMD_MAC_GET_ATTRIBUTES(dev, child,
				    &cmd, &attr);
				if (error) {
					device_printf(dev, "%s: failed to get "
					    "DPMAC attributes: id=%d, "
					    "error=%d\n", __func__, dinfo->id,
					    error);
				} else {
					link_type = attr.link_type;
				}
			}
			DPAA2_CMD_MAC_CLOSE(dev, child, &cmd);

			if (link_type == DPAA2_MAC_LINK_TYPE_FIXED) {
				device_printf(dev, "connected DPMAC is in FIXED "
				    "mode\n");
				dpaa2_ni_setup_fixed_link(sc);
			} else if (link_type == DPAA2_MAC_LINK_TYPE_PHY) {
				device_printf(dev, "connected DPMAC is in PHY "
				    "mode\n");
				error = DPAA2_MC_GET_PHY_DEV(dev,
				    &sc->mac.phy_dev, sc->mac.dpmac_id);
				if (error == 0) {
					error = MEMAC_MDIO_SET_NI_DEV(
					    sc->mac.phy_dev, dev);
					if (error != 0) {
						device_printf(dev, "%s: failed "
						    "to set dpni dev on memac "
						    "mdio dev %s: error=%d\n",
						    __func__,
						    device_get_nameunit(
						    sc->mac.phy_dev), error);
					}
				}
				if (error == 0) {
					error = MEMAC_MDIO_GET_PHY_LOC(
					    sc->mac.phy_dev, &sc->mac.phy_loc);
					if (error == ENODEV) {
						error = 0;
					}
					if (error != 0) {
						device_printf(dev, "%s: failed "
						    "to get phy location from "
						    "memac mdio dev %s: "
						    "error=%d\n", __func__,
						    device_get_nameunit(
						    sc->mac.phy_dev), error);
					}
				}
				if (error == 0) {
					error = mii_attach(sc->mac.phy_dev,
					    &sc->miibus, sc->ifp,
					    dpaa2_ni_media_change,
					    dpaa2_ni_media_status,
					    BMSR_DEFCAPMASK, sc->mac.phy_loc,
					    MII_OFFSET_ANY, 0);
					if (error != 0) {
						device_printf(dev, "%s: failed "
						    "to attach to miibus: "
						    "error=%d\n",
						    __func__, error);
					}
				}
				if (error == 0) {
					sc->mii = device_get_softc(sc->miibus);
				}
			} else {
				device_printf(dev, "%s: DPMAC link type is not "
				    "supported\n", __func__);
			}
		} else if (ep2_desc.type == DPAA2_DEV_NI ||
		    ep2_desc.type == DPAA2_DEV_MUX ||
		    ep2_desc.type == DPAA2_DEV_SW) {
			dpaa2_ni_setup_fixed_link(sc);
		}
	}

	/* Select mode to enqueue frames. */
	/* ... TBD ... */

	/*
	 * Update link configuration to enable Rx/Tx pause frames support.
	 *
	 * NOTE: MC may generate an interrupt to the DPMAC and request changes
	 *	 in link configuration. It might be necessary to attach miibus
	 *	 and PHY before this point.
	 */
	error = dpaa2_ni_set_pause_frame(dev);
	if (error) {
		device_printf(dev, "%s: failed to configure Rx/Tx pause "
		    "frames\n", __func__);
		goto close_ni;
	}

	/* Configure ingress traffic classification. */
	error = dpaa2_ni_set_qos_table(dev);
	if (error) {
		device_printf(dev, "%s: failed to configure QoS table: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	/* Add broadcast physical address to the MAC filtering table. */
	memset(eth_bca, 0xff, ETHER_ADDR_LEN);
	error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, DPAA2_CMD_TK(&cmd,
	    ni_token), eth_bca);
	if (error) {
		device_printf(dev, "%s: failed to add broadcast physical "
		    "address to the MAC filtering table\n", __func__);
		goto close_ni;
	}

	/* Set the maximum allowed length for received frames. */
	error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd, DPAA2_ETH_MFL);
	if (error) {
		device_printf(dev, "%s: failed to set maximum length for "
		    "received frames\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

/**
 * @brief Configure QBMan channels and register data availability notifications.
 */
static int
dpaa2_ni_setup_channels(device_t dev)
{
	device_t iodev, condev, bpdev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	uint32_t i, num_chan;
	int error;

	/* Calculate number of the channels based on the allocated resources */
	for (i = 0; i < DPAA2_NI_IO_RES_NUM; i++) {
		if (!sc->res[DPAA2_NI_IO_RID(i)]) {
			break;
		}
	}
	num_chan = i;
	for (i = 0; i < DPAA2_NI_CON_RES_NUM; i++) {
		if (!sc->res[DPAA2_NI_CON_RID(i)]) {
			break;
		}
	}
	num_chan = i < num_chan ? i : num_chan;
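	/*
	 * Example: with 8 DPIO and 4 DPCON resources granted, num_chan ends
	 * up as min(8, 4) = 4; the clamps below then cap it further by
	 * DPAA2_MAX_CHANNELS and by the number of DPNI queues.
	 */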
	sc->chan_n = num_chan > DPAA2_MAX_CHANNELS
	    ? DPAA2_MAX_CHANNELS : num_chan;
	sc->chan_n = sc->chan_n > sc->attr.num.queues
	    ? sc->attr.num.queues : sc->chan_n;

	KASSERT(sc->chan_n > 0u, ("%s: positive number of channels expected: "
	    "chan_n=%d", __func__, sc->chan_n));

	device_printf(dev, "channels=%d\n", sc->chan_n);

	for (i = 0; i < sc->chan_n; i++) {
		iodev = (device_t)rman_get_start(sc->res[DPAA2_NI_IO_RID(i)]);
		condev = (device_t)rman_get_start(sc->res[DPAA2_NI_CON_RID(i)]);
		/* Only one buffer pool available at the moment */
		bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);

		error = dpaa2_chan_setup(dev, iodev, condev, bpdev,
		    &sc->channels[i], i, dpaa2_ni_cleanup_task);
		if (error != 0) {
			device_printf(dev, "%s: dpaa2_chan_setup() failed: "
			    "error=%d, chan_id=%d\n", __func__, error, i);
			return (error);
		}
	}

	/* There is exactly one Rx error queue per network interface */
	error = dpaa2_chan_setup_fq(dev, sc->channels[0], DPAA2_NI_QUEUE_RX_ERR);
	if (error != 0) {
		device_printf(dev, "%s: failed to prepare RxError queue: "
		    "error=%d\n", __func__, error);
		return (error);
	}

	return (0);
}

/**
 * @brief Bind DPNI to DPBPs, DPIOs, frame queues and channels.
 */
static int
dpaa2_ni_bind(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	device_t bp_dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *bp_info;
	struct dpaa2_cmd cmd;
	struct dpaa2_ni_pools_cfg pools_cfg;
	struct dpaa2_ni_err_cfg err_cfg;
	struct dpaa2_channel *chan;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Select buffer pool (only one available at the moment). */
	bp_dev = (device_t) rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
	bp_info = device_get_ivars(bp_dev);

	/* Configure buffers pool. */
	pools_cfg.pools_num = 1;
	pools_cfg.pools[0].bp_obj_id = bp_info->id;
	pools_cfg.pools[0].backup_flag = 0;
	pools_cfg.pools[0].buf_sz = sc->buf_sz;
	error = DPAA2_CMD_NI_SET_POOLS(dev, child, &cmd, &pools_cfg);
	if (error) {
		device_printf(dev, "%s: failed to set buffer pools\n", __func__);
		goto close_ni;
	}

	/* Setup ingress traffic distribution. */
	error = dpaa2_ni_setup_rx_dist(dev);
	if (error && error != EOPNOTSUPP) {
		device_printf(dev, "%s: failed to setup ingress traffic "
		    "distribution\n", __func__);
		goto close_ni;
	}
	if (bootverbose && error == EOPNOTSUPP) {
		device_printf(dev, "Ingress traffic distribution not "
		    "supported\n");
	}

	/* Configure handling of error frames. */
	err_cfg.err_mask = DPAA2_NI_FAS_RX_ERR_MASK;
	err_cfg.set_err_fas = false;
	err_cfg.action = DPAA2_NI_ERR_DISCARD;
	error = DPAA2_CMD_NI_SET_ERR_BEHAVIOR(dev, child, &cmd, &err_cfg);
	if (error) {
		device_printf(dev, "%s: failed to set errors behavior\n",
		    __func__);
		goto close_ni;
	}

	/* Configure channel queues to generate CDANs. */
	for (uint32_t i = 0; i < sc->chan_n; i++) {
		chan = sc->channels[i];

		/* Setup Rx flows. */
		for (uint32_t j = 0; j < chan->rxq_n; j++) {
			error = dpaa2_ni_setup_rx_flow(dev, &chan->rx_queues[j]);
			if (error) {
				device_printf(dev, "%s: failed to setup Rx "
				    "flow: error=%d\n", __func__, error);
				goto close_ni;
			}
		}

		/* Setup Tx flow. */
		error = dpaa2_ni_setup_tx_flow(dev, &chan->txc_queue);
		if (error) {
			device_printf(dev, "%s: failed to setup Tx "
			    "flow: error=%d\n", __func__, error);
			goto close_ni;
		}
	}

	/* Configure RxError queue to generate CDAN. */
	error = dpaa2_ni_setup_rx_err_flow(dev, &sc->rxe_queue);
	if (error) {
		device_printf(dev, "%s: failed to setup RxError flow: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	/*
	 * Get the Queuing Destination ID (QDID) that should be used for frame
	 * enqueue operations.
	 */
	error = DPAA2_CMD_NI_GET_QDID(dev, child, &cmd, DPAA2_NI_QUEUE_TX,
	    &sc->tx_qdid);
	if (error) {
		device_printf(dev, "%s: failed to get Tx queuing destination "
		    "ID\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

/**
 * @brief Setup ingress traffic distribution.
 *
 * NOTE: Ingress traffic distribution is valid only when the DPNI_OPT_NO_FS
 *	 option hasn't been set for the DPNI and the DPNI has more than one
 *	 queue.
 */
static int
dpaa2_ni_setup_rx_dist(device_t dev)
{
	/*
	 * Have the interface implicitly distribute traffic based on the
	 * default hash key.
	 */
	return (dpaa2_ni_set_hash(dev, DPAA2_RXH_DEFAULT));
}
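
/*
 * NOTE: With DPAA2_RXH_DEFAULT the distribution key is built from the
 * dist_fields[] entries whose rxnfc_field bits are selected, i.e. IP source
 * and destination addresses plus L4 source and destination ports; flows
 * between the same pair of hosts are thus still spread across channels by
 * their port numbers.
 */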

static int
dpaa2_ni_setup_rx_flow(device_t dev, struct dpaa2_ni_fq *fq)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *con_info;
	struct dpaa2_cmd cmd;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Obtain DPCON associated with the FQ's channel. */
	con_info = device_get_ivars(fq->chan->con_dev);

	queue_cfg.type = DPAA2_NI_QUEUE_RX;
	queue_cfg.tc = fq->tc;
	queue_cfg.idx = fq->flowid;
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain Rx queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	fq->fqid = queue_cfg.fqid;

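	/*
	 * The frame queue pointer is round-tripped through the 64-bit user
	 * context: QBMan hands it back verbatim with each dequeue response
	 * and CDAN, so dpaa2_ni_consume_frames() can recover the struct
	 * dpaa2_ni_fq without any lookup.
	 */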
	queue_cfg.dest_id = con_info->id;
	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
	queue_cfg.priority = 1;
	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
	queue_cfg.options =
	    DPAA2_NI_QUEUE_OPT_USER_CTX |
	    DPAA2_NI_QUEUE_OPT_DEST;
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to update Rx queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	if (bootverbose) {
		device_printf(dev, "RX queue idx=%d, tc=%d, chan=%d, fqid=%d, "
		    "user_ctx=%#jx\n", fq->flowid, fq->tc, fq->chan->id,
		    fq->fqid, (uintmax_t)fq);
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, &cmd);
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

static int
dpaa2_ni_setup_tx_flow(device_t dev, struct dpaa2_ni_fq *fq)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_channel *ch = fq->chan;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *con_info;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	struct dpaa2_ni_tx_ring *tx;
	struct dpaa2_buf *buf;
	struct dpaa2_cmd cmd;
	uint32_t tx_rings_n = 0;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Obtain DPCON associated with the FQ's channel. */
	con_info = device_get_ivars(fq->chan->con_dev);

	KASSERT(sc->attr.num.tx_tcs <= DPAA2_MAX_TCS,
	    ("%s: too many Tx traffic classes: tx_tcs=%d\n", __func__,
	    sc->attr.num.tx_tcs));
	KASSERT(DPAA2_NI_BUFS_PER_TX <= DPAA2_NI_MAX_BPTX,
	    ("%s: too many Tx buffers (%d): max=%d\n", __func__,
	    DPAA2_NI_BUFS_PER_TX, DPAA2_NI_MAX_BPTX));

	/* Setup Tx rings. */
	for (int i = 0; i < sc->attr.num.tx_tcs; i++) {
		queue_cfg.type = DPAA2_NI_QUEUE_TX;
		queue_cfg.tc = i;
		queue_cfg.idx = fq->flowid;
		queue_cfg.chan_id = fq->chan->id;

		error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
		if (error) {
			device_printf(dev, "%s: failed to obtain Tx queue "
			    "configuration: tc=%d, flowid=%d\n", __func__,
			    queue_cfg.tc, queue_cfg.idx);
			goto close_ni;
		}

		tx = &fq->tx_rings[i];
		tx->fq = fq;
		tx->fqid = queue_cfg.fqid;
		tx->txid = tx_rings_n;

		if (bootverbose) {
			device_printf(dev, "TX queue idx=%d, tc=%d, chan=%d, "
			    "fqid=%d\n", fq->flowid, i, fq->chan->id,
			    queue_cfg.fqid);
		}

		mtx_init(&tx->lock, "dpaa2_tx_ring", NULL, MTX_DEF);

		/* Allocate Tx ring buffer. */
		tx->br = buf_ring_alloc(DPAA2_TX_BUFRING_SZ, M_DEVBUF, M_NOWAIT,
		    &tx->lock);
		if (tx->br == NULL) {
			device_printf(dev, "%s: failed to setup Tx ring "
			    "buffer: fqid=%d\n", __func__, tx->fqid);
			goto close_ni;
		}

		/* Configure Tx buffers */
		for (uint64_t j = 0; j < DPAA2_NI_BUFS_PER_TX; j++) {
			buf = malloc(sizeof(struct dpaa2_buf), M_DPAA2_TXB,
			    M_WAITOK);
			/* Keep DMA tag and Tx ring linked to the buffer */
			DPAA2_BUF_INIT_TAGOPT(buf, ch->tx_dmat, tx);

			buf->sgt = malloc(sizeof(struct dpaa2_buf), M_DPAA2_TXB,
			    M_WAITOK);
			/* Link SGT to DMA tag and back to its Tx buffer */
			DPAA2_BUF_INIT_TAGOPT(buf->sgt, ch->sgt_dmat, buf);

			error = dpaa2_buf_seed_txb(dev, buf);

			/* Add Tx buffer to the ring */
			buf_ring_enqueue(tx->br, buf);
		}

		tx_rings_n++;
	}

	/* All Tx queues which belong to the same flowid have the same qdbin. */
	fq->tx_qdbin = queue_cfg.qdbin;

	queue_cfg.type = DPAA2_NI_QUEUE_TX_CONF;
	queue_cfg.tc = 0; /* ignored for TxConf queue */
	queue_cfg.idx = fq->flowid;
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain TxConf queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	fq->fqid = queue_cfg.fqid;

	queue_cfg.dest_id = con_info->id;
	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
	queue_cfg.priority = 0;
	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
	queue_cfg.options =
	    DPAA2_NI_QUEUE_OPT_USER_CTX |
	    DPAA2_NI_QUEUE_OPT_DEST;
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to update TxConf queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

static int
dpaa2_ni_setup_rx_err_flow(device_t dev, struct dpaa2_ni_fq *fq)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *con_info;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Obtain DPCON associated with the FQ's channel. */
	con_info = device_get_ivars(fq->chan->con_dev);

	queue_cfg.type = DPAA2_NI_QUEUE_RX_ERR;
	queue_cfg.tc = fq->tc; /* ignored */
	queue_cfg.idx = fq->flowid; /* ignored */
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain RxErr queue "
		    "configuration\n", __func__);
		goto close_ni;
	}

	fq->fqid = queue_cfg.fqid;

	queue_cfg.dest_id = con_info->id;
	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
	queue_cfg.priority = 1;
	queue_cfg.user_ctx = (uint64_t)(uintmax_t) fq;
	queue_cfg.options =
	    DPAA2_NI_QUEUE_OPT_USER_CTX |
	    DPAA2_NI_QUEUE_OPT_DEST;
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to update RxErr queue "
		    "configuration\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

/**
 * @brief Configure DPNI object to generate interrupts.
 */
static int
dpaa2_ni_setup_irqs(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Configure IRQs. */
	error = dpaa2_ni_setup_msi(sc);
	if (error) {
		device_printf(dev, "%s: failed to allocate MSI\n", __func__);
		goto close_ni;
	}
	if ((sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) {
		device_printf(dev, "%s: failed to allocate IRQ resource\n",
		    __func__);
		goto close_ni;
	}
	if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, dpaa2_ni_intr, sc, &sc->intr)) {
		device_printf(dev, "%s: failed to setup IRQ resource\n",
		    __func__);
		goto close_ni;
	}

	error = DPAA2_CMD_NI_SET_IRQ_MASK(dev, child, &cmd, DPNI_IRQ_INDEX,
	    DPNI_IRQ_LINK_CHANGED | DPNI_IRQ_EP_CHANGED);
	if (error) {
		device_printf(dev, "%s: failed to set DPNI IRQ mask\n",
		    __func__);
		goto close_ni;
	}

	error = DPAA2_CMD_NI_SET_IRQ_ENABLE(dev, child, &cmd, DPNI_IRQ_INDEX,
	    true);
	if (error) {
		device_printf(dev, "%s: failed to enable DPNI IRQ\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

/**
 * @brief Allocate MSI interrupts for DPNI.
 */
static int
dpaa2_ni_setup_msi(struct dpaa2_ni_softc *sc)
{
	int val;

	val = pci_msi_count(sc->dev);
	if (val < DPAA2_NI_MSI_COUNT)
		device_printf(sc->dev, "MSI: actual=%d, expected=%d\n", val,
		    DPAA2_NI_MSI_COUNT);
	val = MIN(val, DPAA2_NI_MSI_COUNT);

	if (pci_alloc_msi(sc->dev, &val) != 0)
		return (EINVAL);

	for (int i = 0; i < val; i++)
		sc->irq_rid[i] = i + 1;

	return (0);
}
1576
1577 /**
1578 * @brief Update DPNI according to the updated interface capabilities.
1579 */
1580 static int
dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc * sc)1581 dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *sc)
1582 {
1583 bool en_rxcsum, en_txcsum;
1584 device_t pdev = device_get_parent(sc->dev);
1585 device_t dev = sc->dev;
1586 device_t child = dev;
1587 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1588 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1589 struct dpaa2_cmd cmd;
1590 uint16_t rc_token, ni_token;
1591 int error;
1592
1593 DPAA2_CMD_INIT(&cmd);
1594
1595 /*
1596 * XXX-DSL: DPAA2 allows to validate L3/L4 checksums on reception and/or
1597 * generate L3/L4 checksums on transmission without
1598 * differentiating between IPv4/v6, i.e. enable for both
1599 * protocols if requested.
1600 */
1601 en_rxcsum = if_getcapenable(sc->ifp) &
1602 (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
1603 en_txcsum = if_getcapenable(sc->ifp) &
1604 (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6);
1605
1606 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1607 if (error) {
1608 device_printf(dev, "%s: failed to open resource container: "
1609 "id=%d, error=%d\n", __func__, rcinfo->id, error);
1610 goto err_exit;
1611 }
1612 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1613 if (error) {
1614 device_printf(dev, "%s: failed to open network interface: "
1615 "id=%d, error=%d\n", __func__, dinfo->id, error);
1616 goto close_rc;
1617 }
1618
1619 /* Setup checksums validation. */
1620 error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1621 DPAA2_NI_OFL_RX_L3_CSUM, en_rxcsum);
1622 if (error) {
1623 device_printf(dev, "%s: failed to %s L3 checksum validation\n",
1624 __func__, en_rxcsum ? "enable" : "disable");
1625 goto close_ni;
1626 }
1627 error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1628 DPAA2_NI_OFL_RX_L4_CSUM, en_rxcsum);
1629 if (error) {
1630 device_printf(dev, "%s: failed to %s L4 checksum validation\n",
1631 __func__, en_rxcsum ? "enable" : "disable");
1632 goto close_ni;
1633 }
1634
1635 /* Setup checksums generation. */
1636 error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1637 DPAA2_NI_OFL_TX_L3_CSUM, en_txcsum);
1638 if (error) {
1639 device_printf(dev, "%s: failed to %s L3 checksum generation\n",
1640 __func__, en_txcsum ? "enable" : "disable");
1641 goto close_ni;
1642 }
1643 error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd,
1644 DPAA2_NI_OFL_TX_L4_CSUM, en_txcsum);
1645 if (error) {
1646 device_printf(dev, "%s: failed to %s L4 checksum generation\n",
1647 __func__, en_txcsum ? "enable" : "disable");
1648 goto close_ni;
1649 }
1650
1651 if (bootverbose) {
1652 device_printf(dev, "%s: L3/L4 checksum validation %s\n",
1653 __func__, en_rxcsum ? "enabled" : "disabled");
1654 device_printf(dev, "%s: L3/L4 checksum generation %s\n",
1655 __func__, en_txcsum ? "enabled" : "disabled");
1656 }
1657
1658 (void)DPAA2_CMD_NI_CLOSE(dev, child, &cmd);
1659 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1660 return (0);
1661
1662 close_ni:
1663 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1664 close_rc:
1665 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1666 err_exit:
1667 return (error);
1668 }
1669
1670 /**
1671 * @brief Update DPNI according to the updated interface flags.
1672 */
1673 static int
1674 dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *sc)
1675 {
1676 const bool en_promisc = if_getflags(sc->ifp) & IFF_PROMISC;
1677 const bool en_allmulti = if_getflags(sc->ifp) & IFF_ALLMULTI;
1678 device_t pdev = device_get_parent(sc->dev);
1679 device_t dev = sc->dev;
1680 device_t child = dev;
1681 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1682 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1683 struct dpaa2_cmd cmd;
1684 uint16_t rc_token, ni_token;
1685 int error;
1686
1687 DPAA2_CMD_INIT(&cmd);
1688
1689 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1690 if (error) {
1691 device_printf(dev, "%s: failed to open resource container: "
1692 "id=%d, error=%d\n", __func__, rcinfo->id, error);
1693 goto err_exit;
1694 }
1695 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1696 if (error) {
1697 device_printf(dev, "%s: failed to open network interface: "
1698 "id=%d, error=%d\n", __func__, dinfo->id, error);
1699 goto close_rc;
1700 }
1701
1702 error = DPAA2_CMD_NI_SET_MULTI_PROMISC(dev, child, &cmd,
1703 en_promisc ? true : en_allmulti);
1704 if (error) {
1705 device_printf(dev, "%s: failed to %s multicast promiscuous "
1706 "mode\n", __func__, en_allmulti ? "enable" : "disable");
1707 goto close_ni;
1708 }
1709
1710 error = DPAA2_CMD_NI_SET_UNI_PROMISC(dev, child, &cmd, en_promisc);
1711 if (error) {
1712 device_printf(dev, "%s: failed to %s unicast promiscuous mode\n",
1713 __func__, en_promisc ? "enable" : "disable");
1714 goto close_ni;
1715 }
1716
1717 (void)DPAA2_CMD_NI_CLOSE(dev, child, &cmd);
1718 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1719 return (0);
1720
1721 close_ni:
1722 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
1723 close_rc:
1724 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
1725 err_exit:
1726 return (error);
1727 }
1728
1729 static int
1730 dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *sc)
1731 {
1732 struct sysctl_ctx_list *ctx;
1733 struct sysctl_oid *node, *node2;
1734 struct sysctl_oid_list *parent, *parent2;
1735 char cbuf[128];
1736 int i;
1737
1738 ctx = device_get_sysctl_ctx(sc->dev);
1739 parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1740
1741 /* Add DPNI statistics. */
1742 node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
1743 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Statistics");
1744 parent = SYSCTL_CHILDREN(node);
1745 for (i = 0; i < DPAA2_NI_STAT_SYSCTLS; ++i) {
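/*
 * NOTE: The OID number doubles as an index into dpni_stat_sysctls[];
 * see dpaa2_ni_collect_stats().
 */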
1746 SYSCTL_ADD_PROC(ctx, parent, i, dpni_stat_sysctls[i].name,
1747 CTLTYPE_U64 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_stats,
1748 "IU", dpni_stat_sysctls[i].desc);
1749 }
1750 SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_anomaly_frames",
1751 CTLFLAG_RD, &sc->rx_anomaly_frames,
1752 "Rx frames in the buffers outside of the buffer pools");
1753 SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_single_buf_frames",
1754 CTLFLAG_RD, &sc->rx_single_buf_frames,
1755 "Rx frames in single buffers");
1756 SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_sg_buf_frames",
1757 CTLFLAG_RD, &sc->rx_sg_buf_frames,
1758 "Rx frames in scatter/gather list");
1759 SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_enq_rej_frames",
1760 CTLFLAG_RD, &sc->rx_enq_rej_frames,
1761 "Enqueue rejected by QMan");
1762 SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_ieoi_err_frames",
1763 CTLFLAG_RD, &sc->rx_ieoi_err_frames,
1764 "QMan IEOI error");
1765 SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_single_buf_frames",
1766 CTLFLAG_RD, &sc->tx_single_buf_frames,
1767 "Tx single buffer frames");
1768 SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_sg_frames",
1769 CTLFLAG_RD, &sc->tx_sg_frames,
1770 "Tx S/G frames");
1771
1772 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_num",
1773 CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_num,
1774 "IU", "number of Rx buffers in the buffer pool");
1775 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_free",
1776 CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_free,
1777 "IU", "number of free Rx buffers in the buffer pool");
1778
1779 parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
1780
1781 /* Add channels statistics. */
1782 node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "channels",
1783 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channels");
1784 parent = SYSCTL_CHILDREN(node);
1785 for (int i = 0; i < sc->chan_n; i++) {
1786 snprintf(cbuf, sizeof(cbuf), "%d", i);
1787
1788 node2 = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, cbuf,
1789 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channel");
1790 parent2 = SYSCTL_CHILDREN(node2);
1791
1792 SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_frames",
1793 CTLFLAG_RD, &sc->channels[i]->tx_frames,
1794 "Tx frames counter");
1795 SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_dropped",
1796 CTLFLAG_RD, &sc->channels[i]->tx_dropped,
1797 "Tx dropped counter");
1798 }
1799
1800 return (0);
1801 }
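/*
 * The nodes above land under the device's sysctl tree and can be read from
 * userland; assuming the interface attaches as dpni0, e.g.:
 *
 *	# sysctl dev.dpni.0.stats
 *	# sysctl dev.dpni.0.buf_free
 *	# sysctl dev.dpni.0.channels.0.tx_frames
 */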
1802
1803 static int
1804 dpaa2_ni_setup_dma(struct dpaa2_ni_softc *sc)
1805 {
1806 device_t dev = sc->dev;
1807 int error;
1808
1809 KASSERT((sc->buf_align == BUF_ALIGN) || (sc->buf_align == BUF_ALIGN_V1),
1810 ("unexpected buffer alignment: %d\n", sc->buf_align));
1811
1812 /* DMA tag for Rx distribution key. */
1813 error = bus_dma_tag_create(
1814 bus_get_dma_tag(dev),
1815 PAGE_SIZE, 0, /* alignment, boundary */
1816 BUS_SPACE_MAXADDR, /* low restricted addr */
1817 BUS_SPACE_MAXADDR, /* high restricted addr */
1818 NULL, NULL, /* filter, filterarg */
1819 DPAA2_CLASSIFIER_DMA_SIZE, 1, /* maxsize, nsegments */
1820 DPAA2_CLASSIFIER_DMA_SIZE, 0, /* maxsegsize, flags */
1821 NULL, NULL, /* lockfunc, lockarg */
1822 &sc->rxd_dmat);
1823 if (error) {
1824 device_printf(dev, "%s: failed to create DMA tag for Rx "
1825 "distribution key\n", __func__);
1826 return (error);
1827 }
1828
1829 error = bus_dma_tag_create(
1830 bus_get_dma_tag(dev),
1831 PAGE_SIZE, 0, /* alignment, boundary */
1832 BUS_SPACE_MAXADDR, /* low restricted addr */
1833 BUS_SPACE_MAXADDR, /* high restricted addr */
1834 NULL, NULL, /* filter, filterarg */
1835 ETH_QOS_KCFG_BUF_SIZE, 1, /* maxsize, nsegments */
1836 ETH_QOS_KCFG_BUF_SIZE, 0, /* maxsegsize, flags */
1837 NULL, NULL, /* lockfunc, lockarg */
1838 &sc->qos_dmat);
1839 if (error) {
1840 device_printf(dev, "%s: failed to create DMA tag for QoS key\n",
1841 __func__);
1842 return (error);
1843 }
1844
1845 return (0);
1846 }
1847
1848 /**
1849 * @brief Configure buffer layouts of the different DPNI queues.
1850 */
1851 static int
1852 dpaa2_ni_set_buf_layout(device_t dev)
1853 {
1854 device_t pdev = device_get_parent(dev);
1855 device_t child = dev;
1856 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
1857 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
1858 struct dpaa2_ni_softc *sc = device_get_softc(dev);
1859 struct dpaa2_ni_buf_layout buf_layout = {0};
1860 struct dpaa2_cmd cmd;
1861 uint16_t rc_token, ni_token;
1862 int error;
1863
1864 DPAA2_CMD_INIT(&cmd);
1865
1866 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
1867 if (error) {
1868 device_printf(dev, "%s: failed to open resource container: "
1869 "id=%d, error=%d\n", __func__, rcinfo->id, error);
1870 goto err_exit;
1871 }
1872 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
1873 if (error) {
1874 device_printf(sc->dev, "%s: failed to open network interface: "
1875 "id=%d, error=%d\n", __func__, dinfo->id, error);
1876 goto close_rc;
1877 }
1878
1879 /*
1880 * Select Rx/Tx buffer alignment. It's necessary to ensure that the
1881 * buffer size seen by WRIOP is a multiple of 64 or 256 bytes depending
1882 * on the WRIOP version.
1883 */
1884 sc->buf_align = (sc->attr.wriop_ver == WRIOP_VERSION(0, 0, 0) ||
1885 sc->attr.wriop_ver == WRIOP_VERSION(1, 0, 0))
1886 ? BUF_ALIGN_V1 : BUF_ALIGN;
1887
1888 /*
1889 * Round the buffer size down so that the size seen by WRIOP remains
1890 * a multiple of the alignment selected above.
1891 */
1892 sc->buf_sz = ALIGN_DOWN(DPAA2_RX_BUF_SIZE, sc->buf_align);
1893
1894 if (bootverbose) {
1895 device_printf(dev, "Rx/Tx buffers: size=%d, alignment=%d\n",
1896 sc->buf_sz, sc->buf_align);
1897 }
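/*
 * ALIGN_DOWN() rounds down to a power-of-two boundary, i.e. something
 * along the lines of:
 *
 *	#define ALIGN_DOWN(x, y)	((x) & ~((y) - 1))
 *
 * E.g. with a 2112-byte DPAA2_RX_BUF_SIZE (value assumed here purely for
 * illustration) and 256-byte alignment, buf_sz becomes 2048.
 */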
1898
1899 /*
1900 * Frame Descriptor Tx buffer layout
1901 *
1902 * ADDR -> |---------------------|
1903 * | SW FRAME ANNOTATION | BUF_SWA_SIZE bytes
1904 * |---------------------|
1905 * | HW FRAME ANNOTATION | BUF_TX_HWA_SIZE bytes
1906 * |---------------------|
1907 * | DATA HEADROOM |
1908 * ADDR + OFFSET -> |---------------------|
1909 * | |
1910 * | |
1911 * | FRAME DATA |
1912 * | |
1913 * | |
1914 * |---------------------|
1915 * | DATA TAILROOM |
1916 * |---------------------|
1917 *
1918 * NOTE: It's for a single buffer frame only.
1919 */
1920 buf_layout.queue_type = DPAA2_NI_QUEUE_TX;
1921 buf_layout.pd_size = BUF_SWA_SIZE;
1922 buf_layout.pass_timestamp = true;
1923 buf_layout.pass_frame_status = true;
1924 buf_layout.options =
1925 BUF_LOPT_PRIV_DATA_SZ |
1926 BUF_LOPT_TIMESTAMP | /* requires 128 bytes in HWA */
1927 BUF_LOPT_FRAME_STATUS;
1928 error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
1929 if (error) {
1930 device_printf(dev, "%s: failed to set Tx buffer layout\n",
1931 __func__);
1932 goto close_ni;
1933 }
1934
1935 /* Tx-confirmation buffer layout */
1936 buf_layout.queue_type = DPAA2_NI_QUEUE_TX_CONF;
1937 buf_layout.options =
1938 BUF_LOPT_TIMESTAMP |
1939 BUF_LOPT_FRAME_STATUS;
1940 error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
1941 if (error) {
1942 device_printf(dev, "%s: failed to set TxConf buffer layout\n",
1943 __func__);
1944 goto close_ni;
1945 }
1946
1947 /*
1948 * Driver should reserve the amount of space indicated by this command
1949 * as headroom in all Tx frames.
1950 */
1951 error = DPAA2_CMD_NI_GET_TX_DATA_OFF(dev, child, &cmd, &sc->tx_data_off);
1952 if (error) {
1953 device_printf(dev, "%s: failed to obtain Tx data offset\n",
1954 __func__);
1955 goto close_ni;
1956 }
1957
1958 if (bootverbose) {
1959 device_printf(dev, "Tx data offset=%d\n", sc->tx_data_off);
1960 }
1961 if ((sc->tx_data_off % 64) != 0) {
1962 device_printf(dev, "Tx data offset (%d) is not a multiple "
1963 "of 64 bytes\n", sc->tx_data_off);
1964 }
1965
1966 /*
1967 * Frame Descriptor Rx buffer layout
1968 *
1969 * ADDR -> |---------------------|
1970 * | SW FRAME ANNOTATION | BUF_SWA_SIZE bytes
1971 * |---------------------|
1972 * | HW FRAME ANNOTATION | BUF_RX_HWA_SIZE bytes
1973 * |---------------------|
1974 * | DATA HEADROOM | OFFSET-BUF_RX_HWA_SIZE
1975 * ADDR + OFFSET -> |---------------------|
1976 * | |
1977 * | |
1978 * | FRAME DATA |
1979 * | |
1980 * | |
1981 * |---------------------|
1982 * | DATA TAILROOM | 0 bytes
1983 * |---------------------|
1984 *
1985 * NOTE: It's for a single buffer frame only.
1986 */
1987 buf_layout.queue_type = DPAA2_NI_QUEUE_RX;
1988 buf_layout.pd_size = BUF_SWA_SIZE;
1989 buf_layout.fd_align = sc->buf_align;
1990 buf_layout.head_size = sc->tx_data_off - BUF_RX_HWA_SIZE - BUF_SWA_SIZE;
1991 buf_layout.tail_size = 0;
1992 buf_layout.pass_frame_status = true;
1993 buf_layout.pass_parser_result = true;
1994 buf_layout.pass_timestamp = true;
1995 buf_layout.options =
1996 BUF_LOPT_PRIV_DATA_SZ |
1997 BUF_LOPT_DATA_ALIGN |
1998 BUF_LOPT_DATA_HEAD_ROOM |
1999 BUF_LOPT_DATA_TAIL_ROOM |
2000 BUF_LOPT_FRAME_STATUS |
2001 BUF_LOPT_PARSER_RESULT |
2002 BUF_LOPT_TIMESTAMP;
2003 error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
2004 if (error) {
2005 device_printf(dev, "%s: failed to set Rx buffer layout\n",
2006 __func__);
2007 goto close_ni;
2008 }
2009
2010 error = 0;
2011 close_ni:
2012 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2013 close_rc:
2014 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2015 err_exit:
2016 return (error);
2017 }
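/*
 * For illustration of the head_size arithmetic above (the real constants
 * live in dpaa2_ni.h): with BUF_SWA_SIZE = 64, BUF_RX_HWA_SIZE = 64 and
 * tx_data_off = 192, the Rx queue gets 192 - 64 - 64 = 64 bytes of data
 * headroom, which keeps the Rx data offset identical to the Tx one.
 */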
2018
2019 /**
2020 * @brief Enable Rx/Tx pause frames.
2021 *
2022 * NOTE: The DPNI stops transmitting when it receives a pause frame (Rx
2023 * pause), and it generates pause frames itself when required (Tx pause).
2024 */
2025 static int
2026 dpaa2_ni_set_pause_frame(device_t dev)
2027 {
2028 device_t pdev = device_get_parent(dev);
2029 device_t child = dev;
2030 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2031 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2032 struct dpaa2_ni_softc *sc = device_get_softc(dev);
2033 struct dpaa2_ni_link_cfg link_cfg = {0};
2034 struct dpaa2_cmd cmd;
2035 uint16_t rc_token, ni_token;
2036 int error;
2037
2038 DPAA2_CMD_INIT(&cmd);
2039
2040 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2041 if (error) {
2042 device_printf(dev, "%s: failed to open resource container: "
2043 "id=%d, error=%d\n", __func__, rcinfo->id, error);
2044 goto err_exit;
2045 }
2046 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2047 if (error) {
2048 device_printf(sc->dev, "%s: failed to open network interface: "
2049 "id=%d, error=%d\n", __func__, dinfo->id, error);
2050 goto close_rc;
2051 }
2052
2053 error = DPAA2_CMD_NI_GET_LINK_CFG(dev, child, &cmd, &link_cfg);
2054 if (error) {
2055 device_printf(dev, "%s: failed to obtain link configuration: "
2056 "error=%d\n", __func__, error);
2057 goto close_ni;
2058 }
2059
2060 /* Enable both Rx and Tx pause frames by default. */
2061 link_cfg.options |= DPAA2_NI_LINK_OPT_PAUSE;
2062 link_cfg.options &= ~DPAA2_NI_LINK_OPT_ASYM_PAUSE;
2063
2064 error = DPAA2_CMD_NI_SET_LINK_CFG(dev, child, &cmd, &link_cfg);
2065 if (error) {
2066 device_printf(dev, "%s: failed to set link configuration: "
2067 "error=%d\n", __func__, error);
2068 goto close_ni;
2069 }
2070
2071 sc->link_options = link_cfg.options;
2072 error = 0;
2073 close_ni:
2074 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2075 close_rc:
2076 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2077 err_exit:
2078 return (error);
2079 }
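/*
 * Assuming the same option semantics as the Linux dpaa2-eth driver (stated
 * here as an assumption about the MC firmware):
 *
 *	rx_pause = !!(options & DPAA2_NI_LINK_OPT_PAUSE);
 *	tx_pause = rx_pause ^ !!(options & DPAA2_NI_LINK_OPT_ASYM_PAUSE);
 *
 * so PAUSE alone (as set above) means symmetric Rx+Tx pause,
 * PAUSE|ASYM_PAUSE means Rx-only, and ASYM_PAUSE alone means Tx-only.
 */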
2080
2081 /**
2082 * @brief Configure QoS table to determine the traffic class for the received
2083 * frame.
2084 */
2085 static int
2086 dpaa2_ni_set_qos_table(device_t dev)
2087 {
2088 device_t pdev = device_get_parent(dev);
2089 device_t child = dev;
2090 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2091 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2092 struct dpaa2_ni_softc *sc = device_get_softc(dev);
2093 struct dpaa2_ni_qos_table tbl;
2094 struct dpaa2_buf *buf = &sc->qos_kcfg;
2095 struct dpaa2_cmd cmd;
2096 uint16_t rc_token, ni_token;
2097 int error;
2098
2099 if (sc->attr.num.rx_tcs == 1 ||
2100 !(sc->attr.options & DPNI_OPT_HAS_KEY_MASKING)) {
2101 if (bootverbose) {
2102 device_printf(dev, "Ingress traffic classification is "
2103 "not supported\n");
2104 }
2105 return (0);
2106 }
2107
2108 /*
2109 * Allocate a buffer visible to the device to hold the QoS table key
2110 * configuration.
2111 */
2112
2113 if (__predict_true(buf->dmat == NULL)) {
2114 buf->dmat = sc->qos_dmat;
2115 }
2116
2117 error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr,
2118 BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap);
2119 if (error) {
2120 device_printf(dev, "%s: failed to allocate a buffer for QoS key "
2121 "configuration\n", __func__);
2122 goto err_exit;
2123 }
2124
2125 error = bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr,
2126 ETH_QOS_KCFG_BUF_SIZE, dpaa2_dmamap_oneseg_cb, &buf->paddr,
2127 BUS_DMA_NOWAIT);
2128 if (error) {
2129 device_printf(dev, "%s: failed to map QoS key configuration "
2130 "buffer into bus space\n", __func__);
2131 goto err_exit;
2132 }
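	/*
	 * dpaa2_dmamap_oneseg_cb() is expected to be the usual
	 * single-segment busdma callback; a sketch of such a callback
	 * (not necessarily the driver's actual implementation):
	 *
	 *	static void
	 *	dpaa2_dmamap_oneseg_cb(void *arg, bus_dma_segment_t *segs,
	 *	    int nseg, int error)
	 *	{
	 *		if (error != 0)
	 *			return;
	 *		KASSERT(nseg == 1, ("%s: nseg != 1", __func__));
	 *		*(bus_addr_t *)arg = segs[0].ds_addr;
	 *	}
	 */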
2133
2134 DPAA2_CMD_INIT(&cmd);
2135
2136 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2137 if (error) {
2138 device_printf(dev, "%s: failed to open resource container: "
2139 "id=%d, error=%d\n", __func__, rcinfo->id, error);
2140 goto err_exit;
2141 }
2142 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2143 if (error) {
2144 device_printf(sc->dev, "%s: failed to open network interface: "
2145 "id=%d, error=%d\n", __func__, dinfo->id, error);
2146 goto close_rc;
2147 }
2148
2149 tbl.default_tc = 0;
2150 tbl.discard_on_miss = false;
2151 tbl.keep_entries = false;
2152 tbl.kcfg_busaddr = buf->paddr;
2153 error = DPAA2_CMD_NI_SET_QOS_TABLE(dev, child, &cmd, &tbl);
2154 if (error) {
2155 device_printf(dev, "%s: failed to set QoS table\n", __func__);
2156 goto close_ni;
2157 }
2158
2159 error = DPAA2_CMD_NI_CLEAR_QOS_TABLE(dev, child, &cmd);
2160 if (error) {
2161 device_printf(dev, "%s: failed to clear QoS table\n", __func__);
2162 goto close_ni;
2163 }
2164
2165 error = 0;
2166 close_ni:
2167 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2168 close_rc:
2169 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2170 err_exit:
2171 return (error);
2172 }
2173
2174 static int
2175 dpaa2_ni_set_mac_addr(device_t dev)
2176 {
2177 device_t pdev = device_get_parent(dev);
2178 device_t child = dev;
2179 struct dpaa2_ni_softc *sc = device_get_softc(dev);
2180 if_t ifp = sc->ifp;
2181 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2182 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2183 struct dpaa2_cmd cmd;
2184 struct ether_addr rnd_mac_addr;
2185 uint16_t rc_token, ni_token;
2186 uint8_t mac_addr[ETHER_ADDR_LEN];
2187 uint8_t dpni_mac_addr[ETHER_ADDR_LEN];
2188 int error;
2189
2190 DPAA2_CMD_INIT(&cmd);
2191
2192 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2193 if (error) {
2194 device_printf(dev, "%s: failed to open resource container: "
2195 "id=%d, error=%d\n", __func__, rcinfo->id, error);
2196 goto err_exit;
2197 }
2198 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2199 if (error) {
2200 device_printf(sc->dev, "%s: failed to open network interface: "
2201 "id=%d, error=%d\n", __func__, dinfo->id, error);
2202 goto close_rc;
2203 }
2204
2205 /*
2206 * Get the MAC address associated with the physical port, if the DPNI is
2207 * connected to a DPMAC directly associated with one of the physical
2208 * ports.
2209 */
2210 error = DPAA2_CMD_NI_GET_PORT_MAC_ADDR(dev, child, &cmd, mac_addr);
2211 if (error) {
2212 device_printf(dev, "%s: failed to obtain the MAC address "
2213 "associated with the physical port\n", __func__);
2214 goto close_ni;
2215 }
2216
2217 /* Get primary MAC address from the DPNI attributes. */
2218 error = DPAA2_CMD_NI_GET_PRIM_MAC_ADDR(dev, child, &cmd, dpni_mac_addr);
2219 if (error) {
2220 device_printf(dev, "%s: failed to obtain primary MAC address\n",
2221 __func__);
2222 goto close_ni;
2223 }
2224
2225 if (!ETHER_IS_ZERO(mac_addr)) {
2226 /* Set MAC address of the physical port as DPNI's primary one. */
2227 error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd,
2228 mac_addr);
2229 if (error) {
2230 device_printf(dev, "%s: failed to set primary MAC "
2231 "address\n", __func__);
2232 goto close_ni;
2233 }
2234 for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2235 sc->mac.addr[i] = mac_addr[i];
2236 }
2237 } else if (ETHER_IS_ZERO(dpni_mac_addr)) {
2238 /* Generate random MAC address as DPNI's primary one. */
2239 ether_gen_addr(ifp, &rnd_mac_addr);
2240 for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2241 mac_addr[i] = rnd_mac_addr.octet[i];
2242 }
2243
2244 error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd,
2245 mac_addr);
2246 if (error) {
2247 device_printf(dev, "%s: failed to set random primary "
2248 "MAC address\n", __func__);
2249 goto close_ni;
2250 }
2251 for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2252 sc->mac.addr[i] = mac_addr[i];
2253 }
2254 } else {
2255 for (int i = 0; i < ETHER_ADDR_LEN; i++) {
2256 sc->mac.addr[i] = dpni_mac_addr[i];
2257 }
2258 }
2259
2260 error = 0;
2261 close_ni:
2262 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2263 close_rc:
2264 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2265 err_exit:
2266 return (error);
2267 }
2268
2269 static void
2270 dpaa2_ni_miibus_statchg(device_t dev)
2271 {
2272 device_t pdev = device_get_parent(dev);
2273 device_t child = dev;
2274 struct dpaa2_ni_softc *sc = device_get_softc(dev);
2275 struct dpaa2_mac_link_state mac_link = { 0 };
2276 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2277 struct dpaa2_cmd cmd;
2278 uint16_t rc_token, mac_token;
2279 int error, link_state;
2280
2281 if (sc->fixed_link || sc->mii == NULL) {
2282 return;
2283 }
2284 if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0) {
2285 /*
2286 * We may be called before everything is set up (i.e. before
2287 * dpaa2_ni_init() has actually run). Adjusting the link and the
2288 * internal sc->link_state at this point would suppress the update
2289 * once it is really needed, so the DPMAC would never learn about
2290 * the change. Ignore such early calls instead.
2291 */
2292 return;
2293 }
2294
2295 /*
2296 * Note: ifp link state will only be changed AFTER we are called so we
2297 * cannot rely on ifp->if_linkstate here.
2298 */
2299 if (sc->mii->mii_media_status & IFM_AVALID) {
2300 if (sc->mii->mii_media_status & IFM_ACTIVE) {
2301 link_state = LINK_STATE_UP;
2302 } else {
2303 link_state = LINK_STATE_DOWN;
2304 }
2305 } else {
2306 link_state = LINK_STATE_UNKNOWN;
2307 }
2308
2309 if (link_state != sc->link_state) {
2310 sc->link_state = link_state;
2311
2312 DPAA2_CMD_INIT(&cmd);
2313
2314 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
2315 &rc_token);
2316 if (error) {
2317 device_printf(dev, "%s: failed to open resource "
2318 "container: id=%d, error=%d\n", __func__, rcinfo->id,
2319 error);
2320 goto err_exit;
2321 }
2322 error = DPAA2_CMD_MAC_OPEN(dev, child, &cmd, sc->mac.dpmac_id,
2323 &mac_token);
2324 if (error) {
2325 device_printf(sc->dev, "%s: failed to open DPMAC: "
2326 "id=%d, error=%d\n", __func__, sc->mac.dpmac_id,
2327 error);
2328 goto close_rc;
2329 }
2330
2331 if (link_state == LINK_STATE_UP ||
2332 link_state == LINK_STATE_DOWN) {
2333 /* Update DPMAC link state. */
2334 mac_link.supported = sc->mii->mii_media.ifm_media;
2335 mac_link.advert = sc->mii->mii_media.ifm_media;
2336 mac_link.rate = 1000; /* TODO: Where to get from? ifmedia_baudrate()? */
2337 mac_link.options =
2338 DPAA2_MAC_LINK_OPT_AUTONEG |
2339 DPAA2_MAC_LINK_OPT_PAUSE;
2340 mac_link.up = (link_state == LINK_STATE_UP) ? true : false;
2341 mac_link.state_valid = true;
2342
2343 /* Inform DPMAC about link state. */
2344 error = DPAA2_CMD_MAC_SET_LINK_STATE(dev, child, &cmd,
2345 &mac_link);
2346 if (error) {
2347 device_printf(sc->dev, "%s: failed to set DPMAC "
2348 "link state: id=%d, error=%d\n", __func__,
2349 sc->mac.dpmac_id, error);
2350 }
2351 }
2352 (void)DPAA2_CMD_MAC_CLOSE(dev, child, &cmd);
2353 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
2354 rc_token));
2355 }
2356
2357 return;
2358
2359 close_rc:
2360 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2361 err_exit:
2362 return;
2363 }
2364
2365 /**
2366 * @brief Callback function to process media change request.
2367 */
2368 static int
2369 dpaa2_ni_media_change_locked(struct dpaa2_ni_softc *sc)
2370 {
2371
2372 DPNI_LOCK_ASSERT(sc);
2373 if (sc->mii) {
2374 mii_mediachg(sc->mii);
2375 sc->media_status = sc->mii->mii_media.ifm_media;
2376 } else if (sc->fixed_link) {
2377 if_printf(sc->ifp, "%s: can't change media in fixed mode\n",
2378 __func__);
2379 }
2380
2381 return (0);
2382 }
2383
2384 static int
2385 dpaa2_ni_media_change(if_t ifp)
2386 {
2387 struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2388 int error;
2389
2390 DPNI_LOCK(sc);
2391 error = dpaa2_ni_media_change_locked(sc);
2392 DPNI_UNLOCK(sc);
2393 return (error);
2394 }
2395
2396 /**
2397 * @brief Callback function to process media status request.
2398 */
2399 static void
2400 dpaa2_ni_media_status(if_t ifp, struct ifmediareq *ifmr)
2401 {
2402 struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2403
2404 DPNI_LOCK(sc);
2405 if (sc->mii) {
2406 mii_pollstat(sc->mii);
2407 ifmr->ifm_active = sc->mii->mii_media_active;
2408 ifmr->ifm_status = sc->mii->mii_media_status;
2409 }
2410 DPNI_UNLOCK(sc);
2411 }
2412
2413 /**
2414 * @brief Callout function to check and update media status.
2415 */
2416 static void
2417 dpaa2_ni_media_tick(void *arg)
2418 {
2419 struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2420
2421 /* Check for media type change */
2422 if (sc->mii) {
2423 mii_tick(sc->mii);
2424 if (sc->media_status != sc->mii->mii_media.ifm_media) {
2425 printf("%s: media type changed (ifm_media=%x)\n",
2426 __func__, sc->mii->mii_media.ifm_media);
2427 dpaa2_ni_media_change(sc->ifp);
2428 }
2429 }
2430
2431 /* Schedule another timeout one second from now */
2432 callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);
2433 }
2434
2435 static void
2436 dpaa2_ni_init(void *arg)
2437 {
2438 struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2439 if_t ifp = sc->ifp;
2440 device_t pdev = device_get_parent(sc->dev);
2441 device_t dev = sc->dev;
2442 device_t child = dev;
2443 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2444 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2445 struct dpaa2_cmd cmd;
2446 uint16_t rc_token, ni_token;
2447 int error;
2448
2449 DPNI_LOCK(sc);
2450 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2451 DPNI_UNLOCK(sc);
2452 return;
2453 }
2454 DPNI_UNLOCK(sc);
2455
2456 DPAA2_CMD_INIT(&cmd);
2457
2458 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2459 if (error) {
2460 device_printf(dev, "%s: failed to open resource container: "
2461 "id=%d, error=%d\n", __func__, rcinfo->id, error);
2462 goto err_exit;
2463 }
2464 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2465 if (error) {
2466 device_printf(dev, "%s: failed to open network interface: "
2467 "id=%d, error=%d\n", __func__, dinfo->id, error);
2468 goto close_rc;
2469 }
2470
2471 error = DPAA2_CMD_NI_ENABLE(dev, child, &cmd);
2472 if (error) {
2473 device_printf(dev, "%s: failed to enable DPNI: error=%d\n",
2474 __func__, error);
2475 }
2476
2477 DPNI_LOCK(sc);
2478 /* Announce we are up and running and can queue packets. */
2479 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
2480
2481 if (sc->mii) {
2482 /*
2483 * mii_mediachg() will trigger a call into
2484 * dpaa2_ni_miibus_statchg() to setup link state.
2485 */
2486 dpaa2_ni_media_change_locked(sc);
2487 }
2488 callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);
2489
2490 DPNI_UNLOCK(sc);
2491
2492 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2493 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2494 return;
2495
2496 close_rc:
2497 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2498 err_exit:
2499 return;
2500 }
2501
2502 static int
2503 dpaa2_ni_transmit(if_t ifp, struct mbuf *m)
2504 {
2505 struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2506 struct dpaa2_channel *ch;
2507 uint32_t fqid;
2508 bool found = false;
2509 int chidx = 0, error;
2510
2511 if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) {
2512 return (0);
2513 }
2514
2515 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
2516 fqid = m->m_pkthdr.flowid;
2517 for (int i = 0; i < sc->chan_n; i++) {
2518 ch = sc->channels[i];
2519 for (int j = 0; j < ch->rxq_n; j++) {
2520 if (fqid == ch->rx_queues[j].fqid) {
2521 chidx = ch->flowid;
2522 found = true;
2523 break;
2524 }
2525 }
2526 if (found) {
2527 break;
2528 }
2529 }
2530 }
2531
2532 ch = sc->channels[chidx];
2533 error = buf_ring_enqueue(ch->xmit_br, m);
2534 if (__predict_false(error != 0)) {
2535 if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1);
2536 m_freem(m);
2537 } else {
2538 taskqueue_enqueue(ch->cleanup_tq, &ch->cleanup_task);
2539 }
2540
2541 return (error);
2542 }
2543
2544 static void
2545 dpaa2_ni_qflush(if_t ifp)
2546 {
2547 /* TODO: Find a way to drain Tx queues in QBMan. */
2548 if_qflush(ifp);
2549 }
2550
2551 static int
2552 dpaa2_ni_ioctl(if_t ifp, u_long c, caddr_t data)
2553 {
2554 struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2555 struct ifreq *ifr = (struct ifreq *) data;
2556 device_t pdev = device_get_parent(sc->dev);
2557 device_t dev = sc->dev;
2558 device_t child = dev;
2559 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2560 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2561 struct dpaa2_cmd cmd;
2562 uint32_t changed = 0;
2563 uint16_t rc_token, ni_token;
2564 int mtu, error, rc = 0;
2565
2566 DPAA2_CMD_INIT(&cmd);
2567
2568 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2569 if (error) {
2570 device_printf(dev, "%s: failed to open resource container: "
2571 "id=%d, error=%d\n", __func__, rcinfo->id, error);
2572 goto err_exit;
2573 }
2574 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2575 if (error) {
2576 device_printf(dev, "%s: failed to open network interface: "
2577 "id=%d, error=%d\n", __func__, dinfo->id, error);
2578 goto close_rc;
2579 }
2580
2581 switch (c) {
2582 case SIOCSIFMTU:
2583 DPNI_LOCK(sc);
2584 mtu = ifr->ifr_mtu;
2585 if (mtu < ETHERMIN || mtu > ETHERMTU_JUMBO) {
2586 DPNI_UNLOCK(sc);
2587 error = EINVAL;
2588 goto close_ni;
2589 }
2590 if_setmtu(ifp, mtu);
2591 DPNI_UNLOCK(sc);
2592
2593 /* Update maximum frame length. */
2594 mtu += ETHER_HDR_LEN;
2595 if (if_getcapenable(ifp) & IFCAP_VLAN_MTU)
2596 mtu += ETHER_VLAN_ENCAP_LEN;
2597 error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd, mtu);
2598 if (error) {
2599 device_printf(dev, "%s: failed to update maximum frame "
2600 "length: error=%d\n", __func__, error);
2601 goto close_ni;
2602 }
2603 break;
2604 case SIOCSIFCAP:
2605 changed = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
2606 if ((changed & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) != 0)
2607 if_togglecapenable(ifp, IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6);
2608 if ((changed & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) != 0) {
2609 if_togglecapenable(ifp, IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6);
2610 if_togglehwassist(ifp, DPAA2_CSUM_TX_OFFLOAD);
2611 }
2612
2613 rc = dpaa2_ni_setup_if_caps(sc);
2614 if (rc) {
2615 device_printf(dev, "%s: failed to update iface "
2616 "capabilities: error=%d\n", __func__, rc);
2617 rc = ENXIO;
2618 }
2619 break;
2620 case SIOCSIFFLAGS:
2621 DPNI_LOCK(sc);
2622 if (if_getflags(ifp) & IFF_UP) {
2623 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2624 changed = if_getflags(ifp) ^ sc->if_flags;
2625 if (changed & IFF_PROMISC ||
2626 changed & IFF_ALLMULTI) {
2627 rc = dpaa2_ni_setup_if_flags(sc);
2628 }
2629 } else {
2630 DPNI_UNLOCK(sc);
2631 dpaa2_ni_init(sc);
2632 DPNI_LOCK(sc);
2633 }
2634 } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2635 /* FIXME: Disable DPNI. See dpaa2_ni_init(). */
2636 }
2637
2638 sc->if_flags = if_getflags(ifp);
2639 DPNI_UNLOCK(sc);
2640 break;
2641 case SIOCADDMULTI:
2642 case SIOCDELMULTI:
2643 DPNI_LOCK(sc);
2644 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2645 DPNI_UNLOCK(sc);
2646 rc = dpaa2_ni_update_mac_filters(ifp);
2647 if (rc) {
2648 device_printf(dev, "%s: failed to update MAC "
2649 "filters: error=%d\n", __func__, rc);
2650 }
2651 DPNI_LOCK(sc);
2652 }
2653 DPNI_UNLOCK(sc);
2654 break;
2655 case SIOCGIFMEDIA:
2656 case SIOCSIFMEDIA:
2657 if (sc->mii) {
2658 rc = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, c);
2659 } else if (sc->fixed_link) {
2660 rc = ifmedia_ioctl(ifp, ifr, &sc->fixed_ifmedia, c);
2661 }
2662 break;
2663 default:
2664 rc = ether_ioctl(ifp, c, data);
2665 break;
2666 }
2667
2668 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2669 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2670 return (rc);
2671
2672 close_ni:
2673 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2674 close_rc:
2675 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2676 err_exit:
2677 return (error);
2678 }
2679
2680 static int
2681 dpaa2_ni_update_mac_filters(if_t ifp)
2682 {
2683 struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
2684 struct dpaa2_ni_mcaddr_ctx ctx;
2685 device_t pdev = device_get_parent(sc->dev);
2686 device_t dev = sc->dev;
2687 device_t child = dev;
2688 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2689 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2690 struct dpaa2_cmd cmd;
2691 uint16_t rc_token, ni_token;
2692 int error;
2693
2694 DPAA2_CMD_INIT(&cmd);
2695
2696 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2697 if (error) {
2698 device_printf(dev, "%s: failed to open resource container: "
2699 "id=%d, error=%d\n", __func__, rcinfo->id, error);
2700 goto err_exit;
2701 }
2702 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2703 if (error) {
2704 device_printf(dev, "%s: failed to open network interface: "
2705 "id=%d, error=%d\n", __func__, dinfo->id, error);
2706 goto close_rc;
2707 }
2708
2709 /* Remove all multicast MAC filters. */
2710 error = DPAA2_CMD_NI_CLEAR_MAC_FILTERS(dev, child, &cmd, false, true);
2711 if (error) {
2712 device_printf(dev, "%s: failed to clear multicast MAC filters: "
2713 "error=%d\n", __func__, error);
2714 goto close_ni;
2715 }
2716
2717 ctx.ifp = ifp;
2718 ctx.error = 0;
2719 ctx.nent = 0;
2720
2721 if_foreach_llmaddr(ifp, dpaa2_ni_add_maddr, &ctx);
2722
2723 error = ctx.error;
2724 close_ni:
2725 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2726 close_rc:
2727 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2728 err_exit:
2729 return (error);
2730 }
2731
2732 static u_int
2733 dpaa2_ni_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2734 {
2735 struct dpaa2_ni_mcaddr_ctx *ctx = arg;
2736 struct dpaa2_ni_softc *sc = if_getsoftc(ctx->ifp);
2737 device_t pdev = device_get_parent(sc->dev);
2738 device_t dev = sc->dev;
2739 device_t child = dev;
2740 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2741 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2742 struct dpaa2_cmd cmd;
2743 uint16_t rc_token, ni_token;
2744 int error;
2745
2746 if (ctx->error != 0) {
2747 return (0);
2748 }
2749
2750 if (ETHER_IS_MULTICAST(LLADDR(sdl))) {
2751 DPAA2_CMD_INIT(&cmd);
2752
2753 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
2754 &rc_token);
2755 if (error) {
2756 device_printf(dev, "%s: failed to open resource "
2757 "container: id=%d, error=%d\n", __func__, rcinfo->id,
2758 error);
2759 return (0);
2760 }
2761 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id,
2762 &ni_token);
2763 if (error) {
2764 device_printf(dev, "%s: failed to open network interface: "
2765 "id=%d, error=%d\n", __func__, dinfo->id, error);
2766 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
2767 rc_token));
2768 return (0);
2769 }
2770
2771 ctx->error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, &cmd,
2772 LLADDR(sdl));
2773
2774 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
2775 ni_token));
2776 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
2777 rc_token));
2778
2779 if (ctx->error != 0) {
2780 device_printf(dev, "%s: can't add more than %d MAC "
2781 "addresses, switching to the multicast promiscuous "
2782 "mode\n", __func__, ctx->nent);
2783
2784 /* Enable multicast promiscuous mode. */
2785 DPNI_LOCK(sc);
2786 if_setflagbits(ctx->ifp, IFF_ALLMULTI, 0);
2787 sc->if_flags |= IFF_ALLMULTI;
2788 ctx->error = dpaa2_ni_setup_if_flags(sc);
2789 DPNI_UNLOCK(sc);
2790
2791 return (0);
2792 }
2793 ctx->nent++;
2794 }
2795
2796 return (1);
2797 }
2798
2799 static void
2800 dpaa2_ni_intr(void *arg)
2801 {
2802 struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
2803 device_t pdev = device_get_parent(sc->dev);
2804 device_t dev = sc->dev;
2805 device_t child = dev;
2806 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
2807 struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
2808 struct dpaa2_cmd cmd;
2809 uint32_t status = ~0u; /* clear all IRQ status bits */
2810 uint16_t rc_token, ni_token;
2811 int error;
2812
2813 DPAA2_CMD_INIT(&cmd);
2814
2815 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
2816 if (error) {
2817 device_printf(dev, "%s: failed to open resource container: "
2818 "id=%d, error=%d\n", __func__, rcinfo->id, error);
2819 goto err_exit;
2820 }
2821 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
2822 if (error) {
2823 device_printf(dev, "%s: failed to open network interface: "
2824 "id=%d, error=%d\n", __func__, dinfo->id, error);
2825 goto close_rc;
2826 }
2827
2828 error = DPAA2_CMD_NI_GET_IRQ_STATUS(dev, child, &cmd, DPNI_IRQ_INDEX,
2829 &status);
2830 if (error) {
2831 device_printf(sc->dev, "%s: failed to obtain IRQ status: "
2832 "error=%d\n", __func__, error);
2833 }
2834
2835 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
2836 close_rc:
2837 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
2838 err_exit:
2839 return;
2840 }
2841
2842 /**
2843 * @brief Execute channel's Rx/Tx routines.
2844 *
2845 * NOTE: Should not be re-entrant for the same channel. It is achieved by
2846 * enqueuing the cleanup routine on a single-threaded taskqueue.
2847 */
2848 static void
2849 dpaa2_ni_cleanup_task(void *arg, int count)
2850 {
2851 struct dpaa2_channel *ch = (struct dpaa2_channel *)arg;
2852 struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev);
2853 int error, rxc, txc;
2854
2855 for (int i = 0; i < DPAA2_CLEAN_BUDGET; i++) {
2856 rxc = dpaa2_ni_rx_cleanup(ch);
2857 txc = dpaa2_ni_tx_cleanup(ch);
2858
2859 if (__predict_false((if_getdrvflags(sc->ifp) &
2860 IFF_DRV_RUNNING) == 0)) {
2861 return;
2862 }
2863
2864 if ((txc != DPAA2_TX_BUDGET) && (rxc != DPAA2_RX_BUDGET)) {
2865 break;
2866 }
2867 }
2868
2869 /* Re-arm channel to generate CDAN */
2870 error = DPAA2_SWP_CONF_WQ_CHANNEL(ch->io_dev, &ch->ctx);
2871 if (error != 0) {
2872 panic("%s: failed to rearm channel: chan_id=%d, error=%d\n",
2873 __func__, ch->id, error);
2874 }
2875 }
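/*
 * The non-reentrancy noted above relies on cleanup_tq having exactly one
 * thread; a sketch of such a setup (names assumed, the actual code lives
 * in dpaa2_channel.c):
 *
 *	TASK_INIT(&ch->cleanup_task, 0, dpaa2_ni_cleanup_task, ch);
 *	ch->cleanup_tq = taskqueue_create("dpaa2_ch cleanup", M_WAITOK,
 *	    taskqueue_thread_enqueue, &ch->cleanup_tq);
 *	taskqueue_start_threads(&ch->cleanup_tq, 1, PI_NET,
 *	    "dpaa2_ch%d cleanup", ch->id);
 */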
2876
2877 /**
2878 * @brief Poll frames from a specific channel when CDAN is received.
2879 */
2880 static int
2881 dpaa2_ni_rx_cleanup(struct dpaa2_channel *ch)
2882 {
2883 struct dpaa2_io_softc *iosc = device_get_softc(ch->io_dev);
2884 struct dpaa2_swp *swp = iosc->swp;
2885 struct dpaa2_ni_fq *fq;
2886 struct dpaa2_buf *buf = &ch->store;
2887 int budget = DPAA2_RX_BUDGET;
2888 int error; uint32_t consumed = 0;
2889
2890 do {
2891 error = dpaa2_swp_pull(swp, ch->id, buf, DPAA2_ETH_STORE_FRAMES);
2892 if (error) {
2893 device_printf(ch->ni_dev, "%s: failed to pull frames: "
2894 "chan_id=%d, error=%d\n", __func__, ch->id, error);
2895 break;
2896 }
2897 error = dpaa2_ni_consume_frames(ch, &fq, &consumed);
2898 if (error == ENOENT || error == EALREADY) {
2899 break;
2900 }
2901 if (error == ETIMEDOUT) {
2902 device_printf(ch->ni_dev, "%s: timed out while consuming "
2903 "frames: chan_id=%d\n", __func__, ch->id);
2904 }
2905 } while (--budget);
2906
2907 return (DPAA2_RX_BUDGET - budget);
2908 }
2909
2910 static int
2911 dpaa2_ni_tx_cleanup(struct dpaa2_channel *ch)
2912 {
2913 struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev);
2914 struct dpaa2_ni_tx_ring *tx = &ch->txc_queue.tx_rings[0];
2915 struct mbuf *m = NULL;
2916 int budget = DPAA2_TX_BUDGET;
2917
2918 do {
2919 mtx_assert(&ch->xmit_mtx, MA_NOTOWNED);
2920 mtx_lock(&ch->xmit_mtx);
2921 m = buf_ring_dequeue_sc(ch->xmit_br);
2922 mtx_unlock(&ch->xmit_mtx);
2923
2924 if (__predict_false(m == NULL)) {
2925 /* TODO: Do not give up easily */
2926 break;
2927 } else {
2928 dpaa2_ni_tx(sc, ch, tx, m);
2929 }
2930 } while (--budget);
2931
2932 return (DPAA2_TX_BUDGET - budget);
2933 }
2934
2935 static void
2936 dpaa2_ni_tx(struct dpaa2_ni_softc *sc, struct dpaa2_channel *ch,
2937 struct dpaa2_ni_tx_ring *tx, struct mbuf *m)
2938 {
2939 device_t dev = sc->dev;
2940 struct dpaa2_ni_fq *fq = tx->fq;
2941 struct dpaa2_buf *buf, *sgt;
2942 struct dpaa2_fd fd;
2943 struct mbuf *md;
2944 bus_dma_segment_t segs[DPAA2_TX_SEGLIMIT];
2945 int rc, nsegs;
2946 int error;
2947 int len;
2948 bool mcast;
2949
2950 mtx_assert(&tx->lock, MA_NOTOWNED);
2951 mtx_lock(&tx->lock);
2952 buf = buf_ring_dequeue_sc(tx->br);
2953 mtx_unlock(&tx->lock);
2954 if (__predict_false(buf == NULL)) {
2955 /* TODO: Do not give up easily */
2956 m_freem(m);
2957 return;
2958 } else {
2959 DPAA2_BUF_ASSERT_TXREADY(buf);
2960 buf->m = m;
2961 sgt = buf->sgt;
2962 }
2963 len = m->m_pkthdr.len;
2964 mcast = (m->m_flags & M_MCAST) != 0;
2965
2966 #if defined(INVARIANTS)
2967 struct dpaa2_ni_tx_ring *btx = (struct dpaa2_ni_tx_ring *)buf->opt;
2968 KASSERT(buf->opt == tx, ("%s: unexpected Tx ring", __func__));
2969 KASSERT(btx->fq->chan == ch, ("%s: unexpected channel", __func__));
2970 #endif /* INVARIANTS */
2971
2972 BPF_MTAP(sc->ifp, m);
2973
2974 error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, m, segs, &nsegs,
2975 BUS_DMA_NOWAIT);
2976 if (__predict_false(error != 0)) {
2977 /* Too many fragments, trying to defragment... */
2978 md = m_collapse(m, M_NOWAIT, DPAA2_TX_SEGLIMIT);
2979 if (md == NULL) {
2980 device_printf(dev, "%s: m_collapse() failed\n", __func__);
2981 fq->chan->tx_dropped++;
2982 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
2983 goto err;
2984 }
2985
2986 buf->m = m = md;
2987 error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, m, segs,
2988 &nsegs, BUS_DMA_NOWAIT);
2989 if (__predict_false(error != 0)) {
2990 device_printf(dev, "%s: bus_dmamap_load_mbuf_sg() "
2991 "failed: error=%d\n", __func__, error);
2992 fq->chan->tx_dropped++;
2993 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
2994 goto err;
2995 }
2996 }
2997
2998 error = dpaa2_ni_build_fd(sc, tx, buf, segs, nsegs, &fd);
2999 if (__predict_false(error != 0)) {
3000 device_printf(dev, "%s: failed to build frame descriptor: "
3001 "error=%d\n", __func__, error);
3002 fq->chan->tx_dropped++;
3003 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
3004 goto err_unload;
3005 }
3006
3007 /* Make the frame data and S/G table visible to the device. */
3008 bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_PREWRITE);
3009 bus_dmamap_sync(sgt->dmat, sgt->dmap, BUS_DMASYNC_PREWRITE);
3010 
3011 /* TODO: Enqueue several frames in a single command */
3012 for (int i = 0; i < DPAA2_NI_ENQUEUE_RETRIES; i++) {
3013 /* TODO: Return error codes instead of # of frames */
3014 rc = DPAA2_SWP_ENQ_MULTIPLE_FQ(fq->chan->io_dev, tx->fqid, &fd, 1);
3015 if (rc == 1) {
3016 break;
3017 }
3018 }
3018
3019 if (rc != 1) {
3020 fq->chan->tx_dropped++;
3021 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
3022 goto err_unload;
3023 } else {
3024 if (mcast)
3025 if_inc_counter(sc->ifp, IFCOUNTER_OMCASTS, 1);
3026 if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
3027 if_inc_counter(sc->ifp, IFCOUNTER_OBYTES, len);
3028 fq->chan->tx_frames++;
3029 }
3030 return;
3031
3032 err_unload:
3033 bus_dmamap_unload(buf->dmat, buf->dmap);
3034 if (sgt->paddr != 0) {
3035 bus_dmamap_unload(sgt->dmat, sgt->dmap);
3036 }
3037 err:
3038 m_freem(buf->m);
3039 buf_ring_enqueue(tx->br, buf);
3040 }
3041
3042 static int
3043 dpaa2_ni_consume_frames(struct dpaa2_channel *chan, struct dpaa2_ni_fq **src,
3044 uint32_t *consumed)
3045 {
3046 struct dpaa2_ni_fq *fq = NULL;
3047 struct dpaa2_dq *dq;
3048 struct dpaa2_fd *fd;
3049 struct dpaa2_ni_rx_ctx ctx = {
3050 .head = NULL,
3051 .tail = NULL,
3052 .cnt = 0,
3053 .last = false
3054 };
3055 int rc, frames = 0;
3056
3057 do {
3058 rc = dpaa2_chan_next_frame(chan, &dq);
3059 if (rc == EINPROGRESS) {
3060 if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
3061 fd = &dq->fdr.fd;
3062 fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;
3063
3064 switch (fq->type) {
3065 case DPAA2_NI_QUEUE_RX:
3066 (void)dpaa2_ni_rx(chan, fq, fd, &ctx);
3067 break;
3068 case DPAA2_NI_QUEUE_RX_ERR:
3069 (void)dpaa2_ni_rx_err(chan, fq, fd);
3070 break;
3071 case DPAA2_NI_QUEUE_TX_CONF:
3072 (void)dpaa2_ni_tx_conf(chan, fq, fd);
3073 break;
3074 default:
3075 panic("%s: unknown queue type (1)",
3076 __func__);
3077 }
3078 frames++;
3079 }
3080 } else if (rc == EALREADY || rc == ENOENT) {
3081 if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) {
3082 fd = &dq->fdr.fd;
3083 fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx;
3084
3085 switch (fq->type) {
3086 case DPAA2_NI_QUEUE_RX:
3087 /*
3088 * Last VDQ response (mbuf) in a chain
3089 * obtained from the Rx queue.
3090 */
3091 ctx.last = true;
3092 (void)dpaa2_ni_rx(chan, fq, fd, &ctx);
3093 break;
3094 case DPAA2_NI_QUEUE_RX_ERR:
3095 (void)dpaa2_ni_rx_err(chan, fq, fd);
3096 break;
3097 case DPAA2_NI_QUEUE_TX_CONF:
3098 (void)dpaa2_ni_tx_conf(chan, fq, fd);
3099 break;
3100 default:
3101 panic("%s: unknown queue type (2)",
3102 __func__);
3103 }
3104 frames++;
3105 }
3106 break;
3107 } else {
3108 panic("%s: should not reach here: rc=%d", __func__, rc);
3109 }
3110 } while (true);
3111
3112 KASSERT(chan->store_idx < chan->store_sz, ("%s: store_idx(%d) >= "
3113 "store_sz(%d)", __func__, chan->store_idx, chan->store_sz));
3114
3115 /*
3116 * VDQ operation pulls frames from a single queue into the store.
3117 * Return the frame queue and a number of consumed frames as an output.
3118 */
3119 if (src != NULL) {
3120 *src = fq;
3121 }
3122 if (consumed != NULL) {
3123 *consumed = frames;
3124 }
3125
3126 return (rc);
3127 }
3128
3129 /**
3130 * @brief Receive frames.
3131 */
3132 static int
3133 dpaa2_ni_rx(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq, struct dpaa2_fd *fd,
3134 struct dpaa2_ni_rx_ctx *ctx)
3135 {
3136 bus_addr_t paddr = (bus_addr_t)fd->addr;
3137 struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr);
3138 struct dpaa2_buf *buf = fa->buf;
3139 struct dpaa2_channel *bch = (struct dpaa2_channel *)buf->opt;
3140 struct dpaa2_ni_softc *sc = device_get_softc(bch->ni_dev);
3141 struct dpaa2_bp_softc *bpsc;
3142 struct mbuf *m;
3143 device_t bpdev;
3144 bus_addr_t released[DPAA2_SWP_BUFS_PER_CMD];
3145 void *buf_data;
3146 int buf_len, error, released_n = 0;
3147
3148 KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
3149 /*
3150 * NOTE: Current channel might not be the same as the "buffer" channel
3151 * and it's fine. It must not be NULL though.
3152 */
3153 KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__));
3154
3155 if (__predict_false(paddr != buf->paddr)) {
3156 panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
3157 __func__, paddr, buf->paddr);
3158 }
3159
3160 switch (dpaa2_ni_fd_err(fd)) {
3161 case 1: /* Enqueue rejected by QMan */
3162 sc->rx_enq_rej_frames++;
3163 break;
3164 case 2: /* QMan IEOI error */
3165 sc->rx_ieoi_err_frames++;
3166 break;
3167 default:
3168 break;
3169 }
3170 switch (dpaa2_ni_fd_format(fd)) {
3171 case DPAA2_FD_SINGLE:
3172 sc->rx_single_buf_frames++;
3173 break;
3174 case DPAA2_FD_SG:
3175 sc->rx_sg_buf_frames++;
3176 break;
3177 default:
3178 break;
3179 }
3180
3181 mtx_assert(&bch->dma_mtx, MA_NOTOWNED);
3182 mtx_lock(&bch->dma_mtx);
3183
3184 bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_POSTREAD);
3185 bus_dmamap_unload(buf->dmat, buf->dmap);
3186 m = buf->m;
3187 buf_len = dpaa2_ni_fd_data_len(fd);
3188 buf_data = (uint8_t *)buf->vaddr + dpaa2_ni_fd_offset(fd);
3189 /* Prepare buffer to be re-cycled */
3190 buf->m = NULL;
3191 buf->paddr = 0;
3192 buf->vaddr = NULL;
3193 buf->seg.ds_addr = 0;
3194 buf->seg.ds_len = 0;
3195 buf->nseg = 0;
3196
3197 mtx_unlock(&bch->dma_mtx);
3198
3199 m->m_flags |= M_PKTHDR;
3200 m->m_data = buf_data;
3201 m->m_len = buf_len;
3202 m->m_pkthdr.len = buf_len;
3203 m->m_pkthdr.rcvif = sc->ifp;
3204 m->m_pkthdr.flowid = fq->fqid;
3205 M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
3206 if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
3207
3208 if (ctx->head == NULL) {
3209 KASSERT(ctx->tail == NULL, ("%s: tail already given?", __func__));
3210 ctx->head = m;
3211 ctx->tail = m;
3212 } else {
3213 KASSERT(ctx->head != NULL, ("%s: head is NULL", __func__));
3214 ctx->tail->m_nextpkt = m;
3215 ctx->tail = m;
3216 }
3217 ctx->cnt++;
3218
3219 if (ctx->last) {
3220 ctx->tail->m_nextpkt = NULL;
3221 if_input(sc->ifp, ctx->head);
3222 }
3223
3224 /* Keep the buffer to be recycled */
3225 ch->recycled[ch->recycled_n++] = buf;
3226
3227 /* Re-seed and release recycled buffers back to the pool */
3228 if (ch->recycled_n == DPAA2_SWP_BUFS_PER_CMD) {
3229 /* Release new buffers to the pool if needed */
3230 taskqueue_enqueue(sc->bp_taskq, &ch->bp_task);
3231
3232 for (int i = 0; i < ch->recycled_n; i++) {
3233 buf = ch->recycled[i];
3234 bch = (struct dpaa2_channel *)buf->opt;
3235
3236 mtx_assert(&bch->dma_mtx, MA_NOTOWNED);
3237 mtx_lock(&bch->dma_mtx);
3238 error = dpaa2_buf_seed_rxb(sc->dev, buf,
3239 DPAA2_RX_BUF_SIZE, &bch->dma_mtx);
3240 mtx_unlock(&bch->dma_mtx);
3241
3242 if (__predict_false(error != 0)) {
3243 /* TODO: What else to do with the buffer? */
3244 panic("%s: failed to recycle buffer: error=%d",
3245 __func__, error);
3246 }
3247
3248 /* Prepare buffer to be released in a single command */
3249 released[released_n++] = buf->paddr;
3250 }
3251
3252 /* There's only one buffer pool for now */
3253 bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
3254 bpsc = device_get_softc(bpdev);
3255
3256 error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpsc->attr.bpid,
3257 released, released_n);
3258 if (__predict_false(error != 0)) {
3259 device_printf(sc->dev, "%s: failed to release buffers "
3260 "to the pool: error=%d\n", __func__, error);
3261 return (error);
3262 }
3263 ch->recycled_n = 0;
3264 }
3265
3266 return (0);
3267 }
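/*
 * The fa->buf back-pointer dereferenced above is written into the software
 * annotation area when an Rx buffer is seeded, mirroring what
 * dpaa2_ni_build_fd() does for Tx; a sketch of the seeding side (the
 * actual code lives in dpaa2_buf.c):
 *
 *	fa = (struct dpaa2_fa *)buf->vaddr;
 *	fa->magic = DPAA2_MAGIC;
 *	fa->buf = buf;
 *
 * This is what lets PHYS_TO_DMAP(fd->addr) recover the owning dpaa2_buf
 * without a lookup table.
 */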
3268
3269 /**
3270 * @brief Receive Rx error frames.
3271 */
3272 static int
3273 dpaa2_ni_rx_err(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq,
3274 struct dpaa2_fd *fd)
3275 {
3276 bus_addr_t paddr = (bus_addr_t)fd->addr;
3277 struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr);
3278 struct dpaa2_buf *buf = fa->buf;
3279 struct dpaa2_channel *bch = (struct dpaa2_channel *)buf->opt;
3280 struct dpaa2_ni_softc *sc = device_get_softc(bch->ni_dev);
3281 device_t bpdev;
3282 struct dpaa2_bp_softc *bpsc;
3283 int error;
3284
3285 KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
3286 /*
3287 * NOTE: Current channel might not be the same as the "buffer" channel
3288 * and it's fine. It must not be NULL though.
3289 */
3290 KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__));
3291
3292 if (__predict_false(paddr != buf->paddr)) {
3293 panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
3294 __func__, paddr, buf->paddr);
3295 }
3296
3297 /* There's only one buffer pool for now */
3298 bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
3299 bpsc = device_get_softc(bpdev);
3300
3301 /* Release buffer to QBMan buffer pool */
3302 error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpsc->attr.bpid, &paddr, 1);
3303 if (error != 0) {
3304 device_printf(sc->dev, "%s: failed to release frame buffer to "
3305 "the pool: error=%d\n", __func__, error);
3306 return (error);
3307 }
3308
3309 return (0);
3310 }
3311
3312 /**
3313 * @brief Receive Tx confirmation frames.
3314 */
3315 static int
3316 dpaa2_ni_tx_conf(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq,
3317 struct dpaa2_fd *fd)
3318 {
3319 bus_addr_t paddr = (bus_addr_t)fd->addr;
3320 struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr);
3321 struct dpaa2_buf *buf = fa->buf;
3322 struct dpaa2_buf *sgt = buf->sgt;
3323 struct dpaa2_ni_tx_ring *tx = (struct dpaa2_ni_tx_ring *)buf->opt;
3324 struct dpaa2_channel *bch = tx->fq->chan;
3325
3326 KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__));
3327 KASSERT(tx != NULL, ("%s: Tx ring is NULL", __func__));
3328 KASSERT(sgt != NULL, ("%s: S/G table is NULL", __func__));
3329 /*
3330 * NOTE: Current channel might not be the same as the "buffer" channel
3331 * and it's fine. It must not be NULL though.
3332 */
3333 KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__));
3334
3335 if (paddr != buf->paddr) {
3336 panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)",
3337 __func__, paddr, buf->paddr);
3338 }
3339
3340 mtx_assert(&bch->dma_mtx, MA_NOTOWNED);
3341 mtx_lock(&bch->dma_mtx);
3342
3343 bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_POSTWRITE);
3344 bus_dmamap_sync(sgt->dmat, sgt->dmap, BUS_DMASYNC_POSTWRITE);
3345 bus_dmamap_unload(buf->dmat, buf->dmap);
3346 bus_dmamap_unload(sgt->dmat, sgt->dmap);
3347 m_freem(buf->m);
3348 buf->m = NULL;
3349 buf->paddr = 0;
3350 buf->vaddr = NULL;
3351 sgt->paddr = 0;
3352
3353 mtx_unlock(&bch->dma_mtx);
3354
3355 /* Return Tx buffer back to the ring */
3356 buf_ring_enqueue(tx->br, buf);
3357
3358 return (0);
3359 }
3360
3361 /**
3362 * @brief Compare versions of the DPAA2 network interface API.
3363 */
3364 static int
3365 dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *sc, uint16_t major,
3366 uint16_t minor)
3367 {
3368 if (sc->api_major == major) {
3369 return (sc->api_minor - minor);
3370 }
3371 return (sc->api_major - major);
3372 }
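/*
 * Returns >0, 0 or <0 in the spirit of strcmp(3). A typical guard for a
 * feature introduced in DPNI API 7.12 (version chosen purely for
 * illustration):
 *
 *	if (dpaa2_ni_cmp_api_version(sc, 7, 12) >= 0) {
 *		... use the newer command ...
 *	}
 */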
3373
3374 /**
3375 * @brief Build a DPAA2 frame descriptor.
3376 */
3377 static int
3378 dpaa2_ni_build_fd(struct dpaa2_ni_softc *sc, struct dpaa2_ni_tx_ring *tx,
3379 struct dpaa2_buf *buf, bus_dma_segment_t *segs, int nsegs, struct dpaa2_fd *fd)
3380 {
3381 struct dpaa2_buf *sgt = buf->sgt;
3382 struct dpaa2_sg_entry *sge;
3383 struct dpaa2_fa *fa;
3384 int i, error;
3385
3386 KASSERT(nsegs <= DPAA2_TX_SEGLIMIT, ("%s: too many segments", __func__));
3387 KASSERT(buf->opt != NULL, ("%s: no Tx ring?", __func__));
3388 KASSERT(sgt != NULL, ("%s: no S/G table?", __func__));
3389 KASSERT(sgt->vaddr != NULL, ("%s: no S/G vaddr?", __func__));
3390
3391 memset(fd, 0, sizeof(*fd));
3392
3393 /* Populate and map S/G table */
3394 if (__predict_true(nsegs <= DPAA2_TX_SEGLIMIT)) {
3395 sge = (struct dpaa2_sg_entry *)sgt->vaddr + sc->tx_data_off;
3396 for (i = 0; i < nsegs; i++) {
3397 sge[i].addr = (uint64_t)segs[i].ds_addr;
3398 sge[i].len = (uint32_t)segs[i].ds_len;
3399 sge[i].offset_fmt = 0u;
3400 }
3401 sge[i-1].offset_fmt |= 0x8000u; /* set final entry flag */
3402
3403 KASSERT(sgt->paddr == 0, ("%s: paddr(%#jx) != 0", __func__,
3404 sgt->paddr));
3405
3406 error = bus_dmamap_load(sgt->dmat, sgt->dmap, sgt->vaddr,
3407 DPAA2_TX_SGT_SZ, dpaa2_dmamap_oneseg_cb, &sgt->paddr,
3408 BUS_DMA_NOWAIT);
3409 if (__predict_false(error != 0)) {
3410 device_printf(sc->dev, "%s: bus_dmamap_load() failed: "
3411 "error=%d\n", __func__, error);
3412 return (error);
3413 }
3414
3415 buf->paddr = sgt->paddr;
3416 buf->vaddr = sgt->vaddr;
3417 sc->tx_sg_frames++; /* for sysctl(9) */
3418 } else {
3419 return (EINVAL);
3420 }
3421
3422 fa = (struct dpaa2_fa *)sgt->vaddr;
3423 fa->magic = DPAA2_MAGIC;
3424 fa->buf = buf;
3425
3426 fd->addr = buf->paddr;
3427 fd->data_length = (uint32_t)buf->m->m_pkthdr.len;
3428 fd->bpid_ivp_bmt = 0;
3429 fd->offset_fmt_sl = 0x2000u | sc->tx_data_off;
3430 fd->ctrl = 0x00800000u;
3431
3432 return (0);
3433 }
3434
3435 static int
3436 dpaa2_ni_fd_err(struct dpaa2_fd *fd)
3437 {
3438 return ((fd->ctrl >> DPAA2_NI_FD_ERR_SHIFT) & DPAA2_NI_FD_ERR_MASK);
3439 }
3440
3441 static uint32_t
3442 dpaa2_ni_fd_data_len(struct dpaa2_fd *fd)
3443 {
3444 if (dpaa2_ni_fd_short_len(fd)) {
3445 return (fd->data_length & DPAA2_NI_FD_LEN_MASK);
3446 }
3447 return (fd->data_length);
3448 }
3449
3450 static int
3451 dpaa2_ni_fd_format(struct dpaa2_fd *fd)
3452 {
3453 return ((enum dpaa2_fd_format)((fd->offset_fmt_sl >>
3454 DPAA2_NI_FD_FMT_SHIFT) & DPAA2_NI_FD_FMT_MASK));
3455 }
3456
3457 static bool
dpaa2_ni_fd_short_len(struct dpaa2_fd * fd)3458 dpaa2_ni_fd_short_len(struct dpaa2_fd *fd)
3459 {
3460 return (((fd->offset_fmt_sl >> DPAA2_NI_FD_SL_SHIFT)
3461 & DPAA2_NI_FD_SL_MASK) == 1);
3462 }
3463
3464 static int
dpaa2_ni_fd_offset(struct dpaa2_fd * fd)3465 dpaa2_ni_fd_offset(struct dpaa2_fd *fd)
3466 {
3467 return (fd->offset_fmt_sl & DPAA2_NI_FD_OFFSET_MASK);
3468 }
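
/*
 * For reference (read off the shifts and masks used by the accessors
 * above, not quoted from the hardware spec): the 16-bit "offset_fmt_sl"
 * word packs the frame data offset in its low bits
 * (DPAA2_NI_FD_OFFSET_MASK), the frame format above it
 * (DPAA2_NI_FD_FMT_*), and the short-length flag above that
 * (DPAA2_NI_FD_SL_*); "ctrl" carries the error bits (DPAA2_NI_FD_ERR_*).
 */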

/**
 * @brief Collect statistics of the network interface.
 */
static int
dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS)
{
	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *)arg1;
	/*
	 * oid_number is expected to be the index into dpni_stat_sysctls[],
	 * i.e. the sysctl was created with an explicit OID number.
	 */
	struct dpni_stat *stat = &dpni_stat_sysctls[oidp->oid_number];
	device_t pdev = device_get_parent(sc->dev);
	device_t dev = sc->dev;
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_cmd cmd;
	uint64_t cnt[DPAA2_NI_STAT_COUNTERS];
	uint64_t result = 0;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	error = DPAA2_CMD_NI_GET_STATISTICS(dev, child, &cmd, stat->page, 0, cnt);
	if (!error) {
		result = cnt[stat->cnt];
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
exit:
	return (sysctl_handle_64(oidp, &result, 0, req));
}
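
/*
 * A sketch of the matching sysctl(9) registration; the .name/.desc
 * fields of the stat table are illustrative, but the explicit OID
 * number (the table index) is what makes the oidp->oid_number lookup
 * above work:
 *
 *	for (int i = 0; i < ARRAY_SIZE(dpni_stat_sysctls); i++) {
 *		SYSCTL_ADD_PROC(ctx, parent, i, dpni_stat_sysctls[i].name,
 *		    CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
 *		    dpaa2_ni_collect_stats, "QU", dpni_stat_sysctls[i].desc);
 *	}
 */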

static int
dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS)
{
	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *)arg1;
	uint32_t buf_num = DPAA2_ATOMIC_READ(&sc->buf_num);

	return (sysctl_handle_32(oidp, &buf_num, 0, req));
}

static int
dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS)
{
	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *)arg1;
	uint32_t buf_free = DPAA2_ATOMIC_READ(&sc->buf_free);

	return (sysctl_handle_32(oidp, &buf_free, 0, req));
}

static int
dpaa2_ni_set_hash(device_t dev, uint64_t flags)
{
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	uint64_t key = 0;
	int i;

	/* Hash-based distribution is pointless with a single Rx queue. */
	if (sc->attr.num.queues <= 1) {
		return (EOPNOTSUPP);
	}

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		if (dist_fields[i].rxnfc_field & flags) {
			key |= dist_fields[i].id;
		}
	}

	return (dpaa2_ni_set_dist_key(dev, DPAA2_NI_DIST_MODE_HASH, key));
}
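
/*
 * A typical call would enable 5-tuple-style hashing; the exact RXH_
 * macro names come from the dist_fields[] table referenced above, so
 * treat these as illustrative:
 *
 *	error = dpaa2_ni_set_hash(dev, RXH_IP_SRC | RXH_IP_DST |
 *	    RXH_L4_B_0_1 | RXH_L4_B_2_3);
 */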

/**
 * @brief Set the Rx distribution (hash or flow classification) key.
 *
 * "flags" is a combination of RXH_ bits.
 */
static int
dpaa2_ni_set_dist_key(device_t dev, enum dpaa2_ni_dist_mode type, uint64_t flags)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpkg_profile_cfg cls_cfg;
	struct dpkg_extract *key;
	struct dpaa2_buf *buf = &sc->rxd_kcfg;
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int i, error = 0;

	if (__predict_true(buf->dmat == NULL)) {
		buf->dmat = sc->rxd_dmat;
	}

	memset(&cls_cfg, 0, sizeof(cls_cfg));

	/* Configure extracts according to the given flags. */
	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		key = &cls_cfg.extracts[cls_cfg.num_extracts];

		if (!(flags & dist_fields[i].id)) {
			continue;
		}

		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
			device_printf(dev, "%s: failed to add key extraction "
			    "rule\n", __func__);
			return (E2BIG);
		}

		key->type = DPKG_EXTRACT_FROM_HDR;
		key->extract.from_hdr.prot = dist_fields[i].cls_prot;
		key->extract.from_hdr.type = DPKG_FULL_FIELD;
		key->extract.from_hdr.field = dist_fields[i].cls_field;
		cls_cfg.num_extracts++;
	}

	error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr,
	    BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap);
	if (error != 0) {
		device_printf(dev, "%s: failed to allocate a buffer for Rx "
		    "traffic distribution key configuration\n", __func__);
		return (error);
	}

	error = dpaa2_ni_prepare_key_cfg(&cls_cfg, (uint8_t *)buf->vaddr);
	if (error != 0) {
		device_printf(dev, "%s: failed to prepare key configuration: "
		    "error=%d\n", __func__, error);
		return (error);
	}

	/* Prepare for setting the Rx dist. */
	error = bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr,
	    DPAA2_CLASSIFIER_DMA_SIZE, dpaa2_dmamap_oneseg_cb, &buf->paddr,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->dev, "%s: failed to map a buffer for Rx "
		    "traffic distribution key configuration\n", __func__);
		return (error);
	}

	if (type == DPAA2_NI_DIST_MODE_HASH) {
		DPAA2_CMD_INIT(&cmd);

		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
		    &rc_token);
		if (error) {
			device_printf(dev, "%s: failed to open resource "
			    "container: id=%d, error=%d\n", __func__, rcinfo->id,
			    error);
			goto err_exit;
		}
		error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id,
		    &ni_token);
		if (error) {
			device_printf(dev, "%s: failed to open network "
			    "interface: id=%d, error=%d\n", __func__, dinfo->id,
			    error);
			goto close_rc;
		}

		error = DPAA2_CMD_NI_SET_RX_TC_DIST(dev, child, &cmd,
		    sc->attr.num.queues, 0, DPAA2_NI_DIST_MODE_HASH, buf->paddr);
		if (error != 0) {
			device_printf(dev, "%s: failed to set distribution mode "
			    "and size for the traffic class\n", __func__);
		}

		(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
		    ni_token));
close_rc:
		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
		    rc_token));
	}

err_exit:
	return (error);
}

/**
 * @brief Prepare extract parameters.
 *
 * cfg:         defines a full Key Generation profile.
 * key_cfg_buf: zeroed 256-byte buffer, to be mapped for DMA afterwards.
 */
static int
dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *cfg, uint8_t *key_cfg_buf)
{
	struct dpni_ext_set_rx_tc_dist *dpni_ext;
	struct dpni_dist_extract *extr;
	int i, j;

	if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS)
		return (EINVAL);

	dpni_ext = (struct dpni_ext_set_rx_tc_dist *)key_cfg_buf;
	dpni_ext->num_extracts = cfg->num_extracts;

	for (i = 0; i < cfg->num_extracts; i++) {
		extr = &dpni_ext->extracts[i];

		switch (cfg->extracts[i].type) {
		case DPKG_EXTRACT_FROM_HDR:
			extr->prot = cfg->extracts[i].extract.from_hdr.prot;
			extr->efh_type =
			    cfg->extracts[i].extract.from_hdr.type & 0x0Fu;
			extr->size = cfg->extracts[i].extract.from_hdr.size;
			extr->offset = cfg->extracts[i].extract.from_hdr.offset;
			extr->field = cfg->extracts[i].extract.from_hdr.field;
			extr->hdr_index =
			    cfg->extracts[i].extract.from_hdr.hdr_index;
			break;
		case DPKG_EXTRACT_FROM_DATA:
			extr->size = cfg->extracts[i].extract.from_data.size;
			extr->offset =
			    cfg->extracts[i].extract.from_data.offset;
			break;
		case DPKG_EXTRACT_FROM_PARSE:
			extr->size = cfg->extracts[i].extract.from_parse.size;
			extr->offset =
			    cfg->extracts[i].extract.from_parse.offset;
			break;
		default:
			return (EINVAL);
		}

		extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks;
		extr->extract_type = cfg->extracts[i].type & 0x0Fu;

		for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
			extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
			extr->masks[j].offset =
			    cfg->extracts[i].masks[j].offset;
		}
	}

	return (0);
}
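
/*
 * A minimal sketch of feeding this helper with a single-extract profile
 * hashing on the IPv4 source address; the NET_PROT_/NH_FLD_ identifiers
 * follow the ones used by dist_fields[] and are illustrative here:
 *
 *	struct dpkg_profile_cfg cfg;
 *
 *	memset(&cfg, 0, sizeof(cfg));
 *	cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
 *	cfg.extracts[0].extract.from_hdr.prot = NET_PROT_IP;
 *	cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
 *	cfg.extracts[0].extract.from_hdr.field = NH_FLD_IP_SRC;
 *	cfg.num_extracts = 1;
 *
 *	error = dpaa2_ni_prepare_key_cfg(&cfg, (uint8_t *)buf->vaddr);
 */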

static device_method_t dpaa2_ni_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		dpaa2_ni_probe),
	DEVMETHOD(device_attach,	dpaa2_ni_attach),
	DEVMETHOD(device_detach,	dpaa2_ni_detach),

	/* mii via memac_mdio */
	DEVMETHOD(miibus_statchg,	dpaa2_ni_miibus_statchg),

	DEVMETHOD_END
};

static driver_t dpaa2_ni_driver = {
	"dpaa2_ni",
	dpaa2_ni_methods,
	sizeof(struct dpaa2_ni_softc),
};

DRIVER_MODULE(miibus, dpaa2_ni, miibus_driver, 0, 0);
DRIVER_MODULE(dpaa2_ni, dpaa2_rc, dpaa2_ni_driver, 0, 0);

MODULE_DEPEND(dpaa2_ni, miibus, 1, 1, 1);
#ifdef DEV_ACPI
MODULE_DEPEND(dpaa2_ni, memac_mdio_acpi, 1, 1, 1);
#endif
#ifdef FDT
MODULE_DEPEND(dpaa2_ni, memac_mdio_fdt, 1, 1, 1);
#endif