/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright © 2021-2023 Dmitry Salychev
 * Copyright © 2022 Mathew McBride
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 * The DPAA2 Network Interface (DPNI) driver.
 *
 * The DPNI object is a network interface that is configurable to support a
 * wide range of features, from a very basic Ethernet interface up to a
 * high-functioning network interface. The DPNI supports features that are
 * expected by standard network stacks, from basic features to offloads.
 *
 * DPNIs work with Ethernet traffic, starting with the L2 header. Additional
 * functions are provided for standard network protocols (L2, L3, L4, etc.).
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/taskqueue.h>
#include <sys/buf_ring.h>
#include <sys/smp.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/atomic.h>
#include <machine/vmparam.h>

#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <dev/pci/pcivar.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mdio/mdio.h>

#include "opt_acpi.h"
#include "opt_platform.h"

#include "pcib_if.h"
#include "pci_if.h"
#include "miibus_if.h"
#include "memac_mdio_if.h"

#include "dpaa2_types.h"
#include "dpaa2_mc.h"
#include "dpaa2_mc_if.h"
#include "dpaa2_mcp.h"
#include "dpaa2_swp.h"
#include "dpaa2_swp_if.h"
#include "dpaa2_cmd_if.h"
#include "dpaa2_ni.h"
#include "dpaa2_channel.h"
#include "dpaa2_buf.h"

#define BIT(x)			(1ul << (x))
#define WRIOP_VERSION(x, y, z)	((x) << 10 | (y) << 5 | (z) << 0)
#define ARRAY_SIZE(a)		(sizeof(a) / sizeof((a)[0]))

/* Frame Dequeue Response status bits. */
#define IS_NULL_RESPONSE(stat)	((((stat) >> 4) & 1) == 0)

#define ALIGN_UP(x, y)		roundup2((x), (y))
#define ALIGN_DOWN(x, y)	rounddown2((x), (y))
#define CACHE_LINE_ALIGN(x)	ALIGN_UP((x), CACHE_LINE_SIZE)

#define DPNI_LOCK(__sc) do {				\
	mtx_assert(&(__sc)->lock, MA_NOTOWNED);		\
	mtx_lock(&(__sc)->lock);			\
} while (0)
#define DPNI_UNLOCK(__sc) do {				\
	mtx_assert(&(__sc)->lock, MA_OWNED);		\
	mtx_unlock(&(__sc)->lock);			\
} while (0)

#define DPAA2_TX_RING(sc, chan, tc) \
	(&(sc)->channels[(chan)]->txc_queue.tx_rings[(tc)])

MALLOC_DEFINE(M_DPAA2_TXB, "dpaa2_txb", "DPAA2 DMA-mapped buffer (Tx)");

/*
 * How many times the channel cleanup routine will be repeated if the Rx or Tx
 * budget was depleted.
 */
#define DPAA2_CLEAN_BUDGET	64 /* sysctl(9)? */
/* Tx/Rx budget for the channel cleanup task. */
#define DPAA2_TX_BUDGET		128 /* sysctl(9)? */
#define DPAA2_RX_BUDGET		256 /* sysctl(9)? */

#define DPNI_IRQ_INDEX		0 /* Index of the only DPNI IRQ. */
#define DPNI_IRQ_LINK_CHANGED	1 /* Link state changed. */
#define DPNI_IRQ_EP_CHANGED	2 /* DPAA2 endpoint dis/connected. */

/* Default maximum frame length. */
#define DPAA2_ETH_MFL		(ETHER_MAX_LEN - ETHER_CRC_LEN)

/* Minimally supported version of the DPNI API. */
#define DPNI_VER_MAJOR		7
#define DPNI_VER_MINOR		0
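
/*
 * Illustrative compile-time check (added for clarity, not required by the
 * driver): WRIOP_VERSION() packs a major.minor.micro triple into a single
 * integer, so WRIOP v1.0.0 becomes (1 << 10) == 0x400. This is the form in
 * which sc->attr.wriop_ver is compared against known revisions below.
 */
CTASSERT(WRIOP_VERSION(1, 0, 0) == 0x400);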

/* Rx/Tx buffers configuration. */
#define BUF_ALIGN_V1		256 /* WRIOP v1.0.0 limitation */
#define BUF_ALIGN		64
#define BUF_SWA_SIZE		64  /* SW annotation size */
#define BUF_RX_HWA_SIZE		64  /* HW annotation size */
#define BUF_TX_HWA_SIZE		128 /* HW annotation size */

#define DPAA2_RX_BUFRING_SZ	(4096u)
#define DPAA2_RXE_BUFRING_SZ	(1024u)
#define DPAA2_TXC_BUFRING_SZ	(4096u)
#define DPAA2_TX_SEGLIMIT	(16u) /* arbitrary number */
#define DPAA2_TX_SEG_SZ		(PAGE_SIZE)
#define DPAA2_TX_SEGS_MAXSZ	(DPAA2_TX_SEGLIMIT * DPAA2_TX_SEG_SZ)
#define DPAA2_TX_SGT_SZ		(PAGE_SIZE) /* bytes */

/* Size of a buffer to keep a QoS table key configuration. */
#define ETH_QOS_KCFG_BUF_SIZE	(PAGE_SIZE)

/* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */
#define DPAA2_CLASSIFIER_DMA_SIZE (PAGE_SIZE)

/* Buffer layout options. */
#define BUF_LOPT_TIMESTAMP	0x1
#define BUF_LOPT_PARSER_RESULT	0x2
#define BUF_LOPT_FRAME_STATUS	0x4
#define BUF_LOPT_PRIV_DATA_SZ	0x8
#define BUF_LOPT_DATA_ALIGN	0x10
#define BUF_LOPT_DATA_HEAD_ROOM	0x20
#define BUF_LOPT_DATA_TAIL_ROOM	0x40

#define DPAA2_NI_BUF_ADDR_MASK	(0x1FFFFFFFFFFFFul) /* 49-bit addresses max. */
#define DPAA2_NI_BUF_CHAN_MASK	(0xFu)
#define DPAA2_NI_BUF_CHAN_SHIFT	(60)
#define DPAA2_NI_BUF_IDX_MASK	(0x7FFFu)
#define DPAA2_NI_BUF_IDX_SHIFT	(49)
#define DPAA2_NI_TX_IDX_MASK	(0x7u)
#define DPAA2_NI_TX_IDX_SHIFT	(57)
#define DPAA2_NI_TXBUF_IDX_MASK	(0xFFu)
#define DPAA2_NI_TXBUF_IDX_SHIFT (49)

#define DPAA2_NI_FD_FMT_MASK	(0x3u)
#define DPAA2_NI_FD_FMT_SHIFT	(12)
#define DPAA2_NI_FD_ERR_MASK	(0xFFu)
#define DPAA2_NI_FD_ERR_SHIFT	(0)
#define DPAA2_NI_FD_SL_MASK	(0x1u)
#define DPAA2_NI_FD_SL_SHIFT	(14)
#define DPAA2_NI_FD_LEN_MASK	(0x3FFFFu)
#define DPAA2_NI_FD_OFFSET_MASK	(0x0FFFu)
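
/*
 * Sketch (for illustration only; the real accessors operate on struct
 * dpaa2_fd): given the offset/format word "w" of a frame descriptor, the
 * masks and shifts above recover the individual fields, e.g.
 *
 *	fmt    = (w >> DPAA2_NI_FD_FMT_SHIFT) & DPAA2_NI_FD_FMT_MASK;
 *	sl     = (w >> DPAA2_NI_FD_SL_SHIFT) & DPAA2_NI_FD_SL_MASK;
 *	offset =  w & DPAA2_NI_FD_OFFSET_MASK;
 *
 * dpaa2_ni_fd_format(), dpaa2_ni_fd_short_len() and dpaa2_ni_fd_offset(),
 * declared below, apply this pattern.
 */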

/* Enables TCAM for Flow Steering and QoS look-ups. */
#define DPNI_OPT_HAS_KEY_MASKING 0x10

/* Unique IDs for the supported Rx classification header fields. */
#define DPAA2_ETH_DIST_ETHDST	BIT(0)
#define DPAA2_ETH_DIST_ETHSRC	BIT(1)
#define DPAA2_ETH_DIST_ETHTYPE	BIT(2)
#define DPAA2_ETH_DIST_VLAN	BIT(3)
#define DPAA2_ETH_DIST_IPSRC	BIT(4)
#define DPAA2_ETH_DIST_IPDST	BIT(5)
#define DPAA2_ETH_DIST_IPPROTO	BIT(6)
#define DPAA2_ETH_DIST_L4SRC	BIT(7)
#define DPAA2_ETH_DIST_L4DST	BIT(8)
#define DPAA2_ETH_DIST_ALL	(~0ULL)

/* L3-L4 network traffic flow hash options. */
#define RXH_L2DA		(1 << 1)
#define RXH_VLAN		(1 << 2)
#define RXH_L3_PROTO		(1 << 3)
#define RXH_IP_SRC		(1 << 4)
#define RXH_IP_DST		(1 << 5)
#define RXH_L4_B_0_1		(1 << 6) /* src port in case of TCP/UDP/SCTP */
#define RXH_L4_B_2_3		(1 << 7) /* dst port in case of TCP/UDP/SCTP */
#define RXH_DISCARD		(1 << 31)

/* Default Rx hash options, set during attaching. */
#define DPAA2_RXH_DEFAULT	(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)

MALLOC_DEFINE(M_DPAA2_NI, "dpaa2_ni", "DPAA2 Network Interface");

/*
 * DPAA2 Network Interface resource specification.
 *
 * NOTE: Don't forget to update macros in dpaa2_ni.h in case of any changes in
 *       the specification!
 */
struct resource_spec dpaa2_ni_spec[] = {
	/*
	 * DPMCP resources.
	 *
	 * NOTE: MC command portals (MCPs) are used to send commands to, and
	 *       receive responses from, the MC firmware. One portal per DPNI.
	 */
	{ DPAA2_DEV_MCP, DPAA2_NI_MCP_RID(0), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	/*
	 * DPIO resources (software portals).
	 *
	 * NOTE: One per running core. While DPIOs are the source of data
	 *       availability interrupts, the DPCONs are used to identify the
	 *       network interface that has produced ingress data to that core.
	 */
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(0), RF_ACTIVE | RF_SHAREABLE },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(1), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(2), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(3), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(4), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(5), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(6), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(7), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(8), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(9), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(10), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(11), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(12), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(13), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(14), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	{ DPAA2_DEV_IO, DPAA2_NI_IO_RID(15), RF_ACTIVE | RF_SHAREABLE | RF_OPTIONAL },
	/*
	 * DPBP resources (buffer pools).
	 *
	 * NOTE: One per network interface.
	 */
	{ DPAA2_DEV_BP, DPAA2_NI_BP_RID(0), RF_ACTIVE },
	/*
	 * DPCON resources (channels).
	 *
	 * NOTE: One DPCON per core where Rx or Tx confirmation traffic is to
	 *       be distributed.
	 * NOTE: Since it is necessary to distinguish between traffic from
	 *       different network interfaces arriving on the same core, the
	 *       DPCONs must be private to the DPNIs.
	 */
275 */ 276 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(0), RF_ACTIVE }, 277 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(1), RF_ACTIVE | RF_OPTIONAL }, 278 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(2), RF_ACTIVE | RF_OPTIONAL }, 279 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(3), RF_ACTIVE | RF_OPTIONAL }, 280 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(4), RF_ACTIVE | RF_OPTIONAL }, 281 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(5), RF_ACTIVE | RF_OPTIONAL }, 282 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(6), RF_ACTIVE | RF_OPTIONAL }, 283 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(7), RF_ACTIVE | RF_OPTIONAL }, 284 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(8), RF_ACTIVE | RF_OPTIONAL }, 285 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(9), RF_ACTIVE | RF_OPTIONAL }, 286 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(10), RF_ACTIVE | RF_OPTIONAL }, 287 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(11), RF_ACTIVE | RF_OPTIONAL }, 288 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(12), RF_ACTIVE | RF_OPTIONAL }, 289 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(13), RF_ACTIVE | RF_OPTIONAL }, 290 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(14), RF_ACTIVE | RF_OPTIONAL }, 291 { DPAA2_DEV_CON, DPAA2_NI_CON_RID(15), RF_ACTIVE | RF_OPTIONAL }, 292 293 RESOURCE_SPEC_END 294 }; 295 296 /* Supported header fields for Rx hash distribution key */ 297 static const struct dpaa2_eth_dist_fields dist_fields[] = { 298 { 299 /* L2 header */ 300 .rxnfc_field = RXH_L2DA, 301 .cls_prot = NET_PROT_ETH, 302 .cls_field = NH_FLD_ETH_DA, 303 .id = DPAA2_ETH_DIST_ETHDST, 304 .size = 6, 305 }, { 306 .cls_prot = NET_PROT_ETH, 307 .cls_field = NH_FLD_ETH_SA, 308 .id = DPAA2_ETH_DIST_ETHSRC, 309 .size = 6, 310 }, { 311 /* This is the last ethertype field parsed: 312 * depending on frame format, it can be the MAC ethertype 313 * or the VLAN etype. 314 */ 315 .cls_prot = NET_PROT_ETH, 316 .cls_field = NH_FLD_ETH_TYPE, 317 .id = DPAA2_ETH_DIST_ETHTYPE, 318 .size = 2, 319 }, { 320 /* VLAN header */ 321 .rxnfc_field = RXH_VLAN, 322 .cls_prot = NET_PROT_VLAN, 323 .cls_field = NH_FLD_VLAN_TCI, 324 .id = DPAA2_ETH_DIST_VLAN, 325 .size = 2, 326 }, { 327 /* IP header */ 328 .rxnfc_field = RXH_IP_SRC, 329 .cls_prot = NET_PROT_IP, 330 .cls_field = NH_FLD_IP_SRC, 331 .id = DPAA2_ETH_DIST_IPSRC, 332 .size = 4, 333 }, { 334 .rxnfc_field = RXH_IP_DST, 335 .cls_prot = NET_PROT_IP, 336 .cls_field = NH_FLD_IP_DST, 337 .id = DPAA2_ETH_DIST_IPDST, 338 .size = 4, 339 }, { 340 .rxnfc_field = RXH_L3_PROTO, 341 .cls_prot = NET_PROT_IP, 342 .cls_field = NH_FLD_IP_PROTO, 343 .id = DPAA2_ETH_DIST_IPPROTO, 344 .size = 1, 345 }, { 346 /* Using UDP ports, this is functionally equivalent to raw 347 * byte pairs from L4 header. 
348 */ 349 .rxnfc_field = RXH_L4_B_0_1, 350 .cls_prot = NET_PROT_UDP, 351 .cls_field = NH_FLD_UDP_PORT_SRC, 352 .id = DPAA2_ETH_DIST_L4SRC, 353 .size = 2, 354 }, { 355 .rxnfc_field = RXH_L4_B_2_3, 356 .cls_prot = NET_PROT_UDP, 357 .cls_field = NH_FLD_UDP_PORT_DST, 358 .id = DPAA2_ETH_DIST_L4DST, 359 .size = 2, 360 }, 361 }; 362 363 static struct dpni_stat { 364 int page; 365 int cnt; 366 char *name; 367 char *desc; 368 } dpni_stat_sysctls[DPAA2_NI_STAT_SYSCTLS] = { 369 /* PAGE, COUNTER, NAME, DESCRIPTION */ 370 { 0, 0, "in_all_frames", "All accepted ingress frames" }, 371 { 0, 1, "in_all_bytes", "Bytes in all accepted ingress frames" }, 372 { 0, 2, "in_multi_frames", "Multicast accepted ingress frames" }, 373 { 1, 0, "eg_all_frames", "All egress frames transmitted" }, 374 { 1, 1, "eg_all_bytes", "Bytes in all frames transmitted" }, 375 { 1, 2, "eg_multi_frames", "Multicast egress frames transmitted" }, 376 { 2, 0, "in_filtered_frames", "All ingress frames discarded due to " 377 "filtering" }, 378 { 2, 1, "in_discarded_frames", "All frames discarded due to errors" }, 379 { 2, 2, "in_nobuf_discards", "Discards on ingress side due to buffer " 380 "depletion in DPNI buffer pools" }, 381 }; 382 383 struct dpaa2_ni_rx_ctx { 384 struct mbuf *head; 385 struct mbuf *tail; 386 int cnt; 387 bool last; 388 }; 389 390 /* Device interface */ 391 static int dpaa2_ni_probe(device_t); 392 static int dpaa2_ni_attach(device_t); 393 static int dpaa2_ni_detach(device_t); 394 395 /* DPAA2 network interface setup and configuration */ 396 static int dpaa2_ni_setup(device_t); 397 static int dpaa2_ni_setup_channels(device_t); 398 static int dpaa2_ni_bind(device_t); 399 static int dpaa2_ni_setup_rx_dist(device_t); 400 static int dpaa2_ni_setup_irqs(device_t); 401 static int dpaa2_ni_setup_msi(struct dpaa2_ni_softc *); 402 static int dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *); 403 static int dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *); 404 static int dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *); 405 static int dpaa2_ni_setup_dma(struct dpaa2_ni_softc *); 406 407 /* Tx/Rx flow configuration */ 408 static int dpaa2_ni_setup_rx_flow(device_t, struct dpaa2_ni_fq *); 409 static int dpaa2_ni_setup_tx_flow(device_t, struct dpaa2_ni_fq *); 410 static int dpaa2_ni_setup_rx_err_flow(device_t, struct dpaa2_ni_fq *); 411 412 /* Configuration subroutines */ 413 static int dpaa2_ni_set_buf_layout(device_t); 414 static int dpaa2_ni_set_pause_frame(device_t); 415 static int dpaa2_ni_set_qos_table(device_t); 416 static int dpaa2_ni_set_mac_addr(device_t); 417 static int dpaa2_ni_set_hash(device_t, uint64_t); 418 static int dpaa2_ni_set_dist_key(device_t, enum dpaa2_ni_dist_mode, uint64_t); 419 420 /* Frame descriptor routines */ 421 static int dpaa2_ni_build_fd(struct dpaa2_ni_softc *, struct dpaa2_ni_tx_ring *, 422 struct dpaa2_buf *, bus_dma_segment_t *, int, struct dpaa2_fd *); 423 static int dpaa2_ni_fd_err(struct dpaa2_fd *); 424 static uint32_t dpaa2_ni_fd_data_len(struct dpaa2_fd *); 425 static int dpaa2_ni_fd_format(struct dpaa2_fd *); 426 static bool dpaa2_ni_fd_short_len(struct dpaa2_fd *); 427 static int dpaa2_ni_fd_offset(struct dpaa2_fd *); 428 429 /* Various subroutines */ 430 static int dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *, uint16_t, uint16_t); 431 static int dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *, uint8_t *); 432 433 /* Network interface routines */ 434 static void dpaa2_ni_init(void *); 435 static int dpaa2_ni_transmit(if_t , struct mbuf *); 436 static void dpaa2_ni_qflush(if_t ); 

static struct dpni_stat {
	int	page;
	int	cnt;
	char	*name;
	char	*desc;
} dpni_stat_sysctls[DPAA2_NI_STAT_SYSCTLS] = {
	/* PAGE, COUNTER, NAME, DESCRIPTION */
	{ 0, 0, "in_all_frames",	"All accepted ingress frames" },
	{ 0, 1, "in_all_bytes",		"Bytes in all accepted ingress frames" },
	{ 0, 2, "in_multi_frames",	"Multicast accepted ingress frames" },
	{ 1, 0, "eg_all_frames",	"All egress frames transmitted" },
	{ 1, 1, "eg_all_bytes",		"Bytes in all frames transmitted" },
	{ 1, 2, "eg_multi_frames",	"Multicast egress frames transmitted" },
	{ 2, 0, "in_filtered_frames",	"All ingress frames discarded due to "
	    "filtering" },
	{ 2, 1, "in_discarded_frames",	"All frames discarded due to errors" },
	{ 2, 2, "in_nobuf_discards",	"Discards on ingress side due to buffer "
	    "depletion in DPNI buffer pools" },
};

struct dpaa2_ni_rx_ctx {
	struct mbuf	*head;
	struct mbuf	*tail;
	int		 cnt;
	bool		 last;
};

/* Device interface */
static int dpaa2_ni_probe(device_t);
static int dpaa2_ni_attach(device_t);
static int dpaa2_ni_detach(device_t);

/* DPAA2 network interface setup and configuration */
static int dpaa2_ni_setup(device_t);
static int dpaa2_ni_setup_channels(device_t);
static int dpaa2_ni_bind(device_t);
static int dpaa2_ni_setup_rx_dist(device_t);
static int dpaa2_ni_setup_irqs(device_t);
static int dpaa2_ni_setup_msi(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_if_caps(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *);
static int dpaa2_ni_setup_dma(struct dpaa2_ni_softc *);

/* Tx/Rx flow configuration */
static int dpaa2_ni_setup_rx_flow(device_t, struct dpaa2_ni_fq *);
static int dpaa2_ni_setup_tx_flow(device_t, struct dpaa2_ni_fq *);
static int dpaa2_ni_setup_rx_err_flow(device_t, struct dpaa2_ni_fq *);

/* Configuration subroutines */
static int dpaa2_ni_set_buf_layout(device_t);
static int dpaa2_ni_set_pause_frame(device_t);
static int dpaa2_ni_set_qos_table(device_t);
static int dpaa2_ni_set_mac_addr(device_t);
static int dpaa2_ni_set_hash(device_t, uint64_t);
static int dpaa2_ni_set_dist_key(device_t, enum dpaa2_ni_dist_mode, uint64_t);

/* Frame descriptor routines */
static int dpaa2_ni_build_fd(struct dpaa2_ni_softc *, struct dpaa2_ni_tx_ring *,
    struct dpaa2_buf *, bus_dma_segment_t *, int, struct dpaa2_fd *);
static int dpaa2_ni_fd_err(struct dpaa2_fd *);
static uint32_t dpaa2_ni_fd_data_len(struct dpaa2_fd *);
static int dpaa2_ni_fd_format(struct dpaa2_fd *);
static bool dpaa2_ni_fd_short_len(struct dpaa2_fd *);
static int dpaa2_ni_fd_offset(struct dpaa2_fd *);

/* Various subroutines */
static int dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *, uint16_t, uint16_t);
static int dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *, uint8_t *);

/* Network interface routines */
static void dpaa2_ni_init(void *);
static int  dpaa2_ni_transmit(if_t, struct mbuf *);
static void dpaa2_ni_qflush(if_t);
static int  dpaa2_ni_ioctl(if_t, u_long, caddr_t);
static int  dpaa2_ni_update_mac_filters(if_t);
static u_int dpaa2_ni_add_maddr(void *, struct sockaddr_dl *, u_int);

/* Interrupt handlers */
static void dpaa2_ni_intr(void *);

/* MII handlers */
static void dpaa2_ni_miibus_statchg(device_t);
static int  dpaa2_ni_media_change(if_t);
static void dpaa2_ni_media_status(if_t, struct ifmediareq *);
static void dpaa2_ni_media_tick(void *);

/* Tx/Rx routines */
static int dpaa2_ni_rx_cleanup(struct dpaa2_channel *);
static int dpaa2_ni_tx_cleanup(struct dpaa2_channel *);
static void dpaa2_ni_tx(struct dpaa2_ni_softc *, struct dpaa2_channel *,
    struct dpaa2_ni_tx_ring *, struct mbuf *);
static void dpaa2_ni_cleanup_task(void *, int);

/* Tx/Rx subroutines */
static int dpaa2_ni_consume_frames(struct dpaa2_channel *, struct dpaa2_ni_fq **,
    uint32_t *);
static int dpaa2_ni_rx(struct dpaa2_channel *, struct dpaa2_ni_fq *,
    struct dpaa2_fd *, struct dpaa2_ni_rx_ctx *);
static int dpaa2_ni_rx_err(struct dpaa2_channel *, struct dpaa2_ni_fq *,
    struct dpaa2_fd *);
static int dpaa2_ni_tx_conf(struct dpaa2_channel *, struct dpaa2_ni_fq *,
    struct dpaa2_fd *);

/* sysctl(9) */
static int dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS);
static int dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS);
static int dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS);

static int
dpaa2_ni_probe(device_t dev)
{
	/* The DPNI device is added by the parent resource container itself. */
	device_set_desc(dev, "DPAA2 Network Interface");
	return (BUS_PROBE_DEFAULT);
}

static int
dpaa2_ni_attach(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	device_t mcp_dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *mcp_dinfo;
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	if_t ifp;
	char tq_name[32];
	int error;

	sc->dev = dev;
	sc->ifp = NULL;
	sc->miibus = NULL;
	sc->mii = NULL;
	sc->media_status = 0;
	sc->if_flags = 0;
	sc->link_state = LINK_STATE_UNKNOWN;
	sc->buf_align = 0;

	/* For debug purposes only! */
	sc->rx_anomaly_frames = 0;
	sc->rx_single_buf_frames = 0;
	sc->rx_sg_buf_frames = 0;
	sc->rx_enq_rej_frames = 0;
	sc->rx_ieoi_err_frames = 0;
	sc->tx_single_buf_frames = 0;
	sc->tx_sg_frames = 0;

	DPAA2_ATOMIC_XCHG(&sc->buf_num, 0);
	DPAA2_ATOMIC_XCHG(&sc->buf_free, 0);

	sc->rxd_dmat = NULL;
	sc->qos_dmat = NULL;

	sc->qos_kcfg.dmap = NULL;
	sc->qos_kcfg.paddr = 0;
	sc->qos_kcfg.vaddr = NULL;

	sc->rxd_kcfg.dmap = NULL;
	sc->rxd_kcfg.paddr = 0;
	sc->rxd_kcfg.vaddr = NULL;

	sc->mac.dpmac_id = 0;
	sc->mac.phy_dev = NULL;
	memset(sc->mac.addr, 0, ETHER_ADDR_LEN);

	error = bus_alloc_resources(sc->dev, dpaa2_ni_spec, sc->res);
	if (error) {
		device_printf(dev, "%s: failed to allocate resources: "
		    "error=%d\n", __func__, error);
		goto err_exit;
	}

	/* Obtain MC portal. */
	mcp_dev = (device_t)rman_get_start(sc->res[DPAA2_NI_MCP_RID(0)]);
	mcp_dinfo = device_get_ivars(mcp_dev);
	dinfo->portal = mcp_dinfo->portal;

	mtx_init(&sc->lock, device_get_nameunit(dev), "dpaa2_ni", MTX_DEF);

	/* Allocate network interface. */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "%s: failed to allocate network interface\n",
		    __func__);
		goto err_exit;
	}
	sc->ifp = ifp;
	if_initname(ifp, DPAA2_NI_IFNAME, device_get_unit(sc->dev));

	if_setsoftc(ifp, sc);
	if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST);
	if_setinitfn(ifp, dpaa2_ni_init);
	if_setioctlfn(ifp, dpaa2_ni_ioctl);
	if_settransmitfn(ifp, dpaa2_ni_transmit);
	if_setqflushfn(ifp, dpaa2_ni_qflush);

	if_setcapabilities(ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU);
	if_setcapenable(ifp, if_getcapabilities(ifp));

	DPAA2_CMD_INIT(&cmd);

	/* Open resource container and network interface object. */
	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	bzero(tq_name, sizeof(tq_name));
	snprintf(tq_name, sizeof(tq_name), "%s_tqbp", device_get_nameunit(dev));

	/*
	 * XXX-DSL: Release new buffers on Buffer Pool State Change Notification
	 *          (BPSCN) returned as a result to the VDQ command instead.
	 *          It is similar to CDAN processed in dpaa2_io_intr().
	 */
	/* Create a taskqueue thread to release new buffers to the pool. */
	sc->bp_taskq = taskqueue_create(tq_name, M_WAITOK,
	    taskqueue_thread_enqueue, &sc->bp_taskq);
	if (sc->bp_taskq == NULL) {
		device_printf(dev, "%s: failed to allocate task queue: %s\n",
		    __func__, tq_name);
		goto close_ni;
	}
	taskqueue_start_threads(&sc->bp_taskq, 1, PI_NET, "%s", tq_name);

	/* sc->cleanup_taskq = taskqueue_create("dpaa2_ch cleanup", M_WAITOK, */
	/*     taskqueue_thread_enqueue, &sc->cleanup_taskq); */
	/* taskqueue_start_threads(&sc->cleanup_taskq, 1, PI_NET, */
	/*     "dpaa2_ch cleanup"); */

	error = dpaa2_ni_setup(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup DPNI: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_channels(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup QBMan channels: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	error = dpaa2_ni_bind(dev);
	if (error) {
		device_printf(dev, "%s: failed to bind DPNI: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_irqs(dev);
	if (error) {
		device_printf(dev, "%s: failed to setup IRQs: error=%d\n",
		    __func__, error);
		goto close_ni;
	}
	error = dpaa2_ni_setup_sysctls(sc);
	if (error) {
		device_printf(dev, "%s: failed to setup sysctls: error=%d\n",
		    __func__, error);
		goto close_ni;
	}

	ether_ifattach(sc->ifp, sc->mac.addr);
	callout_init(&sc->mii_callout, 0);

	return (0);

close_ni:
	DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (ENXIO);
}

static void
dpaa2_ni_fixed_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);

	DPNI_LOCK(sc);
	ifmr->ifm_count = 0;
	ifmr->ifm_mask = 0;
	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifmr->ifm_current = ifmr->ifm_active =
	    sc->fixed_ifmedia.ifm_cur->ifm_media;

	/*
	 * In non-PHY usecases, we need to signal link state up; otherwise
	 * certain things requiring a link event from devd (e.g. an async DHCP
	 * client) do not happen.
	 */
	if (if_getlinkstate(ifp) == LINK_STATE_UNKNOWN) {
		if_link_state_change(ifp, LINK_STATE_UP);
	}

	/*
	 * TODO: Check the status of the link partner (DPMAC, DPNI or other)
	 * and reset if down. This is different from the
	 * DPAA2_MAC_LINK_TYPE_PHY case, as the MC firmware sets the status,
	 * instead of us telling the MC what it is.
	 */
	DPNI_UNLOCK(sc);

	return;
}

static void
dpaa2_ni_setup_fixed_link(struct dpaa2_ni_softc *sc)
{
	/*
	 * FIXME: When the DPNI is connected to a DPMAC, we can get the
	 * 'apparent' speed from it.
	 */
	sc->fixed_link = true;

	ifmedia_init(&sc->fixed_ifmedia, 0, dpaa2_ni_media_change,
	    dpaa2_ni_fixed_media_status);
	ifmedia_add(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL);
	ifmedia_set(&sc->fixed_ifmedia, IFM_ETHER | IFM_1000_T);
}

static int
dpaa2_ni_detach(device_t dev)
{
	/* TBD */
	return (0);
}
706 */ 707 static int 708 dpaa2_ni_setup(device_t dev) 709 { 710 device_t pdev = device_get_parent(dev); 711 device_t child = dev; 712 struct dpaa2_ni_softc *sc = device_get_softc(dev); 713 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); 714 struct dpaa2_devinfo *dinfo = device_get_ivars(dev); 715 struct dpaa2_ep_desc ep1_desc, ep2_desc; /* endpoint descriptors */ 716 struct dpaa2_cmd cmd; 717 uint8_t eth_bca[ETHER_ADDR_LEN]; /* broadcast physical address */ 718 uint16_t rc_token, ni_token, mac_token; 719 struct dpaa2_mac_attr attr; 720 enum dpaa2_mac_link_type link_type; 721 uint32_t link; 722 int error; 723 724 DPAA2_CMD_INIT(&cmd); 725 726 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); 727 if (error) { 728 device_printf(dev, "%s: failed to open resource container: " 729 "id=%d, error=%d\n", __func__, rcinfo->id, error); 730 goto err_exit; 731 } 732 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); 733 if (error) { 734 device_printf(dev, "%s: failed to open network interface: " 735 "id=%d, error=%d\n", __func__, dinfo->id, error); 736 goto close_rc; 737 } 738 739 /* Check if we can work with this DPNI object. */ 740 error = DPAA2_CMD_NI_GET_API_VERSION(dev, child, &cmd, &sc->api_major, 741 &sc->api_minor); 742 if (error) { 743 device_printf(dev, "%s: failed to get DPNI API version\n", 744 __func__); 745 goto close_ni; 746 } 747 if (dpaa2_ni_cmp_api_version(sc, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) { 748 device_printf(dev, "%s: DPNI API version %u.%u not supported, " 749 "need >= %u.%u\n", __func__, sc->api_major, sc->api_minor, 750 DPNI_VER_MAJOR, DPNI_VER_MINOR); 751 error = ENODEV; 752 goto close_ni; 753 } 754 755 /* Reset the DPNI object. */ 756 error = DPAA2_CMD_NI_RESET(dev, child, &cmd); 757 if (error) { 758 device_printf(dev, "%s: failed to reset DPNI: id=%d\n", 759 __func__, dinfo->id); 760 goto close_ni; 761 } 762 763 /* Obtain attributes of the DPNI object. */ 764 error = DPAA2_CMD_NI_GET_ATTRIBUTES(dev, child, &cmd, &sc->attr); 765 if (error) { 766 device_printf(dev, "%s: failed to obtain DPNI attributes: " 767 "id=%d\n", __func__, dinfo->id); 768 goto close_ni; 769 } 770 if (bootverbose) { 771 device_printf(dev, "\toptions=0x%#x queues=%d tx_channels=%d " 772 "wriop_version=%#x\n", sc->attr.options, sc->attr.num.queues, 773 sc->attr.num.channels, sc->attr.wriop_ver); 774 device_printf(dev, "\ttraffic classes: rx=%d tx=%d " 775 "cgs_groups=%d\n", sc->attr.num.rx_tcs, sc->attr.num.tx_tcs, 776 sc->attr.num.cgs); 777 device_printf(dev, "\ttable entries: mac=%d vlan=%d qos=%d " 778 "fs=%d\n", sc->attr.entries.mac, sc->attr.entries.vlan, 779 sc->attr.entries.qos, sc->attr.entries.fs); 780 device_printf(dev, "\tkey sizes: qos=%d fs=%d\n", 781 sc->attr.key_size.qos, sc->attr.key_size.fs); 782 } 783 784 /* Configure buffer layouts of the DPNI queues. */ 785 error = dpaa2_ni_set_buf_layout(dev); 786 if (error) { 787 device_printf(dev, "%s: failed to configure buffer layout\n", 788 __func__); 789 goto close_ni; 790 } 791 792 /* Configure DMA resources. */ 793 error = dpaa2_ni_setup_dma(sc); 794 if (error) { 795 device_printf(dev, "%s: failed to setup DMA\n", __func__); 796 goto close_ni; 797 } 798 799 /* Setup link between DPNI and an object it's connected to. 
	ep1_desc.obj_id = dinfo->id;
	ep1_desc.if_id = 0; /* DPNI has the only endpoint */
	ep1_desc.type = dinfo->dtype;

	error = DPAA2_CMD_RC_GET_CONN(dev, child, DPAA2_CMD_TK(&cmd, rc_token),
	    &ep1_desc, &ep2_desc, &link);
	if (error) {
		device_printf(dev, "%s: failed to obtain an object DPNI is "
		    "connected to: error=%d\n", __func__, error);
	} else {
		device_printf(dev, "connected to %s (id=%d)\n",
		    dpaa2_ttos(ep2_desc.type), ep2_desc.obj_id);

		error = dpaa2_ni_set_mac_addr(dev);
		if (error) {
			device_printf(dev, "%s: failed to set MAC address: "
			    "error=%d\n", __func__, error);
		}

		if (ep2_desc.type == DPAA2_DEV_MAC) {
			/*
			 * This is the simplest case when DPNI is connected to
			 * DPMAC directly.
			 */
			sc->mac.dpmac_id = ep2_desc.obj_id;

			link_type = DPAA2_MAC_LINK_TYPE_NONE;

			/*
			 * Need to determine if the DPMAC type is PHY (attached
			 * to a conventional MII PHY) or FIXED (usually
			 * SFP/SerDes, link state managed by MC firmware).
			 */
			error = DPAA2_CMD_MAC_OPEN(sc->dev, child,
			    DPAA2_CMD_TK(&cmd, rc_token), sc->mac.dpmac_id,
			    &mac_token);
			/*
			 * Under VFIO, the DPMAC might be sitting in another
			 * container (DPRC) we don't have access to. Assume
			 * DPAA2_MAC_LINK_TYPE_FIXED if this is the case.
			 */
			if (error) {
				device_printf(dev, "%s: failed to open "
				    "connected DPMAC: %d (assuming in other "
				    "DPRC)\n", __func__, sc->mac.dpmac_id);
				link_type = DPAA2_MAC_LINK_TYPE_FIXED;
			} else {
				error = DPAA2_CMD_MAC_GET_ATTRIBUTES(dev, child,
				    &cmd, &attr);
				if (error) {
					device_printf(dev, "%s: failed to get "
					    "DPMAC attributes: id=%d, "
					    "error=%d\n", __func__, dinfo->id,
					    error);
				} else {
					link_type = attr.link_type;
				}
			}
			DPAA2_CMD_MAC_CLOSE(dev, child, &cmd);

			if (link_type == DPAA2_MAC_LINK_TYPE_FIXED) {
				device_printf(dev, "connected DPMAC is in FIXED "
				    "mode\n");
				dpaa2_ni_setup_fixed_link(sc);
			} else if (link_type == DPAA2_MAC_LINK_TYPE_PHY) {
				device_printf(dev, "connected DPMAC is in PHY "
				    "mode\n");
				error = DPAA2_MC_GET_PHY_DEV(dev,
				    &sc->mac.phy_dev, sc->mac.dpmac_id);
				if (error == 0) {
					error = MEMAC_MDIO_SET_NI_DEV(
					    sc->mac.phy_dev, dev);
					if (error != 0) {
						device_printf(dev, "%s: failed "
						    "to set dpni dev on memac "
						    "mdio dev %s: error=%d\n",
						    __func__,
						    device_get_nameunit(
						    sc->mac.phy_dev), error);
					}
				}
				if (error == 0) {
					error = MEMAC_MDIO_GET_PHY_LOC(
					    sc->mac.phy_dev, &sc->mac.phy_loc);
					if (error == ENODEV) {
						error = 0;
					}
					if (error != 0) {
						device_printf(dev, "%s: failed "
						    "to get phy location from "
						    "memac mdio dev %s: error=%d\n",
						    __func__, device_get_nameunit(
						    sc->mac.phy_dev), error);
					}
				}
				if (error == 0) {
					error = mii_attach(sc->mac.phy_dev,
					    &sc->miibus, sc->ifp,
					    dpaa2_ni_media_change,
					    dpaa2_ni_media_status,
					    BMSR_DEFCAPMASK, sc->mac.phy_loc,
					    MII_OFFSET_ANY, 0);
					if (error != 0) {
						device_printf(dev, "%s: failed "
						    "to attach to miibus: "
						    "error=%d\n",
						    __func__, error);
					}
				}
				if (error == 0) {
					sc->mii = device_get_softc(sc->miibus);
				}
			} else {
				device_printf(dev, "%s: DPMAC link type is not "
				    "supported\n", __func__);
			}
		} else if (ep2_desc.type == DPAA2_DEV_NI ||
		    ep2_desc.type == DPAA2_DEV_MUX ||
		    ep2_desc.type == DPAA2_DEV_SW) {
			dpaa2_ni_setup_fixed_link(sc);
		}
	}

	/* Select mode to enqueue frames. */
	/* ... TBD ... */

	/*
	 * Update link configuration to enable Rx/Tx pause frames support.
	 *
	 * NOTE: MC may generate an interrupt to the DPMAC and request changes
	 *       in link configuration. It might be necessary to attach miibus
	 *       and PHY before this point.
	 */
	error = dpaa2_ni_set_pause_frame(dev);
	if (error) {
		device_printf(dev, "%s: failed to configure Rx/Tx pause "
		    "frames\n", __func__);
		goto close_ni;
	}

	/* Configure ingress traffic classification. */
	error = dpaa2_ni_set_qos_table(dev);
	if (error) {
		device_printf(dev, "%s: failed to configure QoS table: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	/* Add the broadcast physical address to the MAC filtering table. */
	memset(eth_bca, 0xff, ETHER_ADDR_LEN);
	error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, DPAA2_CMD_TK(&cmd,
	    ni_token), eth_bca);
	if (error) {
		device_printf(dev, "%s: failed to add broadcast physical "
		    "address to the MAC filtering table\n", __func__);
		goto close_ni;
	}

	/* Set the maximum allowed length for received frames. */
	error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd, DPAA2_ETH_MFL);
	if (error) {
		device_printf(dev, "%s: failed to set maximum length for "
		    "received frames\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

/**
 * @brief Configure QBMan channels and register data availability notifications.
 */
static int
dpaa2_ni_setup_channels(device_t dev)
{
	device_t iodev, condev, bpdev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	uint32_t i, num_chan;
	int error;

	/* Calculate number of the channels based on the allocated resources. */
	for (i = 0; i < DPAA2_NI_IO_RES_NUM; i++) {
		if (!sc->res[DPAA2_NI_IO_RID(i)]) {
			break;
		}
	}
	num_chan = i;
	for (i = 0; i < DPAA2_NI_CON_RES_NUM; i++) {
		if (!sc->res[DPAA2_NI_CON_RID(i)]) {
			break;
		}
	}
	num_chan = MIN(num_chan, i);
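	/*
	 * Example (illustrative): with 8 DPIOs and 4 DPCONs successfully
	 * allocated, DPAA2_MAX_CHANNELS >= 4 and at least 4 DPNI queues,
	 * the clamping below yields chan_n = 4, i.e. the smallest of the
	 * four values wins.
	 */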
	sc->chan_n = MIN(num_chan, DPAA2_MAX_CHANNELS);
	sc->chan_n = MIN(sc->chan_n, sc->attr.num.queues);

	KASSERT(sc->chan_n > 0u, ("%s: positive number of channels expected: "
	    "chan_n=%d", __func__, sc->chan_n));

	device_printf(dev, "channels=%d\n", sc->chan_n);

	for (i = 0; i < sc->chan_n; i++) {
		iodev = (device_t)rman_get_start(sc->res[DPAA2_NI_IO_RID(i)]);
		condev = (device_t)rman_get_start(sc->res[DPAA2_NI_CON_RID(i)]);
		/* Only one buffer pool available at the moment */
		bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);

		error = dpaa2_chan_setup(dev, iodev, condev, bpdev,
		    &sc->channels[i], i, dpaa2_ni_cleanup_task);
		if (error != 0) {
			device_printf(dev, "%s: dpaa2_chan_setup() failed: "
			    "error=%d, chan_id=%d\n", __func__, error, i);
			return (error);
		}
	}

	/* There is exactly one Rx error queue per network interface. */
	error = dpaa2_chan_setup_fq(dev, sc->channels[0], DPAA2_NI_QUEUE_RX_ERR);
	if (error != 0) {
		device_printf(dev, "%s: failed to prepare RxError queue: "
		    "error=%d\n", __func__, error);
		return (error);
	}

	return (0);
}

/**
 * @brief Bind DPNI to DPBPs, DPIOs, frame queues and channels.
 */
static int
dpaa2_ni_bind(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	device_t bp_dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *bp_info;
	struct dpaa2_cmd cmd;
	struct dpaa2_ni_pools_cfg pools_cfg;
	struct dpaa2_ni_err_cfg err_cfg;
	struct dpaa2_channel *chan;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Select buffer pool (only one available at the moment). */
	bp_dev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]);
	bp_info = device_get_ivars(bp_dev);

	/* Configure the buffer pool. */
	pools_cfg.pools_num = 1;
	pools_cfg.pools[0].bp_obj_id = bp_info->id;
	pools_cfg.pools[0].backup_flag = 0;
	pools_cfg.pools[0].buf_sz = sc->buf_sz;
	error = DPAA2_CMD_NI_SET_POOLS(dev, child, &cmd, &pools_cfg);
	if (error) {
		device_printf(dev, "%s: failed to set buffer pools\n", __func__);
		goto close_ni;
	}

	/* Setup ingress traffic distribution. */
	error = dpaa2_ni_setup_rx_dist(dev);
	if (error && error != EOPNOTSUPP) {
		device_printf(dev, "%s: failed to setup ingress traffic "
		    "distribution\n", __func__);
		goto close_ni;
	}
	if (bootverbose && error == EOPNOTSUPP) {
		device_printf(dev, "Ingress traffic distribution not "
		    "supported\n");
	}

	/* Configure handling of error frames. */
	err_cfg.err_mask = DPAA2_NI_FAS_RX_ERR_MASK;
	err_cfg.set_err_fas = false;
	err_cfg.action = DPAA2_NI_ERR_DISCARD;
	error = DPAA2_CMD_NI_SET_ERR_BEHAVIOR(dev, child, &cmd, &err_cfg);
	if (error) {
		device_printf(dev, "%s: failed to set errors behavior\n",
		    __func__);
		goto close_ni;
	}

	/* Configure channel queues to generate CDANs. */
	for (uint32_t i = 0; i < sc->chan_n; i++) {
		chan = sc->channels[i];

		/* Setup Rx flows. */
		for (uint32_t j = 0; j < chan->rxq_n; j++) {
			error = dpaa2_ni_setup_rx_flow(dev, &chan->rx_queues[j]);
			if (error) {
				device_printf(dev, "%s: failed to setup Rx "
				    "flow: error=%d\n", __func__, error);
				goto close_ni;
			}
		}

		/* Setup Tx flow. */
		error = dpaa2_ni_setup_tx_flow(dev, &chan->txc_queue);
		if (error) {
			device_printf(dev, "%s: failed to setup Tx "
			    "flow: error=%d\n", __func__, error);
			goto close_ni;
		}
	}

	/* Configure RxError queue to generate CDAN. */
	error = dpaa2_ni_setup_rx_err_flow(dev, &sc->rxe_queue);
	if (error) {
		device_printf(dev, "%s: failed to setup RxError flow: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	/*
	 * Get the Queuing Destination ID (QDID) that should be used for frame
	 * enqueue operations.
	 */
	error = DPAA2_CMD_NI_GET_QDID(dev, child, &cmd, DPAA2_NI_QUEUE_TX,
	    &sc->tx_qdid);
	if (error) {
		device_printf(dev, "%s: failed to get Tx queuing destination "
		    "ID\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

/**
 * @brief Setup ingress traffic distribution.
 *
 * NOTE: Ingress traffic distribution is valid only when the DPNI_OPT_NO_FS
 *       option hasn't been set for the DPNI and the number of DPNI queues is
 *       greater than 1.
 */
static int
dpaa2_ni_setup_rx_dist(device_t dev)
{
	/*
	 * Have the interface implicitly distribute traffic based on the
	 * default hash key.
	 */
	return (dpaa2_ni_set_hash(dev, DPAA2_RXH_DEFAULT));
}

static int
dpaa2_ni_setup_rx_flow(device_t dev, struct dpaa2_ni_fq *fq)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *con_info;
	struct dpaa2_cmd cmd;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Obtain the DPCON associated with the FQ's channel. */
	con_info = device_get_ivars(fq->chan->con_dev);

	queue_cfg.type = DPAA2_NI_QUEUE_RX;
	queue_cfg.tc = fq->tc;
	queue_cfg.idx = fq->flowid;
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain Rx queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	fq->fqid = queue_cfg.fqid;

	queue_cfg.dest_id = con_info->id;
	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
	queue_cfg.priority = 1;
	queue_cfg.user_ctx = (uint64_t)(uintmax_t)fq;
	queue_cfg.options =
	    DPAA2_NI_QUEUE_OPT_USER_CTX |
	    DPAA2_NI_QUEUE_OPT_DEST;
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to update Rx queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	if (bootverbose) {
		device_printf(dev, "RX queue idx=%d, tc=%d, chan=%d, fqid=%d, "
		    "user_ctx=%#jx\n", fq->flowid, fq->tc, fq->chan->id,
		    fq->fqid, (uintmax_t)fq);
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, &cmd);
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

static int
dpaa2_ni_setup_tx_flow(device_t dev, struct dpaa2_ni_fq *fq)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_channel *ch = fq->chan;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *con_info;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	struct dpaa2_ni_tx_ring *tx;
	struct dpaa2_buf *buf;
	struct dpaa2_cmd cmd;
	uint32_t tx_rings_n = 0;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Obtain the DPCON associated with the FQ's channel. */
	con_info = device_get_ivars(fq->chan->con_dev);

	KASSERT(sc->attr.num.tx_tcs <= DPAA2_MAX_TCS,
	    ("%s: too many Tx traffic classes: tx_tcs=%d\n", __func__,
	    sc->attr.num.tx_tcs));
	KASSERT(DPAA2_NI_BUFS_PER_TX <= DPAA2_NI_MAX_BPTX,
	    ("%s: too many Tx buffers (%d): max=%d\n", __func__,
	    DPAA2_NI_BUFS_PER_TX, DPAA2_NI_MAX_BPTX));

	/* Setup Tx rings. */
	for (int i = 0; i < sc->attr.num.tx_tcs; i++) {
		queue_cfg.type = DPAA2_NI_QUEUE_TX;
		queue_cfg.tc = i;
		queue_cfg.idx = fq->flowid;
		queue_cfg.chan_id = fq->chan->id;

		error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
		if (error) {
			device_printf(dev, "%s: failed to obtain Tx queue "
			    "configuration: tc=%d, flowid=%d\n", __func__,
			    queue_cfg.tc, queue_cfg.idx);
			goto close_ni;
		}

		tx = &fq->tx_rings[i];
		tx->fq = fq;
		tx->fqid = queue_cfg.fqid;
		tx->txid = tx_rings_n;

		if (bootverbose) {
			device_printf(dev, "TX queue idx=%d, tc=%d, chan=%d, "
			    "fqid=%d\n", fq->flowid, i, fq->chan->id,
			    queue_cfg.fqid);
		}

		mtx_init(&tx->lock, "dpaa2_tx_ring", NULL, MTX_DEF);

		/* Allocate Tx ring buffer. */
		tx->br = buf_ring_alloc(DPAA2_TX_BUFRING_SZ, M_DEVBUF, M_NOWAIT,
		    &tx->lock);
		if (tx->br == NULL) {
			device_printf(dev, "%s: failed to setup Tx ring buffer"
			    " (2) fqid=%d\n", __func__, tx->fqid);
			goto close_ni;
		}

		/*
		 * Configure Tx buffers.
		 *
		 * NOTE: malloc(9) with M_WAITOK never returns NULL, so no
		 *       NULL checks are needed for the allocations below.
		 */
		for (uint64_t j = 0; j < DPAA2_NI_BUFS_PER_TX; j++) {
			buf = malloc(sizeof(struct dpaa2_buf), M_DPAA2_TXB,
			    M_WAITOK);
			/* Keep DMA tag and Tx ring linked to the buffer. */
			DPAA2_BUF_INIT_TAGOPT(buf, ch->tx_dmat, tx);

			buf->sgt = malloc(sizeof(struct dpaa2_buf), M_DPAA2_TXB,
			    M_WAITOK);
			/* Link SGT to DMA tag and back to its Tx buffer. */
			DPAA2_BUF_INIT_TAGOPT(buf->sgt, ch->sgt_dmat, buf);

			error = dpaa2_buf_seed_txb(dev, buf);

			/* Add Tx buffer to the ring. */
			buf_ring_enqueue(tx->br, buf);
		}

		tx_rings_n++;
	}
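
	/*
	 * NOTE (added for clarity): the rings initialized above are the ones
	 * addressed by the DPAA2_TX_RING(sc, chan, tc) accessor defined at the
	 * top of this file; e.g. DPAA2_TX_RING(sc, 0, 0) resolves to
	 * &sc->channels[0]->txc_queue.tx_rings[0].
	 */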
	/* All Tx queues which belong to the same flowid have the same qdbin. */
	fq->tx_qdbin = queue_cfg.qdbin;

	queue_cfg.type = DPAA2_NI_QUEUE_TX_CONF;
	queue_cfg.tc = 0; /* ignored for TxConf queue */
	queue_cfg.idx = fq->flowid;
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain TxConf queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	fq->fqid = queue_cfg.fqid;

	queue_cfg.dest_id = con_info->id;
	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
	queue_cfg.priority = 0;
	queue_cfg.user_ctx = (uint64_t)(uintmax_t)fq;
	queue_cfg.options =
	    DPAA2_NI_QUEUE_OPT_USER_CTX |
	    DPAA2_NI_QUEUE_OPT_DEST;
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to update TxConf queue "
		    "configuration: tc=%d, flowid=%d\n", __func__, queue_cfg.tc,
		    queue_cfg.idx);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

static int
dpaa2_ni_setup_rx_err_flow(device_t dev, struct dpaa2_ni_fq *fq)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_devinfo *con_info;
	struct dpaa2_ni_queue_cfg queue_cfg = {0};
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Obtain the DPCON associated with the FQ's channel. */
	con_info = device_get_ivars(fq->chan->con_dev);

	queue_cfg.type = DPAA2_NI_QUEUE_RX_ERR;
	queue_cfg.tc = fq->tc; /* ignored */
	queue_cfg.idx = fq->flowid; /* ignored */
	error = DPAA2_CMD_NI_GET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to obtain RxErr queue "
		    "configuration\n", __func__);
		goto close_ni;
	}

	fq->fqid = queue_cfg.fqid;

	queue_cfg.dest_id = con_info->id;
	queue_cfg.dest_type = DPAA2_NI_DEST_DPCON;
	queue_cfg.priority = 1;
	queue_cfg.user_ctx = (uint64_t)(uintmax_t)fq;
	queue_cfg.options =
	    DPAA2_NI_QUEUE_OPT_USER_CTX |
	    DPAA2_NI_QUEUE_OPT_DEST;
	error = DPAA2_CMD_NI_SET_QUEUE(dev, child, &cmd, &queue_cfg);
	if (error) {
		device_printf(dev, "%s: failed to update RxErr queue "
		    "configuration\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

/**
 * @brief Configure DPNI object to generate interrupts.
 */
static int
dpaa2_ni_setup_irqs(device_t dev)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	/* Configure IRQs. */
	error = dpaa2_ni_setup_msi(sc);
	if (error) {
		device_printf(dev, "%s: failed to allocate MSI\n", __func__);
		goto close_ni;
	}
	if ((sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) {
		device_printf(dev, "%s: failed to allocate IRQ resource\n",
		    __func__);
		goto close_ni;
	}
	if (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, dpaa2_ni_intr, sc, &sc->intr)) {
		device_printf(dev, "%s: failed to setup IRQ resource\n",
		    __func__);
		goto close_ni;
	}

	error = DPAA2_CMD_NI_SET_IRQ_MASK(dev, child, &cmd, DPNI_IRQ_INDEX,
	    DPNI_IRQ_LINK_CHANGED | DPNI_IRQ_EP_CHANGED);
	if (error) {
		device_printf(dev, "%s: failed to set DPNI IRQ mask\n",
		    __func__);
		goto close_ni;
	}

	error = DPAA2_CMD_NI_SET_IRQ_ENABLE(dev, child, &cmd, DPNI_IRQ_INDEX,
	    true);
	if (error) {
		device_printf(dev, "%s: failed to enable DPNI IRQ\n", __func__);
		goto close_ni;
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return (0);

close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

/**
 * @brief Allocate MSI interrupts for DPNI.
 */
static int
dpaa2_ni_setup_msi(struct dpaa2_ni_softc *sc)
{
	int val;

	val = pci_msi_count(sc->dev);
	if (val < DPAA2_NI_MSI_COUNT)
		device_printf(sc->dev, "MSI: actual=%d, expected=%d\n", val,
		    DPAA2_NI_MSI_COUNT);
	val = MIN(val, DPAA2_NI_MSI_COUNT);

	if (pci_alloc_msi(sc->dev, &val) != 0)
		return (EINVAL);
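
	/*
	 * NOTE (added for clarity): SYS_RES_IRQ resource IDs for MSI vectors
	 * start at 1 (rid 0 is reserved for the legacy INTx interrupt), hence
	 * "i + 1" below.
	 */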
"enable" : "disable"); 1620 goto close_ni; 1621 } 1622 error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd, 1623 DPAA2_NI_OFL_RX_L4_CSUM, en_rxcsum); 1624 if (error) { 1625 device_printf(dev, "%s: failed to %s L4 checksum validation\n", 1626 __func__, en_rxcsum ? "enable" : "disable"); 1627 goto close_ni; 1628 } 1629 1630 /* Setup checksums generation. */ 1631 error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd, 1632 DPAA2_NI_OFL_TX_L3_CSUM, en_txcsum); 1633 if (error) { 1634 device_printf(dev, "%s: failed to %s L3 checksum generation\n", 1635 __func__, en_txcsum ? "enable" : "disable"); 1636 goto close_ni; 1637 } 1638 error = DPAA2_CMD_NI_SET_OFFLOAD(dev, child, &cmd, 1639 DPAA2_NI_OFL_TX_L4_CSUM, en_txcsum); 1640 if (error) { 1641 device_printf(dev, "%s: failed to %s L4 checksum generation\n", 1642 __func__, en_txcsum ? "enable" : "disable"); 1643 goto close_ni; 1644 } 1645 1646 (void)DPAA2_CMD_NI_CLOSE(dev, child, &cmd); 1647 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); 1648 return (0); 1649 1650 close_ni: 1651 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); 1652 close_rc: 1653 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); 1654 err_exit: 1655 return (error); 1656 } 1657 1658 /** 1659 * @brief Update DPNI according to the updated interface flags. 1660 */ 1661 static int 1662 dpaa2_ni_setup_if_flags(struct dpaa2_ni_softc *sc) 1663 { 1664 const bool en_promisc = if_getflags(sc->ifp) & IFF_PROMISC; 1665 const bool en_allmulti = if_getflags(sc->ifp) & IFF_ALLMULTI; 1666 device_t pdev = device_get_parent(sc->dev); 1667 device_t dev = sc->dev; 1668 device_t child = dev; 1669 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); 1670 struct dpaa2_devinfo *dinfo = device_get_ivars(dev); 1671 struct dpaa2_cmd cmd; 1672 uint16_t rc_token, ni_token; 1673 int error; 1674 1675 DPAA2_CMD_INIT(&cmd); 1676 1677 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); 1678 if (error) { 1679 device_printf(dev, "%s: failed to open resource container: " 1680 "id=%d, error=%d\n", __func__, rcinfo->id, error); 1681 goto err_exit; 1682 } 1683 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); 1684 if (error) { 1685 device_printf(dev, "%s: failed to open network interface: " 1686 "id=%d, error=%d\n", __func__, dinfo->id, error); 1687 goto close_rc; 1688 } 1689 1690 error = DPAA2_CMD_NI_SET_MULTI_PROMISC(dev, child, &cmd, 1691 en_promisc ? true : en_allmulti); 1692 if (error) { 1693 device_printf(dev, "%s: failed to %s multicast promiscuous " 1694 "mode\n", __func__, en_allmulti ? "enable" : "disable"); 1695 goto close_ni; 1696 } 1697 1698 error = DPAA2_CMD_NI_SET_UNI_PROMISC(dev, child, &cmd, en_promisc); 1699 if (error) { 1700 device_printf(dev, "%s: failed to %s unicast promiscuous mode\n", 1701 __func__, en_promisc ? 
"enable" : "disable"); 1702 goto close_ni; 1703 } 1704 1705 (void)DPAA2_CMD_NI_CLOSE(dev, child, &cmd); 1706 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); 1707 return (0); 1708 1709 close_ni: 1710 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); 1711 close_rc: 1712 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); 1713 err_exit: 1714 return (error); 1715 } 1716 1717 static int 1718 dpaa2_ni_setup_sysctls(struct dpaa2_ni_softc *sc) 1719 { 1720 struct sysctl_ctx_list *ctx; 1721 struct sysctl_oid *node, *node2; 1722 struct sysctl_oid_list *parent, *parent2; 1723 char cbuf[128]; 1724 int i; 1725 1726 ctx = device_get_sysctl_ctx(sc->dev); 1727 parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); 1728 1729 /* Add DPNI statistics. */ 1730 node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", 1731 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Statistics"); 1732 parent = SYSCTL_CHILDREN(node); 1733 for (i = 0; i < DPAA2_NI_STAT_SYSCTLS; ++i) { 1734 SYSCTL_ADD_PROC(ctx, parent, i, dpni_stat_sysctls[i].name, 1735 CTLTYPE_U64 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_stats, 1736 "IU", dpni_stat_sysctls[i].desc); 1737 } 1738 SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_anomaly_frames", 1739 CTLFLAG_RD, &sc->rx_anomaly_frames, 1740 "Rx frames in the buffers outside of the buffer pools"); 1741 SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_single_buf_frames", 1742 CTLFLAG_RD, &sc->rx_single_buf_frames, 1743 "Rx frames in single buffers"); 1744 SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_sg_buf_frames", 1745 CTLFLAG_RD, &sc->rx_sg_buf_frames, 1746 "Rx frames in scatter/gather list"); 1747 SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_enq_rej_frames", 1748 CTLFLAG_RD, &sc->rx_enq_rej_frames, 1749 "Enqueue rejected by QMan"); 1750 SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "rx_ieoi_err_frames", 1751 CTLFLAG_RD, &sc->rx_ieoi_err_frames, 1752 "QMan IEOI error"); 1753 SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_single_buf_frames", 1754 CTLFLAG_RD, &sc->tx_single_buf_frames, 1755 "Tx single buffer frames"); 1756 SYSCTL_ADD_UQUAD(ctx, parent, OID_AUTO, "tx_sg_frames", 1757 CTLFLAG_RD, &sc->tx_sg_frames, 1758 "Tx S/G frames"); 1759 1760 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_num", 1761 CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_num, 1762 "IU", "number of Rx buffers in the buffer pool"); 1763 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "buf_free", 1764 CTLTYPE_U32 | CTLFLAG_RD, sc, 0, dpaa2_ni_collect_buf_free, 1765 "IU", "number of free Rx buffers in the buffer pool"); 1766 1767 parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); 1768 1769 /* Add channels statistics. 
*/ 1770 node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "channels", 1771 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channels"); 1772 parent = SYSCTL_CHILDREN(node); 1773 for (int i = 0; i < sc->chan_n; i++) { 1774 snprintf(cbuf, sizeof(cbuf), "%d", i); 1775 1776 node2 = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, cbuf, 1777 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "DPNI Channel"); 1778 parent2 = SYSCTL_CHILDREN(node2); 1779 1780 SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_frames", 1781 CTLFLAG_RD, &sc->channels[i]->tx_frames, 1782 "Tx frames counter"); 1783 SYSCTL_ADD_UQUAD(ctx, parent2, OID_AUTO, "tx_dropped", 1784 CTLFLAG_RD, &sc->channels[i]->tx_dropped, 1785 "Tx dropped counter"); 1786 } 1787 1788 return (0); 1789 } 1790 1791 static int 1792 dpaa2_ni_setup_dma(struct dpaa2_ni_softc *sc) 1793 { 1794 device_t dev = sc->dev; 1795 int error; 1796 1797 KASSERT((sc->buf_align == BUF_ALIGN) || (sc->buf_align == BUF_ALIGN_V1), 1798 ("unexpected buffer alignment: %d\n", sc->buf_align)); 1799 1800 /* DMA tag for Rx distribution key. */ 1801 error = bus_dma_tag_create( 1802 bus_get_dma_tag(dev), 1803 PAGE_SIZE, 0, /* alignment, boundary */ 1804 BUS_SPACE_MAXADDR, /* low restricted addr */ 1805 BUS_SPACE_MAXADDR, /* high restricted addr */ 1806 NULL, NULL, /* filter, filterarg */ 1807 DPAA2_CLASSIFIER_DMA_SIZE, 1, /* maxsize, nsegments */ 1808 DPAA2_CLASSIFIER_DMA_SIZE, 0, /* maxsegsize, flags */ 1809 NULL, NULL, /* lockfunc, lockarg */ 1810 &sc->rxd_dmat); 1811 if (error) { 1812 device_printf(dev, "%s: failed to create DMA tag for Rx " 1813 "distribution key\n", __func__); 1814 return (error); 1815 } 1816 1817 error = bus_dma_tag_create( 1818 bus_get_dma_tag(dev), 1819 PAGE_SIZE, 0, /* alignment, boundary */ 1820 BUS_SPACE_MAXADDR, /* low restricted addr */ 1821 BUS_SPACE_MAXADDR, /* high restricted addr */ 1822 NULL, NULL, /* filter, filterarg */ 1823 ETH_QOS_KCFG_BUF_SIZE, 1, /* maxsize, nsegments */ 1824 ETH_QOS_KCFG_BUF_SIZE, 0, /* maxsegsize, flags */ 1825 NULL, NULL, /* lockfunc, lockarg */ 1826 &sc->qos_dmat); 1827 if (error) { 1828 device_printf(dev, "%s: failed to create DMA tag for QoS key\n", 1829 __func__); 1830 return (error); 1831 } 1832 1833 return (0); 1834 } 1835 1836 /** 1837 * @brief Configure buffer layouts of the different DPNI queues. 1838 */ 1839 static int 1840 dpaa2_ni_set_buf_layout(device_t dev) 1841 { 1842 device_t pdev = device_get_parent(dev); 1843 device_t child = dev; 1844 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); 1845 struct dpaa2_devinfo *dinfo = device_get_ivars(dev); 1846 struct dpaa2_ni_softc *sc = device_get_softc(dev); 1847 struct dpaa2_ni_buf_layout buf_layout = {0}; 1848 struct dpaa2_cmd cmd; 1849 uint16_t rc_token, ni_token; 1850 int error; 1851 1852 DPAA2_CMD_INIT(&cmd); 1853 1854 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); 1855 if (error) { 1856 device_printf(dev, "%s: failed to open resource container: " 1857 "id=%d, error=%d\n", __func__, rcinfo->id, error); 1858 goto err_exit; 1859 } 1860 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); 1861 if (error) { 1862 device_printf(sc->dev, "%s: failed to open DPMAC: id=%d, " 1863 "error=%d\n", __func__, dinfo->id, error); 1864 goto close_rc; 1865 } 1866 1867 /* 1868 * Select Rx/Tx buffer alignment. It's necessary to ensure that the 1869 * buffer size seen by WRIOP is a multiple of 64 or 256 bytes depending 1870 * on the WRIOP version. 1871 */ 1872 sc->buf_align = (sc->attr.wriop_ver == WRIOP_VERSION(0, 0, 0) || 1873 sc->attr.wriop_ver == WRIOP_VERSION(1, 0, 0)) 1874 ? 
	    BUF_ALIGN_V1 : BUF_ALIGN;

	/*
	 * We need to ensure that the buffer size seen by WRIOP is a multiple
	 * of 64 or 256 bytes depending on the WRIOP version.
	 */
	sc->buf_sz = ALIGN_DOWN(DPAA2_RX_BUF_SIZE, sc->buf_align);

	if (bootverbose) {
		device_printf(dev, "Rx/Tx buffers: size=%d, alignment=%d\n",
		    sc->buf_sz, sc->buf_align);
	}

	/*
	 * Frame Descriptor Tx buffer layout
	 *
	 *                ADDR -> |---------------------|
	 *                        | SW FRAME ANNOTATION | BUF_SWA_SIZE bytes
	 *                        |---------------------|
	 *                        | HW FRAME ANNOTATION | BUF_TX_HWA_SIZE bytes
	 *                        |---------------------|
	 *                        |    DATA HEADROOM    |
	 *       ADDR + OFFSET -> |---------------------|
	 *                        |                     |
	 *                        |                     |
	 *                        |     FRAME DATA      |
	 *                        |                     |
	 *                        |                     |
	 *                        |---------------------|
	 *                        |    DATA TAILROOM    |
	 *                        |---------------------|
	 *
	 * NOTE: This layout applies to single-buffer frames only.
	 */
	buf_layout.queue_type = DPAA2_NI_QUEUE_TX;
	buf_layout.pd_size = BUF_SWA_SIZE;
	buf_layout.pass_timestamp = true;
	buf_layout.pass_frame_status = true;
	buf_layout.options =
	    BUF_LOPT_PRIV_DATA_SZ |
	    BUF_LOPT_TIMESTAMP | /* requires 128 bytes in HWA */
	    BUF_LOPT_FRAME_STATUS;
	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
	if (error) {
		device_printf(dev, "%s: failed to set Tx buffer layout\n",
		    __func__);
		goto close_ni;
	}

	/* Tx-confirmation buffer layout */
	buf_layout.queue_type = DPAA2_NI_QUEUE_TX_CONF;
	buf_layout.options =
	    BUF_LOPT_TIMESTAMP |
	    BUF_LOPT_FRAME_STATUS;
	error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout);
	if (error) {
		device_printf(dev, "%s: failed to set TxConf buffer layout\n",
		    __func__);
		goto close_ni;
	}

	/*
	 * Driver should reserve the amount of space indicated by this command
	 * as headroom in all Tx frames.
	 */
	error = DPAA2_CMD_NI_GET_TX_DATA_OFF(dev, child, &cmd, &sc->tx_data_off);
	if (error) {
		device_printf(dev, "%s: failed to obtain Tx data offset\n",
		    __func__);
		goto close_ni;
	}

	if (bootverbose) {
		device_printf(dev, "Tx data offset=%d\n", sc->tx_data_off);
	}
	if ((sc->tx_data_off % 64) != 0) {
		device_printf(dev, "Tx data offset (%d) is not a multiple "
		    "of 64 bytes\n", sc->tx_data_off);
	}

	/*
	 * Frame Descriptor Rx buffer layout
	 *
	 *                ADDR -> |---------------------|
	 *                        | SW FRAME ANNOTATION | BUF_SWA_SIZE bytes
	 *                        |---------------------|
	 *                        | HW FRAME ANNOTATION | BUF_RX_HWA_SIZE bytes
	 *                        |---------------------|
	 *                        |    DATA HEADROOM    | OFFSET-BUF_RX_HWA_SIZE
	 *       ADDR + OFFSET -> |---------------------|
	 *                        |                     |
	 *                        |                     |
	 *                        |     FRAME DATA      |
	 *                        |                     |
	 *                        |                     |
	 *                        |---------------------|
	 *                        |    DATA TAILROOM    | 0 bytes
	 *                        |---------------------|
	 *
	 * NOTE: This layout applies to single-buffer frames only.
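	 *
	 * The DATA HEADROOM above is derived from the Tx data offset reported
	 * by the firmware: head_size = tx_data_off - BUF_RX_HWA_SIZE -
	 * BUF_SWA_SIZE (see below). As a purely illustrative example, with
	 * tx_data_off = 192 and 64-byte HW/SW annotations the headroom would
	 * come out to 192 - 64 - 64 = 64 bytes; the real values come from the
	 * MC firmware and the driver's buffer layout macros.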
1974 */ 1975 buf_layout.queue_type = DPAA2_NI_QUEUE_RX; 1976 buf_layout.pd_size = BUF_SWA_SIZE; 1977 buf_layout.fd_align = sc->buf_align; 1978 buf_layout.head_size = sc->tx_data_off - BUF_RX_HWA_SIZE - BUF_SWA_SIZE; 1979 buf_layout.tail_size = 0; 1980 buf_layout.pass_frame_status = true; 1981 buf_layout.pass_parser_result = true; 1982 buf_layout.pass_timestamp = true; 1983 buf_layout.options = 1984 BUF_LOPT_PRIV_DATA_SZ | 1985 BUF_LOPT_DATA_ALIGN | 1986 BUF_LOPT_DATA_HEAD_ROOM | 1987 BUF_LOPT_DATA_TAIL_ROOM | 1988 BUF_LOPT_FRAME_STATUS | 1989 BUF_LOPT_PARSER_RESULT | 1990 BUF_LOPT_TIMESTAMP; 1991 error = DPAA2_CMD_NI_SET_BUF_LAYOUT(dev, child, &cmd, &buf_layout); 1992 if (error) { 1993 device_printf(dev, "%s: failed to set Rx buffer layout\n", 1994 __func__); 1995 goto close_ni; 1996 } 1997 1998 error = 0; 1999 close_ni: 2000 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); 2001 close_rc: 2002 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); 2003 err_exit: 2004 return (error); 2005 } 2006 2007 /** 2008 * @brief Enable Rx/Tx pause frames. 2009 * 2010 * NOTE: DPNI stops sending when a pause frame is received (Rx frame) or DPNI 2011 * itself generates pause frames (Tx frame). 2012 */ 2013 static int 2014 dpaa2_ni_set_pause_frame(device_t dev) 2015 { 2016 device_t pdev = device_get_parent(dev); 2017 device_t child = dev; 2018 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); 2019 struct dpaa2_devinfo *dinfo = device_get_ivars(dev); 2020 struct dpaa2_ni_softc *sc = device_get_softc(dev); 2021 struct dpaa2_ni_link_cfg link_cfg = {0}; 2022 struct dpaa2_cmd cmd; 2023 uint16_t rc_token, ni_token; 2024 int error; 2025 2026 DPAA2_CMD_INIT(&cmd); 2027 2028 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); 2029 if (error) { 2030 device_printf(dev, "%s: failed to open resource container: " 2031 "id=%d, error=%d\n", __func__, rcinfo->id, error); 2032 goto err_exit; 2033 } 2034 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); 2035 if (error) { 2036 device_printf(sc->dev, "%s: failed to open DPMAC: id=%d, " 2037 "error=%d\n", __func__, dinfo->id, error); 2038 goto close_rc; 2039 } 2040 2041 error = DPAA2_CMD_NI_GET_LINK_CFG(dev, child, &cmd, &link_cfg); 2042 if (error) { 2043 device_printf(dev, "%s: failed to obtain link configuration: " 2044 "error=%d\n", __func__, error); 2045 goto close_ni; 2046 } 2047 2048 /* Enable both Rx and Tx pause frames by default. */ 2049 link_cfg.options |= DPAA2_NI_LINK_OPT_PAUSE; 2050 link_cfg.options &= ~DPAA2_NI_LINK_OPT_ASYM_PAUSE; 2051 2052 error = DPAA2_CMD_NI_SET_LINK_CFG(dev, child, &cmd, &link_cfg); 2053 if (error) { 2054 device_printf(dev, "%s: failed to set link configuration: " 2055 "error=%d\n", __func__, error); 2056 goto close_ni; 2057 } 2058 2059 sc->link_options = link_cfg.options; 2060 error = 0; 2061 close_ni: 2062 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); 2063 close_rc: 2064 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); 2065 err_exit: 2066 return (error); 2067 } 2068 2069 /** 2070 * @brief Configure QoS table to determine the traffic class for the received 2071 * frame. 
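 *
 * With the configuration used here, frames that miss the QoS table are
 * steered to traffic class 0 (default_tc = 0) rather than dropped
 * (discard_on_miss = false); the key configuration itself is passed to the
 * MC firmware through a DMA-mapped buffer.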
2072 */ 2073 static int 2074 dpaa2_ni_set_qos_table(device_t dev) 2075 { 2076 device_t pdev = device_get_parent(dev); 2077 device_t child = dev; 2078 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); 2079 struct dpaa2_devinfo *dinfo = device_get_ivars(dev); 2080 struct dpaa2_ni_softc *sc = device_get_softc(dev); 2081 struct dpaa2_ni_qos_table tbl; 2082 struct dpaa2_buf *buf = &sc->qos_kcfg; 2083 struct dpaa2_cmd cmd; 2084 uint16_t rc_token, ni_token; 2085 int error; 2086 2087 if (sc->attr.num.rx_tcs == 1 || 2088 !(sc->attr.options & DPNI_OPT_HAS_KEY_MASKING)) { 2089 if (bootverbose) { 2090 device_printf(dev, "Ingress traffic classification is " 2091 "not supported\n"); 2092 } 2093 return (0); 2094 } 2095 2096 /* 2097 * Allocate a buffer visible to the device to hold the QoS table key 2098 * configuration. 2099 */ 2100 2101 if (__predict_true(buf->dmat == NULL)) { 2102 buf->dmat = sc->qos_dmat; 2103 } 2104 2105 error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr, 2106 BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap); 2107 if (error) { 2108 device_printf(dev, "%s: failed to allocate a buffer for QoS key " 2109 "configuration\n", __func__); 2110 goto err_exit; 2111 } 2112 2113 error = bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr, 2114 ETH_QOS_KCFG_BUF_SIZE, dpaa2_dmamap_oneseg_cb, &buf->paddr, 2115 BUS_DMA_NOWAIT); 2116 if (error) { 2117 device_printf(dev, "%s: failed to map QoS key configuration " 2118 "buffer into bus space\n", __func__); 2119 goto err_exit; 2120 } 2121 2122 DPAA2_CMD_INIT(&cmd); 2123 2124 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); 2125 if (error) { 2126 device_printf(dev, "%s: failed to open resource container: " 2127 "id=%d, error=%d\n", __func__, rcinfo->id, error); 2128 goto err_exit; 2129 } 2130 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); 2131 if (error) { 2132 device_printf(sc->dev, "%s: failed to open DPMAC: id=%d, " 2133 "error=%d\n", __func__, dinfo->id, error); 2134 goto close_rc; 2135 } 2136 2137 tbl.default_tc = 0; 2138 tbl.discard_on_miss = false; 2139 tbl.keep_entries = false; 2140 tbl.kcfg_busaddr = buf->paddr; 2141 error = DPAA2_CMD_NI_SET_QOS_TABLE(dev, child, &cmd, &tbl); 2142 if (error) { 2143 device_printf(dev, "%s: failed to set QoS table\n", __func__); 2144 goto close_ni; 2145 } 2146 2147 error = DPAA2_CMD_NI_CLEAR_QOS_TABLE(dev, child, &cmd); 2148 if (error) { 2149 device_printf(dev, "%s: failed to clear QoS table\n", __func__); 2150 goto close_ni; 2151 } 2152 2153 error = 0; 2154 close_ni: 2155 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); 2156 close_rc: 2157 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); 2158 err_exit: 2159 return (error); 2160 } 2161 2162 static int 2163 dpaa2_ni_set_mac_addr(device_t dev) 2164 { 2165 device_t pdev = device_get_parent(dev); 2166 device_t child = dev; 2167 struct dpaa2_ni_softc *sc = device_get_softc(dev); 2168 if_t ifp = sc->ifp; 2169 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); 2170 struct dpaa2_devinfo *dinfo = device_get_ivars(dev); 2171 struct dpaa2_cmd cmd; 2172 struct ether_addr rnd_mac_addr; 2173 uint16_t rc_token, ni_token; 2174 uint8_t mac_addr[ETHER_ADDR_LEN]; 2175 uint8_t dpni_mac_addr[ETHER_ADDR_LEN]; 2176 int error; 2177 2178 DPAA2_CMD_INIT(&cmd); 2179 2180 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); 2181 if (error) { 2182 device_printf(dev, "%s: failed to open resource container: " 2183 "id=%d, error=%d\n", __func__, rcinfo->id, error); 2184 goto err_exit; 2185 } 2186 
error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); 2187 if (error) { 2188 device_printf(sc->dev, "%s: failed to open DPMAC: id=%d, " 2189 "error=%d\n", __func__, dinfo->id, error); 2190 goto close_rc; 2191 } 2192 2193 /* 2194 * Get the MAC address associated with the physical port, if the DPNI is 2195 * connected to a DPMAC directly associated with one of the physical 2196 * ports. 2197 */ 2198 error = DPAA2_CMD_NI_GET_PORT_MAC_ADDR(dev, child, &cmd, mac_addr); 2199 if (error) { 2200 device_printf(dev, "%s: failed to obtain the MAC address " 2201 "associated with the physical port\n", __func__); 2202 goto close_ni; 2203 } 2204 2205 /* Get primary MAC address from the DPNI attributes. */ 2206 error = DPAA2_CMD_NI_GET_PRIM_MAC_ADDR(dev, child, &cmd, dpni_mac_addr); 2207 if (error) { 2208 device_printf(dev, "%s: failed to obtain primary MAC address\n", 2209 __func__); 2210 goto close_ni; 2211 } 2212 2213 if (!ETHER_IS_ZERO(mac_addr)) { 2214 /* Set MAC address of the physical port as DPNI's primary one. */ 2215 error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd, 2216 mac_addr); 2217 if (error) { 2218 device_printf(dev, "%s: failed to set primary MAC " 2219 "address\n", __func__); 2220 goto close_ni; 2221 } 2222 for (int i = 0; i < ETHER_ADDR_LEN; i++) { 2223 sc->mac.addr[i] = mac_addr[i]; 2224 } 2225 } else if (ETHER_IS_ZERO(dpni_mac_addr)) { 2226 /* Generate random MAC address as DPNI's primary one. */ 2227 ether_gen_addr(ifp, &rnd_mac_addr); 2228 for (int i = 0; i < ETHER_ADDR_LEN; i++) { 2229 mac_addr[i] = rnd_mac_addr.octet[i]; 2230 } 2231 2232 error = DPAA2_CMD_NI_SET_PRIM_MAC_ADDR(dev, child, &cmd, 2233 mac_addr); 2234 if (error) { 2235 device_printf(dev, "%s: failed to set random primary " 2236 "MAC address\n", __func__); 2237 goto close_ni; 2238 } 2239 for (int i = 0; i < ETHER_ADDR_LEN; i++) { 2240 sc->mac.addr[i] = mac_addr[i]; 2241 } 2242 } else { 2243 for (int i = 0; i < ETHER_ADDR_LEN; i++) { 2244 sc->mac.addr[i] = dpni_mac_addr[i]; 2245 } 2246 } 2247 2248 error = 0; 2249 close_ni: 2250 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); 2251 close_rc: 2252 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); 2253 err_exit: 2254 return (error); 2255 } 2256 2257 static void 2258 dpaa2_ni_miibus_statchg(device_t dev) 2259 { 2260 device_t pdev = device_get_parent(dev); 2261 device_t child = dev; 2262 struct dpaa2_ni_softc *sc = device_get_softc(dev); 2263 struct dpaa2_mac_link_state mac_link = { 0 }; 2264 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); 2265 struct dpaa2_cmd cmd; 2266 uint16_t rc_token, mac_token; 2267 int error, link_state; 2268 2269 if (sc->fixed_link || sc->mii == NULL) { 2270 return; 2271 } 2272 2273 /* 2274 * Note: ifp link state will only be changed AFTER we are called so we 2275 * cannot rely on ifp->if_linkstate here. 
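	 *
	 * The mapping from mii(4) media status to link state used below is:
	 *
	 *	IFM_AVALID not set		-> LINK_STATE_UNKNOWN
	 *	IFM_AVALID and IFM_ACTIVE	-> LINK_STATE_UP
	 *	IFM_AVALID, no IFM_ACTIVE	-> LINK_STATE_DOWN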
2276 */ 2277 if (sc->mii->mii_media_status & IFM_AVALID) { 2278 if (sc->mii->mii_media_status & IFM_ACTIVE) { 2279 link_state = LINK_STATE_UP; 2280 } else { 2281 link_state = LINK_STATE_DOWN; 2282 } 2283 } else { 2284 link_state = LINK_STATE_UNKNOWN; 2285 } 2286 2287 if (link_state != sc->link_state) { 2288 sc->link_state = link_state; 2289 2290 DPAA2_CMD_INIT(&cmd); 2291 2292 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, 2293 &rc_token); 2294 if (error) { 2295 device_printf(dev, "%s: failed to open resource " 2296 "container: id=%d, error=%d\n", __func__, rcinfo->id, 2297 error); 2298 goto err_exit; 2299 } 2300 error = DPAA2_CMD_MAC_OPEN(dev, child, &cmd, sc->mac.dpmac_id, 2301 &mac_token); 2302 if (error) { 2303 device_printf(sc->dev, "%s: failed to open DPMAC: " 2304 "id=%d, error=%d\n", __func__, sc->mac.dpmac_id, 2305 error); 2306 goto close_rc; 2307 } 2308 2309 if (link_state == LINK_STATE_UP || 2310 link_state == LINK_STATE_DOWN) { 2311 /* Update DPMAC link state. */ 2312 mac_link.supported = sc->mii->mii_media.ifm_media; 2313 mac_link.advert = sc->mii->mii_media.ifm_media; 2314 mac_link.rate = 1000; /* TODO: Where to get from? */ /* ifmedia_baudrate? */ 2315 mac_link.options = 2316 DPAA2_MAC_LINK_OPT_AUTONEG | 2317 DPAA2_MAC_LINK_OPT_PAUSE; 2318 mac_link.up = (link_state == LINK_STATE_UP) ? true : false; 2319 mac_link.state_valid = true; 2320 2321 /* Inform DPMAC about link state. */ 2322 error = DPAA2_CMD_MAC_SET_LINK_STATE(dev, child, &cmd, 2323 &mac_link); 2324 if (error) { 2325 device_printf(sc->dev, "%s: failed to set DPMAC " 2326 "link state: id=%d, error=%d\n", __func__, 2327 sc->mac.dpmac_id, error); 2328 } 2329 } 2330 (void)DPAA2_CMD_MAC_CLOSE(dev, child, &cmd); 2331 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, 2332 rc_token)); 2333 } 2334 2335 return; 2336 2337 close_rc: 2338 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); 2339 err_exit: 2340 return; 2341 } 2342 2343 /** 2344 * @brief Callback function to process media change request. 2345 */ 2346 static int 2347 dpaa2_ni_media_change(if_t ifp) 2348 { 2349 struct dpaa2_ni_softc *sc = if_getsoftc(ifp); 2350 2351 DPNI_LOCK(sc); 2352 if (sc->mii) { 2353 mii_mediachg(sc->mii); 2354 sc->media_status = sc->mii->mii_media.ifm_media; 2355 } else if (sc->fixed_link) { 2356 if_printf(ifp, "%s: can't change media in fixed mode\n", 2357 __func__); 2358 } 2359 DPNI_UNLOCK(sc); 2360 2361 return (0); 2362 } 2363 2364 /** 2365 * @brief Callback function to process media status request. 2366 */ 2367 static void 2368 dpaa2_ni_media_status(if_t ifp, struct ifmediareq *ifmr) 2369 { 2370 struct dpaa2_ni_softc *sc = if_getsoftc(ifp); 2371 2372 DPNI_LOCK(sc); 2373 if (sc->mii) { 2374 mii_pollstat(sc->mii); 2375 ifmr->ifm_active = sc->mii->mii_media_active; 2376 ifmr->ifm_status = sc->mii->mii_media_status; 2377 } 2378 DPNI_UNLOCK(sc); 2379 } 2380 2381 /** 2382 * @brief Callout function to check and update media status. 
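 *
 * Re-arms itself once per second (hz ticks) via callout_reset(9) and calls
 * dpaa2_ni_media_change() whenever mii_tick() reports a new media word.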
 */
static void
dpaa2_ni_media_tick(void *arg)
{
	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;

	/* Check for media type change */
	if (sc->mii) {
		mii_tick(sc->mii);
		if (sc->media_status != sc->mii->mii_media.ifm_media) {
			printf("%s: media type changed (ifm_media=%x)\n",
			    __func__, sc->mii->mii_media.ifm_media);
			dpaa2_ni_media_change(sc->ifp);
		}
	}

	/* Schedule another timeout one second from now */
	callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);
}

static void
dpaa2_ni_init(void *arg)
{
	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg;
	if_t ifp = sc->ifp;
	device_t pdev = device_get_parent(sc->dev);
	device_t dev = sc->dev;
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	DPNI_LOCK(sc);
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
		DPNI_UNLOCK(sc);
		return;
	}
	DPNI_UNLOCK(sc);

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto err_exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	error = DPAA2_CMD_NI_ENABLE(dev, child, &cmd);
	if (error) {
		device_printf(dev, "%s: failed to enable DPNI: error=%d\n",
		    __func__, error);
	}

	DPNI_LOCK(sc);
	if (sc->mii) {
		mii_mediachg(sc->mii);
	}
	callout_reset(&sc->mii_callout, hz, dpaa2_ni_media_tick, sc);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
	DPNI_UNLOCK(sc);

	/* Force link-state update to initialize things. */
	dpaa2_ni_miibus_statchg(dev);

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
	return;

close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return;
}

static int
dpaa2_ni_transmit(if_t ifp, struct mbuf *m)
{
	struct dpaa2_ni_softc *sc = if_getsoftc(ifp);
	struct dpaa2_channel *ch;
	uint32_t fqid;
	bool found = false;
	int chidx = 0, error;

	if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))) {
		return (0);
	}

	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
		fqid = m->m_pkthdr.flowid;
		for (int i = 0; i < sc->chan_n; i++) {
			ch = sc->channels[i];
			for (int j = 0; j < ch->rxq_n; j++) {
				if (fqid == ch->rx_queues[j].fqid) {
					chidx = ch->flowid;
					found = true;
					break;
				}
			}
			if (found) {
				break;
			}
		}
	}

	ch = sc->channels[chidx];
	error = buf_ring_enqueue(ch->xmit_br, m);
	if (__predict_false(error != 0)) {
		m_freem(m);
	} else {
		taskqueue_enqueue(ch->cleanup_tq, &ch->cleanup_task);
	}

	return (error);
}

static void
dpaa2_ni_qflush(if_t ifp)
{
	/* TODO: Find a way to drain Tx queues in QBMan.
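	 * For now only the ifnet-layer queue is flushed here; frames already
	 * handed to the hardware frame queues remain there until transmitted.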
*/ 2512 if_qflush(ifp); 2513 } 2514 2515 static int 2516 dpaa2_ni_ioctl(if_t ifp, u_long c, caddr_t data) 2517 { 2518 struct dpaa2_ni_softc *sc = if_getsoftc(ifp); 2519 struct ifreq *ifr = (struct ifreq *) data; 2520 device_t pdev = device_get_parent(sc->dev); 2521 device_t dev = sc->dev; 2522 device_t child = dev; 2523 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); 2524 struct dpaa2_devinfo *dinfo = device_get_ivars(dev); 2525 struct dpaa2_cmd cmd; 2526 uint32_t changed = 0; 2527 uint16_t rc_token, ni_token; 2528 int mtu, error, rc = 0; 2529 2530 DPAA2_CMD_INIT(&cmd); 2531 2532 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); 2533 if (error) { 2534 device_printf(dev, "%s: failed to open resource container: " 2535 "id=%d, error=%d\n", __func__, rcinfo->id, error); 2536 goto err_exit; 2537 } 2538 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); 2539 if (error) { 2540 device_printf(dev, "%s: failed to open network interface: " 2541 "id=%d, error=%d\n", __func__, dinfo->id, error); 2542 goto close_rc; 2543 } 2544 2545 switch (c) { 2546 case SIOCSIFMTU: 2547 DPNI_LOCK(sc); 2548 mtu = ifr->ifr_mtu; 2549 if (mtu < ETHERMIN || mtu > ETHERMTU_JUMBO) { 2550 DPNI_UNLOCK(sc); 2551 error = EINVAL; 2552 goto close_ni; 2553 } 2554 if_setmtu(ifp, mtu); 2555 DPNI_UNLOCK(sc); 2556 2557 /* Update maximum frame length. */ 2558 error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd, 2559 mtu + ETHER_HDR_LEN); 2560 if (error) { 2561 device_printf(dev, "%s: failed to update maximum frame " 2562 "length: error=%d\n", __func__, error); 2563 goto close_ni; 2564 } 2565 break; 2566 case SIOCSIFCAP: 2567 changed = if_getcapenable(ifp) ^ ifr->ifr_reqcap; 2568 if (changed & IFCAP_HWCSUM) { 2569 if ((ifr->ifr_reqcap & changed) & IFCAP_HWCSUM) { 2570 if_setcapenablebit(ifp, IFCAP_HWCSUM, 0); 2571 } else { 2572 if_setcapenablebit(ifp, 0, IFCAP_HWCSUM); 2573 } 2574 } 2575 rc = dpaa2_ni_setup_if_caps(sc); 2576 if (rc) { 2577 printf("%s: failed to update iface capabilities: " 2578 "error=%d\n", __func__, rc); 2579 rc = ENXIO; 2580 } 2581 break; 2582 case SIOCSIFFLAGS: 2583 DPNI_LOCK(sc); 2584 if (if_getflags(ifp) & IFF_UP) { 2585 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 2586 changed = if_getflags(ifp) ^ sc->if_flags; 2587 if (changed & IFF_PROMISC || 2588 changed & IFF_ALLMULTI) { 2589 rc = dpaa2_ni_setup_if_flags(sc); 2590 } 2591 } else { 2592 DPNI_UNLOCK(sc); 2593 dpaa2_ni_init(sc); 2594 DPNI_LOCK(sc); 2595 } 2596 } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 2597 /* FIXME: Disable DPNI. See dpaa2_ni_init(). 
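			 * Bringing the interface down should mirror
			 * dpaa2_ni_init(): stop the MII callout, clear
			 * IFF_DRV_RUNNING and disable the DPNI object via the
			 * MC command interface.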
*/ 2598 } 2599 2600 sc->if_flags = if_getflags(ifp); 2601 DPNI_UNLOCK(sc); 2602 break; 2603 case SIOCADDMULTI: 2604 case SIOCDELMULTI: 2605 DPNI_LOCK(sc); 2606 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 2607 DPNI_UNLOCK(sc); 2608 rc = dpaa2_ni_update_mac_filters(ifp); 2609 if (rc) { 2610 device_printf(dev, "%s: failed to update MAC " 2611 "filters: error=%d\n", __func__, rc); 2612 } 2613 DPNI_LOCK(sc); 2614 } 2615 DPNI_UNLOCK(sc); 2616 break; 2617 case SIOCGIFMEDIA: 2618 case SIOCSIFMEDIA: 2619 if (sc->mii) 2620 rc = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, c); 2621 else if(sc->fixed_link) { 2622 rc = ifmedia_ioctl(ifp, ifr, &sc->fixed_ifmedia, c); 2623 } 2624 break; 2625 default: 2626 rc = ether_ioctl(ifp, c, data); 2627 break; 2628 } 2629 2630 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); 2631 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); 2632 return (rc); 2633 2634 close_ni: 2635 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); 2636 close_rc: 2637 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); 2638 err_exit: 2639 return (error); 2640 } 2641 2642 static int 2643 dpaa2_ni_update_mac_filters(if_t ifp) 2644 { 2645 struct dpaa2_ni_softc *sc = if_getsoftc(ifp); 2646 struct dpaa2_ni_mcaddr_ctx ctx; 2647 device_t pdev = device_get_parent(sc->dev); 2648 device_t dev = sc->dev; 2649 device_t child = dev; 2650 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); 2651 struct dpaa2_devinfo *dinfo = device_get_ivars(dev); 2652 struct dpaa2_cmd cmd; 2653 uint16_t rc_token, ni_token; 2654 int error; 2655 2656 DPAA2_CMD_INIT(&cmd); 2657 2658 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); 2659 if (error) { 2660 device_printf(dev, "%s: failed to open resource container: " 2661 "id=%d, error=%d\n", __func__, rcinfo->id, error); 2662 goto err_exit; 2663 } 2664 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); 2665 if (error) { 2666 device_printf(dev, "%s: failed to open network interface: " 2667 "id=%d, error=%d\n", __func__, dinfo->id, error); 2668 goto close_rc; 2669 } 2670 2671 /* Remove all multicast MAC filters. 
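	 * The two boolean arguments to DPAA2_CMD_NI_CLEAR_MAC_FILTERS() are
	 * assumed to select the unicast and multicast filter sets
	 * respectively, so unicast entries are preserved here.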
	 */
	error = DPAA2_CMD_NI_CLEAR_MAC_FILTERS(dev, child, &cmd, false, true);
	if (error) {
		device_printf(dev, "%s: failed to clear multicast MAC filters: "
		    "error=%d\n", __func__, error);
		goto close_ni;
	}

	ctx.ifp = ifp;
	ctx.error = 0;
	ctx.nent = 0;

	if_foreach_llmaddr(ifp, dpaa2_ni_add_maddr, &ctx);

	error = ctx.error;
close_ni:
	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
err_exit:
	return (error);
}

static u_int
dpaa2_ni_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct dpaa2_ni_mcaddr_ctx *ctx = arg;
	struct dpaa2_ni_softc *sc = if_getsoftc(ctx->ifp);
	device_t pdev = device_get_parent(sc->dev);
	device_t dev = sc->dev;
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int error;

	if (ctx->error != 0) {
		return (0);
	}

	if (ETHER_IS_MULTICAST(LLADDR(sdl))) {
		DPAA2_CMD_INIT(&cmd);

		error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id,
		    &rc_token);
		if (error) {
			device_printf(dev, "%s: failed to open resource "
			    "container: id=%d, error=%d\n", __func__, rcinfo->id,
			    error);
			return (0);
		}
		error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id,
		    &ni_token);
		if (error) {
			device_printf(dev, "%s: failed to open network "
			    "interface: id=%d, error=%d\n", __func__, dinfo->id,
			    error);
			(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
			    rc_token));
			return (0);
		}

		ctx->error = DPAA2_CMD_NI_ADD_MAC_ADDR(dev, child, &cmd,
		    LLADDR(sdl));

		(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
		    ni_token));
		(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd,
		    rc_token));

		if (ctx->error != 0) {
			device_printf(dev, "%s: can't add more than %d MAC "
			    "addresses, switching to multicast promiscuous "
			    "mode\n", __func__, ctx->nent);

			/* Enable multicast promiscuous mode.
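			 * Falling back to IFF_ALLMULTI once the hardware
			 * filter table is exhausted trades filtering precision
			 * for correctness: no multicast group is lost, the
			 * stack just has to drop the excess traffic itself.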
*/ 2747 DPNI_LOCK(sc); 2748 if_setflagbits(ctx->ifp, IFF_ALLMULTI, 0); 2749 sc->if_flags |= IFF_ALLMULTI; 2750 ctx->error = dpaa2_ni_setup_if_flags(sc); 2751 DPNI_UNLOCK(sc); 2752 2753 return (0); 2754 } 2755 ctx->nent++; 2756 } 2757 2758 return (1); 2759 } 2760 2761 static void 2762 dpaa2_ni_intr(void *arg) 2763 { 2764 struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg; 2765 device_t pdev = device_get_parent(sc->dev); 2766 device_t dev = sc->dev; 2767 device_t child = dev; 2768 struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev); 2769 struct dpaa2_devinfo *dinfo = device_get_ivars(dev); 2770 struct dpaa2_cmd cmd; 2771 uint32_t status = ~0u; /* clear all IRQ status bits */ 2772 uint16_t rc_token, ni_token; 2773 int error; 2774 2775 DPAA2_CMD_INIT(&cmd); 2776 2777 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token); 2778 if (error) { 2779 device_printf(dev, "%s: failed to open resource container: " 2780 "id=%d, error=%d\n", __func__, rcinfo->id, error); 2781 goto err_exit; 2782 } 2783 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token); 2784 if (error) { 2785 device_printf(dev, "%s: failed to open network interface: " 2786 "id=%d, error=%d\n", __func__, dinfo->id, error); 2787 goto close_rc; 2788 } 2789 2790 error = DPAA2_CMD_NI_GET_IRQ_STATUS(dev, child, &cmd, DPNI_IRQ_INDEX, 2791 &status); 2792 if (error) { 2793 device_printf(sc->dev, "%s: failed to obtain IRQ status: " 2794 "error=%d\n", __func__, error); 2795 } 2796 2797 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token)); 2798 close_rc: 2799 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token)); 2800 err_exit: 2801 return; 2802 } 2803 2804 /** 2805 * @brief Execute channel's Rx/Tx routines. 2806 * 2807 * NOTE: Should not be re-entrant for the same channel. It is achieved by 2808 * enqueuing the cleanup routine on a single-threaded taskqueue. 2809 */ 2810 static void 2811 dpaa2_ni_cleanup_task(void *arg, int count) 2812 { 2813 struct dpaa2_channel *ch = (struct dpaa2_channel *)arg; 2814 struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev); 2815 int error, rxc, txc; 2816 2817 for (int i = 0; i < DPAA2_CLEAN_BUDGET; i++) { 2818 rxc = dpaa2_ni_rx_cleanup(ch); 2819 txc = dpaa2_ni_tx_cleanup(ch); 2820 2821 if (__predict_false((if_getdrvflags(sc->ifp) & 2822 IFF_DRV_RUNNING) == 0)) { 2823 return; 2824 } 2825 2826 if ((txc != DPAA2_TX_BUDGET) && (rxc != DPAA2_RX_BUDGET)) { 2827 break; 2828 } 2829 } 2830 2831 /* Re-arm channel to generate CDAN */ 2832 error = DPAA2_SWP_CONF_WQ_CHANNEL(ch->io_dev, &ch->ctx); 2833 if (error != 0) { 2834 panic("%s: failed to rearm channel: chan_id=%d, error=%d\n", 2835 __func__, ch->id, error); 2836 } 2837 } 2838 2839 /** 2840 * @brief Poll frames from a specific channel when CDAN is received. 
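 *
 * Operates in volatile dequeue (VDQ) mode: every dpaa2_swp_pull() below asks
 * the software portal for up to DPAA2_ETH_STORE_FRAMES frame descriptors,
 * and the loop keeps pulling until the Rx budget is spent or the frame queue
 * runs empty.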
2841 */ 2842 static int 2843 dpaa2_ni_rx_cleanup(struct dpaa2_channel *ch) 2844 { 2845 struct dpaa2_io_softc *iosc = device_get_softc(ch->io_dev); 2846 struct dpaa2_swp *swp = iosc->swp; 2847 struct dpaa2_ni_fq *fq; 2848 struct dpaa2_buf *buf = &ch->store; 2849 int budget = DPAA2_RX_BUDGET; 2850 int error, consumed = 0; 2851 2852 do { 2853 error = dpaa2_swp_pull(swp, ch->id, buf, DPAA2_ETH_STORE_FRAMES); 2854 if (error) { 2855 device_printf(ch->ni_dev, "%s: failed to pull frames: " 2856 "chan_id=%d, error=%d\n", __func__, ch->id, error); 2857 break; 2858 } 2859 error = dpaa2_ni_consume_frames(ch, &fq, &consumed); 2860 if (error == ENOENT || error == EALREADY) { 2861 break; 2862 } 2863 if (error == ETIMEDOUT) { 2864 device_printf(ch->ni_dev, "%s: timeout to consume " 2865 "frames: chan_id=%d\n", __func__, ch->id); 2866 } 2867 } while (--budget); 2868 2869 return (DPAA2_RX_BUDGET - budget); 2870 } 2871 2872 static int 2873 dpaa2_ni_tx_cleanup(struct dpaa2_channel *ch) 2874 { 2875 struct dpaa2_ni_softc *sc = device_get_softc(ch->ni_dev); 2876 struct dpaa2_ni_tx_ring *tx = &ch->txc_queue.tx_rings[0]; 2877 struct mbuf *m = NULL; 2878 int budget = DPAA2_TX_BUDGET; 2879 2880 do { 2881 mtx_assert(&ch->xmit_mtx, MA_NOTOWNED); 2882 mtx_lock(&ch->xmit_mtx); 2883 m = buf_ring_dequeue_sc(ch->xmit_br); 2884 mtx_unlock(&ch->xmit_mtx); 2885 2886 if (__predict_false(m == NULL)) { 2887 /* TODO: Do not give up easily */ 2888 break; 2889 } else { 2890 dpaa2_ni_tx(sc, ch, tx, m); 2891 } 2892 } while (--budget); 2893 2894 return (DPAA2_TX_BUDGET - budget); 2895 } 2896 2897 static void 2898 dpaa2_ni_tx(struct dpaa2_ni_softc *sc, struct dpaa2_channel *ch, 2899 struct dpaa2_ni_tx_ring *tx, struct mbuf *m) 2900 { 2901 device_t dev = sc->dev; 2902 struct dpaa2_ni_fq *fq = tx->fq; 2903 struct dpaa2_buf *buf, *sgt; 2904 struct dpaa2_fd fd; 2905 struct mbuf *md; 2906 bus_dma_segment_t segs[DPAA2_TX_SEGLIMIT]; 2907 int rc, nsegs; 2908 int error; 2909 2910 mtx_assert(&tx->lock, MA_NOTOWNED); 2911 mtx_lock(&tx->lock); 2912 buf = buf_ring_dequeue_sc(tx->br); 2913 mtx_unlock(&tx->lock); 2914 if (__predict_false(buf == NULL)) { 2915 /* TODO: Do not give up easily */ 2916 m_freem(m); 2917 return; 2918 } else { 2919 DPAA2_BUF_ASSERT_TXREADY(buf); 2920 buf->m = m; 2921 sgt = buf->sgt; 2922 } 2923 2924 #if defined(INVARIANTS) 2925 struct dpaa2_ni_tx_ring *btx = (struct dpaa2_ni_tx_ring *)buf->opt; 2926 KASSERT(buf->opt == tx, ("%s: unexpected Tx ring", __func__)); 2927 KASSERT(btx->fq->chan == ch, ("%s: unexpected channel", __func__)); 2928 #endif /* INVARIANTS */ 2929 2930 error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, m, segs, &nsegs, 2931 BUS_DMA_NOWAIT); 2932 if (__predict_false(error != 0)) { 2933 /* Too many fragments, trying to defragment... 
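		 * m_collapse(9) is used rather than m_defrag(9) since it only
		 * squeezes the chain down to DPAA2_TX_SEGLIMIT segments
		 * instead of copying the whole packet into a new chain.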
*/ 2934 md = m_collapse(m, M_NOWAIT, DPAA2_TX_SEGLIMIT); 2935 if (md == NULL) { 2936 device_printf(dev, "%s: m_collapse() failed\n", __func__); 2937 fq->chan->tx_dropped++; 2938 goto err; 2939 } 2940 2941 buf->m = m = md; 2942 error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, m, segs, 2943 &nsegs, BUS_DMA_NOWAIT); 2944 if (__predict_false(error != 0)) { 2945 device_printf(dev, "%s: bus_dmamap_load_mbuf_sg() " 2946 "failed: error=%d\n", __func__, error); 2947 fq->chan->tx_dropped++; 2948 goto err; 2949 } 2950 } 2951 2952 error = dpaa2_ni_build_fd(sc, tx, buf, segs, nsegs, &fd); 2953 if (__predict_false(error != 0)) { 2954 device_printf(dev, "%s: failed to build frame descriptor: " 2955 "error=%d\n", __func__, error); 2956 fq->chan->tx_dropped++; 2957 goto err_unload; 2958 } 2959 2960 /* TODO: Enqueue several frames in a single command */ 2961 for (int i = 0; i < DPAA2_NI_ENQUEUE_RETRIES; i++) { 2962 /* TODO: Return error codes instead of # of frames */ 2963 rc = DPAA2_SWP_ENQ_MULTIPLE_FQ(fq->chan->io_dev, tx->fqid, &fd, 1); 2964 if (rc == 1) { 2965 break; 2966 } 2967 } 2968 2969 bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_PREWRITE); 2970 bus_dmamap_sync(sgt->dmat, sgt->dmap, BUS_DMASYNC_PREWRITE); 2971 2972 if (rc != 1) { 2973 fq->chan->tx_dropped++; 2974 goto err_unload; 2975 } else { 2976 fq->chan->tx_frames++; 2977 } 2978 return; 2979 2980 err_unload: 2981 bus_dmamap_unload(buf->dmat, buf->dmap); 2982 if (sgt->paddr != 0) { 2983 bus_dmamap_unload(sgt->dmat, sgt->dmap); 2984 } 2985 err: 2986 m_freem(buf->m); 2987 buf_ring_enqueue(tx->br, buf); 2988 } 2989 2990 static int 2991 dpaa2_ni_consume_frames(struct dpaa2_channel *chan, struct dpaa2_ni_fq **src, 2992 uint32_t *consumed) 2993 { 2994 struct dpaa2_ni_fq *fq = NULL; 2995 struct dpaa2_dq *dq; 2996 struct dpaa2_fd *fd; 2997 struct dpaa2_ni_rx_ctx ctx = { 2998 .head = NULL, 2999 .tail = NULL, 3000 .cnt = 0, 3001 .last = false 3002 }; 3003 int rc, frames = 0; 3004 3005 do { 3006 rc = dpaa2_chan_next_frame(chan, &dq); 3007 if (rc == EINPROGRESS) { 3008 if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) { 3009 fd = &dq->fdr.fd; 3010 fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx; 3011 3012 switch (fq->type) { 3013 case DPAA2_NI_QUEUE_RX: 3014 (void)dpaa2_ni_rx(chan, fq, fd, &ctx); 3015 break; 3016 case DPAA2_NI_QUEUE_RX_ERR: 3017 (void)dpaa2_ni_rx_err(chan, fq, fd); 3018 break; 3019 case DPAA2_NI_QUEUE_TX_CONF: 3020 (void)dpaa2_ni_tx_conf(chan, fq, fd); 3021 break; 3022 default: 3023 panic("%s: unknown queue type (1)", 3024 __func__); 3025 } 3026 frames++; 3027 } 3028 } else if (rc == EALREADY || rc == ENOENT) { 3029 if (dq != NULL && !IS_NULL_RESPONSE(dq->fdr.desc.stat)) { 3030 fd = &dq->fdr.fd; 3031 fq = (struct dpaa2_ni_fq *) dq->fdr.desc.fqd_ctx; 3032 3033 switch (fq->type) { 3034 case DPAA2_NI_QUEUE_RX: 3035 /* 3036 * Last VDQ response (mbuf) in a chain 3037 * obtained from the Rx queue. 
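					 * Setting ctx.last lets dpaa2_ni_rx()
					 * terminate the mbuf chain it has been
					 * accumulating and pass the whole
					 * batch to if_input() in one call.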
3038 */ 3039 ctx.last = true; 3040 (void)dpaa2_ni_rx(chan, fq, fd, &ctx); 3041 break; 3042 case DPAA2_NI_QUEUE_RX_ERR: 3043 (void)dpaa2_ni_rx_err(chan, fq, fd); 3044 break; 3045 case DPAA2_NI_QUEUE_TX_CONF: 3046 (void)dpaa2_ni_tx_conf(chan, fq, fd); 3047 break; 3048 default: 3049 panic("%s: unknown queue type (2)", 3050 __func__); 3051 } 3052 frames++; 3053 } 3054 break; 3055 } else { 3056 panic("%s: should not reach here: rc=%d", __func__, rc); 3057 } 3058 } while (true); 3059 3060 KASSERT(chan->store_idx < chan->store_sz, ("%s: store_idx(%d) >= " 3061 "store_sz(%d)", __func__, chan->store_idx, chan->store_sz)); 3062 3063 /* 3064 * VDQ operation pulls frames from a single queue into the store. 3065 * Return the frame queue and a number of consumed frames as an output. 3066 */ 3067 if (src != NULL) { 3068 *src = fq; 3069 } 3070 if (consumed != NULL) { 3071 *consumed = frames; 3072 } 3073 3074 return (rc); 3075 } 3076 3077 /** 3078 * @brief Receive frames. 3079 */ 3080 static int 3081 dpaa2_ni_rx(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq, struct dpaa2_fd *fd, 3082 struct dpaa2_ni_rx_ctx *ctx) 3083 { 3084 bus_addr_t paddr = (bus_addr_t)fd->addr; 3085 struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr); 3086 struct dpaa2_buf *buf = fa->buf; 3087 struct dpaa2_channel *bch = (struct dpaa2_channel *)buf->opt; 3088 struct dpaa2_ni_softc *sc = device_get_softc(bch->ni_dev); 3089 struct dpaa2_bp_softc *bpsc; 3090 struct mbuf *m; 3091 device_t bpdev; 3092 bus_addr_t released[DPAA2_SWP_BUFS_PER_CMD]; 3093 void *buf_data; 3094 int buf_len, error, released_n = 0; 3095 3096 KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__)); 3097 /* 3098 * NOTE: Current channel might not be the same as the "buffer" channel 3099 * and it's fine. It must not be NULL though. 
3100 */ 3101 KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__)); 3102 3103 if (__predict_false(paddr != buf->paddr)) { 3104 panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)", 3105 __func__, paddr, buf->paddr); 3106 } 3107 3108 switch (dpaa2_ni_fd_err(fd)) { 3109 case 1: /* Enqueue rejected by QMan */ 3110 sc->rx_enq_rej_frames++; 3111 break; 3112 case 2: /* QMan IEOI error */ 3113 sc->rx_ieoi_err_frames++; 3114 break; 3115 default: 3116 break; 3117 } 3118 switch (dpaa2_ni_fd_format(fd)) { 3119 case DPAA2_FD_SINGLE: 3120 sc->rx_single_buf_frames++; 3121 break; 3122 case DPAA2_FD_SG: 3123 sc->rx_sg_buf_frames++; 3124 break; 3125 default: 3126 break; 3127 } 3128 3129 mtx_assert(&bch->dma_mtx, MA_NOTOWNED); 3130 mtx_lock(&bch->dma_mtx); 3131 3132 bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_POSTREAD); 3133 bus_dmamap_unload(buf->dmat, buf->dmap); 3134 m = buf->m; 3135 buf_len = dpaa2_ni_fd_data_len(fd); 3136 buf_data = (uint8_t *)buf->vaddr + dpaa2_ni_fd_offset(fd); 3137 /* Prepare buffer to be re-cycled */ 3138 buf->m = NULL; 3139 buf->paddr = 0; 3140 buf->vaddr = NULL; 3141 buf->seg.ds_addr = 0; 3142 buf->seg.ds_len = 0; 3143 buf->nseg = 0; 3144 3145 mtx_unlock(&bch->dma_mtx); 3146 3147 m->m_flags |= M_PKTHDR; 3148 m->m_data = buf_data; 3149 m->m_len = buf_len; 3150 m->m_pkthdr.len = buf_len; 3151 m->m_pkthdr.rcvif = sc->ifp; 3152 m->m_pkthdr.flowid = fq->fqid; 3153 M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); 3154 3155 if (ctx->head == NULL) { 3156 KASSERT(ctx->tail == NULL, ("%s: tail already given?", __func__)); 3157 ctx->head = m; 3158 ctx->tail = m; 3159 } else { 3160 KASSERT(ctx->head != NULL, ("%s: head is NULL", __func__)); 3161 ctx->tail->m_nextpkt = m; 3162 ctx->tail = m; 3163 } 3164 ctx->cnt++; 3165 3166 if (ctx->last) { 3167 ctx->tail->m_nextpkt = NULL; 3168 if_input(sc->ifp, ctx->head); 3169 } 3170 3171 /* Keep the buffer to be recycled */ 3172 ch->recycled[ch->recycled_n++] = buf; 3173 3174 /* Re-seed and release recycled buffers back to the pool */ 3175 if (ch->recycled_n == DPAA2_SWP_BUFS_PER_CMD) { 3176 /* Release new buffers to the pool if needed */ 3177 taskqueue_enqueue(sc->bp_taskq, &ch->bp_task); 3178 3179 for (int i = 0; i < ch->recycled_n; i++) { 3180 buf = ch->recycled[i]; 3181 bch = (struct dpaa2_channel *)buf->opt; 3182 3183 mtx_assert(&bch->dma_mtx, MA_NOTOWNED); 3184 mtx_lock(&bch->dma_mtx); 3185 error = dpaa2_buf_seed_rxb(sc->dev, buf, 3186 DPAA2_RX_BUF_SIZE, &bch->dma_mtx); 3187 mtx_unlock(&bch->dma_mtx); 3188 3189 if (__predict_false(error != 0)) { 3190 /* TODO: What else to do with the buffer? */ 3191 panic("%s: failed to recycle buffer: error=%d", 3192 __func__, error); 3193 } 3194 3195 /* Prepare buffer to be released in a single command */ 3196 released[released_n++] = buf->paddr; 3197 } 3198 3199 /* There's only one buffer pool for now */ 3200 bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]); 3201 bpsc = device_get_softc(bpdev); 3202 3203 error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpsc->attr.bpid, 3204 released, released_n); 3205 if (__predict_false(error != 0)) { 3206 device_printf(sc->dev, "%s: failed to release buffers " 3207 "to the pool: error=%d\n", __func__, error); 3208 return (error); 3209 } 3210 ch->recycled_n = 0; 3211 } 3212 3213 return (0); 3214 } 3215 3216 /** 3217 * @brief Receive Rx error frames. 
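 *
 * Error frames are not passed up the stack: the backing buffer is simply
 * returned to the QBMan buffer pool.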
3218 */ 3219 static int 3220 dpaa2_ni_rx_err(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq, 3221 struct dpaa2_fd *fd) 3222 { 3223 bus_addr_t paddr = (bus_addr_t)fd->addr; 3224 struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr); 3225 struct dpaa2_buf *buf = fa->buf; 3226 struct dpaa2_channel *bch = (struct dpaa2_channel *)buf->opt; 3227 struct dpaa2_ni_softc *sc = device_get_softc(bch->ni_dev); 3228 device_t bpdev; 3229 struct dpaa2_bp_softc *bpsc; 3230 int error; 3231 3232 KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__)); 3233 /* 3234 * NOTE: Current channel might not be the same as the "buffer" channel 3235 * and it's fine. It must not be NULL though. 3236 */ 3237 KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__)); 3238 3239 if (__predict_false(paddr != buf->paddr)) { 3240 panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)", 3241 __func__, paddr, buf->paddr); 3242 } 3243 3244 /* There's only one buffer pool for now */ 3245 bpdev = (device_t)rman_get_start(sc->res[DPAA2_NI_BP_RID(0)]); 3246 bpsc = device_get_softc(bpdev); 3247 3248 /* Release buffer to QBMan buffer pool */ 3249 error = DPAA2_SWP_RELEASE_BUFS(ch->io_dev, bpsc->attr.bpid, &paddr, 1); 3250 if (error != 0) { 3251 device_printf(sc->dev, "%s: failed to release frame buffer to " 3252 "the pool: error=%d\n", __func__, error); 3253 return (error); 3254 } 3255 3256 return (0); 3257 } 3258 3259 /** 3260 * @brief Receive Tx confirmation frames. 3261 */ 3262 static int 3263 dpaa2_ni_tx_conf(struct dpaa2_channel *ch, struct dpaa2_ni_fq *fq, 3264 struct dpaa2_fd *fd) 3265 { 3266 bus_addr_t paddr = (bus_addr_t)fd->addr; 3267 struct dpaa2_fa *fa = (struct dpaa2_fa *)PHYS_TO_DMAP(paddr); 3268 struct dpaa2_buf *buf = fa->buf; 3269 struct dpaa2_buf *sgt = buf->sgt; 3270 struct dpaa2_ni_tx_ring *tx = (struct dpaa2_ni_tx_ring *)buf->opt; 3271 struct dpaa2_channel *bch = tx->fq->chan; 3272 3273 KASSERT(fa->magic == DPAA2_MAGIC, ("%s: wrong magic", __func__)); 3274 KASSERT(tx != NULL, ("%s: Tx ring is NULL", __func__)); 3275 KASSERT(sgt != NULL, ("%s: S/G table is NULL", __func__)); 3276 /* 3277 * NOTE: Current channel might not be the same as the "buffer" channel 3278 * and it's fine. It must not be NULL though. 3279 */ 3280 KASSERT(bch != NULL, ("%s: buffer channel is NULL", __func__)); 3281 3282 if (paddr != buf->paddr) { 3283 panic("%s: unexpected physical address: fd(%#jx) != buf(%#jx)", 3284 __func__, paddr, buf->paddr); 3285 } 3286 3287 mtx_assert(&bch->dma_mtx, MA_NOTOWNED); 3288 mtx_lock(&bch->dma_mtx); 3289 3290 bus_dmamap_sync(buf->dmat, buf->dmap, BUS_DMASYNC_POSTWRITE); 3291 bus_dmamap_sync(sgt->dmat, sgt->dmap, BUS_DMASYNC_POSTWRITE); 3292 bus_dmamap_unload(buf->dmat, buf->dmap); 3293 bus_dmamap_unload(sgt->dmat, sgt->dmap); 3294 m_freem(buf->m); 3295 buf->m = NULL; 3296 buf->paddr = 0; 3297 buf->vaddr = NULL; 3298 sgt->paddr = 0; 3299 3300 mtx_unlock(&bch->dma_mtx); 3301 3302 /* Return Tx buffer back to the ring */ 3303 buf_ring_enqueue(tx->br, buf); 3304 3305 return (0); 3306 } 3307 3308 /** 3309 * @brief Compare versions of the DPAA2 network interface API. 3310 */ 3311 static int 3312 dpaa2_ni_cmp_api_version(struct dpaa2_ni_softc *sc, uint16_t major, 3313 uint16_t minor) 3314 { 3315 if (sc->api_major == major) { 3316 return sc->api_minor - minor; 3317 } 3318 return sc->api_major - major; 3319 } 3320 3321 /** 3322 * @brief Build a DPAA2 frame descriptor. 
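 *
 * Frames are always described through a scatter/gather table here: every DMA
 * segment becomes one dpaa2_sg_entry, the last entry is flagged final
 * (0x8000 in offset_fmt), and the FD itself points at the table with the S/G
 * format selected in offset_fmt_sl.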
3323 */ 3324 static int 3325 dpaa2_ni_build_fd(struct dpaa2_ni_softc *sc, struct dpaa2_ni_tx_ring *tx, 3326 struct dpaa2_buf *buf, bus_dma_segment_t *segs, int nsegs, struct dpaa2_fd *fd) 3327 { 3328 struct dpaa2_buf *sgt = buf->sgt; 3329 struct dpaa2_sg_entry *sge; 3330 struct dpaa2_fa *fa; 3331 int i, error; 3332 3333 KASSERT(nsegs <= DPAA2_TX_SEGLIMIT, ("%s: too many segments", __func__)); 3334 KASSERT(buf->opt != NULL, ("%s: no Tx ring?", __func__)); 3335 KASSERT(sgt != NULL, ("%s: no S/G table?", __func__)); 3336 KASSERT(sgt->vaddr != NULL, ("%s: no S/G vaddr?", __func__)); 3337 3338 memset(fd, 0, sizeof(*fd)); 3339 3340 /* Populate and map S/G table */ 3341 if (__predict_true(nsegs <= DPAA2_TX_SEGLIMIT)) { 3342 sge = (struct dpaa2_sg_entry *)sgt->vaddr + sc->tx_data_off; 3343 for (i = 0; i < nsegs; i++) { 3344 sge[i].addr = (uint64_t)segs[i].ds_addr; 3345 sge[i].len = (uint32_t)segs[i].ds_len; 3346 sge[i].offset_fmt = 0u; 3347 } 3348 sge[i-1].offset_fmt |= 0x8000u; /* set final entry flag */ 3349 3350 KASSERT(sgt->paddr == 0, ("%s: paddr(%#jx) != 0", __func__, 3351 sgt->paddr)); 3352 3353 error = bus_dmamap_load(sgt->dmat, sgt->dmap, sgt->vaddr, 3354 DPAA2_TX_SGT_SZ, dpaa2_dmamap_oneseg_cb, &sgt->paddr, 3355 BUS_DMA_NOWAIT); 3356 if (__predict_false(error != 0)) { 3357 device_printf(sc->dev, "%s: bus_dmamap_load() failed: " 3358 "error=%d\n", __func__, error); 3359 return (error); 3360 } 3361 3362 buf->paddr = sgt->paddr; 3363 buf->vaddr = sgt->vaddr; 3364 sc->tx_sg_frames++; /* for sysctl(9) */ 3365 } else { 3366 return (EINVAL); 3367 } 3368 3369 fa = (struct dpaa2_fa *)sgt->vaddr; 3370 fa->magic = DPAA2_MAGIC; 3371 fa->buf = buf; 3372 3373 fd->addr = buf->paddr; 3374 fd->data_length = (uint32_t)buf->m->m_pkthdr.len; 3375 fd->bpid_ivp_bmt = 0; 3376 fd->offset_fmt_sl = 0x2000u | sc->tx_data_off; 3377 fd->ctrl = 0x00800000u; 3378 3379 return (0); 3380 } 3381 3382 static int 3383 dpaa2_ni_fd_err(struct dpaa2_fd *fd) 3384 { 3385 return ((fd->ctrl >> DPAA2_NI_FD_ERR_SHIFT) & DPAA2_NI_FD_ERR_MASK); 3386 } 3387 3388 static uint32_t 3389 dpaa2_ni_fd_data_len(struct dpaa2_fd *fd) 3390 { 3391 if (dpaa2_ni_fd_short_len(fd)) { 3392 return (fd->data_length & DPAA2_NI_FD_LEN_MASK); 3393 } 3394 return (fd->data_length); 3395 } 3396 3397 static int 3398 dpaa2_ni_fd_format(struct dpaa2_fd *fd) 3399 { 3400 return ((enum dpaa2_fd_format)((fd->offset_fmt_sl >> 3401 DPAA2_NI_FD_FMT_SHIFT) & DPAA2_NI_FD_FMT_MASK)); 3402 } 3403 3404 static bool 3405 dpaa2_ni_fd_short_len(struct dpaa2_fd *fd) 3406 { 3407 return (((fd->offset_fmt_sl >> DPAA2_NI_FD_SL_SHIFT) 3408 & DPAA2_NI_FD_SL_MASK) == 1); 3409 } 3410 3411 static int 3412 dpaa2_ni_fd_offset(struct dpaa2_fd *fd) 3413 { 3414 return (fd->offset_fmt_sl & DPAA2_NI_FD_OFFSET_MASK); 3415 } 3416 3417 /** 3418 * @brief Collect statistics of the network interface. 
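 *
 * Called on every sysctl(9) read: oidp->oid_number indexes
 * dpni_stat_sysctls[], and the corresponding counter page is fetched from
 * the MC firmware to report a single 64-bit counter.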
 */
static int
dpaa2_ni_collect_stats(SYSCTL_HANDLER_ARGS)
{
	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
	struct dpni_stat *stat = &dpni_stat_sysctls[oidp->oid_number];
	device_t pdev = device_get_parent(sc->dev);
	device_t dev = sc->dev;
	device_t child = dev;
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpaa2_cmd cmd;
	uint64_t cnt[DPAA2_NI_STAT_COUNTERS];
	uint64_t result = 0;
	uint16_t rc_token, ni_token;
	int error;

	DPAA2_CMD_INIT(&cmd);

	error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, &rc_token);
	if (error) {
		device_printf(dev, "%s: failed to open resource container: "
		    "id=%d, error=%d\n", __func__, rcinfo->id, error);
		goto exit;
	}
	error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, &ni_token);
	if (error) {
		device_printf(dev, "%s: failed to open network interface: "
		    "id=%d, error=%d\n", __func__, dinfo->id, error);
		goto close_rc;
	}

	error = DPAA2_CMD_NI_GET_STATISTICS(dev, child, &cmd, stat->page, 0, cnt);
	if (!error) {
		result = cnt[stat->cnt];
	}

	(void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, ni_token));
close_rc:
	(void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, rc_token));
exit:
	return (sysctl_handle_64(oidp, &result, 0, req));
}

static int
dpaa2_ni_collect_buf_num(SYSCTL_HANDLER_ARGS)
{
	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
	uint32_t buf_num = DPAA2_ATOMIC_READ(&sc->buf_num);

	return (sysctl_handle_32(oidp, &buf_num, 0, req));
}

static int
dpaa2_ni_collect_buf_free(SYSCTL_HANDLER_ARGS)
{
	struct dpaa2_ni_softc *sc = (struct dpaa2_ni_softc *) arg1;
	uint32_t buf_free = DPAA2_ATOMIC_READ(&sc->buf_free);

	return (sysctl_handle_32(oidp, &buf_free, 0, req));
}

static int
dpaa2_ni_set_hash(device_t dev, uint64_t flags)
{
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	uint64_t key = 0;
	int i;

	if (sc->attr.num.queues <= 1) {
		return (EOPNOTSUPP);
	}

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		if (dist_fields[i].rxnfc_field & flags) {
			key |= dist_fields[i].id;
		}
	}

	return (dpaa2_ni_set_dist_key(dev, DPAA2_NI_DIST_MODE_HASH, key));
}

/**
 * @brief Set the Rx distribution (hash or flow classification) key; "flags"
 * is a combination of RXH_ bits.
 */
static int
dpaa2_ni_set_dist_key(device_t dev, enum dpaa2_ni_dist_mode type, uint64_t flags)
{
	device_t pdev = device_get_parent(dev);
	device_t child = dev;
	struct dpaa2_ni_softc *sc = device_get_softc(dev);
	struct dpaa2_devinfo *rcinfo = device_get_ivars(pdev);
	struct dpaa2_devinfo *dinfo = device_get_ivars(dev);
	struct dpkg_profile_cfg cls_cfg;
	struct dpkg_extract *key;
	struct dpaa2_buf *buf = &sc->rxd_kcfg;
	struct dpaa2_cmd cmd;
	uint16_t rc_token, ni_token;
	int i, error = 0;

	if (__predict_true(buf->dmat == NULL)) {
		buf->dmat = sc->rxd_dmat;
	}

	memset(&cls_cfg, 0, sizeof(cls_cfg));

	/* Configure extracts according to the given flags.
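	 * Every RXH_ bit set in "flags" maps (via dist_fields[]) to one
	 * DPKG_EXTRACT_FROM_HDR extract of a full protocol header field;
	 * together the extracts define the key that is hashed to spread
	 * flows across the Rx queues.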
*/ 3527 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { 3528 key = &cls_cfg.extracts[cls_cfg.num_extracts]; 3529 3530 if (!(flags & dist_fields[i].id)) { 3531 continue; 3532 } 3533 3534 if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) { 3535 device_printf(dev, "%s: failed to add key extraction " 3536 "rule\n", __func__); 3537 return (E2BIG); 3538 } 3539 3540 key->type = DPKG_EXTRACT_FROM_HDR; 3541 key->extract.from_hdr.prot = dist_fields[i].cls_prot; 3542 key->extract.from_hdr.type = DPKG_FULL_FIELD; 3543 key->extract.from_hdr.field = dist_fields[i].cls_field; 3544 cls_cfg.num_extracts++; 3545 } 3546 3547 error = bus_dmamem_alloc(buf->dmat, (void **)&buf->vaddr, 3548 BUS_DMA_ZERO | BUS_DMA_COHERENT, &buf->dmap); 3549 if (error != 0) { 3550 device_printf(dev, "%s: failed to allocate a buffer for Rx " 3551 "traffic distribution key configuration\n", __func__); 3552 return (error); 3553 } 3554 3555 error = dpaa2_ni_prepare_key_cfg(&cls_cfg, (uint8_t *)buf->vaddr); 3556 if (error != 0) { 3557 device_printf(dev, "%s: failed to prepare key configuration: " 3558 "error=%d\n", __func__, error); 3559 return (error); 3560 } 3561 3562 /* Prepare for setting the Rx dist. */ 3563 error = bus_dmamap_load(buf->dmat, buf->dmap, buf->vaddr, 3564 DPAA2_CLASSIFIER_DMA_SIZE, dpaa2_dmamap_oneseg_cb, &buf->paddr, 3565 BUS_DMA_NOWAIT); 3566 if (error != 0) { 3567 device_printf(sc->dev, "%s: failed to map a buffer for Rx " 3568 "traffic distribution key configuration\n", __func__); 3569 return (error); 3570 } 3571 3572 if (type == DPAA2_NI_DIST_MODE_HASH) { 3573 DPAA2_CMD_INIT(&cmd); 3574 3575 error = DPAA2_CMD_RC_OPEN(dev, child, &cmd, rcinfo->id, 3576 &rc_token); 3577 if (error) { 3578 device_printf(dev, "%s: failed to open resource " 3579 "container: id=%d, error=%d\n", __func__, rcinfo->id, 3580 error); 3581 goto err_exit; 3582 } 3583 error = DPAA2_CMD_NI_OPEN(dev, child, &cmd, dinfo->id, 3584 &ni_token); 3585 if (error) { 3586 device_printf(dev, "%s: failed to open network " 3587 "interface: id=%d, error=%d\n", __func__, dinfo->id, 3588 error); 3589 goto close_rc; 3590 } 3591 3592 error = DPAA2_CMD_NI_SET_RX_TC_DIST(dev, child, &cmd, 3593 sc->attr.num.queues, 0, DPAA2_NI_DIST_MODE_HASH, buf->paddr); 3594 if (error != 0) { 3595 device_printf(dev, "%s: failed to set distribution mode " 3596 "and size for the traffic class\n", __func__); 3597 } 3598 3599 (void)DPAA2_CMD_NI_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, 3600 ni_token)); 3601 close_rc: 3602 (void)DPAA2_CMD_RC_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, 3603 rc_token)); 3604 } 3605 3606 err_exit: 3607 return (error); 3608 } 3609 3610 /** 3611 * @brief Prepares extract parameters. 3612 * 3613 * cfg: Defining a full Key Generation profile. 3614 * key_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA. 
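 *
 * Serializes the profile into the dpni_ext_set_rx_tc_dist layout consumed by
 * the MC firmware; the caller then maps the buffer and hands its bus address
 * to DPAA2_CMD_NI_SET_RX_TC_DIST().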
3615 */ 3616 static int 3617 dpaa2_ni_prepare_key_cfg(struct dpkg_profile_cfg *cfg, uint8_t *key_cfg_buf) 3618 { 3619 struct dpni_ext_set_rx_tc_dist *dpni_ext; 3620 struct dpni_dist_extract *extr; 3621 int i, j; 3622 3623 if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS) 3624 return (EINVAL); 3625 3626 dpni_ext = (struct dpni_ext_set_rx_tc_dist *) key_cfg_buf; 3627 dpni_ext->num_extracts = cfg->num_extracts; 3628 3629 for (i = 0; i < cfg->num_extracts; i++) { 3630 extr = &dpni_ext->extracts[i]; 3631 3632 switch (cfg->extracts[i].type) { 3633 case DPKG_EXTRACT_FROM_HDR: 3634 extr->prot = cfg->extracts[i].extract.from_hdr.prot; 3635 extr->efh_type = 3636 cfg->extracts[i].extract.from_hdr.type & 0x0Fu; 3637 extr->size = cfg->extracts[i].extract.from_hdr.size; 3638 extr->offset = cfg->extracts[i].extract.from_hdr.offset; 3639 extr->field = cfg->extracts[i].extract.from_hdr.field; 3640 extr->hdr_index = 3641 cfg->extracts[i].extract.from_hdr.hdr_index; 3642 break; 3643 case DPKG_EXTRACT_FROM_DATA: 3644 extr->size = cfg->extracts[i].extract.from_data.size; 3645 extr->offset = 3646 cfg->extracts[i].extract.from_data.offset; 3647 break; 3648 case DPKG_EXTRACT_FROM_PARSE: 3649 extr->size = cfg->extracts[i].extract.from_parse.size; 3650 extr->offset = 3651 cfg->extracts[i].extract.from_parse.offset; 3652 break; 3653 default: 3654 return (EINVAL); 3655 } 3656 3657 extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks; 3658 extr->extract_type = cfg->extracts[i].type & 0x0Fu; 3659 3660 for (j = 0; j < DPKG_NUM_OF_MASKS; j++) { 3661 extr->masks[j].mask = cfg->extracts[i].masks[j].mask; 3662 extr->masks[j].offset = 3663 cfg->extracts[i].masks[j].offset; 3664 } 3665 } 3666 3667 return (0); 3668 } 3669 3670 static device_method_t dpaa2_ni_methods[] = { 3671 /* Device interface */ 3672 DEVMETHOD(device_probe, dpaa2_ni_probe), 3673 DEVMETHOD(device_attach, dpaa2_ni_attach), 3674 DEVMETHOD(device_detach, dpaa2_ni_detach), 3675 3676 /* mii via memac_mdio */ 3677 DEVMETHOD(miibus_statchg, dpaa2_ni_miibus_statchg), 3678 3679 DEVMETHOD_END 3680 }; 3681 3682 static driver_t dpaa2_ni_driver = { 3683 "dpaa2_ni", 3684 dpaa2_ni_methods, 3685 sizeof(struct dpaa2_ni_softc), 3686 }; 3687 3688 DRIVER_MODULE(miibus, dpaa2_ni, miibus_driver, 0, 0); 3689 DRIVER_MODULE(dpaa2_ni, dpaa2_rc, dpaa2_ni_driver, 0, 0); 3690 3691 MODULE_DEPEND(dpaa2_ni, miibus, 1, 1, 1); 3692 #ifdef DEV_ACPI 3693 MODULE_DEPEND(dpaa2_ni, memac_mdio_acpi, 1, 1, 1); 3694 #endif 3695 #ifdef FDT 3696 MODULE_DEPEND(dpaa2_ni, memac_mdio_fdt, 1, 1, 1); 3697 #endif 3698