1 /* 2 * Copyright (c) 2017 Stormshield. 3 * Copyright (c) 2017 Semihalf. 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 19 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 23 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 24 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 #include "opt_platform.h" 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <sys/param.h> 33 #include <sys/systm.h> 34 #include <sys/endian.h> 35 #include <sys/mbuf.h> 36 #include <sys/lock.h> 37 #include <sys/mutex.h> 38 #include <sys/kernel.h> 39 #include <sys/module.h> 40 #include <sys/socket.h> 41 #include <sys/sysctl.h> 42 #include <sys/smp.h> 43 #include <sys/taskqueue.h> 44 #ifdef MVNETA_KTR 45 #include <sys/ktr.h> 46 #endif 47 48 #include <net/ethernet.h> 49 #include <net/bpf.h> 50 #include <net/if.h> 51 #include <net/if_arp.h> 52 #include <net/if_dl.h> 53 #include <net/if_media.h> 54 #include <net/if_types.h> 55 #include <net/if_vlan_var.h> 56 57 #include <netinet/in_systm.h> 58 #include <netinet/in.h> 59 #include <netinet/ip.h> 60 #include <netinet/tcp_lro.h> 61 62 #include <sys/sockio.h> 63 #include <sys/bus.h> 64 #include <machine/bus.h> 65 #include <sys/rman.h> 66 #include <machine/resource.h> 67 68 #include <dev/mii/mii.h> 69 #include <dev/mii/miivar.h> 70 71 #include <dev/ofw/openfirm.h> 72 #include <dev/ofw/ofw_bus.h> 73 #include <dev/ofw/ofw_bus_subr.h> 74 75 #include <dev/mdio/mdio.h> 76 77 #include <arm/mv/mvvar.h> 78 79 #if !defined(__aarch64__) 80 #include <arm/mv/mvreg.h> 81 #include <arm/mv/mvwin.h> 82 #endif 83 84 #include "if_mvnetareg.h" 85 #include "if_mvnetavar.h" 86 87 #include "miibus_if.h" 88 #include "mdio_if.h" 89 90 #ifdef MVNETA_DEBUG 91 #define STATIC /* nothing */ 92 #else 93 #define STATIC static 94 #endif 95 96 #define DASSERT(x) KASSERT((x), (#x)) 97 98 #define A3700_TCLK_250MHZ 250000000 99 100 /* Device Register Initialization */ 101 STATIC int mvneta_initreg(struct ifnet *); 102 103 /* Descriptor Ring Control for each of queues */ 104 STATIC int mvneta_ring_alloc_rx_queue(struct mvneta_softc *, int); 105 STATIC int mvneta_ring_alloc_tx_queue(struct mvneta_softc *, int); 106 STATIC void mvneta_ring_dealloc_rx_queue(struct mvneta_softc *, int); 107 STATIC void mvneta_ring_dealloc_tx_queue(struct mvneta_softc *, int); 108 STATIC int mvneta_ring_init_rx_queue(struct mvneta_softc *, int); 109 STATIC int mvneta_ring_init_tx_queue(struct mvneta_softc *, int); 110 STATIC 
void mvneta_ring_flush_rx_queue(struct mvneta_softc *, int); 111 STATIC void mvneta_ring_flush_tx_queue(struct mvneta_softc *, int); 112 STATIC void mvneta_dmamap_cb(void *, bus_dma_segment_t *, int, int); 113 STATIC int mvneta_dma_create(struct mvneta_softc *); 114 115 /* Rx/Tx Queue Control */ 116 STATIC int mvneta_rx_queue_init(struct ifnet *, int); 117 STATIC int mvneta_tx_queue_init(struct ifnet *, int); 118 STATIC int mvneta_rx_queue_enable(struct ifnet *, int); 119 STATIC int mvneta_tx_queue_enable(struct ifnet *, int); 120 STATIC void mvneta_rx_lockq(struct mvneta_softc *, int); 121 STATIC void mvneta_rx_unlockq(struct mvneta_softc *, int); 122 STATIC void mvneta_tx_lockq(struct mvneta_softc *, int); 123 STATIC void mvneta_tx_unlockq(struct mvneta_softc *, int); 124 125 /* Interrupt Handlers */ 126 STATIC void mvneta_disable_intr(struct mvneta_softc *); 127 STATIC void mvneta_enable_intr(struct mvneta_softc *); 128 STATIC void mvneta_rxtxth_intr(void *); 129 STATIC int mvneta_misc_intr(struct mvneta_softc *); 130 STATIC void mvneta_tick(void *); 131 /* struct ifnet and mii callbacks*/ 132 STATIC int mvneta_xmitfast_locked(struct mvneta_softc *, int, struct mbuf **); 133 STATIC int mvneta_xmit_locked(struct mvneta_softc *, int); 134 #ifdef MVNETA_MULTIQUEUE 135 STATIC int mvneta_transmit(struct ifnet *, struct mbuf *); 136 #else /* !MVNETA_MULTIQUEUE */ 137 STATIC void mvneta_start(struct ifnet *); 138 #endif 139 STATIC void mvneta_qflush(struct ifnet *); 140 STATIC void mvneta_tx_task(void *, int); 141 STATIC int mvneta_ioctl(struct ifnet *, u_long, caddr_t); 142 STATIC void mvneta_init(void *); 143 STATIC void mvneta_init_locked(void *); 144 STATIC void mvneta_stop(struct mvneta_softc *); 145 STATIC void mvneta_stop_locked(struct mvneta_softc *); 146 STATIC int mvneta_mediachange(struct ifnet *); 147 STATIC void mvneta_mediastatus(struct ifnet *, struct ifmediareq *); 148 STATIC void mvneta_portup(struct mvneta_softc *); 149 STATIC void mvneta_portdown(struct mvneta_softc *); 150 151 /* Link State Notify */ 152 STATIC void mvneta_update_autoneg(struct mvneta_softc *, int); 153 STATIC int mvneta_update_media(struct mvneta_softc *, int); 154 STATIC void mvneta_adjust_link(struct mvneta_softc *); 155 STATIC void mvneta_update_eee(struct mvneta_softc *); 156 STATIC void mvneta_update_fc(struct mvneta_softc *); 157 STATIC void mvneta_link_isr(struct mvneta_softc *); 158 STATIC void mvneta_linkupdate(struct mvneta_softc *, boolean_t); 159 STATIC void mvneta_linkup(struct mvneta_softc *); 160 STATIC void mvneta_linkdown(struct mvneta_softc *); 161 STATIC void mvneta_linkreset(struct mvneta_softc *); 162 163 /* Tx Subroutines */ 164 STATIC int mvneta_tx_queue(struct mvneta_softc *, struct mbuf **, int); 165 STATIC void mvneta_tx_set_csumflag(struct ifnet *, 166 struct mvneta_tx_desc *, struct mbuf *); 167 STATIC void mvneta_tx_queue_complete(struct mvneta_softc *, int); 168 STATIC void mvneta_tx_drain(struct mvneta_softc *); 169 170 /* Rx Subroutines */ 171 STATIC int mvneta_rx(struct mvneta_softc *, int, int); 172 STATIC void mvneta_rx_queue(struct mvneta_softc *, int, int); 173 STATIC void mvneta_rx_queue_refill(struct mvneta_softc *, int); 174 STATIC void mvneta_rx_set_csumflag(struct ifnet *, 175 struct mvneta_rx_desc *, struct mbuf *); 176 STATIC void mvneta_rx_buf_free(struct mvneta_softc *, struct mvneta_buf *); 177 178 /* MAC address filter */ 179 STATIC void mvneta_filter_setup(struct mvneta_softc *); 180 181 /* sysctl(9) */ 182 STATIC int sysctl_read_mib(SYSCTL_HANDLER_ARGS); 
183 STATIC int sysctl_clear_mib(SYSCTL_HANDLER_ARGS); 184 STATIC int sysctl_set_queue_rxthtime(SYSCTL_HANDLER_ARGS); 185 STATIC void sysctl_mvneta_init(struct mvneta_softc *); 186 187 /* MIB */ 188 STATIC void mvneta_clear_mib(struct mvneta_softc *); 189 STATIC void mvneta_update_mib(struct mvneta_softc *); 190 191 /* Switch */ 192 STATIC boolean_t mvneta_has_switch(device_t); 193 194 #define mvneta_sc_lock(sc) mtx_lock(&sc->mtx) 195 #define mvneta_sc_unlock(sc) mtx_unlock(&sc->mtx) 196 197 STATIC struct mtx mii_mutex; 198 STATIC int mii_init = 0; 199 200 /* Device */ 201 STATIC int mvneta_detach(device_t); 202 /* MII */ 203 STATIC int mvneta_miibus_readreg(device_t, int, int); 204 STATIC int mvneta_miibus_writereg(device_t, int, int, int); 205 206 /* Clock */ 207 STATIC uint32_t mvneta_get_clk(void); 208 209 static device_method_t mvneta_methods[] = { 210 /* Device interface */ 211 DEVMETHOD(device_detach, mvneta_detach), 212 /* MII interface */ 213 DEVMETHOD(miibus_readreg, mvneta_miibus_readreg), 214 DEVMETHOD(miibus_writereg, mvneta_miibus_writereg), 215 /* MDIO interface */ 216 DEVMETHOD(mdio_readreg, mvneta_miibus_readreg), 217 DEVMETHOD(mdio_writereg, mvneta_miibus_writereg), 218 219 /* End */ 220 DEVMETHOD_END 221 }; 222 223 DEFINE_CLASS_0(mvneta, mvneta_driver, mvneta_methods, sizeof(struct mvneta_softc)); 224 225 DRIVER_MODULE(miibus, mvneta, miibus_driver, miibus_devclass, 0, 0); 226 DRIVER_MODULE(mdio, mvneta, mdio_driver, mdio_devclass, 0, 0); 227 MODULE_DEPEND(mvneta, mdio, 1, 1, 1); 228 MODULE_DEPEND(mvneta, ether, 1, 1, 1); 229 MODULE_DEPEND(mvneta, miibus, 1, 1, 1); 230 MODULE_DEPEND(mvneta, mvxpbm, 1, 1, 1); 231 232 /* 233 * List of MIB register and names 234 */ 235 enum mvneta_mib_idx 236 { 237 MVNETA_MIB_RX_GOOD_OCT_IDX, 238 MVNETA_MIB_RX_BAD_OCT_IDX, 239 MVNETA_MIB_TX_MAC_TRNS_ERR_IDX, 240 MVNETA_MIB_RX_GOOD_FRAME_IDX, 241 MVNETA_MIB_RX_BAD_FRAME_IDX, 242 MVNETA_MIB_RX_BCAST_FRAME_IDX, 243 MVNETA_MIB_RX_MCAST_FRAME_IDX, 244 MVNETA_MIB_RX_FRAME64_OCT_IDX, 245 MVNETA_MIB_RX_FRAME127_OCT_IDX, 246 MVNETA_MIB_RX_FRAME255_OCT_IDX, 247 MVNETA_MIB_RX_FRAME511_OCT_IDX, 248 MVNETA_MIB_RX_FRAME1023_OCT_IDX, 249 MVNETA_MIB_RX_FRAMEMAX_OCT_IDX, 250 MVNETA_MIB_TX_GOOD_OCT_IDX, 251 MVNETA_MIB_TX_GOOD_FRAME_IDX, 252 MVNETA_MIB_TX_EXCES_COL_IDX, 253 MVNETA_MIB_TX_MCAST_FRAME_IDX, 254 MVNETA_MIB_TX_BCAST_FRAME_IDX, 255 MVNETA_MIB_TX_MAC_CTL_ERR_IDX, 256 MVNETA_MIB_FC_SENT_IDX, 257 MVNETA_MIB_FC_GOOD_IDX, 258 MVNETA_MIB_FC_BAD_IDX, 259 MVNETA_MIB_PKT_UNDERSIZE_IDX, 260 MVNETA_MIB_PKT_FRAGMENT_IDX, 261 MVNETA_MIB_PKT_OVERSIZE_IDX, 262 MVNETA_MIB_PKT_JABBER_IDX, 263 MVNETA_MIB_MAC_RX_ERR_IDX, 264 MVNETA_MIB_MAC_CRC_ERR_IDX, 265 MVNETA_MIB_MAC_COL_IDX, 266 MVNETA_MIB_MAC_LATE_COL_IDX, 267 }; 268 269 STATIC struct mvneta_mib_def { 270 uint32_t regnum; 271 int reg64; 272 const char *sysctl_name; 273 const char *desc; 274 } mvneta_mib_list[] = { 275 [MVNETA_MIB_RX_GOOD_OCT_IDX] = {MVNETA_MIB_RX_GOOD_OCT, 1, 276 "rx_good_oct", "Good Octets Rx"}, 277 [MVNETA_MIB_RX_BAD_OCT_IDX] = {MVNETA_MIB_RX_BAD_OCT, 0, 278 "rx_bad_oct", "Bad Octets Rx"}, 279 [MVNETA_MIB_TX_MAC_TRNS_ERR_IDX] = {MVNETA_MIB_TX_MAC_TRNS_ERR, 0, 280 "tx_mac_err", "MAC Transmit Error"}, 281 [MVNETA_MIB_RX_GOOD_FRAME_IDX] = {MVNETA_MIB_RX_GOOD_FRAME, 0, 282 "rx_good_frame", "Good Frames Rx"}, 283 [MVNETA_MIB_RX_BAD_FRAME_IDX] = {MVNETA_MIB_RX_BAD_FRAME, 0, 284 "rx_bad_frame", "Bad Frames Rx"}, 285 [MVNETA_MIB_RX_BCAST_FRAME_IDX] = {MVNETA_MIB_RX_BCAST_FRAME, 0, 286 "rx_bcast_frame", "Broadcast Frames Rx"}, 287 
[MVNETA_MIB_RX_MCAST_FRAME_IDX] = {MVNETA_MIB_RX_MCAST_FRAME, 0, 288 "rx_mcast_frame", "Multicast Frames Rx"}, 289 [MVNETA_MIB_RX_FRAME64_OCT_IDX] = {MVNETA_MIB_RX_FRAME64_OCT, 0, 290 "rx_frame_1_64", "Frame Size 1 - 64"}, 291 [MVNETA_MIB_RX_FRAME127_OCT_IDX] = {MVNETA_MIB_RX_FRAME127_OCT, 0, 292 "rx_frame_65_127", "Frame Size 65 - 127"}, 293 [MVNETA_MIB_RX_FRAME255_OCT_IDX] = {MVNETA_MIB_RX_FRAME255_OCT, 0, 294 "rx_frame_128_255", "Frame Size 128 - 255"}, 295 [MVNETA_MIB_RX_FRAME511_OCT_IDX] = {MVNETA_MIB_RX_FRAME511_OCT, 0, 296 "rx_frame_256_511", "Frame Size 256 - 511"}, 297 [MVNETA_MIB_RX_FRAME1023_OCT_IDX] = {MVNETA_MIB_RX_FRAME1023_OCT, 0, 298 "rx_frame_512_1023", "Frame Size 512 - 1023"}, 299 [MVNETA_MIB_RX_FRAMEMAX_OCT_IDX] = {MVNETA_MIB_RX_FRAMEMAX_OCT, 0, 300 "rx_fame_1024_max", "Frame Size 1024 - Max"}, 301 [MVNETA_MIB_TX_GOOD_OCT_IDX] = {MVNETA_MIB_TX_GOOD_OCT, 1, 302 "tx_good_oct", "Good Octets Tx"}, 303 [MVNETA_MIB_TX_GOOD_FRAME_IDX] = {MVNETA_MIB_TX_GOOD_FRAME, 0, 304 "tx_good_frame", "Good Frames Tx"}, 305 [MVNETA_MIB_TX_EXCES_COL_IDX] = {MVNETA_MIB_TX_EXCES_COL, 0, 306 "tx_exces_collision", "Excessive Collision"}, 307 [MVNETA_MIB_TX_MCAST_FRAME_IDX] = {MVNETA_MIB_TX_MCAST_FRAME, 0, 308 "tx_mcast_frame", "Multicast Frames Tx"}, 309 [MVNETA_MIB_TX_BCAST_FRAME_IDX] = {MVNETA_MIB_TX_BCAST_FRAME, 0, 310 "tx_bcast_frame", "Broadcast Frames Tx"}, 311 [MVNETA_MIB_TX_MAC_CTL_ERR_IDX] = {MVNETA_MIB_TX_MAC_CTL_ERR, 0, 312 "tx_mac_ctl_err", "Unknown MAC Control"}, 313 [MVNETA_MIB_FC_SENT_IDX] = {MVNETA_MIB_FC_SENT, 0, 314 "fc_tx", "Flow Control Tx"}, 315 [MVNETA_MIB_FC_GOOD_IDX] = {MVNETA_MIB_FC_GOOD, 0, 316 "fc_rx_good", "Good Flow Control Rx"}, 317 [MVNETA_MIB_FC_BAD_IDX] = {MVNETA_MIB_FC_BAD, 0, 318 "fc_rx_bad", "Bad Flow Control Rx"}, 319 [MVNETA_MIB_PKT_UNDERSIZE_IDX] = {MVNETA_MIB_PKT_UNDERSIZE, 0, 320 "pkt_undersize", "Undersized Packets Rx"}, 321 [MVNETA_MIB_PKT_FRAGMENT_IDX] = {MVNETA_MIB_PKT_FRAGMENT, 0, 322 "pkt_fragment", "Fragmented Packets Rx"}, 323 [MVNETA_MIB_PKT_OVERSIZE_IDX] = {MVNETA_MIB_PKT_OVERSIZE, 0, 324 "pkt_oversize", "Oversized Packets Rx"}, 325 [MVNETA_MIB_PKT_JABBER_IDX] = {MVNETA_MIB_PKT_JABBER, 0, 326 "pkt_jabber", "Jabber Packets Rx"}, 327 [MVNETA_MIB_MAC_RX_ERR_IDX] = {MVNETA_MIB_MAC_RX_ERR, 0, 328 "mac_rx_err", "MAC Rx Errors"}, 329 [MVNETA_MIB_MAC_CRC_ERR_IDX] = {MVNETA_MIB_MAC_CRC_ERR, 0, 330 "mac_crc_err", "MAC CRC Errors"}, 331 [MVNETA_MIB_MAC_COL_IDX] = {MVNETA_MIB_MAC_COL, 0, 332 "mac_collision", "MAC Collision"}, 333 [MVNETA_MIB_MAC_LATE_COL_IDX] = {MVNETA_MIB_MAC_LATE_COL, 0, 334 "mac_late_collision", "MAC Late Collision"}, 335 }; 336 337 static struct resource_spec res_spec[] = { 338 { SYS_RES_MEMORY, 0, RF_ACTIVE }, 339 { SYS_RES_IRQ, 0, RF_ACTIVE }, 340 { -1, 0} 341 }; 342 343 static struct { 344 driver_intr_t *handler; 345 char * description; 346 } mvneta_intrs[] = { 347 { mvneta_rxtxth_intr, "MVNETA aggregated interrupt" }, 348 }; 349 350 STATIC uint32_t 351 mvneta_get_clk() 352 { 353 #if defined(__aarch64__) 354 return (A3700_TCLK_250MHZ); 355 #else 356 return (get_tclk()); 357 #endif 358 } 359 360 static int 361 mvneta_set_mac_address(struct mvneta_softc *sc, uint8_t *addr) 362 { 363 unsigned int mac_h; 364 unsigned int mac_l; 365 366 mac_l = (addr[4] << 8) | (addr[5]); 367 mac_h = (addr[0] << 24) | (addr[1] << 16) | 368 (addr[2] << 8) | (addr[3] << 0); 369 370 MVNETA_WRITE(sc, MVNETA_MACAL, mac_l); 371 MVNETA_WRITE(sc, MVNETA_MACAH, mac_h); 372 return (0); 373 } 374 375 static int 376 mvneta_get_mac_address(struct mvneta_softc *sc, 
uint8_t *addr) 377 { 378 uint32_t mac_l, mac_h; 379 380 #ifdef FDT 381 if (mvneta_fdt_mac_address(sc, addr) == 0) 382 return (0); 383 #endif 384 /* 385 * Fall back -- use the currently programmed address. 386 */ 387 mac_l = MVNETA_READ(sc, MVNETA_MACAL); 388 mac_h = MVNETA_READ(sc, MVNETA_MACAH); 389 if (mac_l == 0 && mac_h == 0) { 390 /* 391 * Generate pseudo-random MAC. 392 * Set lower part to random number | unit number. 393 */ 394 mac_l = arc4random() & ~0xff; 395 mac_l |= device_get_unit(sc->dev) & 0xff; 396 mac_h = arc4random(); 397 mac_h &= ~(3 << 24); /* Clear multicast and LAA bits */ 398 if (bootverbose) { 399 device_printf(sc->dev, 400 "Could not acquire MAC address. " 401 "Using randomized one.\n"); 402 } 403 } 404 405 addr[0] = (mac_h & 0xff000000) >> 24; 406 addr[1] = (mac_h & 0x00ff0000) >> 16; 407 addr[2] = (mac_h & 0x0000ff00) >> 8; 408 addr[3] = (mac_h & 0x000000ff); 409 addr[4] = (mac_l & 0x0000ff00) >> 8; 410 addr[5] = (mac_l & 0x000000ff); 411 return (0); 412 } 413 414 STATIC boolean_t 415 mvneta_has_switch(device_t self) 416 { 417 phandle_t node, switch_node, switch_eth, switch_eth_handle; 418 419 node = ofw_bus_get_node(self); 420 switch_node = 421 ofw_bus_find_compatible(OF_finddevice("/"), "marvell,dsa"); 422 switch_eth = 0; 423 424 OF_getencprop(switch_node, "dsa,ethernet", 425 (void*)&switch_eth_handle, sizeof(switch_eth_handle)); 426 427 if (switch_eth_handle > 0) 428 switch_eth = OF_node_from_xref(switch_eth_handle); 429 430 /* Return true if dsa,ethernet cell points to us */ 431 return (node == switch_eth); 432 } 433 434 STATIC int 435 mvneta_dma_create(struct mvneta_softc *sc) 436 { 437 size_t maxsize, maxsegsz; 438 size_t q; 439 int error; 440 441 /* 442 * Create Tx DMA 443 */ 444 maxsize = maxsegsz = sizeof(struct mvneta_tx_desc) * MVNETA_TX_RING_CNT; 445 446 error = bus_dma_tag_create( 447 bus_get_dma_tag(sc->dev), /* parent */ 448 16, 0, /* alignment, boundary */ 449 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 450 BUS_SPACE_MAXADDR, /* highaddr */ 451 NULL, NULL, /* filtfunc, filtfuncarg */ 452 maxsize, /* maxsize */ 453 1, /* nsegments */ 454 maxsegsz, /* maxsegsz */ 455 0, /* flags */ 456 NULL, NULL, /* lockfunc, lockfuncarg */ 457 &sc->tx_dtag); /* dmat */ 458 if (error != 0) { 459 device_printf(sc->dev, 460 "Failed to create DMA tag for Tx descriptors.\n"); 461 goto fail; 462 } 463 error = bus_dma_tag_create( 464 bus_get_dma_tag(sc->dev), /* parent */ 465 1, 0, /* alignment, boundary */ 466 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 467 BUS_SPACE_MAXADDR, /* highaddr */ 468 NULL, NULL, /* filtfunc, filtfuncarg */ 469 MVNETA_PACKET_SIZE, /* maxsize */ 470 MVNETA_TX_SEGLIMIT, /* nsegments */ 471 MVNETA_PACKET_SIZE, /* maxsegsz */ 472 BUS_DMA_ALLOCNOW, /* flags */ 473 NULL, NULL, /* lockfunc, lockfuncarg */ 474 &sc->txmbuf_dtag); 475 if (error != 0) { 476 device_printf(sc->dev, 477 "Failed to create DMA tag for Tx mbufs.\n"); 478 goto fail; 479 } 480 481 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) { 482 error = mvneta_ring_alloc_tx_queue(sc, q); 483 if (error != 0) { 484 device_printf(sc->dev, 485 "Failed to allocate DMA safe memory for TxQ: %zu\n", q); 486 goto fail; 487 } 488 } 489 490 /* 491 * Create Rx DMA. 
492 */ 493 /* Create tag for Rx descripors */ 494 error = bus_dma_tag_create( 495 bus_get_dma_tag(sc->dev), /* parent */ 496 32, 0, /* alignment, boundary */ 497 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 498 BUS_SPACE_MAXADDR, /* highaddr */ 499 NULL, NULL, /* filtfunc, filtfuncarg */ 500 sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT, /* maxsize */ 501 1, /* nsegments */ 502 sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT, /* maxsegsz */ 503 0, /* flags */ 504 NULL, NULL, /* lockfunc, lockfuncarg */ 505 &sc->rx_dtag); /* dmat */ 506 if (error != 0) { 507 device_printf(sc->dev, 508 "Failed to create DMA tag for Rx descriptors.\n"); 509 goto fail; 510 } 511 512 /* Create tag for Rx buffers */ 513 error = bus_dma_tag_create( 514 bus_get_dma_tag(sc->dev), /* parent */ 515 32, 0, /* alignment, boundary */ 516 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 517 BUS_SPACE_MAXADDR, /* highaddr */ 518 NULL, NULL, /* filtfunc, filtfuncarg */ 519 MVNETA_PACKET_SIZE, 1, /* maxsize, nsegments */ 520 MVNETA_PACKET_SIZE, /* maxsegsz */ 521 0, /* flags */ 522 NULL, NULL, /* lockfunc, lockfuncarg */ 523 &sc->rxbuf_dtag); /* dmat */ 524 if (error != 0) { 525 device_printf(sc->dev, 526 "Failed to create DMA tag for Rx buffers.\n"); 527 goto fail; 528 } 529 530 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) { 531 if (mvneta_ring_alloc_rx_queue(sc, q) != 0) { 532 device_printf(sc->dev, 533 "Failed to allocate DMA safe memory for RxQ: %zu\n", q); 534 goto fail; 535 } 536 } 537 538 return (0); 539 fail: 540 mvneta_detach(sc->dev); 541 542 return (error); 543 } 544 545 /* ARGSUSED */ 546 int 547 mvneta_attach(device_t self) 548 { 549 struct mvneta_softc *sc; 550 struct ifnet *ifp; 551 device_t child; 552 int ifm_target; 553 int q, error; 554 #if !defined(__aarch64__) 555 uint32_t reg; 556 #endif 557 558 sc = device_get_softc(self); 559 sc->dev = self; 560 561 mtx_init(&sc->mtx, "mvneta_sc", NULL, MTX_DEF); 562 563 error = bus_alloc_resources(self, res_spec, sc->res); 564 if (error) { 565 device_printf(self, "could not allocate resources\n"); 566 return (ENXIO); 567 } 568 569 sc->version = MVNETA_READ(sc, MVNETA_PV); 570 device_printf(self, "version is %x\n", sc->version); 571 callout_init(&sc->tick_ch, 0); 572 573 /* 574 * make sure DMA engines are in reset state 575 */ 576 MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000001); 577 MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000001); 578 579 #if !defined(__aarch64__) 580 /* 581 * Disable port snoop for buffers and descriptors 582 * to avoid L2 caching of both without DRAM copy. 583 * Obtain coherency settings from the first MBUS 584 * window attribute. 585 */ 586 if ((MVNETA_READ(sc, MV_WIN_NETA_BASE(0)) & IO_WIN_COH_ATTR_MASK) == 0) { 587 reg = MVNETA_READ(sc, MVNETA_PSNPCFG); 588 reg &= ~MVNETA_PSNPCFG_DESCSNP_MASK; 589 reg &= ~MVNETA_PSNPCFG_BUFSNP_MASK; 590 MVNETA_WRITE(sc, MVNETA_PSNPCFG, reg); 591 } 592 #endif 593 594 /* 595 * MAC address 596 */ 597 if (mvneta_get_mac_address(sc, sc->enaddr)) { 598 device_printf(self, "no mac address.\n"); 599 return (ENXIO); 600 } 601 mvneta_set_mac_address(sc, sc->enaddr); 602 603 mvneta_disable_intr(sc); 604 605 /* Allocate network interface */ 606 ifp = sc->ifp = if_alloc(IFT_ETHER); 607 if (ifp == NULL) { 608 device_printf(self, "if_alloc() failed\n"); 609 mvneta_detach(self); 610 return (ENOMEM); 611 } 612 if_initname(ifp, device_get_name(self), device_get_unit(self)); 613 614 /* 615 * We can support 802.1Q VLAN-sized frames and jumbo 616 * Ethernet frames. 
617 */ 618 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU; 619 620 ifp->if_softc = sc; 621 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 622 #ifdef MVNETA_MULTIQUEUE 623 ifp->if_transmit = mvneta_transmit; 624 ifp->if_qflush = mvneta_qflush; 625 #else /* !MVNETA_MULTIQUEUE */ 626 ifp->if_start = mvneta_start; 627 ifp->if_snd.ifq_drv_maxlen = MVNETA_TX_RING_CNT - 1; 628 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 629 IFQ_SET_READY(&ifp->if_snd); 630 #endif 631 ifp->if_init = mvneta_init; 632 ifp->if_ioctl = mvneta_ioctl; 633 634 /* 635 * We can do IPv4/TCPv4/UDPv4/TCPv6/UDPv6 checksums in hardware. 636 */ 637 ifp->if_capabilities |= IFCAP_HWCSUM; 638 639 /* 640 * As VLAN hardware tagging is not supported 641 * but is necessary to perform VLAN hardware checksums, 642 * it is done in the driver 643 */ 644 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM; 645 646 /* 647 * Currently IPv6 HW checksum is broken, so make sure it is disabled. 648 */ 649 ifp->if_capabilities &= ~IFCAP_HWCSUM_IPV6; 650 ifp->if_capenable = ifp->if_capabilities; 651 652 /* 653 * Disabled option(s): 654 * - Support for Large Receive Offload 655 */ 656 ifp->if_capabilities |= IFCAP_LRO; 657 658 ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP; 659 660 /* 661 * Device DMA Buffer allocation. 662 * Handles resource deallocation in case of failure. 663 */ 664 error = mvneta_dma_create(sc); 665 if (error != 0) { 666 mvneta_detach(self); 667 return (error); 668 } 669 670 /* Initialize queues */ 671 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) { 672 error = mvneta_ring_init_tx_queue(sc, q); 673 if (error != 0) { 674 mvneta_detach(self); 675 return (error); 676 } 677 } 678 679 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) { 680 error = mvneta_ring_init_rx_queue(sc, q); 681 if (error != 0) { 682 mvneta_detach(self); 683 return (error); 684 } 685 } 686 687 ether_ifattach(ifp, sc->enaddr); 688 689 /* 690 * Enable DMA engines and Initialize Device Registers. 691 */ 692 MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000000); 693 MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000000); 694 MVNETA_WRITE(sc, MVNETA_PACC, MVNETA_PACC_ACCELERATIONMODE_EDM); 695 mvneta_sc_lock(sc); 696 mvneta_filter_setup(sc); 697 mvneta_sc_unlock(sc); 698 mvneta_initreg(ifp); 699 700 /* 701 * Now MAC is working, setup MII. 702 */ 703 if (mii_init == 0) { 704 /* 705 * MII bus is shared by all MACs and all PHYs in SoC. 706 * serializing the bus access should be safe. 
707 */ 708 mtx_init(&mii_mutex, "mvneta_mii", NULL, MTX_DEF); 709 mii_init = 1; 710 } 711 712 /* Attach PHY(s) */ 713 if ((sc->phy_addr != MII_PHY_ANY) && (!sc->use_inband_status)) { 714 error = mii_attach(self, &sc->miibus, ifp, mvneta_mediachange, 715 mvneta_mediastatus, BMSR_DEFCAPMASK, sc->phy_addr, 716 MII_OFFSET_ANY, 0); 717 if (error != 0) { 718 if (bootverbose) { 719 device_printf(self, 720 "MII attach failed, error: %d\n", error); 721 } 722 ether_ifdetach(sc->ifp); 723 mvneta_detach(self); 724 return (error); 725 } 726 sc->mii = device_get_softc(sc->miibus); 727 sc->phy_attached = 1; 728 729 /* Disable auto-negotiation in MAC - rely on PHY layer */ 730 mvneta_update_autoneg(sc, FALSE); 731 } else if (sc->use_inband_status == TRUE) { 732 /* In-band link status */ 733 ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange, 734 mvneta_mediastatus); 735 736 /* Configure media */ 737 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX, 738 0, NULL); 739 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL); 740 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 741 0, NULL); 742 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T, 0, NULL); 743 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX, 744 0, NULL); 745 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL); 746 ifmedia_set(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO); 747 748 /* Enable auto-negotiation */ 749 mvneta_update_autoneg(sc, TRUE); 750 751 mvneta_sc_lock(sc); 752 if (MVNETA_IS_LINKUP(sc)) 753 mvneta_linkup(sc); 754 else 755 mvneta_linkdown(sc); 756 mvneta_sc_unlock(sc); 757 758 } else { 759 /* Fixed-link, use predefined values */ 760 ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange, 761 mvneta_mediastatus); 762 763 ifm_target = IFM_ETHER; 764 switch (sc->phy_speed) { 765 case 2500: 766 if (sc->phy_mode != MVNETA_PHY_SGMII && 767 sc->phy_mode != MVNETA_PHY_QSGMII) { 768 device_printf(self, 769 "2.5G speed can work only in (Q)SGMII mode\n"); 770 ether_ifdetach(sc->ifp); 771 mvneta_detach(self); 772 return (ENXIO); 773 } 774 ifm_target |= IFM_2500_T; 775 break; 776 case 1000: 777 ifm_target |= IFM_1000_T; 778 break; 779 case 100: 780 ifm_target |= IFM_100_TX; 781 break; 782 case 10: 783 ifm_target |= IFM_10_T; 784 break; 785 default: 786 ether_ifdetach(sc->ifp); 787 mvneta_detach(self); 788 return (ENXIO); 789 } 790 791 if (sc->phy_fdx) 792 ifm_target |= IFM_FDX; 793 else 794 ifm_target |= IFM_HDX; 795 796 ifmedia_add(&sc->mvneta_ifmedia, ifm_target, 0, NULL); 797 ifmedia_set(&sc->mvneta_ifmedia, ifm_target); 798 if_link_state_change(sc->ifp, LINK_STATE_UP); 799 800 if (mvneta_has_switch(self)) { 801 child = device_add_child(sc->dev, "mdio", -1); 802 if (child == NULL) { 803 ether_ifdetach(sc->ifp); 804 mvneta_detach(self); 805 return (ENXIO); 806 } 807 bus_generic_attach(sc->dev); 808 bus_generic_attach(child); 809 } 810 811 /* Configure MAC media */ 812 mvneta_update_media(sc, ifm_target); 813 } 814 815 sysctl_mvneta_init(sc); 816 817 callout_reset(&sc->tick_ch, 0, mvneta_tick, sc); 818 819 error = bus_setup_intr(self, sc->res[1], 820 INTR_TYPE_NET | INTR_MPSAFE, NULL, mvneta_intrs[0].handler, sc, 821 &sc->ih_cookie[0]); 822 if (error) { 823 device_printf(self, "could not setup %s\n", 824 mvneta_intrs[0].description); 825 ether_ifdetach(sc->ifp); 826 mvneta_detach(self); 827 return (error); 828 } 829 830 return (0); 831 } 832 833 STATIC int 834 mvneta_detach(device_t dev) 835 { 836 struct mvneta_softc *sc; 837 int q; 838 839 sc = 
device_get_softc(dev); 840 841 mvneta_stop(sc); 842 /* Detach network interface */ 843 if (sc->ifp) 844 if_free(sc->ifp); 845 846 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) 847 mvneta_ring_dealloc_rx_queue(sc, q); 848 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) 849 mvneta_ring_dealloc_tx_queue(sc, q); 850 851 if (sc->tx_dtag != NULL) 852 bus_dma_tag_destroy(sc->tx_dtag); 853 if (sc->rx_dtag != NULL) 854 bus_dma_tag_destroy(sc->rx_dtag); 855 if (sc->txmbuf_dtag != NULL) 856 bus_dma_tag_destroy(sc->txmbuf_dtag); 857 858 bus_release_resources(dev, res_spec, sc->res); 859 return (0); 860 } 861 862 /* 863 * MII 864 */ 865 STATIC int 866 mvneta_miibus_readreg(device_t dev, int phy, int reg) 867 { 868 struct mvneta_softc *sc; 869 struct ifnet *ifp; 870 uint32_t smi, val; 871 int i; 872 873 sc = device_get_softc(dev); 874 ifp = sc->ifp; 875 876 mtx_lock(&mii_mutex); 877 878 for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) { 879 if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0) 880 break; 881 DELAY(1); 882 } 883 if (i == MVNETA_PHY_TIMEOUT) { 884 if_printf(ifp, "SMI busy timeout\n"); 885 mtx_unlock(&mii_mutex); 886 return (-1); 887 } 888 889 smi = MVNETA_SMI_PHYAD(phy) | 890 MVNETA_SMI_REGAD(reg) | MVNETA_SMI_OPCODE_READ; 891 MVNETA_WRITE(sc, MVNETA_SMI, smi); 892 893 for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) { 894 if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0) 895 break; 896 DELAY(1); 897 } 898 899 if (i == MVNETA_PHY_TIMEOUT) { 900 if_printf(ifp, "SMI busy timeout\n"); 901 mtx_unlock(&mii_mutex); 902 return (-1); 903 } 904 for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) { 905 smi = MVNETA_READ(sc, MVNETA_SMI); 906 if (smi & MVNETA_SMI_READVALID) 907 break; 908 DELAY(1); 909 } 910 911 if (i == MVNETA_PHY_TIMEOUT) { 912 if_printf(ifp, "SMI busy timeout\n"); 913 mtx_unlock(&mii_mutex); 914 return (-1); 915 } 916 917 mtx_unlock(&mii_mutex); 918 919 #ifdef MVNETA_KTR 920 CTR3(KTR_SPARE2, "%s i=%d, timeout=%d\n", ifp->if_xname, i, 921 MVNETA_PHY_TIMEOUT); 922 #endif 923 924 val = smi & MVNETA_SMI_DATA_MASK; 925 926 #ifdef MVNETA_KTR 927 CTR4(KTR_SPARE2, "%s phy=%d, reg=%#x, val=%#x\n", ifp->if_xname, phy, 928 reg, val); 929 #endif 930 return (val); 931 } 932 933 STATIC int 934 mvneta_miibus_writereg(device_t dev, int phy, int reg, int val) 935 { 936 struct mvneta_softc *sc; 937 struct ifnet *ifp; 938 uint32_t smi; 939 int i; 940 941 sc = device_get_softc(dev); 942 ifp = sc->ifp; 943 #ifdef MVNETA_KTR 944 CTR4(KTR_SPARE2, "%s phy=%d, reg=%#x, val=%#x\n", ifp->if_xname, 945 phy, reg, val); 946 #endif 947 948 mtx_lock(&mii_mutex); 949 950 for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) { 951 if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0) 952 break; 953 DELAY(1); 954 } 955 if (i == MVNETA_PHY_TIMEOUT) { 956 if_printf(ifp, "SMI busy timeout\n"); 957 mtx_unlock(&mii_mutex); 958 return (0); 959 } 960 961 smi = MVNETA_SMI_PHYAD(phy) | MVNETA_SMI_REGAD(reg) | 962 MVNETA_SMI_OPCODE_WRITE | (val & MVNETA_SMI_DATA_MASK); 963 MVNETA_WRITE(sc, MVNETA_SMI, smi); 964 965 for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) { 966 if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0) 967 break; 968 DELAY(1); 969 } 970 971 mtx_unlock(&mii_mutex); 972 973 if (i == MVNETA_PHY_TIMEOUT) 974 if_printf(ifp, "phy write timed out\n"); 975 976 return (0); 977 } 978 979 STATIC void 980 mvneta_portup(struct mvneta_softc *sc) 981 { 982 int q; 983 984 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) { 985 mvneta_rx_lockq(sc, q); 986 mvneta_rx_queue_enable(sc->ifp, q); 987 mvneta_rx_unlockq(sc, q); 988 } 989 990 for (q = 0; q < MVNETA_TX_QNUM_MAX; 
q++) { 991 mvneta_tx_lockq(sc, q); 992 mvneta_tx_queue_enable(sc->ifp, q); 993 mvneta_tx_unlockq(sc, q); 994 } 995 996 } 997 998 STATIC void 999 mvneta_portdown(struct mvneta_softc *sc) 1000 { 1001 struct mvneta_rx_ring *rx; 1002 struct mvneta_tx_ring *tx; 1003 int q, cnt; 1004 uint32_t reg; 1005 1006 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) { 1007 rx = MVNETA_RX_RING(sc, q); 1008 mvneta_rx_lockq(sc, q); 1009 rx->queue_status = MVNETA_QUEUE_DISABLED; 1010 mvneta_rx_unlockq(sc, q); 1011 } 1012 1013 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) { 1014 tx = MVNETA_TX_RING(sc, q); 1015 mvneta_tx_lockq(sc, q); 1016 tx->queue_status = MVNETA_QUEUE_DISABLED; 1017 mvneta_tx_unlockq(sc, q); 1018 } 1019 1020 /* Wait for all Rx activity to terminate. */ 1021 reg = MVNETA_READ(sc, MVNETA_RQC) & MVNETA_RQC_EN_MASK; 1022 reg = MVNETA_RQC_DIS(reg); 1023 MVNETA_WRITE(sc, MVNETA_RQC, reg); 1024 cnt = 0; 1025 do { 1026 if (cnt >= RX_DISABLE_TIMEOUT) { 1027 if_printf(sc->ifp, 1028 "timeout for RX stopped. rqc 0x%x\n", reg); 1029 break; 1030 } 1031 cnt++; 1032 reg = MVNETA_READ(sc, MVNETA_RQC); 1033 } while ((reg & MVNETA_RQC_EN_MASK) != 0); 1034 1035 /* Wait for all Tx activity to terminate. */ 1036 reg = MVNETA_READ(sc, MVNETA_PIE); 1037 reg &= ~MVNETA_PIE_TXPKTINTRPTENB_MASK; 1038 MVNETA_WRITE(sc, MVNETA_PIE, reg); 1039 1040 reg = MVNETA_READ(sc, MVNETA_PRXTXTIM); 1041 reg &= ~MVNETA_PRXTXTI_TBTCQ_MASK; 1042 MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg); 1043 1044 reg = MVNETA_READ(sc, MVNETA_TQC) & MVNETA_TQC_EN_MASK; 1045 reg = MVNETA_TQC_DIS(reg); 1046 MVNETA_WRITE(sc, MVNETA_TQC, reg); 1047 cnt = 0; 1048 do { 1049 if (cnt >= TX_DISABLE_TIMEOUT) { 1050 if_printf(sc->ifp, 1051 "timeout for TX stopped. tqc 0x%x\n", reg); 1052 break; 1053 } 1054 cnt++; 1055 reg = MVNETA_READ(sc, MVNETA_TQC); 1056 } while ((reg & MVNETA_TQC_EN_MASK) != 0); 1057 1058 /* Wait for all Tx FIFO is empty */ 1059 cnt = 0; 1060 do { 1061 if (cnt >= TX_FIFO_EMPTY_TIMEOUT) { 1062 if_printf(sc->ifp, 1063 "timeout for TX FIFO drained. ps0 0x%x\n", reg); 1064 break; 1065 } 1066 cnt++; 1067 reg = MVNETA_READ(sc, MVNETA_PS0); 1068 } while (((reg & MVNETA_PS0_TXFIFOEMP) == 0) && 1069 ((reg & MVNETA_PS0_TXINPROG) != 0)); 1070 } 1071 1072 /* 1073 * Device Register Initialization 1074 * reset device registers to device driver default value. 1075 * the device is not enabled here. 1076 */ 1077 STATIC int 1078 mvneta_initreg(struct ifnet *ifp) 1079 { 1080 struct mvneta_softc *sc; 1081 int q, i; 1082 uint32_t reg; 1083 1084 sc = ifp->if_softc; 1085 #ifdef MVNETA_KTR 1086 CTR1(KTR_SPARE2, "%s initializing device register", ifp->if_xname); 1087 #endif 1088 1089 /* Disable Legacy WRR, Disable EJP, Release from reset. */ 1090 MVNETA_WRITE(sc, MVNETA_TQC_1, 0); 1091 /* Enable mbus retry. 
*/ 1092 MVNETA_WRITE(sc, MVNETA_MBUS_CONF, MVNETA_MBUS_RETRY_EN); 1093 1094 /* Init TX/RX Queue Registers */ 1095 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) { 1096 mvneta_rx_lockq(sc, q); 1097 if (mvneta_rx_queue_init(ifp, q) != 0) { 1098 device_printf(sc->dev, 1099 "initialization failed: cannot initialize queue\n"); 1100 mvneta_rx_unlockq(sc, q); 1101 return (ENOBUFS); 1102 } 1103 mvneta_rx_unlockq(sc, q); 1104 } 1105 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) { 1106 mvneta_tx_lockq(sc, q); 1107 if (mvneta_tx_queue_init(ifp, q) != 0) { 1108 device_printf(sc->dev, 1109 "initialization failed: cannot initialize queue\n"); 1110 mvneta_tx_unlockq(sc, q); 1111 return (ENOBUFS); 1112 } 1113 mvneta_tx_unlockq(sc, q); 1114 } 1115 1116 /* 1117 * Ethernet Unit Control - disable automatic PHY management by HW. 1118 * In case the port uses SMI-controlled PHY, poll its status with 1119 * mii_tick() and update MAC settings accordingly. 1120 */ 1121 reg = MVNETA_READ(sc, MVNETA_EUC); 1122 reg &= ~MVNETA_EUC_POLLING; 1123 MVNETA_WRITE(sc, MVNETA_EUC, reg); 1124 1125 /* EEE: Low Power Idle */ 1126 reg = MVNETA_LPIC0_LILIMIT(MVNETA_LPI_LI); 1127 reg |= MVNETA_LPIC0_TSLIMIT(MVNETA_LPI_TS); 1128 MVNETA_WRITE(sc, MVNETA_LPIC0, reg); 1129 1130 reg = MVNETA_LPIC1_TWLIMIT(MVNETA_LPI_TW); 1131 MVNETA_WRITE(sc, MVNETA_LPIC1, reg); 1132 1133 reg = MVNETA_LPIC2_MUSTSET; 1134 MVNETA_WRITE(sc, MVNETA_LPIC2, reg); 1135 1136 /* Port MAC Control set 0 */ 1137 reg = MVNETA_PMACC0_MUSTSET; /* must write 0x1 */ 1138 reg &= ~MVNETA_PMACC0_PORTEN; /* port is still disabled */ 1139 reg |= MVNETA_PMACC0_FRAMESIZELIMIT(MVNETA_MAX_FRAME); 1140 MVNETA_WRITE(sc, MVNETA_PMACC0, reg); 1141 1142 /* Port MAC Control set 2 */ 1143 reg = MVNETA_READ(sc, MVNETA_PMACC2); 1144 switch (sc->phy_mode) { 1145 case MVNETA_PHY_QSGMII: 1146 reg |= (MVNETA_PMACC2_PCSEN | MVNETA_PMACC2_RGMIIEN); 1147 MVNETA_WRITE(sc, MVNETA_PSERDESCFG, MVNETA_PSERDESCFG_QSGMII); 1148 break; 1149 case MVNETA_PHY_SGMII: 1150 reg |= (MVNETA_PMACC2_PCSEN | MVNETA_PMACC2_RGMIIEN); 1151 MVNETA_WRITE(sc, MVNETA_PSERDESCFG, MVNETA_PSERDESCFG_SGMII); 1152 break; 1153 case MVNETA_PHY_RGMII: 1154 case MVNETA_PHY_RGMII_ID: 1155 reg |= MVNETA_PMACC2_RGMIIEN; 1156 break; 1157 } 1158 reg |= MVNETA_PMACC2_MUSTSET; 1159 reg &= ~MVNETA_PMACC2_PORTMACRESET; 1160 MVNETA_WRITE(sc, MVNETA_PMACC2, reg); 1161 1162 /* Port Configuration Extended: enable Tx CRC generation */ 1163 reg = MVNETA_READ(sc, MVNETA_PXCX); 1164 reg &= ~MVNETA_PXCX_TXCRCDIS; 1165 MVNETA_WRITE(sc, MVNETA_PXCX, reg); 1166 1167 /* clear MIB counter registers(clear by read) */ 1168 for (i = 0; i < nitems(mvneta_mib_list); i++) { 1169 if (mvneta_mib_list[i].reg64) 1170 MVNETA_READ_MIB_8(sc, mvneta_mib_list[i].regnum); 1171 else 1172 MVNETA_READ_MIB_4(sc, mvneta_mib_list[i].regnum); 1173 } 1174 MVNETA_READ(sc, MVNETA_PDFC); 1175 MVNETA_READ(sc, MVNETA_POFC); 1176 1177 /* Set SDC register except IPGINT bits */ 1178 reg = MVNETA_SDC_RXBSZ_16_64BITWORDS; 1179 reg |= MVNETA_SDC_TXBSZ_16_64BITWORDS; 1180 reg |= MVNETA_SDC_BLMR; 1181 reg |= MVNETA_SDC_BLMT; 1182 MVNETA_WRITE(sc, MVNETA_SDC, reg); 1183 1184 return (0); 1185 } 1186 1187 STATIC void 1188 mvneta_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error) 1189 { 1190 1191 if (error != 0) 1192 return; 1193 *(bus_addr_t *)arg = segs->ds_addr; 1194 } 1195 1196 STATIC int 1197 mvneta_ring_alloc_rx_queue(struct mvneta_softc *sc, int q) 1198 { 1199 struct mvneta_rx_ring *rx; 1200 struct mvneta_buf *rxbuf; 1201 bus_dmamap_t dmap; 1202 int i, error; 1203 1204 if (q >= 
MVNETA_RX_QNUM_MAX) 1205 return (EINVAL); 1206 1207 rx = MVNETA_RX_RING(sc, q); 1208 mtx_init(&rx->ring_mtx, "mvneta_rx", NULL, MTX_DEF); 1209 /* Allocate DMA memory for Rx descriptors */ 1210 error = bus_dmamem_alloc(sc->rx_dtag, 1211 (void**)&(rx->desc), 1212 BUS_DMA_NOWAIT | BUS_DMA_ZERO, 1213 &rx->desc_map); 1214 if (error != 0 || rx->desc == NULL) 1215 goto fail; 1216 error = bus_dmamap_load(sc->rx_dtag, rx->desc_map, 1217 rx->desc, 1218 sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT, 1219 mvneta_dmamap_cb, &rx->desc_pa, BUS_DMA_NOWAIT); 1220 if (error != 0) 1221 goto fail; 1222 1223 for (i = 0; i < MVNETA_RX_RING_CNT; i++) { 1224 error = bus_dmamap_create(sc->rxbuf_dtag, 0, &dmap); 1225 if (error != 0) { 1226 device_printf(sc->dev, 1227 "Failed to create DMA map for Rx buffer num: %d\n", i); 1228 goto fail; 1229 } 1230 rxbuf = &rx->rxbuf[i]; 1231 rxbuf->dmap = dmap; 1232 rxbuf->m = NULL; 1233 } 1234 1235 return (0); 1236 fail: 1237 mvneta_ring_dealloc_rx_queue(sc, q); 1238 device_printf(sc->dev, "DMA Ring buffer allocation failure.\n"); 1239 return (error); 1240 } 1241 1242 STATIC int 1243 mvneta_ring_alloc_tx_queue(struct mvneta_softc *sc, int q) 1244 { 1245 struct mvneta_tx_ring *tx; 1246 int error; 1247 1248 if (q >= MVNETA_TX_QNUM_MAX) 1249 return (EINVAL); 1250 tx = MVNETA_TX_RING(sc, q); 1251 mtx_init(&tx->ring_mtx, "mvneta_tx", NULL, MTX_DEF); 1252 error = bus_dmamem_alloc(sc->tx_dtag, 1253 (void**)&(tx->desc), 1254 BUS_DMA_NOWAIT | BUS_DMA_ZERO, 1255 &tx->desc_map); 1256 if (error != 0 || tx->desc == NULL) 1257 goto fail; 1258 error = bus_dmamap_load(sc->tx_dtag, tx->desc_map, 1259 tx->desc, 1260 sizeof(struct mvneta_tx_desc) * MVNETA_TX_RING_CNT, 1261 mvneta_dmamap_cb, &tx->desc_pa, BUS_DMA_NOWAIT); 1262 if (error != 0) 1263 goto fail; 1264 1265 #ifdef MVNETA_MULTIQUEUE 1266 tx->br = buf_ring_alloc(MVNETA_BUFRING_SIZE, M_DEVBUF, M_NOWAIT, 1267 &tx->ring_mtx); 1268 if (tx->br == NULL) { 1269 device_printf(sc->dev, 1270 "Could not setup buffer ring for TxQ(%d)\n", q); 1271 error = ENOMEM; 1272 goto fail; 1273 } 1274 #endif 1275 1276 return (0); 1277 fail: 1278 mvneta_ring_dealloc_tx_queue(sc, q); 1279 device_printf(sc->dev, "DMA Ring buffer allocation failure.\n"); 1280 return (error); 1281 } 1282 1283 STATIC void 1284 mvneta_ring_dealloc_tx_queue(struct mvneta_softc *sc, int q) 1285 { 1286 struct mvneta_tx_ring *tx; 1287 struct mvneta_buf *txbuf; 1288 void *kva; 1289 int error; 1290 int i; 1291 1292 if (q >= MVNETA_TX_QNUM_MAX) 1293 return; 1294 tx = MVNETA_TX_RING(sc, q); 1295 1296 if (tx->taskq != NULL) { 1297 /* Remove task */ 1298 while (taskqueue_cancel(tx->taskq, &tx->task, NULL) != 0) 1299 taskqueue_drain(tx->taskq, &tx->task); 1300 } 1301 #ifdef MVNETA_MULTIQUEUE 1302 if (tx->br != NULL) 1303 drbr_free(tx->br, M_DEVBUF); 1304 #endif 1305 1306 if (sc->txmbuf_dtag != NULL) { 1307 if (mtx_name(&tx->ring_mtx) != NULL) { 1308 /* 1309 * It is assumed that maps are being loaded after mutex 1310 * is initialized. Therefore we can skip unloading maps 1311 * when mutex is empty. 
1312 */ 1313 mvneta_tx_lockq(sc, q); 1314 mvneta_ring_flush_tx_queue(sc, q); 1315 mvneta_tx_unlockq(sc, q); 1316 } 1317 for (i = 0; i < MVNETA_TX_RING_CNT; i++) { 1318 txbuf = &tx->txbuf[i]; 1319 if (txbuf->dmap != NULL) { 1320 error = bus_dmamap_destroy(sc->txmbuf_dtag, 1321 txbuf->dmap); 1322 if (error != 0) { 1323 panic("%s: map busy for Tx descriptor (Q%d, %d)", 1324 __func__, q, i); 1325 } 1326 } 1327 } 1328 } 1329 1330 if (tx->desc_pa != 0) 1331 bus_dmamap_unload(sc->tx_dtag, tx->desc_map); 1332 1333 kva = (void *)tx->desc; 1334 if (kva != NULL) 1335 bus_dmamem_free(sc->tx_dtag, tx->desc, tx->desc_map); 1336 1337 if (mtx_name(&tx->ring_mtx) != NULL) 1338 mtx_destroy(&tx->ring_mtx); 1339 1340 memset(tx, 0, sizeof(*tx)); 1341 } 1342 1343 STATIC void 1344 mvneta_ring_dealloc_rx_queue(struct mvneta_softc *sc, int q) 1345 { 1346 struct mvneta_rx_ring *rx; 1347 struct lro_ctrl *lro; 1348 void *kva; 1349 1350 if (q >= MVNETA_RX_QNUM_MAX) 1351 return; 1352 1353 rx = MVNETA_RX_RING(sc, q); 1354 1355 mvneta_ring_flush_rx_queue(sc, q); 1356 1357 if (rx->desc_pa != 0) 1358 bus_dmamap_unload(sc->rx_dtag, rx->desc_map); 1359 1360 kva = (void *)rx->desc; 1361 if (kva != NULL) 1362 bus_dmamem_free(sc->rx_dtag, rx->desc, rx->desc_map); 1363 1364 lro = &rx->lro; 1365 tcp_lro_free(lro); 1366 1367 if (mtx_name(&rx->ring_mtx) != NULL) 1368 mtx_destroy(&rx->ring_mtx); 1369 1370 memset(rx, 0, sizeof(*rx)); 1371 } 1372 1373 STATIC int 1374 mvneta_ring_init_rx_queue(struct mvneta_softc *sc, int q) 1375 { 1376 struct mvneta_rx_ring *rx; 1377 struct lro_ctrl *lro; 1378 int error; 1379 1380 if (q >= MVNETA_RX_QNUM_MAX) 1381 return (0); 1382 1383 rx = MVNETA_RX_RING(sc, q); 1384 rx->dma = rx->cpu = 0; 1385 rx->queue_th_received = MVNETA_RXTH_COUNT; 1386 rx->queue_th_time = (mvneta_get_clk() / 1000) / 10; /* 0.1 [ms] */ 1387 1388 /* Initialize LRO */ 1389 rx->lro_enabled = FALSE; 1390 if ((sc->ifp->if_capenable & IFCAP_LRO) != 0) { 1391 lro = &rx->lro; 1392 error = tcp_lro_init(lro); 1393 if (error != 0) 1394 device_printf(sc->dev, "LRO Initialization failed!\n"); 1395 else { 1396 rx->lro_enabled = TRUE; 1397 lro->ifp = sc->ifp; 1398 } 1399 } 1400 1401 return (0); 1402 } 1403 1404 STATIC int 1405 mvneta_ring_init_tx_queue(struct mvneta_softc *sc, int q) 1406 { 1407 struct mvneta_tx_ring *tx; 1408 struct mvneta_buf *txbuf; 1409 int i, error; 1410 1411 if (q >= MVNETA_TX_QNUM_MAX) 1412 return (0); 1413 1414 tx = MVNETA_TX_RING(sc, q); 1415 1416 /* Tx handle */ 1417 for (i = 0; i < MVNETA_TX_RING_CNT; i++) { 1418 txbuf = &tx->txbuf[i]; 1419 txbuf->m = NULL; 1420 /* Tx handle needs DMA map for busdma_load_mbuf() */ 1421 error = bus_dmamap_create(sc->txmbuf_dtag, 0, 1422 &txbuf->dmap); 1423 if (error != 0) { 1424 device_printf(sc->dev, 1425 "can't create dma map (tx ring %d)\n", i); 1426 return (error); 1427 } 1428 } 1429 tx->dma = tx->cpu = 0; 1430 tx->used = 0; 1431 tx->drv_error = 0; 1432 tx->queue_status = MVNETA_QUEUE_DISABLED; 1433 tx->queue_hung = FALSE; 1434 1435 tx->ifp = sc->ifp; 1436 tx->qidx = q; 1437 TASK_INIT(&tx->task, 0, mvneta_tx_task, tx); 1438 tx->taskq = taskqueue_create_fast("mvneta_tx_taskq", M_WAITOK, 1439 taskqueue_thread_enqueue, &tx->taskq); 1440 taskqueue_start_threads(&tx->taskq, 1, PI_NET, "%s: tx_taskq(%d)", 1441 device_get_nameunit(sc->dev), q); 1442 1443 return (0); 1444 } 1445 1446 STATIC void 1447 mvneta_ring_flush_tx_queue(struct mvneta_softc *sc, int q) 1448 { 1449 struct mvneta_tx_ring *tx; 1450 struct mvneta_buf *txbuf; 1451 int i; 1452 1453 tx = MVNETA_TX_RING(sc, q); 1454 
KASSERT_TX_MTX(sc, q); 1455 1456 /* Tx handle */ 1457 for (i = 0; i < MVNETA_TX_RING_CNT; i++) { 1458 txbuf = &tx->txbuf[i]; 1459 bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap); 1460 if (txbuf->m != NULL) { 1461 m_freem(txbuf->m); 1462 txbuf->m = NULL; 1463 } 1464 } 1465 tx->dma = tx->cpu = 0; 1466 tx->used = 0; 1467 } 1468 1469 STATIC void 1470 mvneta_ring_flush_rx_queue(struct mvneta_softc *sc, int q) 1471 { 1472 struct mvneta_rx_ring *rx; 1473 struct mvneta_buf *rxbuf; 1474 int i; 1475 1476 rx = MVNETA_RX_RING(sc, q); 1477 KASSERT_RX_MTX(sc, q); 1478 1479 /* Rx handle */ 1480 for (i = 0; i < MVNETA_RX_RING_CNT; i++) { 1481 rxbuf = &rx->rxbuf[i]; 1482 mvneta_rx_buf_free(sc, rxbuf); 1483 } 1484 rx->dma = rx->cpu = 0; 1485 } 1486 1487 /* 1488 * Rx/Tx Queue Control 1489 */ 1490 STATIC int 1491 mvneta_rx_queue_init(struct ifnet *ifp, int q) 1492 { 1493 struct mvneta_softc *sc; 1494 struct mvneta_rx_ring *rx; 1495 uint32_t reg; 1496 1497 sc = ifp->if_softc; 1498 KASSERT_RX_MTX(sc, q); 1499 rx = MVNETA_RX_RING(sc, q); 1500 DASSERT(rx->desc_pa != 0); 1501 1502 /* descriptor address */ 1503 MVNETA_WRITE(sc, MVNETA_PRXDQA(q), rx->desc_pa); 1504 1505 /* Rx buffer size and descriptor ring size */ 1506 reg = MVNETA_PRXDQS_BUFFERSIZE(MVNETA_PACKET_SIZE >> 3); 1507 reg |= MVNETA_PRXDQS_DESCRIPTORSQUEUESIZE(MVNETA_RX_RING_CNT); 1508 MVNETA_WRITE(sc, MVNETA_PRXDQS(q), reg); 1509 #ifdef MVNETA_KTR 1510 CTR3(KTR_SPARE2, "%s PRXDQS(%d): %#x", ifp->if_xname, q, 1511 MVNETA_READ(sc, MVNETA_PRXDQS(q))); 1512 #endif 1513 /* Rx packet offset address */ 1514 reg = MVNETA_PRXC_PACKETOFFSET(MVNETA_PACKET_OFFSET >> 3); 1515 MVNETA_WRITE(sc, MVNETA_PRXC(q), reg); 1516 #ifdef MVNETA_KTR 1517 CTR3(KTR_SPARE2, "%s PRXC(%d): %#x", ifp->if_xname, q, 1518 MVNETA_READ(sc, MVNETA_PRXC(q))); 1519 #endif 1520 1521 /* if DMA is not working, register is not updated */ 1522 DASSERT(MVNETA_READ(sc, MVNETA_PRXDQA(q)) == rx->desc_pa); 1523 return (0); 1524 } 1525 1526 STATIC int 1527 mvneta_tx_queue_init(struct ifnet *ifp, int q) 1528 { 1529 struct mvneta_softc *sc; 1530 struct mvneta_tx_ring *tx; 1531 uint32_t reg; 1532 1533 sc = ifp->if_softc; 1534 KASSERT_TX_MTX(sc, q); 1535 tx = MVNETA_TX_RING(sc, q); 1536 DASSERT(tx->desc_pa != 0); 1537 1538 /* descriptor address */ 1539 MVNETA_WRITE(sc, MVNETA_PTXDQA(q), tx->desc_pa); 1540 1541 /* descriptor ring size */ 1542 reg = MVNETA_PTXDQS_DQS(MVNETA_TX_RING_CNT); 1543 MVNETA_WRITE(sc, MVNETA_PTXDQS(q), reg); 1544 1545 /* if DMA is not working, register is not updated */ 1546 DASSERT(MVNETA_READ(sc, MVNETA_PTXDQA(q)) == tx->desc_pa); 1547 return (0); 1548 } 1549 1550 STATIC int 1551 mvneta_rx_queue_enable(struct ifnet *ifp, int q) 1552 { 1553 struct mvneta_softc *sc; 1554 struct mvneta_rx_ring *rx; 1555 uint32_t reg; 1556 1557 sc = ifp->if_softc; 1558 rx = MVNETA_RX_RING(sc, q); 1559 KASSERT_RX_MTX(sc, q); 1560 1561 /* Set Rx interrupt threshold */ 1562 reg = MVNETA_PRXDQTH_ODT(rx->queue_th_received); 1563 MVNETA_WRITE(sc, MVNETA_PRXDQTH(q), reg); 1564 1565 reg = MVNETA_PRXITTH_RITT(rx->queue_th_time); 1566 MVNETA_WRITE(sc, MVNETA_PRXITTH(q), reg); 1567 1568 /* Unmask RXTX_TH Intr. 
*/ 1569 reg = MVNETA_READ(sc, MVNETA_PRXTXTIM); 1570 reg |= MVNETA_PRXTXTI_RBICTAPQ(q); /* Rx Buffer Interrupt Coalese */ 1571 MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg); 1572 1573 /* Enable Rx queue */ 1574 reg = MVNETA_READ(sc, MVNETA_RQC) & MVNETA_RQC_EN_MASK; 1575 reg |= MVNETA_RQC_ENQ(q); 1576 MVNETA_WRITE(sc, MVNETA_RQC, reg); 1577 1578 rx->queue_status = MVNETA_QUEUE_WORKING; 1579 return (0); 1580 } 1581 1582 STATIC int 1583 mvneta_tx_queue_enable(struct ifnet *ifp, int q) 1584 { 1585 struct mvneta_softc *sc; 1586 struct mvneta_tx_ring *tx; 1587 1588 sc = ifp->if_softc; 1589 tx = MVNETA_TX_RING(sc, q); 1590 KASSERT_TX_MTX(sc, q); 1591 1592 /* Enable Tx queue */ 1593 MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_ENQ(q)); 1594 1595 tx->queue_status = MVNETA_QUEUE_IDLE; 1596 tx->queue_hung = FALSE; 1597 return (0); 1598 } 1599 1600 STATIC __inline void 1601 mvneta_rx_lockq(struct mvneta_softc *sc, int q) 1602 { 1603 1604 DASSERT(q >= 0); 1605 DASSERT(q < MVNETA_RX_QNUM_MAX); 1606 mtx_lock(&sc->rx_ring[q].ring_mtx); 1607 } 1608 1609 STATIC __inline void 1610 mvneta_rx_unlockq(struct mvneta_softc *sc, int q) 1611 { 1612 1613 DASSERT(q >= 0); 1614 DASSERT(q < MVNETA_RX_QNUM_MAX); 1615 mtx_unlock(&sc->rx_ring[q].ring_mtx); 1616 } 1617 1618 STATIC __inline int __unused 1619 mvneta_tx_trylockq(struct mvneta_softc *sc, int q) 1620 { 1621 1622 DASSERT(q >= 0); 1623 DASSERT(q < MVNETA_TX_QNUM_MAX); 1624 return (mtx_trylock(&sc->tx_ring[q].ring_mtx)); 1625 } 1626 1627 STATIC __inline void 1628 mvneta_tx_lockq(struct mvneta_softc *sc, int q) 1629 { 1630 1631 DASSERT(q >= 0); 1632 DASSERT(q < MVNETA_TX_QNUM_MAX); 1633 mtx_lock(&sc->tx_ring[q].ring_mtx); 1634 } 1635 1636 STATIC __inline void 1637 mvneta_tx_unlockq(struct mvneta_softc *sc, int q) 1638 { 1639 1640 DASSERT(q >= 0); 1641 DASSERT(q < MVNETA_TX_QNUM_MAX); 1642 mtx_unlock(&sc->tx_ring[q].ring_mtx); 1643 } 1644 1645 /* 1646 * Interrupt Handlers 1647 */ 1648 STATIC void 1649 mvneta_disable_intr(struct mvneta_softc *sc) 1650 { 1651 1652 MVNETA_WRITE(sc, MVNETA_EUIM, 0); 1653 MVNETA_WRITE(sc, MVNETA_EUIC, 0); 1654 MVNETA_WRITE(sc, MVNETA_PRXTXTIM, 0); 1655 MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0); 1656 MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0); 1657 MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0); 1658 MVNETA_WRITE(sc, MVNETA_PMIM, 0); 1659 MVNETA_WRITE(sc, MVNETA_PMIC, 0); 1660 MVNETA_WRITE(sc, MVNETA_PIE, 0); 1661 } 1662 1663 STATIC void 1664 mvneta_enable_intr(struct mvneta_softc *sc) 1665 { 1666 uint32_t reg; 1667 1668 /* Enable Summary Bit to check all interrupt cause. */ 1669 reg = MVNETA_READ(sc, MVNETA_PRXTXTIM); 1670 reg |= MVNETA_PRXTXTI_PMISCICSUMMARY; 1671 MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg); 1672 1673 if (sc->use_inband_status) { 1674 /* Enable Port MISC Intr. 
(via RXTX_TH_Summary bit) */ 1675 MVNETA_WRITE(sc, MVNETA_PMIM, MVNETA_PMI_PHYSTATUSCHNG | 1676 MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHANGE); 1677 } 1678 1679 /* Enable All Queue Interrupt */ 1680 reg = MVNETA_READ(sc, MVNETA_PIE); 1681 reg |= MVNETA_PIE_RXPKTINTRPTENB_MASK; 1682 reg |= MVNETA_PIE_TXPKTINTRPTENB_MASK; 1683 MVNETA_WRITE(sc, MVNETA_PIE, reg); 1684 } 1685 1686 STATIC void 1687 mvneta_rxtxth_intr(void *arg) 1688 { 1689 struct mvneta_softc *sc; 1690 struct ifnet *ifp; 1691 uint32_t ic, queues; 1692 1693 sc = arg; 1694 ifp = sc->ifp; 1695 #ifdef MVNETA_KTR 1696 CTR1(KTR_SPARE2, "%s got RXTX_TH_Intr", ifp->if_xname); 1697 #endif 1698 ic = MVNETA_READ(sc, MVNETA_PRXTXTIC); 1699 if (ic == 0) 1700 return; 1701 MVNETA_WRITE(sc, MVNETA_PRXTXTIC, ~ic); 1702 1703 /* Ack maintance interrupt first */ 1704 if (__predict_false((ic & MVNETA_PRXTXTI_PMISCICSUMMARY) && 1705 sc->use_inband_status)) { 1706 mvneta_sc_lock(sc); 1707 mvneta_misc_intr(sc); 1708 mvneta_sc_unlock(sc); 1709 } 1710 if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) 1711 return; 1712 /* RxTxTH interrupt */ 1713 queues = MVNETA_PRXTXTI_GET_RBICTAPQ(ic); 1714 if (__predict_true(queues)) { 1715 #ifdef MVNETA_KTR 1716 CTR1(KTR_SPARE2, "%s got PRXTXTIC: +RXEOF", ifp->if_xname); 1717 #endif 1718 /* At the moment the driver support only one RX queue. */ 1719 DASSERT(MVNETA_IS_QUEUE_SET(queues, 0)); 1720 mvneta_rx(sc, 0, 0); 1721 } 1722 } 1723 1724 STATIC int 1725 mvneta_misc_intr(struct mvneta_softc *sc) 1726 { 1727 uint32_t ic; 1728 int claimed = 0; 1729 1730 #ifdef MVNETA_KTR 1731 CTR1(KTR_SPARE2, "%s got MISC_INTR", sc->ifp->if_xname); 1732 #endif 1733 KASSERT_SC_MTX(sc); 1734 1735 for (;;) { 1736 ic = MVNETA_READ(sc, MVNETA_PMIC); 1737 ic &= MVNETA_READ(sc, MVNETA_PMIM); 1738 if (ic == 0) 1739 break; 1740 MVNETA_WRITE(sc, MVNETA_PMIC, ~ic); 1741 claimed = 1; 1742 1743 if (ic & (MVNETA_PMI_PHYSTATUSCHNG | 1744 MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHANGE)) 1745 mvneta_link_isr(sc); 1746 } 1747 return (claimed); 1748 } 1749 1750 STATIC void 1751 mvneta_tick(void *arg) 1752 { 1753 struct mvneta_softc *sc; 1754 struct mvneta_tx_ring *tx; 1755 struct mvneta_rx_ring *rx; 1756 int q; 1757 uint32_t fc_prev, fc_curr; 1758 1759 sc = arg; 1760 1761 /* 1762 * This is done before mib update to get the right stats 1763 * for this tick. 1764 */ 1765 mvneta_tx_drain(sc); 1766 1767 /* Extract previous flow-control frame received counter. */ 1768 fc_prev = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter; 1769 /* Read mib registers (clear by read). */ 1770 mvneta_update_mib(sc); 1771 /* Extract current flow-control frame received counter. */ 1772 fc_curr = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter; 1773 1774 1775 if (sc->phy_attached && sc->ifp->if_flags & IFF_UP) { 1776 mvneta_sc_lock(sc); 1777 mii_tick(sc->mii); 1778 1779 /* Adjust MAC settings */ 1780 mvneta_adjust_link(sc); 1781 mvneta_sc_unlock(sc); 1782 } 1783 1784 /* 1785 * We were unable to refill the rx queue and left the rx func, leaving 1786 * the ring without mbuf and no way to call the refill func. 1787 */ 1788 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) { 1789 rx = MVNETA_RX_RING(sc, q); 1790 if (rx->needs_refill == TRUE) { 1791 mvneta_rx_lockq(sc, q); 1792 mvneta_rx_queue_refill(sc, q); 1793 mvneta_rx_unlockq(sc, q); 1794 } 1795 } 1796 1797 /* 1798 * Watchdog: 1799 * - check if queue is mark as hung. 1800 * - ignore hung status if we received some pause frame 1801 * as hardware may have paused packet transmit. 
1802 */ 1803 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) { 1804 /* 1805 * We should take queue lock, but as we only read 1806 * queue status we can do it without lock, we may 1807 * only missdetect queue status for one tick. 1808 */ 1809 tx = MVNETA_TX_RING(sc, q); 1810 1811 if (tx->queue_hung && (fc_curr - fc_prev) == 0) 1812 goto timeout; 1813 } 1814 1815 callout_schedule(&sc->tick_ch, hz); 1816 return; 1817 1818 timeout: 1819 if_printf(sc->ifp, "watchdog timeout\n"); 1820 1821 mvneta_sc_lock(sc); 1822 sc->counter_watchdog++; 1823 sc->counter_watchdog_mib++; 1824 /* Trigger reinitialize sequence. */ 1825 mvneta_stop_locked(sc); 1826 mvneta_init_locked(sc); 1827 mvneta_sc_unlock(sc); 1828 } 1829 1830 STATIC void 1831 mvneta_qflush(struct ifnet *ifp) 1832 { 1833 #ifdef MVNETA_MULTIQUEUE 1834 struct mvneta_softc *sc; 1835 struct mvneta_tx_ring *tx; 1836 struct mbuf *m; 1837 size_t q; 1838 1839 sc = ifp->if_softc; 1840 1841 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) { 1842 tx = MVNETA_TX_RING(sc, q); 1843 mvneta_tx_lockq(sc, q); 1844 while ((m = buf_ring_dequeue_sc(tx->br)) != NULL) 1845 m_freem(m); 1846 mvneta_tx_unlockq(sc, q); 1847 } 1848 #endif 1849 if_qflush(ifp); 1850 } 1851 1852 STATIC void 1853 mvneta_tx_task(void *arg, int pending) 1854 { 1855 struct mvneta_softc *sc; 1856 struct mvneta_tx_ring *tx; 1857 struct ifnet *ifp; 1858 int error; 1859 1860 tx = arg; 1861 ifp = tx->ifp; 1862 sc = ifp->if_softc; 1863 1864 mvneta_tx_lockq(sc, tx->qidx); 1865 error = mvneta_xmit_locked(sc, tx->qidx); 1866 mvneta_tx_unlockq(sc, tx->qidx); 1867 1868 /* Try again */ 1869 if (__predict_false(error != 0 && error != ENETDOWN)) { 1870 pause("mvneta_tx_task_sleep", 1); 1871 taskqueue_enqueue(tx->taskq, &tx->task); 1872 } 1873 } 1874 1875 STATIC int 1876 mvneta_xmitfast_locked(struct mvneta_softc *sc, int q, struct mbuf **m) 1877 { 1878 struct mvneta_tx_ring *tx; 1879 struct ifnet *ifp; 1880 int error; 1881 1882 KASSERT_TX_MTX(sc, q); 1883 tx = MVNETA_TX_RING(sc, q); 1884 error = 0; 1885 1886 ifp = sc->ifp; 1887 1888 /* Dont enqueue packet if the queue is disabled. */ 1889 if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED)) { 1890 m_freem(*m); 1891 *m = NULL; 1892 return (ENETDOWN); 1893 } 1894 1895 /* Reclaim mbuf if above threshold. */ 1896 if (__predict_true(tx->used > MVNETA_TX_RECLAIM_COUNT)) 1897 mvneta_tx_queue_complete(sc, q); 1898 1899 /* Do not call transmit path if queue is already too full. */ 1900 if (__predict_false(tx->used > 1901 MVNETA_TX_RING_CNT - MVNETA_TX_SEGLIMIT)) 1902 return (ENOBUFS); 1903 1904 error = mvneta_tx_queue(sc, m, q); 1905 if (__predict_false(error != 0)) 1906 return (error); 1907 1908 /* Send a copy of the frame to the BPF listener */ 1909 ETHER_BPF_MTAP(ifp, *m); 1910 1911 /* Set watchdog on */ 1912 tx->watchdog_time = ticks; 1913 tx->queue_status = MVNETA_QUEUE_WORKING; 1914 1915 return (error); 1916 } 1917 1918 #ifdef MVNETA_MULTIQUEUE 1919 STATIC int 1920 mvneta_transmit(struct ifnet *ifp, struct mbuf *m) 1921 { 1922 struct mvneta_softc *sc; 1923 struct mvneta_tx_ring *tx; 1924 int error; 1925 int q; 1926 1927 sc = ifp->if_softc; 1928 1929 /* Use default queue if there is no flow id as thread can migrate. */ 1930 if (__predict_true(M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)) 1931 q = m->m_pkthdr.flowid % MVNETA_TX_QNUM_MAX; 1932 else 1933 q = 0; 1934 1935 tx = MVNETA_TX_RING(sc, q); 1936 1937 /* If buf_ring is full start transmit immediatly. 
*/ 1938 if (buf_ring_full(tx->br)) { 1939 mvneta_tx_lockq(sc, q); 1940 mvneta_xmit_locked(sc, q); 1941 mvneta_tx_unlockq(sc, q); 1942 } 1943 1944 /* 1945 * If the buf_ring is empty we will not reorder packets. 1946 * If the lock is available transmit without using buf_ring. 1947 */ 1948 if (buf_ring_empty(tx->br) && mvneta_tx_trylockq(sc, q) != 0) { 1949 error = mvneta_xmitfast_locked(sc, q, &m); 1950 mvneta_tx_unlockq(sc, q); 1951 if (__predict_true(error == 0)) 1952 return (0); 1953 1954 /* Transmit can fail in fastpath. */ 1955 if (__predict_false(m == NULL)) 1956 return (error); 1957 } 1958 1959 /* Enqueue then schedule taskqueue. */ 1960 error = drbr_enqueue(ifp, tx->br, m); 1961 if (__predict_false(error != 0)) 1962 return (error); 1963 1964 taskqueue_enqueue(tx->taskq, &tx->task); 1965 return (0); 1966 } 1967 1968 STATIC int 1969 mvneta_xmit_locked(struct mvneta_softc *sc, int q) 1970 { 1971 struct ifnet *ifp; 1972 struct mvneta_tx_ring *tx; 1973 struct mbuf *m; 1974 int error; 1975 1976 KASSERT_TX_MTX(sc, q); 1977 ifp = sc->ifp; 1978 tx = MVNETA_TX_RING(sc, q); 1979 error = 0; 1980 1981 while ((m = drbr_peek(ifp, tx->br)) != NULL) { 1982 error = mvneta_xmitfast_locked(sc, q, &m); 1983 if (__predict_false(error != 0)) { 1984 if (m != NULL) 1985 drbr_putback(ifp, tx->br, m); 1986 else 1987 drbr_advance(ifp, tx->br); 1988 break; 1989 } 1990 drbr_advance(ifp, tx->br); 1991 } 1992 1993 return (error); 1994 } 1995 #else /* !MVNETA_MULTIQUEUE */ 1996 STATIC void 1997 mvneta_start(struct ifnet *ifp) 1998 { 1999 struct mvneta_softc *sc; 2000 struct mvneta_tx_ring *tx; 2001 int error; 2002 2003 sc = ifp->if_softc; 2004 tx = MVNETA_TX_RING(sc, 0); 2005 2006 mvneta_tx_lockq(sc, 0); 2007 error = mvneta_xmit_locked(sc, 0); 2008 mvneta_tx_unlockq(sc, 0); 2009 /* Handle retransmit in the background taskq. 
*/ 2010 if (__predict_false(error != 0 && error != ENETDOWN)) 2011 taskqueue_enqueue(tx->taskq, &tx->task); 2012 } 2013 2014 STATIC int 2015 mvneta_xmit_locked(struct mvneta_softc *sc, int q) 2016 { 2017 struct ifnet *ifp; 2018 struct mvneta_tx_ring *tx; 2019 struct mbuf *m; 2020 int error; 2021 2022 KASSERT_TX_MTX(sc, q); 2023 ifp = sc->ifp; 2024 tx = MVNETA_TX_RING(sc, 0); 2025 error = 0; 2026 2027 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { 2028 IFQ_DRV_DEQUEUE(&ifp->if_snd, m); 2029 if (m == NULL) 2030 break; 2031 2032 error = mvneta_xmitfast_locked(sc, q, &m); 2033 if (__predict_false(error != 0)) { 2034 if (m != NULL) 2035 IFQ_DRV_PREPEND(&ifp->if_snd, m); 2036 break; 2037 } 2038 } 2039 2040 return (error); 2041 } 2042 #endif 2043 2044 STATIC int 2045 mvneta_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 2046 { 2047 struct mvneta_softc *sc; 2048 struct mvneta_rx_ring *rx; 2049 struct ifreq *ifr; 2050 int error, mask; 2051 uint32_t flags; 2052 int q; 2053 2054 error = 0; 2055 sc = ifp->if_softc; 2056 ifr = (struct ifreq *)data; 2057 switch (cmd) { 2058 case SIOCSIFFLAGS: 2059 mvneta_sc_lock(sc); 2060 if (ifp->if_flags & IFF_UP) { 2061 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2062 flags = ifp->if_flags ^ sc->mvneta_if_flags; 2063 2064 if (flags != 0) 2065 sc->mvneta_if_flags = ifp->if_flags; 2066 2067 if ((flags & IFF_PROMISC) != 0) 2068 mvneta_filter_setup(sc); 2069 } else { 2070 mvneta_init_locked(sc); 2071 sc->mvneta_if_flags = ifp->if_flags; 2072 if (sc->phy_attached) 2073 mii_mediachg(sc->mii); 2074 mvneta_sc_unlock(sc); 2075 break; 2076 } 2077 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2078 mvneta_stop_locked(sc); 2079 2080 sc->mvneta_if_flags = ifp->if_flags; 2081 mvneta_sc_unlock(sc); 2082 break; 2083 case SIOCSIFCAP: 2084 if (ifp->if_mtu > MVNETA_MAX_CSUM_MTU && 2085 ifr->ifr_reqcap & IFCAP_TXCSUM) 2086 ifr->ifr_reqcap &= ~IFCAP_TXCSUM; 2087 mask = ifp->if_capenable ^ ifr->ifr_reqcap; 2088 if (mask & IFCAP_HWCSUM) { 2089 ifp->if_capenable &= ~IFCAP_HWCSUM; 2090 ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap; 2091 if (ifp->if_capenable & IFCAP_TXCSUM) 2092 ifp->if_hwassist = CSUM_IP | CSUM_TCP | 2093 CSUM_UDP; 2094 else 2095 ifp->if_hwassist = 0; 2096 } 2097 if (mask & IFCAP_LRO) { 2098 mvneta_sc_lock(sc); 2099 ifp->if_capenable ^= IFCAP_LRO; 2100 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 2101 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) { 2102 rx = MVNETA_RX_RING(sc, q); 2103 rx->lro_enabled = !rx->lro_enabled; 2104 } 2105 } 2106 mvneta_sc_unlock(sc); 2107 } 2108 VLAN_CAPABILITIES(ifp); 2109 break; 2110 case SIOCSIFMEDIA: 2111 if ((IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T || 2112 IFM_SUBTYPE(ifr->ifr_media) == IFM_2500_T) && 2113 (ifr->ifr_media & IFM_FDX) == 0) { 2114 device_printf(sc->dev, 2115 "%s half-duplex unsupported\n", 2116 IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T ? 
2117 "1000Base-T" : 2118 "2500Base-T"); 2119 error = EINVAL; 2120 break; 2121 } 2122 case SIOCGIFMEDIA: /* FALLTHROUGH */ 2123 case SIOCGIFXMEDIA: 2124 if (!sc->phy_attached) 2125 error = ifmedia_ioctl(ifp, ifr, &sc->mvneta_ifmedia, 2126 cmd); 2127 else 2128 error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, 2129 cmd); 2130 break; 2131 case SIOCSIFMTU: 2132 if (ifr->ifr_mtu < 68 || ifr->ifr_mtu > MVNETA_MAX_FRAME - 2133 MVNETA_ETHER_SIZE) { 2134 error = EINVAL; 2135 } else { 2136 ifp->if_mtu = ifr->ifr_mtu; 2137 mvneta_sc_lock(sc); 2138 if (ifp->if_mtu > MVNETA_MAX_CSUM_MTU) { 2139 ifp->if_capenable &= ~IFCAP_TXCSUM; 2140 ifp->if_hwassist = 0; 2141 } else { 2142 ifp->if_capenable |= IFCAP_TXCSUM; 2143 ifp->if_hwassist = CSUM_IP | CSUM_TCP | 2144 CSUM_UDP; 2145 } 2146 2147 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2148 /* Trigger reinitialize sequence */ 2149 mvneta_stop_locked(sc); 2150 mvneta_init_locked(sc); 2151 } 2152 mvneta_sc_unlock(sc); 2153 } 2154 break; 2155 2156 default: 2157 error = ether_ioctl(ifp, cmd, data); 2158 break; 2159 } 2160 2161 return (error); 2162 } 2163 2164 STATIC void 2165 mvneta_init_locked(void *arg) 2166 { 2167 struct mvneta_softc *sc; 2168 struct ifnet *ifp; 2169 uint32_t reg; 2170 int q, cpu; 2171 2172 sc = arg; 2173 ifp = sc->ifp; 2174 2175 if (!device_is_attached(sc->dev) || 2176 (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2177 return; 2178 2179 mvneta_disable_intr(sc); 2180 callout_stop(&sc->tick_ch); 2181 2182 /* Get the latest mac address */ 2183 bcopy(IF_LLADDR(ifp), sc->enaddr, ETHER_ADDR_LEN); 2184 mvneta_set_mac_address(sc, sc->enaddr); 2185 mvneta_filter_setup(sc); 2186 2187 /* Start DMA Engine */ 2188 MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000000); 2189 MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000000); 2190 MVNETA_WRITE(sc, MVNETA_PACC, MVNETA_PACC_ACCELERATIONMODE_EDM); 2191 2192 /* Enable port */ 2193 reg = MVNETA_READ(sc, MVNETA_PMACC0); 2194 reg |= MVNETA_PMACC0_PORTEN; 2195 MVNETA_WRITE(sc, MVNETA_PMACC0, reg); 2196 2197 /* Allow access to each TXQ/RXQ from both CPU's */ 2198 for (cpu = 0; cpu < mp_ncpus; ++cpu) 2199 MVNETA_WRITE(sc, MVNETA_PCP2Q(cpu), 2200 MVNETA_PCP2Q_TXQEN_MASK | MVNETA_PCP2Q_RXQEN_MASK); 2201 2202 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) { 2203 mvneta_rx_lockq(sc, q); 2204 mvneta_rx_queue_refill(sc, q); 2205 mvneta_rx_unlockq(sc, q); 2206 } 2207 2208 if (!sc->phy_attached) 2209 mvneta_linkup(sc); 2210 2211 /* Enable interrupt */ 2212 mvneta_enable_intr(sc); 2213 2214 /* Set Counter */ 2215 callout_schedule(&sc->tick_ch, hz); 2216 2217 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2218 } 2219 2220 STATIC void 2221 mvneta_init(void *arg) 2222 { 2223 struct mvneta_softc *sc; 2224 2225 sc = arg; 2226 mvneta_sc_lock(sc); 2227 mvneta_init_locked(sc); 2228 if (sc->phy_attached) 2229 mii_mediachg(sc->mii); 2230 mvneta_sc_unlock(sc); 2231 } 2232 2233 /* ARGSUSED */ 2234 STATIC void 2235 mvneta_stop_locked(struct mvneta_softc *sc) 2236 { 2237 struct ifnet *ifp; 2238 struct mvneta_rx_ring *rx; 2239 struct mvneta_tx_ring *tx; 2240 uint32_t reg; 2241 int q; 2242 2243 ifp = sc->ifp; 2244 if (ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 2245 return; 2246 2247 mvneta_disable_intr(sc); 2248 2249 callout_stop(&sc->tick_ch); 2250 2251 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2252 2253 /* Link down */ 2254 if (sc->linkup == TRUE) 2255 mvneta_linkdown(sc); 2256 2257 /* Reset the MAC Port Enable bit */ 2258 reg = MVNETA_READ(sc, MVNETA_PMACC0); 2259 reg &= ~MVNETA_PMACC0_PORTEN; 2260 MVNETA_WRITE(sc, MVNETA_PMACC0, reg); 2261 2262 /* Disable 
each of queue */ 2263 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) { 2264 rx = MVNETA_RX_RING(sc, q); 2265 2266 mvneta_rx_lockq(sc, q); 2267 mvneta_ring_flush_rx_queue(sc, q); 2268 mvneta_rx_unlockq(sc, q); 2269 } 2270 2271 /* 2272 * Hold Reset state of DMA Engine 2273 * (must write 0x0 to restart it) 2274 */ 2275 MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000001); 2276 MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000001); 2277 2278 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) { 2279 tx = MVNETA_TX_RING(sc, q); 2280 2281 mvneta_tx_lockq(sc, q); 2282 mvneta_ring_flush_tx_queue(sc, q); 2283 mvneta_tx_unlockq(sc, q); 2284 } 2285 } 2286 2287 STATIC void 2288 mvneta_stop(struct mvneta_softc *sc) 2289 { 2290 2291 mvneta_sc_lock(sc); 2292 mvneta_stop_locked(sc); 2293 mvneta_sc_unlock(sc); 2294 } 2295 2296 STATIC int 2297 mvneta_mediachange(struct ifnet *ifp) 2298 { 2299 struct mvneta_softc *sc; 2300 2301 sc = ifp->if_softc; 2302 2303 if (!sc->phy_attached && !sc->use_inband_status) { 2304 /* We shouldn't be here */ 2305 if_printf(ifp, "Cannot change media in fixed-link mode!\n"); 2306 return (0); 2307 } 2308 2309 if (sc->use_inband_status) { 2310 mvneta_update_media(sc, sc->mvneta_ifmedia.ifm_media); 2311 return (0); 2312 } 2313 2314 mvneta_sc_lock(sc); 2315 2316 /* Update PHY */ 2317 mii_mediachg(sc->mii); 2318 2319 mvneta_sc_unlock(sc); 2320 2321 return (0); 2322 } 2323 2324 STATIC void 2325 mvneta_get_media(struct mvneta_softc *sc, struct ifmediareq *ifmr) 2326 { 2327 uint32_t psr; 2328 2329 psr = MVNETA_READ(sc, MVNETA_PSR); 2330 2331 /* Speed */ 2332 if (psr & MVNETA_PSR_GMIISPEED) 2333 ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_1000_T); 2334 else if (psr & MVNETA_PSR_MIISPEED) 2335 ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_100_TX); 2336 else if (psr & MVNETA_PSR_LINKUP) 2337 ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_10_T); 2338 2339 /* Duplex */ 2340 if (psr & MVNETA_PSR_FULLDX) 2341 ifmr->ifm_active |= IFM_FDX; 2342 2343 /* Link */ 2344 ifmr->ifm_status = IFM_AVALID; 2345 if (psr & MVNETA_PSR_LINKUP) 2346 ifmr->ifm_status |= IFM_ACTIVE; 2347 } 2348 2349 STATIC void 2350 mvneta_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 2351 { 2352 struct mvneta_softc *sc; 2353 struct mii_data *mii; 2354 2355 sc = ifp->if_softc; 2356 2357 if (!sc->phy_attached && !sc->use_inband_status) { 2358 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; 2359 return; 2360 } 2361 2362 mvneta_sc_lock(sc); 2363 2364 if (sc->use_inband_status) { 2365 mvneta_get_media(sc, ifmr); 2366 mvneta_sc_unlock(sc); 2367 return; 2368 } 2369 2370 mii = sc->mii; 2371 mii_pollstat(mii); 2372 2373 ifmr->ifm_active = mii->mii_media_active; 2374 ifmr->ifm_status = mii->mii_media_status; 2375 2376 mvneta_sc_unlock(sc); 2377 } 2378 2379 /* 2380 * Link State Notify 2381 */ 2382 STATIC void 2383 mvneta_update_autoneg(struct mvneta_softc *sc, int enable) 2384 { 2385 int reg; 2386 2387 if (enable) { 2388 reg = MVNETA_READ(sc, MVNETA_PANC); 2389 reg &= ~(MVNETA_PANC_FORCELINKFAIL | MVNETA_PANC_FORCELINKPASS | 2390 MVNETA_PANC_ANFCEN); 2391 reg |= MVNETA_PANC_ANDUPLEXEN | MVNETA_PANC_ANSPEEDEN | 2392 MVNETA_PANC_INBANDANEN; 2393 MVNETA_WRITE(sc, MVNETA_PANC, reg); 2394 2395 reg = MVNETA_READ(sc, MVNETA_PMACC2); 2396 reg |= MVNETA_PMACC2_INBANDANMODE; 2397 MVNETA_WRITE(sc, MVNETA_PMACC2, reg); 2398 2399 reg = MVNETA_READ(sc, MVNETA_PSOMSCD); 2400 reg |= MVNETA_PSOMSCD_ENABLE; 2401 MVNETA_WRITE(sc, MVNETA_PSOMSCD, reg); 2402 } else { 2403 reg = MVNETA_READ(sc, MVNETA_PANC); 2404 reg &= ~(MVNETA_PANC_FORCELINKFAIL | MVNETA_PANC_FORCELINKPASS | 2405 
MVNETA_PANC_ANDUPLEXEN | MVNETA_PANC_ANSPEEDEN | 2406 MVNETA_PANC_INBANDANEN); 2407 MVNETA_WRITE(sc, MVNETA_PANC, reg); 2408 2409 reg = MVNETA_READ(sc, MVNETA_PMACC2); 2410 reg &= ~MVNETA_PMACC2_INBANDANMODE; 2411 MVNETA_WRITE(sc, MVNETA_PMACC2, reg); 2412 2413 reg = MVNETA_READ(sc, MVNETA_PSOMSCD); 2414 reg &= ~MVNETA_PSOMSCD_ENABLE; 2415 MVNETA_WRITE(sc, MVNETA_PSOMSCD, reg); 2416 } 2417 } 2418 2419 STATIC int 2420 mvneta_update_media(struct mvneta_softc *sc, int media) 2421 { 2422 int reg, err; 2423 boolean_t running; 2424 2425 err = 0; 2426 2427 mvneta_sc_lock(sc); 2428 2429 mvneta_linkreset(sc); 2430 2431 running = (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; 2432 if (running) 2433 mvneta_stop_locked(sc); 2434 2435 sc->autoneg = (IFM_SUBTYPE(media) == IFM_AUTO); 2436 2437 if (sc->use_inband_status) 2438 mvneta_update_autoneg(sc, IFM_SUBTYPE(media) == IFM_AUTO); 2439 2440 mvneta_update_eee(sc); 2441 mvneta_update_fc(sc); 2442 2443 if (IFM_SUBTYPE(media) != IFM_AUTO) { 2444 reg = MVNETA_READ(sc, MVNETA_PANC); 2445 reg &= ~(MVNETA_PANC_SETGMIISPEED | 2446 MVNETA_PANC_SETMIISPEED | 2447 MVNETA_PANC_SETFULLDX); 2448 if (IFM_SUBTYPE(media) == IFM_1000_T || 2449 IFM_SUBTYPE(media) == IFM_2500_T) { 2450 if ((media & IFM_FDX) == 0) { 2451 device_printf(sc->dev, 2452 "%s half-duplex unsupported\n", 2453 IFM_SUBTYPE(media) == IFM_1000_T ? 2454 "1000Base-T" : 2455 "2500Base-T"); 2456 err = EINVAL; 2457 goto out; 2458 } 2459 reg |= MVNETA_PANC_SETGMIISPEED; 2460 } else if (IFM_SUBTYPE(media) == IFM_100_TX) 2461 reg |= MVNETA_PANC_SETMIISPEED; 2462 2463 if (media & IFM_FDX) 2464 reg |= MVNETA_PANC_SETFULLDX; 2465 2466 MVNETA_WRITE(sc, MVNETA_PANC, reg); 2467 } 2468 out: 2469 if (running) 2470 mvneta_init_locked(sc); 2471 mvneta_sc_unlock(sc); 2472 return (err); 2473 } 2474 2475 STATIC void 2476 mvneta_adjust_link(struct mvneta_softc *sc) 2477 { 2478 boolean_t phy_linkup; 2479 int reg; 2480 2481 /* Update eee/fc */ 2482 mvneta_update_eee(sc); 2483 mvneta_update_fc(sc); 2484 2485 /* Check for link change */ 2486 phy_linkup = (sc->mii->mii_media_status & 2487 (IFM_AVALID | IFM_ACTIVE)) == (IFM_AVALID | IFM_ACTIVE); 2488 2489 if (sc->linkup != phy_linkup) 2490 mvneta_linkupdate(sc, phy_linkup); 2491 2492 /* Don't update media on disabled link */ 2493 if (!phy_linkup) 2494 return; 2495 2496 /* Check for media type change */ 2497 if (sc->mvneta_media != sc->mii->mii_media_active) { 2498 sc->mvneta_media = sc->mii->mii_media_active; 2499 2500 reg = MVNETA_READ(sc, MVNETA_PANC); 2501 reg &= ~(MVNETA_PANC_SETGMIISPEED | 2502 MVNETA_PANC_SETMIISPEED | 2503 MVNETA_PANC_SETFULLDX); 2504 if (IFM_SUBTYPE(sc->mvneta_media) == IFM_1000_T || 2505 IFM_SUBTYPE(sc->mvneta_media) == IFM_2500_T) { 2506 reg |= MVNETA_PANC_SETGMIISPEED; 2507 } else if (IFM_SUBTYPE(sc->mvneta_media) == IFM_100_TX) 2508 reg |= MVNETA_PANC_SETMIISPEED; 2509 2510 if (sc->mvneta_media & IFM_FDX) 2511 reg |= MVNETA_PANC_SETFULLDX; 2512 2513 MVNETA_WRITE(sc, MVNETA_PANC, reg); 2514 } 2515 } 2516 2517 STATIC void 2518 mvneta_link_isr(struct mvneta_softc *sc) 2519 { 2520 int linkup; 2521 2522 KASSERT_SC_MTX(sc); 2523 2524 linkup = MVNETA_IS_LINKUP(sc) ? TRUE : FALSE; 2525 if (sc->linkup == linkup) 2526 return; 2527 2528 if (linkup == TRUE) 2529 mvneta_linkup(sc); 2530 else 2531 mvneta_linkdown(sc); 2532 2533 #ifdef DEBUG 2534 log(LOG_DEBUG, 2535 "%s: link %s\n", device_xname(sc->dev), linkup ? 
"up" : "down"); 2536 #endif 2537 } 2538 2539 STATIC void 2540 mvneta_linkupdate(struct mvneta_softc *sc, boolean_t linkup) 2541 { 2542 2543 KASSERT_SC_MTX(sc); 2544 2545 if (linkup == TRUE) 2546 mvneta_linkup(sc); 2547 else 2548 mvneta_linkdown(sc); 2549 2550 #ifdef DEBUG 2551 log(LOG_DEBUG, 2552 "%s: link %s\n", device_xname(sc->dev), linkup ? "up" : "down"); 2553 #endif 2554 } 2555 2556 STATIC void 2557 mvneta_update_eee(struct mvneta_softc *sc) 2558 { 2559 uint32_t reg; 2560 2561 KASSERT_SC_MTX(sc); 2562 2563 /* set EEE parameters */ 2564 reg = MVNETA_READ(sc, MVNETA_LPIC1); 2565 if (sc->cf_lpi) 2566 reg |= MVNETA_LPIC1_LPIRE; 2567 else 2568 reg &= ~MVNETA_LPIC1_LPIRE; 2569 MVNETA_WRITE(sc, MVNETA_LPIC1, reg); 2570 } 2571 2572 STATIC void 2573 mvneta_update_fc(struct mvneta_softc *sc) 2574 { 2575 uint32_t reg; 2576 2577 KASSERT_SC_MTX(sc); 2578 2579 reg = MVNETA_READ(sc, MVNETA_PANC); 2580 if (sc->cf_fc) { 2581 /* Flow control negotiation */ 2582 reg |= MVNETA_PANC_PAUSEADV; 2583 reg |= MVNETA_PANC_ANFCEN; 2584 } else { 2585 /* Disable flow control negotiation */ 2586 reg &= ~MVNETA_PANC_PAUSEADV; 2587 reg &= ~MVNETA_PANC_ANFCEN; 2588 } 2589 2590 MVNETA_WRITE(sc, MVNETA_PANC, reg); 2591 } 2592 2593 STATIC void 2594 mvneta_linkup(struct mvneta_softc *sc) 2595 { 2596 uint32_t reg; 2597 2598 KASSERT_SC_MTX(sc); 2599 2600 if (!sc->use_inband_status) { 2601 reg = MVNETA_READ(sc, MVNETA_PANC); 2602 reg |= MVNETA_PANC_FORCELINKPASS; 2603 reg &= ~MVNETA_PANC_FORCELINKFAIL; 2604 MVNETA_WRITE(sc, MVNETA_PANC, reg); 2605 } 2606 2607 mvneta_qflush(sc->ifp); 2608 mvneta_portup(sc); 2609 sc->linkup = TRUE; 2610 if_link_state_change(sc->ifp, LINK_STATE_UP); 2611 } 2612 2613 STATIC void 2614 mvneta_linkdown(struct mvneta_softc *sc) 2615 { 2616 uint32_t reg; 2617 2618 KASSERT_SC_MTX(sc); 2619 2620 if (!sc->use_inband_status) { 2621 reg = MVNETA_READ(sc, MVNETA_PANC); 2622 reg &= ~MVNETA_PANC_FORCELINKPASS; 2623 reg |= MVNETA_PANC_FORCELINKFAIL; 2624 MVNETA_WRITE(sc, MVNETA_PANC, reg); 2625 } 2626 2627 mvneta_portdown(sc); 2628 mvneta_qflush(sc->ifp); 2629 sc->linkup = FALSE; 2630 if_link_state_change(sc->ifp, LINK_STATE_DOWN); 2631 } 2632 2633 STATIC void 2634 mvneta_linkreset(struct mvneta_softc *sc) 2635 { 2636 struct mii_softc *mii; 2637 2638 if (sc->phy_attached) { 2639 /* Force reset PHY */ 2640 mii = LIST_FIRST(&sc->mii->mii_phys); 2641 if (mii) 2642 mii_phy_reset(mii); 2643 } 2644 } 2645 2646 /* 2647 * Tx Subroutines 2648 */ 2649 STATIC int 2650 mvneta_tx_queue(struct mvneta_softc *sc, struct mbuf **mbufp, int q) 2651 { 2652 struct ifnet *ifp; 2653 bus_dma_segment_t txsegs[MVNETA_TX_SEGLIMIT]; 2654 struct mbuf *mtmp, *mbuf; 2655 struct mvneta_tx_ring *tx; 2656 struct mvneta_buf *txbuf; 2657 struct mvneta_tx_desc *t; 2658 uint32_t ptxsu; 2659 int start, used, error, i, txnsegs; 2660 2661 mbuf = *mbufp; 2662 tx = MVNETA_TX_RING(sc, q); 2663 DASSERT(tx->used >= 0); 2664 DASSERT(tx->used <= MVNETA_TX_RING_CNT); 2665 t = NULL; 2666 ifp = sc->ifp; 2667 2668 if (__predict_false(mbuf->m_flags & M_VLANTAG)) { 2669 mbuf = ether_vlanencap(mbuf, mbuf->m_pkthdr.ether_vtag); 2670 if (mbuf == NULL) { 2671 tx->drv_error++; 2672 *mbufp = NULL; 2673 return (ENOBUFS); 2674 } 2675 mbuf->m_flags &= ~M_VLANTAG; 2676 *mbufp = mbuf; 2677 } 2678 2679 if (__predict_false(mbuf->m_next != NULL && 2680 (mbuf->m_pkthdr.csum_flags & 2681 (CSUM_IP | CSUM_TCP | CSUM_UDP)) != 0)) { 2682 if (M_WRITABLE(mbuf) == 0) { 2683 mtmp = m_dup(mbuf, M_NOWAIT); 2684 m_freem(mbuf); 2685 if (mtmp == NULL) { 2686 tx->drv_error++; 2687 *mbufp = 
NULL; 2688 return (ENOBUFS); 2689 } 2690 *mbufp = mbuf = mtmp; 2691 } 2692 } 2693 2694 /* load mbuf using dmamap of 1st descriptor */ 2695 txbuf = &tx->txbuf[tx->cpu]; 2696 error = bus_dmamap_load_mbuf_sg(sc->txmbuf_dtag, 2697 txbuf->dmap, mbuf, txsegs, &txnsegs, 2698 BUS_DMA_NOWAIT); 2699 if (__predict_false(error != 0)) { 2700 #ifdef MVNETA_KTR 2701 CTR3(KTR_SPARE2, "%s:%u bus_dmamap_load_mbuf_sg error=%d", ifp->if_xname, q, error); 2702 #endif 2703 /* This is the only recoverable error (except EFBIG). */ 2704 if (error != ENOMEM) { 2705 tx->drv_error++; 2706 m_freem(mbuf); 2707 *mbufp = NULL; 2708 return (ENOBUFS); 2709 } 2710 return (error); 2711 } 2712 2713 if (__predict_false(txnsegs <= 0 2714 || (txnsegs + tx->used) > MVNETA_TX_RING_CNT)) { 2715 /* we have no enough descriptors or mbuf is broken */ 2716 #ifdef MVNETA_KTR 2717 CTR3(KTR_SPARE2, "%s:%u not enough descriptors txnsegs=%d", 2718 ifp->if_xname, q, txnsegs); 2719 #endif 2720 bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap); 2721 return (ENOBUFS); 2722 } 2723 DASSERT(txbuf->m == NULL); 2724 2725 /* remember mbuf using 1st descriptor */ 2726 txbuf->m = mbuf; 2727 bus_dmamap_sync(sc->txmbuf_dtag, txbuf->dmap, 2728 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 2729 2730 /* load to tx descriptors */ 2731 start = tx->cpu; 2732 used = 0; 2733 for (i = 0; i < txnsegs; i++) { 2734 t = &tx->desc[tx->cpu]; 2735 t->command = 0; 2736 t->l4ichk = 0; 2737 t->flags = 0; 2738 if (__predict_true(i == 0)) { 2739 /* 1st descriptor */ 2740 t->command |= MVNETA_TX_CMD_W_PACKET_OFFSET(0); 2741 t->command |= MVNETA_TX_CMD_F; 2742 mvneta_tx_set_csumflag(ifp, t, mbuf); 2743 } 2744 t->bufptr_pa = txsegs[i].ds_addr; 2745 t->bytecnt = txsegs[i].ds_len; 2746 tx->cpu = tx_counter_adv(tx->cpu, 1); 2747 2748 tx->used++; 2749 used++; 2750 } 2751 /* t is last descriptor here */ 2752 DASSERT(t != NULL); 2753 t->command |= MVNETA_TX_CMD_L|MVNETA_TX_CMD_PADDING; 2754 2755 bus_dmamap_sync(sc->tx_dtag, tx->desc_map, 2756 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 2757 2758 while (__predict_false(used > 255)) { 2759 ptxsu = MVNETA_PTXSU_NOWD(255); 2760 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu); 2761 used -= 255; 2762 } 2763 if (__predict_true(used > 0)) { 2764 ptxsu = MVNETA_PTXSU_NOWD(used); 2765 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu); 2766 } 2767 return (0); 2768 } 2769 2770 STATIC void 2771 mvneta_tx_set_csumflag(struct ifnet *ifp, 2772 struct mvneta_tx_desc *t, struct mbuf *m) 2773 { 2774 struct ether_header *eh; 2775 int csum_flags; 2776 uint32_t iphl, ipoff; 2777 struct ip *ip; 2778 2779 iphl = ipoff = 0; 2780 csum_flags = ifp->if_hwassist & m->m_pkthdr.csum_flags; 2781 eh = mtod(m, struct ether_header *); 2782 switch (ntohs(eh->ether_type)) { 2783 case ETHERTYPE_IP: 2784 ipoff = ETHER_HDR_LEN; 2785 break; 2786 case ETHERTYPE_IPV6: 2787 return; 2788 case ETHERTYPE_VLAN: 2789 ipoff = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 2790 break; 2791 } 2792 2793 if (__predict_true(csum_flags & (CSUM_IP|CSUM_IP_TCP|CSUM_IP_UDP))) { 2794 ip = (struct ip *)(m->m_data + ipoff); 2795 iphl = ip->ip_hl<<2; 2796 t->command |= MVNETA_TX_CMD_L3_IP4; 2797 } else { 2798 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE; 2799 return; 2800 } 2801 2802 2803 /* L3 */ 2804 if (csum_flags & CSUM_IP) { 2805 t->command |= MVNETA_TX_CMD_IP4_CHECKSUM; 2806 } 2807 2808 /* L4 */ 2809 if (csum_flags & CSUM_IP_TCP) { 2810 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG; 2811 t->command |= MVNETA_TX_CMD_L4_TCP; 2812 } else if (csum_flags & CSUM_IP_UDP) { 2813 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG; 
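		/*
		 * As in the TCP case above: request L4 checksum generation
		 * for a non-fragmented datagram, then mark the L4 protocol
		 * as UDP in the descriptor command word.
		 */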
2814 t->command |= MVNETA_TX_CMD_L4_UDP; 2815 } else 2816 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE; 2817 2818 t->l4ichk = 0; 2819 t->command |= MVNETA_TX_CMD_IP_HEADER_LEN(iphl >> 2); 2820 t->command |= MVNETA_TX_CMD_L3_OFFSET(ipoff); 2821 } 2822 2823 STATIC void 2824 mvneta_tx_queue_complete(struct mvneta_softc *sc, int q) 2825 { 2826 struct mvneta_tx_ring *tx; 2827 struct mvneta_buf *txbuf; 2828 struct mvneta_tx_desc *t; 2829 uint32_t ptxs, ptxsu, ndesc; 2830 int i; 2831 2832 KASSERT_TX_MTX(sc, q); 2833 2834 tx = MVNETA_TX_RING(sc, q); 2835 if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED)) 2836 return; 2837 2838 ptxs = MVNETA_READ(sc, MVNETA_PTXS(q)); 2839 ndesc = MVNETA_PTXS_GET_TBC(ptxs); 2840 2841 if (__predict_false(ndesc == 0)) { 2842 if (tx->used == 0) 2843 tx->queue_status = MVNETA_QUEUE_IDLE; 2844 else if (tx->queue_status == MVNETA_QUEUE_WORKING && 2845 ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG)) 2846 tx->queue_hung = TRUE; 2847 return; 2848 } 2849 2850 #ifdef MVNETA_KTR 2851 CTR3(KTR_SPARE2, "%s:%u tx_complete begin ndesc=%u", 2852 sc->ifp->if_xname, q, ndesc); 2853 #endif 2854 2855 bus_dmamap_sync(sc->tx_dtag, tx->desc_map, 2856 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2857 2858 for (i = 0; i < ndesc; i++) { 2859 t = &tx->desc[tx->dma]; 2860 #ifdef MVNETA_KTR 2861 if (t->flags & MVNETA_TX_F_ES) 2862 CTR3(KTR_SPARE2, "%s tx error queue %d desc %d", 2863 sc->ifp->if_xname, q, tx->dma); 2864 #endif 2865 txbuf = &tx->txbuf[tx->dma]; 2866 if (__predict_true(txbuf->m != NULL)) { 2867 DASSERT((t->command & MVNETA_TX_CMD_F) != 0); 2868 bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap); 2869 m_freem(txbuf->m); 2870 txbuf->m = NULL; 2871 } 2872 else 2873 DASSERT((t->flags & MVNETA_TX_CMD_F) == 0); 2874 tx->dma = tx_counter_adv(tx->dma, 1); 2875 tx->used--; 2876 } 2877 DASSERT(tx->used >= 0); 2878 DASSERT(tx->used <= MVNETA_TX_RING_CNT); 2879 while (__predict_false(ndesc > 255)) { 2880 ptxsu = MVNETA_PTXSU_NORB(255); 2881 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu); 2882 ndesc -= 255; 2883 } 2884 if (__predict_true(ndesc > 0)) { 2885 ptxsu = MVNETA_PTXSU_NORB(ndesc); 2886 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu); 2887 } 2888 #ifdef MVNETA_KTR 2889 CTR5(KTR_SPARE2, "%s:%u tx_complete tx_cpu=%d tx_dma=%d tx_used=%d", 2890 sc->ifp->if_xname, q, tx->cpu, tx->dma, tx->used); 2891 #endif 2892 2893 tx->watchdog_time = ticks; 2894 2895 if (tx->used == 0) 2896 tx->queue_status = MVNETA_QUEUE_IDLE; 2897 } 2898 2899 /* 2900 * Do a final TX complete when TX is idle. 2901 */ 2902 STATIC void 2903 mvneta_tx_drain(struct mvneta_softc *sc) 2904 { 2905 struct mvneta_tx_ring *tx; 2906 int q; 2907 2908 /* 2909 * Handle trailing mbuf on TX queue. 2910 * Check is done lockess to avoid TX path contention. 
2911 */ 2912 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) { 2913 tx = MVNETA_TX_RING(sc, q); 2914 if ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG_TXCOMP && 2915 tx->used > 0) { 2916 mvneta_tx_lockq(sc, q); 2917 mvneta_tx_queue_complete(sc, q); 2918 mvneta_tx_unlockq(sc, q); 2919 } 2920 } 2921 } 2922 2923 /* 2924 * Rx Subroutines 2925 */ 2926 STATIC int 2927 mvneta_rx(struct mvneta_softc *sc, int q, int count) 2928 { 2929 uint32_t prxs, npkt; 2930 int more; 2931 2932 more = 0; 2933 mvneta_rx_lockq(sc, q); 2934 prxs = MVNETA_READ(sc, MVNETA_PRXS(q)); 2935 npkt = MVNETA_PRXS_GET_ODC(prxs); 2936 if (__predict_false(npkt == 0)) 2937 goto out; 2938 2939 if (count > 0 && npkt > count) { 2940 more = 1; 2941 npkt = count; 2942 } 2943 mvneta_rx_queue(sc, q, npkt); 2944 out: 2945 mvneta_rx_unlockq(sc, q); 2946 return more; 2947 } 2948 2949 /* 2950 * Helper routine for updating PRXSU register of a given queue. 2951 * Handles number of processed descriptors bigger than maximum acceptable value. 2952 */ 2953 STATIC __inline void 2954 mvneta_prxsu_update(struct mvneta_softc *sc, int q, int processed) 2955 { 2956 uint32_t prxsu; 2957 2958 while (__predict_false(processed > 255)) { 2959 prxsu = MVNETA_PRXSU_NOOFPROCESSEDDESCRIPTORS(255); 2960 MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu); 2961 processed -= 255; 2962 } 2963 prxsu = MVNETA_PRXSU_NOOFPROCESSEDDESCRIPTORS(processed); 2964 MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu); 2965 } 2966 2967 static __inline void 2968 mvneta_prefetch(void *p) 2969 { 2970 2971 __builtin_prefetch(p); 2972 } 2973 2974 STATIC void 2975 mvneta_rx_queue(struct mvneta_softc *sc, int q, int npkt) 2976 { 2977 struct ifnet *ifp; 2978 struct mvneta_rx_ring *rx; 2979 struct mvneta_rx_desc *r; 2980 struct mvneta_buf *rxbuf; 2981 struct mbuf *m; 2982 struct lro_ctrl *lro; 2983 struct lro_entry *queued; 2984 void *pktbuf; 2985 int i, pktlen, processed, ndma; 2986 2987 KASSERT_RX_MTX(sc, q); 2988 2989 ifp = sc->ifp; 2990 rx = MVNETA_RX_RING(sc, q); 2991 processed = 0; 2992 2993 if (__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED)) 2994 return; 2995 2996 bus_dmamap_sync(sc->rx_dtag, rx->desc_map, 2997 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2998 2999 for (i = 0; i < npkt; i++) { 3000 /* Prefetch next desc, rxbuf. */ 3001 ndma = rx_counter_adv(rx->dma, 1); 3002 mvneta_prefetch(&rx->desc[ndma]); 3003 mvneta_prefetch(&rx->rxbuf[ndma]); 3004 3005 /* get descriptor and packet */ 3006 r = &rx->desc[rx->dma]; 3007 rxbuf = &rx->rxbuf[rx->dma]; 3008 m = rxbuf->m; 3009 rxbuf->m = NULL; 3010 DASSERT(m != NULL); 3011 bus_dmamap_sync(sc->rxbuf_dtag, rxbuf->dmap, 3012 BUS_DMASYNC_POSTREAD); 3013 bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap); 3014 /* Prefetch mbuf header. */ 3015 mvneta_prefetch(m); 3016 3017 processed++; 3018 /* Drop desc with error status or not in a single buffer. */ 3019 DASSERT((r->status & (MVNETA_RX_F|MVNETA_RX_L)) == 3020 (MVNETA_RX_F|MVNETA_RX_L)); 3021 if (__predict_false((r->status & MVNETA_RX_ES) || 3022 (r->status & (MVNETA_RX_F|MVNETA_RX_L)) != 3023 (MVNETA_RX_F|MVNETA_RX_L))) 3024 goto rx_error; 3025 3026 /* 3027 * [ OFF | MH | PKT | CRC ] 3028 * bytecnt cover MH, PKT, CRC 3029 */ 3030 pktlen = r->bytecnt - ETHER_CRC_LEN - MVNETA_HWHEADER_SIZE; 3031 pktbuf = (uint8_t *)rx->rxbuf_virt_addr[rx->dma] + MVNETA_PACKET_OFFSET + 3032 MVNETA_HWHEADER_SIZE; 3033 3034 /* Prefetch mbuf data. */ 3035 mvneta_prefetch(pktbuf); 3036 3037 /* Write value to mbuf (avoid read). 
*/ 3038 m->m_data = pktbuf; 3039 m->m_len = m->m_pkthdr.len = pktlen; 3040 m->m_pkthdr.rcvif = ifp; 3041 mvneta_rx_set_csumflag(ifp, r, m); 3042 3043 /* Increase rx_dma before releasing the lock. */ 3044 rx->dma = ndma; 3045 3046 if (__predict_false(rx->lro_enabled && 3047 ((r->status & MVNETA_RX_L3_IP) != 0) && 3048 ((r->status & MVNETA_RX_L4_MASK) == MVNETA_RX_L4_TCP) && 3049 (m->m_pkthdr.csum_flags & 3050 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) == 3051 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR))) { 3052 if (rx->lro.lro_cnt != 0) { 3053 if (tcp_lro_rx(&rx->lro, m, 0) == 0) 3054 goto rx_done; 3055 } 3056 } 3057 3058 mvneta_rx_unlockq(sc, q); 3059 (*ifp->if_input)(ifp, m); 3060 mvneta_rx_lockq(sc, q); 3061 /* 3062 * Check whether this queue has been disabled in the 3063 * meantime. If yes, then clear LRO and exit. 3064 */ 3065 if(__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED)) 3066 goto rx_lro; 3067 rx_done: 3068 /* Refresh receive ring to avoid stall and minimize jitter. */ 3069 if (processed >= MVNETA_RX_REFILL_COUNT) { 3070 mvneta_prxsu_update(sc, q, processed); 3071 mvneta_rx_queue_refill(sc, q); 3072 processed = 0; 3073 } 3074 continue; 3075 rx_error: 3076 m_freem(m); 3077 rx->dma = ndma; 3078 /* Refresh receive ring to avoid stall and minimize jitter. */ 3079 if (processed >= MVNETA_RX_REFILL_COUNT) { 3080 mvneta_prxsu_update(sc, q, processed); 3081 mvneta_rx_queue_refill(sc, q); 3082 processed = 0; 3083 } 3084 } 3085 #ifdef MVNETA_KTR 3086 CTR3(KTR_SPARE2, "%s:%u %u packets received", ifp->if_xname, q, npkt); 3087 #endif 3088 /* DMA status update */ 3089 mvneta_prxsu_update(sc, q, processed); 3090 /* Refill the rest of buffers if there are any to refill */ 3091 mvneta_rx_queue_refill(sc, q); 3092 3093 rx_lro: 3094 /* 3095 * Flush any outstanding LRO work 3096 */ 3097 lro = &rx->lro; 3098 while (__predict_false((queued = LIST_FIRST(&lro->lro_active)) != NULL)) { 3099 LIST_REMOVE(LIST_FIRST((&lro->lro_active)), next); 3100 tcp_lro_flush(lro, queued); 3101 } 3102 } 3103 3104 STATIC void 3105 mvneta_rx_buf_free(struct mvneta_softc *sc, struct mvneta_buf *rxbuf) 3106 { 3107 3108 bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap); 3109 /* This will remove all data at once */ 3110 m_freem(rxbuf->m); 3111 } 3112 3113 STATIC void 3114 mvneta_rx_queue_refill(struct mvneta_softc *sc, int q) 3115 { 3116 struct mvneta_rx_ring *rx; 3117 struct mvneta_rx_desc *r; 3118 struct mvneta_buf *rxbuf; 3119 bus_dma_segment_t segs; 3120 struct mbuf *m; 3121 uint32_t prxs, prxsu, ndesc; 3122 int npkt, refill, nsegs, error; 3123 3124 KASSERT_RX_MTX(sc, q); 3125 3126 rx = MVNETA_RX_RING(sc, q); 3127 prxs = MVNETA_READ(sc, MVNETA_PRXS(q)); 3128 ndesc = MVNETA_PRXS_GET_NODC(prxs) + MVNETA_PRXS_GET_ODC(prxs); 3129 refill = MVNETA_RX_RING_CNT - ndesc; 3130 #ifdef MVNETA_KTR 3131 CTR3(KTR_SPARE2, "%s:%u refill %u packets", sc->ifp->if_xname, q, 3132 refill); 3133 #endif 3134 if (__predict_false(refill <= 0)) 3135 return; 3136 3137 for (npkt = 0; npkt < refill; npkt++) { 3138 rxbuf = &rx->rxbuf[rx->cpu]; 3139 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 3140 if (__predict_false(m == NULL)) { 3141 error = ENOBUFS; 3142 break; 3143 } 3144 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 3145 3146 error = bus_dmamap_load_mbuf_sg(sc->rxbuf_dtag, rxbuf->dmap, 3147 m, &segs, &nsegs, BUS_DMA_NOWAIT); 3148 if (__predict_false(error != 0 || nsegs != 1)) { 3149 KASSERT(1, ("Failed to load Rx mbuf DMA map")); 3150 m_freem(m); 3151 break; 3152 } 3153 3154 /* Add the packet to the ring */ 3155 rxbuf->m = m; 3156 r = 
&rx->desc[rx->cpu]; 3157 r->bufptr_pa = segs.ds_addr; 3158 rx->rxbuf_virt_addr[rx->cpu] = m->m_data; 3159 3160 rx->cpu = rx_counter_adv(rx->cpu, 1); 3161 } 3162 if (npkt == 0) { 3163 if (refill == MVNETA_RX_RING_CNT) 3164 rx->needs_refill = TRUE; 3165 return; 3166 } 3167 3168 rx->needs_refill = FALSE; 3169 bus_dmamap_sync(sc->rx_dtag, rx->desc_map, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 3170 3171 while (__predict_false(npkt > 255)) { 3172 prxsu = MVNETA_PRXSU_NOOFNEWDESCRIPTORS(255); 3173 MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu); 3174 npkt -= 255; 3175 } 3176 if (__predict_true(npkt > 0)) { 3177 prxsu = MVNETA_PRXSU_NOOFNEWDESCRIPTORS(npkt); 3178 MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu); 3179 } 3180 } 3181 3182 STATIC __inline void 3183 mvneta_rx_set_csumflag(struct ifnet *ifp, 3184 struct mvneta_rx_desc *r, struct mbuf *m) 3185 { 3186 uint32_t csum_flags; 3187 3188 csum_flags = 0; 3189 if (__predict_false((r->status & 3190 (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP)) == 0)) 3191 return; /* not a IP packet */ 3192 3193 /* L3 */ 3194 if (__predict_true((r->status & MVNETA_RX_IP_HEADER_OK) == 3195 MVNETA_RX_IP_HEADER_OK)) 3196 csum_flags |= CSUM_L3_CALC|CSUM_L3_VALID; 3197 3198 if (__predict_true((r->status & (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP)) == 3199 (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP))) { 3200 /* L4 */ 3201 switch (r->status & MVNETA_RX_L4_MASK) { 3202 case MVNETA_RX_L4_TCP: 3203 case MVNETA_RX_L4_UDP: 3204 csum_flags |= CSUM_L4_CALC; 3205 if (__predict_true((r->status & 3206 MVNETA_RX_L4_CHECKSUM_OK) == MVNETA_RX_L4_CHECKSUM_OK)) { 3207 csum_flags |= CSUM_L4_VALID; 3208 m->m_pkthdr.csum_data = htons(0xffff); 3209 } 3210 break; 3211 case MVNETA_RX_L4_OTH: 3212 default: 3213 break; 3214 } 3215 } 3216 m->m_pkthdr.csum_flags = csum_flags; 3217 } 3218 3219 /* 3220 * MAC address filter 3221 */ 3222 STATIC void 3223 mvneta_filter_setup(struct mvneta_softc *sc) 3224 { 3225 struct ifnet *ifp; 3226 uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT]; 3227 uint32_t pxc; 3228 int i; 3229 3230 KASSERT_SC_MTX(sc); 3231 3232 memset(dfut, 0, sizeof(dfut)); 3233 memset(dfsmt, 0, sizeof(dfsmt)); 3234 memset(dfomt, 0, sizeof(dfomt)); 3235 3236 ifp = sc->ifp; 3237 ifp->if_flags |= IFF_ALLMULTI; 3238 if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) { 3239 for (i = 0; i < MVNETA_NDFSMT; i++) { 3240 dfsmt[i] = dfomt[i] = 3241 MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) | 3242 MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) | 3243 MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) | 3244 MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS); 3245 } 3246 } 3247 3248 pxc = MVNETA_READ(sc, MVNETA_PXC); 3249 pxc &= ~(MVNETA_PXC_UPM | MVNETA_PXC_RXQ_MASK | MVNETA_PXC_RXQARP_MASK | 3250 MVNETA_PXC_TCPQ_MASK | MVNETA_PXC_UDPQ_MASK | MVNETA_PXC_BPDUQ_MASK); 3251 pxc |= MVNETA_PXC_RXQ(MVNETA_RX_QNUM_MAX-1); 3252 pxc |= MVNETA_PXC_RXQARP(MVNETA_RX_QNUM_MAX-1); 3253 pxc |= MVNETA_PXC_TCPQ(MVNETA_RX_QNUM_MAX-1); 3254 pxc |= MVNETA_PXC_UDPQ(MVNETA_RX_QNUM_MAX-1); 3255 pxc |= MVNETA_PXC_BPDUQ(MVNETA_RX_QNUM_MAX-1); 3256 pxc |= MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP; 3257 if (ifp->if_flags & IFF_BROADCAST) { 3258 pxc &= ~(MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP); 3259 } 3260 if (ifp->if_flags & IFF_PROMISC) { 3261 pxc |= MVNETA_PXC_UPM; 3262 } 3263 MVNETA_WRITE(sc, MVNETA_PXC, pxc); 3264 3265 /* Set Destination Address Filter Unicast Table */ 3266 if (ifp->if_flags & IFF_PROMISC) { 3267 /* pass all unicast addresses */ 3268 for (i = 0; i < MVNETA_NDFUT; i++) { 3269 
dfut[i] = 3270 MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) | 3271 MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) | 3272 MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) | 3273 MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS); 3274 } 3275 } else { 3276 i = sc->enaddr[5] & 0xf; /* last nibble */ 3277 dfut[i>>2] = MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS); 3278 } 3279 MVNETA_WRITE_REGION(sc, MVNETA_DFUT(0), dfut, MVNETA_NDFUT); 3280 3281 /* Set Destination Address Filter Multicast Tables */ 3282 MVNETA_WRITE_REGION(sc, MVNETA_DFSMT(0), dfsmt, MVNETA_NDFSMT); 3283 MVNETA_WRITE_REGION(sc, MVNETA_DFOMT(0), dfomt, MVNETA_NDFOMT); 3284 } 3285 3286 /* 3287 * sysctl(9) 3288 */ 3289 STATIC int 3290 sysctl_read_mib(SYSCTL_HANDLER_ARGS) 3291 { 3292 struct mvneta_sysctl_mib *arg; 3293 struct mvneta_softc *sc; 3294 uint64_t val; 3295 3296 arg = (struct mvneta_sysctl_mib *)arg1; 3297 if (arg == NULL) 3298 return (EINVAL); 3299 3300 sc = arg->sc; 3301 if (sc == NULL) 3302 return (EINVAL); 3303 if (arg->index < 0 || arg->index > MVNETA_PORTMIB_NOCOUNTER) 3304 return (EINVAL); 3305 3306 mvneta_sc_lock(sc); 3307 val = arg->counter; 3308 mvneta_sc_unlock(sc); 3309 return sysctl_handle_64(oidp, &val, 0, req); 3310 } 3311 3312 3313 STATIC int 3314 sysctl_clear_mib(SYSCTL_HANDLER_ARGS) 3315 { 3316 struct mvneta_softc *sc; 3317 int err, val; 3318 3319 val = 0; 3320 sc = (struct mvneta_softc *)arg1; 3321 if (sc == NULL) 3322 return (EINVAL); 3323 3324 err = sysctl_handle_int(oidp, &val, 0, req); 3325 if (err != 0) 3326 return (err); 3327 3328 if (val < 0 || val > 1) 3329 return (EINVAL); 3330 3331 if (val == 1) { 3332 mvneta_sc_lock(sc); 3333 mvneta_clear_mib(sc); 3334 mvneta_sc_unlock(sc); 3335 } 3336 3337 return (0); 3338 } 3339 3340 STATIC int 3341 sysctl_set_queue_rxthtime(SYSCTL_HANDLER_ARGS) 3342 { 3343 struct mvneta_sysctl_queue *arg; 3344 struct mvneta_rx_ring *rx; 3345 struct mvneta_softc *sc; 3346 uint32_t reg, time_mvtclk; 3347 int err, time_us; 3348 3349 rx = NULL; 3350 arg = (struct mvneta_sysctl_queue *)arg1; 3351 if (arg == NULL) 3352 return (EINVAL); 3353 if (arg->queue < 0 || arg->queue > MVNETA_RX_RING_CNT) 3354 return (EINVAL); 3355 if (arg->rxtx != MVNETA_SYSCTL_RX) 3356 return (EINVAL); 3357 3358 sc = arg->sc; 3359 if (sc == NULL) 3360 return (EINVAL); 3361 3362 /* read queue length */ 3363 mvneta_sc_lock(sc); 3364 mvneta_rx_lockq(sc, arg->queue); 3365 rx = MVNETA_RX_RING(sc, arg->queue); 3366 time_mvtclk = rx->queue_th_time; 3367 time_us = ((uint64_t)time_mvtclk * 1000ULL * 1000ULL) / mvneta_get_clk(); 3368 mvneta_rx_unlockq(sc, arg->queue); 3369 mvneta_sc_unlock(sc); 3370 3371 err = sysctl_handle_int(oidp, &time_us, 0, req); 3372 if (err != 0) 3373 return (err); 3374 3375 mvneta_sc_lock(sc); 3376 mvneta_rx_lockq(sc, arg->queue); 3377 3378 /* update queue length (0[sec] - 1[sec]) */ 3379 if (time_us < 0 || time_us > (1000 * 1000)) { 3380 mvneta_rx_unlockq(sc, arg->queue); 3381 mvneta_sc_unlock(sc); 3382 return (EINVAL); 3383 } 3384 time_mvtclk = 3385 (uint64_t)mvneta_get_clk() * (uint64_t)time_us / (1000ULL * 1000ULL); 3386 rx->queue_th_time = time_mvtclk; 3387 reg = MVNETA_PRXITTH_RITT(rx->queue_th_time); 3388 MVNETA_WRITE(sc, MVNETA_PRXITTH(arg->queue), reg); 3389 mvneta_rx_unlockq(sc, arg->queue); 3390 mvneta_sc_unlock(sc); 3391 3392 return (0); 3393 } 3394 3395 STATIC void 3396 sysctl_mvneta_init(struct mvneta_softc *sc) 3397 { 3398 struct sysctl_ctx_list *ctx; 3399 struct sysctl_oid_list *children; 3400 struct sysctl_oid_list *rxchildren; 3401 struct sysctl_oid_list 
*qchildren, *mchildren; 3402 struct sysctl_oid *tree; 3403 int i, q; 3404 struct mvneta_sysctl_queue *rxarg; 3405 #define MVNETA_SYSCTL_NAME(num) "queue" # num 3406 static const char *sysctl_queue_names[] = { 3407 MVNETA_SYSCTL_NAME(0), MVNETA_SYSCTL_NAME(1), 3408 MVNETA_SYSCTL_NAME(2), MVNETA_SYSCTL_NAME(3), 3409 MVNETA_SYSCTL_NAME(4), MVNETA_SYSCTL_NAME(5), 3410 MVNETA_SYSCTL_NAME(6), MVNETA_SYSCTL_NAME(7), 3411 }; 3412 #undef MVNETA_SYSCTL_NAME 3413 3414 #ifndef NO_SYSCTL_DESCR 3415 #define MVNETA_SYSCTL_DESCR(num) "configuration parameters for queue " # num 3416 static const char *sysctl_queue_descrs[] = { 3417 MVNETA_SYSCTL_DESCR(0), MVNETA_SYSCTL_DESCR(1), 3418 MVNETA_SYSCTL_DESCR(2), MVNETA_SYSCTL_DESCR(3), 3419 MVNETA_SYSCTL_DESCR(4), MVNETA_SYSCTL_DESCR(5), 3420 MVNETA_SYSCTL_DESCR(6), MVNETA_SYSCTL_DESCR(7), 3421 }; 3422 #undef MVNETA_SYSCTL_DESCR 3423 #endif 3424 3425 3426 ctx = device_get_sysctl_ctx(sc->dev); 3427 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); 3428 3429 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rx", 3430 CTLFLAG_RD, 0, "NETA RX"); 3431 rxchildren = SYSCTL_CHILDREN(tree); 3432 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "mib", 3433 CTLFLAG_RD, 0, "NETA MIB"); 3434 mchildren = SYSCTL_CHILDREN(tree); 3435 3436 3437 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "flow_control", 3438 CTLFLAG_RW, &sc->cf_fc, 0, "flow control"); 3439 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lpi", 3440 CTLFLAG_RW, &sc->cf_lpi, 0, "Low Power Idle"); 3441 3442 /* 3443 * MIB access 3444 */ 3445 /* dev.mvneta.[unit].mib.<mibs> */ 3446 for (i = 0; i < MVNETA_PORTMIB_NOCOUNTER; i++) { 3447 struct mvneta_sysctl_mib *mib_arg = &sc->sysctl_mib[i]; 3448 3449 mib_arg->sc = sc; 3450 mib_arg->index = i; 3451 SYSCTL_ADD_PROC(ctx, mchildren, OID_AUTO, 3452 mvneta_mib_list[i].sysctl_name, 3453 CTLTYPE_U64|CTLFLAG_RD, (void *)mib_arg, 0, 3454 sysctl_read_mib, "I", mvneta_mib_list[i].desc); 3455 } 3456 SYSCTL_ADD_UQUAD(ctx, mchildren, OID_AUTO, "rx_discard", 3457 CTLFLAG_RD, &sc->counter_pdfc, "Port Rx Discard Frame Counter"); 3458 SYSCTL_ADD_UQUAD(ctx, mchildren, OID_AUTO, "overrun", 3459 CTLFLAG_RD, &sc->counter_pofc, "Port Overrun Frame Counter"); 3460 SYSCTL_ADD_UINT(ctx, mchildren, OID_AUTO, "watchdog", 3461 CTLFLAG_RD, &sc->counter_watchdog, 0, "TX Watchdog Counter"); 3462 3463 SYSCTL_ADD_PROC(ctx, mchildren, OID_AUTO, "reset", 3464 CTLTYPE_INT|CTLFLAG_RW, (void *)sc, 0, 3465 sysctl_clear_mib, "I", "Reset MIB counters"); 3466 3467 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) { 3468 rxarg = &sc->sysctl_rx_queue[q]; 3469 3470 rxarg->sc = sc; 3471 rxarg->queue = q; 3472 rxarg->rxtx = MVNETA_SYSCTL_RX; 3473 3474 /* hw.mvneta.mvneta[unit].rx.[queue] */ 3475 tree = SYSCTL_ADD_NODE(ctx, rxchildren, OID_AUTO, 3476 sysctl_queue_names[q], CTLFLAG_RD, 0, 3477 sysctl_queue_descrs[q]); 3478 qchildren = SYSCTL_CHILDREN(tree); 3479 3480 /* hw.mvneta.mvneta[unit].rx.[queue].threshold_timer_us */ 3481 SYSCTL_ADD_PROC(ctx, qchildren, OID_AUTO, "threshold_timer_us", 3482 CTLTYPE_UINT | CTLFLAG_RW, rxarg, 0, 3483 sysctl_set_queue_rxthtime, "I", 3484 "interrupt coalescing threshold timer [us]"); 3485 } 3486 } 3487 3488 /* 3489 * MIB 3490 */ 3491 STATIC void 3492 mvneta_clear_mib(struct mvneta_softc *sc) 3493 { 3494 int i; 3495 3496 KASSERT_SC_MTX(sc); 3497 3498 for (i = 0; i < nitems(mvneta_mib_list); i++) { 3499 if (mvneta_mib_list[i].reg64) 3500 MVNETA_READ_MIB_8(sc, mvneta_mib_list[i].regnum); 3501 else 3502 MVNETA_READ_MIB_4(sc, mvneta_mib_list[i].regnum); 3503 sc->sysctl_mib[i].counter = 0; 3504 } 
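	/*
	 * Also read and discard the Rx discard (PDFC) and overrun (POFC)
	 * frame counters, and zero the copies cached for sysctl, so the
	 * exported statistics restart from a clean state.
	 */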
3505 MVNETA_READ(sc, MVNETA_PDFC); 3506 sc->counter_pdfc = 0; 3507 MVNETA_READ(sc, MVNETA_POFC); 3508 sc->counter_pofc = 0; 3509 sc->counter_watchdog = 0; 3510 } 3511 3512 STATIC void 3513 mvneta_update_mib(struct mvneta_softc *sc) 3514 { 3515 struct mvneta_tx_ring *tx; 3516 int i; 3517 uint64_t val; 3518 uint32_t reg; 3519 3520 for (i = 0; i < nitems(mvneta_mib_list); i++) { 3521 3522 if (mvneta_mib_list[i].reg64) 3523 val = MVNETA_READ_MIB_8(sc, mvneta_mib_list[i].regnum); 3524 else 3525 val = MVNETA_READ_MIB_4(sc, mvneta_mib_list[i].regnum); 3526 3527 if (val == 0) 3528 continue; 3529 3530 sc->sysctl_mib[i].counter += val; 3531 switch (mvneta_mib_list[i].regnum) { 3532 case MVNETA_MIB_RX_GOOD_OCT: 3533 if_inc_counter(sc->ifp, IFCOUNTER_IBYTES, val); 3534 break; 3535 case MVNETA_MIB_RX_BAD_FRAME: 3536 if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, val); 3537 break; 3538 case MVNETA_MIB_RX_GOOD_FRAME: 3539 if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, val); 3540 break; 3541 case MVNETA_MIB_RX_MCAST_FRAME: 3542 if_inc_counter(sc->ifp, IFCOUNTER_IMCASTS, val); 3543 break; 3544 case MVNETA_MIB_TX_GOOD_OCT: 3545 if_inc_counter(sc->ifp, IFCOUNTER_OBYTES, val); 3546 break; 3547 case MVNETA_MIB_TX_GOOD_FRAME: 3548 if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, val); 3549 break; 3550 case MVNETA_MIB_TX_MCAST_FRAME: 3551 if_inc_counter(sc->ifp, IFCOUNTER_OMCASTS, val); 3552 break; 3553 case MVNETA_MIB_MAC_COL: 3554 if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, val); 3555 break; 3556 case MVNETA_MIB_TX_MAC_TRNS_ERR: 3557 case MVNETA_MIB_TX_EXCES_COL: 3558 case MVNETA_MIB_MAC_LATE_COL: 3559 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, val); 3560 break; 3561 } 3562 } 3563 3564 reg = MVNETA_READ(sc, MVNETA_PDFC); 3565 sc->counter_pdfc += reg; 3566 if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg); 3567 reg = MVNETA_READ(sc, MVNETA_POFC); 3568 sc->counter_pofc += reg; 3569 if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg); 3570 3571 /* TX watchdog. */ 3572 if (sc->counter_watchdog_mib > 0) { 3573 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, sc->counter_watchdog_mib); 3574 sc->counter_watchdog_mib = 0; 3575 } 3576 /* 3577 * TX driver errors: 3578 * We do not take the queue locks, to avoid disrupting the TX path. 3579 * At worst we miss one drv_error update, which will be picked up 3580 * on the next MIB update. We may also clear the counter while the 3581 * TX path is incrementing it, but since we only clear it when it 3582 * was non-zero, at most one error can be lost. 3583 */ 3584 for (i = 0; i < MVNETA_TX_QNUM_MAX; i++) { 3585 tx = MVNETA_TX_RING(sc, i); 3586 3587 if (tx->drv_error > 0) { 3588 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, tx->drv_error); 3589 tx->drv_error = 0; 3590 } 3591 } 3592 } 3593
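/*
 * Usage note (illustrative): the counters and knobs above are exported
 * through the per-device sysctl tree built in sysctl_mvneta_init().
 * Assuming the interface attaches as unit 0, they would typically show
 * up as:
 *
 *	dev.mvneta.0.mib.*				MIB counters (sysctl_read_mib)
 *	dev.mvneta.0.mib.reset=1			clear all counters (sysctl_clear_mib)
 *	dev.mvneta.0.rx.queueN.threshold_timer_us	Rx interrupt coalescing (sysctl_set_queue_rxthtime)
 *	dev.mvneta.0.flow_control / dev.mvneta.0.lpi	flow control / Low Power Idle knobs
 *
 * The exact prefix depends on the driver name and unit number of the
 * attached device.
 */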