1 /****************************************************************************** 2 * 3 * Name : sky2.c 4 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x 5 * Version: $Revision: 1.23 $ 6 * Date : $Date: 2005/12/22 09:04:11 $ 7 * Purpose: Main driver source file 8 * 9 *****************************************************************************/ 10 11 /****************************************************************************** 12 * 13 * LICENSE: 14 * Copyright (C) Marvell International Ltd. and/or its affiliates 15 * 16 * The computer program files contained in this folder ("Files") 17 * are provided to you under the BSD-type license terms provided 18 * below, and any use of such Files and any derivative works 19 * thereof created by you shall be governed by the following terms 20 * and conditions: 21 * 22 * - Redistributions of source code must retain the above copyright 23 * notice, this list of conditions and the following disclaimer. 24 * - Redistributions in binary form must reproduce the above 25 * copyright notice, this list of conditions and the following 26 * disclaimer in the documentation and/or other materials provided 27 * with the distribution. 28 * - Neither the name of Marvell nor the names of its contributors 29 * may be used to endorse or promote products derived from this 30 * software without specific prior written permission. 31 * 32 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 33 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 34 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 35 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 36 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 37 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 38 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 39 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 40 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 41 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 42 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 43 * OF THE POSSIBILITY OF SUCH DAMAGE. 44 * /LICENSE 45 * 46 *****************************************************************************/ 47 48 /*- 49 * Copyright (c) 1997, 1998, 1999, 2000 50 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 51 * 52 * Redistribution and use in source and binary forms, with or without 53 * modification, are permitted provided that the following conditions 54 * are met: 55 * 1. Redistributions of source code must retain the above copyright 56 * notice, this list of conditions and the following disclaimer. 57 * 2. Redistributions in binary form must reproduce the above copyright 58 * notice, this list of conditions and the following disclaimer in the 59 * documentation and/or other materials provided with the distribution. 60 * 3. All advertising materials mentioning features or use of this software 61 * must display the following acknowledgement: 62 * This product includes software developed by Bill Paul. 63 * 4. Neither the name of the author nor the names of any co-contributors 64 * may be used to endorse or promote products derived from this software 65 * without specific prior written permission. 66 * 67 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 68 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 69 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 70 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 71 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 72 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 73 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 74 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 75 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 76 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 77 * THE POSSIBILITY OF SUCH DAMAGE. 78 */ 79 /*- 80 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu> 81 * 82 * Permission to use, copy, modify, and distribute this software for any 83 * purpose with or without fee is hereby granted, provided that the above 84 * copyright notice and this permission notice appear in all copies. 85 * 86 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 87 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 88 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 89 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 90 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 91 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 92 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 93 */ 94 95 /* 96 * Device driver for the Marvell Yukon II Ethernet controller. 97 * Due to lack of documentation, this driver is based on the code from 98 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x. 99 */ 100 101 #include <sys/cdefs.h> 102 __FBSDID("$FreeBSD$"); 103 104 #include <sys/param.h> 105 #include <sys/systm.h> 106 #include <sys/bus.h> 107 #include <sys/endian.h> 108 #include <sys/mbuf.h> 109 #include <sys/malloc.h> 110 #include <sys/kernel.h> 111 #include <sys/module.h> 112 #include <sys/socket.h> 113 #include <sys/sockio.h> 114 #include <sys/queue.h> 115 #include <sys/sysctl.h> 116 117 #include <net/bpf.h> 118 #include <net/ethernet.h> 119 #include <net/if.h> 120 #include <net/if_var.h> 121 #include <net/if_arp.h> 122 #include <net/if_dl.h> 123 #include <net/if_media.h> 124 #include <net/if_types.h> 125 #include <net/if_vlan_var.h> 126 127 #include <netinet/in.h> 128 #include <netinet/in_systm.h> 129 #include <netinet/ip.h> 130 #include <netinet/tcp.h> 131 #include <netinet/udp.h> 132 133 #include <machine/bus.h> 134 #include <machine/in_cksum.h> 135 #include <machine/resource.h> 136 #include <sys/rman.h> 137 138 #include <dev/mii/mii.h> 139 #include <dev/mii/miivar.h> 140 141 #include <dev/pci/pcireg.h> 142 #include <dev/pci/pcivar.h> 143 144 #include <dev/msk/if_mskreg.h> 145 146 MODULE_DEPEND(msk, pci, 1, 1, 1); 147 MODULE_DEPEND(msk, ether, 1, 1, 1); 148 MODULE_DEPEND(msk, miibus, 1, 1, 1); 149 150 /* "device miibus" required. See GENERIC if you get errors here. */ 151 #include "miibus_if.h" 152 153 /* Tunables. */ 154 static int msi_disable = 0; 155 TUNABLE_INT("hw.msk.msi_disable", &msi_disable); 156 static int legacy_intr = 0; 157 TUNABLE_INT("hw.msk.legacy_intr", &legacy_intr); 158 static int jumbo_disable = 0; 159 TUNABLE_INT("hw.msk.jumbo_disable", &jumbo_disable); 160 161 #define MSK_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) 162 163 /* 164 * Devices supported by this driver. 165 */ 166 static const struct msk_product { 167 uint16_t msk_vendorid; 168 uint16_t msk_deviceid; 169 const char *msk_name; 170 } msk_products[] = { 171 { VENDORID_SK, DEVICEID_SK_YUKON2, 172 "SK-9Sxx Gigabit Ethernet" }, 173 { VENDORID_SK, DEVICEID_SK_YUKON2_EXPR, 174 "SK-9Exx Gigabit Ethernet"}, 175 { VENDORID_MARVELL, DEVICEID_MRVL_8021CU, 176 "Marvell Yukon 88E8021CU Gigabit Ethernet" }, 177 { VENDORID_MARVELL, DEVICEID_MRVL_8021X, 178 "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" }, 179 { VENDORID_MARVELL, DEVICEID_MRVL_8022CU, 180 "Marvell Yukon 88E8022CU Gigabit Ethernet" }, 181 { VENDORID_MARVELL, DEVICEID_MRVL_8022X, 182 "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" }, 183 { VENDORID_MARVELL, DEVICEID_MRVL_8061CU, 184 "Marvell Yukon 88E8061CU Gigabit Ethernet" }, 185 { VENDORID_MARVELL, DEVICEID_MRVL_8061X, 186 "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" }, 187 { VENDORID_MARVELL, DEVICEID_MRVL_8062CU, 188 "Marvell Yukon 88E8062CU Gigabit Ethernet" }, 189 { VENDORID_MARVELL, DEVICEID_MRVL_8062X, 190 "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" }, 191 { VENDORID_MARVELL, DEVICEID_MRVL_8035, 192 "Marvell Yukon 88E8035 Fast Ethernet" }, 193 { VENDORID_MARVELL, DEVICEID_MRVL_8036, 194 "Marvell Yukon 88E8036 Fast Ethernet" }, 195 { VENDORID_MARVELL, DEVICEID_MRVL_8038, 196 "Marvell Yukon 88E8038 Fast Ethernet" }, 197 { VENDORID_MARVELL, DEVICEID_MRVL_8039, 198 "Marvell Yukon 88E8039 Fast Ethernet" }, 199 { VENDORID_MARVELL, DEVICEID_MRVL_8040, 200 "Marvell Yukon 88E8040 Fast Ethernet" }, 201 { VENDORID_MARVELL, DEVICEID_MRVL_8040T, 202 "Marvell Yukon 88E8040T Fast Ethernet" }, 203 { VENDORID_MARVELL, DEVICEID_MRVL_8042, 204 "Marvell Yukon 88E8042 Fast Ethernet" }, 205 { VENDORID_MARVELL, DEVICEID_MRVL_8048, 206 "Marvell Yukon 88E8048 Fast Ethernet" }, 207 { VENDORID_MARVELL, DEVICEID_MRVL_4361, 208 "Marvell Yukon 88E8050 Gigabit Ethernet" }, 209 { VENDORID_MARVELL, DEVICEID_MRVL_4360, 210 "Marvell Yukon 88E8052 Gigabit Ethernet" }, 211 { VENDORID_MARVELL, DEVICEID_MRVL_4362, 212 "Marvell Yukon 88E8053 Gigabit Ethernet" }, 213 { VENDORID_MARVELL, DEVICEID_MRVL_4363, 214 "Marvell Yukon 88E8055 Gigabit Ethernet" }, 215 { VENDORID_MARVELL, DEVICEID_MRVL_4364, 216 "Marvell Yukon 88E8056 Gigabit Ethernet" }, 217 { VENDORID_MARVELL, DEVICEID_MRVL_4365, 218 "Marvell Yukon 88E8070 Gigabit Ethernet" }, 219 { VENDORID_MARVELL, DEVICEID_MRVL_436A, 220 "Marvell Yukon 88E8058 Gigabit Ethernet" }, 221 { VENDORID_MARVELL, DEVICEID_MRVL_436B, 222 "Marvell Yukon 88E8071 Gigabit Ethernet" }, 223 { VENDORID_MARVELL, DEVICEID_MRVL_436C, 224 "Marvell Yukon 88E8072 Gigabit Ethernet" }, 225 { VENDORID_MARVELL, DEVICEID_MRVL_436D, 226 "Marvell Yukon 88E8055 Gigabit Ethernet" }, 227 { VENDORID_MARVELL, DEVICEID_MRVL_4370, 228 "Marvell Yukon 88E8075 Gigabit Ethernet" }, 229 { VENDORID_MARVELL, DEVICEID_MRVL_4380, 230 "Marvell Yukon 88E8057 Gigabit Ethernet" }, 231 { VENDORID_MARVELL, DEVICEID_MRVL_4381, 232 "Marvell Yukon 88E8059 Gigabit Ethernet" }, 233 { VENDORID_DLINK, DEVICEID_DLINK_DGE550SX, 234 "D-Link 550SX Gigabit Ethernet" }, 235 { VENDORID_DLINK, DEVICEID_DLINK_DGE560SX, 236 "D-Link 560SX Gigabit Ethernet" }, 237 { VENDORID_DLINK, DEVICEID_DLINK_DGE560T, 238 "D-Link 560T Gigabit Ethernet" } 239 }; 240 241 static const char *model_name[] = { 242 "Yukon XL", 243 "Yukon EC Ultra", 244 "Yukon EX", 245 "Yukon EC", 246 "Yukon FE", 247 "Yukon FE+", 248 "Yukon Supreme", 249 "Yukon Ultra 2", 250 "Yukon Unknown", 251 "Yukon Optima", 252 }; 253 254 static int mskc_probe(device_t); 255 static int mskc_attach(device_t); 256 static int mskc_detach(device_t); 257 static int mskc_shutdown(device_t); 258 static int mskc_setup_rambuffer(struct msk_softc *); 259 static int mskc_suspend(device_t); 260 static int mskc_resume(device_t); 261 static bus_dma_tag_t mskc_get_dma_tag(device_t, device_t); 262 static void mskc_reset(struct msk_softc *); 263 264 static int msk_probe(device_t); 265 static int msk_attach(device_t); 266 static int msk_detach(device_t); 267 268 static void msk_tick(void *); 269 static void msk_intr(void *); 270 static void msk_intr_phy(struct msk_if_softc *); 271 static void msk_intr_gmac(struct msk_if_softc *); 272 static __inline void msk_rxput(struct msk_if_softc *); 273 static int msk_handle_events(struct msk_softc *); 274 static void msk_handle_hwerr(struct msk_if_softc *, uint32_t); 275 static void msk_intr_hwerr(struct msk_softc *); 276 #ifndef __NO_STRICT_ALIGNMENT 277 static __inline void msk_fixup_rx(struct mbuf *); 278 #endif 279 static __inline void msk_rxcsum(struct msk_if_softc *, uint32_t, struct mbuf *); 280 static void msk_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int); 281 static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int); 282 static void msk_txeof(struct msk_if_softc *, int); 283 static int msk_encap(struct msk_if_softc *, struct mbuf **); 284 static void msk_start(struct ifnet *); 285 static void msk_start_locked(struct ifnet *); 286 static int msk_ioctl(struct ifnet *, u_long, caddr_t); 287 static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t); 288 static void msk_set_rambuffer(struct msk_if_softc *); 289 static void msk_set_tx_stfwd(struct msk_if_softc *); 290 static void msk_init(void *); 291 static void msk_init_locked(struct msk_if_softc *); 292 static void msk_stop(struct msk_if_softc *); 293 static void msk_watchdog(struct msk_if_softc *); 294 static int msk_mediachange(struct ifnet *); 295 static void msk_mediastatus(struct ifnet *, struct ifmediareq *); 296 static void msk_phy_power(struct msk_softc *, int); 297 static void msk_dmamap_cb(void *, bus_dma_segment_t *, int, int); 298 static int msk_status_dma_alloc(struct msk_softc *); 299 static void msk_status_dma_free(struct msk_softc *); 300 static int msk_txrx_dma_alloc(struct msk_if_softc *); 301 static int msk_rx_dma_jalloc(struct msk_if_softc *); 302 static void msk_txrx_dma_free(struct msk_if_softc *); 303 static void msk_rx_dma_jfree(struct msk_if_softc *); 304 static int msk_rx_fill(struct msk_if_softc *, int); 305 static int msk_init_rx_ring(struct msk_if_softc *); 306 static int msk_init_jumbo_rx_ring(struct msk_if_softc *); 307 static void msk_init_tx_ring(struct msk_if_softc *); 308 static __inline void msk_discard_rxbuf(struct msk_if_softc *, int); 309 static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int); 310 static int msk_newbuf(struct msk_if_softc *, int); 311 static int msk_jumbo_newbuf(struct msk_if_softc *, int); 312 313 static int msk_phy_readreg(struct msk_if_softc *, int, int); 314 static int msk_phy_writereg(struct msk_if_softc *, int, int, int); 315 static int msk_miibus_readreg(device_t, int, int); 316 static int msk_miibus_writereg(device_t, int, int, int); 317 static void msk_miibus_statchg(device_t); 318 319 static void msk_rxfilter(struct msk_if_softc *); 320 static void msk_setvlan(struct msk_if_softc *, struct ifnet *); 321 322 static void msk_stats_clear(struct msk_if_softc *); 323 static void msk_stats_update(struct msk_if_softc *); 324 static int msk_sysctl_stat32(SYSCTL_HANDLER_ARGS); 325 static int msk_sysctl_stat64(SYSCTL_HANDLER_ARGS); 326 static void msk_sysctl_node(struct msk_if_softc *); 327 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); 328 static int sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS); 329 330 static device_method_t mskc_methods[] = { 331 /* Device interface */ 332 DEVMETHOD(device_probe, mskc_probe), 333 DEVMETHOD(device_attach, mskc_attach), 334 DEVMETHOD(device_detach, mskc_detach), 335 DEVMETHOD(device_suspend, mskc_suspend), 336 DEVMETHOD(device_resume, mskc_resume), 337 DEVMETHOD(device_shutdown, mskc_shutdown), 338 339 DEVMETHOD(bus_get_dma_tag, mskc_get_dma_tag), 340 341 DEVMETHOD_END 342 }; 343 344 static driver_t mskc_driver = { 345 "mskc", 346 mskc_methods, 347 sizeof(struct msk_softc) 348 }; 349 350 static devclass_t mskc_devclass; 351 352 static device_method_t msk_methods[] = { 353 /* Device interface */ 354 DEVMETHOD(device_probe, msk_probe), 355 DEVMETHOD(device_attach, msk_attach), 356 DEVMETHOD(device_detach, msk_detach), 357 DEVMETHOD(device_shutdown, bus_generic_shutdown), 358 359 /* MII interface */ 360 DEVMETHOD(miibus_readreg, msk_miibus_readreg), 361 DEVMETHOD(miibus_writereg, msk_miibus_writereg), 362 DEVMETHOD(miibus_statchg, msk_miibus_statchg), 363 364 DEVMETHOD_END 365 }; 366 367 static driver_t msk_driver = { 368 "msk", 369 msk_methods, 370 sizeof(struct msk_if_softc) 371 }; 372 373 static devclass_t msk_devclass; 374 375 DRIVER_MODULE(mskc, pci, mskc_driver, mskc_devclass, NULL, NULL); 376 DRIVER_MODULE(msk, mskc, msk_driver, msk_devclass, NULL, NULL); 377 DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, NULL, NULL); 378 379 static struct resource_spec msk_res_spec_io[] = { 380 { SYS_RES_IOPORT, PCIR_BAR(1), RF_ACTIVE }, 381 { -1, 0, 0 } 382 }; 383 384 static struct resource_spec msk_res_spec_mem[] = { 385 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE }, 386 { -1, 0, 0 } 387 }; 388 389 static struct resource_spec msk_irq_spec_legacy[] = { 390 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, 391 { -1, 0, 0 } 392 }; 393 394 static struct resource_spec msk_irq_spec_msi[] = { 395 { SYS_RES_IRQ, 1, RF_ACTIVE }, 396 { -1, 0, 0 } 397 }; 398 399 static int 400 msk_miibus_readreg(device_t dev, int phy, int reg) 401 { 402 struct msk_if_softc *sc_if; 403 404 sc_if = device_get_softc(dev); 405 406 return (msk_phy_readreg(sc_if, phy, reg)); 407 } 408 409 static int 410 msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg) 411 { 412 struct msk_softc *sc; 413 int i, val; 414 415 sc = sc_if->msk_softc; 416 417 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL, 418 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); 419 420 for (i = 0; i < MSK_TIMEOUT; i++) { 421 DELAY(1); 422 val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL); 423 if ((val & GM_SMI_CT_RD_VAL) != 0) { 424 val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA); 425 break; 426 } 427 } 428 429 if (i == MSK_TIMEOUT) { 430 if_printf(sc_if->msk_ifp, "phy failed to come ready\n"); 431 val = 0; 432 } 433 434 return (val); 435 } 436 437 static int 438 msk_miibus_writereg(device_t dev, int phy, int reg, int val) 439 { 440 struct msk_if_softc *sc_if; 441 442 sc_if = device_get_softc(dev); 443 444 return (msk_phy_writereg(sc_if, phy, reg, val)); 445 } 446 447 static int 448 msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val) 449 { 450 struct msk_softc *sc; 451 int i; 452 453 sc = sc_if->msk_softc; 454 455 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val); 456 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL, 457 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg)); 458 for (i = 0; i < MSK_TIMEOUT; i++) { 459 DELAY(1); 460 if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) & 461 GM_SMI_CT_BUSY) == 0) 462 break; 463 } 464 if (i == MSK_TIMEOUT) 465 if_printf(sc_if->msk_ifp, "phy write timeout\n"); 466 467 return (0); 468 } 469 470 static void 471 msk_miibus_statchg(device_t dev) 472 { 473 struct msk_softc *sc; 474 struct msk_if_softc *sc_if; 475 struct mii_data *mii; 476 struct ifnet *ifp; 477 uint32_t gmac; 478 479 sc_if = device_get_softc(dev); 480 sc = sc_if->msk_softc; 481 482 MSK_IF_LOCK_ASSERT(sc_if); 483 484 mii = device_get_softc(sc_if->msk_miibus); 485 ifp = sc_if->msk_ifp; 486 if (mii == NULL || ifp == NULL || 487 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 488 return; 489 490 sc_if->msk_flags &= ~MSK_FLAG_LINK; 491 if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) == 492 (IFM_AVALID | IFM_ACTIVE)) { 493 switch (IFM_SUBTYPE(mii->mii_media_active)) { 494 case IFM_10_T: 495 case IFM_100_TX: 496 sc_if->msk_flags |= MSK_FLAG_LINK; 497 break; 498 case IFM_1000_T: 499 case IFM_1000_SX: 500 case IFM_1000_LX: 501 case IFM_1000_CX: 502 if ((sc_if->msk_flags & MSK_FLAG_FASTETHER) == 0) 503 sc_if->msk_flags |= MSK_FLAG_LINK; 504 break; 505 default: 506 break; 507 } 508 } 509 510 if ((sc_if->msk_flags & MSK_FLAG_LINK) != 0) { 511 /* Enable Tx FIFO Underrun. */ 512 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 513 GM_IS_TX_FF_UR | GM_IS_RX_FF_OR); 514 /* 515 * Because mii(4) notify msk(4) that it detected link status 516 * change, there is no need to enable automatic 517 * speed/flow-control/duplex updates. 518 */ 519 gmac = GM_GPCR_AU_ALL_DIS; 520 switch (IFM_SUBTYPE(mii->mii_media_active)) { 521 case IFM_1000_SX: 522 case IFM_1000_T: 523 gmac |= GM_GPCR_SPEED_1000; 524 break; 525 case IFM_100_TX: 526 gmac |= GM_GPCR_SPEED_100; 527 break; 528 case IFM_10_T: 529 break; 530 } 531 532 if ((IFM_OPTIONS(mii->mii_media_active) & 533 IFM_ETH_RXPAUSE) == 0) 534 gmac |= GM_GPCR_FC_RX_DIS; 535 if ((IFM_OPTIONS(mii->mii_media_active) & 536 IFM_ETH_TXPAUSE) == 0) 537 gmac |= GM_GPCR_FC_TX_DIS; 538 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) 539 gmac |= GM_GPCR_DUP_FULL; 540 else 541 gmac |= GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS; 542 gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA; 543 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac); 544 /* Read again to ensure writing. */ 545 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); 546 gmac = GMC_PAUSE_OFF; 547 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 548 if ((IFM_OPTIONS(mii->mii_media_active) & 549 IFM_ETH_RXPAUSE) != 0) 550 gmac = GMC_PAUSE_ON; 551 } 552 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac); 553 554 /* Enable PHY interrupt for FIFO underrun/overflow. */ 555 msk_phy_writereg(sc_if, PHY_ADDR_MARV, 556 PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR); 557 } else { 558 /* 559 * Link state changed to down. 560 * Disable PHY interrupts. 561 */ 562 msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0); 563 /* Disable Rx/Tx MAC. */ 564 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); 565 if ((gmac & (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)) != 0) { 566 gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); 567 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac); 568 /* Read again to ensure writing. */ 569 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); 570 } 571 } 572 } 573 574 static void 575 msk_rxfilter(struct msk_if_softc *sc_if) 576 { 577 struct msk_softc *sc; 578 struct ifnet *ifp; 579 struct ifmultiaddr *ifma; 580 uint32_t mchash[2]; 581 uint32_t crc; 582 uint16_t mode; 583 584 sc = sc_if->msk_softc; 585 586 MSK_IF_LOCK_ASSERT(sc_if); 587 588 ifp = sc_if->msk_ifp; 589 590 bzero(mchash, sizeof(mchash)); 591 mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL); 592 if ((ifp->if_flags & IFF_PROMISC) != 0) 593 mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 594 else if ((ifp->if_flags & IFF_ALLMULTI) != 0) { 595 mode |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA; 596 mchash[0] = 0xffff; 597 mchash[1] = 0xffff; 598 } else { 599 mode |= GM_RXCR_UCF_ENA; 600 if_maddr_rlock(ifp); 601 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 602 if (ifma->ifma_addr->sa_family != AF_LINK) 603 continue; 604 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *) 605 ifma->ifma_addr), ETHER_ADDR_LEN); 606 /* Just want the 6 least significant bits. */ 607 crc &= 0x3f; 608 /* Set the corresponding bit in the hash table. */ 609 mchash[crc >> 5] |= 1 << (crc & 0x1f); 610 } 611 if_maddr_runlock(ifp); 612 if (mchash[0] != 0 || mchash[1] != 0) 613 mode |= GM_RXCR_MCF_ENA; 614 } 615 616 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1, 617 mchash[0] & 0xffff); 618 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2, 619 (mchash[0] >> 16) & 0xffff); 620 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3, 621 mchash[1] & 0xffff); 622 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4, 623 (mchash[1] >> 16) & 0xffff); 624 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode); 625 } 626 627 static void 628 msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp) 629 { 630 struct msk_softc *sc; 631 632 sc = sc_if->msk_softc; 633 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 634 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), 635 RX_VLAN_STRIP_ON); 636 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 637 TX_VLAN_TAG_ON); 638 } else { 639 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), 640 RX_VLAN_STRIP_OFF); 641 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 642 TX_VLAN_TAG_OFF); 643 } 644 } 645 646 static int 647 msk_rx_fill(struct msk_if_softc *sc_if, int jumbo) 648 { 649 uint16_t idx; 650 int i; 651 652 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 && 653 (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) { 654 /* Wait until controller executes OP_TCPSTART command. */ 655 for (i = 100; i > 0; i--) { 656 DELAY(100); 657 idx = CSR_READ_2(sc_if->msk_softc, 658 Y2_PREF_Q_ADDR(sc_if->msk_rxq, 659 PREF_UNIT_GET_IDX_REG)); 660 if (idx != 0) 661 break; 662 } 663 if (i == 0) { 664 device_printf(sc_if->msk_if_dev, 665 "prefetch unit stuck?\n"); 666 return (ETIMEDOUT); 667 } 668 /* 669 * Fill consumed LE with free buffer. This can be done 670 * in Rx handler but we don't want to add special code 671 * in fast handler. 672 */ 673 if (jumbo > 0) { 674 if (msk_jumbo_newbuf(sc_if, 0) != 0) 675 return (ENOBUFS); 676 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 677 sc_if->msk_cdata.msk_jumbo_rx_ring_map, 678 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 679 } else { 680 if (msk_newbuf(sc_if, 0) != 0) 681 return (ENOBUFS); 682 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag, 683 sc_if->msk_cdata.msk_rx_ring_map, 684 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 685 } 686 sc_if->msk_cdata.msk_rx_prod = 0; 687 CSR_WRITE_2(sc_if->msk_softc, 688 Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG), 689 sc_if->msk_cdata.msk_rx_prod); 690 } 691 return (0); 692 } 693 694 static int 695 msk_init_rx_ring(struct msk_if_softc *sc_if) 696 { 697 struct msk_ring_data *rd; 698 struct msk_rxdesc *rxd; 699 int i, nbuf, prod; 700 701 MSK_IF_LOCK_ASSERT(sc_if); 702 703 sc_if->msk_cdata.msk_rx_cons = 0; 704 sc_if->msk_cdata.msk_rx_prod = 0; 705 sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM; 706 707 rd = &sc_if->msk_rdata; 708 bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT); 709 for (i = prod = 0; i < MSK_RX_RING_CNT; i++) { 710 rxd = &sc_if->msk_cdata.msk_rxdesc[prod]; 711 rxd->rx_m = NULL; 712 rxd->rx_le = &rd->msk_rx_ring[prod]; 713 MSK_INC(prod, MSK_RX_RING_CNT); 714 } 715 nbuf = MSK_RX_BUF_CNT; 716 prod = 0; 717 /* Have controller know how to compute Rx checksum. */ 718 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 && 719 (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) { 720 #ifdef MSK_64BIT_DMA 721 rxd = &sc_if->msk_cdata.msk_rxdesc[prod]; 722 rxd->rx_m = NULL; 723 rxd->rx_le = &rd->msk_rx_ring[prod]; 724 rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 | 725 ETHER_HDR_LEN); 726 rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER); 727 MSK_INC(prod, MSK_RX_RING_CNT); 728 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT); 729 #endif 730 rxd = &sc_if->msk_cdata.msk_rxdesc[prod]; 731 rxd->rx_m = NULL; 732 rxd->rx_le = &rd->msk_rx_ring[prod]; 733 rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 | 734 ETHER_HDR_LEN); 735 rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER); 736 MSK_INC(prod, MSK_RX_RING_CNT); 737 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT); 738 nbuf--; 739 } 740 for (i = 0; i < nbuf; i++) { 741 if (msk_newbuf(sc_if, prod) != 0) 742 return (ENOBUFS); 743 MSK_RX_INC(prod, MSK_RX_RING_CNT); 744 } 745 746 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag, 747 sc_if->msk_cdata.msk_rx_ring_map, 748 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 749 750 /* Update prefetch unit. */ 751 sc_if->msk_cdata.msk_rx_prod = prod; 752 CSR_WRITE_2(sc_if->msk_softc, 753 Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG), 754 (sc_if->msk_cdata.msk_rx_prod + MSK_RX_RING_CNT - 1) % 755 MSK_RX_RING_CNT); 756 if (msk_rx_fill(sc_if, 0) != 0) 757 return (ENOBUFS); 758 return (0); 759 } 760 761 static int 762 msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if) 763 { 764 struct msk_ring_data *rd; 765 struct msk_rxdesc *rxd; 766 int i, nbuf, prod; 767 768 MSK_IF_LOCK_ASSERT(sc_if); 769 770 sc_if->msk_cdata.msk_rx_cons = 0; 771 sc_if->msk_cdata.msk_rx_prod = 0; 772 sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM; 773 774 rd = &sc_if->msk_rdata; 775 bzero(rd->msk_jumbo_rx_ring, 776 sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT); 777 for (i = prod = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 778 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod]; 779 rxd->rx_m = NULL; 780 rxd->rx_le = &rd->msk_jumbo_rx_ring[prod]; 781 MSK_INC(prod, MSK_JUMBO_RX_RING_CNT); 782 } 783 nbuf = MSK_RX_BUF_CNT; 784 prod = 0; 785 /* Have controller know how to compute Rx checksum. */ 786 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 && 787 (sc_if->msk_ifp->if_capenable & IFCAP_RXCSUM) != 0) { 788 #ifdef MSK_64BIT_DMA 789 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod]; 790 rxd->rx_m = NULL; 791 rxd->rx_le = &rd->msk_jumbo_rx_ring[prod]; 792 rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 | 793 ETHER_HDR_LEN); 794 rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER); 795 MSK_INC(prod, MSK_JUMBO_RX_RING_CNT); 796 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT); 797 #endif 798 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod]; 799 rxd->rx_m = NULL; 800 rxd->rx_le = &rd->msk_jumbo_rx_ring[prod]; 801 rxd->rx_le->msk_addr = htole32(ETHER_HDR_LEN << 16 | 802 ETHER_HDR_LEN); 803 rxd->rx_le->msk_control = htole32(OP_TCPSTART | HW_OWNER); 804 MSK_INC(prod, MSK_JUMBO_RX_RING_CNT); 805 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT); 806 nbuf--; 807 } 808 for (i = 0; i < nbuf; i++) { 809 if (msk_jumbo_newbuf(sc_if, prod) != 0) 810 return (ENOBUFS); 811 MSK_RX_INC(prod, MSK_JUMBO_RX_RING_CNT); 812 } 813 814 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 815 sc_if->msk_cdata.msk_jumbo_rx_ring_map, 816 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 817 818 /* Update prefetch unit. */ 819 sc_if->msk_cdata.msk_rx_prod = prod; 820 CSR_WRITE_2(sc_if->msk_softc, 821 Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG), 822 (sc_if->msk_cdata.msk_rx_prod + MSK_JUMBO_RX_RING_CNT - 1) % 823 MSK_JUMBO_RX_RING_CNT); 824 if (msk_rx_fill(sc_if, 1) != 0) 825 return (ENOBUFS); 826 return (0); 827 } 828 829 static void 830 msk_init_tx_ring(struct msk_if_softc *sc_if) 831 { 832 struct msk_ring_data *rd; 833 struct msk_txdesc *txd; 834 int i; 835 836 sc_if->msk_cdata.msk_tso_mtu = 0; 837 sc_if->msk_cdata.msk_last_csum = 0; 838 sc_if->msk_cdata.msk_tx_prod = 0; 839 sc_if->msk_cdata.msk_tx_cons = 0; 840 sc_if->msk_cdata.msk_tx_cnt = 0; 841 sc_if->msk_cdata.msk_tx_high_addr = 0; 842 843 rd = &sc_if->msk_rdata; 844 bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT); 845 for (i = 0; i < MSK_TX_RING_CNT; i++) { 846 txd = &sc_if->msk_cdata.msk_txdesc[i]; 847 txd->tx_m = NULL; 848 txd->tx_le = &rd->msk_tx_ring[i]; 849 } 850 851 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag, 852 sc_if->msk_cdata.msk_tx_ring_map, 853 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 854 } 855 856 static __inline void 857 msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx) 858 { 859 struct msk_rx_desc *rx_le; 860 struct msk_rxdesc *rxd; 861 struct mbuf *m; 862 863 #ifdef MSK_64BIT_DMA 864 rxd = &sc_if->msk_cdata.msk_rxdesc[idx]; 865 rx_le = rxd->rx_le; 866 rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER); 867 MSK_INC(idx, MSK_RX_RING_CNT); 868 #endif 869 rxd = &sc_if->msk_cdata.msk_rxdesc[idx]; 870 m = rxd->rx_m; 871 rx_le = rxd->rx_le; 872 rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER); 873 } 874 875 static __inline void 876 msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx) 877 { 878 struct msk_rx_desc *rx_le; 879 struct msk_rxdesc *rxd; 880 struct mbuf *m; 881 882 #ifdef MSK_64BIT_DMA 883 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx]; 884 rx_le = rxd->rx_le; 885 rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER); 886 MSK_INC(idx, MSK_JUMBO_RX_RING_CNT); 887 #endif 888 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx]; 889 m = rxd->rx_m; 890 rx_le = rxd->rx_le; 891 rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER); 892 } 893 894 static int 895 msk_newbuf(struct msk_if_softc *sc_if, int idx) 896 { 897 struct msk_rx_desc *rx_le; 898 struct msk_rxdesc *rxd; 899 struct mbuf *m; 900 bus_dma_segment_t segs[1]; 901 bus_dmamap_t map; 902 int nsegs; 903 904 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 905 if (m == NULL) 906 return (ENOBUFS); 907 908 m->m_len = m->m_pkthdr.len = MCLBYTES; 909 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) 910 m_adj(m, ETHER_ALIGN); 911 #ifndef __NO_STRICT_ALIGNMENT 912 else 913 m_adj(m, MSK_RX_BUF_ALIGN); 914 #endif 915 916 if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag, 917 sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs, 918 BUS_DMA_NOWAIT) != 0) { 919 m_freem(m); 920 return (ENOBUFS); 921 } 922 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 923 924 rxd = &sc_if->msk_cdata.msk_rxdesc[idx]; 925 #ifdef MSK_64BIT_DMA 926 rx_le = rxd->rx_le; 927 rx_le->msk_addr = htole32(MSK_ADDR_HI(segs[0].ds_addr)); 928 rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER); 929 MSK_INC(idx, MSK_RX_RING_CNT); 930 rxd = &sc_if->msk_cdata.msk_rxdesc[idx]; 931 #endif 932 if (rxd->rx_m != NULL) { 933 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap, 934 BUS_DMASYNC_POSTREAD); 935 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap); 936 rxd->rx_m = NULL; 937 } 938 map = rxd->rx_dmamap; 939 rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap; 940 sc_if->msk_cdata.msk_rx_sparemap = map; 941 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap, 942 BUS_DMASYNC_PREREAD); 943 rxd->rx_m = m; 944 rx_le = rxd->rx_le; 945 rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr)); 946 rx_le->msk_control = 947 htole32(segs[0].ds_len | OP_PACKET | HW_OWNER); 948 949 return (0); 950 } 951 952 static int 953 msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx) 954 { 955 struct msk_rx_desc *rx_le; 956 struct msk_rxdesc *rxd; 957 struct mbuf *m; 958 bus_dma_segment_t segs[1]; 959 bus_dmamap_t map; 960 int nsegs; 961 962 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES); 963 if (m == NULL) 964 return (ENOBUFS); 965 m->m_len = m->m_pkthdr.len = MJUM9BYTES; 966 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) 967 m_adj(m, ETHER_ALIGN); 968 #ifndef __NO_STRICT_ALIGNMENT 969 else 970 m_adj(m, MSK_RX_BUF_ALIGN); 971 #endif 972 973 if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag, 974 sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs, 975 BUS_DMA_NOWAIT) != 0) { 976 m_freem(m); 977 return (ENOBUFS); 978 } 979 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 980 981 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx]; 982 #ifdef MSK_64BIT_DMA 983 rx_le = rxd->rx_le; 984 rx_le->msk_addr = htole32(MSK_ADDR_HI(segs[0].ds_addr)); 985 rx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER); 986 MSK_INC(idx, MSK_JUMBO_RX_RING_CNT); 987 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx]; 988 #endif 989 if (rxd->rx_m != NULL) { 990 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, 991 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 992 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag, 993 rxd->rx_dmamap); 994 rxd->rx_m = NULL; 995 } 996 map = rxd->rx_dmamap; 997 rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap; 998 sc_if->msk_cdata.msk_jumbo_rx_sparemap = map; 999 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap, 1000 BUS_DMASYNC_PREREAD); 1001 rxd->rx_m = m; 1002 rx_le = rxd->rx_le; 1003 rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr)); 1004 rx_le->msk_control = 1005 htole32(segs[0].ds_len | OP_PACKET | HW_OWNER); 1006 1007 return (0); 1008 } 1009 1010 /* 1011 * Set media options. 1012 */ 1013 static int 1014 msk_mediachange(struct ifnet *ifp) 1015 { 1016 struct msk_if_softc *sc_if; 1017 struct mii_data *mii; 1018 int error; 1019 1020 sc_if = ifp->if_softc; 1021 1022 MSK_IF_LOCK(sc_if); 1023 mii = device_get_softc(sc_if->msk_miibus); 1024 error = mii_mediachg(mii); 1025 MSK_IF_UNLOCK(sc_if); 1026 1027 return (error); 1028 } 1029 1030 /* 1031 * Report current media status. 1032 */ 1033 static void 1034 msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 1035 { 1036 struct msk_if_softc *sc_if; 1037 struct mii_data *mii; 1038 1039 sc_if = ifp->if_softc; 1040 MSK_IF_LOCK(sc_if); 1041 if ((ifp->if_flags & IFF_UP) == 0) { 1042 MSK_IF_UNLOCK(sc_if); 1043 return; 1044 } 1045 mii = device_get_softc(sc_if->msk_miibus); 1046 1047 mii_pollstat(mii); 1048 ifmr->ifm_active = mii->mii_media_active; 1049 ifmr->ifm_status = mii->mii_media_status; 1050 MSK_IF_UNLOCK(sc_if); 1051 } 1052 1053 static int 1054 msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 1055 { 1056 struct msk_if_softc *sc_if; 1057 struct ifreq *ifr; 1058 struct mii_data *mii; 1059 int error, mask, reinit; 1060 1061 sc_if = ifp->if_softc; 1062 ifr = (struct ifreq *)data; 1063 error = 0; 1064 1065 switch(command) { 1066 case SIOCSIFMTU: 1067 MSK_IF_LOCK(sc_if); 1068 if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN) 1069 error = EINVAL; 1070 else if (ifp->if_mtu != ifr->ifr_mtu) { 1071 if (ifr->ifr_mtu > ETHERMTU) { 1072 if ((sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) { 1073 error = EINVAL; 1074 MSK_IF_UNLOCK(sc_if); 1075 break; 1076 } 1077 if ((sc_if->msk_flags & 1078 MSK_FLAG_JUMBO_NOCSUM) != 0) { 1079 ifp->if_hwassist &= 1080 ~(MSK_CSUM_FEATURES | CSUM_TSO); 1081 ifp->if_capenable &= 1082 ~(IFCAP_TSO4 | IFCAP_TXCSUM); 1083 VLAN_CAPABILITIES(ifp); 1084 } 1085 } 1086 ifp->if_mtu = ifr->ifr_mtu; 1087 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1088 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1089 msk_init_locked(sc_if); 1090 } 1091 } 1092 MSK_IF_UNLOCK(sc_if); 1093 break; 1094 case SIOCSIFFLAGS: 1095 MSK_IF_LOCK(sc_if); 1096 if ((ifp->if_flags & IFF_UP) != 0) { 1097 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && 1098 ((ifp->if_flags ^ sc_if->msk_if_flags) & 1099 (IFF_PROMISC | IFF_ALLMULTI)) != 0) 1100 msk_rxfilter(sc_if); 1101 else if ((sc_if->msk_flags & MSK_FLAG_DETACH) == 0) 1102 msk_init_locked(sc_if); 1103 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1104 msk_stop(sc_if); 1105 sc_if->msk_if_flags = ifp->if_flags; 1106 MSK_IF_UNLOCK(sc_if); 1107 break; 1108 case SIOCADDMULTI: 1109 case SIOCDELMULTI: 1110 MSK_IF_LOCK(sc_if); 1111 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1112 msk_rxfilter(sc_if); 1113 MSK_IF_UNLOCK(sc_if); 1114 break; 1115 case SIOCGIFMEDIA: 1116 case SIOCSIFMEDIA: 1117 mii = device_get_softc(sc_if->msk_miibus); 1118 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 1119 break; 1120 case SIOCSIFCAP: 1121 reinit = 0; 1122 MSK_IF_LOCK(sc_if); 1123 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1124 if ((mask & IFCAP_TXCSUM) != 0 && 1125 (IFCAP_TXCSUM & ifp->if_capabilities) != 0) { 1126 ifp->if_capenable ^= IFCAP_TXCSUM; 1127 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) 1128 ifp->if_hwassist |= MSK_CSUM_FEATURES; 1129 else 1130 ifp->if_hwassist &= ~MSK_CSUM_FEATURES; 1131 } 1132 if ((mask & IFCAP_RXCSUM) != 0 && 1133 (IFCAP_RXCSUM & ifp->if_capabilities) != 0) { 1134 ifp->if_capenable ^= IFCAP_RXCSUM; 1135 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0) 1136 reinit = 1; 1137 } 1138 if ((mask & IFCAP_VLAN_HWCSUM) != 0 && 1139 (IFCAP_VLAN_HWCSUM & ifp->if_capabilities) != 0) 1140 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; 1141 if ((mask & IFCAP_TSO4) != 0 && 1142 (IFCAP_TSO4 & ifp->if_capabilities) != 0) { 1143 ifp->if_capenable ^= IFCAP_TSO4; 1144 if ((IFCAP_TSO4 & ifp->if_capenable) != 0) 1145 ifp->if_hwassist |= CSUM_TSO; 1146 else 1147 ifp->if_hwassist &= ~CSUM_TSO; 1148 } 1149 if ((mask & IFCAP_VLAN_HWTSO) != 0 && 1150 (IFCAP_VLAN_HWTSO & ifp->if_capabilities) != 0) 1151 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 1152 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 1153 (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) { 1154 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1155 if ((IFCAP_VLAN_HWTAGGING & ifp->if_capenable) == 0) 1156 ifp->if_capenable &= 1157 ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM); 1158 msk_setvlan(sc_if, ifp); 1159 } 1160 if (ifp->if_mtu > ETHERMTU && 1161 (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) { 1162 ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO); 1163 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM); 1164 } 1165 VLAN_CAPABILITIES(ifp); 1166 if (reinit > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1167 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1168 msk_init_locked(sc_if); 1169 } 1170 MSK_IF_UNLOCK(sc_if); 1171 break; 1172 default: 1173 error = ether_ioctl(ifp, command, data); 1174 break; 1175 } 1176 1177 return (error); 1178 } 1179 1180 static int 1181 mskc_probe(device_t dev) 1182 { 1183 const struct msk_product *mp; 1184 uint16_t vendor, devid; 1185 int i; 1186 1187 vendor = pci_get_vendor(dev); 1188 devid = pci_get_device(dev); 1189 mp = msk_products; 1190 for (i = 0; i < nitems(msk_products); i++, mp++) { 1191 if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) { 1192 device_set_desc(dev, mp->msk_name); 1193 return (BUS_PROBE_DEFAULT); 1194 } 1195 } 1196 1197 return (ENXIO); 1198 } 1199 1200 static int 1201 mskc_setup_rambuffer(struct msk_softc *sc) 1202 { 1203 int next; 1204 int i; 1205 1206 /* Get adapter SRAM size. */ 1207 sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4; 1208 if (bootverbose) 1209 device_printf(sc->msk_dev, 1210 "RAM buffer size : %dKB\n", sc->msk_ramsize); 1211 if (sc->msk_ramsize == 0) 1212 return (0); 1213 1214 sc->msk_pflags |= MSK_FLAG_RAMBUF; 1215 /* 1216 * Give receiver 2/3 of memory and round down to the multiple 1217 * of 1024. Tx/Rx RAM buffer size of Yukon II should be multiple 1218 * of 1024. 1219 */ 1220 sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024); 1221 sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize; 1222 for (i = 0, next = 0; i < sc->msk_num_port; i++) { 1223 sc->msk_rxqstart[i] = next; 1224 sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1; 1225 next = sc->msk_rxqend[i] + 1; 1226 sc->msk_txqstart[i] = next; 1227 sc->msk_txqend[i] = next + sc->msk_txqsize - 1; 1228 next = sc->msk_txqend[i] + 1; 1229 if (bootverbose) { 1230 device_printf(sc->msk_dev, 1231 "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i, 1232 sc->msk_rxqsize / 1024, sc->msk_rxqstart[i], 1233 sc->msk_rxqend[i]); 1234 device_printf(sc->msk_dev, 1235 "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i, 1236 sc->msk_txqsize / 1024, sc->msk_txqstart[i], 1237 sc->msk_txqend[i]); 1238 } 1239 } 1240 1241 return (0); 1242 } 1243 1244 static void 1245 msk_phy_power(struct msk_softc *sc, int mode) 1246 { 1247 uint32_t our, val; 1248 int i; 1249 1250 switch (mode) { 1251 case MSK_PHY_POWERUP: 1252 /* Switch power to VCC (WA for VAUX problem). */ 1253 CSR_WRITE_1(sc, B0_POWER_CTRL, 1254 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); 1255 /* Disable Core Clock Division, set Clock Select to 0. */ 1256 CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS); 1257 1258 val = 0; 1259 if (sc->msk_hw_id == CHIP_ID_YUKON_XL && 1260 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { 1261 /* Enable bits are inverted. */ 1262 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | 1263 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS | 1264 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS; 1265 } 1266 /* 1267 * Enable PCI & Core Clock, enable clock gating for both Links. 1268 */ 1269 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val); 1270 1271 our = CSR_PCI_READ_4(sc, PCI_OUR_REG_1); 1272 our &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); 1273 if (sc->msk_hw_id == CHIP_ID_YUKON_XL) { 1274 if (sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { 1275 /* Deassert Low Power for 1st PHY. */ 1276 our |= PCI_Y2_PHY1_COMA; 1277 if (sc->msk_num_port > 1) 1278 our |= PCI_Y2_PHY2_COMA; 1279 } 1280 } 1281 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U || 1282 sc->msk_hw_id == CHIP_ID_YUKON_EX || 1283 sc->msk_hw_id >= CHIP_ID_YUKON_FE_P) { 1284 val = CSR_PCI_READ_4(sc, PCI_OUR_REG_4); 1285 val &= (PCI_FORCE_ASPM_REQUEST | 1286 PCI_ASPM_GPHY_LINK_DOWN | PCI_ASPM_INT_FIFO_EMPTY | 1287 PCI_ASPM_CLKRUN_REQUEST); 1288 /* Set all bits to 0 except bits 15..12. */ 1289 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_4, val); 1290 val = CSR_PCI_READ_4(sc, PCI_OUR_REG_5); 1291 val &= PCI_CTL_TIM_VMAIN_AV_MSK; 1292 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_5, val); 1293 CSR_PCI_WRITE_4(sc, PCI_CFG_REG_1, 0); 1294 CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON); 1295 /* 1296 * Disable status race, workaround for 1297 * Yukon EC Ultra & Yukon EX. 1298 */ 1299 val = CSR_READ_4(sc, B2_GP_IO); 1300 val |= GLB_GPIO_STAT_RACE_DIS; 1301 CSR_WRITE_4(sc, B2_GP_IO, val); 1302 CSR_READ_4(sc, B2_GP_IO); 1303 } 1304 /* Release PHY from PowerDown/COMA mode. */ 1305 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, our); 1306 1307 for (i = 0; i < sc->msk_num_port; i++) { 1308 CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL), 1309 GMLC_RST_SET); 1310 CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL), 1311 GMLC_RST_CLR); 1312 } 1313 break; 1314 case MSK_PHY_POWERDOWN: 1315 val = CSR_PCI_READ_4(sc, PCI_OUR_REG_1); 1316 val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD; 1317 if (sc->msk_hw_id == CHIP_ID_YUKON_XL && 1318 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { 1319 val &= ~PCI_Y2_PHY1_COMA; 1320 if (sc->msk_num_port > 1) 1321 val &= ~PCI_Y2_PHY2_COMA; 1322 } 1323 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, val); 1324 1325 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | 1326 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS | 1327 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS; 1328 if (sc->msk_hw_id == CHIP_ID_YUKON_XL && 1329 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { 1330 /* Enable bits are inverted. */ 1331 val = 0; 1332 } 1333 /* 1334 * Disable PCI & Core Clock, disable clock gating for 1335 * both Links. 1336 */ 1337 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val); 1338 CSR_WRITE_1(sc, B0_POWER_CTRL, 1339 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF); 1340 break; 1341 default: 1342 break; 1343 } 1344 } 1345 1346 static void 1347 mskc_reset(struct msk_softc *sc) 1348 { 1349 bus_addr_t addr; 1350 uint16_t status; 1351 uint32_t val; 1352 int i, initram; 1353 1354 /* Disable ASF. */ 1355 if (sc->msk_hw_id >= CHIP_ID_YUKON_XL && 1356 sc->msk_hw_id <= CHIP_ID_YUKON_SUPR) { 1357 if (sc->msk_hw_id == CHIP_ID_YUKON_EX || 1358 sc->msk_hw_id == CHIP_ID_YUKON_SUPR) { 1359 CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0); 1360 status = CSR_READ_2(sc, B28_Y2_ASF_HCU_CCSR); 1361 /* Clear AHB bridge & microcontroller reset. */ 1362 status &= ~(Y2_ASF_HCU_CCSR_AHB_RST | 1363 Y2_ASF_HCU_CCSR_CPU_RST_MODE); 1364 /* Clear ASF microcontroller state. */ 1365 status &= ~Y2_ASF_HCU_CCSR_UC_STATE_MSK; 1366 status &= ~Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE_MSK; 1367 CSR_WRITE_2(sc, B28_Y2_ASF_HCU_CCSR, status); 1368 CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0); 1369 } else 1370 CSR_WRITE_1(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); 1371 CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE); 1372 /* 1373 * Since we disabled ASF, S/W reset is required for 1374 * Power Management. 1375 */ 1376 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 1377 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR); 1378 } 1379 1380 /* Clear all error bits in the PCI status register. */ 1381 status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2); 1382 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); 1383 1384 pci_write_config(sc->msk_dev, PCIR_STATUS, status | 1385 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT | 1386 PCIM_STATUS_RTABORT | PCIM_STATUS_MDPERR, 2); 1387 CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR); 1388 1389 switch (sc->msk_bustype) { 1390 case MSK_PEX_BUS: 1391 /* Clear all PEX errors. */ 1392 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff); 1393 val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT); 1394 if ((val & PEX_RX_OV) != 0) { 1395 sc->msk_intrmask &= ~Y2_IS_HW_ERR; 1396 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP; 1397 } 1398 break; 1399 case MSK_PCI_BUS: 1400 case MSK_PCIX_BUS: 1401 /* Set Cache Line Size to 2(8bytes) if configured to 0. */ 1402 val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1); 1403 if (val == 0) 1404 pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1); 1405 if (sc->msk_bustype == MSK_PCIX_BUS) { 1406 /* Set Cache Line Size opt. */ 1407 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4); 1408 val |= PCI_CLS_OPT; 1409 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4); 1410 } 1411 break; 1412 } 1413 /* Set PHY power state. */ 1414 msk_phy_power(sc, MSK_PHY_POWERUP); 1415 1416 /* Reset GPHY/GMAC Control */ 1417 for (i = 0; i < sc->msk_num_port; i++) { 1418 /* GPHY Control reset. */ 1419 CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET); 1420 CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR); 1421 /* GMAC Control reset. */ 1422 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET); 1423 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR); 1424 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF); 1425 if (sc->msk_hw_id == CHIP_ID_YUKON_EX || 1426 sc->msk_hw_id == CHIP_ID_YUKON_SUPR) 1427 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), 1428 GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON | 1429 GMC_BYP_RETR_ON); 1430 } 1431 1432 if (sc->msk_hw_id == CHIP_ID_YUKON_SUPR && 1433 sc->msk_hw_rev > CHIP_REV_YU_SU_B0) 1434 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, PCI_CLK_MACSEC_DIS); 1435 if (sc->msk_hw_id == CHIP_ID_YUKON_OPT && sc->msk_hw_rev == 0) { 1436 /* Disable PCIe PHY powerdown(reg 0x80, bit7). */ 1437 CSR_WRITE_4(sc, Y2_PEX_PHY_DATA, (0x0080 << 16) | 0x0080); 1438 } 1439 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 1440 1441 /* LED On. */ 1442 CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON); 1443 1444 /* Clear TWSI IRQ. */ 1445 CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ); 1446 1447 /* Turn off hardware timer. */ 1448 CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP); 1449 CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ); 1450 1451 /* Turn off descriptor polling. */ 1452 CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP); 1453 1454 /* Turn off time stamps. */ 1455 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP); 1456 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); 1457 1458 initram = 0; 1459 if (sc->msk_hw_id == CHIP_ID_YUKON_XL || 1460 sc->msk_hw_id == CHIP_ID_YUKON_EC || 1461 sc->msk_hw_id == CHIP_ID_YUKON_FE) 1462 initram++; 1463 1464 /* Configure timeout values. */ 1465 for (i = 0; initram > 0 && i < sc->msk_num_port; i++) { 1466 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET); 1467 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR); 1468 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1), 1469 MSK_RI_TO_53); 1470 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1), 1471 MSK_RI_TO_53); 1472 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1), 1473 MSK_RI_TO_53); 1474 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1), 1475 MSK_RI_TO_53); 1476 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1), 1477 MSK_RI_TO_53); 1478 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1), 1479 MSK_RI_TO_53); 1480 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2), 1481 MSK_RI_TO_53); 1482 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2), 1483 MSK_RI_TO_53); 1484 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2), 1485 MSK_RI_TO_53); 1486 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2), 1487 MSK_RI_TO_53); 1488 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2), 1489 MSK_RI_TO_53); 1490 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2), 1491 MSK_RI_TO_53); 1492 } 1493 1494 /* Disable all interrupts. */ 1495 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 1496 CSR_READ_4(sc, B0_HWE_IMSK); 1497 CSR_WRITE_4(sc, B0_IMSK, 0); 1498 CSR_READ_4(sc, B0_IMSK); 1499 1500 /* 1501 * On dual port PCI-X card, there is an problem where status 1502 * can be received out of order due to split transactions. 1503 */ 1504 if (sc->msk_pcixcap != 0 && sc->msk_num_port > 1) { 1505 uint16_t pcix_cmd; 1506 1507 pcix_cmd = pci_read_config(sc->msk_dev, 1508 sc->msk_pcixcap + PCIXR_COMMAND, 2); 1509 /* Clear Max Outstanding Split Transactions. */ 1510 pcix_cmd &= ~PCIXM_COMMAND_MAX_SPLITS; 1511 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); 1512 pci_write_config(sc->msk_dev, 1513 sc->msk_pcixcap + PCIXR_COMMAND, pcix_cmd, 2); 1514 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 1515 } 1516 if (sc->msk_expcap != 0) { 1517 /* Change Max. Read Request Size to 2048 bytes. */ 1518 if (pci_get_max_read_req(sc->msk_dev) == 512) 1519 pci_set_max_read_req(sc->msk_dev, 2048); 1520 } 1521 1522 /* Clear status list. */ 1523 bzero(sc->msk_stat_ring, 1524 sizeof(struct msk_stat_desc) * sc->msk_stat_count); 1525 sc->msk_stat_cons = 0; 1526 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map, 1527 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1528 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET); 1529 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR); 1530 /* Set the status list base address. */ 1531 addr = sc->msk_stat_ring_paddr; 1532 CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr)); 1533 CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr)); 1534 /* Set the status list last index. */ 1535 CSR_WRITE_2(sc, STAT_LAST_IDX, sc->msk_stat_count - 1); 1536 if (sc->msk_hw_id == CHIP_ID_YUKON_EC && 1537 sc->msk_hw_rev == CHIP_REV_YU_EC_A1) { 1538 /* WA for dev. #4.3 */ 1539 CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK); 1540 /* WA for dev. #4.18 */ 1541 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21); 1542 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07); 1543 } else { 1544 CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a); 1545 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10); 1546 if (sc->msk_hw_id == CHIP_ID_YUKON_XL && 1547 sc->msk_hw_rev == CHIP_REV_YU_XL_A0) 1548 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04); 1549 else 1550 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10); 1551 CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190); 1552 } 1553 /* 1554 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI. 1555 */ 1556 CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000)); 1557 1558 /* Enable status unit. */ 1559 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON); 1560 1561 CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START); 1562 CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START); 1563 CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START); 1564 } 1565 1566 static int 1567 msk_probe(device_t dev) 1568 { 1569 struct msk_softc *sc; 1570 char desc[100]; 1571 1572 sc = device_get_softc(device_get_parent(dev)); 1573 /* 1574 * Not much to do here. We always know there will be 1575 * at least one GMAC present, and if there are two, 1576 * mskc_attach() will create a second device instance 1577 * for us. 1578 */ 1579 snprintf(desc, sizeof(desc), 1580 "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x", 1581 model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id, 1582 sc->msk_hw_rev); 1583 device_set_desc_copy(dev, desc); 1584 1585 return (BUS_PROBE_DEFAULT); 1586 } 1587 1588 static int 1589 msk_attach(device_t dev) 1590 { 1591 struct msk_softc *sc; 1592 struct msk_if_softc *sc_if; 1593 struct ifnet *ifp; 1594 struct msk_mii_data *mmd; 1595 int i, port, error; 1596 uint8_t eaddr[6]; 1597 1598 if (dev == NULL) 1599 return (EINVAL); 1600 1601 error = 0; 1602 sc_if = device_get_softc(dev); 1603 sc = device_get_softc(device_get_parent(dev)); 1604 mmd = device_get_ivars(dev); 1605 port = mmd->port; 1606 1607 sc_if->msk_if_dev = dev; 1608 sc_if->msk_port = port; 1609 sc_if->msk_softc = sc; 1610 sc_if->msk_flags = sc->msk_pflags; 1611 sc->msk_if[port] = sc_if; 1612 /* Setup Tx/Rx queue register offsets. */ 1613 if (port == MSK_PORT_A) { 1614 sc_if->msk_txq = Q_XA1; 1615 sc_if->msk_txsq = Q_XS1; 1616 sc_if->msk_rxq = Q_R1; 1617 } else { 1618 sc_if->msk_txq = Q_XA2; 1619 sc_if->msk_txsq = Q_XS2; 1620 sc_if->msk_rxq = Q_R2; 1621 } 1622 1623 callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0); 1624 msk_sysctl_node(sc_if); 1625 1626 if ((error = msk_txrx_dma_alloc(sc_if) != 0)) 1627 goto fail; 1628 msk_rx_dma_jalloc(sc_if); 1629 1630 ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER); 1631 if (ifp == NULL) { 1632 device_printf(sc_if->msk_if_dev, "can not if_alloc()\n"); 1633 error = ENOSPC; 1634 goto fail; 1635 } 1636 ifp->if_softc = sc_if; 1637 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1638 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1639 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4; 1640 /* 1641 * Enable Rx checksum offloading if controller supports 1642 * new descriptor formant and controller is not Yukon XL. 1643 */ 1644 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 && 1645 sc->msk_hw_id != CHIP_ID_YUKON_XL) 1646 ifp->if_capabilities |= IFCAP_RXCSUM; 1647 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 && 1648 (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0) 1649 ifp->if_capabilities |= IFCAP_RXCSUM; 1650 ifp->if_hwassist = MSK_CSUM_FEATURES | CSUM_TSO; 1651 ifp->if_capenable = ifp->if_capabilities; 1652 ifp->if_ioctl = msk_ioctl; 1653 ifp->if_start = msk_start; 1654 ifp->if_init = msk_init; 1655 IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1); 1656 ifp->if_snd.ifq_drv_maxlen = MSK_TX_RING_CNT - 1; 1657 IFQ_SET_READY(&ifp->if_snd); 1658 /* 1659 * Get station address for this interface. Note that 1660 * dual port cards actually come with three station 1661 * addresses: one for each port, plus an extra. The 1662 * extra one is used by the SysKonnect driver software 1663 * as a 'virtual' station address for when both ports 1664 * are operating in failover mode. Currently we don't 1665 * use this extra address. 1666 */ 1667 MSK_IF_LOCK(sc_if); 1668 for (i = 0; i < ETHER_ADDR_LEN; i++) 1669 eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i); 1670 1671 /* 1672 * Call MI attach routine. Can't hold locks when calling into ether_*. 1673 */ 1674 MSK_IF_UNLOCK(sc_if); 1675 ether_ifattach(ifp, eaddr); 1676 MSK_IF_LOCK(sc_if); 1677 1678 /* VLAN capability setup */ 1679 ifp->if_capabilities |= IFCAP_VLAN_MTU; 1680 if ((sc_if->msk_flags & MSK_FLAG_NOHWVLAN) == 0) { 1681 /* 1682 * Due to Tx checksum offload hardware bugs, msk(4) manually 1683 * computes checksum for short frames. For VLAN tagged frames 1684 * this workaround does not work so disable checksum offload 1685 * for VLAN interface. 1686 */ 1687 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO; 1688 /* 1689 * Enable Rx checksum offloading for VLAN tagged frames 1690 * if controller support new descriptor format. 1691 */ 1692 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 && 1693 (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0) 1694 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 1695 } 1696 ifp->if_capenable = ifp->if_capabilities; 1697 /* 1698 * Disable RX checksum offloading on controllers that don't use 1699 * new descriptor format but give chance to enable it. 1700 */ 1701 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0) 1702 ifp->if_capenable &= ~IFCAP_RXCSUM; 1703 1704 /* 1705 * Tell the upper layer(s) we support long frames. 1706 * Must appear after the call to ether_ifattach() because 1707 * ether_ifattach() sets ifi_hdrlen to the default value. 1708 */ 1709 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 1710 1711 /* 1712 * Do miibus setup. 1713 */ 1714 MSK_IF_UNLOCK(sc_if); 1715 error = mii_attach(dev, &sc_if->msk_miibus, ifp, msk_mediachange, 1716 msk_mediastatus, BMSR_DEFCAPMASK, PHY_ADDR_MARV, MII_OFFSET_ANY, 1717 mmd->mii_flags); 1718 if (error != 0) { 1719 device_printf(sc_if->msk_if_dev, "attaching PHYs failed\n"); 1720 ether_ifdetach(ifp); 1721 error = ENXIO; 1722 goto fail; 1723 } 1724 1725 fail: 1726 if (error != 0) { 1727 /* Access should be ok even though lock has been dropped */ 1728 sc->msk_if[port] = NULL; 1729 msk_detach(dev); 1730 } 1731 1732 return (error); 1733 } 1734 1735 /* 1736 * Attach the interface. Allocate softc structures, do ifmedia 1737 * setup and ethernet/BPF attach. 1738 */ 1739 static int 1740 mskc_attach(device_t dev) 1741 { 1742 struct msk_softc *sc; 1743 struct msk_mii_data *mmd; 1744 int error, msic, msir, reg; 1745 1746 sc = device_get_softc(dev); 1747 sc->msk_dev = dev; 1748 mtx_init(&sc->msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 1749 MTX_DEF); 1750 1751 /* 1752 * Map control/status registers. 1753 */ 1754 pci_enable_busmaster(dev); 1755 1756 /* Allocate I/O resource */ 1757 #ifdef MSK_USEIOSPACE 1758 sc->msk_res_spec = msk_res_spec_io; 1759 #else 1760 sc->msk_res_spec = msk_res_spec_mem; 1761 #endif 1762 sc->msk_irq_spec = msk_irq_spec_legacy; 1763 error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res); 1764 if (error) { 1765 if (sc->msk_res_spec == msk_res_spec_mem) 1766 sc->msk_res_spec = msk_res_spec_io; 1767 else 1768 sc->msk_res_spec = msk_res_spec_mem; 1769 error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res); 1770 if (error) { 1771 device_printf(dev, "couldn't allocate %s resources\n", 1772 sc->msk_res_spec == msk_res_spec_mem ? "memory" : 1773 "I/O"); 1774 mtx_destroy(&sc->msk_mtx); 1775 return (ENXIO); 1776 } 1777 } 1778 1779 /* Enable all clocks before accessing any registers. */ 1780 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0); 1781 1782 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR); 1783 sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID); 1784 sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f; 1785 /* Bail out if chip is not recognized. */ 1786 if (sc->msk_hw_id < CHIP_ID_YUKON_XL || 1787 sc->msk_hw_id > CHIP_ID_YUKON_OPT || 1788 sc->msk_hw_id == CHIP_ID_YUKON_UNKNOWN) { 1789 device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n", 1790 sc->msk_hw_id, sc->msk_hw_rev); 1791 mtx_destroy(&sc->msk_mtx); 1792 return (ENXIO); 1793 } 1794 1795 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 1796 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 1797 OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW, 1798 &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I", 1799 "max number of Rx events to process"); 1800 1801 sc->msk_process_limit = MSK_PROC_DEFAULT; 1802 error = resource_int_value(device_get_name(dev), device_get_unit(dev), 1803 "process_limit", &sc->msk_process_limit); 1804 if (error == 0) { 1805 if (sc->msk_process_limit < MSK_PROC_MIN || 1806 sc->msk_process_limit > MSK_PROC_MAX) { 1807 device_printf(dev, "process_limit value out of range; " 1808 "using default: %d\n", MSK_PROC_DEFAULT); 1809 sc->msk_process_limit = MSK_PROC_DEFAULT; 1810 } 1811 } 1812 1813 sc->msk_int_holdoff = MSK_INT_HOLDOFF_DEFAULT; 1814 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), 1815 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, 1816 "int_holdoff", CTLFLAG_RW, &sc->msk_int_holdoff, 0, 1817 "Maximum number of time to delay interrupts"); 1818 resource_int_value(device_get_name(dev), device_get_unit(dev), 1819 "int_holdoff", &sc->msk_int_holdoff); 1820 1821 sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP); 1822 /* Check number of MACs. */ 1823 sc->msk_num_port = 1; 1824 if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) == 1825 CFG_DUAL_MAC_MSK) { 1826 if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC)) 1827 sc->msk_num_port++; 1828 } 1829 1830 /* Check bus type. */ 1831 if (pci_find_cap(sc->msk_dev, PCIY_EXPRESS, ®) == 0) { 1832 sc->msk_bustype = MSK_PEX_BUS; 1833 sc->msk_expcap = reg; 1834 } else if (pci_find_cap(sc->msk_dev, PCIY_PCIX, ®) == 0) { 1835 sc->msk_bustype = MSK_PCIX_BUS; 1836 sc->msk_pcixcap = reg; 1837 } else 1838 sc->msk_bustype = MSK_PCI_BUS; 1839 1840 switch (sc->msk_hw_id) { 1841 case CHIP_ID_YUKON_EC: 1842 sc->msk_clock = 125; /* 125 MHz */ 1843 sc->msk_pflags |= MSK_FLAG_JUMBO; 1844 break; 1845 case CHIP_ID_YUKON_EC_U: 1846 sc->msk_clock = 125; /* 125 MHz */ 1847 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_JUMBO_NOCSUM; 1848 break; 1849 case CHIP_ID_YUKON_EX: 1850 sc->msk_clock = 125; /* 125 MHz */ 1851 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 | 1852 MSK_FLAG_AUTOTX_CSUM; 1853 /* 1854 * Yukon Extreme seems to have silicon bug for 1855 * automatic Tx checksum calculation capability. 1856 */ 1857 if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0) 1858 sc->msk_pflags &= ~MSK_FLAG_AUTOTX_CSUM; 1859 /* 1860 * Yukon Extreme A0 could not use store-and-forward 1861 * for jumbo frames, so disable Tx checksum 1862 * offloading for jumbo frames. 1863 */ 1864 if (sc->msk_hw_rev == CHIP_REV_YU_EX_A0) 1865 sc->msk_pflags |= MSK_FLAG_JUMBO_NOCSUM; 1866 break; 1867 case CHIP_ID_YUKON_FE: 1868 sc->msk_clock = 100; /* 100 MHz */ 1869 sc->msk_pflags |= MSK_FLAG_FASTETHER; 1870 break; 1871 case CHIP_ID_YUKON_FE_P: 1872 sc->msk_clock = 50; /* 50 MHz */ 1873 sc->msk_pflags |= MSK_FLAG_FASTETHER | MSK_FLAG_DESCV2 | 1874 MSK_FLAG_AUTOTX_CSUM; 1875 if (sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) { 1876 /* 1877 * XXX 1878 * FE+ A0 has status LE writeback bug so msk(4) 1879 * does not rely on status word of received frame 1880 * in msk_rxeof() which in turn disables all 1881 * hardware assistance bits reported by the status 1882 * word as well as validity of the received frame. 1883 * Just pass received frames to upper stack with 1884 * minimal test and let upper stack handle them. 1885 */ 1886 sc->msk_pflags |= MSK_FLAG_NOHWVLAN | 1887 MSK_FLAG_NORXCHK | MSK_FLAG_NORX_CSUM; 1888 } 1889 break; 1890 case CHIP_ID_YUKON_XL: 1891 sc->msk_clock = 156; /* 156 MHz */ 1892 sc->msk_pflags |= MSK_FLAG_JUMBO; 1893 break; 1894 case CHIP_ID_YUKON_SUPR: 1895 sc->msk_clock = 125; /* 125 MHz */ 1896 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 | 1897 MSK_FLAG_AUTOTX_CSUM; 1898 break; 1899 case CHIP_ID_YUKON_UL_2: 1900 sc->msk_clock = 125; /* 125 MHz */ 1901 sc->msk_pflags |= MSK_FLAG_JUMBO; 1902 break; 1903 case CHIP_ID_YUKON_OPT: 1904 sc->msk_clock = 125; /* 125 MHz */ 1905 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2; 1906 break; 1907 default: 1908 sc->msk_clock = 156; /* 156 MHz */ 1909 break; 1910 } 1911 1912 /* Allocate IRQ resources. */ 1913 msic = pci_msi_count(dev); 1914 if (bootverbose) 1915 device_printf(dev, "MSI count : %d\n", msic); 1916 if (legacy_intr != 0) 1917 msi_disable = 1; 1918 if (msi_disable == 0 && msic > 0) { 1919 msir = 1; 1920 if (pci_alloc_msi(dev, &msir) == 0) { 1921 if (msir == 1) { 1922 sc->msk_pflags |= MSK_FLAG_MSI; 1923 sc->msk_irq_spec = msk_irq_spec_msi; 1924 } else 1925 pci_release_msi(dev); 1926 } 1927 } 1928 1929 error = bus_alloc_resources(dev, sc->msk_irq_spec, sc->msk_irq); 1930 if (error) { 1931 device_printf(dev, "couldn't allocate IRQ resources\n"); 1932 goto fail; 1933 } 1934 1935 if ((error = msk_status_dma_alloc(sc)) != 0) 1936 goto fail; 1937 1938 /* Set base interrupt mask. */ 1939 sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU; 1940 sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR | 1941 Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP; 1942 1943 /* Reset the adapter. */ 1944 mskc_reset(sc); 1945 1946 if ((error = mskc_setup_rambuffer(sc)) != 0) 1947 goto fail; 1948 1949 sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1); 1950 if (sc->msk_devs[MSK_PORT_A] == NULL) { 1951 device_printf(dev, "failed to add child for PORT_A\n"); 1952 error = ENXIO; 1953 goto fail; 1954 } 1955 mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK | M_ZERO); 1956 if (mmd == NULL) { 1957 device_printf(dev, "failed to allocate memory for " 1958 "ivars of PORT_A\n"); 1959 error = ENXIO; 1960 goto fail; 1961 } 1962 mmd->port = MSK_PORT_A; 1963 mmd->pmd = sc->msk_pmd; 1964 mmd->mii_flags |= MIIF_DOPAUSE; 1965 if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S') 1966 mmd->mii_flags |= MIIF_HAVEFIBER; 1967 if (sc->msk_pmd == 'P') 1968 mmd->mii_flags |= MIIF_HAVEFIBER | MIIF_MACPRIV0; 1969 device_set_ivars(sc->msk_devs[MSK_PORT_A], mmd); 1970 1971 if (sc->msk_num_port > 1) { 1972 sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1); 1973 if (sc->msk_devs[MSK_PORT_B] == NULL) { 1974 device_printf(dev, "failed to add child for PORT_B\n"); 1975 error = ENXIO; 1976 goto fail; 1977 } 1978 mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK | 1979 M_ZERO); 1980 if (mmd == NULL) { 1981 device_printf(dev, "failed to allocate memory for " 1982 "ivars of PORT_B\n"); 1983 error = ENXIO; 1984 goto fail; 1985 } 1986 mmd->port = MSK_PORT_B; 1987 mmd->pmd = sc->msk_pmd; 1988 if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S') 1989 mmd->mii_flags |= MIIF_HAVEFIBER; 1990 if (sc->msk_pmd == 'P') 1991 mmd->mii_flags |= MIIF_HAVEFIBER | MIIF_MACPRIV0; 1992 device_set_ivars(sc->msk_devs[MSK_PORT_B], mmd); 1993 } 1994 1995 error = bus_generic_attach(dev); 1996 if (error) { 1997 device_printf(dev, "failed to attach port(s)\n"); 1998 goto fail; 1999 } 2000 2001 /* Hook interrupt last to avoid having to lock softc. */ 2002 error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET | 2003 INTR_MPSAFE, NULL, msk_intr, sc, &sc->msk_intrhand); 2004 if (error != 0) { 2005 device_printf(dev, "couldn't set up interrupt handler\n"); 2006 goto fail; 2007 } 2008 fail: 2009 if (error != 0) 2010 mskc_detach(dev); 2011 2012 return (error); 2013 } 2014 2015 /* 2016 * Shutdown hardware and free up resources. This can be called any 2017 * time after the mutex has been initialized. It is called in both 2018 * the error case in attach and the normal detach case so it needs 2019 * to be careful about only freeing resources that have actually been 2020 * allocated. 2021 */ 2022 static int 2023 msk_detach(device_t dev) 2024 { 2025 struct msk_softc *sc; 2026 struct msk_if_softc *sc_if; 2027 struct ifnet *ifp; 2028 2029 sc_if = device_get_softc(dev); 2030 KASSERT(mtx_initialized(&sc_if->msk_softc->msk_mtx), 2031 ("msk mutex not initialized in msk_detach")); 2032 MSK_IF_LOCK(sc_if); 2033 2034 ifp = sc_if->msk_ifp; 2035 if (device_is_attached(dev)) { 2036 /* XXX */ 2037 sc_if->msk_flags |= MSK_FLAG_DETACH; 2038 msk_stop(sc_if); 2039 /* Can't hold locks while calling detach. */ 2040 MSK_IF_UNLOCK(sc_if); 2041 callout_drain(&sc_if->msk_tick_ch); 2042 if (ifp) 2043 ether_ifdetach(ifp); 2044 MSK_IF_LOCK(sc_if); 2045 } 2046 2047 /* 2048 * We're generally called from mskc_detach() which is using 2049 * device_delete_child() to get to here. It's already trashed 2050 * miibus for us, so don't do it here or we'll panic. 2051 * 2052 * if (sc_if->msk_miibus != NULL) { 2053 * device_delete_child(dev, sc_if->msk_miibus); 2054 * sc_if->msk_miibus = NULL; 2055 * } 2056 */ 2057 2058 msk_rx_dma_jfree(sc_if); 2059 msk_txrx_dma_free(sc_if); 2060 bus_generic_detach(dev); 2061 2062 if (ifp) 2063 if_free(ifp); 2064 sc = sc_if->msk_softc; 2065 sc->msk_if[sc_if->msk_port] = NULL; 2066 MSK_IF_UNLOCK(sc_if); 2067 2068 return (0); 2069 } 2070 2071 static int 2072 mskc_detach(device_t dev) 2073 { 2074 struct msk_softc *sc; 2075 2076 sc = device_get_softc(dev); 2077 KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized")); 2078 2079 if (device_is_alive(dev)) { 2080 if (sc->msk_devs[MSK_PORT_A] != NULL) { 2081 free(device_get_ivars(sc->msk_devs[MSK_PORT_A]), 2082 M_DEVBUF); 2083 device_delete_child(dev, sc->msk_devs[MSK_PORT_A]); 2084 } 2085 if (sc->msk_devs[MSK_PORT_B] != NULL) { 2086 free(device_get_ivars(sc->msk_devs[MSK_PORT_B]), 2087 M_DEVBUF); 2088 device_delete_child(dev, sc->msk_devs[MSK_PORT_B]); 2089 } 2090 bus_generic_detach(dev); 2091 } 2092 2093 /* Disable all interrupts. */ 2094 CSR_WRITE_4(sc, B0_IMSK, 0); 2095 CSR_READ_4(sc, B0_IMSK); 2096 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 2097 CSR_READ_4(sc, B0_HWE_IMSK); 2098 2099 /* LED Off. */ 2100 CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF); 2101 2102 /* Put hardware reset. */ 2103 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 2104 2105 msk_status_dma_free(sc); 2106 2107 if (sc->msk_intrhand) { 2108 bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand); 2109 sc->msk_intrhand = NULL; 2110 } 2111 bus_release_resources(dev, sc->msk_irq_spec, sc->msk_irq); 2112 if ((sc->msk_pflags & MSK_FLAG_MSI) != 0) 2113 pci_release_msi(dev); 2114 bus_release_resources(dev, sc->msk_res_spec, sc->msk_res); 2115 mtx_destroy(&sc->msk_mtx); 2116 2117 return (0); 2118 } 2119 2120 static bus_dma_tag_t 2121 mskc_get_dma_tag(device_t bus, device_t child __unused) 2122 { 2123 2124 return (bus_get_dma_tag(bus)); 2125 } 2126 2127 struct msk_dmamap_arg { 2128 bus_addr_t msk_busaddr; 2129 }; 2130 2131 static void 2132 msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 2133 { 2134 struct msk_dmamap_arg *ctx; 2135 2136 if (error != 0) 2137 return; 2138 ctx = arg; 2139 ctx->msk_busaddr = segs[0].ds_addr; 2140 } 2141 2142 /* Create status DMA region. */ 2143 static int 2144 msk_status_dma_alloc(struct msk_softc *sc) 2145 { 2146 struct msk_dmamap_arg ctx; 2147 bus_size_t stat_sz; 2148 int count, error; 2149 2150 /* 2151 * It seems controller requires number of status LE entries 2152 * is power of 2 and the maximum number of status LE entries 2153 * is 4096. For dual-port controllers, the number of status 2154 * LE entries should be large enough to hold both port's 2155 * status updates. 2156 */ 2157 count = 3 * MSK_RX_RING_CNT + MSK_TX_RING_CNT; 2158 count = imin(4096, roundup2(count, 1024)); 2159 sc->msk_stat_count = count; 2160 stat_sz = count * sizeof(struct msk_stat_desc); 2161 error = bus_dma_tag_create( 2162 bus_get_dma_tag(sc->msk_dev), /* parent */ 2163 MSK_STAT_ALIGN, 0, /* alignment, boundary */ 2164 BUS_SPACE_MAXADDR, /* lowaddr */ 2165 BUS_SPACE_MAXADDR, /* highaddr */ 2166 NULL, NULL, /* filter, filterarg */ 2167 stat_sz, /* maxsize */ 2168 1, /* nsegments */ 2169 stat_sz, /* maxsegsize */ 2170 0, /* flags */ 2171 NULL, NULL, /* lockfunc, lockarg */ 2172 &sc->msk_stat_tag); 2173 if (error != 0) { 2174 device_printf(sc->msk_dev, 2175 "failed to create status DMA tag\n"); 2176 return (error); 2177 } 2178 2179 /* Allocate DMA'able memory and load the DMA map for status ring. */ 2180 error = bus_dmamem_alloc(sc->msk_stat_tag, 2181 (void **)&sc->msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT | 2182 BUS_DMA_ZERO, &sc->msk_stat_map); 2183 if (error != 0) { 2184 device_printf(sc->msk_dev, 2185 "failed to allocate DMA'able memory for status ring\n"); 2186 return (error); 2187 } 2188 2189 ctx.msk_busaddr = 0; 2190 error = bus_dmamap_load(sc->msk_stat_tag, sc->msk_stat_map, 2191 sc->msk_stat_ring, stat_sz, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 2192 if (error != 0) { 2193 device_printf(sc->msk_dev, 2194 "failed to load DMA'able memory for status ring\n"); 2195 return (error); 2196 } 2197 sc->msk_stat_ring_paddr = ctx.msk_busaddr; 2198 2199 return (0); 2200 } 2201 2202 static void 2203 msk_status_dma_free(struct msk_softc *sc) 2204 { 2205 2206 /* Destroy status block. */ 2207 if (sc->msk_stat_tag) { 2208 if (sc->msk_stat_ring_paddr) { 2209 bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map); 2210 sc->msk_stat_ring_paddr = 0; 2211 } 2212 if (sc->msk_stat_ring) { 2213 bus_dmamem_free(sc->msk_stat_tag, 2214 sc->msk_stat_ring, sc->msk_stat_map); 2215 sc->msk_stat_ring = NULL; 2216 } 2217 bus_dma_tag_destroy(sc->msk_stat_tag); 2218 sc->msk_stat_tag = NULL; 2219 } 2220 } 2221 2222 static int 2223 msk_txrx_dma_alloc(struct msk_if_softc *sc_if) 2224 { 2225 struct msk_dmamap_arg ctx; 2226 struct msk_txdesc *txd; 2227 struct msk_rxdesc *rxd; 2228 bus_size_t rxalign; 2229 int error, i; 2230 2231 /* Create parent DMA tag. */ 2232 error = bus_dma_tag_create( 2233 bus_get_dma_tag(sc_if->msk_if_dev), /* parent */ 2234 1, 0, /* alignment, boundary */ 2235 BUS_SPACE_MAXADDR, /* lowaddr */ 2236 BUS_SPACE_MAXADDR, /* highaddr */ 2237 NULL, NULL, /* filter, filterarg */ 2238 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 2239 0, /* nsegments */ 2240 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 2241 0, /* flags */ 2242 NULL, NULL, /* lockfunc, lockarg */ 2243 &sc_if->msk_cdata.msk_parent_tag); 2244 if (error != 0) { 2245 device_printf(sc_if->msk_if_dev, 2246 "failed to create parent DMA tag\n"); 2247 goto fail; 2248 } 2249 /* Create tag for Tx ring. */ 2250 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2251 MSK_RING_ALIGN, 0, /* alignment, boundary */ 2252 BUS_SPACE_MAXADDR, /* lowaddr */ 2253 BUS_SPACE_MAXADDR, /* highaddr */ 2254 NULL, NULL, /* filter, filterarg */ 2255 MSK_TX_RING_SZ, /* maxsize */ 2256 1, /* nsegments */ 2257 MSK_TX_RING_SZ, /* maxsegsize */ 2258 0, /* flags */ 2259 NULL, NULL, /* lockfunc, lockarg */ 2260 &sc_if->msk_cdata.msk_tx_ring_tag); 2261 if (error != 0) { 2262 device_printf(sc_if->msk_if_dev, 2263 "failed to create Tx ring DMA tag\n"); 2264 goto fail; 2265 } 2266 2267 /* Create tag for Rx ring. */ 2268 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2269 MSK_RING_ALIGN, 0, /* alignment, boundary */ 2270 BUS_SPACE_MAXADDR, /* lowaddr */ 2271 BUS_SPACE_MAXADDR, /* highaddr */ 2272 NULL, NULL, /* filter, filterarg */ 2273 MSK_RX_RING_SZ, /* maxsize */ 2274 1, /* nsegments */ 2275 MSK_RX_RING_SZ, /* maxsegsize */ 2276 0, /* flags */ 2277 NULL, NULL, /* lockfunc, lockarg */ 2278 &sc_if->msk_cdata.msk_rx_ring_tag); 2279 if (error != 0) { 2280 device_printf(sc_if->msk_if_dev, 2281 "failed to create Rx ring DMA tag\n"); 2282 goto fail; 2283 } 2284 2285 /* Create tag for Tx buffers. */ 2286 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2287 1, 0, /* alignment, boundary */ 2288 BUS_SPACE_MAXADDR, /* lowaddr */ 2289 BUS_SPACE_MAXADDR, /* highaddr */ 2290 NULL, NULL, /* filter, filterarg */ 2291 MSK_TSO_MAXSIZE, /* maxsize */ 2292 MSK_MAXTXSEGS, /* nsegments */ 2293 MSK_TSO_MAXSGSIZE, /* maxsegsize */ 2294 0, /* flags */ 2295 NULL, NULL, /* lockfunc, lockarg */ 2296 &sc_if->msk_cdata.msk_tx_tag); 2297 if (error != 0) { 2298 device_printf(sc_if->msk_if_dev, 2299 "failed to create Tx DMA tag\n"); 2300 goto fail; 2301 } 2302 2303 rxalign = 1; 2304 /* 2305 * Workaround hardware hang which seems to happen when Rx buffer 2306 * is not aligned on multiple of FIFO word(8 bytes). 2307 */ 2308 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0) 2309 rxalign = MSK_RX_BUF_ALIGN; 2310 /* Create tag for Rx buffers. */ 2311 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2312 rxalign, 0, /* alignment, boundary */ 2313 BUS_SPACE_MAXADDR, /* lowaddr */ 2314 BUS_SPACE_MAXADDR, /* highaddr */ 2315 NULL, NULL, /* filter, filterarg */ 2316 MCLBYTES, /* maxsize */ 2317 1, /* nsegments */ 2318 MCLBYTES, /* maxsegsize */ 2319 0, /* flags */ 2320 NULL, NULL, /* lockfunc, lockarg */ 2321 &sc_if->msk_cdata.msk_rx_tag); 2322 if (error != 0) { 2323 device_printf(sc_if->msk_if_dev, 2324 "failed to create Rx DMA tag\n"); 2325 goto fail; 2326 } 2327 2328 /* Allocate DMA'able memory and load the DMA map for Tx ring. */ 2329 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag, 2330 (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK | 2331 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_tx_ring_map); 2332 if (error != 0) { 2333 device_printf(sc_if->msk_if_dev, 2334 "failed to allocate DMA'able memory for Tx ring\n"); 2335 goto fail; 2336 } 2337 2338 ctx.msk_busaddr = 0; 2339 error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag, 2340 sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring, 2341 MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 2342 if (error != 0) { 2343 device_printf(sc_if->msk_if_dev, 2344 "failed to load DMA'able memory for Tx ring\n"); 2345 goto fail; 2346 } 2347 sc_if->msk_rdata.msk_tx_ring_paddr = ctx.msk_busaddr; 2348 2349 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ 2350 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_rx_ring_tag, 2351 (void **)&sc_if->msk_rdata.msk_rx_ring, BUS_DMA_WAITOK | 2352 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_rx_ring_map); 2353 if (error != 0) { 2354 device_printf(sc_if->msk_if_dev, 2355 "failed to allocate DMA'able memory for Rx ring\n"); 2356 goto fail; 2357 } 2358 2359 ctx.msk_busaddr = 0; 2360 error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag, 2361 sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring, 2362 MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 2363 if (error != 0) { 2364 device_printf(sc_if->msk_if_dev, 2365 "failed to load DMA'able memory for Rx ring\n"); 2366 goto fail; 2367 } 2368 sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr; 2369 2370 /* Create DMA maps for Tx buffers. */ 2371 for (i = 0; i < MSK_TX_RING_CNT; i++) { 2372 txd = &sc_if->msk_cdata.msk_txdesc[i]; 2373 txd->tx_m = NULL; 2374 txd->tx_dmamap = NULL; 2375 error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0, 2376 &txd->tx_dmamap); 2377 if (error != 0) { 2378 device_printf(sc_if->msk_if_dev, 2379 "failed to create Tx dmamap\n"); 2380 goto fail; 2381 } 2382 } 2383 /* Create DMA maps for Rx buffers. */ 2384 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0, 2385 &sc_if->msk_cdata.msk_rx_sparemap)) != 0) { 2386 device_printf(sc_if->msk_if_dev, 2387 "failed to create spare Rx dmamap\n"); 2388 goto fail; 2389 } 2390 for (i = 0; i < MSK_RX_RING_CNT; i++) { 2391 rxd = &sc_if->msk_cdata.msk_rxdesc[i]; 2392 rxd->rx_m = NULL; 2393 rxd->rx_dmamap = NULL; 2394 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0, 2395 &rxd->rx_dmamap); 2396 if (error != 0) { 2397 device_printf(sc_if->msk_if_dev, 2398 "failed to create Rx dmamap\n"); 2399 goto fail; 2400 } 2401 } 2402 2403 fail: 2404 return (error); 2405 } 2406 2407 static int 2408 msk_rx_dma_jalloc(struct msk_if_softc *sc_if) 2409 { 2410 struct msk_dmamap_arg ctx; 2411 struct msk_rxdesc *jrxd; 2412 bus_size_t rxalign; 2413 int error, i; 2414 2415 if (jumbo_disable != 0 || (sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) { 2416 sc_if->msk_flags &= ~MSK_FLAG_JUMBO; 2417 device_printf(sc_if->msk_if_dev, 2418 "disabling jumbo frame support\n"); 2419 return (0); 2420 } 2421 /* Create tag for jumbo Rx ring. */ 2422 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2423 MSK_RING_ALIGN, 0, /* alignment, boundary */ 2424 BUS_SPACE_MAXADDR, /* lowaddr */ 2425 BUS_SPACE_MAXADDR, /* highaddr */ 2426 NULL, NULL, /* filter, filterarg */ 2427 MSK_JUMBO_RX_RING_SZ, /* maxsize */ 2428 1, /* nsegments */ 2429 MSK_JUMBO_RX_RING_SZ, /* maxsegsize */ 2430 0, /* flags */ 2431 NULL, NULL, /* lockfunc, lockarg */ 2432 &sc_if->msk_cdata.msk_jumbo_rx_ring_tag); 2433 if (error != 0) { 2434 device_printf(sc_if->msk_if_dev, 2435 "failed to create jumbo Rx ring DMA tag\n"); 2436 goto jumbo_fail; 2437 } 2438 2439 rxalign = 1; 2440 /* 2441 * Workaround hardware hang which seems to happen when Rx buffer 2442 * is not aligned on multiple of FIFO word(8 bytes). 2443 */ 2444 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0) 2445 rxalign = MSK_RX_BUF_ALIGN; 2446 /* Create tag for jumbo Rx buffers. */ 2447 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2448 rxalign, 0, /* alignment, boundary */ 2449 BUS_SPACE_MAXADDR, /* lowaddr */ 2450 BUS_SPACE_MAXADDR, /* highaddr */ 2451 NULL, NULL, /* filter, filterarg */ 2452 MJUM9BYTES, /* maxsize */ 2453 1, /* nsegments */ 2454 MJUM9BYTES, /* maxsegsize */ 2455 0, /* flags */ 2456 NULL, NULL, /* lockfunc, lockarg */ 2457 &sc_if->msk_cdata.msk_jumbo_rx_tag); 2458 if (error != 0) { 2459 device_printf(sc_if->msk_if_dev, 2460 "failed to create jumbo Rx DMA tag\n"); 2461 goto jumbo_fail; 2462 } 2463 2464 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */ 2465 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2466 (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring, 2467 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 2468 &sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2469 if (error != 0) { 2470 device_printf(sc_if->msk_if_dev, 2471 "failed to allocate DMA'able memory for jumbo Rx ring\n"); 2472 goto jumbo_fail; 2473 } 2474 2475 ctx.msk_busaddr = 0; 2476 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2477 sc_if->msk_cdata.msk_jumbo_rx_ring_map, 2478 sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ, 2479 msk_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 2480 if (error != 0) { 2481 device_printf(sc_if->msk_if_dev, 2482 "failed to load DMA'able memory for jumbo Rx ring\n"); 2483 goto jumbo_fail; 2484 } 2485 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr; 2486 2487 /* Create DMA maps for jumbo Rx buffers. */ 2488 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0, 2489 &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) { 2490 device_printf(sc_if->msk_if_dev, 2491 "failed to create spare jumbo Rx dmamap\n"); 2492 goto jumbo_fail; 2493 } 2494 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 2495 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; 2496 jrxd->rx_m = NULL; 2497 jrxd->rx_dmamap = NULL; 2498 error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0, 2499 &jrxd->rx_dmamap); 2500 if (error != 0) { 2501 device_printf(sc_if->msk_if_dev, 2502 "failed to create jumbo Rx dmamap\n"); 2503 goto jumbo_fail; 2504 } 2505 } 2506 2507 return (0); 2508 2509 jumbo_fail: 2510 msk_rx_dma_jfree(sc_if); 2511 device_printf(sc_if->msk_if_dev, "disabling jumbo frame support " 2512 "due to resource shortage\n"); 2513 sc_if->msk_flags &= ~MSK_FLAG_JUMBO; 2514 return (error); 2515 } 2516 2517 static void 2518 msk_txrx_dma_free(struct msk_if_softc *sc_if) 2519 { 2520 struct msk_txdesc *txd; 2521 struct msk_rxdesc *rxd; 2522 int i; 2523 2524 /* Tx ring. */ 2525 if (sc_if->msk_cdata.msk_tx_ring_tag) { 2526 if (sc_if->msk_rdata.msk_tx_ring_paddr) 2527 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_ring_tag, 2528 sc_if->msk_cdata.msk_tx_ring_map); 2529 if (sc_if->msk_rdata.msk_tx_ring) 2530 bus_dmamem_free(sc_if->msk_cdata.msk_tx_ring_tag, 2531 sc_if->msk_rdata.msk_tx_ring, 2532 sc_if->msk_cdata.msk_tx_ring_map); 2533 sc_if->msk_rdata.msk_tx_ring = NULL; 2534 sc_if->msk_rdata.msk_tx_ring_paddr = 0; 2535 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_ring_tag); 2536 sc_if->msk_cdata.msk_tx_ring_tag = NULL; 2537 } 2538 /* Rx ring. */ 2539 if (sc_if->msk_cdata.msk_rx_ring_tag) { 2540 if (sc_if->msk_rdata.msk_rx_ring_paddr) 2541 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_ring_tag, 2542 sc_if->msk_cdata.msk_rx_ring_map); 2543 if (sc_if->msk_rdata.msk_rx_ring) 2544 bus_dmamem_free(sc_if->msk_cdata.msk_rx_ring_tag, 2545 sc_if->msk_rdata.msk_rx_ring, 2546 sc_if->msk_cdata.msk_rx_ring_map); 2547 sc_if->msk_rdata.msk_rx_ring = NULL; 2548 sc_if->msk_rdata.msk_rx_ring_paddr = 0; 2549 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag); 2550 sc_if->msk_cdata.msk_rx_ring_tag = NULL; 2551 } 2552 /* Tx buffers. */ 2553 if (sc_if->msk_cdata.msk_tx_tag) { 2554 for (i = 0; i < MSK_TX_RING_CNT; i++) { 2555 txd = &sc_if->msk_cdata.msk_txdesc[i]; 2556 if (txd->tx_dmamap) { 2557 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag, 2558 txd->tx_dmamap); 2559 txd->tx_dmamap = NULL; 2560 } 2561 } 2562 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag); 2563 sc_if->msk_cdata.msk_tx_tag = NULL; 2564 } 2565 /* Rx buffers. */ 2566 if (sc_if->msk_cdata.msk_rx_tag) { 2567 for (i = 0; i < MSK_RX_RING_CNT; i++) { 2568 rxd = &sc_if->msk_cdata.msk_rxdesc[i]; 2569 if (rxd->rx_dmamap) { 2570 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 2571 rxd->rx_dmamap); 2572 rxd->rx_dmamap = NULL; 2573 } 2574 } 2575 if (sc_if->msk_cdata.msk_rx_sparemap) { 2576 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 2577 sc_if->msk_cdata.msk_rx_sparemap); 2578 sc_if->msk_cdata.msk_rx_sparemap = 0; 2579 } 2580 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag); 2581 sc_if->msk_cdata.msk_rx_tag = NULL; 2582 } 2583 if (sc_if->msk_cdata.msk_parent_tag) { 2584 bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag); 2585 sc_if->msk_cdata.msk_parent_tag = NULL; 2586 } 2587 } 2588 2589 static void 2590 msk_rx_dma_jfree(struct msk_if_softc *sc_if) 2591 { 2592 struct msk_rxdesc *jrxd; 2593 int i; 2594 2595 /* Jumbo Rx ring. */ 2596 if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) { 2597 if (sc_if->msk_rdata.msk_jumbo_rx_ring_paddr) 2598 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2599 sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2600 if (sc_if->msk_rdata.msk_jumbo_rx_ring) 2601 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2602 sc_if->msk_rdata.msk_jumbo_rx_ring, 2603 sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2604 sc_if->msk_rdata.msk_jumbo_rx_ring = NULL; 2605 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = 0; 2606 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag); 2607 sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL; 2608 } 2609 /* Jumbo Rx buffers. */ 2610 if (sc_if->msk_cdata.msk_jumbo_rx_tag) { 2611 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 2612 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; 2613 if (jrxd->rx_dmamap) { 2614 bus_dmamap_destroy( 2615 sc_if->msk_cdata.msk_jumbo_rx_tag, 2616 jrxd->rx_dmamap); 2617 jrxd->rx_dmamap = NULL; 2618 } 2619 } 2620 if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) { 2621 bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag, 2622 sc_if->msk_cdata.msk_jumbo_rx_sparemap); 2623 sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0; 2624 } 2625 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag); 2626 sc_if->msk_cdata.msk_jumbo_rx_tag = NULL; 2627 } 2628 } 2629 2630 static int 2631 msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head) 2632 { 2633 struct msk_txdesc *txd, *txd_last; 2634 struct msk_tx_desc *tx_le; 2635 struct mbuf *m; 2636 bus_dmamap_t map; 2637 bus_dma_segment_t txsegs[MSK_MAXTXSEGS]; 2638 uint32_t control, csum, prod, si; 2639 uint16_t offset, tcp_offset, tso_mtu; 2640 int error, i, nseg, tso; 2641 2642 MSK_IF_LOCK_ASSERT(sc_if); 2643 2644 tcp_offset = offset = 0; 2645 m = *m_head; 2646 if (((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 && 2647 (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) || 2648 ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 && 2649 (m->m_pkthdr.csum_flags & CSUM_TSO) != 0)) { 2650 /* 2651 * Since mbuf has no protocol specific structure information 2652 * in it we have to inspect protocol information here to 2653 * setup TSO and checksum offload. I don't know why Marvell 2654 * made a such decision in chip design because other GigE 2655 * hardwares normally takes care of all these chores in 2656 * hardware. However, TSO performance of Yukon II is very 2657 * good such that it's worth to implement it. 2658 */ 2659 struct ether_header *eh; 2660 struct ip *ip; 2661 struct tcphdr *tcp; 2662 2663 if (M_WRITABLE(m) == 0) { 2664 /* Get a writable copy. */ 2665 m = m_dup(*m_head, M_NOWAIT); 2666 m_freem(*m_head); 2667 if (m == NULL) { 2668 *m_head = NULL; 2669 return (ENOBUFS); 2670 } 2671 *m_head = m; 2672 } 2673 2674 offset = sizeof(struct ether_header); 2675 m = m_pullup(m, offset); 2676 if (m == NULL) { 2677 *m_head = NULL; 2678 return (ENOBUFS); 2679 } 2680 eh = mtod(m, struct ether_header *); 2681 /* Check if hardware VLAN insertion is off. */ 2682 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 2683 offset = sizeof(struct ether_vlan_header); 2684 m = m_pullup(m, offset); 2685 if (m == NULL) { 2686 *m_head = NULL; 2687 return (ENOBUFS); 2688 } 2689 } 2690 m = m_pullup(m, offset + sizeof(struct ip)); 2691 if (m == NULL) { 2692 *m_head = NULL; 2693 return (ENOBUFS); 2694 } 2695 ip = (struct ip *)(mtod(m, char *) + offset); 2696 offset += (ip->ip_hl << 2); 2697 tcp_offset = offset; 2698 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2699 m = m_pullup(m, offset + sizeof(struct tcphdr)); 2700 if (m == NULL) { 2701 *m_head = NULL; 2702 return (ENOBUFS); 2703 } 2704 tcp = (struct tcphdr *)(mtod(m, char *) + offset); 2705 offset += (tcp->th_off << 2); 2706 } else if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 && 2707 (m->m_pkthdr.len < MSK_MIN_FRAMELEN) && 2708 (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) { 2709 /* 2710 * It seems that Yukon II has Tx checksum offload bug 2711 * for small TCP packets that's less than 60 bytes in 2712 * size (e.g. TCP window probe packet, pure ACK packet). 2713 * Common work around like padding with zeros to make 2714 * the frame minimum ethernet frame size didn't work at 2715 * all. 2716 * Instead of disabling checksum offload completely we 2717 * resort to S/W checksum routine when we encounter 2718 * short TCP frames. 2719 * Short UDP packets appear to be handled correctly by 2720 * Yukon II. Also I assume this bug does not happen on 2721 * controllers that use newer descriptor format or 2722 * automatic Tx checksum calculation. 2723 */ 2724 m = m_pullup(m, offset + sizeof(struct tcphdr)); 2725 if (m == NULL) { 2726 *m_head = NULL; 2727 return (ENOBUFS); 2728 } 2729 *(uint16_t *)(m->m_data + offset + 2730 m->m_pkthdr.csum_data) = in_cksum_skip(m, 2731 m->m_pkthdr.len, offset); 2732 m->m_pkthdr.csum_flags &= ~CSUM_TCP; 2733 } 2734 *m_head = m; 2735 } 2736 2737 prod = sc_if->msk_cdata.msk_tx_prod; 2738 txd = &sc_if->msk_cdata.msk_txdesc[prod]; 2739 txd_last = txd; 2740 map = txd->tx_dmamap; 2741 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map, 2742 *m_head, txsegs, &nseg, BUS_DMA_NOWAIT); 2743 if (error == EFBIG) { 2744 m = m_collapse(*m_head, M_NOWAIT, MSK_MAXTXSEGS); 2745 if (m == NULL) { 2746 m_freem(*m_head); 2747 *m_head = NULL; 2748 return (ENOBUFS); 2749 } 2750 *m_head = m; 2751 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, 2752 map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT); 2753 if (error != 0) { 2754 m_freem(*m_head); 2755 *m_head = NULL; 2756 return (error); 2757 } 2758 } else if (error != 0) 2759 return (error); 2760 if (nseg == 0) { 2761 m_freem(*m_head); 2762 *m_head = NULL; 2763 return (EIO); 2764 } 2765 2766 /* Check number of available descriptors. */ 2767 if (sc_if->msk_cdata.msk_tx_cnt + nseg >= 2768 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) { 2769 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map); 2770 return (ENOBUFS); 2771 } 2772 2773 control = 0; 2774 tso = 0; 2775 tx_le = NULL; 2776 2777 /* Check TSO support. */ 2778 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2779 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0) 2780 tso_mtu = m->m_pkthdr.tso_segsz; 2781 else 2782 tso_mtu = offset + m->m_pkthdr.tso_segsz; 2783 if (tso_mtu != sc_if->msk_cdata.msk_tso_mtu) { 2784 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2785 tx_le->msk_addr = htole32(tso_mtu); 2786 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0) 2787 tx_le->msk_control = htole32(OP_MSS | HW_OWNER); 2788 else 2789 tx_le->msk_control = 2790 htole32(OP_LRGLEN | HW_OWNER); 2791 sc_if->msk_cdata.msk_tx_cnt++; 2792 MSK_INC(prod, MSK_TX_RING_CNT); 2793 sc_if->msk_cdata.msk_tso_mtu = tso_mtu; 2794 } 2795 tso++; 2796 } 2797 /* Check if we have a VLAN tag to insert. */ 2798 if ((m->m_flags & M_VLANTAG) != 0) { 2799 if (tx_le == NULL) { 2800 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2801 tx_le->msk_addr = htole32(0); 2802 tx_le->msk_control = htole32(OP_VLAN | HW_OWNER | 2803 htons(m->m_pkthdr.ether_vtag)); 2804 sc_if->msk_cdata.msk_tx_cnt++; 2805 MSK_INC(prod, MSK_TX_RING_CNT); 2806 } else { 2807 tx_le->msk_control |= htole32(OP_VLAN | 2808 htons(m->m_pkthdr.ether_vtag)); 2809 } 2810 control |= INS_VLAN; 2811 } 2812 /* Check if we have to handle checksum offload. */ 2813 if (tso == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) { 2814 if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) != 0) 2815 control |= CALSUM; 2816 else { 2817 control |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM; 2818 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 2819 control |= UDPTCP; 2820 /* Checksum write position. */ 2821 csum = (tcp_offset + m->m_pkthdr.csum_data) & 0xffff; 2822 /* Checksum start position. */ 2823 csum |= (uint32_t)tcp_offset << 16; 2824 if (csum != sc_if->msk_cdata.msk_last_csum) { 2825 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2826 tx_le->msk_addr = htole32(csum); 2827 tx_le->msk_control = htole32(1 << 16 | 2828 (OP_TCPLISW | HW_OWNER)); 2829 sc_if->msk_cdata.msk_tx_cnt++; 2830 MSK_INC(prod, MSK_TX_RING_CNT); 2831 sc_if->msk_cdata.msk_last_csum = csum; 2832 } 2833 } 2834 } 2835 2836 #ifdef MSK_64BIT_DMA 2837 if (MSK_ADDR_HI(txsegs[0].ds_addr) != 2838 sc_if->msk_cdata.msk_tx_high_addr) { 2839 sc_if->msk_cdata.msk_tx_high_addr = 2840 MSK_ADDR_HI(txsegs[0].ds_addr); 2841 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2842 tx_le->msk_addr = htole32(MSK_ADDR_HI(txsegs[0].ds_addr)); 2843 tx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER); 2844 sc_if->msk_cdata.msk_tx_cnt++; 2845 MSK_INC(prod, MSK_TX_RING_CNT); 2846 } 2847 #endif 2848 si = prod; 2849 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2850 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr)); 2851 if (tso == 0) 2852 tx_le->msk_control = htole32(txsegs[0].ds_len | control | 2853 OP_PACKET); 2854 else 2855 tx_le->msk_control = htole32(txsegs[0].ds_len | control | 2856 OP_LARGESEND); 2857 sc_if->msk_cdata.msk_tx_cnt++; 2858 MSK_INC(prod, MSK_TX_RING_CNT); 2859 2860 for (i = 1; i < nseg; i++) { 2861 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2862 #ifdef MSK_64BIT_DMA 2863 if (MSK_ADDR_HI(txsegs[i].ds_addr) != 2864 sc_if->msk_cdata.msk_tx_high_addr) { 2865 sc_if->msk_cdata.msk_tx_high_addr = 2866 MSK_ADDR_HI(txsegs[i].ds_addr); 2867 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2868 tx_le->msk_addr = 2869 htole32(MSK_ADDR_HI(txsegs[i].ds_addr)); 2870 tx_le->msk_control = htole32(OP_ADDR64 | HW_OWNER); 2871 sc_if->msk_cdata.msk_tx_cnt++; 2872 MSK_INC(prod, MSK_TX_RING_CNT); 2873 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2874 } 2875 #endif 2876 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr)); 2877 tx_le->msk_control = htole32(txsegs[i].ds_len | control | 2878 OP_BUFFER | HW_OWNER); 2879 sc_if->msk_cdata.msk_tx_cnt++; 2880 MSK_INC(prod, MSK_TX_RING_CNT); 2881 } 2882 /* Update producer index. */ 2883 sc_if->msk_cdata.msk_tx_prod = prod; 2884 2885 /* Set EOP on the last descriptor. */ 2886 prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT; 2887 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2888 tx_le->msk_control |= htole32(EOP); 2889 2890 /* Turn the first descriptor ownership to hardware. */ 2891 tx_le = &sc_if->msk_rdata.msk_tx_ring[si]; 2892 tx_le->msk_control |= htole32(HW_OWNER); 2893 2894 txd = &sc_if->msk_cdata.msk_txdesc[prod]; 2895 map = txd_last->tx_dmamap; 2896 txd_last->tx_dmamap = txd->tx_dmamap; 2897 txd->tx_dmamap = map; 2898 txd->tx_m = m; 2899 2900 /* Sync descriptors. */ 2901 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE); 2902 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag, 2903 sc_if->msk_cdata.msk_tx_ring_map, 2904 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2905 2906 return (0); 2907 } 2908 2909 static void 2910 msk_start(struct ifnet *ifp) 2911 { 2912 struct msk_if_softc *sc_if; 2913 2914 sc_if = ifp->if_softc; 2915 MSK_IF_LOCK(sc_if); 2916 msk_start_locked(ifp); 2917 MSK_IF_UNLOCK(sc_if); 2918 } 2919 2920 static void 2921 msk_start_locked(struct ifnet *ifp) 2922 { 2923 struct msk_if_softc *sc_if; 2924 struct mbuf *m_head; 2925 int enq; 2926 2927 sc_if = ifp->if_softc; 2928 MSK_IF_LOCK_ASSERT(sc_if); 2929 2930 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 2931 IFF_DRV_RUNNING || (sc_if->msk_flags & MSK_FLAG_LINK) == 0) 2932 return; 2933 2934 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 2935 sc_if->msk_cdata.msk_tx_cnt < 2936 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) { 2937 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 2938 if (m_head == NULL) 2939 break; 2940 /* 2941 * Pack the data into the transmit ring. If we 2942 * don't have room, set the OACTIVE flag and wait 2943 * for the NIC to drain the ring. 2944 */ 2945 if (msk_encap(sc_if, &m_head) != 0) { 2946 if (m_head == NULL) 2947 break; 2948 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 2949 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2950 break; 2951 } 2952 2953 enq++; 2954 /* 2955 * If there's a BPF listener, bounce a copy of this frame 2956 * to him. 2957 */ 2958 ETHER_BPF_MTAP(ifp, m_head); 2959 } 2960 2961 if (enq > 0) { 2962 /* Transmit */ 2963 CSR_WRITE_2(sc_if->msk_softc, 2964 Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG), 2965 sc_if->msk_cdata.msk_tx_prod); 2966 2967 /* Set a timeout in case the chip goes out to lunch. */ 2968 sc_if->msk_watchdog_timer = MSK_TX_TIMEOUT; 2969 } 2970 } 2971 2972 static void 2973 msk_watchdog(struct msk_if_softc *sc_if) 2974 { 2975 struct ifnet *ifp; 2976 2977 MSK_IF_LOCK_ASSERT(sc_if); 2978 2979 if (sc_if->msk_watchdog_timer == 0 || --sc_if->msk_watchdog_timer) 2980 return; 2981 ifp = sc_if->msk_ifp; 2982 if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0) { 2983 if (bootverbose) 2984 if_printf(sc_if->msk_ifp, "watchdog timeout " 2985 "(missed link)\n"); 2986 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2987 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2988 msk_init_locked(sc_if); 2989 return; 2990 } 2991 2992 if_printf(ifp, "watchdog timeout\n"); 2993 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2994 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2995 msk_init_locked(sc_if); 2996 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2997 msk_start_locked(ifp); 2998 } 2999 3000 static int 3001 mskc_shutdown(device_t dev) 3002 { 3003 struct msk_softc *sc; 3004 int i; 3005 3006 sc = device_get_softc(dev); 3007 MSK_LOCK(sc); 3008 for (i = 0; i < sc->msk_num_port; i++) { 3009 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL && 3010 ((sc->msk_if[i]->msk_ifp->if_drv_flags & 3011 IFF_DRV_RUNNING) != 0)) 3012 msk_stop(sc->msk_if[i]); 3013 } 3014 MSK_UNLOCK(sc); 3015 3016 /* Put hardware reset. */ 3017 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 3018 return (0); 3019 } 3020 3021 static int 3022 mskc_suspend(device_t dev) 3023 { 3024 struct msk_softc *sc; 3025 int i; 3026 3027 sc = device_get_softc(dev); 3028 3029 MSK_LOCK(sc); 3030 3031 for (i = 0; i < sc->msk_num_port; i++) { 3032 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL && 3033 ((sc->msk_if[i]->msk_ifp->if_drv_flags & 3034 IFF_DRV_RUNNING) != 0)) 3035 msk_stop(sc->msk_if[i]); 3036 } 3037 3038 /* Disable all interrupts. */ 3039 CSR_WRITE_4(sc, B0_IMSK, 0); 3040 CSR_READ_4(sc, B0_IMSK); 3041 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 3042 CSR_READ_4(sc, B0_HWE_IMSK); 3043 3044 msk_phy_power(sc, MSK_PHY_POWERDOWN); 3045 3046 /* Put hardware reset. */ 3047 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 3048 sc->msk_pflags |= MSK_FLAG_SUSPEND; 3049 3050 MSK_UNLOCK(sc); 3051 3052 return (0); 3053 } 3054 3055 static int 3056 mskc_resume(device_t dev) 3057 { 3058 struct msk_softc *sc; 3059 int i; 3060 3061 sc = device_get_softc(dev); 3062 3063 MSK_LOCK(sc); 3064 3065 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0); 3066 mskc_reset(sc); 3067 for (i = 0; i < sc->msk_num_port; i++) { 3068 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL && 3069 ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0)) { 3070 sc->msk_if[i]->msk_ifp->if_drv_flags &= 3071 ~IFF_DRV_RUNNING; 3072 msk_init_locked(sc->msk_if[i]); 3073 } 3074 } 3075 sc->msk_pflags &= ~MSK_FLAG_SUSPEND; 3076 3077 MSK_UNLOCK(sc); 3078 3079 return (0); 3080 } 3081 3082 #ifndef __NO_STRICT_ALIGNMENT 3083 static __inline void 3084 msk_fixup_rx(struct mbuf *m) 3085 { 3086 int i; 3087 uint16_t *src, *dst; 3088 3089 src = mtod(m, uint16_t *); 3090 dst = src - 3; 3091 3092 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 3093 *dst++ = *src++; 3094 3095 m->m_data -= (MSK_RX_BUF_ALIGN - ETHER_ALIGN); 3096 } 3097 #endif 3098 3099 static __inline void 3100 msk_rxcsum(struct msk_if_softc *sc_if, uint32_t control, struct mbuf *m) 3101 { 3102 struct ether_header *eh; 3103 struct ip *ip; 3104 struct udphdr *uh; 3105 int32_t hlen, len, pktlen, temp32; 3106 uint16_t csum, *opts; 3107 3108 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0) { 3109 if ((control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) { 3110 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 3111 if ((control & CSS_IPV4_CSUM_OK) != 0) 3112 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 3113 if ((control & (CSS_TCP | CSS_UDP)) != 0 && 3114 (control & (CSS_TCPUDP_CSUM_OK)) != 0) { 3115 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | 3116 CSUM_PSEUDO_HDR; 3117 m->m_pkthdr.csum_data = 0xffff; 3118 } 3119 } 3120 return; 3121 } 3122 /* 3123 * Marvell Yukon controllers that support OP_RXCHKS has known 3124 * to have various Rx checksum offloading bugs. These 3125 * controllers can be configured to compute simple checksum 3126 * at two different positions. So we can compute IP and TCP/UDP 3127 * checksum at the same time. We intentionally have controller 3128 * compute TCP/UDP checksum twice by specifying the same 3129 * checksum start position and compare the result. If the value 3130 * is different it would indicate the hardware logic was wrong. 3131 */ 3132 if ((sc_if->msk_csum & 0xFFFF) != (sc_if->msk_csum >> 16)) { 3133 if (bootverbose) 3134 device_printf(sc_if->msk_if_dev, 3135 "Rx checksum value mismatch!\n"); 3136 return; 3137 } 3138 pktlen = m->m_pkthdr.len; 3139 if (pktlen < sizeof(struct ether_header) + sizeof(struct ip)) 3140 return; 3141 eh = mtod(m, struct ether_header *); 3142 if (eh->ether_type != htons(ETHERTYPE_IP)) 3143 return; 3144 ip = (struct ip *)(eh + 1); 3145 if (ip->ip_v != IPVERSION) 3146 return; 3147 3148 hlen = ip->ip_hl << 2; 3149 pktlen -= sizeof(struct ether_header); 3150 if (hlen < sizeof(struct ip)) 3151 return; 3152 if (ntohs(ip->ip_len) < hlen) 3153 return; 3154 if (ntohs(ip->ip_len) != pktlen) 3155 return; 3156 if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) 3157 return; /* can't handle fragmented packet. */ 3158 3159 switch (ip->ip_p) { 3160 case IPPROTO_TCP: 3161 if (pktlen < (hlen + sizeof(struct tcphdr))) 3162 return; 3163 break; 3164 case IPPROTO_UDP: 3165 if (pktlen < (hlen + sizeof(struct udphdr))) 3166 return; 3167 uh = (struct udphdr *)((caddr_t)ip + hlen); 3168 if (uh->uh_sum == 0) 3169 return; /* no checksum */ 3170 break; 3171 default: 3172 return; 3173 } 3174 csum = bswap16(sc_if->msk_csum & 0xFFFF); 3175 /* Checksum fixup for IP options. */ 3176 len = hlen - sizeof(struct ip); 3177 if (len > 0) { 3178 opts = (uint16_t *)(ip + 1); 3179 for (; len > 0; len -= sizeof(uint16_t), opts++) { 3180 temp32 = csum - *opts; 3181 temp32 = (temp32 >> 16) + (temp32 & 65535); 3182 csum = temp32 & 65535; 3183 } 3184 } 3185 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; 3186 m->m_pkthdr.csum_data = csum; 3187 } 3188 3189 static void 3190 msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control, 3191 int len) 3192 { 3193 struct mbuf *m; 3194 struct ifnet *ifp; 3195 struct msk_rxdesc *rxd; 3196 int cons, rxlen; 3197 3198 ifp = sc_if->msk_ifp; 3199 3200 MSK_IF_LOCK_ASSERT(sc_if); 3201 3202 cons = sc_if->msk_cdata.msk_rx_cons; 3203 do { 3204 rxlen = status >> 16; 3205 if ((status & GMR_FS_VLAN) != 0 && 3206 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 3207 rxlen -= ETHER_VLAN_ENCAP_LEN; 3208 if ((sc_if->msk_flags & MSK_FLAG_NORXCHK) != 0) { 3209 /* 3210 * For controllers that returns bogus status code 3211 * just do minimal check and let upper stack 3212 * handle this frame. 3213 */ 3214 if (len > MSK_MAX_FRAMELEN || len < ETHER_HDR_LEN) { 3215 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 3216 msk_discard_rxbuf(sc_if, cons); 3217 break; 3218 } 3219 } else if (len > sc_if->msk_framesize || 3220 ((status & GMR_FS_ANY_ERR) != 0) || 3221 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) { 3222 /* Don't count flow-control packet as errors. */ 3223 if ((status & GMR_FS_GOOD_FC) == 0) 3224 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 3225 msk_discard_rxbuf(sc_if, cons); 3226 break; 3227 } 3228 #ifdef MSK_64BIT_DMA 3229 rxd = &sc_if->msk_cdata.msk_rxdesc[(cons + 1) % 3230 MSK_RX_RING_CNT]; 3231 #else 3232 rxd = &sc_if->msk_cdata.msk_rxdesc[cons]; 3233 #endif 3234 m = rxd->rx_m; 3235 if (msk_newbuf(sc_if, cons) != 0) { 3236 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 3237 /* Reuse old buffer. */ 3238 msk_discard_rxbuf(sc_if, cons); 3239 break; 3240 } 3241 m->m_pkthdr.rcvif = ifp; 3242 m->m_pkthdr.len = m->m_len = len; 3243 #ifndef __NO_STRICT_ALIGNMENT 3244 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0) 3245 msk_fixup_rx(m); 3246 #endif 3247 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 3248 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 3249 msk_rxcsum(sc_if, control, m); 3250 /* Check for VLAN tagged packets. */ 3251 if ((status & GMR_FS_VLAN) != 0 && 3252 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 3253 m->m_pkthdr.ether_vtag = sc_if->msk_vtag; 3254 m->m_flags |= M_VLANTAG; 3255 } 3256 MSK_IF_UNLOCK(sc_if); 3257 (*ifp->if_input)(ifp, m); 3258 MSK_IF_LOCK(sc_if); 3259 } while (0); 3260 3261 MSK_RX_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT); 3262 MSK_RX_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT); 3263 } 3264 3265 static void 3266 msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control, 3267 int len) 3268 { 3269 struct mbuf *m; 3270 struct ifnet *ifp; 3271 struct msk_rxdesc *jrxd; 3272 int cons, rxlen; 3273 3274 ifp = sc_if->msk_ifp; 3275 3276 MSK_IF_LOCK_ASSERT(sc_if); 3277 3278 cons = sc_if->msk_cdata.msk_rx_cons; 3279 do { 3280 rxlen = status >> 16; 3281 if ((status & GMR_FS_VLAN) != 0 && 3282 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 3283 rxlen -= ETHER_VLAN_ENCAP_LEN; 3284 if (len > sc_if->msk_framesize || 3285 ((status & GMR_FS_ANY_ERR) != 0) || 3286 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) { 3287 /* Don't count flow-control packet as errors. */ 3288 if ((status & GMR_FS_GOOD_FC) == 0) 3289 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 3290 msk_discard_jumbo_rxbuf(sc_if, cons); 3291 break; 3292 } 3293 #ifdef MSK_64BIT_DMA 3294 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[(cons + 1) % 3295 MSK_JUMBO_RX_RING_CNT]; 3296 #else 3297 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons]; 3298 #endif 3299 m = jrxd->rx_m; 3300 if (msk_jumbo_newbuf(sc_if, cons) != 0) { 3301 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 3302 /* Reuse old buffer. */ 3303 msk_discard_jumbo_rxbuf(sc_if, cons); 3304 break; 3305 } 3306 m->m_pkthdr.rcvif = ifp; 3307 m->m_pkthdr.len = m->m_len = len; 3308 #ifndef __NO_STRICT_ALIGNMENT 3309 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0) 3310 msk_fixup_rx(m); 3311 #endif 3312 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 3313 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 3314 msk_rxcsum(sc_if, control, m); 3315 /* Check for VLAN tagged packets. */ 3316 if ((status & GMR_FS_VLAN) != 0 && 3317 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 3318 m->m_pkthdr.ether_vtag = sc_if->msk_vtag; 3319 m->m_flags |= M_VLANTAG; 3320 } 3321 MSK_IF_UNLOCK(sc_if); 3322 (*ifp->if_input)(ifp, m); 3323 MSK_IF_LOCK(sc_if); 3324 } while (0); 3325 3326 MSK_RX_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT); 3327 MSK_RX_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT); 3328 } 3329 3330 static void 3331 msk_txeof(struct msk_if_softc *sc_if, int idx) 3332 { 3333 struct msk_txdesc *txd; 3334 struct msk_tx_desc *cur_tx; 3335 struct ifnet *ifp; 3336 uint32_t control; 3337 int cons, prog; 3338 3339 MSK_IF_LOCK_ASSERT(sc_if); 3340 3341 ifp = sc_if->msk_ifp; 3342 3343 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag, 3344 sc_if->msk_cdata.msk_tx_ring_map, 3345 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 3346 /* 3347 * Go through our tx ring and free mbufs for those 3348 * frames that have been sent. 3349 */ 3350 cons = sc_if->msk_cdata.msk_tx_cons; 3351 prog = 0; 3352 for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) { 3353 if (sc_if->msk_cdata.msk_tx_cnt <= 0) 3354 break; 3355 prog++; 3356 cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons]; 3357 control = le32toh(cur_tx->msk_control); 3358 sc_if->msk_cdata.msk_tx_cnt--; 3359 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3360 if ((control & EOP) == 0) 3361 continue; 3362 txd = &sc_if->msk_cdata.msk_txdesc[cons]; 3363 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap, 3364 BUS_DMASYNC_POSTWRITE); 3365 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap); 3366 3367 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 3368 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!", 3369 __func__)); 3370 m_freem(txd->tx_m); 3371 txd->tx_m = NULL; 3372 } 3373 3374 if (prog > 0) { 3375 sc_if->msk_cdata.msk_tx_cons = cons; 3376 if (sc_if->msk_cdata.msk_tx_cnt == 0) 3377 sc_if->msk_watchdog_timer = 0; 3378 /* No need to sync LEs as we didn't update LEs. */ 3379 } 3380 } 3381 3382 static void 3383 msk_tick(void *xsc_if) 3384 { 3385 struct msk_if_softc *sc_if; 3386 struct mii_data *mii; 3387 3388 sc_if = xsc_if; 3389 3390 MSK_IF_LOCK_ASSERT(sc_if); 3391 3392 mii = device_get_softc(sc_if->msk_miibus); 3393 3394 mii_tick(mii); 3395 if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0) 3396 msk_miibus_statchg(sc_if->msk_if_dev); 3397 msk_handle_events(sc_if->msk_softc); 3398 msk_watchdog(sc_if); 3399 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if); 3400 } 3401 3402 static void 3403 msk_intr_phy(struct msk_if_softc *sc_if) 3404 { 3405 uint16_t status; 3406 3407 msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT); 3408 status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT); 3409 /* Handle FIFO Underrun/Overflow? */ 3410 if ((status & PHY_M_IS_FIFO_ERROR)) 3411 device_printf(sc_if->msk_if_dev, 3412 "PHY FIFO underrun/overflow.\n"); 3413 } 3414 3415 static void 3416 msk_intr_gmac(struct msk_if_softc *sc_if) 3417 { 3418 struct msk_softc *sc; 3419 uint8_t status; 3420 3421 sc = sc_if->msk_softc; 3422 status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC)); 3423 3424 /* GMAC Rx FIFO overrun. */ 3425 if ((status & GM_IS_RX_FF_OR) != 0) 3426 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), 3427 GMF_CLI_RX_FO); 3428 /* GMAC Tx FIFO underrun. */ 3429 if ((status & GM_IS_TX_FF_UR) != 0) { 3430 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3431 GMF_CLI_TX_FU); 3432 device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n"); 3433 /* 3434 * XXX 3435 * In case of Tx underrun, we may need to flush/reset 3436 * Tx MAC but that would also require resynchronization 3437 * with status LEs. Reinitializing status LEs would 3438 * affect other port in dual MAC configuration so it 3439 * should be avoided as possible as we can. 3440 * Due to lack of documentation it's all vague guess but 3441 * it needs more investigation. 3442 */ 3443 } 3444 } 3445 3446 static void 3447 msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status) 3448 { 3449 struct msk_softc *sc; 3450 3451 sc = sc_if->msk_softc; 3452 if ((status & Y2_IS_PAR_RD1) != 0) { 3453 device_printf(sc_if->msk_if_dev, 3454 "RAM buffer read parity error\n"); 3455 /* Clear IRQ. */ 3456 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL), 3457 RI_CLR_RD_PERR); 3458 } 3459 if ((status & Y2_IS_PAR_WR1) != 0) { 3460 device_printf(sc_if->msk_if_dev, 3461 "RAM buffer write parity error\n"); 3462 /* Clear IRQ. */ 3463 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL), 3464 RI_CLR_WR_PERR); 3465 } 3466 if ((status & Y2_IS_PAR_MAC1) != 0) { 3467 device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n"); 3468 /* Clear IRQ. */ 3469 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3470 GMF_CLI_TX_PE); 3471 } 3472 if ((status & Y2_IS_PAR_RX1) != 0) { 3473 device_printf(sc_if->msk_if_dev, "Rx parity error\n"); 3474 /* Clear IRQ. */ 3475 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR); 3476 } 3477 if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) { 3478 device_printf(sc_if->msk_if_dev, "TCP segmentation error\n"); 3479 /* Clear IRQ. */ 3480 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP); 3481 } 3482 } 3483 3484 static void 3485 msk_intr_hwerr(struct msk_softc *sc) 3486 { 3487 uint32_t status; 3488 uint32_t tlphead[4]; 3489 3490 status = CSR_READ_4(sc, B0_HWE_ISRC); 3491 /* Time Stamp timer overflow. */ 3492 if ((status & Y2_IS_TIST_OV) != 0) 3493 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); 3494 if ((status & Y2_IS_PCI_NEXP) != 0) { 3495 /* 3496 * PCI Express Error occured which is not described in PEX 3497 * spec. 3498 * This error is also mapped either to Master Abort( 3499 * Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and 3500 * can only be cleared there. 3501 */ 3502 device_printf(sc->msk_dev, 3503 "PCI Express protocol violation error\n"); 3504 } 3505 3506 if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) { 3507 uint16_t v16; 3508 3509 if ((status & Y2_IS_MST_ERR) != 0) 3510 device_printf(sc->msk_dev, 3511 "unexpected IRQ Status error\n"); 3512 else 3513 device_printf(sc->msk_dev, 3514 "unexpected IRQ Master error\n"); 3515 /* Reset all bits in the PCI status register. */ 3516 v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2); 3517 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); 3518 pci_write_config(sc->msk_dev, PCIR_STATUS, v16 | 3519 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT | 3520 PCIM_STATUS_RTABORT | PCIM_STATUS_MDPERR, 2); 3521 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 3522 } 3523 3524 /* Check for PCI Express Uncorrectable Error. */ 3525 if ((status & Y2_IS_PCI_EXP) != 0) { 3526 uint32_t v32; 3527 3528 /* 3529 * On PCI Express bus bridges are called root complexes (RC). 3530 * PCI Express errors are recognized by the root complex too, 3531 * which requests the system to handle the problem. After 3532 * error occurrence it may be that no access to the adapter 3533 * may be performed any longer. 3534 */ 3535 3536 v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT); 3537 if ((v32 & PEX_UNSUP_REQ) != 0) { 3538 /* Ignore unsupported request error. */ 3539 device_printf(sc->msk_dev, 3540 "Uncorrectable PCI Express error\n"); 3541 } 3542 if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) { 3543 int i; 3544 3545 /* Get TLP header form Log Registers. */ 3546 for (i = 0; i < 4; i++) 3547 tlphead[i] = CSR_PCI_READ_4(sc, 3548 PEX_HEADER_LOG + i * 4); 3549 /* Check for vendor defined broadcast message. */ 3550 if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) { 3551 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP; 3552 CSR_WRITE_4(sc, B0_HWE_IMSK, 3553 sc->msk_intrhwemask); 3554 CSR_READ_4(sc, B0_HWE_IMSK); 3555 } 3556 } 3557 /* Clear the interrupt. */ 3558 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); 3559 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff); 3560 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 3561 } 3562 3563 if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL) 3564 msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status); 3565 if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL) 3566 msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8); 3567 } 3568 3569 static __inline void 3570 msk_rxput(struct msk_if_softc *sc_if) 3571 { 3572 struct msk_softc *sc; 3573 3574 sc = sc_if->msk_softc; 3575 if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) 3576 bus_dmamap_sync( 3577 sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 3578 sc_if->msk_cdata.msk_jumbo_rx_ring_map, 3579 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3580 else 3581 bus_dmamap_sync( 3582 sc_if->msk_cdata.msk_rx_ring_tag, 3583 sc_if->msk_cdata.msk_rx_ring_map, 3584 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3585 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, 3586 PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod); 3587 } 3588 3589 static int 3590 msk_handle_events(struct msk_softc *sc) 3591 { 3592 struct msk_if_softc *sc_if; 3593 int rxput[2]; 3594 struct msk_stat_desc *sd; 3595 uint32_t control, status; 3596 int cons, len, port, rxprog; 3597 3598 if (sc->msk_stat_cons == CSR_READ_2(sc, STAT_PUT_IDX)) 3599 return (0); 3600 3601 /* Sync status LEs. */ 3602 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map, 3603 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 3604 3605 rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0; 3606 rxprog = 0; 3607 cons = sc->msk_stat_cons; 3608 for (;;) { 3609 sd = &sc->msk_stat_ring[cons]; 3610 control = le32toh(sd->msk_control); 3611 if ((control & HW_OWNER) == 0) 3612 break; 3613 control &= ~HW_OWNER; 3614 sd->msk_control = htole32(control); 3615 status = le32toh(sd->msk_status); 3616 len = control & STLE_LEN_MASK; 3617 port = (control >> 16) & 0x01; 3618 sc_if = sc->msk_if[port]; 3619 if (sc_if == NULL) { 3620 device_printf(sc->msk_dev, "invalid port opcode " 3621 "0x%08x\n", control & STLE_OP_MASK); 3622 continue; 3623 } 3624 3625 switch (control & STLE_OP_MASK) { 3626 case OP_RXVLAN: 3627 sc_if->msk_vtag = ntohs(len); 3628 break; 3629 case OP_RXCHKSVLAN: 3630 sc_if->msk_vtag = ntohs(len); 3631 /* FALLTHROUGH */ 3632 case OP_RXCHKS: 3633 sc_if->msk_csum = status; 3634 break; 3635 case OP_RXSTAT: 3636 if (!(sc_if->msk_ifp->if_drv_flags & IFF_DRV_RUNNING)) 3637 break; 3638 if (sc_if->msk_framesize > 3639 (MCLBYTES - MSK_RX_BUF_ALIGN)) 3640 msk_jumbo_rxeof(sc_if, status, control, len); 3641 else 3642 msk_rxeof(sc_if, status, control, len); 3643 rxprog++; 3644 /* 3645 * Because there is no way to sync single Rx LE 3646 * put the DMA sync operation off until the end of 3647 * event processing. 3648 */ 3649 rxput[port]++; 3650 /* Update prefetch unit if we've passed water mark. */ 3651 if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) { 3652 msk_rxput(sc_if); 3653 rxput[port] = 0; 3654 } 3655 break; 3656 case OP_TXINDEXLE: 3657 if (sc->msk_if[MSK_PORT_A] != NULL) 3658 msk_txeof(sc->msk_if[MSK_PORT_A], 3659 status & STLE_TXA1_MSKL); 3660 if (sc->msk_if[MSK_PORT_B] != NULL) 3661 msk_txeof(sc->msk_if[MSK_PORT_B], 3662 ((status & STLE_TXA2_MSKL) >> 3663 STLE_TXA2_SHIFTL) | 3664 ((len & STLE_TXA2_MSKH) << 3665 STLE_TXA2_SHIFTH)); 3666 break; 3667 default: 3668 device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n", 3669 control & STLE_OP_MASK); 3670 break; 3671 } 3672 MSK_INC(cons, sc->msk_stat_count); 3673 if (rxprog > sc->msk_process_limit) 3674 break; 3675 } 3676 3677 sc->msk_stat_cons = cons; 3678 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map, 3679 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3680 3681 if (rxput[MSK_PORT_A] > 0) 3682 msk_rxput(sc->msk_if[MSK_PORT_A]); 3683 if (rxput[MSK_PORT_B] > 0) 3684 msk_rxput(sc->msk_if[MSK_PORT_B]); 3685 3686 return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX)); 3687 } 3688 3689 static void 3690 msk_intr(void *xsc) 3691 { 3692 struct msk_softc *sc; 3693 struct msk_if_softc *sc_if0, *sc_if1; 3694 struct ifnet *ifp0, *ifp1; 3695 uint32_t status; 3696 int domore; 3697 3698 sc = xsc; 3699 MSK_LOCK(sc); 3700 3701 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */ 3702 status = CSR_READ_4(sc, B0_Y2_SP_ISRC2); 3703 if (status == 0 || status == 0xffffffff || 3704 (sc->msk_pflags & MSK_FLAG_SUSPEND) != 0 || 3705 (status & sc->msk_intrmask) == 0) { 3706 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3707 MSK_UNLOCK(sc); 3708 return; 3709 } 3710 3711 sc_if0 = sc->msk_if[MSK_PORT_A]; 3712 sc_if1 = sc->msk_if[MSK_PORT_B]; 3713 ifp0 = ifp1 = NULL; 3714 if (sc_if0 != NULL) 3715 ifp0 = sc_if0->msk_ifp; 3716 if (sc_if1 != NULL) 3717 ifp1 = sc_if1->msk_ifp; 3718 3719 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL) 3720 msk_intr_phy(sc_if0); 3721 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL) 3722 msk_intr_phy(sc_if1); 3723 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL) 3724 msk_intr_gmac(sc_if0); 3725 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL) 3726 msk_intr_gmac(sc_if1); 3727 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) { 3728 device_printf(sc->msk_dev, "Rx descriptor error\n"); 3729 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2); 3730 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3731 CSR_READ_4(sc, B0_IMSK); 3732 } 3733 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) { 3734 device_printf(sc->msk_dev, "Tx descriptor error\n"); 3735 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2); 3736 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3737 CSR_READ_4(sc, B0_IMSK); 3738 } 3739 if ((status & Y2_IS_HW_ERR) != 0) 3740 msk_intr_hwerr(sc); 3741 3742 domore = msk_handle_events(sc); 3743 if ((status & Y2_IS_STAT_BMU) != 0 && domore == 0) 3744 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ); 3745 3746 /* Reenable interrupts. */ 3747 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3748 3749 if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 && 3750 !IFQ_DRV_IS_EMPTY(&ifp0->if_snd)) 3751 msk_start_locked(ifp0); 3752 if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 && 3753 !IFQ_DRV_IS_EMPTY(&ifp1->if_snd)) 3754 msk_start_locked(ifp1); 3755 3756 MSK_UNLOCK(sc); 3757 } 3758 3759 static void 3760 msk_set_tx_stfwd(struct msk_if_softc *sc_if) 3761 { 3762 struct msk_softc *sc; 3763 struct ifnet *ifp; 3764 3765 ifp = sc_if->msk_ifp; 3766 sc = sc_if->msk_softc; 3767 if ((sc->msk_hw_id == CHIP_ID_YUKON_EX && 3768 sc->msk_hw_rev != CHIP_REV_YU_EX_A0) || 3769 sc->msk_hw_id >= CHIP_ID_YUKON_SUPR) { 3770 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3771 TX_STFW_ENA); 3772 } else { 3773 if (ifp->if_mtu > ETHERMTU) { 3774 /* Set Tx GMAC FIFO Almost Empty Threshold. */ 3775 CSR_WRITE_4(sc, 3776 MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR), 3777 MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR); 3778 /* Disable Store & Forward mode for Tx. */ 3779 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3780 TX_STFW_DIS); 3781 } else { 3782 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3783 TX_STFW_ENA); 3784 } 3785 } 3786 } 3787 3788 static void 3789 msk_init(void *xsc) 3790 { 3791 struct msk_if_softc *sc_if = xsc; 3792 3793 MSK_IF_LOCK(sc_if); 3794 msk_init_locked(sc_if); 3795 MSK_IF_UNLOCK(sc_if); 3796 } 3797 3798 static void 3799 msk_init_locked(struct msk_if_softc *sc_if) 3800 { 3801 struct msk_softc *sc; 3802 struct ifnet *ifp; 3803 struct mii_data *mii; 3804 uint8_t *eaddr; 3805 uint16_t gmac; 3806 uint32_t reg; 3807 int error; 3808 3809 MSK_IF_LOCK_ASSERT(sc_if); 3810 3811 ifp = sc_if->msk_ifp; 3812 sc = sc_if->msk_softc; 3813 mii = device_get_softc(sc_if->msk_miibus); 3814 3815 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 3816 return; 3817 3818 error = 0; 3819 /* Cancel pending I/O and free all Rx/Tx buffers. */ 3820 msk_stop(sc_if); 3821 3822 if (ifp->if_mtu < ETHERMTU) 3823 sc_if->msk_framesize = ETHERMTU; 3824 else 3825 sc_if->msk_framesize = ifp->if_mtu; 3826 sc_if->msk_framesize += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 3827 if (ifp->if_mtu > ETHERMTU && 3828 (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) { 3829 ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO); 3830 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM); 3831 } 3832 3833 /* GMAC Control reset. */ 3834 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET); 3835 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR); 3836 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF); 3837 if (sc->msk_hw_id == CHIP_ID_YUKON_EX || 3838 sc->msk_hw_id == CHIP_ID_YUKON_SUPR) 3839 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), 3840 GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON | 3841 GMC_BYP_RETR_ON); 3842 3843 /* 3844 * Initialize GMAC first such that speed/duplex/flow-control 3845 * parameters are renegotiated when interface is brought up. 3846 */ 3847 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, 0); 3848 3849 /* Dummy read the Interrupt Source Register. */ 3850 CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC)); 3851 3852 /* Clear MIB stats. */ 3853 msk_stats_clear(sc_if); 3854 3855 /* Disable FCS. */ 3856 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS); 3857 3858 /* Setup Transmit Control Register. */ 3859 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); 3860 3861 /* Setup Transmit Flow Control Register. */ 3862 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff); 3863 3864 /* Setup Transmit Parameter Register. */ 3865 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM, 3866 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) | 3867 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF)); 3868 3869 gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) | 3870 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF); 3871 3872 if (ifp->if_mtu > ETHERMTU) 3873 gmac |= GM_SMOD_JUMBO_ENA; 3874 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac); 3875 3876 /* Set station address. */ 3877 eaddr = IF_LLADDR(ifp); 3878 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L, 3879 eaddr[0] | (eaddr[1] << 8)); 3880 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1M, 3881 eaddr[2] | (eaddr[3] << 8)); 3882 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1H, 3883 eaddr[4] | (eaddr[5] << 8)); 3884 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L, 3885 eaddr[0] | (eaddr[1] << 8)); 3886 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2M, 3887 eaddr[2] | (eaddr[3] << 8)); 3888 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2H, 3889 eaddr[4] | (eaddr[5] << 8)); 3890 3891 /* Disable interrupts for counter overflows. */ 3892 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0); 3893 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0); 3894 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0); 3895 3896 /* Configure Rx MAC FIFO. */ 3897 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET); 3898 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR); 3899 reg = GMF_OPER_ON | GMF_RX_F_FL_ON; 3900 if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P || 3901 sc->msk_hw_id == CHIP_ID_YUKON_EX) 3902 reg |= GMF_RX_OVER_ON; 3903 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), reg); 3904 3905 /* Set receive filter. */ 3906 msk_rxfilter(sc_if); 3907 3908 if (sc->msk_hw_id == CHIP_ID_YUKON_XL) { 3909 /* Clear flush mask - HW bug. */ 3910 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 0); 3911 } else { 3912 /* Flush Rx MAC FIFO on any flow control or error. */ 3913 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 3914 GMR_FS_ANY_ERR); 3915 } 3916 3917 /* 3918 * Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word 3919 * due to hardware hang on receipt of pause frames. 3920 */ 3921 reg = RX_GMF_FL_THR_DEF + 1; 3922 /* Another magic for Yukon FE+ - From Linux. */ 3923 if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P && 3924 sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) 3925 reg = 0x178; 3926 CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), reg); 3927 3928 /* Configure Tx MAC FIFO. */ 3929 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET); 3930 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR); 3931 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON); 3932 3933 /* Configure hardware VLAN tag insertion/stripping. */ 3934 msk_setvlan(sc_if, ifp); 3935 3936 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) { 3937 /* Set Rx Pause threshold. */ 3938 CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR), 3939 MSK_ECU_LLPP); 3940 CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR), 3941 MSK_ECU_ULPP); 3942 /* Configure store-and-forward for Tx. */ 3943 msk_set_tx_stfwd(sc_if); 3944 } 3945 3946 if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P && 3947 sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) { 3948 /* Disable dynamic watermark - from Linux. */ 3949 reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA)); 3950 reg &= ~0x03; 3951 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg); 3952 } 3953 3954 /* 3955 * Disable Force Sync bit and Alloc bit in Tx RAM interface 3956 * arbiter as we don't use Sync Tx queue. 3957 */ 3958 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), 3959 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); 3960 /* Enable the RAM Interface Arbiter. */ 3961 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB); 3962 3963 /* Setup RAM buffer. */ 3964 msk_set_rambuffer(sc_if); 3965 3966 /* Disable Tx sync Queue. */ 3967 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET); 3968 3969 /* Setup Tx Queue Bus Memory Interface. */ 3970 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET); 3971 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT); 3972 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON); 3973 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM); 3974 switch (sc->msk_hw_id) { 3975 case CHIP_ID_YUKON_EC_U: 3976 if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) { 3977 /* Fix for Yukon-EC Ultra: set BMU FIFO level */ 3978 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL), 3979 MSK_ECU_TXFF_LEV); 3980 } 3981 break; 3982 case CHIP_ID_YUKON_EX: 3983 /* 3984 * Yukon Extreme seems to have silicon bug for 3985 * automatic Tx checksum calculation capability. 3986 */ 3987 if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0) 3988 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_F), 3989 F_TX_CHK_AUTO_OFF); 3990 break; 3991 } 3992 3993 /* Setup Rx Queue Bus Memory Interface. */ 3994 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET); 3995 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT); 3996 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON); 3997 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM); 3998 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U && 3999 sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) { 4000 /* MAC Rx RAM Read is controlled by hardware. */ 4001 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS); 4002 } 4003 4004 msk_set_prefetch(sc, sc_if->msk_txq, 4005 sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1); 4006 msk_init_tx_ring(sc_if); 4007 4008 /* Disable Rx checksum offload and RSS hash. */ 4009 reg = BMU_DIS_RX_RSS_HASH; 4010 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 && 4011 (ifp->if_capenable & IFCAP_RXCSUM) != 0) 4012 reg |= BMU_ENA_RX_CHKSUM; 4013 else 4014 reg |= BMU_DIS_RX_CHKSUM; 4015 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), reg); 4016 if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) { 4017 msk_set_prefetch(sc, sc_if->msk_rxq, 4018 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr, 4019 MSK_JUMBO_RX_RING_CNT - 1); 4020 error = msk_init_jumbo_rx_ring(sc_if); 4021 } else { 4022 msk_set_prefetch(sc, sc_if->msk_rxq, 4023 sc_if->msk_rdata.msk_rx_ring_paddr, 4024 MSK_RX_RING_CNT - 1); 4025 error = msk_init_rx_ring(sc_if); 4026 } 4027 if (error != 0) { 4028 device_printf(sc_if->msk_if_dev, 4029 "initialization failed: no memory for Rx buffers\n"); 4030 msk_stop(sc_if); 4031 return; 4032 } 4033 if (sc->msk_hw_id == CHIP_ID_YUKON_EX || 4034 sc->msk_hw_id == CHIP_ID_YUKON_SUPR) { 4035 /* Disable flushing of non-ASF packets. */ 4036 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), 4037 GMF_RX_MACSEC_FLUSH_OFF); 4038 } 4039 4040 /* Configure interrupt handling. */ 4041 if (sc_if->msk_port == MSK_PORT_A) { 4042 sc->msk_intrmask |= Y2_IS_PORT_A; 4043 sc->msk_intrhwemask |= Y2_HWE_L1_MASK; 4044 } else { 4045 sc->msk_intrmask |= Y2_IS_PORT_B; 4046 sc->msk_intrhwemask |= Y2_HWE_L2_MASK; 4047 } 4048 /* Configure IRQ moderation mask. */ 4049 CSR_WRITE_4(sc, B2_IRQM_MSK, sc->msk_intrmask); 4050 if (sc->msk_int_holdoff > 0) { 4051 /* Configure initial IRQ moderation timer value. */ 4052 CSR_WRITE_4(sc, B2_IRQM_INI, 4053 MSK_USECS(sc, sc->msk_int_holdoff)); 4054 CSR_WRITE_4(sc, B2_IRQM_VAL, 4055 MSK_USECS(sc, sc->msk_int_holdoff)); 4056 /* Start IRQ moderation. */ 4057 CSR_WRITE_1(sc, B2_IRQM_CTRL, TIM_START); 4058 } 4059 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask); 4060 CSR_READ_4(sc, B0_HWE_IMSK); 4061 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 4062 CSR_READ_4(sc, B0_IMSK); 4063 4064 ifp->if_drv_flags |= IFF_DRV_RUNNING; 4065 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 4066 4067 sc_if->msk_flags &= ~MSK_FLAG_LINK; 4068 mii_mediachg(mii); 4069 4070 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if); 4071 } 4072 4073 static void 4074 msk_set_rambuffer(struct msk_if_softc *sc_if) 4075 { 4076 struct msk_softc *sc; 4077 int ltpp, utpp; 4078 4079 sc = sc_if->msk_softc; 4080 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) 4081 return; 4082 4083 /* Setup Rx Queue. */ 4084 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR); 4085 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START), 4086 sc->msk_rxqstart[sc_if->msk_port] / 8); 4087 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END), 4088 sc->msk_rxqend[sc_if->msk_port] / 8); 4089 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP), 4090 sc->msk_rxqstart[sc_if->msk_port] / 8); 4091 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP), 4092 sc->msk_rxqstart[sc_if->msk_port] / 8); 4093 4094 utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 - 4095 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8; 4096 ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 - 4097 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8; 4098 if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE) 4099 ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8; 4100 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp); 4101 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp); 4102 /* Set Rx priority(RB_RX_UTHP/RB_RX_LTHP) thresholds? */ 4103 4104 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD); 4105 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL)); 4106 4107 /* Setup Tx Queue. */ 4108 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR); 4109 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START), 4110 sc->msk_txqstart[sc_if->msk_port] / 8); 4111 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END), 4112 sc->msk_txqend[sc_if->msk_port] / 8); 4113 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP), 4114 sc->msk_txqstart[sc_if->msk_port] / 8); 4115 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP), 4116 sc->msk_txqstart[sc_if->msk_port] / 8); 4117 /* Enable Store & Forward for Tx side. */ 4118 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD); 4119 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD); 4120 CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL)); 4121 } 4122 4123 static void 4124 msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr, 4125 uint32_t count) 4126 { 4127 4128 /* Reset the prefetch unit. */ 4129 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG), 4130 PREF_UNIT_RST_SET); 4131 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG), 4132 PREF_UNIT_RST_CLR); 4133 /* Set LE base address. */ 4134 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG), 4135 MSK_ADDR_LO(addr)); 4136 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG), 4137 MSK_ADDR_HI(addr)); 4138 /* Set the list last index. */ 4139 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG), 4140 count); 4141 /* Turn on prefetch unit. */ 4142 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG), 4143 PREF_UNIT_OP_ON); 4144 /* Dummy read to ensure write. */ 4145 CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG)); 4146 } 4147 4148 static void 4149 msk_stop(struct msk_if_softc *sc_if) 4150 { 4151 struct msk_softc *sc; 4152 struct msk_txdesc *txd; 4153 struct msk_rxdesc *rxd; 4154 struct msk_rxdesc *jrxd; 4155 struct ifnet *ifp; 4156 uint32_t val; 4157 int i; 4158 4159 MSK_IF_LOCK_ASSERT(sc_if); 4160 sc = sc_if->msk_softc; 4161 ifp = sc_if->msk_ifp; 4162 4163 callout_stop(&sc_if->msk_tick_ch); 4164 sc_if->msk_watchdog_timer = 0; 4165 4166 /* Disable interrupts. */ 4167 if (sc_if->msk_port == MSK_PORT_A) { 4168 sc->msk_intrmask &= ~Y2_IS_PORT_A; 4169 sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK; 4170 } else { 4171 sc->msk_intrmask &= ~Y2_IS_PORT_B; 4172 sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK; 4173 } 4174 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask); 4175 CSR_READ_4(sc, B0_HWE_IMSK); 4176 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 4177 CSR_READ_4(sc, B0_IMSK); 4178 4179 /* Disable Tx/Rx MAC. */ 4180 val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); 4181 val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); 4182 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val); 4183 /* Read again to ensure writing. */ 4184 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); 4185 /* Update stats and clear counters. */ 4186 msk_stats_update(sc_if); 4187 4188 /* Stop Tx BMU. */ 4189 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP); 4190 val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR)); 4191 for (i = 0; i < MSK_TIMEOUT; i++) { 4192 if ((val & (BMU_STOP | BMU_IDLE)) == 0) { 4193 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), 4194 BMU_STOP); 4195 val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR)); 4196 } else 4197 break; 4198 DELAY(1); 4199 } 4200 if (i == MSK_TIMEOUT) 4201 device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n"); 4202 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), 4203 RB_RST_SET | RB_DIS_OP_MD); 4204 4205 /* Disable all GMAC interrupt. */ 4206 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0); 4207 /* Disable PHY interrupt. */ 4208 msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0); 4209 4210 /* Disable the RAM Interface Arbiter. */ 4211 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB); 4212 4213 /* Reset the PCI FIFO of the async Tx queue */ 4214 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), 4215 BMU_RST_SET | BMU_FIFO_RST); 4216 4217 /* Reset the Tx prefetch units. */ 4218 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG), 4219 PREF_UNIT_RST_SET); 4220 4221 /* Reset the RAM Buffer async Tx queue. */ 4222 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET); 4223 4224 /* Reset Tx MAC FIFO. */ 4225 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET); 4226 /* Set Pause Off. */ 4227 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF); 4228 4229 /* 4230 * The Rx Stop command will not work for Yukon-2 if the BMU does not 4231 * reach the end of packet and since we can't make sure that we have 4232 * incoming data, we must reset the BMU while it is not during a DMA 4233 * transfer. Since it is possible that the Rx path is still active, 4234 * the Rx RAM buffer will be stopped first, so any possible incoming 4235 * data will not trigger a DMA. After the RAM buffer is stopped, the 4236 * BMU is polled until any DMA in progress is ended and only then it 4237 * will be reset. 4238 */ 4239 4240 /* Disable the RAM Buffer receive queue. */ 4241 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD); 4242 for (i = 0; i < MSK_TIMEOUT; i++) { 4243 if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) == 4244 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL))) 4245 break; 4246 DELAY(1); 4247 } 4248 if (i == MSK_TIMEOUT) 4249 device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n"); 4250 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), 4251 BMU_RST_SET | BMU_FIFO_RST); 4252 /* Reset the Rx prefetch unit. */ 4253 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG), 4254 PREF_UNIT_RST_SET); 4255 /* Reset the RAM Buffer receive queue. */ 4256 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET); 4257 /* Reset Rx MAC FIFO. */ 4258 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET); 4259 4260 /* Free Rx and Tx mbufs still in the queues. */ 4261 for (i = 0; i < MSK_RX_RING_CNT; i++) { 4262 rxd = &sc_if->msk_cdata.msk_rxdesc[i]; 4263 if (rxd->rx_m != NULL) { 4264 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, 4265 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 4266 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, 4267 rxd->rx_dmamap); 4268 m_freem(rxd->rx_m); 4269 rxd->rx_m = NULL; 4270 } 4271 } 4272 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 4273 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; 4274 if (jrxd->rx_m != NULL) { 4275 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, 4276 jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 4277 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag, 4278 jrxd->rx_dmamap); 4279 m_freem(jrxd->rx_m); 4280 jrxd->rx_m = NULL; 4281 } 4282 } 4283 for (i = 0; i < MSK_TX_RING_CNT; i++) { 4284 txd = &sc_if->msk_cdata.msk_txdesc[i]; 4285 if (txd->tx_m != NULL) { 4286 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, 4287 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 4288 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, 4289 txd->tx_dmamap); 4290 m_freem(txd->tx_m); 4291 txd->tx_m = NULL; 4292 } 4293 } 4294 4295 /* 4296 * Mark the interface down. 4297 */ 4298 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 4299 sc_if->msk_flags &= ~MSK_FLAG_LINK; 4300 } 4301 4302 /* 4303 * When GM_PAR_MIB_CLR bit of GM_PHY_ADDR is set, reading lower 4304 * counter clears high 16 bits of the counter such that accessing 4305 * lower 16 bits should be the last operation. 4306 */ 4307 #define MSK_READ_MIB32(x, y) \ 4308 (((uint32_t)GMAC_READ_2(sc, x, (y) + 4)) << 16) + \ 4309 (uint32_t)GMAC_READ_2(sc, x, y) 4310 #define MSK_READ_MIB64(x, y) \ 4311 (((uint64_t)MSK_READ_MIB32(x, (y) + 8)) << 32) + \ 4312 (uint64_t)MSK_READ_MIB32(x, y) 4313 4314 static void 4315 msk_stats_clear(struct msk_if_softc *sc_if) 4316 { 4317 struct msk_softc *sc; 4318 uint32_t reg; 4319 uint16_t gmac; 4320 int i; 4321 4322 MSK_IF_LOCK_ASSERT(sc_if); 4323 4324 sc = sc_if->msk_softc; 4325 /* Set MIB Clear Counter Mode. */ 4326 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR); 4327 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR); 4328 /* Read all MIB Counters with Clear Mode set. */ 4329 for (i = GM_RXF_UC_OK; i <= GM_TXE_FIFO_UR; i += sizeof(uint32_t)) 4330 reg = MSK_READ_MIB32(sc_if->msk_port, i); 4331 /* Clear MIB Clear Counter Mode. */ 4332 gmac &= ~GM_PAR_MIB_CLR; 4333 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac); 4334 } 4335 4336 static void 4337 msk_stats_update(struct msk_if_softc *sc_if) 4338 { 4339 struct msk_softc *sc; 4340 struct ifnet *ifp; 4341 struct msk_hw_stats *stats; 4342 uint16_t gmac; 4343 uint32_t reg; 4344 4345 MSK_IF_LOCK_ASSERT(sc_if); 4346 4347 ifp = sc_if->msk_ifp; 4348 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 4349 return; 4350 sc = sc_if->msk_softc; 4351 stats = &sc_if->msk_stats; 4352 /* Set MIB Clear Counter Mode. */ 4353 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR); 4354 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR); 4355 4356 /* Rx stats. */ 4357 stats->rx_ucast_frames += 4358 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_UC_OK); 4359 stats->rx_bcast_frames += 4360 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_BC_OK); 4361 stats->rx_pause_frames += 4362 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MPAUSE); 4363 stats->rx_mcast_frames += 4364 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MC_OK); 4365 stats->rx_crc_errs += 4366 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_FCS_ERR); 4367 reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE1); 4368 stats->rx_good_octets += 4369 MSK_READ_MIB64(sc_if->msk_port, GM_RXO_OK_LO); 4370 stats->rx_bad_octets += 4371 MSK_READ_MIB64(sc_if->msk_port, GM_RXO_ERR_LO); 4372 stats->rx_runts += 4373 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SHT); 4374 stats->rx_runt_errs += 4375 MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FRAG); 4376 stats->rx_pkts_64 += 4377 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_64B); 4378 stats->rx_pkts_65_127 += 4379 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_127B); 4380 stats->rx_pkts_128_255 += 4381 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_255B); 4382 stats->rx_pkts_256_511 += 4383 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_511B); 4384 stats->rx_pkts_512_1023 += 4385 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1023B); 4386 stats->rx_pkts_1024_1518 += 4387 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1518B); 4388 stats->rx_pkts_1519_max += 4389 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MAX_SZ); 4390 stats->rx_pkts_too_long += 4391 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_LNG_ERR); 4392 stats->rx_pkts_jabbers += 4393 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_JAB_PKT); 4394 reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE2); 4395 stats->rx_fifo_oflows += 4396 MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FIFO_OV); 4397 reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE3); 4398 4399 /* Tx stats. */ 4400 stats->tx_ucast_frames += 4401 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_UC_OK); 4402 stats->tx_bcast_frames += 4403 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_BC_OK); 4404 stats->tx_pause_frames += 4405 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MPAUSE); 4406 stats->tx_mcast_frames += 4407 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MC_OK); 4408 stats->tx_octets += 4409 MSK_READ_MIB64(sc_if->msk_port, GM_TXO_OK_LO); 4410 stats->tx_pkts_64 += 4411 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_64B); 4412 stats->tx_pkts_65_127 += 4413 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_127B); 4414 stats->tx_pkts_128_255 += 4415 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_255B); 4416 stats->tx_pkts_256_511 += 4417 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_511B); 4418 stats->tx_pkts_512_1023 += 4419 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1023B); 4420 stats->tx_pkts_1024_1518 += 4421 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1518B); 4422 stats->tx_pkts_1519_max += 4423 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MAX_SZ); 4424 reg = MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SPARE1); 4425 stats->tx_colls += 4426 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_COL); 4427 stats->tx_late_colls += 4428 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_LAT_COL); 4429 stats->tx_excess_colls += 4430 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_ABO_COL); 4431 stats->tx_multi_colls += 4432 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MUL_COL); 4433 stats->tx_single_colls += 4434 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SNG_COL); 4435 stats->tx_underflows += 4436 MSK_READ_MIB32(sc_if->msk_port, GM_TXE_FIFO_UR); 4437 /* Clear MIB Clear Counter Mode. */ 4438 gmac &= ~GM_PAR_MIB_CLR; 4439 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac); 4440 } 4441 4442 static int 4443 msk_sysctl_stat32(SYSCTL_HANDLER_ARGS) 4444 { 4445 struct msk_softc *sc; 4446 struct msk_if_softc *sc_if; 4447 uint32_t result, *stat; 4448 int off; 4449 4450 sc_if = (struct msk_if_softc *)arg1; 4451 sc = sc_if->msk_softc; 4452 off = arg2; 4453 stat = (uint32_t *)((uint8_t *)&sc_if->msk_stats + off); 4454 4455 MSK_IF_LOCK(sc_if); 4456 result = MSK_READ_MIB32(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2); 4457 result += *stat; 4458 MSK_IF_UNLOCK(sc_if); 4459 4460 return (sysctl_handle_int(oidp, &result, 0, req)); 4461 } 4462 4463 static int 4464 msk_sysctl_stat64(SYSCTL_HANDLER_ARGS) 4465 { 4466 struct msk_softc *sc; 4467 struct msk_if_softc *sc_if; 4468 uint64_t result, *stat; 4469 int off; 4470 4471 sc_if = (struct msk_if_softc *)arg1; 4472 sc = sc_if->msk_softc; 4473 off = arg2; 4474 stat = (uint64_t *)((uint8_t *)&sc_if->msk_stats + off); 4475 4476 MSK_IF_LOCK(sc_if); 4477 result = MSK_READ_MIB64(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2); 4478 result += *stat; 4479 MSK_IF_UNLOCK(sc_if); 4480 4481 return (sysctl_handle_64(oidp, &result, 0, req)); 4482 } 4483 4484 #undef MSK_READ_MIB32 4485 #undef MSK_READ_MIB64 4486 4487 #define MSK_SYSCTL_STAT32(sc, c, o, p, n, d) \ 4488 SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_UINT | CTLFLAG_RD, \ 4489 sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat32, \ 4490 "IU", d) 4491 #define MSK_SYSCTL_STAT64(sc, c, o, p, n, d) \ 4492 SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_U64 | CTLFLAG_RD, \ 4493 sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat64, \ 4494 "QU", d) 4495 4496 static void 4497 msk_sysctl_node(struct msk_if_softc *sc_if) 4498 { 4499 struct sysctl_ctx_list *ctx; 4500 struct sysctl_oid_list *child, *schild; 4501 struct sysctl_oid *tree; 4502 4503 ctx = device_get_sysctl_ctx(sc_if->msk_if_dev); 4504 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc_if->msk_if_dev)); 4505 4506 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, 4507 NULL, "MSK Statistics"); 4508 schild = child = SYSCTL_CHILDREN(tree); 4509 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD, 4510 NULL, "MSK RX Statistics"); 4511 child = SYSCTL_CHILDREN(tree); 4512 MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames", 4513 child, rx_ucast_frames, "Good unicast frames"); 4514 MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames", 4515 child, rx_bcast_frames, "Good broadcast frames"); 4516 MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames", 4517 child, rx_pause_frames, "Pause frames"); 4518 MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames", 4519 child, rx_mcast_frames, "Multicast frames"); 4520 MSK_SYSCTL_STAT32(sc_if, ctx, "crc_errs", 4521 child, rx_crc_errs, "CRC errors"); 4522 MSK_SYSCTL_STAT64(sc_if, ctx, "good_octets", 4523 child, rx_good_octets, "Good octets"); 4524 MSK_SYSCTL_STAT64(sc_if, ctx, "bad_octets", 4525 child, rx_bad_octets, "Bad octets"); 4526 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64", 4527 child, rx_pkts_64, "64 bytes frames"); 4528 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127", 4529 child, rx_pkts_65_127, "65 to 127 bytes frames"); 4530 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255", 4531 child, rx_pkts_128_255, "128 to 255 bytes frames"); 4532 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511", 4533 child, rx_pkts_256_511, "256 to 511 bytes frames"); 4534 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023", 4535 child, rx_pkts_512_1023, "512 to 1023 bytes frames"); 4536 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518", 4537 child, rx_pkts_1024_1518, "1024 to 1518 bytes frames"); 4538 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max", 4539 child, rx_pkts_1519_max, "1519 to max frames"); 4540 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_too_long", 4541 child, rx_pkts_too_long, "frames too long"); 4542 MSK_SYSCTL_STAT32(sc_if, ctx, "jabbers", 4543 child, rx_pkts_jabbers, "Jabber errors"); 4544 MSK_SYSCTL_STAT32(sc_if, ctx, "overflows", 4545 child, rx_fifo_oflows, "FIFO overflows"); 4546 4547 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD, 4548 NULL, "MSK TX Statistics"); 4549 child = SYSCTL_CHILDREN(tree); 4550 MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames", 4551 child, tx_ucast_frames, "Unicast frames"); 4552 MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames", 4553 child, tx_bcast_frames, "Broadcast frames"); 4554 MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames", 4555 child, tx_pause_frames, "Pause frames"); 4556 MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames", 4557 child, tx_mcast_frames, "Multicast frames"); 4558 MSK_SYSCTL_STAT64(sc_if, ctx, "octets", 4559 child, tx_octets, "Octets"); 4560 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64", 4561 child, tx_pkts_64, "64 bytes frames"); 4562 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127", 4563 child, tx_pkts_65_127, "65 to 127 bytes frames"); 4564 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255", 4565 child, tx_pkts_128_255, "128 to 255 bytes frames"); 4566 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511", 4567 child, tx_pkts_256_511, "256 to 511 bytes frames"); 4568 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023", 4569 child, tx_pkts_512_1023, "512 to 1023 bytes frames"); 4570 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518", 4571 child, tx_pkts_1024_1518, "1024 to 1518 bytes frames"); 4572 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max", 4573 child, tx_pkts_1519_max, "1519 to max frames"); 4574 MSK_SYSCTL_STAT32(sc_if, ctx, "colls", 4575 child, tx_colls, "Collisions"); 4576 MSK_SYSCTL_STAT32(sc_if, ctx, "late_colls", 4577 child, tx_late_colls, "Late collisions"); 4578 MSK_SYSCTL_STAT32(sc_if, ctx, "excess_colls", 4579 child, tx_excess_colls, "Excessive collisions"); 4580 MSK_SYSCTL_STAT32(sc_if, ctx, "multi_colls", 4581 child, tx_multi_colls, "Multiple collisions"); 4582 MSK_SYSCTL_STAT32(sc_if, ctx, "single_colls", 4583 child, tx_single_colls, "Single collisions"); 4584 MSK_SYSCTL_STAT32(sc_if, ctx, "underflows", 4585 child, tx_underflows, "FIFO underflows"); 4586 } 4587 4588 #undef MSK_SYSCTL_STAT32 4589 #undef MSK_SYSCTL_STAT64 4590 4591 static int 4592 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 4593 { 4594 int error, value; 4595 4596 if (!arg1) 4597 return (EINVAL); 4598 value = *(int *)arg1; 4599 error = sysctl_handle_int(oidp, &value, 0, req); 4600 if (error || !req->newptr) 4601 return (error); 4602 if (value < low || value > high) 4603 return (EINVAL); 4604 *(int *)arg1 = value; 4605 4606 return (0); 4607 } 4608 4609 static int 4610 sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS) 4611 { 4612 4613 return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN, 4614 MSK_PROC_MAX)); 4615 } 4616