1 /****************************************************************************** 2 * 3 * Name : sky2.c 4 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x 5 * Version: $Revision: 1.23 $ 6 * Date : $Date: 2005/12/22 09:04:11 $ 7 * Purpose: Main driver source file 8 * 9 *****************************************************************************/ 10 11 /****************************************************************************** 12 * 13 * LICENSE: 14 * Copyright (C) Marvell International Ltd. and/or its affiliates 15 * 16 * The computer program files contained in this folder ("Files") 17 * are provided to you under the BSD-type license terms provided 18 * below, and any use of such Files and any derivative works 19 * thereof created by you shall be governed by the following terms 20 * and conditions: 21 * 22 * - Redistributions of source code must retain the above copyright 23 * notice, this list of conditions and the following disclaimer. 24 * - Redistributions in binary form must reproduce the above 25 * copyright notice, this list of conditions and the following 26 * disclaimer in the documentation and/or other materials provided 27 * with the distribution. 28 * - Neither the name of Marvell nor the names of its contributors 29 * may be used to endorse or promote products derived from this 30 * software without specific prior written permission. 31 * 32 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 33 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 34 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 35 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 36 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 37 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 38 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 39 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 40 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 41 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 42 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 43 * OF THE POSSIBILITY OF SUCH DAMAGE. 44 * /LICENSE 45 * 46 *****************************************************************************/ 47 48 /*- 49 * Copyright (c) 1997, 1998, 1999, 2000 50 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 51 * 52 * Redistribution and use in source and binary forms, with or without 53 * modification, are permitted provided that the following conditions 54 * are met: 55 * 1. Redistributions of source code must retain the above copyright 56 * notice, this list of conditions and the following disclaimer. 57 * 2. Redistributions in binary form must reproduce the above copyright 58 * notice, this list of conditions and the following disclaimer in the 59 * documentation and/or other materials provided with the distribution. 60 * 3. All advertising materials mentioning features or use of this software 61 * must display the following acknowledgement: 62 * This product includes software developed by Bill Paul. 63 * 4. Neither the name of the author nor the names of any co-contributors 64 * may be used to endorse or promote products derived from this software 65 * without specific prior written permission. 
66 * 67 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 68 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 69 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 70 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 71 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 72 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 73 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 74 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 75 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 76 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 77 * THE POSSIBILITY OF SUCH DAMAGE. 78 */ 79 /*- 80 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu> 81 * 82 * Permission to use, copy, modify, and distribute this software for any 83 * purpose with or without fee is hereby granted, provided that the above 84 * copyright notice and this permission notice appear in all copies. 85 * 86 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 87 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 88 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 89 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 90 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 91 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 92 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 93 */ 94 95 /* 96 * Device driver for the Marvell Yukon II Ethernet controller. 97 * Due to lack of documentation, this driver is based on the code from 98 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x. 99 */ 100 101 #include <sys/cdefs.h> 102 __FBSDID("$FreeBSD$"); 103 104 #include <sys/param.h> 105 #include <sys/systm.h> 106 #include <sys/bus.h> 107 #include <sys/endian.h> 108 #include <sys/mbuf.h> 109 #include <sys/malloc.h> 110 #include <sys/kernel.h> 111 #include <sys/module.h> 112 #include <sys/socket.h> 113 #include <sys/sockio.h> 114 #include <sys/queue.h> 115 #include <sys/sysctl.h> 116 #include <sys/taskqueue.h> 117 118 #include <net/bpf.h> 119 #include <net/ethernet.h> 120 #include <net/if.h> 121 #include <net/if_arp.h> 122 #include <net/if_dl.h> 123 #include <net/if_media.h> 124 #include <net/if_types.h> 125 #include <net/if_vlan_var.h> 126 127 #include <netinet/in.h> 128 #include <netinet/in_systm.h> 129 #include <netinet/ip.h> 130 #include <netinet/tcp.h> 131 #include <netinet/udp.h> 132 133 #include <machine/bus.h> 134 #include <machine/in_cksum.h> 135 #include <machine/resource.h> 136 #include <sys/rman.h> 137 138 #include <dev/mii/mii.h> 139 #include <dev/mii/miivar.h> 140 141 #include <dev/pci/pcireg.h> 142 #include <dev/pci/pcivar.h> 143 144 #include <dev/msk/if_mskreg.h> 145 146 MODULE_DEPEND(msk, pci, 1, 1, 1); 147 MODULE_DEPEND(msk, ether, 1, 1, 1); 148 MODULE_DEPEND(msk, miibus, 1, 1, 1); 149 150 /* "device miibus" required. See GENERIC if you get errors here. */ 151 #include "miibus_if.h" 152 153 /* Tunables. 
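 *
 * All three knobs below are boot-time loader tunables.  A minimal
 * /boot/loader.conf sketch (values are illustrative examples, not the
 * defaults, which are all 0):
 *
 *	hw.msk.msi_disable="1"		# use INTx instead of MSI
 *	hw.msk.legacy_intr="1"		# use the legacy interrupt handler
 *	hw.msk.jumbo_disable="1"	# do not set up jumbo frame buffers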
*/ 154 static int msi_disable = 0; 155 TUNABLE_INT("hw.msk.msi_disable", &msi_disable); 156 static int legacy_intr = 0; 157 TUNABLE_INT("hw.msk.legacy_intr", &legacy_intr); 158 static int jumbo_disable = 0; 159 TUNABLE_INT("hw.msk.jumbo_disable", &jumbo_disable); 160 161 #define MSK_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) 162 163 /* 164 * Devices supported by this driver. 165 */ 166 static struct msk_product { 167 uint16_t msk_vendorid; 168 uint16_t msk_deviceid; 169 const char *msk_name; 170 } msk_products[] = { 171 { VENDORID_SK, DEVICEID_SK_YUKON2, 172 "SK-9Sxx Gigabit Ethernet" }, 173 { VENDORID_SK, DEVICEID_SK_YUKON2_EXPR, 174 "SK-9Exx Gigabit Ethernet"}, 175 { VENDORID_MARVELL, DEVICEID_MRVL_8021CU, 176 "Marvell Yukon 88E8021CU Gigabit Ethernet" }, 177 { VENDORID_MARVELL, DEVICEID_MRVL_8021X, 178 "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" }, 179 { VENDORID_MARVELL, DEVICEID_MRVL_8022CU, 180 "Marvell Yukon 88E8022CU Gigabit Ethernet" }, 181 { VENDORID_MARVELL, DEVICEID_MRVL_8022X, 182 "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" }, 183 { VENDORID_MARVELL, DEVICEID_MRVL_8061CU, 184 "Marvell Yukon 88E8061CU Gigabit Ethernet" }, 185 { VENDORID_MARVELL, DEVICEID_MRVL_8061X, 186 "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" }, 187 { VENDORID_MARVELL, DEVICEID_MRVL_8062CU, 188 "Marvell Yukon 88E8062CU Gigabit Ethernet" }, 189 { VENDORID_MARVELL, DEVICEID_MRVL_8062X, 190 "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" }, 191 { VENDORID_MARVELL, DEVICEID_MRVL_8035, 192 "Marvell Yukon 88E8035 Fast Ethernet" }, 193 { VENDORID_MARVELL, DEVICEID_MRVL_8036, 194 "Marvell Yukon 88E8036 Fast Ethernet" }, 195 { VENDORID_MARVELL, DEVICEID_MRVL_8038, 196 "Marvell Yukon 88E8038 Fast Ethernet" }, 197 { VENDORID_MARVELL, DEVICEID_MRVL_8039, 198 "Marvell Yukon 88E8039 Fast Ethernet" }, 199 { VENDORID_MARVELL, DEVICEID_MRVL_8040, 200 "Marvell Yukon 88E8040 Fast Ethernet" }, 201 { VENDORID_MARVELL, DEVICEID_MRVL_8040T, 202 "Marvell Yukon 88E8040T Fast Ethernet" }, 203 { VENDORID_MARVELL, DEVICEID_MRVL_8042, 204 "Marvell Yukon 88E8042 Fast Ethernet" }, 205 { VENDORID_MARVELL, DEVICEID_MRVL_8048, 206 "Marvell Yukon 88E8048 Fast Ethernet" }, 207 { VENDORID_MARVELL, DEVICEID_MRVL_4361, 208 "Marvell Yukon 88E8050 Gigabit Ethernet" }, 209 { VENDORID_MARVELL, DEVICEID_MRVL_4360, 210 "Marvell Yukon 88E8052 Gigabit Ethernet" }, 211 { VENDORID_MARVELL, DEVICEID_MRVL_4362, 212 "Marvell Yukon 88E8053 Gigabit Ethernet" }, 213 { VENDORID_MARVELL, DEVICEID_MRVL_4363, 214 "Marvell Yukon 88E8055 Gigabit Ethernet" }, 215 { VENDORID_MARVELL, DEVICEID_MRVL_4364, 216 "Marvell Yukon 88E8056 Gigabit Ethernet" }, 217 { VENDORID_MARVELL, DEVICEID_MRVL_4365, 218 "Marvell Yukon 88E8070 Gigabit Ethernet" }, 219 { VENDORID_MARVELL, DEVICEID_MRVL_436A, 220 "Marvell Yukon 88E8058 Gigabit Ethernet" }, 221 { VENDORID_MARVELL, DEVICEID_MRVL_436B, 222 "Marvell Yukon 88E8071 Gigabit Ethernet" }, 223 { VENDORID_MARVELL, DEVICEID_MRVL_436C, 224 "Marvell Yukon 88E8072 Gigabit Ethernet" }, 225 { VENDORID_MARVELL, DEVICEID_MRVL_4380, 226 "Marvell Yukon 88E8057 Gigabit Ethernet" }, 227 { VENDORID_DLINK, DEVICEID_DLINK_DGE550SX, 228 "D-Link 550SX Gigabit Ethernet" }, 229 { VENDORID_DLINK, DEVICEID_DLINK_DGE560SX, 230 "D-Link 560SX Gigabit Ethernet" }, 231 { VENDORID_DLINK, DEVICEID_DLINK_DGE560T, 232 "D-Link 560T Gigabit Ethernet" } 233 }; 234 235 static const char *model_name[] = { 236 "Yukon XL", 237 "Yukon EC Ultra", 238 "Yukon EX", 239 "Yukon EC", 240 "Yukon FE", 241 "Yukon FE+", 242 "Yukon Supreme", 243 "Yukon Ultra 2" 244 }; 245 246 static int 
mskc_probe(device_t); 247 static int mskc_attach(device_t); 248 static int mskc_detach(device_t); 249 static int mskc_shutdown(device_t); 250 static int mskc_setup_rambuffer(struct msk_softc *); 251 static int mskc_suspend(device_t); 252 static int mskc_resume(device_t); 253 static void mskc_reset(struct msk_softc *); 254 255 static int msk_probe(device_t); 256 static int msk_attach(device_t); 257 static int msk_detach(device_t); 258 259 static void msk_tick(void *); 260 static void msk_legacy_intr(void *); 261 static int msk_intr(void *); 262 static void msk_int_task(void *, int); 263 static void msk_intr_phy(struct msk_if_softc *); 264 static void msk_intr_gmac(struct msk_if_softc *); 265 static __inline void msk_rxput(struct msk_if_softc *); 266 static int msk_handle_events(struct msk_softc *); 267 static void msk_handle_hwerr(struct msk_if_softc *, uint32_t); 268 static void msk_intr_hwerr(struct msk_softc *); 269 #ifndef __NO_STRICT_ALIGNMENT 270 static __inline void msk_fixup_rx(struct mbuf *); 271 #endif 272 static void msk_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int); 273 static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, uint32_t, int); 274 static void msk_txeof(struct msk_if_softc *, int); 275 static int msk_encap(struct msk_if_softc *, struct mbuf **); 276 static void msk_tx_task(void *, int); 277 static void msk_start(struct ifnet *); 278 static int msk_ioctl(struct ifnet *, u_long, caddr_t); 279 static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t); 280 static void msk_set_rambuffer(struct msk_if_softc *); 281 static void msk_set_tx_stfwd(struct msk_if_softc *); 282 static void msk_init(void *); 283 static void msk_init_locked(struct msk_if_softc *); 284 static void msk_stop(struct msk_if_softc *); 285 static void msk_watchdog(struct msk_if_softc *); 286 static int msk_mediachange(struct ifnet *); 287 static void msk_mediastatus(struct ifnet *, struct ifmediareq *); 288 static void msk_phy_power(struct msk_softc *, int); 289 static void msk_dmamap_cb(void *, bus_dma_segment_t *, int, int); 290 static int msk_status_dma_alloc(struct msk_softc *); 291 static void msk_status_dma_free(struct msk_softc *); 292 static int msk_txrx_dma_alloc(struct msk_if_softc *); 293 static int msk_rx_dma_jalloc(struct msk_if_softc *); 294 static void msk_txrx_dma_free(struct msk_if_softc *); 295 static void msk_rx_dma_jfree(struct msk_if_softc *); 296 static int msk_init_rx_ring(struct msk_if_softc *); 297 static int msk_init_jumbo_rx_ring(struct msk_if_softc *); 298 static void msk_init_tx_ring(struct msk_if_softc *); 299 static __inline void msk_discard_rxbuf(struct msk_if_softc *, int); 300 static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int); 301 static int msk_newbuf(struct msk_if_softc *, int); 302 static int msk_jumbo_newbuf(struct msk_if_softc *, int); 303 304 static int msk_phy_readreg(struct msk_if_softc *, int, int); 305 static int msk_phy_writereg(struct msk_if_softc *, int, int, int); 306 static int msk_miibus_readreg(device_t, int, int); 307 static int msk_miibus_writereg(device_t, int, int, int); 308 static void msk_miibus_statchg(device_t); 309 310 static void msk_rxfilter(struct msk_if_softc *); 311 static void msk_setvlan(struct msk_if_softc *, struct ifnet *); 312 313 static void msk_stats_clear(struct msk_if_softc *); 314 static void msk_stats_update(struct msk_if_softc *); 315 static int msk_sysctl_stat32(SYSCTL_HANDLER_ARGS); 316 static int msk_sysctl_stat64(SYSCTL_HANDLER_ARGS); 317 static void 
msk_sysctl_node(struct msk_if_softc *); 318 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); 319 static int sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS); 320 321 static device_method_t mskc_methods[] = { 322 /* Device interface */ 323 DEVMETHOD(device_probe, mskc_probe), 324 DEVMETHOD(device_attach, mskc_attach), 325 DEVMETHOD(device_detach, mskc_detach), 326 DEVMETHOD(device_suspend, mskc_suspend), 327 DEVMETHOD(device_resume, mskc_resume), 328 DEVMETHOD(device_shutdown, mskc_shutdown), 329 330 /* bus interface */ 331 DEVMETHOD(bus_print_child, bus_generic_print_child), 332 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 333 334 { NULL, NULL } 335 }; 336 337 static driver_t mskc_driver = { 338 "mskc", 339 mskc_methods, 340 sizeof(struct msk_softc) 341 }; 342 343 static devclass_t mskc_devclass; 344 345 static device_method_t msk_methods[] = { 346 /* Device interface */ 347 DEVMETHOD(device_probe, msk_probe), 348 DEVMETHOD(device_attach, msk_attach), 349 DEVMETHOD(device_detach, msk_detach), 350 DEVMETHOD(device_shutdown, bus_generic_shutdown), 351 352 /* bus interface */ 353 DEVMETHOD(bus_print_child, bus_generic_print_child), 354 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 355 356 /* MII interface */ 357 DEVMETHOD(miibus_readreg, msk_miibus_readreg), 358 DEVMETHOD(miibus_writereg, msk_miibus_writereg), 359 DEVMETHOD(miibus_statchg, msk_miibus_statchg), 360 361 { NULL, NULL } 362 }; 363 364 static driver_t msk_driver = { 365 "msk", 366 msk_methods, 367 sizeof(struct msk_if_softc) 368 }; 369 370 static devclass_t msk_devclass; 371 372 DRIVER_MODULE(mskc, pci, mskc_driver, mskc_devclass, 0, 0); 373 DRIVER_MODULE(msk, mskc, msk_driver, msk_devclass, 0, 0); 374 DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, 0, 0); 375 376 static struct resource_spec msk_res_spec_io[] = { 377 { SYS_RES_IOPORT, PCIR_BAR(1), RF_ACTIVE }, 378 { -1, 0, 0 } 379 }; 380 381 static struct resource_spec msk_res_spec_mem[] = { 382 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE }, 383 { -1, 0, 0 } 384 }; 385 386 static struct resource_spec msk_irq_spec_legacy[] = { 387 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, 388 { -1, 0, 0 } 389 }; 390 391 static struct resource_spec msk_irq_spec_msi[] = { 392 { SYS_RES_IRQ, 1, RF_ACTIVE }, 393 { -1, 0, 0 } 394 }; 395 396 static int 397 msk_miibus_readreg(device_t dev, int phy, int reg) 398 { 399 struct msk_if_softc *sc_if; 400 401 if (phy != PHY_ADDR_MARV) 402 return (0); 403 404 sc_if = device_get_softc(dev); 405 406 return (msk_phy_readreg(sc_if, phy, reg)); 407 } 408 409 static int 410 msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg) 411 { 412 struct msk_softc *sc; 413 int i, val; 414 415 sc = sc_if->msk_softc; 416 417 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL, 418 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); 419 420 for (i = 0; i < MSK_TIMEOUT; i++) { 421 DELAY(1); 422 val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL); 423 if ((val & GM_SMI_CT_RD_VAL) != 0) { 424 val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA); 425 break; 426 } 427 } 428 429 if (i == MSK_TIMEOUT) { 430 if_printf(sc_if->msk_ifp, "phy failed to come ready\n"); 431 val = 0; 432 } 433 434 return (val); 435 } 436 437 static int 438 msk_miibus_writereg(device_t dev, int phy, int reg, int val) 439 { 440 struct msk_if_softc *sc_if; 441 442 if (phy != PHY_ADDR_MARV) 443 return (0); 444 445 sc_if = device_get_softc(dev); 446 447 return (msk_phy_writereg(sc_if, phy, reg, val)); 448 } 449 450 static int 451 msk_phy_writereg(struct msk_if_softc 
*sc_if, int phy, int reg, int val)
{
	struct msk_softc *sc;
	int i;

	sc = sc_if->msk_softc;

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
	for (i = 0; i < MSK_TIMEOUT; i++) {
		DELAY(1);
		if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
		    GM_SMI_CT_BUSY) == 0)
			break;
	}
	if (i == MSK_TIMEOUT)
		if_printf(sc_if->msk_ifp, "phy write timeout\n");

	return (0);
}

static void
msk_miibus_statchg(device_t dev)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t gmac;

	sc_if = device_get_softc(dev);
	sc = sc_if->msk_softc;

	MSK_IF_LOCK_ASSERT(sc_if);

	mii = device_get_softc(sc_if->msk_miibus);
	ifp = sc_if->msk_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc_if->msk_flags &= ~MSK_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
	    (IFM_AVALID | IFM_ACTIVE)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc_if->msk_flags |= MSK_FLAG_LINK;
			break;
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
			if ((sc_if->msk_flags & MSK_FLAG_FASTETHER) == 0)
				sc_if->msk_flags |= MSK_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	if ((sc_if->msk_flags & MSK_FLAG_LINK) != 0) {
		/* Enable Tx FIFO Underrun. */
		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
		    GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
		/*
		 * Because mii(4) notifies msk(4) when it detects a link
		 * state change, there is no need to enable automatic
		 * speed/flow-control/duplex updates.
		 */
		gmac = GM_GPCR_AU_ALL_DIS;
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_SX:
		case IFM_1000_T:
			gmac |= GM_GPCR_SPEED_1000;
			break;
		case IFM_100_TX:
			gmac |= GM_GPCR_SPEED_100;
			break;
		case IFM_10_T:
			break;
		}

		if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
			gmac |= GM_GPCR_DUP_FULL;
		/* Disable Rx flow control. */
		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) == 0)
			gmac |= GM_GPCR_FC_RX_DIS;
		/* Disable Tx flow control. */
		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) == 0)
			gmac |= GM_GPCR_FC_TX_DIS;
		gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
		/* Read again to ensure writing. */
		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);

		gmac = GMC_PAUSE_ON;
		if (((mii->mii_media_active & IFM_GMASK) &
		    (IFM_FLAG0 | IFM_FLAG1)) == 0)
			gmac = GMC_PAUSE_OFF;
		/* Disable pause for 10/100 Mbps in half-duplex mode. */
		if ((((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) &&
		    (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX ||
		    IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T))
			gmac = GMC_PAUSE_OFF;
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);

		/* Enable PHY interrupt for FIFO underrun/overflow. */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV,
		    PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
	} else {
		/*
		 * Link state changed to down.
		 * Disable PHY interrupts.
		 */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
		/* Disable Rx/Tx MAC.
		 */
		gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		if ((gmac & (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)) != 0) {
			gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
			GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
			/* Read again to ensure writing. */
			GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		}
	}
}

static void
msk_rxfilter(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t mchash[2];
	uint32_t crc;
	uint16_t mode;

	sc = sc_if->msk_softc;

	MSK_IF_LOCK_ASSERT(sc_if);

	ifp = sc_if->msk_ifp;

	bzero(mchash, sizeof(mchash));
	mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
	if ((ifp->if_flags & IFF_PROMISC) != 0)
		mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		mode |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
		mchash[0] = 0xffff;
		mchash[1] = 0xffff;
	} else {
		mode |= GM_RXCR_UCF_ENA;
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN);
			/* Just want the 6 least significant bits. */
			crc &= 0x3f;
			/* Set the corresponding bit in the hash table. */
			mchash[crc >> 5] |= 1 << (crc & 0x1f);
		}
		if_maddr_runlock(ifp);
		if (mchash[0] != 0 || mchash[1] != 0)
			mode |= GM_RXCR_MCF_ENA;
	}

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
	    mchash[0] & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
	    (mchash[0] >> 16) & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
	    mchash[1] & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
	    (mchash[1] >> 16) & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
}

static void
msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_ON);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_ON);
	} else {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_OFF);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_OFF);
	}
}

static int
msk_init_rx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_rxdesc *rxd;
	int i, prod;

	MSK_IF_LOCK_ASSERT(sc_if);

	sc_if->msk_cdata.msk_rx_cons = 0;
	sc_if->msk_cdata.msk_rx_prod = 0;
	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
	prod = sc_if->msk_cdata.msk_rx_prod;
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_rx_ring[prod];
		if (msk_newbuf(sc_if, prod) != 0)
			return (ENOBUFS);
		MSK_INC(prod, MSK_RX_RING_CNT);
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
	    sc_if->msk_cdata.msk_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update prefetch unit.
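	 * The write below hands the Rx queue's prefetch unit put index
	 * register the index of the last initialized descriptor, so the
	 * hardware can start fetching the whole ring.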
*/ 681 sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1; 682 CSR_WRITE_2(sc_if->msk_softc, 683 Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG), 684 sc_if->msk_cdata.msk_rx_prod); 685 686 return (0); 687 } 688 689 static int 690 msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if) 691 { 692 struct msk_ring_data *rd; 693 struct msk_rxdesc *rxd; 694 int i, prod; 695 696 MSK_IF_LOCK_ASSERT(sc_if); 697 698 sc_if->msk_cdata.msk_rx_cons = 0; 699 sc_if->msk_cdata.msk_rx_prod = 0; 700 sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM; 701 702 rd = &sc_if->msk_rdata; 703 bzero(rd->msk_jumbo_rx_ring, 704 sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT); 705 prod = sc_if->msk_cdata.msk_rx_prod; 706 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 707 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod]; 708 rxd->rx_m = NULL; 709 rxd->rx_le = &rd->msk_jumbo_rx_ring[prod]; 710 if (msk_jumbo_newbuf(sc_if, prod) != 0) 711 return (ENOBUFS); 712 MSK_INC(prod, MSK_JUMBO_RX_RING_CNT); 713 } 714 715 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 716 sc_if->msk_cdata.msk_jumbo_rx_ring_map, 717 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 718 719 sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1; 720 CSR_WRITE_2(sc_if->msk_softc, 721 Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG), 722 sc_if->msk_cdata.msk_rx_prod); 723 724 return (0); 725 } 726 727 static void 728 msk_init_tx_ring(struct msk_if_softc *sc_if) 729 { 730 struct msk_ring_data *rd; 731 struct msk_txdesc *txd; 732 int i; 733 734 sc_if->msk_cdata.msk_tso_mtu = 0; 735 sc_if->msk_cdata.msk_last_csum = 0; 736 sc_if->msk_cdata.msk_tx_prod = 0; 737 sc_if->msk_cdata.msk_tx_cons = 0; 738 sc_if->msk_cdata.msk_tx_cnt = 0; 739 740 rd = &sc_if->msk_rdata; 741 bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT); 742 for (i = 0; i < MSK_TX_RING_CNT; i++) { 743 txd = &sc_if->msk_cdata.msk_txdesc[i]; 744 txd->tx_m = NULL; 745 txd->tx_le = &rd->msk_tx_ring[i]; 746 } 747 748 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag, 749 sc_if->msk_cdata.msk_tx_ring_map, 750 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 751 } 752 753 static __inline void 754 msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx) 755 { 756 struct msk_rx_desc *rx_le; 757 struct msk_rxdesc *rxd; 758 struct mbuf *m; 759 760 rxd = &sc_if->msk_cdata.msk_rxdesc[idx]; 761 m = rxd->rx_m; 762 rx_le = rxd->rx_le; 763 rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER); 764 } 765 766 static __inline void 767 msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx) 768 { 769 struct msk_rx_desc *rx_le; 770 struct msk_rxdesc *rxd; 771 struct mbuf *m; 772 773 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx]; 774 m = rxd->rx_m; 775 rx_le = rxd->rx_le; 776 rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER); 777 } 778 779 static int 780 msk_newbuf(struct msk_if_softc *sc_if, int idx) 781 { 782 struct msk_rx_desc *rx_le; 783 struct msk_rxdesc *rxd; 784 struct mbuf *m; 785 bus_dma_segment_t segs[1]; 786 bus_dmamap_t map; 787 int nsegs; 788 789 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 790 if (m == NULL) 791 return (ENOBUFS); 792 793 m->m_len = m->m_pkthdr.len = MCLBYTES; 794 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) 795 m_adj(m, ETHER_ALIGN); 796 #ifndef __NO_STRICT_ALIGNMENT 797 else 798 m_adj(m, MSK_RX_BUF_ALIGN); 799 #endif 800 801 if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag, 802 sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs, 803 BUS_DMA_NOWAIT) != 0) { 804 m_freem(m); 805 return (ENOBUFS); 806 } 807 KASSERT(nsegs == 1, ("%s: %d segments 
returned!", __func__, nsegs)); 808 809 rxd = &sc_if->msk_cdata.msk_rxdesc[idx]; 810 if (rxd->rx_m != NULL) { 811 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap, 812 BUS_DMASYNC_POSTREAD); 813 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap); 814 } 815 map = rxd->rx_dmamap; 816 rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap; 817 sc_if->msk_cdata.msk_rx_sparemap = map; 818 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap, 819 BUS_DMASYNC_PREREAD); 820 rxd->rx_m = m; 821 rx_le = rxd->rx_le; 822 rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr)); 823 rx_le->msk_control = 824 htole32(segs[0].ds_len | OP_PACKET | HW_OWNER); 825 826 return (0); 827 } 828 829 static int 830 msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx) 831 { 832 struct msk_rx_desc *rx_le; 833 struct msk_rxdesc *rxd; 834 struct mbuf *m; 835 bus_dma_segment_t segs[1]; 836 bus_dmamap_t map; 837 int nsegs; 838 839 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES); 840 if (m == NULL) 841 return (ENOBUFS); 842 if ((m->m_flags & M_EXT) == 0) { 843 m_freem(m); 844 return (ENOBUFS); 845 } 846 m->m_len = m->m_pkthdr.len = MJUM9BYTES; 847 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) 848 m_adj(m, ETHER_ALIGN); 849 #ifndef __NO_STRICT_ALIGNMENT 850 else 851 m_adj(m, MSK_RX_BUF_ALIGN); 852 #endif 853 854 if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag, 855 sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs, 856 BUS_DMA_NOWAIT) != 0) { 857 m_freem(m); 858 return (ENOBUFS); 859 } 860 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 861 862 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx]; 863 if (rxd->rx_m != NULL) { 864 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, 865 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 866 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag, 867 rxd->rx_dmamap); 868 } 869 map = rxd->rx_dmamap; 870 rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap; 871 sc_if->msk_cdata.msk_jumbo_rx_sparemap = map; 872 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap, 873 BUS_DMASYNC_PREREAD); 874 rxd->rx_m = m; 875 rx_le = rxd->rx_le; 876 rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr)); 877 rx_le->msk_control = 878 htole32(segs[0].ds_len | OP_PACKET | HW_OWNER); 879 880 return (0); 881 } 882 883 /* 884 * Set media options. 885 */ 886 static int 887 msk_mediachange(struct ifnet *ifp) 888 { 889 struct msk_if_softc *sc_if; 890 struct mii_data *mii; 891 int error; 892 893 sc_if = ifp->if_softc; 894 895 MSK_IF_LOCK(sc_if); 896 mii = device_get_softc(sc_if->msk_miibus); 897 error = mii_mediachg(mii); 898 MSK_IF_UNLOCK(sc_if); 899 900 return (error); 901 } 902 903 /* 904 * Report current media status. 
905 */ 906 static void 907 msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 908 { 909 struct msk_if_softc *sc_if; 910 struct mii_data *mii; 911 912 sc_if = ifp->if_softc; 913 MSK_IF_LOCK(sc_if); 914 if ((ifp->if_flags & IFF_UP) == 0) { 915 MSK_IF_UNLOCK(sc_if); 916 return; 917 } 918 mii = device_get_softc(sc_if->msk_miibus); 919 920 mii_pollstat(mii); 921 MSK_IF_UNLOCK(sc_if); 922 ifmr->ifm_active = mii->mii_media_active; 923 ifmr->ifm_status = mii->mii_media_status; 924 } 925 926 static int 927 msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 928 { 929 struct msk_if_softc *sc_if; 930 struct ifreq *ifr; 931 struct mii_data *mii; 932 int error, mask; 933 934 sc_if = ifp->if_softc; 935 ifr = (struct ifreq *)data; 936 error = 0; 937 938 switch(command) { 939 case SIOCSIFMTU: 940 MSK_IF_LOCK(sc_if); 941 if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN) 942 error = EINVAL; 943 else if (ifp->if_mtu != ifr->ifr_mtu) { 944 if (ifr->ifr_mtu > ETHERMTU) { 945 if ((sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) { 946 error = EINVAL; 947 MSK_IF_UNLOCK(sc_if); 948 break; 949 } 950 if ((sc_if->msk_flags & 951 MSK_FLAG_JUMBO_NOCSUM) != 0) { 952 ifp->if_hwassist &= 953 ~(MSK_CSUM_FEATURES | CSUM_TSO); 954 ifp->if_capenable &= 955 ~(IFCAP_TSO4 | IFCAP_TXCSUM); 956 VLAN_CAPABILITIES(ifp); 957 } 958 } 959 ifp->if_mtu = ifr->ifr_mtu; 960 msk_init_locked(sc_if); 961 } 962 MSK_IF_UNLOCK(sc_if); 963 break; 964 case SIOCSIFFLAGS: 965 MSK_IF_LOCK(sc_if); 966 if ((ifp->if_flags & IFF_UP) != 0) { 967 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && 968 ((ifp->if_flags ^ sc_if->msk_if_flags) & 969 (IFF_PROMISC | IFF_ALLMULTI)) != 0) 970 msk_rxfilter(sc_if); 971 else if ((sc_if->msk_flags & MSK_FLAG_DETACH) == 0) 972 msk_init_locked(sc_if); 973 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 974 msk_stop(sc_if); 975 sc_if->msk_if_flags = ifp->if_flags; 976 MSK_IF_UNLOCK(sc_if); 977 break; 978 case SIOCADDMULTI: 979 case SIOCDELMULTI: 980 MSK_IF_LOCK(sc_if); 981 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 982 msk_rxfilter(sc_if); 983 MSK_IF_UNLOCK(sc_if); 984 break; 985 case SIOCGIFMEDIA: 986 case SIOCSIFMEDIA: 987 mii = device_get_softc(sc_if->msk_miibus); 988 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 989 break; 990 case SIOCSIFCAP: 991 MSK_IF_LOCK(sc_if); 992 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 993 if ((mask & IFCAP_TXCSUM) != 0 && 994 (IFCAP_TXCSUM & ifp->if_capabilities) != 0) { 995 ifp->if_capenable ^= IFCAP_TXCSUM; 996 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) 997 ifp->if_hwassist |= MSK_CSUM_FEATURES; 998 else 999 ifp->if_hwassist &= ~MSK_CSUM_FEATURES; 1000 } 1001 if ((mask & IFCAP_RXCSUM) != 0 && 1002 (IFCAP_RXCSUM & ifp->if_capabilities) != 0) 1003 ifp->if_capenable ^= IFCAP_RXCSUM; 1004 if ((mask & IFCAP_VLAN_HWCSUM) != 0 && 1005 (IFCAP_VLAN_HWCSUM & ifp->if_capabilities) != 0) 1006 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; 1007 if ((mask & IFCAP_TSO4) != 0 && 1008 (IFCAP_TSO4 & ifp->if_capabilities) != 0) { 1009 ifp->if_capenable ^= IFCAP_TSO4; 1010 if ((IFCAP_TSO4 & ifp->if_capenable) != 0) 1011 ifp->if_hwassist |= CSUM_TSO; 1012 else 1013 ifp->if_hwassist &= ~CSUM_TSO; 1014 } 1015 if ((mask & IFCAP_VLAN_HWTSO) != 0 && 1016 (IFCAP_VLAN_HWTSO & ifp->if_capabilities) != 0) 1017 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 1018 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 1019 (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) { 1020 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1021 if ((IFCAP_VLAN_HWTAGGING & ifp->if_capenable) == 0) 1022 
				ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
			msk_setvlan(sc_if, ifp);
		}
		if (ifp->if_mtu > ETHERMTU &&
		    (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) {
			ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
			ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
		}

		VLAN_CAPABILITIES(ifp);
		MSK_IF_UNLOCK(sc_if);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static int
mskc_probe(device_t dev)
{
	struct msk_product *mp;
	uint16_t vendor, devid;
	int i;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	mp = msk_products;
	for (i = 0; i < sizeof(msk_products)/sizeof(msk_products[0]);
	    i++, mp++) {
		if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
			device_set_desc(dev, mp->msk_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
mskc_setup_rambuffer(struct msk_softc *sc)
{
	int next;
	int i;

	/* Get adapter SRAM size. */
	sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4;
	if (bootverbose)
		device_printf(sc->msk_dev,
		    "RAM buffer size : %dKB\n", sc->msk_ramsize);
	if (sc->msk_ramsize == 0)
		return (0);

	sc->msk_pflags |= MSK_FLAG_RAMBUF;
	/*
	 * Give the receiver 2/3 of the memory and round down to a multiple
	 * of 1024.  The Tx/Rx RAM buffer size of Yukon II should be a
	 * multiple of 1024.
	 */
	sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
	sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
	for (i = 0, next = 0; i < sc->msk_num_port; i++) {
		sc->msk_rxqstart[i] = next;
		sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
		next = sc->msk_rxqend[i] + 1;
		sc->msk_txqstart[i] = next;
		sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
		next = sc->msk_txqend[i] + 1;
		if (bootverbose) {
			device_printf(sc->msk_dev,
			    "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
			    sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
			    sc->msk_rxqend[i]);
			device_printf(sc->msk_dev,
			    "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
			    sc->msk_txqsize / 1024, sc->msk_txqstart[i],
			    sc->msk_txqend[i]);
		}
	}

	return (0);
}

static void
msk_phy_power(struct msk_softc *sc, int mode)
{
	uint32_t our, val;
	int i;

	switch (mode) {
	case MSK_PHY_POWERUP:
		/* Switch power to VCC (WA for VAUX problem). */
		CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
		/* Disable Core Clock Division, set Clock Select to 0. */
		CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);

		val = 0;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
			    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
			    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		}
		/*
		 * Enable PCI & Core Clock, enable clock gating for both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);

		val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
		val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
			if (sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
				/* Deassert Low Power for 1st PHY.
*/ 1139 val |= PCI_Y2_PHY1_COMA; 1140 if (sc->msk_num_port > 1) 1141 val |= PCI_Y2_PHY2_COMA; 1142 } 1143 } 1144 /* Release PHY from PowerDown/COMA mode. */ 1145 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4); 1146 switch (sc->msk_hw_id) { 1147 case CHIP_ID_YUKON_EC_U: 1148 case CHIP_ID_YUKON_EX: 1149 case CHIP_ID_YUKON_FE_P: 1150 case CHIP_ID_YUKON_UL_2: 1151 CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_OFF); 1152 1153 /* Enable all clocks. */ 1154 pci_write_config(sc->msk_dev, PCI_OUR_REG_3, 0, 4); 1155 our = pci_read_config(sc->msk_dev, PCI_OUR_REG_4, 4); 1156 our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN| 1157 PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST); 1158 /* Set all bits to 0 except bits 15..12. */ 1159 pci_write_config(sc->msk_dev, PCI_OUR_REG_4, our, 4); 1160 our = pci_read_config(sc->msk_dev, PCI_OUR_REG_5, 4); 1161 our &= PCI_CTL_TIM_VMAIN_AV_MSK; 1162 pci_write_config(sc->msk_dev, PCI_OUR_REG_5, our, 4); 1163 pci_write_config(sc->msk_dev, PCI_CFG_REG_1, 0, 4); 1164 /* 1165 * Disable status race, workaround for 1166 * Yukon EC Ultra & Yukon EX. 1167 */ 1168 val = CSR_READ_4(sc, B2_GP_IO); 1169 val |= GLB_GPIO_STAT_RACE_DIS; 1170 CSR_WRITE_4(sc, B2_GP_IO, val); 1171 CSR_READ_4(sc, B2_GP_IO); 1172 break; 1173 default: 1174 break; 1175 } 1176 for (i = 0; i < sc->msk_num_port; i++) { 1177 CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL), 1178 GMLC_RST_SET); 1179 CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL), 1180 GMLC_RST_CLR); 1181 } 1182 break; 1183 case MSK_PHY_POWERDOWN: 1184 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4); 1185 val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD; 1186 if (sc->msk_hw_id == CHIP_ID_YUKON_XL && 1187 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { 1188 val &= ~PCI_Y2_PHY1_COMA; 1189 if (sc->msk_num_port > 1) 1190 val &= ~PCI_Y2_PHY2_COMA; 1191 } 1192 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4); 1193 1194 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | 1195 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS | 1196 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS; 1197 if (sc->msk_hw_id == CHIP_ID_YUKON_XL && 1198 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { 1199 /* Enable bits are inverted. */ 1200 val = 0; 1201 } 1202 /* 1203 * Disable PCI & Core Clock, disable clock gating for 1204 * both Links. 1205 */ 1206 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val); 1207 CSR_WRITE_1(sc, B0_POWER_CTRL, 1208 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF); 1209 break; 1210 default: 1211 break; 1212 } 1213 } 1214 1215 static void 1216 mskc_reset(struct msk_softc *sc) 1217 { 1218 bus_addr_t addr; 1219 uint16_t status; 1220 uint32_t val; 1221 int i; 1222 1223 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR); 1224 1225 /* Disable ASF. */ 1226 if (sc->msk_hw_id == CHIP_ID_YUKON_EX) { 1227 status = CSR_READ_2(sc, B28_Y2_ASF_HCU_CCSR); 1228 /* Clear AHB bridge & microcontroller reset. */ 1229 status &= ~(Y2_ASF_HCU_CCSR_AHB_RST | 1230 Y2_ASF_HCU_CCSR_CPU_RST_MODE); 1231 /* Clear ASF microcontroller state. */ 1232 status &= ~ Y2_ASF_HCU_CCSR_UC_STATE_MSK; 1233 CSR_WRITE_2(sc, B28_Y2_ASF_HCU_CCSR, status); 1234 } else 1235 CSR_WRITE_1(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); 1236 CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE); 1237 1238 /* 1239 * Since we disabled ASF, S/W reset is required for Power Management. 1240 */ 1241 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 1242 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR); 1243 1244 /* Clear all error bits in the PCI status register. 
*/ 1245 status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2); 1246 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); 1247 1248 pci_write_config(sc->msk_dev, PCIR_STATUS, status | 1249 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT | 1250 PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2); 1251 CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR); 1252 1253 switch (sc->msk_bustype) { 1254 case MSK_PEX_BUS: 1255 /* Clear all PEX errors. */ 1256 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff); 1257 val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT); 1258 if ((val & PEX_RX_OV) != 0) { 1259 sc->msk_intrmask &= ~Y2_IS_HW_ERR; 1260 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP; 1261 } 1262 break; 1263 case MSK_PCI_BUS: 1264 case MSK_PCIX_BUS: 1265 /* Set Cache Line Size to 2(8bytes) if configured to 0. */ 1266 val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1); 1267 if (val == 0) 1268 pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1); 1269 if (sc->msk_bustype == MSK_PCIX_BUS) { 1270 /* Set Cache Line Size opt. */ 1271 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4); 1272 val |= PCI_CLS_OPT; 1273 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4); 1274 } 1275 break; 1276 } 1277 /* Set PHY power state. */ 1278 msk_phy_power(sc, MSK_PHY_POWERUP); 1279 1280 /* Reset GPHY/GMAC Control */ 1281 for (i = 0; i < sc->msk_num_port; i++) { 1282 /* GPHY Control reset. */ 1283 CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET); 1284 CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR); 1285 /* GMAC Control reset. */ 1286 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET); 1287 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR); 1288 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF); 1289 if (sc->msk_hw_id == CHIP_ID_YUKON_EX) 1290 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), 1291 GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON | 1292 GMC_BYP_RETR_ON); 1293 } 1294 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 1295 1296 /* LED On. */ 1297 CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON); 1298 1299 /* Clear TWSI IRQ. */ 1300 CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ); 1301 1302 /* Turn off hardware timer. */ 1303 CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP); 1304 CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ); 1305 1306 /* Turn off descriptor polling. */ 1307 CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP); 1308 1309 /* Turn off time stamps. */ 1310 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP); 1311 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); 1312 1313 /* Configure timeout values. 
*/ 1314 for (i = 0; i < sc->msk_num_port; i++) { 1315 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET); 1316 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR); 1317 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1), 1318 MSK_RI_TO_53); 1319 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1), 1320 MSK_RI_TO_53); 1321 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1), 1322 MSK_RI_TO_53); 1323 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1), 1324 MSK_RI_TO_53); 1325 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1), 1326 MSK_RI_TO_53); 1327 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1), 1328 MSK_RI_TO_53); 1329 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2), 1330 MSK_RI_TO_53); 1331 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2), 1332 MSK_RI_TO_53); 1333 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2), 1334 MSK_RI_TO_53); 1335 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2), 1336 MSK_RI_TO_53); 1337 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2), 1338 MSK_RI_TO_53); 1339 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2), 1340 MSK_RI_TO_53); 1341 } 1342 1343 /* Disable all interrupts. */ 1344 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 1345 CSR_READ_4(sc, B0_HWE_IMSK); 1346 CSR_WRITE_4(sc, B0_IMSK, 0); 1347 CSR_READ_4(sc, B0_IMSK); 1348 1349 /* 1350 * On dual port PCI-X card, there is an problem where status 1351 * can be received out of order due to split transactions. 1352 */ 1353 if (sc->msk_pcixcap != 0 && sc->msk_num_port > 1) { 1354 uint16_t pcix_cmd; 1355 1356 pcix_cmd = pci_read_config(sc->msk_dev, 1357 sc->msk_pcixcap + PCIXR_COMMAND, 2); 1358 /* Clear Max Outstanding Split Transactions. */ 1359 pcix_cmd &= ~PCIXM_COMMAND_MAX_SPLITS; 1360 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); 1361 pci_write_config(sc->msk_dev, 1362 sc->msk_pcixcap + PCIXR_COMMAND, pcix_cmd, 2); 1363 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 1364 } 1365 if (sc->msk_expcap != 0) { 1366 /* Change Max. Read Request Size to 2048 bytes. */ 1367 if (pci_get_max_read_req(sc->msk_dev) == 512) 1368 pci_set_max_read_req(sc->msk_dev, 2048); 1369 } 1370 1371 /* Clear status list. */ 1372 bzero(sc->msk_stat_ring, 1373 sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT); 1374 sc->msk_stat_cons = 0; 1375 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map, 1376 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1377 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET); 1378 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR); 1379 /* Set the status list base address. */ 1380 addr = sc->msk_stat_ring_paddr; 1381 CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr)); 1382 CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr)); 1383 /* Set the status list last index. */ 1384 CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1); 1385 if (sc->msk_hw_id == CHIP_ID_YUKON_EC && 1386 sc->msk_hw_rev == CHIP_REV_YU_EC_A1) { 1387 /* WA for dev. #4.3 */ 1388 CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK); 1389 /* WA for dev. #4.18 */ 1390 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21); 1391 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07); 1392 } else { 1393 CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a); 1394 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10); 1395 if (sc->msk_hw_id == CHIP_ID_YUKON_XL && 1396 sc->msk_hw_rev == CHIP_REV_YU_XL_A0) 1397 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04); 1398 else 1399 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10); 1400 CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190); 1401 } 1402 /* 1403 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI. 
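	 *
	 * MSK_USECS() below is assumed to convert microseconds into core
	 * clock ticks based on sc->msk_clock (MHz), so the value 1000
	 * requests a Tx status timer of roughly 1 ms on any supported chip.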
	 */
	CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));

	/* Enable status unit. */
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);

	CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
}

static int
msk_probe(device_t dev)
{
	struct msk_softc *sc;
	char desc[100];

	sc = device_get_softc(device_get_parent(dev));
	/*
	 * Not much to do here. We always know there will be
	 * at least one GMAC present, and if there are two,
	 * mskc_attach() will create a second device instance
	 * for us.
	 */
	snprintf(desc, sizeof(desc),
	    "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
	    model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
	    sc->msk_hw_rev);
	device_set_desc_copy(dev, desc);

	return (BUS_PROBE_DEFAULT);
}

static int
msk_attach(device_t dev)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	struct ifnet *ifp;
	struct msk_mii_data *mmd;
	int i, port, error;
	uint8_t eaddr[6];

	if (dev == NULL)
		return (EINVAL);

	error = 0;
	sc_if = device_get_softc(dev);
	sc = device_get_softc(device_get_parent(dev));
	mmd = device_get_ivars(dev);
	port = mmd->port;

	sc_if->msk_if_dev = dev;
	sc_if->msk_port = port;
	sc_if->msk_softc = sc;
	sc_if->msk_flags = sc->msk_pflags;
	sc->msk_if[port] = sc_if;
	/* Setup Tx/Rx queue register offsets. */
	if (port == MSK_PORT_A) {
		sc_if->msk_txq = Q_XA1;
		sc_if->msk_txsq = Q_XS1;
		sc_if->msk_rxq = Q_R1;
	} else {
		sc_if->msk_txq = Q_XA2;
		sc_if->msk_txsq = Q_XS2;
		sc_if->msk_rxq = Q_R2;
	}

	callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0);
	msk_sysctl_node(sc_if);

	if ((error = msk_txrx_dma_alloc(sc_if)) != 0)
		goto fail;
	msk_rx_dma_jalloc(sc_if);

	ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc_if->msk_if_dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	/*
	 * The IFCAP_RXCSUM capability is intentionally disabled as the
	 * hardware has a serious bug in Rx checksum offload for all
	 * Yukon II family hardware. It seems there is a workaround to make
	 * it work sometimes. However, the workaround also has to check OP
	 * code sequences to verify whether the OP code is correct.
	 * Sometimes it should compute the IP/TCP/UDP checksum in the driver
	 * in order to verify the correctness of the checksum computed by
	 * hardware. If you have to compute the checksum in software to
	 * verify the hardware's checksum, why have the hardware compute the
	 * checksum? I think there is no reason to spend time to make Rx
	 * checksum offload work on Yukon II hardware.
	 */
	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4;
	/*
	 * Enable Rx checksum offloading if the controller supports the new
	 * descriptor format.
	 */
	if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
	    (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
		ifp->if_capabilities |= IFCAP_RXCSUM;
	ifp->if_hwassist = MSK_CSUM_FEATURES | CSUM_TSO;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_ioctl = msk_ioctl;
	ifp->if_start = msk_start;
	ifp->if_init = msk_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = MSK_TX_RING_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	TASK_INIT(&sc_if->msk_tx_task, 1, msk_tx_task, ifp);

	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	MSK_IF_LOCK(sc_if);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);

	/*
	 * Call MI attach routine. Can't hold locks when calling into ether_*.
	 */
	MSK_IF_UNLOCK(sc_if);
	ether_ifattach(ifp, eaddr);
	MSK_IF_LOCK(sc_if);

	/* VLAN capability setup */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	if ((sc_if->msk_flags & MSK_FLAG_NOHWVLAN) == 0) {
		/*
		 * Due to Tx checksum offload hardware bugs, msk(4) manually
		 * computes the checksum for short frames. For VLAN tagged
		 * frames this workaround does not work, so checksum offload
		 * is disabled for the VLAN interface.
		 */
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO;
		/*
		 * Enable Rx checksum offloading for VLAN tagged frames
		 * if the controller supports the new descriptor format.
		 */
		if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0 &&
		    (sc_if->msk_flags & MSK_FLAG_NORX_CSUM) == 0)
			ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
	}
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * Do miibus setup.
	 */
	MSK_IF_UNLOCK(sc_if);
	error = mii_phy_probe(dev, &sc_if->msk_miibus, msk_mediachange,
	    msk_mediastatus);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev, "no PHY found!\n");
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}

fail:
	if (error != 0) {
		/* Access should be ok even though lock has been dropped */
		sc->msk_if[port] = NULL;
		msk_detach(dev);
	}

	return (error);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
mskc_attach(device_t dev)
{
	struct msk_softc *sc;
	struct msk_mii_data *mmd;
	int error, msic, msir, reg;

	sc = device_get_softc(dev);
	sc->msk_dev = dev;
	mtx_init(&sc->msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);

	/*
	 * Map control/status registers.
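	 * Registers are reachable either through the memory BAR
	 * (PCIR_BAR(0), msk_res_spec_mem) or the I/O BAR (PCIR_BAR(1),
	 * msk_res_spec_io); MSK_USEIOSPACE selects the first choice and
	 * the code below falls back to the other one if allocation fails.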
1608 */ 1609 pci_enable_busmaster(dev); 1610 1611 /* Allocate I/O resource */ 1612 #ifdef MSK_USEIOSPACE 1613 sc->msk_res_spec = msk_res_spec_io; 1614 #else 1615 sc->msk_res_spec = msk_res_spec_mem; 1616 #endif 1617 sc->msk_irq_spec = msk_irq_spec_legacy; 1618 error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res); 1619 if (error) { 1620 if (sc->msk_res_spec == msk_res_spec_mem) 1621 sc->msk_res_spec = msk_res_spec_io; 1622 else 1623 sc->msk_res_spec = msk_res_spec_mem; 1624 error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res); 1625 if (error) { 1626 device_printf(dev, "couldn't allocate %s resources\n", 1627 sc->msk_res_spec == msk_res_spec_mem ? "memory" : 1628 "I/O"); 1629 mtx_destroy(&sc->msk_mtx); 1630 return (ENXIO); 1631 } 1632 } 1633 1634 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR); 1635 sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID); 1636 sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f; 1637 /* Bail out if chip is not recognized. */ 1638 if (sc->msk_hw_id < CHIP_ID_YUKON_XL || 1639 sc->msk_hw_id > CHIP_ID_YUKON_UL_2 || 1640 sc->msk_hw_id == CHIP_ID_YUKON_SUPR) { 1641 device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n", 1642 sc->msk_hw_id, sc->msk_hw_rev); 1643 mtx_destroy(&sc->msk_mtx); 1644 return (ENXIO); 1645 } 1646 1647 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 1648 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 1649 OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW, 1650 &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I", 1651 "max number of Rx events to process"); 1652 1653 sc->msk_process_limit = MSK_PROC_DEFAULT; 1654 error = resource_int_value(device_get_name(dev), device_get_unit(dev), 1655 "process_limit", &sc->msk_process_limit); 1656 if (error == 0) { 1657 if (sc->msk_process_limit < MSK_PROC_MIN || 1658 sc->msk_process_limit > MSK_PROC_MAX) { 1659 device_printf(dev, "process_limit value out of range; " 1660 "using default: %d\n", MSK_PROC_DEFAULT); 1661 sc->msk_process_limit = MSK_PROC_DEFAULT; 1662 } 1663 } 1664 1665 /* Soft reset. */ 1666 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 1667 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR); 1668 sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP); 1669 /* Check number of MACs. */ 1670 sc->msk_num_port = 1; 1671 if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) == 1672 CFG_DUAL_MAC_MSK) { 1673 if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC)) 1674 sc->msk_num_port++; 1675 } 1676 1677 /* Check bus type. */ 1678 if (pci_find_extcap(sc->msk_dev, PCIY_EXPRESS, ®) == 0) { 1679 sc->msk_bustype = MSK_PEX_BUS; 1680 sc->msk_expcap = reg; 1681 } else if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, ®) == 0) { 1682 sc->msk_bustype = MSK_PCIX_BUS; 1683 sc->msk_pcixcap = reg; 1684 } else 1685 sc->msk_bustype = MSK_PCI_BUS; 1686 1687 switch (sc->msk_hw_id) { 1688 case CHIP_ID_YUKON_EC: 1689 sc->msk_clock = 125; /* 125 MHz */ 1690 sc->msk_pflags |= MSK_FLAG_JUMBO; 1691 break; 1692 case CHIP_ID_YUKON_EC_U: 1693 sc->msk_clock = 125; /* 125 MHz */ 1694 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_JUMBO_NOCSUM; 1695 break; 1696 case CHIP_ID_YUKON_EX: 1697 sc->msk_clock = 125; /* 125 MHz */ 1698 sc->msk_pflags |= MSK_FLAG_JUMBO | MSK_FLAG_DESCV2 | 1699 MSK_FLAG_AUTOTX_CSUM; 1700 /* 1701 * Yukon Extreme seems to have silicon bug for 1702 * automatic Tx checksum calculation capability. 
1703 */ 1704 if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0) 1705 sc->msk_pflags &= ~MSK_FLAG_AUTOTX_CSUM; 1706 /* 1707 * Yukon Extreme A0 could not use store-and-forward 1708 * for jumbo frames, so disable Tx checksum 1709 * offloading for jumbo frames. 1710 */ 1711 if (sc->msk_hw_rev == CHIP_REV_YU_EX_A0) 1712 sc->msk_pflags |= MSK_FLAG_JUMBO_NOCSUM; 1713 break; 1714 case CHIP_ID_YUKON_FE: 1715 sc->msk_clock = 100; /* 100 MHz */ 1716 sc->msk_pflags |= MSK_FLAG_FASTETHER; 1717 break; 1718 case CHIP_ID_YUKON_FE_P: 1719 sc->msk_clock = 50; /* 50 MHz */ 1720 sc->msk_pflags |= MSK_FLAG_FASTETHER | MSK_FLAG_DESCV2 | 1721 MSK_FLAG_AUTOTX_CSUM; 1722 if (sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) { 1723 /* 1724 * XXX 1725 * FE+ A0 has status LE writeback bug so msk(4) 1726 * does not rely on status word of received frame 1727 * in msk_rxeof() which in turn disables all 1728 * hardware assistance bits reported by the status 1729 * word as well as validity of the recevied frame. 1730 * Just pass received frames to upper stack with 1731 * minimal test and let upper stack handle them. 1732 */ 1733 sc->msk_pflags |= MSK_FLAG_NOHWVLAN | 1734 MSK_FLAG_NORXCHK | MSK_FLAG_NORX_CSUM; 1735 } 1736 break; 1737 case CHIP_ID_YUKON_XL: 1738 sc->msk_clock = 156; /* 156 MHz */ 1739 sc->msk_pflags |= MSK_FLAG_JUMBO; 1740 break; 1741 case CHIP_ID_YUKON_UL_2: 1742 sc->msk_clock = 125; /* 125 MHz */ 1743 sc->msk_pflags |= MSK_FLAG_JUMBO; 1744 break; 1745 default: 1746 sc->msk_clock = 156; /* 156 MHz */ 1747 break; 1748 } 1749 1750 /* Allocate IRQ resources. */ 1751 msic = pci_msi_count(dev); 1752 if (bootverbose) 1753 device_printf(dev, "MSI count : %d\n", msic); 1754 if (legacy_intr != 0) 1755 msi_disable = 1; 1756 if (msi_disable == 0 && msic > 0) { 1757 msir = 1; 1758 if (pci_alloc_msi(dev, &msir) == 0) { 1759 if (msir == 1) { 1760 sc->msk_pflags |= MSK_FLAG_MSI; 1761 sc->msk_irq_spec = msk_irq_spec_msi; 1762 } else 1763 pci_release_msi(dev); 1764 } 1765 } 1766 1767 error = bus_alloc_resources(dev, sc->msk_irq_spec, sc->msk_irq); 1768 if (error) { 1769 device_printf(dev, "couldn't allocate IRQ resources\n"); 1770 goto fail; 1771 } 1772 1773 if ((error = msk_status_dma_alloc(sc)) != 0) 1774 goto fail; 1775 1776 /* Set base interrupt mask. */ 1777 sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU; 1778 sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR | 1779 Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP; 1780 1781 /* Reset the adapter. 
*/ 1782 mskc_reset(sc); 1783 1784 if ((error = mskc_setup_rambuffer(sc)) != 0) 1785 goto fail; 1786 1787 sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1); 1788 if (sc->msk_devs[MSK_PORT_A] == NULL) { 1789 device_printf(dev, "failed to add child for PORT_A\n"); 1790 error = ENXIO; 1791 goto fail; 1792 } 1793 mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK | M_ZERO); 1794 if (mmd == NULL) { 1795 device_printf(dev, "failed to allocate memory for " 1796 "ivars of PORT_A\n"); 1797 error = ENXIO; 1798 goto fail; 1799 } 1800 mmd->port = MSK_PORT_A; 1801 mmd->pmd = sc->msk_pmd; 1802 if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S' || sc->msk_pmd == 'P') 1803 mmd->mii_flags |= MIIF_HAVEFIBER; 1804 device_set_ivars(sc->msk_devs[MSK_PORT_A], mmd); 1805 1806 if (sc->msk_num_port > 1) { 1807 sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1); 1808 if (sc->msk_devs[MSK_PORT_B] == NULL) { 1809 device_printf(dev, "failed to add child for PORT_B\n"); 1810 error = ENXIO; 1811 goto fail; 1812 } 1813 mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK | M_ZERO); 1814 if (mmd == NULL) { 1815 device_printf(dev, "failed to allocate memory for " 1816 "ivars of PORT_B\n"); 1817 error = ENXIO; 1818 goto fail; 1819 } 1820 mmd->port = MSK_PORT_B; 1821 mmd->pmd = sc->msk_pmd; 1822 if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S' || sc->msk_pmd == 'P') 1823 mmd->mii_flags |= MIIF_HAVEFIBER; 1824 device_set_ivars(sc->msk_devs[MSK_PORT_B], mmd); 1825 } 1826 1827 error = bus_generic_attach(dev); 1828 if (error) { 1829 device_printf(dev, "failed to attach port(s)\n"); 1830 goto fail; 1831 } 1832 1833 /* Hook interrupt last to avoid having to lock softc. */ 1834 if (legacy_intr) 1835 error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET | 1836 INTR_MPSAFE, NULL, msk_legacy_intr, sc, 1837 &sc->msk_intrhand); 1838 else { 1839 TASK_INIT(&sc->msk_int_task, 0, msk_int_task, sc); 1840 sc->msk_tq = taskqueue_create_fast("msk_taskq", M_WAITOK, 1841 taskqueue_thread_enqueue, &sc->msk_tq); 1842 taskqueue_start_threads(&sc->msk_tq, 1, PI_NET, "%s taskq", 1843 device_get_nameunit(sc->msk_dev)); 1844 error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET | 1845 INTR_MPSAFE, msk_intr, NULL, sc, &sc->msk_intrhand); 1846 } 1847 1848 if (error != 0) { 1849 device_printf(dev, "couldn't set up interrupt handler\n"); 1850 if (legacy_intr == 0) 1851 taskqueue_free(sc->msk_tq); 1852 sc->msk_tq = NULL; 1853 goto fail; 1854 } 1855 fail: 1856 if (error != 0) 1857 mskc_detach(dev); 1858 1859 return (error); 1860 } 1861 1862 /* 1863 * Shutdown hardware and free up resources. This can be called any 1864 * time after the mutex has been initialized. It is called in both 1865 * the error case in attach and the normal detach case so it needs 1866 * to be careful about only freeing resources that have actually been 1867 * allocated. 1868 */ 1869 static int 1870 msk_detach(device_t dev) 1871 { 1872 struct msk_softc *sc; 1873 struct msk_if_softc *sc_if; 1874 struct ifnet *ifp; 1875 1876 sc_if = device_get_softc(dev); 1877 KASSERT(mtx_initialized(&sc_if->msk_softc->msk_mtx), 1878 ("msk mutex not initialized in msk_detach")); 1879 MSK_IF_LOCK(sc_if); 1880 1881 ifp = sc_if->msk_ifp; 1882 if (device_is_attached(dev)) { 1883 /* XXX */ 1884 sc_if->msk_flags |= MSK_FLAG_DETACH; 1885 msk_stop(sc_if); 1886 /* Can't hold locks while calling detach. 
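 * ether_ifdetach(), callout_drain() and taskqueue_drain() may all
 * sleep waiting for pending work to finish, so the interface lock is
 * dropped here and re-acquired once they return.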
*/ 1887 MSK_IF_UNLOCK(sc_if); 1888 callout_drain(&sc_if->msk_tick_ch); 1889 taskqueue_drain(taskqueue_fast, &sc_if->msk_tx_task); 1890 ether_ifdetach(ifp); 1891 MSK_IF_LOCK(sc_if); 1892 } 1893 1894 /* 1895 * We're generally called from mskc_detach() which is using 1896 * device_delete_child() to get to here. It's already trashed 1897 * miibus for us, so don't do it here or we'll panic. 1898 * 1899 * if (sc_if->msk_miibus != NULL) { 1900 * device_delete_child(dev, sc_if->msk_miibus); 1901 * sc_if->msk_miibus = NULL; 1902 * } 1903 */ 1904 1905 msk_rx_dma_jfree(sc_if); 1906 msk_txrx_dma_free(sc_if); 1907 bus_generic_detach(dev); 1908 1909 if (ifp) 1910 if_free(ifp); 1911 sc = sc_if->msk_softc; 1912 sc->msk_if[sc_if->msk_port] = NULL; 1913 MSK_IF_UNLOCK(sc_if); 1914 1915 return (0); 1916 } 1917 1918 static int 1919 mskc_detach(device_t dev) 1920 { 1921 struct msk_softc *sc; 1922 1923 sc = device_get_softc(dev); 1924 KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized")); 1925 1926 if (device_is_alive(dev)) { 1927 if (sc->msk_devs[MSK_PORT_A] != NULL) { 1928 free(device_get_ivars(sc->msk_devs[MSK_PORT_A]), 1929 M_DEVBUF); 1930 device_delete_child(dev, sc->msk_devs[MSK_PORT_A]); 1931 } 1932 if (sc->msk_devs[MSK_PORT_B] != NULL) { 1933 free(device_get_ivars(sc->msk_devs[MSK_PORT_B]), 1934 M_DEVBUF); 1935 device_delete_child(dev, sc->msk_devs[MSK_PORT_B]); 1936 } 1937 bus_generic_detach(dev); 1938 } 1939 1940 /* Disable all interrupts. */ 1941 CSR_WRITE_4(sc, B0_IMSK, 0); 1942 CSR_READ_4(sc, B0_IMSK); 1943 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 1944 CSR_READ_4(sc, B0_HWE_IMSK); 1945 1946 /* LED Off. */ 1947 CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF); 1948 1949 /* Put hardware reset. */ 1950 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 1951 1952 msk_status_dma_free(sc); 1953 1954 if (legacy_intr == 0 && sc->msk_tq != NULL) { 1955 taskqueue_drain(sc->msk_tq, &sc->msk_int_task); 1956 taskqueue_free(sc->msk_tq); 1957 sc->msk_tq = NULL; 1958 } 1959 if (sc->msk_intrhand) { 1960 bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand); 1961 sc->msk_intrhand = NULL; 1962 } 1963 bus_release_resources(dev, sc->msk_irq_spec, sc->msk_irq); 1964 if ((sc->msk_pflags & MSK_FLAG_MSI) != 0) 1965 pci_release_msi(dev); 1966 bus_release_resources(dev, sc->msk_res_spec, sc->msk_res); 1967 mtx_destroy(&sc->msk_mtx); 1968 1969 return (0); 1970 } 1971 1972 struct msk_dmamap_arg { 1973 bus_addr_t msk_busaddr; 1974 }; 1975 1976 static void 1977 msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1978 { 1979 struct msk_dmamap_arg *ctx; 1980 1981 if (error != 0) 1982 return; 1983 ctx = arg; 1984 ctx->msk_busaddr = segs[0].ds_addr; 1985 } 1986 1987 /* Create status DMA region. */ 1988 static int 1989 msk_status_dma_alloc(struct msk_softc *sc) 1990 { 1991 struct msk_dmamap_arg ctx; 1992 int error; 1993 1994 error = bus_dma_tag_create( 1995 bus_get_dma_tag(sc->msk_dev), /* parent */ 1996 MSK_STAT_ALIGN, 0, /* alignment, boundary */ 1997 BUS_SPACE_MAXADDR, /* lowaddr */ 1998 BUS_SPACE_MAXADDR, /* highaddr */ 1999 NULL, NULL, /* filter, filterarg */ 2000 MSK_STAT_RING_SZ, /* maxsize */ 2001 1, /* nsegments */ 2002 MSK_STAT_RING_SZ, /* maxsegsize */ 2003 0, /* flags */ 2004 NULL, NULL, /* lockfunc, lockarg */ 2005 &sc->msk_stat_tag); 2006 if (error != 0) { 2007 device_printf(sc->msk_dev, 2008 "failed to create status DMA tag\n"); 2009 return (error); 2010 } 2011 2012 /* Allocate DMA'able memory and load the DMA map for status ring. 
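 * bus_dmamap_load() reports the resulting bus address through the
 * msk_dmamap_cb() callback above; because the tag allows only one
 * segment, ctx.msk_busaddr receives the base address of the whole
 * ring, which is saved in msk_stat_ring_paddr for later use when the
 * chip is programmed.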
*/
2013 error = bus_dmamem_alloc(sc->msk_stat_tag,
2014 (void **)&sc->msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
2015 BUS_DMA_ZERO, &sc->msk_stat_map);
2016 if (error != 0) {
2017 device_printf(sc->msk_dev,
2018 "failed to allocate DMA'able memory for status ring\n");
2019 return (error);
2020 }
2021
2022 ctx.msk_busaddr = 0;
2023 error = bus_dmamap_load(sc->msk_stat_tag,
2024 sc->msk_stat_map, sc->msk_stat_ring, MSK_STAT_RING_SZ,
2025 msk_dmamap_cb, &ctx, 0);
2026 if (error != 0) {
2027 device_printf(sc->msk_dev,
2028 "failed to load DMA'able memory for status ring\n");
2029 return (error);
2030 }
2031 sc->msk_stat_ring_paddr = ctx.msk_busaddr;
2032
2033 return (0);
2034 }
2035
2036 static void
2037 msk_status_dma_free(struct msk_softc *sc)
2038 {
2039
2040 /* Destroy status block. */
2041 if (sc->msk_stat_tag) {
2042 if (sc->msk_stat_map) {
2043 bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
2044 if (sc->msk_stat_ring) {
2045 bus_dmamem_free(sc->msk_stat_tag,
2046 sc->msk_stat_ring, sc->msk_stat_map);
2047 sc->msk_stat_ring = NULL;
2048 }
2049 sc->msk_stat_map = NULL;
2050 }
2051 bus_dma_tag_destroy(sc->msk_stat_tag);
2052 sc->msk_stat_tag = NULL;
2053 }
2054 }
2055
2056 static int
2057 msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
2058 {
2059 struct msk_dmamap_arg ctx;
2060 struct msk_txdesc *txd;
2061 struct msk_rxdesc *rxd;
2062 bus_size_t rxalign;
2063 int error, i;
2064
2065 /* Create parent DMA tag. */
2066 /*
2067 * XXX
2068 * It seems that Yukon II supports full 64-bit DMA operations, but
2069 * it needs two descriptors (list elements) for a 64-bit transfer.
2070 * Since we don't know in advance whether a 32-bit or 64-bit DMA
2071 * mapping will be used for each mbuf, we limit its DMA space to
2072 * the 32-bit address range. Otherwise, we would have to check
2073 * which DMA address is in use and chain another descriptor for
2074 * the 64-bit DMA operation, which also makes the descriptor ring
2075 * size variable. Limiting DMA addresses to the 32-bit space greatly
2076 * simplifies descriptor handling and possibly improves performance
2077 * a bit due to more efficient handling of descriptors.
2078 * Apart from complicating the checksum offloading mechanism, it
2079 * seems a really bad idea to spend a separate descriptor on a
2080 * 64-bit DMA operation just to save a little descriptor memory.
2081 * Such an exotic scheme is rare on Ethernet hardware anyway.
2082 */
2083 error = bus_dma_tag_create(
2084 bus_get_dma_tag(sc_if->msk_if_dev), /* parent */
2085 1, 0, /* alignment, boundary */
2086 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
2087 BUS_SPACE_MAXADDR, /* highaddr */
2088 NULL, NULL, /* filter, filterarg */
2089 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
2090 0, /* nsegments */
2091 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
2092 0, /* flags */
2093 NULL, NULL, /* lockfunc, lockarg */
2094 &sc_if->msk_cdata.msk_parent_tag);
2095 if (error != 0) {
2096 device_printf(sc_if->msk_if_dev,
2097 "failed to create parent DMA tag\n");
2098 goto fail;
2099 }
2100 /* Create tag for Tx ring.
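 * The ring tags created below inherit the 32-bit address limit from
 * the parent tag and use MSK_RING_ALIGN alignment with a single
 * segment, so each descriptor ring ends up in one physically
 * contiguous block.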
*/ 2101 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2102 MSK_RING_ALIGN, 0, /* alignment, boundary */ 2103 BUS_SPACE_MAXADDR, /* lowaddr */ 2104 BUS_SPACE_MAXADDR, /* highaddr */ 2105 NULL, NULL, /* filter, filterarg */ 2106 MSK_TX_RING_SZ, /* maxsize */ 2107 1, /* nsegments */ 2108 MSK_TX_RING_SZ, /* maxsegsize */ 2109 0, /* flags */ 2110 NULL, NULL, /* lockfunc, lockarg */ 2111 &sc_if->msk_cdata.msk_tx_ring_tag); 2112 if (error != 0) { 2113 device_printf(sc_if->msk_if_dev, 2114 "failed to create Tx ring DMA tag\n"); 2115 goto fail; 2116 } 2117 2118 /* Create tag for Rx ring. */ 2119 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2120 MSK_RING_ALIGN, 0, /* alignment, boundary */ 2121 BUS_SPACE_MAXADDR, /* lowaddr */ 2122 BUS_SPACE_MAXADDR, /* highaddr */ 2123 NULL, NULL, /* filter, filterarg */ 2124 MSK_RX_RING_SZ, /* maxsize */ 2125 1, /* nsegments */ 2126 MSK_RX_RING_SZ, /* maxsegsize */ 2127 0, /* flags */ 2128 NULL, NULL, /* lockfunc, lockarg */ 2129 &sc_if->msk_cdata.msk_rx_ring_tag); 2130 if (error != 0) { 2131 device_printf(sc_if->msk_if_dev, 2132 "failed to create Rx ring DMA tag\n"); 2133 goto fail; 2134 } 2135 2136 /* Create tag for Tx buffers. */ 2137 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2138 1, 0, /* alignment, boundary */ 2139 BUS_SPACE_MAXADDR, /* lowaddr */ 2140 BUS_SPACE_MAXADDR, /* highaddr */ 2141 NULL, NULL, /* filter, filterarg */ 2142 MSK_TSO_MAXSIZE, /* maxsize */ 2143 MSK_MAXTXSEGS, /* nsegments */ 2144 MSK_TSO_MAXSGSIZE, /* maxsegsize */ 2145 0, /* flags */ 2146 NULL, NULL, /* lockfunc, lockarg */ 2147 &sc_if->msk_cdata.msk_tx_tag); 2148 if (error != 0) { 2149 device_printf(sc_if->msk_if_dev, 2150 "failed to create Tx DMA tag\n"); 2151 goto fail; 2152 } 2153 2154 rxalign = 1; 2155 /* 2156 * Workaround hardware hang which seems to happen when Rx buffer 2157 * is not aligned on multiple of FIFO word(8 bytes). 2158 */ 2159 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0) 2160 rxalign = MSK_RX_BUF_ALIGN; 2161 /* Create tag for Rx buffers. */ 2162 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2163 rxalign, 0, /* alignment, boundary */ 2164 BUS_SPACE_MAXADDR, /* lowaddr */ 2165 BUS_SPACE_MAXADDR, /* highaddr */ 2166 NULL, NULL, /* filter, filterarg */ 2167 MCLBYTES, /* maxsize */ 2168 1, /* nsegments */ 2169 MCLBYTES, /* maxsegsize */ 2170 0, /* flags */ 2171 NULL, NULL, /* lockfunc, lockarg */ 2172 &sc_if->msk_cdata.msk_rx_tag); 2173 if (error != 0) { 2174 device_printf(sc_if->msk_if_dev, 2175 "failed to create Rx DMA tag\n"); 2176 goto fail; 2177 } 2178 2179 /* Allocate DMA'able memory and load the DMA map for Tx ring. */ 2180 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag, 2181 (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK | 2182 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_tx_ring_map); 2183 if (error != 0) { 2184 device_printf(sc_if->msk_if_dev, 2185 "failed to allocate DMA'able memory for Tx ring\n"); 2186 goto fail; 2187 } 2188 2189 ctx.msk_busaddr = 0; 2190 error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag, 2191 sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring, 2192 MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, 0); 2193 if (error != 0) { 2194 device_printf(sc_if->msk_if_dev, 2195 "failed to load DMA'able memory for Tx ring\n"); 2196 goto fail; 2197 } 2198 sc_if->msk_rdata.msk_tx_ring_paddr = ctx.msk_busaddr; 2199 2200 /* Allocate DMA'able memory and load the DMA map for Rx ring. 
*/ 2201 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_rx_ring_tag, 2202 (void **)&sc_if->msk_rdata.msk_rx_ring, BUS_DMA_WAITOK | 2203 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_rx_ring_map); 2204 if (error != 0) { 2205 device_printf(sc_if->msk_if_dev, 2206 "failed to allocate DMA'able memory for Rx ring\n"); 2207 goto fail; 2208 } 2209 2210 ctx.msk_busaddr = 0; 2211 error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag, 2212 sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring, 2213 MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, 0); 2214 if (error != 0) { 2215 device_printf(sc_if->msk_if_dev, 2216 "failed to load DMA'able memory for Rx ring\n"); 2217 goto fail; 2218 } 2219 sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr; 2220 2221 /* Create DMA maps for Tx buffers. */ 2222 for (i = 0; i < MSK_TX_RING_CNT; i++) { 2223 txd = &sc_if->msk_cdata.msk_txdesc[i]; 2224 txd->tx_m = NULL; 2225 txd->tx_dmamap = NULL; 2226 error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0, 2227 &txd->tx_dmamap); 2228 if (error != 0) { 2229 device_printf(sc_if->msk_if_dev, 2230 "failed to create Tx dmamap\n"); 2231 goto fail; 2232 } 2233 } 2234 /* Create DMA maps for Rx buffers. */ 2235 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0, 2236 &sc_if->msk_cdata.msk_rx_sparemap)) != 0) { 2237 device_printf(sc_if->msk_if_dev, 2238 "failed to create spare Rx dmamap\n"); 2239 goto fail; 2240 } 2241 for (i = 0; i < MSK_RX_RING_CNT; i++) { 2242 rxd = &sc_if->msk_cdata.msk_rxdesc[i]; 2243 rxd->rx_m = NULL; 2244 rxd->rx_dmamap = NULL; 2245 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0, 2246 &rxd->rx_dmamap); 2247 if (error != 0) { 2248 device_printf(sc_if->msk_if_dev, 2249 "failed to create Rx dmamap\n"); 2250 goto fail; 2251 } 2252 } 2253 2254 fail: 2255 return (error); 2256 } 2257 2258 static int 2259 msk_rx_dma_jalloc(struct msk_if_softc *sc_if) 2260 { 2261 struct msk_dmamap_arg ctx; 2262 struct msk_rxdesc *jrxd; 2263 bus_size_t rxalign; 2264 int error, i; 2265 2266 if (jumbo_disable != 0 || (sc_if->msk_flags & MSK_FLAG_JUMBO) == 0) { 2267 sc_if->msk_flags &= ~MSK_FLAG_JUMBO; 2268 device_printf(sc_if->msk_if_dev, 2269 "disabling jumbo frame support\n"); 2270 return (0); 2271 } 2272 /* Create tag for jumbo Rx ring. */ 2273 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2274 MSK_RING_ALIGN, 0, /* alignment, boundary */ 2275 BUS_SPACE_MAXADDR, /* lowaddr */ 2276 BUS_SPACE_MAXADDR, /* highaddr */ 2277 NULL, NULL, /* filter, filterarg */ 2278 MSK_JUMBO_RX_RING_SZ, /* maxsize */ 2279 1, /* nsegments */ 2280 MSK_JUMBO_RX_RING_SZ, /* maxsegsize */ 2281 0, /* flags */ 2282 NULL, NULL, /* lockfunc, lockarg */ 2283 &sc_if->msk_cdata.msk_jumbo_rx_ring_tag); 2284 if (error != 0) { 2285 device_printf(sc_if->msk_if_dev, 2286 "failed to create jumbo Rx ring DMA tag\n"); 2287 goto jumbo_fail; 2288 } 2289 2290 rxalign = 1; 2291 /* 2292 * Workaround hardware hang which seems to happen when Rx buffer 2293 * is not aligned on multiple of FIFO word(8 bytes). 2294 */ 2295 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0) 2296 rxalign = MSK_RX_BUF_ALIGN; 2297 /* Create tag for jumbo Rx buffers. 
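 * Same layout as the standard Rx buffer tag above, but sized for
 * 9KB (MJUM9BYTES) clusters; the 8-byte FIFO-word alignment
 * workaround applies here as well when the RAM buffer is in use.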
*/ 2298 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2299 rxalign, 0, /* alignment, boundary */ 2300 BUS_SPACE_MAXADDR, /* lowaddr */ 2301 BUS_SPACE_MAXADDR, /* highaddr */ 2302 NULL, NULL, /* filter, filterarg */ 2303 MJUM9BYTES, /* maxsize */ 2304 1, /* nsegments */ 2305 MJUM9BYTES, /* maxsegsize */ 2306 0, /* flags */ 2307 NULL, NULL, /* lockfunc, lockarg */ 2308 &sc_if->msk_cdata.msk_jumbo_rx_tag); 2309 if (error != 0) { 2310 device_printf(sc_if->msk_if_dev, 2311 "failed to create jumbo Rx DMA tag\n"); 2312 goto jumbo_fail; 2313 } 2314 2315 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */ 2316 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2317 (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring, 2318 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 2319 &sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2320 if (error != 0) { 2321 device_printf(sc_if->msk_if_dev, 2322 "failed to allocate DMA'able memory for jumbo Rx ring\n"); 2323 goto jumbo_fail; 2324 } 2325 2326 ctx.msk_busaddr = 0; 2327 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2328 sc_if->msk_cdata.msk_jumbo_rx_ring_map, 2329 sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ, 2330 msk_dmamap_cb, &ctx, 0); 2331 if (error != 0) { 2332 device_printf(sc_if->msk_if_dev, 2333 "failed to load DMA'able memory for jumbo Rx ring\n"); 2334 goto jumbo_fail; 2335 } 2336 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr; 2337 2338 /* Create DMA maps for jumbo Rx buffers. */ 2339 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0, 2340 &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) { 2341 device_printf(sc_if->msk_if_dev, 2342 "failed to create spare jumbo Rx dmamap\n"); 2343 goto jumbo_fail; 2344 } 2345 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 2346 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; 2347 jrxd->rx_m = NULL; 2348 jrxd->rx_dmamap = NULL; 2349 error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0, 2350 &jrxd->rx_dmamap); 2351 if (error != 0) { 2352 device_printf(sc_if->msk_if_dev, 2353 "failed to create jumbo Rx dmamap\n"); 2354 goto jumbo_fail; 2355 } 2356 } 2357 2358 return (0); 2359 2360 jumbo_fail: 2361 msk_rx_dma_jfree(sc_if); 2362 device_printf(sc_if->msk_if_dev, "disabling jumbo frame support " 2363 "due to resource shortage\n"); 2364 sc_if->msk_flags &= ~MSK_FLAG_JUMBO; 2365 return (error); 2366 } 2367 2368 static void 2369 msk_txrx_dma_free(struct msk_if_softc *sc_if) 2370 { 2371 struct msk_txdesc *txd; 2372 struct msk_rxdesc *rxd; 2373 int i; 2374 2375 /* Tx ring. */ 2376 if (sc_if->msk_cdata.msk_tx_ring_tag) { 2377 if (sc_if->msk_cdata.msk_tx_ring_map) 2378 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_ring_tag, 2379 sc_if->msk_cdata.msk_tx_ring_map); 2380 if (sc_if->msk_cdata.msk_tx_ring_map && 2381 sc_if->msk_rdata.msk_tx_ring) 2382 bus_dmamem_free(sc_if->msk_cdata.msk_tx_ring_tag, 2383 sc_if->msk_rdata.msk_tx_ring, 2384 sc_if->msk_cdata.msk_tx_ring_map); 2385 sc_if->msk_rdata.msk_tx_ring = NULL; 2386 sc_if->msk_cdata.msk_tx_ring_map = NULL; 2387 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_ring_tag); 2388 sc_if->msk_cdata.msk_tx_ring_tag = NULL; 2389 } 2390 /* Rx ring. 
*/ 2391 if (sc_if->msk_cdata.msk_rx_ring_tag) { 2392 if (sc_if->msk_cdata.msk_rx_ring_map) 2393 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_ring_tag, 2394 sc_if->msk_cdata.msk_rx_ring_map); 2395 if (sc_if->msk_cdata.msk_rx_ring_map && 2396 sc_if->msk_rdata.msk_rx_ring) 2397 bus_dmamem_free(sc_if->msk_cdata.msk_rx_ring_tag, 2398 sc_if->msk_rdata.msk_rx_ring, 2399 sc_if->msk_cdata.msk_rx_ring_map); 2400 sc_if->msk_rdata.msk_rx_ring = NULL; 2401 sc_if->msk_cdata.msk_rx_ring_map = NULL; 2402 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag); 2403 sc_if->msk_cdata.msk_rx_ring_tag = NULL; 2404 } 2405 /* Tx buffers. */ 2406 if (sc_if->msk_cdata.msk_tx_tag) { 2407 for (i = 0; i < MSK_TX_RING_CNT; i++) { 2408 txd = &sc_if->msk_cdata.msk_txdesc[i]; 2409 if (txd->tx_dmamap) { 2410 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag, 2411 txd->tx_dmamap); 2412 txd->tx_dmamap = NULL; 2413 } 2414 } 2415 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag); 2416 sc_if->msk_cdata.msk_tx_tag = NULL; 2417 } 2418 /* Rx buffers. */ 2419 if (sc_if->msk_cdata.msk_rx_tag) { 2420 for (i = 0; i < MSK_RX_RING_CNT; i++) { 2421 rxd = &sc_if->msk_cdata.msk_rxdesc[i]; 2422 if (rxd->rx_dmamap) { 2423 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 2424 rxd->rx_dmamap); 2425 rxd->rx_dmamap = NULL; 2426 } 2427 } 2428 if (sc_if->msk_cdata.msk_rx_sparemap) { 2429 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 2430 sc_if->msk_cdata.msk_rx_sparemap); 2431 sc_if->msk_cdata.msk_rx_sparemap = 0; 2432 } 2433 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag); 2434 sc_if->msk_cdata.msk_rx_tag = NULL; 2435 } 2436 if (sc_if->msk_cdata.msk_parent_tag) { 2437 bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag); 2438 sc_if->msk_cdata.msk_parent_tag = NULL; 2439 } 2440 } 2441 2442 static void 2443 msk_rx_dma_jfree(struct msk_if_softc *sc_if) 2444 { 2445 struct msk_rxdesc *jrxd; 2446 int i; 2447 2448 /* Jumbo Rx ring. */ 2449 if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) { 2450 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map) 2451 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2452 sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2453 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map && 2454 sc_if->msk_rdata.msk_jumbo_rx_ring) 2455 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2456 sc_if->msk_rdata.msk_jumbo_rx_ring, 2457 sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2458 sc_if->msk_rdata.msk_jumbo_rx_ring = NULL; 2459 sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL; 2460 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag); 2461 sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL; 2462 } 2463 /* Jumbo Rx buffers. 
*/
2464 if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
2465 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2466 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2467 if (jrxd->rx_dmamap) {
2468 bus_dmamap_destroy(
2469 sc_if->msk_cdata.msk_jumbo_rx_tag,
2470 jrxd->rx_dmamap);
2471 jrxd->rx_dmamap = NULL;
2472 }
2473 }
2474 if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
2475 bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
2476 sc_if->msk_cdata.msk_jumbo_rx_sparemap);
2477 sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0;
2478 }
2479 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
2480 sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
2481 }
2482 }
2483
2484 static int
2485 msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
2486 {
2487 struct msk_txdesc *txd, *txd_last;
2488 struct msk_tx_desc *tx_le;
2489 struct mbuf *m;
2490 bus_dmamap_t map;
2491 bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
2492 uint32_t control, csum, prod, si;
2493 uint16_t offset, tcp_offset, tso_mtu;
2494 int error, i, nseg, tso;
2495
2496 MSK_IF_LOCK_ASSERT(sc_if);
2497
2498 tcp_offset = offset = 0;
2499 m = *m_head;
2500 if (((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 &&
2501 (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) ||
2502 ((sc_if->msk_flags & MSK_FLAG_DESCV2) == 0 &&
2503 (m->m_pkthdr.csum_flags & CSUM_TSO) != 0)) {
2504 /*
2505 * Since the mbuf carries no protocol-specific structure
2506 * information, we have to inspect the protocol headers here
2507 * to set up TSO and checksum offload. I don't know why Marvell
2508 * made such a decision in the chip design, because other GigE
2509 * hardware normally takes care of all these chores in
2510 * hardware. However, TSO performance of Yukon II is good
2511 * enough that it is worth implementing.
2512 */
2513 struct ether_header *eh;
2514 struct ip *ip;
2515 struct tcphdr *tcp;
2516
2517 if (M_WRITABLE(m) == 0) {
2518 /* Get a writable copy. */
2519 m = m_dup(*m_head, M_DONTWAIT);
2520 m_freem(*m_head);
2521 if (m == NULL) {
2522 *m_head = NULL;
2523 return (ENOBUFS);
2524 }
2525 *m_head = m;
2526 }
2527
2528 offset = sizeof(struct ether_header);
2529 m = m_pullup(m, offset);
2530 if (m == NULL) {
2531 *m_head = NULL;
2532 return (ENOBUFS);
2533 }
2534 eh = mtod(m, struct ether_header *);
2535 /* Check if hardware VLAN insertion is off. */
2536 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2537 offset = sizeof(struct ether_vlan_header);
2538 m = m_pullup(m, offset);
2539 if (m == NULL) {
2540 *m_head = NULL;
2541 return (ENOBUFS);
2542 }
2543 }
2544 m = m_pullup(m, offset + sizeof(struct ip));
2545 if (m == NULL) {
2546 *m_head = NULL;
2547 return (ENOBUFS);
2548 }
2549 ip = (struct ip *)(mtod(m, char *) + offset);
2550 offset += (ip->ip_hl << 2);
2551 tcp_offset = offset;
2552 /*
2553 * It seems that Yukon II has a Tx checksum offload bug for
2554 * small TCP packets that are less than 60 bytes in size
2555 * (e.g. TCP window probe packets, pure ACK packets).
2556 * Common workarounds such as padding with zeros up to the
2557 * minimum Ethernet frame size didn't work at all.
2558 * Instead of disabling checksum offload completely we
2559 * resort to a S/W checksum routine when we encounter short
2560 * TCP frames.
2561 * Short UDP packets appear to be handled correctly by
2562 * Yukon II. Also, I assume this bug does not happen on
2563 * controllers that use the newer descriptor format or
2564 * automatic Tx checksum calculation.
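 * The fallback below computes the TCP checksum in software with
 * in_cksum_skip(), starting at the IP payload (offset), stores it at
 * offset + csum_data (the TCP header's checksum field) and then
 * clears CSUM_TCP so no hardware checksum is requested for the frame.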
2565 */ 2566 if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) == 0 && 2567 (m->m_pkthdr.len < MSK_MIN_FRAMELEN) && 2568 (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) { 2569 m = m_pullup(m, offset + sizeof(struct tcphdr)); 2570 if (m == NULL) { 2571 *m_head = NULL; 2572 return (ENOBUFS); 2573 } 2574 *(uint16_t *)(m->m_data + offset + 2575 m->m_pkthdr.csum_data) = in_cksum_skip(m, 2576 m->m_pkthdr.len, offset); 2577 m->m_pkthdr.csum_flags &= ~CSUM_TCP; 2578 } 2579 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2580 m = m_pullup(m, offset + sizeof(struct tcphdr)); 2581 if (m == NULL) { 2582 *m_head = NULL; 2583 return (ENOBUFS); 2584 } 2585 tcp = (struct tcphdr *)(mtod(m, char *) + offset); 2586 offset += (tcp->th_off << 2); 2587 } 2588 *m_head = m; 2589 } 2590 2591 prod = sc_if->msk_cdata.msk_tx_prod; 2592 txd = &sc_if->msk_cdata.msk_txdesc[prod]; 2593 txd_last = txd; 2594 map = txd->tx_dmamap; 2595 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map, 2596 *m_head, txsegs, &nseg, BUS_DMA_NOWAIT); 2597 if (error == EFBIG) { 2598 m = m_collapse(*m_head, M_DONTWAIT, MSK_MAXTXSEGS); 2599 if (m == NULL) { 2600 m_freem(*m_head); 2601 *m_head = NULL; 2602 return (ENOBUFS); 2603 } 2604 *m_head = m; 2605 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, 2606 map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT); 2607 if (error != 0) { 2608 m_freem(*m_head); 2609 *m_head = NULL; 2610 return (error); 2611 } 2612 } else if (error != 0) 2613 return (error); 2614 if (nseg == 0) { 2615 m_freem(*m_head); 2616 *m_head = NULL; 2617 return (EIO); 2618 } 2619 2620 /* Check number of available descriptors. */ 2621 if (sc_if->msk_cdata.msk_tx_cnt + nseg >= 2622 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) { 2623 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map); 2624 return (ENOBUFS); 2625 } 2626 2627 control = 0; 2628 tso = 0; 2629 tx_le = NULL; 2630 2631 /* Check TSO support. */ 2632 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2633 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0) 2634 tso_mtu = m->m_pkthdr.tso_segsz; 2635 else 2636 tso_mtu = offset + m->m_pkthdr.tso_segsz; 2637 if (tso_mtu != sc_if->msk_cdata.msk_tso_mtu) { 2638 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2639 tx_le->msk_addr = htole32(tso_mtu); 2640 if ((sc_if->msk_flags & MSK_FLAG_DESCV2) != 0) 2641 tx_le->msk_control = htole32(OP_MSS | HW_OWNER); 2642 else 2643 tx_le->msk_control = 2644 htole32(OP_LRGLEN | HW_OWNER); 2645 sc_if->msk_cdata.msk_tx_cnt++; 2646 MSK_INC(prod, MSK_TX_RING_CNT); 2647 sc_if->msk_cdata.msk_tso_mtu = tso_mtu; 2648 } 2649 tso++; 2650 } 2651 /* Check if we have a VLAN tag to insert. */ 2652 if ((m->m_flags & M_VLANTAG) != 0) { 2653 if (tx_le == NULL) { 2654 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2655 tx_le->msk_addr = htole32(0); 2656 tx_le->msk_control = htole32(OP_VLAN | HW_OWNER | 2657 htons(m->m_pkthdr.ether_vtag)); 2658 sc_if->msk_cdata.msk_tx_cnt++; 2659 MSK_INC(prod, MSK_TX_RING_CNT); 2660 } else { 2661 tx_le->msk_control |= htole32(OP_VLAN | 2662 htons(m->m_pkthdr.ether_vtag)); 2663 } 2664 control |= INS_VLAN; 2665 } 2666 /* Check if we have to handle checksum offload. */ 2667 if (tso == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) { 2668 if ((sc_if->msk_flags & MSK_FLAG_AUTOTX_CSUM) != 0) 2669 control |= CALSUM; 2670 else { 2671 control |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM; 2672 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 2673 control |= UDPTCP; 2674 /* Checksum write position. */ 2675 csum = (tcp_offset + m->m_pkthdr.csum_data) & 0xffff; 2676 /* Checksum start position. 
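 * The resulting list element value packs the checksum write position
 * (tcp_offset + csum_data) into bits 15:0 and the checksum start
 * position (tcp_offset) into bits 31:16. For example, for a plain
 * IPv4/TCP frame without a VLAN tag, tcp_offset is 14 + 20 = 34 and
 * csum_data is 16 (offset of th_sum), giving 34 << 16 | 50.
 * msk_last_csum caches the value so a new OP_TCPLISW element is only
 * queued when these offsets change between packets.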
*/ 2677 csum |= (uint32_t)tcp_offset << 16; 2678 if (csum != sc_if->msk_cdata.msk_last_csum) { 2679 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2680 tx_le->msk_addr = htole32(csum); 2681 tx_le->msk_control = htole32(1 << 16 | 2682 (OP_TCPLISW | HW_OWNER)); 2683 sc_if->msk_cdata.msk_tx_cnt++; 2684 MSK_INC(prod, MSK_TX_RING_CNT); 2685 sc_if->msk_cdata.msk_last_csum = csum; 2686 } 2687 } 2688 } 2689 2690 si = prod; 2691 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2692 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr)); 2693 if (tso == 0) 2694 tx_le->msk_control = htole32(txsegs[0].ds_len | control | 2695 OP_PACKET); 2696 else 2697 tx_le->msk_control = htole32(txsegs[0].ds_len | control | 2698 OP_LARGESEND); 2699 sc_if->msk_cdata.msk_tx_cnt++; 2700 MSK_INC(prod, MSK_TX_RING_CNT); 2701 2702 for (i = 1; i < nseg; i++) { 2703 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2704 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr)); 2705 tx_le->msk_control = htole32(txsegs[i].ds_len | control | 2706 OP_BUFFER | HW_OWNER); 2707 sc_if->msk_cdata.msk_tx_cnt++; 2708 MSK_INC(prod, MSK_TX_RING_CNT); 2709 } 2710 /* Update producer index. */ 2711 sc_if->msk_cdata.msk_tx_prod = prod; 2712 2713 /* Set EOP on the last desciptor. */ 2714 prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT; 2715 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2716 tx_le->msk_control |= htole32(EOP); 2717 2718 /* Turn the first descriptor ownership to hardware. */ 2719 tx_le = &sc_if->msk_rdata.msk_tx_ring[si]; 2720 tx_le->msk_control |= htole32(HW_OWNER); 2721 2722 txd = &sc_if->msk_cdata.msk_txdesc[prod]; 2723 map = txd_last->tx_dmamap; 2724 txd_last->tx_dmamap = txd->tx_dmamap; 2725 txd->tx_dmamap = map; 2726 txd->tx_m = m; 2727 2728 /* Sync descriptors. */ 2729 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE); 2730 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag, 2731 sc_if->msk_cdata.msk_tx_ring_map, 2732 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2733 2734 return (0); 2735 } 2736 2737 static void 2738 msk_tx_task(void *arg, int pending) 2739 { 2740 struct ifnet *ifp; 2741 2742 ifp = arg; 2743 msk_start(ifp); 2744 } 2745 2746 static void 2747 msk_start(struct ifnet *ifp) 2748 { 2749 struct msk_if_softc *sc_if; 2750 struct mbuf *m_head; 2751 int enq; 2752 2753 sc_if = ifp->if_softc; 2754 2755 MSK_IF_LOCK(sc_if); 2756 2757 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 2758 IFF_DRV_RUNNING || (sc_if->msk_flags & MSK_FLAG_LINK) == 0) { 2759 MSK_IF_UNLOCK(sc_if); 2760 return; 2761 } 2762 2763 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 2764 sc_if->msk_cdata.msk_tx_cnt < 2765 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) { 2766 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 2767 if (m_head == NULL) 2768 break; 2769 /* 2770 * Pack the data into the transmit ring. If we 2771 * don't have room, set the OACTIVE flag and wait 2772 * for the NIC to drain the ring. 2773 */ 2774 if (msk_encap(sc_if, &m_head) != 0) { 2775 if (m_head == NULL) 2776 break; 2777 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 2778 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2779 break; 2780 } 2781 2782 enq++; 2783 /* 2784 * If there's a BPF listener, bounce a copy of this frame 2785 * to him. 2786 */ 2787 ETHER_BPF_MTAP(ifp, m_head); 2788 } 2789 2790 if (enq > 0) { 2791 /* Transmit */ 2792 CSR_WRITE_2(sc_if->msk_softc, 2793 Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG), 2794 sc_if->msk_cdata.msk_tx_prod); 2795 2796 /* Set a timeout in case the chip goes out to lunch. 
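 * msk_watchdog(), driven once per second from msk_tick(), counts
 * this timer down; if it reaches zero while Tx work is still pending
 * it first tries to reclaim completed descriptors and otherwise
 * reinitializes the interface.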
*/ 2797 sc_if->msk_watchdog_timer = MSK_TX_TIMEOUT; 2798 } 2799 2800 MSK_IF_UNLOCK(sc_if); 2801 } 2802 2803 static void 2804 msk_watchdog(struct msk_if_softc *sc_if) 2805 { 2806 struct ifnet *ifp; 2807 uint32_t ridx; 2808 int idx; 2809 2810 MSK_IF_LOCK_ASSERT(sc_if); 2811 2812 if (sc_if->msk_watchdog_timer == 0 || --sc_if->msk_watchdog_timer) 2813 return; 2814 ifp = sc_if->msk_ifp; 2815 if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0) { 2816 if (bootverbose) 2817 if_printf(sc_if->msk_ifp, "watchdog timeout " 2818 "(missed link)\n"); 2819 ifp->if_oerrors++; 2820 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2821 msk_init_locked(sc_if); 2822 return; 2823 } 2824 2825 /* 2826 * Reclaim first as there is a possibility of losing Tx completion 2827 * interrupts. 2828 */ 2829 ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX; 2830 idx = CSR_READ_2(sc_if->msk_softc, ridx); 2831 if (sc_if->msk_cdata.msk_tx_cons != idx) { 2832 msk_txeof(sc_if, idx); 2833 if (sc_if->msk_cdata.msk_tx_cnt == 0) { 2834 if_printf(ifp, "watchdog timeout (missed Tx interrupts) " 2835 "-- recovering\n"); 2836 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2837 taskqueue_enqueue(taskqueue_fast, 2838 &sc_if->msk_tx_task); 2839 return; 2840 } 2841 } 2842 2843 if_printf(ifp, "watchdog timeout\n"); 2844 ifp->if_oerrors++; 2845 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2846 msk_init_locked(sc_if); 2847 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2848 taskqueue_enqueue(taskqueue_fast, &sc_if->msk_tx_task); 2849 } 2850 2851 static int 2852 mskc_shutdown(device_t dev) 2853 { 2854 struct msk_softc *sc; 2855 int i; 2856 2857 sc = device_get_softc(dev); 2858 MSK_LOCK(sc); 2859 for (i = 0; i < sc->msk_num_port; i++) { 2860 if (sc->msk_if[i] != NULL) 2861 msk_stop(sc->msk_if[i]); 2862 } 2863 2864 /* Disable all interrupts. */ 2865 CSR_WRITE_4(sc, B0_IMSK, 0); 2866 CSR_READ_4(sc, B0_IMSK); 2867 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 2868 CSR_READ_4(sc, B0_HWE_IMSK); 2869 2870 /* Put hardware reset. */ 2871 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 2872 2873 MSK_UNLOCK(sc); 2874 return (0); 2875 } 2876 2877 static int 2878 mskc_suspend(device_t dev) 2879 { 2880 struct msk_softc *sc; 2881 int i; 2882 2883 sc = device_get_softc(dev); 2884 2885 MSK_LOCK(sc); 2886 2887 for (i = 0; i < sc->msk_num_port; i++) { 2888 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL && 2889 ((sc->msk_if[i]->msk_ifp->if_drv_flags & 2890 IFF_DRV_RUNNING) != 0)) 2891 msk_stop(sc->msk_if[i]); 2892 } 2893 2894 /* Disable all interrupts. */ 2895 CSR_WRITE_4(sc, B0_IMSK, 0); 2896 CSR_READ_4(sc, B0_IMSK); 2897 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 2898 CSR_READ_4(sc, B0_HWE_IMSK); 2899 2900 msk_phy_power(sc, MSK_PHY_POWERDOWN); 2901 2902 /* Put hardware reset. 
*/ 2903 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 2904 sc->msk_pflags |= MSK_FLAG_SUSPEND; 2905 2906 MSK_UNLOCK(sc); 2907 2908 return (0); 2909 } 2910 2911 static int 2912 mskc_resume(device_t dev) 2913 { 2914 struct msk_softc *sc; 2915 int i; 2916 2917 sc = device_get_softc(dev); 2918 2919 MSK_LOCK(sc); 2920 2921 mskc_reset(sc); 2922 for (i = 0; i < sc->msk_num_port; i++) { 2923 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL && 2924 ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0)) { 2925 sc->msk_if[i]->msk_ifp->if_drv_flags &= 2926 ~IFF_DRV_RUNNING; 2927 msk_init_locked(sc->msk_if[i]); 2928 } 2929 } 2930 sc->msk_pflags &= ~MSK_FLAG_SUSPEND; 2931 2932 MSK_UNLOCK(sc); 2933 2934 return (0); 2935 } 2936 2937 #ifndef __NO_STRICT_ALIGNMENT 2938 static __inline void 2939 msk_fixup_rx(struct mbuf *m) 2940 { 2941 int i; 2942 uint16_t *src, *dst; 2943 2944 src = mtod(m, uint16_t *); 2945 dst = src - 3; 2946 2947 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 2948 *dst++ = *src++; 2949 2950 m->m_data -= (MSK_RX_BUF_ALIGN - ETHER_ALIGN); 2951 } 2952 #endif 2953 2954 static void 2955 msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control, 2956 int len) 2957 { 2958 struct mbuf *m; 2959 struct ifnet *ifp; 2960 struct msk_rxdesc *rxd; 2961 int cons, rxlen; 2962 2963 ifp = sc_if->msk_ifp; 2964 2965 MSK_IF_LOCK_ASSERT(sc_if); 2966 2967 cons = sc_if->msk_cdata.msk_rx_cons; 2968 do { 2969 rxlen = status >> 16; 2970 if ((status & GMR_FS_VLAN) != 0 && 2971 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2972 rxlen -= ETHER_VLAN_ENCAP_LEN; 2973 if ((sc_if->msk_flags & MSK_FLAG_NORXCHK) != 0) { 2974 /* 2975 * For controllers that returns bogus status code 2976 * just do minimal check and let upper stack 2977 * handle this frame. 2978 */ 2979 if (len > MSK_MAX_FRAMELEN || len < ETHER_HDR_LEN) { 2980 ifp->if_ierrors++; 2981 msk_discard_rxbuf(sc_if, cons); 2982 break; 2983 } 2984 } else if (len > sc_if->msk_framesize || 2985 ((status & GMR_FS_ANY_ERR) != 0) || 2986 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) { 2987 /* Don't count flow-control packet as errors. */ 2988 if ((status & GMR_FS_GOOD_FC) == 0) 2989 ifp->if_ierrors++; 2990 msk_discard_rxbuf(sc_if, cons); 2991 break; 2992 } 2993 rxd = &sc_if->msk_cdata.msk_rxdesc[cons]; 2994 m = rxd->rx_m; 2995 if (msk_newbuf(sc_if, cons) != 0) { 2996 ifp->if_iqdrops++; 2997 /* Reuse old buffer. */ 2998 msk_discard_rxbuf(sc_if, cons); 2999 break; 3000 } 3001 m->m_pkthdr.rcvif = ifp; 3002 m->m_pkthdr.len = m->m_len = len; 3003 #ifndef __NO_STRICT_ALIGNMENT 3004 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0) 3005 msk_fixup_rx(m); 3006 #endif 3007 ifp->if_ipackets++; 3008 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 && 3009 (control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) { 3010 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 3011 if ((control & CSS_IPV4_CSUM_OK) != 0) 3012 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 3013 if ((control & (CSS_TCP | CSS_UDP)) != 0 && 3014 (control & (CSS_TCPUDP_CSUM_OK)) != 0) { 3015 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | 3016 CSUM_PSEUDO_HDR; 3017 m->m_pkthdr.csum_data = 0xffff; 3018 } 3019 } 3020 /* Check for VLAN tagged packets. 
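 * The tag itself is not carried in the Rx status word; it comes from
 * msk_vtag, which msk_handle_events() records from the most recent
 * OP_RXVLAN/OP_RXCHKSVLAN status list element for this port.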
*/ 3021 if ((status & GMR_FS_VLAN) != 0 && 3022 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 3023 m->m_pkthdr.ether_vtag = sc_if->msk_vtag; 3024 m->m_flags |= M_VLANTAG; 3025 } 3026 MSK_IF_UNLOCK(sc_if); 3027 (*ifp->if_input)(ifp, m); 3028 MSK_IF_LOCK(sc_if); 3029 } while (0); 3030 3031 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT); 3032 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT); 3033 } 3034 3035 static void 3036 msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, uint32_t control, 3037 int len) 3038 { 3039 struct mbuf *m; 3040 struct ifnet *ifp; 3041 struct msk_rxdesc *jrxd; 3042 int cons, rxlen; 3043 3044 ifp = sc_if->msk_ifp; 3045 3046 MSK_IF_LOCK_ASSERT(sc_if); 3047 3048 cons = sc_if->msk_cdata.msk_rx_cons; 3049 do { 3050 rxlen = status >> 16; 3051 if ((status & GMR_FS_VLAN) != 0 && 3052 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 3053 rxlen -= ETHER_VLAN_ENCAP_LEN; 3054 if (len > sc_if->msk_framesize || 3055 ((status & GMR_FS_ANY_ERR) != 0) || 3056 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) { 3057 /* Don't count flow-control packet as errors. */ 3058 if ((status & GMR_FS_GOOD_FC) == 0) 3059 ifp->if_ierrors++; 3060 msk_discard_jumbo_rxbuf(sc_if, cons); 3061 break; 3062 } 3063 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons]; 3064 m = jrxd->rx_m; 3065 if (msk_jumbo_newbuf(sc_if, cons) != 0) { 3066 ifp->if_iqdrops++; 3067 /* Reuse old buffer. */ 3068 msk_discard_jumbo_rxbuf(sc_if, cons); 3069 break; 3070 } 3071 m->m_pkthdr.rcvif = ifp; 3072 m->m_pkthdr.len = m->m_len = len; 3073 #ifndef __NO_STRICT_ALIGNMENT 3074 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0) 3075 msk_fixup_rx(m); 3076 #endif 3077 ifp->if_ipackets++; 3078 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 && 3079 (control & (CSS_IPV4 | CSS_IPFRAG)) == CSS_IPV4) { 3080 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 3081 if ((control & CSS_IPV4_CSUM_OK) != 0) 3082 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 3083 if ((control & (CSS_TCP | CSS_UDP)) != 0 && 3084 (control & (CSS_TCPUDP_CSUM_OK)) != 0) { 3085 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | 3086 CSUM_PSEUDO_HDR; 3087 m->m_pkthdr.csum_data = 0xffff; 3088 } 3089 } 3090 /* Check for VLAN tagged packets. */ 3091 if ((status & GMR_FS_VLAN) != 0 && 3092 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 3093 m->m_pkthdr.ether_vtag = sc_if->msk_vtag; 3094 m->m_flags |= M_VLANTAG; 3095 } 3096 MSK_IF_UNLOCK(sc_if); 3097 (*ifp->if_input)(ifp, m); 3098 MSK_IF_LOCK(sc_if); 3099 } while (0); 3100 3101 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT); 3102 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT); 3103 } 3104 3105 static void 3106 msk_txeof(struct msk_if_softc *sc_if, int idx) 3107 { 3108 struct msk_txdesc *txd; 3109 struct msk_tx_desc *cur_tx; 3110 struct ifnet *ifp; 3111 uint32_t control; 3112 int cons, prog; 3113 3114 MSK_IF_LOCK_ASSERT(sc_if); 3115 3116 ifp = sc_if->msk_ifp; 3117 3118 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag, 3119 sc_if->msk_cdata.msk_tx_ring_map, 3120 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 3121 /* 3122 * Go through our tx ring and free mbufs for those 3123 * frames that have been sent. 
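 * Only the final (EOP) descriptor of a frame has an mbuf and DMA map
 * attached by msk_encap(), so descriptors without EOP are simply
 * counted and skipped here.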
3124 */ 3125 cons = sc_if->msk_cdata.msk_tx_cons; 3126 prog = 0; 3127 for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) { 3128 if (sc_if->msk_cdata.msk_tx_cnt <= 0) 3129 break; 3130 prog++; 3131 cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons]; 3132 control = le32toh(cur_tx->msk_control); 3133 sc_if->msk_cdata.msk_tx_cnt--; 3134 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3135 if ((control & EOP) == 0) 3136 continue; 3137 txd = &sc_if->msk_cdata.msk_txdesc[cons]; 3138 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap, 3139 BUS_DMASYNC_POSTWRITE); 3140 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap); 3141 3142 ifp->if_opackets++; 3143 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!", 3144 __func__)); 3145 m_freem(txd->tx_m); 3146 txd->tx_m = NULL; 3147 } 3148 3149 if (prog > 0) { 3150 sc_if->msk_cdata.msk_tx_cons = cons; 3151 if (sc_if->msk_cdata.msk_tx_cnt == 0) 3152 sc_if->msk_watchdog_timer = 0; 3153 /* No need to sync LEs as we didn't update LEs. */ 3154 } 3155 } 3156 3157 static void 3158 msk_tick(void *xsc_if) 3159 { 3160 struct msk_if_softc *sc_if; 3161 struct mii_data *mii; 3162 3163 sc_if = xsc_if; 3164 3165 MSK_IF_LOCK_ASSERT(sc_if); 3166 3167 mii = device_get_softc(sc_if->msk_miibus); 3168 3169 mii_tick(mii); 3170 if ((sc_if->msk_flags & MSK_FLAG_LINK) == 0) 3171 msk_miibus_statchg(sc_if->msk_if_dev); 3172 msk_watchdog(sc_if); 3173 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if); 3174 } 3175 3176 static void 3177 msk_intr_phy(struct msk_if_softc *sc_if) 3178 { 3179 uint16_t status; 3180 3181 msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT); 3182 status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT); 3183 /* Handle FIFO Underrun/Overflow? */ 3184 if ((status & PHY_M_IS_FIFO_ERROR)) 3185 device_printf(sc_if->msk_if_dev, 3186 "PHY FIFO underrun/overflow.\n"); 3187 } 3188 3189 static void 3190 msk_intr_gmac(struct msk_if_softc *sc_if) 3191 { 3192 struct msk_softc *sc; 3193 uint8_t status; 3194 3195 sc = sc_if->msk_softc; 3196 status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC)); 3197 3198 /* GMAC Rx FIFO overrun. */ 3199 if ((status & GM_IS_RX_FF_OR) != 0) 3200 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), 3201 GMF_CLI_RX_FO); 3202 /* GMAC Tx FIFO underrun. */ 3203 if ((status & GM_IS_TX_FF_UR) != 0) { 3204 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3205 GMF_CLI_TX_FU); 3206 device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n"); 3207 /* 3208 * XXX 3209 * In case of Tx underrun, we may need to flush/reset 3210 * Tx MAC but that would also require resynchronization 3211 * with status LEs. Reintializing status LEs would 3212 * affect other port in dual MAC configuration so it 3213 * should be avoided as possible as we can. 3214 * Due to lack of documentation it's all vague guess but 3215 * it needs more investigation. 3216 */ 3217 } 3218 } 3219 3220 static void 3221 msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status) 3222 { 3223 struct msk_softc *sc; 3224 3225 sc = sc_if->msk_softc; 3226 if ((status & Y2_IS_PAR_RD1) != 0) { 3227 device_printf(sc_if->msk_if_dev, 3228 "RAM buffer read parity error\n"); 3229 /* Clear IRQ. */ 3230 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL), 3231 RI_CLR_RD_PERR); 3232 } 3233 if ((status & Y2_IS_PAR_WR1) != 0) { 3234 device_printf(sc_if->msk_if_dev, 3235 "RAM buffer write parity error\n"); 3236 /* Clear IRQ. 
*/ 3237 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL), 3238 RI_CLR_WR_PERR); 3239 } 3240 if ((status & Y2_IS_PAR_MAC1) != 0) { 3241 device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n"); 3242 /* Clear IRQ. */ 3243 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3244 GMF_CLI_TX_PE); 3245 } 3246 if ((status & Y2_IS_PAR_RX1) != 0) { 3247 device_printf(sc_if->msk_if_dev, "Rx parity error\n"); 3248 /* Clear IRQ. */ 3249 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR); 3250 } 3251 if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) { 3252 device_printf(sc_if->msk_if_dev, "TCP segmentation error\n"); 3253 /* Clear IRQ. */ 3254 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP); 3255 } 3256 } 3257 3258 static void 3259 msk_intr_hwerr(struct msk_softc *sc) 3260 { 3261 uint32_t status; 3262 uint32_t tlphead[4]; 3263 3264 status = CSR_READ_4(sc, B0_HWE_ISRC); 3265 /* Time Stamp timer overflow. */ 3266 if ((status & Y2_IS_TIST_OV) != 0) 3267 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); 3268 if ((status & Y2_IS_PCI_NEXP) != 0) { 3269 /* 3270 * PCI Express Error occured which is not described in PEX 3271 * spec. 3272 * This error is also mapped either to Master Abort( 3273 * Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and 3274 * can only be cleared there. 3275 */ 3276 device_printf(sc->msk_dev, 3277 "PCI Express protocol violation error\n"); 3278 } 3279 3280 if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) { 3281 uint16_t v16; 3282 3283 if ((status & Y2_IS_MST_ERR) != 0) 3284 device_printf(sc->msk_dev, 3285 "unexpected IRQ Status error\n"); 3286 else 3287 device_printf(sc->msk_dev, 3288 "unexpected IRQ Master error\n"); 3289 /* Reset all bits in the PCI status register. */ 3290 v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2); 3291 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); 3292 pci_write_config(sc->msk_dev, PCIR_STATUS, v16 | 3293 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT | 3294 PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2); 3295 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 3296 } 3297 3298 /* Check for PCI Express Uncorrectable Error. */ 3299 if ((status & Y2_IS_PCI_EXP) != 0) { 3300 uint32_t v32; 3301 3302 /* 3303 * On PCI Express bus bridges are called root complexes (RC). 3304 * PCI Express errors are recognized by the root complex too, 3305 * which requests the system to handle the problem. After 3306 * error occurence it may be that no access to the adapter 3307 * may be performed any longer. 3308 */ 3309 3310 v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT); 3311 if ((v32 & PEX_UNSUP_REQ) != 0) { 3312 /* Ignore unsupported request error. */ 3313 device_printf(sc->msk_dev, 3314 "Uncorrectable PCI Express error\n"); 3315 } 3316 if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) { 3317 int i; 3318 3319 /* Get TLP header form Log Registers. */ 3320 for (i = 0; i < 4; i++) 3321 tlphead[i] = CSR_PCI_READ_4(sc, 3322 PEX_HEADER_LOG + i * 4); 3323 /* Check for vendor defined broadcast message. */ 3324 if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) { 3325 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP; 3326 CSR_WRITE_4(sc, B0_HWE_IMSK, 3327 sc->msk_intrhwemask); 3328 CSR_READ_4(sc, B0_HWE_IMSK); 3329 } 3330 } 3331 /* Clear the interrupt. 
*/ 3332 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); 3333 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff); 3334 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 3335 } 3336 3337 if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL) 3338 msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status); 3339 if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL) 3340 msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8); 3341 } 3342 3343 static __inline void 3344 msk_rxput(struct msk_if_softc *sc_if) 3345 { 3346 struct msk_softc *sc; 3347 3348 sc = sc_if->msk_softc; 3349 if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) 3350 bus_dmamap_sync( 3351 sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 3352 sc_if->msk_cdata.msk_jumbo_rx_ring_map, 3353 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3354 else 3355 bus_dmamap_sync( 3356 sc_if->msk_cdata.msk_rx_ring_tag, 3357 sc_if->msk_cdata.msk_rx_ring_map, 3358 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3359 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, 3360 PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod); 3361 } 3362 3363 static int 3364 msk_handle_events(struct msk_softc *sc) 3365 { 3366 struct msk_if_softc *sc_if; 3367 int rxput[2]; 3368 struct msk_stat_desc *sd; 3369 uint32_t control, status; 3370 int cons, idx, len, port, rxprog; 3371 3372 idx = CSR_READ_2(sc, STAT_PUT_IDX); 3373 if (idx == sc->msk_stat_cons) 3374 return (0); 3375 3376 /* Sync status LEs. */ 3377 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map, 3378 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 3379 /* XXX Sync Rx LEs here. */ 3380 3381 rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0; 3382 3383 rxprog = 0; 3384 for (cons = sc->msk_stat_cons; cons != idx;) { 3385 sd = &sc->msk_stat_ring[cons]; 3386 control = le32toh(sd->msk_control); 3387 if ((control & HW_OWNER) == 0) 3388 break; 3389 /* 3390 * Marvell's FreeBSD driver updates status LE after clearing 3391 * HW_OWNER. However we don't have a way to sync single LE 3392 * with bus_dma(9) API. bus_dma(9) provides a way to sync 3393 * an entire DMA map. So don't sync LE until we have a better 3394 * way to sync LEs. 3395 */ 3396 control &= ~HW_OWNER; 3397 sd->msk_control = htole32(control); 3398 status = le32toh(sd->msk_status); 3399 len = control & STLE_LEN_MASK; 3400 port = (control >> 16) & 0x01; 3401 sc_if = sc->msk_if[port]; 3402 if (sc_if == NULL) { 3403 device_printf(sc->msk_dev, "invalid port opcode " 3404 "0x%08x\n", control & STLE_OP_MASK); 3405 continue; 3406 } 3407 3408 switch (control & STLE_OP_MASK) { 3409 case OP_RXVLAN: 3410 sc_if->msk_vtag = ntohs(len); 3411 break; 3412 case OP_RXCHKSVLAN: 3413 sc_if->msk_vtag = ntohs(len); 3414 break; 3415 case OP_RXSTAT: 3416 if (sc_if->msk_framesize > 3417 (MCLBYTES - MSK_RX_BUF_ALIGN)) 3418 msk_jumbo_rxeof(sc_if, status, control, len); 3419 else 3420 msk_rxeof(sc_if, status, control, len); 3421 rxprog++; 3422 /* 3423 * Because there is no way to sync single Rx LE 3424 * put the DMA sync operation off until the end of 3425 * event processing. 3426 */ 3427 rxput[port]++; 3428 /* Update prefetch unit if we've passed water mark. 
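 * rxput[] batches replenished Rx list elements per port; once
 * msk_rx_putwm entries have accumulated, msk_rxput() syncs the Rx
 * ring DMA map and writes the new producer index to the prefetch
 * unit's PUT index register so the chip sees the fresh buffers.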
*/
3429 if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
3430 msk_rxput(sc_if);
3431 rxput[port] = 0;
3432 }
3433 break;
3434 case OP_TXINDEXLE:
3435 if (sc->msk_if[MSK_PORT_A] != NULL)
3436 msk_txeof(sc->msk_if[MSK_PORT_A],
3437 status & STLE_TXA1_MSKL);
3438 if (sc->msk_if[MSK_PORT_B] != NULL)
3439 msk_txeof(sc->msk_if[MSK_PORT_B],
3440 ((status & STLE_TXA2_MSKL) >>
3441 STLE_TXA2_SHIFTL) |
3442 ((len & STLE_TXA2_MSKH) <<
3443 STLE_TXA2_SHIFTH));
3444 break;
3445 default:
3446 device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
3447 control & STLE_OP_MASK);
3448 break;
3449 }
3450 MSK_INC(cons, MSK_STAT_RING_CNT);
3451 if (rxprog > sc->msk_process_limit)
3452 break;
3453 }
3454
3455 sc->msk_stat_cons = cons;
3456 /* XXX We should sync status LEs here. See above notes. */
3457
3458 if (rxput[MSK_PORT_A] > 0)
3459 msk_rxput(sc->msk_if[MSK_PORT_A]);
3460 if (rxput[MSK_PORT_B] > 0)
3461 msk_rxput(sc->msk_if[MSK_PORT_B]);
3462
3463 return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
3464 }
3465
3466 /* Legacy interrupt handler for shared interrupt. */
3467 static void
3468 msk_legacy_intr(void *xsc)
3469 {
3470 struct msk_softc *sc;
3471 struct msk_if_softc *sc_if0, *sc_if1;
3472 struct ifnet *ifp0, *ifp1;
3473 uint32_t status;
3474
3475 sc = xsc;
3476 MSK_LOCK(sc);
3477
3478 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
3479 status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
3480 if (status == 0 || status == 0xffffffff ||
3481 (sc->msk_pflags & MSK_FLAG_SUSPEND) != 0 ||
3482 (status & sc->msk_intrmask) == 0) {
3483 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3484 MSK_UNLOCK(sc); return; /* don't return with the softc lock held */
3485 }
3486
3487 sc_if0 = sc->msk_if[MSK_PORT_A];
3488 sc_if1 = sc->msk_if[MSK_PORT_B];
3489 ifp0 = ifp1 = NULL;
3490 if (sc_if0 != NULL)
3491 ifp0 = sc_if0->msk_ifp;
3492 if (sc_if1 != NULL)
3493 ifp1 = sc_if1->msk_ifp;
3494
3495 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
3496 msk_intr_phy(sc_if0);
3497 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
3498 msk_intr_phy(sc_if1);
3499 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
3500 msk_intr_gmac(sc_if0);
3501 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
3502 msk_intr_gmac(sc_if1);
3503 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
3504 device_printf(sc->msk_dev, "Rx descriptor error\n");
3505 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
3506 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3507 CSR_READ_4(sc, B0_IMSK);
3508 }
3509 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
3510 device_printf(sc->msk_dev, "Tx descriptor error\n");
3511 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
3512 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3513 CSR_READ_4(sc, B0_IMSK);
3514 }
3515 if ((status & Y2_IS_HW_ERR) != 0)
3516 msk_intr_hwerr(sc);
3517
3518 while (msk_handle_events(sc) != 0)
3519 ;
3520 if ((status & Y2_IS_STAT_BMU) != 0)
3521 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
3522
3523 /* Reenable interrupts.
*/ 3524 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3525 3526 if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 && 3527 !IFQ_DRV_IS_EMPTY(&ifp0->if_snd)) 3528 taskqueue_enqueue(taskqueue_fast, &sc_if0->msk_tx_task); 3529 if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 && 3530 !IFQ_DRV_IS_EMPTY(&ifp1->if_snd)) 3531 taskqueue_enqueue(taskqueue_fast, &sc_if1->msk_tx_task); 3532 3533 MSK_UNLOCK(sc); 3534 } 3535 3536 static int 3537 msk_intr(void *xsc) 3538 { 3539 struct msk_softc *sc; 3540 uint32_t status; 3541 3542 sc = xsc; 3543 status = CSR_READ_4(sc, B0_Y2_SP_ISRC2); 3544 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */ 3545 if (status == 0 || status == 0xffffffff) { 3546 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3547 return (FILTER_STRAY); 3548 } 3549 3550 taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task); 3551 return (FILTER_HANDLED); 3552 } 3553 3554 static void 3555 msk_int_task(void *arg, int pending) 3556 { 3557 struct msk_softc *sc; 3558 struct msk_if_softc *sc_if0, *sc_if1; 3559 struct ifnet *ifp0, *ifp1; 3560 uint32_t status; 3561 int domore; 3562 3563 sc = arg; 3564 MSK_LOCK(sc); 3565 3566 /* Get interrupt source. */ 3567 status = CSR_READ_4(sc, B0_ISRC); 3568 if (status == 0 || status == 0xffffffff || 3569 (sc->msk_pflags & MSK_FLAG_SUSPEND) != 0 || 3570 (status & sc->msk_intrmask) == 0) 3571 goto done; 3572 3573 sc_if0 = sc->msk_if[MSK_PORT_A]; 3574 sc_if1 = sc->msk_if[MSK_PORT_B]; 3575 ifp0 = ifp1 = NULL; 3576 if (sc_if0 != NULL) 3577 ifp0 = sc_if0->msk_ifp; 3578 if (sc_if1 != NULL) 3579 ifp1 = sc_if1->msk_ifp; 3580 3581 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL) 3582 msk_intr_phy(sc_if0); 3583 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL) 3584 msk_intr_phy(sc_if1); 3585 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL) 3586 msk_intr_gmac(sc_if0); 3587 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL) 3588 msk_intr_gmac(sc_if1); 3589 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) { 3590 device_printf(sc->msk_dev, "Rx descriptor error\n"); 3591 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2); 3592 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3593 CSR_READ_4(sc, B0_IMSK); 3594 } 3595 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) { 3596 device_printf(sc->msk_dev, "Tx descriptor error\n"); 3597 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2); 3598 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3599 CSR_READ_4(sc, B0_IMSK); 3600 } 3601 if ((status & Y2_IS_HW_ERR) != 0) 3602 msk_intr_hwerr(sc); 3603 3604 domore = msk_handle_events(sc); 3605 if ((status & Y2_IS_STAT_BMU) != 0) 3606 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ); 3607 3608 if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 && 3609 !IFQ_DRV_IS_EMPTY(&ifp0->if_snd)) 3610 taskqueue_enqueue(taskqueue_fast, &sc_if0->msk_tx_task); 3611 if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 && 3612 !IFQ_DRV_IS_EMPTY(&ifp1->if_snd)) 3613 taskqueue_enqueue(taskqueue_fast, &sc_if1->msk_tx_task); 3614 3615 if (domore > 0) { 3616 taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task); 3617 MSK_UNLOCK(sc); 3618 return; 3619 } 3620 done: 3621 MSK_UNLOCK(sc); 3622 3623 /* Reenable interrupts. 
*/ 3624 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3625 } 3626 3627 static void 3628 msk_set_tx_stfwd(struct msk_if_softc *sc_if) 3629 { 3630 struct msk_softc *sc; 3631 struct ifnet *ifp; 3632 3633 ifp = sc_if->msk_ifp; 3634 sc = sc_if->msk_softc; 3635 switch (sc->msk_hw_id) { 3636 case CHIP_ID_YUKON_EX: 3637 if (sc->msk_hw_rev == CHIP_REV_YU_EX_A0) 3638 goto yukon_ex_workaround; 3639 if (ifp->if_mtu > ETHERMTU) 3640 CSR_WRITE_4(sc, 3641 MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3642 TX_JUMBO_ENA | TX_STFW_ENA); 3643 else 3644 CSR_WRITE_4(sc, 3645 MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3646 TX_JUMBO_DIS | TX_STFW_ENA); 3647 break; 3648 default: 3649 yukon_ex_workaround: 3650 if (ifp->if_mtu > ETHERMTU) { 3651 /* Set Tx GMAC FIFO Almost Empty Threshold. */ 3652 CSR_WRITE_4(sc, 3653 MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR), 3654 MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR); 3655 /* Disable Store & Forward mode for Tx. */ 3656 CSR_WRITE_4(sc, 3657 MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3658 TX_JUMBO_ENA | TX_STFW_DIS); 3659 } else { 3660 /* Enable Store & Forward mode for Tx. */ 3661 CSR_WRITE_4(sc, 3662 MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3663 TX_JUMBO_DIS | TX_STFW_ENA); 3664 } 3665 break; 3666 } 3667 } 3668 3669 static void 3670 msk_init(void *xsc) 3671 { 3672 struct msk_if_softc *sc_if = xsc; 3673 3674 MSK_IF_LOCK(sc_if); 3675 msk_init_locked(sc_if); 3676 MSK_IF_UNLOCK(sc_if); 3677 } 3678 3679 static void 3680 msk_init_locked(struct msk_if_softc *sc_if) 3681 { 3682 struct msk_softc *sc; 3683 struct ifnet *ifp; 3684 struct mii_data *mii; 3685 uint8_t *eaddr; 3686 uint16_t gmac; 3687 uint32_t reg; 3688 int error; 3689 3690 MSK_IF_LOCK_ASSERT(sc_if); 3691 3692 ifp = sc_if->msk_ifp; 3693 sc = sc_if->msk_softc; 3694 mii = device_get_softc(sc_if->msk_miibus); 3695 3696 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 3697 return; 3698 3699 error = 0; 3700 /* Cancel pending I/O and free all Rx/Tx buffers. */ 3701 msk_stop(sc_if); 3702 3703 if (ifp->if_mtu < ETHERMTU) 3704 sc_if->msk_framesize = ETHERMTU; 3705 else 3706 sc_if->msk_framesize = ifp->if_mtu; 3707 sc_if->msk_framesize += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 3708 if (ifp->if_mtu > ETHERMTU && 3709 (sc_if->msk_flags & MSK_FLAG_JUMBO_NOCSUM) != 0) { 3710 ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO); 3711 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM); 3712 } 3713 3714 /* GMAC Control reset. */ 3715 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET); 3716 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR); 3717 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF); 3718 if (sc->msk_hw_id == CHIP_ID_YUKON_EX) 3719 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), 3720 GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON | 3721 GMC_BYP_RETR_ON); 3722 3723 /* 3724 * Initialize GMAC first such that speed/duplex/flow-control 3725 * parameters are renegotiated when interface is brought up. 3726 */ 3727 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, 0); 3728 3729 /* Dummy read the Interrupt Source Register. */ 3730 CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC)); 3731 3732 /* Clear MIB stats. */ 3733 msk_stats_clear(sc_if); 3734 3735 /* Disable FCS. */ 3736 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS); 3737 3738 /* Setup Transmit Control Register. */ 3739 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); 3740 3741 /* Setup Transmit Flow Control Register. 
*/ 3742 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff); 3743 3744 /* Setup Transmit Parameter Register. */ 3745 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM, 3746 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) | 3747 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF)); 3748 3749 gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) | 3750 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF); 3751 3752 if (ifp->if_mtu > ETHERMTU) 3753 gmac |= GM_SMOD_JUMBO_ENA; 3754 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac); 3755 3756 /* Set station address. */ 3757 eaddr = IF_LLADDR(ifp); 3758 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L, 3759 eaddr[0] | (eaddr[1] << 8)); 3760 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1M, 3761 eaddr[2] | (eaddr[3] << 8)); 3762 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1H, 3763 eaddr[4] | (eaddr[5] << 8)); 3764 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L, 3765 eaddr[0] | (eaddr[1] << 8)); 3766 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2M, 3767 eaddr[2] | (eaddr[3] << 8)); 3768 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2H, 3769 eaddr[4] | (eaddr[5] << 8)); 3770 3771 /* Disable interrupts for counter overflows. */ 3772 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0); 3773 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0); 3774 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0); 3775 3776 /* Configure Rx MAC FIFO. */ 3777 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET); 3778 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR); 3779 reg = GMF_OPER_ON | GMF_RX_F_FL_ON; 3780 if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P || 3781 sc->msk_hw_id == CHIP_ID_YUKON_EX) 3782 reg |= GMF_RX_OVER_ON; 3783 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), reg); 3784 3785 /* Set receive filter. */ 3786 msk_rxfilter(sc_if); 3787 3788 if (sc->msk_hw_id == CHIP_ID_YUKON_XL) { 3789 /* Clear flush mask - HW bug. */ 3790 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 0); 3791 } else { 3792 /* Flush Rx MAC FIFO on any flow control or error. */ 3793 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 3794 GMR_FS_ANY_ERR); 3795 } 3796 3797 /* 3798 * Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word 3799 * due to hardware hang on receipt of pause frames. 3800 */ 3801 reg = RX_GMF_FL_THR_DEF + 1; 3802 /* Another magic value for Yukon FE+, from Linux. */ 3803 if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P && 3804 sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) 3805 reg = 0x178; 3806 CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), reg); 3807 3808 /* Configure Tx MAC FIFO. */ 3809 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET); 3810 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR); 3811 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON); 3812 3813 /* Configure hardware VLAN tag insertion/stripping. */ 3814 msk_setvlan(sc_if, ifp); 3815 3816 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) { 3817 /* Set Rx pause thresholds. */ 3818 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR), 3819 MSK_ECU_LLPP); 3820 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR), 3821 MSK_ECU_ULPP); 3822 /* Configure store-and-forward for Tx. */ 3823 msk_set_tx_stfwd(sc_if); 3824 } 3825 3826 if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P && 3827 sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) { 3828 /* Disable dynamic watermark - from Linux.
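The workaround below clears the two low-order bits of the Tx GMAC FIFO end address register (TX_GMF_EA) on Yukon FE+ A0, matching what the Linux driver does.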
*/ 3829 reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA)); 3830 reg &= ~0x03; 3831 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg); 3832 } 3833 3834 /* 3835 * Disable Force Sync bit and Alloc bit in Tx RAM interface 3836 * arbiter as we don't use Sync Tx queue. 3837 */ 3838 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), 3839 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); 3840 /* Enable the RAM Interface Arbiter. */ 3841 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB); 3842 3843 /* Setup RAM buffer. */ 3844 msk_set_rambuffer(sc_if); 3845 3846 /* Disable Tx sync Queue. */ 3847 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET); 3848 3849 /* Setup Tx Queue Bus Memory Interface. */ 3850 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET); 3851 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT); 3852 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON); 3853 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM); 3854 switch (sc->msk_hw_id) { 3855 case CHIP_ID_YUKON_EC_U: 3856 if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) { 3857 /* Fix for Yukon-EC Ultra: set BMU FIFO level */ 3858 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL), 3859 MSK_ECU_TXFF_LEV); 3860 } 3861 break; 3862 case CHIP_ID_YUKON_EX: 3863 /* 3864 * Yukon Extreme seems to have silicon bug for 3865 * automatic Tx checksum calculation capability. 3866 */ 3867 if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0) 3868 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_F), 3869 F_TX_CHK_AUTO_OFF); 3870 break; 3871 } 3872 3873 /* Setup Rx Queue Bus Memory Interface. */ 3874 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET); 3875 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT); 3876 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON); 3877 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM); 3878 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U && 3879 sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) { 3880 /* MAC Rx RAM Read is controlled by hardware. */ 3881 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS); 3882 } 3883 3884 msk_set_prefetch(sc, sc_if->msk_txq, 3885 sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1); 3886 msk_init_tx_ring(sc_if); 3887 3888 /* Disable Rx checksum offload and RSS hash. */ 3889 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), 3890 BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH); 3891 if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) { 3892 msk_set_prefetch(sc, sc_if->msk_rxq, 3893 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr, 3894 MSK_JUMBO_RX_RING_CNT - 1); 3895 error = msk_init_jumbo_rx_ring(sc_if); 3896 } else { 3897 msk_set_prefetch(sc, sc_if->msk_rxq, 3898 sc_if->msk_rdata.msk_rx_ring_paddr, 3899 MSK_RX_RING_CNT - 1); 3900 error = msk_init_rx_ring(sc_if); 3901 } 3902 if (error != 0) { 3903 device_printf(sc_if->msk_if_dev, 3904 "initialization failed: no memory for Rx buffers\n"); 3905 msk_stop(sc_if); 3906 return; 3907 } 3908 3909 /* Configure interrupt handling. 
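Add this port's interrupt sources and hardware error sources to the controller-wide masks; the mask registers are read back afterwards to flush the writes.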
*/ 3910 if (sc_if->msk_port == MSK_PORT_A) { 3911 sc->msk_intrmask |= Y2_IS_PORT_A; 3912 sc->msk_intrhwemask |= Y2_HWE_L1_MASK; 3913 } else { 3914 sc->msk_intrmask |= Y2_IS_PORT_B; 3915 sc->msk_intrhwemask |= Y2_HWE_L2_MASK; 3916 } 3917 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask); 3918 CSR_READ_4(sc, B0_HWE_IMSK); 3919 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3920 CSR_READ_4(sc, B0_IMSK); 3921 3922 sc_if->msk_flags &= ~MSK_FLAG_LINK; 3923 mii_mediachg(mii); 3924 3925 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3926 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3927 3928 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if); 3929 } 3930 3931 static void 3932 msk_set_rambuffer(struct msk_if_softc *sc_if) 3933 { 3934 struct msk_softc *sc; 3935 int ltpp, utpp; 3936 3937 sc = sc_if->msk_softc; 3938 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) 3939 return; 3940 3941 /* Setup Rx Queue. */ 3942 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR); 3943 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START), 3944 sc->msk_rxqstart[sc_if->msk_port] / 8); 3945 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END), 3946 sc->msk_rxqend[sc_if->msk_port] / 8); 3947 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP), 3948 sc->msk_rxqstart[sc_if->msk_port] / 8); 3949 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP), 3950 sc->msk_rxqstart[sc_if->msk_port] / 8); 3951 3952 utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 - 3953 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8; 3954 ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 - 3955 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8; 3956 if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE) 3957 ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8; 3958 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp); 3959 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp); 3960 /* Set Rx priority(RB_RX_UTHP/RB_RX_LTHP) thresholds? */ 3961 3962 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD); 3963 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL)); 3964 3965 /* Setup Tx Queue. */ 3966 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR); 3967 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START), 3968 sc->msk_txqstart[sc_if->msk_port] / 8); 3969 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END), 3970 sc->msk_txqend[sc_if->msk_port] / 8); 3971 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP), 3972 sc->msk_txqstart[sc_if->msk_port] / 8); 3973 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP), 3974 sc->msk_txqstart[sc_if->msk_port] / 8); 3975 /* Enable Store & Forward for Tx side. */ 3976 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD); 3977 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD); 3978 CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL)); 3979 } 3980 3981 static void 3982 msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr, 3983 uint32_t count) 3984 { 3985 3986 /* Reset the prefetch unit. */ 3987 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG), 3988 PREF_UNIT_RST_SET); 3989 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG), 3990 PREF_UNIT_RST_CLR); 3991 /* Set LE base address. */ 3992 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG), 3993 MSK_ADDR_LO(addr)); 3994 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG), 3995 MSK_ADDR_HI(addr)); 3996 /* Set the list last index. */ 3997 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG), 3998 count); 3999 /* Turn on prefetch unit. 
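With OP_ON set the unit starts fetching list elements from the ring whose base address and last index were programmed above.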
*/ 4000 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG), 4001 PREF_UNIT_OP_ON); 4002 /* Dummy read to ensure write. */ 4003 CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG)); 4004 } 4005 4006 static void 4007 msk_stop(struct msk_if_softc *sc_if) 4008 { 4009 struct msk_softc *sc; 4010 struct msk_txdesc *txd; 4011 struct msk_rxdesc *rxd; 4012 struct msk_rxdesc *jrxd; 4013 struct ifnet *ifp; 4014 uint32_t val; 4015 int i; 4016 4017 MSK_IF_LOCK_ASSERT(sc_if); 4018 sc = sc_if->msk_softc; 4019 ifp = sc_if->msk_ifp; 4020 4021 callout_stop(&sc_if->msk_tick_ch); 4022 sc_if->msk_watchdog_timer = 0; 4023 4024 /* Disable interrupts. */ 4025 if (sc_if->msk_port == MSK_PORT_A) { 4026 sc->msk_intrmask &= ~Y2_IS_PORT_A; 4027 sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK; 4028 } else { 4029 sc->msk_intrmask &= ~Y2_IS_PORT_B; 4030 sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK; 4031 } 4032 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask); 4033 CSR_READ_4(sc, B0_HWE_IMSK); 4034 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 4035 CSR_READ_4(sc, B0_IMSK); 4036 4037 /* Disable Tx/Rx MAC. */ 4038 val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); 4039 val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); 4040 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val); 4041 /* Read again to ensure writing. */ 4042 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); 4043 /* Update stats and clear counters. */ 4044 msk_stats_update(sc_if); 4045 4046 /* Stop Tx BMU. */ 4047 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP); 4048 val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR)); 4049 for (i = 0; i < MSK_TIMEOUT; i++) { 4050 if ((val & (BMU_STOP | BMU_IDLE)) == 0) { 4051 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), 4052 BMU_STOP); 4053 val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR)); 4054 } else 4055 break; 4056 DELAY(1); 4057 } 4058 if (i == MSK_TIMEOUT) 4059 device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n"); 4060 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), 4061 RB_RST_SET | RB_DIS_OP_MD); 4062 4063 /* Disable all GMAC interrupt. */ 4064 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0); 4065 /* Disable PHY interrupt. */ 4066 msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0); 4067 4068 /* Disable the RAM Interface Arbiter. */ 4069 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB); 4070 4071 /* Reset the PCI FIFO of the async Tx queue */ 4072 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), 4073 BMU_RST_SET | BMU_FIFO_RST); 4074 4075 /* Reset the Tx prefetch units. */ 4076 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG), 4077 PREF_UNIT_RST_SET); 4078 4079 /* Reset the RAM Buffer async Tx queue. */ 4080 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET); 4081 4082 /* Reset Tx MAC FIFO. */ 4083 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET); 4084 /* Set Pause Off. */ 4085 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF); 4086 4087 /* 4088 * The Rx Stop command will not work for Yukon-2 if the BMU does not 4089 * reach the end of packet and since we can't make sure that we have 4090 * incoming data, we must reset the BMU while it is not during a DMA 4091 * transfer. Since it is possible that the Rx path is still active, 4092 * the Rx RAM buffer will be stopped first, so any possible incoming 4093 * data will not trigger a DMA. After the RAM buffer is stopped, the 4094 * BMU is polled until any DMA in progress is ended and only then it 4095 * will be reset. 
4096 */ 4097 4098 /* Disable the RAM Buffer receive queue. */ 4099 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD); 4100 for (i = 0; i < MSK_TIMEOUT; i++) { 4101 if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) == 4102 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL))) 4103 break; 4104 DELAY(1); 4105 } 4106 if (i == MSK_TIMEOUT) 4107 device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n"); 4108 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), 4109 BMU_RST_SET | BMU_FIFO_RST); 4110 /* Reset the Rx prefetch unit. */ 4111 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG), 4112 PREF_UNIT_RST_SET); 4113 /* Reset the RAM Buffer receive queue. */ 4114 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET); 4115 /* Reset Rx MAC FIFO. */ 4116 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET); 4117 4118 /* Free Rx and Tx mbufs still in the queues. */ 4119 for (i = 0; i < MSK_RX_RING_CNT; i++) { 4120 rxd = &sc_if->msk_cdata.msk_rxdesc[i]; 4121 if (rxd->rx_m != NULL) { 4122 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, 4123 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 4124 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, 4125 rxd->rx_dmamap); 4126 m_freem(rxd->rx_m); 4127 rxd->rx_m = NULL; 4128 } 4129 } 4130 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 4131 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; 4132 if (jrxd->rx_m != NULL) { 4133 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, 4134 jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 4135 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag, 4136 jrxd->rx_dmamap); 4137 m_freem(jrxd->rx_m); 4138 jrxd->rx_m = NULL; 4139 } 4140 } 4141 for (i = 0; i < MSK_TX_RING_CNT; i++) { 4142 txd = &sc_if->msk_cdata.msk_txdesc[i]; 4143 if (txd->tx_m != NULL) { 4144 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, 4145 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 4146 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, 4147 txd->tx_dmamap); 4148 m_freem(txd->tx_m); 4149 txd->tx_m = NULL; 4150 } 4151 } 4152 4153 /* 4154 * Mark the interface down. 4155 */ 4156 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 4157 sc_if->msk_flags &= ~MSK_FLAG_LINK; 4158 } 4159 4160 /* 4161 * When GM_PAR_MIB_CLR bit of GM_PHY_ADDR is set, reading lower 4162 * counter clears high 16 bits of the counter such that accessing 4163 * lower 16 bits should be the last operation. 4164 */ 4165 #define MSK_READ_MIB32(x, y) \ 4166 (((uint32_t)GMAC_READ_2(sc, x, (y) + 4)) << 16) + \ 4167 (uint32_t)GMAC_READ_2(sc, x, y) 4168 #define MSK_READ_MIB64(x, y) \ 4169 (((uint64_t)MSK_READ_MIB32(x, (y) + 8)) << 32) + \ 4170 (uint64_t)MSK_READ_MIB32(x, y) 4171 4172 static void 4173 msk_stats_clear(struct msk_if_softc *sc_if) 4174 { 4175 struct msk_softc *sc; 4176 uint32_t reg; 4177 uint16_t gmac; 4178 int i; 4179 4180 MSK_IF_LOCK_ASSERT(sc_if); 4181 4182 sc = sc_if->msk_softc; 4183 /* Set MIB Clear Counter Mode. */ 4184 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR); 4185 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR); 4186 /* Read all MIB Counters with Clear Mode set. */ 4187 for (i = GM_RXF_UC_OK; i <= GM_TXE_FIFO_UR; i += sizeof(uint32_t)) 4188 reg = MSK_READ_MIB32(sc_if->msk_port, i); 4189 /* Clear MIB Clear Counter Mode. 
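With the clear mode bit removed, subsequent counter reads no longer reset the MIB counters.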
*/ 4190 gmac &= ~GM_PAR_MIB_CLR; 4191 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac); 4192 } 4193 4194 static void 4195 msk_stats_update(struct msk_if_softc *sc_if) 4196 { 4197 struct msk_softc *sc; 4198 struct ifnet *ifp; 4199 struct msk_hw_stats *stats; 4200 uint16_t gmac; 4201 uint32_t reg; 4202 4203 MSK_IF_LOCK_ASSERT(sc_if); 4204 4205 ifp = sc_if->msk_ifp; 4206 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 4207 return; 4208 sc = sc_if->msk_softc; 4209 stats = &sc_if->msk_stats; 4210 /* Set MIB Clear Counter Mode. */ 4211 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR); 4212 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR); 4213 4214 /* Rx stats. */ 4215 stats->rx_ucast_frames += 4216 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_UC_OK); 4217 stats->rx_bcast_frames += 4218 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_BC_OK); 4219 stats->rx_pause_frames += 4220 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MPAUSE); 4221 stats->rx_mcast_frames += 4222 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MC_OK); 4223 stats->rx_crc_errs += 4224 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_FCS_ERR); 4225 reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE1); 4226 stats->rx_good_octets += 4227 MSK_READ_MIB64(sc_if->msk_port, GM_RXO_OK_LO); 4228 stats->rx_bad_octets += 4229 MSK_READ_MIB64(sc_if->msk_port, GM_RXO_ERR_LO); 4230 stats->rx_runts += 4231 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SHT); 4232 stats->rx_runt_errs += 4233 MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FRAG); 4234 stats->rx_pkts_64 += 4235 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_64B); 4236 stats->rx_pkts_65_127 += 4237 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_127B); 4238 stats->rx_pkts_128_255 += 4239 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_255B); 4240 stats->rx_pkts_256_511 += 4241 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_511B); 4242 stats->rx_pkts_512_1023 += 4243 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1023B); 4244 stats->rx_pkts_1024_1518 += 4245 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_1518B); 4246 stats->rx_pkts_1519_max += 4247 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_MAX_SZ); 4248 stats->rx_pkts_too_long += 4249 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_LNG_ERR); 4250 stats->rx_pkts_jabbers += 4251 MSK_READ_MIB32(sc_if->msk_port, GM_RXF_JAB_PKT); 4252 reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE2); 4253 stats->rx_fifo_oflows += 4254 MSK_READ_MIB32(sc_if->msk_port, GM_RXE_FIFO_OV); 4255 reg = MSK_READ_MIB32(sc_if->msk_port, GM_RXF_SPARE3); 4256 4257 /* Tx stats. 
*/ 4258 stats->tx_ucast_frames += 4259 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_UC_OK); 4260 stats->tx_bcast_frames += 4261 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_BC_OK); 4262 stats->tx_pause_frames += 4263 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MPAUSE); 4264 stats->tx_mcast_frames += 4265 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MC_OK); 4266 stats->tx_octets += 4267 MSK_READ_MIB64(sc_if->msk_port, GM_TXO_OK_LO); 4268 stats->tx_pkts_64 += 4269 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_64B); 4270 stats->tx_pkts_65_127 += 4271 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_127B); 4272 stats->tx_pkts_128_255 += 4273 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_255B); 4274 stats->tx_pkts_256_511 += 4275 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_511B); 4276 stats->tx_pkts_512_1023 += 4277 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1023B); 4278 stats->tx_pkts_1024_1518 += 4279 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_1518B); 4280 stats->tx_pkts_1519_max += 4281 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MAX_SZ); 4282 reg = MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SPARE1); 4283 stats->tx_colls += 4284 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_COL); 4285 stats->tx_late_colls += 4286 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_LAT_COL); 4287 stats->tx_excess_colls += 4288 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_ABO_COL); 4289 stats->tx_multi_colls += 4290 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_MUL_COL); 4291 stats->tx_single_colls += 4292 MSK_READ_MIB32(sc_if->msk_port, GM_TXF_SNG_COL); 4293 stats->tx_underflows += 4294 MSK_READ_MIB32(sc_if->msk_port, GM_TXE_FIFO_UR); 4295 /* Clear MIB Clear Counter Mode. */ 4296 gmac &= ~GM_PAR_MIB_CLR; 4297 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac); 4298 } 4299 4300 static int 4301 msk_sysctl_stat32(SYSCTL_HANDLER_ARGS) 4302 { 4303 struct msk_softc *sc; 4304 struct msk_if_softc *sc_if; 4305 uint32_t result, *stat; 4306 int off; 4307 4308 sc_if = (struct msk_if_softc *)arg1; 4309 sc = sc_if->msk_softc; 4310 off = arg2; 4311 stat = (uint32_t *)((uint8_t *)&sc_if->msk_stats + off); 4312 4313 MSK_IF_LOCK(sc_if); 4314 result = MSK_READ_MIB32(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2); 4315 result += *stat; 4316 MSK_IF_UNLOCK(sc_if); 4317 4318 return (sysctl_handle_int(oidp, &result, 0, req)); 4319 } 4320 4321 static int 4322 msk_sysctl_stat64(SYSCTL_HANDLER_ARGS) 4323 { 4324 struct msk_softc *sc; 4325 struct msk_if_softc *sc_if; 4326 uint64_t result, *stat; 4327 int off; 4328 4329 sc_if = (struct msk_if_softc *)arg1; 4330 sc = sc_if->msk_softc; 4331 off = arg2; 4332 stat = (uint64_t *)((uint8_t *)&sc_if->msk_stats + off); 4333 4334 MSK_IF_LOCK(sc_if); 4335 result = MSK_READ_MIB64(sc_if->msk_port, GM_MIB_CNT_BASE + off * 2); 4336 result += *stat; 4337 MSK_IF_UNLOCK(sc_if); 4338 4339 return (sysctl_handle_quad(oidp, &result, 0, req)); 4340 } 4341 4342 #undef MSK_READ_MIB32 4343 #undef MSK_READ_MIB64 4344 4345 #define MSK_SYSCTL_STAT32(sc, c, o, p, n, d) \ 4346 SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_UINT | CTLFLAG_RD, \ 4347 sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat32, \ 4348 "IU", d) 4349 #define MSK_SYSCTL_STAT64(sc, c, o, p, n, d) \ 4350 SYSCTL_ADD_PROC(c, p, OID_AUTO, o, CTLTYPE_UINT | CTLFLAG_RD, \ 4351 sc, offsetof(struct msk_hw_stats, n), msk_sysctl_stat64, \ 4352 "Q", d) 4353 4354 static void 4355 msk_sysctl_node(struct msk_if_softc *sc_if) 4356 { 4357 struct sysctl_ctx_list *ctx; 4358 struct sysctl_oid_list *child, *schild; 4359 struct sysctl_oid *tree; 4360 4361 ctx = device_get_sysctl_ctx(sc_if->msk_if_dev); 4362 child = 
SYSCTL_CHILDREN(device_get_sysctl_tree(sc_if->msk_if_dev)); 4363 4364 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, 4365 NULL, "MSK Statistics"); 4366 schild = child = SYSCTL_CHILDREN(tree); 4367 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD, 4368 NULL, "MSK RX Statistics"); 4369 child = SYSCTL_CHILDREN(tree); 4370 MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames", 4371 child, rx_ucast_frames, "Good unicast frames"); 4372 MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames", 4373 child, rx_bcast_frames, "Good broadcast frames"); 4374 MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames", 4375 child, rx_pause_frames, "Pause frames"); 4376 MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames", 4377 child, rx_mcast_frames, "Multicast frames"); 4378 MSK_SYSCTL_STAT32(sc_if, ctx, "crc_errs", 4379 child, rx_crc_errs, "CRC errors"); 4380 MSK_SYSCTL_STAT64(sc_if, ctx, "good_octets", 4381 child, rx_good_octets, "Good octets"); 4382 MSK_SYSCTL_STAT64(sc_if, ctx, "bad_octets", 4383 child, rx_bad_octets, "Bad octets"); 4384 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64", 4385 child, rx_pkts_64, "64 bytes frames"); 4386 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127", 4387 child, rx_pkts_65_127, "65 to 127 bytes frames"); 4388 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255", 4389 child, rx_pkts_128_255, "128 to 255 bytes frames"); 4390 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511", 4391 child, rx_pkts_256_511, "256 to 511 bytes frames"); 4392 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023", 4393 child, rx_pkts_512_1023, "512 to 1023 bytes frames"); 4394 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518", 4395 child, rx_pkts_1024_1518, "1024 to 1518 bytes frames"); 4396 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max", 4397 child, rx_pkts_1519_max, "1519 to max frames"); 4398 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_too_long", 4399 child, rx_pkts_too_long, "frames too long"); 4400 MSK_SYSCTL_STAT32(sc_if, ctx, "jabbers", 4401 child, rx_pkts_jabbers, "Jabber errors"); 4402 MSK_SYSCTL_STAT32(sc_if, ctx, "overflows", 4403 child, rx_fifo_oflows, "FIFO overflows"); 4404 4405 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD, 4406 NULL, "MSK TX Statistics"); 4407 child = SYSCTL_CHILDREN(tree); 4408 MSK_SYSCTL_STAT32(sc_if, ctx, "ucast_frames", 4409 child, tx_ucast_frames, "Unicast frames"); 4410 MSK_SYSCTL_STAT32(sc_if, ctx, "bcast_frames", 4411 child, tx_bcast_frames, "Broadcast frames"); 4412 MSK_SYSCTL_STAT32(sc_if, ctx, "pause_frames", 4413 child, tx_pause_frames, "Pause frames"); 4414 MSK_SYSCTL_STAT32(sc_if, ctx, "mcast_frames", 4415 child, tx_mcast_frames, "Multicast frames"); 4416 MSK_SYSCTL_STAT64(sc_if, ctx, "octets", 4417 child, tx_octets, "Octets"); 4418 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_64", 4419 child, tx_pkts_64, "64 bytes frames"); 4420 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_65_127", 4421 child, tx_pkts_65_127, "65 to 127 bytes frames"); 4422 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_128_255", 4423 child, tx_pkts_128_255, "128 to 255 bytes frames"); 4424 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_256_511", 4425 child, tx_pkts_256_511, "256 to 511 bytes frames"); 4426 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_512_1023", 4427 child, tx_pkts_512_1023, "512 to 1023 bytes frames"); 4428 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1024_1518", 4429 child, tx_pkts_1024_1518, "1024 to 1518 bytes frames"); 4430 MSK_SYSCTL_STAT32(sc_if, ctx, "frames_1519_max", 4431 child, tx_pkts_1519_max, "1519 to max frames"); 4432 MSK_SYSCTL_STAT32(sc_if, ctx, "colls", 4433 child, tx_colls, "Collisions"); 4434 
MSK_SYSCTL_STAT32(sc_if, ctx, "late_colls", 4435 child, tx_late_colls, "Late collisions"); 4436 MSK_SYSCTL_STAT32(sc_if, ctx, "excess_colls", 4437 child, tx_excess_colls, "Excessive collisions"); 4438 MSK_SYSCTL_STAT32(sc_if, ctx, "multi_colls", 4439 child, tx_multi_colls, "Multiple collisions"); 4440 MSK_SYSCTL_STAT32(sc_if, ctx, "single_colls", 4441 child, tx_single_colls, "Single collisions"); 4442 MSK_SYSCTL_STAT32(sc_if, ctx, "underflows", 4443 child, tx_underflows, "FIFO underflows"); 4444 } 4445 4446 #undef MSK_SYSCTL_STAT32 4447 #undef MSK_SYSCTL_STAT64 4448 4449 static int 4450 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 4451 { 4452 int error, value; 4453 4454 if (!arg1) 4455 return (EINVAL); 4456 value = *(int *)arg1; 4457 error = sysctl_handle_int(oidp, &value, 0, req); 4458 if (error || !req->newptr) 4459 return (error); 4460 if (value < low || value > high) 4461 return (EINVAL); 4462 *(int *)arg1 = value; 4463 4464 return (0); 4465 } 4466 4467 static int 4468 sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS) 4469 { 4470 4471 return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN, 4472 MSK_PROC_MAX)); 4473 } 4474