/******************************************************************************
 *
 * Name   : sky2.c
 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
 * Version: $Revision: 1.23 $
 * Date   : $Date: 2005/12/22 09:04:11 $
 * Purpose: Main driver source file
 *
 *****************************************************************************/

/******************************************************************************
 *
 * LICENSE:
 * Copyright (C) Marvell International Ltd. and/or its affiliates
 *
 * The computer program files contained in this folder ("Files")
 * are provided to you under the BSD-type license terms provided
 * below, and any use of such Files and any derivative works
 * thereof created by you shall be governed by the following terms
 * and conditions:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 * - Neither the name of Marvell nor the names of its contributors
 *   may be used to endorse or promote products derived from this
 *   software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * /LICENSE
 *
 *****************************************************************************/

/*-
 * Copyright (c) 1997, 1998, 1999, 2000
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Device driver for the Marvell Yukon II Ethernet controller.
 * Due to lack of documentation, this driver is based on the code from
 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/msk/if_mskreg.h>

MODULE_DEPEND(msk, pci, 1, 1, 1);
MODULE_DEPEND(msk, ether, 1, 1, 1);
MODULE_DEPEND(msk, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/* Tunables. */
static int msi_disable = 0;
TUNABLE_INT("hw.msk.msi_disable", &msi_disable);
static int legacy_intr = 0;
TUNABLE_INT("hw.msk.legacy_intr", &legacy_intr);
static int jumbo_disable = 0;
TUNABLE_INT("hw.msk.jumbo_disable", &jumbo_disable);
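/*
 * Illustrative loader.conf(5) settings for the tunables above (example
 * values, not defaults):
 *
 *   hw.msk.msi_disable=1	# fall back to INTx instead of MSI
 *   hw.msk.legacy_intr=1	# use the legacy interrupt handler instead of
 *				# the fast interrupt/taskqueue path
 *   hw.msk.jumbo_disable=1	# opt out of jumbo frame support
 */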

#define MSK_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)

/*
 * Devices supported by this driver.
 */
static struct msk_product {
	uint16_t	msk_vendorid;
	uint16_t	msk_deviceid;
	const char	*msk_name;
} msk_products[] = {
	{ VENDORID_SK, DEVICEID_SK_YUKON2,
	    "SK-9Sxx Gigabit Ethernet" },
	{ VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
	    "SK-9Exx Gigabit Ethernet"},
	{ VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
	    "Marvell Yukon 88E8021CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8021X,
	    "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
	    "Marvell Yukon 88E8022CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8022X,
	    "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
	    "Marvell Yukon 88E8061CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8061X,
	    "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
	    "Marvell Yukon 88E8062CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8062X,
	    "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8035,
	    "Marvell Yukon 88E8035 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8036,
	    "Marvell Yukon 88E8036 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8038,
	    "Marvell Yukon 88E8038 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8039,
	    "Marvell Yukon 88E8039 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4361,
	    "Marvell Yukon 88E8050 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4360,
	    "Marvell Yukon 88E8052 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4362,
	    "Marvell Yukon 88E8053 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4363,
	    "Marvell Yukon 88E8055 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4364,
	    "Marvell Yukon 88E8056 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436A,
	    "Marvell Yukon 88E8058 Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
	    "D-Link 550SX Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
	    "D-Link 560T Gigabit Ethernet" }
};

static const char *model_name[] = {
	"Yukon XL",
	"Yukon EC Ultra",
	"Yukon Unknown",
	"Yukon EC",
	"Yukon FE"
};

static int mskc_probe(device_t);
static int mskc_attach(device_t);
static int mskc_detach(device_t);
static int mskc_shutdown(device_t);
static int mskc_setup_rambuffer(struct msk_softc *);
static int mskc_suspend(device_t);
static int mskc_resume(device_t);
static void mskc_reset(struct msk_softc *);

static int msk_probe(device_t);
static int msk_attach(device_t);
static int msk_detach(device_t);

static void msk_tick(void *);
static void msk_legacy_intr(void *);
static int msk_intr(void *);
static void msk_int_task(void *, int);
static void msk_intr_phy(struct msk_if_softc *);
static void msk_intr_gmac(struct msk_if_softc *);
static __inline void msk_rxput(struct msk_if_softc *);
static int msk_handle_events(struct msk_softc *);
static void msk_handle_hwerr(struct msk_if_softc *, uint32_t);
static void msk_intr_hwerr(struct msk_softc *);
#ifndef __NO_STRICT_ALIGNMENT
static __inline void msk_fixup_rx(struct mbuf *);
#endif
static void msk_rxeof(struct msk_if_softc *, uint32_t, int);
static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, int);
static void msk_txeof(struct msk_if_softc *, int);
static int msk_encap(struct msk_if_softc *, struct mbuf **);
static void msk_tx_task(void *, int);
static void msk_start(struct ifnet *);
static int msk_ioctl(struct ifnet *, u_long, caddr_t);
static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
static void msk_set_rambuffer(struct msk_if_softc *);
static void msk_init(void *);
static void msk_init_locked(struct msk_if_softc *);
static void msk_stop(struct msk_if_softc *);
static void msk_watchdog(struct msk_if_softc *);
static int msk_mediachange(struct ifnet *);
static void msk_mediastatus(struct ifnet *, struct ifmediareq *);
static void msk_phy_power(struct msk_softc *, int);
static void msk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int msk_status_dma_alloc(struct msk_softc *);
static void msk_status_dma_free(struct msk_softc *);
static int msk_txrx_dma_alloc(struct msk_if_softc *);
static int msk_rx_dma_jalloc(struct msk_if_softc *);
static void msk_txrx_dma_free(struct msk_if_softc *);
static void msk_rx_dma_jfree(struct msk_if_softc *);
static int msk_init_rx_ring(struct msk_if_softc *);
static int msk_init_jumbo_rx_ring(struct msk_if_softc *);
static void msk_init_tx_ring(struct msk_if_softc *);
static __inline void msk_discard_rxbuf(struct msk_if_softc *, int);
static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
static int msk_newbuf(struct msk_if_softc *, int);
static int msk_jumbo_newbuf(struct msk_if_softc *, int);

static int msk_phy_readreg(struct msk_if_softc *, int, int);
static int msk_phy_writereg(struct msk_if_softc *, int, int, int);
static int msk_miibus_readreg(device_t, int, int);
static int msk_miibus_writereg(device_t, int, int, int);
static void msk_miibus_statchg(device_t);
static void msk_link_task(void *, int);

static void msk_setmulti(struct msk_if_softc *);
static void msk_setvlan(struct msk_if_softc *, struct ifnet *);
static void msk_setpromisc(struct msk_if_softc *);

static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS);

static device_method_t mskc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mskc_probe),
	DEVMETHOD(device_attach,	mskc_attach),
	DEVMETHOD(device_detach,	mskc_detach),
	DEVMETHOD(device_suspend,	mskc_suspend),
	DEVMETHOD(device_resume,	mskc_resume),
	DEVMETHOD(device_shutdown,	mskc_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ NULL, NULL }
};

static driver_t mskc_driver = {
	"mskc",
	mskc_methods,
	sizeof(struct msk_softc)
};

static devclass_t mskc_devclass;

static device_method_t msk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		msk_probe),
	DEVMETHOD(device_attach,	msk_attach),
	DEVMETHOD(device_detach,	msk_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	msk_miibus_readreg),
	DEVMETHOD(miibus_writereg,	msk_miibus_writereg),
	DEVMETHOD(miibus_statchg,	msk_miibus_statchg),

	{ NULL, NULL }
};

static driver_t msk_driver = {
	"msk",
	msk_methods,
	sizeof(struct msk_if_softc)
};

static devclass_t msk_devclass;

DRIVER_MODULE(mskc, pci, mskc_driver, mskc_devclass, 0, 0);
DRIVER_MODULE(msk, mskc, msk_driver, msk_devclass, 0, 0);
DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, 0, 0);

static struct resource_spec msk_res_spec_io[] = {
	{ SYS_RES_IOPORT, PCIR_BAR(1), RF_ACTIVE },
	{ -1, 0, 0 }
};

static struct resource_spec msk_res_spec_mem[] = {
	{ SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
	{ -1, 0, 0 }
};

static struct resource_spec msk_irq_spec_legacy[] = {
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0, 0 }
};

static struct resource_spec msk_irq_spec_msi[] = {
	{ SYS_RES_IRQ, 1, RF_ACTIVE },
	{ -1, 0, 0 }
};

static struct resource_spec msk_irq_spec_msi2[] = {
	{ SYS_RES_IRQ, 1, RF_ACTIVE },
	{ SYS_RES_IRQ, 2, RF_ACTIVE },
	{ -1, 0, 0 }
};

static int
msk_miibus_readreg(device_t dev, int phy, int reg)
{
	struct msk_if_softc *sc_if;

	if (phy != PHY_ADDR_MARV)
		return (0);

	sc_if = device_get_softc(dev);

	return (msk_phy_readreg(sc_if, phy, reg));
}

static int
msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
{
	struct msk_softc *sc;
	int i, val;

	sc = sc_if->msk_softc;

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

	for (i = 0; i < MSK_TIMEOUT; i++) {
		DELAY(1);
		val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
		if ((val & GM_SMI_CT_RD_VAL) != 0) {
			val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
			break;
		}
	}

	if (i == MSK_TIMEOUT) {
		if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
		val = 0;
	}

	return (val);
}

static int
msk_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct msk_if_softc *sc_if;

	if (phy != PHY_ADDR_MARV)
		return (0);

	sc_if = device_get_softc(dev);

	return (msk_phy_writereg(sc_if, phy, reg, val));
}

static int
msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
{
	struct msk_softc *sc;
	int i;

	sc = sc_if->msk_softc;

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
	for (i = 0; i < MSK_TIMEOUT; i++) {
		DELAY(1);
		if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
		    GM_SMI_CT_BUSY) == 0)
			break;
	}
	if (i == MSK_TIMEOUT)
		if_printf(sc_if->msk_ifp, "phy write timeout\n");

	return (0);
}

static void
msk_miibus_statchg(device_t dev)
{
	struct msk_if_softc *sc_if;

	sc_if = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc_if->msk_link_task);
}
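
/*
 * Deferred link-state handler.  msk_miibus_statchg() only queues this task
 * on taskqueue_swi; the task then programs speed, duplex and flow control
 * on the GMAC to match what mii(4) negotiated.  (Descriptive summary of the
 * code below, not taken from Marvell documentation.)
 */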
static void
msk_link_task(void *arg, int pending)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t gmac;

	sc_if = (struct msk_if_softc *)arg;
	sc = sc_if->msk_softc;

	MSK_IF_LOCK(sc_if);

	mii = device_get_softc(sc_if->msk_miibus);
	ifp = sc_if->msk_ifp;
	if (mii == NULL || ifp == NULL) {
		MSK_IF_UNLOCK(sc_if);
		return;
	}

	if (mii->mii_media_status & IFM_ACTIVE) {
		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
			sc_if->msk_link = 1;
	} else
		sc_if->msk_link = 0;

	if (sc_if->msk_link != 0) {
		/* Enable Tx FIFO Underrun. */
		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
		    GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
		/*
		 * Because mii(4) notifies msk(4) whenever it detects a link
		 * status change, there is no need to enable automatic
		 * speed/flow-control/duplex updates.
		 */
		gmac = GM_GPCR_AU_ALL_DIS;
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_SX:
		case IFM_1000_T:
			gmac |= GM_GPCR_SPEED_1000;
			break;
		case IFM_100_TX:
			gmac |= GM_GPCR_SPEED_100;
			break;
		case IFM_10_T:
			break;
		}

		if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
			gmac |= GM_GPCR_DUP_FULL;
		/* Disable Rx flow control. */
		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) == 0)
			gmac |= GM_GPCR_FC_RX_DIS;
		/* Disable Tx flow control. */
		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) == 0)
			gmac |= GM_GPCR_FC_TX_DIS;
		gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
		/* Read again to ensure writing. */
		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);

		gmac = GMC_PAUSE_ON;
		if (((mii->mii_media_active & IFM_GMASK) &
		    (IFM_FLAG0 | IFM_FLAG1)) == 0)
			gmac = GMC_PAUSE_OFF;
		/* Disable pause for 10/100 Mbps in half-duplex mode. */
		if ((((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) &&
		    (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX ||
		    IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T))
			gmac = GMC_PAUSE_OFF;
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);

		/* Enable PHY interrupt for FIFO underrun/overflow. */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV,
		    PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
	} else {
		/*
		 * Link state changed to down.
		 * Disable PHY interrupts.
		 */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
		/* Disable Rx/Tx MAC. */
		gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
		/* Read again to ensure writing. */
		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
	}

	MSK_IF_UNLOCK(sc_if);
}
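
/*
 * Program the GMAC multicast filter.  The hardware uses a 64-bit hash
 * table: the low 6 bits of the big-endian CRC of each multicast address
 * select one bit, and the table is written through the four 16-bit
 * GM_MC_ADDR_H1..H4 registers.  (Summary of the code below.)
 */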
static void
msk_setmulti(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t mchash[2];
	uint32_t crc;
	uint16_t mode;

	sc = sc_if->msk_softc;

	MSK_IF_LOCK_ASSERT(sc_if);

	ifp = sc_if->msk_ifp;

	bzero(mchash, sizeof(mchash));
	mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
	mode |= GM_RXCR_UCF_ENA;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
		else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
			mchash[0] = 0xffff;
			mchash[1] = 0xffff;
		}
	} else {
		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN);
			/* Just want the 6 least significant bits. */
			crc &= 0x3f;
			/* Set the corresponding bit in the hash table. */
			mchash[crc >> 5] |= 1 << (crc & 0x1f);
		}
		IF_ADDR_UNLOCK(ifp);
		mode |= GM_RXCR_MCF_ENA;
	}

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
	    mchash[0] & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
	    (mchash[0] >> 16) & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
	    mchash[1] & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
	    (mchash[1] >> 16) & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
}

static void
msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_ON);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_ON);
	} else {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_OFF);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_OFF);
	}
}

static void
msk_setpromisc(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;
	uint16_t mode;

	MSK_IF_LOCK_ASSERT(sc_if);

	sc = sc_if->msk_softc;
	ifp = sc_if->msk_ifp;

	mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
	if (ifp->if_flags & IFF_PROMISC)
		mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	else
		mode |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
}
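
/*
 * Populate the Rx ring and hand it to the hardware.  Every descriptor gets
 * a fresh mbuf cluster; the final write of the producer index
 * (MSK_RX_RING_CNT - 1) to the prefetch unit's PUT index register hands
 * the newly filled descriptors to the chip.  (Summary of the code below.)
 */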
static int
msk_init_rx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_rxdesc *rxd;
	int i, prod;

	MSK_IF_LOCK_ASSERT(sc_if);

	sc_if->msk_cdata.msk_rx_cons = 0;
	sc_if->msk_cdata.msk_rx_prod = 0;
	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
	prod = sc_if->msk_cdata.msk_rx_prod;
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_rx_ring[prod];
		if (msk_newbuf(sc_if, prod) != 0)
			return (ENOBUFS);
		MSK_INC(prod, MSK_RX_RING_CNT);
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
	    sc_if->msk_cdata.msk_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update prefetch unit. */
	sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1;
	CSR_WRITE_2(sc_if->msk_softc,
	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
	    sc_if->msk_cdata.msk_rx_prod);

	return (0);
}

static int
msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_rxdesc *rxd;
	int i, prod;

	MSK_IF_LOCK_ASSERT(sc_if);

	sc_if->msk_cdata.msk_rx_cons = 0;
	sc_if->msk_cdata.msk_rx_prod = 0;
	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_jumbo_rx_ring,
	    sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
	prod = sc_if->msk_cdata.msk_rx_prod;
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
		if (msk_jumbo_newbuf(sc_if, prod) != 0)
			return (ENOBUFS);
		MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1;
	CSR_WRITE_2(sc_if->msk_softc,
	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
	    sc_if->msk_cdata.msk_rx_prod);

	return (0);
}

static void
msk_init_tx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_txdesc *txd;
	int i;

	sc_if->msk_cdata.msk_tso_mtu = 0;
	sc_if->msk_cdata.msk_tx_prod = 0;
	sc_if->msk_cdata.msk_tx_cons = 0;
	sc_if->msk_cdata.msk_tx_cnt = 0;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_le = &rd->msk_tx_ring[i];
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
	    sc_if->msk_cdata.msk_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static __inline void
msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;

	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
	m = rxd->rx_m;
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}

static __inline void
msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;

	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
	m = rxd->rx_m;
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}
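
/*
 * Attach a new mbuf cluster to an Rx descriptor.  The cluster is first
 * loaded into the spare DMA map; only if that succeeds are the maps
 * swapped, so on allocation or mapping failure the old buffer stays in
 * place and the ring remains valid.  (Summary of the code below.)
 */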
static int
msk_newbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = MCLBYTES;
	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
		m_adj(m, ETHER_ALIGN);
#ifndef __NO_STRICT_ALIGNMENT
	else
		m_adj(m, MSK_RX_BUF_ALIGN);
#endif

	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag,
	    sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
	sc_if->msk_cdata.msk_rx_sparemap = map;
	bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
	rx_le->msk_control =
	    htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);

	return (0);
}

static int
msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
	if (m == NULL)
		return (ENOBUFS);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MJUM9BYTES;
	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
		m_adj(m, ETHER_ALIGN);
#ifndef __NO_STRICT_ALIGNMENT
	else
		m_adj(m, MSK_RX_BUF_ALIGN);
#endif

	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
		    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
		    rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
	sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
	rx_le->msk_control =
	    htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);

	return (0);
}

/*
 * Set media options.
 */
static int
msk_mediachange(struct ifnet *ifp)
{
	struct msk_if_softc *sc_if;
	struct mii_data *mii;

	sc_if = ifp->if_softc;

	MSK_IF_LOCK(sc_if);
	mii = device_get_softc(sc_if->msk_miibus);
	mii_mediachg(mii);
	MSK_IF_UNLOCK(sc_if);

	return (0);
}

/*
 * Report current media status.
 */
static void
msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct msk_if_softc *sc_if;
	struct mii_data *mii;

	sc_if = ifp->if_softc;
	MSK_IF_LOCK(sc_if);
	mii = device_get_softc(sc_if->msk_miibus);

	mii_pollstat(mii);
	MSK_IF_UNLOCK(sc_if);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static int
msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct msk_if_softc *sc_if;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc_if = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	switch(command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			if ((sc_if->msk_flags & MSK_FLAG_NOJUMBO) != 0 &&
			    ifr->ifr_mtu > ETHERMTU)
				error = EINVAL;
			else {
				MSK_IF_LOCK(sc_if);
				ifp->if_mtu = ifr->ifr_mtu;
				if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
					msk_init_locked(sc_if);
				MSK_IF_UNLOCK(sc_if);
			}
		}
		break;
	case SIOCSIFFLAGS:
		MSK_IF_LOCK(sc_if);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc_if->msk_if_flags)
				    & IFF_PROMISC) != 0) {
					msk_setpromisc(sc_if);
					msk_setmulti(sc_if);
				}
			} else {
				if (sc_if->msk_detach == 0)
					msk_init_locked(sc_if);
			}
		} else {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
				msk_stop(sc_if);
		}
		sc_if->msk_if_flags = ifp->if_flags;
		MSK_IF_UNLOCK(sc_if);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		MSK_IF_LOCK(sc_if);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			msk_setmulti(sc_if);
		MSK_IF_UNLOCK(sc_if);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc_if->msk_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		MSK_IF_LOCK(sc_if);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
			    (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
				ifp->if_hwassist |= MSK_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
		}
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			msk_setvlan(sc_if, ifp);
		}

		if ((mask & IFCAP_TSO4) != 0) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if ((IFCAP_TSO4 & ifp->if_capenable) != 0 &&
			    (IFCAP_TSO4 & ifp->if_capabilities) != 0)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (ifp->if_mtu > ETHERMTU &&
		    sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
			/*
			 * On Yukon EC Ultra, TSO and checksum offload are
			 * not supported for jumbo frames.
			 */
			ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO);
			ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM);
		}

		VLAN_CAPABILITIES(ifp);
		MSK_IF_UNLOCK(sc_if);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static int
mskc_probe(device_t dev)
{
	struct msk_product *mp;
	uint16_t vendor, devid;
	int i;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	mp = msk_products;
	for (i = 0; i < sizeof(msk_products)/sizeof(msk_products[0]);
	    i++, mp++) {
		if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
			device_set_desc(dev, mp->msk_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}
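
/*
 * Split the adapter's internal SRAM between the Rx and Tx queues of each
 * port, giving Rx roughly two thirds of the space.  Worked example (values
 * chosen for illustration): a chip reporting B2_E_0 == 12 has 12 * 4 = 48KB
 * of SRAM, so the Rx queue gets rounddown(48 * 1024 * 2 / 3, 1024) = 32768
 * bytes and the Tx queue gets the remaining 16384 bytes.
 */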
static int
mskc_setup_rambuffer(struct msk_softc *sc)
{
	int next;
	int i;

	/* Get adapter SRAM size. */
	sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4;
	if (bootverbose)
		device_printf(sc->msk_dev,
		    "RAM buffer size : %dKB\n", sc->msk_ramsize);
	if (sc->msk_ramsize == 0)
		return (0);

	sc->msk_pflags |= MSK_FLAG_RAMBUF;
	/*
	 * Give the receiver 2/3 of the memory and round down to a multiple
	 * of 1024.  The Tx/Rx RAM buffer size of the Yukon II should be a
	 * multiple of 1024.
	 */
	sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
	sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
	for (i = 0, next = 0; i < sc->msk_num_port; i++) {
		sc->msk_rxqstart[i] = next;
		sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
		next = sc->msk_rxqend[i] + 1;
		sc->msk_txqstart[i] = next;
		sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
		next = sc->msk_txqend[i] + 1;
		if (bootverbose) {
			device_printf(sc->msk_dev,
			    "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
			    sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
			    sc->msk_rxqend[i]);
			device_printf(sc->msk_dev,
			    "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
			    sc->msk_txqsize / 1024, sc->msk_txqstart[i],
			    sc->msk_txqend[i]);
		}
	}

	return (0);
}
static void
msk_phy_power(struct msk_softc *sc, int mode)
{
	uint32_t val;
	int i;

	switch (mode) {
	case MSK_PHY_POWERUP:
		/* Switch power to VCC (WA for VAUX problem). */
		CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
		/* Disable Core Clock Division, set Clock Select to 0. */
		CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);

		val = 0;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
			    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
			    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		}
		/*
		 * Enable PCI & Core Clock, enable clock gating for both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);

		val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
		val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Deassert Low Power for 1st PHY. */
			val |= PCI_Y2_PHY1_COMA;
			if (sc->msk_num_port > 1)
				val |= PCI_Y2_PHY2_COMA;
		} else if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
			uint32_t our;

			CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);

			/* Enable all clocks. */
			pci_write_config(sc->msk_dev, PCI_OUR_REG_3, 0, 4);
			our = pci_read_config(sc->msk_dev, PCI_OUR_REG_4, 4);
			our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN|
			    PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST);
			/* Set all bits to 0 except bits 15..12. */
			pci_write_config(sc->msk_dev, PCI_OUR_REG_4, our, 4);
			/* Set to default value. */
			pci_write_config(sc->msk_dev, PCI_OUR_REG_5, 0, 4);
		}
		/* Release PHY from PowerDown/COMA mode. */
		pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
		for (i = 0; i < sc->msk_num_port; i++) {
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_SET);
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_CLR);
		}
		break;
	case MSK_PHY_POWERDOWN:
		val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
		val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			val &= ~PCI_Y2_PHY1_COMA;
			if (sc->msk_num_port > 1)
				val &= ~PCI_Y2_PHY2_COMA;
		}
		pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);

		val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
		    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
		    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = 0;
		}
		/*
		 * Disable PCI & Core Clock, disable clock gating for
		 * both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
		CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
		break;
	default:
		break;
	}
}

static void
mskc_reset(struct msk_softc *sc)
{
	bus_addr_t addr;
	uint16_t status;
	uint32_t val;
	int i;

	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);

	/* Disable ASF. */
	if (sc->msk_hw_id < CHIP_ID_YUKON_XL) {
		CSR_WRITE_4(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
		CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
	}
	/*
	 * Since we disabled ASF, S/W reset is required for Power Management.
	 */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);

	/* Clear all error bits in the PCI status register. */
	status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);

	pci_write_config(sc->msk_dev, PCIR_STATUS, status |
	    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
	    PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
	CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);

	switch (sc->msk_bustype) {
	case MSK_PEX_BUS:
		/* Clear all PEX errors. */
		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
		val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
		if ((val & PEX_RX_OV) != 0) {
			sc->msk_intrmask &= ~Y2_IS_HW_ERR;
			sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
		}
		break;
	case MSK_PCI_BUS:
	case MSK_PCIX_BUS:
		/* Set Cache Line Size to 2 (8 bytes) if configured to 0. */
		val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
		if (val == 0)
			pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
		if (sc->msk_bustype == MSK_PCIX_BUS) {
			/* Set Cache Line Size opt. */
			val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
			val |= PCI_CLS_OPT;
			pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
		}
		break;
	}
	/* Set PHY power state. */
	msk_phy_power(sc, MSK_PHY_POWERUP);

	/* Reset GPHY/GMAC Control */
	for (i = 0; i < sc->msk_num_port; i++) {
		/* GPHY Control reset. */
		CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
		CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
		/* GMAC Control reset. */
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
	}
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	/* LED On. */
	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);

	/* Clear TWSI IRQ. */
	CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);

	/* Turn off hardware timer. */
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);

	/* Turn off descriptor polling. */
	CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);

	/* Turn off time stamps. */
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

	/* Configure timeout values. */
	for (i = 0; i < sc->msk_num_port; i++) {
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
		    MSK_RI_TO_53);
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);

	/*
	 * On dual port PCI-X cards, there is a problem where status
	 * can be received out of order due to split transactions.
	 */
	if (sc->msk_bustype == MSK_PCIX_BUS && sc->msk_num_port > 1) {
		int pcix;
		uint16_t pcix_cmd;

		if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &pcix) == 0) {
			pcix_cmd = pci_read_config(sc->msk_dev, pcix + 2, 2);
			/* Clear Max Outstanding Split Transactions. */
			pcix_cmd &= ~0x70;
			CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
			pci_write_config(sc->msk_dev, pcix + 2, pcix_cmd, 2);
			CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
		}
	}
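	/*
	 * PCI Express tuning.  The Max Read Request Size field encodes
	 * sizes as 128 << n, so PEX_DC_MAX_RD_RQ_SIZE(5) selects
	 * 128 << 5 = 4096 bytes.  The link width check below only warns
	 * when the negotiated width differs from the maximum the device
	 * advertises.  (Explanatory note on the code below.)
	 */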
	if (sc->msk_bustype == MSK_PEX_BUS) {
		uint16_t v, width;

		v = pci_read_config(sc->msk_dev, PEX_DEV_CTRL, 2);
		/* Change Max. Read Request Size to 4096 bytes. */
		v &= ~PEX_DC_MAX_RRS_MSK;
		v |= PEX_DC_MAX_RD_RQ_SIZE(5);
		pci_write_config(sc->msk_dev, PEX_DEV_CTRL, v, 2);
		width = pci_read_config(sc->msk_dev, PEX_LNK_STAT, 2);
		width = (width & PEX_LS_LINK_WI_MSK) >> 4;
		v = pci_read_config(sc->msk_dev, PEX_LNK_CAP, 2);
		v = (v & PEX_LS_LINK_WI_MSK) >> 4;
		if (v != width)
			device_printf(sc->msk_dev,
			    "negotiated width of link(x%d) != "
			    "max. width of link(x%d)\n", width, v);
	}

	/* Clear status list. */
	bzero(sc->msk_stat_ring,
	    sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT);
	sc->msk_stat_cons = 0;
	bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
	/* Set the status list base address. */
	addr = sc->msk_stat_ring_paddr;
	CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
	CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
	/* Set the status list last index. */
	CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1);
	if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
	    sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
		/* WA for dev. #4.3 */
		CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
		/* WA for dev. #4.18 */
		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
		CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
	} else {
		CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
		else
			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
		CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
	}
	/*
	 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
	 */
	CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));

	/* Enable status unit. */
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);

	CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
}

static int
msk_probe(device_t dev)
{
	struct msk_softc *sc;
	char desc[100];

	sc = device_get_softc(device_get_parent(dev));
	/*
	 * Not much to do here.  We always know there will be
	 * at least one GMAC present, and if there are two,
	 * mskc_attach() will create a second device instance
	 * for us.
	 */
	snprintf(desc, sizeof(desc),
	    "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
	    model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
	    sc->msk_hw_rev);
	device_set_desc_copy(dev, desc);

	return (BUS_PROBE_DEFAULT);
}

static int
msk_attach(device_t dev)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	struct ifnet *ifp;
	int i, port, error;
	uint8_t eaddr[6];

	if (dev == NULL)
		return (EINVAL);

	error = 0;
	sc_if = device_get_softc(dev);
	sc = device_get_softc(device_get_parent(dev));
	port = *(int *)device_get_ivars(dev);

	sc_if->msk_if_dev = dev;
	sc_if->msk_port = port;
	sc_if->msk_softc = sc;
	sc_if->msk_flags = sc->msk_pflags;
	sc->msk_if[port] = sc_if;
	/* Setup Tx/Rx queue register offsets. */
	if (port == MSK_PORT_A) {
		sc_if->msk_txq = Q_XA1;
		sc_if->msk_txsq = Q_XS1;
		sc_if->msk_rxq = Q_R1;
	} else {
		sc_if->msk_txq = Q_XA2;
		sc_if->msk_txsq = Q_XS2;
		sc_if->msk_rxq = Q_R2;
	}

	callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0);
	TASK_INIT(&sc_if->msk_link_task, 0, msk_link_task, sc_if);

	/* Disable jumbo frames for Yukon FE. */
	if (sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_FE)
		sc_if->msk_flags |= MSK_FLAG_NOJUMBO;

	if ((error = msk_txrx_dma_alloc(sc_if)) != 0)
		goto fail;
	msk_rx_dma_jalloc(sc_if);

	ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc_if->msk_if_dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	/*
	 * The IFCAP_RXCSUM capability is intentionally disabled as the
	 * hardware has a serious bug in Rx checksum offload for all
	 * Yukon II family hardware.  There seems to be a workaround that
	 * makes it work sometimes, but the workaround also has to check
	 * OP code sequences to verify whether the OP code is correct, and
	 * sometimes it has to compute the IP/TCP/UDP checksum in the
	 * driver to verify the checksum computed by the hardware.  If the
	 * checksum has to be verified in software anyway, there is little
	 * point in having the hardware compute it, so no time is spent
	 * making Rx checksum offload work on Yukon II hardware.
	 */
	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4;
	ifp->if_hwassist = MSK_CSUM_FEATURES | CSUM_TSO;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_ioctl = msk_ioctl;
	ifp->if_start = msk_start;
	ifp->if_timer = 0;
	ifp->if_watchdog = NULL;
	ifp->if_init = msk_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = MSK_TX_RING_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	TASK_INIT(&sc_if->msk_tx_task, 1, msk_tx_task, ifp);

	/*
	 * Get station address for this interface.  Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra.  The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode.  Currently we don't
	 * use this extra address.
	 */
	MSK_IF_LOCK(sc_if);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);

	/*
	 * Call MI attach routine.  Can't hold locks when calling into ether_*.
	 */
	MSK_IF_UNLOCK(sc_if);
	ether_ifattach(ifp, eaddr);
	MSK_IF_LOCK(sc_if);

	/*
	 * VLAN capability setup
	 * Due to Tx checksum offload hardware bugs, msk(4) manually
	 * computes the checksum for short frames.  For VLAN tagged frames
	 * this workaround does not work, so disable checksum offload
	 * for VLAN interfaces.
	 */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * Do miibus setup.
	 */
	MSK_IF_UNLOCK(sc_if);
	error = mii_phy_probe(dev, &sc_if->msk_miibus, msk_mediachange,
	    msk_mediastatus);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev, "no PHY found!\n");
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}

fail:
	if (error != 0) {
		/* Access should be ok even though lock has been dropped */
		sc->msk_if[port] = NULL;
		msk_detach(dev);
	}

	return (error);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
mskc_attach(device_t dev)
{
	struct msk_softc *sc;
	int error, msic, msir, *port, reg;

	sc = device_get_softc(dev);
	sc->msk_dev = dev;
	mtx_init(&sc->msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/* Allocate I/O resource */
#ifdef MSK_USEIOSPACE
	sc->msk_res_spec = msk_res_spec_io;
#else
	sc->msk_res_spec = msk_res_spec_mem;
#endif
	sc->msk_irq_spec = msk_irq_spec_legacy;
	error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
	if (error) {
		if (sc->msk_res_spec == msk_res_spec_mem)
			sc->msk_res_spec = msk_res_spec_io;
		else
			sc->msk_res_spec = msk_res_spec_mem;
		error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res);
		if (error) {
			device_printf(dev, "couldn't allocate %s resources\n",
			    sc->msk_res_spec == msk_res_spec_mem ? "memory" :
			    "I/O");
			mtx_destroy(&sc->msk_mtx);
			return (ENXIO);
		}
	}

	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
	sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
	sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
	/* Bail out if chip is not recognized. */
	if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
	    sc->msk_hw_id > CHIP_ID_YUKON_FE) {
		device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
		    sc->msk_hw_id, sc->msk_hw_rev);
		mtx_destroy(&sc->msk_mtx);
		return (ENXIO);
	}

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
	    &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
	    "max number of Rx events to process");

	sc->msk_process_limit = MSK_PROC_DEFAULT;
	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "process_limit", &sc->msk_process_limit);
	if (error == 0) {
		if (sc->msk_process_limit < MSK_PROC_MIN ||
		    sc->msk_process_limit > MSK_PROC_MAX) {
			device_printf(dev, "process_limit value out of range; "
			    "using default: %d\n", MSK_PROC_DEFAULT);
			sc->msk_process_limit = MSK_PROC_DEFAULT;
		}
	}

	/* Soft reset. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
	sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
	if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
		sc->msk_coppertype = 0;
	else
		sc->msk_coppertype = 1;
	/* Check number of MACs. */
	sc->msk_num_port = 1;
	if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
	    CFG_DUAL_MAC_MSK) {
		if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
			sc->msk_num_port++;
	}
	/* Check bus type. */
	if (pci_find_extcap(sc->msk_dev, PCIY_EXPRESS, &reg) == 0)
		sc->msk_bustype = MSK_PEX_BUS;
	else if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &reg) == 0)
		sc->msk_bustype = MSK_PCIX_BUS;
	else
		sc->msk_bustype = MSK_PCI_BUS;

	switch (sc->msk_hw_id) {
	case CHIP_ID_YUKON_EC:
	case CHIP_ID_YUKON_EC_U:
		sc->msk_clock = 125;	/* 125 MHz */
		break;
	case CHIP_ID_YUKON_FE:
		sc->msk_clock = 100;	/* 100 MHz */
		break;
	case CHIP_ID_YUKON_XL:
		sc->msk_clock = 156;	/* 156 MHz */
		break;
	default:
		sc->msk_clock = 156;	/* 156 MHz */
		break;
	}

	/* Allocate IRQ resources. */
	msic = pci_msi_count(dev);
	if (bootverbose)
		device_printf(dev, "MSI count : %d\n", msic);
	/*
	 * The Yukon II reports it can handle two messages, one for each
	 * possible port.  We go ahead and allocate two messages and only
	 * setup a handler for both if we have a dual port card.
	 *
	 * XXX: I haven't untangled the interrupt handler to handle dual
	 * port cards with separate MSI messages, so for now I disable MSI
	 * on dual port cards.
	 */
	if (legacy_intr != 0)
		msi_disable = 1;
	if (msi_disable == 0) {
		switch (msic) {
		case 2:
		case 1: /* 88E8058 reports 1 MSI message */
			msir = msic;
			if (sc->msk_num_port == 1 &&
			    pci_alloc_msi(dev, &msir) == 0) {
				if (msic == msir) {
					sc->msk_msi = 1;
					sc->msk_irq_spec = msic == 2 ?
					    msk_irq_spec_msi2 :
					    msk_irq_spec_msi;
				} else
					pci_release_msi(dev);
			}
			break;
		default:
			device_printf(dev,
			    "Unexpected number of MSI messages : %d\n", msic);
			break;
		}
	}

	error = bus_alloc_resources(dev, sc->msk_irq_spec, sc->msk_irq);
	if (error) {
		device_printf(dev, "couldn't allocate IRQ resources\n");
		goto fail;
	}

	if ((error = msk_status_dma_alloc(sc)) != 0)
		goto fail;

	/* Set base interrupt mask. */
	sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
	sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
	    Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;

	/* Reset the adapter. */
	mskc_reset(sc);

	if ((error = mskc_setup_rambuffer(sc)) != 0)
		goto fail;

	sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
	if (sc->msk_devs[MSK_PORT_A] == NULL) {
		device_printf(dev, "failed to add child for PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	port = malloc(sizeof(int), M_DEVBUF, M_WAITOK);
	if (port == NULL) {
		device_printf(dev, "failed to allocate memory for "
		    "ivars of PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	*port = MSK_PORT_A;
	device_set_ivars(sc->msk_devs[MSK_PORT_A], port);

	if (sc->msk_num_port > 1) {
		sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
		if (sc->msk_devs[MSK_PORT_B] == NULL) {
			device_printf(dev, "failed to add child for PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		port = malloc(sizeof(int), M_DEVBUF, M_WAITOK);
		if (port == NULL) {
			device_printf(dev, "failed to allocate memory for "
			    "ivars of PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		*port = MSK_PORT_B;
		device_set_ivars(sc->msk_devs[MSK_PORT_B], port);
	}

	error = bus_generic_attach(dev);
	if (error) {
		device_printf(dev, "failed to attach port(s)\n");
		goto fail;
	}

	/* Hook interrupt last to avoid having to lock softc. */
	if (legacy_intr)
		error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
		    INTR_MPSAFE, NULL, msk_legacy_intr, sc,
		    &sc->msk_intrhand[0]);
	else {
		TASK_INIT(&sc->msk_int_task, 0, msk_int_task, sc);
		sc->msk_tq = taskqueue_create_fast("msk_taskq", M_WAITOK,
		    taskqueue_thread_enqueue, &sc->msk_tq);
		taskqueue_start_threads(&sc->msk_tq, 1, PI_NET, "%s taskq",
		    device_get_nameunit(sc->msk_dev));
		error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
		    INTR_MPSAFE, msk_intr, NULL, sc, &sc->msk_intrhand[0]);
	}

	if (error != 0) {
		device_printf(dev, "couldn't set up interrupt handler\n");
		if (legacy_intr == 0)
			taskqueue_free(sc->msk_tq);
		sc->msk_tq = NULL;
		goto fail;
	}
fail:
	if (error != 0)
		mskc_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources.  This can be called any
 * time after the mutex has been initialized.  It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
msk_detach(device_t dev)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if;
	struct ifnet *ifp;

	sc_if = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc_if->msk_softc->msk_mtx),
	    ("msk mutex not initialized in msk_detach"));
	MSK_IF_LOCK(sc_if);

	ifp = sc_if->msk_ifp;
	if (device_is_attached(dev)) {
		/* XXX */
		sc_if->msk_detach = 1;
		msk_stop(sc_if);
		/* Can't hold locks while calling detach. */
		MSK_IF_UNLOCK(sc_if);
		callout_drain(&sc_if->msk_tick_ch);
		taskqueue_drain(taskqueue_fast, &sc_if->msk_tx_task);
		taskqueue_drain(taskqueue_swi, &sc_if->msk_link_task);
		ether_ifdetach(ifp);
		MSK_IF_LOCK(sc_if);
	}

	/*
	 * We're generally called from mskc_detach() which is using
	 * device_delete_child() to get to here.  It's already trashed
	 * miibus for us, so don't do it here or we'll panic.
1820 * 1821 * if (sc_if->msk_miibus != NULL) { 1822 * device_delete_child(dev, sc_if->msk_miibus); 1823 * sc_if->msk_miibus = NULL; 1824 * } 1825 */ 1826 1827 msk_rx_dma_jfree(sc_if); 1828 msk_txrx_dma_free(sc_if); 1829 bus_generic_detach(dev); 1830 1831 if (ifp) 1832 if_free(ifp); 1833 sc = sc_if->msk_softc; 1834 sc->msk_if[sc_if->msk_port] = NULL; 1835 MSK_IF_UNLOCK(sc_if); 1836 1837 return (0); 1838 } 1839 1840 static int 1841 mskc_detach(device_t dev) 1842 { 1843 struct msk_softc *sc; 1844 1845 sc = device_get_softc(dev); 1846 KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized")); 1847 1848 if (device_is_alive(dev)) { 1849 if (sc->msk_devs[MSK_PORT_A] != NULL) { 1850 free(device_get_ivars(sc->msk_devs[MSK_PORT_A]), 1851 M_DEVBUF); 1852 device_delete_child(dev, sc->msk_devs[MSK_PORT_A]); 1853 } 1854 if (sc->msk_devs[MSK_PORT_B] != NULL) { 1855 free(device_get_ivars(sc->msk_devs[MSK_PORT_B]), 1856 M_DEVBUF); 1857 device_delete_child(dev, sc->msk_devs[MSK_PORT_B]); 1858 } 1859 bus_generic_detach(dev); 1860 } 1861 1862 /* Disable all interrupts. */ 1863 CSR_WRITE_4(sc, B0_IMSK, 0); 1864 CSR_READ_4(sc, B0_IMSK); 1865 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 1866 CSR_READ_4(sc, B0_HWE_IMSK); 1867 1868 /* LED Off. */ 1869 CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF); 1870 1871 /* Put hardware reset. */ 1872 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 1873 1874 msk_status_dma_free(sc); 1875 1876 if (legacy_intr == 0 && sc->msk_tq != NULL) { 1877 taskqueue_drain(sc->msk_tq, &sc->msk_int_task); 1878 taskqueue_free(sc->msk_tq); 1879 sc->msk_tq = NULL; 1880 } 1881 if (sc->msk_intrhand[0]) { 1882 bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand[0]); 1883 sc->msk_intrhand[0] = NULL; 1884 } 1885 if (sc->msk_intrhand[1]) { 1886 bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand[0]); 1887 sc->msk_intrhand[1] = NULL; 1888 } 1889 bus_release_resources(dev, sc->msk_irq_spec, sc->msk_irq); 1890 if (sc->msk_msi) 1891 pci_release_msi(dev); 1892 bus_release_resources(dev, sc->msk_res_spec, sc->msk_res); 1893 mtx_destroy(&sc->msk_mtx); 1894 1895 return (0); 1896 } 1897 1898 struct msk_dmamap_arg { 1899 bus_addr_t msk_busaddr; 1900 }; 1901 1902 static void 1903 msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1904 { 1905 struct msk_dmamap_arg *ctx; 1906 1907 if (error != 0) 1908 return; 1909 ctx = arg; 1910 ctx->msk_busaddr = segs[0].ds_addr; 1911 } 1912 1913 /* Create status DMA region. */ 1914 static int 1915 msk_status_dma_alloc(struct msk_softc *sc) 1916 { 1917 struct msk_dmamap_arg ctx; 1918 int error; 1919 1920 error = bus_dma_tag_create( 1921 bus_get_dma_tag(sc->msk_dev), /* parent */ 1922 MSK_STAT_ALIGN, 0, /* alignment, boundary */ 1923 BUS_SPACE_MAXADDR, /* lowaddr */ 1924 BUS_SPACE_MAXADDR, /* highaddr */ 1925 NULL, NULL, /* filter, filterarg */ 1926 MSK_STAT_RING_SZ, /* maxsize */ 1927 1, /* nsegments */ 1928 MSK_STAT_RING_SZ, /* maxsegsize */ 1929 0, /* flags */ 1930 NULL, NULL, /* lockfunc, lockarg */ 1931 &sc->msk_stat_tag); 1932 if (error != 0) { 1933 device_printf(sc->msk_dev, 1934 "failed to create status DMA tag\n"); 1935 return (error); 1936 } 1937 1938 /* Allocate DMA'able memory and load the DMA map for status ring. 
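 * The ring is allocated coherent and zero-filled; msk_dmamap_cb() records
 * the single segment's bus address, which is kept in msk_stat_ring_paddr.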
*/ 1939 error = bus_dmamem_alloc(sc->msk_stat_tag, 1940 (void **)&sc->msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT | 1941 BUS_DMA_ZERO, &sc->msk_stat_map); 1942 if (error != 0) { 1943 device_printf(sc->msk_dev, 1944 "failed to allocate DMA'able memory for status ring\n"); 1945 return (error); 1946 } 1947 1948 ctx.msk_busaddr = 0; 1949 error = bus_dmamap_load(sc->msk_stat_tag, 1950 sc->msk_stat_map, sc->msk_stat_ring, MSK_STAT_RING_SZ, 1951 msk_dmamap_cb, &ctx, 0); 1952 if (error != 0) { 1953 device_printf(sc->msk_dev, 1954 "failed to load DMA'able memory for status ring\n"); 1955 return (error); 1956 } 1957 sc->msk_stat_ring_paddr = ctx.msk_busaddr; 1958 1959 return (0); 1960 } 1961 1962 static void 1963 msk_status_dma_free(struct msk_softc *sc) 1964 { 1965 1966 /* Destroy status block. */ 1967 if (sc->msk_stat_tag) { 1968 if (sc->msk_stat_map) { 1969 bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map); 1970 if (sc->msk_stat_ring) { 1971 bus_dmamem_free(sc->msk_stat_tag, 1972 sc->msk_stat_ring, sc->msk_stat_map); 1973 sc->msk_stat_ring = NULL; 1974 } 1975 sc->msk_stat_map = NULL; 1976 } 1977 bus_dma_tag_destroy(sc->msk_stat_tag); 1978 sc->msk_stat_tag = NULL; 1979 } 1980 } 1981 1982 static int 1983 msk_txrx_dma_alloc(struct msk_if_softc *sc_if) 1984 { 1985 struct msk_dmamap_arg ctx; 1986 struct msk_txdesc *txd; 1987 struct msk_rxdesc *rxd; 1988 bus_size_t rxalign; 1989 int error, i; 1990 1991 /* Create parent DMA tag. */ 1992 /* 1993 * XXX 1994 * It seems that Yukon II supports full 64bits DMA operations. But 1995 * it needs two descriptors(list elements) for 64bits DMA operations. 1996 * Since we don't know what DMA address mappings(32bits or 64bits) 1997 * would be used in advance for each mbufs, we limits its DMA space 1998 * to be in range of 32bits address space. Otherwise, we should check 1999 * what DMA address is used and chain another descriptor for the 2000 * 64bits DMA operation. This also means descriptor ring size is 2001 * variable. Limiting DMA address to be in 32bit address space greatly 2002 * simplyfies descriptor handling and possibly would increase 2003 * performance a bit due to efficient handling of descriptors. 2004 * Apart from harassing checksum offloading mechanisms, it seems 2005 * it's really bad idea to use a seperate descriptor for 64bit 2006 * DMA operation to save small descriptor memory. Anyway, I've 2007 * never seen these exotic scheme on ethernet interface hardware. 2008 */ 2009 error = bus_dma_tag_create( 2010 bus_get_dma_tag(sc_if->msk_if_dev), /* parent */ 2011 1, 0, /* alignment, boundary */ 2012 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 2013 BUS_SPACE_MAXADDR, /* highaddr */ 2014 NULL, NULL, /* filter, filterarg */ 2015 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 2016 0, /* nsegments */ 2017 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 2018 0, /* flags */ 2019 NULL, NULL, /* lockfunc, lockarg */ 2020 &sc_if->msk_cdata.msk_parent_tag); 2021 if (error != 0) { 2022 device_printf(sc_if->msk_if_dev, 2023 "failed to create parent DMA tag\n"); 2024 goto fail; 2025 } 2026 /* Create tag for Tx ring. 
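 * With nsegments fixed at 1 and maxsegsize equal to MSK_TX_RING_SZ, the ring
 * always ends up in one physically contiguous, MSK_RING_ALIGN-aligned block.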
*/ 2027 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2028 MSK_RING_ALIGN, 0, /* alignment, boundary */ 2029 BUS_SPACE_MAXADDR, /* lowaddr */ 2030 BUS_SPACE_MAXADDR, /* highaddr */ 2031 NULL, NULL, /* filter, filterarg */ 2032 MSK_TX_RING_SZ, /* maxsize */ 2033 1, /* nsegments */ 2034 MSK_TX_RING_SZ, /* maxsegsize */ 2035 0, /* flags */ 2036 NULL, NULL, /* lockfunc, lockarg */ 2037 &sc_if->msk_cdata.msk_tx_ring_tag); 2038 if (error != 0) { 2039 device_printf(sc_if->msk_if_dev, 2040 "failed to create Tx ring DMA tag\n"); 2041 goto fail; 2042 } 2043 2044 /* Create tag for Rx ring. */ 2045 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2046 MSK_RING_ALIGN, 0, /* alignment, boundary */ 2047 BUS_SPACE_MAXADDR, /* lowaddr */ 2048 BUS_SPACE_MAXADDR, /* highaddr */ 2049 NULL, NULL, /* filter, filterarg */ 2050 MSK_RX_RING_SZ, /* maxsize */ 2051 1, /* nsegments */ 2052 MSK_RX_RING_SZ, /* maxsegsize */ 2053 0, /* flags */ 2054 NULL, NULL, /* lockfunc, lockarg */ 2055 &sc_if->msk_cdata.msk_rx_ring_tag); 2056 if (error != 0) { 2057 device_printf(sc_if->msk_if_dev, 2058 "failed to create Rx ring DMA tag\n"); 2059 goto fail; 2060 } 2061 2062 /* Create tag for Tx buffers. */ 2063 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2064 1, 0, /* alignment, boundary */ 2065 BUS_SPACE_MAXADDR, /* lowaddr */ 2066 BUS_SPACE_MAXADDR, /* highaddr */ 2067 NULL, NULL, /* filter, filterarg */ 2068 MSK_TSO_MAXSIZE, /* maxsize */ 2069 MSK_MAXTXSEGS, /* nsegments */ 2070 MSK_TSO_MAXSGSIZE, /* maxsegsize */ 2071 0, /* flags */ 2072 NULL, NULL, /* lockfunc, lockarg */ 2073 &sc_if->msk_cdata.msk_tx_tag); 2074 if (error != 0) { 2075 device_printf(sc_if->msk_if_dev, 2076 "failed to create Tx DMA tag\n"); 2077 goto fail; 2078 } 2079 2080 rxalign = 1; 2081 /* 2082 * Workaround hardware hang which seems to happen when Rx buffer 2083 * is not aligned on multiple of FIFO word(8 bytes). 2084 */ 2085 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0) 2086 rxalign = MSK_RX_BUF_ALIGN; 2087 /* Create tag for Rx buffers. */ 2088 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2089 rxalign, 0, /* alignment, boundary */ 2090 BUS_SPACE_MAXADDR, /* lowaddr */ 2091 BUS_SPACE_MAXADDR, /* highaddr */ 2092 NULL, NULL, /* filter, filterarg */ 2093 MCLBYTES, /* maxsize */ 2094 1, /* nsegments */ 2095 MCLBYTES, /* maxsegsize */ 2096 0, /* flags */ 2097 NULL, NULL, /* lockfunc, lockarg */ 2098 &sc_if->msk_cdata.msk_rx_tag); 2099 if (error != 0) { 2100 device_printf(sc_if->msk_if_dev, 2101 "failed to create Rx DMA tag\n"); 2102 goto fail; 2103 } 2104 2105 /* Allocate DMA'able memory and load the DMA map for Tx ring. */ 2106 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag, 2107 (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK | 2108 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_tx_ring_map); 2109 if (error != 0) { 2110 device_printf(sc_if->msk_if_dev, 2111 "failed to allocate DMA'able memory for Tx ring\n"); 2112 goto fail; 2113 } 2114 2115 ctx.msk_busaddr = 0; 2116 error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag, 2117 sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring, 2118 MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, 0); 2119 if (error != 0) { 2120 device_printf(sc_if->msk_if_dev, 2121 "failed to load DMA'able memory for Tx ring\n"); 2122 goto fail; 2123 } 2124 sc_if->msk_rdata.msk_tx_ring_paddr = ctx.msk_busaddr; 2125 2126 /* Allocate DMA'able memory and load the DMA map for Rx ring. 
*/ 2127 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_rx_ring_tag, 2128 (void **)&sc_if->msk_rdata.msk_rx_ring, BUS_DMA_WAITOK | 2129 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_rx_ring_map); 2130 if (error != 0) { 2131 device_printf(sc_if->msk_if_dev, 2132 "failed to allocate DMA'able memory for Rx ring\n"); 2133 goto fail; 2134 } 2135 2136 ctx.msk_busaddr = 0; 2137 error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag, 2138 sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring, 2139 MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, 0); 2140 if (error != 0) { 2141 device_printf(sc_if->msk_if_dev, 2142 "failed to load DMA'able memory for Rx ring\n"); 2143 goto fail; 2144 } 2145 sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr; 2146 2147 /* Create DMA maps for Tx buffers. */ 2148 for (i = 0; i < MSK_TX_RING_CNT; i++) { 2149 txd = &sc_if->msk_cdata.msk_txdesc[i]; 2150 txd->tx_m = NULL; 2151 txd->tx_dmamap = NULL; 2152 error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0, 2153 &txd->tx_dmamap); 2154 if (error != 0) { 2155 device_printf(sc_if->msk_if_dev, 2156 "failed to create Tx dmamap\n"); 2157 goto fail; 2158 } 2159 } 2160 /* Create DMA maps for Rx buffers. */ 2161 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0, 2162 &sc_if->msk_cdata.msk_rx_sparemap)) != 0) { 2163 device_printf(sc_if->msk_if_dev, 2164 "failed to create spare Rx dmamap\n"); 2165 goto fail; 2166 } 2167 for (i = 0; i < MSK_RX_RING_CNT; i++) { 2168 rxd = &sc_if->msk_cdata.msk_rxdesc[i]; 2169 rxd->rx_m = NULL; 2170 rxd->rx_dmamap = NULL; 2171 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0, 2172 &rxd->rx_dmamap); 2173 if (error != 0) { 2174 device_printf(sc_if->msk_if_dev, 2175 "failed to create Rx dmamap\n"); 2176 goto fail; 2177 } 2178 } 2179 2180 fail: 2181 return (error); 2182 } 2183 2184 static int 2185 msk_rx_dma_jalloc(struct msk_if_softc *sc_if) 2186 { 2187 struct msk_dmamap_arg ctx; 2188 struct msk_rxdesc *jrxd; 2189 bus_size_t rxalign; 2190 int error, i; 2191 2192 if (jumbo_disable != 0 || (sc_if->msk_flags & MSK_FLAG_NOJUMBO) != 0) { 2193 sc_if->msk_flags |= MSK_FLAG_NOJUMBO; 2194 device_printf(sc_if->msk_if_dev, 2195 "disabling jumbo frame support\n"); 2196 sc_if->msk_flags |= MSK_FLAG_NOJUMBO; 2197 return (0); 2198 } 2199 /* Create tag for jumbo Rx ring. */ 2200 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2201 MSK_RING_ALIGN, 0, /* alignment, boundary */ 2202 BUS_SPACE_MAXADDR, /* lowaddr */ 2203 BUS_SPACE_MAXADDR, /* highaddr */ 2204 NULL, NULL, /* filter, filterarg */ 2205 MSK_JUMBO_RX_RING_SZ, /* maxsize */ 2206 1, /* nsegments */ 2207 MSK_JUMBO_RX_RING_SZ, /* maxsegsize */ 2208 0, /* flags */ 2209 NULL, NULL, /* lockfunc, lockarg */ 2210 &sc_if->msk_cdata.msk_jumbo_rx_ring_tag); 2211 if (error != 0) { 2212 device_printf(sc_if->msk_if_dev, 2213 "failed to create jumbo Rx ring DMA tag\n"); 2214 goto jumbo_fail; 2215 } 2216 2217 rxalign = 1; 2218 /* 2219 * Workaround hardware hang which seems to happen when Rx buffer 2220 * is not aligned on multiple of FIFO word(8 bytes). 2221 */ 2222 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0) 2223 rxalign = MSK_RX_BUF_ALIGN; 2224 /* Create tag for jumbo Rx buffers. 
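 * Jumbo receive buffers come from MJUM9BYTES clusters; rxalign applies the
 * same FIFO-word (8 byte) alignment workaround used for standard Rx buffers.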
*/ 2225 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2226 rxalign, 0, /* alignment, boundary */ 2227 BUS_SPACE_MAXADDR, /* lowaddr */ 2228 BUS_SPACE_MAXADDR, /* highaddr */ 2229 NULL, NULL, /* filter, filterarg */ 2230 MJUM9BYTES, /* maxsize */ 2231 1, /* nsegments */ 2232 MJUM9BYTES, /* maxsegsize */ 2233 0, /* flags */ 2234 NULL, NULL, /* lockfunc, lockarg */ 2235 &sc_if->msk_cdata.msk_jumbo_rx_tag); 2236 if (error != 0) { 2237 device_printf(sc_if->msk_if_dev, 2238 "failed to create jumbo Rx DMA tag\n"); 2239 goto jumbo_fail; 2240 } 2241 2242 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */ 2243 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2244 (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring, 2245 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 2246 &sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2247 if (error != 0) { 2248 device_printf(sc_if->msk_if_dev, 2249 "failed to allocate DMA'able memory for jumbo Rx ring\n"); 2250 goto jumbo_fail; 2251 } 2252 2253 ctx.msk_busaddr = 0; 2254 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2255 sc_if->msk_cdata.msk_jumbo_rx_ring_map, 2256 sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ, 2257 msk_dmamap_cb, &ctx, 0); 2258 if (error != 0) { 2259 device_printf(sc_if->msk_if_dev, 2260 "failed to load DMA'able memory for jumbo Rx ring\n"); 2261 goto jumbo_fail; 2262 } 2263 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr; 2264 2265 /* Create DMA maps for jumbo Rx buffers. */ 2266 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0, 2267 &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) { 2268 device_printf(sc_if->msk_if_dev, 2269 "failed to create spare jumbo Rx dmamap\n"); 2270 goto jumbo_fail; 2271 } 2272 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 2273 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; 2274 jrxd->rx_m = NULL; 2275 jrxd->rx_dmamap = NULL; 2276 error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0, 2277 &jrxd->rx_dmamap); 2278 if (error != 0) { 2279 device_printf(sc_if->msk_if_dev, 2280 "failed to create jumbo Rx dmamap\n"); 2281 goto jumbo_fail; 2282 } 2283 } 2284 2285 return (0); 2286 2287 jumbo_fail: 2288 msk_rx_dma_jfree(sc_if); 2289 device_printf(sc_if->msk_if_dev, "disabling jumbo frame support " 2290 "due to resource shortage\n"); 2291 sc_if->msk_flags |= MSK_FLAG_NOJUMBO; 2292 return (error); 2293 } 2294 2295 static void 2296 msk_txrx_dma_free(struct msk_if_softc *sc_if) 2297 { 2298 struct msk_txdesc *txd; 2299 struct msk_rxdesc *rxd; 2300 int i; 2301 2302 /* Tx ring. */ 2303 if (sc_if->msk_cdata.msk_tx_ring_tag) { 2304 if (sc_if->msk_cdata.msk_tx_ring_map) 2305 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_ring_tag, 2306 sc_if->msk_cdata.msk_tx_ring_map); 2307 if (sc_if->msk_cdata.msk_tx_ring_map && 2308 sc_if->msk_rdata.msk_tx_ring) 2309 bus_dmamem_free(sc_if->msk_cdata.msk_tx_ring_tag, 2310 sc_if->msk_rdata.msk_tx_ring, 2311 sc_if->msk_cdata.msk_tx_ring_map); 2312 sc_if->msk_rdata.msk_tx_ring = NULL; 2313 sc_if->msk_cdata.msk_tx_ring_map = NULL; 2314 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_ring_tag); 2315 sc_if->msk_cdata.msk_tx_ring_tag = NULL; 2316 } 2317 /* Rx ring. 
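 * Teardown mirrors the Tx ring case above: unload the map, free the ring
 * memory, destroy the tag and clear the pointers along the way.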
*/ 2318 if (sc_if->msk_cdata.msk_rx_ring_tag) { 2319 if (sc_if->msk_cdata.msk_rx_ring_map) 2320 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_ring_tag, 2321 sc_if->msk_cdata.msk_rx_ring_map); 2322 if (sc_if->msk_cdata.msk_rx_ring_map && 2323 sc_if->msk_rdata.msk_rx_ring) 2324 bus_dmamem_free(sc_if->msk_cdata.msk_rx_ring_tag, 2325 sc_if->msk_rdata.msk_rx_ring, 2326 sc_if->msk_cdata.msk_rx_ring_map); 2327 sc_if->msk_rdata.msk_rx_ring = NULL; 2328 sc_if->msk_cdata.msk_rx_ring_map = NULL; 2329 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag); 2330 sc_if->msk_cdata.msk_rx_ring_tag = NULL; 2331 } 2332 /* Tx buffers. */ 2333 if (sc_if->msk_cdata.msk_tx_tag) { 2334 for (i = 0; i < MSK_TX_RING_CNT; i++) { 2335 txd = &sc_if->msk_cdata.msk_txdesc[i]; 2336 if (txd->tx_dmamap) { 2337 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag, 2338 txd->tx_dmamap); 2339 txd->tx_dmamap = NULL; 2340 } 2341 } 2342 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag); 2343 sc_if->msk_cdata.msk_tx_tag = NULL; 2344 } 2345 /* Rx buffers. */ 2346 if (sc_if->msk_cdata.msk_rx_tag) { 2347 for (i = 0; i < MSK_RX_RING_CNT; i++) { 2348 rxd = &sc_if->msk_cdata.msk_rxdesc[i]; 2349 if (rxd->rx_dmamap) { 2350 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 2351 rxd->rx_dmamap); 2352 rxd->rx_dmamap = NULL; 2353 } 2354 } 2355 if (sc_if->msk_cdata.msk_rx_sparemap) { 2356 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 2357 sc_if->msk_cdata.msk_rx_sparemap); 2358 sc_if->msk_cdata.msk_rx_sparemap = 0; 2359 } 2360 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag); 2361 sc_if->msk_cdata.msk_rx_tag = NULL; 2362 } 2363 if (sc_if->msk_cdata.msk_parent_tag) { 2364 bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag); 2365 sc_if->msk_cdata.msk_parent_tag = NULL; 2366 } 2367 } 2368 2369 static void 2370 msk_rx_dma_jfree(struct msk_if_softc *sc_if) 2371 { 2372 struct msk_rxdesc *jrxd; 2373 int i; 2374 2375 /* Jumbo Rx ring. */ 2376 if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) { 2377 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map) 2378 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2379 sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2380 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map && 2381 sc_if->msk_rdata.msk_jumbo_rx_ring) 2382 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2383 sc_if->msk_rdata.msk_jumbo_rx_ring, 2384 sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2385 sc_if->msk_rdata.msk_jumbo_rx_ring = NULL; 2386 sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL; 2387 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag); 2388 sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL; 2389 } 2390 /* Jumbo Rx buffers. 
*/ 2391 if (sc_if->msk_cdata.msk_jumbo_rx_tag) { 2392 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 2393 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; 2394 if (jrxd->rx_dmamap) { 2395 bus_dmamap_destroy( 2396 sc_if->msk_cdata.msk_jumbo_rx_tag, 2397 jrxd->rx_dmamap); 2398 jrxd->rx_dmamap = NULL; 2399 } 2400 } 2401 if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) { 2402 bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag, 2403 sc_if->msk_cdata.msk_jumbo_rx_sparemap); 2404 sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0; 2405 } 2406 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag); 2407 sc_if->msk_cdata.msk_jumbo_rx_tag = NULL; 2408 } 2409 } 2410 2411 static int 2412 msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head) 2413 { 2414 struct msk_txdesc *txd, *txd_last; 2415 struct msk_tx_desc *tx_le; 2416 struct mbuf *m; 2417 bus_dmamap_t map; 2418 bus_dma_segment_t txsegs[MSK_MAXTXSEGS]; 2419 uint32_t control, prod, si; 2420 uint16_t offset, tcp_offset, tso_mtu; 2421 int error, i, nseg, tso; 2422 2423 MSK_IF_LOCK_ASSERT(sc_if); 2424 2425 tcp_offset = offset = 0; 2426 m = *m_head; 2427 if ((m->m_pkthdr.csum_flags & (MSK_CSUM_FEATURES | CSUM_TSO)) != 0) { 2428 /* 2429 * Since mbuf has no protocol specific structure information 2430 * in it we have to inspect protocol information here to 2431 * setup TSO and checksum offload. I don't know why Marvell 2432 * made a such decision in chip design because other GigE 2433 * hardwares normally takes care of all these chores in 2434 * hardware. However, TSO performance of Yukon II is very 2435 * good such that it's worth to implement it. 2436 */ 2437 struct ether_header *eh; 2438 struct ip *ip; 2439 struct tcphdr *tcp; 2440 2441 if (M_WRITABLE(m) == 0) { 2442 /* Get a writable copy. */ 2443 m = m_dup(*m_head, M_DONTWAIT); 2444 m_freem(*m_head); 2445 if (m == NULL) { 2446 *m_head = NULL; 2447 return (ENOBUFS); 2448 } 2449 *m_head = m; 2450 } 2451 2452 offset = sizeof(struct ether_header); 2453 m = m_pullup(m, offset); 2454 if (m == NULL) { 2455 *m_head = NULL; 2456 return (ENOBUFS); 2457 } 2458 eh = mtod(m, struct ether_header *); 2459 /* Check if hardware VLAN insertion is off. */ 2460 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 2461 offset = sizeof(struct ether_vlan_header); 2462 m = m_pullup(m, offset); 2463 if (m == NULL) { 2464 *m_head = NULL; 2465 return (ENOBUFS); 2466 } 2467 } 2468 m = m_pullup(m, offset + sizeof(struct ip)); 2469 if (m == NULL) { 2470 *m_head = NULL; 2471 return (ENOBUFS); 2472 } 2473 ip = (struct ip *)(mtod(m, char *) + offset); 2474 offset += (ip->ip_hl << 2); 2475 tcp_offset = offset; 2476 /* 2477 * It seems that Yukon II has Tx checksum offload bug for 2478 * small TCP packets that's less than 60 bytes in size 2479 * (e.g. TCP window probe packet, pure ACK packet). 2480 * Common work around like padding with zeros to make the 2481 * frame minimum ethernet frame size didn't work at all. 2482 * Instead of disabling checksum offload completely we 2483 * resort to S/W checksum routine when we encounter short 2484 * TCP frames. 2485 * Short UDP packets appear to be handled correctly by 2486 * Yukon II. 
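 * When such a frame is seen, the TCP checksum is filled in by software with
 * in_cksum_skip() below and CSUM_TCP is cleared so the chip leaves it alone.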
2487 */ 2488 if (m->m_pkthdr.len < MSK_MIN_FRAMELEN && 2489 (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) { 2490 m = m_pullup(m, offset + sizeof(struct tcphdr)); 2491 if (m == NULL) { 2492 *m_head = NULL; 2493 return (ENOBUFS); 2494 } 2495 *(uint16_t *)(m->m_data + offset + 2496 m->m_pkthdr.csum_data) = in_cksum_skip(m, 2497 m->m_pkthdr.len, offset); 2498 m->m_pkthdr.csum_flags &= ~CSUM_TCP; 2499 } 2500 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2501 m = m_pullup(m, offset + sizeof(struct tcphdr)); 2502 if (m == NULL) { 2503 *m_head = NULL; 2504 return (ENOBUFS); 2505 } 2506 tcp = (struct tcphdr *)(mtod(m, char *) + offset); 2507 offset += (tcp->th_off << 2); 2508 } 2509 *m_head = m; 2510 } 2511 2512 prod = sc_if->msk_cdata.msk_tx_prod; 2513 txd = &sc_if->msk_cdata.msk_txdesc[prod]; 2514 txd_last = txd; 2515 map = txd->tx_dmamap; 2516 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map, 2517 *m_head, txsegs, &nseg, BUS_DMA_NOWAIT); 2518 if (error == EFBIG) { 2519 m = m_collapse(*m_head, M_DONTWAIT, MSK_MAXTXSEGS); 2520 if (m == NULL) { 2521 m_freem(*m_head); 2522 *m_head = NULL; 2523 return (ENOBUFS); 2524 } 2525 *m_head = m; 2526 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, 2527 map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT); 2528 if (error != 0) { 2529 m_freem(*m_head); 2530 *m_head = NULL; 2531 return (error); 2532 } 2533 } else if (error != 0) 2534 return (error); 2535 if (nseg == 0) { 2536 m_freem(*m_head); 2537 *m_head = NULL; 2538 return (EIO); 2539 } 2540 2541 /* Check number of available descriptors. */ 2542 if (sc_if->msk_cdata.msk_tx_cnt + nseg >= 2543 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) { 2544 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map); 2545 return (ENOBUFS); 2546 } 2547 2548 control = 0; 2549 tso = 0; 2550 tx_le = NULL; 2551 2552 /* Check TSO support. */ 2553 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2554 tso_mtu = offset + m->m_pkthdr.tso_segsz; 2555 if (tso_mtu != sc_if->msk_cdata.msk_tso_mtu) { 2556 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2557 tx_le->msk_addr = htole32(tso_mtu); 2558 tx_le->msk_control = htole32(OP_LRGLEN | HW_OWNER); 2559 sc_if->msk_cdata.msk_tx_cnt++; 2560 MSK_INC(prod, MSK_TX_RING_CNT); 2561 sc_if->msk_cdata.msk_tso_mtu = tso_mtu; 2562 } 2563 tso++; 2564 } 2565 /* Check if we have a VLAN tag to insert. */ 2566 if ((m->m_flags & M_VLANTAG) != 0) { 2567 if (tso == 0) { 2568 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2569 tx_le->msk_addr = htole32(0); 2570 tx_le->msk_control = htole32(OP_VLAN | HW_OWNER | 2571 htons(m->m_pkthdr.ether_vtag)); 2572 sc_if->msk_cdata.msk_tx_cnt++; 2573 MSK_INC(prod, MSK_TX_RING_CNT); 2574 } else { 2575 tx_le->msk_control |= htole32(OP_VLAN | 2576 htons(m->m_pkthdr.ether_vtag)); 2577 } 2578 control |= INS_VLAN; 2579 } 2580 /* Check if we have to handle checksum offload. 
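 * If so, queue a checksum list element: the low word of msk_addr carries
 * tcp_offset + csum_data, the high word carries tcp_offset, and UDPTCP is
 * added to the control bits for UDP frames.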
*/ 2581 if (tso == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) { 2582 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2583 tx_le->msk_addr = htole32(((tcp_offset + m->m_pkthdr.csum_data) 2584 & 0xffff) | ((uint32_t)tcp_offset << 16)); 2585 tx_le->msk_control = htole32(1 << 16 | (OP_TCPLISW | HW_OWNER)); 2586 control = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM; 2587 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 2588 control |= UDPTCP; 2589 sc_if->msk_cdata.msk_tx_cnt++; 2590 MSK_INC(prod, MSK_TX_RING_CNT); 2591 } 2592 2593 si = prod; 2594 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2595 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr)); 2596 if (tso == 0) 2597 tx_le->msk_control = htole32(txsegs[0].ds_len | control | 2598 OP_PACKET); 2599 else 2600 tx_le->msk_control = htole32(txsegs[0].ds_len | control | 2601 OP_LARGESEND); 2602 sc_if->msk_cdata.msk_tx_cnt++; 2603 MSK_INC(prod, MSK_TX_RING_CNT); 2604 2605 for (i = 1; i < nseg; i++) { 2606 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2607 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr)); 2608 tx_le->msk_control = htole32(txsegs[i].ds_len | control | 2609 OP_BUFFER | HW_OWNER); 2610 sc_if->msk_cdata.msk_tx_cnt++; 2611 MSK_INC(prod, MSK_TX_RING_CNT); 2612 } 2613 /* Update producer index. */ 2614 sc_if->msk_cdata.msk_tx_prod = prod; 2615 2616 /* Set EOP on the last desciptor. */ 2617 prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT; 2618 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2619 tx_le->msk_control |= htole32(EOP); 2620 2621 /* Turn the first descriptor ownership to hardware. */ 2622 tx_le = &sc_if->msk_rdata.msk_tx_ring[si]; 2623 tx_le->msk_control |= htole32(HW_OWNER); 2624 2625 txd = &sc_if->msk_cdata.msk_txdesc[prod]; 2626 map = txd_last->tx_dmamap; 2627 txd_last->tx_dmamap = txd->tx_dmamap; 2628 txd->tx_dmamap = map; 2629 txd->tx_m = m; 2630 2631 /* Sync descriptors. */ 2632 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE); 2633 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag, 2634 sc_if->msk_cdata.msk_tx_ring_map, 2635 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2636 2637 return (0); 2638 } 2639 2640 static void 2641 msk_tx_task(void *arg, int pending) 2642 { 2643 struct ifnet *ifp; 2644 2645 ifp = arg; 2646 msk_start(ifp); 2647 } 2648 2649 static void 2650 msk_start(struct ifnet *ifp) 2651 { 2652 struct msk_if_softc *sc_if; 2653 struct mbuf *m_head; 2654 int enq; 2655 2656 sc_if = ifp->if_softc; 2657 2658 MSK_IF_LOCK(sc_if); 2659 2660 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 2661 IFF_DRV_RUNNING || sc_if->msk_link == 0) { 2662 MSK_IF_UNLOCK(sc_if); 2663 return; 2664 } 2665 2666 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 2667 sc_if->msk_cdata.msk_tx_cnt < 2668 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) { 2669 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 2670 if (m_head == NULL) 2671 break; 2672 /* 2673 * Pack the data into the transmit ring. If we 2674 * don't have room, set the OACTIVE flag and wait 2675 * for the NIC to drain the ring. 2676 */ 2677 if (msk_encap(sc_if, &m_head) != 0) { 2678 if (m_head == NULL) 2679 break; 2680 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 2681 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2682 break; 2683 } 2684 2685 enq++; 2686 /* 2687 * If there's a BPF listener, bounce a copy of this frame 2688 * to him. 
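 * The copy is handed to bpf(4) through ETHER_BPF_MTAP() before the Tx
 * prefetch unit's put index is advanced below.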
2689 */ 2690 ETHER_BPF_MTAP(ifp, m_head); 2691 } 2692 2693 if (enq > 0) { 2694 /* Transmit */ 2695 CSR_WRITE_2(sc_if->msk_softc, 2696 Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG), 2697 sc_if->msk_cdata.msk_tx_prod); 2698 2699 /* Set a timeout in case the chip goes out to lunch. */ 2700 sc_if->msk_watchdog_timer = MSK_TX_TIMEOUT; 2701 } 2702 2703 MSK_IF_UNLOCK(sc_if); 2704 } 2705 2706 static void 2707 msk_watchdog(struct msk_if_softc *sc_if) 2708 { 2709 struct ifnet *ifp; 2710 uint32_t ridx; 2711 int idx; 2712 2713 MSK_IF_LOCK_ASSERT(sc_if); 2714 2715 if (sc_if->msk_watchdog_timer == 0 || --sc_if->msk_watchdog_timer) 2716 return; 2717 ifp = sc_if->msk_ifp; 2718 if (sc_if->msk_link == 0) { 2719 if (bootverbose) 2720 if_printf(sc_if->msk_ifp, "watchdog timeout " 2721 "(missed link)\n"); 2722 ifp->if_oerrors++; 2723 msk_init_locked(sc_if); 2724 return; 2725 } 2726 2727 /* 2728 * Reclaim first as there is a possibility of losing Tx completion 2729 * interrupts. 2730 */ 2731 ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX; 2732 idx = CSR_READ_2(sc_if->msk_softc, ridx); 2733 if (sc_if->msk_cdata.msk_tx_cons != idx) { 2734 msk_txeof(sc_if, idx); 2735 if (sc_if->msk_cdata.msk_tx_cnt == 0) { 2736 if_printf(ifp, "watchdog timeout (missed Tx interrupts) " 2737 "-- recovering\n"); 2738 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2739 taskqueue_enqueue(taskqueue_fast, 2740 &sc_if->msk_tx_task); 2741 return; 2742 } 2743 } 2744 2745 if_printf(ifp, "watchdog timeout\n"); 2746 ifp->if_oerrors++; 2747 msk_init_locked(sc_if); 2748 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2749 taskqueue_enqueue(taskqueue_fast, &sc_if->msk_tx_task); 2750 } 2751 2752 static int 2753 mskc_shutdown(device_t dev) 2754 { 2755 struct msk_softc *sc; 2756 int i; 2757 2758 sc = device_get_softc(dev); 2759 MSK_LOCK(sc); 2760 for (i = 0; i < sc->msk_num_port; i++) { 2761 if (sc->msk_if[i] != NULL) 2762 msk_stop(sc->msk_if[i]); 2763 } 2764 2765 /* Disable all interrupts. */ 2766 CSR_WRITE_4(sc, B0_IMSK, 0); 2767 CSR_READ_4(sc, B0_IMSK); 2768 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 2769 CSR_READ_4(sc, B0_HWE_IMSK); 2770 2771 /* Put hardware reset. */ 2772 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 2773 2774 MSK_UNLOCK(sc); 2775 return (0); 2776 } 2777 2778 static int 2779 mskc_suspend(device_t dev) 2780 { 2781 struct msk_softc *sc; 2782 int i; 2783 2784 sc = device_get_softc(dev); 2785 2786 MSK_LOCK(sc); 2787 2788 for (i = 0; i < sc->msk_num_port; i++) { 2789 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL && 2790 ((sc->msk_if[i]->msk_ifp->if_drv_flags & 2791 IFF_DRV_RUNNING) != 0)) 2792 msk_stop(sc->msk_if[i]); 2793 } 2794 2795 /* Disable all interrupts. */ 2796 CSR_WRITE_4(sc, B0_IMSK, 0); 2797 CSR_READ_4(sc, B0_IMSK); 2798 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 2799 CSR_READ_4(sc, B0_HWE_IMSK); 2800 2801 msk_phy_power(sc, MSK_PHY_POWERDOWN); 2802 2803 /* Put hardware reset. 
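 * CS_RST_SET leaves the chip held in reset; mskc_resume() below undoes this
 * via mskc_reset() and reinitializes any port whose interface is marked up.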
*/ 2804 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 2805 sc->msk_suspended = 1; 2806 2807 MSK_UNLOCK(sc); 2808 2809 return (0); 2810 } 2811 2812 static int 2813 mskc_resume(device_t dev) 2814 { 2815 struct msk_softc *sc; 2816 int i; 2817 2818 sc = device_get_softc(dev); 2819 2820 MSK_LOCK(sc); 2821 2822 mskc_reset(sc); 2823 for (i = 0; i < sc->msk_num_port; i++) { 2824 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL && 2825 ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0)) 2826 msk_init_locked(sc->msk_if[i]); 2827 } 2828 sc->msk_suspended = 0; 2829 2830 MSK_UNLOCK(sc); 2831 2832 return (0); 2833 } 2834 2835 #ifndef __NO_STRICT_ALIGNMENT 2836 static __inline void 2837 msk_fixup_rx(struct mbuf *m) 2838 { 2839 int i; 2840 uint16_t *src, *dst; 2841 2842 src = mtod(m, uint16_t *); 2843 dst = src - 3; 2844 2845 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 2846 *dst++ = *src++; 2847 2848 m->m_data -= (MSK_RX_BUF_ALIGN - ETHER_ALIGN); 2849 } 2850 #endif 2851 2852 static void 2853 msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len) 2854 { 2855 struct mbuf *m; 2856 struct ifnet *ifp; 2857 struct msk_rxdesc *rxd; 2858 int cons, rxlen; 2859 2860 ifp = sc_if->msk_ifp; 2861 2862 MSK_IF_LOCK_ASSERT(sc_if); 2863 2864 cons = sc_if->msk_cdata.msk_rx_cons; 2865 do { 2866 rxlen = status >> 16; 2867 if ((status & GMR_FS_VLAN) != 0 && 2868 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2869 rxlen -= ETHER_VLAN_ENCAP_LEN; 2870 if (len > sc_if->msk_framesize || 2871 ((status & GMR_FS_ANY_ERR) != 0) || 2872 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) { 2873 /* Don't count flow-control packet as errors. */ 2874 if ((status & GMR_FS_GOOD_FC) == 0) 2875 ifp->if_ierrors++; 2876 msk_discard_rxbuf(sc_if, cons); 2877 break; 2878 } 2879 rxd = &sc_if->msk_cdata.msk_rxdesc[cons]; 2880 m = rxd->rx_m; 2881 if (msk_newbuf(sc_if, cons) != 0) { 2882 ifp->if_iqdrops++; 2883 /* Reuse old buffer. */ 2884 msk_discard_rxbuf(sc_if, cons); 2885 break; 2886 } 2887 m->m_pkthdr.rcvif = ifp; 2888 m->m_pkthdr.len = m->m_len = len; 2889 #ifndef __NO_STRICT_ALIGNMENT 2890 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0) 2891 msk_fixup_rx(m); 2892 #endif 2893 ifp->if_ipackets++; 2894 /* Check for VLAN tagged packets. */ 2895 if ((status & GMR_FS_VLAN) != 0 && 2896 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 2897 m->m_pkthdr.ether_vtag = sc_if->msk_vtag; 2898 m->m_flags |= M_VLANTAG; 2899 } 2900 MSK_IF_UNLOCK(sc_if); 2901 (*ifp->if_input)(ifp, m); 2902 MSK_IF_LOCK(sc_if); 2903 } while (0); 2904 2905 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT); 2906 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT); 2907 } 2908 2909 static void 2910 msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len) 2911 { 2912 struct mbuf *m; 2913 struct ifnet *ifp; 2914 struct msk_rxdesc *jrxd; 2915 int cons, rxlen; 2916 2917 ifp = sc_if->msk_ifp; 2918 2919 MSK_IF_LOCK_ASSERT(sc_if); 2920 2921 cons = sc_if->msk_cdata.msk_rx_cons; 2922 do { 2923 rxlen = status >> 16; 2924 if ((status & GMR_FS_VLAN) != 0 && 2925 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2926 rxlen -= ETHER_VLAN_ENCAP_LEN; 2927 if (len > sc_if->msk_framesize || 2928 ((status & GMR_FS_ANY_ERR) != 0) || 2929 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) { 2930 /* Don't count flow-control packet as errors. 
*/ 2931 if ((status & GMR_FS_GOOD_FC) == 0) 2932 ifp->if_ierrors++; 2933 msk_discard_jumbo_rxbuf(sc_if, cons); 2934 break; 2935 } 2936 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons]; 2937 m = jrxd->rx_m; 2938 if (msk_jumbo_newbuf(sc_if, cons) != 0) { 2939 ifp->if_iqdrops++; 2940 /* Reuse old buffer. */ 2941 msk_discard_jumbo_rxbuf(sc_if, cons); 2942 break; 2943 } 2944 m->m_pkthdr.rcvif = ifp; 2945 m->m_pkthdr.len = m->m_len = len; 2946 #ifndef __NO_STRICT_ALIGNMENT 2947 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) != 0) 2948 msk_fixup_rx(m); 2949 #endif 2950 ifp->if_ipackets++; 2951 /* Check for VLAN tagged packets. */ 2952 if ((status & GMR_FS_VLAN) != 0 && 2953 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 2954 m->m_pkthdr.ether_vtag = sc_if->msk_vtag; 2955 m->m_flags |= M_VLANTAG; 2956 } 2957 MSK_IF_UNLOCK(sc_if); 2958 (*ifp->if_input)(ifp, m); 2959 MSK_IF_LOCK(sc_if); 2960 } while (0); 2961 2962 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT); 2963 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT); 2964 } 2965 2966 static void 2967 msk_txeof(struct msk_if_softc *sc_if, int idx) 2968 { 2969 struct msk_txdesc *txd; 2970 struct msk_tx_desc *cur_tx; 2971 struct ifnet *ifp; 2972 uint32_t control; 2973 int cons, prog; 2974 2975 MSK_IF_LOCK_ASSERT(sc_if); 2976 2977 ifp = sc_if->msk_ifp; 2978 2979 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag, 2980 sc_if->msk_cdata.msk_tx_ring_map, 2981 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2982 /* 2983 * Go through our tx ring and free mbufs for those 2984 * frames that have been sent. 2985 */ 2986 cons = sc_if->msk_cdata.msk_tx_cons; 2987 prog = 0; 2988 for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) { 2989 if (sc_if->msk_cdata.msk_tx_cnt <= 0) 2990 break; 2991 prog++; 2992 cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons]; 2993 control = le32toh(cur_tx->msk_control); 2994 sc_if->msk_cdata.msk_tx_cnt--; 2995 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2996 if ((control & EOP) == 0) 2997 continue; 2998 txd = &sc_if->msk_cdata.msk_txdesc[cons]; 2999 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap, 3000 BUS_DMASYNC_POSTWRITE); 3001 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap); 3002 3003 ifp->if_opackets++; 3004 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!", 3005 __func__)); 3006 m_freem(txd->tx_m); 3007 txd->tx_m = NULL; 3008 } 3009 3010 if (prog > 0) { 3011 sc_if->msk_cdata.msk_tx_cons = cons; 3012 if (sc_if->msk_cdata.msk_tx_cnt == 0) 3013 sc_if->msk_watchdog_timer = 0; 3014 /* No need to sync LEs as we didn't update LEs. */ 3015 } 3016 } 3017 3018 static void 3019 msk_tick(void *xsc_if) 3020 { 3021 struct msk_if_softc *sc_if; 3022 struct mii_data *mii; 3023 3024 sc_if = xsc_if; 3025 3026 MSK_IF_LOCK_ASSERT(sc_if); 3027 3028 mii = device_get_softc(sc_if->msk_miibus); 3029 3030 mii_tick(mii); 3031 msk_watchdog(sc_if); 3032 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if); 3033 } 3034 3035 static void 3036 msk_intr_phy(struct msk_if_softc *sc_if) 3037 { 3038 uint16_t status; 3039 3040 msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT); 3041 status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT); 3042 /* Handle FIFO Underrun/Overflow? 
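 * Only a diagnostic message is printed for the PHY FIFO condition; no
 * recovery is attempted here.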
*/ 3043 if ((status & PHY_M_IS_FIFO_ERROR)) 3044 device_printf(sc_if->msk_if_dev, 3045 "PHY FIFO underrun/overflow.\n"); 3046 } 3047 3048 static void 3049 msk_intr_gmac(struct msk_if_softc *sc_if) 3050 { 3051 struct msk_softc *sc; 3052 uint8_t status; 3053 3054 sc = sc_if->msk_softc; 3055 status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC)); 3056 3057 /* GMAC Rx FIFO overrun. */ 3058 if ((status & GM_IS_RX_FF_OR) != 0) { 3059 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), 3060 GMF_CLI_RX_FO); 3061 device_printf(sc_if->msk_if_dev, "Rx FIFO overrun!\n"); 3062 } 3063 /* GMAC Tx FIFO underrun. */ 3064 if ((status & GM_IS_TX_FF_UR) != 0) { 3065 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3066 GMF_CLI_TX_FU); 3067 device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n"); 3068 /* 3069 * XXX 3070 * In case of Tx underrun, we may need to flush/reset 3071 * Tx MAC but that would also require resynchronization 3072 * with status LEs. Reintializing status LEs would 3073 * affect other port in dual MAC configuration so it 3074 * should be avoided as possible as we can. 3075 * Due to lack of documentation it's all vague guess but 3076 * it needs more investigation. 3077 */ 3078 } 3079 } 3080 3081 static void 3082 msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status) 3083 { 3084 struct msk_softc *sc; 3085 3086 sc = sc_if->msk_softc; 3087 if ((status & Y2_IS_PAR_RD1) != 0) { 3088 device_printf(sc_if->msk_if_dev, 3089 "RAM buffer read parity error\n"); 3090 /* Clear IRQ. */ 3091 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL), 3092 RI_CLR_RD_PERR); 3093 } 3094 if ((status & Y2_IS_PAR_WR1) != 0) { 3095 device_printf(sc_if->msk_if_dev, 3096 "RAM buffer write parity error\n"); 3097 /* Clear IRQ. */ 3098 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL), 3099 RI_CLR_WR_PERR); 3100 } 3101 if ((status & Y2_IS_PAR_MAC1) != 0) { 3102 device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n"); 3103 /* Clear IRQ. */ 3104 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3105 GMF_CLI_TX_PE); 3106 } 3107 if ((status & Y2_IS_PAR_RX1) != 0) { 3108 device_printf(sc_if->msk_if_dev, "Rx parity error\n"); 3109 /* Clear IRQ. */ 3110 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR); 3111 } 3112 if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) { 3113 device_printf(sc_if->msk_if_dev, "TCP segmentation error\n"); 3114 /* Clear IRQ. */ 3115 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP); 3116 } 3117 } 3118 3119 static void 3120 msk_intr_hwerr(struct msk_softc *sc) 3121 { 3122 uint32_t status; 3123 uint32_t tlphead[4]; 3124 3125 status = CSR_READ_4(sc, B0_HWE_ISRC); 3126 /* Time Stamp timer overflow. */ 3127 if ((status & Y2_IS_TIST_OV) != 0) 3128 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); 3129 if ((status & Y2_IS_PCI_NEXP) != 0) { 3130 /* 3131 * PCI Express Error occured which is not described in PEX 3132 * spec. 3133 * This error is also mapped either to Master Abort( 3134 * Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and 3135 * can only be cleared there. 3136 */ 3137 device_printf(sc->msk_dev, 3138 "PCI Express protocol violation error\n"); 3139 } 3140 3141 if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) { 3142 uint16_t v16; 3143 3144 if ((status & Y2_IS_MST_ERR) != 0) 3145 device_printf(sc->msk_dev, 3146 "unexpected IRQ Status error\n"); 3147 else 3148 device_printf(sc->msk_dev, 3149 "unexpected IRQ Master error\n"); 3150 /* Reset all bits in the PCI status register. 
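 * The error bits are write-one-to-clear, and the config space write is
 * bracketed with TST_CFG_WRITE_ON/OFF, as with the PCI Express error clear
 * further below.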
*/ 3151 v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2); 3152 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); 3153 pci_write_config(sc->msk_dev, PCIR_STATUS, v16 | 3154 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT | 3155 PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2); 3156 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 3157 } 3158 3159 /* Check for PCI Express Uncorrectable Error. */ 3160 if ((status & Y2_IS_PCI_EXP) != 0) { 3161 uint32_t v32; 3162 3163 /* 3164 * On PCI Express bus bridges are called root complexes (RC). 3165 * PCI Express errors are recognized by the root complex too, 3166 * which requests the system to handle the problem. After 3167 * error occurence it may be that no access to the adapter 3168 * may be performed any longer. 3169 */ 3170 3171 v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT); 3172 if ((v32 & PEX_UNSUP_REQ) != 0) { 3173 /* Ignore unsupported request error. */ 3174 device_printf(sc->msk_dev, 3175 "Uncorrectable PCI Express error\n"); 3176 } 3177 if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) { 3178 int i; 3179 3180 /* Get TLP header form Log Registers. */ 3181 for (i = 0; i < 4; i++) 3182 tlphead[i] = CSR_PCI_READ_4(sc, 3183 PEX_HEADER_LOG + i * 4); 3184 /* Check for vendor defined broadcast message. */ 3185 if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) { 3186 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP; 3187 CSR_WRITE_4(sc, B0_HWE_IMSK, 3188 sc->msk_intrhwemask); 3189 CSR_READ_4(sc, B0_HWE_IMSK); 3190 } 3191 } 3192 /* Clear the interrupt. */ 3193 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); 3194 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff); 3195 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 3196 } 3197 3198 if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL) 3199 msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status); 3200 if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL) 3201 msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8); 3202 } 3203 3204 static __inline void 3205 msk_rxput(struct msk_if_softc *sc_if) 3206 { 3207 struct msk_softc *sc; 3208 3209 sc = sc_if->msk_softc; 3210 if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) 3211 bus_dmamap_sync( 3212 sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 3213 sc_if->msk_cdata.msk_jumbo_rx_ring_map, 3214 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3215 else 3216 bus_dmamap_sync( 3217 sc_if->msk_cdata.msk_rx_ring_tag, 3218 sc_if->msk_cdata.msk_rx_ring_map, 3219 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3220 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, 3221 PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod); 3222 } 3223 3224 static int 3225 msk_handle_events(struct msk_softc *sc) 3226 { 3227 struct msk_if_softc *sc_if; 3228 int rxput[2]; 3229 struct msk_stat_desc *sd; 3230 uint32_t control, status; 3231 int cons, idx, len, port, rxprog; 3232 3233 idx = CSR_READ_2(sc, STAT_PUT_IDX); 3234 if (idx == sc->msk_stat_cons) 3235 return (0); 3236 3237 /* Sync status LEs. */ 3238 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map, 3239 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 3240 /* XXX Sync Rx LEs here. */ 3241 3242 rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0; 3243 3244 rxprog = 0; 3245 for (cons = sc->msk_stat_cons; cons != idx;) { 3246 sd = &sc->msk_stat_ring[cons]; 3247 control = le32toh(sd->msk_control); 3248 if ((control & HW_OWNER) == 0) 3249 break; 3250 /* 3251 * Marvell's FreeBSD driver updates status LE after clearing 3252 * HW_OWNER. However we don't have a way to sync single LE 3253 * with bus_dma(9) API. 
bus_dma(9) provides a way to sync 3254 * an entire DMA map. So don't sync LE until we have a better 3255 * way to sync LEs. 3256 */ 3257 control &= ~HW_OWNER; 3258 sd->msk_control = htole32(control); 3259 status = le32toh(sd->msk_status); 3260 len = control & STLE_LEN_MASK; 3261 port = (control >> 16) & 0x01; 3262 sc_if = sc->msk_if[port]; 3263 if (sc_if == NULL) { 3264 device_printf(sc->msk_dev, "invalid port opcode " 3265 "0x%08x\n", control & STLE_OP_MASK); 3266 continue; 3267 } 3268 3269 switch (control & STLE_OP_MASK) { 3270 case OP_RXVLAN: 3271 sc_if->msk_vtag = ntohs(len); 3272 break; 3273 case OP_RXCHKSVLAN: 3274 sc_if->msk_vtag = ntohs(len); 3275 break; 3276 case OP_RXSTAT: 3277 if (sc_if->msk_framesize > 3278 (MCLBYTES - MSK_RX_BUF_ALIGN)) 3279 msk_jumbo_rxeof(sc_if, status, len); 3280 else 3281 msk_rxeof(sc_if, status, len); 3282 rxprog++; 3283 /* 3284 * Because there is no way to sync single Rx LE 3285 * put the DMA sync operation off until the end of 3286 * event processing. 3287 */ 3288 rxput[port]++; 3289 /* Update prefetch unit if we've passed water mark. */ 3290 if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) { 3291 msk_rxput(sc_if); 3292 rxput[port] = 0; 3293 } 3294 break; 3295 case OP_TXINDEXLE: 3296 if (sc->msk_if[MSK_PORT_A] != NULL) 3297 msk_txeof(sc->msk_if[MSK_PORT_A], 3298 status & STLE_TXA1_MSKL); 3299 if (sc->msk_if[MSK_PORT_B] != NULL) 3300 msk_txeof(sc->msk_if[MSK_PORT_B], 3301 ((status & STLE_TXA2_MSKL) >> 3302 STLE_TXA2_SHIFTL) | 3303 ((len & STLE_TXA2_MSKH) << 3304 STLE_TXA2_SHIFTH)); 3305 break; 3306 default: 3307 device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n", 3308 control & STLE_OP_MASK); 3309 break; 3310 } 3311 MSK_INC(cons, MSK_STAT_RING_CNT); 3312 if (rxprog > sc->msk_process_limit) 3313 break; 3314 } 3315 3316 sc->msk_stat_cons = cons; 3317 /* XXX We should sync status LEs here. See above notes. */ 3318 3319 if (rxput[MSK_PORT_A] > 0) 3320 msk_rxput(sc->msk_if[MSK_PORT_A]); 3321 if (rxput[MSK_PORT_B] > 0) 3322 msk_rxput(sc->msk_if[MSK_PORT_B]); 3323 3324 return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX)); 3325 } 3326 3327 /* Legacy interrupt handler for shared interrupt. */ 3328 static void 3329 msk_legacy_intr(void *xsc) 3330 { 3331 struct msk_softc *sc; 3332 struct msk_if_softc *sc_if0, *sc_if1; 3333 struct ifnet *ifp0, *ifp1; 3334 uint32_t status; 3335 3336 sc = xsc; 3337 MSK_LOCK(sc); 3338 3339 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. 
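 * They remain masked until the handler writes 2 to B0_Y2_SP_ICR on its way
 * out to re-enable them.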
*/ 3340 status = CSR_READ_4(sc, B0_Y2_SP_ISRC2); 3341 if (status == 0 || status == 0xffffffff || sc->msk_suspended != 0 || 3342 (status & sc->msk_intrmask) == 0) { 3343 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3344 return; 3345 } 3346 3347 sc_if0 = sc->msk_if[MSK_PORT_A]; 3348 sc_if1 = sc->msk_if[MSK_PORT_B]; 3349 ifp0 = ifp1 = NULL; 3350 if (sc_if0 != NULL) 3351 ifp0 = sc_if0->msk_ifp; 3352 if (sc_if1 != NULL) 3353 ifp1 = sc_if1->msk_ifp; 3354 3355 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL) 3356 msk_intr_phy(sc_if0); 3357 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL) 3358 msk_intr_phy(sc_if1); 3359 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL) 3360 msk_intr_gmac(sc_if0); 3361 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL) 3362 msk_intr_gmac(sc_if1); 3363 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) { 3364 device_printf(sc->msk_dev, "Rx descriptor error\n"); 3365 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2); 3366 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3367 CSR_READ_4(sc, B0_IMSK); 3368 } 3369 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) { 3370 device_printf(sc->msk_dev, "Tx descriptor error\n"); 3371 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2); 3372 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3373 CSR_READ_4(sc, B0_IMSK); 3374 } 3375 if ((status & Y2_IS_HW_ERR) != 0) 3376 msk_intr_hwerr(sc); 3377 3378 while (msk_handle_events(sc) != 0) 3379 ; 3380 if ((status & Y2_IS_STAT_BMU) != 0) 3381 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ); 3382 3383 /* Reenable interrupts. */ 3384 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3385 3386 if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 && 3387 !IFQ_DRV_IS_EMPTY(&ifp0->if_snd)) 3388 taskqueue_enqueue(taskqueue_fast, &sc_if0->msk_tx_task); 3389 if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 && 3390 !IFQ_DRV_IS_EMPTY(&ifp1->if_snd)) 3391 taskqueue_enqueue(taskqueue_fast, &sc_if1->msk_tx_task); 3392 3393 MSK_UNLOCK(sc); 3394 } 3395 3396 static int 3397 msk_intr(void *xsc) 3398 { 3399 struct msk_softc *sc; 3400 uint32_t status; 3401 3402 sc = xsc; 3403 status = CSR_READ_4(sc, B0_Y2_SP_ISRC2); 3404 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */ 3405 if (status == 0 || status == 0xffffffff) { 3406 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3407 return (FILTER_STRAY); 3408 } 3409 3410 taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task); 3411 return (FILTER_HANDLED); 3412 } 3413 3414 static void 3415 msk_int_task(void *arg, int pending) 3416 { 3417 struct msk_softc *sc; 3418 struct msk_if_softc *sc_if0, *sc_if1; 3419 struct ifnet *ifp0, *ifp1; 3420 uint32_t status; 3421 int domore; 3422 3423 sc = arg; 3424 MSK_LOCK(sc); 3425 3426 /* Get interrupt source. 
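 * B0_ISRC is read here rather than B0_Y2_SP_ISRC2, presumably because the
 * msk_intr() filter routine already performed the masking read.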
*/ 3427 status = CSR_READ_4(sc, B0_ISRC); 3428 if (status == 0 || status == 0xffffffff || sc->msk_suspended != 0 || 3429 (status & sc->msk_intrmask) == 0) 3430 goto done; 3431 3432 sc_if0 = sc->msk_if[MSK_PORT_A]; 3433 sc_if1 = sc->msk_if[MSK_PORT_B]; 3434 ifp0 = ifp1 = NULL; 3435 if (sc_if0 != NULL) 3436 ifp0 = sc_if0->msk_ifp; 3437 if (sc_if1 != NULL) 3438 ifp1 = sc_if1->msk_ifp; 3439 3440 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL) 3441 msk_intr_phy(sc_if0); 3442 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL) 3443 msk_intr_phy(sc_if1); 3444 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL) 3445 msk_intr_gmac(sc_if0); 3446 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL) 3447 msk_intr_gmac(sc_if1); 3448 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) { 3449 device_printf(sc->msk_dev, "Rx descriptor error\n"); 3450 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2); 3451 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3452 CSR_READ_4(sc, B0_IMSK); 3453 } 3454 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) { 3455 device_printf(sc->msk_dev, "Tx descriptor error\n"); 3456 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2); 3457 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3458 CSR_READ_4(sc, B0_IMSK); 3459 } 3460 if ((status & Y2_IS_HW_ERR) != 0) 3461 msk_intr_hwerr(sc); 3462 3463 domore = msk_handle_events(sc); 3464 if ((status & Y2_IS_STAT_BMU) != 0) 3465 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ); 3466 3467 if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 && 3468 !IFQ_DRV_IS_EMPTY(&ifp0->if_snd)) 3469 taskqueue_enqueue(taskqueue_fast, &sc_if0->msk_tx_task); 3470 if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 && 3471 !IFQ_DRV_IS_EMPTY(&ifp1->if_snd)) 3472 taskqueue_enqueue(taskqueue_fast, &sc_if1->msk_tx_task); 3473 3474 if (domore > 0) { 3475 taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task); 3476 MSK_UNLOCK(sc); 3477 return; 3478 } 3479 done: 3480 MSK_UNLOCK(sc); 3481 3482 /* Reenable interrupts. */ 3483 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3484 } 3485 3486 static void 3487 msk_init(void *xsc) 3488 { 3489 struct msk_if_softc *sc_if = xsc; 3490 3491 MSK_IF_LOCK(sc_if); 3492 msk_init_locked(sc_if); 3493 MSK_IF_UNLOCK(sc_if); 3494 } 3495 3496 static void 3497 msk_init_locked(struct msk_if_softc *sc_if) 3498 { 3499 struct msk_softc *sc; 3500 struct ifnet *ifp; 3501 struct mii_data *mii; 3502 uint16_t eaddr[ETHER_ADDR_LEN / 2]; 3503 uint16_t gmac; 3504 int error, i; 3505 3506 MSK_IF_LOCK_ASSERT(sc_if); 3507 3508 ifp = sc_if->msk_ifp; 3509 sc = sc_if->msk_softc; 3510 mii = device_get_softc(sc_if->msk_miibus); 3511 3512 error = 0; 3513 /* Cancel pending I/O and free all Rx/Tx buffers. */ 3514 msk_stop(sc_if); 3515 3516 if (ifp->if_mtu < ETHERMTU) 3517 sc_if->msk_framesize = ETHERMTU; 3518 else 3519 sc_if->msk_framesize = ifp->if_mtu; 3520 sc_if->msk_framesize += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 3521 if (ifp->if_mtu > ETHERMTU && 3522 sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) { 3523 /* 3524 * In Yukon EC Ultra, TSO & checksum offload is not 3525 * supported for jumbo frame. 3526 */ 3527 ifp->if_hwassist &= ~(MSK_CSUM_FEATURES | CSUM_TSO); 3528 ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TXCSUM); 3529 } 3530 3531 /* 3532 * Initialize GMAC first. 
3533 * Without this initialization, Rx MAC did not work as expected 3534 * and Rx MAC garbled status LEs and it resulted in out-of-order 3535 * or duplicated frame delivery which in turn showed very poor 3536 * Rx performance.(I had to write a packet analysis code that 3537 * could be embeded in driver to diagnose this issue.) 3538 * I've spent almost 2 months to fix this issue. If I have had 3539 * datasheet for Yukon II I wouldn't have encountered this. :-( 3540 */ 3541 gmac = GM_GPCR_SPEED_100 | GM_GPCR_SPEED_1000 | GM_GPCR_DUP_FULL; 3542 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac); 3543 3544 /* Dummy read the Interrupt Source Register. */ 3545 CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC)); 3546 3547 /* Set MIB Clear Counter Mode. */ 3548 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR); 3549 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR); 3550 /* Read all MIB Counters with Clear Mode set. */ 3551 for (i = 0; i < GM_MIB_CNT_SIZE; i++) 3552 GMAC_READ_2(sc, sc_if->msk_port, GM_MIB_CNT_BASE + 8 * i); 3553 /* Clear MIB Clear Counter Mode. */ 3554 gmac &= ~GM_PAR_MIB_CLR; 3555 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac); 3556 3557 /* Disable FCS. */ 3558 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS); 3559 3560 /* Setup Transmit Control Register. */ 3561 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); 3562 3563 /* Setup Transmit Flow Control Register. */ 3564 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff); 3565 3566 /* Setup Transmit Parameter Register. */ 3567 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM, 3568 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) | 3569 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF)); 3570 3571 gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) | 3572 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF); 3573 3574 if (ifp->if_mtu > ETHERMTU) 3575 gmac |= GM_SMOD_JUMBO_ENA; 3576 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac); 3577 3578 /* Set station address. */ 3579 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN); 3580 for (i = 0; i < ETHER_ADDR_LEN /2; i++) 3581 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L + i * 4, 3582 eaddr[i]); 3583 for (i = 0; i < ETHER_ADDR_LEN /2; i++) 3584 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L + i * 4, 3585 eaddr[i]); 3586 3587 /* Disable interrupts for counter overflows. */ 3588 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0); 3589 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0); 3590 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0); 3591 3592 /* Configure Rx MAC FIFO. */ 3593 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET); 3594 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR); 3595 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), 3596 GMF_OPER_ON | GMF_RX_F_FL_ON); 3597 3598 /* Set promiscuous mode. */ 3599 msk_setpromisc(sc_if); 3600 3601 /* Set multicast filter. */ 3602 msk_setmulti(sc_if); 3603 3604 /* Flush Rx MAC FIFO on any flow control or error. */ 3605 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 3606 GMR_FS_ANY_ERR); 3607 3608 /* 3609 * Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word 3610 * due to hardware hang on receipt of pause frames. 3611 */ 3612 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), 3613 RX_GMF_FL_THR_DEF + 1); 3614 3615 /* Configure Tx MAC FIFO. 
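 * Same reset-pulse-then-enable sequence (GMF_RST_SET, GMF_RST_CLR,
 * GMF_OPER_ON) that was applied to the Rx MAC FIFO above.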
*/ 3616 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET); 3617 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR); 3618 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON); 3619 3620 /* Configure hardware VLAN tag insertion/stripping. */ 3621 msk_setvlan(sc_if, ifp); 3622 3623 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) { 3624 /* Set Rx Pause threshould. */ 3625 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR), 3626 MSK_ECU_LLPP); 3627 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR), 3628 MSK_ECU_ULPP); 3629 if (ifp->if_mtu > ETHERMTU) { 3630 /* 3631 * Set Tx GMAC FIFO Almost Empty Threshold. 3632 */ 3633 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR), 3634 MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR); 3635 /* Disable Store & Forward mode for Tx. */ 3636 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3637 TX_JUMBO_ENA | TX_STFW_DIS); 3638 } else { 3639 /* Enable Store & Forward mode for Tx. */ 3640 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3641 TX_JUMBO_DIS | TX_STFW_ENA); 3642 } 3643 } 3644 3645 /* 3646 * Disable Force Sync bit and Alloc bit in Tx RAM interface 3647 * arbiter as we don't use Sync Tx queue. 3648 */ 3649 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), 3650 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); 3651 /* Enable the RAM Interface Arbiter. */ 3652 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB); 3653 3654 /* Setup RAM buffer. */ 3655 msk_set_rambuffer(sc_if); 3656 3657 /* Disable Tx sync Queue. */ 3658 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET); 3659 3660 /* Setup Tx Queue Bus Memory Interface. */ 3661 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET); 3662 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT); 3663 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON); 3664 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM); 3665 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U && 3666 sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) { 3667 /* Fix for Yukon-EC Ultra: set BMU FIFO level */ 3668 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL), MSK_ECU_TXFF_LEV); 3669 } 3670 3671 /* Setup Rx Queue Bus Memory Interface. */ 3672 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET); 3673 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT); 3674 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON); 3675 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM); 3676 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U && 3677 sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) { 3678 /* MAC Rx RAM Read is controlled by hardware. */ 3679 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS); 3680 } 3681 3682 msk_set_prefetch(sc, sc_if->msk_txq, 3683 sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1); 3684 msk_init_tx_ring(sc_if); 3685 3686 /* Disable Rx checksum offload and RSS hash. 
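 * The jumbo or standard Rx ring is then selected from msk_framesize, using
 * the same threshold test as msk_rxput() and the status event handler.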
3687     CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
3688         BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);
3689     if (sc_if->msk_framesize > (MCLBYTES - MSK_RX_BUF_ALIGN)) {
3690         msk_set_prefetch(sc, sc_if->msk_rxq,
3691             sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
3692             MSK_JUMBO_RX_RING_CNT - 1);
3693         error = msk_init_jumbo_rx_ring(sc_if);
3694     } else {
3695         msk_set_prefetch(sc, sc_if->msk_rxq,
3696             sc_if->msk_rdata.msk_rx_ring_paddr,
3697             MSK_RX_RING_CNT - 1);
3698         error = msk_init_rx_ring(sc_if);
3699     }
3700     if (error != 0) {
3701         device_printf(sc_if->msk_if_dev,
3702             "initialization failed: no memory for Rx buffers\n");
3703         msk_stop(sc_if);
3704         return;
3705     }
3706
3707     /* Configure interrupt handling. */
3708     if (sc_if->msk_port == MSK_PORT_A) {
3709         sc->msk_intrmask |= Y2_IS_PORT_A;
3710         sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
3711     } else {
3712         sc->msk_intrmask |= Y2_IS_PORT_B;
3713         sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
3714     }
3715     CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
3716     CSR_READ_4(sc, B0_HWE_IMSK);
3717     CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3718     CSR_READ_4(sc, B0_IMSK);
3719
3720     sc_if->msk_link = 0;
3721     mii_mediachg(mii);
3722
3723     ifp->if_drv_flags |= IFF_DRV_RUNNING;
3724     ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3725
3726     callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3727 }
3728
3729 static void
3730 msk_set_rambuffer(struct msk_if_softc *sc_if)
3731 {
3732     struct msk_softc *sc;
3733     int ltpp, utpp;
3734
3735     sc = sc_if->msk_softc;
3736     if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
3737         return;
3738
3739     /* Setup Rx Queue. */
3740     CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
3741     CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
3742         sc->msk_rxqstart[sc_if->msk_port] / 8);
3743     CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
3744         sc->msk_rxqend[sc_if->msk_port] / 8);
3745     CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
3746         sc->msk_rxqstart[sc_if->msk_port] / 8);
3747     CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
3748         sc->msk_rxqstart[sc_if->msk_port] / 8);
3749
3750     utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3751         sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
3752     ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3753         sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
3754     if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
3755         ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
3756     CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
3757     CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
3758     /* Set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds? */
3759
3760     CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
3761     CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));
3762
3763     /* Setup Tx Queue. */
3764     CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
3765     CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
3766         sc->msk_txqstart[sc_if->msk_port] / 8);
3767     CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
3768         sc->msk_txqend[sc_if->msk_port] / 8);
3769     CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
3770         sc->msk_txqstart[sc_if->msk_port] / 8);
3771     CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
3772         sc->msk_txqstart[sc_if->msk_port] / 8);
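    /*
     * The RAM buffer start/end and read/write pointer registers written
     * above appear to take offsets in units of 64-bit (8-byte) words
     * rather than bytes, which is why the byte offsets kept in
     * msk_rxqstart[]/msk_rxqend[] and msk_txqstart[]/msk_txqend[] are
     * divided by 8; the utpp/ltpp pause thresholds are converted the
     * same way.
     */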
3773     /* Enable Store & Forward for Tx side. */
3774     CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
3775     CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
3776     CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
3777 }
3778
3779 static void
3780 msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
3781     uint32_t count)
3782 {
3783
3784     /* Reset the prefetch unit. */
3785     CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3786         PREF_UNIT_RST_SET);
3787     CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3788         PREF_UNIT_RST_CLR);
3789     /* Set LE base address. */
3790     CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
3791         MSK_ADDR_LO(addr));
3792     CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
3793         MSK_ADDR_HI(addr));
3794     /* Set the list last index. */
3795     CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
3796         count);
3797     /* Turn on prefetch unit. */
3798     CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3799         PREF_UNIT_OP_ON);
3800     /* Dummy read to ensure write. */
3801     CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
3802 }
3803
3804 static void
3805 msk_stop(struct msk_if_softc *sc_if)
3806 {
3807     struct msk_softc *sc;
3808     struct msk_txdesc *txd;
3809     struct msk_rxdesc *rxd;
3810     struct msk_rxdesc *jrxd;
3811     struct ifnet *ifp;
3812     uint32_t val;
3813     int i;
3814
3815     MSK_IF_LOCK_ASSERT(sc_if);
3816     sc = sc_if->msk_softc;
3817     ifp = sc_if->msk_ifp;
3818
3819     callout_stop(&sc_if->msk_tick_ch);
3820     sc_if->msk_watchdog_timer = 0;
3821
3822     /* Disable interrupts. */
3823     if (sc_if->msk_port == MSK_PORT_A) {
3824         sc->msk_intrmask &= ~Y2_IS_PORT_A;
3825         sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
3826     } else {
3827         sc->msk_intrmask &= ~Y2_IS_PORT_B;
3828         sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
3829     }
3830     CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
3831     CSR_READ_4(sc, B0_HWE_IMSK);
3832     CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3833     CSR_READ_4(sc, B0_IMSK);
3834
3835     /* Disable Tx/Rx MAC. */
3836     val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
3837     val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
3838     GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
3839     /* Read again to ensure writing. */
3840     GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
3841
3842     /* Stop Tx BMU. */
3843     CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
3844     val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
3845     for (i = 0; i < MSK_TIMEOUT; i++) {
3846         if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
3847             CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3848                 BMU_STOP);
3849             val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
3850         } else
3851             break;
3852         DELAY(1);
3853     }
3854     if (i == MSK_TIMEOUT)
3855         device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
3856     CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
3857         RB_RST_SET | RB_DIS_OP_MD);
3858
3859     /* Disable all GMAC interrupts. */
3860     CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
3861     /* Disable PHY interrupt. */
3862     msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
3863
3864     /* Disable the RAM Interface Arbiter. */
3865     CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);
3866
3867     /* Reset the PCI FIFO of the async Tx queue. */
3868     CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3869         BMU_RST_SET | BMU_FIFO_RST);
3870
3871     /* Reset the Tx prefetch units. */
3872     CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
3873         PREF_UNIT_RST_SET);
3874
3875     /* Reset the RAM Buffer async Tx queue. */
3876     CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);
3877
3878     /* Reset Tx MAC FIFO. */
3879     CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3880     /* Set Pause Off. */
3881     CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);
3882
3883     /*
3884      * The Rx Stop command does not work on Yukon-2 unless the BMU has
3885      * reached the end of a packet, and since we cannot be sure that no
3886      * more data is arriving, the BMU must be reset only while it is not
3887      * in the middle of a DMA transfer.  Because the Rx path may still
3888      * be active, the Rx RAM buffer is stopped first so that incoming
3889      * data cannot trigger a new DMA.  After the RAM buffer has been
3890      * stopped, the BMU is polled until any DMA in progress has
3891      * finished, and only then is it reset.
3892      */
3893
3894     /* Disable the RAM Buffer receive queue. */
3895     CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
3896     for (i = 0; i < MSK_TIMEOUT; i++) {
3897         if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
3898             CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
3899             break;
3900         DELAY(1);
3901     }
3902     if (i == MSK_TIMEOUT)
3903         device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
3904     CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
3905         BMU_RST_SET | BMU_FIFO_RST);
3906     /* Reset the Rx prefetch unit. */
3907     CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
3908         PREF_UNIT_RST_SET);
3909     /* Reset the RAM Buffer receive queue. */
3910     CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
3911     /* Reset Rx MAC FIFO. */
3912     CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3913
3914     /* Free Rx and Tx mbufs still in the queues. */
3915     for (i = 0; i < MSK_RX_RING_CNT; i++) {
3916         rxd = &sc_if->msk_cdata.msk_rxdesc[i];
3917         if (rxd->rx_m != NULL) {
3918             bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag,
3919                 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3920             bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
3921                 rxd->rx_dmamap);
3922             m_freem(rxd->rx_m);
3923             rxd->rx_m = NULL;
3924         }
3925     }
3926     for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
3927         jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
3928         if (jrxd->rx_m != NULL) {
3929             bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
3930                 jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3931             bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
3932                 jrxd->rx_dmamap);
3933             m_freem(jrxd->rx_m);
3934             jrxd->rx_m = NULL;
3935         }
3936     }
3937     for (i = 0; i < MSK_TX_RING_CNT; i++) {
3938         txd = &sc_if->msk_cdata.msk_txdesc[i];
3939         if (txd->tx_m != NULL) {
3940             bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag,
3941                 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3942             bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
3943                 txd->tx_dmamap);
3944             m_freem(txd->tx_m);
3945             txd->tx_m = NULL;
3946         }
3947     }
3948
3949     /*
3950      * Mark the interface down.
3951      */
3952     ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3953     sc_if->msk_link = 0;
3954 }
3955
3956 static int
3957 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3958 {
3959     int error, value;
3960
3961     if (!arg1)
3962         return (EINVAL);
3963     value = *(int *)arg1;
3964     error = sysctl_handle_int(oidp, &value, 0, req);
3965     if (error || !req->newptr)
3966         return (error);
3967     if (value < low || value > high)
3968         return (EINVAL);
3969     *(int *)arg1 = value;
3970
3971     return (0);
3972 }
3973
3974 static int
3975 sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS)
3976 {
3977
3978     return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN,
3979         MSK_PROC_MAX));
3980 }
3981
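/*
 * Example (sketch only, not part of the driver): a handler built on
 * sysctl_int_range(), such as sysctl_hw_msk_proc_limit() above, is
 * normally attached to the device's sysctl tree with SYSCTL_ADD_PROC(),
 * typically from the attach routine.  The node name "process_limit" and
 * the fields msk_dev/msk_process_limit used below are illustrative
 * assumptions, not necessarily the names this driver uses elsewhere.
 */
#if 0
    SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->msk_dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->msk_dev)),
        OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
        &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
        "max number of Rx events to process");
#endif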