1 /****************************************************************************** 2 * 3 * Name : sky2.c 4 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x 5 * Version: $Revision: 1.23 $ 6 * Date : $Date: 2005/12/22 09:04:11 $ 7 * Purpose: Main driver source file 8 * 9 *****************************************************************************/ 10 11 /****************************************************************************** 12 * 13 * LICENSE: 14 * Copyright (C) Marvell International Ltd. and/or its affiliates 15 * 16 * The computer program files contained in this folder ("Files") 17 * are provided to you under the BSD-type license terms provided 18 * below, and any use of such Files and any derivative works 19 * thereof created by you shall be governed by the following terms 20 * and conditions: 21 * 22 * - Redistributions of source code must retain the above copyright 23 * notice, this list of conditions and the following disclaimer. 24 * - Redistributions in binary form must reproduce the above 25 * copyright notice, this list of conditions and the following 26 * disclaimer in the documentation and/or other materials provided 27 * with the distribution. 28 * - Neither the name of Marvell nor the names of its contributors 29 * may be used to endorse or promote products derived from this 30 * software without specific prior written permission. 31 * 32 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 33 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 34 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 35 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 36 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 37 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 38 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 39 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 40 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 41 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 42 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 43 * OF THE POSSIBILITY OF SUCH DAMAGE. 44 * /LICENSE 45 * 46 *****************************************************************************/ 47 48 /*- 49 * Copyright (c) 1997, 1998, 1999, 2000 50 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 51 * 52 * Redistribution and use in source and binary forms, with or without 53 * modification, are permitted provided that the following conditions 54 * are met: 55 * 1. Redistributions of source code must retain the above copyright 56 * notice, this list of conditions and the following disclaimer. 57 * 2. Redistributions in binary form must reproduce the above copyright 58 * notice, this list of conditions and the following disclaimer in the 59 * documentation and/or other materials provided with the distribution. 60 * 3. All advertising materials mentioning features or use of this software 61 * must display the following acknowledgement: 62 * This product includes software developed by Bill Paul. 63 * 4. Neither the name of the author nor the names of any co-contributors 64 * may be used to endorse or promote products derived from this software 65 * without specific prior written permission. 
66 * 67 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 68 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 69 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 70 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 71 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 72 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 73 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 74 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 75 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 76 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 77 * THE POSSIBILITY OF SUCH DAMAGE. 78 */ 79 /*- 80 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu> 81 * 82 * Permission to use, copy, modify, and distribute this software for any 83 * purpose with or without fee is hereby granted, provided that the above 84 * copyright notice and this permission notice appear in all copies. 85 * 86 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 87 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 88 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 89 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 90 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 91 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 92 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 93 */ 94 95 /* 96 * Device driver for the Marvell Yukon II Ethernet controller. 97 * Due to lack of documentation, this driver is based on the code from 98 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x. 99 */ 100 101 #include <sys/cdefs.h> 102 __FBSDID("$FreeBSD$"); 103 104 #include <sys/param.h> 105 #include <sys/systm.h> 106 #include <sys/bus.h> 107 #include <sys/endian.h> 108 #include <sys/mbuf.h> 109 #include <sys/malloc.h> 110 #include <sys/kernel.h> 111 #include <sys/module.h> 112 #include <sys/socket.h> 113 #include <sys/sockio.h> 114 #include <sys/queue.h> 115 #include <sys/sysctl.h> 116 #include <sys/taskqueue.h> 117 118 #include <net/bpf.h> 119 #include <net/ethernet.h> 120 #include <net/if.h> 121 #include <net/if_arp.h> 122 #include <net/if_dl.h> 123 #include <net/if_media.h> 124 #include <net/if_types.h> 125 #include <net/if_vlan_var.h> 126 127 #include <netinet/in.h> 128 #include <netinet/in_systm.h> 129 #include <netinet/ip.h> 130 #include <netinet/tcp.h> 131 #include <netinet/udp.h> 132 133 #include <machine/bus.h> 134 #include <machine/in_cksum.h> 135 #include <machine/resource.h> 136 #include <sys/rman.h> 137 138 #include <dev/mii/mii.h> 139 #include <dev/mii/miivar.h> 140 #include <dev/mii/brgphyreg.h> 141 142 #include <dev/pci/pcireg.h> 143 #include <dev/pci/pcivar.h> 144 145 #include <dev/msk/if_mskreg.h> 146 147 MODULE_DEPEND(msk, pci, 1, 1, 1); 148 MODULE_DEPEND(msk, ether, 1, 1, 1); 149 MODULE_DEPEND(msk, miibus, 1, 1, 1); 150 151 /* "device miibus" required. See GENERIC if you get errors here. */ 152 #include "miibus_if.h" 153 154 /* Tunables. */ 155 static int msi_disable = 0; 156 TUNABLE_INT("hw.msk.msi_disable", &msi_disable); 157 158 #define MSK_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) 159 160 /* 161 * Devices supported by this driver. 
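 *
 * mskc_probe() walks the msk_products[] table below and matches on the
 * PCI vendor/device ID pair.  To support an additional adapter, append
 * one { vendor, device, name } entry; the IDs shown here are only a
 * hypothetical illustration, not a real chip:
 *
 *	{ VENDORID_MARVELL, DEVICEID_MRVL_XXXX,
 *	    "Marvell Yukon 88EXXXX Gigabit Ethernet" },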
162 */ 163 static struct msk_product { 164 uint16_t msk_vendorid; 165 uint16_t msk_deviceid; 166 const char *msk_name; 167 } msk_products[] = { 168 { VENDORID_SK, DEVICEID_SK_YUKON2, 169 "SK-9Sxx Gigabit Ethernet" }, 170 { VENDORID_SK, DEVICEID_SK_YUKON2_EXPR, 171 "SK-9Exx Gigabit Ethernet"}, 172 { VENDORID_MARVELL, DEVICEID_MRVL_8021CU, 173 "Marvell Yukon 88E8021CU Gigabit Ethernet" }, 174 { VENDORID_MARVELL, DEVICEID_MRVL_8021X, 175 "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" }, 176 { VENDORID_MARVELL, DEVICEID_MRVL_8022CU, 177 "Marvell Yukon 88E8022CU Gigabit Ethernet" }, 178 { VENDORID_MARVELL, DEVICEID_MRVL_8022X, 179 "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" }, 180 { VENDORID_MARVELL, DEVICEID_MRVL_8061CU, 181 "Marvell Yukon 88E8061CU Gigabit Ethernet" }, 182 { VENDORID_MARVELL, DEVICEID_MRVL_8061X, 183 "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" }, 184 { VENDORID_MARVELL, DEVICEID_MRVL_8062CU, 185 "Marvell Yukon 88E8062CU Gigabit Ethernet" }, 186 { VENDORID_MARVELL, DEVICEID_MRVL_8062X, 187 "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" }, 188 { VENDORID_MARVELL, DEVICEID_MRVL_8035, 189 "Marvell Yukon 88E8035 Gigabit Ethernet" }, 190 { VENDORID_MARVELL, DEVICEID_MRVL_8036, 191 "Marvell Yukon 88E8036 Gigabit Ethernet" }, 192 { VENDORID_MARVELL, DEVICEID_MRVL_8038, 193 "Marvell Yukon 88E8038 Gigabit Ethernet" }, 194 { VENDORID_MARVELL, DEVICEID_MRVL_4361, 195 "Marvell Yukon 88E8050 Gigabit Ethernet" }, 196 { VENDORID_MARVELL, DEVICEID_MRVL_4360, 197 "Marvell Yukon 88E8052 Gigabit Ethernet" }, 198 { VENDORID_MARVELL, DEVICEID_MRVL_4362, 199 "Marvell Yukon 88E8053 Gigabit Ethernet" }, 200 { VENDORID_MARVELL, DEVICEID_MRVL_4363, 201 "Marvell Yukon 88E8055 Gigabit Ethernet" }, 202 { VENDORID_MARVELL, DEVICEID_MRVL_4364, 203 "Marvell Yukon 88E8056 Gigabit Ethernet" }, 204 { VENDORID_DLINK, DEVICEID_DLINK_DGE550SX, 205 "D-Link 550SX Gigabit Ethernet" }, 206 { VENDORID_DLINK, DEVICEID_DLINK_DGE560T, 207 "D-Link 560T Gigabit Ethernet" } 208 }; 209 210 static const char *model_name[] = { 211 "Yukon XL", 212 "Yukon EC Ultra", 213 "Yukon Unknown", 214 "Yukon EC", 215 "Yukon FE" 216 }; 217 218 static int mskc_probe(device_t); 219 static int mskc_attach(device_t); 220 static int mskc_detach(device_t); 221 static void mskc_shutdown(device_t); 222 static int mskc_setup_rambuffer(struct msk_softc *); 223 static int mskc_suspend(device_t); 224 static int mskc_resume(device_t); 225 static void mskc_reset(struct msk_softc *); 226 227 static int msk_probe(device_t); 228 static int msk_attach(device_t); 229 static int msk_detach(device_t); 230 231 static void msk_tick(void *); 232 static int msk_intr(void *); 233 static void msk_int_task(void *, int); 234 static void msk_intr_phy(struct msk_if_softc *); 235 static void msk_intr_gmac(struct msk_if_softc *); 236 static __inline void msk_rxput(struct msk_if_softc *); 237 static int msk_handle_events(struct msk_softc *); 238 static void msk_handle_hwerr(struct msk_if_softc *, uint32_t); 239 static void msk_intr_hwerr(struct msk_softc *); 240 static void msk_rxeof(struct msk_if_softc *, uint32_t, int); 241 static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, int); 242 static void msk_txeof(struct msk_if_softc *, int); 243 static struct mbuf *msk_defrag(struct mbuf *, int, int); 244 static int msk_encap(struct msk_if_softc *, struct mbuf **); 245 static void msk_tx_task(void *, int); 246 static void msk_start(struct ifnet *); 247 static int msk_ioctl(struct ifnet *, u_long, caddr_t); 248 static void msk_set_prefetch(struct msk_softc *, int, 
bus_addr_t, uint32_t); 249 static void msk_set_rambuffer(struct msk_if_softc *); 250 static void msk_init(void *); 251 static void msk_init_locked(struct msk_if_softc *); 252 static void msk_stop(struct msk_if_softc *); 253 static void msk_watchdog(struct msk_if_softc *); 254 static int msk_mediachange(struct ifnet *); 255 static void msk_mediastatus(struct ifnet *, struct ifmediareq *); 256 static void msk_phy_power(struct msk_softc *, int); 257 static void msk_dmamap_cb(void *, bus_dma_segment_t *, int, int); 258 static int msk_status_dma_alloc(struct msk_softc *); 259 static void msk_status_dma_free(struct msk_softc *); 260 static int msk_txrx_dma_alloc(struct msk_if_softc *); 261 static void msk_txrx_dma_free(struct msk_if_softc *); 262 static void *msk_jalloc(struct msk_if_softc *); 263 static void msk_jfree(void *, void *); 264 static int msk_init_rx_ring(struct msk_if_softc *); 265 static int msk_init_jumbo_rx_ring(struct msk_if_softc *); 266 static void msk_init_tx_ring(struct msk_if_softc *); 267 static __inline void msk_discard_rxbuf(struct msk_if_softc *, int); 268 static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int); 269 static int msk_newbuf(struct msk_if_softc *, int); 270 static int msk_jumbo_newbuf(struct msk_if_softc *, int); 271 272 static int msk_phy_readreg(struct msk_if_softc *, int, int); 273 static int msk_phy_writereg(struct msk_if_softc *, int, int, int); 274 static int msk_miibus_readreg(device_t, int, int); 275 static int msk_miibus_writereg(device_t, int, int, int); 276 static void msk_miibus_statchg(device_t); 277 static void msk_link_task(void *, int); 278 279 static void msk_setmulti(struct msk_if_softc *); 280 static void msk_setvlan(struct msk_if_softc *, struct ifnet *); 281 static void msk_setpromisc(struct msk_if_softc *); 282 283 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); 284 static int sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS); 285 286 static device_method_t mskc_methods[] = { 287 /* Device interface */ 288 DEVMETHOD(device_probe, mskc_probe), 289 DEVMETHOD(device_attach, mskc_attach), 290 DEVMETHOD(device_detach, mskc_detach), 291 DEVMETHOD(device_suspend, mskc_suspend), 292 DEVMETHOD(device_resume, mskc_resume), 293 DEVMETHOD(device_shutdown, mskc_shutdown), 294 295 /* bus interface */ 296 DEVMETHOD(bus_print_child, bus_generic_print_child), 297 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 298 299 { NULL, NULL } 300 }; 301 302 static driver_t mskc_driver = { 303 "mskc", 304 mskc_methods, 305 sizeof(struct msk_softc) 306 }; 307 308 static devclass_t mskc_devclass; 309 310 static device_method_t msk_methods[] = { 311 /* Device interface */ 312 DEVMETHOD(device_probe, msk_probe), 313 DEVMETHOD(device_attach, msk_attach), 314 DEVMETHOD(device_detach, msk_detach), 315 DEVMETHOD(device_shutdown, bus_generic_shutdown), 316 317 /* bus interface */ 318 DEVMETHOD(bus_print_child, bus_generic_print_child), 319 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 320 321 /* MII interface */ 322 DEVMETHOD(miibus_readreg, msk_miibus_readreg), 323 DEVMETHOD(miibus_writereg, msk_miibus_writereg), 324 DEVMETHOD(miibus_statchg, msk_miibus_statchg), 325 326 { NULL, NULL } 327 }; 328 329 static driver_t msk_driver = { 330 "msk", 331 msk_methods, 332 sizeof(struct msk_if_softc) 333 }; 334 335 static devclass_t msk_devclass; 336 337 DRIVER_MODULE(mskc, pci, mskc_driver, mskc_devclass, 0, 0); 338 DRIVER_MODULE(msk, mskc, msk_driver, msk_devclass, 0, 0); 339 DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, 0, 
0); 340 341 static struct resource_spec msk_res_spec_io[] = { 342 { SYS_RES_IOPORT, PCIR_BAR(1), RF_ACTIVE }, 343 { -1, 0, 0 } 344 }; 345 346 static struct resource_spec msk_res_spec_mem[] = { 347 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE }, 348 { -1, 0, 0 } 349 }; 350 351 static struct resource_spec msk_irq_spec_legacy[] = { 352 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, 353 { -1, 0, 0 } 354 }; 355 356 static struct resource_spec msk_irq_spec_msi[] = { 357 { SYS_RES_IRQ, 1, RF_ACTIVE }, 358 { SYS_RES_IRQ, 2, RF_ACTIVE }, 359 { -1, 0, 0 } 360 }; 361 362 static int 363 msk_miibus_readreg(device_t dev, int phy, int reg) 364 { 365 struct msk_if_softc *sc_if; 366 367 sc_if = device_get_softc(dev); 368 369 return (msk_phy_readreg(sc_if, phy, reg)); 370 } 371 372 static int 373 msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg) 374 { 375 struct msk_softc *sc; 376 int i, val; 377 378 sc = sc_if->msk_softc; 379 380 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL, 381 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); 382 383 for (i = 0; i < MSK_TIMEOUT; i++) { 384 DELAY(1); 385 val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL); 386 if ((val & GM_SMI_CT_RD_VAL) != 0) { 387 val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA); 388 break; 389 } 390 } 391 392 if (i == MSK_TIMEOUT) { 393 if_printf(sc_if->msk_ifp, "phy failed to come ready\n"); 394 val = 0; 395 } 396 397 return (val); 398 } 399 400 static int 401 msk_miibus_writereg(device_t dev, int phy, int reg, int val) 402 { 403 struct msk_if_softc *sc_if; 404 405 sc_if = device_get_softc(dev); 406 407 return (msk_phy_writereg(sc_if, phy, reg, val)); 408 } 409 410 static int 411 msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val) 412 { 413 struct msk_softc *sc; 414 int i; 415 416 sc = sc_if->msk_softc; 417 418 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val); 419 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL, 420 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg)); 421 for (i = 0; i < MSK_TIMEOUT; i++) { 422 DELAY(1); 423 if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) & 424 GM_SMI_CT_BUSY) == 0) 425 break; 426 } 427 if (i == MSK_TIMEOUT) 428 if_printf(sc_if->msk_ifp, "phy write timeout\n"); 429 430 return (0); 431 } 432 433 static void 434 msk_miibus_statchg(device_t dev) 435 { 436 struct msk_if_softc *sc_if; 437 438 sc_if = device_get_softc(dev); 439 taskqueue_enqueue(taskqueue_swi, &sc_if->msk_link_task); 440 } 441 442 static void 443 msk_link_task(void *arg, int pending) 444 { 445 struct msk_softc *sc; 446 struct msk_if_softc *sc_if; 447 struct mii_data *mii; 448 struct ifnet *ifp; 449 uint32_t gmac; 450 451 sc_if = (struct msk_if_softc *)arg; 452 sc = sc_if->msk_softc; 453 454 MSK_IF_LOCK(sc_if); 455 456 mii = device_get_softc(sc_if->msk_miibus); 457 ifp = sc_if->msk_ifp; 458 if (mii == NULL || ifp == NULL) { 459 MSK_IF_UNLOCK(sc_if); 460 return; 461 } 462 463 if (mii->mii_media_status & IFM_ACTIVE) { 464 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 465 sc_if->msk_link = 1; 466 } else 467 sc_if->msk_link = 0; 468 469 if (sc_if->msk_link != 0) { 470 /* Enable Tx FIFO Underrun. */ 471 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 472 GM_IS_TX_FF_UR | GM_IS_RX_FF_OR); 473 /* 474 * Because mii(4) notify msk(4) that it detected link status 475 * change, there is no need to enable automatic 476 * speed/flow-control/duplex updates. 
477 */ 478 gmac = GM_GPCR_AU_ALL_DIS; 479 switch (IFM_SUBTYPE(mii->mii_media_active)) { 480 case IFM_1000_SX: 481 case IFM_1000_T: 482 gmac |= GM_GPCR_SPEED_1000; 483 break; 484 case IFM_100_TX: 485 gmac |= GM_GPCR_SPEED_100; 486 break; 487 case IFM_10_T: 488 break; 489 } 490 491 if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0) 492 gmac |= GM_GPCR_DUP_FULL; 493 /* Disable Rx flow control. */ 494 if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) == 0) 495 gmac |= GM_GPCR_FC_RX_DIS; 496 /* Disable Tx flow control. */ 497 if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) == 0) 498 gmac |= GM_GPCR_FC_TX_DIS; 499 gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA; 500 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac); 501 /* Read again to ensure writing. */ 502 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); 503 504 gmac = GMC_PAUSE_ON; 505 if (((mii->mii_media_active & IFM_GMASK) & 506 (IFM_FLAG0 | IFM_FLAG1)) == 0) 507 gmac = GMC_PAUSE_OFF; 508 /* Disable pause for 10/100 Mbps in half-duplex mode. */ 509 if ((((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) && 510 (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX || 511 IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T)) 512 gmac = GMC_PAUSE_OFF; 513 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac); 514 515 /* Enable PHY interrupt for FIFO underrun/overflow. */ 516 if (sc->msk_marvell_phy) 517 msk_phy_writereg(sc_if, PHY_ADDR_MARV, 518 PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR); 519 } else { 520 /* 521 * Link state changed to down. 522 * Disable PHY interrupts. 523 */ 524 if (sc->msk_marvell_phy) 525 msk_phy_writereg(sc_if, PHY_ADDR_MARV, 526 PHY_MARV_INT_MASK, 0); 527 /* Disable Rx/Tx MAC. */ 528 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); 529 gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); 530 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac); 531 /* Read again to ensure writing. */ 532 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); 533 } 534 535 MSK_IF_UNLOCK(sc_if); 536 } 537 538 static void 539 msk_setmulti(struct msk_if_softc *sc_if) 540 { 541 struct msk_softc *sc; 542 struct ifnet *ifp; 543 struct ifmultiaddr *ifma; 544 uint32_t mchash[2]; 545 uint32_t crc; 546 uint16_t mode; 547 548 sc = sc_if->msk_softc; 549 550 MSK_IF_LOCK_ASSERT(sc_if); 551 552 ifp = sc_if->msk_ifp; 553 554 bzero(mchash, sizeof(mchash)); 555 mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL); 556 mode |= GM_RXCR_UCF_ENA; 557 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 558 if ((ifp->if_flags & IFF_PROMISC) != 0) 559 mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 560 else if ((ifp->if_flags & IFF_ALLMULTI) != 0) { 561 mchash[0] = 0xffffffff; 562 mchash[1] = 0xffffffff; 563 } 564 } else { 565 IF_ADDR_LOCK(ifp); 566 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 567 if (ifma->ifma_addr->sa_family != AF_LINK) 568 continue; 569 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *) 570 ifma->ifma_addr), ETHER_ADDR_LEN); 571 /* Just want the 6 least significant bits. */ 572 crc &= 0x3f; 573 /* Set the corresponding bit in the hash table.
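 * A worked example (hypothetical hash value): if the 6-bit result is
 * 0x25 (37), then mchash[37 >> 5] == mchash[1] gets bit (37 & 0x1f) == 5
 * set:
 *
 *	mchash[37 >> 5] |= 1 << (37 & 0x1f);	-> mchash[1] |= 0x00000020
 *
 * The 64-bit filter is then programmed 16 bits at a time below:
 * mchash[0] goes into GM_MC_ADDR_H1/H2 and mchash[1] into
 * GM_MC_ADDR_H3/H4, so this example lands in bit 5 of GM_MC_ADDR_H3.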
*/ 574 mchash[crc >> 5] |= 1 << (crc & 0x1f); 575 } 576 IF_ADDR_UNLOCK(ifp); 577 mode |= GM_RXCR_MCF_ENA; 578 } 579 580 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1, 581 mchash[0] & 0xffff); 582 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2, 583 (mchash[0] >> 16) & 0xffff); 584 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3, 585 mchash[1] & 0xffff); 586 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4, 587 (mchash[1] >> 16) & 0xffff); 588 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode); 589 } 590 591 static void 592 msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp) 593 { 594 struct msk_softc *sc; 595 596 sc = sc_if->msk_softc; 597 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 598 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), 599 RX_VLAN_STRIP_ON); 600 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 601 TX_VLAN_TAG_ON); 602 } else { 603 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), 604 RX_VLAN_STRIP_OFF); 605 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 606 TX_VLAN_TAG_OFF); 607 } 608 } 609 610 static void 611 msk_setpromisc(struct msk_if_softc *sc_if) 612 { 613 struct msk_softc *sc; 614 struct ifnet *ifp; 615 uint16_t mode; 616 617 MSK_IF_LOCK_ASSERT(sc_if); 618 619 sc = sc_if->msk_softc; 620 ifp = sc_if->msk_ifp; 621 622 mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL); 623 if (ifp->if_flags & IFF_PROMISC) 624 mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 625 else 626 mode |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 627 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode); 628 } 629 630 static int 631 msk_init_rx_ring(struct msk_if_softc *sc_if) 632 { 633 struct msk_ring_data *rd; 634 struct msk_rxdesc *rxd; 635 int i, prod; 636 637 MSK_IF_LOCK_ASSERT(sc_if); 638 639 sc_if->msk_cdata.msk_rx_cons = 0; 640 sc_if->msk_cdata.msk_rx_prod = 0; 641 sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM; 642 643 rd = &sc_if->msk_rdata; 644 bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT); 645 prod = sc_if->msk_cdata.msk_rx_prod; 646 for (i = 0; i < MSK_RX_RING_CNT; i++) { 647 rxd = &sc_if->msk_cdata.msk_rxdesc[prod]; 648 rxd->rx_m = NULL; 649 rxd->rx_le = &rd->msk_rx_ring[prod]; 650 if (msk_newbuf(sc_if, prod) != 0) 651 return (ENOBUFS); 652 MSK_INC(prod, MSK_RX_RING_CNT); 653 } 654 655 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag, 656 sc_if->msk_cdata.msk_rx_ring_map, 657 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 658 659 /* Update prefetch unit. 
*/ 660 sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1; 661 CSR_WRITE_2(sc_if->msk_softc, 662 Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG), 663 sc_if->msk_cdata.msk_rx_prod); 664 665 return (0); 666 } 667 668 static int 669 msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if) 670 { 671 struct msk_ring_data *rd; 672 struct msk_rxdesc *rxd; 673 int i, prod; 674 675 MSK_IF_LOCK_ASSERT(sc_if); 676 677 sc_if->msk_cdata.msk_rx_cons = 0; 678 sc_if->msk_cdata.msk_rx_prod = 0; 679 sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM; 680 681 rd = &sc_if->msk_rdata; 682 bzero(rd->msk_jumbo_rx_ring, 683 sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT); 684 prod = sc_if->msk_cdata.msk_rx_prod; 685 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 686 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod]; 687 rxd->rx_m = NULL; 688 rxd->rx_le = &rd->msk_jumbo_rx_ring[prod]; 689 if (msk_jumbo_newbuf(sc_if, prod) != 0) 690 return (ENOBUFS); 691 MSK_INC(prod, MSK_JUMBO_RX_RING_CNT); 692 } 693 694 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 695 sc_if->msk_cdata.msk_jumbo_rx_ring_map, 696 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 697 698 sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1; 699 CSR_WRITE_2(sc_if->msk_softc, 700 Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG), 701 sc_if->msk_cdata.msk_rx_prod); 702 703 return (0); 704 } 705 706 static void 707 msk_init_tx_ring(struct msk_if_softc *sc_if) 708 { 709 struct msk_ring_data *rd; 710 struct msk_txdesc *txd; 711 int i; 712 713 sc_if->msk_cdata.msk_tso_mtu = 0; 714 sc_if->msk_cdata.msk_tx_prod = 0; 715 sc_if->msk_cdata.msk_tx_cons = 0; 716 sc_if->msk_cdata.msk_tx_cnt = 0; 717 718 rd = &sc_if->msk_rdata; 719 bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT); 720 for (i = 0; i < MSK_TX_RING_CNT; i++) { 721 txd = &sc_if->msk_cdata.msk_txdesc[i]; 722 txd->tx_m = NULL; 723 txd->tx_le = &rd->msk_tx_ring[i]; 724 } 725 726 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag, 727 sc_if->msk_cdata.msk_tx_ring_map, 728 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 729 } 730 731 static __inline void 732 msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx) 733 { 734 struct msk_rx_desc *rx_le; 735 struct msk_rxdesc *rxd; 736 struct mbuf *m; 737 738 rxd = &sc_if->msk_cdata.msk_rxdesc[idx]; 739 m = rxd->rx_m; 740 rx_le = rxd->rx_le; 741 rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER); 742 } 743 744 static __inline void 745 msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx) 746 { 747 struct msk_rx_desc *rx_le; 748 struct msk_rxdesc *rxd; 749 struct mbuf *m; 750 751 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx]; 752 m = rxd->rx_m; 753 rx_le = rxd->rx_le; 754 rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER); 755 } 756 757 static int 758 msk_newbuf(struct msk_if_softc *sc_if, int idx) 759 { 760 struct msk_rx_desc *rx_le; 761 struct msk_rxdesc *rxd; 762 struct mbuf *m; 763 bus_dma_segment_t segs[1]; 764 bus_dmamap_t map; 765 int nsegs; 766 767 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 768 if (m == NULL) 769 return (ENOBUFS); 770 771 m->m_len = m->m_pkthdr.len = MCLBYTES; 772 m_adj(m, ETHER_ALIGN); 773 774 if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag, 775 sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs, 776 BUS_DMA_NOWAIT) != 0) { 777 m_freem(m); 778 return (ENOBUFS); 779 } 780 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 781 782 rxd = &sc_if->msk_cdata.msk_rxdesc[idx]; 783 if (rxd->rx_m != NULL) { 784 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap, 
785 BUS_DMASYNC_POSTREAD); 786 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap); 787 } 788 map = rxd->rx_dmamap; 789 rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap; 790 sc_if->msk_cdata.msk_rx_sparemap = map; 791 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap, 792 BUS_DMASYNC_PREREAD); 793 rxd->rx_m = m; 794 rx_le = rxd->rx_le; 795 rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr)); 796 rx_le->msk_control = 797 htole32(segs[0].ds_len | OP_PACKET | HW_OWNER); 798 799 return (0); 800 } 801 802 static int 803 msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx) 804 { 805 struct msk_rx_desc *rx_le; 806 struct msk_rxdesc *rxd; 807 struct mbuf *m; 808 bus_dma_segment_t segs[1]; 809 bus_dmamap_t map; 810 int nsegs; 811 void *buf; 812 813 MGETHDR(m, M_DONTWAIT, MT_DATA); 814 if (m == NULL) 815 return (ENOBUFS); 816 buf = msk_jalloc(sc_if); 817 if (buf == NULL) { 818 m_freem(m); 819 return (ENOBUFS); 820 } 821 /* Attach the buffer to the mbuf. */ 822 MEXTADD(m, buf, MSK_JLEN, msk_jfree, (struct msk_if_softc *)sc_if, 0, 823 EXT_NET_DRV); 824 if ((m->m_flags & M_EXT) == 0) { 825 m_freem(m); 826 return (ENOBUFS); 827 } 828 m->m_pkthdr.len = m->m_len = MSK_JLEN; 829 m_adj(m, ETHER_ALIGN); 830 831 if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag, 832 sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs, 833 BUS_DMA_NOWAIT) != 0) { 834 m_freem(m); 835 return (ENOBUFS); 836 } 837 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 838 839 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx]; 840 if (rxd->rx_m != NULL) { 841 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, 842 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 843 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag, 844 rxd->rx_dmamap); 845 } 846 map = rxd->rx_dmamap; 847 rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap; 848 sc_if->msk_cdata.msk_jumbo_rx_sparemap = map; 849 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap, 850 BUS_DMASYNC_PREREAD); 851 rxd->rx_m = m; 852 rx_le = rxd->rx_le; 853 rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr)); 854 rx_le->msk_control = 855 htole32(segs[0].ds_len | OP_PACKET | HW_OWNER); 856 857 return (0); 858 } 859 860 /* 861 * Set media options. 862 */ 863 static int 864 msk_mediachange(struct ifnet *ifp) 865 { 866 struct msk_if_softc *sc_if; 867 struct mii_data *mii; 868 869 sc_if = ifp->if_softc; 870 871 MSK_IF_LOCK(sc_if); 872 mii = device_get_softc(sc_if->msk_miibus); 873 mii_mediachg(mii); 874 MSK_IF_UNLOCK(sc_if); 875 876 return (0); 877 } 878 879 /* 880 * Report current media status. 
881 */ 882 static void 883 msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 884 { 885 struct msk_if_softc *sc_if; 886 struct mii_data *mii; 887 888 sc_if = ifp->if_softc; 889 MSK_IF_LOCK(sc_if); 890 mii = device_get_softc(sc_if->msk_miibus); 891 892 mii_pollstat(mii); 893 MSK_IF_UNLOCK(sc_if); 894 ifmr->ifm_active = mii->mii_media_active; 895 ifmr->ifm_status = mii->mii_media_status; 896 } 897 898 static int 899 msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 900 { 901 struct msk_if_softc *sc_if; 902 struct ifreq *ifr; 903 struct mii_data *mii; 904 int error, mask; 905 906 sc_if = ifp->if_softc; 907 ifr = (struct ifreq *)data; 908 error = 0; 909 910 switch(command) { 911 case SIOCSIFMTU: 912 if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN) { 913 error = EINVAL; 914 break; 915 } 916 if (sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U && 917 ifr->ifr_mtu > MSK_MAX_FRAMELEN) { 918 error = EINVAL; 919 break; 920 } 921 MSK_IF_LOCK(sc_if); 922 ifp->if_mtu = ifr->ifr_mtu; 923 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 924 msk_init_locked(sc_if); 925 MSK_IF_UNLOCK(sc_if); 926 break; 927 case SIOCSIFFLAGS: 928 MSK_IF_LOCK(sc_if); 929 if ((ifp->if_flags & IFF_UP) != 0) { 930 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 931 if (((ifp->if_flags ^ sc_if->msk_if_flags) 932 & IFF_PROMISC) != 0) { 933 msk_setpromisc(sc_if); 934 msk_setmulti(sc_if); 935 } 936 } else { 937 if (sc_if->msk_detach == 0) 938 msk_init_locked(sc_if); 939 } 940 } else { 941 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 942 msk_stop(sc_if); 943 } 944 sc_if->msk_if_flags = ifp->if_flags; 945 MSK_IF_UNLOCK(sc_if); 946 break; 947 case SIOCADDMULTI: 948 case SIOCDELMULTI: 949 MSK_IF_LOCK(sc_if); 950 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 951 msk_setmulti(sc_if); 952 MSK_IF_UNLOCK(sc_if); 953 break; 954 case SIOCGIFMEDIA: 955 case SIOCSIFMEDIA: 956 mii = device_get_softc(sc_if->msk_miibus); 957 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 958 break; 959 case SIOCSIFCAP: 960 MSK_IF_LOCK(sc_if); 961 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 962 if ((mask & IFCAP_TXCSUM) != 0) { 963 ifp->if_capenable ^= IFCAP_TXCSUM; 964 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 && 965 (IFCAP_TXCSUM & ifp->if_capabilities) != 0) 966 ifp->if_hwassist |= MSK_CSUM_FEATURES; 967 else 968 ifp->if_hwassist &= ~MSK_CSUM_FEATURES; 969 } 970 if ((mask & IFCAP_VLAN_HWTAGGING) != 0) { 971 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 972 msk_setvlan(sc_if, ifp); 973 } 974 975 if ((mask & IFCAP_TSO4) != 0) { 976 ifp->if_capenable ^= IFCAP_TSO4; 977 if ((IFCAP_TSO4 & ifp->if_capenable) != 0 && 978 (IFCAP_TSO4 & ifp->if_capabilities) != 0) 979 ifp->if_hwassist |= CSUM_TSO; 980 else 981 ifp->if_hwassist &= ~CSUM_TSO; 982 } 983 VLAN_CAPABILITIES(ifp); 984 MSK_IF_UNLOCK(sc_if); 985 break; 986 default: 987 error = ether_ioctl(ifp, command, data); 988 break; 989 } 990 991 return (error); 992 } 993 994 static int 995 mskc_probe(device_t dev) 996 { 997 struct msk_product *mp; 998 uint16_t vendor, devid; 999 int i; 1000 1001 vendor = pci_get_vendor(dev); 1002 devid = pci_get_device(dev); 1003 mp = msk_products; 1004 for (i = 0; i < sizeof(msk_products)/sizeof(msk_products[0]); 1005 i++, mp++) { 1006 if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) { 1007 device_set_desc(dev, mp->msk_name); 1008 return (BUS_PROBE_DEFAULT); 1009 } 1010 } 1011 1012 return (ENXIO); 1013 } 1014 1015 static int 1016 mskc_setup_rambuffer(struct msk_softc *sc) 1017 { 1018 int totqsize, minqsize; 1019 int avail, next; 
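	/*
	 * A worked example of the carve-up done below, assuming purely for
	 * illustration a single-port adapter reporting 48KB of SRAM and
	 * hypothetical minimums of MSK_MIN_RXQ_SIZE = MSK_MIN_TXQ_SIZE = 2KB
	 * with MSK_RAM_QUOTA_RX = 80:
	 *
	 *	totqsize = 48, minqsize = 4, avail = 48 - 4 = 44
	 *	rxqsize  = (44 * 80) / 100 + 2 = 37KB
	 *	txqsize  = (44 - 35) + 2      = 11KB	(37 + 11 == 48)
	 *
	 * The Rx queue then occupies SRAM bytes 0x0 to 37*1024-1 and the Tx
	 * queue the remainder, as printed in the bootverbose messages.
	 */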
1020 int i; 1021 uint8_t val; 1022 1023 /* Get adapter SRAM size. */ 1024 val = CSR_READ_1(sc, B2_E_0); 1025 sc->msk_ramsize = (val == 0) ? 128 : val * 4; 1026 if (sc->msk_hw_id == CHIP_ID_YUKON_FE) 1027 sc->msk_ramsize = 4 * 4; 1028 if (bootverbose) 1029 device_printf(sc->msk_dev, 1030 "RAM buffer size : %dKB\n", sc->msk_ramsize); 1031 1032 totqsize = sc->msk_ramsize * sc->msk_num_port; 1033 minqsize = MSK_MIN_RXQ_SIZE + MSK_MIN_TXQ_SIZE; 1034 if (minqsize > sc->msk_ramsize) 1035 minqsize = sc->msk_ramsize; 1036 1037 if (minqsize * sc->msk_num_port > totqsize) { 1038 device_printf(sc->msk_dev, 1039 "not enough RAM buffer memory : %d/%dKB\n", 1040 minqsize * sc->msk_num_port, totqsize); 1041 return (ENOSPC); 1042 } 1043 1044 avail = totqsize; 1045 if (sc->msk_num_port > 1) { 1046 /* 1047 * Divide up the memory evenly so that everyone gets a 1048 * fair share for dual port adapters. 1049 */ 1050 avail = sc->msk_ramsize; 1051 } 1052 1053 /* Take away the minimum memory for active queues. */ 1054 avail -= minqsize; 1055 /* Rx queue gets the minimum + 80% of the rest. */ 1056 sc->msk_rxqsize = 1057 (avail * MSK_RAM_QUOTA_RX) / 100 + MSK_MIN_RXQ_SIZE; 1058 avail -= (sc->msk_rxqsize - MSK_MIN_RXQ_SIZE); 1059 sc->msk_txqsize = avail + MSK_MIN_TXQ_SIZE; 1060 1061 for (i = 0, next = 0; i < sc->msk_num_port; i++) { 1062 sc->msk_rxqstart[i] = next; 1063 sc->msk_rxqend[i] = next + (sc->msk_rxqsize * 1024) - 1; 1064 next = sc->msk_rxqend[i] + 1; 1065 sc->msk_txqstart[i] = next; 1066 sc->msk_txqend[i] = next + (sc->msk_txqsize * 1024) - 1; 1067 next = sc->msk_txqend[i] + 1; 1068 if (bootverbose) { 1069 device_printf(sc->msk_dev, 1070 "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i, 1071 sc->msk_rxqsize, sc->msk_rxqstart[i], 1072 sc->msk_rxqend[i]); 1073 device_printf(sc->msk_dev, 1074 "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i, 1075 sc->msk_txqsize, sc->msk_txqstart[i], 1076 sc->msk_txqend[i]); 1077 } 1078 } 1079 1080 return (0); 1081 } 1082 1083 static void 1084 msk_phy_power(struct msk_softc *sc, int mode) 1085 { 1086 uint32_t val; 1087 int i; 1088 1089 switch (mode) { 1090 case MSK_PHY_POWERUP: 1091 /* Switch power to VCC (WA for VAUX problem). */ 1092 CSR_WRITE_1(sc, B0_POWER_CTRL, 1093 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); 1094 /* Disable Core Clock Division, set Clock Select to 0. */ 1095 CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS); 1096 1097 val = 0; 1098 if (sc->msk_hw_id == CHIP_ID_YUKON_XL && 1099 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { 1100 /* Enable bits are inverted. */ 1101 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | 1102 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS | 1103 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS; 1104 } 1105 /* 1106 * Enable PCI & Core Clock, enable clock gating for both Links. 1107 */ 1108 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val); 1109 1110 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4); 1111 val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); 1112 if (sc->msk_hw_id == CHIP_ID_YUKON_XL && 1113 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { 1114 /* Deassert Low Power for 1st PHY. */ 1115 val |= PCI_Y2_PHY1_COMA; 1116 if (sc->msk_num_port > 1) 1117 val |= PCI_Y2_PHY2_COMA; 1118 } else if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) { 1119 uint32_t our; 1120 1121 CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON); 1122 1123 /* Enable all clocks. 
*/ 1124 pci_write_config(sc->msk_dev, PCI_OUR_REG_3, 0, 4); 1125 our = pci_read_config(sc->msk_dev, PCI_OUR_REG_4, 4); 1126 our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN| 1127 PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST); 1128 /* Set all bits to 0 except bits 15..12. */ 1129 pci_write_config(sc->msk_dev, PCI_OUR_REG_4, our, 4); 1130 /* Set to default value. */ 1131 pci_write_config(sc->msk_dev, PCI_OUR_REG_5, 0, 4); 1132 } 1133 /* Release PHY from PowerDown/COMA mode. */ 1134 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4); 1135 for (i = 0; i < sc->msk_num_port; i++) { 1136 CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL), 1137 GMLC_RST_SET); 1138 CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL), 1139 GMLC_RST_CLR); 1140 } 1141 break; 1142 case MSK_PHY_POWERDOWN: 1143 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4); 1144 val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD; 1145 if (sc->msk_hw_id == CHIP_ID_YUKON_XL && 1146 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { 1147 val &= ~PCI_Y2_PHY1_COMA; 1148 if (sc->msk_num_port > 1) 1149 val &= ~PCI_Y2_PHY2_COMA; 1150 } 1151 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4); 1152 1153 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | 1154 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS | 1155 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS; 1156 if (sc->msk_hw_id == CHIP_ID_YUKON_XL && 1157 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { 1158 /* Enable bits are inverted. */ 1159 val = 0; 1160 } 1161 /* 1162 * Disable PCI & Core Clock, disable clock gating for 1163 * both Links. 1164 */ 1165 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val); 1166 CSR_WRITE_1(sc, B0_POWER_CTRL, 1167 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF); 1168 break; 1169 default: 1170 break; 1171 } 1172 } 1173 1174 static void 1175 mskc_reset(struct msk_softc *sc) 1176 { 1177 bus_addr_t addr; 1178 uint16_t status; 1179 uint32_t val; 1180 int i; 1181 1182 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR); 1183 1184 /* Disable ASF. */ 1185 if (sc->msk_hw_id < CHIP_ID_YUKON_XL) { 1186 CSR_WRITE_4(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); 1187 CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE); 1188 } 1189 /* 1190 * Since we disabled ASF, S/W reset is required for Power Management. 1191 */ 1192 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 1193 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR); 1194 1195 /* Clear all error bits in the PCI status register. */ 1196 status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2); 1197 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); 1198 1199 pci_write_config(sc->msk_dev, PCIR_STATUS, status | 1200 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT | 1201 PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2); 1202 CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR); 1203 1204 switch (sc->msk_bustype) { 1205 case MSK_PEX_BUS: 1206 /* Clear all PEX errors. */ 1207 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff); 1208 val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT); 1209 if ((val & PEX_RX_OV) != 0) { 1210 sc->msk_intrmask &= ~Y2_IS_HW_ERR; 1211 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP; 1212 } 1213 break; 1214 case MSK_PCI_BUS: 1215 case MSK_PCIX_BUS: 1216 /* Set Cache Line Size to 2(8bytes) if configured to 0. */ 1217 val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1); 1218 if (val == 0) 1219 pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1); 1220 if (sc->msk_bustype == MSK_PCIX_BUS) { 1221 /* Set Cache Line Size opt. */ 1222 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4); 1223 val |= PCI_CLS_OPT; 1224 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4); 1225 } 1226 break; 1227 } 1228 /* Set PHY power state. 
*/ 1229 msk_phy_power(sc, MSK_PHY_POWERUP); 1230 1231 /* Reset GPHY/GMAC Control */ 1232 for (i = 0; i < sc->msk_num_port; i++) { 1233 /* GPHY Control reset. */ 1234 CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET); 1235 CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR); 1236 /* GMAC Control reset. */ 1237 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET); 1238 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR); 1239 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF); 1240 } 1241 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 1242 1243 /* LED On. */ 1244 CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON); 1245 1246 /* Clear TWSI IRQ. */ 1247 CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ); 1248 1249 /* Turn off hardware timer. */ 1250 CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP); 1251 CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ); 1252 1253 /* Turn off descriptor polling. */ 1254 CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP); 1255 1256 /* Turn off time stamps. */ 1257 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP); 1258 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); 1259 1260 /* Configure timeout values. */ 1261 for (i = 0; i < sc->msk_num_port; i++) { 1262 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET); 1263 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR); 1264 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1), 1265 MSK_RI_TO_53); 1266 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1), 1267 MSK_RI_TO_53); 1268 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1), 1269 MSK_RI_TO_53); 1270 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1), 1271 MSK_RI_TO_53); 1272 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1), 1273 MSK_RI_TO_53); 1274 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1), 1275 MSK_RI_TO_53); 1276 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2), 1277 MSK_RI_TO_53); 1278 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2), 1279 MSK_RI_TO_53); 1280 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2), 1281 MSK_RI_TO_53); 1282 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2), 1283 MSK_RI_TO_53); 1284 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2), 1285 MSK_RI_TO_53); 1286 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2), 1287 MSK_RI_TO_53); 1288 } 1289 1290 /* Disable all interrupts. */ 1291 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 1292 CSR_READ_4(sc, B0_HWE_IMSK); 1293 CSR_WRITE_4(sc, B0_IMSK, 0); 1294 CSR_READ_4(sc, B0_IMSK); 1295 1296 /* 1297 * On dual port PCI-X card, there is an problem where status 1298 * can be received out of order due to split transactions. 1299 */ 1300 if (sc->msk_bustype == MSK_PCIX_BUS && sc->msk_num_port > 1) { 1301 int pcix; 1302 uint16_t pcix_cmd; 1303 1304 if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &pcix) == 0) { 1305 pcix_cmd = pci_read_config(sc->msk_dev, pcix + 2, 2); 1306 /* Clear Max Outstanding Split Transactions. */ 1307 pcix_cmd &= ~0x70; 1308 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); 1309 pci_write_config(sc->msk_dev, pcix + 2, pcix_cmd, 2); 1310 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 1311 } 1312 } 1313 if (sc->msk_bustype == MSK_PEX_BUS) { 1314 uint16_t v, width; 1315 1316 v = pci_read_config(sc->msk_dev, PEX_DEV_CTRL, 2); 1317 /* Change Max. Read Request Size to 4096 bytes. 
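 * In the standard PCI Express Device Control register the Max Read
 * Request Size field encodes the size as 128 << value, so, assuming
 * PEX_DC_MAX_RD_RQ_SIZE() simply shifts its argument into that field,
 * the value 5 used below selects 128 << 5 = 4096 bytes (0 would be
 * 128 bytes and 2 the common 512-byte default).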
*/ 1318 v &= ~PEX_DC_MAX_RRS_MSK; 1319 v |= PEX_DC_MAX_RD_RQ_SIZE(5); 1320 pci_write_config(sc->msk_dev, PEX_DEV_CTRL, v, 2); 1321 width = pci_read_config(sc->msk_dev, PEX_LNK_STAT, 2); 1322 width = (width & PEX_LS_LINK_WI_MSK) >> 4; 1323 v = pci_read_config(sc->msk_dev, PEX_LNK_CAP, 2); 1324 v = (v & PEX_LS_LINK_WI_MSK) >> 4; 1325 if (v != width) 1326 device_printf(sc->msk_dev, 1327 "negotiated width of link(x%d) != " 1328 "max. width of link(x%d)\n", width, v); 1329 } 1330 1331 /* Clear status list. */ 1332 bzero(sc->msk_stat_ring, 1333 sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT); 1334 sc->msk_stat_cons = 0; 1335 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map, 1336 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1337 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET); 1338 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR); 1339 /* Set the status list base address. */ 1340 addr = sc->msk_stat_ring_paddr; 1341 CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr)); 1342 CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr)); 1343 /* Set the status list last index. */ 1344 CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1); 1345 if (HW_FEATURE(sc, HWF_WA_DEV_43_418)) { 1346 /* WA for dev. #4.3 */ 1347 CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK); 1348 /* WA for dev. #4.18 */ 1349 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21); 1350 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07); 1351 } else { 1352 CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a); 1353 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10); 1354 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 1355 HW_FEATURE(sc, HWF_WA_DEV_4109) ? 0x10 : 0x04); 1356 CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190); 1357 } 1358 /* 1359 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI. 1360 */ 1361 CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000)); 1362 1363 /* Enable status unit. */ 1364 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON); 1365 1366 CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START); 1367 CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START); 1368 CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START); 1369 } 1370 1371 static int 1372 msk_probe(device_t dev) 1373 { 1374 struct msk_softc *sc; 1375 char desc[100]; 1376 1377 sc = device_get_softc(device_get_parent(dev)); 1378 /* 1379 * Not much to do here. We always know there will be 1380 * at least one GMAC present, and if there are two, 1381 * mskc_attach() will create a second device instance 1382 * for us. 1383 */ 1384 snprintf(desc, sizeof(desc), 1385 "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x", 1386 model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id, 1387 sc->msk_hw_rev); 1388 device_set_desc_copy(dev, desc); 1389 1390 return (BUS_PROBE_DEFAULT); 1391 } 1392 1393 static int 1394 msk_attach(device_t dev) 1395 { 1396 struct msk_softc *sc; 1397 struct msk_if_softc *sc_if; 1398 struct ifnet *ifp; 1399 int i, port, error; 1400 uint8_t eaddr[6]; 1401 1402 if (dev == NULL) 1403 return (EINVAL); 1404 1405 error = 0; 1406 sc_if = device_get_softc(dev); 1407 sc = device_get_softc(device_get_parent(dev)); 1408 port = *(int *)device_get_ivars(dev); 1409 1410 sc_if->msk_if_dev = dev; 1411 sc_if->msk_port = port; 1412 sc_if->msk_softc = sc; 1413 sc->msk_if[port] = sc_if; 1414 /* Setup Tx/Rx queue register offsets. 
*/ 1415 if (port == MSK_PORT_A) { 1416 sc_if->msk_txq = Q_XA1; 1417 sc_if->msk_txsq = Q_XS1; 1418 sc_if->msk_rxq = Q_R1; 1419 } else { 1420 sc_if->msk_txq = Q_XA2; 1421 sc_if->msk_txsq = Q_XS2; 1422 sc_if->msk_rxq = Q_R2; 1423 } 1424 1425 callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0); 1426 TASK_INIT(&sc_if->msk_link_task, 0, msk_link_task, sc_if); 1427 1428 if ((error = msk_txrx_dma_alloc(sc_if)) != 0) 1429 goto fail; 1430 1431 ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER); 1432 if (ifp == NULL) { 1433 device_printf(sc_if->msk_if_dev, "can not if_alloc()\n"); 1434 error = ENOSPC; 1435 goto fail; 1436 } 1437 ifp->if_softc = sc_if; 1438 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1439 ifp->if_mtu = ETHERMTU; 1440 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1441 /* 1442 * IFCAP_RXCSUM capability is intentionally disabled as the hardware 1443 * has a serious bug in Rx checksum offload for all Yukon II family 1444 * hardware. It seems there is a workaround to make it work sometimes. 1445 * However, the workaround also has to check OP code sequences to 1446 * verify whether the OP code is correct. Sometimes it would have to 1447 * compute the IP/TCP/UDP checksum in the driver in order to verify 1448 * the correctness of the checksum computed by the hardware. If you 1449 * have to compute the checksum in software to verify the hardware's 1450 * checksum, why have the hardware compute the checksum at all? I think 1451 * there is no reason to spend time making Rx checksum offload work on Yukon II hardware. 1452 */ 1453 ifp->if_capabilities = IFCAP_TXCSUM; 1454 ifp->if_hwassist = MSK_CSUM_FEATURES; 1455 if (sc->msk_hw_id != CHIP_ID_YUKON_EC_U) { 1456 /* It seems Yukon EC Ultra doesn't support TSO. */ 1457 ifp->if_capabilities |= IFCAP_TSO4; 1458 ifp->if_hwassist |= CSUM_TSO; 1459 } 1460 ifp->if_capenable = ifp->if_capabilities; 1461 ifp->if_ioctl = msk_ioctl; 1462 ifp->if_start = msk_start; 1463 ifp->if_timer = 0; 1464 ifp->if_watchdog = NULL; 1465 ifp->if_init = msk_init; 1466 IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1); 1467 ifp->if_snd.ifq_drv_maxlen = MSK_TX_RING_CNT - 1; 1468 IFQ_SET_READY(&ifp->if_snd); 1469 1470 TASK_INIT(&sc_if->msk_tx_task, 1, msk_tx_task, ifp); 1471 1472 /* 1473 * Get station address for this interface. Note that 1474 * dual port cards actually come with three station 1475 * addresses: one for each port, plus an extra. The 1476 * extra one is used by the SysKonnect driver software 1477 * as a 'virtual' station address for when both ports 1478 * are operating in failover mode. Currently we don't 1479 * use this extra address. 1480 */ 1481 MSK_IF_LOCK(sc_if); 1482 for (i = 0; i < ETHER_ADDR_LEN; i++) 1483 eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i); 1484 1485 /* 1486 * Call MI attach routine. Can't hold locks when calling into ether_*. 1487 */ 1488 MSK_IF_UNLOCK(sc_if); 1489 ether_ifattach(ifp, eaddr); 1490 MSK_IF_LOCK(sc_if); 1491 1492 /* VLAN capability setup */ 1493 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; 1494 if (ifp->if_capabilities & IFCAP_HWCSUM) 1495 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 1496 ifp->if_capenable = ifp->if_capabilities; 1497 1498 /* 1499 * Tell the upper layer(s) we support long frames. 1500 * Must appear after the call to ether_ifattach() because 1501 * ether_ifattach() sets ifi_hdrlen to the default value. 1502 */ 1503 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1504 1505 /* 1506 * Do miibus setup.
1507 */ 1508 MSK_IF_UNLOCK(sc_if); 1509 error = mii_phy_probe(dev, &sc_if->msk_miibus, msk_mediachange, 1510 msk_mediastatus); 1511 if (error != 0) { 1512 device_printf(sc_if->msk_if_dev, "no PHY found!\n"); 1513 ether_ifdetach(ifp); 1514 error = ENXIO; 1515 goto fail; 1516 } 1517 /* Check whether PHY Id is MARVELL. */ 1518 if (msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_ID0) 1519 == PHY_MARV_ID0_VAL) 1520 sc->msk_marvell_phy = 1; 1521 1522 fail: 1523 if (error != 0) { 1524 /* Access should be ok even though lock has been dropped */ 1525 sc->msk_if[port] = NULL; 1526 msk_detach(dev); 1527 } 1528 1529 return (error); 1530 } 1531 1532 /* 1533 * Attach the interface. Allocate softc structures, do ifmedia 1534 * setup and ethernet/BPF attach. 1535 */ 1536 static int 1537 mskc_attach(device_t dev) 1538 { 1539 struct msk_softc *sc; 1540 int error, msic, *port, reg; 1541 1542 sc = device_get_softc(dev); 1543 sc->msk_dev = dev; 1544 mtx_init(&sc->msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 1545 MTX_DEF); 1546 1547 /* 1548 * Map control/status registers. 1549 */ 1550 pci_enable_busmaster(dev); 1551 1552 /* Allocate I/O resource */ 1553 #ifdef MSK_USEIOSPACE 1554 sc->msk_res_spec = msk_res_spec_io; 1555 #else 1556 sc->msk_res_spec = msk_res_spec_mem; 1557 #endif 1558 sc->msk_irq_spec = msk_irq_spec_legacy; 1559 error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res); 1560 if (error) { 1561 if (sc->msk_res_spec == msk_res_spec_mem) 1562 sc->msk_res_spec = msk_res_spec_io; 1563 else 1564 sc->msk_res_spec = msk_res_spec_mem; 1565 error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res); 1566 if (error) { 1567 device_printf(dev, "couldn't allocate %s resources\n", 1568 sc->msk_res_spec == msk_res_spec_mem ? "memory" : 1569 "I/O"); 1570 mtx_destroy(&sc->msk_mtx); 1571 return (ENXIO); 1572 } 1573 } 1574 1575 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR); 1576 sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID); 1577 sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f; 1578 /* Bail out if chip is not recognized. */ 1579 if (sc->msk_hw_id < CHIP_ID_YUKON_XL || 1580 sc->msk_hw_id > CHIP_ID_YUKON_FE) { 1581 device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n", 1582 sc->msk_hw_id, sc->msk_hw_rev); 1583 mtx_destroy(&sc->msk_mtx); 1584 return (ENXIO); 1585 } 1586 1587 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 1588 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 1589 OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW, 1590 &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I", 1591 "max number of Rx events to process"); 1592 1593 sc->msk_process_limit = MSK_PROC_DEFAULT; 1594 error = resource_int_value(device_get_name(dev), device_get_unit(dev), 1595 "process_limit", &sc->msk_process_limit); 1596 if (error == 0) { 1597 if (sc->msk_process_limit < MSK_PROC_MIN || 1598 sc->msk_process_limit > MSK_PROC_MAX) { 1599 device_printf(dev, "process_limit value out of range; " 1600 "using default: %d\n", MSK_PROC_DEFAULT); 1601 sc->msk_process_limit = MSK_PROC_DEFAULT; 1602 } 1603 } 1604 1605 /* Soft reset. */ 1606 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 1607 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR); 1608 sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP); 1609 if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S') 1610 sc->msk_coppertype = 0; 1611 else 1612 sc->msk_coppertype = 1; 1613 /* Check number of MACs. 
*/ 1614 sc->msk_num_port = 1; 1615 if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) == 1616 CFG_DUAL_MAC_MSK) { 1617 if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC)) 1618 sc->msk_num_port++; 1619 } 1620 1621 /* Check bus type. */ 1622 if (pci_find_extcap(sc->msk_dev, PCIY_EXPRESS, ®) == 0) 1623 sc->msk_bustype = MSK_PEX_BUS; 1624 else if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, ®) == 0) 1625 sc->msk_bustype = MSK_PCIX_BUS; 1626 else 1627 sc->msk_bustype = MSK_PCI_BUS; 1628 1629 /* Get H/W features(bugs). */ 1630 switch (sc->msk_hw_id) { 1631 case CHIP_ID_YUKON_EC: 1632 sc->msk_clock = 125; /* 125 Mhz */ 1633 if (sc->msk_hw_rev == CHIP_REV_YU_EC_A1) { 1634 sc->msk_hw_feature = 1635 HWF_WA_DEV_42 | HWF_WA_DEV_46 | HWF_WA_DEV_43_418 | 1636 HWF_WA_DEV_420 | HWF_WA_DEV_423 | 1637 HWF_WA_DEV_424 | HWF_WA_DEV_425 | HWF_WA_DEV_427 | 1638 HWF_WA_DEV_428 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 | 1639 HWF_WA_DEV_4152 | HWF_WA_DEV_4167; 1640 } else { 1641 /* A2/A3 */ 1642 sc->msk_hw_feature = 1643 HWF_WA_DEV_424 | HWF_WA_DEV_425 | HWF_WA_DEV_427 | 1644 HWF_WA_DEV_428 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 | 1645 HWF_WA_DEV_4152 | HWF_WA_DEV_4167; 1646 } 1647 break; 1648 case CHIP_ID_YUKON_EC_U: 1649 sc->msk_clock = 125; /* 125 Mhz */ 1650 if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) { 1651 sc->msk_hw_feature = HWF_WA_DEV_427 | HWF_WA_DEV_483 | 1652 HWF_WA_DEV_4109; 1653 } else if (sc->msk_hw_rev == CHIP_REV_YU_EC_A1) { 1654 uint16_t v; 1655 1656 sc->msk_hw_feature = HWF_WA_DEV_427 | HWF_WA_DEV_4109 | 1657 HWF_WA_DEV_4185; 1658 v = CSR_READ_2(sc, Q_ADDR(Q_XA1, Q_WM)); 1659 if (v == 0) 1660 sc->msk_hw_feature |= HWF_WA_DEV_4185CS | 1661 HWF_WA_DEV_4200; 1662 } 1663 break; 1664 case CHIP_ID_YUKON_FE: 1665 sc->msk_clock = 100; /* 100 Mhz */ 1666 sc->msk_hw_feature = HWF_WA_DEV_427 | HWF_WA_DEV_4109 | 1667 HWF_WA_DEV_4152 | HWF_WA_DEV_4167; 1668 break; 1669 case CHIP_ID_YUKON_XL: 1670 sc->msk_clock = 156; /* 156 Mhz */ 1671 switch (sc->msk_hw_rev) { 1672 case CHIP_REV_YU_XL_A0: 1673 sc->msk_hw_feature = 1674 HWF_WA_DEV_427 | HWF_WA_DEV_463 | HWF_WA_DEV_472 | 1675 HWF_WA_DEV_479 | HWF_WA_DEV_483 | HWF_WA_DEV_4115 | 1676 HWF_WA_DEV_4152 | HWF_WA_DEV_4167; 1677 break; 1678 case CHIP_REV_YU_XL_A1: 1679 sc->msk_hw_feature = 1680 HWF_WA_DEV_427 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 | 1681 HWF_WA_DEV_4115 | HWF_WA_DEV_4152 | HWF_WA_DEV_4167; 1682 break; 1683 case CHIP_REV_YU_XL_A2: 1684 sc->msk_hw_feature = 1685 HWF_WA_DEV_427 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 | 1686 HWF_WA_DEV_4115 | HWF_WA_DEV_4167; 1687 break; 1688 case CHIP_REV_YU_XL_A3: 1689 sc->msk_hw_feature = 1690 HWF_WA_DEV_427 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 | 1691 HWF_WA_DEV_4115; 1692 } 1693 break; 1694 default: 1695 sc->msk_clock = 156; /* 156 Mhz */ 1696 sc->msk_hw_feature = 0; 1697 } 1698 1699 /* Allocate IRQ resources. */ 1700 msic = pci_msi_count(dev); 1701 if (bootverbose) 1702 device_printf(dev, "MSI count : %d\n", msic); 1703 /* 1704 * The Yukon II reports it can handle two messages, one for each 1705 * possible port. We go ahead and allocate two messages and only 1706 * setup a handler for both if we have a dual port card. 1707 * 1708 * XXX: I haven't untangled the interrupt handler to handle dual 1709 * port cards with separate MSI messages, so for now I disable MSI 1710 * on dual port cards. 
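 *
 * If the handler were split up, a per-port setup would presumably look
 * something like the rough sketch below (hypothetical, not wired up
 * here); each message would also need a per-port argument instead of
 * the shared softc:
 *
 *	for (i = 0; i < 2; i++)
 *		error = bus_setup_intr(dev, sc->msk_irq[i],
 *		    INTR_TYPE_NET | INTR_MPSAFE, msk_intr, NULL,
 *		    sc, &sc->msk_intrhand[i]);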
1711 */ 1712 if (msic == 2 && msi_disable == 0 && sc->msk_num_port == 1 && 1713 pci_alloc_msi(dev, &msic) == 0) { 1714 if (msic == 2) { 1715 sc->msk_msi = 1; 1716 sc->msk_irq_spec = msk_irq_spec_msi; 1717 } else 1718 pci_release_msi(dev); 1719 } 1720 1721 error = bus_alloc_resources(dev, sc->msk_irq_spec, sc->msk_irq); 1722 if (error) { 1723 device_printf(dev, "couldn't allocate IRQ resources\n"); 1724 goto fail; 1725 } 1726 1727 if ((error = msk_status_dma_alloc(sc)) != 0) 1728 goto fail; 1729 1730 /* Set base interrupt mask. */ 1731 sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU; 1732 sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR | 1733 Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP; 1734 1735 /* Reset the adapter. */ 1736 mskc_reset(sc); 1737 1738 if ((error = mskc_setup_rambuffer(sc)) != 0) 1739 goto fail; 1740 1741 sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1); 1742 if (sc->msk_devs[MSK_PORT_A] == NULL) { 1743 device_printf(dev, "failed to add child for PORT_A\n"); 1744 error = ENXIO; 1745 goto fail; 1746 } 1747 port = malloc(sizeof(int), M_DEVBUF, M_WAITOK); 1748 if (port == NULL) { 1749 device_printf(dev, "failed to allocate memory for " 1750 "ivars of PORT_A\n"); 1751 error = ENXIO; 1752 goto fail; 1753 } 1754 *port = MSK_PORT_A; 1755 device_set_ivars(sc->msk_devs[MSK_PORT_A], port); 1756 1757 if (sc->msk_num_port > 1) { 1758 sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1); 1759 if (sc->msk_devs[MSK_PORT_B] == NULL) { 1760 device_printf(dev, "failed to add child for PORT_B\n"); 1761 error = ENXIO; 1762 goto fail; 1763 } 1764 port = malloc(sizeof(int), M_DEVBUF, M_WAITOK); 1765 if (port == NULL) { 1766 device_printf(dev, "failed to allocate memory for " 1767 "ivars of PORT_B\n"); 1768 error = ENXIO; 1769 goto fail; 1770 } 1771 *port = MSK_PORT_B; 1772 device_set_ivars(sc->msk_devs[MSK_PORT_B], port); 1773 } 1774 1775 error = bus_generic_attach(dev); 1776 if (error) { 1777 device_printf(dev, "failed to attach port(s)\n"); 1778 goto fail; 1779 } 1780 1781 TASK_INIT(&sc->msk_int_task, 0, msk_int_task, sc); 1782 sc->msk_tq = taskqueue_create_fast("msk_taskq", M_WAITOK, 1783 taskqueue_thread_enqueue, &sc->msk_tq); 1784 taskqueue_start_threads(&sc->msk_tq, 1, PI_NET, "%s taskq", 1785 device_get_nameunit(sc->msk_dev)); 1786 /* Hook interrupt last to avoid having to lock softc. */ 1787 error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET | 1788 INTR_MPSAFE, msk_intr, NULL, sc, &sc->msk_intrhand[0]); 1789 1790 if (error != 0) { 1791 device_printf(dev, "couldn't set up interrupt handler\n"); 1792 taskqueue_free(sc->msk_tq); 1793 sc->msk_tq = NULL; 1794 goto fail; 1795 } 1796 fail: 1797 if (error != 0) 1798 mskc_detach(dev); 1799 1800 return (error); 1801 } 1802 1803 /* 1804 * Shutdown hardware and free up resources. This can be called any 1805 * time after the mutex has been initialized. It is called in both 1806 * the error case in attach and the normal detach case so it needs 1807 * to be careful about only freeing resources that have actually been 1808 * allocated. 
1809 */ 1810 static int 1811 msk_detach(device_t dev) 1812 { 1813 struct msk_softc *sc; 1814 struct msk_if_softc *sc_if; 1815 struct ifnet *ifp; 1816 1817 sc_if = device_get_softc(dev); 1818 KASSERT(mtx_initialized(&sc_if->msk_softc->msk_mtx), 1819 ("msk mutex not initialized in msk_detach")); 1820 MSK_IF_LOCK(sc_if); 1821 1822 ifp = sc_if->msk_ifp; 1823 if (device_is_attached(dev)) { 1824 /* XXX */ 1825 sc_if->msk_detach = 1; 1826 msk_stop(sc_if); 1827 /* Can't hold locks while calling detach. */ 1828 MSK_IF_UNLOCK(sc_if); 1829 callout_drain(&sc_if->msk_tick_ch); 1830 taskqueue_drain(taskqueue_fast, &sc_if->msk_tx_task); 1831 taskqueue_drain(taskqueue_swi, &sc_if->msk_link_task); 1832 ether_ifdetach(ifp); 1833 MSK_IF_LOCK(sc_if); 1834 } 1835 1836 /* 1837 * We're generally called from mskc_detach() which is using 1838 * device_delete_child() to get to here. It's already trashed 1839 * miibus for us, so don't do it here or we'll panic. 1840 * 1841 * if (sc_if->msk_miibus != NULL) { 1842 * device_delete_child(dev, sc_if->msk_miibus); 1843 * sc_if->msk_miibus = NULL; 1844 * } 1845 */ 1846 1847 msk_txrx_dma_free(sc_if); 1848 bus_generic_detach(dev); 1849 1850 if (ifp) 1851 if_free(ifp); 1852 sc = sc_if->msk_softc; 1853 sc->msk_if[sc_if->msk_port] = NULL; 1854 MSK_IF_UNLOCK(sc_if); 1855 1856 return (0); 1857 } 1858 1859 static int 1860 mskc_detach(device_t dev) 1861 { 1862 struct msk_softc *sc; 1863 1864 sc = device_get_softc(dev); 1865 KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized")); 1866 1867 if (device_is_alive(dev)) { 1868 if (sc->msk_devs[MSK_PORT_A] != NULL) { 1869 free(device_get_ivars(sc->msk_devs[MSK_PORT_A]), 1870 M_DEVBUF); 1871 device_delete_child(dev, sc->msk_devs[MSK_PORT_A]); 1872 } 1873 if (sc->msk_devs[MSK_PORT_B] != NULL) { 1874 free(device_get_ivars(sc->msk_devs[MSK_PORT_B]), 1875 M_DEVBUF); 1876 device_delete_child(dev, sc->msk_devs[MSK_PORT_B]); 1877 } 1878 bus_generic_detach(dev); 1879 } 1880 1881 /* Disable all interrupts. */ 1882 CSR_WRITE_4(sc, B0_IMSK, 0); 1883 CSR_READ_4(sc, B0_IMSK); 1884 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 1885 CSR_READ_4(sc, B0_HWE_IMSK); 1886 1887 /* LED Off. */ 1888 CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF); 1889 1890 /* Put hardware reset. */ 1891 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 1892 1893 msk_status_dma_free(sc); 1894 1895 if (sc->msk_tq != NULL) { 1896 taskqueue_drain(sc->msk_tq, &sc->msk_int_task); 1897 taskqueue_free(sc->msk_tq); 1898 sc->msk_tq = NULL; 1899 } 1900 if (sc->msk_intrhand[0]) { 1901 bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand[0]); 1902 sc->msk_intrhand[0] = NULL; 1903 } 1904 if (sc->msk_intrhand[1]) { 1905 bus_teardown_intr(dev, sc->msk_irq[1], sc->msk_intrhand[1]); 1906 sc->msk_intrhand[1] = NULL; 1907 } 1908 bus_release_resources(dev, sc->msk_irq_spec, sc->msk_irq); 1909 if (sc->msk_msi) 1910 pci_release_msi(dev); 1911 bus_release_resources(dev, sc->msk_res_spec, sc->msk_res); 1912 mtx_destroy(&sc->msk_mtx); 1913 1914 return (0); 1915 } 1916 1917 struct msk_dmamap_arg { 1918 bus_addr_t msk_busaddr; 1919 }; 1920 1921 static void 1922 msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1923 { 1924 struct msk_dmamap_arg *ctx; 1925 1926 if (error != 0) 1927 return; 1928 ctx = arg; 1929 ctx->msk_busaddr = segs[0].ds_addr; 1930 } 1931 1932 /* Create status DMA region.
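 *
 * The ring's bus address is only known once bus_dmamap_load(9) has
 * invoked its callback, so msk_dmamap_cb() above stashes the first
 * segment's address in a struct msk_dmamap_arg.  The idiom used by
 * this function and by msk_txrx_dma_alloc() is, roughly:
 *
 *	ctx.msk_busaddr = 0;
 *	error = bus_dmamap_load(tag, map, ring, size, msk_dmamap_cb,
 *	    &ctx, 0);
 *	if (error == 0)
 *		ring_paddr = ctx.msk_busaddr;
 *
 * ('tag', 'map', 'ring' and 'ring_paddr' are placeholders here; the
 * real calls with the status ring's tag and map follow below.)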
*/ 1933 static int 1934 msk_status_dma_alloc(struct msk_softc *sc) 1935 { 1936 struct msk_dmamap_arg ctx; 1937 int error; 1938 1939 error = bus_dma_tag_create( 1940 bus_get_dma_tag(sc->msk_dev), /* parent */ 1941 MSK_STAT_ALIGN, 0, /* alignment, boundary */ 1942 BUS_SPACE_MAXADDR, /* lowaddr */ 1943 BUS_SPACE_MAXADDR, /* highaddr */ 1944 NULL, NULL, /* filter, filterarg */ 1945 MSK_STAT_RING_SZ, /* maxsize */ 1946 1, /* nsegments */ 1947 MSK_STAT_RING_SZ, /* maxsegsize */ 1948 0, /* flags */ 1949 NULL, NULL, /* lockfunc, lockarg */ 1950 &sc->msk_stat_tag); 1951 if (error != 0) { 1952 device_printf(sc->msk_dev, 1953 "failed to create status DMA tag\n"); 1954 return (error); 1955 } 1956 1957 /* Allocate DMA'able memory and load the DMA map for status ring. */ 1958 error = bus_dmamem_alloc(sc->msk_stat_tag, 1959 (void **)&sc->msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT | 1960 BUS_DMA_ZERO, &sc->msk_stat_map); 1961 if (error != 0) { 1962 device_printf(sc->msk_dev, 1963 "failed to allocate DMA'able memory for status ring\n"); 1964 return (error); 1965 } 1966 1967 ctx.msk_busaddr = 0; 1968 error = bus_dmamap_load(sc->msk_stat_tag, 1969 sc->msk_stat_map, sc->msk_stat_ring, MSK_STAT_RING_SZ, 1970 msk_dmamap_cb, &ctx, 0); 1971 if (error != 0) { 1972 device_printf(sc->msk_dev, 1973 "failed to load DMA'able memory for status ring\n"); 1974 return (error); 1975 } 1976 sc->msk_stat_ring_paddr = ctx.msk_busaddr; 1977 1978 return (0); 1979 } 1980 1981 static void 1982 msk_status_dma_free(struct msk_softc *sc) 1983 { 1984 1985 /* Destroy status block. */ 1986 if (sc->msk_stat_tag) { 1987 if (sc->msk_stat_map) { 1988 bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map); 1989 if (sc->msk_stat_ring) { 1990 bus_dmamem_free(sc->msk_stat_tag, 1991 sc->msk_stat_ring, sc->msk_stat_map); 1992 sc->msk_stat_ring = NULL; 1993 } 1994 sc->msk_stat_map = NULL; 1995 } 1996 bus_dma_tag_destroy(sc->msk_stat_tag); 1997 sc->msk_stat_tag = NULL; 1998 } 1999 } 2000 2001 static int 2002 msk_txrx_dma_alloc(struct msk_if_softc *sc_if) 2003 { 2004 struct msk_dmamap_arg ctx; 2005 struct msk_txdesc *txd; 2006 struct msk_rxdesc *rxd; 2007 struct msk_rxdesc *jrxd; 2008 struct msk_jpool_entry *entry; 2009 uint8_t *ptr; 2010 int error, i; 2011 2012 mtx_init(&sc_if->msk_jlist_mtx, "msk_jlist_mtx", NULL, MTX_DEF); 2013 SLIST_INIT(&sc_if->msk_jfree_listhead); 2014 SLIST_INIT(&sc_if->msk_jinuse_listhead); 2015 2016 /* Create parent DMA tag. */ 2017 /* 2018 * XXX 2019 * It seems that Yukon II supports full 64bits DMA operations. But 2020 * it needs two descriptors(list elements) for 64bits DMA operations. 2021 * Since we don't know what DMA address mappings(32bits or 64bits) 2022 * would be used in advance for each mbufs, we limits its DMA space 2023 * to be in range of 32bits address space. Otherwise, we should check 2024 * what DMA address is used and chain another descriptor for the 2025 * 64bits DMA operation. This also means descriptor ring size is 2026 * variable. Limiting DMA address to be in 32bit address space greatly 2027 * simplyfies descriptor handling and possibly would increase 2028 * performance a bit due to efficient handling of descriptors. 2029 * Apart from harassing checksum offloading mechanisms, it seems 2030 * it's really bad idea to use a seperate descriptor for 64bit 2031 * DMA operation to save small descriptor memory. Anyway, I've 2032 * never seen these exotic scheme on ethernet interface hardware. 
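 *
 * The 32bit limit is enforced by the parent tag created just below
 * (lowaddr = BUS_SPACE_MAXADDR_32BIT); all ring and buffer tags are
 * derived from that parent, so bus_dma(9) keeps (or bounces) every
 * loaded buffer below the 4GB boundary without extra checks in this
 * driver.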
2033 */ 2034 error = bus_dma_tag_create( 2035 bus_get_dma_tag(sc_if->msk_if_dev), /* parent */ 2036 1, 0, /* alignment, boundary */ 2037 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 2038 BUS_SPACE_MAXADDR, /* highaddr */ 2039 NULL, NULL, /* filter, filterarg */ 2040 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 2041 0, /* nsegments */ 2042 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 2043 0, /* flags */ 2044 NULL, NULL, /* lockfunc, lockarg */ 2045 &sc_if->msk_cdata.msk_parent_tag); 2046 if (error != 0) { 2047 device_printf(sc_if->msk_if_dev, 2048 "failed to create parent DMA tag\n"); 2049 goto fail; 2050 } 2051 /* Create tag for Tx ring. */ 2052 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2053 MSK_RING_ALIGN, 0, /* alignment, boundary */ 2054 BUS_SPACE_MAXADDR, /* lowaddr */ 2055 BUS_SPACE_MAXADDR, /* highaddr */ 2056 NULL, NULL, /* filter, filterarg */ 2057 MSK_TX_RING_SZ, /* maxsize */ 2058 1, /* nsegments */ 2059 MSK_TX_RING_SZ, /* maxsegsize */ 2060 0, /* flags */ 2061 NULL, NULL, /* lockfunc, lockarg */ 2062 &sc_if->msk_cdata.msk_tx_ring_tag); 2063 if (error != 0) { 2064 device_printf(sc_if->msk_if_dev, 2065 "failed to create Tx ring DMA tag\n"); 2066 goto fail; 2067 } 2068 2069 /* Create tag for Rx ring. */ 2070 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2071 MSK_RING_ALIGN, 0, /* alignment, boundary */ 2072 BUS_SPACE_MAXADDR, /* lowaddr */ 2073 BUS_SPACE_MAXADDR, /* highaddr */ 2074 NULL, NULL, /* filter, filterarg */ 2075 MSK_RX_RING_SZ, /* maxsize */ 2076 1, /* nsegments */ 2077 MSK_RX_RING_SZ, /* maxsegsize */ 2078 0, /* flags */ 2079 NULL, NULL, /* lockfunc, lockarg */ 2080 &sc_if->msk_cdata.msk_rx_ring_tag); 2081 if (error != 0) { 2082 device_printf(sc_if->msk_if_dev, 2083 "failed to create Rx ring DMA tag\n"); 2084 goto fail; 2085 } 2086 2087 /* Create tag for jumbo Rx ring. */ 2088 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2089 MSK_RING_ALIGN, 0, /* alignment, boundary */ 2090 BUS_SPACE_MAXADDR, /* lowaddr */ 2091 BUS_SPACE_MAXADDR, /* highaddr */ 2092 NULL, NULL, /* filter, filterarg */ 2093 MSK_JUMBO_RX_RING_SZ, /* maxsize */ 2094 1, /* nsegments */ 2095 MSK_JUMBO_RX_RING_SZ, /* maxsegsize */ 2096 0, /* flags */ 2097 NULL, NULL, /* lockfunc, lockarg */ 2098 &sc_if->msk_cdata.msk_jumbo_rx_ring_tag); 2099 if (error != 0) { 2100 device_printf(sc_if->msk_if_dev, 2101 "failed to create jumbo Rx ring DMA tag\n"); 2102 goto fail; 2103 } 2104 2105 /* Create tag for jumbo buffer blocks. */ 2106 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2107 PAGE_SIZE, 0, /* alignment, boundary */ 2108 BUS_SPACE_MAXADDR, /* lowaddr */ 2109 BUS_SPACE_MAXADDR, /* highaddr */ 2110 NULL, NULL, /* filter, filterarg */ 2111 MSK_JMEM, /* maxsize */ 2112 1, /* nsegments */ 2113 MSK_JMEM, /* maxsegsize */ 2114 0, /* flags */ 2115 NULL, NULL, /* lockfunc, lockarg */ 2116 &sc_if->msk_cdata.msk_jumbo_tag); 2117 if (error != 0) { 2118 device_printf(sc_if->msk_if_dev, 2119 "failed to create jumbo Rx buffer block DMA tag\n"); 2120 goto fail; 2121 } 2122 2123 /* Create tag for Tx buffers. 
*/ 2124 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2125 1, 0, /* alignment, boundary */ 2126 BUS_SPACE_MAXADDR, /* lowaddr */ 2127 BUS_SPACE_MAXADDR, /* highaddr */ 2128 NULL, NULL, /* filter, filterarg */ 2129 MCLBYTES * MSK_MAXTXSEGS, /* maxsize */ 2130 MSK_MAXTXSEGS, /* nsegments */ 2131 MCLBYTES, /* maxsegsize */ 2132 0, /* flags */ 2133 NULL, NULL, /* lockfunc, lockarg */ 2134 &sc_if->msk_cdata.msk_tx_tag); 2135 if (error != 0) { 2136 device_printf(sc_if->msk_if_dev, 2137 "failed to create Tx DMA tag\n"); 2138 goto fail; 2139 } 2140 2141 /* Create tag for Rx buffers. */ 2142 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2143 1, 0, /* alignment, boundary */ 2144 BUS_SPACE_MAXADDR, /* lowaddr */ 2145 BUS_SPACE_MAXADDR, /* highaddr */ 2146 NULL, NULL, /* filter, filterarg */ 2147 MCLBYTES, /* maxsize */ 2148 1, /* nsegments */ 2149 MCLBYTES, /* maxsegsize */ 2150 0, /* flags */ 2151 NULL, NULL, /* lockfunc, lockarg */ 2152 &sc_if->msk_cdata.msk_rx_tag); 2153 if (error != 0) { 2154 device_printf(sc_if->msk_if_dev, 2155 "failed to create Rx DMA tag\n"); 2156 goto fail; 2157 } 2158 2159 /* Create tag for jumbo Rx buffers. */ 2160 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2161 PAGE_SIZE, 0, /* alignment, boundary */ 2162 BUS_SPACE_MAXADDR, /* lowaddr */ 2163 BUS_SPACE_MAXADDR, /* highaddr */ 2164 NULL, NULL, /* filter, filterarg */ 2165 MCLBYTES * MSK_MAXRXSEGS, /* maxsize */ 2166 MSK_MAXRXSEGS, /* nsegments */ 2167 MSK_JLEN, /* maxsegsize */ 2168 0, /* flags */ 2169 NULL, NULL, /* lockfunc, lockarg */ 2170 &sc_if->msk_cdata.msk_jumbo_rx_tag); 2171 if (error != 0) { 2172 device_printf(sc_if->msk_if_dev, 2173 "failed to create jumbo Rx DMA tag\n"); 2174 goto fail; 2175 } 2176 2177 /* Allocate DMA'able memory and load the DMA map for Tx ring. */ 2178 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag, 2179 (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK | 2180 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_tx_ring_map); 2181 if (error != 0) { 2182 device_printf(sc_if->msk_if_dev, 2183 "failed to allocate DMA'able memory for Tx ring\n"); 2184 goto fail; 2185 } 2186 2187 ctx.msk_busaddr = 0; 2188 error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag, 2189 sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring, 2190 MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, 0); 2191 if (error != 0) { 2192 device_printf(sc_if->msk_if_dev, 2193 "failed to load DMA'able memory for Tx ring\n"); 2194 goto fail; 2195 } 2196 sc_if->msk_rdata.msk_tx_ring_paddr = ctx.msk_busaddr; 2197 2198 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ 2199 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_rx_ring_tag, 2200 (void **)&sc_if->msk_rdata.msk_rx_ring, BUS_DMA_WAITOK | 2201 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_rx_ring_map); 2202 if (error != 0) { 2203 device_printf(sc_if->msk_if_dev, 2204 "failed to allocate DMA'able memory for Rx ring\n"); 2205 goto fail; 2206 } 2207 2208 ctx.msk_busaddr = 0; 2209 error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag, 2210 sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring, 2211 MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, 0); 2212 if (error != 0) { 2213 device_printf(sc_if->msk_if_dev, 2214 "failed to load DMA'able memory for Rx ring\n"); 2215 goto fail; 2216 } 2217 sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr; 2218 2219 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. 
*/ 2220 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2221 (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring, 2222 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 2223 &sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2224 if (error != 0) { 2225 device_printf(sc_if->msk_if_dev, 2226 "failed to allocate DMA'able memory for jumbo Rx ring\n"); 2227 goto fail; 2228 } 2229 2230 ctx.msk_busaddr = 0; 2231 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2232 sc_if->msk_cdata.msk_jumbo_rx_ring_map, 2233 sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ, 2234 msk_dmamap_cb, &ctx, 0); 2235 if (error != 0) { 2236 device_printf(sc_if->msk_if_dev, 2237 "failed to load DMA'able memory for jumbo Rx ring\n"); 2238 goto fail; 2239 } 2240 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr; 2241 2242 /* Create DMA maps for Tx buffers. */ 2243 for (i = 0; i < MSK_TX_RING_CNT; i++) { 2244 txd = &sc_if->msk_cdata.msk_txdesc[i]; 2245 txd->tx_m = NULL; 2246 txd->tx_dmamap = NULL; 2247 error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0, 2248 &txd->tx_dmamap); 2249 if (error != 0) { 2250 device_printf(sc_if->msk_if_dev, 2251 "failed to create Tx dmamap\n"); 2252 goto fail; 2253 } 2254 } 2255 /* Create DMA maps for Rx buffers. */ 2256 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0, 2257 &sc_if->msk_cdata.msk_rx_sparemap)) != 0) { 2258 device_printf(sc_if->msk_if_dev, 2259 "failed to create spare Rx dmamap\n"); 2260 goto fail; 2261 } 2262 for (i = 0; i < MSK_RX_RING_CNT; i++) { 2263 rxd = &sc_if->msk_cdata.msk_rxdesc[i]; 2264 rxd->rx_m = NULL; 2265 rxd->rx_dmamap = NULL; 2266 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0, 2267 &rxd->rx_dmamap); 2268 if (error != 0) { 2269 device_printf(sc_if->msk_if_dev, 2270 "failed to create Rx dmamap\n"); 2271 goto fail; 2272 } 2273 } 2274 /* Create DMA maps for jumbo Rx buffers. */ 2275 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0, 2276 &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) { 2277 device_printf(sc_if->msk_if_dev, 2278 "failed to create spare jumbo Rx dmamap\n"); 2279 goto fail; 2280 } 2281 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 2282 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; 2283 jrxd->rx_m = NULL; 2284 jrxd->rx_dmamap = NULL; 2285 error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0, 2286 &jrxd->rx_dmamap); 2287 if (error != 0) { 2288 device_printf(sc_if->msk_if_dev, 2289 "failed to create jumbo Rx dmamap\n"); 2290 goto fail; 2291 } 2292 } 2293 2294 /* Allocate DMA'able memory and load the DMA map for jumbo buf. */ 2295 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_tag, 2296 (void **)&sc_if->msk_rdata.msk_jumbo_buf, 2297 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 2298 &sc_if->msk_cdata.msk_jumbo_map); 2299 if (error != 0) { 2300 device_printf(sc_if->msk_if_dev, 2301 "failed to allocate DMA'able memory for jumbo buf\n"); 2302 goto fail; 2303 } 2304 2305 ctx.msk_busaddr = 0; 2306 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_tag, 2307 sc_if->msk_cdata.msk_jumbo_map, sc_if->msk_rdata.msk_jumbo_buf, 2308 MSK_JMEM, msk_dmamap_cb, &ctx, 0); 2309 if (error != 0) { 2310 device_printf(sc_if->msk_if_dev, 2311 "failed to load DMA'able memory for jumbobuf\n"); 2312 goto fail; 2313 } 2314 sc_if->msk_rdata.msk_jumbo_buf_paddr = ctx.msk_busaddr; 2315 2316 /* 2317 * Now divide it up into 9K pieces and save the addresses 2318 * in an array. 
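 *
 * Slot i simply starts i * MSK_JLEN bytes into the block:
 *
 *	msk_jslots[i] = msk_jumbo_buf + i * MSK_JLEN
 *
 * and msk_jfree() recovers the slot index with the inverse
 * calculation, (buf - msk_jumbo_buf) / MSK_JLEN.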
2319 */ 2320 ptr = sc_if->msk_rdata.msk_jumbo_buf; 2321 for (i = 0; i < MSK_JSLOTS; i++) { 2322 sc_if->msk_cdata.msk_jslots[i] = ptr; 2323 ptr += MSK_JLEN; 2324 entry = malloc(sizeof(struct msk_jpool_entry), 2325 M_DEVBUF, M_WAITOK); 2326 if (entry == NULL) { 2327 device_printf(sc_if->msk_if_dev, 2328 "no memory for jumbo buffers!\n"); 2329 error = ENOMEM; 2330 goto fail; 2331 } 2332 entry->slot = i; 2333 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, 2334 jpool_entries); 2335 } 2336 2337 fail: 2338 return (error); 2339 } 2340 2341 static void 2342 msk_txrx_dma_free(struct msk_if_softc *sc_if) 2343 { 2344 struct msk_txdesc *txd; 2345 struct msk_rxdesc *rxd; 2346 struct msk_rxdesc *jrxd; 2347 struct msk_jpool_entry *entry; 2348 int i; 2349 2350 MSK_JLIST_LOCK(sc_if); 2351 while ((entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead))) { 2352 device_printf(sc_if->msk_if_dev, 2353 "asked to free buffer that is in use!\n"); 2354 SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries); 2355 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, 2356 jpool_entries); 2357 } 2358 2359 while (!SLIST_EMPTY(&sc_if->msk_jfree_listhead)) { 2360 entry = SLIST_FIRST(&sc_if->msk_jfree_listhead); 2361 SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries); 2362 free(entry, M_DEVBUF); 2363 } 2364 MSK_JLIST_UNLOCK(sc_if); 2365 2366 /* Destroy jumbo buffer block. */ 2367 if (sc_if->msk_cdata.msk_jumbo_map) 2368 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_tag, 2369 sc_if->msk_cdata.msk_jumbo_map); 2370 2371 if (sc_if->msk_rdata.msk_jumbo_buf) { 2372 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_tag, 2373 sc_if->msk_rdata.msk_jumbo_buf, 2374 sc_if->msk_cdata.msk_jumbo_map); 2375 sc_if->msk_rdata.msk_jumbo_buf = NULL; 2376 sc_if->msk_cdata.msk_jumbo_map = NULL; 2377 } 2378 2379 /* Tx ring. */ 2380 if (sc_if->msk_cdata.msk_tx_ring_tag) { 2381 if (sc_if->msk_cdata.msk_tx_ring_map) 2382 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_ring_tag, 2383 sc_if->msk_cdata.msk_tx_ring_map); 2384 if (sc_if->msk_cdata.msk_tx_ring_map && 2385 sc_if->msk_rdata.msk_tx_ring) 2386 bus_dmamem_free(sc_if->msk_cdata.msk_tx_ring_tag, 2387 sc_if->msk_rdata.msk_tx_ring, 2388 sc_if->msk_cdata.msk_tx_ring_map); 2389 sc_if->msk_rdata.msk_tx_ring = NULL; 2390 sc_if->msk_cdata.msk_tx_ring_map = NULL; 2391 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_ring_tag); 2392 sc_if->msk_cdata.msk_tx_ring_tag = NULL; 2393 } 2394 /* Rx ring. */ 2395 if (sc_if->msk_cdata.msk_rx_ring_tag) { 2396 if (sc_if->msk_cdata.msk_rx_ring_map) 2397 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_ring_tag, 2398 sc_if->msk_cdata.msk_rx_ring_map); 2399 if (sc_if->msk_cdata.msk_rx_ring_map && 2400 sc_if->msk_rdata.msk_rx_ring) 2401 bus_dmamem_free(sc_if->msk_cdata.msk_rx_ring_tag, 2402 sc_if->msk_rdata.msk_rx_ring, 2403 sc_if->msk_cdata.msk_rx_ring_map); 2404 sc_if->msk_rdata.msk_rx_ring = NULL; 2405 sc_if->msk_cdata.msk_rx_ring_map = NULL; 2406 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag); 2407 sc_if->msk_cdata.msk_rx_ring_tag = NULL; 2408 } 2409 /* Jumbo Rx ring. 
*/ 2410 if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) { 2411 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map) 2412 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2413 sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2414 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map && 2415 sc_if->msk_rdata.msk_jumbo_rx_ring) 2416 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2417 sc_if->msk_rdata.msk_jumbo_rx_ring, 2418 sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2419 sc_if->msk_rdata.msk_jumbo_rx_ring = NULL; 2420 sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL; 2421 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag); 2422 sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL; 2423 } 2424 /* Tx buffers. */ 2425 if (sc_if->msk_cdata.msk_tx_tag) { 2426 for (i = 0; i < MSK_TX_RING_CNT; i++) { 2427 txd = &sc_if->msk_cdata.msk_txdesc[i]; 2428 if (txd->tx_dmamap) { 2429 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag, 2430 txd->tx_dmamap); 2431 txd->tx_dmamap = NULL; 2432 } 2433 } 2434 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag); 2435 sc_if->msk_cdata.msk_tx_tag = NULL; 2436 } 2437 /* Rx buffers. */ 2438 if (sc_if->msk_cdata.msk_rx_tag) { 2439 for (i = 0; i < MSK_RX_RING_CNT; i++) { 2440 rxd = &sc_if->msk_cdata.msk_rxdesc[i]; 2441 if (rxd->rx_dmamap) { 2442 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 2443 rxd->rx_dmamap); 2444 rxd->rx_dmamap = NULL; 2445 } 2446 } 2447 if (sc_if->msk_cdata.msk_rx_sparemap) { 2448 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 2449 sc_if->msk_cdata.msk_rx_sparemap); 2450 sc_if->msk_cdata.msk_rx_sparemap = 0; 2451 } 2452 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag); 2453 sc_if->msk_cdata.msk_rx_tag = NULL; 2454 } 2455 /* Jumbo Rx buffers. */ 2456 if (sc_if->msk_cdata.msk_jumbo_rx_tag) { 2457 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 2458 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; 2459 if (jrxd->rx_dmamap) { 2460 bus_dmamap_destroy( 2461 sc_if->msk_cdata.msk_jumbo_rx_tag, 2462 jrxd->rx_dmamap); 2463 jrxd->rx_dmamap = NULL; 2464 } 2465 } 2466 if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) { 2467 bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag, 2468 sc_if->msk_cdata.msk_jumbo_rx_sparemap); 2469 sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0; 2470 } 2471 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag); 2472 sc_if->msk_cdata.msk_jumbo_rx_tag = NULL; 2473 } 2474 2475 if (sc_if->msk_cdata.msk_parent_tag) { 2476 bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag); 2477 sc_if->msk_cdata.msk_parent_tag = NULL; 2478 } 2479 mtx_destroy(&sc_if->msk_jlist_mtx); 2480 } 2481 2482 /* 2483 * Allocate a jumbo buffer. 2484 */ 2485 static void * 2486 msk_jalloc(struct msk_if_softc *sc_if) 2487 { 2488 struct msk_jpool_entry *entry; 2489 2490 MSK_JLIST_LOCK(sc_if); 2491 2492 entry = SLIST_FIRST(&sc_if->msk_jfree_listhead); 2493 2494 if (entry == NULL) { 2495 MSK_JLIST_UNLOCK(sc_if); 2496 return (NULL); 2497 } 2498 2499 SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries); 2500 SLIST_INSERT_HEAD(&sc_if->msk_jinuse_listhead, entry, jpool_entries); 2501 2502 MSK_JLIST_UNLOCK(sc_if); 2503 2504 return (sc_if->msk_cdata.msk_jslots[entry->slot]); 2505 } 2506 2507 /* 2508 * Release a jumbo buffer. 2509 */ 2510 static void 2511 msk_jfree(void *buf, void *args) 2512 { 2513 struct msk_if_softc *sc_if; 2514 struct msk_jpool_entry *entry; 2515 int i; 2516 2517 /* Extract the softc struct pointer. 
*/ 2518 sc_if = (struct msk_if_softc *)args; 2519 KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__)); 2520 2521 MSK_JLIST_LOCK(sc_if); 2522 /* Calculate the slot this buffer belongs to. */ 2523 i = ((vm_offset_t)buf 2524 - (vm_offset_t)sc_if->msk_rdata.msk_jumbo_buf) / MSK_JLEN; 2525 KASSERT(i >= 0 && i < MSK_JSLOTS, 2526 ("%s: asked to free buffer that we don't manage!", __func__)); 2527 2528 entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead); 2529 KASSERT(entry != NULL, ("%s: buffer not in use!", __func__)); 2530 entry->slot = i; 2531 SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries); 2532 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, jpool_entries); 2533 if (SLIST_EMPTY(&sc_if->msk_jinuse_listhead)) 2534 wakeup(sc_if); 2535 2536 MSK_JLIST_UNLOCK(sc_if); 2537 } 2538 2539 /* 2540 * It's copy of ath_defrag(ath(4)). 2541 * 2542 * Defragment an mbuf chain, returning at most maxfrags separate 2543 * mbufs+clusters. If this is not possible NULL is returned and 2544 * the original mbuf chain is left in it's present (potentially 2545 * modified) state. We use two techniques: collapsing consecutive 2546 * mbufs and replacing consecutive mbufs by a cluster. 2547 */ 2548 static struct mbuf * 2549 msk_defrag(struct mbuf *m0, int how, int maxfrags) 2550 { 2551 struct mbuf *m, *n, *n2, **prev; 2552 u_int curfrags; 2553 2554 /* 2555 * Calculate the current number of frags. 2556 */ 2557 curfrags = 0; 2558 for (m = m0; m != NULL; m = m->m_next) 2559 curfrags++; 2560 /* 2561 * First, try to collapse mbufs. Note that we always collapse 2562 * towards the front so we don't need to deal with moving the 2563 * pkthdr. This may be suboptimal if the first mbuf has much 2564 * less data than the following. 2565 */ 2566 m = m0; 2567 again: 2568 for (;;) { 2569 n = m->m_next; 2570 if (n == NULL) 2571 break; 2572 if ((m->m_flags & M_RDONLY) == 0 && 2573 n->m_len < M_TRAILINGSPACE(m)) { 2574 bcopy(mtod(n, void *), mtod(m, char *) + m->m_len, 2575 n->m_len); 2576 m->m_len += n->m_len; 2577 m->m_next = n->m_next; 2578 m_free(n); 2579 if (--curfrags <= maxfrags) 2580 return (m0); 2581 } else 2582 m = n; 2583 } 2584 KASSERT(maxfrags > 1, 2585 ("maxfrags %u, but normal collapse failed", maxfrags)); 2586 /* 2587 * Collapse consecutive mbufs to a cluster. 2588 */ 2589 prev = &m0->m_next; /* NB: not the first mbuf */ 2590 while ((n = *prev) != NULL) { 2591 if ((n2 = n->m_next) != NULL && 2592 n->m_len + n2->m_len < MCLBYTES) { 2593 m = m_getcl(how, MT_DATA, 0); 2594 if (m == NULL) 2595 goto bad; 2596 bcopy(mtod(n, void *), mtod(m, void *), n->m_len); 2597 bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len, 2598 n2->m_len); 2599 m->m_len = n->m_len + n2->m_len; 2600 m->m_next = n2->m_next; 2601 *prev = m; 2602 m_free(n); 2603 m_free(n2); 2604 if (--curfrags <= maxfrags) /* +1 cl -2 mbufs */ 2605 return m0; 2606 /* 2607 * Still not there, try the normal collapse 2608 * again before we allocate another cluster. 2609 */ 2610 goto again; 2611 } 2612 prev = &n->m_next; 2613 } 2614 /* 2615 * No place where we can collapse to a cluster; punt. 2616 * This can occur if, for example, you request 2 frags 2617 * but the packet requires that both be clusters (we 2618 * never reallocate the first mbuf to avoid moving the 2619 * packet header). 
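 *
 * msk_encap() below calls this when bus_dmamap_load_mbuf_sg() fails
 * with EFBIG, i.e. the chain maps to more than MSK_MAXTXSEGS
 * segments, and then retries the load on the collapsed chain.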
2620 */ 2621 bad: 2622 return (NULL); 2623 } 2624 2625 static int 2626 msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head) 2627 { 2628 struct msk_txdesc *txd, *txd_last; 2629 struct msk_tx_desc *tx_le; 2630 struct mbuf *m; 2631 bus_dmamap_t map; 2632 bus_dma_segment_t txsegs[MSK_MAXTXSEGS]; 2633 uint32_t control, prod, si; 2634 uint16_t offset, tcp_offset, tso_mtu; 2635 int error, i, nseg, tso; 2636 2637 MSK_IF_LOCK_ASSERT(sc_if); 2638 2639 tcp_offset = offset = 0; 2640 m = *m_head; 2641 if ((m->m_pkthdr.csum_flags & (MSK_CSUM_FEATURES | CSUM_TSO)) != 0) { 2642 /* 2643 * Since mbuf has no protocol specific structure information 2644 * in it we have to inspect protocol information here to 2645 * setup TSO and checksum offload. I don't know why Marvell 2646 * made a such decision in chip design because other GigE 2647 * hardwares normally takes care of all these chores in 2648 * hardware. However, TSO performance of Yukon II is very 2649 * good such that it's worth to implement it. 2650 */ 2651 struct ether_header *eh; 2652 struct ip *ip; 2653 struct tcphdr *tcp; 2654 2655 /* TODO check for M_WRITABLE(m) */ 2656 2657 offset = sizeof(struct ether_header); 2658 m = m_pullup(m, offset); 2659 if (m == NULL) { 2660 *m_head = NULL; 2661 return (ENOBUFS); 2662 } 2663 eh = mtod(m, struct ether_header *); 2664 /* Check if hardware VLAN insertion is off. */ 2665 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 2666 offset = sizeof(struct ether_vlan_header); 2667 m = m_pullup(m, offset); 2668 if (m == NULL) { 2669 *m_head = NULL; 2670 return (ENOBUFS); 2671 } 2672 } 2673 m = m_pullup(m, offset + sizeof(struct ip)); 2674 if (m == NULL) { 2675 *m_head = NULL; 2676 return (ENOBUFS); 2677 } 2678 ip = (struct ip *)(mtod(m, char *) + offset); 2679 offset += (ip->ip_hl << 2); 2680 tcp_offset = offset; 2681 /* 2682 * It seems that Yukon II has Tx checksum offload bug for 2683 * small TCP packets that's less than 60 bytes in size 2684 * (e.g. TCP window probe packet, pure ACK packet). 2685 * Common work around like padding with zeros to make the 2686 * frame minimum ethernet frame size didn't work at all. 2687 * Instead of disabling checksum offload completely we 2688 * resort to S/W checksum routine when we encounter short 2689 * TCP frames. 2690 * Short UDP packets appear to be handled correctly by 2691 * Yukon II. 
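 *
 * The fallback below runs in_cksum_skip() over the TCP header and
 * payload (from the start of the TCP header to the end of the IP
 * datagram), stores the result at the csum_data offset supplied by
 * the stack and clears CSUM_TCP so no checksum LE is built for the
 * frame.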
2692 */ 2693 if (m->m_pkthdr.len < MSK_MIN_FRAMELEN && 2694 (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) { 2695 uint16_t csum; 2696 2697 csum = in_cksum_skip(m, ntohs(ip->ip_len) + offset - 2698 (ip->ip_hl << 2), offset); 2699 *(uint16_t *)(m->m_data + offset + 2700 m->m_pkthdr.csum_data) = csum; 2701 m->m_pkthdr.csum_flags &= ~CSUM_TCP; 2702 } 2703 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2704 m = m_pullup(m, offset + sizeof(struct tcphdr)); 2705 if (m == NULL) { 2706 *m_head = NULL; 2707 return (ENOBUFS); 2708 } 2709 tcp = mtod(m, struct tcphdr *); 2710 offset += (tcp->th_off << 2); 2711 } 2712 *m_head = m; 2713 } 2714 2715 prod = sc_if->msk_cdata.msk_tx_prod; 2716 txd = &sc_if->msk_cdata.msk_txdesc[prod]; 2717 txd_last = txd; 2718 map = txd->tx_dmamap; 2719 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map, 2720 *m_head, txsegs, &nseg, BUS_DMA_NOWAIT); 2721 if (error == EFBIG) { 2722 m = msk_defrag(*m_head, M_DONTWAIT, MSK_MAXTXSEGS); 2723 if (m == NULL) { 2724 m_freem(*m_head); 2725 *m_head = NULL; 2726 return (ENOBUFS); 2727 } 2728 *m_head = m; 2729 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, 2730 map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT); 2731 if (error != 0) { 2732 m_freem(*m_head); 2733 *m_head = NULL; 2734 return (error); 2735 } 2736 } else if (error != 0) 2737 return (error); 2738 if (nseg == 0) { 2739 m_freem(*m_head); 2740 *m_head = NULL; 2741 return (EIO); 2742 } 2743 2744 /* Check number of available descriptors. */ 2745 if (sc_if->msk_cdata.msk_tx_cnt + nseg >= 2746 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) { 2747 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map); 2748 return (ENOBUFS); 2749 } 2750 2751 control = 0; 2752 tso = 0; 2753 tx_le = NULL; 2754 2755 /* Check TSO support. */ 2756 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2757 tso_mtu = offset + m->m_pkthdr.tso_segsz; 2758 if (tso_mtu != sc_if->msk_cdata.msk_tso_mtu) { 2759 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2760 tx_le->msk_addr = htole32(tso_mtu); 2761 tx_le->msk_control = htole32(OP_LRGLEN | HW_OWNER); 2762 sc_if->msk_cdata.msk_tx_cnt++; 2763 MSK_INC(prod, MSK_TX_RING_CNT); 2764 sc_if->msk_cdata.msk_tso_mtu = tso_mtu; 2765 } 2766 tso++; 2767 } 2768 /* Check if we have a VLAN tag to insert. */ 2769 if ((m->m_flags & M_VLANTAG) != 0) { 2770 if (tso == 0) { 2771 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2772 tx_le->msk_addr = htole32(0); 2773 tx_le->msk_control = htole32(OP_VLAN | HW_OWNER | 2774 htons(m->m_pkthdr.ether_vtag)); 2775 sc_if->msk_cdata.msk_tx_cnt++; 2776 MSK_INC(prod, MSK_TX_RING_CNT); 2777 } else { 2778 tx_le->msk_control |= htole32(OP_VLAN | 2779 htons(m->m_pkthdr.ether_vtag)); 2780 } 2781 control |= INS_VLAN; 2782 } 2783 /* Check if we have to handle checksum offload. 
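 *
 * The checksum LE written below carries the offset at which the
 * result must be stored (tcp_offset + csum_data, low 16 bits of
 * msk_addr) and the offset at which summing starts (tcp_offset,
 * upper 16 bits); the CALSUM/WR_SUM/INIT_SUM/LOCK_SUM bits set in
 * 'control' are then carried in each buffer LE of this frame.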
*/ 2784 if (tso == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) { 2785 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2786 tx_le->msk_addr = htole32(((tcp_offset + m->m_pkthdr.csum_data) 2787 & 0xffff) | ((uint32_t)tcp_offset << 16)); 2788 tx_le->msk_control = htole32(1 << 16 | (OP_TCPLISW | HW_OWNER)); 2789 control = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM; 2790 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 2791 control |= UDPTCP; 2792 sc_if->msk_cdata.msk_tx_cnt++; 2793 MSK_INC(prod, MSK_TX_RING_CNT); 2794 } 2795 2796 si = prod; 2797 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2798 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr)); 2799 if (tso == 0) 2800 tx_le->msk_control = htole32(txsegs[0].ds_len | control | 2801 OP_PACKET); 2802 else 2803 tx_le->msk_control = htole32(txsegs[0].ds_len | control | 2804 OP_LARGESEND); 2805 sc_if->msk_cdata.msk_tx_cnt++; 2806 MSK_INC(prod, MSK_TX_RING_CNT); 2807 2808 for (i = 1; i < nseg; i++) { 2809 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2810 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr)); 2811 tx_le->msk_control = htole32(txsegs[i].ds_len | control | 2812 OP_BUFFER | HW_OWNER); 2813 sc_if->msk_cdata.msk_tx_cnt++; 2814 MSK_INC(prod, MSK_TX_RING_CNT); 2815 } 2816 /* Update producer index. */ 2817 sc_if->msk_cdata.msk_tx_prod = prod; 2818 2819 /* Set EOP on the last desciptor. */ 2820 prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT; 2821 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2822 tx_le->msk_control |= htole32(EOP); 2823 2824 /* Turn the first descriptor ownership to hardware. */ 2825 tx_le = &sc_if->msk_rdata.msk_tx_ring[si]; 2826 tx_le->msk_control |= htole32(HW_OWNER); 2827 2828 txd = &sc_if->msk_cdata.msk_txdesc[prod]; 2829 map = txd_last->tx_dmamap; 2830 txd_last->tx_dmamap = txd->tx_dmamap; 2831 txd->tx_dmamap = map; 2832 txd->tx_m = m; 2833 2834 /* Sync descriptors. */ 2835 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE); 2836 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag, 2837 sc_if->msk_cdata.msk_tx_ring_map, 2838 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2839 2840 return (0); 2841 } 2842 2843 static void 2844 msk_tx_task(void *arg, int pending) 2845 { 2846 struct ifnet *ifp; 2847 2848 ifp = arg; 2849 msk_start(ifp); 2850 } 2851 2852 static void 2853 msk_start(struct ifnet *ifp) 2854 { 2855 struct msk_if_softc *sc_if; 2856 struct mbuf *m_head; 2857 int enq; 2858 2859 sc_if = ifp->if_softc; 2860 2861 MSK_IF_LOCK(sc_if); 2862 2863 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 2864 IFF_DRV_RUNNING || sc_if->msk_link == 0) { 2865 MSK_IF_UNLOCK(sc_if); 2866 return; 2867 } 2868 2869 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 2870 sc_if->msk_cdata.msk_tx_cnt < 2871 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) { 2872 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 2873 if (m_head == NULL) 2874 break; 2875 /* 2876 * Pack the data into the transmit ring. If we 2877 * don't have room, set the OACTIVE flag and wait 2878 * for the NIC to drain the ring. 2879 */ 2880 if (msk_encap(sc_if, &m_head) != 0) { 2881 if (m_head == NULL) 2882 break; 2883 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 2884 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2885 break; 2886 } 2887 2888 enq++; 2889 /* 2890 * If there's a BPF listener, bounce a copy of this frame 2891 * to him. 
2892 */ 2893 ETHER_BPF_MTAP(ifp, m_head); 2894 } 2895 2896 if (enq > 0) { 2897 /* Transmit */ 2898 CSR_WRITE_2(sc_if->msk_softc, 2899 Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG), 2900 sc_if->msk_cdata.msk_tx_prod); 2901 2902 /* Set a timeout in case the chip goes out to lunch. */ 2903 sc_if->msk_watchdog_timer = MSK_TX_TIMEOUT; 2904 } 2905 2906 MSK_IF_UNLOCK(sc_if); 2907 } 2908 2909 static void 2910 msk_watchdog(struct msk_if_softc *sc_if) 2911 { 2912 struct ifnet *ifp; 2913 uint32_t ridx; 2914 int idx; 2915 2916 MSK_IF_LOCK_ASSERT(sc_if); 2917 2918 if (sc_if->msk_watchdog_timer == 0 || --sc_if->msk_watchdog_timer) 2919 return; 2920 ifp = sc_if->msk_ifp; 2921 if (sc_if->msk_link == 0) { 2922 if (bootverbose) 2923 if_printf(sc_if->msk_ifp, "watchdog timeout " 2924 "(missed link)\n"); 2925 ifp->if_oerrors++; 2926 msk_init_locked(sc_if); 2927 return; 2928 } 2929 2930 /* 2931 * Reclaim first as there is a possibility of losing Tx completion 2932 * interrupts. 2933 */ 2934 ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX; 2935 idx = CSR_READ_2(sc_if->msk_softc, ridx); 2936 if (sc_if->msk_cdata.msk_tx_cons != idx) { 2937 msk_txeof(sc_if, idx); 2938 if (sc_if->msk_cdata.msk_tx_cnt == 0) { 2939 if_printf(ifp, "watchdog timeout (missed Tx interrupts) " 2940 "-- recovering\n"); 2941 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2942 taskqueue_enqueue(taskqueue_fast, 2943 &sc_if->msk_tx_task); 2944 return; 2945 } 2946 } 2947 2948 if_printf(ifp, "watchdog timeout\n"); 2949 ifp->if_oerrors++; 2950 msk_init_locked(sc_if); 2951 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2952 taskqueue_enqueue(taskqueue_fast, &sc_if->msk_tx_task); 2953 } 2954 2955 static void 2956 mskc_shutdown(device_t dev) 2957 { 2958 struct msk_softc *sc; 2959 int i; 2960 2961 sc = device_get_softc(dev); 2962 MSK_LOCK(sc); 2963 for (i = 0; i < sc->msk_num_port; i++) { 2964 if (sc->msk_if[i] != NULL) 2965 msk_stop(sc->msk_if[i]); 2966 } 2967 2968 /* Disable all interrupts. */ 2969 CSR_WRITE_4(sc, B0_IMSK, 0); 2970 CSR_READ_4(sc, B0_IMSK); 2971 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 2972 CSR_READ_4(sc, B0_HWE_IMSK); 2973 2974 /* Put hardware reset. */ 2975 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 2976 2977 MSK_UNLOCK(sc); 2978 } 2979 2980 static int 2981 mskc_suspend(device_t dev) 2982 { 2983 struct msk_softc *sc; 2984 int i; 2985 2986 sc = device_get_softc(dev); 2987 2988 MSK_LOCK(sc); 2989 2990 for (i = 0; i < sc->msk_num_port; i++) { 2991 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL && 2992 ((sc->msk_if[i]->msk_ifp->if_drv_flags & 2993 IFF_DRV_RUNNING) != 0)) 2994 msk_stop(sc->msk_if[i]); 2995 } 2996 2997 /* Disable all interrupts. */ 2998 CSR_WRITE_4(sc, B0_IMSK, 0); 2999 CSR_READ_4(sc, B0_IMSK); 3000 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 3001 CSR_READ_4(sc, B0_HWE_IMSK); 3002 3003 msk_phy_power(sc, MSK_PHY_POWERDOWN); 3004 3005 /* Put hardware reset. 
*/ 3006 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 3007 sc->msk_suspended = 1; 3008 3009 MSK_UNLOCK(sc); 3010 3011 return (0); 3012 } 3013 3014 static int 3015 mskc_resume(device_t dev) 3016 { 3017 struct msk_softc *sc; 3018 int i; 3019 3020 sc = device_get_softc(dev); 3021 3022 MSK_LOCK(sc); 3023 3024 mskc_reset(sc); 3025 for (i = 0; i < sc->msk_num_port; i++) { 3026 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL && 3027 ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0)) 3028 msk_init_locked(sc->msk_if[i]); 3029 } 3030 sc->msk_suspended = 0; 3031 3032 MSK_UNLOCK(sc); 3033 3034 return (0); 3035 } 3036 3037 static void 3038 msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len) 3039 { 3040 struct mbuf *m; 3041 struct ifnet *ifp; 3042 struct msk_rxdesc *rxd; 3043 int cons, rxlen; 3044 3045 ifp = sc_if->msk_ifp; 3046 3047 MSK_IF_LOCK_ASSERT(sc_if); 3048 3049 cons = sc_if->msk_cdata.msk_rx_cons; 3050 do { 3051 rxlen = status >> 16; 3052 if ((status & GMR_FS_VLAN) != 0 && 3053 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 3054 rxlen -= ETHER_VLAN_ENCAP_LEN; 3055 if (len > sc_if->msk_framesize || 3056 ((status & GMR_FS_ANY_ERR) != 0) || 3057 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) { 3058 /* Don't count flow-control packet as errors. */ 3059 if ((status & GMR_FS_GOOD_FC) == 0) 3060 ifp->if_ierrors++; 3061 msk_discard_rxbuf(sc_if, cons); 3062 break; 3063 } 3064 rxd = &sc_if->msk_cdata.msk_rxdesc[cons]; 3065 m = rxd->rx_m; 3066 if (msk_newbuf(sc_if, cons) != 0) { 3067 ifp->if_iqdrops++; 3068 /* Reuse old buffer. */ 3069 msk_discard_rxbuf(sc_if, cons); 3070 break; 3071 } 3072 m->m_pkthdr.rcvif = ifp; 3073 m->m_pkthdr.len = m->m_len = len; 3074 ifp->if_ipackets++; 3075 /* Check for VLAN tagged packets. */ 3076 if ((status & GMR_FS_VLAN) != 0 && 3077 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 3078 m->m_pkthdr.ether_vtag = sc_if->msk_vtag; 3079 m->m_flags |= M_VLANTAG; 3080 } 3081 MSK_IF_UNLOCK(sc_if); 3082 (*ifp->if_input)(ifp, m); 3083 MSK_IF_LOCK(sc_if); 3084 } while (0); 3085 3086 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT); 3087 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT); 3088 } 3089 3090 static void 3091 msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len) 3092 { 3093 struct mbuf *m; 3094 struct ifnet *ifp; 3095 struct msk_rxdesc *jrxd; 3096 int cons, rxlen; 3097 3098 ifp = sc_if->msk_ifp; 3099 3100 MSK_IF_LOCK_ASSERT(sc_if); 3101 3102 cons = sc_if->msk_cdata.msk_rx_cons; 3103 do { 3104 rxlen = status >> 16; 3105 if ((status & GMR_FS_VLAN) != 0 && 3106 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 3107 rxlen -= ETHER_VLAN_ENCAP_LEN; 3108 if (len > sc_if->msk_framesize || 3109 ((status & GMR_FS_ANY_ERR) != 0) || 3110 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) { 3111 /* Don't count flow-control packet as errors. */ 3112 if ((status & GMR_FS_GOOD_FC) == 0) 3113 ifp->if_ierrors++; 3114 msk_discard_jumbo_rxbuf(sc_if, cons); 3115 break; 3116 } 3117 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons]; 3118 m = jrxd->rx_m; 3119 if (msk_jumbo_newbuf(sc_if, cons) != 0) { 3120 ifp->if_iqdrops++; 3121 /* Reuse old buffer. */ 3122 msk_discard_jumbo_rxbuf(sc_if, cons); 3123 break; 3124 } 3125 m->m_pkthdr.rcvif = ifp; 3126 m->m_pkthdr.len = m->m_len = len; 3127 ifp->if_ipackets++; 3128 /* Check for VLAN tagged packets. 
*/ 3129 if ((status & GMR_FS_VLAN) != 0 && 3130 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 3131 m->m_pkthdr.ether_vtag = sc_if->msk_vtag; 3132 m->m_flags |= M_VLANTAG; 3133 } 3134 MSK_IF_UNLOCK(sc_if); 3135 (*ifp->if_input)(ifp, m); 3136 MSK_IF_LOCK(sc_if); 3137 } while (0); 3138 3139 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT); 3140 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT); 3141 } 3142 3143 static void 3144 msk_txeof(struct msk_if_softc *sc_if, int idx) 3145 { 3146 struct msk_txdesc *txd; 3147 struct msk_tx_desc *cur_tx; 3148 struct ifnet *ifp; 3149 uint32_t control; 3150 int cons, prog; 3151 3152 MSK_IF_LOCK_ASSERT(sc_if); 3153 3154 ifp = sc_if->msk_ifp; 3155 3156 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag, 3157 sc_if->msk_cdata.msk_tx_ring_map, 3158 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 3159 /* 3160 * Go through our tx ring and free mbufs for those 3161 * frames that have been sent. 3162 */ 3163 cons = sc_if->msk_cdata.msk_tx_cons; 3164 prog = 0; 3165 for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) { 3166 if (sc_if->msk_cdata.msk_tx_cnt <= 0) 3167 break; 3168 prog++; 3169 cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons]; 3170 control = le32toh(cur_tx->msk_control); 3171 sc_if->msk_cdata.msk_tx_cnt--; 3172 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3173 if ((control & EOP) == 0) 3174 continue; 3175 txd = &sc_if->msk_cdata.msk_txdesc[cons]; 3176 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap, 3177 BUS_DMASYNC_POSTWRITE); 3178 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap); 3179 3180 ifp->if_opackets++; 3181 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!", 3182 __func__)); 3183 m_freem(txd->tx_m); 3184 txd->tx_m = NULL; 3185 } 3186 3187 if (prog > 0) { 3188 sc_if->msk_cdata.msk_tx_cons = cons; 3189 if (sc_if->msk_cdata.msk_tx_cnt == 0) 3190 sc_if->msk_watchdog_timer = 0; 3191 /* No need to sync LEs as we didn't update LEs. */ 3192 } 3193 } 3194 3195 static void 3196 msk_tick(void *xsc_if) 3197 { 3198 struct msk_if_softc *sc_if; 3199 struct mii_data *mii; 3200 3201 sc_if = xsc_if; 3202 3203 MSK_IF_LOCK_ASSERT(sc_if); 3204 3205 mii = device_get_softc(sc_if->msk_miibus); 3206 3207 mii_tick(mii); 3208 msk_watchdog(sc_if); 3209 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if); 3210 } 3211 3212 static void 3213 msk_intr_phy(struct msk_if_softc *sc_if) 3214 { 3215 uint16_t status; 3216 3217 if (sc_if->msk_softc->msk_marvell_phy) { 3218 msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT); 3219 status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, 3220 PHY_MARV_INT_STAT); 3221 /* Handle FIFO Underrun/Overflow? */ 3222 if ((status & PHY_M_IS_FIFO_ERROR)) 3223 device_printf(sc_if->msk_if_dev, 3224 "PHY FIFO underrun/overflow.\n"); 3225 } 3226 } 3227 3228 static void 3229 msk_intr_gmac(struct msk_if_softc *sc_if) 3230 { 3231 struct msk_softc *sc; 3232 uint8_t status; 3233 3234 sc = sc_if->msk_softc; 3235 status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC)); 3236 3237 /* GMAC Rx FIFO overrun. */ 3238 if ((status & GM_IS_RX_FF_OR) != 0) { 3239 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), 3240 GMF_CLI_RX_FO); 3241 device_printf(sc_if->msk_if_dev, "Rx FIFO overrun!\n"); 3242 } 3243 /* GMAC Tx FIFO underrun. 
*/ 3244 if ((status & GM_IS_TX_FF_UR) != 0) { 3245 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3246 GMF_CLI_TX_FU); 3247 device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n"); 3248 /* 3249 * XXX 3250 * In case of Tx underrun, we may need to flush/reset 3251 * Tx MAC but that would also require resynchronization 3252 * with status LEs. Reintializing status LEs would 3253 * affect other port in dual MAC configuration so it 3254 * should be avoided as possible as we can. 3255 * Due to lack of documentation it's all vague guess but 3256 * it needs more investigation. 3257 */ 3258 } 3259 } 3260 3261 static void 3262 msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status) 3263 { 3264 struct msk_softc *sc; 3265 3266 sc = sc_if->msk_softc; 3267 if ((status & Y2_IS_PAR_RD1) != 0) { 3268 device_printf(sc_if->msk_if_dev, 3269 "RAM buffer read parity error\n"); 3270 /* Clear IRQ. */ 3271 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL), 3272 RI_CLR_RD_PERR); 3273 } 3274 if ((status & Y2_IS_PAR_WR1) != 0) { 3275 device_printf(sc_if->msk_if_dev, 3276 "RAM buffer write parity error\n"); 3277 /* Clear IRQ. */ 3278 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL), 3279 RI_CLR_WR_PERR); 3280 } 3281 if ((status & Y2_IS_PAR_MAC1) != 0) { 3282 device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n"); 3283 /* Clear IRQ. */ 3284 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3285 GMF_CLI_TX_PE); 3286 } 3287 if ((status & Y2_IS_PAR_RX1) != 0) { 3288 device_printf(sc_if->msk_if_dev, "Rx parity error\n"); 3289 /* Clear IRQ. */ 3290 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR); 3291 } 3292 if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) { 3293 device_printf(sc_if->msk_if_dev, "TCP segmentation error\n"); 3294 /* Clear IRQ. */ 3295 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP); 3296 } 3297 } 3298 3299 static void 3300 msk_intr_hwerr(struct msk_softc *sc) 3301 { 3302 uint32_t status; 3303 uint32_t tlphead[4]; 3304 3305 status = CSR_READ_4(sc, B0_HWE_ISRC); 3306 /* Time Stamp timer overflow. */ 3307 if ((status & Y2_IS_TIST_OV) != 0) 3308 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); 3309 if ((status & Y2_IS_PCI_NEXP) != 0) { 3310 /* 3311 * PCI Express Error occured which is not described in PEX 3312 * spec. 3313 * This error is also mapped either to Master Abort( 3314 * Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and 3315 * can only be cleared there. 3316 */ 3317 device_printf(sc->msk_dev, 3318 "PCI Express protocol violation error\n"); 3319 } 3320 3321 if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) { 3322 uint16_t v16; 3323 3324 if ((status & Y2_IS_MST_ERR) != 0) 3325 device_printf(sc->msk_dev, 3326 "unexpected IRQ Status error\n"); 3327 else 3328 device_printf(sc->msk_dev, 3329 "unexpected IRQ Master error\n"); 3330 /* Reset all bits in the PCI status register. */ 3331 v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2); 3332 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); 3333 pci_write_config(sc->msk_dev, PCIR_STATUS, v16 | 3334 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT | 3335 PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2); 3336 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 3337 } 3338 3339 /* Check for PCI Express Uncorrectable Error. */ 3340 if ((status & Y2_IS_PCI_EXP) != 0) { 3341 uint32_t v32; 3342 3343 /* 3344 * On PCI Express bus bridges are called root complexes (RC). 
3345 * PCI Express errors are recognized by the root complex too, 3346 * which requests the system to handle the problem. After 3347 * error occurence it may be that no access to the adapter 3348 * may be performed any longer. 3349 */ 3350 3351 v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT); 3352 if ((v32 & PEX_UNSUP_REQ) != 0) { 3353 /* Ignore unsupported request error. */ 3354 device_printf(sc->msk_dev, 3355 "Uncorrectable PCI Express error\n"); 3356 } 3357 if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) { 3358 int i; 3359 3360 /* Get TLP header form Log Registers. */ 3361 for (i = 0; i < 4; i++) 3362 tlphead[i] = CSR_PCI_READ_4(sc, 3363 PEX_HEADER_LOG + i * 4); 3364 /* Check for vendor defined broadcast message. */ 3365 if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) { 3366 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP; 3367 CSR_WRITE_4(sc, B0_HWE_IMSK, 3368 sc->msk_intrhwemask); 3369 CSR_READ_4(sc, B0_HWE_IMSK); 3370 } 3371 } 3372 /* Clear the interrupt. */ 3373 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); 3374 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff); 3375 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 3376 } 3377 3378 if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL) 3379 msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status); 3380 if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL) 3381 msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8); 3382 } 3383 3384 static __inline void 3385 msk_rxput(struct msk_if_softc *sc_if) 3386 { 3387 struct msk_softc *sc; 3388 3389 sc = sc_if->msk_softc; 3390 if (sc_if->msk_framesize >(MCLBYTES - ETHER_HDR_LEN)) 3391 bus_dmamap_sync( 3392 sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 3393 sc_if->msk_cdata.msk_jumbo_rx_ring_map, 3394 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3395 else 3396 bus_dmamap_sync( 3397 sc_if->msk_cdata.msk_rx_ring_tag, 3398 sc_if->msk_cdata.msk_rx_ring_map, 3399 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3400 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, 3401 PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod); 3402 } 3403 3404 static int 3405 msk_handle_events(struct msk_softc *sc) 3406 { 3407 struct msk_if_softc *sc_if; 3408 int rxput[2]; 3409 struct msk_stat_desc *sd; 3410 uint32_t control, status; 3411 int cons, idx, len, port, rxprog; 3412 3413 idx = CSR_READ_2(sc, STAT_PUT_IDX); 3414 if (idx == sc->msk_stat_cons) 3415 return (0); 3416 3417 /* Sync status LEs. */ 3418 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map, 3419 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 3420 /* XXX Sync Rx LEs here. */ 3421 3422 rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0; 3423 3424 rxprog = 0; 3425 for (cons = sc->msk_stat_cons; cons != idx;) { 3426 sd = &sc->msk_stat_ring[cons]; 3427 control = le32toh(sd->msk_control); 3428 if ((control & HW_OWNER) == 0) 3429 break; 3430 /* 3431 * Marvell's FreeBSD driver updates status LE after clearing 3432 * HW_OWNER. However we don't have a way to sync single LE 3433 * with bus_dma(9) API. bus_dma(9) provides a way to sync 3434 * an entire DMA map. So don't sync LE until we have a better 3435 * way to sync LEs. 
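 *
 * Rx LEs get the same treatment: rxput[] below only counts OP_RXSTAT
 * events per port, and msk_rxput() syncs the whole Rx ring map and
 * kicks the prefetch unit once the put watermark is reached or event
 * processing finishes.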
3436 */ 3437 control &= ~HW_OWNER; 3438 sd->msk_control = htole32(control); 3439 status = le32toh(sd->msk_status); 3440 len = control & STLE_LEN_MASK; 3441 port = (control >> 16) & 0x01; 3442 sc_if = sc->msk_if[port]; 3443 if (sc_if == NULL) { 3444 device_printf(sc->msk_dev, "invalid port opcode " 3445 "0x%08x\n", control & STLE_OP_MASK); 3446 continue; 3447 } 3448 3449 switch (control & STLE_OP_MASK) { 3450 case OP_RXVLAN: 3451 sc_if->msk_vtag = ntohs(len); 3452 break; 3453 case OP_RXCHKSVLAN: 3454 sc_if->msk_vtag = ntohs(len); 3455 break; 3456 case OP_RXSTAT: 3457 if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) 3458 msk_jumbo_rxeof(sc_if, status, len); 3459 else 3460 msk_rxeof(sc_if, status, len); 3461 rxprog++; 3462 /* 3463 * Because there is no way to sync single Rx LE 3464 * put the DMA sync operation off until the end of 3465 * event processing. 3466 */ 3467 rxput[port]++; 3468 /* Update prefetch unit if we've passed water mark. */ 3469 if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) { 3470 msk_rxput(sc_if); 3471 rxput[port] = 0; 3472 } 3473 break; 3474 case OP_TXINDEXLE: 3475 if (sc->msk_if[MSK_PORT_A] != NULL) 3476 msk_txeof(sc->msk_if[MSK_PORT_A], 3477 status & STLE_TXA1_MSKL); 3478 if (sc->msk_if[MSK_PORT_B] != NULL) 3479 msk_txeof(sc->msk_if[MSK_PORT_B], 3480 ((status & STLE_TXA2_MSKL) >> 3481 STLE_TXA2_SHIFTL) | 3482 ((len & STLE_TXA2_MSKH) << 3483 STLE_TXA2_SHIFTH)); 3484 break; 3485 default: 3486 device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n", 3487 control & STLE_OP_MASK); 3488 break; 3489 } 3490 MSK_INC(cons, MSK_STAT_RING_CNT); 3491 if (rxprog > sc->msk_process_limit) 3492 break; 3493 } 3494 3495 sc->msk_stat_cons = cons; 3496 /* XXX We should sync status LEs here. See above notes. */ 3497 3498 if (rxput[MSK_PORT_A] > 0) 3499 msk_rxput(sc->msk_if[MSK_PORT_A]); 3500 if (rxput[MSK_PORT_B] > 0) 3501 msk_rxput(sc->msk_if[MSK_PORT_B]); 3502 3503 return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX)); 3504 } 3505 3506 static int 3507 msk_intr(void *xsc) 3508 { 3509 struct msk_softc *sc; 3510 uint32_t status; 3511 3512 sc = xsc; 3513 status = CSR_READ_4(sc, B0_Y2_SP_ISRC2); 3514 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */ 3515 if (status == 0 || status == 0xffffffff) { 3516 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3517 return (FILTER_STRAY); 3518 } 3519 3520 taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task); 3521 return (FILTER_HANDLED); 3522 } 3523 3524 static void 3525 msk_int_task(void *arg, int pending) 3526 { 3527 struct msk_softc *sc; 3528 struct msk_if_softc *sc_if0, *sc_if1; 3529 struct ifnet *ifp0, *ifp1; 3530 uint32_t status; 3531 int domore; 3532 3533 sc = arg; 3534 MSK_LOCK(sc); 3535 3536 /* Get interrupt source. 
*/ 3537 status = CSR_READ_4(sc, B0_ISRC); 3538 if (status == 0 || status == 0xffffffff || sc->msk_suspended != 0 || 3539 (status & sc->msk_intrmask) == 0) 3540 goto done; 3541 3542 sc_if0 = sc->msk_if[MSK_PORT_A]; 3543 sc_if1 = sc->msk_if[MSK_PORT_B]; 3544 ifp0 = ifp1 = NULL; 3545 if (sc_if0 != NULL) 3546 ifp0 = sc_if0->msk_ifp; 3547 if (sc_if1 != NULL) 3548 ifp1 = sc_if1->msk_ifp; 3549 3550 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL) 3551 msk_intr_phy(sc_if0); 3552 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL) 3553 msk_intr_phy(sc_if1); 3554 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL) 3555 msk_intr_gmac(sc_if0); 3556 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL) 3557 msk_intr_gmac(sc_if1); 3558 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) { 3559 device_printf(sc->msk_dev, "Rx descriptor error\n"); 3560 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2); 3561 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3562 CSR_READ_4(sc, B0_IMSK); 3563 } 3564 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) { 3565 device_printf(sc->msk_dev, "Tx descriptor error\n"); 3566 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2); 3567 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3568 CSR_READ_4(sc, B0_IMSK); 3569 } 3570 if ((status & Y2_IS_HW_ERR) != 0) 3571 msk_intr_hwerr(sc); 3572 3573 domore = msk_handle_events(sc); 3574 if ((status & Y2_IS_STAT_BMU) != 0) 3575 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ); 3576 3577 if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 && 3578 !IFQ_DRV_IS_EMPTY(&ifp0->if_snd)) 3579 taskqueue_enqueue(taskqueue_fast, &sc_if0->msk_tx_task); 3580 if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 && 3581 !IFQ_DRV_IS_EMPTY(&ifp1->if_snd)) 3582 taskqueue_enqueue(taskqueue_fast, &sc_if1->msk_tx_task); 3583 3584 if (domore > 0) { 3585 taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task); 3586 MSK_UNLOCK(sc); 3587 return; 3588 } 3589 done: 3590 MSK_UNLOCK(sc); 3591 3592 /* Reenable interrupts. */ 3593 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3594 } 3595 3596 static void 3597 msk_init(void *xsc) 3598 { 3599 struct msk_if_softc *sc_if = xsc; 3600 3601 MSK_IF_LOCK(sc_if); 3602 msk_init_locked(sc_if); 3603 MSK_IF_UNLOCK(sc_if); 3604 } 3605 3606 static void 3607 msk_init_locked(struct msk_if_softc *sc_if) 3608 { 3609 struct msk_softc *sc; 3610 struct ifnet *ifp; 3611 struct mii_data *mii; 3612 uint16_t eaddr[ETHER_ADDR_LEN / 2]; 3613 uint16_t gmac; 3614 int error, i; 3615 3616 MSK_IF_LOCK_ASSERT(sc_if); 3617 3618 ifp = sc_if->msk_ifp; 3619 sc = sc_if->msk_softc; 3620 mii = device_get_softc(sc_if->msk_miibus); 3621 3622 error = 0; 3623 /* Cancel pending I/O and free all Rx/Tx buffers. */ 3624 msk_stop(sc_if); 3625 3626 sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + 3627 ETHER_VLAN_ENCAP_LEN; 3628 3629 /* 3630 * Initialize GMAC first. 3631 * Without this initialization, Rx MAC did not work as expected 3632 * and Rx MAC garbled status LEs and it resulted in out-of-order 3633 * or duplicated frame delivery which in turn showed very poor 3634 * Rx performance.(I had to write a packet analysis code that 3635 * could be embeded in driver to diagnose this issue.) 3636 * I've spent almost 2 months to fix this issue. If I have had 3637 * datasheet for Yukon II I wouldn't have encountered this. :-( 3638 */ 3639 gmac = GM_GPCR_SPEED_100 | GM_GPCR_SPEED_1000 | GM_GPCR_DUP_FULL; 3640 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac); 3641 3642 /* Dummy read the Interrupt Source Register. 
	/* Dummy read of the Interrupt Source Register. */
	CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));

	/* Set MIB Clear Counter Mode. */
	gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
	/* Read all MIB Counters with Clear Mode set. */
	for (i = 0; i < GM_MIB_CNT_SIZE; i++)
		GMAC_READ_2(sc, sc_if->msk_port, GM_MIB_CNT_BASE + 8 * i);
	/* Clear MIB Clear Counter Mode. */
	gmac &= ~GM_PAR_MIB_CLR;
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);

	/* Disable FCS. */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);

	/* Setup Transmit Control Register. */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));

	/* Setup Transmit Flow Control Register. */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);

	/* Setup Transmit Parameter Register. */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
	    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
	    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));

	gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
	    GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);

	if (sc_if->msk_framesize > MSK_MAX_FRAMELEN)
		gmac |= GM_SMOD_JUMBO_ENA;
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);

	/* Set station address. */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
		GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L + i * 4,
		    eaddr[i]);
	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
		GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L + i * 4,
		    eaddr[i]);

	/* Disable interrupts for counter overflows. */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);

	/* Configure Rx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
	    GMF_OPER_ON | GMF_RX_F_FL_ON);

	/* Set promiscuous mode. */
	msk_setpromisc(sc_if);

	/* Set multicast filter. */
	msk_setmulti(sc_if);

	/* Flush Rx MAC FIFO on any flow control or error. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
	    GMR_FS_ANY_ERR);

	/* Set Rx FIFO flush threshold to 64 bytes. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR),
	    RX_GMF_FL_THR_DEF);

	/* Configure Tx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);

	/* Configure hardware VLAN tag insertion/stripping. */
	msk_setvlan(sc_if, ifp);

	/* XXX It seems STFW is required for all cases. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), TX_STFW_ENA);

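	/*
	 * The block below applies Yukon EC Ultra specific fixups:
	 * Rx pause thresholds and, for jumbo frames, the Tx GMAC FIFO
	 * almost-empty threshold and store-and-forward setting.
	 */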
	if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
		/* Set the Rx Pause thresholds. */
		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
		    MSK_ECU_LLPP);
		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
		    MSK_ECU_ULPP);
		if (sc_if->msk_framesize > MSK_MAX_FRAMELEN) {
			/*
			 * Not sure the following code is needed, as Yukon
			 * EC Ultra may not support jumbo frames.
			 *
			 * Set Tx GMAC FIFO Almost Empty Threshold.
			 */
			CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
			    MSK_ECU_AE_THR);
			/* Disable Store & Forward mode for Tx. */
			CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
			    TX_STFW_DIS);
		}
	}

	/*
	 * Disable the Force Sync bit and Alloc bit in the Tx RAM interface
	 * arbiter as we don't use the Sync Tx queue.
	 */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
	    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
	/* Enable the RAM Interface Arbiter. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);

	/* Setup the RAM buffer. */
	msk_set_rambuffer(sc_if);

	/* Disable the Tx sync queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);

	/* Setup Tx Queue Bus Memory Interface. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
	/* Increase IPID when hardware generates IP packets in TSO. */
	if ((ifp->if_hwassist & CSUM_TSO) != 0)
		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
		    BMU_TX_IPIDINCR_ON);
	else
		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
		    BMU_TX_IPIDINCR_OFF);
	CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
	if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
	    sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
		/* Fix for Yukon-EC Ultra: set BMU FIFO level. */
		CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL), MSK_ECU_TXFF_LEV);
	}

	/* Setup Rx Queue Bus Memory Interface. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
	CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
	if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
	    sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
		/* MAC Rx RAM Read is controlled by hardware. */
		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
	}

	msk_set_prefetch(sc, sc_if->msk_txq,
	    sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
	msk_init_tx_ring(sc_if);

	/* Disable Rx checksum offload and RSS hash. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
	    BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);
	if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) {
		msk_set_prefetch(sc, sc_if->msk_rxq,
		    sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
		    MSK_JUMBO_RX_RING_CNT - 1);
		error = msk_init_jumbo_rx_ring(sc_if);
	} else {
		msk_set_prefetch(sc, sc_if->msk_rxq,
		    sc_if->msk_rdata.msk_rx_ring_paddr,
		    MSK_RX_RING_CNT - 1);
		error = msk_init_rx_ring(sc_if);
	}
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "initialization failed: no memory for Rx buffers\n");
		msk_stop(sc_if);
		return;
	}
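
	/*
	 * From here on the port's interrupt sources are enabled, media
	 * selection is kicked off through mii_mediachg(), the interface
	 * is marked running and the one second driver tick is armed.
	 */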
	/* Configure interrupt handling. */
	if (sc_if->msk_port == MSK_PORT_A) {
		sc->msk_intrmask |= Y2_IS_PORT_A;
		sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
	} else {
		sc->msk_intrmask |= Y2_IS_PORT_B;
		sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
	}
	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
	CSR_READ_4(sc, B0_IMSK);

	sc_if->msk_link = 0;
	mii_mediachg(mii);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
}

static void
msk_set_rambuffer(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	int ltpp, utpp;

	sc = sc_if->msk_softc;

	/* Setup Rx Queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
	    sc->msk_rxqend[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);

	utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
	ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
	if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
		ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
	/* Set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds? */

	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
	CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));

	/* Setup Tx Queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
	    sc->msk_txqend[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	/* Enable Store & Forward for Tx side. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
	CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
}

static void
msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
    uint32_t count)
{

	/* Reset the prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_CLR);
	/* Set the LE base address. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
	    MSK_ADDR_LO(addr));
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
	    MSK_ADDR_HI(addr));
	/* Set the list last index. */
	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
	    count);
	/* Turn on the prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_OP_ON);
	/* Dummy read to ensure the write completed. */
	CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
}
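
/*
 * msk_stop: bring one port's datapath down.  Stops the tick callout,
 * masks the port's interrupts, disables the Tx/Rx MACs, stops and
 * resets the BMUs, prefetch units, RAM buffers and MAC FIFOs, frees
 * any mbufs still held by the Rx/Tx DMA maps, and finally marks the
 * interface as not running.  Called with the per-interface lock held.
 */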
static void
msk_stop(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct msk_txdesc *txd;
	struct msk_rxdesc *rxd;
	struct msk_rxdesc *jrxd;
	struct ifnet *ifp;
	uint32_t val;
	int i;

	MSK_IF_LOCK_ASSERT(sc_if);
	sc = sc_if->msk_softc;
	ifp = sc_if->msk_ifp;

	callout_stop(&sc_if->msk_tick_ch);
	sc_if->msk_watchdog_timer = 0;

	/* Disable interrupts. */
	if (sc_if->msk_port == MSK_PORT_A) {
		sc->msk_intrmask &= ~Y2_IS_PORT_A;
		sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
	} else {
		sc->msk_intrmask &= ~Y2_IS_PORT_B;
		sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
	}
	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
	CSR_READ_4(sc, B0_IMSK);

	/* Disable Tx/Rx MAC. */
	val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
	val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
	/* Read again to ensure the write completed. */
	GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);

	/* Stop the Tx BMU. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
	val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
	for (i = 0; i < MSK_TIMEOUT; i++) {
		if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
			    BMU_STOP);
			CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
		} else
			break;
		DELAY(1);
	}
	if (i == MSK_TIMEOUT)
		device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
	    RB_RST_SET | RB_DIS_OP_MD);

	/* Disable all GMAC interrupts. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
	/* Disable PHY interrupts. */
	if (sc->msk_marvell_phy)
		msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);

	/* Disable the RAM Interface Arbiter. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);

	/* Reset the PCI FIFO of the async Tx queue. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
	    BMU_RST_SET | BMU_FIFO_RST);

	/* Reset the Tx prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);

	/* Reset the RAM Buffer async Tx queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);

	/* Reset Tx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
	/* Set Pause Off. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);

	/*
	 * The Rx Stop command will not work for Yukon-2 if the BMU has not
	 * reached the end of a packet, and since we can't be sure that no
	 * data is incoming, the BMU must only be reset while it is not in
	 * the middle of a DMA transfer.  Because the Rx path may still be
	 * active, the Rx RAM buffer is stopped first so any incoming data
	 * cannot trigger a DMA.  Once the RAM buffer is stopped, the BMU is
	 * polled until any DMA in progress has ended, and only then is it
	 * reset.
	 */
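
	/*
	 * The code below implements that order: disable RAM buffer
	 * operation, poll until the Q_RSL and Q_RL reads match (no
	 * transfer in flight), then reset the Rx BMU FIFO, the Rx
	 * prefetch unit, the RAM buffer and the Rx MAC FIFO.
	 */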
	/* Disable the RAM Buffer receive queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
	for (i = 0; i < MSK_TIMEOUT; i++) {
		if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
		    CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
			break;
		DELAY(1);
	}
	if (i == MSK_TIMEOUT)
		device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
	    BMU_RST_SET | BMU_FIFO_RST);
	/* Reset the Rx prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	/* Reset the RAM Buffer receive queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
	/* Reset Rx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);

	/* Free Rx and Tx mbufs still in the queues. */
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
		if (jrxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap);
			m_freem(jrxd->rx_m);
			jrxd->rx_m = NULL;
		}
	}
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	/*
	 * Mark the interface down.
	 */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc_if->msk_link = 0;
}

static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}

static int
sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN,
	    MSK_PROC_MAX));
}
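
/*
 * For reference only: a minimal sketch of how a range-checked handler
 * such as sysctl_hw_msk_proc_limit() is typically attached to the
 * device's sysctl tree at attach time.  The node name and description
 * below are illustrative assumptions, not a copy of this driver's
 * attach code.
 */
#if 0
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->msk_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->msk_dev)),
	    OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
	    &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
	    "maximum number of Rx events to process per interrupt");
#endif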