1 /****************************************************************************** 2 * 3 * Name : sky2.c 4 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x 5 * Version: $Revision: 1.23 $ 6 * Date : $Date: 2005/12/22 09:04:11 $ 7 * Purpose: Main driver source file 8 * 9 *****************************************************************************/ 10 11 /****************************************************************************** 12 * 13 * LICENSE: 14 * Copyright (C) Marvell International Ltd. and/or its affiliates 15 * 16 * The computer program files contained in this folder ("Files") 17 * are provided to you under the BSD-type license terms provided 18 * below, and any use of such Files and any derivative works 19 * thereof created by you shall be governed by the following terms 20 * and conditions: 21 * 22 * - Redistributions of source code must retain the above copyright 23 * notice, this list of conditions and the following disclaimer. 24 * - Redistributions in binary form must reproduce the above 25 * copyright notice, this list of conditions and the following 26 * disclaimer in the documentation and/or other materials provided 27 * with the distribution. 28 * - Neither the name of Marvell nor the names of its contributors 29 * may be used to endorse or promote products derived from this 30 * software without specific prior written permission. 31 * 32 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 33 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 34 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 35 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 36 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 37 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 38 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 39 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 40 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 41 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 42 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 43 * OF THE POSSIBILITY OF SUCH DAMAGE. 44 * /LICENSE 45 * 46 *****************************************************************************/ 47 48 /*- 49 * Copyright (c) 1997, 1998, 1999, 2000 50 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 51 * 52 * Redistribution and use in source and binary forms, with or without 53 * modification, are permitted provided that the following conditions 54 * are met: 55 * 1. Redistributions of source code must retain the above copyright 56 * notice, this list of conditions and the following disclaimer. 57 * 2. Redistributions in binary form must reproduce the above copyright 58 * notice, this list of conditions and the following disclaimer in the 59 * documentation and/or other materials provided with the distribution. 60 * 3. All advertising materials mentioning features or use of this software 61 * must display the following acknowledgement: 62 * This product includes software developed by Bill Paul. 63 * 4. Neither the name of the author nor the names of any co-contributors 64 * may be used to endorse or promote products derived from this software 65 * without specific prior written permission. 
66 * 67 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 68 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 69 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 70 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 71 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 72 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 73 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 74 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 75 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 76 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 77 * THE POSSIBILITY OF SUCH DAMAGE. 78 */ 79 /*- 80 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu> 81 * 82 * Permission to use, copy, modify, and distribute this software for any 83 * purpose with or without fee is hereby granted, provided that the above 84 * copyright notice and this permission notice appear in all copies. 85 * 86 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 87 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 88 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 89 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 90 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 91 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 92 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 93 */ 94 95 /* 96 * Device driver for the Marvell Yukon II Ethernet controller. 97 * Due to lack of documentation, this driver is based on the code from 98 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x. 99 */ 100 101 #include <sys/cdefs.h> 102 __FBSDID("$FreeBSD$"); 103 104 #include <sys/param.h> 105 #include <sys/systm.h> 106 #include <sys/bus.h> 107 #include <sys/endian.h> 108 #include <sys/mbuf.h> 109 #include <sys/malloc.h> 110 #include <sys/kernel.h> 111 #include <sys/module.h> 112 #include <sys/socket.h> 113 #include <sys/sockio.h> 114 #include <sys/queue.h> 115 #include <sys/sysctl.h> 116 #include <sys/taskqueue.h> 117 118 #include <net/bpf.h> 119 #include <net/ethernet.h> 120 #include <net/if.h> 121 #include <net/if_arp.h> 122 #include <net/if_dl.h> 123 #include <net/if_media.h> 124 #include <net/if_types.h> 125 #include <net/if_vlan_var.h> 126 127 #include <netinet/in.h> 128 #include <netinet/in_systm.h> 129 #include <netinet/ip.h> 130 #include <netinet/tcp.h> 131 #include <netinet/udp.h> 132 133 #include <machine/bus.h> 134 #include <machine/resource.h> 135 #include <sys/rman.h> 136 137 #include <dev/mii/mii.h> 138 #include <dev/mii/miivar.h> 139 #include <dev/mii/brgphyreg.h> 140 141 #include <dev/pci/pcireg.h> 142 #include <dev/pci/pcivar.h> 143 144 #include <dev/msk/if_mskreg.h> 145 146 MODULE_DEPEND(msk, pci, 1, 1, 1); 147 MODULE_DEPEND(msk, ether, 1, 1, 1); 148 MODULE_DEPEND(msk, miibus, 1, 1, 1); 149 150 /* "device miibus" required. See GENERIC if you get errors here. */ 151 #include "miibus_if.h" 152 153 /* Tunables. */ 154 static int msi_disable = 0; 155 TUNABLE_INT("hw.msk.msi_disable", &msi_disable); 156 157 #define MSK_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) 158 159 /* 160 * Devices supported by this driver. 
161 */ 162 static struct msk_product { 163 uint16_t msk_vendorid; 164 uint16_t msk_deviceid; 165 const char *msk_name; 166 } msk_products[] = { 167 { VENDORID_SK, DEVICEID_SK_YUKON2, 168 "SK-9Sxx Gigabit Ethernet" }, 169 { VENDORID_SK, DEVICEID_SK_YUKON2_EXPR, 170 "SK-9Exx Gigabit Ethernet"}, 171 { VENDORID_MARVELL, DEVICEID_MRVL_8021CU, 172 "Marvell Yukon 88E8021CU Gigabit Ethernet" }, 173 { VENDORID_MARVELL, DEVICEID_MRVL_8021X, 174 "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" }, 175 { VENDORID_MARVELL, DEVICEID_MRVL_8022CU, 176 "Marvell Yukon 88E8022CU Gigabit Ethernet" }, 177 { VENDORID_MARVELL, DEVICEID_MRVL_8022X, 178 "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" }, 179 { VENDORID_MARVELL, DEVICEID_MRVL_8061CU, 180 "Marvell Yukon 88E8061CU Gigabit Ethernet" }, 181 { VENDORID_MARVELL, DEVICEID_MRVL_8061X, 182 "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" }, 183 { VENDORID_MARVELL, DEVICEID_MRVL_8062CU, 184 "Marvell Yukon 88E8062CU Gigabit Ethernet" }, 185 { VENDORID_MARVELL, DEVICEID_MRVL_8062X, 186 "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" }, 187 { VENDORID_MARVELL, DEVICEID_MRVL_8035, 188 "Marvell Yukon 88E8035 Gigabit Ethernet" }, 189 { VENDORID_MARVELL, DEVICEID_MRVL_8036, 190 "Marvell Yukon 88E8036 Gigabit Ethernet" }, 191 { VENDORID_MARVELL, DEVICEID_MRVL_8038, 192 "Marvell Yukon 88E8038 Gigabit Ethernet" }, 193 { VENDORID_MARVELL, DEVICEID_MRVL_4361, 194 "Marvell Yukon 88E8050 Gigabit Ethernet" }, 195 { VENDORID_MARVELL, DEVICEID_MRVL_4360, 196 "Marvell Yukon 88E8052 Gigabit Ethernet" }, 197 { VENDORID_MARVELL, DEVICEID_MRVL_4362, 198 "Marvell Yukon 88E8053 Gigabit Ethernet" }, 199 { VENDORID_MARVELL, DEVICEID_MRVL_4363, 200 "Marvell Yukon 88E8055 Gigabit Ethernet" }, 201 { VENDORID_MARVELL, DEVICEID_MRVL_4364, 202 "Marvell Yukon 88E8056 Gigabit Ethernet" }, 203 { VENDORID_DLINK, DEVICEID_DLINK_DGE550SX, 204 "D-Link 550SX Gigabit Ethernet" }, 205 { VENDORID_DLINK, DEVICEID_DLINK_DGE560T, 206 "D-Link 560T Gigabit Ethernet" } 207 }; 208 209 static const char *model_name[] = { 210 "Yukon XL", 211 "Yukon EC Ultra", 212 "Yukon Unknown", 213 "Yukon EC", 214 "Yukon FE" 215 }; 216 217 static int mskc_probe(device_t); 218 static int mskc_attach(device_t); 219 static int mskc_detach(device_t); 220 static void mskc_shutdown(device_t); 221 static int mskc_setup_rambuffer(struct msk_softc *); 222 static int mskc_suspend(device_t); 223 static int mskc_resume(device_t); 224 static void mskc_reset(struct msk_softc *); 225 226 static int msk_probe(device_t); 227 static int msk_attach(device_t); 228 static int msk_detach(device_t); 229 230 static void msk_tick(void *); 231 static void msk_intr(void *); 232 static void msk_int_task(void *, int); 233 static void msk_intr_phy(struct msk_if_softc *); 234 static void msk_intr_gmac(struct msk_if_softc *); 235 static __inline void msk_rxput(struct msk_if_softc *); 236 static int msk_handle_events(struct msk_softc *); 237 static void msk_handle_hwerr(struct msk_if_softc *, uint32_t); 238 static void msk_intr_hwerr(struct msk_softc *); 239 static void msk_rxeof(struct msk_if_softc *, uint32_t, int); 240 static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, int); 241 static void msk_txeof(struct msk_if_softc *, int); 242 static struct mbuf *msk_defrag(struct mbuf *, int, int); 243 static int msk_encap(struct msk_if_softc *, struct mbuf **); 244 static void msk_tx_task(void *, int); 245 static void msk_start(struct ifnet *); 246 static int msk_ioctl(struct ifnet *, u_long, caddr_t); 247 static void msk_set_prefetch(struct msk_softc *, int, 
bus_addr_t, uint32_t); 248 static void msk_set_rambuffer(struct msk_if_softc *); 249 static void msk_init(void *); 250 static void msk_init_locked(struct msk_if_softc *); 251 static void msk_stop(struct msk_if_softc *); 252 static void msk_watchdog(struct msk_if_softc *); 253 static int msk_mediachange(struct ifnet *); 254 static void msk_mediastatus(struct ifnet *, struct ifmediareq *); 255 static void msk_phy_power(struct msk_softc *, int); 256 static void msk_dmamap_cb(void *, bus_dma_segment_t *, int, int); 257 static int msk_status_dma_alloc(struct msk_softc *); 258 static void msk_status_dma_free(struct msk_softc *); 259 static int msk_txrx_dma_alloc(struct msk_if_softc *); 260 static void msk_txrx_dma_free(struct msk_if_softc *); 261 static void *msk_jalloc(struct msk_if_softc *); 262 static void msk_jfree(void *, void *); 263 static int msk_init_rx_ring(struct msk_if_softc *); 264 static int msk_init_jumbo_rx_ring(struct msk_if_softc *); 265 static void msk_init_tx_ring(struct msk_if_softc *); 266 static __inline void msk_discard_rxbuf(struct msk_if_softc *, int); 267 static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int); 268 static int msk_newbuf(struct msk_if_softc *, int); 269 static int msk_jumbo_newbuf(struct msk_if_softc *, int); 270 271 static int msk_phy_readreg(struct msk_if_softc *, int, int); 272 static int msk_phy_writereg(struct msk_if_softc *, int, int, int); 273 static int msk_miibus_readreg(device_t, int, int); 274 static int msk_miibus_writereg(device_t, int, int, int); 275 static void msk_miibus_statchg(device_t); 276 static void msk_link_task(void *, int); 277 278 static void msk_setmulti(struct msk_if_softc *); 279 static void msk_setvlan(struct msk_if_softc *, struct ifnet *); 280 static void msk_setpromisc(struct msk_if_softc *); 281 282 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); 283 static int sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS); 284 285 static device_method_t mskc_methods[] = { 286 /* Device interface */ 287 DEVMETHOD(device_probe, mskc_probe), 288 DEVMETHOD(device_attach, mskc_attach), 289 DEVMETHOD(device_detach, mskc_detach), 290 DEVMETHOD(device_suspend, mskc_suspend), 291 DEVMETHOD(device_resume, mskc_resume), 292 DEVMETHOD(device_shutdown, mskc_shutdown), 293 294 /* bus interface */ 295 DEVMETHOD(bus_print_child, bus_generic_print_child), 296 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 297 298 { NULL, NULL } 299 }; 300 301 static driver_t mskc_driver = { 302 "mskc", 303 mskc_methods, 304 sizeof(struct msk_softc) 305 }; 306 307 static devclass_t mskc_devclass; 308 309 static device_method_t msk_methods[] = { 310 /* Device interface */ 311 DEVMETHOD(device_probe, msk_probe), 312 DEVMETHOD(device_attach, msk_attach), 313 DEVMETHOD(device_detach, msk_detach), 314 DEVMETHOD(device_shutdown, bus_generic_shutdown), 315 316 /* bus interface */ 317 DEVMETHOD(bus_print_child, bus_generic_print_child), 318 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 319 320 /* MII interface */ 321 DEVMETHOD(miibus_readreg, msk_miibus_readreg), 322 DEVMETHOD(miibus_writereg, msk_miibus_writereg), 323 DEVMETHOD(miibus_statchg, msk_miibus_statchg), 324 325 { NULL, NULL } 326 }; 327 328 static driver_t msk_driver = { 329 "msk", 330 msk_methods, 331 sizeof(struct msk_if_softc) 332 }; 333 334 static devclass_t msk_devclass; 335 336 DRIVER_MODULE(mskc, pci, mskc_driver, mskc_devclass, 0, 0); 337 DRIVER_MODULE(msk, mskc, msk_driver, msk_devclass, 0, 0); 338 DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, 0, 
0); 339 340 static struct resource_spec msk_res_spec_io[] = { 341 { SYS_RES_IOPORT, PCIR_BAR(1), RF_ACTIVE }, 342 { -1, 0, 0 } 343 }; 344 345 static struct resource_spec msk_res_spec_mem[] = { 346 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE }, 347 { -1, 0, 0 } 348 }; 349 350 static struct resource_spec msk_irq_spec_legacy[] = { 351 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, 352 { -1, 0, 0 } 353 }; 354 355 static struct resource_spec msk_irq_spec_msi[] = { 356 { SYS_RES_IRQ, 1, RF_ACTIVE }, 357 { SYS_RES_IRQ, 2, RF_ACTIVE }, 358 { -1, 0, 0 } 359 }; 360 361 static int 362 msk_miibus_readreg(device_t dev, int phy, int reg) 363 { 364 struct msk_if_softc *sc_if; 365 366 sc_if = device_get_softc(dev); 367 368 return (msk_phy_readreg(sc_if, phy, reg)); 369 } 370 371 static int 372 msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg) 373 { 374 struct msk_softc *sc; 375 int i, val; 376 377 sc = sc_if->msk_softc; 378 379 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL, 380 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); 381 382 for (i = 0; i < MSK_TIMEOUT; i++) { 383 DELAY(1); 384 val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL); 385 if ((val & GM_SMI_CT_RD_VAL) != 0) { 386 val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA); 387 break; 388 } 389 } 390 391 if (i == MSK_TIMEOUT) { 392 if_printf(sc_if->msk_ifp, "phy failed to come ready\n"); 393 val = 0; 394 } 395 396 return (val); 397 } 398 399 static int 400 msk_miibus_writereg(device_t dev, int phy, int reg, int val) 401 { 402 struct msk_if_softc *sc_if; 403 404 sc_if = device_get_softc(dev); 405 406 return (msk_phy_writereg(sc_if, phy, reg, val)); 407 } 408 409 static int 410 msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val) 411 { 412 struct msk_softc *sc; 413 int i; 414 415 sc = sc_if->msk_softc; 416 417 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val); 418 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL, 419 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg)); 420 for (i = 0; i < MSK_TIMEOUT; i++) { 421 DELAY(1); 422 if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) & 423 GM_SMI_CT_BUSY) == 0) 424 break; 425 } 426 if (i == MSK_TIMEOUT) 427 if_printf(sc_if->msk_ifp, "phy write timeout\n"); 428 429 return (0); 430 } 431 432 static void 433 msk_miibus_statchg(device_t dev) 434 { 435 struct msk_if_softc *sc_if; 436 437 sc_if = device_get_softc(dev); 438 taskqueue_enqueue(taskqueue_swi, &sc_if->msk_link_task); 439 } 440 441 static void 442 msk_link_task(void *arg, int pending) 443 { 444 struct msk_softc *sc; 445 struct msk_if_softc *sc_if; 446 struct mii_data *mii; 447 struct ifnet *ifp; 448 uint32_t gmac, ane; 449 450 sc_if = (struct msk_if_softc *)arg; 451 sc = sc_if->msk_softc; 452 453 MSK_IF_LOCK(sc_if); 454 455 mii = device_get_softc(sc_if->msk_miibus); 456 ifp = sc_if->msk_ifp; 457 if (mii == NULL || ifp == NULL) { 458 MSK_IF_UNLOCK(sc_if); 459 return; 460 } 461 462 if (mii->mii_media_status & IFM_ACTIVE) { 463 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 464 sc_if->msk_link = 1; 465 } else 466 sc_if->msk_link = 0; 467 468 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); 469 ane = 0; 470 if (sc_if->msk_link != 0) { 471 /* Enable Tx FIFO Underrun. 
		 */
		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
		    GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_AUTO:
			ane = 1;
			break;
		case IFM_1000_SX:
		case IFM_1000_T:
			gmac &= ~GM_GPCR_SPEED_100;
			gmac |= GM_GPCR_SPEED_1000;
			break;
		case IFM_100_TX:
			gmac |= GM_GPCR_SPEED_100;
			gmac &= ~GM_GPCR_SPEED_1000;
			break;
		case IFM_10_T:
			gmac &= ~(GM_GPCR_SPEED_100 | GM_GPCR_SPEED_1000);
			break;
		}

		if (ane == 0)
			gmac |= GM_GPCR_AU_ALL_DIS;
		else
			gmac &= ~GM_GPCR_AU_ALL_DIS;
		if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
			gmac |= GM_GPCR_DUP_FULL;
		/* Enable Rx flow control. */
		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) != 0)
			gmac &= ~GM_GPCR_FC_RX_DIS;
		/* Enable Tx flow control. */
		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) != 0)
			gmac &= ~GM_GPCR_FC_TX_DIS;
		gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
		/* Read back to ensure the write completed. */
		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);

		gmac = GMC_PAUSE_ON;
		if (((mii->mii_media_active & IFM_GMASK) &
		    (IFM_FLAG0 | IFM_FLAG1)) == 0)
			gmac = GMC_PAUSE_OFF;
		/* Disable pause for 10/100 Mbps in half-duplex mode. */
		if ((((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) &&
		    (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX ||
		    IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T))
			gmac = GMC_PAUSE_OFF;
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);

		/* Enable PHY interrupt for FIFO underrun/overflow. */
		if (sc->msk_marvell_phy)
			msk_phy_writereg(sc_if, PHY_ADDR_MARV,
			    PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
	} else {
		/*
		 * Link state changed to down.
		 * Disable PHY interrupts.
		 */
		if (sc->msk_marvell_phy)
			msk_phy_writereg(sc_if, PHY_ADDR_MARV,
			    PHY_MARV_INT_MASK, 0);
		/* Disable Rx/Tx MAC. */
		gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
		/* Read back to ensure the write completed. */
		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
	}

	MSK_IF_UNLOCK(sc_if);
}

static void
msk_setmulti(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t mchash[2];
	uint32_t crc;
	uint16_t mode;

	sc = sc_if->msk_softc;

	MSK_IF_LOCK_ASSERT(sc_if);

	ifp = sc_if->msk_ifp;

	bzero(mchash, sizeof(mchash));
	mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
	mode |= GM_RXCR_UCF_ENA;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
		else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
			mchash[0] = 0xffffffff;
			mchash[1] = 0xffffffff;
		}
	} else {
		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN);
			/* Just want the 6 least significant bits. */
			crc &= 0x3f;
			/* Set the corresponding bit in the hash table.
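			 * Added illustrative example (not from the original
			 * source): an address whose masked CRC value is 0x27
			 * (decimal 39) selects word 39 >> 5 = 1 and bit
			 * 39 & 0x1f = 7, so the update below amounts to
			 * mchash[1] |= 1 << 7.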
*/ 578 mchash[crc >> 5] |= 1 << (crc & 0x1f); 579 } 580 IF_ADDR_UNLOCK(ifp); 581 mode |= GM_RXCR_MCF_ENA; 582 } 583 584 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1, 585 mchash[0] & 0xffff); 586 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2, 587 (mchash[0] >> 16) & 0xffff); 588 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3, 589 mchash[1] & 0xffff); 590 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4, 591 (mchash[1] >> 16) & 0xffff); 592 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode); 593 } 594 595 static void 596 msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp) 597 { 598 struct msk_softc *sc; 599 600 sc = sc_if->msk_softc; 601 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 602 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), 603 RX_VLAN_STRIP_ON); 604 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 605 TX_VLAN_TAG_ON); 606 } else { 607 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), 608 RX_VLAN_STRIP_OFF); 609 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 610 TX_VLAN_TAG_OFF); 611 } 612 } 613 614 static void 615 msk_setpromisc(struct msk_if_softc *sc_if) 616 { 617 struct msk_softc *sc; 618 struct ifnet *ifp; 619 uint16_t mode; 620 621 MSK_IF_LOCK_ASSERT(sc_if); 622 623 sc = sc_if->msk_softc; 624 ifp = sc_if->msk_ifp; 625 626 mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL); 627 if (ifp->if_flags & IFF_PROMISC) 628 mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 629 else 630 mode |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 631 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode); 632 } 633 634 static int 635 msk_init_rx_ring(struct msk_if_softc *sc_if) 636 { 637 struct msk_ring_data *rd; 638 struct msk_rxdesc *rxd; 639 int i, prod; 640 641 MSK_IF_LOCK_ASSERT(sc_if); 642 643 sc_if->msk_cdata.msk_rx_cons = 0; 644 sc_if->msk_cdata.msk_rx_prod = 0; 645 sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM; 646 647 rd = &sc_if->msk_rdata; 648 bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT); 649 prod = sc_if->msk_cdata.msk_rx_prod; 650 for (i = 0; i < MSK_RX_RING_CNT; i++) { 651 rxd = &sc_if->msk_cdata.msk_rxdesc[prod]; 652 rxd->rx_m = NULL; 653 rxd->rx_le = &rd->msk_rx_ring[prod]; 654 if (msk_newbuf(sc_if, prod) != 0) 655 return (ENOBUFS); 656 MSK_INC(prod, MSK_RX_RING_CNT); 657 } 658 659 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag, 660 sc_if->msk_cdata.msk_rx_ring_map, 661 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 662 663 /* Update prefetch unit. 
*/ 664 sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1; 665 CSR_WRITE_2(sc_if->msk_softc, 666 Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG), 667 sc_if->msk_cdata.msk_rx_prod); 668 669 return (0); 670 } 671 672 static int 673 msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if) 674 { 675 struct msk_ring_data *rd; 676 struct msk_rxdesc *rxd; 677 int i, prod; 678 679 MSK_IF_LOCK_ASSERT(sc_if); 680 681 sc_if->msk_cdata.msk_rx_cons = 0; 682 sc_if->msk_cdata.msk_rx_prod = 0; 683 sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM; 684 685 rd = &sc_if->msk_rdata; 686 bzero(rd->msk_jumbo_rx_ring, 687 sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT); 688 prod = sc_if->msk_cdata.msk_rx_prod; 689 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 690 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod]; 691 rxd->rx_m = NULL; 692 rxd->rx_le = &rd->msk_jumbo_rx_ring[prod]; 693 if (msk_jumbo_newbuf(sc_if, prod) != 0) 694 return (ENOBUFS); 695 MSK_INC(prod, MSK_JUMBO_RX_RING_CNT); 696 } 697 698 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 699 sc_if->msk_cdata.msk_jumbo_rx_ring_map, 700 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 701 702 sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1; 703 CSR_WRITE_2(sc_if->msk_softc, 704 Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG), 705 sc_if->msk_cdata.msk_rx_prod); 706 707 return (0); 708 } 709 710 static void 711 msk_init_tx_ring(struct msk_if_softc *sc_if) 712 { 713 struct msk_ring_data *rd; 714 struct msk_txdesc *txd; 715 int i; 716 717 sc_if->msk_cdata.msk_tso_mtu = 0; 718 sc_if->msk_cdata.msk_tx_prod = 0; 719 sc_if->msk_cdata.msk_tx_cons = 0; 720 sc_if->msk_cdata.msk_tx_cnt = 0; 721 722 rd = &sc_if->msk_rdata; 723 bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT); 724 for (i = 0; i < MSK_TX_RING_CNT; i++) { 725 txd = &sc_if->msk_cdata.msk_txdesc[i]; 726 txd->tx_m = NULL; 727 txd->tx_le = &rd->msk_tx_ring[i]; 728 } 729 730 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag, 731 sc_if->msk_cdata.msk_tx_ring_map, 732 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 733 } 734 735 static __inline void 736 msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx) 737 { 738 struct msk_rx_desc *rx_le; 739 struct msk_rxdesc *rxd; 740 struct mbuf *m; 741 742 rxd = &sc_if->msk_cdata.msk_rxdesc[idx]; 743 m = rxd->rx_m; 744 rx_le = rxd->rx_le; 745 rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER); 746 } 747 748 static __inline void 749 msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx) 750 { 751 struct msk_rx_desc *rx_le; 752 struct msk_rxdesc *rxd; 753 struct mbuf *m; 754 755 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx]; 756 m = rxd->rx_m; 757 rx_le = rxd->rx_le; 758 rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER); 759 } 760 761 static int 762 msk_newbuf(struct msk_if_softc *sc_if, int idx) 763 { 764 struct msk_rx_desc *rx_le; 765 struct msk_rxdesc *rxd; 766 struct mbuf *m; 767 bus_dma_segment_t segs[1]; 768 bus_dmamap_t map; 769 int nsegs; 770 771 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 772 if (m == NULL) 773 return (ENOBUFS); 774 775 m->m_len = m->m_pkthdr.len = MCLBYTES; 776 m_adj(m, ETHER_ALIGN); 777 778 if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_rx_tag, 779 sc_if->msk_cdata.msk_rx_sparemap, m, segs, &nsegs, 780 BUS_DMA_NOWAIT) != 0) { 781 m_freem(m); 782 return (ENOBUFS); 783 } 784 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 785 786 rxd = &sc_if->msk_cdata.msk_rxdesc[idx]; 787 if (rxd->rx_m != NULL) { 788 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap, 
789 BUS_DMASYNC_POSTREAD); 790 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap); 791 } 792 map = rxd->rx_dmamap; 793 rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap; 794 sc_if->msk_cdata.msk_rx_sparemap = map; 795 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap, 796 BUS_DMASYNC_PREREAD); 797 rxd->rx_m = m; 798 rx_le = rxd->rx_le; 799 rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr)); 800 rx_le->msk_control = 801 htole32(segs[0].ds_len | OP_PACKET | HW_OWNER); 802 803 return (0); 804 } 805 806 static int 807 msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx) 808 { 809 struct msk_rx_desc *rx_le; 810 struct msk_rxdesc *rxd; 811 struct mbuf *m; 812 bus_dma_segment_t segs[1]; 813 bus_dmamap_t map; 814 int nsegs; 815 void *buf; 816 817 MGETHDR(m, M_DONTWAIT, MT_DATA); 818 if (m == NULL) 819 return (ENOBUFS); 820 buf = msk_jalloc(sc_if); 821 if (buf == NULL) { 822 m_freem(m); 823 return (ENOBUFS); 824 } 825 /* Attach the buffer to the mbuf. */ 826 MEXTADD(m, buf, MSK_JLEN, msk_jfree, (struct msk_if_softc *)sc_if, 0, 827 EXT_NET_DRV); 828 if ((m->m_flags & M_EXT) == 0) { 829 m_freem(m); 830 return (ENOBUFS); 831 } 832 m->m_pkthdr.len = m->m_len = MSK_JLEN; 833 m_adj(m, ETHER_ALIGN); 834 835 if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag, 836 sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs, 837 BUS_DMA_NOWAIT) != 0) { 838 m_freem(m); 839 return (ENOBUFS); 840 } 841 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 842 843 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx]; 844 if (rxd->rx_m != NULL) { 845 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, 846 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 847 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag, 848 rxd->rx_dmamap); 849 } 850 map = rxd->rx_dmamap; 851 rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap; 852 sc_if->msk_cdata.msk_jumbo_rx_sparemap = map; 853 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap, 854 BUS_DMASYNC_PREREAD); 855 rxd->rx_m = m; 856 rx_le = rxd->rx_le; 857 rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr)); 858 rx_le->msk_control = 859 htole32(segs[0].ds_len | OP_PACKET | HW_OWNER); 860 861 return (0); 862 } 863 864 /* 865 * Set media options. 866 */ 867 static int 868 msk_mediachange(struct ifnet *ifp) 869 { 870 struct msk_if_softc *sc_if; 871 struct mii_data *mii; 872 873 sc_if = ifp->if_softc; 874 875 MSK_IF_LOCK(sc_if); 876 mii = device_get_softc(sc_if->msk_miibus); 877 mii_mediachg(mii); 878 MSK_IF_UNLOCK(sc_if); 879 880 return (0); 881 } 882 883 /* 884 * Report current media status. 
885 */ 886 static void 887 msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 888 { 889 struct msk_if_softc *sc_if; 890 struct mii_data *mii; 891 892 sc_if = ifp->if_softc; 893 MSK_IF_LOCK(sc_if); 894 mii = device_get_softc(sc_if->msk_miibus); 895 896 mii_pollstat(mii); 897 MSK_IF_UNLOCK(sc_if); 898 ifmr->ifm_active = mii->mii_media_active; 899 ifmr->ifm_status = mii->mii_media_status; 900 } 901 902 static int 903 msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 904 { 905 struct msk_if_softc *sc_if; 906 struct ifreq *ifr; 907 struct mii_data *mii; 908 int error, mask; 909 910 sc_if = ifp->if_softc; 911 ifr = (struct ifreq *)data; 912 error = 0; 913 914 switch(command) { 915 case SIOCSIFMTU: 916 if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN) { 917 error = EINVAL; 918 break; 919 } 920 if (sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U && 921 ifr->ifr_mtu > MSK_MAX_FRAMELEN) { 922 error = EINVAL; 923 break; 924 } 925 MSK_IF_LOCK(sc_if); 926 ifp->if_mtu = ifr->ifr_mtu; 927 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 928 msk_init_locked(sc_if); 929 MSK_IF_UNLOCK(sc_if); 930 break; 931 case SIOCSIFFLAGS: 932 MSK_IF_LOCK(sc_if); 933 if ((ifp->if_flags & IFF_UP) != 0) { 934 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 935 if (((ifp->if_flags ^ sc_if->msk_if_flags) 936 & IFF_PROMISC) != 0) { 937 msk_setpromisc(sc_if); 938 msk_setmulti(sc_if); 939 } 940 } else { 941 if (sc_if->msk_detach == 0) 942 msk_init_locked(sc_if); 943 } 944 } else { 945 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 946 msk_stop(sc_if); 947 } 948 sc_if->msk_if_flags = ifp->if_flags; 949 MSK_IF_UNLOCK(sc_if); 950 break; 951 case SIOCADDMULTI: 952 case SIOCDELMULTI: 953 MSK_IF_LOCK(sc_if); 954 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 955 msk_setmulti(sc_if); 956 MSK_IF_UNLOCK(sc_if); 957 break; 958 case SIOCGIFMEDIA: 959 case SIOCSIFMEDIA: 960 mii = device_get_softc(sc_if->msk_miibus); 961 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 962 break; 963 case SIOCSIFCAP: 964 MSK_IF_LOCK(sc_if); 965 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 966 if ((mask & IFCAP_TXCSUM) != 0) { 967 ifp->if_capenable ^= IFCAP_TXCSUM; 968 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 && 969 (IFCAP_TXCSUM & ifp->if_capabilities) != 0) 970 ifp->if_hwassist |= MSK_CSUM_FEATURES; 971 else 972 ifp->if_hwassist &= ~MSK_CSUM_FEATURES; 973 } 974 if ((mask & IFCAP_VLAN_HWTAGGING) != 0) { 975 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 976 msk_setvlan(sc_if, ifp); 977 } 978 979 if ((mask & IFCAP_TSO4) != 0) { 980 ifp->if_capenable ^= IFCAP_TSO4; 981 if ((IFCAP_TSO4 & ifp->if_capenable) != 0 && 982 (IFCAP_TSO4 & ifp->if_capabilities) != 0) 983 ifp->if_hwassist |= CSUM_TSO; 984 else 985 ifp->if_hwassist &= ~CSUM_TSO; 986 } 987 VLAN_CAPABILITIES(ifp); 988 MSK_IF_UNLOCK(sc_if); 989 break; 990 default: 991 error = ether_ioctl(ifp, command, data); 992 break; 993 } 994 995 return (error); 996 } 997 998 static int 999 mskc_probe(device_t dev) 1000 { 1001 struct msk_product *mp; 1002 uint16_t vendor, devid; 1003 int i; 1004 1005 vendor = pci_get_vendor(dev); 1006 devid = pci_get_device(dev); 1007 mp = msk_products; 1008 for (i = 0; i < sizeof(msk_products)/sizeof(msk_products[0]); 1009 i++, mp++) { 1010 if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) { 1011 device_set_desc(dev, mp->msk_name); 1012 return (BUS_PROBE_DEFAULT); 1013 } 1014 } 1015 1016 return (ENXIO); 1017 } 1018 1019 static int 1020 mskc_setup_rambuffer(struct msk_softc *sc) 1021 { 1022 int totqsize, minqsize; 1023 int avail, 
next; 1024 int i; 1025 uint8_t val; 1026 1027 /* Get adapter SRAM size. */ 1028 val = CSR_READ_1(sc, B2_E_0); 1029 sc->msk_ramsize = (val == 0) ? 128 : val * 4; 1030 if (sc->msk_hw_id == CHIP_ID_YUKON_FE) 1031 sc->msk_ramsize = 4 * 4; 1032 if (bootverbose) 1033 device_printf(sc->msk_dev, 1034 "RAM buffer size : %dKB\n", sc->msk_ramsize); 1035 1036 totqsize = sc->msk_ramsize * sc->msk_num_port; 1037 minqsize = MSK_MIN_RXQ_SIZE + MSK_MIN_TXQ_SIZE; 1038 if (minqsize > sc->msk_ramsize) 1039 minqsize = sc->msk_ramsize; 1040 1041 if (minqsize * sc->msk_num_port > totqsize) { 1042 device_printf(sc->msk_dev, 1043 "not enough RAM buffer memory : %d/%dKB\n", 1044 minqsize * sc->msk_num_port, totqsize); 1045 return (ENOSPC); 1046 } 1047 1048 avail = totqsize; 1049 if (sc->msk_num_port > 1) { 1050 /* 1051 * Divide up the memory evenly so that everyone gets a 1052 * fair share for dual port adapters. 1053 */ 1054 avail = sc->msk_ramsize; 1055 } 1056 1057 /* Take away the minimum memory for active queues. */ 1058 avail -= minqsize; 1059 /* Rx queue gets the minimum + 80% of the rest. */ 1060 sc->msk_rxqsize = 1061 (avail * MSK_RAM_QUOTA_RX) / 100 + MSK_MIN_RXQ_SIZE; 1062 avail -= (sc->msk_rxqsize - MSK_MIN_RXQ_SIZE); 1063 sc->msk_txqsize = avail + MSK_MIN_TXQ_SIZE; 1064 1065 for (i = 0, next = 0; i < sc->msk_num_port; i++) { 1066 sc->msk_rxqstart[i] = next; 1067 sc->msk_rxqend[i] = next + (sc->msk_rxqsize * 1024) - 1; 1068 next = sc->msk_rxqend[i] + 1; 1069 sc->msk_txqstart[i] = next; 1070 sc->msk_txqend[i] = next + (sc->msk_txqsize * 1024) - 1; 1071 next = sc->msk_txqend[i] + 1; 1072 if (bootverbose) { 1073 device_printf(sc->msk_dev, 1074 "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i, 1075 sc->msk_rxqsize, sc->msk_rxqstart[i], 1076 sc->msk_rxqend[i]); 1077 device_printf(sc->msk_dev, 1078 "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i, 1079 sc->msk_txqsize, sc->msk_txqstart[i], 1080 sc->msk_txqend[i]); 1081 } 1082 } 1083 1084 return (0); 1085 } 1086 1087 static void 1088 msk_phy_power(struct msk_softc *sc, int mode) 1089 { 1090 uint32_t val; 1091 int i; 1092 1093 switch (mode) { 1094 case MSK_PHY_POWERUP: 1095 /* Switch power to VCC (WA for VAUX problem). */ 1096 CSR_WRITE_1(sc, B0_POWER_CTRL, 1097 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); 1098 /* Disable Core Clock Division, set Clock Select to 0. */ 1099 CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS); 1100 1101 val = 0; 1102 if (sc->msk_hw_id == CHIP_ID_YUKON_XL && 1103 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { 1104 /* Enable bits are inverted. */ 1105 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | 1106 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS | 1107 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS; 1108 } 1109 /* 1110 * Enable PCI & Core Clock, enable clock gating for both Links. 1111 */ 1112 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val); 1113 1114 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4); 1115 val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); 1116 if (sc->msk_hw_id == CHIP_ID_YUKON_XL && 1117 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { 1118 /* Deassert Low Power for 1st PHY. */ 1119 val |= PCI_Y2_PHY1_COMA; 1120 if (sc->msk_num_port > 1) 1121 val |= PCI_Y2_PHY2_COMA; 1122 } else if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) { 1123 uint32_t our; 1124 1125 CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON); 1126 1127 /* Enable all clocks. 
*/ 1128 pci_write_config(sc->msk_dev, PCI_OUR_REG_3, 0, 4); 1129 our = pci_read_config(sc->msk_dev, PCI_OUR_REG_4, 4); 1130 our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN| 1131 PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST); 1132 /* Set all bits to 0 except bits 15..12. */ 1133 pci_write_config(sc->msk_dev, PCI_OUR_REG_4, our, 4); 1134 /* Set to default value. */ 1135 pci_write_config(sc->msk_dev, PCI_OUR_REG_5, 0, 4); 1136 } 1137 /* Release PHY from PowerDown/COMA mode. */ 1138 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4); 1139 for (i = 0; i < sc->msk_num_port; i++) { 1140 CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL), 1141 GMLC_RST_SET); 1142 CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL), 1143 GMLC_RST_CLR); 1144 } 1145 break; 1146 case MSK_PHY_POWERDOWN: 1147 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4); 1148 val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD; 1149 if (sc->msk_hw_id == CHIP_ID_YUKON_XL && 1150 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { 1151 val &= ~PCI_Y2_PHY1_COMA; 1152 if (sc->msk_num_port > 1) 1153 val &= ~PCI_Y2_PHY2_COMA; 1154 } 1155 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4); 1156 1157 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | 1158 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS | 1159 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS; 1160 if (sc->msk_hw_id == CHIP_ID_YUKON_XL && 1161 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { 1162 /* Enable bits are inverted. */ 1163 val = 0; 1164 } 1165 /* 1166 * Disable PCI & Core Clock, disable clock gating for 1167 * both Links. 1168 */ 1169 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val); 1170 CSR_WRITE_1(sc, B0_POWER_CTRL, 1171 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF); 1172 break; 1173 default: 1174 break; 1175 } 1176 } 1177 1178 static void 1179 mskc_reset(struct msk_softc *sc) 1180 { 1181 bus_addr_t addr; 1182 uint16_t status; 1183 uint32_t val; 1184 int i; 1185 1186 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR); 1187 1188 /* Disable ASF. */ 1189 if (sc->msk_hw_id < CHIP_ID_YUKON_XL) { 1190 CSR_WRITE_4(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); 1191 CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE); 1192 } 1193 /* 1194 * Since we disabled ASF, S/W reset is required for Power Management. 1195 */ 1196 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 1197 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR); 1198 1199 /* Clear all error bits in the PCI status register. */ 1200 status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2); 1201 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); 1202 1203 pci_write_config(sc->msk_dev, PCIR_STATUS, status | 1204 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT | 1205 PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2); 1206 CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR); 1207 1208 switch (sc->msk_bustype) { 1209 case MSK_PEX_BUS: 1210 /* Clear all PEX errors. */ 1211 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff); 1212 val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT); 1213 if ((val & PEX_RX_OV) != 0) { 1214 sc->msk_intrmask &= ~Y2_IS_HW_ERR; 1215 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP; 1216 } 1217 break; 1218 case MSK_PCI_BUS: 1219 case MSK_PCIX_BUS: 1220 /* Set Cache Line Size to 2(8bytes) if configured to 0. */ 1221 val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1); 1222 if (val == 0) 1223 pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1); 1224 if (sc->msk_bustype == MSK_PCIX_BUS) { 1225 /* Set Cache Line Size opt. */ 1226 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4); 1227 val |= PCI_CLS_OPT; 1228 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4); 1229 } 1230 break; 1231 } 1232 /* Set PHY power state. 
	 */
	msk_phy_power(sc, MSK_PHY_POWERUP);

	/* Reset GPHY/GMAC Control */
	for (i = 0; i < sc->msk_num_port; i++) {
		/* GPHY Control reset. */
		CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
		CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
		/* GMAC Control reset. */
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
	}
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	/* LED On. */
	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);

	/* Clear TWSI IRQ. */
	CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);

	/* Turn off hardware timer. */
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);

	/* Turn off descriptor polling. */
	CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);

	/* Turn off time stamps. */
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

	/* Configure timeout values. */
	for (i = 0; i < sc->msk_num_port; i++) {
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
		    MSK_RI_TO_53);
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);

	/*
	 * On dual port PCI-X cards, there is a problem where status
	 * updates can be received out of order due to split transactions.
	 */
	if (sc->msk_bustype == MSK_PCIX_BUS && sc->msk_num_port > 1) {
		int pcix;
		uint16_t pcix_cmd;

		if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &pcix) == 0) {
			pcix_cmd = pci_read_config(sc->msk_dev, pcix + 2, 2);
			/* Clear Max Outstanding Split Transactions. */
			pcix_cmd &= ~0x70;
			CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
			pci_write_config(sc->msk_dev, pcix + 2, pcix_cmd, 2);
			CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
		}
	}
	if (sc->msk_bustype == MSK_PEX_BUS) {
		uint16_t v, width;

		v = pci_read_config(sc->msk_dev, PEX_DEV_CTRL, 2);
		/* Change Max. Read Request Size to 4096 bytes.
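		 * Added explanatory note: the field value 5 programmed below
		 * follows the standard PCI Express MRRS encoding, where the
		 * request size is 128 bytes << n, so 128 << 5 = 4096 bytes.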
*/ 1322 v &= ~PEX_DC_MAX_RRS_MSK; 1323 v |= PEX_DC_MAX_RD_RQ_SIZE(5); 1324 pci_write_config(sc->msk_dev, PEX_DEV_CTRL, v, 2); 1325 width = pci_read_config(sc->msk_dev, PEX_LNK_STAT, 2); 1326 width = (width & PEX_LS_LINK_WI_MSK) >> 4; 1327 v = pci_read_config(sc->msk_dev, PEX_LNK_CAP, 2); 1328 v = (v & PEX_LS_LINK_WI_MSK) >> 4; 1329 if (v != width) 1330 device_printf(sc->msk_dev, 1331 "negotiated width of link(x%d) != " 1332 "max. width of link(x%d)\n", width, v); 1333 } 1334 1335 /* Clear status list. */ 1336 bzero(sc->msk_stat_ring, 1337 sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT); 1338 sc->msk_stat_cons = 0; 1339 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map, 1340 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1341 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET); 1342 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR); 1343 /* Set the status list base address. */ 1344 addr = sc->msk_stat_ring_paddr; 1345 CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr)); 1346 CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr)); 1347 /* Set the status list last index. */ 1348 CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1); 1349 if (HW_FEATURE(sc, HWF_WA_DEV_43_418)) { 1350 /* WA for dev. #4.3 */ 1351 CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK); 1352 /* WA for dev. #4.18 */ 1353 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21); 1354 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07); 1355 } else { 1356 CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a); 1357 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10); 1358 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 1359 HW_FEATURE(sc, HWF_WA_DEV_4109) ? 0x10 : 0x04); 1360 CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190); 1361 } 1362 /* 1363 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI. 1364 */ 1365 CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000)); 1366 1367 /* Enable status unit. */ 1368 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON); 1369 1370 CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START); 1371 CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START); 1372 CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START); 1373 } 1374 1375 static int 1376 msk_probe(device_t dev) 1377 { 1378 struct msk_softc *sc; 1379 char desc[100]; 1380 1381 sc = device_get_softc(device_get_parent(dev)); 1382 /* 1383 * Not much to do here. We always know there will be 1384 * at least one GMAC present, and if there are two, 1385 * mskc_attach() will create a second device instance 1386 * for us. 1387 */ 1388 snprintf(desc, sizeof(desc), 1389 "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x", 1390 model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id, 1391 sc->msk_hw_rev); 1392 device_set_desc_copy(dev, desc); 1393 1394 return (BUS_PROBE_DEFAULT); 1395 } 1396 1397 static int 1398 msk_attach(device_t dev) 1399 { 1400 struct msk_softc *sc; 1401 struct msk_if_softc *sc_if; 1402 struct ifnet *ifp; 1403 int i, port, error; 1404 uint8_t eaddr[6]; 1405 1406 if (dev == NULL) 1407 return (EINVAL); 1408 1409 error = 0; 1410 sc_if = device_get_softc(dev); 1411 sc = device_get_softc(device_get_parent(dev)); 1412 port = *(int *)device_get_ivars(dev); 1413 1414 sc_if->msk_if_dev = dev; 1415 sc_if->msk_port = port; 1416 sc_if->msk_softc = sc; 1417 sc->msk_if[port] = sc_if; 1418 /* Setup Tx/Rx queue register offsets. 
	 */
	if (port == MSK_PORT_A) {
		sc_if->msk_txq = Q_XA1;
		sc_if->msk_txsq = Q_XS1;
		sc_if->msk_rxq = Q_R1;
	} else {
		sc_if->msk_txq = Q_XA2;
		sc_if->msk_txsq = Q_XS2;
		sc_if->msk_rxq = Q_R2;
	}

	callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0);
	TASK_INIT(&sc_if->msk_link_task, 0, msk_link_task, sc_if);

	if ((error = msk_txrx_dma_alloc(sc_if)) != 0)
		goto fail;

	ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc_if->msk_if_dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	/*
	 * The IFCAP_RXCSUM capability is intentionally disabled because the
	 * hardware has a serious bug in Rx checksum offload for all Yukon II
	 * family hardware.  It seems there is a workaround to make it work
	 * sometimes, but the workaround also has to check OP code sequences
	 * to verify that the OP code is correct, and sometimes the driver
	 * would have to compute the IP/TCP/UDP checksum itself to verify the
	 * correctness of the checksum computed by the hardware.  If you have
	 * to compute the checksum in software just to verify the hardware's
	 * checksum, why have the hardware compute it at all?  I think there
	 * is no reason to spend time making Rx checksum offload work on
	 * Yukon II hardware.
	 */
	ifp->if_capabilities = IFCAP_TXCSUM;
	ifp->if_hwassist = MSK_CSUM_FEATURES | CSUM_TSO;
	if (sc->msk_hw_id != CHIP_ID_YUKON_EC_U) {
		/* It seems Yukon EC Ultra doesn't support TSO. */
		ifp->if_capabilities |= IFCAP_TSO4;
		ifp->if_hwassist |= CSUM_TSO;
	}
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_ioctl = msk_ioctl;
	ifp->if_start = msk_start;
	ifp->if_timer = 0;
	ifp->if_watchdog = NULL;
	ifp->if_init = msk_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = MSK_TX_RING_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	TASK_INIT(&sc_if->msk_tx_task, 1, msk_tx_task, ifp);

	/*
	 * Get station address for this interface.  Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra.  The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode.  Currently we don't
	 * use this extra address.
	 */
	MSK_IF_LOCK(sc_if);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);

	/*
	 * Call MI attach routine.  Can't hold locks when calling into ether_*.
	 */
	MSK_IF_UNLOCK(sc_if);
	ether_ifattach(ifp, eaddr);
	MSK_IF_LOCK(sc_if);

	/* VLAN capability setup */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
	if (ifp->if_capabilities & IFCAP_HWCSUM)
		ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * Do miibus setup.
1511 */ 1512 MSK_IF_UNLOCK(sc_if); 1513 error = mii_phy_probe(dev, &sc_if->msk_miibus, msk_mediachange, 1514 msk_mediastatus); 1515 if (error != 0) { 1516 device_printf(sc_if->msk_if_dev, "no PHY found!\n"); 1517 ether_ifdetach(ifp); 1518 error = ENXIO; 1519 goto fail; 1520 } 1521 /* Check whether PHY Id is MARVELL. */ 1522 if (msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_ID0) 1523 == PHY_MARV_ID0_VAL) 1524 sc->msk_marvell_phy = 1; 1525 1526 fail: 1527 if (error != 0) { 1528 /* Access should be ok even though lock has been dropped */ 1529 sc->msk_if[port] = NULL; 1530 msk_detach(dev); 1531 } 1532 1533 return (error); 1534 } 1535 1536 /* 1537 * Attach the interface. Allocate softc structures, do ifmedia 1538 * setup and ethernet/BPF attach. 1539 */ 1540 static int 1541 mskc_attach(device_t dev) 1542 { 1543 struct msk_softc *sc; 1544 int error, msic, *port, reg; 1545 1546 sc = device_get_softc(dev); 1547 sc->msk_dev = dev; 1548 mtx_init(&sc->msk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 1549 MTX_DEF); 1550 1551 /* 1552 * Map control/status registers. 1553 */ 1554 pci_enable_busmaster(dev); 1555 1556 /* Allocate I/O resource */ 1557 #ifdef MSK_USEIOSPACE 1558 sc->msk_res_spec = msk_res_spec_io; 1559 #else 1560 sc->msk_res_spec = msk_res_spec_mem; 1561 #endif 1562 error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res); 1563 if (error) { 1564 if (sc->msk_res_spec == msk_res_spec_mem) 1565 sc->msk_res_spec = msk_res_spec_io; 1566 else 1567 sc->msk_res_spec = msk_res_spec_mem; 1568 error = bus_alloc_resources(dev, sc->msk_res_spec, sc->msk_res); 1569 if (error) { 1570 device_printf(dev, "couldn't allocate %s resources\n", 1571 sc->msk_res_spec == msk_res_spec_mem ? "memory" : 1572 "I/O"); 1573 mtx_destroy(&sc->msk_mtx); 1574 return (ENXIO); 1575 } 1576 } 1577 1578 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR); 1579 sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID); 1580 sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f; 1581 /* Bail out if chip is not recognized. */ 1582 if (sc->msk_hw_id < CHIP_ID_YUKON_XL || 1583 sc->msk_hw_id > CHIP_ID_YUKON_FE) { 1584 device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n", 1585 sc->msk_hw_id, sc->msk_hw_rev); 1586 error = ENXIO; 1587 goto fail; 1588 } 1589 1590 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 1591 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 1592 OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW, 1593 &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I", 1594 "max number of Rx events to process"); 1595 1596 sc->msk_process_limit = MSK_PROC_DEFAULT; 1597 error = resource_int_value(device_get_name(dev), device_get_unit(dev), 1598 "process_limit", &sc->msk_process_limit); 1599 if (error == 0) { 1600 if (sc->msk_process_limit < MSK_PROC_MIN || 1601 sc->msk_process_limit > MSK_PROC_MAX) { 1602 device_printf(dev, "process_limit value out of range; " 1603 "using default: %d\n", MSK_PROC_DEFAULT); 1604 sc->msk_process_limit = MSK_PROC_DEFAULT; 1605 } 1606 } 1607 1608 /* Soft reset. */ 1609 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 1610 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR); 1611 sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP); 1612 if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S') 1613 sc->msk_coppertype = 0; 1614 else 1615 sc->msk_coppertype = 1; 1616 /* Check number of MACs. */ 1617 sc->msk_num_port = 1; 1618 if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) == 1619 CFG_DUAL_MAC_MSK) { 1620 if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC)) 1621 sc->msk_num_port++; 1622 } 1623 1624 /* Check bus type. 
	 */
	if (pci_find_extcap(sc->msk_dev, PCIY_EXPRESS, &reg) == 0)
		sc->msk_bustype = MSK_PEX_BUS;
	else if (pci_find_extcap(sc->msk_dev, PCIY_PCIX, &reg) == 0)
		sc->msk_bustype = MSK_PCIX_BUS;
	else
		sc->msk_bustype = MSK_PCI_BUS;

	/* Get H/W features (bugs). */
	switch (sc->msk_hw_id) {
	case CHIP_ID_YUKON_EC:
		sc->msk_clock = 125;	/* 125 MHz */
		if (sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
			sc->msk_hw_feature =
			    HWF_WA_DEV_42 | HWF_WA_DEV_46 | HWF_WA_DEV_43_418 |
			    HWF_WA_DEV_420 | HWF_WA_DEV_423 |
			    HWF_WA_DEV_424 | HWF_WA_DEV_425 | HWF_WA_DEV_427 |
			    HWF_WA_DEV_428 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 |
			    HWF_WA_DEV_4152 | HWF_WA_DEV_4167;
		} else {
			/* A2/A3 */
			sc->msk_hw_feature =
			    HWF_WA_DEV_424 | HWF_WA_DEV_425 | HWF_WA_DEV_427 |
			    HWF_WA_DEV_428 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 |
			    HWF_WA_DEV_4152 | HWF_WA_DEV_4167;
		}
		break;
	case CHIP_ID_YUKON_EC_U:
		sc->msk_clock = 125;	/* 125 MHz */
		if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
			sc->msk_hw_feature = HWF_WA_DEV_427 | HWF_WA_DEV_483 |
			    HWF_WA_DEV_4109;
		} else if (sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
			uint16_t v;

			sc->msk_hw_feature = HWF_WA_DEV_427 | HWF_WA_DEV_4109 |
			    HWF_WA_DEV_4185;
			v = CSR_READ_2(sc, Q_ADDR(Q_XA1, Q_WM));
			if (v == 0)
				sc->msk_hw_feature |= HWF_WA_DEV_4185CS |
				    HWF_WA_DEV_4200;
		}
		break;
	case CHIP_ID_YUKON_FE:
		sc->msk_clock = 100;	/* 100 MHz */
		sc->msk_hw_feature = HWF_WA_DEV_427 | HWF_WA_DEV_4109 |
		    HWF_WA_DEV_4152 | HWF_WA_DEV_4167;
		break;
	case CHIP_ID_YUKON_XL:
		sc->msk_clock = 156;	/* 156 MHz */
		switch (sc->msk_hw_rev) {
		case CHIP_REV_YU_XL_A0:
			sc->msk_hw_feature =
			    HWF_WA_DEV_427 | HWF_WA_DEV_463 | HWF_WA_DEV_472 |
			    HWF_WA_DEV_479 | HWF_WA_DEV_483 | HWF_WA_DEV_4115 |
			    HWF_WA_DEV_4152 | HWF_WA_DEV_4167;
			break;
		case CHIP_REV_YU_XL_A1:
			sc->msk_hw_feature =
			    HWF_WA_DEV_427 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 |
			    HWF_WA_DEV_4115 | HWF_WA_DEV_4152 | HWF_WA_DEV_4167;
			break;
		case CHIP_REV_YU_XL_A2:
			sc->msk_hw_feature =
			    HWF_WA_DEV_427 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 |
			    HWF_WA_DEV_4115 | HWF_WA_DEV_4167;
			break;
		case CHIP_REV_YU_XL_A3:
			sc->msk_hw_feature =
			    HWF_WA_DEV_427 | HWF_WA_DEV_483 | HWF_WA_DEV_4109 |
			    HWF_WA_DEV_4115;
		}
		break;
	default:
		sc->msk_clock = 156;	/* 156 MHz */
		sc->msk_hw_feature = 0;
	}

	/* Allocate IRQ resources. */
	msic = pci_msi_count(dev);
	if (bootverbose)
		device_printf(dev, "MSI count : %d\n", msic);
	/*
	 * The Yukon II reports it can handle two messages, one for each
	 * possible port.  We go ahead and allocate two messages and only
	 * set up a handler for both if we have a dual port card.
	 *
	 * XXX: I haven't untangled the interrupt handler to handle dual
	 * port cards with separate MSI messages, so for now I disable MSI
	 * on dual port cards.
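	 *
	 * Added usage note: MSI can also be turned off administratively
	 * through the hw.msk.msi_disable tunable declared above, e.g. by
	 * putting hw.msk.msi_disable="1" into /boot/loader.conf.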
	 */
	/* Default to a legacy INTx line in case MSI cannot be used. */
	sc->msk_irq_spec = msk_irq_spec_legacy;
	if (msic == 2 && msi_disable == 0 && sc->msk_num_port == 1 &&
	    pci_alloc_msi(dev, &msic) == 0) {
		if (msic == 2) {
			sc->msk_msi = 1;
			sc->msk_irq_spec = msk_irq_spec_msi;
		} else {
			pci_release_msi(dev);
			sc->msk_irq_spec = msk_irq_spec_legacy;
		}
	}

	error = bus_alloc_resources(dev, sc->msk_irq_spec, sc->msk_irq);
	if (error) {
		device_printf(dev, "couldn't allocate IRQ resources\n");
		goto fail;
	}

	if ((error = msk_status_dma_alloc(sc)) != 0)
		goto fail;

	/* Set base interrupt mask. */
	sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
	sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
	    Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;

	/* Reset the adapter. */
	mskc_reset(sc);

	if ((error = mskc_setup_rambuffer(sc)) != 0)
		goto fail;

	sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
	if (sc->msk_devs[MSK_PORT_A] == NULL) {
		device_printf(dev, "failed to add child for PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	port = malloc(sizeof(int), M_DEVBUF, M_WAITOK);
	if (port == NULL) {
		device_printf(dev, "failed to allocate memory for "
		    "ivars of PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	*port = MSK_PORT_A;
	device_set_ivars(sc->msk_devs[MSK_PORT_A], port);

	if (sc->msk_num_port > 1) {
		sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
		if (sc->msk_devs[MSK_PORT_B] == NULL) {
			device_printf(dev, "failed to add child for PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		port = malloc(sizeof(int), M_DEVBUF, M_WAITOK);
		if (port == NULL) {
			device_printf(dev, "failed to allocate memory for "
			    "ivars of PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		*port = MSK_PORT_B;
		device_set_ivars(sc->msk_devs[MSK_PORT_B], port);
	}

	error = bus_generic_attach(dev);
	if (error) {
		device_printf(dev, "failed to attach port(s)\n");
		goto fail;
	}

	TASK_INIT(&sc->msk_int_task, 0, msk_int_task, sc);
	sc->msk_tq = taskqueue_create_fast("msk_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->msk_tq);
	taskqueue_start_threads(&sc->msk_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->msk_dev));
	/* Hook interrupt last to avoid having to lock softc. */
	error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
	    INTR_MPSAFE | INTR_FAST, msk_intr, sc, &sc->msk_intrhand[0]);

	if (error != 0) {
		device_printf(dev, "couldn't set up interrupt handler\n");
		taskqueue_free(sc->msk_tq);
		sc->msk_tq = NULL;
		goto fail;
	}
fail:
	if (error != 0)
		mskc_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources.  This can be called any
 * time after the mutex has been initialized.  It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
1814 */ 1815 static int 1816 msk_detach(device_t dev) 1817 { 1818 struct msk_softc *sc; 1819 struct msk_if_softc *sc_if; 1820 struct ifnet *ifp; 1821 1822 sc_if = device_get_softc(dev); 1823 KASSERT(mtx_initialized(&sc_if->msk_softc->msk_mtx), 1824 ("msk mutex not initialized in msk_detach")); 1825 MSK_IF_LOCK(sc_if); 1826 1827 ifp = sc_if->msk_ifp; 1828 if (device_is_attached(dev)) { 1829 /* XXX */ 1830 sc_if->msk_detach = 1; 1831 msk_stop(sc_if); 1832 /* Can't hold locks while calling detach. */ 1833 MSK_IF_UNLOCK(sc_if); 1834 callout_drain(&sc_if->msk_tick_ch); 1835 taskqueue_drain(taskqueue_fast, &sc_if->msk_tx_task); 1836 taskqueue_drain(taskqueue_swi, &sc_if->msk_link_task); 1837 ether_ifdetach(ifp); 1838 MSK_IF_LOCK(sc_if); 1839 } 1840 1841 /* 1842 * We're generally called from mskc_detach() which is using 1843 * device_delete_child() to get to here. It's already trashed 1844 * miibus for us, so don't do it here or we'll panic. 1845 * 1846 * if (sc_if->msk_miibus != NULL) { 1847 * device_delete_child(dev, sc_if->msk_miibus); 1848 * sc_if->msk_miibus = NULL; 1849 * } 1850 */ 1851 1852 msk_txrx_dma_free(sc_if); 1853 bus_generic_detach(dev); 1854 1855 if (ifp) 1856 if_free(ifp); 1857 sc = sc_if->msk_softc; 1858 sc->msk_if[sc_if->msk_port] = NULL; 1859 MSK_IF_UNLOCK(sc_if); 1860 1861 return (0); 1862 } 1863 1864 static int 1865 mskc_detach(device_t dev) 1866 { 1867 struct msk_softc *sc; 1868 1869 sc = device_get_softc(dev); 1870 KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized")); 1871 1872 if (device_is_alive(dev)) { 1873 if (sc->msk_devs[MSK_PORT_A] != NULL) { 1874 free(device_get_ivars(sc->msk_devs[MSK_PORT_A]), 1875 M_DEVBUF); 1876 device_delete_child(dev, sc->msk_devs[MSK_PORT_A]); 1877 } 1878 if (sc->msk_devs[MSK_PORT_B] != NULL) { 1879 free(device_get_ivars(sc->msk_devs[MSK_PORT_B]), 1880 M_DEVBUF); 1881 device_delete_child(dev, sc->msk_devs[MSK_PORT_B]); 1882 } 1883 bus_generic_detach(dev); 1884 } 1885 1886 /* Disable all interrupts. */ 1887 CSR_WRITE_4(sc, B0_IMSK, 0); 1888 CSR_READ_4(sc, B0_IMSK); 1889 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 1890 CSR_READ_4(sc, B0_HWE_IMSK); 1891 1892 /* LED Off. */ 1893 CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF); 1894 1895 /* Put hardware reset. */ 1896 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 1897 1898 msk_status_dma_free(sc); 1899 1900 if (sc->msk_tq != NULL) { 1901 taskqueue_drain(sc->msk_tq, &sc->msk_int_task); 1902 taskqueue_free(sc->msk_tq); 1903 sc->msk_tq = NULL; 1904 } 1905 if (sc->msk_intrhand[0]) { 1906 bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand[0]); 1907 sc->msk_intrhand[0] = NULL; 1908 } 1909 if (sc->msk_intrhand[1]) { 1910 bus_teardown_intr(dev, sc->msk_irq[0], sc->msk_intrhand[0]); 1911 sc->msk_intrhand[1] = NULL; 1912 } 1913 bus_release_resources(dev, sc->msk_irq_spec, sc->msk_irq); 1914 if (sc->msk_msi) 1915 pci_release_msi(dev); 1916 bus_release_resources(dev, sc->msk_res_spec, sc->msk_res); 1917 mtx_destroy(&sc->msk_mtx); 1918 1919 return (0); 1920 } 1921 1922 struct msk_dmamap_arg { 1923 bus_addr_t msk_busaddr; 1924 }; 1925 1926 static void 1927 msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1928 { 1929 struct msk_dmamap_arg *ctx; 1930 1931 if (error != 0) 1932 return; 1933 ctx = arg; 1934 ctx->msk_busaddr = segs[0].ds_addr; 1935 } 1936 1937 /* Create status DMA region. 
*/ 1938 static int 1939 msk_status_dma_alloc(struct msk_softc *sc) 1940 { 1941 struct msk_dmamap_arg ctx; 1942 int error; 1943 1944 error = bus_dma_tag_create( 1945 bus_get_dma_tag(sc->msk_dev), /* parent */ 1946 MSK_STAT_ALIGN, 0, /* alignment, boundary */ 1947 BUS_SPACE_MAXADDR, /* lowaddr */ 1948 BUS_SPACE_MAXADDR, /* highaddr */ 1949 NULL, NULL, /* filter, filterarg */ 1950 MSK_STAT_RING_SZ, /* maxsize */ 1951 1, /* nsegments */ 1952 MSK_STAT_RING_SZ, /* maxsegsize */ 1953 0, /* flags */ 1954 NULL, NULL, /* lockfunc, lockarg */ 1955 &sc->msk_stat_tag); 1956 if (error != 0) { 1957 device_printf(sc->msk_dev, 1958 "failed to create status DMA tag\n"); 1959 return (error); 1960 } 1961 1962 /* Allocate DMA'able memory and load the DMA map for status ring. */ 1963 error = bus_dmamem_alloc(sc->msk_stat_tag, 1964 (void **)&sc->msk_stat_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT | 1965 BUS_DMA_ZERO, &sc->msk_stat_map); 1966 if (error != 0) { 1967 device_printf(sc->msk_dev, 1968 "failed to allocate DMA'able memory for status ring\n"); 1969 return (error); 1970 } 1971 1972 ctx.msk_busaddr = 0; 1973 error = bus_dmamap_load(sc->msk_stat_tag, 1974 sc->msk_stat_map, sc->msk_stat_ring, MSK_STAT_RING_SZ, 1975 msk_dmamap_cb, &ctx, 0); 1976 if (error != 0) { 1977 device_printf(sc->msk_dev, 1978 "failed to load DMA'able memory for status ring\n"); 1979 return (error); 1980 } 1981 sc->msk_stat_ring_paddr = ctx.msk_busaddr; 1982 1983 return (0); 1984 } 1985 1986 static void 1987 msk_status_dma_free(struct msk_softc *sc) 1988 { 1989 1990 /* Destroy status block. */ 1991 if (sc->msk_stat_tag) { 1992 if (sc->msk_stat_map) { 1993 bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map); 1994 if (sc->msk_stat_ring) { 1995 bus_dmamem_free(sc->msk_stat_tag, 1996 sc->msk_stat_ring, sc->msk_stat_map); 1997 sc->msk_stat_ring = NULL; 1998 } 1999 sc->msk_stat_map = NULL; 2000 } 2001 bus_dma_tag_destroy(sc->msk_stat_tag); 2002 sc->msk_stat_tag = NULL; 2003 } 2004 } 2005 2006 static int 2007 msk_txrx_dma_alloc(struct msk_if_softc *sc_if) 2008 { 2009 struct msk_dmamap_arg ctx; 2010 struct msk_txdesc *txd; 2011 struct msk_rxdesc *rxd; 2012 struct msk_rxdesc *jrxd; 2013 struct msk_jpool_entry *entry; 2014 uint8_t *ptr; 2015 int error, i; 2016 2017 mtx_init(&sc_if->msk_jlist_mtx, "msk_jlist_mtx", NULL, MTX_DEF); 2018 SLIST_INIT(&sc_if->msk_jfree_listhead); 2019 SLIST_INIT(&sc_if->msk_jinuse_listhead); 2020 2021 /* Create parent DMA tag. */ 2022 /* 2023 * XXX 2024 * It seems that Yukon II supports full 64bits DMA operations. But 2025 * it needs two descriptors(list elements) for 64bits DMA operations. 2026 * Since we don't know what DMA address mappings(32bits or 64bits) 2027 * would be used in advance for each mbufs, we limits its DMA space 2028 * to be in range of 32bits address space. Otherwise, we should check 2029 * what DMA address is used and chain another descriptor for the 2030 * 64bits DMA operation. This also means descriptor ring size is 2031 * variable. Limiting DMA address to be in 32bit address space greatly 2032 * simplyfies descriptor handling and possibly would increase 2033 * performance a bit due to efficient handling of descriptors. 2034 * Apart from harassing checksum offloading mechanisms, it seems 2035 * it's really bad idea to use a seperate descriptor for 64bit 2036 * DMA operation to save small descriptor memory. Anyway, I've 2037 * never seen these exotic scheme on ethernet interface hardware. 
2038 */ 2039 error = bus_dma_tag_create( 2040 bus_get_dma_tag(sc_if->msk_if_dev), /* parent */ 2041 1, 0, /* alignment, boundary */ 2042 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 2043 BUS_SPACE_MAXADDR, /* highaddr */ 2044 NULL, NULL, /* filter, filterarg */ 2045 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 2046 0, /* nsegments */ 2047 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 2048 0, /* flags */ 2049 NULL, NULL, /* lockfunc, lockarg */ 2050 &sc_if->msk_cdata.msk_parent_tag); 2051 if (error != 0) { 2052 device_printf(sc_if->msk_if_dev, 2053 "failed to create parent DMA tag\n"); 2054 goto fail; 2055 } 2056 /* Create tag for Tx ring. */ 2057 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2058 MSK_RING_ALIGN, 0, /* alignment, boundary */ 2059 BUS_SPACE_MAXADDR, /* lowaddr */ 2060 BUS_SPACE_MAXADDR, /* highaddr */ 2061 NULL, NULL, /* filter, filterarg */ 2062 MSK_TX_RING_SZ, /* maxsize */ 2063 1, /* nsegments */ 2064 MSK_TX_RING_SZ, /* maxsegsize */ 2065 0, /* flags */ 2066 NULL, NULL, /* lockfunc, lockarg */ 2067 &sc_if->msk_cdata.msk_tx_ring_tag); 2068 if (error != 0) { 2069 device_printf(sc_if->msk_if_dev, 2070 "failed to create Tx ring DMA tag\n"); 2071 goto fail; 2072 } 2073 2074 /* Create tag for Rx ring. */ 2075 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2076 MSK_RING_ALIGN, 0, /* alignment, boundary */ 2077 BUS_SPACE_MAXADDR, /* lowaddr */ 2078 BUS_SPACE_MAXADDR, /* highaddr */ 2079 NULL, NULL, /* filter, filterarg */ 2080 MSK_RX_RING_SZ, /* maxsize */ 2081 1, /* nsegments */ 2082 MSK_RX_RING_SZ, /* maxsegsize */ 2083 0, /* flags */ 2084 NULL, NULL, /* lockfunc, lockarg */ 2085 &sc_if->msk_cdata.msk_rx_ring_tag); 2086 if (error != 0) { 2087 device_printf(sc_if->msk_if_dev, 2088 "failed to create Rx ring DMA tag\n"); 2089 goto fail; 2090 } 2091 2092 /* Create tag for jumbo Rx ring. */ 2093 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2094 MSK_RING_ALIGN, 0, /* alignment, boundary */ 2095 BUS_SPACE_MAXADDR, /* lowaddr */ 2096 BUS_SPACE_MAXADDR, /* highaddr */ 2097 NULL, NULL, /* filter, filterarg */ 2098 MSK_JUMBO_RX_RING_SZ, /* maxsize */ 2099 1, /* nsegments */ 2100 MSK_JUMBO_RX_RING_SZ, /* maxsegsize */ 2101 0, /* flags */ 2102 NULL, NULL, /* lockfunc, lockarg */ 2103 &sc_if->msk_cdata.msk_jumbo_rx_ring_tag); 2104 if (error != 0) { 2105 device_printf(sc_if->msk_if_dev, 2106 "failed to create jumbo Rx ring DMA tag\n"); 2107 goto fail; 2108 } 2109 2110 /* Create tag for jumbo buffer blocks. */ 2111 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2112 PAGE_SIZE, 0, /* alignment, boundary */ 2113 BUS_SPACE_MAXADDR, /* lowaddr */ 2114 BUS_SPACE_MAXADDR, /* highaddr */ 2115 NULL, NULL, /* filter, filterarg */ 2116 MSK_JMEM, /* maxsize */ 2117 1, /* nsegments */ 2118 MSK_JMEM, /* maxsegsize */ 2119 0, /* flags */ 2120 NULL, NULL, /* lockfunc, lockarg */ 2121 &sc_if->msk_cdata.msk_jumbo_tag); 2122 if (error != 0) { 2123 device_printf(sc_if->msk_if_dev, 2124 "failed to create jumbo Rx buffer block DMA tag\n"); 2125 goto fail; 2126 } 2127 2128 /* Create tag for Tx buffers. 
*/ 2129 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2130 1, 0, /* alignment, boundary */ 2131 BUS_SPACE_MAXADDR, /* lowaddr */ 2132 BUS_SPACE_MAXADDR, /* highaddr */ 2133 NULL, NULL, /* filter, filterarg */ 2134 MCLBYTES * MSK_MAXTXSEGS, /* maxsize */ 2135 MSK_MAXTXSEGS, /* nsegments */ 2136 MCLBYTES, /* maxsegsize */ 2137 0, /* flags */ 2138 NULL, NULL, /* lockfunc, lockarg */ 2139 &sc_if->msk_cdata.msk_tx_tag); 2140 if (error != 0) { 2141 device_printf(sc_if->msk_if_dev, 2142 "failed to create Tx DMA tag\n"); 2143 goto fail; 2144 } 2145 2146 /* Create tag for Rx buffers. */ 2147 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2148 1, 0, /* alignment, boundary */ 2149 BUS_SPACE_MAXADDR, /* lowaddr */ 2150 BUS_SPACE_MAXADDR, /* highaddr */ 2151 NULL, NULL, /* filter, filterarg */ 2152 MCLBYTES, /* maxsize */ 2153 1, /* nsegments */ 2154 MCLBYTES, /* maxsegsize */ 2155 0, /* flags */ 2156 NULL, NULL, /* lockfunc, lockarg */ 2157 &sc_if->msk_cdata.msk_rx_tag); 2158 if (error != 0) { 2159 device_printf(sc_if->msk_if_dev, 2160 "failed to create Rx DMA tag\n"); 2161 goto fail; 2162 } 2163 2164 /* Create tag for jumbo Rx buffers. */ 2165 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2166 PAGE_SIZE, 0, /* alignment, boundary */ 2167 BUS_SPACE_MAXADDR, /* lowaddr */ 2168 BUS_SPACE_MAXADDR, /* highaddr */ 2169 NULL, NULL, /* filter, filterarg */ 2170 MCLBYTES * MSK_MAXRXSEGS, /* maxsize */ 2171 MSK_MAXRXSEGS, /* nsegments */ 2172 MSK_JLEN, /* maxsegsize */ 2173 0, /* flags */ 2174 NULL, NULL, /* lockfunc, lockarg */ 2175 &sc_if->msk_cdata.msk_jumbo_rx_tag); 2176 if (error != 0) { 2177 device_printf(sc_if->msk_if_dev, 2178 "failed to create jumbo Rx DMA tag\n"); 2179 goto fail; 2180 } 2181 2182 /* Allocate DMA'able memory and load the DMA map for Tx ring. */ 2183 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_tx_ring_tag, 2184 (void **)&sc_if->msk_rdata.msk_tx_ring, BUS_DMA_WAITOK | 2185 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_tx_ring_map); 2186 if (error != 0) { 2187 device_printf(sc_if->msk_if_dev, 2188 "failed to allocate DMA'able memory for Tx ring\n"); 2189 goto fail; 2190 } 2191 2192 ctx.msk_busaddr = 0; 2193 error = bus_dmamap_load(sc_if->msk_cdata.msk_tx_ring_tag, 2194 sc_if->msk_cdata.msk_tx_ring_map, sc_if->msk_rdata.msk_tx_ring, 2195 MSK_TX_RING_SZ, msk_dmamap_cb, &ctx, 0); 2196 if (error != 0) { 2197 device_printf(sc_if->msk_if_dev, 2198 "failed to load DMA'able memory for Tx ring\n"); 2199 goto fail; 2200 } 2201 sc_if->msk_rdata.msk_tx_ring_paddr = ctx.msk_busaddr; 2202 2203 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ 2204 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_rx_ring_tag, 2205 (void **)&sc_if->msk_rdata.msk_rx_ring, BUS_DMA_WAITOK | 2206 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->msk_cdata.msk_rx_ring_map); 2207 if (error != 0) { 2208 device_printf(sc_if->msk_if_dev, 2209 "failed to allocate DMA'able memory for Rx ring\n"); 2210 goto fail; 2211 } 2212 2213 ctx.msk_busaddr = 0; 2214 error = bus_dmamap_load(sc_if->msk_cdata.msk_rx_ring_tag, 2215 sc_if->msk_cdata.msk_rx_ring_map, sc_if->msk_rdata.msk_rx_ring, 2216 MSK_RX_RING_SZ, msk_dmamap_cb, &ctx, 0); 2217 if (error != 0) { 2218 device_printf(sc_if->msk_if_dev, 2219 "failed to load DMA'able memory for Rx ring\n"); 2220 goto fail; 2221 } 2222 sc_if->msk_rdata.msk_rx_ring_paddr = ctx.msk_busaddr; 2223 2224 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. 
*/ 2225 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2226 (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring, 2227 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 2228 &sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2229 if (error != 0) { 2230 device_printf(sc_if->msk_if_dev, 2231 "failed to allocate DMA'able memory for jumbo Rx ring\n"); 2232 goto fail; 2233 } 2234 2235 ctx.msk_busaddr = 0; 2236 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2237 sc_if->msk_cdata.msk_jumbo_rx_ring_map, 2238 sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ, 2239 msk_dmamap_cb, &ctx, 0); 2240 if (error != 0) { 2241 device_printf(sc_if->msk_if_dev, 2242 "failed to load DMA'able memory for jumbo Rx ring\n"); 2243 goto fail; 2244 } 2245 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr; 2246 2247 /* Create DMA maps for Tx buffers. */ 2248 for (i = 0; i < MSK_TX_RING_CNT; i++) { 2249 txd = &sc_if->msk_cdata.msk_txdesc[i]; 2250 txd->tx_m = NULL; 2251 txd->tx_dmamap = NULL; 2252 error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0, 2253 &txd->tx_dmamap); 2254 if (error != 0) { 2255 device_printf(sc_if->msk_if_dev, 2256 "failed to create Tx dmamap\n"); 2257 goto fail; 2258 } 2259 } 2260 /* Create DMA maps for Rx buffers. */ 2261 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0, 2262 &sc_if->msk_cdata.msk_rx_sparemap)) != 0) { 2263 device_printf(sc_if->msk_if_dev, 2264 "failed to create spare Rx dmamap\n"); 2265 goto fail; 2266 } 2267 for (i = 0; i < MSK_RX_RING_CNT; i++) { 2268 rxd = &sc_if->msk_cdata.msk_rxdesc[i]; 2269 rxd->rx_m = NULL; 2270 rxd->rx_dmamap = NULL; 2271 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0, 2272 &rxd->rx_dmamap); 2273 if (error != 0) { 2274 device_printf(sc_if->msk_if_dev, 2275 "failed to create Rx dmamap\n"); 2276 goto fail; 2277 } 2278 } 2279 /* Create DMA maps for jumbo Rx buffers. */ 2280 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0, 2281 &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) { 2282 device_printf(sc_if->msk_if_dev, 2283 "failed to create spare jumbo Rx dmamap\n"); 2284 goto fail; 2285 } 2286 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 2287 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; 2288 jrxd->rx_m = NULL; 2289 jrxd->rx_dmamap = NULL; 2290 error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0, 2291 &jrxd->rx_dmamap); 2292 if (error != 0) { 2293 device_printf(sc_if->msk_if_dev, 2294 "failed to create jumbo Rx dmamap\n"); 2295 goto fail; 2296 } 2297 } 2298 2299 /* Allocate DMA'able memory and load the DMA map for jumbo buf. */ 2300 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_tag, 2301 (void **)&sc_if->msk_rdata.msk_jumbo_buf, 2302 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 2303 &sc_if->msk_cdata.msk_jumbo_map); 2304 if (error != 0) { 2305 device_printf(sc_if->msk_if_dev, 2306 "failed to allocate DMA'able memory for jumbo buf\n"); 2307 goto fail; 2308 } 2309 2310 ctx.msk_busaddr = 0; 2311 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_tag, 2312 sc_if->msk_cdata.msk_jumbo_map, sc_if->msk_rdata.msk_jumbo_buf, 2313 MSK_JMEM, msk_dmamap_cb, &ctx, 0); 2314 if (error != 0) { 2315 device_printf(sc_if->msk_if_dev, 2316 "failed to load DMA'able memory for jumbobuf\n"); 2317 goto fail; 2318 } 2319 sc_if->msk_rdata.msk_jumbo_buf_paddr = ctx.msk_busaddr; 2320 2321 /* 2322 * Now divide it up into 9K pieces and save the addresses 2323 * in an array. 
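	 *
	 * For example, assuming MSK_JMEM is MSK_JSLOTS * MSK_JLEN (as in
	 * the sk(4) jumbo pool this is modeled on):
	 *
	 *	buffer for slot i = msk_jumbo_buf + i * MSK_JLEN
	 *	slot for address  = (addr - msk_jumbo_buf) / MSK_JLEN
	 *
	 * The second line is exactly the inverse calculation msk_jfree()
	 * performs when a buffer is handed back to the free list.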
2324 */ 2325 ptr = sc_if->msk_rdata.msk_jumbo_buf; 2326 for (i = 0; i < MSK_JSLOTS; i++) { 2327 sc_if->msk_cdata.msk_jslots[i] = ptr; 2328 ptr += MSK_JLEN; 2329 entry = malloc(sizeof(struct msk_jpool_entry), 2330 M_DEVBUF, M_WAITOK); 2331 if (entry == NULL) { 2332 device_printf(sc_if->msk_if_dev, 2333 "no memory for jumbo buffers!\n"); 2334 error = ENOMEM; 2335 goto fail; 2336 } 2337 entry->slot = i; 2338 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, 2339 jpool_entries); 2340 } 2341 2342 fail: 2343 return (error); 2344 } 2345 2346 static void 2347 msk_txrx_dma_free(struct msk_if_softc *sc_if) 2348 { 2349 struct msk_txdesc *txd; 2350 struct msk_rxdesc *rxd; 2351 struct msk_rxdesc *jrxd; 2352 struct msk_jpool_entry *entry; 2353 int i; 2354 2355 MSK_JLIST_LOCK(sc_if); 2356 while ((entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead))) { 2357 device_printf(sc_if->msk_if_dev, 2358 "asked to free buffer that is in use!\n"); 2359 SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries); 2360 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, 2361 jpool_entries); 2362 } 2363 2364 while (!SLIST_EMPTY(&sc_if->msk_jfree_listhead)) { 2365 entry = SLIST_FIRST(&sc_if->msk_jfree_listhead); 2366 SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries); 2367 free(entry, M_DEVBUF); 2368 } 2369 MSK_JLIST_UNLOCK(sc_if); 2370 2371 /* Destroy jumbo buffer block. */ 2372 if (sc_if->msk_cdata.msk_jumbo_map) 2373 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_tag, 2374 sc_if->msk_cdata.msk_jumbo_map); 2375 2376 if (sc_if->msk_rdata.msk_jumbo_buf) { 2377 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_tag, 2378 sc_if->msk_rdata.msk_jumbo_buf, 2379 sc_if->msk_cdata.msk_jumbo_map); 2380 sc_if->msk_rdata.msk_jumbo_buf = NULL; 2381 sc_if->msk_cdata.msk_jumbo_map = NULL; 2382 } 2383 2384 /* Tx ring. */ 2385 if (sc_if->msk_cdata.msk_tx_ring_tag) { 2386 if (sc_if->msk_cdata.msk_tx_ring_map) 2387 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_ring_tag, 2388 sc_if->msk_cdata.msk_tx_ring_map); 2389 if (sc_if->msk_cdata.msk_tx_ring_map && 2390 sc_if->msk_rdata.msk_tx_ring) 2391 bus_dmamem_free(sc_if->msk_cdata.msk_tx_ring_tag, 2392 sc_if->msk_rdata.msk_tx_ring, 2393 sc_if->msk_cdata.msk_tx_ring_map); 2394 sc_if->msk_rdata.msk_tx_ring = NULL; 2395 sc_if->msk_cdata.msk_tx_ring_map = NULL; 2396 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_ring_tag); 2397 sc_if->msk_cdata.msk_tx_ring_tag = NULL; 2398 } 2399 /* Rx ring. */ 2400 if (sc_if->msk_cdata.msk_rx_ring_tag) { 2401 if (sc_if->msk_cdata.msk_rx_ring_map) 2402 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_ring_tag, 2403 sc_if->msk_cdata.msk_rx_ring_map); 2404 if (sc_if->msk_cdata.msk_rx_ring_map && 2405 sc_if->msk_rdata.msk_rx_ring) 2406 bus_dmamem_free(sc_if->msk_cdata.msk_rx_ring_tag, 2407 sc_if->msk_rdata.msk_rx_ring, 2408 sc_if->msk_cdata.msk_rx_ring_map); 2409 sc_if->msk_rdata.msk_rx_ring = NULL; 2410 sc_if->msk_cdata.msk_rx_ring_map = NULL; 2411 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_ring_tag); 2412 sc_if->msk_cdata.msk_rx_ring_tag = NULL; 2413 } 2414 /* Jumbo Rx ring. 
*/ 2415 if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) { 2416 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map) 2417 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2418 sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2419 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map && 2420 sc_if->msk_rdata.msk_jumbo_rx_ring) 2421 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2422 sc_if->msk_rdata.msk_jumbo_rx_ring, 2423 sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2424 sc_if->msk_rdata.msk_jumbo_rx_ring = NULL; 2425 sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL; 2426 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag); 2427 sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL; 2428 } 2429 /* Tx buffers. */ 2430 if (sc_if->msk_cdata.msk_tx_tag) { 2431 for (i = 0; i < MSK_TX_RING_CNT; i++) { 2432 txd = &sc_if->msk_cdata.msk_txdesc[i]; 2433 if (txd->tx_dmamap) { 2434 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag, 2435 txd->tx_dmamap); 2436 txd->tx_dmamap = NULL; 2437 } 2438 } 2439 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag); 2440 sc_if->msk_cdata.msk_tx_tag = NULL; 2441 } 2442 /* Rx buffers. */ 2443 if (sc_if->msk_cdata.msk_rx_tag) { 2444 for (i = 0; i < MSK_RX_RING_CNT; i++) { 2445 rxd = &sc_if->msk_cdata.msk_rxdesc[i]; 2446 if (rxd->rx_dmamap) { 2447 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 2448 rxd->rx_dmamap); 2449 rxd->rx_dmamap = NULL; 2450 } 2451 } 2452 if (sc_if->msk_cdata.msk_rx_sparemap) { 2453 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 2454 sc_if->msk_cdata.msk_rx_sparemap); 2455 sc_if->msk_cdata.msk_rx_sparemap = 0; 2456 } 2457 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag); 2458 sc_if->msk_cdata.msk_rx_tag = NULL; 2459 } 2460 /* Jumbo Rx buffers. */ 2461 if (sc_if->msk_cdata.msk_jumbo_rx_tag) { 2462 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 2463 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; 2464 if (jrxd->rx_dmamap) { 2465 bus_dmamap_destroy( 2466 sc_if->msk_cdata.msk_jumbo_rx_tag, 2467 jrxd->rx_dmamap); 2468 jrxd->rx_dmamap = NULL; 2469 } 2470 } 2471 if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) { 2472 bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag, 2473 sc_if->msk_cdata.msk_jumbo_rx_sparemap); 2474 sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0; 2475 } 2476 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag); 2477 sc_if->msk_cdata.msk_jumbo_rx_tag = NULL; 2478 } 2479 2480 if (sc_if->msk_cdata.msk_parent_tag) { 2481 bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag); 2482 sc_if->msk_cdata.msk_parent_tag = NULL; 2483 } 2484 mtx_destroy(&sc_if->msk_jlist_mtx); 2485 } 2486 2487 /* 2488 * Allocate a jumbo buffer. 2489 */ 2490 static void * 2491 msk_jalloc(struct msk_if_softc *sc_if) 2492 { 2493 struct msk_jpool_entry *entry; 2494 2495 MSK_JLIST_LOCK(sc_if); 2496 2497 entry = SLIST_FIRST(&sc_if->msk_jfree_listhead); 2498 2499 if (entry == NULL) { 2500 MSK_JLIST_UNLOCK(sc_if); 2501 return (NULL); 2502 } 2503 2504 SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries); 2505 SLIST_INSERT_HEAD(&sc_if->msk_jinuse_listhead, entry, jpool_entries); 2506 2507 MSK_JLIST_UNLOCK(sc_if); 2508 2509 return (sc_if->msk_cdata.msk_jslots[entry->slot]); 2510 } 2511 2512 /* 2513 * Release a jumbo buffer. 2514 */ 2515 static void 2516 msk_jfree(void *buf, void *args) 2517 { 2518 struct msk_if_softc *sc_if; 2519 struct msk_jpool_entry *entry; 2520 int i; 2521 2522 /* Extract the softc struct pointer. 
*/ 2523 sc_if = (struct msk_if_softc *)args; 2524 KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__)); 2525 2526 MSK_JLIST_LOCK(sc_if); 2527 /* Calculate the slot this buffer belongs to. */ 2528 i = ((vm_offset_t)buf 2529 - (vm_offset_t)sc_if->msk_rdata.msk_jumbo_buf) / MSK_JLEN; 2530 KASSERT(i >= 0 && i < MSK_JSLOTS, 2531 ("%s: asked to free buffer that we don't manage!", __func__)); 2532 2533 entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead); 2534 KASSERT(entry != NULL, ("%s: buffer not in use!", __func__)); 2535 entry->slot = i; 2536 SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries); 2537 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, jpool_entries); 2538 if (SLIST_EMPTY(&sc_if->msk_jinuse_listhead)) 2539 wakeup(sc_if); 2540 2541 MSK_JLIST_UNLOCK(sc_if); 2542 } 2543 2544 /* 2545 * It's copy of ath_defrag(ath(4)). 2546 * 2547 * Defragment an mbuf chain, returning at most maxfrags separate 2548 * mbufs+clusters. If this is not possible NULL is returned and 2549 * the original mbuf chain is left in it's present (potentially 2550 * modified) state. We use two techniques: collapsing consecutive 2551 * mbufs and replacing consecutive mbufs by a cluster. 2552 */ 2553 static struct mbuf * 2554 msk_defrag(struct mbuf *m0, int how, int maxfrags) 2555 { 2556 struct mbuf *m, *n, *n2, **prev; 2557 u_int curfrags; 2558 2559 /* 2560 * Calculate the current number of frags. 2561 */ 2562 curfrags = 0; 2563 for (m = m0; m != NULL; m = m->m_next) 2564 curfrags++; 2565 /* 2566 * First, try to collapse mbufs. Note that we always collapse 2567 * towards the front so we don't need to deal with moving the 2568 * pkthdr. This may be suboptimal if the first mbuf has much 2569 * less data than the following. 2570 */ 2571 m = m0; 2572 again: 2573 for (;;) { 2574 n = m->m_next; 2575 if (n == NULL) 2576 break; 2577 if ((m->m_flags & M_RDONLY) == 0 && 2578 n->m_len < M_TRAILINGSPACE(m)) { 2579 bcopy(mtod(n, void *), mtod(m, char *) + m->m_len, 2580 n->m_len); 2581 m->m_len += n->m_len; 2582 m->m_next = n->m_next; 2583 m_free(n); 2584 if (--curfrags <= maxfrags) 2585 return (m0); 2586 } else 2587 m = n; 2588 } 2589 KASSERT(maxfrags > 1, 2590 ("maxfrags %u, but normal collapse failed", maxfrags)); 2591 /* 2592 * Collapse consecutive mbufs to a cluster. 2593 */ 2594 prev = &m0->m_next; /* NB: not the first mbuf */ 2595 while ((n = *prev) != NULL) { 2596 if ((n2 = n->m_next) != NULL && 2597 n->m_len + n2->m_len < MCLBYTES) { 2598 m = m_getcl(how, MT_DATA, 0); 2599 if (m == NULL) 2600 goto bad; 2601 bcopy(mtod(n, void *), mtod(m, void *), n->m_len); 2602 bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len, 2603 n2->m_len); 2604 m->m_len = n->m_len + n2->m_len; 2605 m->m_next = n2->m_next; 2606 *prev = m; 2607 m_free(n); 2608 m_free(n2); 2609 if (--curfrags <= maxfrags) /* +1 cl -2 mbufs */ 2610 return m0; 2611 /* 2612 * Still not there, try the normal collapse 2613 * again before we allocate another cluster. 2614 */ 2615 goto again; 2616 } 2617 prev = &n->m_next; 2618 } 2619 /* 2620 * No place where we can collapse to a cluster; punt. 2621 * This can occur if, for example, you request 2 frags 2622 * but the packet requires that both be clusters (we 2623 * never reallocate the first mbuf to avoid moving the 2624 * packet header). 
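 *
 * Example: in a chain of 5 mbufs where the 2nd and 3rd together fit in
 * one cluster, a single cluster replacement leaves 5 + 1 - 2 = 4
 * fragments, so every successful pass moves curfrags closer to
 * maxfrags.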
2625 */ 2626 bad: 2627 return (NULL); 2628 } 2629 2630 static int 2631 msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head) 2632 { 2633 struct msk_txdesc *txd, *txd_last; 2634 struct msk_tx_desc *tx_le; 2635 struct mbuf *m; 2636 bus_dmamap_t map; 2637 bus_dma_segment_t txsegs[MSK_MAXTXSEGS]; 2638 uint32_t control, prod, si; 2639 uint16_t offset, tcp_offset, tso_mtu; 2640 int error, i, nseg, tso; 2641 2642 MSK_IF_LOCK_ASSERT(sc_if); 2643 2644 tcp_offset = offset = 0; 2645 m = *m_head; 2646 if ((m->m_pkthdr.csum_flags & (MSK_CSUM_FEATURES | CSUM_TSO)) != 0) { 2647 /* 2648 * Since mbuf has no protocol specific structure information 2649 * in it we have to inspect protocol information here to 2650 * setup TSO and checksum offload. I don't know why Marvell 2651 * made a such decision in chip design because other GigE 2652 * hardwares normally takes care of all these chores in 2653 * hardware. However, TSO performance of Yukon II is very 2654 * good such that it's worth to implement it. 2655 */ 2656 struct ether_vlan_header *evh; 2657 struct ether_header *eh; 2658 struct ip *ip; 2659 struct tcphdr *tcp; 2660 2661 /* TODO check for M_WRITABLE(m) */ 2662 2663 offset = sizeof(struct ether_header); 2664 m = m_pullup(m, offset); 2665 if (m == NULL) { 2666 *m_head = NULL; 2667 return (ENOBUFS); 2668 } 2669 eh = mtod(m, struct ether_header *); 2670 /* Check if hardware VLAN insertion is off. */ 2671 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 2672 offset = sizeof(struct ether_vlan_header); 2673 m = m_pullup(m, offset); 2674 if (m == NULL) { 2675 *m_head = NULL; 2676 return (ENOBUFS); 2677 } 2678 evh = mtod(m, struct ether_vlan_header *); 2679 ip = (struct ip *)(evh + 1); 2680 } else 2681 ip = (struct ip *)(eh + 1); 2682 m = m_pullup(m, offset + sizeof(struct ip)); 2683 if (m == NULL) { 2684 *m_head = NULL; 2685 return (ENOBUFS); 2686 } 2687 offset += (ip->ip_hl << 2); 2688 tcp_offset = offset; 2689 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2690 m = m_pullup(m, offset + sizeof(struct tcphdr)); 2691 if (m == NULL) { 2692 *m_head = NULL; 2693 return (ENOBUFS); 2694 } 2695 tcp = mtod(m, struct tcphdr *); 2696 offset += (tcp->th_off << 2); 2697 } 2698 *m_head = m; 2699 } 2700 2701 prod = sc_if->msk_cdata.msk_tx_prod; 2702 txd = &sc_if->msk_cdata.msk_txdesc[prod]; 2703 txd_last = txd; 2704 map = txd->tx_dmamap; 2705 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map, 2706 *m_head, txsegs, &nseg, BUS_DMA_NOWAIT); 2707 if (error == EFBIG) { 2708 m = msk_defrag(*m_head, M_DONTWAIT, MSK_MAXTXSEGS); 2709 if (m == NULL) { 2710 m_freem(*m_head); 2711 *m_head = NULL; 2712 return (ENOBUFS); 2713 } 2714 *m_head = m; 2715 error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, 2716 map, *m_head, txsegs, &nseg, BUS_DMA_NOWAIT); 2717 if (error != 0) { 2718 m_freem(*m_head); 2719 *m_head = NULL; 2720 return (error); 2721 } 2722 } else if (error != 0) 2723 return (error); 2724 if (nseg == 0) { 2725 m_freem(*m_head); 2726 *m_head = NULL; 2727 return (EIO); 2728 } 2729 2730 /* Check number of available descriptors. */ 2731 if (sc_if->msk_cdata.msk_tx_cnt + nseg >= 2732 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) { 2733 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map); 2734 return (ENOBUFS); 2735 } 2736 2737 control = 0; 2738 tso = 0; 2739 tx_le = NULL; 2740 2741 /* Check TSO support. 
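	 * The OP_LRGLEN list element carries the largest-send MTU, i.e.
	 * the header length computed above plus the TSO segment size;
	 * for a plain TCP stream over a 1500 byte MTU with no VLAN tag
	 * and no IP/TCP options that works out to 14 + 20 + 20 + 1460 =
	 * 1514 bytes.  The value is cached in msk_tso_mtu so a new
	 * OP_LRGLEN element is only queued when it actually changes.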
*/ 2742 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2743 tso_mtu = offset + m->m_pkthdr.tso_segsz; 2744 if (tso_mtu != sc_if->msk_cdata.msk_tso_mtu) { 2745 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2746 tx_le->msk_addr = htole32(tso_mtu); 2747 tx_le->msk_control = htole32(OP_LRGLEN | HW_OWNER); 2748 sc_if->msk_cdata.msk_tx_cnt++; 2749 MSK_INC(prod, MSK_TX_RING_CNT); 2750 sc_if->msk_cdata.msk_tso_mtu = tso_mtu; 2751 } 2752 tso++; 2753 } 2754 /* Check if we have a VLAN tag to insert. */ 2755 if ((m->m_flags & M_VLANTAG) != 0) { 2756 if (tso == 0) { 2757 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2758 tx_le->msk_addr = htole32(0); 2759 tx_le->msk_control = htole32(OP_VLAN | HW_OWNER | 2760 htons(m->m_pkthdr.ether_vtag)); 2761 sc_if->msk_cdata.msk_tx_cnt++; 2762 MSK_INC(prod, MSK_TX_RING_CNT); 2763 } else { 2764 tx_le->msk_control |= htole32(OP_VLAN | 2765 htons(m->m_pkthdr.ether_vtag)); 2766 } 2767 control |= INS_VLAN; 2768 } 2769 /* Check if we have to handle checksum offload. */ 2770 if (tso == 0 && (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) != 0) { 2771 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2772 tx_le->msk_addr = htole32(((tcp_offset + m->m_pkthdr.csum_data) 2773 & 0xffff) | ((uint32_t)tcp_offset << 16)); 2774 tx_le->msk_control = htole32(1 << 16 | (OP_TCPLISW | HW_OWNER)); 2775 control = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM; 2776 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 2777 control |= UDPTCP; 2778 sc_if->msk_cdata.msk_tx_cnt++; 2779 MSK_INC(prod, MSK_TX_RING_CNT); 2780 } 2781 2782 si = prod; 2783 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2784 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr)); 2785 if (tso == 0) 2786 tx_le->msk_control = htole32(txsegs[0].ds_len | control | 2787 OP_PACKET); 2788 else 2789 tx_le->msk_control = htole32(txsegs[0].ds_len | control | 2790 OP_LARGESEND); 2791 sc_if->msk_cdata.msk_tx_cnt++; 2792 MSK_INC(prod, MSK_TX_RING_CNT); 2793 2794 for (i = 1; i < nseg; i++) { 2795 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2796 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr)); 2797 tx_le->msk_control = htole32(txsegs[i].ds_len | control | 2798 OP_BUFFER | HW_OWNER); 2799 sc_if->msk_cdata.msk_tx_cnt++; 2800 MSK_INC(prod, MSK_TX_RING_CNT); 2801 } 2802 /* Update producer index. */ 2803 sc_if->msk_cdata.msk_tx_prod = prod; 2804 2805 /* Set EOP on the last desciptor. */ 2806 prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT; 2807 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2808 tx_le->msk_control |= htole32(EOP); 2809 2810 /* Turn the first descriptor ownership to hardware. */ 2811 tx_le = &sc_if->msk_rdata.msk_tx_ring[si]; 2812 tx_le->msk_control |= htole32(HW_OWNER); 2813 2814 txd = &sc_if->msk_cdata.msk_txdesc[prod]; 2815 map = txd_last->tx_dmamap; 2816 txd_last->tx_dmamap = txd->tx_dmamap; 2817 txd->tx_dmamap = map; 2818 txd->tx_m = m; 2819 2820 /* Sync descriptors. 
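	 * The buffer map only needs BUS_DMASYNC_PREWRITE since the chip
	 * reads the frame but never writes it back, while the ring map is
	 * synced both ways because the CPU re-reads the list elements in
	 * msk_txeof(), which issues the matching POSTREAD/POSTWRITE sync
	 * before inspecting them.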
*/ 2821 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE); 2822 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag, 2823 sc_if->msk_cdata.msk_tx_ring_map, 2824 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2825 2826 return (0); 2827 } 2828 2829 static void 2830 msk_tx_task(void *arg, int pending) 2831 { 2832 struct ifnet *ifp; 2833 2834 ifp = arg; 2835 msk_start(ifp); 2836 } 2837 2838 static void 2839 msk_start(struct ifnet *ifp) 2840 { 2841 struct msk_if_softc *sc_if; 2842 struct mbuf *m_head; 2843 int enq; 2844 2845 sc_if = ifp->if_softc; 2846 2847 MSK_IF_LOCK(sc_if); 2848 2849 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 2850 IFF_DRV_RUNNING || sc_if->msk_link == 0) { 2851 MSK_IF_UNLOCK(sc_if); 2852 return; 2853 } 2854 2855 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 2856 sc_if->msk_cdata.msk_tx_cnt < 2857 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) { 2858 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 2859 if (m_head == NULL) 2860 break; 2861 /* 2862 * Pack the data into the transmit ring. If we 2863 * don't have room, set the OACTIVE flag and wait 2864 * for the NIC to drain the ring. 2865 */ 2866 if (msk_encap(sc_if, &m_head) != 0) { 2867 if (m_head == NULL) 2868 break; 2869 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 2870 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2871 break; 2872 } 2873 2874 enq++; 2875 /* 2876 * If there's a BPF listener, bounce a copy of this frame 2877 * to him. 2878 */ 2879 BPF_MTAP(ifp, m_head); 2880 } 2881 2882 if (enq > 0) { 2883 /* Transmit */ 2884 CSR_WRITE_2(sc_if->msk_softc, 2885 Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG), 2886 sc_if->msk_cdata.msk_tx_prod); 2887 2888 /* Set a timeout in case the chip goes out to lunch. */ 2889 sc_if->msk_watchdog_timer = MSK_TX_TIMEOUT; 2890 } 2891 2892 MSK_IF_UNLOCK(sc_if); 2893 } 2894 2895 static void 2896 msk_watchdog(struct msk_if_softc *sc_if) 2897 { 2898 struct ifnet *ifp; 2899 uint32_t ridx; 2900 int idx; 2901 2902 MSK_IF_LOCK_ASSERT(sc_if); 2903 2904 if (sc_if->msk_watchdog_timer == 0 || --sc_if->msk_watchdog_timer) 2905 return; 2906 ifp = sc_if->msk_ifp; 2907 if (sc_if->msk_link == 0) { 2908 if (bootverbose) 2909 if_printf(sc_if->msk_ifp, "watchdog timeout " 2910 "(missed link)\n"); 2911 ifp->if_oerrors++; 2912 msk_init_locked(sc_if); 2913 return; 2914 } 2915 2916 /* 2917 * Reclaim first as there is a possibility of losing Tx completion 2918 * interrupts. 2919 */ 2920 ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX; 2921 idx = CSR_READ_2(sc_if->msk_softc, ridx); 2922 if (sc_if->msk_cdata.msk_tx_cons != idx) { 2923 msk_txeof(sc_if, idx); 2924 if (sc_if->msk_cdata.msk_tx_cnt == 0) { 2925 if_printf(ifp, "watchdog timeout (missed Tx interrupts) " 2926 "-- recovering\n"); 2927 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2928 taskqueue_enqueue(taskqueue_fast, 2929 &sc_if->msk_tx_task); 2930 return; 2931 } 2932 } 2933 2934 if_printf(ifp, "watchdog timeout\n"); 2935 ifp->if_oerrors++; 2936 msk_init_locked(sc_if); 2937 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2938 taskqueue_enqueue(taskqueue_fast, &sc_if->msk_tx_task); 2939 } 2940 2941 static void 2942 mskc_shutdown(device_t dev) 2943 { 2944 struct msk_softc *sc; 2945 int i; 2946 2947 sc = device_get_softc(dev); 2948 MSK_LOCK(sc); 2949 for (i = 0; i < sc->msk_num_port; i++) { 2950 if (sc->msk_if[i] != NULL) 2951 msk_stop(sc->msk_if[i]); 2952 } 2953 2954 /* Disable all interrupts. 
*/ 2955 CSR_WRITE_4(sc, B0_IMSK, 0); 2956 CSR_READ_4(sc, B0_IMSK); 2957 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 2958 CSR_READ_4(sc, B0_HWE_IMSK); 2959 2960 /* Put hardware reset. */ 2961 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 2962 2963 MSK_UNLOCK(sc); 2964 } 2965 2966 static int 2967 mskc_suspend(device_t dev) 2968 { 2969 struct msk_softc *sc; 2970 int i; 2971 2972 sc = device_get_softc(dev); 2973 2974 MSK_LOCK(sc); 2975 2976 for (i = 0; i < sc->msk_num_port; i++) { 2977 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL && 2978 ((sc->msk_if[i]->msk_ifp->if_drv_flags & 2979 IFF_DRV_RUNNING) != 0)) 2980 msk_stop(sc->msk_if[i]); 2981 } 2982 2983 /* Disable all interrupts. */ 2984 CSR_WRITE_4(sc, B0_IMSK, 0); 2985 CSR_READ_4(sc, B0_IMSK); 2986 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 2987 CSR_READ_4(sc, B0_HWE_IMSK); 2988 2989 msk_phy_power(sc, MSK_PHY_POWERDOWN); 2990 2991 /* Put hardware reset. */ 2992 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 2993 sc->msk_suspended = 1; 2994 2995 MSK_UNLOCK(sc); 2996 2997 return (0); 2998 } 2999 3000 static int 3001 mskc_resume(device_t dev) 3002 { 3003 struct msk_softc *sc; 3004 int i; 3005 3006 sc = device_get_softc(dev); 3007 3008 MSK_LOCK(sc); 3009 3010 mskc_reset(sc); 3011 for (i = 0; i < sc->msk_num_port; i++) { 3012 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL && 3013 ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0)) 3014 msk_init_locked(sc->msk_if[i]); 3015 } 3016 sc->msk_suspended = 0; 3017 3018 MSK_UNLOCK(sc); 3019 3020 return (0); 3021 } 3022 3023 static void 3024 msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len) 3025 { 3026 struct mbuf *m; 3027 struct ifnet *ifp; 3028 struct msk_rxdesc *rxd; 3029 int cons, rxlen; 3030 3031 ifp = sc_if->msk_ifp; 3032 3033 MSK_IF_LOCK_ASSERT(sc_if); 3034 3035 cons = sc_if->msk_cdata.msk_rx_cons; 3036 do { 3037 rxlen = status >> 16; 3038 if ((status & GMR_FS_VLAN) != 0) 3039 rxlen -= ETHER_VLAN_ENCAP_LEN; 3040 if (len > sc_if->msk_framesize || 3041 ((status & GMR_FS_ANY_ERR) != 0) || 3042 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) { 3043 /* Don't count flow-control packet as errors. */ 3044 if ((status & GMR_FS_GOOD_FC) == 0) 3045 ifp->if_ierrors++; 3046 msk_discard_rxbuf(sc_if, cons); 3047 break; 3048 } 3049 rxd = &sc_if->msk_cdata.msk_rxdesc[cons]; 3050 m = rxd->rx_m; 3051 if (msk_newbuf(sc_if, cons) != 0) { 3052 ifp->if_iqdrops++; 3053 /* Reuse old buffer. */ 3054 msk_discard_rxbuf(sc_if, cons); 3055 break; 3056 } 3057 m->m_pkthdr.rcvif = ifp; 3058 m->m_pkthdr.len = m->m_len = len; 3059 ifp->if_ipackets++; 3060 /* Check for VLAN tagged packets. 
*/ 3061 if ((status & GMR_FS_VLAN) != 0 && 3062 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 3063 m->m_pkthdr.ether_vtag = sc_if->msk_vtag; 3064 m->m_flags |= M_VLANTAG; 3065 } 3066 MSK_IF_UNLOCK(sc_if); 3067 (*ifp->if_input)(ifp, m); 3068 MSK_IF_LOCK(sc_if); 3069 } while (0); 3070 3071 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT); 3072 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT); 3073 } 3074 3075 static void 3076 msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len) 3077 { 3078 struct mbuf *m; 3079 struct ifnet *ifp; 3080 struct msk_rxdesc *jrxd; 3081 int cons, rxlen; 3082 3083 ifp = sc_if->msk_ifp; 3084 3085 MSK_IF_LOCK_ASSERT(sc_if); 3086 3087 cons = sc_if->msk_cdata.msk_rx_cons; 3088 do { 3089 rxlen = status >> 16; 3090 if ((status & GMR_FS_VLAN) != 0) 3091 rxlen -= ETHER_VLAN_ENCAP_LEN; 3092 if (len > sc_if->msk_framesize || 3093 ((status & GMR_FS_ANY_ERR) != 0) || 3094 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) { 3095 /* Don't count flow-control packet as errors. */ 3096 if ((status & GMR_FS_GOOD_FC) == 0) 3097 ifp->if_ierrors++; 3098 msk_discard_jumbo_rxbuf(sc_if, cons); 3099 break; 3100 } 3101 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons]; 3102 m = jrxd->rx_m; 3103 if (msk_jumbo_newbuf(sc_if, cons) != 0) { 3104 ifp->if_iqdrops++; 3105 /* Reuse old buffer. */ 3106 msk_discard_jumbo_rxbuf(sc_if, cons); 3107 break; 3108 } 3109 m->m_pkthdr.rcvif = ifp; 3110 m->m_pkthdr.len = m->m_len = len; 3111 ifp->if_ipackets++; 3112 /* Check for VLAN tagged packets. */ 3113 if ((status & GMR_FS_VLAN) != 0 && 3114 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 3115 m->m_pkthdr.ether_vtag = sc_if->msk_vtag; 3116 m->m_flags |= M_VLANTAG; 3117 } 3118 MSK_IF_UNLOCK(sc_if); 3119 (*ifp->if_input)(ifp, m); 3120 MSK_IF_LOCK(sc_if); 3121 } while (0); 3122 3123 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT); 3124 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT); 3125 } 3126 3127 static void 3128 msk_txeof(struct msk_if_softc *sc_if, int idx) 3129 { 3130 struct msk_txdesc *txd; 3131 struct msk_tx_desc *cur_tx; 3132 struct ifnet *ifp; 3133 uint32_t control; 3134 int cons, prog; 3135 3136 MSK_IF_LOCK_ASSERT(sc_if); 3137 3138 ifp = sc_if->msk_ifp; 3139 3140 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag, 3141 sc_if->msk_cdata.msk_tx_ring_map, 3142 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 3143 /* 3144 * Go through our tx ring and free mbufs for those 3145 * frames that have been sent. 3146 */ 3147 cons = sc_if->msk_cdata.msk_tx_cons; 3148 prog = 0; 3149 for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) { 3150 if (sc_if->msk_cdata.msk_tx_cnt <= 0) 3151 break; 3152 prog++; 3153 cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons]; 3154 control = le32toh(cur_tx->msk_control); 3155 sc_if->msk_cdata.msk_tx_cnt--; 3156 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3157 if ((control & EOP) == 0) 3158 continue; 3159 txd = &sc_if->msk_cdata.msk_txdesc[cons]; 3160 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap, 3161 BUS_DMASYNC_POSTWRITE); 3162 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap); 3163 3164 ifp->if_opackets++; 3165 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!", 3166 __func__)); 3167 m_freem(txd->tx_m); 3168 txd->tx_m = NULL; 3169 } 3170 3171 if (prog > 0) { 3172 sc_if->msk_cdata.msk_tx_cons = cons; 3173 if (sc_if->msk_cdata.msk_tx_cnt == 0) 3174 sc_if->msk_watchdog_timer = 0; 3175 /* No need to sync LEs as we didn't update LEs. 
*/ 3176 } 3177 } 3178 3179 static void 3180 msk_tick(void *xsc_if) 3181 { 3182 struct msk_if_softc *sc_if; 3183 struct mii_data *mii; 3184 3185 sc_if = xsc_if; 3186 3187 MSK_IF_LOCK_ASSERT(sc_if); 3188 3189 mii = device_get_softc(sc_if->msk_miibus); 3190 3191 mii_tick(mii); 3192 msk_watchdog(sc_if); 3193 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if); 3194 } 3195 3196 static void 3197 msk_intr_phy(struct msk_if_softc *sc_if) 3198 { 3199 uint16_t status; 3200 3201 if (sc_if->msk_softc->msk_marvell_phy) { 3202 msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT); 3203 status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, 3204 PHY_MARV_INT_STAT); 3205 /* Handle FIFO Underrun/Overflow? */ 3206 if ((status & PHY_M_IS_FIFO_ERROR)) 3207 device_printf(sc_if->msk_if_dev, 3208 "PHY FIFO underrun/overflow.\n"); 3209 } 3210 } 3211 3212 static void 3213 msk_intr_gmac(struct msk_if_softc *sc_if) 3214 { 3215 struct msk_softc *sc; 3216 uint8_t status; 3217 3218 sc = sc_if->msk_softc; 3219 status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC)); 3220 3221 /* GMAC Rx FIFO overrun. */ 3222 if ((status & GM_IS_RX_FF_OR) != 0) { 3223 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), 3224 GMF_CLI_RX_FO); 3225 device_printf(sc_if->msk_if_dev, "Rx FIFO overrun!\n"); 3226 } 3227 /* GMAC Tx FIFO underrun. */ 3228 if ((status & GM_IS_TX_FF_UR) != 0) { 3229 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3230 GMF_CLI_TX_FU); 3231 device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n"); 3232 /* 3233 * XXX 3234 * In case of Tx underrun, we may need to flush/reset 3235 * Tx MAC but that would also require resynchronization 3236 * with status LEs. Reintializing status LEs would 3237 * affect other port in dual MAC configuration so it 3238 * should be avoided as possible as we can. 3239 * Due to lack of documentation it's all vague guess but 3240 * it needs more investigation. 3241 */ 3242 } 3243 } 3244 3245 static void 3246 msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status) 3247 { 3248 struct msk_softc *sc; 3249 3250 sc = sc_if->msk_softc; 3251 if ((status & Y2_IS_PAR_RD1) != 0) { 3252 device_printf(sc_if->msk_if_dev, 3253 "RAM buffer read parity error\n"); 3254 /* Clear IRQ. */ 3255 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL), 3256 RI_CLR_RD_PERR); 3257 } 3258 if ((status & Y2_IS_PAR_WR1) != 0) { 3259 device_printf(sc_if->msk_if_dev, 3260 "RAM buffer write parity error\n"); 3261 /* Clear IRQ. */ 3262 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL), 3263 RI_CLR_WR_PERR); 3264 } 3265 if ((status & Y2_IS_PAR_MAC1) != 0) { 3266 device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n"); 3267 /* Clear IRQ. */ 3268 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3269 GMF_CLI_TX_PE); 3270 } 3271 if ((status & Y2_IS_PAR_RX1) != 0) { 3272 device_printf(sc_if->msk_if_dev, "Rx parity error\n"); 3273 /* Clear IRQ. */ 3274 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR); 3275 } 3276 if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) { 3277 device_printf(sc_if->msk_if_dev, "TCP segmentation error\n"); 3278 /* Clear IRQ. */ 3279 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP); 3280 } 3281 } 3282 3283 static void 3284 msk_intr_hwerr(struct msk_softc *sc) 3285 { 3286 uint32_t status; 3287 uint32_t tlphead[4]; 3288 3289 status = CSR_READ_4(sc, B0_HWE_ISRC); 3290 /* Time Stamp timer overflow. 
*/ 3291 if ((status & Y2_IS_TIST_OV) != 0) 3292 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); 3293 if ((status & Y2_IS_PCI_NEXP) != 0) { 3294 /* 3295 * PCI Express Error occured which is not described in PEX 3296 * spec. 3297 * This error is also mapped either to Master Abort( 3298 * Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and 3299 * can only be cleared there. 3300 */ 3301 device_printf(sc->msk_dev, 3302 "PCI Express protocol violation error\n"); 3303 } 3304 3305 if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) { 3306 uint16_t v16; 3307 3308 if ((status & Y2_IS_MST_ERR) != 0) 3309 device_printf(sc->msk_dev, 3310 "unexpected IRQ Status error\n"); 3311 else 3312 device_printf(sc->msk_dev, 3313 "unexpected IRQ Master error\n"); 3314 /* Reset all bits in the PCI status register. */ 3315 v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2); 3316 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); 3317 pci_write_config(sc->msk_dev, PCIR_STATUS, v16 | 3318 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT | 3319 PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2); 3320 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 3321 } 3322 3323 /* Check for PCI Express Uncorrectable Error. */ 3324 if ((status & Y2_IS_PCI_EXP) != 0) { 3325 uint32_t v32; 3326 3327 /* 3328 * On PCI Express bus bridges are called root complexes (RC). 3329 * PCI Express errors are recognized by the root complex too, 3330 * which requests the system to handle the problem. After 3331 * error occurence it may be that no access to the adapter 3332 * may be performed any longer. 3333 */ 3334 3335 v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT); 3336 if ((v32 & PEX_UNSUP_REQ) != 0) { 3337 /* Ignore unsupported request error. */ 3338 device_printf(sc->msk_dev, 3339 "Uncorrectable PCI Express error\n"); 3340 } 3341 if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) { 3342 int i; 3343 3344 /* Get TLP header form Log Registers. */ 3345 for (i = 0; i < 4; i++) 3346 tlphead[i] = CSR_PCI_READ_4(sc, 3347 PEX_HEADER_LOG + i * 4); 3348 /* Check for vendor defined broadcast message. */ 3349 if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) { 3350 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP; 3351 CSR_WRITE_4(sc, B0_HWE_IMSK, 3352 sc->msk_intrhwemask); 3353 CSR_READ_4(sc, B0_HWE_IMSK); 3354 } 3355 } 3356 /* Clear the interrupt. 
*/ 3357 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); 3358 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff); 3359 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 3360 } 3361 3362 if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL) 3363 msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status); 3364 if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL) 3365 msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8); 3366 } 3367 3368 static __inline void 3369 msk_rxput(struct msk_if_softc *sc_if) 3370 { 3371 struct msk_softc *sc; 3372 3373 sc = sc_if->msk_softc; 3374 if (sc_if->msk_framesize >(MCLBYTES - ETHER_HDR_LEN)) 3375 bus_dmamap_sync( 3376 sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 3377 sc_if->msk_cdata.msk_jumbo_rx_ring_map, 3378 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3379 else 3380 bus_dmamap_sync( 3381 sc_if->msk_cdata.msk_rx_ring_tag, 3382 sc_if->msk_cdata.msk_rx_ring_map, 3383 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3384 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, 3385 PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod); 3386 } 3387 3388 static int 3389 msk_handle_events(struct msk_softc *sc) 3390 { 3391 struct msk_if_softc *sc_if; 3392 int rxput[2]; 3393 struct msk_stat_desc *sd; 3394 uint32_t control, status; 3395 int cons, idx, len, port, rxprog; 3396 3397 idx = CSR_READ_2(sc, STAT_PUT_IDX); 3398 if (idx == sc->msk_stat_cons) 3399 return (0); 3400 3401 /* Sync status LEs. */ 3402 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map, 3403 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 3404 /* XXX Sync Rx LEs here. */ 3405 3406 rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0; 3407 3408 rxprog = 0; 3409 for (cons = sc->msk_stat_cons; cons != idx;) { 3410 sd = &sc->msk_stat_ring[cons]; 3411 control = le32toh(sd->msk_control); 3412 if ((control & HW_OWNER) == 0) 3413 break; 3414 /* 3415 * Marvell's FreeBSD driver updates status LE after clearing 3416 * HW_OWNER. However we don't have a way to sync single LE 3417 * with bus_dma(9) API. bus_dma(9) provides a way to sync 3418 * an entire DMA map. So don't sync LE until we have a better 3419 * way to sync LEs. 3420 */ 3421 control &= ~HW_OWNER; 3422 sd->msk_control = htole32(control); 3423 status = le32toh(sd->msk_status); 3424 len = control & STLE_LEN_MASK; 3425 port = (control >> 16) & 0x01; 3426 sc_if = sc->msk_if[port]; 3427 if (sc_if == NULL) { 3428 device_printf(sc->msk_dev, "invalid port opcode " 3429 "0x%08x\n", control & STLE_OP_MASK); 3430 continue; 3431 } 3432 3433 switch (control & STLE_OP_MASK) { 3434 case OP_RXVLAN: 3435 sc_if->msk_vtag = ntohs(len); 3436 break; 3437 case OP_RXCHKSVLAN: 3438 sc_if->msk_vtag = ntohs(len); 3439 break; 3440 case OP_RXSTAT: 3441 if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) 3442 msk_jumbo_rxeof(sc_if, status, len); 3443 else 3444 msk_rxeof(sc_if, status, len); 3445 rxprog++; 3446 /* 3447 * Because there is no way to sync single Rx LE 3448 * put the DMA sync operation off until the end of 3449 * event processing. 3450 */ 3451 rxput[port]++; 3452 /* Update prefetch unit if we've passed water mark. 
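			 * Returned Rx list elements are batched up to
			 * msk_rx_putwm before the prefetch unit PUT index
			 * is written, so a burst of frames costs a single
			 * register write; any remainder is flushed by the
			 * rxput[] checks after the event loop.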
*/ 3453 if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) { 3454 msk_rxput(sc_if); 3455 rxput[port] = 0; 3456 } 3457 break; 3458 case OP_TXINDEXLE: 3459 if (sc->msk_if[MSK_PORT_A] != NULL) 3460 msk_txeof(sc->msk_if[MSK_PORT_A], 3461 status & STLE_TXA1_MSKL); 3462 if (sc->msk_if[MSK_PORT_B] != NULL) 3463 msk_txeof(sc->msk_if[MSK_PORT_B], 3464 ((status & STLE_TXA2_MSKL) >> 3465 STLE_TXA2_SHIFTL) | 3466 ((len & STLE_TXA2_MSKH) << 3467 STLE_TXA2_SHIFTH)); 3468 break; 3469 default: 3470 device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n", 3471 control & STLE_OP_MASK); 3472 break; 3473 } 3474 MSK_INC(cons, MSK_STAT_RING_CNT); 3475 if (rxprog > sc->msk_process_limit) 3476 break; 3477 } 3478 3479 sc->msk_stat_cons = cons; 3480 /* XXX We should sync status LEs here. See above notes. */ 3481 3482 if (rxput[MSK_PORT_A] > 0) 3483 msk_rxput(sc->msk_if[MSK_PORT_A]); 3484 if (rxput[MSK_PORT_B] > 0) 3485 msk_rxput(sc->msk_if[MSK_PORT_B]); 3486 3487 return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX)); 3488 } 3489 3490 static void 3491 msk_intr(void *xsc) 3492 { 3493 struct msk_softc *sc; 3494 uint32_t status; 3495 3496 sc = xsc; 3497 status = CSR_READ_4(sc, B0_Y2_SP_ISRC2); 3498 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */ 3499 if (status == 0 || status == 0xffffffff) { 3500 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3501 return; 3502 } 3503 3504 taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task); 3505 } 3506 3507 static void 3508 msk_int_task(void *arg, int pending) 3509 { 3510 struct msk_softc *sc; 3511 struct msk_if_softc *sc_if0, *sc_if1; 3512 struct ifnet *ifp0, *ifp1; 3513 uint32_t status; 3514 int domore; 3515 3516 sc = arg; 3517 MSK_LOCK(sc); 3518 3519 /* Get interrupt source. */ 3520 status = CSR_READ_4(sc, B0_ISRC); 3521 if (status == 0 || status == 0xffffffff || sc->msk_suspended != 0 || 3522 (status & sc->msk_intrmask) == 0) 3523 goto done; 3524 3525 sc_if0 = sc->msk_if[MSK_PORT_A]; 3526 sc_if1 = sc->msk_if[MSK_PORT_B]; 3527 ifp0 = ifp1 = NULL; 3528 if (sc_if0 != NULL) 3529 ifp0 = sc_if0->msk_ifp; 3530 if (sc_if1 != NULL) 3531 ifp1 = sc_if1->msk_ifp; 3532 3533 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL) 3534 msk_intr_phy(sc_if0); 3535 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL) 3536 msk_intr_phy(sc_if1); 3537 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL) 3538 msk_intr_gmac(sc_if0); 3539 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL) 3540 msk_intr_gmac(sc_if1); 3541 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) { 3542 device_printf(sc->msk_dev, "Rx descriptor error\n"); 3543 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2); 3544 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3545 CSR_READ_4(sc, B0_IMSK); 3546 } 3547 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) { 3548 device_printf(sc->msk_dev, "Tx descriptor error\n"); 3549 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2); 3550 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3551 CSR_READ_4(sc, B0_IMSK); 3552 } 3553 if ((status & Y2_IS_HW_ERR) != 0) 3554 msk_intr_hwerr(sc); 3555 3556 domore = msk_handle_events(sc); 3557 if ((status & Y2_IS_STAT_BMU) != 0) 3558 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ); 3559 3560 if (ifp0 != NULL && (ifp0->if_drv_flags & IFF_DRV_RUNNING) != 0 && 3561 !IFQ_DRV_IS_EMPTY(&ifp0->if_snd)) 3562 taskqueue_enqueue(taskqueue_fast, &sc_if0->msk_tx_task); 3563 if (ifp1 != NULL && (ifp1->if_drv_flags & IFF_DRV_RUNNING) != 0 && 3564 !IFQ_DRV_IS_EMPTY(&ifp1->if_snd)) 3565 taskqueue_enqueue(taskqueue_fast, &sc_if1->msk_tx_task); 3566 3567 if (domore > 0) { 3568 
taskqueue_enqueue(sc->msk_tq, &sc->msk_int_task); 3569 MSK_UNLOCK(sc); 3570 return; 3571 } 3572 done: 3573 MSK_UNLOCK(sc); 3574 3575 /* Reenable interrupts. */ 3576 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3577 } 3578 3579 static void 3580 msk_init(void *xsc) 3581 { 3582 struct msk_if_softc *sc_if = xsc; 3583 3584 MSK_IF_LOCK(sc_if); 3585 msk_init_locked(sc_if); 3586 MSK_IF_UNLOCK(sc_if); 3587 } 3588 3589 static void 3590 msk_init_locked(struct msk_if_softc *sc_if) 3591 { 3592 struct msk_softc *sc; 3593 struct ifnet *ifp; 3594 struct mii_data *mii; 3595 uint16_t eaddr[ETHER_ADDR_LEN / 2]; 3596 uint16_t gmac; 3597 int error, i; 3598 3599 MSK_IF_LOCK_ASSERT(sc_if); 3600 3601 ifp = sc_if->msk_ifp; 3602 sc = sc_if->msk_softc; 3603 mii = device_get_softc(sc_if->msk_miibus); 3604 3605 error = 0; 3606 /* Cancel pending I/O and free all Rx/Tx buffers. */ 3607 msk_stop(sc_if); 3608 3609 sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + 3610 ETHER_VLAN_ENCAP_LEN; 3611 3612 /* 3613 * Initialize GMAC first. 3614 * Without this initialization, Rx MAC did not work as expected 3615 * and Rx MAC garbled status LEs and it resulted in out-of-order 3616 * or duplicated frame delivery which in turn showed very poor 3617 * Rx performance.(I had to write a packet analysis code that 3618 * could be embeded in driver to diagnose this issue.) 3619 * I've spent almost 2 months to fix this issue. If I have had 3620 * datasheet for Yukon II I wouldn't have encountered this. :-( 3621 */ 3622 gmac = GM_GPCR_SPEED_100 | GM_GPCR_SPEED_1000 | GM_GPCR_DUP_FULL; 3623 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac); 3624 3625 /* Dummy read the Interrupt Source Register. */ 3626 CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC)); 3627 3628 /* Set MIB Clear Counter Mode. */ 3629 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR); 3630 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR); 3631 /* Read all MIB Counters with Clear Mode set. */ 3632 for (i = 0; i < GM_MIB_CNT_SIZE; i++) 3633 GMAC_READ_2(sc, sc_if->msk_port, GM_MIB_CNT_BASE + 8 * i); 3634 /* Clear MIB Clear Counter Mode. */ 3635 gmac &= ~GM_PAR_MIB_CLR; 3636 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac); 3637 3638 /* Disable FCS. */ 3639 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS); 3640 3641 /* Setup Transmit Control Register. */ 3642 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); 3643 3644 /* Setup Transmit Flow Control Register. */ 3645 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff); 3646 3647 /* Setup Transmit Parameter Register. */ 3648 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM, 3649 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) | 3650 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF)); 3651 3652 gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) | 3653 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF); 3654 3655 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN) 3656 gmac |= GM_SMOD_JUMBO_ENA; 3657 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac); 3658 3659 /* Set station address. */ 3660 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN); 3661 for (i = 0; i < ETHER_ADDR_LEN /2; i++) 3662 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L + i * 4, 3663 eaddr[i]); 3664 for (i = 0; i < ETHER_ADDR_LEN /2; i++) 3665 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L + i * 4, 3666 eaddr[i]); 3667 3668 /* Disable interrupts for counter overflows. 
	/* Disable interrupts for counter overflows. */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);

	/* Configure Rx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
	    GMF_OPER_ON | GMF_RX_F_FL_ON);

	/* Set promiscuous mode. */
	msk_setpromisc(sc_if);

	/* Set multicast filter. */
	msk_setmulti(sc_if);

	/* Flush Rx MAC FIFO on any flow control or error. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
	    GMR_FS_ANY_ERR);

	/* Set Rx FIFO flush threshold to 64 bytes. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR),
	    RX_GMF_FL_THR_DEF);

	/* Configure Tx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);

	/* Configure hardware VLAN tag insertion/stripping. */
	msk_setvlan(sc_if, ifp);

	/* XXX It seems STFW is required for all cases. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), TX_STFW_ENA);

	if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
		/* Set Rx Pause threshold. */
		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
		    MSK_ECU_LLPP);
		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
		    MSK_ECU_ULPP);
		if (sc_if->msk_framesize > MSK_MAX_FRAMELEN) {
			/*
			 * Not sure whether the following code is needed, as
			 * Yukon EC Ultra may not support jumbo frames.
			 *
			 * Set Tx GMAC FIFO Almost Empty Threshold.
			 */
			CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
			    MSK_ECU_AE_THR);
			/* Disable Store & Forward mode for Tx. */
			CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
			    TX_STFW_DIS);
		}
	}

	/*
	 * Disable Force Sync bit and Alloc bit in Tx RAM interface
	 * arbiter as we don't use Sync Tx queue.
	 */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
	    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
	/* Enable the RAM Interface Arbiter. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);

	/* Setup RAM buffer. */
	msk_set_rambuffer(sc_if);

	/* Disable Tx sync Queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);

	/* Setup Tx Queue Bus Memory Interface. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
	/* Increase IPID when hardware generates IP packets in TSO. */
	if ((ifp->if_hwassist & CSUM_TSO) != 0)
		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
		    BMU_TX_IPIDINCR_ON);
	else
		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
		    BMU_TX_IPIDINCR_OFF);
	CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
	if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
	    sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
		/* Fix for Yukon-EC Ultra: set BMU FIFO level */
		CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL), MSK_ECU_TXFF_LEV);
	}
	/* Setup Rx Queue Bus Memory Interface. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
	CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
	if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
	    sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
		/* MAC Rx RAM Read is controlled by hardware. */
		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
	}

	msk_set_prefetch(sc, sc_if->msk_txq,
	    sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
	msk_init_tx_ring(sc_if);

	/* Disable Rx checksum offload and RSS hash. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
	    BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);
	if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) {
		msk_set_prefetch(sc, sc_if->msk_rxq,
		    sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
		    MSK_JUMBO_RX_RING_CNT - 1);
		error = msk_init_jumbo_rx_ring(sc_if);
	} else {
		msk_set_prefetch(sc, sc_if->msk_rxq,
		    sc_if->msk_rdata.msk_rx_ring_paddr,
		    MSK_RX_RING_CNT - 1);
		error = msk_init_rx_ring(sc_if);
	}
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "initialization failed: no memory for Rx buffers\n");
		msk_stop(sc_if);
		return;
	}

	/* Configure interrupt handling. */
	if (sc_if->msk_port == MSK_PORT_A) {
		sc->msk_intrmask |= Y2_IS_PORT_A;
		sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
	} else {
		sc->msk_intrmask |= Y2_IS_PORT_B;
		sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
	}
	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
	CSR_READ_4(sc, B0_IMSK);

	sc_if->msk_link = 0;
	mii_mediachg(mii);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
}

static void
msk_set_rambuffer(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	int ltpp, utpp;

	sc = sc_if->msk_softc;

	/* Setup Rx Queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
	    sc->msk_rxqend[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);

	utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
	ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
	if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
		ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
	/* Set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds? */

	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
	CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));
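	/*
	 * Worked example with made-up numbers (the real bounds are whatever
	 * the attach code programmed into msk_rxqstart/msk_rxqend): assume
	 * the Rx RAM segment for this port runs from 0x0000 to 0xbfff (48KB),
	 * MSK_RB_ULPP is 8KB and MSK_RB_LLPP_B is 16KB.  Then
	 *
	 *	utpp = (0xc000 - 0x0000 - 8192) / 8  = 5120
	 *	ltpp = (0xc000 - 0x0000 - 16384) / 8 = 4096
	 *
	 * i.e. the upper/lower pause thresholds are expressed in 8-byte
	 * units, offset back from the end of the queue segment.
	 */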
	/* Setup Tx Queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
	    sc->msk_txqend[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	/* Enable Store & Forward for Tx side. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
	CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
}

static void
msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
    uint32_t count)
{

	/* Reset the prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_CLR);
	/* Set LE base address. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
	    MSK_ADDR_LO(addr));
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
	    MSK_ADDR_HI(addr));
	/* Set the list last index. */
	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
	    count);
	/* Turn on prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_OP_ON);
	/* Dummy read to ensure write. */
	CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
}

static void
msk_stop(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct msk_txdesc *txd;
	struct msk_rxdesc *rxd;
	struct msk_rxdesc *jrxd;
	struct ifnet *ifp;
	uint32_t val;
	int i;

	MSK_IF_LOCK_ASSERT(sc_if);
	sc = sc_if->msk_softc;
	ifp = sc_if->msk_ifp;

	callout_stop(&sc_if->msk_tick_ch);
	sc_if->msk_watchdog_timer = 0;

	/* Disable interrupts. */
	if (sc_if->msk_port == MSK_PORT_A) {
		sc->msk_intrmask &= ~Y2_IS_PORT_A;
		sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
	} else {
		sc->msk_intrmask &= ~Y2_IS_PORT_B;
		sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
	}
	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
	CSR_READ_4(sc, B0_IMSK);

	/* Disable Tx/Rx MAC. */
	val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
	val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
	/* Read back to ensure the write completed. */
	GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);

	/* Stop Tx BMU. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
	val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
	for (i = 0; i < MSK_TIMEOUT; i++) {
		if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
			    BMU_STOP);
			val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
		} else
			break;
		DELAY(1);
	}
	if (i == MSK_TIMEOUT)
		device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
	    RB_RST_SET | RB_DIS_OP_MD);

	/* Disable all GMAC interrupts. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
	/* Disable PHY interrupt. */
	if (sc->msk_marvell_phy)
		msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);

	/* Disable the RAM Interface Arbiter. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);

	/* Reset the PCI FIFO of the async Tx queue. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
	    BMU_RST_SET | BMU_FIFO_RST);

	/* Reset the Tx prefetch units. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);

	/* Reset the RAM Buffer async Tx queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);

	/* Reset Tx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
	/* Set Pause Off. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);

	/*
	 * The Rx Stop command does not work on Yukon-2 if the BMU has not
	 * reached the end of a packet, and since we cannot guarantee that no
	 * data is incoming, the BMU must only be reset while it is not in the
	 * middle of a DMA transfer.  Because the Rx path may still be active,
	 * the Rx RAM buffer is stopped first so that any incoming data cannot
	 * trigger a DMA.  After the RAM buffer is stopped, the BMU is polled
	 * until any DMA in progress has ended, and only then is it reset.
	 */

	/* Disable the RAM Buffer receive queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
	for (i = 0; i < MSK_TIMEOUT; i++) {
		if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
		    CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
			break;
		DELAY(1);
	}
	if (i == MSK_TIMEOUT)
		device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
	    BMU_RST_SET | BMU_FIFO_RST);
	/* Reset the Rx prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	/* Reset the RAM Buffer receive queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
	/* Reset Rx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);

	/* Free Rx and Tx mbufs still in the queues. */
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
		if (jrxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap);
			m_freem(jrxd->rx_m);
			jrxd->rx_m = NULL;
		}
	}
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	/*
	 * Mark the interface down.
	 */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc_if->msk_link = 0;
}

static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}

static int
sysctl_hw_msk_proc_limit(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req, MSK_PROC_MIN,
	    MSK_PROC_MAX));
}
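/*
 * Illustrative sketch only (not part of the driver): a handler such as
 * sysctl_hw_msk_proc_limit() above is typically hooked up with
 * SYSCTL_ADD_PROC from the attach path.  The node name, description and the
 * example function name below are assumptions made for this sketch.
 */
#if 0	/* example only */
static void
msk_sysctl_proc_limit_example(struct msk_softc *sc)
{

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->msk_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->msk_dev)), OID_AUTO,
	    "process_limit", CTLTYPE_INT | CTLFLAG_RW,
	    &sc->msk_process_limit, 0, sysctl_hw_msk_proc_limit, "I",
	    "max number of status LEs to process per interrupt");
}
#endif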