1 /* $OpenBSD: if_sk.c,v 2.33 2003/08/12 05:23:06 nate Exp $ */ 2 3 /*- 4 * Copyright (c) 1997, 1998, 1999, 2000 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 /*- 35 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu> 36 * 37 * Permission to use, copy, modify, and distribute this software for any 38 * purpose with or without fee is hereby granted, provided that the above 39 * copyright notice and this permission notice appear in all copies. 40 * 41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 48 */ 49 50 #include <sys/cdefs.h> 51 __FBSDID("$FreeBSD$"); 52 53 /* 54 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports 55 * the SK-984x series adapters, both single port and dual port. 56 * References: 57 * The XaQti XMAC II datasheet, 58 * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf 59 * The SysKonnect GEnesis manual, http://www.syskonnect.com 60 * 61 * Note: XaQti has been aquired by Vitesse, and Vitesse does not have the 62 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a 63 * convenience to others until Vitesse corrects this problem: 64 * 65 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf 66 * 67 * Written by Bill Paul <wpaul@ee.columbia.edu> 68 * Department of Electrical Engineering 69 * Columbia University, New York City 70 */ 71 /* 72 * The SysKonnect gigabit ethernet adapters consist of two main 73 * components: the SysKonnect GEnesis controller chip and the XaQti Corp. 74 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC 75 * components and a PHY while the GEnesis controller provides a PCI 76 * interface with DMA support. Each card may have between 512K and 77 * 2MB of SRAM on board depending on the configuration. 78 * 79 * The SysKonnect GEnesis controller can have either one or two XMAC 80 * chips connected to it, allowing single or dual port NIC configurations. 81 * SysKonnect has the distinction of being the only vendor on the market 82 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs, 83 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the 84 * XMAC registers. This driver takes advantage of these features to allow 85 * both XMACs to operate as independent interfaces. 86 */ 87 88 #include <sys/param.h> 89 #include <sys/systm.h> 90 #include <sys/bus.h> 91 #include <sys/endian.h> 92 #include <sys/mbuf.h> 93 #include <sys/malloc.h> 94 #include <sys/kernel.h> 95 #include <sys/module.h> 96 #include <sys/socket.h> 97 #include <sys/sockio.h> 98 #include <sys/queue.h> 99 #include <sys/sysctl.h> 100 101 #include <net/bpf.h> 102 #include <net/ethernet.h> 103 #include <net/if.h> 104 #include <net/if_arp.h> 105 #include <net/if_dl.h> 106 #include <net/if_media.h> 107 #include <net/if_types.h> 108 #include <net/if_vlan_var.h> 109 110 #include <netinet/in.h> 111 #include <netinet/in_systm.h> 112 #include <netinet/ip.h> 113 114 #include <machine/bus.h> 115 #include <machine/in_cksum.h> 116 #include <machine/resource.h> 117 #include <sys/rman.h> 118 119 #include <dev/mii/mii.h> 120 #include <dev/mii/miivar.h> 121 #include <dev/mii/brgphyreg.h> 122 123 #include <dev/pci/pcireg.h> 124 #include <dev/pci/pcivar.h> 125 126 #if 0 127 #define SK_USEIOSPACE 128 #endif 129 130 #include <dev/sk/if_skreg.h> 131 #include <dev/sk/xmaciireg.h> 132 #include <dev/sk/yukonreg.h> 133 134 MODULE_DEPEND(sk, pci, 1, 1, 1); 135 MODULE_DEPEND(sk, ether, 1, 1, 1); 136 MODULE_DEPEND(sk, miibus, 1, 1, 1); 137 138 /* "device miibus" required. See GENERIC if you get errors here. */ 139 #include "miibus_if.h" 140 141 #ifndef lint 142 static const char rcsid[] = 143 "$FreeBSD$"; 144 #endif 145 146 static struct sk_type sk_devs[] = { 147 { 148 VENDORID_SK, 149 DEVICEID_SK_V1, 150 "SysKonnect Gigabit Ethernet (V1.0)" 151 }, 152 { 153 VENDORID_SK, 154 DEVICEID_SK_V2, 155 "SysKonnect Gigabit Ethernet (V2.0)" 156 }, 157 { 158 VENDORID_MARVELL, 159 DEVICEID_SK_V2, 160 "Marvell Gigabit Ethernet" 161 }, 162 #ifdef not_yet 163 { 164 VENDORID_MARVELL, 165 DEVICEID_MRVL_4360, 166 "Marvell 88E8052 Gigabit Ethernet Controller" 167 }, 168 { 169 VENDORID_MARVELL, 170 DEVICEID_MRVL_4361, 171 "Marvell 88E8050 Gigabit Ethernet Controller" 172 }, 173 { 174 VENDORID_MARVELL, 175 DEVICEID_MRVL_4362, 176 "Marvell 88E8053 Gigabit Ethernet Controller" 177 }, 178 #endif 179 { 180 VENDORID_MARVELL, 181 DEVICEID_BELKIN_5005, 182 "Belkin F5D5005 Gigabit Ethernet" 183 }, 184 { 185 VENDORID_3COM, 186 DEVICEID_3COM_3C940, 187 "3Com 3C940 Gigabit Ethernet" 188 }, 189 { 190 VENDORID_LINKSYS, 191 DEVICEID_LINKSYS_EG1032, 192 "Linksys EG1032 Gigabit Ethernet" 193 }, 194 { 195 VENDORID_DLINK, 196 DEVICEID_DLINK_DGE530T_A1, 197 "D-Link DGE-530T Gigabit Ethernet" 198 }, 199 { 200 VENDORID_DLINK, 201 DEVICEID_DLINK_DGE530T_B1, 202 "D-Link DGE-530T Gigabit Ethernet" 203 }, 204 { 0, 0, NULL } 205 }; 206 207 static int skc_probe(device_t); 208 static int skc_attach(device_t); 209 static int skc_detach(device_t); 210 static void skc_shutdown(device_t); 211 static int skc_suspend(device_t); 212 static int skc_resume(device_t); 213 static int sk_detach(device_t); 214 static int sk_probe(device_t); 215 static int sk_attach(device_t); 216 static void sk_tick(void *); 217 static void sk_yukon_tick(void *); 218 static void sk_intr(void *); 219 static void sk_intr_xmac(struct sk_if_softc *); 220 static void sk_intr_bcom(struct sk_if_softc *); 221 static void sk_intr_yukon(struct sk_if_softc *); 222 static __inline void sk_rxcksum(struct ifnet *, struct mbuf *, u_int32_t); 223 static __inline int sk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t); 224 static void sk_rxeof(struct sk_if_softc *); 225 static void sk_jumbo_rxeof(struct sk_if_softc *); 226 static void sk_txeof(struct sk_if_softc *); 227 static void sk_txcksum(struct ifnet *, struct mbuf *, struct sk_tx_desc *); 228 static int sk_encap(struct sk_if_softc *, struct mbuf **); 229 static void sk_start(struct ifnet *); 230 static void sk_start_locked(struct ifnet *); 231 static int sk_ioctl(struct ifnet *, u_long, caddr_t); 232 static void sk_init(void *); 233 static void sk_init_locked(struct sk_if_softc *); 234 static void sk_init_xmac(struct sk_if_softc *); 235 static void sk_init_yukon(struct sk_if_softc *); 236 static void sk_stop(struct sk_if_softc *); 237 static void sk_watchdog(struct ifnet *); 238 static int sk_ifmedia_upd(struct ifnet *); 239 static void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *); 240 static void sk_reset(struct sk_softc *); 241 static __inline void sk_discard_rxbuf(struct sk_if_softc *, int); 242 static __inline void sk_discard_jumbo_rxbuf(struct sk_if_softc *, int); 243 static int sk_newbuf(struct sk_if_softc *, int); 244 static int sk_jumbo_newbuf(struct sk_if_softc *, int); 245 static void sk_dmamap_cb(void *, bus_dma_segment_t *, int, int); 246 static int sk_dma_alloc(struct sk_if_softc *); 247 static void sk_dma_free(struct sk_if_softc *); 248 static void *sk_jalloc(struct sk_if_softc *); 249 static void sk_jfree(void *, void *); 250 static int sk_init_rx_ring(struct sk_if_softc *); 251 static int sk_init_jumbo_rx_ring(struct sk_if_softc *); 252 static void sk_init_tx_ring(struct sk_if_softc *); 253 static u_int32_t sk_win_read_4(struct sk_softc *, int); 254 static u_int16_t sk_win_read_2(struct sk_softc *, int); 255 static u_int8_t sk_win_read_1(struct sk_softc *, int); 256 static void sk_win_write_4(struct sk_softc *, int, u_int32_t); 257 static void sk_win_write_2(struct sk_softc *, int, u_int32_t); 258 static void sk_win_write_1(struct sk_softc *, int, u_int32_t); 259 260 static int sk_miibus_readreg(device_t, int, int); 261 static int sk_miibus_writereg(device_t, int, int, int); 262 static void sk_miibus_statchg(device_t); 263 264 static int sk_xmac_miibus_readreg(struct sk_if_softc *, int, int); 265 static int sk_xmac_miibus_writereg(struct sk_if_softc *, int, int, 266 int); 267 static void sk_xmac_miibus_statchg(struct sk_if_softc *); 268 269 static int sk_marv_miibus_readreg(struct sk_if_softc *, int, int); 270 static int sk_marv_miibus_writereg(struct sk_if_softc *, int, int, 271 int); 272 static void sk_marv_miibus_statchg(struct sk_if_softc *); 273 274 static uint32_t sk_xmchash(const uint8_t *); 275 static uint32_t sk_gmchash(const uint8_t *); 276 static void sk_setfilt(struct sk_if_softc *, u_int16_t *, int); 277 static void sk_setmulti(struct sk_if_softc *); 278 static void sk_setpromisc(struct sk_if_softc *); 279 280 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high); 281 static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS); 282 283 /* 284 * It seems that SK-NET GENESIS supports very simple checksum offload 285 * capability for Tx and I believe it can generate 0 checksum value for 286 * UDP packets in Tx as the hardware can't differenciate UDP packets from 287 * TCP packets. 0 chcecksum value for UDP packet is an invalid one as it 288 * means sender didn't perforam checksum computation. For the safety I 289 * disabled UDP checksum offload capability at the moment. Alternatively 290 * we can intrduce a LINK0/LINK1 flag as hme(4) did in its Tx checksum 291 * offload routine. 292 */ 293 #define SK_CSUM_FEATURES (CSUM_TCP) 294 295 /* 296 * Note that we have newbus methods for both the GEnesis controller 297 * itself and the XMAC(s). The XMACs are children of the GEnesis, and 298 * the miibus code is a child of the XMACs. We need to do it this way 299 * so that the miibus drivers can access the PHY registers on the 300 * right PHY. It's not quite what I had in mind, but it's the only 301 * design that achieves the desired effect. 302 */ 303 static device_method_t skc_methods[] = { 304 /* Device interface */ 305 DEVMETHOD(device_probe, skc_probe), 306 DEVMETHOD(device_attach, skc_attach), 307 DEVMETHOD(device_detach, skc_detach), 308 DEVMETHOD(device_suspend, skc_suspend), 309 DEVMETHOD(device_resume, skc_resume), 310 DEVMETHOD(device_shutdown, skc_shutdown), 311 312 /* bus interface */ 313 DEVMETHOD(bus_print_child, bus_generic_print_child), 314 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 315 316 { 0, 0 } 317 }; 318 319 static driver_t skc_driver = { 320 "skc", 321 skc_methods, 322 sizeof(struct sk_softc) 323 }; 324 325 static devclass_t skc_devclass; 326 327 static device_method_t sk_methods[] = { 328 /* Device interface */ 329 DEVMETHOD(device_probe, sk_probe), 330 DEVMETHOD(device_attach, sk_attach), 331 DEVMETHOD(device_detach, sk_detach), 332 DEVMETHOD(device_shutdown, bus_generic_shutdown), 333 334 /* bus interface */ 335 DEVMETHOD(bus_print_child, bus_generic_print_child), 336 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 337 338 /* MII interface */ 339 DEVMETHOD(miibus_readreg, sk_miibus_readreg), 340 DEVMETHOD(miibus_writereg, sk_miibus_writereg), 341 DEVMETHOD(miibus_statchg, sk_miibus_statchg), 342 343 { 0, 0 } 344 }; 345 346 static driver_t sk_driver = { 347 "sk", 348 sk_methods, 349 sizeof(struct sk_if_softc) 350 }; 351 352 static devclass_t sk_devclass; 353 354 DRIVER_MODULE(skc, pci, skc_driver, skc_devclass, 0, 0); 355 DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0); 356 DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0); 357 358 static struct resource_spec sk_res_spec_io[] = { 359 { SYS_RES_IOPORT, PCIR_BAR(1), RF_ACTIVE }, 360 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, 361 { -1, 0, 0 } 362 }; 363 364 static struct resource_spec sk_res_spec_mem[] = { 365 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE }, 366 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, 367 { -1, 0, 0 } 368 }; 369 370 #define SK_SETBIT(sc, reg, x) \ 371 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x) 372 373 #define SK_CLRBIT(sc, reg, x) \ 374 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x) 375 376 #define SK_WIN_SETBIT_4(sc, reg, x) \ 377 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x) 378 379 #define SK_WIN_CLRBIT_4(sc, reg, x) \ 380 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x) 381 382 #define SK_WIN_SETBIT_2(sc, reg, x) \ 383 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x) 384 385 #define SK_WIN_CLRBIT_2(sc, reg, x) \ 386 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x) 387 388 static u_int32_t 389 sk_win_read_4(sc, reg) 390 struct sk_softc *sc; 391 int reg; 392 { 393 #ifdef SK_USEIOSPACE 394 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 395 return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg))); 396 #else 397 return(CSR_READ_4(sc, reg)); 398 #endif 399 } 400 401 static u_int16_t 402 sk_win_read_2(sc, reg) 403 struct sk_softc *sc; 404 int reg; 405 { 406 #ifdef SK_USEIOSPACE 407 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 408 return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg))); 409 #else 410 return(CSR_READ_2(sc, reg)); 411 #endif 412 } 413 414 static u_int8_t 415 sk_win_read_1(sc, reg) 416 struct sk_softc *sc; 417 int reg; 418 { 419 #ifdef SK_USEIOSPACE 420 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 421 return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg))); 422 #else 423 return(CSR_READ_1(sc, reg)); 424 #endif 425 } 426 427 static void 428 sk_win_write_4(sc, reg, val) 429 struct sk_softc *sc; 430 int reg; 431 u_int32_t val; 432 { 433 #ifdef SK_USEIOSPACE 434 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 435 CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val); 436 #else 437 CSR_WRITE_4(sc, reg, val); 438 #endif 439 return; 440 } 441 442 static void 443 sk_win_write_2(sc, reg, val) 444 struct sk_softc *sc; 445 int reg; 446 u_int32_t val; 447 { 448 #ifdef SK_USEIOSPACE 449 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 450 CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val); 451 #else 452 CSR_WRITE_2(sc, reg, val); 453 #endif 454 return; 455 } 456 457 static void 458 sk_win_write_1(sc, reg, val) 459 struct sk_softc *sc; 460 int reg; 461 u_int32_t val; 462 { 463 #ifdef SK_USEIOSPACE 464 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 465 CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val); 466 #else 467 CSR_WRITE_1(sc, reg, val); 468 #endif 469 return; 470 } 471 472 static int 473 sk_miibus_readreg(dev, phy, reg) 474 device_t dev; 475 int phy, reg; 476 { 477 struct sk_if_softc *sc_if; 478 int v; 479 480 sc_if = device_get_softc(dev); 481 482 SK_IF_MII_LOCK(sc_if); 483 switch(sc_if->sk_softc->sk_type) { 484 case SK_GENESIS: 485 v = sk_xmac_miibus_readreg(sc_if, phy, reg); 486 break; 487 case SK_YUKON: 488 case SK_YUKON_LITE: 489 case SK_YUKON_LP: 490 case SK_YUKON_EC: 491 v = sk_marv_miibus_readreg(sc_if, phy, reg); 492 break; 493 default: 494 v = 0; 495 break; 496 } 497 SK_IF_MII_UNLOCK(sc_if); 498 499 return (v); 500 } 501 502 static int 503 sk_miibus_writereg(dev, phy, reg, val) 504 device_t dev; 505 int phy, reg, val; 506 { 507 struct sk_if_softc *sc_if; 508 int v; 509 510 sc_if = device_get_softc(dev); 511 512 SK_IF_MII_LOCK(sc_if); 513 switch(sc_if->sk_softc->sk_type) { 514 case SK_GENESIS: 515 v = sk_xmac_miibus_writereg(sc_if, phy, reg, val); 516 break; 517 case SK_YUKON: 518 case SK_YUKON_LITE: 519 case SK_YUKON_LP: 520 case SK_YUKON_EC: 521 v = sk_marv_miibus_writereg(sc_if, phy, reg, val); 522 break; 523 default: 524 v = 0; 525 break; 526 } 527 SK_IF_MII_UNLOCK(sc_if); 528 529 return (v); 530 } 531 532 static void 533 sk_miibus_statchg(dev) 534 device_t dev; 535 { 536 struct sk_if_softc *sc_if; 537 538 sc_if = device_get_softc(dev); 539 540 SK_IF_MII_LOCK(sc_if); 541 switch(sc_if->sk_softc->sk_type) { 542 case SK_GENESIS: 543 sk_xmac_miibus_statchg(sc_if); 544 break; 545 case SK_YUKON: 546 case SK_YUKON_LITE: 547 case SK_YUKON_LP: 548 case SK_YUKON_EC: 549 sk_marv_miibus_statchg(sc_if); 550 break; 551 } 552 SK_IF_MII_UNLOCK(sc_if); 553 554 return; 555 } 556 557 static int 558 sk_xmac_miibus_readreg(sc_if, phy, reg) 559 struct sk_if_softc *sc_if; 560 int phy, reg; 561 { 562 int i; 563 564 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0) 565 return(0); 566 567 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8)); 568 SK_XM_READ_2(sc_if, XM_PHY_DATA); 569 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) { 570 for (i = 0; i < SK_TIMEOUT; i++) { 571 DELAY(1); 572 if (SK_XM_READ_2(sc_if, XM_MMUCMD) & 573 XM_MMUCMD_PHYDATARDY) 574 break; 575 } 576 577 if (i == SK_TIMEOUT) { 578 if_printf(sc_if->sk_ifp, "phy failed to come ready\n"); 579 return(0); 580 } 581 } 582 DELAY(1); 583 i = SK_XM_READ_2(sc_if, XM_PHY_DATA); 584 585 return(i); 586 } 587 588 static int 589 sk_xmac_miibus_writereg(sc_if, phy, reg, val) 590 struct sk_if_softc *sc_if; 591 int phy, reg, val; 592 { 593 int i; 594 595 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8)); 596 for (i = 0; i < SK_TIMEOUT; i++) { 597 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY)) 598 break; 599 } 600 601 if (i == SK_TIMEOUT) { 602 if_printf(sc_if->sk_ifp, "phy failed to come ready\n"); 603 return (ETIMEDOUT); 604 } 605 606 SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val); 607 for (i = 0; i < SK_TIMEOUT; i++) { 608 DELAY(1); 609 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY)) 610 break; 611 } 612 if (i == SK_TIMEOUT) 613 if_printf(sc_if->sk_ifp, "phy write timed out\n"); 614 615 return(0); 616 } 617 618 static void 619 sk_xmac_miibus_statchg(sc_if) 620 struct sk_if_softc *sc_if; 621 { 622 struct mii_data *mii; 623 624 mii = device_get_softc(sc_if->sk_miibus); 625 626 /* 627 * If this is a GMII PHY, manually set the XMAC's 628 * duplex mode accordingly. 629 */ 630 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) { 631 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 632 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX); 633 } else { 634 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX); 635 } 636 } 637 } 638 639 static int 640 sk_marv_miibus_readreg(sc_if, phy, reg) 641 struct sk_if_softc *sc_if; 642 int phy, reg; 643 { 644 u_int16_t val; 645 int i; 646 647 if (phy != 0 || 648 (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER && 649 sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) { 650 return(0); 651 } 652 653 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) | 654 YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ); 655 656 for (i = 0; i < SK_TIMEOUT; i++) { 657 DELAY(1); 658 val = SK_YU_READ_2(sc_if, YUKON_SMICR); 659 if (val & YU_SMICR_READ_VALID) 660 break; 661 } 662 663 if (i == SK_TIMEOUT) { 664 if_printf(sc_if->sk_ifp, "phy failed to come ready\n"); 665 return(0); 666 } 667 668 val = SK_YU_READ_2(sc_if, YUKON_SMIDR); 669 670 return(val); 671 } 672 673 static int 674 sk_marv_miibus_writereg(sc_if, phy, reg, val) 675 struct sk_if_softc *sc_if; 676 int phy, reg, val; 677 { 678 int i; 679 680 SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val); 681 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) | 682 YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE); 683 684 for (i = 0; i < SK_TIMEOUT; i++) { 685 DELAY(1); 686 if ((SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY) == 0) 687 break; 688 } 689 if (i == SK_TIMEOUT) 690 if_printf(sc_if->sk_ifp, "phy write timeout\n"); 691 692 return(0); 693 } 694 695 static void 696 sk_marv_miibus_statchg(sc_if) 697 struct sk_if_softc *sc_if; 698 { 699 return; 700 } 701 702 #define HASH_BITS 6 703 704 static u_int32_t 705 sk_xmchash(addr) 706 const uint8_t *addr; 707 { 708 uint32_t crc; 709 710 /* Compute CRC for the address value. */ 711 crc = ether_crc32_le(addr, ETHER_ADDR_LEN); 712 713 return (~crc & ((1 << HASH_BITS) - 1)); 714 } 715 716 /* gmchash is just a big endian crc */ 717 static u_int32_t 718 sk_gmchash(addr) 719 const uint8_t *addr; 720 { 721 uint32_t crc; 722 723 /* Compute CRC for the address value. */ 724 crc = ether_crc32_be(addr, ETHER_ADDR_LEN); 725 726 return (crc & ((1 << HASH_BITS) - 1)); 727 } 728 729 static void 730 sk_setfilt(sc_if, addr, slot) 731 struct sk_if_softc *sc_if; 732 u_int16_t *addr; 733 int slot; 734 { 735 int base; 736 737 base = XM_RXFILT_ENTRY(slot); 738 739 SK_XM_WRITE_2(sc_if, base, addr[0]); 740 SK_XM_WRITE_2(sc_if, base + 2, addr[1]); 741 SK_XM_WRITE_2(sc_if, base + 4, addr[2]); 742 743 return; 744 } 745 746 static void 747 sk_setmulti(sc_if) 748 struct sk_if_softc *sc_if; 749 { 750 struct sk_softc *sc = sc_if->sk_softc; 751 struct ifnet *ifp = sc_if->sk_ifp; 752 u_int32_t hashes[2] = { 0, 0 }; 753 int h = 0, i; 754 struct ifmultiaddr *ifma; 755 u_int16_t dummy[] = { 0, 0, 0 }; 756 u_int16_t maddr[(ETHER_ADDR_LEN+1)/2]; 757 758 SK_IF_LOCK_ASSERT(sc_if); 759 760 /* First, zot all the existing filters. */ 761 switch(sc->sk_type) { 762 case SK_GENESIS: 763 for (i = 1; i < XM_RXFILT_MAX; i++) 764 sk_setfilt(sc_if, dummy, i); 765 766 SK_XM_WRITE_4(sc_if, XM_MAR0, 0); 767 SK_XM_WRITE_4(sc_if, XM_MAR2, 0); 768 break; 769 case SK_YUKON: 770 case SK_YUKON_LITE: 771 case SK_YUKON_LP: 772 case SK_YUKON_EC: 773 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0); 774 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0); 775 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0); 776 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0); 777 break; 778 } 779 780 /* Now program new ones. */ 781 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 782 hashes[0] = 0xFFFFFFFF; 783 hashes[1] = 0xFFFFFFFF; 784 } else { 785 i = 1; 786 IF_ADDR_LOCK(ifp); 787 TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) { 788 if (ifma->ifma_addr->sa_family != AF_LINK) 789 continue; 790 /* 791 * Program the first XM_RXFILT_MAX multicast groups 792 * into the perfect filter. For all others, 793 * use the hash table. 794 */ 795 if (sc->sk_type == SK_GENESIS && i < XM_RXFILT_MAX) { 796 bcopy(LLADDR( 797 (struct sockaddr_dl *)ifma->ifma_addr), 798 maddr, ETHER_ADDR_LEN); 799 sk_setfilt(sc_if, maddr, i); 800 i++; 801 continue; 802 } 803 804 switch(sc->sk_type) { 805 case SK_GENESIS: 806 bcopy(LLADDR( 807 (struct sockaddr_dl *)ifma->ifma_addr), 808 maddr, ETHER_ADDR_LEN); 809 h = sk_xmchash((const uint8_t *)maddr); 810 break; 811 case SK_YUKON: 812 case SK_YUKON_LITE: 813 case SK_YUKON_LP: 814 case SK_YUKON_EC: 815 bcopy(LLADDR( 816 (struct sockaddr_dl *)ifma->ifma_addr), 817 maddr, ETHER_ADDR_LEN); 818 h = sk_gmchash((const uint8_t *)maddr); 819 break; 820 } 821 if (h < 32) 822 hashes[0] |= (1 << h); 823 else 824 hashes[1] |= (1 << (h - 32)); 825 } 826 IF_ADDR_UNLOCK(ifp); 827 } 828 829 switch(sc->sk_type) { 830 case SK_GENESIS: 831 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH| 832 XM_MODE_RX_USE_PERFECT); 833 SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]); 834 SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]); 835 break; 836 case SK_YUKON: 837 case SK_YUKON_LITE: 838 case SK_YUKON_LP: 839 case SK_YUKON_EC: 840 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff); 841 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff); 842 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff); 843 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff); 844 break; 845 } 846 847 return; 848 } 849 850 static void 851 sk_setpromisc(sc_if) 852 struct sk_if_softc *sc_if; 853 { 854 struct sk_softc *sc = sc_if->sk_softc; 855 struct ifnet *ifp = sc_if->sk_ifp; 856 857 SK_IF_LOCK_ASSERT(sc_if); 858 859 switch(sc->sk_type) { 860 case SK_GENESIS: 861 if (ifp->if_flags & IFF_PROMISC) { 862 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC); 863 } else { 864 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC); 865 } 866 break; 867 case SK_YUKON: 868 case SK_YUKON_LITE: 869 case SK_YUKON_LP: 870 case SK_YUKON_EC: 871 if (ifp->if_flags & IFF_PROMISC) { 872 SK_YU_CLRBIT_2(sc_if, YUKON_RCR, 873 YU_RCR_UFLEN | YU_RCR_MUFLEN); 874 } else { 875 SK_YU_SETBIT_2(sc_if, YUKON_RCR, 876 YU_RCR_UFLEN | YU_RCR_MUFLEN); 877 } 878 break; 879 } 880 881 return; 882 } 883 884 static int 885 sk_init_rx_ring(sc_if) 886 struct sk_if_softc *sc_if; 887 { 888 struct sk_ring_data *rd; 889 bus_addr_t addr; 890 u_int32_t csum_start; 891 int i; 892 893 sc_if->sk_cdata.sk_rx_cons = 0; 894 895 csum_start = (ETHER_HDR_LEN + sizeof(struct ip)) << 16 | 896 ETHER_HDR_LEN; 897 rd = &sc_if->sk_rdata; 898 bzero(rd->sk_rx_ring, sizeof(struct sk_rx_desc) * SK_RX_RING_CNT); 899 for (i = 0; i < SK_RX_RING_CNT; i++) { 900 if (sk_newbuf(sc_if, i) != 0) 901 return (ENOBUFS); 902 if (i == (SK_RX_RING_CNT - 1)) 903 addr = SK_RX_RING_ADDR(sc_if, 0); 904 else 905 addr = SK_RX_RING_ADDR(sc_if, i + 1); 906 rd->sk_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr)); 907 rd->sk_rx_ring[i].sk_csum_start = htole32(csum_start); 908 } 909 910 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag, 911 sc_if->sk_cdata.sk_rx_ring_map, 912 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 913 914 return(0); 915 } 916 917 static int 918 sk_init_jumbo_rx_ring(sc_if) 919 struct sk_if_softc *sc_if; 920 { 921 struct sk_ring_data *rd; 922 bus_addr_t addr; 923 u_int32_t csum_start; 924 int i; 925 926 sc_if->sk_cdata.sk_jumbo_rx_cons = 0; 927 928 csum_start = ((ETHER_HDR_LEN + sizeof(struct ip)) << 16) | 929 ETHER_HDR_LEN; 930 rd = &sc_if->sk_rdata; 931 bzero(rd->sk_jumbo_rx_ring, 932 sizeof(struct sk_rx_desc) * SK_JUMBO_RX_RING_CNT); 933 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) { 934 if (sk_jumbo_newbuf(sc_if, i) != 0) 935 return (ENOBUFS); 936 if (i == (SK_JUMBO_RX_RING_CNT - 1)) 937 addr = SK_JUMBO_RX_RING_ADDR(sc_if, 0); 938 else 939 addr = SK_JUMBO_RX_RING_ADDR(sc_if, i + 1); 940 rd->sk_jumbo_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr)); 941 rd->sk_jumbo_rx_ring[i].sk_csum_start = htole32(csum_start); 942 } 943 944 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, 945 sc_if->sk_cdata.sk_jumbo_rx_ring_map, 946 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 947 948 return (0); 949 } 950 951 static void 952 sk_init_tx_ring(sc_if) 953 struct sk_if_softc *sc_if; 954 { 955 struct sk_ring_data *rd; 956 struct sk_txdesc *txd; 957 bus_addr_t addr; 958 int i; 959 960 STAILQ_INIT(&sc_if->sk_cdata.sk_txfreeq); 961 STAILQ_INIT(&sc_if->sk_cdata.sk_txbusyq); 962 963 sc_if->sk_cdata.sk_tx_prod = 0; 964 sc_if->sk_cdata.sk_tx_cons = 0; 965 sc_if->sk_cdata.sk_tx_cnt = 0; 966 967 rd = &sc_if->sk_rdata; 968 bzero(rd->sk_tx_ring, sizeof(struct sk_tx_desc) * SK_TX_RING_CNT); 969 for (i = 0; i < SK_TX_RING_CNT; i++) { 970 if (i == (SK_TX_RING_CNT - 1)) 971 addr = SK_TX_RING_ADDR(sc_if, 0); 972 else 973 addr = SK_TX_RING_ADDR(sc_if, i + 1); 974 rd->sk_tx_ring[i].sk_next = htole32(SK_ADDR_LO(addr)); 975 txd = &sc_if->sk_cdata.sk_txdesc[i]; 976 STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q); 977 } 978 979 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag, 980 sc_if->sk_cdata.sk_tx_ring_map, 981 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 982 } 983 984 static __inline void 985 sk_discard_rxbuf(sc_if, idx) 986 struct sk_if_softc *sc_if; 987 int idx; 988 { 989 struct sk_rx_desc *r; 990 struct sk_rxdesc *rxd; 991 struct mbuf *m; 992 993 994 r = &sc_if->sk_rdata.sk_rx_ring[idx]; 995 rxd = &sc_if->sk_cdata.sk_rxdesc[idx]; 996 m = rxd->rx_m; 997 r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM); 998 } 999 1000 static __inline void 1001 sk_discard_jumbo_rxbuf(sc_if, idx) 1002 struct sk_if_softc *sc_if; 1003 int idx; 1004 { 1005 struct sk_rx_desc *r; 1006 struct sk_rxdesc *rxd; 1007 struct mbuf *m; 1008 1009 r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx]; 1010 rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx]; 1011 m = rxd->rx_m; 1012 r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM); 1013 } 1014 1015 static int 1016 sk_newbuf(sc_if, idx) 1017 struct sk_if_softc *sc_if; 1018 int idx; 1019 { 1020 struct sk_rx_desc *r; 1021 struct sk_rxdesc *rxd; 1022 struct mbuf *m; 1023 bus_dma_segment_t segs[1]; 1024 bus_dmamap_t map; 1025 int nsegs; 1026 1027 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1028 if (m == NULL) 1029 return (ENOBUFS); 1030 m->m_len = m->m_pkthdr.len = MCLBYTES; 1031 m_adj(m, ETHER_ALIGN); 1032 1033 if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_rx_tag, 1034 sc_if->sk_cdata.sk_rx_sparemap, m, segs, &nsegs, 0) != 0) { 1035 m_freem(m); 1036 return (ENOBUFS); 1037 } 1038 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1039 1040 rxd = &sc_if->sk_cdata.sk_rxdesc[idx]; 1041 if (rxd->rx_m != NULL) { 1042 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap, 1043 BUS_DMASYNC_POSTREAD); 1044 bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap); 1045 } 1046 map = rxd->rx_dmamap; 1047 rxd->rx_dmamap = sc_if->sk_cdata.sk_rx_sparemap; 1048 sc_if->sk_cdata.sk_rx_sparemap = map; 1049 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap, 1050 BUS_DMASYNC_PREREAD); 1051 rxd->rx_m = m; 1052 r = &sc_if->sk_rdata.sk_rx_ring[idx]; 1053 r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr)); 1054 r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr)); 1055 r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM); 1056 1057 return (0); 1058 } 1059 1060 static int 1061 sk_jumbo_newbuf(sc_if, idx) 1062 struct sk_if_softc *sc_if; 1063 int idx; 1064 { 1065 struct sk_rx_desc *r; 1066 struct sk_rxdesc *rxd; 1067 struct mbuf *m; 1068 bus_dma_segment_t segs[1]; 1069 bus_dmamap_t map; 1070 int nsegs; 1071 void *buf; 1072 1073 MGETHDR(m, M_DONTWAIT, MT_DATA); 1074 if (m == NULL) 1075 return (ENOBUFS); 1076 buf = sk_jalloc(sc_if); 1077 if (buf == NULL) { 1078 m_freem(m); 1079 return (ENOBUFS); 1080 } 1081 /* Attach the buffer to the mbuf */ 1082 MEXTADD(m, buf, SK_JLEN, sk_jfree, (struct sk_if_softc *)sc_if, 0, 1083 EXT_NET_DRV); 1084 if ((m->m_flags & M_EXT) == 0) { 1085 m_freem(m); 1086 return (ENOBUFS); 1087 } 1088 m->m_pkthdr.len = m->m_len = SK_JLEN; 1089 /* 1090 * Adjust alignment so packet payload begins on a 1091 * longword boundary. Mandatory for Alpha, useful on 1092 * x86 too. 1093 */ 1094 m_adj(m, ETHER_ALIGN); 1095 1096 if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_jumbo_rx_tag, 1097 sc_if->sk_cdata.sk_jumbo_rx_sparemap, m, segs, &nsegs, 0) != 0) { 1098 m_freem(m); 1099 return (ENOBUFS); 1100 } 1101 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1102 1103 rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx]; 1104 if (rxd->rx_m != NULL) { 1105 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap, 1106 BUS_DMASYNC_POSTREAD); 1107 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag, 1108 rxd->rx_dmamap); 1109 } 1110 map = rxd->rx_dmamap; 1111 rxd->rx_dmamap = sc_if->sk_cdata.sk_jumbo_rx_sparemap; 1112 sc_if->sk_cdata.sk_jumbo_rx_sparemap = map; 1113 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap, 1114 BUS_DMASYNC_PREREAD); 1115 rxd->rx_m = m; 1116 r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx]; 1117 r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr)); 1118 r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr)); 1119 r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM); 1120 1121 return (0); 1122 } 1123 1124 /* 1125 * Set media options. 1126 */ 1127 static int 1128 sk_ifmedia_upd(ifp) 1129 struct ifnet *ifp; 1130 { 1131 struct sk_if_softc *sc_if = ifp->if_softc; 1132 struct mii_data *mii; 1133 1134 mii = device_get_softc(sc_if->sk_miibus); 1135 sk_init(sc_if); 1136 mii_mediachg(mii); 1137 1138 return(0); 1139 } 1140 1141 /* 1142 * Report current media status. 1143 */ 1144 static void 1145 sk_ifmedia_sts(ifp, ifmr) 1146 struct ifnet *ifp; 1147 struct ifmediareq *ifmr; 1148 { 1149 struct sk_if_softc *sc_if; 1150 struct mii_data *mii; 1151 1152 sc_if = ifp->if_softc; 1153 mii = device_get_softc(sc_if->sk_miibus); 1154 1155 mii_pollstat(mii); 1156 ifmr->ifm_active = mii->mii_media_active; 1157 ifmr->ifm_status = mii->mii_media_status; 1158 1159 return; 1160 } 1161 1162 static int 1163 sk_ioctl(ifp, command, data) 1164 struct ifnet *ifp; 1165 u_long command; 1166 caddr_t data; 1167 { 1168 struct sk_if_softc *sc_if = ifp->if_softc; 1169 struct ifreq *ifr = (struct ifreq *) data; 1170 int error, mask; 1171 struct mii_data *mii; 1172 1173 error = 0; 1174 switch(command) { 1175 case SIOCSIFMTU: 1176 SK_IF_LOCK(sc_if); 1177 if (ifr->ifr_mtu > SK_JUMBO_MTU) 1178 error = EINVAL; 1179 else { 1180 ifp->if_mtu = ifr->ifr_mtu; 1181 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1182 sk_init_locked(sc_if); 1183 } 1184 SK_IF_UNLOCK(sc_if); 1185 break; 1186 case SIOCSIFFLAGS: 1187 SK_IF_LOCK(sc_if); 1188 if (ifp->if_flags & IFF_UP) { 1189 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1190 if ((ifp->if_flags ^ sc_if->sk_if_flags) 1191 & IFF_PROMISC) { 1192 sk_setpromisc(sc_if); 1193 sk_setmulti(sc_if); 1194 } 1195 } else 1196 sk_init_locked(sc_if); 1197 } else { 1198 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1199 sk_stop(sc_if); 1200 } 1201 sc_if->sk_if_flags = ifp->if_flags; 1202 SK_IF_UNLOCK(sc_if); 1203 break; 1204 case SIOCADDMULTI: 1205 case SIOCDELMULTI: 1206 SK_IF_LOCK(sc_if); 1207 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1208 sk_setmulti(sc_if); 1209 SK_IF_UNLOCK(sc_if); 1210 break; 1211 case SIOCGIFMEDIA: 1212 case SIOCSIFMEDIA: 1213 mii = device_get_softc(sc_if->sk_miibus); 1214 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 1215 break; 1216 case SIOCSIFCAP: 1217 SK_IF_LOCK(sc_if); 1218 if (sc_if->sk_softc->sk_type == SK_GENESIS) { 1219 SK_IF_UNLOCK(sc_if); 1220 break; 1221 } 1222 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1223 if (mask & IFCAP_HWCSUM) { 1224 ifp->if_capenable ^= IFCAP_HWCSUM; 1225 if (IFCAP_HWCSUM & ifp->if_capenable && 1226 IFCAP_HWCSUM & ifp->if_capabilities) 1227 ifp->if_hwassist = SK_CSUM_FEATURES; 1228 else 1229 ifp->if_hwassist = 0; 1230 } 1231 SK_IF_UNLOCK(sc_if); 1232 break; 1233 default: 1234 error = ether_ioctl(ifp, command, data); 1235 break; 1236 } 1237 1238 return (error); 1239 } 1240 1241 /* 1242 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device 1243 * IDs against our list and return a device name if we find a match. 1244 */ 1245 static int 1246 skc_probe(dev) 1247 device_t dev; 1248 { 1249 struct sk_type *t = sk_devs; 1250 1251 while(t->sk_name != NULL) { 1252 if ((pci_get_vendor(dev) == t->sk_vid) && 1253 (pci_get_device(dev) == t->sk_did)) { 1254 /* 1255 * Only attach to rev. 2 of the Linksys EG1032 adapter. 1256 * Rev. 3 is supported by re(4). 1257 */ 1258 if ((t->sk_vid == VENDORID_LINKSYS) && 1259 (t->sk_did == DEVICEID_LINKSYS_EG1032) && 1260 (pci_get_subdevice(dev) != 1261 SUBDEVICEID_LINKSYS_EG1032_REV2)) { 1262 t++; 1263 continue; 1264 } 1265 device_set_desc(dev, t->sk_name); 1266 return (BUS_PROBE_DEFAULT); 1267 } 1268 t++; 1269 } 1270 1271 return(ENXIO); 1272 } 1273 1274 /* 1275 * Force the GEnesis into reset, then bring it out of reset. 1276 */ 1277 static void 1278 sk_reset(sc) 1279 struct sk_softc *sc; 1280 { 1281 1282 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET); 1283 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET); 1284 if (SK_YUKON_FAMILY(sc->sk_type)) 1285 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET); 1286 1287 DELAY(1000); 1288 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET); 1289 DELAY(2); 1290 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET); 1291 if (SK_YUKON_FAMILY(sc->sk_type)) 1292 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR); 1293 1294 if (sc->sk_type == SK_GENESIS) { 1295 /* Configure packet arbiter */ 1296 sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET); 1297 sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT); 1298 sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT); 1299 sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT); 1300 sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT); 1301 } 1302 1303 /* Enable RAM interface */ 1304 sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET); 1305 1306 /* 1307 * Configure interrupt moderation. The moderation timer 1308 * defers interrupts specified in the interrupt moderation 1309 * timer mask based on the timeout specified in the interrupt 1310 * moderation timer init register. Each bit in the timer 1311 * register represents one tick, so to specify a timeout in 1312 * microseconds, we have to multiply by the correct number of 1313 * ticks-per-microsecond. 1314 */ 1315 switch (sc->sk_type) { 1316 case SK_GENESIS: 1317 sc->sk_int_ticks = SK_IMTIMER_TICKS_GENESIS; 1318 break; 1319 case SK_YUKON_EC: 1320 sc->sk_int_ticks = SK_IMTIMER_TICKS_YUKON_EC; 1321 break; 1322 default: 1323 sc->sk_int_ticks = SK_IMTIMER_TICKS_YUKON; 1324 break; 1325 } 1326 if (bootverbose) 1327 device_printf(sc->sk_dev, "interrupt moderation is %d us\n", 1328 sc->sk_int_mod); 1329 sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod, 1330 sc->sk_int_ticks)); 1331 sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF| 1332 SK_ISR_RX1_EOF|SK_ISR_RX2_EOF); 1333 sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START); 1334 1335 return; 1336 } 1337 1338 static int 1339 sk_probe(dev) 1340 device_t dev; 1341 { 1342 struct sk_softc *sc; 1343 1344 sc = device_get_softc(device_get_parent(dev)); 1345 1346 /* 1347 * Not much to do here. We always know there will be 1348 * at least one XMAC present, and if there are two, 1349 * skc_attach() will create a second device instance 1350 * for us. 1351 */ 1352 switch (sc->sk_type) { 1353 case SK_GENESIS: 1354 device_set_desc(dev, "XaQti Corp. XMAC II"); 1355 break; 1356 case SK_YUKON: 1357 case SK_YUKON_LITE: 1358 case SK_YUKON_LP: 1359 case SK_YUKON_EC: 1360 device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon"); 1361 break; 1362 } 1363 1364 return (BUS_PROBE_DEFAULT); 1365 } 1366 1367 /* 1368 * Each XMAC chip is attached as a separate logical IP interface. 1369 * Single port cards will have only one logical interface of course. 1370 */ 1371 static int 1372 sk_attach(dev) 1373 device_t dev; 1374 { 1375 struct sk_softc *sc; 1376 struct sk_if_softc *sc_if; 1377 struct ifnet *ifp; 1378 int i, port, error; 1379 u_char eaddr[6]; 1380 1381 if (dev == NULL) 1382 return(EINVAL); 1383 1384 error = 0; 1385 sc_if = device_get_softc(dev); 1386 sc = device_get_softc(device_get_parent(dev)); 1387 port = *(int *)device_get_ivars(dev); 1388 1389 sc_if->sk_if_dev = dev; 1390 sc_if->sk_port = port; 1391 sc_if->sk_softc = sc; 1392 sc->sk_if[port] = sc_if; 1393 if (port == SK_PORT_A) 1394 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0; 1395 if (port == SK_PORT_B) 1396 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1; 1397 1398 callout_init_mtx(&sc_if->sk_tick_ch, &sc_if->sk_softc->sk_mtx, 0); 1399 1400 if (sk_dma_alloc(sc_if) != 0) { 1401 error = ENOMEM; 1402 goto fail; 1403 } 1404 1405 ifp = sc_if->sk_ifp = if_alloc(IFT_ETHER); 1406 if (ifp == NULL) { 1407 device_printf(sc_if->sk_if_dev, "can not if_alloc()\n"); 1408 error = ENOSPC; 1409 goto fail; 1410 } 1411 ifp->if_softc = sc_if; 1412 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1413 ifp->if_mtu = ETHERMTU; 1414 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1415 /* 1416 * SK_GENESIS has a bug in checksum offload - From linux. 1417 */ 1418 if (sc_if->sk_softc->sk_type != SK_GENESIS) { 1419 ifp->if_capabilities = IFCAP_HWCSUM; 1420 ifp->if_hwassist = SK_CSUM_FEATURES; 1421 } else { 1422 ifp->if_capabilities = 0; 1423 ifp->if_hwassist = 0; 1424 } 1425 ifp->if_capenable = ifp->if_capabilities; 1426 ifp->if_ioctl = sk_ioctl; 1427 ifp->if_start = sk_start; 1428 ifp->if_watchdog = sk_watchdog; 1429 ifp->if_init = sk_init; 1430 IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1); 1431 ifp->if_snd.ifq_drv_maxlen = SK_TX_RING_CNT - 1; 1432 IFQ_SET_READY(&ifp->if_snd); 1433 1434 /* 1435 * Get station address for this interface. Note that 1436 * dual port cards actually come with three station 1437 * addresses: one for each port, plus an extra. The 1438 * extra one is used by the SysKonnect driver software 1439 * as a 'virtual' station address for when both ports 1440 * are operating in failover mode. Currently we don't 1441 * use this extra address. 1442 */ 1443 SK_IF_LOCK(sc_if); 1444 for (i = 0; i < ETHER_ADDR_LEN; i++) 1445 eaddr[i] = 1446 sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i); 1447 1448 /* 1449 * Set up RAM buffer addresses. The NIC will have a certain 1450 * amount of SRAM on it, somewhere between 512K and 2MB. We 1451 * need to divide this up a) between the transmitter and 1452 * receiver and b) between the two XMACs, if this is a 1453 * dual port NIC. Our algotithm is to divide up the memory 1454 * evenly so that everyone gets a fair share. 1455 * 1456 * Just to be contrary, Yukon2 appears to have separate memory 1457 * for each MAC. 1458 */ 1459 if (SK_IS_YUKON2(sc) || 1460 sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) { 1461 u_int32_t chunk, val; 1462 1463 chunk = sc->sk_ramsize / 2; 1464 val = sc->sk_rboff / sizeof(u_int64_t); 1465 sc_if->sk_rx_ramstart = val; 1466 val += (chunk / sizeof(u_int64_t)); 1467 sc_if->sk_rx_ramend = val - 1; 1468 sc_if->sk_tx_ramstart = val; 1469 val += (chunk / sizeof(u_int64_t)); 1470 sc_if->sk_tx_ramend = val - 1; 1471 } else { 1472 u_int32_t chunk, val; 1473 1474 chunk = sc->sk_ramsize / 4; 1475 val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) / 1476 sizeof(u_int64_t); 1477 sc_if->sk_rx_ramstart = val; 1478 val += (chunk / sizeof(u_int64_t)); 1479 sc_if->sk_rx_ramend = val - 1; 1480 sc_if->sk_tx_ramstart = val; 1481 val += (chunk / sizeof(u_int64_t)); 1482 sc_if->sk_tx_ramend = val - 1; 1483 } 1484 1485 /* Read and save PHY type and set PHY address */ 1486 sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF; 1487 if (!SK_YUKON_FAMILY(sc->sk_type)) { 1488 switch(sc_if->sk_phytype) { 1489 case SK_PHYTYPE_XMAC: 1490 sc_if->sk_phyaddr = SK_PHYADDR_XMAC; 1491 break; 1492 case SK_PHYTYPE_BCOM: 1493 sc_if->sk_phyaddr = SK_PHYADDR_BCOM; 1494 break; 1495 default: 1496 device_printf(sc->sk_dev, "unsupported PHY type: %d\n", 1497 sc_if->sk_phytype); 1498 error = ENODEV; 1499 SK_IF_UNLOCK(sc_if); 1500 goto fail; 1501 } 1502 } else { 1503 if (sc_if->sk_phytype < SK_PHYTYPE_MARV_COPPER && 1504 sc->sk_pmd != 'S') { 1505 /* not initialized, punt */ 1506 sc_if->sk_phytype = SK_PHYTYPE_MARV_COPPER; 1507 sc->sk_coppertype = 1; 1508 } 1509 1510 sc_if->sk_phyaddr = SK_PHYADDR_MARV; 1511 1512 if (!(sc->sk_coppertype)) 1513 sc_if->sk_phytype = SK_PHYTYPE_MARV_FIBER; 1514 } 1515 1516 /* 1517 * Call MI attach routine. Can't hold locks when calling into ether_*. 1518 */ 1519 SK_IF_UNLOCK(sc_if); 1520 ether_ifattach(ifp, eaddr); 1521 SK_IF_LOCK(sc_if); 1522 1523 /* 1524 * The hardware should be ready for VLAN_MTU by default: 1525 * XMAC II has 0x8100 in VLAN Tag Level 1 register initially; 1526 * YU_SMR_MFL_VLAN is set by this driver in Yukon. 1527 * 1528 */ 1529 ifp->if_capabilities |= IFCAP_VLAN_MTU; 1530 ifp->if_capenable |= IFCAP_VLAN_MTU; 1531 /* 1532 * Tell the upper layer(s) we support long frames. 1533 * Must appear after the call to ether_ifattach() because 1534 * ether_ifattach() sets ifi_hdrlen to the default value. 1535 */ 1536 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1537 1538 /* 1539 * Do miibus setup. 1540 */ 1541 switch (sc->sk_type) { 1542 case SK_GENESIS: 1543 sk_init_xmac(sc_if); 1544 break; 1545 case SK_YUKON: 1546 case SK_YUKON_LITE: 1547 case SK_YUKON_LP: 1548 case SK_YUKON_EC: 1549 sk_init_yukon(sc_if); 1550 break; 1551 } 1552 1553 SK_IF_UNLOCK(sc_if); 1554 if (mii_phy_probe(dev, &sc_if->sk_miibus, 1555 sk_ifmedia_upd, sk_ifmedia_sts)) { 1556 device_printf(sc_if->sk_if_dev, "no PHY found!\n"); 1557 ether_ifdetach(ifp); 1558 error = ENXIO; 1559 goto fail; 1560 } 1561 1562 fail: 1563 if (error) { 1564 /* Access should be ok even though lock has been dropped */ 1565 sc->sk_if[port] = NULL; 1566 sk_detach(dev); 1567 } 1568 1569 return(error); 1570 } 1571 1572 /* 1573 * Attach the interface. Allocate softc structures, do ifmedia 1574 * setup and ethernet/BPF attach. 1575 */ 1576 static int 1577 skc_attach(dev) 1578 device_t dev; 1579 { 1580 struct sk_softc *sc; 1581 int error = 0, *port, sk_macs; 1582 uint8_t skrs; 1583 const char *pname; 1584 char *revstr; 1585 1586 sc = device_get_softc(dev); 1587 sc->sk_dev = dev; 1588 1589 mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 1590 MTX_DEF); 1591 mtx_init(&sc->sk_mii_mtx, "sk_mii_mutex", NULL, MTX_DEF); 1592 /* 1593 * Map control/status registers. 1594 */ 1595 pci_enable_busmaster(dev); 1596 1597 /* Allocate resources */ 1598 #ifdef SK_USEIOSPACE 1599 sc->sk_res_spec = sk_res_spec_io; 1600 #else 1601 sc->sk_res_spec = sk_res_spec_mem; 1602 #endif 1603 error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res); 1604 if (error) { 1605 if (sc->sk_res_spec == sk_res_spec_mem) 1606 sc->sk_res_spec = sk_res_spec_io; 1607 else 1608 sc->sk_res_spec = sk_res_spec_mem; 1609 error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res); 1610 if (error) { 1611 device_printf(dev, "couldn't allocate %s resources\n", 1612 sc->sk_res_spec == sk_res_spec_mem ? "memory" : 1613 "I/O"); 1614 goto fail; 1615 } 1616 } 1617 1618 sc->sk_type = sk_win_read_1(sc, SK_CHIPVER); 1619 sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4) & 0xf; 1620 1621 /* Bail out if chip is not recognized. */ 1622 if (sc->sk_type != SK_GENESIS && !SK_YUKON_FAMILY(sc->sk_type)) { 1623 device_printf(dev, "unknown device: chipver=%02x, rev=%x\n", 1624 sc->sk_type, sc->sk_rev); 1625 error = ENXIO; 1626 goto fail; 1627 } 1628 1629 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 1630 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 1631 OID_AUTO, "int_mod", CTLTYPE_INT|CTLFLAG_RW, 1632 &sc->sk_int_mod, 0, sysctl_hw_sk_int_mod, "I", 1633 "SK interrupt moderation"); 1634 1635 /* Pull in device tunables. */ 1636 sc->sk_int_mod = SK_IM_DEFAULT; 1637 error = resource_int_value(device_get_name(dev), device_get_unit(dev), 1638 "int_mod", &sc->sk_int_mod); 1639 if (error == 0) { 1640 if (sc->sk_int_mod < SK_IM_MIN || 1641 sc->sk_int_mod > SK_IM_MAX) { 1642 device_printf(dev, "int_mod value out of range; " 1643 "using default: %d\n", SK_IM_DEFAULT); 1644 sc->sk_int_mod = SK_IM_DEFAULT; 1645 } 1646 } 1647 1648 /* Reset the adapter. */ 1649 sk_reset(sc); 1650 1651 skrs = sk_win_read_1(sc, SK_EPROM0); 1652 if (sc->sk_type == SK_GENESIS) { 1653 /* Read and save RAM size and RAMbuffer offset */ 1654 switch(skrs) { 1655 case SK_RAMSIZE_512K_64: 1656 sc->sk_ramsize = 0x80000; 1657 sc->sk_rboff = SK_RBOFF_0; 1658 break; 1659 case SK_RAMSIZE_1024K_64: 1660 sc->sk_ramsize = 0x100000; 1661 sc->sk_rboff = SK_RBOFF_80000; 1662 break; 1663 case SK_RAMSIZE_1024K_128: 1664 sc->sk_ramsize = 0x100000; 1665 sc->sk_rboff = SK_RBOFF_0; 1666 break; 1667 case SK_RAMSIZE_2048K_128: 1668 sc->sk_ramsize = 0x200000; 1669 sc->sk_rboff = SK_RBOFF_0; 1670 break; 1671 default: 1672 device_printf(dev, "unknown ram size: %d\n", skrs); 1673 error = ENXIO; 1674 goto fail; 1675 } 1676 } else { /* SK_YUKON_FAMILY */ 1677 if (skrs == 0x00) 1678 sc->sk_ramsize = 0x20000; 1679 else 1680 sc->sk_ramsize = skrs * (1<<12); 1681 sc->sk_rboff = SK_RBOFF_0; 1682 } 1683 1684 /* Read and save physical media type */ 1685 sc->sk_pmd = sk_win_read_1(sc, SK_PMDTYPE); 1686 1687 if (sc->sk_pmd == 'T' || sc->sk_pmd == '1') 1688 sc->sk_coppertype = 1; 1689 else 1690 sc->sk_coppertype = 0; 1691 1692 /* Determine whether to name it with VPD PN or just make it up. 1693 * Marvell Yukon VPD PN seems to freqently be bogus. */ 1694 switch (pci_get_device(dev)) { 1695 case DEVICEID_SK_V1: 1696 case DEVICEID_BELKIN_5005: 1697 case DEVICEID_3COM_3C940: 1698 case DEVICEID_LINKSYS_EG1032: 1699 case DEVICEID_DLINK_DGE530T_A1: 1700 case DEVICEID_DLINK_DGE530T_B1: 1701 /* Stay with VPD PN. */ 1702 if (pci_get_vpd_ident(dev, &pname)) 1703 goto vpdfailed; 1704 break; 1705 case DEVICEID_SK_V2: 1706 case DEVICEID_MRVL_4360: 1707 case DEVICEID_MRVL_4361: 1708 case DEVICEID_MRVL_4362: 1709 /* YUKON VPD PN might bear no resemblance to reality. */ 1710 switch (sc->sk_type) { 1711 case SK_GENESIS: 1712 /* Stay with VPD PN. */ 1713 if (pci_get_vpd_ident(dev, &pname)) 1714 goto vpdfailed; 1715 break; 1716 case SK_YUKON: 1717 pname = "Marvell Yukon Gigabit Ethernet"; 1718 break; 1719 case SK_YUKON_LITE: 1720 pname = "Marvell Yukon Lite Gigabit Ethernet"; 1721 break; 1722 case SK_YUKON_LP: 1723 pname = "Marvell Yukon LP Gigabit Ethernet"; 1724 break; 1725 case SK_YUKON_EC: 1726 pname = "Marvell Yukon-2 EC Gigabit Ethernet"; 1727 break; 1728 default: 1729 pname = "Marvell Yukon (Unknown) Gigabit Ethernet"; 1730 break; 1731 } 1732 1733 /* Yukon Lite Rev. A0 needs special test. */ 1734 if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) { 1735 u_int32_t far; 1736 u_int8_t testbyte; 1737 1738 /* Save flash address register before testing. */ 1739 far = sk_win_read_4(sc, SK_EP_ADDR); 1740 1741 sk_win_write_1(sc, SK_EP_ADDR+0x03, 0xff); 1742 testbyte = sk_win_read_1(sc, SK_EP_ADDR+0x03); 1743 1744 if (testbyte != 0x00) { 1745 /* Yukon Lite Rev. A0 detected. */ 1746 sc->sk_type = SK_YUKON_LITE; 1747 sc->sk_rev = SK_YUKON_LITE_REV_A0; 1748 /* Restore flash address register. */ 1749 sk_win_write_4(sc, SK_EP_ADDR, far); 1750 } 1751 } 1752 break; 1753 default: 1754 vpdfailed: 1755 device_printf(dev, "unknown device: vendor=%04x, device=%04x, " 1756 "chipver=%02x, rev=%x\n", 1757 pci_get_vendor(dev), pci_get_device(dev), 1758 sc->sk_type, sc->sk_rev); 1759 error = ENXIO; 1760 goto fail; 1761 } 1762 1763 if (sc->sk_type == SK_YUKON_LITE) { 1764 switch (sc->sk_rev) { 1765 case SK_YUKON_LITE_REV_A0: 1766 revstr = "A0"; 1767 break; 1768 case SK_YUKON_LITE_REV_A1: 1769 revstr = "A1"; 1770 break; 1771 case SK_YUKON_LITE_REV_A3: 1772 revstr = "A3"; 1773 break; 1774 default: 1775 revstr = ""; 1776 break; 1777 } 1778 } else if (sc->sk_type == SK_YUKON_EC) { 1779 switch (sc->sk_rev) { 1780 case SK_YUKON_EC_REV_A1: 1781 revstr = "A1"; 1782 break; 1783 case SK_YUKON_EC_REV_A2: 1784 revstr = "A2"; 1785 break; 1786 case SK_YUKON_EC_REV_A3: 1787 revstr = "A3"; 1788 break; 1789 default: 1790 revstr = ""; 1791 break; 1792 } 1793 } else { 1794 revstr = ""; 1795 } 1796 1797 /* Announce the product name and more VPD data if there. */ 1798 device_printf(dev, "%s rev. %s(0x%x)\n", 1799 pname != NULL ? pname : "<unknown>", revstr, sc->sk_rev); 1800 1801 if (bootverbose) { 1802 device_printf(dev, "chip ver = 0x%02x\n", sc->sk_type); 1803 device_printf(dev, "chip rev = 0x%02x\n", sc->sk_rev); 1804 device_printf(dev, "SK_EPROM0 = 0x%02x\n", skrs); 1805 device_printf(dev, "SRAM size = 0x%06x\n", sc->sk_ramsize); 1806 } 1807 1808 sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1); 1809 if (sc->sk_devs[SK_PORT_A] == NULL) { 1810 device_printf(dev, "failed to add child for PORT_A\n"); 1811 error = ENXIO; 1812 goto fail; 1813 } 1814 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT); 1815 if (port == NULL) { 1816 device_printf(dev, "failed to allocate memory for " 1817 "ivars of PORT_A\n"); 1818 error = ENXIO; 1819 goto fail; 1820 } 1821 *port = SK_PORT_A; 1822 device_set_ivars(sc->sk_devs[SK_PORT_A], port); 1823 1824 sk_macs = 1; 1825 1826 if (SK_IS_YUKON2(sc)) { 1827 u_int8_t hw; 1828 1829 hw = sk_win_read_1(sc, SK_Y2_HWRES); 1830 if ((hw & SK_Y2_HWRES_LINK_MASK) == SK_Y2_HWRES_LINK_DUAL) { 1831 if ((sk_win_read_1(sc, SK_Y2_CLKGATE) & 1832 SK_Y2_CLKGATE_LINK2_INACTIVE) == 0) 1833 sk_macs++; 1834 } 1835 } else { 1836 if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) 1837 sk_macs++; 1838 } 1839 1840 if (sk_macs > 1) { 1841 sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1); 1842 if (sc->sk_devs[SK_PORT_B] == NULL) { 1843 device_printf(dev, "failed to add child for PORT_B\n"); 1844 error = ENXIO; 1845 goto fail; 1846 } 1847 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT); 1848 if (port == NULL) { 1849 device_printf(dev, "failed to allocate memory for " 1850 "ivars of PORT_B\n"); 1851 error = ENXIO; 1852 goto fail; 1853 } 1854 *port = SK_PORT_B; 1855 device_set_ivars(sc->sk_devs[SK_PORT_B], port); 1856 } 1857 1858 /* Turn on the 'driver is loaded' LED. */ 1859 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON); 1860 1861 error = bus_generic_attach(dev); 1862 if (error) { 1863 device_printf(dev, "failed to attach port(s)\n"); 1864 goto fail; 1865 } 1866 1867 /* Hook interrupt last to avoid having to lock softc */ 1868 error = bus_setup_intr(dev, sc->sk_res[1], INTR_TYPE_NET|INTR_MPSAFE, 1869 sk_intr, sc, &sc->sk_intrhand); 1870 1871 if (error) { 1872 device_printf(dev, "couldn't set up irq\n"); 1873 goto fail; 1874 } 1875 1876 fail: 1877 if (error) 1878 skc_detach(dev); 1879 1880 return(error); 1881 } 1882 1883 /* 1884 * Shutdown hardware and free up resources. This can be called any 1885 * time after the mutex has been initialized. It is called in both 1886 * the error case in attach and the normal detach case so it needs 1887 * to be careful about only freeing resources that have actually been 1888 * allocated. 1889 */ 1890 static int 1891 sk_detach(dev) 1892 device_t dev; 1893 { 1894 struct sk_if_softc *sc_if; 1895 struct ifnet *ifp; 1896 1897 sc_if = device_get_softc(dev); 1898 KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx), 1899 ("sk mutex not initialized in sk_detach")); 1900 SK_IF_LOCK(sc_if); 1901 1902 ifp = sc_if->sk_ifp; 1903 /* These should only be active if attach_xmac succeeded */ 1904 if (device_is_attached(dev)) { 1905 sk_stop(sc_if); 1906 /* Can't hold locks while calling detach */ 1907 SK_IF_UNLOCK(sc_if); 1908 callout_drain(&sc_if->sk_tick_ch); 1909 ether_ifdetach(ifp); 1910 SK_IF_LOCK(sc_if); 1911 } 1912 if (ifp) 1913 if_free(ifp); 1914 /* 1915 * We're generally called from skc_detach() which is using 1916 * device_delete_child() to get to here. It's already trashed 1917 * miibus for us, so don't do it here or we'll panic. 1918 */ 1919 /* 1920 if (sc_if->sk_miibus != NULL) 1921 device_delete_child(dev, sc_if->sk_miibus); 1922 */ 1923 bus_generic_detach(dev); 1924 sk_dma_free(sc_if); 1925 SK_IF_UNLOCK(sc_if); 1926 1927 return(0); 1928 } 1929 1930 static int 1931 skc_detach(dev) 1932 device_t dev; 1933 { 1934 struct sk_softc *sc; 1935 1936 sc = device_get_softc(dev); 1937 KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized")); 1938 1939 if (device_is_alive(dev)) { 1940 if (sc->sk_devs[SK_PORT_A] != NULL) { 1941 free(device_get_ivars(sc->sk_devs[SK_PORT_A]), M_DEVBUF); 1942 device_delete_child(dev, sc->sk_devs[SK_PORT_A]); 1943 } 1944 if (sc->sk_devs[SK_PORT_B] != NULL) { 1945 free(device_get_ivars(sc->sk_devs[SK_PORT_B]), M_DEVBUF); 1946 device_delete_child(dev, sc->sk_devs[SK_PORT_B]); 1947 } 1948 bus_generic_detach(dev); 1949 } 1950 1951 if (sc->sk_intrhand) 1952 bus_teardown_intr(dev, sc->sk_res[1], sc->sk_intrhand); 1953 bus_release_resources(dev, sc->sk_res_spec, sc->sk_res); 1954 1955 mtx_destroy(&sc->sk_mii_mtx); 1956 mtx_destroy(&sc->sk_mtx); 1957 1958 return(0); 1959 } 1960 1961 struct sk_dmamap_arg { 1962 bus_addr_t sk_busaddr; 1963 }; 1964 1965 static void 1966 sk_dmamap_cb(arg, segs, nseg, error) 1967 void *arg; 1968 bus_dma_segment_t *segs; 1969 int nseg; 1970 int error; 1971 { 1972 struct sk_dmamap_arg *ctx; 1973 1974 if (error != 0) 1975 return; 1976 1977 ctx = arg; 1978 ctx->sk_busaddr = segs[0].ds_addr; 1979 } 1980 1981 /* 1982 * Allocate jumbo buffer storage. The SysKonnect adapters support 1983 * "jumbograms" (9K frames), although SysKonnect doesn't currently 1984 * use them in their drivers. In order for us to use them, we need 1985 * large 9K receive buffers, however standard mbuf clusters are only 1986 * 2048 bytes in size. Consequently, we need to allocate and manage 1987 * our own jumbo buffer pool. Fortunately, this does not require an 1988 * excessive amount of additional code. 1989 */ 1990 static int 1991 sk_dma_alloc(sc_if) 1992 struct sk_if_softc *sc_if; 1993 { 1994 struct sk_dmamap_arg ctx; 1995 struct sk_txdesc *txd; 1996 struct sk_rxdesc *rxd; 1997 struct sk_rxdesc *jrxd; 1998 u_int8_t *ptr; 1999 struct sk_jpool_entry *entry; 2000 int error, i; 2001 2002 mtx_init(&sc_if->sk_jlist_mtx, "sk_jlist_mtx", NULL, MTX_DEF); 2003 SLIST_INIT(&sc_if->sk_jfree_listhead); 2004 SLIST_INIT(&sc_if->sk_jinuse_listhead); 2005 2006 /* create parent tag */ 2007 /* 2008 * XXX 2009 * This driver should use BUS_SPACE_MAXADDR for lowaddr argument 2010 * in bus_dma_tag_create(9) as the NIC would support DAC mode. 2011 * However bz@ reported that it does not work on amd64 with > 4GB 2012 * RAM. Until we have more clues of the breakage, disable DAC mode 2013 * by limiting DMA address to be in 32bit address space. 2014 */ 2015 error = bus_dma_tag_create(NULL, /* parent */ 2016 1, 0, /* algnmnt, boundary */ 2017 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 2018 BUS_SPACE_MAXADDR, /* highaddr */ 2019 NULL, NULL, /* filter, filterarg */ 2020 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 2021 0, /* nsegments */ 2022 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 2023 0, /* flags */ 2024 NULL, NULL, /* lockfunc, lockarg */ 2025 &sc_if->sk_cdata.sk_parent_tag); 2026 if (error != 0) { 2027 device_printf(sc_if->sk_if_dev, 2028 "failed to create parent DMA tag\n"); 2029 goto fail; 2030 } 2031 /* create tag for Tx ring */ 2032 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ 2033 SK_RING_ALIGN, 0, /* algnmnt, boundary */ 2034 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 2035 BUS_SPACE_MAXADDR, /* highaddr */ 2036 NULL, NULL, /* filter, filterarg */ 2037 SK_TX_RING_SZ, /* maxsize */ 2038 1, /* nsegments */ 2039 SK_TX_RING_SZ, /* maxsegsize */ 2040 0, /* flags */ 2041 NULL, NULL, /* lockfunc, lockarg */ 2042 &sc_if->sk_cdata.sk_tx_ring_tag); 2043 if (error != 0) { 2044 device_printf(sc_if->sk_if_dev, 2045 "failed to allocate Tx ring DMA tag\n"); 2046 goto fail; 2047 } 2048 2049 /* create tag for Rx ring */ 2050 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ 2051 SK_RING_ALIGN, 0, /* algnmnt, boundary */ 2052 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 2053 BUS_SPACE_MAXADDR, /* highaddr */ 2054 NULL, NULL, /* filter, filterarg */ 2055 SK_RX_RING_SZ, /* maxsize */ 2056 1, /* nsegments */ 2057 SK_RX_RING_SZ, /* maxsegsize */ 2058 0, /* flags */ 2059 NULL, NULL, /* lockfunc, lockarg */ 2060 &sc_if->sk_cdata.sk_rx_ring_tag); 2061 if (error != 0) { 2062 device_printf(sc_if->sk_if_dev, 2063 "failed to allocate Rx ring DMA tag\n"); 2064 goto fail; 2065 } 2066 2067 /* create tag for jumbo Rx ring */ 2068 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ 2069 SK_RING_ALIGN, 0, /* algnmnt, boundary */ 2070 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 2071 BUS_SPACE_MAXADDR, /* highaddr */ 2072 NULL, NULL, /* filter, filterarg */ 2073 SK_JUMBO_RX_RING_SZ, /* maxsize */ 2074 1, /* nsegments */ 2075 SK_JUMBO_RX_RING_SZ, /* maxsegsize */ 2076 0, /* flags */ 2077 NULL, NULL, /* lockfunc, lockarg */ 2078 &sc_if->sk_cdata.sk_jumbo_rx_ring_tag); 2079 if (error != 0) { 2080 device_printf(sc_if->sk_if_dev, 2081 "failed to allocate jumbo Rx ring DMA tag\n"); 2082 goto fail; 2083 } 2084 2085 /* create tag for jumbo buffer blocks */ 2086 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ 2087 PAGE_SIZE, 0, /* algnmnt, boundary */ 2088 BUS_SPACE_MAXADDR, /* lowaddr */ 2089 BUS_SPACE_MAXADDR, /* highaddr */ 2090 NULL, NULL, /* filter, filterarg */ 2091 SK_JMEM, /* maxsize */ 2092 1, /* nsegments */ 2093 SK_JMEM, /* maxsegsize */ 2094 0, /* flags */ 2095 NULL, NULL, /* lockfunc, lockarg */ 2096 &sc_if->sk_cdata.sk_jumbo_tag); 2097 if (error != 0) { 2098 device_printf(sc_if->sk_if_dev, 2099 "failed to allocate jumbo Rx buffer block DMA tag\n"); 2100 goto fail; 2101 } 2102 2103 /* create tag for Tx buffers */ 2104 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ 2105 1, 0, /* algnmnt, boundary */ 2106 BUS_SPACE_MAXADDR, /* lowaddr */ 2107 BUS_SPACE_MAXADDR, /* highaddr */ 2108 NULL, NULL, /* filter, filterarg */ 2109 MCLBYTES * SK_MAXTXSEGS, /* maxsize */ 2110 SK_MAXTXSEGS, /* nsegments */ 2111 MCLBYTES, /* maxsegsize */ 2112 0, /* flags */ 2113 NULL, NULL, /* lockfunc, lockarg */ 2114 &sc_if->sk_cdata.sk_tx_tag); 2115 if (error != 0) { 2116 device_printf(sc_if->sk_if_dev, 2117 "failed to allocate Tx DMA tag\n"); 2118 goto fail; 2119 } 2120 2121 /* create tag for Rx buffers */ 2122 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ 2123 1, 0, /* algnmnt, boundary */ 2124 BUS_SPACE_MAXADDR, /* lowaddr */ 2125 BUS_SPACE_MAXADDR, /* highaddr */ 2126 NULL, NULL, /* filter, filterarg */ 2127 MCLBYTES, /* maxsize */ 2128 1, /* nsegments */ 2129 MCLBYTES, /* maxsegsize */ 2130 0, /* flags */ 2131 NULL, NULL, /* lockfunc, lockarg */ 2132 &sc_if->sk_cdata.sk_rx_tag); 2133 if (error != 0) { 2134 device_printf(sc_if->sk_if_dev, 2135 "failed to allocate Rx DMA tag\n"); 2136 goto fail; 2137 } 2138 2139 /* create tag for jumbo Rx buffers */ 2140 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ 2141 PAGE_SIZE, 0, /* algnmnt, boundary */ 2142 BUS_SPACE_MAXADDR, /* lowaddr */ 2143 BUS_SPACE_MAXADDR, /* highaddr */ 2144 NULL, NULL, /* filter, filterarg */ 2145 MCLBYTES * SK_MAXRXSEGS, /* maxsize */ 2146 SK_MAXRXSEGS, /* nsegments */ 2147 SK_JLEN, /* maxsegsize */ 2148 0, /* flags */ 2149 NULL, NULL, /* lockfunc, lockarg */ 2150 &sc_if->sk_cdata.sk_jumbo_rx_tag); 2151 if (error != 0) { 2152 device_printf(sc_if->sk_if_dev, 2153 "failed to allocate jumbo Rx DMA tag\n"); 2154 goto fail; 2155 } 2156 2157 /* allocate DMA'able memory and load the DMA map for Tx ring */ 2158 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_tx_ring_tag, 2159 (void **)&sc_if->sk_rdata.sk_tx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO, 2160 &sc_if->sk_cdata.sk_tx_ring_map); 2161 if (error != 0) { 2162 device_printf(sc_if->sk_if_dev, 2163 "failed to allocate DMA'able memory for Tx ring\n"); 2164 goto fail; 2165 } 2166 2167 ctx.sk_busaddr = 0; 2168 error = bus_dmamap_load(sc_if->sk_cdata.sk_tx_ring_tag, 2169 sc_if->sk_cdata.sk_tx_ring_map, sc_if->sk_rdata.sk_tx_ring, 2170 SK_TX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 2171 if (error != 0) { 2172 device_printf(sc_if->sk_if_dev, 2173 "failed to load DMA'able memory for Tx ring\n"); 2174 goto fail; 2175 } 2176 sc_if->sk_rdata.sk_tx_ring_paddr = ctx.sk_busaddr; 2177 2178 /* allocate DMA'able memory and load the DMA map for Rx ring */ 2179 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_rx_ring_tag, 2180 (void **)&sc_if->sk_rdata.sk_rx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO, 2181 &sc_if->sk_cdata.sk_rx_ring_map); 2182 if (error != 0) { 2183 device_printf(sc_if->sk_if_dev, 2184 "failed to allocate DMA'able memory for Rx ring\n"); 2185 goto fail; 2186 } 2187 2188 ctx.sk_busaddr = 0; 2189 error = bus_dmamap_load(sc_if->sk_cdata.sk_rx_ring_tag, 2190 sc_if->sk_cdata.sk_rx_ring_map, sc_if->sk_rdata.sk_rx_ring, 2191 SK_RX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 2192 if (error != 0) { 2193 device_printf(sc_if->sk_if_dev, 2194 "failed to load DMA'able memory for Rx ring\n"); 2195 goto fail; 2196 } 2197 sc_if->sk_rdata.sk_rx_ring_paddr = ctx.sk_busaddr; 2198 2199 /* allocate DMA'able memory and load the DMA map for jumbo Rx ring */ 2200 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, 2201 (void **)&sc_if->sk_rdata.sk_jumbo_rx_ring, 2202 BUS_DMA_NOWAIT|BUS_DMA_ZERO, &sc_if->sk_cdata.sk_jumbo_rx_ring_map); 2203 if (error != 0) { 2204 device_printf(sc_if->sk_if_dev, 2205 "failed to allocate DMA'able memory for jumbo Rx ring\n"); 2206 goto fail; 2207 } 2208 2209 ctx.sk_busaddr = 0; 2210 error = bus_dmamap_load(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, 2211 sc_if->sk_cdata.sk_jumbo_rx_ring_map, 2212 sc_if->sk_rdata.sk_jumbo_rx_ring, SK_JUMBO_RX_RING_SZ, sk_dmamap_cb, 2213 &ctx, BUS_DMA_NOWAIT); 2214 if (error != 0) { 2215 device_printf(sc_if->sk_if_dev, 2216 "failed to load DMA'able memory for jumbo Rx ring\n"); 2217 goto fail; 2218 } 2219 sc_if->sk_rdata.sk_jumbo_rx_ring_paddr = ctx.sk_busaddr; 2220 2221 /* create DMA maps for Tx buffers */ 2222 for (i = 0; i < SK_TX_RING_CNT; i++) { 2223 txd = &sc_if->sk_cdata.sk_txdesc[i]; 2224 txd->tx_m = NULL; 2225 txd->tx_dmamap = 0; 2226 error = bus_dmamap_create(sc_if->sk_cdata.sk_tx_tag, 0, 2227 &txd->tx_dmamap); 2228 if (error != 0) { 2229 device_printf(sc_if->sk_if_dev, 2230 "failed to create Tx dmamap\n"); 2231 goto fail; 2232 } 2233 } 2234 /* create DMA maps for Rx buffers */ 2235 if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0, 2236 &sc_if->sk_cdata.sk_rx_sparemap)) != 0) { 2237 device_printf(sc_if->sk_if_dev, 2238 "failed to create spare Rx dmamap\n"); 2239 goto fail; 2240 } 2241 for (i = 0; i < SK_RX_RING_CNT; i++) { 2242 rxd = &sc_if->sk_cdata.sk_rxdesc[i]; 2243 rxd->rx_m = NULL; 2244 rxd->rx_dmamap = 0; 2245 error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0, 2246 &rxd->rx_dmamap); 2247 if (error != 0) { 2248 device_printf(sc_if->sk_if_dev, 2249 "failed to create Rx dmamap\n"); 2250 goto fail; 2251 } 2252 } 2253 /* create DMA maps for jumbo Rx buffers */ 2254 if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0, 2255 &sc_if->sk_cdata.sk_jumbo_rx_sparemap)) != 0) { 2256 device_printf(sc_if->sk_if_dev, 2257 "failed to create spare jumbo Rx dmamap\n"); 2258 goto fail; 2259 } 2260 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) { 2261 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i]; 2262 jrxd->rx_m = NULL; 2263 jrxd->rx_dmamap = 0; 2264 error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0, 2265 &jrxd->rx_dmamap); 2266 if (error != 0) { 2267 device_printf(sc_if->sk_if_dev, 2268 "failed to create jumbo Rx dmamap\n"); 2269 goto fail; 2270 } 2271 } 2272 2273 /* allocate DMA'able memory and load the DMA map for jumbo buf */ 2274 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_jumbo_tag, 2275 (void **)&sc_if->sk_rdata.sk_jumbo_buf, 2276 BUS_DMA_NOWAIT|BUS_DMA_ZERO, &sc_if->sk_cdata.sk_jumbo_map); 2277 if (error != 0) { 2278 device_printf(sc_if->sk_if_dev, 2279 "failed to allocate DMA'able memory for jumbo buf\n"); 2280 goto fail; 2281 } 2282 2283 ctx.sk_busaddr = 0; 2284 error = bus_dmamap_load(sc_if->sk_cdata.sk_jumbo_tag, 2285 sc_if->sk_cdata.sk_jumbo_map, 2286 sc_if->sk_rdata.sk_jumbo_buf, SK_JMEM, sk_dmamap_cb, 2287 &ctx, BUS_DMA_NOWAIT); 2288 if (error != 0) { 2289 device_printf(sc_if->sk_if_dev, 2290 "failed to load DMA'able memory for jumbobuf\n"); 2291 goto fail; 2292 } 2293 sc_if->sk_rdata.sk_jumbo_buf_paddr = ctx.sk_busaddr; 2294 2295 /* 2296 * Now divide it up into 9K pieces and save the addresses 2297 * in an array. 2298 */ 2299 ptr = sc_if->sk_rdata.sk_jumbo_buf; 2300 for (i = 0; i < SK_JSLOTS; i++) { 2301 sc_if->sk_cdata.sk_jslots[i] = ptr; 2302 ptr += SK_JLEN; 2303 entry = malloc(sizeof(struct sk_jpool_entry), 2304 M_DEVBUF, M_NOWAIT); 2305 if (entry == NULL) { 2306 device_printf(sc_if->sk_if_dev, 2307 "no memory for jumbo buffers!\n"); 2308 error = ENOMEM; 2309 goto fail; 2310 } 2311 entry->slot = i; 2312 SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, 2313 jpool_entries); 2314 } 2315 2316 fail: 2317 return (error); 2318 } 2319 2320 static void 2321 sk_dma_free(sc_if) 2322 struct sk_if_softc *sc_if; 2323 { 2324 struct sk_txdesc *txd; 2325 struct sk_rxdesc *rxd; 2326 struct sk_rxdesc *jrxd; 2327 struct sk_jpool_entry *entry; 2328 int i; 2329 2330 SK_JLIST_LOCK(sc_if); 2331 while ((entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead))) { 2332 device_printf(sc_if->sk_if_dev, 2333 "asked to free buffer that is in use!\n"); 2334 SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries); 2335 SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, 2336 jpool_entries); 2337 } 2338 2339 while (!SLIST_EMPTY(&sc_if->sk_jfree_listhead)) { 2340 entry = SLIST_FIRST(&sc_if->sk_jfree_listhead); 2341 SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries); 2342 free(entry, M_DEVBUF); 2343 } 2344 SK_JLIST_UNLOCK(sc_if); 2345 2346 /* destroy jumbo buffer block */ 2347 if (sc_if->sk_cdata.sk_jumbo_map) 2348 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_tag, 2349 sc_if->sk_cdata.sk_jumbo_map); 2350 2351 if (sc_if->sk_rdata.sk_jumbo_buf) { 2352 bus_dmamem_free(sc_if->sk_cdata.sk_jumbo_tag, 2353 sc_if->sk_rdata.sk_jumbo_buf, 2354 sc_if->sk_cdata.sk_jumbo_map); 2355 sc_if->sk_rdata.sk_jumbo_buf = NULL; 2356 sc_if->sk_cdata.sk_jumbo_map = 0; 2357 } 2358 2359 /* Tx ring */ 2360 if (sc_if->sk_cdata.sk_tx_ring_tag) { 2361 if (sc_if->sk_cdata.sk_tx_ring_map) 2362 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_ring_tag, 2363 sc_if->sk_cdata.sk_tx_ring_map); 2364 if (sc_if->sk_cdata.sk_tx_ring_map && 2365 sc_if->sk_rdata.sk_tx_ring) 2366 bus_dmamem_free(sc_if->sk_cdata.sk_tx_ring_tag, 2367 sc_if->sk_rdata.sk_tx_ring, 2368 sc_if->sk_cdata.sk_tx_ring_map); 2369 sc_if->sk_rdata.sk_tx_ring = NULL; 2370 sc_if->sk_cdata.sk_tx_ring_map = 0; 2371 bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_ring_tag); 2372 sc_if->sk_cdata.sk_tx_ring_tag = NULL; 2373 } 2374 /* Rx ring */ 2375 if (sc_if->sk_cdata.sk_rx_ring_tag) { 2376 if (sc_if->sk_cdata.sk_rx_ring_map) 2377 bus_dmamap_unload(sc_if->sk_cdata.sk_rx_ring_tag, 2378 sc_if->sk_cdata.sk_rx_ring_map); 2379 if (sc_if->sk_cdata.sk_rx_ring_map && 2380 sc_if->sk_rdata.sk_rx_ring) 2381 bus_dmamem_free(sc_if->sk_cdata.sk_rx_ring_tag, 2382 sc_if->sk_rdata.sk_rx_ring, 2383 sc_if->sk_cdata.sk_rx_ring_map); 2384 sc_if->sk_rdata.sk_rx_ring = NULL; 2385 sc_if->sk_cdata.sk_rx_ring_map = 0; 2386 bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_ring_tag); 2387 sc_if->sk_cdata.sk_rx_ring_tag = NULL; 2388 } 2389 /* jumbo Rx ring */ 2390 if (sc_if->sk_cdata.sk_jumbo_rx_ring_tag) { 2391 if (sc_if->sk_cdata.sk_jumbo_rx_ring_map) 2392 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, 2393 sc_if->sk_cdata.sk_jumbo_rx_ring_map); 2394 if (sc_if->sk_cdata.sk_jumbo_rx_ring_map && 2395 sc_if->sk_rdata.sk_jumbo_rx_ring) 2396 bus_dmamem_free(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, 2397 sc_if->sk_rdata.sk_jumbo_rx_ring, 2398 sc_if->sk_cdata.sk_jumbo_rx_ring_map); 2399 sc_if->sk_rdata.sk_jumbo_rx_ring = NULL; 2400 sc_if->sk_cdata.sk_jumbo_rx_ring_map = 0; 2401 bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_ring_tag); 2402 sc_if->sk_cdata.sk_jumbo_rx_ring_tag = NULL; 2403 } 2404 /* Tx buffers */ 2405 if (sc_if->sk_cdata.sk_tx_tag) { 2406 for (i = 0; i < SK_TX_RING_CNT; i++) { 2407 txd = &sc_if->sk_cdata.sk_txdesc[i]; 2408 if (txd->tx_dmamap) { 2409 bus_dmamap_destroy(sc_if->sk_cdata.sk_tx_tag, 2410 txd->tx_dmamap); 2411 txd->tx_dmamap = 0; 2412 } 2413 } 2414 bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_tag); 2415 sc_if->sk_cdata.sk_tx_tag = NULL; 2416 } 2417 /* Rx buffers */ 2418 if (sc_if->sk_cdata.sk_rx_tag) { 2419 for (i = 0; i < SK_RX_RING_CNT; i++) { 2420 rxd = &sc_if->sk_cdata.sk_rxdesc[i]; 2421 if (rxd->rx_dmamap) { 2422 bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag, 2423 rxd->rx_dmamap); 2424 rxd->rx_dmamap = 0; 2425 } 2426 } 2427 if (sc_if->sk_cdata.sk_rx_sparemap) { 2428 bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag, 2429 sc_if->sk_cdata.sk_rx_sparemap); 2430 sc_if->sk_cdata.sk_rx_sparemap = 0; 2431 } 2432 bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_tag); 2433 sc_if->sk_cdata.sk_rx_tag = NULL; 2434 } 2435 /* jumbo Rx buffers */ 2436 if (sc_if->sk_cdata.sk_jumbo_rx_tag) { 2437 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) { 2438 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i]; 2439 if (jrxd->rx_dmamap) { 2440 bus_dmamap_destroy( 2441 sc_if->sk_cdata.sk_jumbo_rx_tag, 2442 jrxd->rx_dmamap); 2443 jrxd->rx_dmamap = 0; 2444 } 2445 } 2446 if (sc_if->sk_cdata.sk_jumbo_rx_sparemap) { 2447 bus_dmamap_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag, 2448 sc_if->sk_cdata.sk_jumbo_rx_sparemap); 2449 sc_if->sk_cdata.sk_jumbo_rx_sparemap = 0; 2450 } 2451 bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag); 2452 sc_if->sk_cdata.sk_jumbo_rx_tag = NULL; 2453 } 2454 2455 if (sc_if->sk_cdata.sk_parent_tag) { 2456 bus_dma_tag_destroy(sc_if->sk_cdata.sk_parent_tag); 2457 sc_if->sk_cdata.sk_parent_tag = NULL; 2458 } 2459 mtx_destroy(&sc_if->sk_jlist_mtx); 2460 } 2461 2462 /* 2463 * Allocate a jumbo buffer. 2464 */ 2465 static void * 2466 sk_jalloc(sc_if) 2467 struct sk_if_softc *sc_if; 2468 { 2469 struct sk_jpool_entry *entry; 2470 2471 SK_JLIST_LOCK(sc_if); 2472 2473 entry = SLIST_FIRST(&sc_if->sk_jfree_listhead); 2474 2475 if (entry == NULL) { 2476 SK_JLIST_UNLOCK(sc_if); 2477 return (NULL); 2478 } 2479 2480 SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries); 2481 SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries); 2482 2483 SK_JLIST_UNLOCK(sc_if); 2484 2485 return (sc_if->sk_cdata.sk_jslots[entry->slot]); 2486 } 2487 2488 /* 2489 * Release a jumbo buffer. 2490 */ 2491 static void 2492 sk_jfree(buf, args) 2493 void *buf; 2494 void *args; 2495 { 2496 struct sk_if_softc *sc_if; 2497 struct sk_jpool_entry *entry; 2498 int i; 2499 2500 /* Extract the softc struct pointer. */ 2501 sc_if = (struct sk_if_softc *)args; 2502 KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__)); 2503 2504 SK_JLIST_LOCK(sc_if); 2505 /* calculate the slot this buffer belongs to */ 2506 i = ((vm_offset_t)buf 2507 - (vm_offset_t)sc_if->sk_rdata.sk_jumbo_buf) / SK_JLEN; 2508 KASSERT(i >= 0 && i < SK_JSLOTS, 2509 ("%s: asked to free buffer that we don't manage!", __func__)); 2510 2511 entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead); 2512 KASSERT(entry != NULL, ("%s: buffer not in use!", __func__)); 2513 entry->slot = i; 2514 SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries); 2515 SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries); 2516 if (SLIST_EMPTY(&sc_if->sk_jinuse_listhead)) 2517 wakeup(sc_if); 2518 2519 SK_JLIST_UNLOCK(sc_if); 2520 } 2521 2522 static void 2523 sk_txcksum(ifp, m, f) 2524 struct ifnet *ifp; 2525 struct mbuf *m; 2526 struct sk_tx_desc *f; 2527 { 2528 struct ip *ip; 2529 u_int16_t offset; 2530 u_int8_t *p; 2531 2532 offset = sizeof(struct ip) + ETHER_HDR_LEN; 2533 for(; m && m->m_len == 0; m = m->m_next) 2534 ; 2535 if (m == NULL || m->m_len < ETHER_HDR_LEN) { 2536 if_printf(ifp, "%s: m_len < ETHER_HDR_LEN\n", __func__); 2537 /* checksum may be corrupted */ 2538 goto sendit; 2539 } 2540 if (m->m_len < ETHER_HDR_LEN + sizeof(u_int32_t)) { 2541 if (m->m_len != ETHER_HDR_LEN) { 2542 if_printf(ifp, "%s: m_len != ETHER_HDR_LEN\n", 2543 __func__); 2544 /* checksum may be corrupted */ 2545 goto sendit; 2546 } 2547 for(m = m->m_next; m && m->m_len == 0; m = m->m_next) 2548 ; 2549 if (m == NULL) { 2550 offset = sizeof(struct ip) + ETHER_HDR_LEN; 2551 /* checksum may be corrupted */ 2552 goto sendit; 2553 } 2554 ip = mtod(m, struct ip *); 2555 } else { 2556 p = mtod(m, u_int8_t *); 2557 p += ETHER_HDR_LEN; 2558 ip = (struct ip *)p; 2559 } 2560 offset = (ip->ip_hl << 2) + ETHER_HDR_LEN; 2561 2562 sendit: 2563 f->sk_csum_startval = 0; 2564 f->sk_csum_start = htole32(((offset + m->m_pkthdr.csum_data) & 0xffff) | 2565 (offset << 16)); 2566 } 2567 2568 static int 2569 sk_encap(sc_if, m_head) 2570 struct sk_if_softc *sc_if; 2571 struct mbuf **m_head; 2572 { 2573 struct sk_txdesc *txd; 2574 struct sk_tx_desc *f = NULL; 2575 struct mbuf *m; 2576 bus_dma_segment_t txsegs[SK_MAXTXSEGS]; 2577 u_int32_t cflags, frag, si, sk_ctl; 2578 int error, i, nseg; 2579 2580 SK_IF_LOCK_ASSERT(sc_if); 2581 2582 if ((txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txfreeq)) == NULL) 2583 return (ENOBUFS); 2584 2585 error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag, 2586 txd->tx_dmamap, *m_head, txsegs, &nseg, 0); 2587 if (error == EFBIG) { 2588 m = m_defrag(*m_head, M_DONTWAIT); 2589 if (m == NULL) { 2590 m_freem(*m_head); 2591 *m_head = NULL; 2592 return (ENOMEM); 2593 } 2594 *m_head = m; 2595 error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag, 2596 txd->tx_dmamap, *m_head, txsegs, &nseg, 0); 2597 if (error != 0) { 2598 m_freem(*m_head); 2599 *m_head = NULL; 2600 return (error); 2601 } 2602 } else if (error != 0) 2603 return (error); 2604 if (nseg == 0) { 2605 m_freem(*m_head); 2606 *m_head = NULL; 2607 return (EIO); 2608 } 2609 if (sc_if->sk_cdata.sk_tx_cnt + nseg >= SK_TX_RING_CNT) { 2610 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap); 2611 return (ENOBUFS); 2612 } 2613 2614 m = *m_head; 2615 if ((m->m_pkthdr.csum_flags & sc_if->sk_ifp->if_hwassist) != 0) 2616 cflags = SK_OPCODE_CSUM; 2617 else 2618 cflags = SK_OPCODE_DEFAULT; 2619 si = frag = sc_if->sk_cdata.sk_tx_prod; 2620 for (i = 0; i < nseg; i++) { 2621 f = &sc_if->sk_rdata.sk_tx_ring[frag]; 2622 f->sk_data_lo = htole32(SK_ADDR_LO(txsegs[i].ds_addr)); 2623 f->sk_data_hi = htole32(SK_ADDR_HI(txsegs[i].ds_addr)); 2624 sk_ctl = txsegs[i].ds_len | cflags; 2625 if (i == 0) { 2626 if (cflags == SK_OPCODE_CSUM) 2627 sk_txcksum(sc_if->sk_ifp, m, f); 2628 sk_ctl |= SK_TXCTL_FIRSTFRAG; 2629 } else 2630 sk_ctl |= SK_TXCTL_OWN; 2631 f->sk_ctl = htole32(sk_ctl); 2632 sc_if->sk_cdata.sk_tx_cnt++; 2633 SK_INC(frag, SK_TX_RING_CNT); 2634 } 2635 sc_if->sk_cdata.sk_tx_prod = frag; 2636 2637 /* set EOF on the last desciptor */ 2638 frag = (frag + SK_TX_RING_CNT - 1) % SK_TX_RING_CNT; 2639 f = &sc_if->sk_rdata.sk_tx_ring[frag]; 2640 f->sk_ctl |= htole32(SK_TXCTL_LASTFRAG | SK_TXCTL_EOF_INTR); 2641 2642 /* turn the first descriptor ownership to NIC */ 2643 f = &sc_if->sk_rdata.sk_tx_ring[si]; 2644 f->sk_ctl |= htole32(SK_TXCTL_OWN); 2645 2646 STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txfreeq, tx_q); 2647 STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txbusyq, txd, tx_q); 2648 txd->tx_m = m; 2649 2650 /* sync descriptors */ 2651 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap, 2652 BUS_DMASYNC_PREWRITE); 2653 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag, 2654 sc_if->sk_cdata.sk_tx_ring_map, 2655 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2656 2657 return (0); 2658 } 2659 2660 static void 2661 sk_start(ifp) 2662 struct ifnet *ifp; 2663 { 2664 struct sk_if_softc *sc_if; 2665 2666 sc_if = ifp->if_softc; 2667 2668 SK_IF_LOCK(sc_if); 2669 sk_start_locked(ifp); 2670 SK_IF_UNLOCK(sc_if); 2671 2672 return; 2673 } 2674 2675 static void 2676 sk_start_locked(ifp) 2677 struct ifnet *ifp; 2678 { 2679 struct sk_softc *sc; 2680 struct sk_if_softc *sc_if; 2681 struct mbuf *m_head; 2682 int enq; 2683 2684 sc_if = ifp->if_softc; 2685 sc = sc_if->sk_softc; 2686 2687 SK_IF_LOCK_ASSERT(sc_if); 2688 2689 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 2690 sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 1; ) { 2691 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 2692 if (m_head == NULL) 2693 break; 2694 2695 /* 2696 * Pack the data into the transmit ring. If we 2697 * don't have room, set the OACTIVE flag and wait 2698 * for the NIC to drain the ring. 2699 */ 2700 if (sk_encap(sc_if, &m_head)) { 2701 if (m_head == NULL) 2702 break; 2703 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 2704 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2705 break; 2706 } 2707 2708 enq++; 2709 /* 2710 * If there's a BPF listener, bounce a copy of this frame 2711 * to him. 2712 */ 2713 BPF_MTAP(ifp, m_head); 2714 } 2715 2716 if (enq > 0) { 2717 /* Transmit */ 2718 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START); 2719 2720 /* Set a timeout in case the chip goes out to lunch. */ 2721 ifp->if_timer = 5; 2722 } 2723 } 2724 2725 2726 static void 2727 sk_watchdog(ifp) 2728 struct ifnet *ifp; 2729 { 2730 struct sk_if_softc *sc_if; 2731 2732 sc_if = ifp->if_softc; 2733 2734 SK_IF_LOCK(sc_if); 2735 /* 2736 * Reclaim first as there is a possibility of losing Tx completion 2737 * interrupts. 2738 */ 2739 sk_txeof(sc_if); 2740 if (sc_if->sk_cdata.sk_tx_cnt != 0) { 2741 if_printf(sc_if->sk_ifp, "watchdog timeout\n"); 2742 ifp->if_oerrors++; 2743 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2744 sk_init_locked(sc_if); 2745 } 2746 SK_IF_UNLOCK(sc_if); 2747 2748 return; 2749 } 2750 2751 static void 2752 skc_shutdown(dev) 2753 device_t dev; 2754 { 2755 struct sk_softc *sc; 2756 2757 sc = device_get_softc(dev); 2758 SK_LOCK(sc); 2759 2760 /* Turn off the 'driver is loaded' LED. */ 2761 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF); 2762 2763 /* 2764 * Reset the GEnesis controller. Doing this should also 2765 * assert the resets on the attached XMAC(s). 2766 */ 2767 sk_reset(sc); 2768 SK_UNLOCK(sc); 2769 2770 return; 2771 } 2772 2773 static int 2774 skc_suspend(dev) 2775 device_t dev; 2776 { 2777 struct sk_softc *sc; 2778 struct sk_if_softc *sc_if0, *sc_if1; 2779 struct ifnet *ifp0 = NULL, *ifp1 = NULL; 2780 2781 sc = device_get_softc(dev); 2782 2783 SK_LOCK(sc); 2784 2785 sc_if0 = sc->sk_if[SK_PORT_A]; 2786 sc_if1 = sc->sk_if[SK_PORT_B]; 2787 if (sc_if0 != NULL) 2788 ifp0 = sc_if0->sk_ifp; 2789 if (sc_if1 != NULL) 2790 ifp1 = sc_if1->sk_ifp; 2791 if (ifp0 != NULL) 2792 sk_stop(sc_if0); 2793 if (ifp1 != NULL) 2794 sk_stop(sc_if1); 2795 sc->sk_suspended = 1; 2796 2797 SK_UNLOCK(sc); 2798 2799 return (0); 2800 } 2801 2802 static int 2803 skc_resume(dev) 2804 device_t dev; 2805 { 2806 struct sk_softc *sc; 2807 struct sk_if_softc *sc_if0, *sc_if1; 2808 struct ifnet *ifp0 = NULL, *ifp1 = NULL; 2809 2810 sc = device_get_softc(dev); 2811 2812 SK_LOCK(sc); 2813 2814 sc_if0 = sc->sk_if[SK_PORT_A]; 2815 sc_if1 = sc->sk_if[SK_PORT_B]; 2816 if (sc_if0 != NULL) 2817 ifp0 = sc_if0->sk_ifp; 2818 if (sc_if1 != NULL) 2819 ifp1 = sc_if1->sk_ifp; 2820 if (ifp0 != NULL && ifp0->if_flags & IFF_UP) 2821 sk_init_locked(sc_if0); 2822 if (ifp1 != NULL && ifp1->if_flags & IFF_UP) 2823 sk_init_locked(sc_if1); 2824 sc->sk_suspended = 0; 2825 2826 SK_UNLOCK(sc); 2827 2828 return (0); 2829 } 2830 2831 /* 2832 * According to the data sheet from SK-NET GENESIS the hardware can compute 2833 * two Rx checksums at the same time(Each checksum start position is 2834 * programmed in Rx descriptors). However it seems that TCP/UDP checksum 2835 * does not work at least on my Yukon hardware. I tried every possible ways 2836 * to get correct checksum value but couldn't get correct one. So TCP/UDP 2837 * checksum offload was disabled at the moment and only IP checksum offload 2838 * was enabled. 2839 * As nomral IP header size is 20 bytes I can't expect it would give an 2840 * increase in throughput. However it seems it doesn't hurt performance in 2841 * my testing. If there is a more detailed information for checksum secret 2842 * of the hardware in question please contact yongari@FreeBSD.org to add 2843 * TCP/UDP checksum offload support. 2844 */ 2845 static __inline void 2846 sk_rxcksum(ifp, m, csum) 2847 struct ifnet *ifp; 2848 struct mbuf *m; 2849 u_int32_t csum; 2850 { 2851 struct ether_header *eh; 2852 struct ip *ip; 2853 int32_t hlen, len, pktlen; 2854 u_int16_t csum1, csum2, ipcsum; 2855 2856 pktlen = m->m_pkthdr.len; 2857 if (pktlen < sizeof(struct ether_header) + sizeof(struct ip)) 2858 return; 2859 eh = mtod(m, struct ether_header *); 2860 if (eh->ether_type != htons(ETHERTYPE_IP)) 2861 return; 2862 ip = (struct ip *)(eh + 1); 2863 if (ip->ip_v != IPVERSION) 2864 return; 2865 hlen = ip->ip_hl << 2; 2866 pktlen -= sizeof(struct ether_header); 2867 if (hlen < sizeof(struct ip)) 2868 return; 2869 if (ntohs(ip->ip_len) < hlen) 2870 return; 2871 if (ntohs(ip->ip_len) != pktlen) 2872 return; 2873 2874 csum1 = htons(csum & 0xffff); 2875 csum2 = htons((csum >> 16) & 0xffff); 2876 ipcsum = in_addword(csum1, ~csum2 & 0xffff); 2877 /* checksum fixup for IP options */ 2878 len = hlen - sizeof(struct ip); 2879 if (len > 0) { 2880 /* 2881 * If the second checksum value is correct we can compute IP 2882 * checksum with simple math. Unfortunately the second checksum 2883 * value is wrong so we can't verify the checksum from the 2884 * value(It seems there is some magic here to get correct 2885 * value). If the second checksum value is correct it also 2886 * means we can get TCP/UDP checksum) here. However, it still 2887 * needs pseudo header checksum calculation due to hardware 2888 * limitations. 2889 */ 2890 return; 2891 } 2892 m->m_pkthdr.csum_flags = CSUM_IP_CHECKED; 2893 if (ipcsum == 0xffff) 2894 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2895 } 2896 2897 static __inline int 2898 sk_rxvalid(sc, stat, len) 2899 struct sk_softc *sc; 2900 u_int32_t stat, len; 2901 { 2902 2903 if (sc->sk_type == SK_GENESIS) { 2904 if ((stat & XM_RXSTAT_ERRFRAME) == XM_RXSTAT_ERRFRAME || 2905 XM_RXSTAT_BYTES(stat) != len) 2906 return (0); 2907 } else { 2908 if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR | 2909 YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | 2910 YU_RXSTAT_JABBER)) != 0 || 2911 (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK || 2912 YU_RXSTAT_BYTES(stat) != len) 2913 return (0); 2914 } 2915 2916 return (1); 2917 } 2918 2919 static void 2920 sk_rxeof(sc_if) 2921 struct sk_if_softc *sc_if; 2922 { 2923 struct sk_softc *sc; 2924 struct mbuf *m; 2925 struct ifnet *ifp; 2926 struct sk_rx_desc *cur_rx; 2927 struct sk_rxdesc *rxd; 2928 int cons, prog; 2929 u_int32_t csum, rxstat, sk_ctl; 2930 2931 sc = sc_if->sk_softc; 2932 ifp = sc_if->sk_ifp; 2933 2934 SK_IF_LOCK_ASSERT(sc_if); 2935 2936 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag, 2937 sc_if->sk_cdata.sk_rx_ring_map, BUS_DMASYNC_POSTREAD); 2938 2939 prog = 0; 2940 for (cons = sc_if->sk_cdata.sk_rx_cons; prog < SK_RX_RING_CNT; 2941 prog++, SK_INC(cons, SK_RX_RING_CNT)) { 2942 cur_rx = &sc_if->sk_rdata.sk_rx_ring[cons]; 2943 sk_ctl = le32toh(cur_rx->sk_ctl); 2944 if ((sk_ctl & SK_RXCTL_OWN) != 0) 2945 break; 2946 rxd = &sc_if->sk_cdata.sk_rxdesc[cons]; 2947 rxstat = le32toh(cur_rx->sk_xmac_rxstat); 2948 2949 if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG | 2950 SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID | 2951 SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) || 2952 SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN || 2953 SK_RXBYTES(sk_ctl) > SK_MAX_FRAMELEN || 2954 sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) { 2955 ifp->if_ierrors++; 2956 sk_discard_rxbuf(sc_if, cons); 2957 continue; 2958 } 2959 2960 m = rxd->rx_m; 2961 csum = le32toh(cur_rx->sk_csum); 2962 if (sk_newbuf(sc_if, cons) != 0) { 2963 ifp->if_iqdrops++; 2964 /* reuse old buffer */ 2965 sk_discard_rxbuf(sc_if, cons); 2966 continue; 2967 } 2968 m->m_pkthdr.rcvif = ifp; 2969 m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl); 2970 ifp->if_ipackets++; 2971 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 2972 sk_rxcksum(ifp, m, csum); 2973 SK_IF_UNLOCK(sc_if); 2974 (*ifp->if_input)(ifp, m); 2975 SK_IF_LOCK(sc_if); 2976 } 2977 2978 if (prog > 0) { 2979 sc_if->sk_cdata.sk_rx_cons = cons; 2980 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag, 2981 sc_if->sk_cdata.sk_rx_ring_map, 2982 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2983 } 2984 } 2985 2986 static void 2987 sk_jumbo_rxeof(sc_if) 2988 struct sk_if_softc *sc_if; 2989 { 2990 struct sk_softc *sc; 2991 struct mbuf *m; 2992 struct ifnet *ifp; 2993 struct sk_rx_desc *cur_rx; 2994 struct sk_rxdesc *jrxd; 2995 int cons, prog; 2996 u_int32_t csum, rxstat, sk_ctl; 2997 2998 sc = sc_if->sk_softc; 2999 ifp = sc_if->sk_ifp; 3000 3001 SK_IF_LOCK_ASSERT(sc_if); 3002 3003 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, 3004 sc_if->sk_cdata.sk_jumbo_rx_ring_map, BUS_DMASYNC_POSTREAD); 3005 3006 prog = 0; 3007 for (cons = sc_if->sk_cdata.sk_jumbo_rx_cons; 3008 prog < SK_JUMBO_RX_RING_CNT; 3009 prog++, SK_INC(cons, SK_JUMBO_RX_RING_CNT)) { 3010 cur_rx = &sc_if->sk_rdata.sk_jumbo_rx_ring[cons]; 3011 sk_ctl = le32toh(cur_rx->sk_ctl); 3012 if ((sk_ctl & SK_RXCTL_OWN) != 0) 3013 break; 3014 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[cons]; 3015 rxstat = le32toh(cur_rx->sk_xmac_rxstat); 3016 3017 if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG | 3018 SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID | 3019 SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) || 3020 SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN || 3021 SK_RXBYTES(sk_ctl) > SK_JUMBO_FRAMELEN || 3022 sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) { 3023 ifp->if_ierrors++; 3024 sk_discard_jumbo_rxbuf(sc_if, cons); 3025 continue; 3026 } 3027 3028 m = jrxd->rx_m; 3029 csum = le32toh(cur_rx->sk_csum); 3030 if (sk_jumbo_newbuf(sc_if, cons) != 0) { 3031 ifp->if_iqdrops++; 3032 /* reuse old buffer */ 3033 sk_discard_jumbo_rxbuf(sc_if, cons); 3034 continue; 3035 } 3036 m->m_pkthdr.rcvif = ifp; 3037 m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl); 3038 ifp->if_ipackets++; 3039 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 3040 sk_rxcksum(ifp, m, csum); 3041 SK_IF_UNLOCK(sc_if); 3042 (*ifp->if_input)(ifp, m); 3043 SK_IF_LOCK(sc_if); 3044 } 3045 3046 if (prog > 0) { 3047 sc_if->sk_cdata.sk_jumbo_rx_cons = cons; 3048 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, 3049 sc_if->sk_cdata.sk_jumbo_rx_ring_map, 3050 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3051 } 3052 } 3053 3054 static void 3055 sk_txeof(sc_if) 3056 struct sk_if_softc *sc_if; 3057 { 3058 struct sk_softc *sc; 3059 struct sk_txdesc *txd; 3060 struct sk_tx_desc *cur_tx; 3061 struct ifnet *ifp; 3062 u_int32_t idx, sk_ctl; 3063 3064 sc = sc_if->sk_softc; 3065 ifp = sc_if->sk_ifp; 3066 3067 txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq); 3068 if (txd == NULL) 3069 return; 3070 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag, 3071 sc_if->sk_cdata.sk_tx_ring_map, BUS_DMASYNC_POSTREAD); 3072 /* 3073 * Go through our tx ring and free mbufs for those 3074 * frames that have been sent. 3075 */ 3076 for (idx = sc_if->sk_cdata.sk_tx_cons;; SK_INC(idx, SK_TX_RING_CNT)) { 3077 if (sc_if->sk_cdata.sk_tx_cnt <= 0) 3078 break; 3079 cur_tx = &sc_if->sk_rdata.sk_tx_ring[idx]; 3080 sk_ctl = le32toh(cur_tx->sk_ctl); 3081 if (sk_ctl & SK_TXCTL_OWN) 3082 break; 3083 sc_if->sk_cdata.sk_tx_cnt--; 3084 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3085 if ((sk_ctl & SK_TXCTL_LASTFRAG) == 0) 3086 continue; 3087 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap, 3088 BUS_DMASYNC_POSTWRITE); 3089 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap); 3090 3091 ifp->if_opackets++; 3092 m_freem(txd->tx_m); 3093 txd->tx_m = NULL; 3094 STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txbusyq, tx_q); 3095 STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q); 3096 txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq); 3097 } 3098 sc_if->sk_cdata.sk_tx_cons = idx; 3099 ifp->if_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 5 : 0; 3100 3101 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag, 3102 sc_if->sk_cdata.sk_tx_ring_map, 3103 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3104 } 3105 3106 static void 3107 sk_tick(xsc_if) 3108 void *xsc_if; 3109 { 3110 struct sk_if_softc *sc_if; 3111 struct mii_data *mii; 3112 struct ifnet *ifp; 3113 int i; 3114 3115 sc_if = xsc_if; 3116 ifp = sc_if->sk_ifp; 3117 mii = device_get_softc(sc_if->sk_miibus); 3118 3119 if (!(ifp->if_flags & IFF_UP)) 3120 return; 3121 3122 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { 3123 sk_intr_bcom(sc_if); 3124 return; 3125 } 3126 3127 /* 3128 * According to SysKonnect, the correct way to verify that 3129 * the link has come back up is to poll bit 0 of the GPIO 3130 * register three times. This pin has the signal from the 3131 * link_sync pin connected to it; if we read the same link 3132 * state 3 times in a row, we know the link is up. 3133 */ 3134 for (i = 0; i < 3; i++) { 3135 if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET) 3136 break; 3137 } 3138 3139 if (i != 3) { 3140 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if); 3141 return; 3142 } 3143 3144 /* Turn the GP0 interrupt back on. */ 3145 SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET); 3146 SK_XM_READ_2(sc_if, XM_ISR); 3147 mii_tick(mii); 3148 callout_stop(&sc_if->sk_tick_ch); 3149 } 3150 3151 static void 3152 sk_yukon_tick(xsc_if) 3153 void *xsc_if; 3154 { 3155 struct sk_if_softc *sc_if; 3156 struct mii_data *mii; 3157 3158 sc_if = xsc_if; 3159 mii = device_get_softc(sc_if->sk_miibus); 3160 3161 mii_tick(mii); 3162 callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if); 3163 } 3164 3165 static void 3166 sk_intr_bcom(sc_if) 3167 struct sk_if_softc *sc_if; 3168 { 3169 struct mii_data *mii; 3170 struct ifnet *ifp; 3171 int status; 3172 mii = device_get_softc(sc_if->sk_miibus); 3173 ifp = sc_if->sk_ifp; 3174 3175 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); 3176 3177 /* 3178 * Read the PHY interrupt register to make sure 3179 * we clear any pending interrupts. 3180 */ 3181 status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR); 3182 3183 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 3184 sk_init_xmac(sc_if); 3185 return; 3186 } 3187 3188 if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) { 3189 int lstat; 3190 lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 3191 BRGPHY_MII_AUXSTS); 3192 3193 if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) { 3194 mii_mediachg(mii); 3195 /* Turn off the link LED. */ 3196 SK_IF_WRITE_1(sc_if, 0, 3197 SK_LINKLED1_CTL, SK_LINKLED_OFF); 3198 sc_if->sk_link = 0; 3199 } else if (status & BRGPHY_ISR_LNK_CHG) { 3200 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, 3201 BRGPHY_MII_IMR, 0xFF00); 3202 mii_tick(mii); 3203 sc_if->sk_link = 1; 3204 /* Turn on the link LED. */ 3205 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, 3206 SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF| 3207 SK_LINKLED_BLINK_OFF); 3208 } else { 3209 mii_tick(mii); 3210 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if); 3211 } 3212 } 3213 3214 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); 3215 3216 return; 3217 } 3218 3219 static void 3220 sk_intr_xmac(sc_if) 3221 struct sk_if_softc *sc_if; 3222 { 3223 struct sk_softc *sc; 3224 u_int16_t status; 3225 3226 sc = sc_if->sk_softc; 3227 status = SK_XM_READ_2(sc_if, XM_ISR); 3228 3229 /* 3230 * Link has gone down. Start MII tick timeout to 3231 * watch for link resync. 3232 */ 3233 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) { 3234 if (status & XM_ISR_GP0_SET) { 3235 SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET); 3236 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if); 3237 } 3238 3239 if (status & XM_ISR_AUTONEG_DONE) { 3240 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if); 3241 } 3242 } 3243 3244 if (status & XM_IMR_TX_UNDERRUN) 3245 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO); 3246 3247 if (status & XM_IMR_RX_OVERRUN) 3248 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO); 3249 3250 status = SK_XM_READ_2(sc_if, XM_ISR); 3251 3252 return; 3253 } 3254 3255 static void 3256 sk_intr_yukon(sc_if) 3257 struct sk_if_softc *sc_if; 3258 { 3259 u_int8_t status; 3260 3261 status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR); 3262 /* RX overrun */ 3263 if ((status & SK_GMAC_INT_RX_OVER) != 0) { 3264 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, 3265 SK_RFCTL_RX_FIFO_OVER); 3266 } 3267 /* TX underrun */ 3268 if ((status & SK_GMAC_INT_TX_UNDER) != 0) { 3269 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, 3270 SK_TFCTL_TX_FIFO_UNDER); 3271 } 3272 } 3273 3274 static void 3275 sk_intr(xsc) 3276 void *xsc; 3277 { 3278 struct sk_softc *sc = xsc; 3279 struct sk_if_softc *sc_if0, *sc_if1; 3280 struct ifnet *ifp0 = NULL, *ifp1 = NULL; 3281 u_int32_t status; 3282 3283 SK_LOCK(sc); 3284 3285 status = CSR_READ_4(sc, SK_ISSR); 3286 if (status == 0 || status == 0xffffffff || sc->sk_suspended) 3287 goto done_locked; 3288 3289 sc_if0 = sc->sk_if[SK_PORT_A]; 3290 sc_if1 = sc->sk_if[SK_PORT_B]; 3291 3292 if (sc_if0 != NULL) 3293 ifp0 = sc_if0->sk_ifp; 3294 if (sc_if1 != NULL) 3295 ifp1 = sc_if1->sk_ifp; 3296 3297 for (; (status &= sc->sk_intrmask) != 0;) { 3298 /* Handle receive interrupts first. */ 3299 if (status & SK_ISR_RX1_EOF) { 3300 if (ifp0->if_mtu > SK_MAX_FRAMELEN) 3301 sk_jumbo_rxeof(sc_if0); 3302 else 3303 sk_rxeof(sc_if0); 3304 CSR_WRITE_4(sc, SK_BMU_RX_CSR0, 3305 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START); 3306 } 3307 if (status & SK_ISR_RX2_EOF) { 3308 if (ifp1->if_mtu > SK_MAX_FRAMELEN) 3309 sk_jumbo_rxeof(sc_if1); 3310 else 3311 sk_rxeof(sc_if1); 3312 CSR_WRITE_4(sc, SK_BMU_RX_CSR1, 3313 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START); 3314 } 3315 3316 /* Then transmit interrupts. */ 3317 if (status & SK_ISR_TX1_S_EOF) { 3318 sk_txeof(sc_if0); 3319 CSR_WRITE_4(sc, SK_BMU_TXS_CSR0, SK_TXBMU_CLR_IRQ_EOF); 3320 } 3321 if (status & SK_ISR_TX2_S_EOF) { 3322 sk_txeof(sc_if1); 3323 CSR_WRITE_4(sc, SK_BMU_TXS_CSR1, SK_TXBMU_CLR_IRQ_EOF); 3324 } 3325 3326 /* Then MAC interrupts. */ 3327 if (status & SK_ISR_MAC1 && 3328 ifp0->if_drv_flags & IFF_DRV_RUNNING) { 3329 if (sc->sk_type == SK_GENESIS) 3330 sk_intr_xmac(sc_if0); 3331 else 3332 sk_intr_yukon(sc_if0); 3333 } 3334 3335 if (status & SK_ISR_MAC2 && 3336 ifp1->if_drv_flags & IFF_DRV_RUNNING) { 3337 if (sc->sk_type == SK_GENESIS) 3338 sk_intr_xmac(sc_if1); 3339 else 3340 sk_intr_yukon(sc_if1); 3341 } 3342 3343 if (status & SK_ISR_EXTERNAL_REG) { 3344 if (ifp0 != NULL && 3345 sc_if0->sk_phytype == SK_PHYTYPE_BCOM) 3346 sk_intr_bcom(sc_if0); 3347 if (ifp1 != NULL && 3348 sc_if1->sk_phytype == SK_PHYTYPE_BCOM) 3349 sk_intr_bcom(sc_if1); 3350 } 3351 status = CSR_READ_4(sc, SK_ISSR); 3352 } 3353 3354 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); 3355 3356 if (ifp0 != NULL && !IFQ_DRV_IS_EMPTY(&ifp0->if_snd)) 3357 sk_start_locked(ifp0); 3358 if (ifp1 != NULL && !IFQ_DRV_IS_EMPTY(&ifp1->if_snd)) 3359 sk_start_locked(ifp1); 3360 3361 done_locked: 3362 SK_UNLOCK(sc); 3363 } 3364 3365 static void 3366 sk_init_xmac(sc_if) 3367 struct sk_if_softc *sc_if; 3368 { 3369 struct sk_softc *sc; 3370 struct ifnet *ifp; 3371 u_int16_t eaddr[(ETHER_ADDR_LEN+1)/2]; 3372 struct sk_bcom_hack bhack[] = { 3373 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 }, 3374 { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 }, 3375 { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 }, 3376 { 0, 0 } }; 3377 3378 SK_IF_LOCK_ASSERT(sc_if); 3379 3380 sc = sc_if->sk_softc; 3381 ifp = sc_if->sk_ifp; 3382 3383 /* Unreset the XMAC. */ 3384 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET); 3385 DELAY(1000); 3386 3387 /* Reset the XMAC's internal state. */ 3388 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC); 3389 3390 /* Save the XMAC II revision */ 3391 sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID)); 3392 3393 /* 3394 * Perform additional initialization for external PHYs, 3395 * namely for the 1000baseTX cards that use the XMAC's 3396 * GMII mode. 3397 */ 3398 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { 3399 int i = 0; 3400 u_int32_t val; 3401 3402 /* Take PHY out of reset. */ 3403 val = sk_win_read_4(sc, SK_GPIO); 3404 if (sc_if->sk_port == SK_PORT_A) 3405 val |= SK_GPIO_DIR0|SK_GPIO_DAT0; 3406 else 3407 val |= SK_GPIO_DIR2|SK_GPIO_DAT2; 3408 sk_win_write_4(sc, SK_GPIO, val); 3409 3410 /* Enable GMII mode on the XMAC. */ 3411 SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE); 3412 3413 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, 3414 BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET); 3415 DELAY(10000); 3416 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, 3417 BRGPHY_MII_IMR, 0xFFF0); 3418 3419 /* 3420 * Early versions of the BCM5400 apparently have 3421 * a bug that requires them to have their reserved 3422 * registers initialized to some magic values. I don't 3423 * know what the numbers do, I'm just the messenger. 3424 */ 3425 if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03) 3426 == 0x6041) { 3427 while(bhack[i].reg) { 3428 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, 3429 bhack[i].reg, bhack[i].val); 3430 i++; 3431 } 3432 } 3433 } 3434 3435 /* Set station address */ 3436 bcopy(IF_LLADDR(sc_if->sk_ifp), eaddr, ETHER_ADDR_LEN); 3437 SK_XM_WRITE_2(sc_if, XM_PAR0, eaddr[0]); 3438 SK_XM_WRITE_2(sc_if, XM_PAR1, eaddr[1]); 3439 SK_XM_WRITE_2(sc_if, XM_PAR2, eaddr[2]); 3440 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION); 3441 3442 if (ifp->if_flags & IFF_BROADCAST) { 3443 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD); 3444 } else { 3445 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD); 3446 } 3447 3448 /* We don't need the FCS appended to the packet. */ 3449 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS); 3450 3451 /* We want short frames padded to 60 bytes. */ 3452 SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD); 3453 3454 /* 3455 * Enable the reception of all error frames. This is is 3456 * a necessary evil due to the design of the XMAC. The 3457 * XMAC's receive FIFO is only 8K in size, however jumbo 3458 * frames can be up to 9000 bytes in length. When bad 3459 * frame filtering is enabled, the XMAC's RX FIFO operates 3460 * in 'store and forward' mode. For this to work, the 3461 * entire frame has to fit into the FIFO, but that means 3462 * that jumbo frames larger than 8192 bytes will be 3463 * truncated. Disabling all bad frame filtering causes 3464 * the RX FIFO to operate in streaming mode, in which 3465 * case the XMAC will start transfering frames out of the 3466 * RX FIFO as soon as the FIFO threshold is reached. 3467 */ 3468 if (ifp->if_mtu > SK_MAX_FRAMELEN) { 3469 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES| 3470 XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS| 3471 XM_MODE_RX_INRANGELEN); 3472 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK); 3473 } else 3474 SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK); 3475 3476 /* 3477 * Bump up the transmit threshold. This helps hold off transmit 3478 * underruns when we're blasting traffic from both ports at once. 3479 */ 3480 SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH); 3481 3482 /* Set promiscuous mode */ 3483 sk_setpromisc(sc_if); 3484 3485 /* Set multicast filter */ 3486 sk_setmulti(sc_if); 3487 3488 /* Clear and enable interrupts */ 3489 SK_XM_READ_2(sc_if, XM_ISR); 3490 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) 3491 SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS); 3492 else 3493 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF); 3494 3495 /* Configure MAC arbiter */ 3496 switch(sc_if->sk_xmac_rev) { 3497 case XM_XMAC_REV_B2: 3498 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2); 3499 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2); 3500 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2); 3501 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2); 3502 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2); 3503 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2); 3504 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2); 3505 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2); 3506 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2); 3507 break; 3508 case XM_XMAC_REV_C1: 3509 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1); 3510 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1); 3511 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1); 3512 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1); 3513 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1); 3514 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1); 3515 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1); 3516 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1); 3517 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2); 3518 break; 3519 default: 3520 break; 3521 } 3522 sk_win_write_2(sc, SK_MACARB_CTL, 3523 SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF); 3524 3525 sc_if->sk_link = 1; 3526 3527 return; 3528 } 3529 3530 static void 3531 sk_init_yukon(sc_if) 3532 struct sk_if_softc *sc_if; 3533 { 3534 u_int32_t phy, v; 3535 u_int16_t reg; 3536 struct sk_softc *sc; 3537 struct ifnet *ifp; 3538 int i; 3539 3540 SK_IF_LOCK_ASSERT(sc_if); 3541 3542 sc = sc_if->sk_softc; 3543 ifp = sc_if->sk_ifp; 3544 3545 if (sc->sk_type == SK_YUKON_LITE && 3546 sc->sk_rev >= SK_YUKON_LITE_REV_A3) { 3547 /* 3548 * Workaround code for COMA mode, set PHY reset. 3549 * Otherwise it will not correctly take chip out of 3550 * powerdown (coma) 3551 */ 3552 v = sk_win_read_4(sc, SK_GPIO); 3553 v |= SK_GPIO_DIR9 | SK_GPIO_DAT9; 3554 sk_win_write_4(sc, SK_GPIO, v); 3555 } 3556 3557 /* GMAC and GPHY Reset */ 3558 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET); 3559 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET); 3560 DELAY(1000); 3561 3562 if (sc->sk_type == SK_YUKON_LITE && 3563 sc->sk_rev >= SK_YUKON_LITE_REV_A3) { 3564 /* 3565 * Workaround code for COMA mode, clear PHY reset 3566 */ 3567 v = sk_win_read_4(sc, SK_GPIO); 3568 v |= SK_GPIO_DIR9; 3569 v &= ~SK_GPIO_DAT9; 3570 sk_win_write_4(sc, SK_GPIO, v); 3571 } 3572 3573 phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP | 3574 SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE; 3575 3576 if (sc->sk_coppertype) 3577 phy |= SK_GPHY_COPPER; 3578 else 3579 phy |= SK_GPHY_FIBER; 3580 3581 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET); 3582 DELAY(1000); 3583 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR); 3584 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF | 3585 SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR); 3586 3587 /* unused read of the interrupt source register */ 3588 SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR); 3589 3590 reg = SK_YU_READ_2(sc_if, YUKON_PAR); 3591 3592 /* MIB Counter Clear Mode set */ 3593 reg |= YU_PAR_MIB_CLR; 3594 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg); 3595 3596 /* MIB Counter Clear Mode clear */ 3597 reg &= ~YU_PAR_MIB_CLR; 3598 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg); 3599 3600 /* receive control reg */ 3601 SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR); 3602 3603 /* transmit parameter register */ 3604 SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) | 3605 YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) ); 3606 3607 /* serial mode register */ 3608 reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e); 3609 if (ifp->if_mtu > SK_MAX_FRAMELEN) 3610 reg |= YU_SMR_MFL_JUMBO; 3611 SK_YU_WRITE_2(sc_if, YUKON_SMR, reg); 3612 3613 /* Setup Yukon's address */ 3614 for (i = 0; i < 3; i++) { 3615 /* Write Source Address 1 (unicast filter) */ 3616 SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4, 3617 IF_LLADDR(sc_if->sk_ifp)[i * 2] | 3618 IF_LLADDR(sc_if->sk_ifp)[i * 2 + 1] << 8); 3619 } 3620 3621 for (i = 0; i < 3; i++) { 3622 reg = sk_win_read_2(sc_if->sk_softc, 3623 SK_MAC1_0 + i * 2 + sc_if->sk_port * 8); 3624 SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg); 3625 } 3626 3627 /* Set promiscuous mode */ 3628 sk_setpromisc(sc_if); 3629 3630 /* Set multicast filter */ 3631 sk_setmulti(sc_if); 3632 3633 /* enable interrupt mask for counter overflows */ 3634 SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0); 3635 SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0); 3636 SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0); 3637 3638 /* Configure RX MAC FIFO Flush Mask */ 3639 v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR | 3640 YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT | 3641 YU_RXSTAT_JABBER; 3642 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v); 3643 3644 /* Disable RX MAC FIFO Flush for YUKON-Lite Rev. A0 only */ 3645 if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev == SK_YUKON_LITE_REV_A0) 3646 v = SK_TFCTL_OPERATION_ON; 3647 else 3648 v = SK_TFCTL_OPERATION_ON | SK_RFCTL_FIFO_FLUSH_ON; 3649 /* Configure RX MAC FIFO */ 3650 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR); 3651 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, v); 3652 3653 /* Increase flush threshould to 64 bytes */ 3654 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD, 3655 SK_RFCTL_FIFO_THRESHOLD + 1); 3656 3657 /* Configure TX MAC FIFO */ 3658 SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR); 3659 SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON); 3660 } 3661 3662 /* 3663 * Note that to properly initialize any part of the GEnesis chip, 3664 * you first have to take it out of reset mode. 3665 */ 3666 static void 3667 sk_init(xsc) 3668 void *xsc; 3669 { 3670 struct sk_if_softc *sc_if = xsc; 3671 3672 SK_IF_LOCK(sc_if); 3673 sk_init_locked(sc_if); 3674 SK_IF_UNLOCK(sc_if); 3675 3676 return; 3677 } 3678 3679 static void 3680 sk_init_locked(sc_if) 3681 struct sk_if_softc *sc_if; 3682 { 3683 struct sk_softc *sc; 3684 struct ifnet *ifp; 3685 struct mii_data *mii; 3686 u_int16_t reg; 3687 u_int32_t imr; 3688 int error; 3689 3690 SK_IF_LOCK_ASSERT(sc_if); 3691 3692 ifp = sc_if->sk_ifp; 3693 sc = sc_if->sk_softc; 3694 mii = device_get_softc(sc_if->sk_miibus); 3695 3696 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 3697 return; 3698 3699 /* Cancel pending I/O and free all RX/TX buffers. */ 3700 sk_stop(sc_if); 3701 3702 if (sc->sk_type == SK_GENESIS) { 3703 /* Configure LINK_SYNC LED */ 3704 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON); 3705 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, 3706 SK_LINKLED_LINKSYNC_ON); 3707 3708 /* Configure RX LED */ 3709 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, 3710 SK_RXLEDCTL_COUNTER_START); 3711 3712 /* Configure TX LED */ 3713 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, 3714 SK_TXLEDCTL_COUNTER_START); 3715 } 3716 3717 /* 3718 * Configure descriptor poll timer 3719 * 3720 * SK-NET GENESIS data sheet says that possibility of losing Start 3721 * transmit command due to CPU/cache related interim storage problems 3722 * under certain conditions. The document recommends a polling 3723 * mechanism to send a Start transmit command to initiate transfer 3724 * of ready descriptors regulary. To cope with this issue sk(4) now 3725 * enables descriptor poll timer to initiate descriptor processing 3726 * periodically as defined by SK_DPT_TIMER_MAX. However sk(4) still 3727 * issue SK_TXBMU_TX_START to Tx BMU to get fast execution of Tx 3728 * command instead of waiting for next descriptor polling time. 3729 * The same rule may apply to Rx side too but it seems that is not 3730 * needed at the moment. 3731 * Since sk(4) uses descriptor polling as a last resort there is no 3732 * need to set smaller polling time than maximum allowable one. 3733 */ 3734 SK_IF_WRITE_4(sc_if, 0, SK_DPT_INIT, SK_DPT_TIMER_MAX); 3735 3736 /* Configure I2C registers */ 3737 3738 /* Configure XMAC(s) */ 3739 switch (sc->sk_type) { 3740 case SK_GENESIS: 3741 sk_init_xmac(sc_if); 3742 break; 3743 case SK_YUKON: 3744 case SK_YUKON_LITE: 3745 case SK_YUKON_LP: 3746 case SK_YUKON_EC: 3747 sk_init_yukon(sc_if); 3748 break; 3749 } 3750 mii_mediachg(mii); 3751 3752 if (sc->sk_type == SK_GENESIS) { 3753 /* Configure MAC FIFOs */ 3754 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET); 3755 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END); 3756 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON); 3757 3758 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET); 3759 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END); 3760 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON); 3761 } 3762 3763 /* Configure transmit arbiter(s) */ 3764 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, 3765 SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON); 3766 3767 /* Configure RAMbuffers */ 3768 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET); 3769 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart); 3770 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart); 3771 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart); 3772 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend); 3773 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON); 3774 3775 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET); 3776 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON); 3777 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart); 3778 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart); 3779 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart); 3780 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend); 3781 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON); 3782 3783 /* Configure BMUs */ 3784 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE); 3785 if (ifp->if_mtu > SK_MAX_FRAMELEN) { 3786 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO, 3787 SK_ADDR_LO(SK_JUMBO_RX_RING_ADDR(sc_if, 0))); 3788 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 3789 SK_ADDR_HI(SK_JUMBO_RX_RING_ADDR(sc_if, 0))); 3790 } else { 3791 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO, 3792 SK_ADDR_LO(SK_RX_RING_ADDR(sc_if, 0))); 3793 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 3794 SK_ADDR_HI(SK_RX_RING_ADDR(sc_if, 0))); 3795 } 3796 3797 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE); 3798 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO, 3799 SK_ADDR_LO(SK_TX_RING_ADDR(sc_if, 0))); 3800 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 3801 SK_ADDR_HI(SK_TX_RING_ADDR(sc_if, 0))); 3802 3803 /* Init descriptors */ 3804 if (ifp->if_mtu > SK_MAX_FRAMELEN) 3805 error = sk_init_jumbo_rx_ring(sc_if); 3806 else 3807 error = sk_init_rx_ring(sc_if); 3808 if (error != 0) { 3809 device_printf(sc_if->sk_if_dev, 3810 "initialization failed: no memory for rx buffers\n"); 3811 sk_stop(sc_if); 3812 return; 3813 } 3814 sk_init_tx_ring(sc_if); 3815 3816 /* Set interrupt moderation if changed via sysctl. */ 3817 imr = sk_win_read_4(sc, SK_IMTIMERINIT); 3818 if (imr != SK_IM_USECS(sc->sk_int_mod, sc->sk_int_ticks)) { 3819 sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod, 3820 sc->sk_int_ticks)); 3821 if (bootverbose) 3822 device_printf(sc_if->sk_if_dev, 3823 "interrupt moderation is %d us.\n", 3824 sc->sk_int_mod); 3825 } 3826 3827 /* Configure interrupt handling */ 3828 CSR_READ_4(sc, SK_ISSR); 3829 if (sc_if->sk_port == SK_PORT_A) 3830 sc->sk_intrmask |= SK_INTRS1; 3831 else 3832 sc->sk_intrmask |= SK_INTRS2; 3833 3834 sc->sk_intrmask |= SK_ISR_EXTERNAL_REG; 3835 3836 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); 3837 3838 /* Start BMUs. */ 3839 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START); 3840 3841 switch(sc->sk_type) { 3842 case SK_GENESIS: 3843 /* Enable XMACs TX and RX state machines */ 3844 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE); 3845 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); 3846 break; 3847 case SK_YUKON: 3848 case SK_YUKON_LITE: 3849 case SK_YUKON_LP: 3850 case SK_YUKON_EC: 3851 reg = SK_YU_READ_2(sc_if, YUKON_GPCR); 3852 reg |= YU_GPCR_TXEN | YU_GPCR_RXEN; 3853 #if 0 3854 /* XXX disable 100Mbps and full duplex mode? */ 3855 reg &= ~(YU_GPCR_SPEED | YU_GPCR_DPLX_DIS); 3856 #endif 3857 SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg); 3858 } 3859 3860 /* Activate descriptor polling timer */ 3861 SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_START); 3862 /* start transfer of Tx descriptors */ 3863 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START); 3864 3865 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3866 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3867 3868 switch (sc->sk_type) { 3869 case SK_YUKON: 3870 case SK_YUKON_LITE: 3871 case SK_YUKON_LP: 3872 case SK_YUKON_EC: 3873 callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if); 3874 break; 3875 } 3876 3877 return; 3878 } 3879 3880 static void 3881 sk_stop(sc_if) 3882 struct sk_if_softc *sc_if; 3883 { 3884 int i; 3885 struct sk_softc *sc; 3886 struct sk_txdesc *txd; 3887 struct sk_rxdesc *rxd; 3888 struct sk_rxdesc *jrxd; 3889 struct ifnet *ifp; 3890 u_int32_t val; 3891 3892 SK_IF_LOCK_ASSERT(sc_if); 3893 sc = sc_if->sk_softc; 3894 ifp = sc_if->sk_ifp; 3895 3896 callout_stop(&sc_if->sk_tick_ch); 3897 3898 /* stop Tx descriptor polling timer */ 3899 SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP); 3900 /* stop transfer of Tx descriptors */ 3901 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_STOP); 3902 for (i = 0; i < SK_TIMEOUT; i++) { 3903 val = CSR_READ_4(sc, sc_if->sk_tx_bmu); 3904 if ((val & SK_TXBMU_TX_STOP) == 0) 3905 break; 3906 DELAY(1); 3907 } 3908 if (i == SK_TIMEOUT) 3909 device_printf(sc_if->sk_if_dev, 3910 "can not stop transfer of Tx descriptor\n"); 3911 /* stop transfer of Rx descriptors */ 3912 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_STOP); 3913 for (i = 0; i < SK_TIMEOUT; i++) { 3914 val = SK_IF_READ_4(sc_if, 0, SK_RXQ1_BMU_CSR); 3915 if ((val & SK_RXBMU_RX_STOP) == 0) 3916 break; 3917 DELAY(1); 3918 } 3919 if (i == SK_TIMEOUT) 3920 device_printf(sc_if->sk_if_dev, 3921 "can not stop transfer of Rx descriptor\n"); 3922 3923 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { 3924 /* Put PHY back into reset. */ 3925 val = sk_win_read_4(sc, SK_GPIO); 3926 if (sc_if->sk_port == SK_PORT_A) { 3927 val |= SK_GPIO_DIR0; 3928 val &= ~SK_GPIO_DAT0; 3929 } else { 3930 val |= SK_GPIO_DIR2; 3931 val &= ~SK_GPIO_DAT2; 3932 } 3933 sk_win_write_4(sc, SK_GPIO, val); 3934 } 3935 3936 /* Turn off various components of this interface. */ 3937 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC); 3938 switch (sc->sk_type) { 3939 case SK_GENESIS: 3940 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET); 3941 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET); 3942 break; 3943 case SK_YUKON: 3944 case SK_YUKON_LITE: 3945 case SK_YUKON_LP: 3946 case SK_YUKON_EC: 3947 SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET); 3948 SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET); 3949 break; 3950 } 3951 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE); 3952 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF); 3953 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE); 3954 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF); 3955 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF); 3956 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP); 3957 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP); 3958 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF); 3959 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF); 3960 3961 /* Disable interrupts */ 3962 if (sc_if->sk_port == SK_PORT_A) 3963 sc->sk_intrmask &= ~SK_INTRS1; 3964 else 3965 sc->sk_intrmask &= ~SK_INTRS2; 3966 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); 3967 3968 SK_XM_READ_2(sc_if, XM_ISR); 3969 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF); 3970 3971 /* Free RX and TX mbufs still in the queues. */ 3972 for (i = 0; i < SK_RX_RING_CNT; i++) { 3973 rxd = &sc_if->sk_cdata.sk_rxdesc[i]; 3974 if (rxd->rx_m != NULL) { 3975 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, 3976 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 3977 bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag, 3978 rxd->rx_dmamap); 3979 m_freem(rxd->rx_m); 3980 rxd->rx_m = NULL; 3981 } 3982 } 3983 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) { 3984 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i]; 3985 if (jrxd->rx_m != NULL) { 3986 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, 3987 jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 3988 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag, 3989 jrxd->rx_dmamap); 3990 m_freem(jrxd->rx_m); 3991 jrxd->rx_m = NULL; 3992 } 3993 } 3994 for (i = 0; i < SK_TX_RING_CNT; i++) { 3995 txd = &sc_if->sk_cdata.sk_txdesc[i]; 3996 if (txd->tx_m != NULL) { 3997 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, 3998 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 3999 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, 4000 txd->tx_dmamap); 4001 m_freem(txd->tx_m); 4002 txd->tx_m = NULL; 4003 } 4004 } 4005 4006 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE); 4007 4008 return; 4009 } 4010 4011 static int 4012 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 4013 { 4014 int error, value; 4015 4016 if (!arg1) 4017 return (EINVAL); 4018 value = *(int *)arg1; 4019 error = sysctl_handle_int(oidp, &value, 0, req); 4020 if (error || !req->newptr) 4021 return (error); 4022 if (value < low || value > high) 4023 return (EINVAL); 4024 *(int *)arg1 = value; 4025 return (0); 4026 } 4027 4028 static int 4029 sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS) 4030 { 4031 return (sysctl_int_range(oidp, arg1, arg2, req, SK_IM_MIN, SK_IM_MAX)); 4032 } 4033