1 /* $OpenBSD: if_sk.c,v 2.33 2003/08/12 05:23:06 nate Exp $ */ 2 3 /* 4 * Copyright (c) 1997, 1998, 1999, 2000 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 /* 35 * Copyright (c) 2003 Nathan L. 
Binkert <binkertn@umich.edu> 36 * 37 * Permission to use, copy, modify, and distribute this software for any 38 * purpose with or without fee is hereby granted, provided that the above 39 * copyright notice and this permission notice appear in all copies. 40 * 41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 48 */ 49 50 #include <sys/cdefs.h> 51 __FBSDID("$FreeBSD$"); 52 53 /* 54 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports 55 * the SK-984x series adapters, both single port and dual port. 56 * References: 57 * The XaQti XMAC II datasheet, 58 * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf 59 * The SysKonnect GEnesis manual, http://www.syskonnect.com 60 * 61 * Note: XaQti has been aquired by Vitesse, and Vitesse does not have the 62 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a 63 * convenience to others until Vitesse corrects this problem: 64 * 65 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf 66 * 67 * Written by Bill Paul <wpaul@ee.columbia.edu> 68 * Department of Electrical Engineering 69 * Columbia University, New York City 70 */ 71 /* 72 * The SysKonnect gigabit ethernet adapters consist of two main 73 * components: the SysKonnect GEnesis controller chip and the XaQti Corp. 74 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC 75 * components and a PHY while the GEnesis controller provides a PCI 76 * interface with DMA support. 
Each card may have between 512K and 77 * 2MB of SRAM on board depending on the configuration. 78 * 79 * The SysKonnect GEnesis controller can have either one or two XMAC 80 * chips connected to it, allowing single or dual port NIC configurations. 81 * SysKonnect has the distinction of being the only vendor on the market 82 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs, 83 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the 84 * XMAC registers. This driver takes advantage of these features to allow 85 * both XMACs to operate as independent interfaces. 86 */ 87 88 #include <sys/param.h> 89 #include <sys/systm.h> 90 #include <sys/sockio.h> 91 #include <sys/mbuf.h> 92 #include <sys/malloc.h> 93 #include <sys/kernel.h> 94 #include <sys/socket.h> 95 #include <sys/queue.h> 96 97 #include <net/if.h> 98 #include <net/if_arp.h> 99 #include <net/ethernet.h> 100 #include <net/if_dl.h> 101 #include <net/if_media.h> 102 103 #include <net/bpf.h> 104 105 #include <vm/vm.h> /* for vtophys */ 106 #include <vm/pmap.h> /* for vtophys */ 107 #include <machine/bus_pio.h> 108 #include <machine/bus_memio.h> 109 #include <machine/bus.h> 110 #include <machine/resource.h> 111 #include <sys/bus.h> 112 #include <sys/rman.h> 113 114 #include <dev/mii/mii.h> 115 #include <dev/mii/miivar.h> 116 #include <dev/mii/brgphyreg.h> 117 118 #include <dev/pci/pcireg.h> 119 #include <dev/pci/pcivar.h> 120 121 #if 0 122 #define SK_USEIOSPACE 123 #endif 124 125 #include <pci/if_skreg.h> 126 #include <pci/xmaciireg.h> 127 #include <pci/yukonreg.h> 128 129 MODULE_DEPEND(sk, pci, 1, 1, 1); 130 MODULE_DEPEND(sk, ether, 1, 1, 1); 131 MODULE_DEPEND(sk, miibus, 1, 1, 1); 132 133 /* "controller miibus0" required. See GENERIC if you get errors here. 
*/ 134 #include "miibus_if.h" 135 136 #ifndef lint 137 static const char rcsid[] = 138 "$FreeBSD$"; 139 #endif 140 141 static struct sk_type sk_devs[] = { 142 { 143 VENDORID_SK, 144 DEVICEID_SK_V1, 145 "SysKonnect Gigabit Ethernet (V1.0)" 146 }, 147 { 148 VENDORID_SK, 149 DEVICEID_SK_V2, 150 "SysKonnect Gigabit Ethernet (V2.0)" 151 }, 152 { 153 VENDORID_MARVELL, 154 DEVICEID_SK_V2, 155 "Marvell Gigabit Ethernet" 156 }, 157 { 158 VENDORID_3COM, 159 DEVICEID_3COM_3C940, 160 "3Com 3C940 Gigabit Ethernet" 161 }, 162 { 163 VENDORID_LINKSYS, 164 DEVICEID_LINKSYS_EG1032, 165 "Linksys EG1032 Gigabit Ethernet" 166 }, 167 { 0, 0, NULL } 168 }; 169 170 static int skc_probe (device_t); 171 static int skc_attach (device_t); 172 static int skc_detach (device_t); 173 static void skc_shutdown (device_t); 174 static int sk_detach (device_t); 175 static int sk_probe (device_t); 176 static int sk_attach (device_t); 177 static void sk_tick (void *); 178 static void sk_intr (void *); 179 static void sk_intr_xmac (struct sk_if_softc *); 180 static void sk_intr_bcom (struct sk_if_softc *); 181 static void sk_intr_yukon (struct sk_if_softc *); 182 static void sk_rxeof (struct sk_if_softc *); 183 static void sk_txeof (struct sk_if_softc *); 184 static int sk_encap (struct sk_if_softc *, struct mbuf *, 185 u_int32_t *); 186 static void sk_start (struct ifnet *); 187 static int sk_ioctl (struct ifnet *, u_long, caddr_t); 188 static void sk_init (void *); 189 static void sk_init_xmac (struct sk_if_softc *); 190 static void sk_init_yukon (struct sk_if_softc *); 191 static void sk_stop (struct sk_if_softc *); 192 static void sk_watchdog (struct ifnet *); 193 static int sk_ifmedia_upd (struct ifnet *); 194 static void sk_ifmedia_sts (struct ifnet *, struct ifmediareq *); 195 static void sk_reset (struct sk_softc *); 196 static int sk_newbuf (struct sk_if_softc *, 197 struct sk_chain *, struct mbuf *); 198 static int sk_alloc_jumbo_mem (struct sk_if_softc *); 199 static void *sk_jalloc (struct 
sk_if_softc *); 200 static void sk_jfree (void *, void *); 201 static int sk_init_rx_ring (struct sk_if_softc *); 202 static void sk_init_tx_ring (struct sk_if_softc *); 203 static u_int32_t sk_win_read_4 (struct sk_softc *, int); 204 static u_int16_t sk_win_read_2 (struct sk_softc *, int); 205 static u_int8_t sk_win_read_1 (struct sk_softc *, int); 206 static void sk_win_write_4 (struct sk_softc *, int, u_int32_t); 207 static void sk_win_write_2 (struct sk_softc *, int, u_int32_t); 208 static void sk_win_write_1 (struct sk_softc *, int, u_int32_t); 209 static u_int8_t sk_vpd_readbyte (struct sk_softc *, int); 210 static void sk_vpd_read_res (struct sk_softc *, struct vpd_res *, int); 211 static void sk_vpd_read (struct sk_softc *); 212 213 static int sk_miibus_readreg (device_t, int, int); 214 static int sk_miibus_writereg (device_t, int, int, int); 215 static void sk_miibus_statchg (device_t); 216 217 static int sk_xmac_miibus_readreg (struct sk_if_softc *, int, int); 218 static int sk_xmac_miibus_writereg (struct sk_if_softc *, int, int, 219 int); 220 static void sk_xmac_miibus_statchg (struct sk_if_softc *); 221 222 static int sk_marv_miibus_readreg (struct sk_if_softc *, int, int); 223 static int sk_marv_miibus_writereg (struct sk_if_softc *, int, int, 224 int); 225 static void sk_marv_miibus_statchg (struct sk_if_softc *); 226 227 static uint32_t sk_xmchash (const uint8_t *); 228 static uint32_t sk_gmchash (const uint8_t *); 229 static void sk_setfilt (struct sk_if_softc *, caddr_t, int); 230 static void sk_setmulti (struct sk_if_softc *); 231 static void sk_setpromisc (struct sk_if_softc *); 232 233 #ifdef SK_USEIOSPACE 234 #define SK_RES SYS_RES_IOPORT 235 #define SK_RID SK_PCI_LOIO 236 #else 237 #define SK_RES SYS_RES_MEMORY 238 #define SK_RID SK_PCI_LOMEM 239 #endif 240 241 /* 242 * Note that we have newbus methods for both the GEnesis controller 243 * itself and the XMAC(s). 
The XMACs are children of the GEnesis, and 244 * the miibus code is a child of the XMACs. We need to do it this way 245 * so that the miibus drivers can access the PHY registers on the 246 * right PHY. It's not quite what I had in mind, but it's the only 247 * design that achieves the desired effect. 248 */ 249 static device_method_t skc_methods[] = { 250 /* Device interface */ 251 DEVMETHOD(device_probe, skc_probe), 252 DEVMETHOD(device_attach, skc_attach), 253 DEVMETHOD(device_detach, skc_detach), 254 DEVMETHOD(device_shutdown, skc_shutdown), 255 256 /* bus interface */ 257 DEVMETHOD(bus_print_child, bus_generic_print_child), 258 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 259 260 { 0, 0 } 261 }; 262 263 static driver_t skc_driver = { 264 "skc", 265 skc_methods, 266 sizeof(struct sk_softc) 267 }; 268 269 static devclass_t skc_devclass; 270 271 static device_method_t sk_methods[] = { 272 /* Device interface */ 273 DEVMETHOD(device_probe, sk_probe), 274 DEVMETHOD(device_attach, sk_attach), 275 DEVMETHOD(device_detach, sk_detach), 276 DEVMETHOD(device_shutdown, bus_generic_shutdown), 277 278 /* bus interface */ 279 DEVMETHOD(bus_print_child, bus_generic_print_child), 280 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 281 282 /* MII interface */ 283 DEVMETHOD(miibus_readreg, sk_miibus_readreg), 284 DEVMETHOD(miibus_writereg, sk_miibus_writereg), 285 DEVMETHOD(miibus_statchg, sk_miibus_statchg), 286 287 { 0, 0 } 288 }; 289 290 static driver_t sk_driver = { 291 "sk", 292 sk_methods, 293 sizeof(struct sk_if_softc) 294 }; 295 296 static devclass_t sk_devclass; 297 298 DRIVER_MODULE(sk, pci, skc_driver, skc_devclass, 0, 0); 299 DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0); 300 DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0); 301 302 #define SK_SETBIT(sc, reg, x) \ 303 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x) 304 305 #define SK_CLRBIT(sc, reg, x) \ 306 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x) 307 308 #define 
SK_WIN_SETBIT_4(sc, reg, x) \ 309 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x) 310 311 #define SK_WIN_CLRBIT_4(sc, reg, x) \ 312 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x) 313 314 #define SK_WIN_SETBIT_2(sc, reg, x) \ 315 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x) 316 317 #define SK_WIN_CLRBIT_2(sc, reg, x) \ 318 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x) 319 320 static u_int32_t 321 sk_win_read_4(sc, reg) 322 struct sk_softc *sc; 323 int reg; 324 { 325 #ifdef SK_USEIOSPACE 326 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 327 return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg))); 328 #else 329 return(CSR_READ_4(sc, reg)); 330 #endif 331 } 332 333 static u_int16_t 334 sk_win_read_2(sc, reg) 335 struct sk_softc *sc; 336 int reg; 337 { 338 #ifdef SK_USEIOSPACE 339 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 340 return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg))); 341 #else 342 return(CSR_READ_2(sc, reg)); 343 #endif 344 } 345 346 static u_int8_t 347 sk_win_read_1(sc, reg) 348 struct sk_softc *sc; 349 int reg; 350 { 351 #ifdef SK_USEIOSPACE 352 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 353 return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg))); 354 #else 355 return(CSR_READ_1(sc, reg)); 356 #endif 357 } 358 359 static void 360 sk_win_write_4(sc, reg, val) 361 struct sk_softc *sc; 362 int reg; 363 u_int32_t val; 364 { 365 #ifdef SK_USEIOSPACE 366 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 367 CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val); 368 #else 369 CSR_WRITE_4(sc, reg, val); 370 #endif 371 return; 372 } 373 374 static void 375 sk_win_write_2(sc, reg, val) 376 struct sk_softc *sc; 377 int reg; 378 u_int32_t val; 379 { 380 #ifdef SK_USEIOSPACE 381 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 382 CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val); 383 #else 384 CSR_WRITE_2(sc, reg, val); 385 #endif 386 return; 387 } 388 389 static void 390 sk_win_write_1(sc, reg, val) 391 struct sk_softc *sc; 392 int reg; 393 u_int32_t val; 394 { 395 #ifdef SK_USEIOSPACE 396 CSR_WRITE_4(sc, 
SK_RAP, SK_WIN(reg)); 397 CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val); 398 #else 399 CSR_WRITE_1(sc, reg, val); 400 #endif 401 return; 402 } 403 404 /* 405 * The VPD EEPROM contains Vital Product Data, as suggested in 406 * the PCI 2.1 specification. The VPD data is separared into areas 407 * denoted by resource IDs. The SysKonnect VPD contains an ID string 408 * resource (the name of the adapter), a read-only area resource 409 * containing various key/data fields and a read/write area which 410 * can be used to store asset management information or log messages. 411 * We read the ID string and read-only into buffers attached to 412 * the controller softc structure for later use. At the moment, 413 * we only use the ID string during skc_attach(). 414 */ 415 static u_int8_t 416 sk_vpd_readbyte(sc, addr) 417 struct sk_softc *sc; 418 int addr; 419 { 420 int i; 421 422 sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr); 423 for (i = 0; i < SK_TIMEOUT; i++) { 424 DELAY(1); 425 if (sk_win_read_2(sc, 426 SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG) 427 break; 428 } 429 430 if (i == SK_TIMEOUT) 431 return(0); 432 433 return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA))); 434 } 435 436 static void 437 sk_vpd_read_res(sc, res, addr) 438 struct sk_softc *sc; 439 struct vpd_res *res; 440 int addr; 441 { 442 int i; 443 u_int8_t *ptr; 444 445 ptr = (u_int8_t *)res; 446 for (i = 0; i < sizeof(struct vpd_res); i++) 447 ptr[i] = sk_vpd_readbyte(sc, i + addr); 448 449 return; 450 } 451 452 static void 453 sk_vpd_read(sc) 454 struct sk_softc *sc; 455 { 456 int pos = 0, i; 457 struct vpd_res res; 458 459 if (sc->sk_vpd_prodname != NULL) 460 free(sc->sk_vpd_prodname, M_DEVBUF); 461 if (sc->sk_vpd_readonly != NULL) 462 free(sc->sk_vpd_readonly, M_DEVBUF); 463 sc->sk_vpd_prodname = NULL; 464 sc->sk_vpd_readonly = NULL; 465 466 sk_vpd_read_res(sc, &res, pos); 467 468 if (res.vr_id != VPD_RES_ID) { 469 printf("skc%d: bad VPD resource id: expected %x got %x\n", 470 sc->sk_unit, 
VPD_RES_ID, res.vr_id); 471 return; 472 } 473 474 pos += sizeof(res); 475 sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT); 476 for (i = 0; i < res.vr_len; i++) 477 sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos); 478 sc->sk_vpd_prodname[i] = '\0'; 479 pos += i; 480 481 sk_vpd_read_res(sc, &res, pos); 482 483 if (res.vr_id != VPD_RES_READ) { 484 printf("skc%d: bad VPD resource id: expected %x got %x\n", 485 sc->sk_unit, VPD_RES_READ, res.vr_id); 486 return; 487 } 488 489 pos += sizeof(res); 490 sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT); 491 for (i = 0; i < res.vr_len + 1; i++) 492 sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos); 493 494 return; 495 } 496 497 static int 498 sk_miibus_readreg(dev, phy, reg) 499 device_t dev; 500 int phy, reg; 501 { 502 struct sk_if_softc *sc_if; 503 504 sc_if = device_get_softc(dev); 505 506 switch(sc_if->sk_softc->sk_type) { 507 case SK_GENESIS: 508 return(sk_xmac_miibus_readreg(sc_if, phy, reg)); 509 case SK_YUKON: 510 return(sk_marv_miibus_readreg(sc_if, phy, reg)); 511 } 512 513 return(0); 514 } 515 516 static int 517 sk_miibus_writereg(dev, phy, reg, val) 518 device_t dev; 519 int phy, reg, val; 520 { 521 struct sk_if_softc *sc_if; 522 523 sc_if = device_get_softc(dev); 524 525 switch(sc_if->sk_softc->sk_type) { 526 case SK_GENESIS: 527 return(sk_xmac_miibus_writereg(sc_if, phy, reg, val)); 528 case SK_YUKON: 529 return(sk_marv_miibus_writereg(sc_if, phy, reg, val)); 530 } 531 532 return(0); 533 } 534 535 static void 536 sk_miibus_statchg(dev) 537 device_t dev; 538 { 539 struct sk_if_softc *sc_if; 540 541 sc_if = device_get_softc(dev); 542 543 switch(sc_if->sk_softc->sk_type) { 544 case SK_GENESIS: 545 sk_xmac_miibus_statchg(sc_if); 546 break; 547 case SK_YUKON: 548 sk_marv_miibus_statchg(sc_if); 549 break; 550 } 551 552 return; 553 } 554 555 static int 556 sk_xmac_miibus_readreg(sc_if, phy, reg) 557 struct sk_if_softc *sc_if; 558 int phy, reg; 559 { 560 int i; 561 562 if 
(sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0) 563 return(0); 564 565 SK_IF_LOCK(sc_if); 566 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8)); 567 SK_XM_READ_2(sc_if, XM_PHY_DATA); 568 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) { 569 for (i = 0; i < SK_TIMEOUT; i++) { 570 DELAY(1); 571 if (SK_XM_READ_2(sc_if, XM_MMUCMD) & 572 XM_MMUCMD_PHYDATARDY) 573 break; 574 } 575 576 if (i == SK_TIMEOUT) { 577 printf("sk%d: phy failed to come ready\n", 578 sc_if->sk_unit); 579 SK_IF_UNLOCK(sc_if); 580 return(0); 581 } 582 } 583 DELAY(1); 584 i = SK_XM_READ_2(sc_if, XM_PHY_DATA); 585 SK_IF_UNLOCK(sc_if); 586 return(i); 587 } 588 589 static int 590 sk_xmac_miibus_writereg(sc_if, phy, reg, val) 591 struct sk_if_softc *sc_if; 592 int phy, reg, val; 593 { 594 int i; 595 596 SK_IF_LOCK(sc_if); 597 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8)); 598 for (i = 0; i < SK_TIMEOUT; i++) { 599 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY)) 600 break; 601 } 602 603 if (i == SK_TIMEOUT) { 604 printf("sk%d: phy failed to come ready\n", sc_if->sk_unit); 605 SK_IF_UNLOCK(sc_if); 606 return(ETIMEDOUT); 607 } 608 609 SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val); 610 for (i = 0; i < SK_TIMEOUT; i++) { 611 DELAY(1); 612 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY)) 613 break; 614 } 615 SK_IF_UNLOCK(sc_if); 616 if (i == SK_TIMEOUT) 617 printf("sk%d: phy write timed out\n", sc_if->sk_unit); 618 619 return(0); 620 } 621 622 static void 623 sk_xmac_miibus_statchg(sc_if) 624 struct sk_if_softc *sc_if; 625 { 626 struct mii_data *mii; 627 628 mii = device_get_softc(sc_if->sk_miibus); 629 630 SK_IF_LOCK(sc_if); 631 /* 632 * If this is a GMII PHY, manually set the XMAC's 633 * duplex mode accordingly. 
634 */ 635 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) { 636 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 637 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX); 638 } else { 639 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX); 640 } 641 } 642 SK_IF_UNLOCK(sc_if); 643 644 return; 645 } 646 647 static int 648 sk_marv_miibus_readreg(sc_if, phy, reg) 649 struct sk_if_softc *sc_if; 650 int phy, reg; 651 { 652 u_int16_t val; 653 int i; 654 655 if (phy != 0 || 656 (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER && 657 sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) { 658 return(0); 659 } 660 661 SK_IF_LOCK(sc_if); 662 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) | 663 YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ); 664 665 for (i = 0; i < SK_TIMEOUT; i++) { 666 DELAY(1); 667 val = SK_YU_READ_2(sc_if, YUKON_SMICR); 668 if (val & YU_SMICR_READ_VALID) 669 break; 670 } 671 672 if (i == SK_TIMEOUT) { 673 printf("sk%d: phy failed to come ready\n", 674 sc_if->sk_unit); 675 SK_IF_UNLOCK(sc_if); 676 return(0); 677 } 678 679 val = SK_YU_READ_2(sc_if, YUKON_SMIDR); 680 SK_IF_UNLOCK(sc_if); 681 682 return(val); 683 } 684 685 static int 686 sk_marv_miibus_writereg(sc_if, phy, reg, val) 687 struct sk_if_softc *sc_if; 688 int phy, reg, val; 689 { 690 int i; 691 692 SK_IF_LOCK(sc_if); 693 SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val); 694 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) | 695 YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE); 696 697 for (i = 0; i < SK_TIMEOUT; i++) { 698 DELAY(1); 699 if (SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY) 700 break; 701 } 702 SK_IF_UNLOCK(sc_if); 703 704 return(0); 705 } 706 707 static void 708 sk_marv_miibus_statchg(sc_if) 709 struct sk_if_softc *sc_if; 710 { 711 return; 712 } 713 714 #define XMAC_POLY 0xEDB88320 715 #define GMAC_POLY 0x04C11DB7L 716 #define HASH_BITS 6 717 718 static u_int32_t 719 sk_xmchash(addr) 720 const uint8_t *addr; 721 { 722 uint32_t crc; 723 int idx, bit; 724 uint8_t data; 725 726 /* Compute CRC for the address 
value. */ 727 crc = 0xFFFFFFFF; /* initial value */ 728 729 for (idx = 0; idx < 6; idx++) { 730 for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) 731 crc = (crc >> 1) ^ (((crc ^ data) & 1) ? XMAC_POLY : 0); 732 } 733 734 return (~crc & ((1 << HASH_BITS) - 1)); 735 } 736 737 static u_int32_t 738 sk_gmchash(addr) 739 const uint8_t *addr; 740 { 741 u_int32_t crc; 742 uint idx, bit; 743 uint8_t tmpData, data; 744 745 /* Compute CRC for the address value. */ 746 crc = 0xFFFFFFFF; /* initial value */ 747 748 for (idx = 0; idx < 6; idx++) { 749 data = *addr++; 750 751 /* Change bit order in byte. */ 752 tmpData = data; 753 for (bit = 0; bit < 8; bit++) { 754 if (tmpData & 1) { 755 data |= 1 << (7 - bit); 756 } else { 757 data &= ~(1 << (7 - bit)); 758 } 759 tmpData >>= 1; 760 } 761 762 crc ^= (data << 24); 763 for (bit = 0; bit < 8; bit++) { 764 if (crc & 0x80000000) { 765 crc = (crc << 1) ^ GMAC_POLY; 766 } else { 767 crc <<= 1; 768 } 769 } 770 } 771 772 return (crc & ((1 << HASH_BITS) - 1)); 773 } 774 775 static void 776 sk_setfilt(sc_if, addr, slot) 777 struct sk_if_softc *sc_if; 778 caddr_t addr; 779 int slot; 780 { 781 int base; 782 783 base = XM_RXFILT_ENTRY(slot); 784 785 SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0])); 786 SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2])); 787 SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4])); 788 789 return; 790 } 791 792 static void 793 sk_setmulti(sc_if) 794 struct sk_if_softc *sc_if; 795 { 796 struct sk_softc *sc = sc_if->sk_softc; 797 struct ifnet *ifp = &sc_if->arpcom.ac_if; 798 u_int32_t hashes[2] = { 0, 0 }; 799 int h = 0, i; 800 struct ifmultiaddr *ifma; 801 u_int8_t dummy[] = { 0, 0, 0, 0, 0 ,0 }; 802 803 804 /* First, zot all the existing filters. 
*/ 805 switch(sc->sk_type) { 806 case SK_GENESIS: 807 for (i = 1; i < XM_RXFILT_MAX; i++) 808 sk_setfilt(sc_if, (caddr_t)&dummy, i); 809 810 SK_XM_WRITE_4(sc_if, XM_MAR0, 0); 811 SK_XM_WRITE_4(sc_if, XM_MAR2, 0); 812 break; 813 case SK_YUKON: 814 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0); 815 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0); 816 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0); 817 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0); 818 break; 819 } 820 821 /* Now program new ones. */ 822 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 823 hashes[0] = 0xFFFFFFFF; 824 hashes[1] = 0xFFFFFFFF; 825 } else { 826 i = 1; 827 TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) { 828 if (ifma->ifma_addr->sa_family != AF_LINK) 829 continue; 830 /* 831 * Program the first XM_RXFILT_MAX multicast groups 832 * into the perfect filter. For all others, 833 * use the hash table. 834 */ 835 if (sc->sk_type == SK_GENESIS && i < XM_RXFILT_MAX) { 836 sk_setfilt(sc_if, 837 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i); 838 i++; 839 continue; 840 } 841 842 switch(sc->sk_type) { 843 case SK_GENESIS: 844 h = sk_xmchash( 845 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 846 break; 847 case SK_YUKON: 848 h = sk_gmchash( 849 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 850 break; 851 } 852 if (h < 32) 853 hashes[0] |= (1 << h); 854 else 855 hashes[1] |= (1 << (h - 32)); 856 } 857 } 858 859 switch(sc->sk_type) { 860 case SK_GENESIS: 861 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH| 862 XM_MODE_RX_USE_PERFECT); 863 SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]); 864 SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]); 865 break; 866 case SK_YUKON: 867 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff); 868 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff); 869 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff); 870 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff); 871 break; 872 } 873 874 return; 875 } 876 877 static void 878 sk_setpromisc(sc_if) 879 
struct sk_if_softc *sc_if; 880 { 881 struct sk_softc *sc = sc_if->sk_softc; 882 struct ifnet *ifp = &sc_if->arpcom.ac_if; 883 884 switch(sc->sk_type) { 885 case SK_GENESIS: 886 if (ifp->if_flags & IFF_PROMISC) { 887 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC); 888 } else { 889 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC); 890 } 891 break; 892 case SK_YUKON: 893 if (ifp->if_flags & IFF_PROMISC) { 894 SK_YU_CLRBIT_2(sc_if, YUKON_RCR, 895 YU_RCR_UFLEN | YU_RCR_MUFLEN); 896 } else { 897 SK_YU_SETBIT_2(sc_if, YUKON_RCR, 898 YU_RCR_UFLEN | YU_RCR_MUFLEN); 899 } 900 break; 901 } 902 903 return; 904 } 905 906 static int 907 sk_init_rx_ring(sc_if) 908 struct sk_if_softc *sc_if; 909 { 910 struct sk_chain_data *cd = &sc_if->sk_cdata; 911 struct sk_ring_data *rd = sc_if->sk_rdata; 912 int i; 913 914 bzero((char *)rd->sk_rx_ring, 915 sizeof(struct sk_rx_desc) * SK_RX_RING_CNT); 916 917 for (i = 0; i < SK_RX_RING_CNT; i++) { 918 cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i]; 919 if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS) 920 return(ENOBUFS); 921 if (i == (SK_RX_RING_CNT - 1)) { 922 cd->sk_rx_chain[i].sk_next = 923 &cd->sk_rx_chain[0]; 924 rd->sk_rx_ring[i].sk_next = 925 vtophys(&rd->sk_rx_ring[0]); 926 } else { 927 cd->sk_rx_chain[i].sk_next = 928 &cd->sk_rx_chain[i + 1]; 929 rd->sk_rx_ring[i].sk_next = 930 vtophys(&rd->sk_rx_ring[i + 1]); 931 } 932 } 933 934 sc_if->sk_cdata.sk_rx_prod = 0; 935 sc_if->sk_cdata.sk_rx_cons = 0; 936 937 return(0); 938 } 939 940 static void 941 sk_init_tx_ring(sc_if) 942 struct sk_if_softc *sc_if; 943 { 944 struct sk_chain_data *cd = &sc_if->sk_cdata; 945 struct sk_ring_data *rd = sc_if->sk_rdata; 946 int i; 947 948 bzero((char *)sc_if->sk_rdata->sk_tx_ring, 949 sizeof(struct sk_tx_desc) * SK_TX_RING_CNT); 950 951 for (i = 0; i < SK_TX_RING_CNT; i++) { 952 cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i]; 953 if (i == (SK_TX_RING_CNT - 1)) { 954 cd->sk_tx_chain[i].sk_next = 955 &cd->sk_tx_chain[0]; 956 
rd->sk_tx_ring[i].sk_next = 957 vtophys(&rd->sk_tx_ring[0]); 958 } else { 959 cd->sk_tx_chain[i].sk_next = 960 &cd->sk_tx_chain[i + 1]; 961 rd->sk_tx_ring[i].sk_next = 962 vtophys(&rd->sk_tx_ring[i + 1]); 963 } 964 } 965 966 sc_if->sk_cdata.sk_tx_prod = 0; 967 sc_if->sk_cdata.sk_tx_cons = 0; 968 sc_if->sk_cdata.sk_tx_cnt = 0; 969 970 return; 971 } 972 973 static int 974 sk_newbuf(sc_if, c, m) 975 struct sk_if_softc *sc_if; 976 struct sk_chain *c; 977 struct mbuf *m; 978 { 979 struct mbuf *m_new = NULL; 980 struct sk_rx_desc *r; 981 982 if (m == NULL) { 983 caddr_t *buf = NULL; 984 985 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 986 if (m_new == NULL) 987 return(ENOBUFS); 988 989 /* Allocate the jumbo buffer */ 990 buf = sk_jalloc(sc_if); 991 if (buf == NULL) { 992 m_freem(m_new); 993 #ifdef SK_VERBOSE 994 printf("sk%d: jumbo allocation failed " 995 "-- packet dropped!\n", sc_if->sk_unit); 996 #endif 997 return(ENOBUFS); 998 } 999 1000 /* Attach the buffer to the mbuf */ 1001 MEXTADD(m_new, buf, SK_JLEN, sk_jfree, 1002 (struct sk_if_softc *)sc_if, 0, EXT_NET_DRV); 1003 m_new->m_data = (void *)buf; 1004 m_new->m_pkthdr.len = m_new->m_len = SK_JLEN; 1005 } else { 1006 /* 1007 * We're re-using a previously allocated mbuf; 1008 * be sure to re-init pointers and lengths to 1009 * default values. 1010 */ 1011 m_new = m; 1012 m_new->m_len = m_new->m_pkthdr.len = SK_JLEN; 1013 m_new->m_data = m_new->m_ext.ext_buf; 1014 } 1015 1016 /* 1017 * Adjust alignment so packet payload begins on a 1018 * longword boundary. Mandatory for Alpha, useful on 1019 * x86 too. 1020 */ 1021 m_adj(m_new, ETHER_ALIGN); 1022 1023 r = c->sk_desc; 1024 c->sk_mbuf = m_new; 1025 r->sk_data_lo = vtophys(mtod(m_new, caddr_t)); 1026 r->sk_ctl = m_new->m_len | SK_RXSTAT; 1027 1028 return(0); 1029 } 1030 1031 /* 1032 * Allocate jumbo buffer storage. The SysKonnect adapters support 1033 * "jumbograms" (9K frames), although SysKonnect doesn't currently 1034 * use them in their drivers. 
In order for us to use them, we need 1035 * large 9K receive buffers, however standard mbuf clusters are only 1036 * 2048 bytes in size. Consequently, we need to allocate and manage 1037 * our own jumbo buffer pool. Fortunately, this does not require an 1038 * excessive amount of additional code. 1039 */ 1040 static int 1041 sk_alloc_jumbo_mem(sc_if) 1042 struct sk_if_softc *sc_if; 1043 { 1044 caddr_t ptr; 1045 register int i; 1046 struct sk_jpool_entry *entry; 1047 1048 /* Grab a big chunk o' storage. */ 1049 sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF, 1050 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); 1051 1052 if (sc_if->sk_cdata.sk_jumbo_buf == NULL) { 1053 printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit); 1054 return(ENOBUFS); 1055 } 1056 1057 SLIST_INIT(&sc_if->sk_jfree_listhead); 1058 SLIST_INIT(&sc_if->sk_jinuse_listhead); 1059 1060 /* 1061 * Now divide it up into 9K pieces and save the addresses 1062 * in an array. 1063 */ 1064 ptr = sc_if->sk_cdata.sk_jumbo_buf; 1065 for (i = 0; i < SK_JSLOTS; i++) { 1066 sc_if->sk_cdata.sk_jslots[i] = ptr; 1067 ptr += SK_JLEN; 1068 entry = malloc(sizeof(struct sk_jpool_entry), 1069 M_DEVBUF, M_NOWAIT); 1070 if (entry == NULL) { 1071 free(sc_if->sk_cdata.sk_jumbo_buf, M_DEVBUF); 1072 sc_if->sk_cdata.sk_jumbo_buf = NULL; 1073 printf("sk%d: no memory for jumbo " 1074 "buffer queue!\n", sc_if->sk_unit); 1075 return(ENOBUFS); 1076 } 1077 entry->slot = i; 1078 SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, 1079 entry, jpool_entries); 1080 } 1081 1082 return(0); 1083 } 1084 1085 /* 1086 * Allocate a jumbo buffer. 
1087 */ 1088 static void * 1089 sk_jalloc(sc_if) 1090 struct sk_if_softc *sc_if; 1091 { 1092 struct sk_jpool_entry *entry; 1093 1094 entry = SLIST_FIRST(&sc_if->sk_jfree_listhead); 1095 1096 if (entry == NULL) { 1097 #ifdef SK_VERBOSE 1098 printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit); 1099 #endif 1100 return(NULL); 1101 } 1102 1103 SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries); 1104 SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries); 1105 return(sc_if->sk_cdata.sk_jslots[entry->slot]); 1106 } 1107 1108 /* 1109 * Release a jumbo buffer. 1110 */ 1111 static void 1112 sk_jfree(buf, args) 1113 void *buf; 1114 void *args; 1115 { 1116 struct sk_if_softc *sc_if; 1117 int i; 1118 struct sk_jpool_entry *entry; 1119 1120 /* Extract the softc struct pointer. */ 1121 sc_if = (struct sk_if_softc *)args; 1122 1123 if (sc_if == NULL) 1124 panic("sk_jfree: didn't get softc pointer!"); 1125 1126 /* calculate the slot this buffer belongs to */ 1127 i = ((vm_offset_t)buf 1128 - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN; 1129 1130 if ((i < 0) || (i >= SK_JSLOTS)) 1131 panic("sk_jfree: asked to free buffer that we don't manage!"); 1132 1133 entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead); 1134 if (entry == NULL) 1135 panic("sk_jfree: buffer not in use!"); 1136 entry->slot = i; 1137 SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries); 1138 SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries); 1139 1140 return; 1141 } 1142 1143 /* 1144 * Set media options. 1145 */ 1146 static int 1147 sk_ifmedia_upd(ifp) 1148 struct ifnet *ifp; 1149 { 1150 struct sk_if_softc *sc_if = ifp->if_softc; 1151 struct mii_data *mii; 1152 1153 mii = device_get_softc(sc_if->sk_miibus); 1154 sk_init(sc_if); 1155 mii_mediachg(mii); 1156 1157 return(0); 1158 } 1159 1160 /* 1161 * Report current media status. 
 */
static void
sk_ifmedia_sts(ifp, ifmr)
    struct ifnet *ifp;
    struct ifmediareq *ifmr;
{
    struct sk_if_softc *sc_if;
    struct mii_data *mii;

    sc_if = ifp->if_softc;
    mii = device_get_softc(sc_if->sk_miibus);

    /* Poll the PHY for current state and copy it out to the caller. */
    mii_pollstat(mii);
    ifmr->ifm_active = mii->mii_media_active;
    ifmr->ifm_status = mii->mii_media_status;

    return;
}

/*
 * Handle interface ioctls: MTU, flags, multicast filter and media.
 * Anything unrecognized is passed on to ether_ioctl().
 */
static int
sk_ioctl(ifp, command, data)
    struct ifnet *ifp;
    u_long command;
    caddr_t data;
{
    struct sk_if_softc *sc_if = ifp->if_softc;
    struct ifreq *ifr = (struct ifreq *) data;
    int error = 0;
    struct mii_data *mii;

    SK_IF_LOCK(sc_if);

    switch(command) {
    case SIOCSIFMTU:
        /* NOTE(review): only the SK_JUMBO_MTU upper bound is
         * enforced; there is no minimum-MTU check here -- confirm
         * upper layers sanitize ifr_mtu. */
        if (ifr->ifr_mtu > SK_JUMBO_MTU)
            error = EINVAL;
        else {
            ifp->if_mtu = ifr->ifr_mtu;
            sk_init(sc_if);
        }
        break;
    case SIOCSIFFLAGS:
        if (ifp->if_flags & IFF_UP) {
            if (ifp->if_flags & IFF_RUNNING) {
                /*
                 * Only reprogram the RX filter when the
                 * promiscuous bit actually changed.
                 */
                if ((ifp->if_flags ^ sc_if->sk_if_flags)
                    & IFF_PROMISC) {
                    sk_setpromisc(sc_if);
                    sk_setmulti(sc_if);
                }
            } else
                sk_init(sc_if);
        } else {
            if (ifp->if_flags & IFF_RUNNING)
                sk_stop(sc_if);
        }
        sc_if->sk_if_flags = ifp->if_flags;
        error = 0;
        break;
    case SIOCADDMULTI:
    case SIOCDELMULTI:
        sk_setmulti(sc_if);
        error = 0;
        break;
    case SIOCGIFMEDIA:
    case SIOCSIFMEDIA:
        mii = device_get_softc(sc_if->sk_miibus);
        error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
        break;
    default:
        error = ether_ioctl(ifp, command, data);
        break;
    }

    SK_IF_UNLOCK(sc_if);

    return(error);
}

/*
 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
skc_probe(dev)
    device_t dev;
{
    struct sk_softc *sc;
    struct sk_type *t = sk_devs;

    sc = device_get_softc(dev);

    /* Walk the supported-device table looking for a PCI id match. */
    while(t->sk_name != NULL) {
        if ((pci_get_vendor(dev) == t->sk_vid) &&
            (pci_get_device(dev) == t->sk_did)) {
            device_set_desc(dev, t->sk_name);
            return(0);
        }
        t++;
    }

    return(ENXIO);
}

/*
 * Force the GEnesis into reset, then bring it out of reset.
 */
static void
sk_reset(sc)
    struct sk_softc *sc;
{
    CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
    CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
    if (sc->sk_type == SK_YUKON)
        CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);

    DELAY(1000);
    CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
    DELAY(2);
    CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
    if (sc->sk_type == SK_YUKON)
        CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);

    if (sc->sk_type == SK_GENESIS) {
        /* Configure packet arbiter */
        sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
        sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
        sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
        sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
        sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
    }

    /* Enable RAM interface */
    sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);

    /*
     * Configure interrupt moderation. The moderation timer
     * defers interrupts specified in the interrupt moderation
     * timer mask based on the timeout specified in the interrupt
     * moderation timer init register. Each bit in the timer
     * register represents 18.825ns, so to specify a timeout in
     * microseconds, we have to multiply by 54.
     */
    sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(200));
    sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
        SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
    sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);

    return;
}

/* Identify an XMAC/Yukon child device created by skc_attach(). */
static int
sk_probe(dev)
    device_t dev;
{
    struct sk_softc *sc;

    sc = device_get_softc(device_get_parent(dev));

    /*
     * Not much to do here. We always know there will be
     * at least one XMAC present, and if there are two,
     * skc_attach() will create a second device instance
     * for us.
     */
    switch (sc->sk_type) {
    case SK_GENESIS:
        device_set_desc(dev, "XaQti Corp. XMAC II");
        break;
    case SK_YUKON:
        device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon");
        break;
    }

    return(0);
}

/*
 * Each XMAC chip is attached as a separate logical IP interface.
 * Single port cards will have only one logical interface of course.
 */
static int
sk_attach(dev)
    device_t dev;
{
    struct sk_softc *sc;
    struct sk_if_softc *sc_if;
    struct ifnet *ifp;
    int i, port, error;

    if (dev == NULL)
        return(EINVAL);

    error = 0;
    sc_if = device_get_softc(dev);
    sc = device_get_softc(device_get_parent(dev));
    SK_LOCK(sc);
    /* The port number was handed down as an ivar by skc_attach(). */
    port = *(int *)device_get_ivars(dev);
    free(device_get_ivars(dev), M_DEVBUF);
    device_set_ivars(dev, NULL);

    sc_if->sk_dev = dev;
    sc_if->sk_unit = device_get_unit(dev);
    sc_if->sk_port = port;
    sc_if->sk_softc = sc;
    sc->sk_if[port] = sc_if;
    if (port == SK_PORT_A)
        sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
    if (port == SK_PORT_B)
        sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;

    /*
     * Get station address for this interface. Note that
     * dual port cards actually come with three station
     * addresses: one for each port, plus an extra. The
     * extra one is used by the SysKonnect driver software
     * as a 'virtual' station address for when both ports
     * are operating in failover mode. Currently we don't
     * use this extra address.
     */
    for (i = 0; i < ETHER_ADDR_LEN; i++)
        sc_if->arpcom.ac_enaddr[i] =
            sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);

    /*
     * Set up RAM buffer addresses. The NIC will have a certain
     * amount of SRAM on it, somewhere between 512K and 2MB. We
     * need to divide this up a) between the transmitter and
     * receiver and b) between the two XMACs, if this is a
     * dual port NIC. Our algotithm is to divide up the memory
     * evenly so that everyone gets a fair share.
     */
    if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
        u_int32_t chunk, val;

        chunk = sc->sk_ramsize / 2;
        val = sc->sk_rboff / sizeof(u_int64_t);
        sc_if->sk_rx_ramstart = val;
        val += (chunk / sizeof(u_int64_t));
        sc_if->sk_rx_ramend = val - 1;
        sc_if->sk_tx_ramstart = val;
        val += (chunk / sizeof(u_int64_t));
        sc_if->sk_tx_ramend = val - 1;
    } else {
        u_int32_t chunk, val;

        chunk = sc->sk_ramsize / 4;
        val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
            sizeof(u_int64_t);
        sc_if->sk_rx_ramstart = val;
        val += (chunk / sizeof(u_int64_t));
        sc_if->sk_rx_ramend = val - 1;
        sc_if->sk_tx_ramstart = val;
        val += (chunk / sizeof(u_int64_t));
        sc_if->sk_tx_ramend = val - 1;
    }

    /* Read and save PHY type and set PHY address */
    sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
    switch(sc_if->sk_phytype) {
    case SK_PHYTYPE_XMAC:
        sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
        break;
    case SK_PHYTYPE_BCOM:
        sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
        break;
    case SK_PHYTYPE_MARV_COPPER:
        sc_if->sk_phyaddr = SK_PHYADDR_MARV;
        break;
    default:
        printf("skc%d: unsupported PHY type: %d\n",
            sc->sk_unit, sc_if->sk_phytype);
        error = ENODEV;
        goto fail;
    }

    /* Allocate the descriptor queues. */
    sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF,
        M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

    if (sc_if->sk_rdata == NULL) {
        printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit);
        error = ENOMEM;
        goto fail;
    }

    bzero(sc_if->sk_rdata, sizeof(struct sk_ring_data));

    /* Try to allocate memory for jumbo buffers. */
    if (sk_alloc_jumbo_mem(sc_if)) {
        printf("sk%d: jumbo buffer allocation failed\n",
            sc_if->sk_unit);
        error = ENOMEM;
        goto fail;
    }

    ifp = &sc_if->arpcom.ac_if;
    ifp->if_softc = sc_if;
    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    ifp->if_mtu = ETHERMTU;
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_ioctl = sk_ioctl;
    ifp->if_start = sk_start;
    ifp->if_watchdog = sk_watchdog;
    ifp->if_init = sk_init;
    ifp->if_baudrate = 1000000000;
    ifp->if_snd.ifq_maxlen = SK_TX_RING_CNT - 1;

    callout_handle_init(&sc_if->sk_tick_ch);

    /*
     * Call MI attach routine.
     */
    ether_ifattach(ifp, sc_if->arpcom.ac_enaddr);

    /*
     * Do miibus setup.
     */
    switch (sc->sk_type) {
    case SK_GENESIS:
        sk_init_xmac(sc_if);
        break;
    case SK_YUKON:
        sk_init_yukon(sc_if);
        break;
    }

    if (mii_phy_probe(dev, &sc_if->sk_miibus,
        sk_ifmedia_upd, sk_ifmedia_sts)) {
        printf("skc%d: no PHY found!\n", sc_if->sk_unit);
        ether_ifdetach(ifp);
        error = ENXIO;
        goto fail;
    }

fail:
    SK_UNLOCK(sc);
    if (error) {
        /* Access should be ok even though lock has been dropped */
        sc->sk_if[port] = NULL;
        sk_detach(dev);
    }

    return(error);
}

/*
 * Attach the interface.
Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
skc_attach(dev)
    device_t dev;
{
    struct sk_softc *sc;
    int unit, error = 0, rid, *port;

    sc = device_get_softc(dev);
    unit = device_get_unit(dev);

    mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
        MTX_DEF | MTX_RECURSE);
#ifndef BURN_BRIDGES
    /*
     * Handle power management nonsense.
     */
    if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
        u_int32_t iobase, membase, irq;

        /* Save important PCI config data. */
        iobase = pci_read_config(dev, SK_PCI_LOIO, 4);
        membase = pci_read_config(dev, SK_PCI_LOMEM, 4);
        irq = pci_read_config(dev, SK_PCI_INTLINE, 4);

        /* Reset the power state. */
        printf("skc%d: chip is in D%d power mode "
            "-- setting to D0\n", unit,
            pci_get_powerstate(dev));
        pci_set_powerstate(dev, PCI_POWERSTATE_D0);

        /* Restore PCI config data. */
        pci_write_config(dev, SK_PCI_LOIO, iobase, 4);
        pci_write_config(dev, SK_PCI_LOMEM, membase, 4);
        pci_write_config(dev, SK_PCI_INTLINE, irq, 4);
    }
#endif
    /*
     * Map control/status registers.
     */
    pci_enable_busmaster(dev);

    rid = SK_RID;
    sc->sk_res = bus_alloc_resource_any(dev, SK_RES, &rid, RF_ACTIVE);

    if (sc->sk_res == NULL) {
        printf("sk%d: couldn't map ports/memory\n", unit);
        error = ENXIO;
        goto fail;
    }

    sc->sk_btag = rman_get_bustag(sc->sk_res);
    sc->sk_bhandle = rman_get_bushandle(sc->sk_res);

    /* Allocate interrupt */
    rid = 0;
    sc->sk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
        RF_SHAREABLE | RF_ACTIVE);

    if (sc->sk_irq == NULL) {
        printf("skc%d: couldn't map interrupt\n", unit);
        error = ENXIO;
        goto fail;
    }

    /* Set adapter type */
    switch (pci_get_device(dev)) {
    case DEVICEID_SK_V1:
        sc->sk_type = SK_GENESIS;
        break;
    case DEVICEID_SK_V2:
    case DEVICEID_3COM_3C940:
    case DEVICEID_LINKSYS_EG1032:
        sc->sk_type = SK_YUKON;
        break;
    }

    /* Reset the adapter. */
    sk_reset(sc);

    sc->sk_unit = unit;

    /* Read and save vital product data from EEPROM. */
    sk_vpd_read(sc);

    if (sc->sk_type == SK_GENESIS) {
        /* Read and save RAM size and RAMbuffer offset */
        switch(sk_win_read_1(sc, SK_EPROM0)) {
        case SK_RAMSIZE_512K_64:
            sc->sk_ramsize = 0x80000;
            sc->sk_rboff = SK_RBOFF_0;
            break;
        case SK_RAMSIZE_1024K_64:
            sc->sk_ramsize = 0x100000;
            sc->sk_rboff = SK_RBOFF_80000;
            break;
        case SK_RAMSIZE_1024K_128:
            sc->sk_ramsize = 0x100000;
            sc->sk_rboff = SK_RBOFF_0;
            break;
        case SK_RAMSIZE_2048K_128:
            sc->sk_ramsize = 0x200000;
            sc->sk_rboff = SK_RBOFF_0;
            break;
        default:
            printf("skc%d: unknown ram size: %d\n",
                sc->sk_unit, sk_win_read_1(sc, SK_EPROM0));
            error = ENXIO;
            goto fail;
        }
    } else {
        sc->sk_ramsize = 0x20000;
        sc->sk_rboff = SK_RBOFF_0;
    }

    /* Read and save physical media type */
    switch(sk_win_read_1(sc, SK_PMDTYPE)) {
    case SK_PMD_1000BASESX:
        sc->sk_pmd = IFM_1000_SX;
        break;
    case SK_PMD_1000BASELX:
        sc->sk_pmd = IFM_1000_LX;
        break;
    case SK_PMD_1000BASECX:
        sc->sk_pmd = IFM_1000_CX;
        break;
    case SK_PMD_1000BASETX:
        sc->sk_pmd = IFM_1000_T;
        break;
    default:
        printf("skc%d: unknown media type: 0x%x\n",
            sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE));
        error = ENXIO;
        goto fail;
    }

    /* Announce the product name. */
    printf("skc%d: %s\n", sc->sk_unit, sc->sk_vpd_prodname);
    sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
    /* NOTE(review): this M_NOWAIT malloc result is dereferenced
     * without a NULL check; under memory pressure it would fault --
     * confirm and add a check (same applies to the port B copy). */
    port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
    *port = SK_PORT_A;
    device_set_ivars(sc->sk_devs[SK_PORT_A], port);

    if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
        sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
        port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
        *port = SK_PORT_B;
        device_set_ivars(sc->sk_devs[SK_PORT_B], port);
    }

    /* Turn on the 'driver is loaded' LED. */
    CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);

    bus_generic_attach(dev);

    /* Hook interrupt last to avoid having to lock softc */
    error = bus_setup_intr(dev, sc->sk_irq, INTR_TYPE_NET,
        sk_intr, sc, &sc->sk_intrhand);

    if (error) {
        printf("skc%d: couldn't set up irq\n", unit);
        goto fail;
    }

fail:
    if (error)
        skc_detach(dev);

    return(error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
sk_detach(dev)
    device_t dev;
{
    struct sk_if_softc *sc_if;
    struct ifnet *ifp;

    sc_if = device_get_softc(dev);
    KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx),
        ("sk mutex not initialized in sk_detach"));
    SK_IF_LOCK(sc_if);

    ifp = &sc_if->arpcom.ac_if;
    /* These should only be active if attach_xmac succeeded */
    if (device_is_attached(dev)) {
        sk_stop(sc_if);
        ether_ifdetach(ifp);
    }
    if (sc_if->sk_miibus)
        device_delete_child(dev, sc_if->sk_miibus);
    bus_generic_detach(dev);
    /* Both buffers came from contigmalloc(); release accordingly. */
    if (sc_if->sk_cdata.sk_jumbo_buf)
        contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF);
    if (sc_if->sk_rdata) {
        contigfree(sc_if->sk_rdata, sizeof(struct sk_ring_data),
            M_DEVBUF);
    }
    SK_IF_UNLOCK(sc_if);

    return(0);
}

/* Detach the controller: delete child ports, release bus resources. */
static int
skc_detach(dev)
    device_t dev;
{
    struct sk_softc *sc;

    sc = device_get_softc(dev);
    KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized"));
    SK_LOCK(sc);

    if (device_is_alive(dev)) {
        if (sc->sk_devs[SK_PORT_A] != NULL)
            device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
        if (sc->sk_devs[SK_PORT_B] != NULL)
            device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
        bus_generic_detach(dev);
    }

    if (sc->sk_intrhand)
        bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
    if (sc->sk_irq)
        bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
    if (sc->sk_res)
        bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);

    SK_UNLOCK(sc);
    mtx_destroy(&sc->sk_mtx);

    return(0);
}

/*
 * Encapsulate an mbuf chain into TX descriptors starting at *txidx.
 * Returns ENOBUFS (without consuming the chain) if the ring is full;
 * on success advances *txidx past the frame.
 */
static int
sk_encap(sc_if, m_head, txidx)
    struct sk_if_softc *sc_if;
    struct mbuf *m_head;
    u_int32_t *txidx;
{
    struct sk_tx_desc *f = NULL;
    struct mbuf *m;
    u_int32_t frag, cur, cnt = 0;

    m = m_head;
    cur = frag = *txidx;

    /*
     * Start packing the mbufs in this chain into
     * the fragment pointers. Stop when we run out
     * of fragments or hit the end of the mbuf chain.
     */
    for (m = m_head; m != NULL; m = m->m_next) {
        if (m->m_len != 0) {
            if ((SK_TX_RING_CNT -
                (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2)
                return(ENOBUFS);
            f = &sc_if->sk_rdata->sk_tx_ring[frag];
            f->sk_data_lo = vtophys(mtod(m, vm_offset_t));
            f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT;
            /* OWN is withheld from the first fragment until the
             * whole frame is built (set at the end, below). */
            if (cnt == 0)
                f->sk_ctl |= SK_TXCTL_FIRSTFRAG;
            else
                f->sk_ctl |= SK_TXCTL_OWN;
            cur = frag;
            SK_INC(frag, SK_TX_RING_CNT);
            cnt++;
        }
    }

    /* NOTE(review): the loop above always runs to completion, so m is
     * always NULL here; this check looks vestigial -- confirm. */
    if (m != NULL)
        return(ENOBUFS);

    sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
        SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR;
    sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
    /* Hand the first descriptor to the chip last. */
    sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN;
    sc_if->sk_cdata.sk_tx_cnt += cnt;

    *txidx = frag;

    return(0);
}

/* if_start handler: drain the send queue into the TX ring. */
static void
sk_start(ifp)
    struct ifnet *ifp;
{
    struct sk_softc *sc;
    struct sk_if_softc *sc_if;
    struct mbuf *m_head = NULL;
    u_int32_t idx;

    sc_if = ifp->if_softc;
    sc =
sc_if->sk_softc;

    SK_IF_LOCK(sc_if);

    idx = sc_if->sk_cdata.sk_tx_prod;

    while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
        IF_DEQUEUE(&ifp->if_snd, m_head);
        if (m_head == NULL)
            break;

        /*
         * Pack the data into the transmit ring. If we
         * don't have room, set the OACTIVE flag and wait
         * for the NIC to drain the ring.
         */
        if (sk_encap(sc_if, m_head, &idx)) {
            IF_PREPEND(&ifp->if_snd, m_head);
            ifp->if_flags |= IFF_OACTIVE;
            break;
        }

        /*
         * If there's a BPF listener, bounce a copy of this frame
         * to him.
         */
        BPF_MTAP(ifp, m_head);
    }

    /* Transmit */
    sc_if->sk_cdata.sk_tx_prod = idx;
    CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

    /* Set a timeout in case the chip goes out to lunch. */
    ifp->if_timer = 5;
    SK_IF_UNLOCK(sc_if);

    return;
}


/* Watchdog: the TX timer expired; reinitialize to recover the chip. */
static void
sk_watchdog(ifp)
    struct ifnet *ifp;
{
    struct sk_if_softc *sc_if;

    sc_if = ifp->if_softc;

    printf("sk%d: watchdog timeout\n", sc_if->sk_unit);
    sk_init(sc_if);

    return;
}

/* System shutdown hook: quiesce the controller. */
static void
skc_shutdown(dev)
    device_t dev;
{
    struct sk_softc *sc;

    sc = device_get_softc(dev);
    SK_LOCK(sc);

    /* Turn off the 'driver is loaded' LED. */
    CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);

    /*
     * Reset the GEnesis controller. Doing this should also
     * assert the resets on the attached XMAC(s).
     */
    sk_reset(sc);
    SK_UNLOCK(sc);

    return;
}

/* Receive-completion handler: harvest frames the chip released. */
static void
sk_rxeof(sc_if)
    struct sk_if_softc *sc_if;
{
    struct sk_softc *sc;
    struct mbuf *m;
    struct ifnet *ifp;
    struct sk_chain *cur_rx;
    int total_len = 0;
    int i;
    u_int32_t rxstat;

    sc = sc_if->sk_softc;
    ifp = &sc_if->arpcom.ac_if;
    i = sc_if->sk_cdata.sk_rx_prod;
    cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];

    SK_LOCK_ASSERT(sc);

    /* Process descriptors until we hit one the chip still owns. */
    while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) {

        cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
        rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat;
        m = cur_rx->sk_mbuf;
        cur_rx->sk_mbuf = NULL;
        total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl);
        SK_INC(i, SK_RX_RING_CNT);

        if (rxstat & XM_RXSTAT_ERRFRAME) {
            /* Bad frame: recycle the mbuf back into the ring. */
            ifp->if_ierrors++;
            sk_newbuf(sc_if, cur_rx, m);
            continue;
        }

        /*
         * Try to allocate a new jumbo buffer. If that
         * fails, copy the packet to mbufs and put the
         * jumbo buffer back in the ring so it can be
         * re-used. If allocating mbufs fails, then we
         * have to drop the packet.
         */
        if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) {
            struct mbuf *m0;
            m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN,
                ifp, NULL);
            sk_newbuf(sc_if, cur_rx, m);
            if (m0 == NULL) {
                printf("sk%d: no receive buffers "
                    "available -- packet dropped!\n",
                    sc_if->sk_unit);
                ifp->if_ierrors++;
                continue;
            }
            m = m0;
        } else {
            m->m_pkthdr.rcvif = ifp;
            m->m_pkthdr.len = m->m_len = total_len;
        }

        ifp->if_ipackets++;
        /* Drop the lock across the upcall into the stack. */
        SK_UNLOCK(sc);
        (*ifp->if_input)(ifp, m);
        SK_LOCK(sc);
    }

    sc_if->sk_cdata.sk_rx_prod = i;

    return;
}

/* Transmit-completion handler: reclaim descriptors and free mbufs. */
static void
sk_txeof(sc_if)
    struct sk_if_softc *sc_if;
{
    struct sk_tx_desc *cur_tx = NULL;
    struct ifnet *ifp;
    u_int32_t idx;

    ifp = &sc_if->arpcom.ac_if;

    /*
     * Go through our tx ring and free mbufs for those
     * frames that have been sent.
     */
    idx = sc_if->sk_cdata.sk_tx_cons;
    while(idx != sc_if->sk_cdata.sk_tx_prod) {
        cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
        /* Stop at the first descriptor the chip still owns. */
        if (cur_tx->sk_ctl & SK_TXCTL_OWN)
            break;
        if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG)
            ifp->if_opackets++;
        if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
            m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
            sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;
        }
        sc_if->sk_cdata.sk_tx_cnt--;
        SK_INC(idx, SK_TX_RING_CNT);
        ifp->if_timer = 0;
    }

    sc_if->sk_cdata.sk_tx_cons = idx;

    if (cur_tx != NULL)
        ifp->if_flags &= ~IFF_OACTIVE;

    return;
}

/* Periodic link tick: watch for XMAC link resync after link loss. */
static void
sk_tick(xsc_if)
    void *xsc_if;
{
    struct sk_if_softc *sc_if;
    struct mii_data *mii;
    struct ifnet *ifp;
    int i;

    sc_if = xsc_if;
    SK_IF_LOCK(sc_if);
    ifp = &sc_if->arpcom.ac_if;
    mii = device_get_softc(sc_if->sk_miibus);

    if (!(ifp->if_flags & IFF_UP)) {
        SK_IF_UNLOCK(sc_if);
return;
    }

    if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
        sk_intr_bcom(sc_if);
        SK_IF_UNLOCK(sc_if);
        return;
    }

    /*
     * According to SysKonnect, the correct way to verify that
     * the link has come back up is to poll bit 0 of the GPIO
     * register three times. This pin has the signal from the
     * link_sync pin connected to it; if we read the same link
     * state 3 times in a row, we know the link is up.
     */
    for (i = 0; i < 3; i++) {
        if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
            break;
    }

    if (i != 3) {
        /* Link not confirmed yet; poll again in one second. */
        sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
        SK_IF_UNLOCK(sc_if);
        return;
    }

    /* Turn the GP0 interrupt back on. */
    SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
    SK_XM_READ_2(sc_if, XM_ISR);
    mii_tick(mii);
    untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);

    SK_IF_UNLOCK(sc_if);
    return;
}

/* Service a Broadcom PHY interrupt (XMAC GMII / 1000baseTX cards). */
static void
sk_intr_bcom(sc_if)
    struct sk_if_softc *sc_if;
{
    struct mii_data *mii;
    struct ifnet *ifp;
    int status;
    mii = device_get_softc(sc_if->sk_miibus);
    ifp = &sc_if->arpcom.ac_if;

    /* Pause MAC TX/RX while we poke at the PHY. */
    SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

    /*
     * Read the PHY interrupt register to make sure
     * we clear any pending interrupts.
     */
    status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR);

    if (!(ifp->if_flags & IFF_RUNNING)) {
        sk_init_xmac(sc_if);
        return;
    }

    if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
        int lstat;
        lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM,
            BRGPHY_MII_AUXSTS);

        if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
            /* Link went away: renegotiate. */
            mii_mediachg(mii);
            /* Turn off the link LED. */
            SK_IF_WRITE_1(sc_if, 0,
                SK_LINKLED1_CTL, SK_LINKLED_OFF);
            sc_if->sk_link = 0;
        } else if (status & BRGPHY_ISR_LNK_CHG) {
            sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
                BRGPHY_MII_IMR, 0xFF00);
            mii_tick(mii);
            sc_if->sk_link = 1;
            /* Turn on the link LED. */
            SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
                SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
                SK_LINKLED_BLINK_OFF);
        } else {
            mii_tick(mii);
            sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
        }
    }

    /* Re-enable MAC TX/RX. */
    SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

    return;
}

/* Service an XMAC MAC-level interrupt (link, FIFO over/underrun). */
static void
sk_intr_xmac(sc_if)
    struct sk_if_softc *sc_if;
{
    struct sk_softc *sc;
    u_int16_t status;

    sc = sc_if->sk_softc;
    status = SK_XM_READ_2(sc_if, XM_ISR);

    /*
     * Link has gone down. Start MII tick timeout to
     * watch for link resync.
     */
    if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
        if (status & XM_ISR_GP0_SET) {
            SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
            sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
        }

        if (status & XM_ISR_AUTONEG_DONE) {
            sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
        }
    }

    if (status & XM_IMR_TX_UNDERRUN)
        SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);

    if (status & XM_IMR_RX_OVERRUN)
        SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);

    /* Re-read to acknowledge/clear any remaining bits. */
    status = SK_XM_READ_2(sc_if, XM_ISR);

    return;
}

/* Service a Yukon GMAC interrupt: read the ISR to acknowledge it. */
static void
sk_intr_yukon(sc_if)
    struct sk_if_softc *sc_if;
{
    int status;

    status = SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);

    return;
}

/* Shared interrupt handler for both ports of the controller. */
static void
sk_intr(xsc)
    void *xsc;
{
    struct sk_softc *sc = xsc;
    struct sk_if_softc *sc_if0 = NULL, *sc_if1 = NULL;
    struct ifnet *ifp0 = NULL, *ifp1 = NULL;
    u_int32_t status;

    SK_LOCK(sc);

    sc_if0 = sc->sk_if[SK_PORT_A];
sc_if1 = sc->sk_if[SK_PORT_B];

    if (sc_if0 != NULL)
        ifp0 = &sc_if0->arpcom.ac_if;
    if (sc_if1 != NULL)
        ifp1 = &sc_if1->arpcom.ac_if;

    /* Loop until no enabled interrupt source remains asserted. */
    for (;;) {
        status = CSR_READ_4(sc, SK_ISSR);
        if (!(status & sc->sk_intrmask))
            break;

        /* Handle receive interrupts first. */
        /* NOTE(review): the port-2 (RX2/TX2/MAC2) paths pass sc_if1
         * and dereference ifp0/ifp1 without NULL checks; this relies
         * on those interrupt bits never firing on single-port cards
         * -- confirm against the interrupt mask setup. */
        if (status & SK_ISR_RX1_EOF) {
            sk_rxeof(sc_if0);
            CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
                SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
        }
        if (status & SK_ISR_RX2_EOF) {
            sk_rxeof(sc_if1);
            CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
                SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
        }

        /* Then transmit interrupts. */
        if (status & SK_ISR_TX1_S_EOF) {
            sk_txeof(sc_if0);
            CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
                SK_TXBMU_CLR_IRQ_EOF);
        }
        if (status & SK_ISR_TX2_S_EOF) {
            sk_txeof(sc_if1);
            CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
                SK_TXBMU_CLR_IRQ_EOF);
        }

        /* Then MAC interrupts. */
        if (status & SK_ISR_MAC1 && ifp0->if_flags & IFF_RUNNING) {
            if (sc->sk_type == SK_GENESIS)
                sk_intr_xmac(sc_if0);
            else
                sk_intr_yukon(sc_if0);
        }

        if (status & SK_ISR_MAC2 && ifp1->if_flags & IFF_RUNNING) {
            if (sc->sk_type == SK_GENESIS)
                sk_intr_xmac(sc_if1);
            else
                sk_intr_yukon(sc_if1);
        }

        if (status & SK_ISR_EXTERNAL_REG) {
            if (ifp0 != NULL &&
                sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
                sk_intr_bcom(sc_if0);
            if (ifp1 != NULL &&
                sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
                sk_intr_bcom(sc_if1);
        }
    }

    CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

    /* Restart transmission if packets queued up while we worked. */
    if (ifp0 != NULL && ifp0->if_snd.ifq_head != NULL)
        sk_start(ifp0);
    if (ifp1 != NULL && ifp1->if_snd.ifq_head != NULL)
        sk_start(ifp1);

    SK_UNLOCK(sc);

    return;
}

/* Bring the XMAC (GEnesis) MAC out of reset and program it. */
static void
sk_init_xmac(sc_if)
    struct sk_if_softc *sc_if;
{
    struct sk_softc *sc;
    struct ifnet *ifp;
    /* Magic register writes for buggy early BCM5400 revisions. */
    struct sk_bcom_hack bhack[] = {
    { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
    { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
    { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
    { 0, 0 } };

    sc = sc_if->sk_softc;
    ifp = &sc_if->arpcom.ac_if;

    /* Unreset the XMAC. */
    SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
    DELAY(1000);

    /* Reset the XMAC's internal state. */
    SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);

    /* Save the XMAC II revision */
    sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));

    /*
     * Perform additional initialization for external PHYs,
     * namely for the 1000baseTX cards that use the XMAC's
     * GMII mode.
     */
    if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
        int i = 0;
        u_int32_t val;

        /* Take PHY out of reset. */
        val = sk_win_read_4(sc, SK_GPIO);
        if (sc_if->sk_port == SK_PORT_A)
            val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
        else
            val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
        sk_win_write_4(sc, SK_GPIO, val);

        /* Enable GMII mode on the XMAC. */
        SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);

        sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
            BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
        DELAY(10000);
        sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
            BRGPHY_MII_IMR, 0xFFF0);

        /*
         * Early versions of the BCM5400 apparently have
         * a bug that requires them to have their reserved
         * registers initialized to some magic values. I don't
         * know what the numbers do, I'm just the messenger.
         */
        if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
            == 0x6041) {
            while(bhack[i].reg) {
                sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
                    bhack[i].reg, bhack[i].val);
                i++;
            }
        }
    }

    /* Set station address */
    SK_XM_WRITE_2(sc_if, XM_PAR0,
        *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0]));
    SK_XM_WRITE_2(sc_if, XM_PAR1,
        *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2]));
    SK_XM_WRITE_2(sc_if, XM_PAR2,
        *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4]));
    SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);

    if (ifp->if_flags & IFF_BROADCAST) {
        SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
    } else {
        SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
    }

    /* We don't need the FCS appended to the packet. */
    SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);

    /* We want short frames padded to 60 bytes. */
    SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);

    /*
     * Enable the reception of all error frames. This is is
     * a necessary evil due to the design of the XMAC. The
     * XMAC's receive FIFO is only 8K in size, however jumbo
     * frames can be up to 9000 bytes in length. When bad
     * frame filtering is enabled, the XMAC's RX FIFO operates
     * in 'store and forward' mode. For this to work, the
     * entire frame has to fit into the FIFO, but that means
     * that jumbo frames larger than 8192 bytes will be
     * truncated. Disabling all bad frame filtering causes
     * the RX FIFO to operate in streaming mode, in which
     * case the XMAC will start transfering frames out of the
     * RX FIFO as soon as the FIFO threshold is reached.
     */
    SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
        XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
        XM_MODE_RX_INRANGELEN);

    if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
        SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
    else
        SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);

    /*
     * Bump up the transmit threshold. This helps hold off transmit
     * underruns when we're blasting traffic from both ports at once.
     */
    SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);

    /* Set promiscuous mode */
    sk_setpromisc(sc_if);

    /* Set multicast filter */
    sk_setmulti(sc_if);

    /* Clear and enable interrupts */
    SK_XM_READ_2(sc_if, XM_ISR);
    if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
        SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
    else
        SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

    /* Configure MAC arbiter */
    switch(sc_if->sk_xmac_rev) {
    case XM_XMAC_REV_B2:
        sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
        sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
        sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
        sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
        sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
        sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
        sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
        sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
        sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
        break;
    case XM_XMAC_REV_C1:
        sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
        sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
        sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
        sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
        sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
        sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
        sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
        sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
        sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
        break;
    default:
        break;
    }
    sk_win_write_2(sc, SK_MACARB_CTL,
        SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);

    sc_if->sk_link = 1;

    return;
}

/* Bring the Yukon GMAC/GPHY out of reset and program it. */
static void sk_init_yukon(sc_if)
    struct sk_if_softc *sc_if;
{
    u_int32_t phy;
    u_int16_t reg;
    int i;

    /* GMAC and GPHY Reset */
    SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
    SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
    DELAY(1000);
    SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_CLEAR);
    SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
    DELAY(1000);

    phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
        SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;

    /* Select fiber vs. copper mode to match the detected media. */
    switch(sc_if->sk_softc->sk_pmd) {
    case IFM_1000_SX:
    case IFM_1000_LX:
        phy |= SK_GPHY_FIBER;
        break;

    case IFM_1000_CX:
    case IFM_1000_T:
        phy |= SK_GPHY_COPPER;
        break;
    }

    SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
    DELAY(1000);
    SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
    SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
        SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);

    /* unused read of the interrupt source register */
    SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);

    reg = SK_YU_READ_2(sc_if, YUKON_PAR);

    /* MIB Counter Clear Mode set */
    reg |= YU_PAR_MIB_CLR;
    SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

    /* MIB Counter Clear Mode clear */
    reg &= ~YU_PAR_MIB_CLR;
    SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

    /* receive control reg */
    SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);

    /* transmit parameter register */
    SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
        YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );

    /* serial mode register */
    SK_YU_WRITE_2(sc_if, YUKON_SMR, YU_SMR_DATA_BLIND(0x1c) |
        YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e));

    /* Setup Yukon's address */
    for (i = 0; i < 3; i++) {
        /* Write Source Address 1 (unicast filter) */
        SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
            sc_if->arpcom.ac_enaddr[i * 2] |
            sc_if->arpcom.ac_enaddr[i * 2 + 1] << 8);
    }

    for (i = 0; i < 3; i++) {
        reg = sk_win_read_2(sc_if->sk_softc,
            SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
        SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
    }

    /* Set promiscuous mode */
    sk_setpromisc(sc_if);

    /* Set multicast filter */
    sk_setmulti(sc_if);

    /* enable interrupt mask for counter overflows */
    SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
    SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
    SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);

    /* Configure RX MAC FIFO */
    SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
    SK_IF_WRITE_4(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON);

    /* Configure TX MAC FIFO */
    SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
    SK_IF_WRITE_4(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
}

/*
 * Note that to properly initialize any part of the GEnesis chip,
 * you first have to take it out of reset mode.
 */
static void
sk_init(xsc)
    void *xsc;
{
    struct sk_if_softc *sc_if = xsc;
    struct sk_softc *sc;
    struct ifnet *ifp;
    struct mii_data *mii;
    u_int16_t reg;

    SK_IF_LOCK(sc_if);

    ifp = &sc_if->arpcom.ac_if;
    sc = sc_if->sk_softc;
    mii = device_get_softc(sc_if->sk_miibus);

    /* Cancel pending I/O and free all RX/TX buffers.
 */
	sk_stop(sc_if);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure LINK_SYNC LED */
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			SK_LINKLED_LINKSYNC_ON);

		/* Configure RX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
			SK_RXLEDCTL_COUNTER_START);

		/* Configure TX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
			SK_TXLEDCTL_COUNTER_START);
	}

	/* Configure I2C registers */

	/* Configure the MAC: XMAC on GEnesis, GMAC on Yukon. */
	switch (sc->sk_type) {
	case SK_GENESIS:
		sk_init_xmac(sc_if);
		break;
	case SK_YUKON:
		sk_init_yukon(sc_if);
		break;
	}
	mii_mediachg(mii);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure MAC FIFOs */
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);

		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
	}

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);

	/*
	 * Configure RAMbuffers: point the RX buffer's start, read and
	 * write pointers at this port's RAM region, then turn it on.
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

	/* Same for the TX RAM buffer, in store-and-forward mode. */
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);

	/*
	 * Configure BMUs: hand each DMA unit the physical address of the
	 * first descriptor in its ring (rings live below 4GB, so the
	 * high address word is 0).
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
	    vtophys(&sc_if->sk_rdata->sk_rx_ring[0]));
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
	    vtophys(&sc_if->sk_rdata->sk_tx_ring[0]));
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);

	/* Init descriptors; RX needs mbufs, so it can fail on ENOBUFS. */
	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
		printf("sk%d: initialization failed: no "
		    "memory for rx buffers\n", sc_if->sk_unit);
		sk_stop(sc_if);
		SK_IF_UNLOCK(sc_if);
		return;
	}
	sk_init_tx_ring(sc_if);

	/*
	 * Configure interrupt handling: clear pending status, then add
	 * this port's sources to the controller-wide interrupt mask.
	 */
	CSR_READ_4(sc, SK_ISSR);
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask |= SK_INTRS1;
	else
		sc->sk_intrmask |= SK_INTRS2;

	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;

	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Start BMUs.
*/ 2609 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START); 2610 2611 switch(sc->sk_type) { 2612 case SK_GENESIS: 2613 /* Enable XMACs TX and RX state machines */ 2614 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE); 2615 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); 2616 break; 2617 case SK_YUKON: 2618 reg = SK_YU_READ_2(sc_if, YUKON_GPCR); 2619 reg |= YU_GPCR_TXEN | YU_GPCR_RXEN; 2620 reg &= ~(YU_GPCR_SPEED_EN | YU_GPCR_DPLX_EN); 2621 SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg); 2622 } 2623 2624 ifp->if_flags |= IFF_RUNNING; 2625 ifp->if_flags &= ~IFF_OACTIVE; 2626 2627 SK_IF_UNLOCK(sc_if); 2628 2629 return; 2630 } 2631 2632 static void 2633 sk_stop(sc_if) 2634 struct sk_if_softc *sc_if; 2635 { 2636 int i; 2637 struct sk_softc *sc; 2638 struct ifnet *ifp; 2639 2640 SK_IF_LOCK(sc_if); 2641 sc = sc_if->sk_softc; 2642 ifp = &sc_if->arpcom.ac_if; 2643 2644 untimeout(sk_tick, sc_if, sc_if->sk_tick_ch); 2645 2646 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { 2647 u_int32_t val; 2648 2649 /* Put PHY back into reset. */ 2650 val = sk_win_read_4(sc, SK_GPIO); 2651 if (sc_if->sk_port == SK_PORT_A) { 2652 val |= SK_GPIO_DIR0; 2653 val &= ~SK_GPIO_DAT0; 2654 } else { 2655 val |= SK_GPIO_DIR2; 2656 val &= ~SK_GPIO_DAT2; 2657 } 2658 sk_win_write_4(sc, SK_GPIO, val); 2659 } 2660 2661 /* Turn off various components of this interface. 
*/ 2662 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC); 2663 switch (sc->sk_type) { 2664 case SK_GENESIS: 2665 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET); 2666 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET); 2667 break; 2668 case SK_YUKON: 2669 SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET); 2670 SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET); 2671 break; 2672 } 2673 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE); 2674 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF); 2675 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE); 2676 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF); 2677 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF); 2678 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP); 2679 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP); 2680 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF); 2681 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF); 2682 2683 /* Disable interrupts */ 2684 if (sc_if->sk_port == SK_PORT_A) 2685 sc->sk_intrmask &= ~SK_INTRS1; 2686 else 2687 sc->sk_intrmask &= ~SK_INTRS2; 2688 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); 2689 2690 SK_XM_READ_2(sc_if, XM_ISR); 2691 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF); 2692 2693 /* Free RX and TX mbufs still in the queues. */ 2694 for (i = 0; i < SK_RX_RING_CNT; i++) { 2695 if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) { 2696 m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf); 2697 sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL; 2698 } 2699 } 2700 2701 for (i = 0; i < SK_TX_RING_CNT; i++) { 2702 if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) { 2703 m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf); 2704 sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL; 2705 } 2706 } 2707 2708 ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE); 2709 SK_IF_UNLOCK(sc_if); 2710 return; 2711 } 2712