1 /* $OpenBSD: if_sk.c,v 2.33 2003/08/12 05:23:06 nate Exp $ */ 2 3 /* 4 * Copyright (c) 1997, 1998, 1999, 2000 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 /* 35 * Copyright (c) 2003 Nathan L. 
Binkert <binkertn@umich.edu> 36 * 37 * Permission to use, copy, modify, and distribute this software for any 38 * purpose with or without fee is hereby granted, provided that the above 39 * copyright notice and this permission notice appear in all copies. 40 * 41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 48 */ 49 50 #include <sys/cdefs.h> 51 __FBSDID("$FreeBSD$"); 52 53 /* 54 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports 55 * the SK-984x series adapters, both single port and dual port. 56 * References: 57 * The XaQti XMAC II datasheet, 58 * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf 59 * The SysKonnect GEnesis manual, http://www.syskonnect.com 60 * 61 * Note: XaQti has been aquired by Vitesse, and Vitesse does not have the 62 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a 63 * convenience to others until Vitesse corrects this problem: 64 * 65 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf 66 * 67 * Written by Bill Paul <wpaul@ee.columbia.edu> 68 * Department of Electrical Engineering 69 * Columbia University, New York City 70 */ 71 /* 72 * The SysKonnect gigabit ethernet adapters consist of two main 73 * components: the SysKonnect GEnesis controller chip and the XaQti Corp. 74 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC 75 * components and a PHY while the GEnesis controller provides a PCI 76 * interface with DMA support. 
Each card may have between 512K and 77 * 2MB of SRAM on board depending on the configuration. 78 * 79 * The SysKonnect GEnesis controller can have either one or two XMAC 80 * chips connected to it, allowing single or dual port NIC configurations. 81 * SysKonnect has the distinction of being the only vendor on the market 82 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs, 83 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the 84 * XMAC registers. This driver takes advantage of these features to allow 85 * both XMACs to operate as independent interfaces. 86 */ 87 88 #include <sys/param.h> 89 #include <sys/systm.h> 90 #include <sys/sockio.h> 91 #include <sys/mbuf.h> 92 #include <sys/malloc.h> 93 #include <sys/kernel.h> 94 #include <sys/socket.h> 95 #include <sys/queue.h> 96 97 #include <net/if.h> 98 #include <net/if_arp.h> 99 #include <net/ethernet.h> 100 #include <net/if_dl.h> 101 #include <net/if_media.h> 102 103 #include <net/bpf.h> 104 105 #include <vm/vm.h> /* for vtophys */ 106 #include <vm/pmap.h> /* for vtophys */ 107 #include <machine/bus_pio.h> 108 #include <machine/bus_memio.h> 109 #include <machine/bus.h> 110 #include <machine/resource.h> 111 #include <sys/bus.h> 112 #include <sys/rman.h> 113 114 #include <dev/mii/mii.h> 115 #include <dev/mii/miivar.h> 116 #include <dev/mii/brgphyreg.h> 117 118 #include <dev/pci/pcireg.h> 119 #include <dev/pci/pcivar.h> 120 121 #if 0 122 #define SK_USEIOSPACE 123 #endif 124 125 #include <pci/if_skreg.h> 126 #include <pci/xmaciireg.h> 127 #include <pci/yukonreg.h> 128 129 MODULE_DEPEND(sk, pci, 1, 1, 1); 130 MODULE_DEPEND(sk, ether, 1, 1, 1); 131 MODULE_DEPEND(sk, miibus, 1, 1, 1); 132 133 /* "controller miibus0" required. See GENERIC if you get errors here. 
 */
#include "miibus_if.h"

#ifndef lint
static const char rcsid[] =
  "$FreeBSD$";
#endif

/*
 * Table of PCI vendor/device IDs for the adapters this driver supports.
 * skc_probe() walks this list; the all-zero entry is the terminator.
 */
static struct sk_type sk_devs[] = {
	{
		VENDORID_SK,
		DEVICEID_SK_V1,
		"SysKonnect Gigabit Ethernet (V1.0)"
	},
	{
		VENDORID_SK,
		DEVICEID_SK_V2,
		"SysKonnect Gigabit Ethernet (V2.0)"
	},
	{
		VENDORID_MARVELL,
		DEVICEID_SK_V2,
		"Marvell Gigabit Ethernet"
	},
	{
		VENDORID_3COM,
		DEVICEID_3COM_3C940,
		"3Com 3C940 Gigabit Ethernet"
	},
	{
		VENDORID_LINKSYS,
		DEVICEID_LINKSYS_EG1032,
		"Linksys EG1032 Gigabit Ethernet"
	},
	{
		VENDORID_DLINK,
		DEVICEID_DLINK_DGE530T,
		"D-Link DGE-530T Gigabit Ethernet"
	},
	{ 0, 0, NULL }
};

/* Forward declarations for all file-local functions. */
static int skc_probe		(device_t);
static int skc_attach		(device_t);
static int skc_detach		(device_t);
static void skc_shutdown	(device_t);
static int sk_detach		(device_t);
static int sk_probe		(device_t);
static int sk_attach		(device_t);
static void sk_tick		(void *);
static void sk_intr		(void *);
static void sk_intr_xmac	(struct sk_if_softc *);
static void sk_intr_bcom	(struct sk_if_softc *);
static void sk_intr_yukon	(struct sk_if_softc *);
static void sk_rxeof		(struct sk_if_softc *);
static void sk_txeof		(struct sk_if_softc *);
static int sk_encap		(struct sk_if_softc *, struct mbuf *,
					u_int32_t *);
static void sk_start		(struct ifnet *);
static int sk_ioctl		(struct ifnet *, u_long, caddr_t);
static void sk_init		(void *);
static void sk_init_xmac	(struct sk_if_softc *);
static void sk_init_yukon	(struct sk_if_softc *);
static void sk_stop		(struct sk_if_softc *);
static void sk_watchdog		(struct ifnet *);
static int sk_ifmedia_upd	(struct ifnet *);
static void sk_ifmedia_sts	(struct ifnet *, struct ifmediareq *);
static void sk_reset		(struct sk_softc *);
static int sk_newbuf		(struct sk_if_softc *,
					struct sk_chain *, struct mbuf *);
static int sk_alloc_jumbo_mem	(struct sk_if_softc *);
static void *sk_jalloc		(struct sk_if_softc *);
static void sk_jfree		(void *, void *);
static int sk_init_rx_ring	(struct sk_if_softc *);
static void sk_init_tx_ring	(struct sk_if_softc *);
static u_int32_t sk_win_read_4	(struct sk_softc *, int);
static u_int16_t sk_win_read_2	(struct sk_softc *, int);
static u_int8_t sk_win_read_1	(struct sk_softc *, int);
static void sk_win_write_4	(struct sk_softc *, int, u_int32_t);
static void sk_win_write_2	(struct sk_softc *, int, u_int32_t);
static void sk_win_write_1	(struct sk_softc *, int, u_int32_t);
static u_int8_t sk_vpd_readbyte	(struct sk_softc *, int);
static void sk_vpd_read_res	(struct sk_softc *, struct vpd_res *, int);
static void sk_vpd_read		(struct sk_softc *);

/* Chip-dispatching MII wrappers (GEnesis/XMAC vs. Yukon/Marvell). */
static int sk_miibus_readreg	(device_t, int, int);
static int sk_miibus_writereg	(device_t, int, int, int);
static void sk_miibus_statchg	(device_t);

static int sk_xmac_miibus_readreg	(struct sk_if_softc *, int, int);
static int sk_xmac_miibus_writereg	(struct sk_if_softc *, int, int,
						int);
static void sk_xmac_miibus_statchg	(struct sk_if_softc *);

static int sk_marv_miibus_readreg	(struct sk_if_softc *, int, int);
static int sk_marv_miibus_writereg	(struct sk_if_softc *, int, int,
						int);
static void sk_marv_miibus_statchg	(struct sk_if_softc *);

/* Multicast hashing and receive-filter helpers. */
static uint32_t sk_xmchash	(const uint8_t *);
static uint32_t sk_gmchash	(const uint8_t *);
static void sk_setfilt		(struct sk_if_softc *, caddr_t, int);
static void sk_setmulti		(struct sk_if_softc *);
static void sk_setpromisc	(struct sk_if_softc *);

/* Register access method: PCI I/O space or (default) memory space. */
#ifdef SK_USEIOSPACE
#define SK_RES		SYS_RES_IOPORT
#define SK_RID		SK_PCI_LOIO
#else
#define SK_RES		SYS_RES_MEMORY
#define SK_RID		SK_PCI_LOMEM
#endif

/*
 * Note that we have newbus methods for both the GEnesis controller
 * itself and the XMAC(s).  The XMACs are children of the GEnesis, and
 * the miibus code is a child of the XMACs.  We need to do it this way
 * so that the miibus drivers can access the PHY registers on the
 * right PHY.  It's not quite what I had in mind, but it's the only
 * design that achieves the desired effect.
 */
static device_method_t skc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		skc_probe),
	DEVMETHOD(device_attach,	skc_attach),
	DEVMETHOD(device_detach,	skc_detach),
	DEVMETHOD(device_shutdown,	skc_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ 0, 0 }
};

static driver_t skc_driver = {
	"skc",
	skc_methods,
	sizeof(struct sk_softc)
};

static devclass_t skc_devclass;

static device_method_t sk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sk_probe),
	DEVMETHOD(device_attach,	sk_attach),
	DEVMETHOD(device_detach,	sk_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	sk_miibus_readreg),
	DEVMETHOD(miibus_writereg,	sk_miibus_writereg),
	DEVMETHOD(miibus_statchg,	sk_miibus_statchg),

	{ 0, 0 }
};

static driver_t sk_driver = {
	"sk",
	sk_methods,
	sizeof(struct sk_if_softc)
};

static devclass_t sk_devclass;

DRIVER_MODULE(sk, pci, skc_driver, skc_devclass, 0, 0);
DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0);
DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0);

/* Read-modify-write helpers for 32-bit CSR registers. */
#define SK_SETBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)

#define SK_CLRBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)

#define SK_WIN_SETBIT_4(sc, reg, x) \ 314 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x) 315 316 #define SK_WIN_CLRBIT_4(sc, reg, x) \ 317 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x) 318 319 #define SK_WIN_SETBIT_2(sc, reg, x) \ 320 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x) 321 322 #define SK_WIN_CLRBIT_2(sc, reg, x) \ 323 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x) 324 325 static u_int32_t 326 sk_win_read_4(sc, reg) 327 struct sk_softc *sc; 328 int reg; 329 { 330 #ifdef SK_USEIOSPACE 331 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 332 return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg))); 333 #else 334 return(CSR_READ_4(sc, reg)); 335 #endif 336 } 337 338 static u_int16_t 339 sk_win_read_2(sc, reg) 340 struct sk_softc *sc; 341 int reg; 342 { 343 #ifdef SK_USEIOSPACE 344 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 345 return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg))); 346 #else 347 return(CSR_READ_2(sc, reg)); 348 #endif 349 } 350 351 static u_int8_t 352 sk_win_read_1(sc, reg) 353 struct sk_softc *sc; 354 int reg; 355 { 356 #ifdef SK_USEIOSPACE 357 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 358 return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg))); 359 #else 360 return(CSR_READ_1(sc, reg)); 361 #endif 362 } 363 364 static void 365 sk_win_write_4(sc, reg, val) 366 struct sk_softc *sc; 367 int reg; 368 u_int32_t val; 369 { 370 #ifdef SK_USEIOSPACE 371 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 372 CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val); 373 #else 374 CSR_WRITE_4(sc, reg, val); 375 #endif 376 return; 377 } 378 379 static void 380 sk_win_write_2(sc, reg, val) 381 struct sk_softc *sc; 382 int reg; 383 u_int32_t val; 384 { 385 #ifdef SK_USEIOSPACE 386 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 387 CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val); 388 #else 389 CSR_WRITE_2(sc, reg, val); 390 #endif 391 return; 392 } 393 394 static void 395 sk_win_write_1(sc, reg, val) 396 struct sk_softc *sc; 397 int reg; 398 u_int32_t val; 399 { 400 #ifdef SK_USEIOSPACE 401 
CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 402 CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val); 403 #else 404 CSR_WRITE_1(sc, reg, val); 405 #endif 406 return; 407 } 408 409 /* 410 * The VPD EEPROM contains Vital Product Data, as suggested in 411 * the PCI 2.1 specification. The VPD data is separared into areas 412 * denoted by resource IDs. The SysKonnect VPD contains an ID string 413 * resource (the name of the adapter), a read-only area resource 414 * containing various key/data fields and a read/write area which 415 * can be used to store asset management information or log messages. 416 * We read the ID string and read-only into buffers attached to 417 * the controller softc structure for later use. At the moment, 418 * we only use the ID string during skc_attach(). 419 */ 420 static u_int8_t 421 sk_vpd_readbyte(sc, addr) 422 struct sk_softc *sc; 423 int addr; 424 { 425 int i; 426 427 sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr); 428 for (i = 0; i < SK_TIMEOUT; i++) { 429 DELAY(1); 430 if (sk_win_read_2(sc, 431 SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG) 432 break; 433 } 434 435 if (i == SK_TIMEOUT) 436 return(0); 437 438 return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA))); 439 } 440 441 static void 442 sk_vpd_read_res(sc, res, addr) 443 struct sk_softc *sc; 444 struct vpd_res *res; 445 int addr; 446 { 447 int i; 448 u_int8_t *ptr; 449 450 ptr = (u_int8_t *)res; 451 for (i = 0; i < sizeof(struct vpd_res); i++) 452 ptr[i] = sk_vpd_readbyte(sc, i + addr); 453 454 return; 455 } 456 457 static void 458 sk_vpd_read(sc) 459 struct sk_softc *sc; 460 { 461 int pos = 0, i; 462 struct vpd_res res; 463 464 if (sc->sk_vpd_prodname != NULL) 465 free(sc->sk_vpd_prodname, M_DEVBUF); 466 if (sc->sk_vpd_readonly != NULL) 467 free(sc->sk_vpd_readonly, M_DEVBUF); 468 sc->sk_vpd_prodname = NULL; 469 sc->sk_vpd_readonly = NULL; 470 471 sk_vpd_read_res(sc, &res, pos); 472 473 /* 474 * Bail out quietly if the eeprom appears to be missing or empty. 
475 */ 476 if (res.vr_id == 0xff && res.vr_len == 0xff && res.vr_pad == 0xff) 477 return; 478 479 if (res.vr_id != VPD_RES_ID) { 480 printf("skc%d: bad VPD resource id: expected %x got %x\n", 481 sc->sk_unit, VPD_RES_ID, res.vr_id); 482 return; 483 } 484 485 pos += sizeof(res); 486 sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT); 487 for (i = 0; i < res.vr_len; i++) 488 sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos); 489 sc->sk_vpd_prodname[i] = '\0'; 490 pos += i; 491 492 sk_vpd_read_res(sc, &res, pos); 493 494 if (res.vr_id != VPD_RES_READ) { 495 printf("skc%d: bad VPD resource id: expected %x got %x\n", 496 sc->sk_unit, VPD_RES_READ, res.vr_id); 497 return; 498 } 499 500 pos += sizeof(res); 501 sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT); 502 for (i = 0; i < res.vr_len + 1; i++) 503 sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos); 504 505 return; 506 } 507 508 static int 509 sk_miibus_readreg(dev, phy, reg) 510 device_t dev; 511 int phy, reg; 512 { 513 struct sk_if_softc *sc_if; 514 515 sc_if = device_get_softc(dev); 516 517 switch(sc_if->sk_softc->sk_type) { 518 case SK_GENESIS: 519 return(sk_xmac_miibus_readreg(sc_if, phy, reg)); 520 case SK_YUKON: 521 return(sk_marv_miibus_readreg(sc_if, phy, reg)); 522 } 523 524 return(0); 525 } 526 527 static int 528 sk_miibus_writereg(dev, phy, reg, val) 529 device_t dev; 530 int phy, reg, val; 531 { 532 struct sk_if_softc *sc_if; 533 534 sc_if = device_get_softc(dev); 535 536 switch(sc_if->sk_softc->sk_type) { 537 case SK_GENESIS: 538 return(sk_xmac_miibus_writereg(sc_if, phy, reg, val)); 539 case SK_YUKON: 540 return(sk_marv_miibus_writereg(sc_if, phy, reg, val)); 541 } 542 543 return(0); 544 } 545 546 static void 547 sk_miibus_statchg(dev) 548 device_t dev; 549 { 550 struct sk_if_softc *sc_if; 551 552 sc_if = device_get_softc(dev); 553 554 switch(sc_if->sk_softc->sk_type) { 555 case SK_GENESIS: 556 sk_xmac_miibus_statchg(sc_if); 557 break; 558 case SK_YUKON: 559 
sk_marv_miibus_statchg(sc_if); 560 break; 561 } 562 563 return; 564 } 565 566 static int 567 sk_xmac_miibus_readreg(sc_if, phy, reg) 568 struct sk_if_softc *sc_if; 569 int phy, reg; 570 { 571 int i; 572 573 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0) 574 return(0); 575 576 SK_IF_LOCK(sc_if); 577 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8)); 578 SK_XM_READ_2(sc_if, XM_PHY_DATA); 579 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) { 580 for (i = 0; i < SK_TIMEOUT; i++) { 581 DELAY(1); 582 if (SK_XM_READ_2(sc_if, XM_MMUCMD) & 583 XM_MMUCMD_PHYDATARDY) 584 break; 585 } 586 587 if (i == SK_TIMEOUT) { 588 printf("sk%d: phy failed to come ready\n", 589 sc_if->sk_unit); 590 SK_IF_UNLOCK(sc_if); 591 return(0); 592 } 593 } 594 DELAY(1); 595 i = SK_XM_READ_2(sc_if, XM_PHY_DATA); 596 SK_IF_UNLOCK(sc_if); 597 return(i); 598 } 599 600 static int 601 sk_xmac_miibus_writereg(sc_if, phy, reg, val) 602 struct sk_if_softc *sc_if; 603 int phy, reg, val; 604 { 605 int i; 606 607 SK_IF_LOCK(sc_if); 608 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8)); 609 for (i = 0; i < SK_TIMEOUT; i++) { 610 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY)) 611 break; 612 } 613 614 if (i == SK_TIMEOUT) { 615 printf("sk%d: phy failed to come ready\n", sc_if->sk_unit); 616 SK_IF_UNLOCK(sc_if); 617 return(ETIMEDOUT); 618 } 619 620 SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val); 621 for (i = 0; i < SK_TIMEOUT; i++) { 622 DELAY(1); 623 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY)) 624 break; 625 } 626 SK_IF_UNLOCK(sc_if); 627 if (i == SK_TIMEOUT) 628 printf("sk%d: phy write timed out\n", sc_if->sk_unit); 629 630 return(0); 631 } 632 633 static void 634 sk_xmac_miibus_statchg(sc_if) 635 struct sk_if_softc *sc_if; 636 { 637 struct mii_data *mii; 638 639 mii = device_get_softc(sc_if->sk_miibus); 640 641 SK_IF_LOCK(sc_if); 642 /* 643 * If this is a GMII PHY, manually set the XMAC's 644 * duplex mode accordingly. 
645 */ 646 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) { 647 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 648 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX); 649 } else { 650 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX); 651 } 652 } 653 SK_IF_UNLOCK(sc_if); 654 655 return; 656 } 657 658 static int 659 sk_marv_miibus_readreg(sc_if, phy, reg) 660 struct sk_if_softc *sc_if; 661 int phy, reg; 662 { 663 u_int16_t val; 664 int i; 665 666 if (phy != 0 || 667 (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER && 668 sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) { 669 return(0); 670 } 671 672 SK_IF_LOCK(sc_if); 673 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) | 674 YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ); 675 676 for (i = 0; i < SK_TIMEOUT; i++) { 677 DELAY(1); 678 val = SK_YU_READ_2(sc_if, YUKON_SMICR); 679 if (val & YU_SMICR_READ_VALID) 680 break; 681 } 682 683 if (i == SK_TIMEOUT) { 684 printf("sk%d: phy failed to come ready\n", 685 sc_if->sk_unit); 686 SK_IF_UNLOCK(sc_if); 687 return(0); 688 } 689 690 val = SK_YU_READ_2(sc_if, YUKON_SMIDR); 691 SK_IF_UNLOCK(sc_if); 692 693 return(val); 694 } 695 696 static int 697 sk_marv_miibus_writereg(sc_if, phy, reg, val) 698 struct sk_if_softc *sc_if; 699 int phy, reg, val; 700 { 701 int i; 702 703 SK_IF_LOCK(sc_if); 704 SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val); 705 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) | 706 YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE); 707 708 for (i = 0; i < SK_TIMEOUT; i++) { 709 DELAY(1); 710 if (SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY) 711 break; 712 } 713 SK_IF_UNLOCK(sc_if); 714 715 return(0); 716 } 717 718 static void 719 sk_marv_miibus_statchg(sc_if) 720 struct sk_if_softc *sc_if; 721 { 722 return; 723 } 724 725 #define XMAC_POLY 0xEDB88320 726 #define GMAC_POLY 0x04C11DB7L 727 #define HASH_BITS 6 728 729 static u_int32_t 730 sk_xmchash(addr) 731 const uint8_t *addr; 732 { 733 uint32_t crc; 734 int idx, bit; 735 uint8_t data; 736 737 /* Compute CRC for the address 
/*
 * Hash an ethernet address for the XMAC multicast filter: reflected
 * CRC-32 over the 6 address bytes, complemented, low HASH_BITS bits.
 */
static u_int32_t
sk_xmchash(addr)
	const uint8_t *addr;
{
	uint32_t crc;
	int idx, bit;
	uint8_t data;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF; /* initial value */

	for (idx = 0; idx < 6; idx++) {
		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
			crc = (crc >> 1) ^ (((crc ^ data) & 1) ? XMAC_POLY : 0);
	}

	return (~crc & ((1 << HASH_BITS) - 1));
}

/*
 * Hash an ethernet address for the Yukon GMAC multicast filter:
 * each byte is bit-reversed, then fed MSB-first through a non-reflected
 * CRC-32 (GMAC_POLY); low HASH_BITS bits of the result, uncomplemented.
 */
static u_int32_t
sk_gmchash(addr)
	const uint8_t *addr;
{
	u_int32_t crc;
	u_int idx, bit;
	uint8_t tmpData, data;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF; /* initial value */

	for (idx = 0; idx < 6; idx++) {
		data = *addr++;

		/* Change bit order in byte. */
		tmpData = data;
		for (bit = 0; bit < 8; bit++) {
			if (tmpData & 1) {
				data |= 1 << (7 - bit);
			} else {
				data &= ~(1 << (7 - bit));
			}
			tmpData >>= 1;
		}

		crc ^= (data << 24);
		for (bit = 0; bit < 8; bit++) {
			if (crc & 0x80000000) {
				crc = (crc << 1) ^ GMAC_POLY;
			} else {
				crc <<= 1;
			}
		}
	}

	return (crc & ((1 << HASH_BITS) - 1));
}

/*
 * Program a 6-byte address into XMAC perfect-filter slot 'slot' as
 * three 16-bit writes.  NOTE(review): reads addr as u_int16_t words,
 * which assumes the host byte order matches the register layout —
 * fine on the platforms this driver targeted; confirm before porting.
 */
static void
sk_setfilt(sc_if, addr, slot)
	struct sk_if_softc	*sc_if;
	caddr_t			addr;
	int			slot;
{
	int			base;

	base = XM_RXFILT_ENTRY(slot);

	SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0]));
	SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2]));
	SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4]));

	return;
}

/*
 * Reprogram the multicast filter from ifp->if_multiaddrs.  On GEnesis
 * the first XM_RXFILT_MAX - 1 groups go into perfect-filter slots
 * (slot 0 holds the station address) and the rest into the 64-bit
 * hash table; on Yukon everything goes through the 64-bit hash.
 */
static void
sk_setmulti(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc = sc_if->sk_softc;
	struct ifnet		*ifp = &sc_if->arpcom.ac_if;
	u_int32_t		hashes[2] = { 0, 0 };
	int			h = 0, i;
	struct ifmultiaddr	*ifma;
	/* All-zero address used to blank out perfect-filter slots. */
	u_int8_t		dummy[] = { 0, 0, 0, 0, 0 ,0 };


	/* First, zot all the existing filters. */
	switch(sc->sk_type) {
	case SK_GENESIS:
		for (i = 1; i < XM_RXFILT_MAX; i++)
			sk_setfilt(sc_if, (caddr_t)&dummy, i);

		SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
		SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
		break;
	case SK_YUKON:
		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);
		break;
	}

	/* Now program new ones. */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		/* Accept everything: set all 64 hash bits. */
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		i = 1;
		TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			/*
			 * Program the first XM_RXFILT_MAX multicast groups
			 * into the perfect filter. For all others,
			 * use the hash table.
			 */
			if (sc->sk_type == SK_GENESIS && i < XM_RXFILT_MAX) {
				sk_setfilt(sc_if,
			LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
				i++;
				continue;
			}

			switch(sc->sk_type) {
			case SK_GENESIS:
				h = sk_xmchash(
					LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
				break;
			case SK_YUKON:
				h = sk_gmchash(
					LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
				break;
			}
			/* h is a 6-bit index into the 64-bit hash table. */
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}
	}

	switch(sc->sk_type) {
	case SK_GENESIS:
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
		    XM_MODE_RX_USE_PERFECT);
		SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
		SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
		break;
	case SK_YUKON:
		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
		break;
	}

	return;
}
/*
 * Sync the hardware receive filter with IFF_PROMISC.  On the XMAC a
 * dedicated promiscuous-mode bit exists; on the Yukon, promiscuity is
 * achieved by turning OFF the unicast/multicast filter-enable bits.
 */
static void
sk_setpromisc(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc = sc_if->sk_softc;
	struct ifnet		*ifp = &sc_if->arpcom.ac_if;

	switch(sc->sk_type) {
	case SK_GENESIS:
		if (ifp->if_flags & IFF_PROMISC) {
			SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
		} else {
			SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
		}
		break;
	case SK_YUKON:
		if (ifp->if_flags & IFF_PROMISC) {
			SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
			    YU_RCR_UFLEN | YU_RCR_MUFLEN);
		} else {
			SK_YU_SETBIT_2(sc_if, YUKON_RCR,
			    YU_RCR_UFLEN | YU_RCR_MUFLEN);
		}
		break;
	}

	return;
}

/*
 * Initialize the RX descriptor ring: attach a fresh jumbo mbuf to each
 * slot and link the descriptors into a circular list (both the software
 * chain and the physical-address next pointers the chip follows).
 * Returns ENOBUFS if any buffer allocation fails.
 */
static int
sk_init_rx_ring(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_chain_data	*cd = &sc_if->sk_cdata;
	struct sk_ring_data	*rd = sc_if->sk_rdata;
	int			i;

	bzero((char *)rd->sk_rx_ring,
	    sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);

	for (i = 0; i < SK_RX_RING_CNT; i++) {
		cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
		if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS)
			return(ENOBUFS);
		if (i == (SK_RX_RING_CNT - 1)) {
			/* Last descriptor wraps back to the first. */
			cd->sk_rx_chain[i].sk_next =
			    &cd->sk_rx_chain[0];
			rd->sk_rx_ring[i].sk_next =
			    vtophys(&rd->sk_rx_ring[0]);
		} else {
			cd->sk_rx_chain[i].sk_next =
			    &cd->sk_rx_chain[i + 1];
			rd->sk_rx_ring[i].sk_next =
			    vtophys(&rd->sk_rx_ring[i + 1]);
		}
	}

	sc_if->sk_cdata.sk_rx_prod = 0;
	sc_if->sk_cdata.sk_rx_cons = 0;

	return(0);
}

/*
 * Initialize the TX descriptor ring as an empty circular list; mbufs
 * are attached later by sk_encap() as packets are queued.
 */
static void
sk_init_tx_ring(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_chain_data	*cd = &sc_if->sk_cdata;
	struct sk_ring_data	*rd = sc_if->sk_rdata;
	int			i;

	bzero((char *)sc_if->sk_rdata->sk_tx_ring,
	    sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);

	for (i = 0; i < SK_TX_RING_CNT; i++) {
		cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
		if (i == (SK_TX_RING_CNT - 1)) {
			/* Last descriptor wraps back to the first. */
			cd->sk_tx_chain[i].sk_next =
			    &cd->sk_tx_chain[0];
			rd->sk_tx_ring[i].sk_next =
			    vtophys(&rd->sk_tx_ring[0]);
		} else {
			cd->sk_tx_chain[i].sk_next =
			    &cd->sk_tx_chain[i + 1];
			rd->sk_tx_ring[i].sk_next =
			    vtophys(&rd->sk_tx_ring[i + 1]);
		}
	}

	sc_if->sk_cdata.sk_tx_prod = 0;
	sc_if->sk_cdata.sk_tx_cons = 0;
	sc_if->sk_cdata.sk_tx_cnt = 0;

	return;
}

/*
 * Attach an mbuf with a jumbo-pool backing buffer to RX chain slot 'c'.
 * If 'm' is NULL a new mbuf header and jumbo buffer are allocated;
 * otherwise the caller's mbuf is recycled after resetting its data
 * pointer and lengths.  Returns ENOBUFS on allocation failure.
 */
static int
sk_newbuf(sc_if, c, m)
	struct sk_if_softc	*sc_if;
	struct sk_chain		*c;
	struct mbuf		*m;
{
	struct mbuf		*m_new = NULL;
	struct sk_rx_desc	*r;

	if (m == NULL) {
		caddr_t			*buf = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(ENOBUFS);

		/* Allocate the jumbo buffer */
		buf = sk_jalloc(sc_if);
		if (buf == NULL) {
			m_freem(m_new);
#ifdef SK_VERBOSE
			printf("sk%d: jumbo allocation failed "
			    "-- packet dropped!\n", sc_if->sk_unit);
#endif
			return(ENOBUFS);
		}

		/* Attach the buffer to the mbuf */
		MEXTADD(m_new, buf, SK_JLEN, sk_jfree,
		    (struct sk_if_softc *)sc_if, 0, EXT_NET_DRV);
		m_new->m_data = (void *)buf;
		m_new->m_pkthdr.len = m_new->m_len = SK_JLEN;
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/*
	 * Adjust alignment so packet payload begins on a
	 * longword boundary. Mandatory for Alpha, useful on
	 * x86 too.
	 */
	m_adj(m_new, ETHER_ALIGN);

	r = c->sk_desc;
	c->sk_mbuf = m_new;
	r->sk_data_lo = vtophys(mtod(m_new, caddr_t));
	/* Hand the descriptor back to the chip with the buffer length. */
	r->sk_ctl = m_new->m_len | SK_RXSTAT;

	return(0);
}
In order for us to use them, we need 1046 * large 9K receive buffers, however standard mbuf clusters are only 1047 * 2048 bytes in size. Consequently, we need to allocate and manage 1048 * our own jumbo buffer pool. Fortunately, this does not require an 1049 * excessive amount of additional code. 1050 */ 1051 static int 1052 sk_alloc_jumbo_mem(sc_if) 1053 struct sk_if_softc *sc_if; 1054 { 1055 caddr_t ptr; 1056 register int i; 1057 struct sk_jpool_entry *entry; 1058 1059 /* Grab a big chunk o' storage. */ 1060 sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF, 1061 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); 1062 1063 if (sc_if->sk_cdata.sk_jumbo_buf == NULL) { 1064 printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit); 1065 return(ENOBUFS); 1066 } 1067 1068 SLIST_INIT(&sc_if->sk_jfree_listhead); 1069 SLIST_INIT(&sc_if->sk_jinuse_listhead); 1070 1071 /* 1072 * Now divide it up into 9K pieces and save the addresses 1073 * in an array. 1074 */ 1075 ptr = sc_if->sk_cdata.sk_jumbo_buf; 1076 for (i = 0; i < SK_JSLOTS; i++) { 1077 sc_if->sk_cdata.sk_jslots[i] = ptr; 1078 ptr += SK_JLEN; 1079 entry = malloc(sizeof(struct sk_jpool_entry), 1080 M_DEVBUF, M_NOWAIT); 1081 if (entry == NULL) { 1082 free(sc_if->sk_cdata.sk_jumbo_buf, M_DEVBUF); 1083 sc_if->sk_cdata.sk_jumbo_buf = NULL; 1084 printf("sk%d: no memory for jumbo " 1085 "buffer queue!\n", sc_if->sk_unit); 1086 return(ENOBUFS); 1087 } 1088 entry->slot = i; 1089 SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, 1090 entry, jpool_entries); 1091 } 1092 1093 return(0); 1094 } 1095 1096 /* 1097 * Allocate a jumbo buffer. 
/*
 * Allocate a jumbo buffer.
 * Pops a slot off the free list, moves its bookkeeping entry to the
 * in-use list, and returns the 9K buffer address (NULL if exhausted).
 */
static void *
sk_jalloc(sc_if)
	struct sk_if_softc		*sc_if;
{
	struct sk_jpool_entry   *entry;

	entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);

	if (entry == NULL) {
#ifdef SK_VERBOSE
		printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit);
#endif
		return(NULL);
	}

	SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
	return(sc_if->sk_cdata.sk_jslots[entry->slot]);
}

/*
 * Release a jumbo buffer.
 * Invoked as the mbuf external-storage free routine (see MEXTADD in
 * sk_newbuf); 'args' carries the owning sk_if_softc.  The entry
 * structs are interchangeable, so we grab any record from the in-use
 * list and relabel it with the slot computed from the buffer address.
 */
static void
sk_jfree(buf, args)
	void			*buf;
	void			*args;
{
	struct sk_if_softc	*sc_if;
	int		        i;
	struct sk_jpool_entry   *entry;

	/* Extract the softc struct pointer. */
	sc_if = (struct sk_if_softc *)args;

	if (sc_if == NULL)
		panic("sk_jfree: didn't get softc pointer!");

	/* calculate the slot this buffer belongs to */
	i = ((vm_offset_t)buf
	     - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;

	if ((i < 0) || (i >= SK_JSLOTS))
		panic("sk_jfree: asked to free buffer that we don't manage!");

	entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
	if (entry == NULL)
		panic("sk_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries);

	return;
}

/*
 * Set media options.
 * Reinitializes the interface, then kicks the PHY into the selected mode.
 */
static int
sk_ifmedia_upd(ifp)
	struct ifnet		*ifp;
{
	struct sk_if_softc	*sc_if = ifp->if_softc;
	struct mii_data		*mii;

	mii = device_get_softc(sc_if->sk_miibus);
	sk_init(sc_if);
	mii_mediachg(mii);

	return(0);
}
1173 */ 1174 static void 1175 sk_ifmedia_sts(ifp, ifmr) 1176 struct ifnet *ifp; 1177 struct ifmediareq *ifmr; 1178 { 1179 struct sk_if_softc *sc_if; 1180 struct mii_data *mii; 1181 1182 sc_if = ifp->if_softc; 1183 mii = device_get_softc(sc_if->sk_miibus); 1184 1185 mii_pollstat(mii); 1186 ifmr->ifm_active = mii->mii_media_active; 1187 ifmr->ifm_status = mii->mii_media_status; 1188 1189 return; 1190 } 1191 1192 static int 1193 sk_ioctl(ifp, command, data) 1194 struct ifnet *ifp; 1195 u_long command; 1196 caddr_t data; 1197 { 1198 struct sk_if_softc *sc_if = ifp->if_softc; 1199 struct ifreq *ifr = (struct ifreq *) data; 1200 int error = 0; 1201 struct mii_data *mii; 1202 1203 SK_IF_LOCK(sc_if); 1204 1205 switch(command) { 1206 case SIOCSIFMTU: 1207 if (ifr->ifr_mtu > SK_JUMBO_MTU) 1208 error = EINVAL; 1209 else { 1210 ifp->if_mtu = ifr->ifr_mtu; 1211 sk_init(sc_if); 1212 } 1213 break; 1214 case SIOCSIFFLAGS: 1215 if (ifp->if_flags & IFF_UP) { 1216 if (ifp->if_flags & IFF_RUNNING) { 1217 if ((ifp->if_flags ^ sc_if->sk_if_flags) 1218 & IFF_PROMISC) { 1219 sk_setpromisc(sc_if); 1220 sk_setmulti(sc_if); 1221 } 1222 } else 1223 sk_init(sc_if); 1224 } else { 1225 if (ifp->if_flags & IFF_RUNNING) 1226 sk_stop(sc_if); 1227 } 1228 sc_if->sk_if_flags = ifp->if_flags; 1229 error = 0; 1230 break; 1231 case SIOCADDMULTI: 1232 case SIOCDELMULTI: 1233 sk_setmulti(sc_if); 1234 error = 0; 1235 break; 1236 case SIOCGIFMEDIA: 1237 case SIOCSIFMEDIA: 1238 mii = device_get_softc(sc_if->sk_miibus); 1239 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 1240 break; 1241 default: 1242 error = ether_ioctl(ifp, command, data); 1243 break; 1244 } 1245 1246 SK_IF_UNLOCK(sc_if); 1247 1248 return(error); 1249 } 1250 1251 /* 1252 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device 1253 * IDs against our list and return a device name if we find a match. 
1254 */ 1255 static int 1256 skc_probe(dev) 1257 device_t dev; 1258 { 1259 struct sk_softc *sc; 1260 struct sk_type *t = sk_devs; 1261 1262 sc = device_get_softc(dev); 1263 1264 while(t->sk_name != NULL) { 1265 if ((pci_get_vendor(dev) == t->sk_vid) && 1266 (pci_get_device(dev) == t->sk_did)) { 1267 device_set_desc(dev, t->sk_name); 1268 return(0); 1269 } 1270 t++; 1271 } 1272 1273 return(ENXIO); 1274 } 1275 1276 /* 1277 * Force the GEnesis into reset, then bring it out of reset. 1278 */ 1279 static void 1280 sk_reset(sc) 1281 struct sk_softc *sc; 1282 { 1283 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET); 1284 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET); 1285 if (sc->sk_type == SK_YUKON) 1286 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET); 1287 1288 DELAY(1000); 1289 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET); 1290 DELAY(2); 1291 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET); 1292 if (sc->sk_type == SK_YUKON) 1293 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR); 1294 1295 if (sc->sk_type == SK_GENESIS) { 1296 /* Configure packet arbiter */ 1297 sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET); 1298 sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT); 1299 sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT); 1300 sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT); 1301 sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT); 1302 } 1303 1304 /* Enable RAM interface */ 1305 sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET); 1306 1307 /* 1308 * Configure interrupt moderation. The moderation timer 1309 * defers interrupts specified in the interrupt moderation 1310 * timer mask based on the timeout specified in the interrupt 1311 * moderation timer init register. Each bit in the timer 1312 * register represents 18.825ns, so to specify a timeout in 1313 * microseconds, we have to multiply by 54. 
1314 */ 1315 sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(200)); 1316 sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF| 1317 SK_ISR_RX1_EOF|SK_ISR_RX2_EOF); 1318 sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START); 1319 1320 return; 1321 } 1322 1323 static int 1324 sk_probe(dev) 1325 device_t dev; 1326 { 1327 struct sk_softc *sc; 1328 1329 sc = device_get_softc(device_get_parent(dev)); 1330 1331 /* 1332 * Not much to do here. We always know there will be 1333 * at least one XMAC present, and if there are two, 1334 * skc_attach() will create a second device instance 1335 * for us. 1336 */ 1337 switch (sc->sk_type) { 1338 case SK_GENESIS: 1339 device_set_desc(dev, "XaQti Corp. XMAC II"); 1340 break; 1341 case SK_YUKON: 1342 device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon"); 1343 break; 1344 } 1345 1346 return(0); 1347 } 1348 1349 /* 1350 * Each XMAC chip is attached as a separate logical IP interface. 1351 * Single port cards will have only one logical interface of course. 1352 */ 1353 static int 1354 sk_attach(dev) 1355 device_t dev; 1356 { 1357 struct sk_softc *sc; 1358 struct sk_if_softc *sc_if; 1359 struct ifnet *ifp; 1360 int i, port, error; 1361 1362 if (dev == NULL) 1363 return(EINVAL); 1364 1365 error = 0; 1366 sc_if = device_get_softc(dev); 1367 sc = device_get_softc(device_get_parent(dev)); 1368 SK_LOCK(sc); 1369 port = *(int *)device_get_ivars(dev); 1370 free(device_get_ivars(dev), M_DEVBUF); 1371 device_set_ivars(dev, NULL); 1372 1373 sc_if->sk_dev = dev; 1374 sc_if->sk_unit = device_get_unit(dev); 1375 sc_if->sk_port = port; 1376 sc_if->sk_softc = sc; 1377 sc->sk_if[port] = sc_if; 1378 if (port == SK_PORT_A) 1379 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0; 1380 if (port == SK_PORT_B) 1381 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1; 1382 1383 /* 1384 * Get station address for this interface. Note that 1385 * dual port cards actually come with three station 1386 * addresses: one for each port, plus an extra. 
The 1387 * extra one is used by the SysKonnect driver software 1388 * as a 'virtual' station address for when both ports 1389 * are operating in failover mode. Currently we don't 1390 * use this extra address. 1391 */ 1392 for (i = 0; i < ETHER_ADDR_LEN; i++) 1393 sc_if->arpcom.ac_enaddr[i] = 1394 sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i); 1395 1396 /* 1397 * Set up RAM buffer addresses. The NIC will have a certain 1398 * amount of SRAM on it, somewhere between 512K and 2MB. We 1399 * need to divide this up a) between the transmitter and 1400 * receiver and b) between the two XMACs, if this is a 1401 * dual port NIC. Our algotithm is to divide up the memory 1402 * evenly so that everyone gets a fair share. 1403 */ 1404 if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) { 1405 u_int32_t chunk, val; 1406 1407 chunk = sc->sk_ramsize / 2; 1408 val = sc->sk_rboff / sizeof(u_int64_t); 1409 sc_if->sk_rx_ramstart = val; 1410 val += (chunk / sizeof(u_int64_t)); 1411 sc_if->sk_rx_ramend = val - 1; 1412 sc_if->sk_tx_ramstart = val; 1413 val += (chunk / sizeof(u_int64_t)); 1414 sc_if->sk_tx_ramend = val - 1; 1415 } else { 1416 u_int32_t chunk, val; 1417 1418 chunk = sc->sk_ramsize / 4; 1419 val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) / 1420 sizeof(u_int64_t); 1421 sc_if->sk_rx_ramstart = val; 1422 val += (chunk / sizeof(u_int64_t)); 1423 sc_if->sk_rx_ramend = val - 1; 1424 sc_if->sk_tx_ramstart = val; 1425 val += (chunk / sizeof(u_int64_t)); 1426 sc_if->sk_tx_ramend = val - 1; 1427 } 1428 1429 /* Read and save PHY type and set PHY address */ 1430 sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF; 1431 switch(sc_if->sk_phytype) { 1432 case SK_PHYTYPE_XMAC: 1433 sc_if->sk_phyaddr = SK_PHYADDR_XMAC; 1434 break; 1435 case SK_PHYTYPE_BCOM: 1436 sc_if->sk_phyaddr = SK_PHYADDR_BCOM; 1437 break; 1438 case SK_PHYTYPE_MARV_COPPER: 1439 sc_if->sk_phyaddr = SK_PHYADDR_MARV; 1440 break; 1441 default: 1442 printf("skc%d: unsupported PHY type: %d\n", 1443 sc->sk_unit, 
sc_if->sk_phytype); 1444 error = ENODEV; 1445 goto fail; 1446 } 1447 1448 /* Allocate the descriptor queues. */ 1449 sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF, 1450 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); 1451 1452 if (sc_if->sk_rdata == NULL) { 1453 printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit); 1454 error = ENOMEM; 1455 goto fail; 1456 } 1457 1458 bzero(sc_if->sk_rdata, sizeof(struct sk_ring_data)); 1459 1460 /* Try to allocate memory for jumbo buffers. */ 1461 if (sk_alloc_jumbo_mem(sc_if)) { 1462 printf("sk%d: jumbo buffer allocation failed\n", 1463 sc_if->sk_unit); 1464 error = ENOMEM; 1465 goto fail; 1466 } 1467 1468 ifp = &sc_if->arpcom.ac_if; 1469 ifp->if_softc = sc_if; 1470 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1471 ifp->if_mtu = ETHERMTU; 1472 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1473 ifp->if_ioctl = sk_ioctl; 1474 ifp->if_start = sk_start; 1475 ifp->if_watchdog = sk_watchdog; 1476 ifp->if_init = sk_init; 1477 ifp->if_baudrate = 1000000000; 1478 ifp->if_snd.ifq_maxlen = SK_TX_RING_CNT - 1; 1479 1480 callout_handle_init(&sc_if->sk_tick_ch); 1481 1482 /* 1483 * Call MI attach routine. 1484 */ 1485 ether_ifattach(ifp, sc_if->arpcom.ac_enaddr); 1486 1487 /* 1488 * Do miibus setup. 1489 */ 1490 switch (sc->sk_type) { 1491 case SK_GENESIS: 1492 sk_init_xmac(sc_if); 1493 break; 1494 case SK_YUKON: 1495 sk_init_yukon(sc_if); 1496 break; 1497 } 1498 1499 if (mii_phy_probe(dev, &sc_if->sk_miibus, 1500 sk_ifmedia_upd, sk_ifmedia_sts)) { 1501 printf("skc%d: no PHY found!\n", sc_if->sk_unit); 1502 ether_ifdetach(ifp); 1503 error = ENXIO; 1504 goto fail; 1505 } 1506 1507 fail: 1508 SK_UNLOCK(sc); 1509 if (error) { 1510 /* Access should be ok even though lock has been dropped */ 1511 sc->sk_if[port] = NULL; 1512 sk_detach(dev); 1513 } 1514 1515 return(error); 1516 } 1517 1518 /* 1519 * Attach the interface. 
Allocate softc structures, do ifmedia 1520 * setup and ethernet/BPF attach. 1521 */ 1522 static int 1523 skc_attach(dev) 1524 device_t dev; 1525 { 1526 struct sk_softc *sc; 1527 int unit, error = 0, rid, *port; 1528 1529 sc = device_get_softc(dev); 1530 unit = device_get_unit(dev); 1531 1532 mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 1533 MTX_DEF | MTX_RECURSE); 1534 #ifndef BURN_BRIDGES 1535 /* 1536 * Handle power management nonsense. 1537 */ 1538 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { 1539 u_int32_t iobase, membase, irq; 1540 1541 /* Save important PCI config data. */ 1542 iobase = pci_read_config(dev, SK_PCI_LOIO, 4); 1543 membase = pci_read_config(dev, SK_PCI_LOMEM, 4); 1544 irq = pci_read_config(dev, SK_PCI_INTLINE, 4); 1545 1546 /* Reset the power state. */ 1547 printf("skc%d: chip is in D%d power mode " 1548 "-- setting to D0\n", unit, 1549 pci_get_powerstate(dev)); 1550 pci_set_powerstate(dev, PCI_POWERSTATE_D0); 1551 1552 /* Restore PCI config data. */ 1553 pci_write_config(dev, SK_PCI_LOIO, iobase, 4); 1554 pci_write_config(dev, SK_PCI_LOMEM, membase, 4); 1555 pci_write_config(dev, SK_PCI_INTLINE, irq, 4); 1556 } 1557 #endif 1558 /* 1559 * Map control/status registers. 
1560 */ 1561 pci_enable_busmaster(dev); 1562 1563 rid = SK_RID; 1564 sc->sk_res = bus_alloc_resource_any(dev, SK_RES, &rid, RF_ACTIVE); 1565 1566 if (sc->sk_res == NULL) { 1567 printf("sk%d: couldn't map ports/memory\n", unit); 1568 error = ENXIO; 1569 goto fail; 1570 } 1571 1572 sc->sk_btag = rman_get_bustag(sc->sk_res); 1573 sc->sk_bhandle = rman_get_bushandle(sc->sk_res); 1574 1575 /* Allocate interrupt */ 1576 rid = 0; 1577 sc->sk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1578 RF_SHAREABLE | RF_ACTIVE); 1579 1580 if (sc->sk_irq == NULL) { 1581 printf("skc%d: couldn't map interrupt\n", unit); 1582 error = ENXIO; 1583 goto fail; 1584 } 1585 1586 /* Set adapter type */ 1587 switch (pci_get_device(dev)) { 1588 case DEVICEID_SK_V1: 1589 sc->sk_type = SK_GENESIS; 1590 break; 1591 case DEVICEID_SK_V2: 1592 case DEVICEID_3COM_3C940: 1593 case DEVICEID_LINKSYS_EG1032: 1594 case DEVICEID_DLINK_DGE530T: 1595 sc->sk_type = SK_YUKON; 1596 break; 1597 default: 1598 printf("skc%d: unknown device!\n", unit); 1599 error = ENXIO; 1600 goto fail; 1601 } 1602 1603 /* Reset the adapter. */ 1604 sk_reset(sc); 1605 1606 sc->sk_unit = unit; 1607 1608 /* Read and save vital product data from EEPROM. 
*/ 1609 sk_vpd_read(sc); 1610 1611 if (sc->sk_type == SK_GENESIS) { 1612 /* Read and save RAM size and RAMbuffer offset */ 1613 switch(sk_win_read_1(sc, SK_EPROM0)) { 1614 case SK_RAMSIZE_512K_64: 1615 sc->sk_ramsize = 0x80000; 1616 sc->sk_rboff = SK_RBOFF_0; 1617 break; 1618 case SK_RAMSIZE_1024K_64: 1619 sc->sk_ramsize = 0x100000; 1620 sc->sk_rboff = SK_RBOFF_80000; 1621 break; 1622 case SK_RAMSIZE_1024K_128: 1623 sc->sk_ramsize = 0x100000; 1624 sc->sk_rboff = SK_RBOFF_0; 1625 break; 1626 case SK_RAMSIZE_2048K_128: 1627 sc->sk_ramsize = 0x200000; 1628 sc->sk_rboff = SK_RBOFF_0; 1629 break; 1630 default: 1631 printf("skc%d: unknown ram size: %d\n", 1632 sc->sk_unit, sk_win_read_1(sc, SK_EPROM0)); 1633 error = ENXIO; 1634 goto fail; 1635 } 1636 } else { 1637 sc->sk_ramsize = 0x20000; 1638 sc->sk_rboff = SK_RBOFF_0; 1639 } 1640 1641 /* Read and save physical media type */ 1642 switch(sk_win_read_1(sc, SK_PMDTYPE)) { 1643 case SK_PMD_1000BASESX: 1644 sc->sk_pmd = IFM_1000_SX; 1645 break; 1646 case SK_PMD_1000BASELX: 1647 sc->sk_pmd = IFM_1000_LX; 1648 break; 1649 case SK_PMD_1000BASECX: 1650 sc->sk_pmd = IFM_1000_CX; 1651 break; 1652 case SK_PMD_1000BASETX: 1653 sc->sk_pmd = IFM_1000_T; 1654 break; 1655 default: 1656 printf("skc%d: unknown media type: 0x%x\n", 1657 sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE)); 1658 error = ENXIO; 1659 goto fail; 1660 } 1661 1662 /* Announce the product name. 
*/ 1663 if (sc->sk_vpd_prodname != NULL) 1664 printf("skc%d: %s\n", sc->sk_unit, sc->sk_vpd_prodname); 1665 sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1); 1666 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT); 1667 *port = SK_PORT_A; 1668 device_set_ivars(sc->sk_devs[SK_PORT_A], port); 1669 1670 if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) { 1671 sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1); 1672 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT); 1673 *port = SK_PORT_B; 1674 device_set_ivars(sc->sk_devs[SK_PORT_B], port); 1675 } 1676 1677 /* Turn on the 'driver is loaded' LED. */ 1678 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON); 1679 1680 bus_generic_attach(dev); 1681 1682 /* Hook interrupt last to avoid having to lock softc */ 1683 error = bus_setup_intr(dev, sc->sk_irq, INTR_TYPE_NET, 1684 sk_intr, sc, &sc->sk_intrhand); 1685 1686 if (error) { 1687 printf("skc%d: couldn't set up irq\n", unit); 1688 goto fail; 1689 } 1690 1691 fail: 1692 if (error) 1693 skc_detach(dev); 1694 1695 return(error); 1696 } 1697 1698 /* 1699 * Shutdown hardware and free up resources. This can be called any 1700 * time after the mutex has been initialized. It is called in both 1701 * the error case in attach and the normal detach case so it needs 1702 * to be careful about only freeing resources that have actually been 1703 * allocated. 
1704 */ 1705 static int 1706 sk_detach(dev) 1707 device_t dev; 1708 { 1709 struct sk_if_softc *sc_if; 1710 struct ifnet *ifp; 1711 1712 sc_if = device_get_softc(dev); 1713 KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx), 1714 ("sk mutex not initialized in sk_detach")); 1715 SK_IF_LOCK(sc_if); 1716 1717 ifp = &sc_if->arpcom.ac_if; 1718 /* These should only be active if attach_xmac succeeded */ 1719 if (device_is_attached(dev)) { 1720 sk_stop(sc_if); 1721 ether_ifdetach(ifp); 1722 } 1723 if (sc_if->sk_miibus) 1724 device_delete_child(dev, sc_if->sk_miibus); 1725 bus_generic_detach(dev); 1726 if (sc_if->sk_cdata.sk_jumbo_buf) 1727 contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF); 1728 if (sc_if->sk_rdata) { 1729 contigfree(sc_if->sk_rdata, sizeof(struct sk_ring_data), 1730 M_DEVBUF); 1731 } 1732 SK_IF_UNLOCK(sc_if); 1733 1734 return(0); 1735 } 1736 1737 static int 1738 skc_detach(dev) 1739 device_t dev; 1740 { 1741 struct sk_softc *sc; 1742 1743 sc = device_get_softc(dev); 1744 KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized")); 1745 SK_LOCK(sc); 1746 1747 if (device_is_alive(dev)) { 1748 if (sc->sk_devs[SK_PORT_A] != NULL) 1749 device_delete_child(dev, sc->sk_devs[SK_PORT_A]); 1750 if (sc->sk_devs[SK_PORT_B] != NULL) 1751 device_delete_child(dev, sc->sk_devs[SK_PORT_B]); 1752 bus_generic_detach(dev); 1753 } 1754 1755 if (sc->sk_intrhand) 1756 bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand); 1757 if (sc->sk_irq) 1758 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq); 1759 if (sc->sk_res) 1760 bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res); 1761 1762 SK_UNLOCK(sc); 1763 mtx_destroy(&sc->sk_mtx); 1764 1765 return(0); 1766 } 1767 1768 static int 1769 sk_encap(sc_if, m_head, txidx) 1770 struct sk_if_softc *sc_if; 1771 struct mbuf *m_head; 1772 u_int32_t *txidx; 1773 { 1774 struct sk_tx_desc *f = NULL; 1775 struct mbuf *m; 1776 u_int32_t frag, cur, cnt = 0; 1777 1778 m = m_head; 1779 cur = frag = *txidx; 1780 1781 /* 1782 * 
Start packing the mbufs in this chain into 1783 * the fragment pointers. Stop when we run out 1784 * of fragments or hit the end of the mbuf chain. 1785 */ 1786 for (m = m_head; m != NULL; m = m->m_next) { 1787 if (m->m_len != 0) { 1788 if ((SK_TX_RING_CNT - 1789 (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2) 1790 return(ENOBUFS); 1791 f = &sc_if->sk_rdata->sk_tx_ring[frag]; 1792 f->sk_data_lo = vtophys(mtod(m, vm_offset_t)); 1793 f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT; 1794 if (cnt == 0) 1795 f->sk_ctl |= SK_TXCTL_FIRSTFRAG; 1796 else 1797 f->sk_ctl |= SK_TXCTL_OWN; 1798 cur = frag; 1799 SK_INC(frag, SK_TX_RING_CNT); 1800 cnt++; 1801 } 1802 } 1803 1804 if (m != NULL) 1805 return(ENOBUFS); 1806 1807 sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |= 1808 SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR; 1809 sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head; 1810 sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN; 1811 sc_if->sk_cdata.sk_tx_cnt += cnt; 1812 1813 *txidx = frag; 1814 1815 return(0); 1816 } 1817 1818 static void 1819 sk_start(ifp) 1820 struct ifnet *ifp; 1821 { 1822 struct sk_softc *sc; 1823 struct sk_if_softc *sc_if; 1824 struct mbuf *m_head = NULL; 1825 u_int32_t idx; 1826 1827 sc_if = ifp->if_softc; 1828 sc = sc_if->sk_softc; 1829 1830 SK_IF_LOCK(sc_if); 1831 1832 idx = sc_if->sk_cdata.sk_tx_prod; 1833 1834 while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) { 1835 IF_DEQUEUE(&ifp->if_snd, m_head); 1836 if (m_head == NULL) 1837 break; 1838 1839 /* 1840 * Pack the data into the transmit ring. If we 1841 * don't have room, set the OACTIVE flag and wait 1842 * for the NIC to drain the ring. 1843 */ 1844 if (sk_encap(sc_if, m_head, &idx)) { 1845 IF_PREPEND(&ifp->if_snd, m_head); 1846 ifp->if_flags |= IFF_OACTIVE; 1847 break; 1848 } 1849 1850 /* 1851 * If there's a BPF listener, bounce a copy of this frame 1852 * to him. 
1853 */ 1854 BPF_MTAP(ifp, m_head); 1855 } 1856 1857 /* Transmit */ 1858 sc_if->sk_cdata.sk_tx_prod = idx; 1859 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START); 1860 1861 /* Set a timeout in case the chip goes out to lunch. */ 1862 ifp->if_timer = 5; 1863 SK_IF_UNLOCK(sc_if); 1864 1865 return; 1866 } 1867 1868 1869 static void 1870 sk_watchdog(ifp) 1871 struct ifnet *ifp; 1872 { 1873 struct sk_if_softc *sc_if; 1874 1875 sc_if = ifp->if_softc; 1876 1877 printf("sk%d: watchdog timeout\n", sc_if->sk_unit); 1878 sk_init(sc_if); 1879 1880 return; 1881 } 1882 1883 static void 1884 skc_shutdown(dev) 1885 device_t dev; 1886 { 1887 struct sk_softc *sc; 1888 1889 sc = device_get_softc(dev); 1890 SK_LOCK(sc); 1891 1892 /* Turn off the 'driver is loaded' LED. */ 1893 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF); 1894 1895 /* 1896 * Reset the GEnesis controller. Doing this should also 1897 * assert the resets on the attached XMAC(s). 1898 */ 1899 sk_reset(sc); 1900 SK_UNLOCK(sc); 1901 1902 return; 1903 } 1904 1905 static void 1906 sk_rxeof(sc_if) 1907 struct sk_if_softc *sc_if; 1908 { 1909 struct sk_softc *sc; 1910 struct mbuf *m; 1911 struct ifnet *ifp; 1912 struct sk_chain *cur_rx; 1913 int total_len = 0; 1914 int i; 1915 u_int32_t rxstat; 1916 1917 sc = sc_if->sk_softc; 1918 ifp = &sc_if->arpcom.ac_if; 1919 i = sc_if->sk_cdata.sk_rx_prod; 1920 cur_rx = &sc_if->sk_cdata.sk_rx_chain[i]; 1921 1922 SK_LOCK_ASSERT(sc); 1923 1924 while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) { 1925 1926 cur_rx = &sc_if->sk_cdata.sk_rx_chain[i]; 1927 rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat; 1928 m = cur_rx->sk_mbuf; 1929 cur_rx->sk_mbuf = NULL; 1930 total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl); 1931 SK_INC(i, SK_RX_RING_CNT); 1932 1933 if (rxstat & XM_RXSTAT_ERRFRAME) { 1934 ifp->if_ierrors++; 1935 sk_newbuf(sc_if, cur_rx, m); 1936 continue; 1937 } 1938 1939 /* 1940 * Try to allocate a new jumbo buffer. 
If that 1941 * fails, copy the packet to mbufs and put the 1942 * jumbo buffer back in the ring so it can be 1943 * re-used. If allocating mbufs fails, then we 1944 * have to drop the packet. 1945 */ 1946 if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) { 1947 struct mbuf *m0; 1948 m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, 1949 ifp, NULL); 1950 sk_newbuf(sc_if, cur_rx, m); 1951 if (m0 == NULL) { 1952 printf("sk%d: no receive buffers " 1953 "available -- packet dropped!\n", 1954 sc_if->sk_unit); 1955 ifp->if_ierrors++; 1956 continue; 1957 } 1958 m = m0; 1959 } else { 1960 m->m_pkthdr.rcvif = ifp; 1961 m->m_pkthdr.len = m->m_len = total_len; 1962 } 1963 1964 ifp->if_ipackets++; 1965 SK_UNLOCK(sc); 1966 (*ifp->if_input)(ifp, m); 1967 SK_LOCK(sc); 1968 } 1969 1970 sc_if->sk_cdata.sk_rx_prod = i; 1971 1972 return; 1973 } 1974 1975 static void 1976 sk_txeof(sc_if) 1977 struct sk_if_softc *sc_if; 1978 { 1979 struct sk_tx_desc *cur_tx = NULL; 1980 struct ifnet *ifp; 1981 u_int32_t idx; 1982 1983 ifp = &sc_if->arpcom.ac_if; 1984 1985 /* 1986 * Go through our tx ring and free mbufs for those 1987 * frames that have been sent. 
1988 */ 1989 idx = sc_if->sk_cdata.sk_tx_cons; 1990 while(idx != sc_if->sk_cdata.sk_tx_prod) { 1991 cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx]; 1992 if (cur_tx->sk_ctl & SK_TXCTL_OWN) 1993 break; 1994 if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG) 1995 ifp->if_opackets++; 1996 if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) { 1997 m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf); 1998 sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL; 1999 } 2000 sc_if->sk_cdata.sk_tx_cnt--; 2001 SK_INC(idx, SK_TX_RING_CNT); 2002 ifp->if_timer = 0; 2003 } 2004 2005 sc_if->sk_cdata.sk_tx_cons = idx; 2006 2007 if (cur_tx != NULL) 2008 ifp->if_flags &= ~IFF_OACTIVE; 2009 2010 return; 2011 } 2012 2013 static void 2014 sk_tick(xsc_if) 2015 void *xsc_if; 2016 { 2017 struct sk_if_softc *sc_if; 2018 struct mii_data *mii; 2019 struct ifnet *ifp; 2020 int i; 2021 2022 sc_if = xsc_if; 2023 SK_IF_LOCK(sc_if); 2024 ifp = &sc_if->arpcom.ac_if; 2025 mii = device_get_softc(sc_if->sk_miibus); 2026 2027 if (!(ifp->if_flags & IFF_UP)) { 2028 SK_IF_UNLOCK(sc_if); 2029 return; 2030 } 2031 2032 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { 2033 sk_intr_bcom(sc_if); 2034 SK_IF_UNLOCK(sc_if); 2035 return; 2036 } 2037 2038 /* 2039 * According to SysKonnect, the correct way to verify that 2040 * the link has come back up is to poll bit 0 of the GPIO 2041 * register three times. This pin has the signal from the 2042 * link_sync pin connected to it; if we read the same link 2043 * state 3 times in a row, we know the link is up. 2044 */ 2045 for (i = 0; i < 3; i++) { 2046 if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET) 2047 break; 2048 } 2049 2050 if (i != 3) { 2051 sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz); 2052 SK_IF_UNLOCK(sc_if); 2053 return; 2054 } 2055 2056 /* Turn the GP0 interrupt back on. 
*/ 2057 SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET); 2058 SK_XM_READ_2(sc_if, XM_ISR); 2059 mii_tick(mii); 2060 untimeout(sk_tick, sc_if, sc_if->sk_tick_ch); 2061 2062 SK_IF_UNLOCK(sc_if); 2063 return; 2064 } 2065 2066 static void 2067 sk_intr_bcom(sc_if) 2068 struct sk_if_softc *sc_if; 2069 { 2070 struct mii_data *mii; 2071 struct ifnet *ifp; 2072 int status; 2073 mii = device_get_softc(sc_if->sk_miibus); 2074 ifp = &sc_if->arpcom.ac_if; 2075 2076 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); 2077 2078 /* 2079 * Read the PHY interrupt register to make sure 2080 * we clear any pending interrupts. 2081 */ 2082 status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR); 2083 2084 if (!(ifp->if_flags & IFF_RUNNING)) { 2085 sk_init_xmac(sc_if); 2086 return; 2087 } 2088 2089 if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) { 2090 int lstat; 2091 lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 2092 BRGPHY_MII_AUXSTS); 2093 2094 if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) { 2095 mii_mediachg(mii); 2096 /* Turn off the link LED. */ 2097 SK_IF_WRITE_1(sc_if, 0, 2098 SK_LINKLED1_CTL, SK_LINKLED_OFF); 2099 sc_if->sk_link = 0; 2100 } else if (status & BRGPHY_ISR_LNK_CHG) { 2101 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, 2102 BRGPHY_MII_IMR, 0xFF00); 2103 mii_tick(mii); 2104 sc_if->sk_link = 1; 2105 /* Turn on the link LED. */ 2106 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, 2107 SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF| 2108 SK_LINKLED_BLINK_OFF); 2109 } else { 2110 mii_tick(mii); 2111 sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz); 2112 } 2113 } 2114 2115 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); 2116 2117 return; 2118 } 2119 2120 static void 2121 sk_intr_xmac(sc_if) 2122 struct sk_if_softc *sc_if; 2123 { 2124 struct sk_softc *sc; 2125 u_int16_t status; 2126 2127 sc = sc_if->sk_softc; 2128 status = SK_XM_READ_2(sc_if, XM_ISR); 2129 2130 /* 2131 * Link has gone down. 
Start MII tick timeout to 2132 * watch for link resync. 2133 */ 2134 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) { 2135 if (status & XM_ISR_GP0_SET) { 2136 SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET); 2137 sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz); 2138 } 2139 2140 if (status & XM_ISR_AUTONEG_DONE) { 2141 sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz); 2142 } 2143 } 2144 2145 if (status & XM_IMR_TX_UNDERRUN) 2146 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO); 2147 2148 if (status & XM_IMR_RX_OVERRUN) 2149 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO); 2150 2151 status = SK_XM_READ_2(sc_if, XM_ISR); 2152 2153 return; 2154 } 2155 2156 static void 2157 sk_intr_yukon(sc_if) 2158 struct sk_if_softc *sc_if; 2159 { 2160 int status; 2161 2162 status = SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR); 2163 2164 return; 2165 } 2166 2167 static void 2168 sk_intr(xsc) 2169 void *xsc; 2170 { 2171 struct sk_softc *sc = xsc; 2172 struct sk_if_softc *sc_if0 = NULL, *sc_if1 = NULL; 2173 struct ifnet *ifp0 = NULL, *ifp1 = NULL; 2174 u_int32_t status; 2175 2176 SK_LOCK(sc); 2177 2178 sc_if0 = sc->sk_if[SK_PORT_A]; 2179 sc_if1 = sc->sk_if[SK_PORT_B]; 2180 2181 if (sc_if0 != NULL) 2182 ifp0 = &sc_if0->arpcom.ac_if; 2183 if (sc_if1 != NULL) 2184 ifp1 = &sc_if1->arpcom.ac_if; 2185 2186 for (;;) { 2187 status = CSR_READ_4(sc, SK_ISSR); 2188 if (!(status & sc->sk_intrmask)) 2189 break; 2190 2191 /* Handle receive interrupts first. */ 2192 if (status & SK_ISR_RX1_EOF) { 2193 sk_rxeof(sc_if0); 2194 CSR_WRITE_4(sc, SK_BMU_RX_CSR0, 2195 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START); 2196 } 2197 if (status & SK_ISR_RX2_EOF) { 2198 sk_rxeof(sc_if1); 2199 CSR_WRITE_4(sc, SK_BMU_RX_CSR1, 2200 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START); 2201 } 2202 2203 /* Then transmit interrupts. 
*/ 2204 if (status & SK_ISR_TX1_S_EOF) { 2205 sk_txeof(sc_if0); 2206 CSR_WRITE_4(sc, SK_BMU_TXS_CSR0, 2207 SK_TXBMU_CLR_IRQ_EOF); 2208 } 2209 if (status & SK_ISR_TX2_S_EOF) { 2210 sk_txeof(sc_if1); 2211 CSR_WRITE_4(sc, SK_BMU_TXS_CSR1, 2212 SK_TXBMU_CLR_IRQ_EOF); 2213 } 2214 2215 /* Then MAC interrupts. */ 2216 if (status & SK_ISR_MAC1 && ifp0->if_flags & IFF_RUNNING) { 2217 if (sc->sk_type == SK_GENESIS) 2218 sk_intr_xmac(sc_if0); 2219 else 2220 sk_intr_yukon(sc_if0); 2221 } 2222 2223 if (status & SK_ISR_MAC2 && ifp1->if_flags & IFF_RUNNING) { 2224 if (sc->sk_type == SK_GENESIS) 2225 sk_intr_xmac(sc_if1); 2226 else 2227 sk_intr_yukon(sc_if1); 2228 } 2229 2230 if (status & SK_ISR_EXTERNAL_REG) { 2231 if (ifp0 != NULL && 2232 sc_if0->sk_phytype == SK_PHYTYPE_BCOM) 2233 sk_intr_bcom(sc_if0); 2234 if (ifp1 != NULL && 2235 sc_if1->sk_phytype == SK_PHYTYPE_BCOM) 2236 sk_intr_bcom(sc_if1); 2237 } 2238 } 2239 2240 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); 2241 2242 if (ifp0 != NULL && ifp0->if_snd.ifq_head != NULL) 2243 sk_start(ifp0); 2244 if (ifp1 != NULL && ifp1->if_snd.ifq_head != NULL) 2245 sk_start(ifp1); 2246 2247 SK_UNLOCK(sc); 2248 2249 return; 2250 } 2251 2252 static void 2253 sk_init_xmac(sc_if) 2254 struct sk_if_softc *sc_if; 2255 { 2256 struct sk_softc *sc; 2257 struct ifnet *ifp; 2258 struct sk_bcom_hack bhack[] = { 2259 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 }, 2260 { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 }, 2261 { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 }, 2262 { 0, 0 } }; 2263 2264 sc = sc_if->sk_softc; 2265 ifp = &sc_if->arpcom.ac_if; 2266 2267 /* Unreset the XMAC. */ 2268 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET); 2269 DELAY(1000); 2270 2271 /* Reset the XMAC's internal state. 
	 */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);

	/* Save the XMAC II revision */
	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));

	/*
	 * Perform additional initialization for external PHYs,
	 * namely for the 1000baseTX cards that use the XMAC's
	 * GMII mode.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		int			i = 0;
		u_int32_t		val;

		/* Take PHY out of reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A)
			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
		else
			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
		sk_win_write_4(sc, SK_GPIO, val);

		/* Enable GMII mode on the XMAC. */
		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);

		/* Reset the Broadcom PHY, then unmask its interrupts. */
		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
		DELAY(10000);
		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_IMR, 0xFFF0);

		/*
		 * Early versions of the BCM5400 apparently have
		 * a bug that requires them to have their reserved
		 * registers initialized to some magic values. I don't
		 * know what the numbers do, I'm just the messenger.
		 */
		if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
		    == 0x6041) {
			while(bhack[i].reg) {
				sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
				    bhack[i].reg, bhack[i].val);
				i++;
			}
		}
	}

	/* Set station address */
	SK_XM_WRITE_2(sc_if, XM_PAR0,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0]));
	SK_XM_WRITE_2(sc_if, XM_PAR1,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2]));
	SK_XM_WRITE_2(sc_if, XM_PAR2,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4]));
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);

	/* Accept broadcasts only if the interface has IFF_BROADCAST set. */
	if (ifp->if_flags & IFF_BROADCAST) {
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	} else {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	}

	/* We don't need the FCS appended to the packet. */
	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);

	/* We want short frames padded to 60 bytes. */
	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);

	/*
	 * Enable the reception of all error frames. This is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transferring frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
	    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
	    XM_MODE_RX_INRANGELEN);

	/* Permit oversized frames only when the MTU calls for them. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
	else
		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);

	/*
	 * Bump up the transmit threshold. This helps hold off transmit
	 * underruns when we're blasting traffic from both ports at once.
	 */
	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);

	/* Set promiscuous mode */
	sk_setpromisc(sc_if);

	/* Set multicast filter */
	sk_setmulti(sc_if);

	/*
	 * Clear pending interrupt status (read-to-clear of XM_ISR), then
	 * program the interrupt mask.  For internal-PHY (XMAC) boards the
	 * normal interrupt set is enabled; for external PHYs 0xFFFF is
	 * written instead -- NOTE(review): presumably that masks all XMAC
	 * interrupts because link events arrive via the PHY; confirm
	 * against the XMAC II datasheet.
	 */
	SK_XM_READ_2(sc_if, XM_ISR);
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
	else
		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Configure MAC arbiter timeouts per XMAC silicon revision. */
	switch(sc_if->sk_xmac_rev) {
	case XM_XMAC_REV_B2:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	case XM_XMAC_REV_C1:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
		/*
		 * NOTE(review): REV_C1 uses the _B2 recovery value here --
		 * looks deliberate in the original, but worth confirming
		 * against the GEnesis manual.
		 */
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	default:
		break;
	}
	/* Release the MAC arbiter from reset. */
	sk_win_write_2(sc, SK_MACARB_CTL,
	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);

	sc_if->sk_link = 1;

	return;
}

/*
 * sk_init_yukon:
 *	Reset and initialize the Marvell Yukon GMAC and GPHY, program
 *	the station address and bring the MAC FIFOs online.  This is the
 *	Yukon-family counterpart of sk_init_xmac().
 */
static void sk_init_yukon(sc_if)
	struct sk_if_softc	*sc_if;
{
	u_int32_t		phy;
	u_int16_t		reg;
	int			i;

	/* GMAC and GPHY Reset */
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
	DELAY(1000);
	/*
	 * NOTE(review): a RESET_CLEAR immediately followed by another
	 * RESET_SET looks odd, but this matches the original driver's
	 * init sequence -- verify against the Yukon documentation
	 * before changing the order.
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
	DELAY(1000);

	/* Base GPHY configuration: full autonegotiation, pause enabled. */
	phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
		SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;

	/* Select fiber or copper mode to match the board's media type. */
	switch(sc_if->sk_softc->sk_pmd) {
	case IFM_1000_SX:
	case IFM_1000_LX:
		phy |= SK_GPHY_FIBER;
		break;

	case IFM_1000_CX:
	case IFM_1000_T:
		phy |= SK_GPHY_COPPER;
		break;
	}

	/* Release the GPHY, then the GMAC, from reset. */
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
	DELAY(1000);
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
		SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);

	/* unused read of the interrupt source register */
	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);

	reg = SK_YU_READ_2(sc_if, YUKON_PAR);

	/* MIB Counter Clear Mode set */
	reg |= YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* MIB Counter Clear Mode clear */
	reg &= ~YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* receive control reg */
	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);

	/* transmit parameter register */
	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
		YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );

	/* serial mode register */
	SK_YU_WRITE_2(sc_if, YUKON_SMR, YU_SMR_DATA_BLIND(0x1c) |
		YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e));

	/* Setup Yukon's address */
	for (i = 0; i < 3; i++) {
		/* Write Source Address 1 (unicast filter) */
		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
			sc_if->arpcom.ac_enaddr[i * 2] |
			sc_if->arpcom.ac_enaddr[i * 2 + 1] << 8);
	}

	/* Copy Source Address 2 from the controller's MAC address window. */
	for (i = 0; i < 3; i++) {
		reg = sk_win_read_2(sc_if->sk_softc,
			SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
	}

	/* Set promiscuous mode */
	sk_setpromisc(sc_if);

	/* Set multicast filter */
	sk_setmulti(sc_if);

	/*
	 * Counter-overflow interrupt mask registers are written as 0.
	 * NOTE(review): the original comment said this "enables" the
	 * interrupt mask -- verify the register polarity (0 may mean
	 * all masked) against the Yukon manual.
	 */
	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);

	/* Configure RX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON);

	/* Configure TX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
}

/*
 * Note that to properly initialize any part of the GEnesis chip,
 * you first have to take it out of reset mode.
 *
 * sk_init:
 *	(Re)initialize one port of the adapter and mark the interface
 *	running.  Called with the interface softc as the opaque argument;
 *	takes and releases the per-interface lock.
 */
static void
sk_init(xsc)
	void			*xsc;
{
	struct sk_if_softc	*sc_if = xsc;
	struct sk_softc		*sc;
	struct ifnet		*ifp;
	struct mii_data		*mii;
	u_int16_t		reg;

	SK_IF_LOCK(sc_if);

	ifp = &sc_if->arpcom.ac_if;
	sc = sc_if->sk_softc;
	mii = device_get_softc(sc_if->sk_miibus);

	/* Cancel pending I/O and free all RX/TX buffers.
	 */
	sk_stop(sc_if);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure LINK_SYNC LED */
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			SK_LINKLED_LINKSYNC_ON);

		/* Configure RX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
			SK_RXLEDCTL_COUNTER_START);

		/* Configure TX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
			SK_TXLEDCTL_COUNTER_START);
	}

	/* Configure I2C registers */

	/* Configure the MAC: XMAC for GEnesis boards, GMAC for Yukon. */
	switch (sc->sk_type) {
	case SK_GENESIS:
		sk_init_xmac(sc_if);
		break;
	case SK_YUKON:
		sk_init_yukon(sc_if);
		break;
	}
	/* Kick off media (re)selection through the MII layer. */
	mii_mediachg(mii);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure MAC FIFOs */
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);

		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
	}

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);

	/*
	 * Configure RAMbuffers: point the RX RAM buffer's read/write
	 * pointers at the start of this port's RAM region, then enable it.
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

	/* Same for the TX RAM buffer, in store-and-forward mode. */
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);

	/*
	 * Configure BMUs: bring the RX and TX DMA units online and point
	 * them at the physical addresses of the descriptor rings.
	 * NOTE(review): vtophys() implies the rings must be physically
	 * contiguous and the device only gets a 32-bit address (HI = 0).
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
	    vtophys(&sc_if->sk_rdata->sk_rx_ring[0]));
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
	    vtophys(&sc_if->sk_rdata->sk_tx_ring[0]));
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);

	/* Init descriptors; bail out (and undo) if no mbufs are available. */
	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
		printf("sk%d: initialization failed: no "
		    "memory for rx buffers\n", sc_if->sk_unit);
		sk_stop(sc_if);
		SK_IF_UNLOCK(sc_if);
		return;
	}
	sk_init_tx_ring(sc_if);

	/*
	 * Configure interrupt handling: read ISSR (presumably clears any
	 * stale status -- TODO confirm), then unmask this port's
	 * interrupts in the shared mask register.
	 */
	CSR_READ_4(sc, SK_ISSR);
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask |= SK_INTRS1;
	else
		sc->sk_intrmask |= SK_INTRS2;

	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;

	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Start BMUs.
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);

	/* Enable the MAC's transmit and receive paths. */
	switch(sc->sk_type) {
	case SK_GENESIS:
		/* Enable XMACs TX and RX state machines */
		SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
		SK_XM_SETBIT_2(sc_if, XM_MMUCMD,
		    XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
		break;
	case SK_YUKON:
		reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
		reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
		reg &= ~(YU_GPCR_SPEED_EN | YU_GPCR_DPLX_EN);
		SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
	}

	/* Mark the interface up and ready to transmit. */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	SK_IF_UNLOCK(sc_if);

	return;
}

/*
 * sk_stop:
 *	Stop one port: cancel the tick timeout, put the PHY and MAC into
 *	reset, take the BMUs/RAM buffers offline, mask this port's
 *	interrupts and free any mbufs still held by the RX/TX rings.
 *	Takes and releases the per-interface lock.
 */
static void
sk_stop(sc_if)
	struct sk_if_softc	*sc_if;
{
	int			i;
	struct sk_softc		*sc;
	struct ifnet		*ifp;

	SK_IF_LOCK(sc_if);
	sc = sc_if->sk_softc;
	ifp = &sc_if->arpcom.ac_if;

	untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);

	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		u_int32_t		val;

		/* Put PHY back into reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A) {
			val |= SK_GPIO_DIR0;
			val &= ~SK_GPIO_DAT0;
		} else {
			val |= SK_GPIO_DIR2;
			val &= ~SK_GPIO_DAT2;
		}
		sk_win_write_4(sc, SK_GPIO, val);
	}

	/*
	 * Turn off various components of this interface.
	 * NOTE(review): the SK_XM_* accesses here and below touch XMAC
	 * (GEnesis) registers unconditionally, even on Yukon boards --
	 * confirm this is harmless on Yukon before relying on it.
	 */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
	switch (sc->sk_type) {
	case SK_GENESIS:
		SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL,
		    SK_TXMACCTL_XMAC_RESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
		break;
	case SK_YUKON:
		SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
		    SK_RFCTL_RESET_SET);
		SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST,
		    SK_TFCTL_RESET_SET);
		break;
	}
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	/*
	 * NOTE(review): the RX LED stop constant is written to the TX LED
	 * register; if the stop bits differ between the two registers this
	 * is a bug -- verify against SK_TXLEDCTL_COUNTER_STOP.
	 */
	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);

	/* Disable this port's interrupts in the shared mask register. */
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask &= ~SK_INTRS1;
	else
		sc->sk_intrmask &= ~SK_INTRS2;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Clear pending XMAC status and mask all of its interrupts. */
	SK_XM_READ_2(sc_if, XM_ISR);
	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < SK_RX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
		}
	}

	for (i = 0; i < SK_TX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
		}
	}

	/* Mark the interface down. */
	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
	SK_IF_UNLOCK(sc_if);
	return;
}