/*	$OpenBSD: if_sk.c,v 2.33 2003/08/12 05:23:06 nate Exp $	*/

/*
 * Copyright (c) 1997, 1998, 1999, 2000
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
 * the SK-984x series adapters, both single port and dual port.
 * References:
 *	The XaQti XMAC II datasheet,
 *	http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
 *	The SysKonnect GEnesis manual, http://www.syskonnect.com
 *
 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
 * XMAC II datasheet online.
I have put my copy at people.freebsd.org as a 63 * convenience to others until Vitesse corrects this problem: 64 * 65 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf 66 * 67 * Written by Bill Paul <wpaul@ee.columbia.edu> 68 * Department of Electrical Engineering 69 * Columbia University, New York City 70 */ 71 /* 72 * The SysKonnect gigabit ethernet adapters consist of two main 73 * components: the SysKonnect GEnesis controller chip and the XaQti Corp. 74 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC 75 * components and a PHY while the GEnesis controller provides a PCI 76 * interface with DMA support. Each card may have between 512K and 77 * 2MB of SRAM on board depending on the configuration. 78 * 79 * The SysKonnect GEnesis controller can have either one or two XMAC 80 * chips connected to it, allowing single or dual port NIC configurations. 81 * SysKonnect has the distinction of being the only vendor on the market 82 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs, 83 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the 84 * XMAC registers. This driver takes advantage of these features to allow 85 * both XMACs to operate as independent interfaces. 86 */ 87 88 #include <sys/param.h> 89 #include <sys/systm.h> 90 #include <sys/sockio.h> 91 #include <sys/mbuf.h> 92 #include <sys/malloc.h> 93 #include <sys/kernel.h> 94 #include <sys/module.h> 95 #include <sys/socket.h> 96 #include <sys/queue.h> 97 98 #include <net/if.h> 99 #include <net/if_arp.h> 100 #include <net/ethernet.h> 101 #include <net/if_dl.h> 102 #include <net/if_media.h> 103 104 #include <net/bpf.h> 105 106 #include <vm/vm.h> /* for vtophys */ 107 #include <vm/pmap.h> /* for vtophys */ 108 #include <machine/bus_pio.h> 109 #include <machine/bus_memio.h> 110 #include <machine/bus.h> 111 #include <machine/resource.h> 112 #include <sys/bus.h> 113 #include <sys/rman.h> 114 115 #include <dev/mii/mii.h> 116 #include <dev/mii/miivar.h> 117 #include <dev/mii/brgphyreg.h> 118 119 #include <dev/pci/pcireg.h> 120 #include <dev/pci/pcivar.h> 121 122 #if 0 123 #define SK_USEIOSPACE 124 #endif 125 126 #include <pci/if_skreg.h> 127 #include <pci/xmaciireg.h> 128 #include <pci/yukonreg.h> 129 130 MODULE_DEPEND(sk, pci, 1, 1, 1); 131 MODULE_DEPEND(sk, ether, 1, 1, 1); 132 MODULE_DEPEND(sk, miibus, 1, 1, 1); 133 134 /* "controller miibus0" required. See GENERIC if you get errors here. 
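 * (miibus_if.h is generated from miibus_if.m at kernel build time, which is
 * why the miibus device has to be part of the kernel configuration for this
 * driver to compile.)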
*/ 135 #include "miibus_if.h" 136 137 #ifndef lint 138 static const char rcsid[] = 139 "$FreeBSD$"; 140 #endif 141 142 static struct sk_type sk_devs[] = { 143 { 144 VENDORID_SK, 145 DEVICEID_SK_V1, 146 "SysKonnect Gigabit Ethernet (V1.0)" 147 }, 148 { 149 VENDORID_SK, 150 DEVICEID_SK_V2, 151 "SysKonnect Gigabit Ethernet (V2.0)" 152 }, 153 { 154 VENDORID_MARVELL, 155 DEVICEID_SK_V2, 156 "Marvell Gigabit Ethernet" 157 }, 158 { 159 VENDORID_3COM, 160 DEVICEID_3COM_3C940, 161 "3Com 3C940 Gigabit Ethernet" 162 }, 163 { 164 VENDORID_LINKSYS, 165 DEVICEID_LINKSYS_EG1032, 166 "Linksys EG1032 Gigabit Ethernet" 167 }, 168 { 169 VENDORID_DLINK, 170 DEVICEID_DLINK_DGE530T, 171 "D-Link DGE-530T Gigabit Ethernet" 172 }, 173 { 0, 0, NULL } 174 }; 175 176 static int skc_probe (device_t); 177 static int skc_attach (device_t); 178 static int skc_detach (device_t); 179 static void skc_shutdown (device_t); 180 static int sk_detach (device_t); 181 static int sk_probe (device_t); 182 static int sk_attach (device_t); 183 static void sk_tick (void *); 184 static void sk_intr (void *); 185 static void sk_intr_xmac (struct sk_if_softc *); 186 static void sk_intr_bcom (struct sk_if_softc *); 187 static void sk_intr_yukon (struct sk_if_softc *); 188 static void sk_rxeof (struct sk_if_softc *); 189 static void sk_txeof (struct sk_if_softc *); 190 static int sk_encap (struct sk_if_softc *, struct mbuf *, 191 u_int32_t *); 192 static void sk_start (struct ifnet *); 193 static int sk_ioctl (struct ifnet *, u_long, caddr_t); 194 static void sk_init (void *); 195 static void sk_init_xmac (struct sk_if_softc *); 196 static void sk_init_yukon (struct sk_if_softc *); 197 static void sk_stop (struct sk_if_softc *); 198 static void sk_watchdog (struct ifnet *); 199 static int sk_ifmedia_upd (struct ifnet *); 200 static void sk_ifmedia_sts (struct ifnet *, struct ifmediareq *); 201 static void sk_reset (struct sk_softc *); 202 static int sk_newbuf (struct sk_if_softc *, 203 struct sk_chain *, struct mbuf *); 204 static int sk_alloc_jumbo_mem (struct sk_if_softc *); 205 static void *sk_jalloc (struct sk_if_softc *); 206 static void sk_jfree (void *, void *); 207 static int sk_init_rx_ring (struct sk_if_softc *); 208 static void sk_init_tx_ring (struct sk_if_softc *); 209 static u_int32_t sk_win_read_4 (struct sk_softc *, int); 210 static u_int16_t sk_win_read_2 (struct sk_softc *, int); 211 static u_int8_t sk_win_read_1 (struct sk_softc *, int); 212 static void sk_win_write_4 (struct sk_softc *, int, u_int32_t); 213 static void sk_win_write_2 (struct sk_softc *, int, u_int32_t); 214 static void sk_win_write_1 (struct sk_softc *, int, u_int32_t); 215 static u_int8_t sk_vpd_readbyte (struct sk_softc *, int); 216 static void sk_vpd_read_res (struct sk_softc *, struct vpd_res *, int); 217 static void sk_vpd_read (struct sk_softc *); 218 219 static int sk_miibus_readreg (device_t, int, int); 220 static int sk_miibus_writereg (device_t, int, int, int); 221 static void sk_miibus_statchg (device_t); 222 223 static int sk_xmac_miibus_readreg (struct sk_if_softc *, int, int); 224 static int sk_xmac_miibus_writereg (struct sk_if_softc *, int, int, 225 int); 226 static void sk_xmac_miibus_statchg (struct sk_if_softc *); 227 228 static int sk_marv_miibus_readreg (struct sk_if_softc *, int, int); 229 static int sk_marv_miibus_writereg (struct sk_if_softc *, int, int, 230 int); 231 static void sk_marv_miibus_statchg (struct sk_if_softc *); 232 233 static uint32_t sk_xmchash (const uint8_t *); 234 static uint32_t sk_gmchash (const uint8_t *); 
235 static void sk_setfilt (struct sk_if_softc *, caddr_t, int); 236 static void sk_setmulti (struct sk_if_softc *); 237 static void sk_setpromisc (struct sk_if_softc *); 238 239 #ifdef SK_USEIOSPACE 240 #define SK_RES SYS_RES_IOPORT 241 #define SK_RID SK_PCI_LOIO 242 #else 243 #define SK_RES SYS_RES_MEMORY 244 #define SK_RID SK_PCI_LOMEM 245 #endif 246 247 /* 248 * Note that we have newbus methods for both the GEnesis controller 249 * itself and the XMAC(s). The XMACs are children of the GEnesis, and 250 * the miibus code is a child of the XMACs. We need to do it this way 251 * so that the miibus drivers can access the PHY registers on the 252 * right PHY. It's not quite what I had in mind, but it's the only 253 * design that achieves the desired effect. 254 */ 255 static device_method_t skc_methods[] = { 256 /* Device interface */ 257 DEVMETHOD(device_probe, skc_probe), 258 DEVMETHOD(device_attach, skc_attach), 259 DEVMETHOD(device_detach, skc_detach), 260 DEVMETHOD(device_shutdown, skc_shutdown), 261 262 /* bus interface */ 263 DEVMETHOD(bus_print_child, bus_generic_print_child), 264 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 265 266 { 0, 0 } 267 }; 268 269 static driver_t skc_driver = { 270 "skc", 271 skc_methods, 272 sizeof(struct sk_softc) 273 }; 274 275 static devclass_t skc_devclass; 276 277 static device_method_t sk_methods[] = { 278 /* Device interface */ 279 DEVMETHOD(device_probe, sk_probe), 280 DEVMETHOD(device_attach, sk_attach), 281 DEVMETHOD(device_detach, sk_detach), 282 DEVMETHOD(device_shutdown, bus_generic_shutdown), 283 284 /* bus interface */ 285 DEVMETHOD(bus_print_child, bus_generic_print_child), 286 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 287 288 /* MII interface */ 289 DEVMETHOD(miibus_readreg, sk_miibus_readreg), 290 DEVMETHOD(miibus_writereg, sk_miibus_writereg), 291 DEVMETHOD(miibus_statchg, sk_miibus_statchg), 292 293 { 0, 0 } 294 }; 295 296 static driver_t sk_driver = { 297 "sk", 298 sk_methods, 299 sizeof(struct sk_if_softc) 300 }; 301 302 static devclass_t sk_devclass; 303 304 DRIVER_MODULE(sk, pci, skc_driver, skc_devclass, 0, 0); 305 DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0); 306 DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0); 307 308 #define SK_SETBIT(sc, reg, x) \ 309 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x) 310 311 #define SK_CLRBIT(sc, reg, x) \ 312 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x) 313 314 #define SK_WIN_SETBIT_4(sc, reg, x) \ 315 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x) 316 317 #define SK_WIN_CLRBIT_4(sc, reg, x) \ 318 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x) 319 320 #define SK_WIN_SETBIT_2(sc, reg, x) \ 321 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x) 322 323 #define SK_WIN_CLRBIT_2(sc, reg, x) \ 324 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x) 325 326 static u_int32_t 327 sk_win_read_4(sc, reg) 328 struct sk_softc *sc; 329 int reg; 330 { 331 #ifdef SK_USEIOSPACE 332 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 333 return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg))); 334 #else 335 return(CSR_READ_4(sc, reg)); 336 #endif 337 } 338 339 static u_int16_t 340 sk_win_read_2(sc, reg) 341 struct sk_softc *sc; 342 int reg; 343 { 344 #ifdef SK_USEIOSPACE 345 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 346 return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg))); 347 #else 348 return(CSR_READ_2(sc, reg)); 349 #endif 350 } 351 352 static u_int8_t 353 sk_win_read_1(sc, reg) 354 struct sk_softc *sc; 355 int reg; 356 { 357 #ifdef SK_USEIOSPACE 358 CSR_WRITE_4(sc, SK_RAP, 
SK_WIN(reg));
	return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
#else
	return(CSR_READ_1(sc, reg));
#endif
}

static void
sk_win_write_4(sc, reg, val)
	struct sk_softc		*sc;
	int			reg;
	u_int32_t		val;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
#else
	CSR_WRITE_4(sc, reg, val);
#endif
	return;
}

static void
sk_win_write_2(sc, reg, val)
	struct sk_softc		*sc;
	int			reg;
	u_int32_t		val;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val);
#else
	CSR_WRITE_2(sc, reg, val);
#endif
	return;
}

static void
sk_win_write_1(sc, reg, val)
	struct sk_softc		*sc;
	int			reg;
	u_int32_t		val;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
#else
	CSR_WRITE_1(sc, reg, val);
#endif
	return;
}

/*
 * The VPD EEPROM contains Vital Product Data, as suggested in
 * the PCI 2.1 specification. The VPD data is separated into areas
 * denoted by resource IDs. The SysKonnect VPD contains an ID string
 * resource (the name of the adapter), a read-only area resource
 * containing various key/data fields and a read/write area which
 * can be used to store asset management information or log messages.
 * We read the ID string and read-only area into buffers attached to
 * the controller softc structure for later use. At the moment,
 * we only use the ID string during skc_attach().
 */
static u_int8_t
sk_vpd_readbyte(sc, addr)
	struct sk_softc		*sc;
	int			addr;
{
	int			i;

	sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr);
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (sk_win_read_2(sc,
		    SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG)
			break;
	}

	if (i == SK_TIMEOUT)
		return(0);

	return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA)));
}

static void
sk_vpd_read_res(sc, res, addr)
	struct sk_softc		*sc;
	struct vpd_res		*res;
	int			addr;
{
	int			i;
	u_int8_t		*ptr;

	ptr = (u_int8_t *)res;
	for (i = 0; i < sizeof(struct vpd_res); i++)
		ptr[i] = sk_vpd_readbyte(sc, i + addr);

	return;
}

static void
sk_vpd_read(sc)
	struct sk_softc		*sc;
{
	int			pos = 0, i;
	struct vpd_res		res;

	if (sc->sk_vpd_prodname != NULL)
		free(sc->sk_vpd_prodname, M_DEVBUF);
	if (sc->sk_vpd_readonly != NULL)
		free(sc->sk_vpd_readonly, M_DEVBUF);
	sc->sk_vpd_prodname = NULL;
	sc->sk_vpd_readonly = NULL;

	sk_vpd_read_res(sc, &res, pos);

	/*
	 * Bail out quietly if the EEPROM appears to be missing or empty.
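	 * (A missing or blank serial EEPROM reads back as all 0xff bytes,
	 * which is why an all-ones resource header below is treated as
	 * 'no VPD present' rather than reported as an error.)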
	 */
	if (res.vr_id == 0xff && res.vr_len == 0xff && res.vr_pad == 0xff)
		return;

	if (res.vr_id != VPD_RES_ID) {
		printf("skc%d: bad VPD resource id: expected %x got %x\n",
		    sc->sk_unit, VPD_RES_ID, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
	for (i = 0; i < res.vr_len; i++)
		sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos);
	sc->sk_vpd_prodname[i] = '\0';
	pos += i;

	sk_vpd_read_res(sc, &res, pos);

	if (res.vr_id != VPD_RES_READ) {
		printf("skc%d: bad VPD resource id: expected %x got %x\n",
		    sc->sk_unit, VPD_RES_READ, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
	for (i = 0; i < res.vr_len; i++)
		sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos);

	return;
}

static int
sk_miibus_readreg(dev, phy, reg)
	device_t		dev;
	int			phy, reg;
{
	struct sk_if_softc	*sc_if;

	sc_if = device_get_softc(dev);

	switch(sc_if->sk_softc->sk_type) {
	case SK_GENESIS:
		return(sk_xmac_miibus_readreg(sc_if, phy, reg));
	case SK_YUKON:
		return(sk_marv_miibus_readreg(sc_if, phy, reg));
	}

	return(0);
}

static int
sk_miibus_writereg(dev, phy, reg, val)
	device_t		dev;
	int			phy, reg, val;
{
	struct sk_if_softc	*sc_if;

	sc_if = device_get_softc(dev);

	switch(sc_if->sk_softc->sk_type) {
	case SK_GENESIS:
		return(sk_xmac_miibus_writereg(sc_if, phy, reg, val));
	case SK_YUKON:
		return(sk_marv_miibus_writereg(sc_if, phy, reg, val));
	}

	return(0);
}

static void
sk_miibus_statchg(dev)
	device_t		dev;
{
	struct sk_if_softc	*sc_if;

	sc_if = device_get_softc(dev);

	switch(sc_if->sk_softc->sk_type) {
	case SK_GENESIS:
		sk_xmac_miibus_statchg(sc_if);
		break;
	case SK_YUKON:
		sk_marv_miibus_statchg(sc_if);
		break;
	}

	return;
}

static int
sk_xmac_miibus_readreg(sc_if, phy, reg)
	struct sk_if_softc	*sc_if;
	int			phy, reg;
{
	int			i;

	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
		return(0);

	SK_IF_LOCK(sc_if);
	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	SK_XM_READ_2(sc_if, XM_PHY_DATA);
	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
		for (i = 0; i < SK_TIMEOUT; i++) {
			DELAY(1);
			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
			    XM_MMUCMD_PHYDATARDY)
				break;
		}

		if (i == SK_TIMEOUT) {
			printf("sk%d: phy failed to come ready\n",
			    sc_if->sk_unit);
			SK_IF_UNLOCK(sc_if);
			return(0);
		}
	}
	DELAY(1);
	i = SK_XM_READ_2(sc_if, XM_PHY_DATA);
	SK_IF_UNLOCK(sc_if);
	return(i);
}

static int
sk_xmac_miibus_writereg(sc_if, phy, reg, val)
	struct sk_if_softc	*sc_if;
	int			phy, reg, val;
{
	int			i;

	SK_IF_LOCK(sc_if);
	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	for (i = 0; i < SK_TIMEOUT; i++) {
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}

	if (i == SK_TIMEOUT) {
		printf("sk%d: phy failed to come ready\n", sc_if->sk_unit);
		SK_IF_UNLOCK(sc_if);
		return(ETIMEDOUT);
	}

	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}
	SK_IF_UNLOCK(sc_if);
	if (i == SK_TIMEOUT)
printf("sk%d: phy write timed out\n", sc_if->sk_unit); 630 631 return(0); 632 } 633 634 static void 635 sk_xmac_miibus_statchg(sc_if) 636 struct sk_if_softc *sc_if; 637 { 638 struct mii_data *mii; 639 640 mii = device_get_softc(sc_if->sk_miibus); 641 642 SK_IF_LOCK(sc_if); 643 /* 644 * If this is a GMII PHY, manually set the XMAC's 645 * duplex mode accordingly. 646 */ 647 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) { 648 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 649 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX); 650 } else { 651 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX); 652 } 653 } 654 SK_IF_UNLOCK(sc_if); 655 656 return; 657 } 658 659 static int 660 sk_marv_miibus_readreg(sc_if, phy, reg) 661 struct sk_if_softc *sc_if; 662 int phy, reg; 663 { 664 u_int16_t val; 665 int i; 666 667 if (phy != 0 || 668 (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER && 669 sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) { 670 return(0); 671 } 672 673 SK_IF_LOCK(sc_if); 674 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) | 675 YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ); 676 677 for (i = 0; i < SK_TIMEOUT; i++) { 678 DELAY(1); 679 val = SK_YU_READ_2(sc_if, YUKON_SMICR); 680 if (val & YU_SMICR_READ_VALID) 681 break; 682 } 683 684 if (i == SK_TIMEOUT) { 685 printf("sk%d: phy failed to come ready\n", 686 sc_if->sk_unit); 687 SK_IF_UNLOCK(sc_if); 688 return(0); 689 } 690 691 val = SK_YU_READ_2(sc_if, YUKON_SMIDR); 692 SK_IF_UNLOCK(sc_if); 693 694 return(val); 695 } 696 697 static int 698 sk_marv_miibus_writereg(sc_if, phy, reg, val) 699 struct sk_if_softc *sc_if; 700 int phy, reg, val; 701 { 702 int i; 703 704 SK_IF_LOCK(sc_if); 705 SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val); 706 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) | 707 YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE); 708 709 for (i = 0; i < SK_TIMEOUT; i++) { 710 DELAY(1); 711 if (SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY) 712 break; 713 } 714 SK_IF_UNLOCK(sc_if); 715 716 return(0); 717 } 718 719 static void 720 sk_marv_miibus_statchg(sc_if) 721 struct sk_if_softc *sc_if; 722 { 723 return; 724 } 725 726 #define HASH_BITS 6 727 728 static u_int32_t 729 sk_xmchash(addr) 730 const uint8_t *addr; 731 { 732 uint32_t crc; 733 734 /* Compute CRC for the address value. */ 735 crc = ether_crc32_le(addr, ETHER_ADDR_LEN); 736 737 return (~crc & ((1 << HASH_BITS) - 1)); 738 } 739 740 /* gmchash is just a big endian crc */ 741 static u_int32_t 742 sk_gmchash(addr) 743 const uint8_t *addr; 744 { 745 uint32_t crc; 746 747 /* Compute CRC for the address value. */ 748 crc = ether_crc32_be(addr, ETHER_ADDR_LEN); 749 750 return (crc & ((1 << HASH_BITS) - 1)); 751 } 752 753 static void 754 sk_setfilt(sc_if, addr, slot) 755 struct sk_if_softc *sc_if; 756 caddr_t addr; 757 int slot; 758 { 759 int base; 760 761 base = XM_RXFILT_ENTRY(slot); 762 763 SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0])); 764 SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2])); 765 SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4])); 766 767 return; 768 } 769 770 static void 771 sk_setmulti(sc_if) 772 struct sk_if_softc *sc_if; 773 { 774 struct sk_softc *sc = sc_if->sk_softc; 775 struct ifnet *ifp = &sc_if->arpcom.ac_if; 776 u_int32_t hashes[2] = { 0, 0 }; 777 int h = 0, i; 778 struct ifmultiaddr *ifma; 779 u_int8_t dummy[] = { 0, 0, 0, 0, 0 ,0 }; 780 781 782 /* First, zot all the existing filters. 
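	 * (Each multicast address is hashed by sk_xmchash()/sk_gmchash()
	 * above into a bit index in the range 0-63, since HASH_BITS is 6,
	 * so the filter state is just the two 32-bit words of hashes[]
	 * plus, on the GEnesis, the XM_RXFILT_MAX perfect-filter slots
	 * cleared below.)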
*/ 783 switch(sc->sk_type) { 784 case SK_GENESIS: 785 for (i = 1; i < XM_RXFILT_MAX; i++) 786 sk_setfilt(sc_if, (caddr_t)&dummy, i); 787 788 SK_XM_WRITE_4(sc_if, XM_MAR0, 0); 789 SK_XM_WRITE_4(sc_if, XM_MAR2, 0); 790 break; 791 case SK_YUKON: 792 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0); 793 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0); 794 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0); 795 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0); 796 break; 797 } 798 799 /* Now program new ones. */ 800 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 801 hashes[0] = 0xFFFFFFFF; 802 hashes[1] = 0xFFFFFFFF; 803 } else { 804 i = 1; 805 TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) { 806 if (ifma->ifma_addr->sa_family != AF_LINK) 807 continue; 808 /* 809 * Program the first XM_RXFILT_MAX multicast groups 810 * into the perfect filter. For all others, 811 * use the hash table. 812 */ 813 if (sc->sk_type == SK_GENESIS && i < XM_RXFILT_MAX) { 814 sk_setfilt(sc_if, 815 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i); 816 i++; 817 continue; 818 } 819 820 switch(sc->sk_type) { 821 case SK_GENESIS: 822 h = sk_xmchash( 823 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 824 break; 825 case SK_YUKON: 826 h = sk_gmchash( 827 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 828 break; 829 } 830 if (h < 32) 831 hashes[0] |= (1 << h); 832 else 833 hashes[1] |= (1 << (h - 32)); 834 } 835 } 836 837 switch(sc->sk_type) { 838 case SK_GENESIS: 839 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH| 840 XM_MODE_RX_USE_PERFECT); 841 SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]); 842 SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]); 843 break; 844 case SK_YUKON: 845 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff); 846 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff); 847 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff); 848 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff); 849 break; 850 } 851 852 return; 853 } 854 855 static void 856 sk_setpromisc(sc_if) 857 struct sk_if_softc *sc_if; 858 { 859 struct sk_softc *sc = sc_if->sk_softc; 860 struct ifnet *ifp = &sc_if->arpcom.ac_if; 861 862 switch(sc->sk_type) { 863 case SK_GENESIS: 864 if (ifp->if_flags & IFF_PROMISC) { 865 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC); 866 } else { 867 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC); 868 } 869 break; 870 case SK_YUKON: 871 if (ifp->if_flags & IFF_PROMISC) { 872 SK_YU_CLRBIT_2(sc_if, YUKON_RCR, 873 YU_RCR_UFLEN | YU_RCR_MUFLEN); 874 } else { 875 SK_YU_SETBIT_2(sc_if, YUKON_RCR, 876 YU_RCR_UFLEN | YU_RCR_MUFLEN); 877 } 878 break; 879 } 880 881 return; 882 } 883 884 static int 885 sk_init_rx_ring(sc_if) 886 struct sk_if_softc *sc_if; 887 { 888 struct sk_chain_data *cd = &sc_if->sk_cdata; 889 struct sk_ring_data *rd = sc_if->sk_rdata; 890 int i; 891 892 bzero((char *)rd->sk_rx_ring, 893 sizeof(struct sk_rx_desc) * SK_RX_RING_CNT); 894 895 for (i = 0; i < SK_RX_RING_CNT; i++) { 896 cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i]; 897 if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS) 898 return(ENOBUFS); 899 if (i == (SK_RX_RING_CNT - 1)) { 900 cd->sk_rx_chain[i].sk_next = 901 &cd->sk_rx_chain[0]; 902 rd->sk_rx_ring[i].sk_next = 903 vtophys(&rd->sk_rx_ring[0]); 904 } else { 905 cd->sk_rx_chain[i].sk_next = 906 &cd->sk_rx_chain[i + 1]; 907 rd->sk_rx_ring[i].sk_next = 908 vtophys(&rd->sk_rx_ring[i + 1]); 909 } 910 } 911 912 sc_if->sk_cdata.sk_rx_prod = 0; 913 sc_if->sk_cdata.sk_rx_cons = 0; 914 915 return(0); 916 } 917 918 static void 919 sk_init_tx_ring(sc_if) 920 struct 
sk_if_softc *sc_if; 921 { 922 struct sk_chain_data *cd = &sc_if->sk_cdata; 923 struct sk_ring_data *rd = sc_if->sk_rdata; 924 int i; 925 926 bzero((char *)sc_if->sk_rdata->sk_tx_ring, 927 sizeof(struct sk_tx_desc) * SK_TX_RING_CNT); 928 929 for (i = 0; i < SK_TX_RING_CNT; i++) { 930 cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i]; 931 if (i == (SK_TX_RING_CNT - 1)) { 932 cd->sk_tx_chain[i].sk_next = 933 &cd->sk_tx_chain[0]; 934 rd->sk_tx_ring[i].sk_next = 935 vtophys(&rd->sk_tx_ring[0]); 936 } else { 937 cd->sk_tx_chain[i].sk_next = 938 &cd->sk_tx_chain[i + 1]; 939 rd->sk_tx_ring[i].sk_next = 940 vtophys(&rd->sk_tx_ring[i + 1]); 941 } 942 } 943 944 sc_if->sk_cdata.sk_tx_prod = 0; 945 sc_if->sk_cdata.sk_tx_cons = 0; 946 sc_if->sk_cdata.sk_tx_cnt = 0; 947 948 return; 949 } 950 951 static int 952 sk_newbuf(sc_if, c, m) 953 struct sk_if_softc *sc_if; 954 struct sk_chain *c; 955 struct mbuf *m; 956 { 957 struct mbuf *m_new = NULL; 958 struct sk_rx_desc *r; 959 960 if (m == NULL) { 961 caddr_t *buf = NULL; 962 963 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 964 if (m_new == NULL) 965 return(ENOBUFS); 966 967 /* Allocate the jumbo buffer */ 968 buf = sk_jalloc(sc_if); 969 if (buf == NULL) { 970 m_freem(m_new); 971 #ifdef SK_VERBOSE 972 printf("sk%d: jumbo allocation failed " 973 "-- packet dropped!\n", sc_if->sk_unit); 974 #endif 975 return(ENOBUFS); 976 } 977 978 /* Attach the buffer to the mbuf */ 979 MEXTADD(m_new, buf, SK_JLEN, sk_jfree, 980 (struct sk_if_softc *)sc_if, 0, EXT_NET_DRV); 981 m_new->m_data = (void *)buf; 982 m_new->m_pkthdr.len = m_new->m_len = SK_JLEN; 983 } else { 984 /* 985 * We're re-using a previously allocated mbuf; 986 * be sure to re-init pointers and lengths to 987 * default values. 988 */ 989 m_new = m; 990 m_new->m_len = m_new->m_pkthdr.len = SK_JLEN; 991 m_new->m_data = m_new->m_ext.ext_buf; 992 } 993 994 /* 995 * Adjust alignment so packet payload begins on a 996 * longword boundary. Mandatory for Alpha, useful on 997 * x86 too. 998 */ 999 m_adj(m_new, ETHER_ALIGN); 1000 1001 r = c->sk_desc; 1002 c->sk_mbuf = m_new; 1003 r->sk_data_lo = vtophys(mtod(m_new, caddr_t)); 1004 r->sk_ctl = m_new->m_len | SK_RXSTAT; 1005 1006 return(0); 1007 } 1008 1009 /* 1010 * Allocate jumbo buffer storage. The SysKonnect adapters support 1011 * "jumbograms" (9K frames), although SysKonnect doesn't currently 1012 * use them in their drivers. In order for us to use them, we need 1013 * large 9K receive buffers, however standard mbuf clusters are only 1014 * 2048 bytes in size. Consequently, we need to allocate and manage 1015 * our own jumbo buffer pool. Fortunately, this does not require an 1016 * excessive amount of additional code. 1017 */ 1018 static int 1019 sk_alloc_jumbo_mem(sc_if) 1020 struct sk_if_softc *sc_if; 1021 { 1022 caddr_t ptr; 1023 register int i; 1024 struct sk_jpool_entry *entry; 1025 1026 /* Grab a big chunk o' storage. */ 1027 sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF, 1028 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); 1029 1030 if (sc_if->sk_cdata.sk_jumbo_buf == NULL) { 1031 printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit); 1032 return(ENOBUFS); 1033 } 1034 1035 SLIST_INIT(&sc_if->sk_jfree_listhead); 1036 SLIST_INIT(&sc_if->sk_jinuse_listhead); 1037 1038 /* 1039 * Now divide it up into 9K pieces and save the addresses 1040 * in an array. 
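	 * Slot i simply lives at sk_jumbo_buf + i * SK_JLEN, which is also
	 * how sk_jfree() later recovers a slot number from a buffer pointer
	 * with one subtraction and one division.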
1041 */ 1042 ptr = sc_if->sk_cdata.sk_jumbo_buf; 1043 for (i = 0; i < SK_JSLOTS; i++) { 1044 sc_if->sk_cdata.sk_jslots[i] = ptr; 1045 ptr += SK_JLEN; 1046 entry = malloc(sizeof(struct sk_jpool_entry), 1047 M_DEVBUF, M_NOWAIT); 1048 if (entry == NULL) { 1049 free(sc_if->sk_cdata.sk_jumbo_buf, M_DEVBUF); 1050 sc_if->sk_cdata.sk_jumbo_buf = NULL; 1051 printf("sk%d: no memory for jumbo " 1052 "buffer queue!\n", sc_if->sk_unit); 1053 return(ENOBUFS); 1054 } 1055 entry->slot = i; 1056 SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, 1057 entry, jpool_entries); 1058 } 1059 1060 return(0); 1061 } 1062 1063 /* 1064 * Allocate a jumbo buffer. 1065 */ 1066 static void * 1067 sk_jalloc(sc_if) 1068 struct sk_if_softc *sc_if; 1069 { 1070 struct sk_jpool_entry *entry; 1071 1072 entry = SLIST_FIRST(&sc_if->sk_jfree_listhead); 1073 1074 if (entry == NULL) { 1075 #ifdef SK_VERBOSE 1076 printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit); 1077 #endif 1078 return(NULL); 1079 } 1080 1081 SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries); 1082 SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries); 1083 return(sc_if->sk_cdata.sk_jslots[entry->slot]); 1084 } 1085 1086 /* 1087 * Release a jumbo buffer. 1088 */ 1089 static void 1090 sk_jfree(buf, args) 1091 void *buf; 1092 void *args; 1093 { 1094 struct sk_if_softc *sc_if; 1095 int i; 1096 struct sk_jpool_entry *entry; 1097 1098 /* Extract the softc struct pointer. */ 1099 sc_if = (struct sk_if_softc *)args; 1100 1101 if (sc_if == NULL) 1102 panic("sk_jfree: didn't get softc pointer!"); 1103 1104 /* calculate the slot this buffer belongs to */ 1105 i = ((vm_offset_t)buf 1106 - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN; 1107 1108 if ((i < 0) || (i >= SK_JSLOTS)) 1109 panic("sk_jfree: asked to free buffer that we don't manage!"); 1110 1111 entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead); 1112 if (entry == NULL) 1113 panic("sk_jfree: buffer not in use!"); 1114 entry->slot = i; 1115 SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries); 1116 SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries); 1117 1118 return; 1119 } 1120 1121 /* 1122 * Set media options. 1123 */ 1124 static int 1125 sk_ifmedia_upd(ifp) 1126 struct ifnet *ifp; 1127 { 1128 struct sk_if_softc *sc_if = ifp->if_softc; 1129 struct mii_data *mii; 1130 1131 mii = device_get_softc(sc_if->sk_miibus); 1132 sk_init(sc_if); 1133 mii_mediachg(mii); 1134 1135 return(0); 1136 } 1137 1138 /* 1139 * Report current media status. 
1140 */ 1141 static void 1142 sk_ifmedia_sts(ifp, ifmr) 1143 struct ifnet *ifp; 1144 struct ifmediareq *ifmr; 1145 { 1146 struct sk_if_softc *sc_if; 1147 struct mii_data *mii; 1148 1149 sc_if = ifp->if_softc; 1150 mii = device_get_softc(sc_if->sk_miibus); 1151 1152 mii_pollstat(mii); 1153 ifmr->ifm_active = mii->mii_media_active; 1154 ifmr->ifm_status = mii->mii_media_status; 1155 1156 return; 1157 } 1158 1159 static int 1160 sk_ioctl(ifp, command, data) 1161 struct ifnet *ifp; 1162 u_long command; 1163 caddr_t data; 1164 { 1165 struct sk_if_softc *sc_if = ifp->if_softc; 1166 struct ifreq *ifr = (struct ifreq *) data; 1167 int error = 0; 1168 struct mii_data *mii; 1169 1170 SK_IF_LOCK(sc_if); 1171 1172 switch(command) { 1173 case SIOCSIFMTU: 1174 if (ifr->ifr_mtu > SK_JUMBO_MTU) 1175 error = EINVAL; 1176 else { 1177 ifp->if_mtu = ifr->ifr_mtu; 1178 sk_init(sc_if); 1179 } 1180 break; 1181 case SIOCSIFFLAGS: 1182 if (ifp->if_flags & IFF_UP) { 1183 if (ifp->if_flags & IFF_RUNNING) { 1184 if ((ifp->if_flags ^ sc_if->sk_if_flags) 1185 & IFF_PROMISC) { 1186 sk_setpromisc(sc_if); 1187 sk_setmulti(sc_if); 1188 } 1189 } else 1190 sk_init(sc_if); 1191 } else { 1192 if (ifp->if_flags & IFF_RUNNING) 1193 sk_stop(sc_if); 1194 } 1195 sc_if->sk_if_flags = ifp->if_flags; 1196 error = 0; 1197 break; 1198 case SIOCADDMULTI: 1199 case SIOCDELMULTI: 1200 sk_setmulti(sc_if); 1201 error = 0; 1202 break; 1203 case SIOCGIFMEDIA: 1204 case SIOCSIFMEDIA: 1205 mii = device_get_softc(sc_if->sk_miibus); 1206 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 1207 break; 1208 default: 1209 error = ether_ioctl(ifp, command, data); 1210 break; 1211 } 1212 1213 SK_IF_UNLOCK(sc_if); 1214 1215 return(error); 1216 } 1217 1218 /* 1219 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device 1220 * IDs against our list and return a device name if we find a match. 1221 */ 1222 static int 1223 skc_probe(dev) 1224 device_t dev; 1225 { 1226 struct sk_softc *sc; 1227 struct sk_type *t = sk_devs; 1228 1229 sc = device_get_softc(dev); 1230 1231 while(t->sk_name != NULL) { 1232 if ((pci_get_vendor(dev) == t->sk_vid) && 1233 (pci_get_device(dev) == t->sk_did)) { 1234 device_set_desc(dev, t->sk_name); 1235 return(0); 1236 } 1237 t++; 1238 } 1239 1240 return(ENXIO); 1241 } 1242 1243 /* 1244 * Force the GEnesis into reset, then bring it out of reset. 1245 */ 1246 static void 1247 sk_reset(sc) 1248 struct sk_softc *sc; 1249 { 1250 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET); 1251 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET); 1252 if (sc->sk_type == SK_YUKON) 1253 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET); 1254 1255 DELAY(1000); 1256 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET); 1257 DELAY(2); 1258 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET); 1259 if (sc->sk_type == SK_YUKON) 1260 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR); 1261 1262 if (sc->sk_type == SK_GENESIS) { 1263 /* Configure packet arbiter */ 1264 sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET); 1265 sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT); 1266 sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT); 1267 sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT); 1268 sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT); 1269 } 1270 1271 /* Enable RAM interface */ 1272 sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET); 1273 1274 /* 1275 * Configure interrupt moderation. 
The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents 18.825ns, so to specify a timeout in
	 * microseconds, we have to multiply by 54 (1us / 18.825ns is
	 * roughly 53.1, rounded up).
	 */
	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(200));
	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);

	return;
}

static int
sk_probe(dev)
	device_t		dev;
{
	struct sk_softc		*sc;

	sc = device_get_softc(device_get_parent(dev));

	/*
	 * Not much to do here. We always know there will be
	 * at least one XMAC present, and if there are two,
	 * skc_attach() will create a second device instance
	 * for us.
	 */
	switch (sc->sk_type) {
	case SK_GENESIS:
		device_set_desc(dev, "XaQti Corp. XMAC II");
		break;
	case SK_YUKON:
		device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon");
		break;
	}

	return(0);
}

/*
 * Each XMAC chip is attached as a separate logical IP interface.
 * Single port cards will have only one logical interface of course.
 */
static int
sk_attach(dev)
	device_t		dev;
{
	struct sk_softc		*sc;
	struct sk_if_softc	*sc_if;
	struct ifnet		*ifp;
	int			i, port, error;

	if (dev == NULL)
		return(EINVAL);

	error = 0;
	sc_if = device_get_softc(dev);
	sc = device_get_softc(device_get_parent(dev));
	SK_LOCK(sc);
	port = *(int *)device_get_ivars(dev);
	free(device_get_ivars(dev), M_DEVBUF);
	device_set_ivars(dev, NULL);

	sc_if->sk_dev = dev;
	sc_if->sk_unit = device_get_unit(dev);
	sc_if->sk_port = port;
	sc_if->sk_softc = sc;
	sc->sk_if[port] = sc_if;
	if (port == SK_PORT_A)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
	if (port == SK_PORT_B)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;

	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sc_if->arpcom.ac_enaddr[i] =
		    sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);

	/*
	 * Set up RAM buffer addresses. The NIC will have a certain
	 * amount of SRAM on it, somewhere between 512K and 2MB. We
	 * need to divide this up a) between the transmitter and
	 * receiver and b) between the two XMACs, if this is a
	 * dual port NIC. Our algorithm is to divide up the memory
	 * evenly so that everyone gets a fair share.
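	 *
	 * For example (hypothetical configuration): a dual-MAC board with
	 * 1MB of SRAM and sk_rboff == 0 gives each port two 256K chunks.
	 * The start/end values below are in 8-byte (u_int64_t) units, so
	 * port A would use 0-32767 for RX and 32768-65535 for TX, while
	 * port B would use 65536-98303 for RX and 98304-131071 for TX.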
1370 */ 1371 if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) { 1372 u_int32_t chunk, val; 1373 1374 chunk = sc->sk_ramsize / 2; 1375 val = sc->sk_rboff / sizeof(u_int64_t); 1376 sc_if->sk_rx_ramstart = val; 1377 val += (chunk / sizeof(u_int64_t)); 1378 sc_if->sk_rx_ramend = val - 1; 1379 sc_if->sk_tx_ramstart = val; 1380 val += (chunk / sizeof(u_int64_t)); 1381 sc_if->sk_tx_ramend = val - 1; 1382 } else { 1383 u_int32_t chunk, val; 1384 1385 chunk = sc->sk_ramsize / 4; 1386 val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) / 1387 sizeof(u_int64_t); 1388 sc_if->sk_rx_ramstart = val; 1389 val += (chunk / sizeof(u_int64_t)); 1390 sc_if->sk_rx_ramend = val - 1; 1391 sc_if->sk_tx_ramstart = val; 1392 val += (chunk / sizeof(u_int64_t)); 1393 sc_if->sk_tx_ramend = val - 1; 1394 } 1395 1396 /* Read and save PHY type and set PHY address */ 1397 sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF; 1398 switch(sc_if->sk_phytype) { 1399 case SK_PHYTYPE_XMAC: 1400 sc_if->sk_phyaddr = SK_PHYADDR_XMAC; 1401 break; 1402 case SK_PHYTYPE_BCOM: 1403 sc_if->sk_phyaddr = SK_PHYADDR_BCOM; 1404 break; 1405 case SK_PHYTYPE_MARV_COPPER: 1406 sc_if->sk_phyaddr = SK_PHYADDR_MARV; 1407 break; 1408 default: 1409 printf("skc%d: unsupported PHY type: %d\n", 1410 sc->sk_unit, sc_if->sk_phytype); 1411 error = ENODEV; 1412 goto fail; 1413 } 1414 1415 /* Allocate the descriptor queues. */ 1416 sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF, 1417 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); 1418 1419 if (sc_if->sk_rdata == NULL) { 1420 printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit); 1421 error = ENOMEM; 1422 goto fail; 1423 } 1424 1425 bzero(sc_if->sk_rdata, sizeof(struct sk_ring_data)); 1426 1427 /* Try to allocate memory for jumbo buffers. */ 1428 if (sk_alloc_jumbo_mem(sc_if)) { 1429 printf("sk%d: jumbo buffer allocation failed\n", 1430 sc_if->sk_unit); 1431 error = ENOMEM; 1432 goto fail; 1433 } 1434 1435 ifp = &sc_if->arpcom.ac_if; 1436 ifp->if_softc = sc_if; 1437 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1438 ifp->if_mtu = ETHERMTU; 1439 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1440 ifp->if_ioctl = sk_ioctl; 1441 ifp->if_start = sk_start; 1442 ifp->if_watchdog = sk_watchdog; 1443 ifp->if_init = sk_init; 1444 ifp->if_baudrate = 1000000000; 1445 ifp->if_snd.ifq_maxlen = SK_TX_RING_CNT - 1; 1446 1447 callout_handle_init(&sc_if->sk_tick_ch); 1448 1449 /* 1450 * Call MI attach routine. 1451 */ 1452 ether_ifattach(ifp, sc_if->arpcom.ac_enaddr); 1453 1454 /* 1455 * Do miibus setup. 1456 */ 1457 switch (sc->sk_type) { 1458 case SK_GENESIS: 1459 sk_init_xmac(sc_if); 1460 break; 1461 case SK_YUKON: 1462 sk_init_yukon(sc_if); 1463 break; 1464 } 1465 1466 if (mii_phy_probe(dev, &sc_if->sk_miibus, 1467 sk_ifmedia_upd, sk_ifmedia_sts)) { 1468 printf("skc%d: no PHY found!\n", sc_if->sk_unit); 1469 ether_ifdetach(ifp); 1470 error = ENXIO; 1471 goto fail; 1472 } 1473 1474 fail: 1475 SK_UNLOCK(sc); 1476 if (error) { 1477 /* Access should be ok even though lock has been dropped */ 1478 sc->sk_if[port] = NULL; 1479 sk_detach(dev); 1480 } 1481 1482 return(error); 1483 } 1484 1485 /* 1486 * Attach the interface. Allocate softc structures, do ifmedia 1487 * setup and ethernet/BPF attach. 
1488 */ 1489 static int 1490 skc_attach(dev) 1491 device_t dev; 1492 { 1493 struct sk_softc *sc; 1494 int unit, error = 0, rid, *port; 1495 1496 sc = device_get_softc(dev); 1497 unit = device_get_unit(dev); 1498 1499 mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 1500 MTX_DEF | MTX_RECURSE); 1501 /* 1502 * Map control/status registers. 1503 */ 1504 pci_enable_busmaster(dev); 1505 1506 rid = SK_RID; 1507 sc->sk_res = bus_alloc_resource_any(dev, SK_RES, &rid, RF_ACTIVE); 1508 1509 if (sc->sk_res == NULL) { 1510 printf("sk%d: couldn't map ports/memory\n", unit); 1511 error = ENXIO; 1512 goto fail; 1513 } 1514 1515 sc->sk_btag = rman_get_bustag(sc->sk_res); 1516 sc->sk_bhandle = rman_get_bushandle(sc->sk_res); 1517 1518 /* Allocate interrupt */ 1519 rid = 0; 1520 sc->sk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1521 RF_SHAREABLE | RF_ACTIVE); 1522 1523 if (sc->sk_irq == NULL) { 1524 printf("skc%d: couldn't map interrupt\n", unit); 1525 error = ENXIO; 1526 goto fail; 1527 } 1528 1529 /* Set adapter type */ 1530 switch (pci_get_device(dev)) { 1531 case DEVICEID_SK_V1: 1532 sc->sk_type = SK_GENESIS; 1533 break; 1534 case DEVICEID_SK_V2: 1535 case DEVICEID_3COM_3C940: 1536 case DEVICEID_LINKSYS_EG1032: 1537 case DEVICEID_DLINK_DGE530T: 1538 sc->sk_type = SK_YUKON; 1539 break; 1540 default: 1541 printf("skc%d: unknown device!\n", unit); 1542 error = ENXIO; 1543 goto fail; 1544 } 1545 1546 /* Reset the adapter. */ 1547 sk_reset(sc); 1548 1549 sc->sk_unit = unit; 1550 1551 /* Read and save vital product data from EEPROM. */ 1552 sk_vpd_read(sc); 1553 1554 if (sc->sk_type == SK_GENESIS) { 1555 /* Read and save RAM size and RAMbuffer offset */ 1556 switch(sk_win_read_1(sc, SK_EPROM0)) { 1557 case SK_RAMSIZE_512K_64: 1558 sc->sk_ramsize = 0x80000; 1559 sc->sk_rboff = SK_RBOFF_0; 1560 break; 1561 case SK_RAMSIZE_1024K_64: 1562 sc->sk_ramsize = 0x100000; 1563 sc->sk_rboff = SK_RBOFF_80000; 1564 break; 1565 case SK_RAMSIZE_1024K_128: 1566 sc->sk_ramsize = 0x100000; 1567 sc->sk_rboff = SK_RBOFF_0; 1568 break; 1569 case SK_RAMSIZE_2048K_128: 1570 sc->sk_ramsize = 0x200000; 1571 sc->sk_rboff = SK_RBOFF_0; 1572 break; 1573 default: 1574 printf("skc%d: unknown ram size: %d\n", 1575 sc->sk_unit, sk_win_read_1(sc, SK_EPROM0)); 1576 error = ENXIO; 1577 goto fail; 1578 } 1579 } else { 1580 sc->sk_ramsize = 0x20000; 1581 sc->sk_rboff = SK_RBOFF_0; 1582 } 1583 1584 /* Read and save physical media type */ 1585 switch(sk_win_read_1(sc, SK_PMDTYPE)) { 1586 case SK_PMD_1000BASESX: 1587 sc->sk_pmd = IFM_1000_SX; 1588 break; 1589 case SK_PMD_1000BASELX: 1590 sc->sk_pmd = IFM_1000_LX; 1591 break; 1592 case SK_PMD_1000BASECX: 1593 sc->sk_pmd = IFM_1000_CX; 1594 break; 1595 case SK_PMD_1000BASETX: 1596 sc->sk_pmd = IFM_1000_T; 1597 break; 1598 default: 1599 printf("skc%d: unknown media type: 0x%x\n", 1600 sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE)); 1601 error = ENXIO; 1602 goto fail; 1603 } 1604 1605 /* Announce the product name. 
*/ 1606 if (sc->sk_vpd_prodname != NULL) 1607 printf("skc%d: %s\n", sc->sk_unit, sc->sk_vpd_prodname); 1608 sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1); 1609 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT); 1610 *port = SK_PORT_A; 1611 device_set_ivars(sc->sk_devs[SK_PORT_A], port); 1612 1613 if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) { 1614 sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1); 1615 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT); 1616 *port = SK_PORT_B; 1617 device_set_ivars(sc->sk_devs[SK_PORT_B], port); 1618 } 1619 1620 /* Turn on the 'driver is loaded' LED. */ 1621 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON); 1622 1623 bus_generic_attach(dev); 1624 1625 /* Hook interrupt last to avoid having to lock softc */ 1626 error = bus_setup_intr(dev, sc->sk_irq, INTR_TYPE_NET, 1627 sk_intr, sc, &sc->sk_intrhand); 1628 1629 if (error) { 1630 printf("skc%d: couldn't set up irq\n", unit); 1631 goto fail; 1632 } 1633 1634 fail: 1635 if (error) 1636 skc_detach(dev); 1637 1638 return(error); 1639 } 1640 1641 /* 1642 * Shutdown hardware and free up resources. This can be called any 1643 * time after the mutex has been initialized. It is called in both 1644 * the error case in attach and the normal detach case so it needs 1645 * to be careful about only freeing resources that have actually been 1646 * allocated. 1647 */ 1648 static int 1649 sk_detach(dev) 1650 device_t dev; 1651 { 1652 struct sk_if_softc *sc_if; 1653 struct ifnet *ifp; 1654 1655 sc_if = device_get_softc(dev); 1656 KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx), 1657 ("sk mutex not initialized in sk_detach")); 1658 SK_IF_LOCK(sc_if); 1659 1660 ifp = &sc_if->arpcom.ac_if; 1661 /* These should only be active if attach_xmac succeeded */ 1662 if (device_is_attached(dev)) { 1663 sk_stop(sc_if); 1664 ether_ifdetach(ifp); 1665 } 1666 if (sc_if->sk_miibus) 1667 device_delete_child(dev, sc_if->sk_miibus); 1668 bus_generic_detach(dev); 1669 if (sc_if->sk_cdata.sk_jumbo_buf) 1670 contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF); 1671 if (sc_if->sk_rdata) { 1672 contigfree(sc_if->sk_rdata, sizeof(struct sk_ring_data), 1673 M_DEVBUF); 1674 } 1675 SK_IF_UNLOCK(sc_if); 1676 1677 return(0); 1678 } 1679 1680 static int 1681 skc_detach(dev) 1682 device_t dev; 1683 { 1684 struct sk_softc *sc; 1685 1686 sc = device_get_softc(dev); 1687 KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized")); 1688 SK_LOCK(sc); 1689 1690 if (device_is_alive(dev)) { 1691 if (sc->sk_devs[SK_PORT_A] != NULL) 1692 device_delete_child(dev, sc->sk_devs[SK_PORT_A]); 1693 if (sc->sk_devs[SK_PORT_B] != NULL) 1694 device_delete_child(dev, sc->sk_devs[SK_PORT_B]); 1695 bus_generic_detach(dev); 1696 } 1697 1698 if (sc->sk_intrhand) 1699 bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand); 1700 if (sc->sk_irq) 1701 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq); 1702 if (sc->sk_res) 1703 bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res); 1704 1705 SK_UNLOCK(sc); 1706 mtx_destroy(&sc->sk_mtx); 1707 1708 return(0); 1709 } 1710 1711 static int 1712 sk_encap(sc_if, m_head, txidx) 1713 struct sk_if_softc *sc_if; 1714 struct mbuf *m_head; 1715 u_int32_t *txidx; 1716 { 1717 struct sk_tx_desc *f = NULL; 1718 struct mbuf *m; 1719 u_int32_t frag, cur, cnt = 0; 1720 1721 m = m_head; 1722 cur = frag = *txidx; 1723 1724 /* 1725 * Start packing the mbufs in this chain into 1726 * the fragment pointers. Stop when we run out 1727 * of fragments or hit the end of the mbuf chain. 
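	 * For illustration: a chain with three non-empty mbufs consumes
	 * three descriptors; the first is tagged SK_TXCTL_FIRSTFRAG and
	 * only the later fragments get SK_TXCTL_OWN inside the loop, while
	 * the OWN bit on the first descriptor (and LASTFRAG/EOF_INTR on the
	 * last) is set after the loop, so the NIC never sees a half-built
	 * chain.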
1728 */ 1729 for (m = m_head; m != NULL; m = m->m_next) { 1730 if (m->m_len != 0) { 1731 if ((SK_TX_RING_CNT - 1732 (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2) 1733 return(ENOBUFS); 1734 f = &sc_if->sk_rdata->sk_tx_ring[frag]; 1735 f->sk_data_lo = vtophys(mtod(m, vm_offset_t)); 1736 f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT; 1737 if (cnt == 0) 1738 f->sk_ctl |= SK_TXCTL_FIRSTFRAG; 1739 else 1740 f->sk_ctl |= SK_TXCTL_OWN; 1741 cur = frag; 1742 SK_INC(frag, SK_TX_RING_CNT); 1743 cnt++; 1744 } 1745 } 1746 1747 if (m != NULL) 1748 return(ENOBUFS); 1749 1750 sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |= 1751 SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR; 1752 sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head; 1753 sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN; 1754 sc_if->sk_cdata.sk_tx_cnt += cnt; 1755 1756 *txidx = frag; 1757 1758 return(0); 1759 } 1760 1761 static void 1762 sk_start(ifp) 1763 struct ifnet *ifp; 1764 { 1765 struct sk_softc *sc; 1766 struct sk_if_softc *sc_if; 1767 struct mbuf *m_head = NULL; 1768 u_int32_t idx; 1769 1770 sc_if = ifp->if_softc; 1771 sc = sc_if->sk_softc; 1772 1773 SK_IF_LOCK(sc_if); 1774 1775 idx = sc_if->sk_cdata.sk_tx_prod; 1776 1777 while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) { 1778 IF_DEQUEUE(&ifp->if_snd, m_head); 1779 if (m_head == NULL) 1780 break; 1781 1782 /* 1783 * Pack the data into the transmit ring. If we 1784 * don't have room, set the OACTIVE flag and wait 1785 * for the NIC to drain the ring. 1786 */ 1787 if (sk_encap(sc_if, m_head, &idx)) { 1788 IF_PREPEND(&ifp->if_snd, m_head); 1789 ifp->if_flags |= IFF_OACTIVE; 1790 break; 1791 } 1792 1793 /* 1794 * If there's a BPF listener, bounce a copy of this frame 1795 * to him. 1796 */ 1797 BPF_MTAP(ifp, m_head); 1798 } 1799 1800 /* Transmit */ 1801 sc_if->sk_cdata.sk_tx_prod = idx; 1802 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START); 1803 1804 /* Set a timeout in case the chip goes out to lunch. */ 1805 ifp->if_timer = 5; 1806 SK_IF_UNLOCK(sc_if); 1807 1808 return; 1809 } 1810 1811 1812 static void 1813 sk_watchdog(ifp) 1814 struct ifnet *ifp; 1815 { 1816 struct sk_if_softc *sc_if; 1817 1818 sc_if = ifp->if_softc; 1819 1820 printf("sk%d: watchdog timeout\n", sc_if->sk_unit); 1821 sk_init(sc_if); 1822 1823 return; 1824 } 1825 1826 static void 1827 skc_shutdown(dev) 1828 device_t dev; 1829 { 1830 struct sk_softc *sc; 1831 1832 sc = device_get_softc(dev); 1833 SK_LOCK(sc); 1834 1835 /* Turn off the 'driver is loaded' LED. */ 1836 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF); 1837 1838 /* 1839 * Reset the GEnesis controller. Doing this should also 1840 * assert the resets on the attached XMAC(s). 
1841 */ 1842 sk_reset(sc); 1843 SK_UNLOCK(sc); 1844 1845 return; 1846 } 1847 1848 static void 1849 sk_rxeof(sc_if) 1850 struct sk_if_softc *sc_if; 1851 { 1852 struct sk_softc *sc; 1853 struct mbuf *m; 1854 struct ifnet *ifp; 1855 struct sk_chain *cur_rx; 1856 int total_len = 0; 1857 int i; 1858 u_int32_t rxstat; 1859 1860 sc = sc_if->sk_softc; 1861 ifp = &sc_if->arpcom.ac_if; 1862 i = sc_if->sk_cdata.sk_rx_prod; 1863 cur_rx = &sc_if->sk_cdata.sk_rx_chain[i]; 1864 1865 SK_LOCK_ASSERT(sc); 1866 1867 while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) { 1868 1869 cur_rx = &sc_if->sk_cdata.sk_rx_chain[i]; 1870 rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat; 1871 m = cur_rx->sk_mbuf; 1872 cur_rx->sk_mbuf = NULL; 1873 total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl); 1874 SK_INC(i, SK_RX_RING_CNT); 1875 1876 if (rxstat & XM_RXSTAT_ERRFRAME) { 1877 ifp->if_ierrors++; 1878 sk_newbuf(sc_if, cur_rx, m); 1879 continue; 1880 } 1881 1882 /* 1883 * Try to allocate a new jumbo buffer. If that 1884 * fails, copy the packet to mbufs and put the 1885 * jumbo buffer back in the ring so it can be 1886 * re-used. If allocating mbufs fails, then we 1887 * have to drop the packet. 1888 */ 1889 if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) { 1890 struct mbuf *m0; 1891 m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, 1892 ifp, NULL); 1893 sk_newbuf(sc_if, cur_rx, m); 1894 if (m0 == NULL) { 1895 printf("sk%d: no receive buffers " 1896 "available -- packet dropped!\n", 1897 sc_if->sk_unit); 1898 ifp->if_ierrors++; 1899 continue; 1900 } 1901 m = m0; 1902 } else { 1903 m->m_pkthdr.rcvif = ifp; 1904 m->m_pkthdr.len = m->m_len = total_len; 1905 } 1906 1907 ifp->if_ipackets++; 1908 SK_UNLOCK(sc); 1909 (*ifp->if_input)(ifp, m); 1910 SK_LOCK(sc); 1911 } 1912 1913 sc_if->sk_cdata.sk_rx_prod = i; 1914 1915 return; 1916 } 1917 1918 static void 1919 sk_txeof(sc_if) 1920 struct sk_if_softc *sc_if; 1921 { 1922 struct sk_tx_desc *cur_tx = NULL; 1923 struct ifnet *ifp; 1924 u_int32_t idx; 1925 1926 ifp = &sc_if->arpcom.ac_if; 1927 1928 /* 1929 * Go through our tx ring and free mbufs for those 1930 * frames that have been sent. 1931 */ 1932 idx = sc_if->sk_cdata.sk_tx_cons; 1933 while(idx != sc_if->sk_cdata.sk_tx_prod) { 1934 cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx]; 1935 if (cur_tx->sk_ctl & SK_TXCTL_OWN) 1936 break; 1937 if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG) 1938 ifp->if_opackets++; 1939 if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) { 1940 m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf); 1941 sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL; 1942 } 1943 sc_if->sk_cdata.sk_tx_cnt--; 1944 SK_INC(idx, SK_TX_RING_CNT); 1945 ifp->if_timer = 0; 1946 } 1947 1948 sc_if->sk_cdata.sk_tx_cons = idx; 1949 1950 if (cur_tx != NULL) 1951 ifp->if_flags &= ~IFF_OACTIVE; 1952 1953 return; 1954 } 1955 1956 static void 1957 sk_tick(xsc_if) 1958 void *xsc_if; 1959 { 1960 struct sk_if_softc *sc_if; 1961 struct mii_data *mii; 1962 struct ifnet *ifp; 1963 int i; 1964 1965 sc_if = xsc_if; 1966 SK_IF_LOCK(sc_if); 1967 ifp = &sc_if->arpcom.ac_if; 1968 mii = device_get_softc(sc_if->sk_miibus); 1969 1970 if (!(ifp->if_flags & IFF_UP)) { 1971 SK_IF_UNLOCK(sc_if); 1972 return; 1973 } 1974 1975 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { 1976 sk_intr_bcom(sc_if); 1977 SK_IF_UNLOCK(sc_if); 1978 return; 1979 } 1980 1981 /* 1982 * According to SysKonnect, the correct way to verify that 1983 * the link has come back up is to poll bit 0 of the GPIO 1984 * register three times. 
This pin has the signal from the 1985 * link_sync pin connected to it; if we read the same link 1986 * state 3 times in a row, we know the link is up. 1987 */ 1988 for (i = 0; i < 3; i++) { 1989 if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET) 1990 break; 1991 } 1992 1993 if (i != 3) { 1994 sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz); 1995 SK_IF_UNLOCK(sc_if); 1996 return; 1997 } 1998 1999 /* Turn the GP0 interrupt back on. */ 2000 SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET); 2001 SK_XM_READ_2(sc_if, XM_ISR); 2002 mii_tick(mii); 2003 untimeout(sk_tick, sc_if, sc_if->sk_tick_ch); 2004 2005 SK_IF_UNLOCK(sc_if); 2006 return; 2007 } 2008 2009 static void 2010 sk_intr_bcom(sc_if) 2011 struct sk_if_softc *sc_if; 2012 { 2013 struct mii_data *mii; 2014 struct ifnet *ifp; 2015 int status; 2016 mii = device_get_softc(sc_if->sk_miibus); 2017 ifp = &sc_if->arpcom.ac_if; 2018 2019 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); 2020 2021 /* 2022 * Read the PHY interrupt register to make sure 2023 * we clear any pending interrupts. 2024 */ 2025 status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR); 2026 2027 if (!(ifp->if_flags & IFF_RUNNING)) { 2028 sk_init_xmac(sc_if); 2029 return; 2030 } 2031 2032 if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) { 2033 int lstat; 2034 lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 2035 BRGPHY_MII_AUXSTS); 2036 2037 if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) { 2038 mii_mediachg(mii); 2039 /* Turn off the link LED. */ 2040 SK_IF_WRITE_1(sc_if, 0, 2041 SK_LINKLED1_CTL, SK_LINKLED_OFF); 2042 sc_if->sk_link = 0; 2043 } else if (status & BRGPHY_ISR_LNK_CHG) { 2044 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, 2045 BRGPHY_MII_IMR, 0xFF00); 2046 mii_tick(mii); 2047 sc_if->sk_link = 1; 2048 /* Turn on the link LED. */ 2049 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, 2050 SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF| 2051 SK_LINKLED_BLINK_OFF); 2052 } else { 2053 mii_tick(mii); 2054 sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz); 2055 } 2056 } 2057 2058 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); 2059 2060 return; 2061 } 2062 2063 static void 2064 sk_intr_xmac(sc_if) 2065 struct sk_if_softc *sc_if; 2066 { 2067 struct sk_softc *sc; 2068 u_int16_t status; 2069 2070 sc = sc_if->sk_softc; 2071 status = SK_XM_READ_2(sc_if, XM_ISR); 2072 2073 /* 2074 * Link has gone down. Start MII tick timeout to 2075 * watch for link resync. 
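	 * (XM_ISR_GP0_SET is the link_sync pin interrupt: the code below
	 * masks it in XM_IMR and schedules sk_tick(), which polls the GPIO
	 * bit until the link is stable and then unmasks the interrupt.)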
2076 */ 2077 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) { 2078 if (status & XM_ISR_GP0_SET) { 2079 SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET); 2080 sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz); 2081 } 2082 2083 if (status & XM_ISR_AUTONEG_DONE) { 2084 sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz); 2085 } 2086 } 2087 2088 if (status & XM_IMR_TX_UNDERRUN) 2089 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO); 2090 2091 if (status & XM_IMR_RX_OVERRUN) 2092 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO); 2093 2094 status = SK_XM_READ_2(sc_if, XM_ISR); 2095 2096 return; 2097 } 2098 2099 static void 2100 sk_intr_yukon(sc_if) 2101 struct sk_if_softc *sc_if; 2102 { 2103 int status; 2104 2105 status = SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR); 2106 2107 return; 2108 } 2109 2110 static void 2111 sk_intr(xsc) 2112 void *xsc; 2113 { 2114 struct sk_softc *sc = xsc; 2115 struct sk_if_softc *sc_if0 = NULL, *sc_if1 = NULL; 2116 struct ifnet *ifp0 = NULL, *ifp1 = NULL; 2117 u_int32_t status; 2118 2119 SK_LOCK(sc); 2120 2121 sc_if0 = sc->sk_if[SK_PORT_A]; 2122 sc_if1 = sc->sk_if[SK_PORT_B]; 2123 2124 if (sc_if0 != NULL) 2125 ifp0 = &sc_if0->arpcom.ac_if; 2126 if (sc_if1 != NULL) 2127 ifp1 = &sc_if1->arpcom.ac_if; 2128 2129 for (;;) { 2130 status = CSR_READ_4(sc, SK_ISSR); 2131 if (!(status & sc->sk_intrmask)) 2132 break; 2133 2134 /* Handle receive interrupts first. */ 2135 if (status & SK_ISR_RX1_EOF) { 2136 sk_rxeof(sc_if0); 2137 CSR_WRITE_4(sc, SK_BMU_RX_CSR0, 2138 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START); 2139 } 2140 if (status & SK_ISR_RX2_EOF) { 2141 sk_rxeof(sc_if1); 2142 CSR_WRITE_4(sc, SK_BMU_RX_CSR1, 2143 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START); 2144 } 2145 2146 /* Then transmit interrupts. */ 2147 if (status & SK_ISR_TX1_S_EOF) { 2148 sk_txeof(sc_if0); 2149 CSR_WRITE_4(sc, SK_BMU_TXS_CSR0, 2150 SK_TXBMU_CLR_IRQ_EOF); 2151 } 2152 if (status & SK_ISR_TX2_S_EOF) { 2153 sk_txeof(sc_if1); 2154 CSR_WRITE_4(sc, SK_BMU_TXS_CSR1, 2155 SK_TXBMU_CLR_IRQ_EOF); 2156 } 2157 2158 /* Then MAC interrupts. */ 2159 if (status & SK_ISR_MAC1 && ifp0->if_flags & IFF_RUNNING) { 2160 if (sc->sk_type == SK_GENESIS) 2161 sk_intr_xmac(sc_if0); 2162 else 2163 sk_intr_yukon(sc_if0); 2164 } 2165 2166 if (status & SK_ISR_MAC2 && ifp1->if_flags & IFF_RUNNING) { 2167 if (sc->sk_type == SK_GENESIS) 2168 sk_intr_xmac(sc_if1); 2169 else 2170 sk_intr_yukon(sc_if1); 2171 } 2172 2173 if (status & SK_ISR_EXTERNAL_REG) { 2174 if (ifp0 != NULL && 2175 sc_if0->sk_phytype == SK_PHYTYPE_BCOM) 2176 sk_intr_bcom(sc_if0); 2177 if (ifp1 != NULL && 2178 sc_if1->sk_phytype == SK_PHYTYPE_BCOM) 2179 sk_intr_bcom(sc_if1); 2180 } 2181 } 2182 2183 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); 2184 2185 if (ifp0 != NULL && ifp0->if_snd.ifq_head != NULL) 2186 sk_start(ifp0); 2187 if (ifp1 != NULL && ifp1->if_snd.ifq_head != NULL) 2188 sk_start(ifp1); 2189 2190 SK_UNLOCK(sc); 2191 2192 return; 2193 } 2194 2195 static void 2196 sk_init_xmac(sc_if) 2197 struct sk_if_softc *sc_if; 2198 { 2199 struct sk_softc *sc; 2200 struct ifnet *ifp; 2201 struct sk_bcom_hack bhack[] = { 2202 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 }, 2203 { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 }, 2204 { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 }, 2205 { 0, 0 } }; 2206 2207 sc = sc_if->sk_softc; 2208 ifp = &sc_if->arpcom.ac_if; 2209 2210 /* Unreset the XMAC. */ 2211 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET); 2212 DELAY(1000); 2213 2214 /* Reset the XMAC's internal state. 
static void
sk_init_xmac(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct ifnet		*ifp;
	struct sk_bcom_hack	bhack[] = {
	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
	{ 0, 0 } };

	sc = sc_if->sk_softc;
	ifp = &sc_if->arpcom.ac_if;

	/* Unreset the XMAC. */
	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
	DELAY(1000);

	/* Reset the XMAC's internal state. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);

	/* Save the XMAC II revision */
	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));

	/*
	 * Perform additional initialization for external PHYs,
	 * namely for the 1000baseTX cards that use the XMAC's
	 * GMII mode.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		int			i = 0;
		u_int32_t		val;

		/* Take PHY out of reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A)
			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
		else
			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
		sk_win_write_4(sc, SK_GPIO, val);

		/* Enable GMII mode on the XMAC. */
		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);

		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
		DELAY(10000);
		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_IMR, 0xFFF0);

		/*
		 * Early versions of the BCM5400 apparently have
		 * a bug that requires them to have their reserved
		 * registers initialized to some magic values. I don't
		 * know what the numbers do, I'm just the messenger.
		 */
		if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
		    == 0x6041) {
			while(bhack[i].reg) {
				sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
				    bhack[i].reg, bhack[i].val);
				i++;
			}
		}
	}

	/* Set station address */
	SK_XM_WRITE_2(sc_if, XM_PAR0,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0]));
	SK_XM_WRITE_2(sc_if, XM_PAR1,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2]));
	SK_XM_WRITE_2(sc_if, XM_PAR2,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4]));
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);

	if (ifp->if_flags & IFF_BROADCAST) {
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	} else {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	}

	/* We don't need the FCS appended to the packet. */
	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);

	/* We want short frames padded to 60 bytes. */
	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);

	/*
	 * Enable the reception of all error frames. This is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transferring frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
	    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
	    XM_MODE_RX_INRANGELEN);

	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
	else
		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);

	/*
	 * Bump up the transmit threshold. This helps hold off transmit
	 * underruns when we're blasting traffic from both ports at once.
	 */
	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);

	/* Set promiscuous mode */
	sk_setpromisc(sc_if);

	/* Set multicast filter */
	sk_setmulti(sc_if);

	/* Clear and enable interrupts */
	SK_XM_READ_2(sc_if, XM_ISR);
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
	else
		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Configure MAC arbiter */
	switch(sc_if->sk_xmac_rev) {
	case XM_XMAC_REV_B2:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	case XM_XMAC_REV_C1:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	default:
		break;
	}
	sk_win_write_2(sc, SK_MACARB_CTL,
	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);

	sc_if->sk_link = 1;

	return;
}

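/*
 * Illustrative sketch (compiled out): the XM_RXCMD_BIGPKTOK decision in
 * sk_init_xmac() above compares the interface MTU against a maximal
 * standard Ethernet frame (1500-byte payload + 14-byte header + 4-byte
 * CRC = 1518 bytes).  This hedged restatement uses only the constants
 * from <net/ethernet.h> already included by this file and is not called
 * anywhere in the driver.
 */
#if 0
static int
sk_mtu_needs_jumbo(struct ifnet *ifp)
{
	return (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN));
}
#endif
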
static void
sk_init_yukon(sc_if)
	struct sk_if_softc	*sc_if;
{
	u_int32_t		phy;
	u_int16_t		reg;
	int			i;

	/* GMAC and GPHY Reset */
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
	DELAY(1000);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
	DELAY(1000);

	phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
	    SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;

	switch(sc_if->sk_softc->sk_pmd) {
	case IFM_1000_SX:
	case IFM_1000_LX:
		phy |= SK_GPHY_FIBER;
		break;

	case IFM_1000_CX:
	case IFM_1000_T:
		phy |= SK_GPHY_COPPER;
		break;
	}

	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
	DELAY(1000);
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
	    SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);

	/* unused read of the interrupt source register */
	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);

	reg = SK_YU_READ_2(sc_if, YUKON_PAR);

	/* MIB Counter Clear Mode set */
	reg |= YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* MIB Counter Clear Mode clear */
	reg &= ~YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* receive control reg */
	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);

	/* transmit parameter register */
	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
	    YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );

	/* serial mode register */
	SK_YU_WRITE_2(sc_if, YUKON_SMR, YU_SMR_DATA_BLIND(0x1c) |
	    YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e));

	/* Setup Yukon's address */
	for (i = 0; i < 3; i++) {
		/* Write Source Address 1 (unicast filter) */
		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
		    sc_if->arpcom.ac_enaddr[i * 2] |
		    sc_if->arpcom.ac_enaddr[i * 2 + 1] << 8);
	}

	for (i = 0; i < 3; i++) {
		reg = sk_win_read_2(sc_if->sk_softc,
		    SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
	}

	/* Set promiscuous mode */
	sk_setpromisc(sc_if);

	/* Set multicast filter */
	sk_setmulti(sc_if);

	/* enable interrupt mask for counter overflows */
	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);

	/* Configure RX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON);

	/* Configure TX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
}

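/*
 * Illustrative sketch (compiled out): sk_init_yukon() above loads the
 * 6-byte station address into the YUKON_SAL1 registers two bytes at a
 * time, low byte first.  The helper below is a hedged restatement of
 * that packing for a generic address buffer; it is not part of the
 * driver and exists only to make the byte ordering explicit.
 */
#if 0
static void
sk_yukon_pack_enaddr(const u_int8_t *enaddr, u_int16_t *word)
{
	int			i;

	/* word[0] = enaddr[0] | enaddr[1] << 8, and so on. */
	for (i = 0; i < 3; i++)
		word[i] = enaddr[i * 2] | (enaddr[i * 2 + 1] << 8);
}
#endif
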
/*
 * Note that to properly initialize any part of the GEnesis chip,
 * you first have to take it out of reset mode.
 */
static void
sk_init(xsc)
	void			*xsc;
{
	struct sk_if_softc	*sc_if = xsc;
	struct sk_softc		*sc;
	struct ifnet		*ifp;
	struct mii_data		*mii;
	u_int16_t		reg;

	SK_IF_LOCK(sc_if);

	ifp = &sc_if->arpcom.ac_if;
	sc = sc_if->sk_softc;
	mii = device_get_softc(sc_if->sk_miibus);

	/* Cancel pending I/O and free all RX/TX buffers. */
	sk_stop(sc_if);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure LINK_SYNC LED */
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
		    SK_LINKLED_LINKSYNC_ON);

		/* Configure RX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
		    SK_RXLEDCTL_COUNTER_START);

		/* Configure TX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
		    SK_TXLEDCTL_COUNTER_START);
	}

	/* Configure I2C registers */

	/* Configure XMAC(s) */
	switch (sc->sk_type) {
	case SK_GENESIS:
		sk_init_xmac(sc_if);
		break;
	case SK_YUKON:
		sk_init_yukon(sc_if);
		break;
	}
	mii_mediachg(mii);

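	/*
	 * As the comment above sk_init() notes, a GEnesis block has to be
	 * taken out of reset before it can be programmed; each block below
	 * is therefore unreset first (the SK_FIFO_UNRESET and
	 * SK_RBCTL_UNRESET writes) before it is configured and switched on.
	 */
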
	if (sc->sk_type == SK_GENESIS) {
		/* Configure MAC FIFOs */
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);

		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
	}

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);

	/* Configure RAMbuffers */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);

	/* Configure BMUs */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
	    vtophys(&sc_if->sk_rdata->sk_rx_ring[0]));
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
	    vtophys(&sc_if->sk_rdata->sk_tx_ring[0]));
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);

	/* Init descriptors */
	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
		printf("sk%d: initialization failed: no "
		    "memory for rx buffers\n", sc_if->sk_unit);
		sk_stop(sc_if);
		SK_IF_UNLOCK(sc_if);
		return;
	}
	sk_init_tx_ring(sc_if);

	/* Configure interrupt handling */
	CSR_READ_4(sc, SK_ISSR);
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask |= SK_INTRS1;
	else
		sc->sk_intrmask |= SK_INTRS2;

	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;

	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Start BMUs. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);

	switch(sc->sk_type) {
	case SK_GENESIS:
		/* Enable XMAC's TX and RX state machines */
		SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
		SK_XM_SETBIT_2(sc_if, XM_MMUCMD,
		    XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
		break;
	case SK_YUKON:
		reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
		reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
		reg &= ~(YU_GPCR_SPEED_EN | YU_GPCR_DPLX_EN);
		SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	SK_IF_UNLOCK(sc_if);

	return;
}

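/*
 * Illustrative sketch (compiled out): sk_init() above programs each BMU
 * with the physical address of the first descriptor in its ring via
 * vtophys(), which assumes the descriptor area is physically contiguous,
 * as the allocation performed at attach time is meant to guarantee.  A
 * hedged restatement of that address calculation, for a hypothetical ring
 * base pointer, looks like this; it is not used by the driver.
 */
#if 0
static u_int32_t
sk_ring_busaddr_lo(void *ring)
{
	/* Low 32 bits of the address the BMU will fetch descriptors from. */
	return ((u_int32_t)vtophys(ring));
}
#endif
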
static void
sk_stop(sc_if)
	struct sk_if_softc	*sc_if;
{
	int			i;
	struct sk_softc		*sc;
	struct ifnet		*ifp;

	SK_IF_LOCK(sc_if);
	sc = sc_if->sk_softc;
	ifp = &sc_if->arpcom.ac_if;

	untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);

	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		u_int32_t		val;

		/* Put PHY back into reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A) {
			val |= SK_GPIO_DIR0;
			val &= ~SK_GPIO_DAT0;
		} else {
			val |= SK_GPIO_DIR2;
			val &= ~SK_GPIO_DAT2;
		}
		sk_win_write_4(sc, SK_GPIO, val);
	}

	/* Turn off various components of this interface. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
	switch (sc->sk_type) {
	case SK_GENESIS:
		SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
		break;
	case SK_YUKON:
		SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
		SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
		break;
	}
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);

	/* Disable interrupts */
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask &= ~SK_INTRS1;
	else
		sc->sk_intrmask &= ~SK_INTRS2;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	SK_XM_READ_2(sc_if, XM_ISR);
	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < SK_RX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
		}
	}

	for (i = 0; i < SK_TX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
		}
	}

	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
	SK_IF_UNLOCK(sc_if);
	return;
}