/*	$OpenBSD: if_sk.c,v 2.33 2003/08/12 05:23:06 nate Exp $	*/

/*-
 * Copyright (c) 1997, 1998, 1999, 2000
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
 * the SK-984x series adapters, both single port and dual port.
 * References:
 *	The XaQti XMAC II datasheet,
 *	  http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
 *	The SysKonnect GEnesis manual, http://www.syskonnect.com
 *
 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
 * XMAC II datasheet online.
I have put my copy at people.freebsd.org as a 63 * convenience to others until Vitesse corrects this problem: 64 * 65 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf 66 * 67 * Written by Bill Paul <wpaul@ee.columbia.edu> 68 * Department of Electrical Engineering 69 * Columbia University, New York City 70 */ 71 /* 72 * The SysKonnect gigabit ethernet adapters consist of two main 73 * components: the SysKonnect GEnesis controller chip and the XaQti Corp. 74 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC 75 * components and a PHY while the GEnesis controller provides a PCI 76 * interface with DMA support. Each card may have between 512K and 77 * 2MB of SRAM on board depending on the configuration. 78 * 79 * The SysKonnect GEnesis controller can have either one or two XMAC 80 * chips connected to it, allowing single or dual port NIC configurations. 81 * SysKonnect has the distinction of being the only vendor on the market 82 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs, 83 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the 84 * XMAC registers. This driver takes advantage of these features to allow 85 * both XMACs to operate as independent interfaces. 86 */ 87 88 #include <sys/param.h> 89 #include <sys/systm.h> 90 #include <sys/sockio.h> 91 #include <sys/mbuf.h> 92 #include <sys/malloc.h> 93 #include <sys/kernel.h> 94 #include <sys/module.h> 95 #include <sys/socket.h> 96 #include <sys/queue.h> 97 #include <sys/sysctl.h> 98 99 #include <net/if.h> 100 #include <net/if_arp.h> 101 #include <net/ethernet.h> 102 #include <net/if_dl.h> 103 #include <net/if_media.h> 104 #include <net/if_types.h> 105 106 #include <net/bpf.h> 107 108 #include <vm/vm.h> /* for vtophys */ 109 #include <vm/pmap.h> /* for vtophys */ 110 #include <machine/bus.h> 111 #include <machine/resource.h> 112 #include <sys/bus.h> 113 #include <sys/rman.h> 114 115 #include <dev/mii/mii.h> 116 #include <dev/mii/miivar.h> 117 #include <dev/mii/brgphyreg.h> 118 119 #include <dev/pci/pcireg.h> 120 #include <dev/pci/pcivar.h> 121 122 #if 0 123 #define SK_USEIOSPACE 124 #endif 125 126 #include <pci/if_skreg.h> 127 #include <pci/xmaciireg.h> 128 #include <pci/yukonreg.h> 129 130 MODULE_DEPEND(sk, pci, 1, 1, 1); 131 MODULE_DEPEND(sk, ether, 1, 1, 1); 132 MODULE_DEPEND(sk, miibus, 1, 1, 1); 133 134 /* "device miibus" required. See GENERIC if you get errors here. 
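 * In practice that usually means a kernel configuration carrying
 * something like (check GENERIC for the exact spelling on your branch):
 *
 *	device miibus
 *	device sk
 *
 * or loading the if_sk module instead.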
*/ 135 #include "miibus_if.h" 136 137 #ifndef lint 138 static const char rcsid[] = 139 "$FreeBSD$"; 140 #endif 141 142 static struct sk_type sk_devs[] = { 143 { 144 VENDORID_SK, 145 DEVICEID_SK_V1, 146 "SysKonnect Gigabit Ethernet (V1.0)" 147 }, 148 { 149 VENDORID_SK, 150 DEVICEID_SK_V2, 151 "SysKonnect Gigabit Ethernet (V2.0)" 152 }, 153 { 154 VENDORID_MARVELL, 155 DEVICEID_SK_V2, 156 "Marvell Gigabit Ethernet" 157 }, 158 { 159 VENDORID_MARVELL, 160 DEVICEID_BELKIN_5005, 161 "Belkin F5D5005 Gigabit Ethernet" 162 }, 163 { 164 VENDORID_3COM, 165 DEVICEID_3COM_3C940, 166 "3Com 3C940 Gigabit Ethernet" 167 }, 168 { 169 VENDORID_LINKSYS, 170 DEVICEID_LINKSYS_EG1032, 171 "Linksys EG1032 Gigabit Ethernet" 172 }, 173 { 174 VENDORID_DLINK, 175 DEVICEID_DLINK_DGE530T, 176 "D-Link DGE-530T Gigabit Ethernet" 177 }, 178 { 0, 0, NULL } 179 }; 180 181 static int skc_probe(device_t); 182 static int skc_attach(device_t); 183 static int skc_detach(device_t); 184 static void skc_shutdown(device_t); 185 static int sk_detach(device_t); 186 static int sk_probe(device_t); 187 static int sk_attach(device_t); 188 static void sk_tick(void *); 189 static void sk_intr(void *); 190 static void sk_intr_xmac(struct sk_if_softc *); 191 static void sk_intr_bcom(struct sk_if_softc *); 192 static void sk_intr_yukon(struct sk_if_softc *); 193 static void sk_rxeof(struct sk_if_softc *); 194 static void sk_txeof(struct sk_if_softc *); 195 static int sk_encap(struct sk_if_softc *, struct mbuf *, 196 u_int32_t *); 197 static void sk_start(struct ifnet *); 198 static void sk_start_locked(struct ifnet *); 199 static int sk_ioctl(struct ifnet *, u_long, caddr_t); 200 static void sk_init(void *); 201 static void sk_init_locked(struct sk_if_softc *); 202 static void sk_init_xmac(struct sk_if_softc *); 203 static void sk_init_yukon(struct sk_if_softc *); 204 static void sk_stop(struct sk_if_softc *); 205 static void sk_watchdog(struct ifnet *); 206 static int sk_ifmedia_upd(struct ifnet *); 207 static void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *); 208 static void sk_reset(struct sk_softc *); 209 static int sk_newbuf(struct sk_if_softc *, 210 struct sk_chain *, struct mbuf *); 211 static int sk_alloc_jumbo_mem(struct sk_if_softc *); 212 static void sk_free_jumbo_mem(struct sk_if_softc *); 213 static void *sk_jalloc(struct sk_if_softc *); 214 static void sk_jfree(void *, void *); 215 static int sk_init_rx_ring(struct sk_if_softc *); 216 static void sk_init_tx_ring(struct sk_if_softc *); 217 static u_int32_t sk_win_read_4(struct sk_softc *, int); 218 static u_int16_t sk_win_read_2(struct sk_softc *, int); 219 static u_int8_t sk_win_read_1(struct sk_softc *, int); 220 static void sk_win_write_4(struct sk_softc *, int, u_int32_t); 221 static void sk_win_write_2(struct sk_softc *, int, u_int32_t); 222 static void sk_win_write_1(struct sk_softc *, int, u_int32_t); 223 static u_int8_t sk_vpd_readbyte(struct sk_softc *, int); 224 static void sk_vpd_read_res(struct sk_softc *, struct vpd_res *, int); 225 static void sk_vpd_read(struct sk_softc *); 226 227 static int sk_miibus_readreg(device_t, int, int); 228 static int sk_miibus_writereg(device_t, int, int, int); 229 static void sk_miibus_statchg(device_t); 230 231 static int sk_xmac_miibus_readreg(struct sk_if_softc *, int, int); 232 static int sk_xmac_miibus_writereg(struct sk_if_softc *, int, int, 233 int); 234 static void sk_xmac_miibus_statchg(struct sk_if_softc *); 235 236 static int sk_marv_miibus_readreg(struct sk_if_softc *, int, int); 237 static int 
sk_marv_miibus_writereg(struct sk_if_softc *, int, int, 238 int); 239 static void sk_marv_miibus_statchg(struct sk_if_softc *); 240 241 static uint32_t sk_xmchash(const uint8_t *); 242 static uint32_t sk_gmchash(const uint8_t *); 243 static void sk_setfilt(struct sk_if_softc *, caddr_t, int); 244 static void sk_setmulti(struct sk_if_softc *); 245 static void sk_setpromisc(struct sk_if_softc *); 246 247 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high); 248 static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS); 249 250 #ifdef SK_USEIOSPACE 251 #define SK_RES SYS_RES_IOPORT 252 #define SK_RID SK_PCI_LOIO 253 #else 254 #define SK_RES SYS_RES_MEMORY 255 #define SK_RID SK_PCI_LOMEM 256 #endif 257 258 /* 259 * Note that we have newbus methods for both the GEnesis controller 260 * itself and the XMAC(s). The XMACs are children of the GEnesis, and 261 * the miibus code is a child of the XMACs. We need to do it this way 262 * so that the miibus drivers can access the PHY registers on the 263 * right PHY. It's not quite what I had in mind, but it's the only 264 * design that achieves the desired effect. 265 */ 266 static device_method_t skc_methods[] = { 267 /* Device interface */ 268 DEVMETHOD(device_probe, skc_probe), 269 DEVMETHOD(device_attach, skc_attach), 270 DEVMETHOD(device_detach, skc_detach), 271 DEVMETHOD(device_shutdown, skc_shutdown), 272 273 /* bus interface */ 274 DEVMETHOD(bus_print_child, bus_generic_print_child), 275 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 276 277 { 0, 0 } 278 }; 279 280 static driver_t skc_driver = { 281 "skc", 282 skc_methods, 283 sizeof(struct sk_softc) 284 }; 285 286 static devclass_t skc_devclass; 287 288 static device_method_t sk_methods[] = { 289 /* Device interface */ 290 DEVMETHOD(device_probe, sk_probe), 291 DEVMETHOD(device_attach, sk_attach), 292 DEVMETHOD(device_detach, sk_detach), 293 DEVMETHOD(device_shutdown, bus_generic_shutdown), 294 295 /* bus interface */ 296 DEVMETHOD(bus_print_child, bus_generic_print_child), 297 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 298 299 /* MII interface */ 300 DEVMETHOD(miibus_readreg, sk_miibus_readreg), 301 DEVMETHOD(miibus_writereg, sk_miibus_writereg), 302 DEVMETHOD(miibus_statchg, sk_miibus_statchg), 303 304 { 0, 0 } 305 }; 306 307 static driver_t sk_driver = { 308 "sk", 309 sk_methods, 310 sizeof(struct sk_if_softc) 311 }; 312 313 static devclass_t sk_devclass; 314 315 DRIVER_MODULE(sk, pci, skc_driver, skc_devclass, 0, 0); 316 DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0); 317 DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0); 318 319 #define SK_SETBIT(sc, reg, x) \ 320 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x) 321 322 #define SK_CLRBIT(sc, reg, x) \ 323 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x) 324 325 #define SK_WIN_SETBIT_4(sc, reg, x) \ 326 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x) 327 328 #define SK_WIN_CLRBIT_4(sc, reg, x) \ 329 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x) 330 331 #define SK_WIN_SETBIT_2(sc, reg, x) \ 332 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x) 333 334 #define SK_WIN_CLRBIT_2(sc, reg, x) \ 335 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x) 336 337 static u_int32_t 338 sk_win_read_4(sc, reg) 339 struct sk_softc *sc; 340 int reg; 341 { 342 #ifdef SK_USEIOSPACE 343 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 344 return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg))); 345 #else 346 return(CSR_READ_4(sc, reg)); 347 #endif 348 } 349 350 static u_int16_t 351 sk_win_read_2(sc, reg) 352 struct 
sk_softc *sc;
	int reg;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
#else
	return(CSR_READ_2(sc, reg));
#endif
}

static u_int8_t
sk_win_read_1(sc, reg)
	struct sk_softc *sc;
	int reg;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
#else
	return(CSR_READ_1(sc, reg));
#endif
}

static void
sk_win_write_4(sc, reg, val)
	struct sk_softc *sc;
	int reg;
	u_int32_t val;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
#else
	CSR_WRITE_4(sc, reg, val);
#endif
	return;
}

static void
sk_win_write_2(sc, reg, val)
	struct sk_softc *sc;
	int reg;
	u_int32_t val;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val);
#else
	CSR_WRITE_2(sc, reg, val);
#endif
	return;
}

static void
sk_win_write_1(sc, reg, val)
	struct sk_softc *sc;
	int reg;
	u_int32_t val;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
#else
	CSR_WRITE_1(sc, reg, val);
#endif
	return;
}

/*
 * The VPD EEPROM contains Vital Product Data, as suggested in
 * the PCI 2.1 specification. The VPD data is separated into areas
 * denoted by resource IDs. The SysKonnect VPD contains an ID string
 * resource (the name of the adapter), a read-only area resource
 * containing various key/data fields and a read/write area which
 * can be used to store asset management information or log messages.
 * We read the ID string and read-only area into buffers attached to
 * the controller softc structure for later use. At the moment,
 * we only use the ID string during skc_attach().
 */
static u_int8_t
sk_vpd_readbyte(sc, addr)
	struct sk_softc *sc;
	int addr;
{
	int i;

	sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr);
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (sk_win_read_2(sc,
		    SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG)
			break;
	}

	if (i == SK_TIMEOUT)
		return(0);

	return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA)));
}

static void
sk_vpd_read_res(sc, res, addr)
	struct sk_softc *sc;
	struct vpd_res *res;
	int addr;
{
	int i;
	u_int8_t *ptr;

	ptr = (u_int8_t *)res;
	for (i = 0; i < sizeof(struct vpd_res); i++)
		ptr[i] = sk_vpd_readbyte(sc, i + addr);

	return;
}

static void
sk_vpd_read(sc)
	struct sk_softc *sc;
{
	int pos = 0, i;
	struct vpd_res res;

	if (sc->sk_vpd_prodname != NULL)
		free(sc->sk_vpd_prodname, M_DEVBUF);
	if (sc->sk_vpd_readonly != NULL)
		free(sc->sk_vpd_readonly, M_DEVBUF);
	sc->sk_vpd_prodname = NULL;
	sc->sk_vpd_readonly = NULL;
	sc->sk_vpd_readonly_len = 0;

	sk_vpd_read_res(sc, &res, pos);

	/*
	 * Bail out quietly if the eeprom appears to be missing or empty.
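	 * A blank or missing serial EEPROM is expected to read back as all
	 * 0xff bytes, which is exactly what the resource-header check
	 * below looks for (vr_id == vr_len == vr_pad == 0xff).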
488 */ 489 if (res.vr_id == 0xff && res.vr_len == 0xff && res.vr_pad == 0xff) 490 return; 491 492 if (res.vr_id != VPD_RES_ID) { 493 printf("skc%d: bad VPD resource id: expected %x got %x\n", 494 sc->sk_unit, VPD_RES_ID, res.vr_id); 495 return; 496 } 497 498 pos += sizeof(res); 499 sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT); 500 if (sc->sk_vpd_prodname != NULL) { 501 for (i = 0; i < res.vr_len; i++) 502 sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos); 503 sc->sk_vpd_prodname[i] = '\0'; 504 } 505 pos += res.vr_len; 506 507 sk_vpd_read_res(sc, &res, pos); 508 509 if (res.vr_id != VPD_RES_READ) { 510 printf("skc%d: bad VPD resource id: expected %x got %x\n", 511 sc->sk_unit, VPD_RES_READ, res.vr_id); 512 return; 513 } 514 515 pos += sizeof(res); 516 sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT); 517 for (i = 0; i < res.vr_len; i++) 518 sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos); 519 sc->sk_vpd_readonly_len = res.vr_len; 520 521 return; 522 } 523 524 static int 525 sk_miibus_readreg(dev, phy, reg) 526 device_t dev; 527 int phy, reg; 528 { 529 struct sk_if_softc *sc_if; 530 531 sc_if = device_get_softc(dev); 532 533 switch(sc_if->sk_softc->sk_type) { 534 case SK_GENESIS: 535 return(sk_xmac_miibus_readreg(sc_if, phy, reg)); 536 case SK_YUKON: 537 case SK_YUKON_LITE: 538 case SK_YUKON_LP: 539 return(sk_marv_miibus_readreg(sc_if, phy, reg)); 540 } 541 542 return(0); 543 } 544 545 static int 546 sk_miibus_writereg(dev, phy, reg, val) 547 device_t dev; 548 int phy, reg, val; 549 { 550 struct sk_if_softc *sc_if; 551 552 sc_if = device_get_softc(dev); 553 554 switch(sc_if->sk_softc->sk_type) { 555 case SK_GENESIS: 556 return(sk_xmac_miibus_writereg(sc_if, phy, reg, val)); 557 case SK_YUKON: 558 case SK_YUKON_LITE: 559 case SK_YUKON_LP: 560 return(sk_marv_miibus_writereg(sc_if, phy, reg, val)); 561 } 562 563 return(0); 564 } 565 566 static void 567 sk_miibus_statchg(dev) 568 device_t dev; 569 { 570 struct sk_if_softc *sc_if; 571 572 sc_if = device_get_softc(dev); 573 574 switch(sc_if->sk_softc->sk_type) { 575 case SK_GENESIS: 576 sk_xmac_miibus_statchg(sc_if); 577 break; 578 case SK_YUKON: 579 case SK_YUKON_LITE: 580 case SK_YUKON_LP: 581 sk_marv_miibus_statchg(sc_if); 582 break; 583 } 584 585 return; 586 } 587 588 static int 589 sk_xmac_miibus_readreg(sc_if, phy, reg) 590 struct sk_if_softc *sc_if; 591 int phy, reg; 592 { 593 int i; 594 595 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0) 596 return(0); 597 598 SK_IF_LOCK(sc_if); 599 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8)); 600 SK_XM_READ_2(sc_if, XM_PHY_DATA); 601 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) { 602 for (i = 0; i < SK_TIMEOUT; i++) { 603 DELAY(1); 604 if (SK_XM_READ_2(sc_if, XM_MMUCMD) & 605 XM_MMUCMD_PHYDATARDY) 606 break; 607 } 608 609 if (i == SK_TIMEOUT) { 610 printf("sk%d: phy failed to come ready\n", 611 sc_if->sk_unit); 612 SK_IF_UNLOCK(sc_if); 613 return(0); 614 } 615 } 616 DELAY(1); 617 i = SK_XM_READ_2(sc_if, XM_PHY_DATA); 618 SK_IF_UNLOCK(sc_if); 619 return(i); 620 } 621 622 static int 623 sk_xmac_miibus_writereg(sc_if, phy, reg, val) 624 struct sk_if_softc *sc_if; 625 int phy, reg, val; 626 { 627 int i; 628 629 SK_IF_LOCK(sc_if); 630 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8)); 631 for (i = 0; i < SK_TIMEOUT; i++) { 632 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY)) 633 break; 634 } 635 636 if (i == SK_TIMEOUT) { 637 printf("sk%d: phy failed to come ready\n", sc_if->sk_unit); 638 SK_IF_UNLOCK(sc_if); 639 return(ETIMEDOUT); 640 } 641 642 
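	/*
	 * The PHY address/register pair has been latched and the
	 * management interface is idle again; now write the data word
	 * and wait for the busy bit to clear once more.
	 */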
SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val); 643 for (i = 0; i < SK_TIMEOUT; i++) { 644 DELAY(1); 645 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY)) 646 break; 647 } 648 SK_IF_UNLOCK(sc_if); 649 if (i == SK_TIMEOUT) 650 printf("sk%d: phy write timed out\n", sc_if->sk_unit); 651 652 return(0); 653 } 654 655 static void 656 sk_xmac_miibus_statchg(sc_if) 657 struct sk_if_softc *sc_if; 658 { 659 struct mii_data *mii; 660 661 mii = device_get_softc(sc_if->sk_miibus); 662 663 SK_IF_LOCK(sc_if); 664 /* 665 * If this is a GMII PHY, manually set the XMAC's 666 * duplex mode accordingly. 667 */ 668 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) { 669 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 670 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX); 671 } else { 672 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX); 673 } 674 } 675 SK_IF_UNLOCK(sc_if); 676 677 return; 678 } 679 680 static int 681 sk_marv_miibus_readreg(sc_if, phy, reg) 682 struct sk_if_softc *sc_if; 683 int phy, reg; 684 { 685 u_int16_t val; 686 int i; 687 688 if (phy != 0 || 689 (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER && 690 sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) { 691 return(0); 692 } 693 694 SK_IF_LOCK(sc_if); 695 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) | 696 YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ); 697 698 for (i = 0; i < SK_TIMEOUT; i++) { 699 DELAY(1); 700 val = SK_YU_READ_2(sc_if, YUKON_SMICR); 701 if (val & YU_SMICR_READ_VALID) 702 break; 703 } 704 705 if (i == SK_TIMEOUT) { 706 printf("sk%d: phy failed to come ready\n", 707 sc_if->sk_unit); 708 SK_IF_UNLOCK(sc_if); 709 return(0); 710 } 711 712 val = SK_YU_READ_2(sc_if, YUKON_SMIDR); 713 SK_IF_UNLOCK(sc_if); 714 715 return(val); 716 } 717 718 static int 719 sk_marv_miibus_writereg(sc_if, phy, reg, val) 720 struct sk_if_softc *sc_if; 721 int phy, reg, val; 722 { 723 int i; 724 725 SK_IF_LOCK(sc_if); 726 SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val); 727 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) | 728 YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE); 729 730 for (i = 0; i < SK_TIMEOUT; i++) { 731 DELAY(1); 732 if (SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY) 733 break; 734 } 735 SK_IF_UNLOCK(sc_if); 736 737 return(0); 738 } 739 740 static void 741 sk_marv_miibus_statchg(sc_if) 742 struct sk_if_softc *sc_if; 743 { 744 return; 745 } 746 747 #define HASH_BITS 6 748 749 static u_int32_t 750 sk_xmchash(addr) 751 const uint8_t *addr; 752 { 753 uint32_t crc; 754 755 /* Compute CRC for the address value. */ 756 crc = ether_crc32_le(addr, ETHER_ADDR_LEN); 757 758 return (~crc & ((1 << HASH_BITS) - 1)); 759 } 760 761 /* gmchash is just a big endian crc */ 762 static u_int32_t 763 sk_gmchash(addr) 764 const uint8_t *addr; 765 { 766 uint32_t crc; 767 768 /* Compute CRC for the address value. 
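	 * As with sk_xmchash() above (which also inverts the CRC), only
	 * the low HASH_BITS (6) bits are kept, selecting one of 64 hash
	 * bins.  sk_setmulti() below then sets bit (h % 32) of hash
	 * register h / 32, i.e. roughly:
	 *
	 *	h = sk_gmchash(LLADDR(sdl));
	 *	hashes[h / 32] |= 1 << (h % 32);
	 *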
*/ 769 crc = ether_crc32_be(addr, ETHER_ADDR_LEN); 770 771 return (crc & ((1 << HASH_BITS) - 1)); 772 } 773 774 static void 775 sk_setfilt(sc_if, addr, slot) 776 struct sk_if_softc *sc_if; 777 caddr_t addr; 778 int slot; 779 { 780 int base; 781 782 base = XM_RXFILT_ENTRY(slot); 783 784 SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0])); 785 SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2])); 786 SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4])); 787 788 return; 789 } 790 791 static void 792 sk_setmulti(sc_if) 793 struct sk_if_softc *sc_if; 794 { 795 struct sk_softc *sc = sc_if->sk_softc; 796 struct ifnet *ifp = sc_if->sk_ifp; 797 u_int32_t hashes[2] = { 0, 0 }; 798 int h = 0, i; 799 struct ifmultiaddr *ifma; 800 u_int8_t dummy[] = { 0, 0, 0, 0, 0 ,0 }; 801 802 SK_IF_LOCK_ASSERT(sc_if); 803 804 /* First, zot all the existing filters. */ 805 switch(sc->sk_type) { 806 case SK_GENESIS: 807 for (i = 1; i < XM_RXFILT_MAX; i++) 808 sk_setfilt(sc_if, (caddr_t)&dummy, i); 809 810 SK_XM_WRITE_4(sc_if, XM_MAR0, 0); 811 SK_XM_WRITE_4(sc_if, XM_MAR2, 0); 812 break; 813 case SK_YUKON: 814 case SK_YUKON_LITE: 815 case SK_YUKON_LP: 816 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0); 817 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0); 818 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0); 819 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0); 820 break; 821 } 822 823 /* Now program new ones. */ 824 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 825 hashes[0] = 0xFFFFFFFF; 826 hashes[1] = 0xFFFFFFFF; 827 } else { 828 i = 1; 829 IF_ADDR_LOCK(ifp); 830 TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) { 831 if (ifma->ifma_addr->sa_family != AF_LINK) 832 continue; 833 /* 834 * Program the first XM_RXFILT_MAX multicast groups 835 * into the perfect filter. For all others, 836 * use the hash table. 
837 */ 838 if (sc->sk_type == SK_GENESIS && i < XM_RXFILT_MAX) { 839 sk_setfilt(sc_if, 840 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i); 841 i++; 842 continue; 843 } 844 845 switch(sc->sk_type) { 846 case SK_GENESIS: 847 h = sk_xmchash( 848 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 849 break; 850 case SK_YUKON: 851 case SK_YUKON_LITE: 852 case SK_YUKON_LP: 853 h = sk_gmchash( 854 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 855 break; 856 } 857 if (h < 32) 858 hashes[0] |= (1 << h); 859 else 860 hashes[1] |= (1 << (h - 32)); 861 } 862 IF_ADDR_UNLOCK(ifp); 863 } 864 865 switch(sc->sk_type) { 866 case SK_GENESIS: 867 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH| 868 XM_MODE_RX_USE_PERFECT); 869 SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]); 870 SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]); 871 break; 872 case SK_YUKON: 873 case SK_YUKON_LITE: 874 case SK_YUKON_LP: 875 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff); 876 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff); 877 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff); 878 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff); 879 break; 880 } 881 882 return; 883 } 884 885 static void 886 sk_setpromisc(sc_if) 887 struct sk_if_softc *sc_if; 888 { 889 struct sk_softc *sc = sc_if->sk_softc; 890 struct ifnet *ifp = sc_if->sk_ifp; 891 892 SK_IF_LOCK_ASSERT(sc_if); 893 894 switch(sc->sk_type) { 895 case SK_GENESIS: 896 if (ifp->if_flags & IFF_PROMISC) { 897 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC); 898 } else { 899 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC); 900 } 901 break; 902 case SK_YUKON: 903 case SK_YUKON_LITE: 904 case SK_YUKON_LP: 905 if (ifp->if_flags & IFF_PROMISC) { 906 SK_YU_CLRBIT_2(sc_if, YUKON_RCR, 907 YU_RCR_UFLEN | YU_RCR_MUFLEN); 908 } else { 909 SK_YU_SETBIT_2(sc_if, YUKON_RCR, 910 YU_RCR_UFLEN | YU_RCR_MUFLEN); 911 } 912 break; 913 } 914 915 return; 916 } 917 918 static int 919 sk_init_rx_ring(sc_if) 920 struct sk_if_softc *sc_if; 921 { 922 struct sk_chain_data *cd = &sc_if->sk_cdata; 923 struct sk_ring_data *rd = sc_if->sk_rdata; 924 int i; 925 926 bzero((char *)rd->sk_rx_ring, 927 sizeof(struct sk_rx_desc) * SK_RX_RING_CNT); 928 929 for (i = 0; i < SK_RX_RING_CNT; i++) { 930 cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i]; 931 if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS) 932 return(ENOBUFS); 933 if (i == (SK_RX_RING_CNT - 1)) { 934 cd->sk_rx_chain[i].sk_next = 935 &cd->sk_rx_chain[0]; 936 rd->sk_rx_ring[i].sk_next = 937 vtophys(&rd->sk_rx_ring[0]); 938 } else { 939 cd->sk_rx_chain[i].sk_next = 940 &cd->sk_rx_chain[i + 1]; 941 rd->sk_rx_ring[i].sk_next = 942 vtophys(&rd->sk_rx_ring[i + 1]); 943 } 944 } 945 946 sc_if->sk_cdata.sk_rx_prod = 0; 947 sc_if->sk_cdata.sk_rx_cons = 0; 948 949 return(0); 950 } 951 952 static void 953 sk_init_tx_ring(sc_if) 954 struct sk_if_softc *sc_if; 955 { 956 struct sk_chain_data *cd = &sc_if->sk_cdata; 957 struct sk_ring_data *rd = sc_if->sk_rdata; 958 int i; 959 960 bzero((char *)sc_if->sk_rdata->sk_tx_ring, 961 sizeof(struct sk_tx_desc) * SK_TX_RING_CNT); 962 963 for (i = 0; i < SK_TX_RING_CNT; i++) { 964 cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i]; 965 if (i == (SK_TX_RING_CNT - 1)) { 966 cd->sk_tx_chain[i].sk_next = 967 &cd->sk_tx_chain[0]; 968 rd->sk_tx_ring[i].sk_next = 969 vtophys(&rd->sk_tx_ring[0]); 970 } else { 971 cd->sk_tx_chain[i].sk_next = 972 &cd->sk_tx_chain[i + 1]; 973 rd->sk_tx_ring[i].sk_next = 974 vtophys(&rd->sk_tx_ring[i + 1]); 975 } 976 } 977 978 sc_if->sk_cdata.sk_tx_prod = 0; 979 
sc_if->sk_cdata.sk_tx_cons = 0; 980 sc_if->sk_cdata.sk_tx_cnt = 0; 981 982 return; 983 } 984 985 static int 986 sk_newbuf(sc_if, c, m) 987 struct sk_if_softc *sc_if; 988 struct sk_chain *c; 989 struct mbuf *m; 990 { 991 struct mbuf *m_new = NULL; 992 struct sk_rx_desc *r; 993 994 if (m == NULL) { 995 caddr_t *buf = NULL; 996 997 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 998 if (m_new == NULL) 999 return(ENOBUFS); 1000 1001 /* Allocate the jumbo buffer */ 1002 buf = sk_jalloc(sc_if); 1003 if (buf == NULL) { 1004 m_freem(m_new); 1005 #ifdef SK_VERBOSE 1006 printf("sk%d: jumbo allocation failed " 1007 "-- packet dropped!\n", sc_if->sk_unit); 1008 #endif 1009 return(ENOBUFS); 1010 } 1011 1012 /* Attach the buffer to the mbuf */ 1013 MEXTADD(m_new, buf, SK_JLEN, sk_jfree, 1014 (struct sk_if_softc *)sc_if, 0, EXT_NET_DRV); 1015 m_new->m_data = (void *)buf; 1016 m_new->m_pkthdr.len = m_new->m_len = SK_JLEN; 1017 } else { 1018 /* 1019 * We're re-using a previously allocated mbuf; 1020 * be sure to re-init pointers and lengths to 1021 * default values. 1022 */ 1023 m_new = m; 1024 m_new->m_len = m_new->m_pkthdr.len = SK_JLEN; 1025 m_new->m_data = m_new->m_ext.ext_buf; 1026 } 1027 1028 /* 1029 * Adjust alignment so packet payload begins on a 1030 * longword boundary. Mandatory for Alpha, useful on 1031 * x86 too. 1032 */ 1033 m_adj(m_new, ETHER_ALIGN); 1034 1035 r = c->sk_desc; 1036 c->sk_mbuf = m_new; 1037 r->sk_data_lo = vtophys(mtod(m_new, caddr_t)); 1038 r->sk_ctl = m_new->m_len | SK_RXSTAT; 1039 1040 return(0); 1041 } 1042 1043 /* 1044 * Allocate jumbo buffer storage. The SysKonnect adapters support 1045 * "jumbograms" (9K frames), although SysKonnect doesn't currently 1046 * use them in their drivers. In order for us to use them, we need 1047 * large 9K receive buffers, however standard mbuf clusters are only 1048 * 2048 bytes in size. Consequently, we need to allocate and manage 1049 * our own jumbo buffer pool. Fortunately, this does not require an 1050 * excessive amount of additional code. 1051 */ 1052 static int 1053 sk_alloc_jumbo_mem(sc_if) 1054 struct sk_if_softc *sc_if; 1055 { 1056 caddr_t ptr; 1057 register int i; 1058 struct sk_jpool_entry *entry; 1059 1060 /* Grab a big chunk o' storage. */ 1061 sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF, 1062 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); 1063 1064 if (sc_if->sk_cdata.sk_jumbo_buf == NULL) { 1065 printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit); 1066 return(ENOBUFS); 1067 } 1068 1069 mtx_init(&sc_if->sk_jlist_mtx, "sk_jlist_mtx", NULL, MTX_DEF); 1070 1071 SLIST_INIT(&sc_if->sk_jfree_listhead); 1072 SLIST_INIT(&sc_if->sk_jinuse_listhead); 1073 1074 /* 1075 * Now divide it up into 9K pieces and save the addresses 1076 * in an array. 
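	 * Free slots live on sk_jfree_listhead and migrate to
	 * sk_jinuse_listhead while an mbuf external buffer references
	 * them, so sk_jalloc()/sk_jfree() reduce to cheap list
	 * operations.  Roughly, the receive path does:
	 *
	 *	buf = sk_jalloc(sc_if);
	 *	MEXTADD(m, buf, SK_JLEN, sk_jfree, sc_if, 0, EXT_NET_DRV);
	 *	...
	 *	m_freem(m);	(sk_jfree() puts the slot back)
	 *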
1077 */ 1078 ptr = sc_if->sk_cdata.sk_jumbo_buf; 1079 for (i = 0; i < SK_JSLOTS; i++) { 1080 sc_if->sk_cdata.sk_jslots[i] = ptr; 1081 ptr += SK_JLEN; 1082 entry = malloc(sizeof(struct sk_jpool_entry), 1083 M_DEVBUF, M_NOWAIT); 1084 if (entry == NULL) { 1085 sk_free_jumbo_mem(sc_if); 1086 sc_if->sk_cdata.sk_jumbo_buf = NULL; 1087 printf("sk%d: no memory for jumbo " 1088 "buffer queue!\n", sc_if->sk_unit); 1089 return(ENOBUFS); 1090 } 1091 entry->slot = i; 1092 SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, 1093 entry, jpool_entries); 1094 } 1095 1096 return(0); 1097 } 1098 1099 static void 1100 sk_free_jumbo_mem(sc_if) 1101 struct sk_if_softc *sc_if; 1102 { 1103 struct sk_jpool_entry *entry; 1104 1105 SK_JLIST_LOCK(sc_if); 1106 1107 /* We cannot release external mbuf storage while in use. */ 1108 if (!SLIST_EMPTY(&sc_if->sk_jinuse_listhead)) { 1109 printf("sk%d: will leak jumbo buffer memory!\n", sc_if->sk_unit); 1110 SK_JLIST_UNLOCK(sc_if); 1111 return; 1112 } 1113 1114 while (!SLIST_EMPTY(&sc_if->sk_jfree_listhead)) { 1115 entry = SLIST_FIRST(&sc_if->sk_jfree_listhead); 1116 SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries); 1117 free(entry, M_DEVBUF); 1118 } 1119 1120 SK_JLIST_UNLOCK(sc_if); 1121 1122 mtx_destroy(&sc_if->sk_jlist_mtx); 1123 1124 contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF); 1125 1126 return; 1127 } 1128 1129 /* 1130 * Allocate a jumbo buffer. 1131 */ 1132 static void * 1133 sk_jalloc(sc_if) 1134 struct sk_if_softc *sc_if; 1135 { 1136 struct sk_jpool_entry *entry; 1137 1138 SK_JLIST_LOCK(sc_if); 1139 1140 entry = SLIST_FIRST(&sc_if->sk_jfree_listhead); 1141 1142 if (entry == NULL) { 1143 #ifdef SK_VERBOSE 1144 printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit); 1145 #endif 1146 SK_JLIST_UNLOCK(sc_if); 1147 return(NULL); 1148 } 1149 1150 SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries); 1151 SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries); 1152 1153 SK_JLIST_UNLOCK(sc_if); 1154 1155 return(sc_if->sk_cdata.sk_jslots[entry->slot]); 1156 } 1157 1158 /* 1159 * Release a jumbo buffer. 1160 */ 1161 static void 1162 sk_jfree(buf, args) 1163 void *buf; 1164 void *args; 1165 { 1166 struct sk_if_softc *sc_if; 1167 int i; 1168 struct sk_jpool_entry *entry; 1169 1170 /* Extract the softc struct pointer. */ 1171 sc_if = (struct sk_if_softc *)args; 1172 if (sc_if == NULL) 1173 panic("sk_jfree: didn't get softc pointer!"); 1174 1175 SK_JLIST_LOCK(sc_if); 1176 1177 /* calculate the slot this buffer belongs to */ 1178 i = ((vm_offset_t)buf 1179 - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN; 1180 1181 if ((i < 0) || (i >= SK_JSLOTS)) 1182 panic("sk_jfree: asked to free buffer that we don't manage!"); 1183 1184 entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead); 1185 if (entry == NULL) 1186 panic("sk_jfree: buffer not in use!"); 1187 entry->slot = i; 1188 SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries); 1189 SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries); 1190 if (SLIST_EMPTY(&sc_if->sk_jinuse_listhead)) 1191 wakeup(sc_if); 1192 1193 SK_JLIST_UNLOCK(sc_if); 1194 return; 1195 } 1196 1197 /* 1198 * Set media options. 1199 */ 1200 static int 1201 sk_ifmedia_upd(ifp) 1202 struct ifnet *ifp; 1203 { 1204 struct sk_if_softc *sc_if = ifp->if_softc; 1205 struct mii_data *mii; 1206 1207 mii = device_get_softc(sc_if->sk_miibus); 1208 sk_init(sc_if); 1209 mii_mediachg(mii); 1210 1211 return(0); 1212 } 1213 1214 /* 1215 * Report current media status. 
1216 */ 1217 static void 1218 sk_ifmedia_sts(ifp, ifmr) 1219 struct ifnet *ifp; 1220 struct ifmediareq *ifmr; 1221 { 1222 struct sk_if_softc *sc_if; 1223 struct mii_data *mii; 1224 1225 sc_if = ifp->if_softc; 1226 mii = device_get_softc(sc_if->sk_miibus); 1227 1228 mii_pollstat(mii); 1229 ifmr->ifm_active = mii->mii_media_active; 1230 ifmr->ifm_status = mii->mii_media_status; 1231 1232 return; 1233 } 1234 1235 static int 1236 sk_ioctl(ifp, command, data) 1237 struct ifnet *ifp; 1238 u_long command; 1239 caddr_t data; 1240 { 1241 struct sk_if_softc *sc_if = ifp->if_softc; 1242 struct ifreq *ifr = (struct ifreq *) data; 1243 int error = 0; 1244 struct mii_data *mii; 1245 1246 switch(command) { 1247 case SIOCSIFMTU: 1248 SK_IF_LOCK(sc_if); 1249 if (ifr->ifr_mtu > SK_JUMBO_MTU) 1250 error = EINVAL; 1251 else { 1252 ifp->if_mtu = ifr->ifr_mtu; 1253 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1254 sk_init_locked(sc_if); 1255 } 1256 SK_IF_UNLOCK(sc_if); 1257 break; 1258 case SIOCSIFFLAGS: 1259 SK_IF_LOCK(sc_if); 1260 if (ifp->if_flags & IFF_UP) { 1261 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1262 if ((ifp->if_flags ^ sc_if->sk_if_flags) 1263 & IFF_PROMISC) { 1264 sk_setpromisc(sc_if); 1265 sk_setmulti(sc_if); 1266 } 1267 } else 1268 sk_init_locked(sc_if); 1269 } else { 1270 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1271 sk_stop(sc_if); 1272 } 1273 sc_if->sk_if_flags = ifp->if_flags; 1274 SK_IF_UNLOCK(sc_if); 1275 error = 0; 1276 break; 1277 case SIOCADDMULTI: 1278 case SIOCDELMULTI: 1279 SK_IF_LOCK(sc_if); 1280 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1281 sk_setmulti(sc_if); 1282 error = 0; 1283 } 1284 SK_IF_UNLOCK(sc_if); 1285 break; 1286 case SIOCGIFMEDIA: 1287 case SIOCSIFMEDIA: 1288 mii = device_get_softc(sc_if->sk_miibus); 1289 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 1290 break; 1291 default: 1292 error = ether_ioctl(ifp, command, data); 1293 break; 1294 } 1295 1296 return(error); 1297 } 1298 1299 /* 1300 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device 1301 * IDs against our list and return a device name if we find a match. 1302 */ 1303 static int 1304 skc_probe(dev) 1305 device_t dev; 1306 { 1307 struct sk_type *t = sk_devs; 1308 1309 while(t->sk_name != NULL) { 1310 if ((pci_get_vendor(dev) == t->sk_vid) && 1311 (pci_get_device(dev) == t->sk_did)) { 1312 /* 1313 * Only attach to rev. 2 of the Linksys EG1032 adapter. 1314 * Rev. 3 is supported by re(4). 1315 */ 1316 if ((t->sk_vid == VENDORID_LINKSYS) && 1317 (t->sk_did == DEVICEID_LINKSYS_EG1032) && 1318 (pci_get_subdevice(dev) != 1319 SUBDEVICEID_LINKSYS_EG1032_REV2)) { 1320 t++; 1321 continue; 1322 } 1323 device_set_desc(dev, t->sk_name); 1324 return (BUS_PROBE_DEFAULT); 1325 } 1326 t++; 1327 } 1328 1329 return(ENXIO); 1330 } 1331 1332 /* 1333 * Force the GEnesis into reset, then bring it out of reset. 
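 * The tail of this routine also programs interrupt moderation.  As a
 * rough worked example (assuming the SK_IMTIMER_TICKS_* constants are
 * timer ticks per microsecond and SK_IM_USECS(us, ticks) is simply
 * us * ticks), a 100us setting on a GEnesis board loads
 * 100 * SK_IMTIMER_TICKS_GENESIS counts into SK_IMTIMERINIT.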
1334 */ 1335 static void 1336 sk_reset(sc) 1337 struct sk_softc *sc; 1338 { 1339 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET); 1340 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET); 1341 if (SK_YUKON_FAMILY(sc->sk_type)) 1342 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET); 1343 1344 DELAY(1000); 1345 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET); 1346 DELAY(2); 1347 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET); 1348 if (SK_YUKON_FAMILY(sc->sk_type)) 1349 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR); 1350 1351 if (sc->sk_type == SK_GENESIS) { 1352 /* Configure packet arbiter */ 1353 sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET); 1354 sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT); 1355 sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT); 1356 sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT); 1357 sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT); 1358 } 1359 1360 /* Enable RAM interface */ 1361 sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET); 1362 1363 /* 1364 * Configure interrupt moderation. The moderation timer 1365 * defers interrupts specified in the interrupt moderation 1366 * timer mask based on the timeout specified in the interrupt 1367 * moderation timer init register. Each bit in the timer 1368 * register represents one tick, so to specify a timeout in 1369 * microseconds, we have to multiply by the correct number of 1370 * ticks-per-microsecond. 1371 */ 1372 switch (sc->sk_type) { 1373 case SK_GENESIS: 1374 sc->sk_int_ticks = SK_IMTIMER_TICKS_GENESIS; 1375 break; 1376 default: 1377 sc->sk_int_ticks = SK_IMTIMER_TICKS_YUKON; 1378 break; 1379 } 1380 if (bootverbose) 1381 printf("skc%d: interrupt moderation is %d us\n", 1382 sc->sk_unit, sc->sk_int_mod); 1383 sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod, 1384 sc->sk_int_ticks)); 1385 sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF| 1386 SK_ISR_RX1_EOF|SK_ISR_RX2_EOF); 1387 sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START); 1388 1389 return; 1390 } 1391 1392 static int 1393 sk_probe(dev) 1394 device_t dev; 1395 { 1396 struct sk_softc *sc; 1397 1398 sc = device_get_softc(device_get_parent(dev)); 1399 1400 /* 1401 * Not much to do here. We always know there will be 1402 * at least one XMAC present, and if there are two, 1403 * skc_attach() will create a second device instance 1404 * for us. 1405 */ 1406 switch (sc->sk_type) { 1407 case SK_GENESIS: 1408 device_set_desc(dev, "XaQti Corp. XMAC II"); 1409 break; 1410 case SK_YUKON: 1411 case SK_YUKON_LITE: 1412 case SK_YUKON_LP: 1413 device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon"); 1414 break; 1415 } 1416 1417 return (BUS_PROBE_DEFAULT); 1418 } 1419 1420 /* 1421 * Each XMAC chip is attached as a separate logical IP interface. 1422 * Single port cards will have only one logical interface of course. 
 */
static int
sk_attach(dev)
	device_t dev;
{
	struct sk_softc *sc;
	struct sk_if_softc *sc_if;
	struct ifnet *ifp;
	int i, port, error;
	u_char eaddr[6];

	if (dev == NULL)
		return(EINVAL);

	error = 0;
	sc_if = device_get_softc(dev);
	sc = device_get_softc(device_get_parent(dev));
	port = *(int *)device_get_ivars(dev);

	sc_if->sk_dev = dev;
	sc_if->sk_unit = device_get_unit(dev);
	sc_if->sk_port = port;
	sc_if->sk_softc = sc;
	sc->sk_if[port] = sc_if;
	if (port == SK_PORT_A)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
	if (port == SK_PORT_B)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;

	/* Allocate the descriptor queues. */
	sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF,
	    M_NOWAIT, M_ZERO, 0xffffffff, PAGE_SIZE, 0);

	if (sc_if->sk_rdata == NULL) {
		printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit);
		error = ENOMEM;
		goto fail;
	}

	/* Try to allocate memory for jumbo buffers. */
	if (sk_alloc_jumbo_mem(sc_if)) {
		printf("sk%d: jumbo buffer allocation failed\n",
		    sc_if->sk_unit);
		error = ENOMEM;
		goto fail;
	}

	ifp = sc_if->sk_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		printf("sk%d: can not if_alloc()\n", sc_if->sk_unit);
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	/*
	 * The hardware should be ready for VLAN_MTU by default:
	 * XMAC II has 0x8100 in VLAN Tag Level 1 register initially;
	 * YU_SMR_MFL_VLAN is set by this driver in Yukon.
	 */
	ifp->if_capabilities = ifp->if_capenable = IFCAP_VLAN_MTU;
	ifp->if_ioctl = sk_ioctl;
	ifp->if_start = sk_start;
	ifp->if_watchdog = sk_watchdog;
	ifp->if_init = sk_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = SK_TX_RING_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	callout_handle_init(&sc_if->sk_tick_ch);

	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	SK_LOCK(sc);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] =
		    sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);

	/*
	 * Set up RAM buffer addresses. The NIC will have a certain
	 * amount of SRAM on it, somewhere between 512K and 2MB. We
	 * need to divide this up a) between the transmitter and
	 * receiver and b) between the two XMACs, if this is a
	 * dual port NIC. Our algorithm is to divide up the memory
	 * evenly so that everyone gets a fair share.
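	 * As an illustration: a dual-MAC board with 1MB of SRAM ends up
	 * with chunk = ramsize / 4, i.e. 256KB of receive buffer and
	 * 256KB of transmit buffer per port; the start/end values
	 * programmed below are expressed in 8-byte (sizeof(u_int64_t))
	 * units.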
1517 */ 1518 if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) { 1519 u_int32_t chunk, val; 1520 1521 chunk = sc->sk_ramsize / 2; 1522 val = sc->sk_rboff / sizeof(u_int64_t); 1523 sc_if->sk_rx_ramstart = val; 1524 val += (chunk / sizeof(u_int64_t)); 1525 sc_if->sk_rx_ramend = val - 1; 1526 sc_if->sk_tx_ramstart = val; 1527 val += (chunk / sizeof(u_int64_t)); 1528 sc_if->sk_tx_ramend = val - 1; 1529 } else { 1530 u_int32_t chunk, val; 1531 1532 chunk = sc->sk_ramsize / 4; 1533 val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) / 1534 sizeof(u_int64_t); 1535 sc_if->sk_rx_ramstart = val; 1536 val += (chunk / sizeof(u_int64_t)); 1537 sc_if->sk_rx_ramend = val - 1; 1538 sc_if->sk_tx_ramstart = val; 1539 val += (chunk / sizeof(u_int64_t)); 1540 sc_if->sk_tx_ramend = val - 1; 1541 } 1542 1543 /* Read and save PHY type and set PHY address */ 1544 sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF; 1545 switch(sc_if->sk_phytype) { 1546 case SK_PHYTYPE_XMAC: 1547 sc_if->sk_phyaddr = SK_PHYADDR_XMAC; 1548 break; 1549 case SK_PHYTYPE_BCOM: 1550 sc_if->sk_phyaddr = SK_PHYADDR_BCOM; 1551 break; 1552 case SK_PHYTYPE_MARV_COPPER: 1553 sc_if->sk_phyaddr = SK_PHYADDR_MARV; 1554 break; 1555 default: 1556 printf("skc%d: unsupported PHY type: %d\n", 1557 sc->sk_unit, sc_if->sk_phytype); 1558 error = ENODEV; 1559 SK_UNLOCK(sc); 1560 goto fail; 1561 } 1562 1563 1564 /* 1565 * Call MI attach routine. Can't hold locks when calling into ether_*. 1566 */ 1567 SK_UNLOCK(sc); 1568 ether_ifattach(ifp, eaddr); 1569 SK_LOCK(sc); 1570 1571 /* 1572 * Do miibus setup. 1573 */ 1574 switch (sc->sk_type) { 1575 case SK_GENESIS: 1576 sk_init_xmac(sc_if); 1577 break; 1578 case SK_YUKON: 1579 case SK_YUKON_LITE: 1580 case SK_YUKON_LP: 1581 sk_init_yukon(sc_if); 1582 break; 1583 } 1584 1585 SK_UNLOCK(sc); 1586 if (mii_phy_probe(dev, &sc_if->sk_miibus, 1587 sk_ifmedia_upd, sk_ifmedia_sts)) { 1588 printf("skc%d: no PHY found!\n", sc_if->sk_unit); 1589 ether_ifdetach(ifp); 1590 error = ENXIO; 1591 goto fail; 1592 } 1593 1594 fail: 1595 if (error) { 1596 /* Access should be ok even though lock has been dropped */ 1597 sc->sk_if[port] = NULL; 1598 sk_detach(dev); 1599 } 1600 1601 return(error); 1602 } 1603 1604 /* 1605 * Attach the interface. Allocate softc structures, do ifmedia 1606 * setup and ethernet/BPF attach. 1607 */ 1608 static int 1609 skc_attach(dev) 1610 device_t dev; 1611 { 1612 struct sk_softc *sc; 1613 int unit, error = 0, rid, *port; 1614 uint8_t skrs; 1615 char *pname, *revstr; 1616 1617 sc = device_get_softc(dev); 1618 unit = device_get_unit(dev); 1619 1620 mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 1621 MTX_DEF | MTX_RECURSE); 1622 /* 1623 * Map control/status registers. 1624 */ 1625 pci_enable_busmaster(dev); 1626 1627 rid = SK_RID; 1628 sc->sk_res = bus_alloc_resource_any(dev, SK_RES, &rid, RF_ACTIVE); 1629 1630 if (sc->sk_res == NULL) { 1631 printf("sk%d: couldn't map ports/memory\n", unit); 1632 error = ENXIO; 1633 goto fail; 1634 } 1635 1636 sc->sk_btag = rman_get_bustag(sc->sk_res); 1637 sc->sk_bhandle = rman_get_bushandle(sc->sk_res); 1638 1639 sc->sk_type = sk_win_read_1(sc, SK_CHIPVER); 1640 sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4) & 0xf; 1641 1642 /* Bail out if chip is not recognized. 
 */
	if (sc->sk_type != SK_GENESIS && !SK_YUKON_FAMILY(sc->sk_type)) {
		printf("skc%d: unknown device: chipver=%02x, rev=%x\n",
		    unit, sc->sk_type, sc->sk_rev);
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupt */
	rid = 0;
	sc->sk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->sk_irq == NULL) {
		printf("skc%d: couldn't map interrupt\n", unit);
		error = ENXIO;
		goto fail;
	}

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "int_mod", CTLTYPE_INT|CTLFLAG_RW,
	    &sc->sk_int_mod, 0, sysctl_hw_sk_int_mod, "I",
	    "SK interrupt moderation");

	/* Pull in device tunables. */
	sc->sk_int_mod = SK_IM_DEFAULT;
	error = resource_int_value(device_get_name(dev), unit,
	    "int_mod", &sc->sk_int_mod);
	if (error == 0) {
		if (sc->sk_int_mod < SK_IM_MIN ||
		    sc->sk_int_mod > SK_IM_MAX) {
			printf("skc%d: int_mod value out of range; "
			    "using default: %d\n", unit, SK_IM_DEFAULT);
			sc->sk_int_mod = SK_IM_DEFAULT;
		}
	}

	/* Reset the adapter. */
	sk_reset(sc);

	sc->sk_unit = unit;

	/* Read and save vital product data from EEPROM. */
	sk_vpd_read(sc);

	skrs = sk_win_read_1(sc, SK_EPROM0);
	if (sc->sk_type == SK_GENESIS) {
		/* Read and save RAM size and RAMbuffer offset */
		switch(skrs) {
		case SK_RAMSIZE_512K_64:
			sc->sk_ramsize = 0x80000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		case SK_RAMSIZE_1024K_64:
			sc->sk_ramsize = 0x100000;
			sc->sk_rboff = SK_RBOFF_80000;
			break;
		case SK_RAMSIZE_1024K_128:
			sc->sk_ramsize = 0x100000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		case SK_RAMSIZE_2048K_128:
			sc->sk_ramsize = 0x200000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		default:
			printf("skc%d: unknown ram size: %d\n",
			    sc->sk_unit, skrs);
			error = ENXIO;
			goto fail;
		}
	} else { /* SK_YUKON_FAMILY */
		if (skrs == 0x00)
			sc->sk_ramsize = 0x20000;
		else
			sc->sk_ramsize = skrs * (1<<12);
		sc->sk_rboff = SK_RBOFF_0;
	}

	/* Read and save physical media type */
	switch(sk_win_read_1(sc, SK_PMDTYPE)) {
	case SK_PMD_1000BASESX:
		sc->sk_pmd = IFM_1000_SX;
		break;
	case SK_PMD_1000BASELX:
		sc->sk_pmd = IFM_1000_LX;
		break;
	case SK_PMD_1000BASECX:
		sc->sk_pmd = IFM_1000_CX;
		break;
	case SK_PMD_1000BASETX:
		sc->sk_pmd = IFM_1000_T;
		break;
	default:
		printf("skc%d: unknown media type: 0x%x\n",
		    sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE));
		error = ENXIO;
		goto fail;
	}

	/* Determine whether to name it with VPD PN or just make it up.
	 * Marvell Yukon VPD PN seems to frequently be bogus. */
	switch (pci_get_device(dev)) {
	case DEVICEID_SK_V1:
	case DEVICEID_BELKIN_5005:
	case DEVICEID_3COM_3C940:
	case DEVICEID_LINKSYS_EG1032:
	case DEVICEID_DLINK_DGE530T:
		/* Stay with VPD PN. */
		pname = sc->sk_vpd_prodname;
		break;
	case DEVICEID_SK_V2:
		/* YUKON VPD PN might bear no resemblance to reality. */
		switch (sc->sk_type) {
		case SK_GENESIS:
			/* Stay with VPD PN.
*/ 1759 pname = sc->sk_vpd_prodname; 1760 break; 1761 case SK_YUKON: 1762 pname = "Marvell Yukon Gigabit Ethernet"; 1763 break; 1764 case SK_YUKON_LITE: 1765 pname = "Marvell Yukon Lite Gigabit Ethernet"; 1766 break; 1767 case SK_YUKON_LP: 1768 pname = "Marvell Yukon LP Gigabit Ethernet"; 1769 break; 1770 default: 1771 pname = "Marvell Yukon (Unknown) Gigabit Ethernet"; 1772 break; 1773 } 1774 1775 /* Yukon Lite Rev. A0 needs special test. */ 1776 if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) { 1777 u_int32_t far; 1778 u_int8_t testbyte; 1779 1780 /* Save flash address register before testing. */ 1781 far = sk_win_read_4(sc, SK_EP_ADDR); 1782 1783 sk_win_write_1(sc, SK_EP_ADDR+0x03, 0xff); 1784 testbyte = sk_win_read_1(sc, SK_EP_ADDR+0x03); 1785 1786 if (testbyte != 0x00) { 1787 /* Yukon Lite Rev. A0 detected. */ 1788 sc->sk_type = SK_YUKON_LITE; 1789 sc->sk_rev = SK_YUKON_LITE_REV_A0; 1790 /* Restore flash address register. */ 1791 sk_win_write_4(sc, SK_EP_ADDR, far); 1792 } 1793 } 1794 break; 1795 default: 1796 device_printf(dev, "unknown device: vendor=%04x, device=%04x, " 1797 "chipver=%02x, rev=%x\n", 1798 pci_get_vendor(dev), pci_get_device(dev), 1799 sc->sk_type, sc->sk_rev); 1800 error = ENXIO; 1801 goto fail; 1802 } 1803 1804 if (sc->sk_type == SK_YUKON_LITE) { 1805 switch (sc->sk_rev) { 1806 case SK_YUKON_LITE_REV_A0: 1807 revstr = "A0"; 1808 break; 1809 case SK_YUKON_LITE_REV_A1: 1810 revstr = "A1"; 1811 break; 1812 case SK_YUKON_LITE_REV_A3: 1813 revstr = "A3"; 1814 break; 1815 default: 1816 revstr = ""; 1817 break; 1818 } 1819 } else { 1820 revstr = ""; 1821 } 1822 1823 /* Announce the product name and more VPD data if there. */ 1824 device_printf(dev, "%s rev. %s(0x%x)\n", 1825 pname != NULL ? pname : "<unknown>", revstr, sc->sk_rev); 1826 1827 if (bootverbose) { 1828 if (sc->sk_vpd_readonly != NULL && 1829 sc->sk_vpd_readonly_len != 0) { 1830 char buf[256]; 1831 char *dp = sc->sk_vpd_readonly; 1832 uint16_t l, len = sc->sk_vpd_readonly_len; 1833 1834 while (len >= 3) { 1835 if ((*dp == 'P' && *(dp+1) == 'N') || 1836 (*dp == 'E' && *(dp+1) == 'C') || 1837 (*dp == 'M' && *(dp+1) == 'N') || 1838 (*dp == 'S' && *(dp+1) == 'N')) { 1839 l = 0; 1840 while (l < *(dp+2)) { 1841 buf[l] = *(dp+3+l); 1842 ++l; 1843 } 1844 buf[l] = '\0'; 1845 device_printf(dev, "%c%c: %s\n", 1846 *dp, *(dp+1), buf); 1847 len -= (3 + l); 1848 dp += (3 + l); 1849 } else { 1850 len -= (3 + *(dp+2)); 1851 dp += (3 + *(dp+2)); 1852 } 1853 } 1854 } 1855 device_printf(dev, "chip ver = 0x%02x\n", sc->sk_type); 1856 device_printf(dev, "chip rev = 0x%02x\n", sc->sk_rev); 1857 device_printf(dev, "SK_EPROM0 = 0x%02x\n", skrs); 1858 device_printf(dev, "SRAM size = 0x%06x\n", sc->sk_ramsize); 1859 } 1860 1861 sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1); 1862 if (sc->sk_devs[SK_PORT_A] == NULL) { 1863 device_printf(dev, "failed to add child for PORT_A\n"); 1864 error = ENXIO; 1865 goto fail; 1866 } 1867 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT); 1868 if (port == NULL) { 1869 device_printf(dev, "failed to allocate memory for " 1870 "ivars of PORT_A\n"); 1871 error = ENXIO; 1872 goto fail; 1873 } 1874 *port = SK_PORT_A; 1875 device_set_ivars(sc->sk_devs[SK_PORT_A], port); 1876 1877 if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) { 1878 sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1); 1879 if (sc->sk_devs[SK_PORT_B] == NULL) { 1880 device_printf(dev, "failed to add child for PORT_B\n"); 1881 error = ENXIO; 1882 goto fail; 1883 } 1884 port = malloc(sizeof(int), M_DEVBUF, 
M_NOWAIT); 1885 if (port == NULL) { 1886 device_printf(dev, "failed to allocate memory for " 1887 "ivars of PORT_B\n"); 1888 error = ENXIO; 1889 goto fail; 1890 } 1891 *port = SK_PORT_B; 1892 device_set_ivars(sc->sk_devs[SK_PORT_B], port); 1893 } 1894 1895 /* Turn on the 'driver is loaded' LED. */ 1896 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON); 1897 1898 error = bus_generic_attach(dev); 1899 if (error) { 1900 device_printf(dev, "failed to attach port(s)\n"); 1901 goto fail; 1902 } 1903 1904 /* Hook interrupt last to avoid having to lock softc */ 1905 error = bus_setup_intr(dev, sc->sk_irq, INTR_TYPE_NET|INTR_MPSAFE, 1906 sk_intr, sc, &sc->sk_intrhand); 1907 1908 if (error) { 1909 printf("skc%d: couldn't set up irq\n", unit); 1910 goto fail; 1911 } 1912 1913 fail: 1914 if (error) 1915 skc_detach(dev); 1916 1917 return(error); 1918 } 1919 1920 /* 1921 * Shutdown hardware and free up resources. This can be called any 1922 * time after the mutex has been initialized. It is called in both 1923 * the error case in attach and the normal detach case so it needs 1924 * to be careful about only freeing resources that have actually been 1925 * allocated. 1926 */ 1927 static int 1928 sk_detach(dev) 1929 device_t dev; 1930 { 1931 struct sk_if_softc *sc_if; 1932 struct ifnet *ifp; 1933 1934 sc_if = device_get_softc(dev); 1935 KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx), 1936 ("sk mutex not initialized in sk_detach")); 1937 SK_IF_LOCK(sc_if); 1938 1939 ifp = sc_if->sk_ifp; 1940 /* These should only be active if attach_xmac succeeded */ 1941 if (device_is_attached(dev)) { 1942 sk_stop(sc_if); 1943 /* Can't hold locks while calling detach */ 1944 SK_IF_UNLOCK(sc_if); 1945 ether_ifdetach(ifp); 1946 SK_IF_LOCK(sc_if); 1947 } 1948 if (ifp) 1949 if_free(ifp); 1950 /* 1951 * We're generally called from skc_detach() which is using 1952 * device_delete_child() to get to here. It's already trashed 1953 * miibus for us, so don't do it here or we'll panic. 
1954 */ 1955 /* 1956 if (sc_if->sk_miibus != NULL) 1957 device_delete_child(dev, sc_if->sk_miibus); 1958 */ 1959 bus_generic_detach(dev); 1960 if (sc_if->sk_cdata.sk_jumbo_buf != NULL) 1961 sk_free_jumbo_mem(sc_if); 1962 if (sc_if->sk_rdata != NULL) { 1963 contigfree(sc_if->sk_rdata, sizeof(struct sk_ring_data), 1964 M_DEVBUF); 1965 } 1966 SK_IF_UNLOCK(sc_if); 1967 1968 return(0); 1969 } 1970 1971 static int 1972 skc_detach(dev) 1973 device_t dev; 1974 { 1975 struct sk_softc *sc; 1976 1977 sc = device_get_softc(dev); 1978 KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized")); 1979 1980 if (device_is_alive(dev)) { 1981 if (sc->sk_devs[SK_PORT_A] != NULL) { 1982 free(device_get_ivars(sc->sk_devs[SK_PORT_A]), M_DEVBUF); 1983 device_delete_child(dev, sc->sk_devs[SK_PORT_A]); 1984 } 1985 if (sc->sk_devs[SK_PORT_B] != NULL) { 1986 free(device_get_ivars(sc->sk_devs[SK_PORT_B]), M_DEVBUF); 1987 device_delete_child(dev, sc->sk_devs[SK_PORT_B]); 1988 } 1989 bus_generic_detach(dev); 1990 } 1991 1992 if (sc->sk_vpd_prodname != NULL) 1993 free(sc->sk_vpd_prodname, M_DEVBUF); 1994 if (sc->sk_vpd_readonly != NULL) 1995 free(sc->sk_vpd_readonly, M_DEVBUF); 1996 1997 if (sc->sk_intrhand) 1998 bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand); 1999 if (sc->sk_irq) 2000 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq); 2001 if (sc->sk_res) 2002 bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res); 2003 2004 mtx_destroy(&sc->sk_mtx); 2005 2006 return(0); 2007 } 2008 2009 static int 2010 sk_encap(sc_if, m_head, txidx) 2011 struct sk_if_softc *sc_if; 2012 struct mbuf *m_head; 2013 u_int32_t *txidx; 2014 { 2015 struct sk_tx_desc *f = NULL; 2016 struct mbuf *m; 2017 u_int32_t frag, cur, cnt = 0; 2018 2019 SK_IF_LOCK_ASSERT(sc_if); 2020 2021 m = m_head; 2022 cur = frag = *txidx; 2023 2024 /* 2025 * Start packing the mbufs in this chain into 2026 * the fragment pointers. Stop when we run out 2027 * of fragments or hit the end of the mbuf chain. 
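	 * Note the ownership handoff: every fragment after the first is
	 * handed to the NIC (SK_TXCTL_OWN) as it is filled in, the last
	 * fragment gets SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR, and only
	 * then is OWN set on the first descriptor, so the chip never
	 * sees a partially built chain.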
2028 */ 2029 for (m = m_head; m != NULL; m = m->m_next) { 2030 if (m->m_len != 0) { 2031 if ((SK_TX_RING_CNT - 2032 (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2) 2033 return(ENOBUFS); 2034 f = &sc_if->sk_rdata->sk_tx_ring[frag]; 2035 f->sk_data_lo = vtophys(mtod(m, vm_offset_t)); 2036 f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT; 2037 if (cnt == 0) 2038 f->sk_ctl |= SK_TXCTL_FIRSTFRAG; 2039 else 2040 f->sk_ctl |= SK_TXCTL_OWN; 2041 cur = frag; 2042 SK_INC(frag, SK_TX_RING_CNT); 2043 cnt++; 2044 } 2045 } 2046 2047 if (m != NULL) 2048 return(ENOBUFS); 2049 2050 sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |= 2051 SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR; 2052 sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head; 2053 sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN; 2054 sc_if->sk_cdata.sk_tx_cnt += cnt; 2055 2056 *txidx = frag; 2057 2058 return(0); 2059 } 2060 2061 static void 2062 sk_start(ifp) 2063 struct ifnet *ifp; 2064 { 2065 struct sk_if_softc *sc_if; 2066 2067 sc_if = ifp->if_softc; 2068 2069 SK_IF_LOCK(sc_if); 2070 sk_start_locked(ifp); 2071 SK_IF_UNLOCK(sc_if); 2072 2073 return; 2074 } 2075 2076 static void 2077 sk_start_locked(ifp) 2078 struct ifnet *ifp; 2079 { 2080 struct sk_softc *sc; 2081 struct sk_if_softc *sc_if; 2082 struct mbuf *m_head = NULL; 2083 u_int32_t idx; 2084 2085 sc_if = ifp->if_softc; 2086 sc = sc_if->sk_softc; 2087 2088 SK_IF_LOCK_ASSERT(sc_if); 2089 2090 idx = sc_if->sk_cdata.sk_tx_prod; 2091 2092 while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) { 2093 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 2094 if (m_head == NULL) 2095 break; 2096 2097 /* 2098 * Pack the data into the transmit ring. If we 2099 * don't have room, set the OACTIVE flag and wait 2100 * for the NIC to drain the ring. 2101 */ 2102 if (sk_encap(sc_if, m_head, &idx)) { 2103 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 2104 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2105 break; 2106 } 2107 2108 /* 2109 * If there's a BPF listener, bounce a copy of this frame 2110 * to him. 2111 */ 2112 BPF_MTAP(ifp, m_head); 2113 } 2114 2115 /* Transmit */ 2116 if (idx != sc_if->sk_cdata.sk_tx_prod) { 2117 sc_if->sk_cdata.sk_tx_prod = idx; 2118 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START); 2119 2120 /* Set a timeout in case the chip goes out to lunch. */ 2121 ifp->if_timer = 5; 2122 } 2123 2124 return; 2125 } 2126 2127 2128 static void 2129 sk_watchdog(ifp) 2130 struct ifnet *ifp; 2131 { 2132 struct sk_if_softc *sc_if; 2133 2134 sc_if = ifp->if_softc; 2135 2136 printf("sk%d: watchdog timeout\n", sc_if->sk_unit); 2137 SK_IF_LOCK(sc_if); 2138 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2139 sk_init_locked(sc_if); 2140 SK_IF_UNLOCK(sc_if); 2141 2142 return; 2143 } 2144 2145 static void 2146 skc_shutdown(dev) 2147 device_t dev; 2148 { 2149 struct sk_softc *sc; 2150 2151 sc = device_get_softc(dev); 2152 SK_LOCK(sc); 2153 2154 /* Turn off the 'driver is loaded' LED. */ 2155 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF); 2156 2157 /* 2158 * Reset the GEnesis controller. Doing this should also 2159 * assert the resets on the attached XMAC(s). 
2160 */ 2161 sk_reset(sc); 2162 SK_UNLOCK(sc); 2163 2164 return; 2165 } 2166 2167 static void 2168 sk_rxeof(sc_if) 2169 struct sk_if_softc *sc_if; 2170 { 2171 struct sk_softc *sc; 2172 struct mbuf *m; 2173 struct ifnet *ifp; 2174 struct sk_chain *cur_rx; 2175 int total_len = 0; 2176 int i; 2177 u_int32_t rxstat; 2178 2179 sc = sc_if->sk_softc; 2180 ifp = sc_if->sk_ifp; 2181 i = sc_if->sk_cdata.sk_rx_prod; 2182 cur_rx = &sc_if->sk_cdata.sk_rx_chain[i]; 2183 2184 SK_LOCK_ASSERT(sc); 2185 2186 while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) { 2187 2188 cur_rx = &sc_if->sk_cdata.sk_rx_chain[i]; 2189 rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat; 2190 m = cur_rx->sk_mbuf; 2191 cur_rx->sk_mbuf = NULL; 2192 total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl); 2193 SK_INC(i, SK_RX_RING_CNT); 2194 2195 if (rxstat & XM_RXSTAT_ERRFRAME) { 2196 ifp->if_ierrors++; 2197 sk_newbuf(sc_if, cur_rx, m); 2198 continue; 2199 } 2200 2201 /* 2202 * Try to allocate a new jumbo buffer. If that 2203 * fails, copy the packet to mbufs and put the 2204 * jumbo buffer back in the ring so it can be 2205 * re-used. If allocating mbufs fails, then we 2206 * have to drop the packet. 2207 */ 2208 if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) { 2209 struct mbuf *m0; 2210 m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, 2211 ifp, NULL); 2212 sk_newbuf(sc_if, cur_rx, m); 2213 if (m0 == NULL) { 2214 printf("sk%d: no receive buffers " 2215 "available -- packet dropped!\n", 2216 sc_if->sk_unit); 2217 ifp->if_ierrors++; 2218 continue; 2219 } 2220 m = m0; 2221 } else { 2222 m->m_pkthdr.rcvif = ifp; 2223 m->m_pkthdr.len = m->m_len = total_len; 2224 } 2225 2226 ifp->if_ipackets++; 2227 SK_UNLOCK(sc); 2228 (*ifp->if_input)(ifp, m); 2229 SK_LOCK(sc); 2230 } 2231 2232 sc_if->sk_cdata.sk_rx_prod = i; 2233 2234 return; 2235 } 2236 2237 static void 2238 sk_txeof(sc_if) 2239 struct sk_if_softc *sc_if; 2240 { 2241 struct sk_softc *sc; 2242 struct sk_tx_desc *cur_tx; 2243 struct ifnet *ifp; 2244 u_int32_t idx; 2245 2246 sc = sc_if->sk_softc; 2247 ifp = sc_if->sk_ifp; 2248 2249 /* 2250 * Go through our tx ring and free mbufs for those 2251 * frames that have been sent. 
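 * We stop at the first descriptor whose OWN bit is still set, since the
 * NIC has not handed that one (or anything after it) back to us yet.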
2252 */ 2253 idx = sc_if->sk_cdata.sk_tx_cons; 2254 while(idx != sc_if->sk_cdata.sk_tx_prod) { 2255 cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx]; 2256 if (cur_tx->sk_ctl & SK_TXCTL_OWN) 2257 break; 2258 if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG) 2259 ifp->if_opackets++; 2260 if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) { 2261 m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf); 2262 sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL; 2263 } 2264 sc_if->sk_cdata.sk_tx_cnt--; 2265 SK_INC(idx, SK_TX_RING_CNT); 2266 } 2267 2268 if (sc_if->sk_cdata.sk_tx_cnt == 0) { 2269 ifp->if_timer = 0; 2270 } else /* nudge chip to keep tx ring moving */ 2271 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START); 2272 2273 if (sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 2) 2274 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2275 2276 sc_if->sk_cdata.sk_tx_cons = idx; 2277 } 2278 2279 static void 2280 sk_tick(xsc_if) 2281 void *xsc_if; 2282 { 2283 struct sk_if_softc *sc_if; 2284 struct mii_data *mii; 2285 struct ifnet *ifp; 2286 int i; 2287 2288 sc_if = xsc_if; 2289 SK_IF_LOCK(sc_if); 2290 ifp = sc_if->sk_ifp; 2291 mii = device_get_softc(sc_if->sk_miibus); 2292 2293 if (!(ifp->if_flags & IFF_UP)) { 2294 SK_IF_UNLOCK(sc_if); 2295 return; 2296 } 2297 2298 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { 2299 sk_intr_bcom(sc_if); 2300 SK_IF_UNLOCK(sc_if); 2301 return; 2302 } 2303 2304 /* 2305 * According to SysKonnect, the correct way to verify that 2306 * the link has come back up is to poll bit 0 of the GPIO 2307 * register three times. This pin has the signal from the 2308 * link_sync pin connected to it; if we read the same link 2309 * state 3 times in a row, we know the link is up. 2310 */ 2311 for (i = 0; i < 3; i++) { 2312 if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET) 2313 break; 2314 } 2315 2316 if (i != 3) { 2317 sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz); 2318 SK_IF_UNLOCK(sc_if); 2319 return; 2320 } 2321 2322 /* Turn the GP0 interrupt back on. */ 2323 SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET); 2324 SK_XM_READ_2(sc_if, XM_ISR); 2325 mii_tick(mii); 2326 untimeout(sk_tick, sc_if, sc_if->sk_tick_ch); 2327 2328 SK_IF_UNLOCK(sc_if); 2329 return; 2330 } 2331 2332 static void 2333 sk_intr_bcom(sc_if) 2334 struct sk_if_softc *sc_if; 2335 { 2336 struct mii_data *mii; 2337 struct ifnet *ifp; 2338 int status; 2339 mii = device_get_softc(sc_if->sk_miibus); 2340 ifp = sc_if->sk_ifp; 2341 2342 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); 2343 2344 /* 2345 * Read the PHY interrupt register to make sure 2346 * we clear any pending interrupts. 2347 */ 2348 status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR); 2349 2350 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 2351 sk_init_xmac(sc_if); 2352 return; 2353 } 2354 2355 if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) { 2356 int lstat; 2357 lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 2358 BRGPHY_MII_AUXSTS); 2359 2360 if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) { 2361 mii_mediachg(mii); 2362 /* Turn off the link LED. */ 2363 SK_IF_WRITE_1(sc_if, 0, 2364 SK_LINKLED1_CTL, SK_LINKLED_OFF); 2365 sc_if->sk_link = 0; 2366 } else if (status & BRGPHY_ISR_LNK_CHG) { 2367 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, 2368 BRGPHY_MII_IMR, 0xFF00); 2369 mii_tick(mii); 2370 sc_if->sk_link = 1; 2371 /* Turn on the link LED. 
*/ 2372 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, 2373 SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF| 2374 SK_LINKLED_BLINK_OFF); 2375 } else { 2376 mii_tick(mii); 2377 sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz); 2378 } 2379 } 2380 2381 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); 2382 2383 return; 2384 } 2385 2386 static void 2387 sk_intr_xmac(sc_if) 2388 struct sk_if_softc *sc_if; 2389 { 2390 struct sk_softc *sc; 2391 u_int16_t status; 2392 2393 sc = sc_if->sk_softc; 2394 status = SK_XM_READ_2(sc_if, XM_ISR); 2395 2396 /* 2397 * Link has gone down. Start MII tick timeout to 2398 * watch for link resync. 2399 */ 2400 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) { 2401 if (status & XM_ISR_GP0_SET) { 2402 SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET); 2403 sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz); 2404 } 2405 2406 if (status & XM_ISR_AUTONEG_DONE) { 2407 sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz); 2408 } 2409 } 2410 2411 if (status & XM_IMR_TX_UNDERRUN) 2412 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO); 2413 2414 if (status & XM_IMR_RX_OVERRUN) 2415 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO); 2416 2417 status = SK_XM_READ_2(sc_if, XM_ISR); 2418 2419 return; 2420 } 2421 2422 static void 2423 sk_intr_yukon(sc_if) 2424 struct sk_if_softc *sc_if; 2425 { 2426 int status; 2427 2428 status = SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR); 2429 2430 return; 2431 } 2432 2433 static void 2434 sk_intr(xsc) 2435 void *xsc; 2436 { 2437 struct sk_softc *sc = xsc; 2438 struct sk_if_softc *sc_if0 = NULL, *sc_if1 = NULL; 2439 struct ifnet *ifp0 = NULL, *ifp1 = NULL; 2440 u_int32_t status; 2441 2442 SK_LOCK(sc); 2443 2444 sc_if0 = sc->sk_if[SK_PORT_A]; 2445 sc_if1 = sc->sk_if[SK_PORT_B]; 2446 2447 if (sc_if0 != NULL) 2448 ifp0 = sc_if0->sk_ifp; 2449 if (sc_if1 != NULL) 2450 ifp1 = sc_if1->sk_ifp; 2451 2452 for (;;) { 2453 status = CSR_READ_4(sc, SK_ISSR); 2454 if (!(status & sc->sk_intrmask)) 2455 break; 2456 2457 /* Handle receive interrupts first. */ 2458 if (status & SK_ISR_RX1_EOF) { 2459 sk_rxeof(sc_if0); 2460 CSR_WRITE_4(sc, SK_BMU_RX_CSR0, 2461 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START); 2462 } 2463 if (status & SK_ISR_RX2_EOF) { 2464 sk_rxeof(sc_if1); 2465 CSR_WRITE_4(sc, SK_BMU_RX_CSR1, 2466 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START); 2467 } 2468 2469 /* Then transmit interrupts. */ 2470 if (status & SK_ISR_TX1_S_EOF) { 2471 sk_txeof(sc_if0); 2472 CSR_WRITE_4(sc, SK_BMU_TXS_CSR0, 2473 SK_TXBMU_CLR_IRQ_EOF); 2474 } 2475 if (status & SK_ISR_TX2_S_EOF) { 2476 sk_txeof(sc_if1); 2477 CSR_WRITE_4(sc, SK_BMU_TXS_CSR1, 2478 SK_TXBMU_CLR_IRQ_EOF); 2479 } 2480 2481 /* Then MAC interrupts. 
*/ 2482 if (status & SK_ISR_MAC1 && 2483 ifp0->if_drv_flags & IFF_DRV_RUNNING) { 2484 if (sc->sk_type == SK_GENESIS) 2485 sk_intr_xmac(sc_if0); 2486 else 2487 sk_intr_yukon(sc_if0); 2488 } 2489 2490 if (status & SK_ISR_MAC2 && 2491 ifp1->if_drv_flags & IFF_DRV_RUNNING) { 2492 if (sc->sk_type == SK_GENESIS) 2493 sk_intr_xmac(sc_if1); 2494 else 2495 sk_intr_yukon(sc_if1); 2496 } 2497 2498 if (status & SK_ISR_EXTERNAL_REG) { 2499 if (ifp0 != NULL && 2500 sc_if0->sk_phytype == SK_PHYTYPE_BCOM) 2501 sk_intr_bcom(sc_if0); 2502 if (ifp1 != NULL && 2503 sc_if1->sk_phytype == SK_PHYTYPE_BCOM) 2504 sk_intr_bcom(sc_if1); 2505 } 2506 } 2507 2508 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); 2509 2510 if (ifp0 != NULL && !IFQ_DRV_IS_EMPTY(&ifp0->if_snd)) 2511 sk_start_locked(ifp0); 2512 if (ifp1 != NULL && !IFQ_DRV_IS_EMPTY(&ifp1->if_snd)) 2513 sk_start_locked(ifp1); 2514 2515 SK_UNLOCK(sc); 2516 2517 return; 2518 } 2519 2520 static void 2521 sk_init_xmac(sc_if) 2522 struct sk_if_softc *sc_if; 2523 { 2524 struct sk_softc *sc; 2525 struct ifnet *ifp; 2526 struct sk_bcom_hack bhack[] = { 2527 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 }, 2528 { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 }, 2529 { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 }, 2530 { 0, 0 } }; 2531 2532 sc = sc_if->sk_softc; 2533 ifp = sc_if->sk_ifp; 2534 2535 /* Unreset the XMAC. */ 2536 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET); 2537 DELAY(1000); 2538 2539 /* Reset the XMAC's internal state. */ 2540 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC); 2541 2542 /* Save the XMAC II revision */ 2543 sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID)); 2544 2545 /* 2546 * Perform additional initialization for external PHYs, 2547 * namely for the 1000baseTX cards that use the XMAC's 2548 * GMII mode. 2549 */ 2550 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { 2551 int i = 0; 2552 u_int32_t val; 2553 2554 /* Take PHY out of reset. */ 2555 val = sk_win_read_4(sc, SK_GPIO); 2556 if (sc_if->sk_port == SK_PORT_A) 2557 val |= SK_GPIO_DIR0|SK_GPIO_DAT0; 2558 else 2559 val |= SK_GPIO_DIR2|SK_GPIO_DAT2; 2560 sk_win_write_4(sc, SK_GPIO, val); 2561 2562 /* Enable GMII mode on the XMAC. */ 2563 SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE); 2564 2565 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, 2566 BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET); 2567 DELAY(10000); 2568 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, 2569 BRGPHY_MII_IMR, 0xFFF0); 2570 2571 /* 2572 * Early versions of the BCM5400 apparently have 2573 * a bug that requires them to have their reserved 2574 * registers initialized to some magic values. I don't 2575 * know what the numbers do, I'm just the messenger. 
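 * The register/value pairs written here come from the bhack[] table
 * declared at the top of this function.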
2576 */ 2577 if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03) 2578 == 0x6041) { 2579 while(bhack[i].reg) { 2580 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, 2581 bhack[i].reg, bhack[i].val); 2582 i++; 2583 } 2584 } 2585 } 2586 2587 /* Set station address */ 2588 SK_XM_WRITE_2(sc_if, XM_PAR0, 2589 *(u_int16_t *)(&IF_LLADDR(sc_if->sk_ifp)[0])); 2590 SK_XM_WRITE_2(sc_if, XM_PAR1, 2591 *(u_int16_t *)(&IF_LLADDR(sc_if->sk_ifp)[2])); 2592 SK_XM_WRITE_2(sc_if, XM_PAR2, 2593 *(u_int16_t *)(&IF_LLADDR(sc_if->sk_ifp)[4])); 2594 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION); 2595 2596 if (ifp->if_flags & IFF_BROADCAST) { 2597 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD); 2598 } else { 2599 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD); 2600 } 2601 2602 /* We don't need the FCS appended to the packet. */ 2603 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS); 2604 2605 /* We want short frames padded to 60 bytes. */ 2606 SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD); 2607 2608 /* 2609 * Enable the reception of all error frames. This is 2610 * a necessary evil due to the design of the XMAC. The 2611 * XMAC's receive FIFO is only 8K in size, but jumbo 2612 * frames can be up to 9000 bytes in length. When bad 2613 * frame filtering is enabled, the XMAC's RX FIFO operates 2614 * in 'store and forward' mode. For this to work, the 2615 * entire frame has to fit into the FIFO, but that means 2616 * that jumbo frames larger than 8192 bytes will be 2617 * truncated. Disabling all bad frame filtering causes 2618 * the RX FIFO to operate in streaming mode, in which 2619 * case the XMAC will start transferring frames out of the 2620 * RX FIFO as soon as the FIFO threshold is reached. 2621 */ 2622 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES| 2623 XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS| 2624 XM_MODE_RX_INRANGELEN); 2625 2626 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 2627 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK); 2628 else 2629 SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK); 2630 2631 /* 2632 * Bump up the transmit threshold. This helps hold off transmit 2633 * underruns when we're blasting traffic from both ports at once.
2634 */ 2635 SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH); 2636 2637 /* Set promiscuous mode */ 2638 sk_setpromisc(sc_if); 2639 2640 /* Set multicast filter */ 2641 sk_setmulti(sc_if); 2642 2643 /* Clear and enable interrupts */ 2644 SK_XM_READ_2(sc_if, XM_ISR); 2645 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) 2646 SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS); 2647 else 2648 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF); 2649 2650 /* Configure MAC arbiter */ 2651 switch(sc_if->sk_xmac_rev) { 2652 case XM_XMAC_REV_B2: 2653 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2); 2654 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2); 2655 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2); 2656 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2); 2657 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2); 2658 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2); 2659 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2); 2660 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2); 2661 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2); 2662 break; 2663 case XM_XMAC_REV_C1: 2664 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1); 2665 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1); 2666 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1); 2667 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1); 2668 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1); 2669 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1); 2670 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1); 2671 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1); 2672 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2); 2673 break; 2674 default: 2675 break; 2676 } 2677 sk_win_write_2(sc, SK_MACARB_CTL, 2678 SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF); 2679 2680 sc_if->sk_link = 1; 2681 2682 return; 2683 } 2684 2685 static void 2686 sk_init_yukon(sc_if) 2687 struct sk_if_softc *sc_if; 2688 { 2689 u_int32_t phy; 2690 u_int16_t reg; 2691 struct sk_softc *sc; 2692 struct ifnet *ifp; 2693 int i; 2694 2695 sc = sc_if->sk_softc; 2696 ifp = sc_if->sk_ifp; 2697 2698 if (sc->sk_type == SK_YUKON_LITE && 2699 sc->sk_rev >= SK_YUKON_LITE_REV_A3) { 2700 /* Take PHY out of reset. 
*/ 2701 sk_win_write_4(sc, SK_GPIO, 2702 (sk_win_read_4(sc, SK_GPIO) | SK_GPIO_DIR9) & ~SK_GPIO_DAT9); 2703 } 2704 2705 /* GMAC and GPHY Reset */ 2706 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET); 2707 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET); 2708 DELAY(1000); 2709 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_CLEAR); 2710 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET); 2711 DELAY(1000); 2712 2713 phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP | 2714 SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE; 2715 2716 switch(sc_if->sk_softc->sk_pmd) { 2717 case IFM_1000_SX: 2718 case IFM_1000_LX: 2719 phy |= SK_GPHY_FIBER; 2720 break; 2721 2722 case IFM_1000_CX: 2723 case IFM_1000_T: 2724 phy |= SK_GPHY_COPPER; 2725 break; 2726 } 2727 2728 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET); 2729 DELAY(1000); 2730 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR); 2731 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF | 2732 SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR); 2733 2734 /* unused read of the interrupt source register */ 2735 SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR); 2736 2737 reg = SK_YU_READ_2(sc_if, YUKON_PAR); 2738 2739 /* MIB Counter Clear Mode set */ 2740 reg |= YU_PAR_MIB_CLR; 2741 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg); 2742 2743 /* MIB Counter Clear Mode clear */ 2744 reg &= ~YU_PAR_MIB_CLR; 2745 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg); 2746 2747 /* receive control reg */ 2748 SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR); 2749 2750 /* transmit parameter register */ 2751 SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) | 2752 YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) ); 2753 2754 /* serial mode register */ 2755 reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e); 2756 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 2757 reg |= YU_SMR_MFL_JUMBO; 2758 SK_YU_WRITE_2(sc_if, YUKON_SMR, reg); 2759 2760 /* Setup Yukon's address */ 2761 for (i = 0; i < 3; i++) { 2762 /* Write Source Address 1 (unicast filter) */ 2763 SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4, 2764 IF_LLADDR(sc_if->sk_ifp)[i * 2] | 2765 IF_LLADDR(sc_if->sk_ifp)[i * 2 + 1] << 8); 2766 } 2767 2768 for (i = 0; i < 3; i++) { 2769 reg = sk_win_read_2(sc_if->sk_softc, 2770 SK_MAC1_0 + i * 2 + sc_if->sk_port * 8); 2771 SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg); 2772 } 2773 2774 /* Set promiscuous mode */ 2775 sk_setpromisc(sc_if); 2776 2777 /* Set multicast filter */ 2778 sk_setmulti(sc_if); 2779 2780 /* enable interrupt mask for counter overflows */ 2781 SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0); 2782 SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0); 2783 SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0); 2784 2785 /* Configure RX MAC FIFO */ 2786 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR); 2787 SK_IF_WRITE_4(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON); 2788 2789 /* Configure TX MAC FIFO */ 2790 SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR); 2791 SK_IF_WRITE_4(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON); 2792 } 2793 2794 /* 2795 * Note that to properly initialize any part of the GEnesis chip, 2796 * you first have to take it out of reset mode. 
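 * sk_init() below is just a locked wrapper: it takes the per-interface
 * lock and calls sk_init_locked(), which does the actual initialization.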
2797 */ 2798 static void 2799 sk_init(xsc) 2800 void *xsc; 2801 { 2802 struct sk_if_softc *sc_if = xsc; 2803 2804 SK_IF_LOCK(sc_if); 2805 sk_init_locked(sc_if); 2806 SK_IF_UNLOCK(sc_if); 2807 2808 return; 2809 } 2810 2811 static void 2812 sk_init_locked(sc_if) 2813 struct sk_if_softc *sc_if; 2814 { 2815 struct sk_softc *sc; 2816 struct ifnet *ifp; 2817 struct mii_data *mii; 2818 u_int16_t reg; 2819 u_int32_t imr; 2820 2821 SK_IF_LOCK_ASSERT(sc_if); 2822 2823 ifp = sc_if->sk_ifp; 2824 sc = sc_if->sk_softc; 2825 mii = device_get_softc(sc_if->sk_miibus); 2826 2827 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2828 return; 2829 2830 /* Cancel pending I/O and free all RX/TX buffers. */ 2831 sk_stop(sc_if); 2832 2833 if (sc->sk_type == SK_GENESIS) { 2834 /* Configure LINK_SYNC LED */ 2835 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON); 2836 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, 2837 SK_LINKLED_LINKSYNC_ON); 2838 2839 /* Configure RX LED */ 2840 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, 2841 SK_RXLEDCTL_COUNTER_START); 2842 2843 /* Configure TX LED */ 2844 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, 2845 SK_TXLEDCTL_COUNTER_START); 2846 } 2847 2848 /* Configure I2C registers */ 2849 2850 /* Configure XMAC(s) */ 2851 switch (sc->sk_type) { 2852 case SK_GENESIS: 2853 sk_init_xmac(sc_if); 2854 break; 2855 case SK_YUKON: 2856 case SK_YUKON_LITE: 2857 case SK_YUKON_LP: 2858 sk_init_yukon(sc_if); 2859 break; 2860 } 2861 mii_mediachg(mii); 2862 2863 if (sc->sk_type == SK_GENESIS) { 2864 /* Configure MAC FIFOs */ 2865 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET); 2866 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END); 2867 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON); 2868 2869 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET); 2870 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END); 2871 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON); 2872 } 2873 2874 /* Configure transmit arbiter(s) */ 2875 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, 2876 SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON); 2877 2878 /* Configure RAMbuffers */ 2879 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET); 2880 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart); 2881 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart); 2882 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart); 2883 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend); 2884 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON); 2885 2886 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET); 2887 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON); 2888 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart); 2889 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart); 2890 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart); 2891 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend); 2892 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON); 2893 2894 /* Configure BMUs */ 2895 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE); 2896 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO, 2897 vtophys(&sc_if->sk_rdata->sk_rx_ring[0])); 2898 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0); 2899 2900 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE); 2901 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO, 2902 vtophys(&sc_if->sk_rdata->sk_tx_ring[0])); 2903 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0); 2904 2905 /* Init descriptors */ 2906 if (sk_init_rx_ring(sc_if) == ENOBUFS) { 2907 printf("sk%d: initialization failed: no " 2908 "memory for rx 
buffers\n", sc_if->sk_unit); 2909 sk_stop(sc_if); 2910 return; 2911 } 2912 sk_init_tx_ring(sc_if); 2913 2914 /* Set interrupt moderation if changed via sysctl. */ 2915 /* SK_LOCK(sc); */ 2916 imr = sk_win_read_4(sc, SK_IMTIMERINIT); 2917 if (imr != SK_IM_USECS(sc->sk_int_mod, sc->sk_int_ticks)) { 2918 sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod, 2919 sc->sk_int_ticks)); 2920 if (bootverbose) 2921 printf("skc%d: interrupt moderation is %d us\n", 2922 sc->sk_unit, sc->sk_int_mod); 2923 } 2924 /* SK_UNLOCK(sc); */ 2925 2926 /* Configure interrupt handling */ 2927 CSR_READ_4(sc, SK_ISSR); 2928 if (sc_if->sk_port == SK_PORT_A) 2929 sc->sk_intrmask |= SK_INTRS1; 2930 else 2931 sc->sk_intrmask |= SK_INTRS2; 2932 2933 sc->sk_intrmask |= SK_ISR_EXTERNAL_REG; 2934 2935 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); 2936 2937 /* Start BMUs. */ 2938 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START); 2939 2940 switch(sc->sk_type) { 2941 case SK_GENESIS: 2942 /* Enable XMACs TX and RX state machines */ 2943 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE); 2944 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); 2945 break; 2946 case SK_YUKON: 2947 case SK_YUKON_LITE: 2948 case SK_YUKON_LP: 2949 reg = SK_YU_READ_2(sc_if, YUKON_GPCR); 2950 reg |= YU_GPCR_TXEN | YU_GPCR_RXEN; 2951 reg &= ~(YU_GPCR_SPEED_EN | YU_GPCR_DPLX_EN); 2952 SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg); 2953 } 2954 2955 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2956 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2957 2958 return; 2959 } 2960 2961 static void 2962 sk_stop(sc_if) 2963 struct sk_if_softc *sc_if; 2964 { 2965 int i; 2966 struct sk_softc *sc; 2967 struct ifnet *ifp; 2968 2969 SK_IF_LOCK_ASSERT(sc_if); 2970 sc = sc_if->sk_softc; 2971 ifp = sc_if->sk_ifp; 2972 2973 untimeout(sk_tick, sc_if, sc_if->sk_tick_ch); 2974 2975 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { 2976 u_int32_t val; 2977 2978 /* Put PHY back into reset. */ 2979 val = sk_win_read_4(sc, SK_GPIO); 2980 if (sc_if->sk_port == SK_PORT_A) { 2981 val |= SK_GPIO_DIR0; 2982 val &= ~SK_GPIO_DAT0; 2983 } else { 2984 val |= SK_GPIO_DIR2; 2985 val &= ~SK_GPIO_DAT2; 2986 } 2987 sk_win_write_4(sc, SK_GPIO, val); 2988 } 2989 2990 /* Turn off various components of this interface. 
*/ 2991 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC); 2992 switch (sc->sk_type) { 2993 case SK_GENESIS: 2994 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET); 2995 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET); 2996 break; 2997 case SK_YUKON: 2998 case SK_YUKON_LITE: 2999 case SK_YUKON_LP: 3000 SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET); 3001 SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET); 3002 break; 3003 } 3004 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE); 3005 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF); 3006 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE); 3007 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF); 3008 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF); 3009 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP); 3010 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP); 3011 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF); 3012 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF); 3013 3014 /* Disable interrupts */ 3015 if (sc_if->sk_port == SK_PORT_A) 3016 sc->sk_intrmask &= ~SK_INTRS1; 3017 else 3018 sc->sk_intrmask &= ~SK_INTRS2; 3019 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); 3020 3021 SK_XM_READ_2(sc_if, XM_ISR); 3022 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF); 3023 3024 /* Free RX and TX mbufs still in the queues. */ 3025 for (i = 0; i < SK_RX_RING_CNT; i++) { 3026 if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) { 3027 m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf); 3028 sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL; 3029 } 3030 } 3031 3032 for (i = 0; i < SK_TX_RING_CNT; i++) { 3033 if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) { 3034 m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf); 3035 sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL; 3036 } 3037 } 3038 3039 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE); 3040 3041 return; 3042 } 3043 3044 static int 3045 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 3046 { 3047 int error, value; 3048 3049 if (!arg1) 3050 return (EINVAL); 3051 value = *(int *)arg1; 3052 error = sysctl_handle_int(oidp, &value, 0, req); 3053 if (error || !req->newptr) 3054 return (error); 3055 if (value < low || value > high) 3056 return (EINVAL); 3057 *(int *)arg1 = value; 3058 return (0); 3059 } 3060 3061 static int 3062 sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS) 3063 { 3064 return (sysctl_int_range(oidp, arg1, arg2, req, SK_IM_MIN, SK_IM_MAX)); 3065 } 3066
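/*
 * A minimal sketch of how the interrupt moderation handler above is
 * typically hooked up to a per-device sysctl node from skc_attach().
 * The node name, description and context calls here are illustrative
 * assumptions; the actual hookup in skc_attach() may differ.
 *
 *	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
 *	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
 *	    "int_mod", CTLTYPE_INT|CTLFLAG_RW, &sc->sk_int_mod, 0,
 *	    sysctl_hw_sk_int_mod, "I", "SK interrupt moderation");
 *
 * With the handler attached this way, arg1 points at sc->sk_int_mod and
 * sysctl_int_range() rejects any new value outside [SK_IM_MIN, SK_IM_MAX].
 */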