1 /* $OpenBSD: if_sk.c,v 2.33 2003/08/12 05:23:06 nate Exp $ */ 2 3 /*- 4 * Copyright (c) 1997, 1998, 1999, 2000 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 /*- 35 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu> 36 * 37 * Permission to use, copy, modify, and distribute this software for any 38 * purpose with or without fee is hereby granted, provided that the above 39 * copyright notice and this permission notice appear in all copies. 40 * 41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 48 */ 49 50 #include <sys/cdefs.h> 51 __FBSDID("$FreeBSD$"); 52 53 /* 54 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports 55 * the SK-984x series adapters, both single port and dual port. 56 * References: 57 * The XaQti XMAC II datasheet, 58 * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf 59 * The SysKonnect GEnesis manual, http://www.syskonnect.com 60 * 61 * Note: XaQti has been aquired by Vitesse, and Vitesse does not have the 62 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a 63 * convenience to others until Vitesse corrects this problem: 64 * 65 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf 66 * 67 * Written by Bill Paul <wpaul@ee.columbia.edu> 68 * Department of Electrical Engineering 69 * Columbia University, New York City 70 */ 71 /* 72 * The SysKonnect gigabit ethernet adapters consist of two main 73 * components: the SysKonnect GEnesis controller chip and the XaQti Corp. 74 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC 75 * components and a PHY while the GEnesis controller provides a PCI 76 * interface with DMA support. Each card may have between 512K and 77 * 2MB of SRAM on board depending on the configuration. 78 * 79 * The SysKonnect GEnesis controller can have either one or two XMAC 80 * chips connected to it, allowing single or dual port NIC configurations. 81 * SysKonnect has the distinction of being the only vendor on the market 82 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs, 83 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the 84 * XMAC registers. This driver takes advantage of these features to allow 85 * both XMACs to operate as independent interfaces. 86 */ 87 88 #include <sys/param.h> 89 #include <sys/systm.h> 90 #include <sys/sockio.h> 91 #include <sys/mbuf.h> 92 #include <sys/malloc.h> 93 #include <sys/kernel.h> 94 #include <sys/module.h> 95 #include <sys/socket.h> 96 #include <sys/queue.h> 97 #include <sys/sysctl.h> 98 99 #include <net/if.h> 100 #include <net/if_arp.h> 101 #include <net/ethernet.h> 102 #include <net/if_dl.h> 103 #include <net/if_media.h> 104 #include <net/if_types.h> 105 106 #include <net/bpf.h> 107 108 #include <vm/vm.h> /* for vtophys */ 109 #include <vm/pmap.h> /* for vtophys */ 110 #include <machine/bus.h> 111 #include <machine/resource.h> 112 #include <sys/bus.h> 113 #include <sys/rman.h> 114 115 #include <dev/mii/mii.h> 116 #include <dev/mii/miivar.h> 117 #include <dev/mii/brgphyreg.h> 118 119 #include <dev/pci/pcireg.h> 120 #include <dev/pci/pcivar.h> 121 122 #if 0 123 #define SK_USEIOSPACE 124 #endif 125 126 #include <pci/if_skreg.h> 127 #include <pci/xmaciireg.h> 128 #include <pci/yukonreg.h> 129 130 MODULE_DEPEND(sk, pci, 1, 1, 1); 131 MODULE_DEPEND(sk, ether, 1, 1, 1); 132 MODULE_DEPEND(sk, miibus, 1, 1, 1); 133 134 /* "controller miibus0" required. See GENERIC if you get errors here. */ 135 #include "miibus_if.h" 136 137 #ifndef lint 138 static const char rcsid[] = 139 "$FreeBSD$"; 140 #endif 141 142 static struct sk_type sk_devs[] = { 143 { 144 VENDORID_SK, 145 DEVICEID_SK_V1, 146 "SysKonnect Gigabit Ethernet (V1.0)" 147 }, 148 { 149 VENDORID_SK, 150 DEVICEID_SK_V2, 151 "SysKonnect Gigabit Ethernet (V2.0)" 152 }, 153 { 154 VENDORID_MARVELL, 155 DEVICEID_SK_V2, 156 "Marvell Gigabit Ethernet" 157 }, 158 { 159 VENDORID_MARVELL, 160 DEVICEID_BELKIN_5005, 161 "Belkin F5D5005 Gigabit Ethernet" 162 }, 163 { 164 VENDORID_3COM, 165 DEVICEID_3COM_3C940, 166 "3Com 3C940 Gigabit Ethernet" 167 }, 168 { 169 VENDORID_LINKSYS, 170 DEVICEID_LINKSYS_EG1032, 171 "Linksys EG1032 Gigabit Ethernet" 172 }, 173 { 174 VENDORID_DLINK, 175 DEVICEID_DLINK_DGE530T, 176 "D-Link DGE-530T Gigabit Ethernet" 177 }, 178 { 0, 0, NULL } 179 }; 180 181 static int skc_probe(device_t); 182 static int skc_attach(device_t); 183 static int skc_detach(device_t); 184 static void skc_shutdown(device_t); 185 static int sk_detach(device_t); 186 static int sk_probe(device_t); 187 static int sk_attach(device_t); 188 static void sk_tick(void *); 189 static void sk_intr(void *); 190 static void sk_intr_xmac(struct sk_if_softc *); 191 static void sk_intr_bcom(struct sk_if_softc *); 192 static void sk_intr_yukon(struct sk_if_softc *); 193 static void sk_rxeof(struct sk_if_softc *); 194 static void sk_txeof(struct sk_if_softc *); 195 static int sk_encap(struct sk_if_softc *, struct mbuf *, 196 u_int32_t *); 197 static void sk_start(struct ifnet *); 198 static void sk_start_locked(struct ifnet *); 199 static int sk_ioctl(struct ifnet *, u_long, caddr_t); 200 static void sk_init(void *); 201 static void sk_init_locked(struct sk_if_softc *); 202 static void sk_init_xmac(struct sk_if_softc *); 203 static void sk_init_yukon(struct sk_if_softc *); 204 static void sk_stop(struct sk_if_softc *); 205 static void sk_watchdog(struct ifnet *); 206 static int sk_ifmedia_upd(struct ifnet *); 207 static void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *); 208 static void sk_reset(struct sk_softc *); 209 static int sk_newbuf(struct sk_if_softc *, 210 struct sk_chain *, struct mbuf *); 211 static int sk_alloc_jumbo_mem(struct sk_if_softc *); 212 static void sk_free_jumbo_mem(struct sk_if_softc *); 213 static void *sk_jalloc(struct sk_if_softc *); 214 static void sk_jfree(void *, void *); 215 static int sk_init_rx_ring(struct sk_if_softc *); 216 static void sk_init_tx_ring(struct sk_if_softc *); 217 static u_int32_t sk_win_read_4(struct sk_softc *, int); 218 static u_int16_t sk_win_read_2(struct sk_softc *, int); 219 static u_int8_t sk_win_read_1(struct sk_softc *, int); 220 static void sk_win_write_4(struct sk_softc *, int, u_int32_t); 221 static void sk_win_write_2(struct sk_softc *, int, u_int32_t); 222 static void sk_win_write_1(struct sk_softc *, int, u_int32_t); 223 static u_int8_t sk_vpd_readbyte(struct sk_softc *, int); 224 static void sk_vpd_read_res(struct sk_softc *, struct vpd_res *, int); 225 static void sk_vpd_read(struct sk_softc *); 226 227 static int sk_miibus_readreg(device_t, int, int); 228 static int sk_miibus_writereg(device_t, int, int, int); 229 static void sk_miibus_statchg(device_t); 230 231 static int sk_xmac_miibus_readreg(struct sk_if_softc *, int, int); 232 static int sk_xmac_miibus_writereg(struct sk_if_softc *, int, int, 233 int); 234 static void sk_xmac_miibus_statchg(struct sk_if_softc *); 235 236 static int sk_marv_miibus_readreg(struct sk_if_softc *, int, int); 237 static int sk_marv_miibus_writereg(struct sk_if_softc *, int, int, 238 int); 239 static void sk_marv_miibus_statchg(struct sk_if_softc *); 240 241 static uint32_t sk_xmchash(const uint8_t *); 242 static uint32_t sk_gmchash(const uint8_t *); 243 static void sk_setfilt(struct sk_if_softc *, caddr_t, int); 244 static void sk_setmulti(struct sk_if_softc *); 245 static void sk_setpromisc(struct sk_if_softc *); 246 247 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high); 248 static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS); 249 250 #ifdef SK_USEIOSPACE 251 #define SK_RES SYS_RES_IOPORT 252 #define SK_RID SK_PCI_LOIO 253 #else 254 #define SK_RES SYS_RES_MEMORY 255 #define SK_RID SK_PCI_LOMEM 256 #endif 257 258 /* 259 * Note that we have newbus methods for both the GEnesis controller 260 * itself and the XMAC(s). The XMACs are children of the GEnesis, and 261 * the miibus code is a child of the XMACs. We need to do it this way 262 * so that the miibus drivers can access the PHY registers on the 263 * right PHY. It's not quite what I had in mind, but it's the only 264 * design that achieves the desired effect. 265 */ 266 static device_method_t skc_methods[] = { 267 /* Device interface */ 268 DEVMETHOD(device_probe, skc_probe), 269 DEVMETHOD(device_attach, skc_attach), 270 DEVMETHOD(device_detach, skc_detach), 271 DEVMETHOD(device_shutdown, skc_shutdown), 272 273 /* bus interface */ 274 DEVMETHOD(bus_print_child, bus_generic_print_child), 275 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 276 277 { 0, 0 } 278 }; 279 280 static driver_t skc_driver = { 281 "skc", 282 skc_methods, 283 sizeof(struct sk_softc) 284 }; 285 286 static devclass_t skc_devclass; 287 288 static device_method_t sk_methods[] = { 289 /* Device interface */ 290 DEVMETHOD(device_probe, sk_probe), 291 DEVMETHOD(device_attach, sk_attach), 292 DEVMETHOD(device_detach, sk_detach), 293 DEVMETHOD(device_shutdown, bus_generic_shutdown), 294 295 /* bus interface */ 296 DEVMETHOD(bus_print_child, bus_generic_print_child), 297 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 298 299 /* MII interface */ 300 DEVMETHOD(miibus_readreg, sk_miibus_readreg), 301 DEVMETHOD(miibus_writereg, sk_miibus_writereg), 302 DEVMETHOD(miibus_statchg, sk_miibus_statchg), 303 304 { 0, 0 } 305 }; 306 307 static driver_t sk_driver = { 308 "sk", 309 sk_methods, 310 sizeof(struct sk_if_softc) 311 }; 312 313 static devclass_t sk_devclass; 314 315 DRIVER_MODULE(sk, pci, skc_driver, skc_devclass, 0, 0); 316 DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0); 317 DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0); 318 319 #define SK_SETBIT(sc, reg, x) \ 320 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x) 321 322 #define SK_CLRBIT(sc, reg, x) \ 323 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x) 324 325 #define SK_WIN_SETBIT_4(sc, reg, x) \ 326 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x) 327 328 #define SK_WIN_CLRBIT_4(sc, reg, x) \ 329 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x) 330 331 #define SK_WIN_SETBIT_2(sc, reg, x) \ 332 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x) 333 334 #define SK_WIN_CLRBIT_2(sc, reg, x) \ 335 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x) 336 337 static u_int32_t 338 sk_win_read_4(sc, reg) 339 struct sk_softc *sc; 340 int reg; 341 { 342 #ifdef SK_USEIOSPACE 343 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 344 return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg))); 345 #else 346 return(CSR_READ_4(sc, reg)); 347 #endif 348 } 349 350 static u_int16_t 351 sk_win_read_2(sc, reg) 352 struct sk_softc *sc; 353 int reg; 354 { 355 #ifdef SK_USEIOSPACE 356 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 357 return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg))); 358 #else 359 return(CSR_READ_2(sc, reg)); 360 #endif 361 } 362 363 static u_int8_t 364 sk_win_read_1(sc, reg) 365 struct sk_softc *sc; 366 int reg; 367 { 368 #ifdef SK_USEIOSPACE 369 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 370 return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg))); 371 #else 372 return(CSR_READ_1(sc, reg)); 373 #endif 374 } 375 376 static void 377 sk_win_write_4(sc, reg, val) 378 struct sk_softc *sc; 379 int reg; 380 u_int32_t val; 381 { 382 #ifdef SK_USEIOSPACE 383 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 384 CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val); 385 #else 386 CSR_WRITE_4(sc, reg, val); 387 #endif 388 return; 389 } 390 391 static void 392 sk_win_write_2(sc, reg, val) 393 struct sk_softc *sc; 394 int reg; 395 u_int32_t val; 396 { 397 #ifdef SK_USEIOSPACE 398 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 399 CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val); 400 #else 401 CSR_WRITE_2(sc, reg, val); 402 #endif 403 return; 404 } 405 406 static void 407 sk_win_write_1(sc, reg, val) 408 struct sk_softc *sc; 409 int reg; 410 u_int32_t val; 411 { 412 #ifdef SK_USEIOSPACE 413 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 414 CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val); 415 #else 416 CSR_WRITE_1(sc, reg, val); 417 #endif 418 return; 419 } 420 421 /* 422 * The VPD EEPROM contains Vital Product Data, as suggested in 423 * the PCI 2.1 specification. The VPD data is separared into areas 424 * denoted by resource IDs. The SysKonnect VPD contains an ID string 425 * resource (the name of the adapter), a read-only area resource 426 * containing various key/data fields and a read/write area which 427 * can be used to store asset management information or log messages. 428 * We read the ID string and read-only into buffers attached to 429 * the controller softc structure for later use. At the moment, 430 * we only use the ID string during skc_attach(). 431 */ 432 static u_int8_t 433 sk_vpd_readbyte(sc, addr) 434 struct sk_softc *sc; 435 int addr; 436 { 437 int i; 438 439 sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr); 440 for (i = 0; i < SK_TIMEOUT; i++) { 441 DELAY(1); 442 if (sk_win_read_2(sc, 443 SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG) 444 break; 445 } 446 447 if (i == SK_TIMEOUT) 448 return(0); 449 450 return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA))); 451 } 452 453 static void 454 sk_vpd_read_res(sc, res, addr) 455 struct sk_softc *sc; 456 struct vpd_res *res; 457 int addr; 458 { 459 int i; 460 u_int8_t *ptr; 461 462 ptr = (u_int8_t *)res; 463 for (i = 0; i < sizeof(struct vpd_res); i++) 464 ptr[i] = sk_vpd_readbyte(sc, i + addr); 465 466 return; 467 } 468 469 static void 470 sk_vpd_read(sc) 471 struct sk_softc *sc; 472 { 473 int pos = 0, i; 474 struct vpd_res res; 475 476 if (sc->sk_vpd_prodname != NULL) 477 free(sc->sk_vpd_prodname, M_DEVBUF); 478 if (sc->sk_vpd_readonly != NULL) 479 free(sc->sk_vpd_readonly, M_DEVBUF); 480 sc->sk_vpd_prodname = NULL; 481 sc->sk_vpd_readonly = NULL; 482 sc->sk_vpd_readonly_len = 0; 483 484 sk_vpd_read_res(sc, &res, pos); 485 486 /* 487 * Bail out quietly if the eeprom appears to be missing or empty. 488 */ 489 if (res.vr_id == 0xff && res.vr_len == 0xff && res.vr_pad == 0xff) 490 return; 491 492 if (res.vr_id != VPD_RES_ID) { 493 printf("skc%d: bad VPD resource id: expected %x got %x\n", 494 sc->sk_unit, VPD_RES_ID, res.vr_id); 495 return; 496 } 497 498 pos += sizeof(res); 499 sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT); 500 if (sc->sk_vpd_prodname != NULL) { 501 for (i = 0; i < res.vr_len; i++) 502 sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos); 503 sc->sk_vpd_prodname[i] = '\0'; 504 } 505 pos += res.vr_len; 506 507 sk_vpd_read_res(sc, &res, pos); 508 509 if (res.vr_id != VPD_RES_READ) { 510 printf("skc%d: bad VPD resource id: expected %x got %x\n", 511 sc->sk_unit, VPD_RES_READ, res.vr_id); 512 return; 513 } 514 515 pos += sizeof(res); 516 sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT); 517 for (i = 0; i < res.vr_len; i++) 518 sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos); 519 sc->sk_vpd_readonly_len = res.vr_len; 520 521 return; 522 } 523 524 static int 525 sk_miibus_readreg(dev, phy, reg) 526 device_t dev; 527 int phy, reg; 528 { 529 struct sk_if_softc *sc_if; 530 531 sc_if = device_get_softc(dev); 532 533 switch(sc_if->sk_softc->sk_type) { 534 case SK_GENESIS: 535 return(sk_xmac_miibus_readreg(sc_if, phy, reg)); 536 case SK_YUKON: 537 case SK_YUKON_LITE: 538 case SK_YUKON_LP: 539 return(sk_marv_miibus_readreg(sc_if, phy, reg)); 540 } 541 542 return(0); 543 } 544 545 static int 546 sk_miibus_writereg(dev, phy, reg, val) 547 device_t dev; 548 int phy, reg, val; 549 { 550 struct sk_if_softc *sc_if; 551 552 sc_if = device_get_softc(dev); 553 554 switch(sc_if->sk_softc->sk_type) { 555 case SK_GENESIS: 556 return(sk_xmac_miibus_writereg(sc_if, phy, reg, val)); 557 case SK_YUKON: 558 case SK_YUKON_LITE: 559 case SK_YUKON_LP: 560 return(sk_marv_miibus_writereg(sc_if, phy, reg, val)); 561 } 562 563 return(0); 564 } 565 566 static void 567 sk_miibus_statchg(dev) 568 device_t dev; 569 { 570 struct sk_if_softc *sc_if; 571 572 sc_if = device_get_softc(dev); 573 574 switch(sc_if->sk_softc->sk_type) { 575 case SK_GENESIS: 576 sk_xmac_miibus_statchg(sc_if); 577 break; 578 case SK_YUKON: 579 case SK_YUKON_LITE: 580 case SK_YUKON_LP: 581 sk_marv_miibus_statchg(sc_if); 582 break; 583 } 584 585 return; 586 } 587 588 static int 589 sk_xmac_miibus_readreg(sc_if, phy, reg) 590 struct sk_if_softc *sc_if; 591 int phy, reg; 592 { 593 int i; 594 595 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0) 596 return(0); 597 598 SK_IF_LOCK(sc_if); 599 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8)); 600 SK_XM_READ_2(sc_if, XM_PHY_DATA); 601 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) { 602 for (i = 0; i < SK_TIMEOUT; i++) { 603 DELAY(1); 604 if (SK_XM_READ_2(sc_if, XM_MMUCMD) & 605 XM_MMUCMD_PHYDATARDY) 606 break; 607 } 608 609 if (i == SK_TIMEOUT) { 610 printf("sk%d: phy failed to come ready\n", 611 sc_if->sk_unit); 612 SK_IF_UNLOCK(sc_if); 613 return(0); 614 } 615 } 616 DELAY(1); 617 i = SK_XM_READ_2(sc_if, XM_PHY_DATA); 618 SK_IF_UNLOCK(sc_if); 619 return(i); 620 } 621 622 static int 623 sk_xmac_miibus_writereg(sc_if, phy, reg, val) 624 struct sk_if_softc *sc_if; 625 int phy, reg, val; 626 { 627 int i; 628 629 SK_IF_LOCK(sc_if); 630 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8)); 631 for (i = 0; i < SK_TIMEOUT; i++) { 632 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY)) 633 break; 634 } 635 636 if (i == SK_TIMEOUT) { 637 printf("sk%d: phy failed to come ready\n", sc_if->sk_unit); 638 SK_IF_UNLOCK(sc_if); 639 return(ETIMEDOUT); 640 } 641 642 SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val); 643 for (i = 0; i < SK_TIMEOUT; i++) { 644 DELAY(1); 645 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY)) 646 break; 647 } 648 SK_IF_UNLOCK(sc_if); 649 if (i == SK_TIMEOUT) 650 printf("sk%d: phy write timed out\n", sc_if->sk_unit); 651 652 return(0); 653 } 654 655 static void 656 sk_xmac_miibus_statchg(sc_if) 657 struct sk_if_softc *sc_if; 658 { 659 struct mii_data *mii; 660 661 mii = device_get_softc(sc_if->sk_miibus); 662 663 SK_IF_LOCK(sc_if); 664 /* 665 * If this is a GMII PHY, manually set the XMAC's 666 * duplex mode accordingly. 667 */ 668 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) { 669 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 670 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX); 671 } else { 672 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX); 673 } 674 } 675 SK_IF_UNLOCK(sc_if); 676 677 return; 678 } 679 680 static int 681 sk_marv_miibus_readreg(sc_if, phy, reg) 682 struct sk_if_softc *sc_if; 683 int phy, reg; 684 { 685 u_int16_t val; 686 int i; 687 688 if (phy != 0 || 689 (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER && 690 sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) { 691 return(0); 692 } 693 694 SK_IF_LOCK(sc_if); 695 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) | 696 YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ); 697 698 for (i = 0; i < SK_TIMEOUT; i++) { 699 DELAY(1); 700 val = SK_YU_READ_2(sc_if, YUKON_SMICR); 701 if (val & YU_SMICR_READ_VALID) 702 break; 703 } 704 705 if (i == SK_TIMEOUT) { 706 printf("sk%d: phy failed to come ready\n", 707 sc_if->sk_unit); 708 SK_IF_UNLOCK(sc_if); 709 return(0); 710 } 711 712 val = SK_YU_READ_2(sc_if, YUKON_SMIDR); 713 SK_IF_UNLOCK(sc_if); 714 715 return(val); 716 } 717 718 static int 719 sk_marv_miibus_writereg(sc_if, phy, reg, val) 720 struct sk_if_softc *sc_if; 721 int phy, reg, val; 722 { 723 int i; 724 725 SK_IF_LOCK(sc_if); 726 SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val); 727 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) | 728 YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE); 729 730 for (i = 0; i < SK_TIMEOUT; i++) { 731 DELAY(1); 732 if (SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY) 733 break; 734 } 735 SK_IF_UNLOCK(sc_if); 736 737 return(0); 738 } 739 740 static void 741 sk_marv_miibus_statchg(sc_if) 742 struct sk_if_softc *sc_if; 743 { 744 return; 745 } 746 747 #define HASH_BITS 6 748 749 static u_int32_t 750 sk_xmchash(addr) 751 const uint8_t *addr; 752 { 753 uint32_t crc; 754 755 /* Compute CRC for the address value. */ 756 crc = ether_crc32_le(addr, ETHER_ADDR_LEN); 757 758 return (~crc & ((1 << HASH_BITS) - 1)); 759 } 760 761 /* gmchash is just a big endian crc */ 762 static u_int32_t 763 sk_gmchash(addr) 764 const uint8_t *addr; 765 { 766 uint32_t crc; 767 768 /* Compute CRC for the address value. */ 769 crc = ether_crc32_be(addr, ETHER_ADDR_LEN); 770 771 return (crc & ((1 << HASH_BITS) - 1)); 772 } 773 774 static void 775 sk_setfilt(sc_if, addr, slot) 776 struct sk_if_softc *sc_if; 777 caddr_t addr; 778 int slot; 779 { 780 int base; 781 782 base = XM_RXFILT_ENTRY(slot); 783 784 SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0])); 785 SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2])); 786 SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4])); 787 788 return; 789 } 790 791 static void 792 sk_setmulti(sc_if) 793 struct sk_if_softc *sc_if; 794 { 795 struct sk_softc *sc = sc_if->sk_softc; 796 struct ifnet *ifp = sc_if->sk_ifp; 797 u_int32_t hashes[2] = { 0, 0 }; 798 int h = 0, i; 799 struct ifmultiaddr *ifma; 800 u_int8_t dummy[] = { 0, 0, 0, 0, 0 ,0 }; 801 802 SK_IF_LOCK_ASSERT(sc_if); 803 804 /* First, zot all the existing filters. */ 805 switch(sc->sk_type) { 806 case SK_GENESIS: 807 for (i = 1; i < XM_RXFILT_MAX; i++) 808 sk_setfilt(sc_if, (caddr_t)&dummy, i); 809 810 SK_XM_WRITE_4(sc_if, XM_MAR0, 0); 811 SK_XM_WRITE_4(sc_if, XM_MAR2, 0); 812 break; 813 case SK_YUKON: 814 case SK_YUKON_LITE: 815 case SK_YUKON_LP: 816 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0); 817 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0); 818 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0); 819 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0); 820 break; 821 } 822 823 /* Now program new ones. */ 824 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 825 hashes[0] = 0xFFFFFFFF; 826 hashes[1] = 0xFFFFFFFF; 827 } else { 828 i = 1; 829 IF_ADDR_LOCK(ifp); 830 TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) { 831 if (ifma->ifma_addr->sa_family != AF_LINK) 832 continue; 833 /* 834 * Program the first XM_RXFILT_MAX multicast groups 835 * into the perfect filter. For all others, 836 * use the hash table. 837 */ 838 if (sc->sk_type == SK_GENESIS && i < XM_RXFILT_MAX) { 839 sk_setfilt(sc_if, 840 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i); 841 i++; 842 continue; 843 } 844 845 switch(sc->sk_type) { 846 case SK_GENESIS: 847 h = sk_xmchash( 848 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 849 break; 850 case SK_YUKON: 851 case SK_YUKON_LITE: 852 case SK_YUKON_LP: 853 h = sk_gmchash( 854 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 855 break; 856 } 857 if (h < 32) 858 hashes[0] |= (1 << h); 859 else 860 hashes[1] |= (1 << (h - 32)); 861 } 862 IF_ADDR_UNLOCK(ifp); 863 } 864 865 switch(sc->sk_type) { 866 case SK_GENESIS: 867 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH| 868 XM_MODE_RX_USE_PERFECT); 869 SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]); 870 SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]); 871 break; 872 case SK_YUKON: 873 case SK_YUKON_LITE: 874 case SK_YUKON_LP: 875 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff); 876 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff); 877 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff); 878 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff); 879 break; 880 } 881 882 return; 883 } 884 885 static void 886 sk_setpromisc(sc_if) 887 struct sk_if_softc *sc_if; 888 { 889 struct sk_softc *sc = sc_if->sk_softc; 890 struct ifnet *ifp = sc_if->sk_ifp; 891 892 SK_IF_LOCK_ASSERT(sc_if); 893 894 switch(sc->sk_type) { 895 case SK_GENESIS: 896 if (ifp->if_flags & IFF_PROMISC) { 897 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC); 898 } else { 899 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC); 900 } 901 break; 902 case SK_YUKON: 903 case SK_YUKON_LITE: 904 case SK_YUKON_LP: 905 if (ifp->if_flags & IFF_PROMISC) { 906 SK_YU_CLRBIT_2(sc_if, YUKON_RCR, 907 YU_RCR_UFLEN | YU_RCR_MUFLEN); 908 } else { 909 SK_YU_SETBIT_2(sc_if, YUKON_RCR, 910 YU_RCR_UFLEN | YU_RCR_MUFLEN); 911 } 912 break; 913 } 914 915 return; 916 } 917 918 static int 919 sk_init_rx_ring(sc_if) 920 struct sk_if_softc *sc_if; 921 { 922 struct sk_chain_data *cd = &sc_if->sk_cdata; 923 struct sk_ring_data *rd = sc_if->sk_rdata; 924 int i; 925 926 bzero((char *)rd->sk_rx_ring, 927 sizeof(struct sk_rx_desc) * SK_RX_RING_CNT); 928 929 for (i = 0; i < SK_RX_RING_CNT; i++) { 930 cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i]; 931 if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS) 932 return(ENOBUFS); 933 if (i == (SK_RX_RING_CNT - 1)) { 934 cd->sk_rx_chain[i].sk_next = 935 &cd->sk_rx_chain[0]; 936 rd->sk_rx_ring[i].sk_next = 937 vtophys(&rd->sk_rx_ring[0]); 938 } else { 939 cd->sk_rx_chain[i].sk_next = 940 &cd->sk_rx_chain[i + 1]; 941 rd->sk_rx_ring[i].sk_next = 942 vtophys(&rd->sk_rx_ring[i + 1]); 943 } 944 } 945 946 sc_if->sk_cdata.sk_rx_prod = 0; 947 sc_if->sk_cdata.sk_rx_cons = 0; 948 949 return(0); 950 } 951 952 static void 953 sk_init_tx_ring(sc_if) 954 struct sk_if_softc *sc_if; 955 { 956 struct sk_chain_data *cd = &sc_if->sk_cdata; 957 struct sk_ring_data *rd = sc_if->sk_rdata; 958 int i; 959 960 bzero((char *)sc_if->sk_rdata->sk_tx_ring, 961 sizeof(struct sk_tx_desc) * SK_TX_RING_CNT); 962 963 for (i = 0; i < SK_TX_RING_CNT; i++) { 964 cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i]; 965 if (i == (SK_TX_RING_CNT - 1)) { 966 cd->sk_tx_chain[i].sk_next = 967 &cd->sk_tx_chain[0]; 968 rd->sk_tx_ring[i].sk_next = 969 vtophys(&rd->sk_tx_ring[0]); 970 } else { 971 cd->sk_tx_chain[i].sk_next = 972 &cd->sk_tx_chain[i + 1]; 973 rd->sk_tx_ring[i].sk_next = 974 vtophys(&rd->sk_tx_ring[i + 1]); 975 } 976 } 977 978 sc_if->sk_cdata.sk_tx_prod = 0; 979 sc_if->sk_cdata.sk_tx_cons = 0; 980 sc_if->sk_cdata.sk_tx_cnt = 0; 981 982 return; 983 } 984 985 static int 986 sk_newbuf(sc_if, c, m) 987 struct sk_if_softc *sc_if; 988 struct sk_chain *c; 989 struct mbuf *m; 990 { 991 struct mbuf *m_new = NULL; 992 struct sk_rx_desc *r; 993 994 if (m == NULL) { 995 caddr_t *buf = NULL; 996 997 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 998 if (m_new == NULL) 999 return(ENOBUFS); 1000 1001 /* Allocate the jumbo buffer */ 1002 buf = sk_jalloc(sc_if); 1003 if (buf == NULL) { 1004 m_freem(m_new); 1005 #ifdef SK_VERBOSE 1006 printf("sk%d: jumbo allocation failed " 1007 "-- packet dropped!\n", sc_if->sk_unit); 1008 #endif 1009 return(ENOBUFS); 1010 } 1011 1012 /* Attach the buffer to the mbuf */ 1013 MEXTADD(m_new, buf, SK_JLEN, sk_jfree, 1014 (struct sk_if_softc *)sc_if, 0, EXT_NET_DRV); 1015 m_new->m_data = (void *)buf; 1016 m_new->m_pkthdr.len = m_new->m_len = SK_JLEN; 1017 } else { 1018 /* 1019 * We're re-using a previously allocated mbuf; 1020 * be sure to re-init pointers and lengths to 1021 * default values. 1022 */ 1023 m_new = m; 1024 m_new->m_len = m_new->m_pkthdr.len = SK_JLEN; 1025 m_new->m_data = m_new->m_ext.ext_buf; 1026 } 1027 1028 /* 1029 * Adjust alignment so packet payload begins on a 1030 * longword boundary. Mandatory for Alpha, useful on 1031 * x86 too. 1032 */ 1033 m_adj(m_new, ETHER_ALIGN); 1034 1035 r = c->sk_desc; 1036 c->sk_mbuf = m_new; 1037 r->sk_data_lo = vtophys(mtod(m_new, caddr_t)); 1038 r->sk_ctl = m_new->m_len | SK_RXSTAT; 1039 1040 return(0); 1041 } 1042 1043 /* 1044 * Allocate jumbo buffer storage. The SysKonnect adapters support 1045 * "jumbograms" (9K frames), although SysKonnect doesn't currently 1046 * use them in their drivers. In order for us to use them, we need 1047 * large 9K receive buffers, however standard mbuf clusters are only 1048 * 2048 bytes in size. Consequently, we need to allocate and manage 1049 * our own jumbo buffer pool. Fortunately, this does not require an 1050 * excessive amount of additional code. 1051 */ 1052 static int 1053 sk_alloc_jumbo_mem(sc_if) 1054 struct sk_if_softc *sc_if; 1055 { 1056 caddr_t ptr; 1057 register int i; 1058 struct sk_jpool_entry *entry; 1059 1060 /* Grab a big chunk o' storage. */ 1061 sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF, 1062 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); 1063 1064 if (sc_if->sk_cdata.sk_jumbo_buf == NULL) { 1065 printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit); 1066 return(ENOBUFS); 1067 } 1068 1069 mtx_init(&sc_if->sk_jlist_mtx, "sk_jlist_mtx", NULL, MTX_DEF); 1070 1071 SLIST_INIT(&sc_if->sk_jfree_listhead); 1072 SLIST_INIT(&sc_if->sk_jinuse_listhead); 1073 1074 /* 1075 * Now divide it up into 9K pieces and save the addresses 1076 * in an array. 1077 */ 1078 ptr = sc_if->sk_cdata.sk_jumbo_buf; 1079 for (i = 0; i < SK_JSLOTS; i++) { 1080 sc_if->sk_cdata.sk_jslots[i] = ptr; 1081 ptr += SK_JLEN; 1082 entry = malloc(sizeof(struct sk_jpool_entry), 1083 M_DEVBUF, M_NOWAIT); 1084 if (entry == NULL) { 1085 sk_free_jumbo_mem(sc_if); 1086 sc_if->sk_cdata.sk_jumbo_buf = NULL; 1087 printf("sk%d: no memory for jumbo " 1088 "buffer queue!\n", sc_if->sk_unit); 1089 return(ENOBUFS); 1090 } 1091 entry->slot = i; 1092 SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, 1093 entry, jpool_entries); 1094 } 1095 1096 return(0); 1097 } 1098 1099 static void 1100 sk_free_jumbo_mem(sc_if) 1101 struct sk_if_softc *sc_if; 1102 { 1103 struct sk_jpool_entry *entry; 1104 1105 SK_JLIST_LOCK(sc_if); 1106 1107 /* We cannot release external mbuf storage while in use. */ 1108 if (!SLIST_EMPTY(&sc_if->sk_jinuse_listhead)) { 1109 printf("sk%d: will leak jumbo buffer memory!\n", sc_if->sk_unit); 1110 SK_JLIST_UNLOCK(sc_if); 1111 return; 1112 } 1113 1114 while (!SLIST_EMPTY(&sc_if->sk_jfree_listhead)) { 1115 entry = SLIST_FIRST(&sc_if->sk_jfree_listhead); 1116 SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries); 1117 free(entry, M_DEVBUF); 1118 } 1119 1120 SK_JLIST_UNLOCK(sc_if); 1121 1122 mtx_destroy(&sc_if->sk_jlist_mtx); 1123 1124 contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF); 1125 1126 return; 1127 } 1128 1129 /* 1130 * Allocate a jumbo buffer. 1131 */ 1132 static void * 1133 sk_jalloc(sc_if) 1134 struct sk_if_softc *sc_if; 1135 { 1136 struct sk_jpool_entry *entry; 1137 1138 SK_JLIST_LOCK(sc_if); 1139 1140 entry = SLIST_FIRST(&sc_if->sk_jfree_listhead); 1141 1142 if (entry == NULL) { 1143 #ifdef SK_VERBOSE 1144 printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit); 1145 #endif 1146 SK_JLIST_UNLOCK(sc_if); 1147 return(NULL); 1148 } 1149 1150 SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries); 1151 SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries); 1152 1153 SK_JLIST_UNLOCK(sc_if); 1154 1155 return(sc_if->sk_cdata.sk_jslots[entry->slot]); 1156 } 1157 1158 /* 1159 * Release a jumbo buffer. 1160 */ 1161 static void 1162 sk_jfree(buf, args) 1163 void *buf; 1164 void *args; 1165 { 1166 struct sk_if_softc *sc_if; 1167 int i; 1168 struct sk_jpool_entry *entry; 1169 1170 /* Extract the softc struct pointer. */ 1171 sc_if = (struct sk_if_softc *)args; 1172 if (sc_if == NULL) 1173 panic("sk_jfree: didn't get softc pointer!"); 1174 1175 SK_JLIST_LOCK(sc_if); 1176 1177 /* calculate the slot this buffer belongs to */ 1178 i = ((vm_offset_t)buf 1179 - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN; 1180 1181 if ((i < 0) || (i >= SK_JSLOTS)) 1182 panic("sk_jfree: asked to free buffer that we don't manage!"); 1183 1184 entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead); 1185 if (entry == NULL) 1186 panic("sk_jfree: buffer not in use!"); 1187 entry->slot = i; 1188 SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries); 1189 SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries); 1190 if (SLIST_EMPTY(&sc_if->sk_jinuse_listhead)) 1191 wakeup(sc_if); 1192 1193 SK_JLIST_UNLOCK(sc_if); 1194 return; 1195 } 1196 1197 /* 1198 * Set media options. 1199 */ 1200 static int 1201 sk_ifmedia_upd(ifp) 1202 struct ifnet *ifp; 1203 { 1204 struct sk_if_softc *sc_if = ifp->if_softc; 1205 struct mii_data *mii; 1206 1207 mii = device_get_softc(sc_if->sk_miibus); 1208 sk_init(sc_if); 1209 mii_mediachg(mii); 1210 1211 return(0); 1212 } 1213 1214 /* 1215 * Report current media status. 1216 */ 1217 static void 1218 sk_ifmedia_sts(ifp, ifmr) 1219 struct ifnet *ifp; 1220 struct ifmediareq *ifmr; 1221 { 1222 struct sk_if_softc *sc_if; 1223 struct mii_data *mii; 1224 1225 sc_if = ifp->if_softc; 1226 mii = device_get_softc(sc_if->sk_miibus); 1227 1228 mii_pollstat(mii); 1229 ifmr->ifm_active = mii->mii_media_active; 1230 ifmr->ifm_status = mii->mii_media_status; 1231 1232 return; 1233 } 1234 1235 static int 1236 sk_ioctl(ifp, command, data) 1237 struct ifnet *ifp; 1238 u_long command; 1239 caddr_t data; 1240 { 1241 struct sk_if_softc *sc_if = ifp->if_softc; 1242 struct ifreq *ifr = (struct ifreq *) data; 1243 int error = 0; 1244 struct mii_data *mii; 1245 1246 switch(command) { 1247 case SIOCSIFMTU: 1248 SK_IF_LOCK(sc_if); 1249 if (ifr->ifr_mtu > SK_JUMBO_MTU) 1250 error = EINVAL; 1251 else { 1252 ifp->if_mtu = ifr->ifr_mtu; 1253 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1254 sk_init_locked(sc_if); 1255 } 1256 SK_IF_UNLOCK(sc_if); 1257 break; 1258 case SIOCSIFFLAGS: 1259 SK_IF_LOCK(sc_if); 1260 if (ifp->if_flags & IFF_UP) { 1261 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1262 if ((ifp->if_flags ^ sc_if->sk_if_flags) 1263 & IFF_PROMISC) { 1264 sk_setpromisc(sc_if); 1265 sk_setmulti(sc_if); 1266 } 1267 } else 1268 sk_init_locked(sc_if); 1269 } else { 1270 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1271 sk_stop(sc_if); 1272 } 1273 sc_if->sk_if_flags = ifp->if_flags; 1274 SK_IF_UNLOCK(sc_if); 1275 error = 0; 1276 break; 1277 case SIOCADDMULTI: 1278 case SIOCDELMULTI: 1279 SK_IF_LOCK(sc_if); 1280 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1281 sk_setmulti(sc_if); 1282 error = 0; 1283 } 1284 SK_IF_UNLOCK(sc_if); 1285 break; 1286 case SIOCGIFMEDIA: 1287 case SIOCSIFMEDIA: 1288 mii = device_get_softc(sc_if->sk_miibus); 1289 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 1290 break; 1291 default: 1292 error = ether_ioctl(ifp, command, data); 1293 break; 1294 } 1295 1296 return(error); 1297 } 1298 1299 /* 1300 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device 1301 * IDs against our list and return a device name if we find a match. 1302 */ 1303 static int 1304 skc_probe(dev) 1305 device_t dev; 1306 { 1307 struct sk_softc *sc; 1308 struct sk_type *t = sk_devs; 1309 1310 sc = device_get_softc(dev); 1311 1312 while(t->sk_name != NULL) { 1313 if ((pci_get_vendor(dev) == t->sk_vid) && 1314 (pci_get_device(dev) == t->sk_did)) { 1315 /* 1316 * Only attach to rev. 2 of the Linksys EG1032 adapter. 1317 * Rev. 3 is supported by re(4). 1318 */ 1319 if ((t->sk_vid == VENDORID_LINKSYS) && 1320 (t->sk_did == DEVICEID_LINKSYS_EG1032) && 1321 (pci_get_subdevice(dev) != 1322 SUBDEVICEID_LINKSYS_EG1032_REV2)) { 1323 t++; 1324 continue; 1325 } 1326 device_set_desc(dev, t->sk_name); 1327 return (BUS_PROBE_DEFAULT); 1328 } 1329 t++; 1330 } 1331 1332 return(ENXIO); 1333 } 1334 1335 /* 1336 * Force the GEnesis into reset, then bring it out of reset. 1337 */ 1338 static void 1339 sk_reset(sc) 1340 struct sk_softc *sc; 1341 { 1342 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET); 1343 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET); 1344 if (SK_YUKON_FAMILY(sc->sk_type)) 1345 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET); 1346 1347 DELAY(1000); 1348 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET); 1349 DELAY(2); 1350 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET); 1351 if (SK_YUKON_FAMILY(sc->sk_type)) 1352 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR); 1353 1354 if (sc->sk_type == SK_GENESIS) { 1355 /* Configure packet arbiter */ 1356 sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET); 1357 sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT); 1358 sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT); 1359 sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT); 1360 sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT); 1361 } 1362 1363 /* Enable RAM interface */ 1364 sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET); 1365 1366 /* 1367 * Configure interrupt moderation. The moderation timer 1368 * defers interrupts specified in the interrupt moderation 1369 * timer mask based on the timeout specified in the interrupt 1370 * moderation timer init register. Each bit in the timer 1371 * register represents 18.825ns, so to specify a timeout in 1372 * microseconds, we have to multiply by 54. 1373 */ 1374 if (bootverbose) 1375 printf("skc%d: interrupt moderation is %d us\n", 1376 sc->sk_unit, sc->sk_int_mod); 1377 sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod)); 1378 sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF| 1379 SK_ISR_RX1_EOF|SK_ISR_RX2_EOF); 1380 sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START); 1381 1382 return; 1383 } 1384 1385 static int 1386 sk_probe(dev) 1387 device_t dev; 1388 { 1389 struct sk_softc *sc; 1390 1391 sc = device_get_softc(device_get_parent(dev)); 1392 1393 /* 1394 * Not much to do here. We always know there will be 1395 * at least one XMAC present, and if there are two, 1396 * skc_attach() will create a second device instance 1397 * for us. 1398 */ 1399 switch (sc->sk_type) { 1400 case SK_GENESIS: 1401 device_set_desc(dev, "XaQti Corp. XMAC II"); 1402 break; 1403 case SK_YUKON: 1404 case SK_YUKON_LITE: 1405 case SK_YUKON_LP: 1406 device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon"); 1407 break; 1408 } 1409 1410 return (BUS_PROBE_DEFAULT); 1411 } 1412 1413 /* 1414 * Each XMAC chip is attached as a separate logical IP interface. 1415 * Single port cards will have only one logical interface of course. 1416 */ 1417 static int 1418 sk_attach(dev) 1419 device_t dev; 1420 { 1421 struct sk_softc *sc; 1422 struct sk_if_softc *sc_if; 1423 struct ifnet *ifp; 1424 int i, port, error; 1425 u_char eaddr[6]; 1426 1427 if (dev == NULL) 1428 return(EINVAL); 1429 1430 error = 0; 1431 sc_if = device_get_softc(dev); 1432 sc = device_get_softc(device_get_parent(dev)); 1433 port = *(int *)device_get_ivars(dev); 1434 1435 sc_if->sk_dev = dev; 1436 sc_if->sk_unit = device_get_unit(dev); 1437 sc_if->sk_port = port; 1438 sc_if->sk_softc = sc; 1439 sc->sk_if[port] = sc_if; 1440 if (port == SK_PORT_A) 1441 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0; 1442 if (port == SK_PORT_B) 1443 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1; 1444 1445 /* Allocate the descriptor queues. */ 1446 sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF, 1447 M_NOWAIT, M_ZERO, 0xffffffff, PAGE_SIZE, 0); 1448 1449 if (sc_if->sk_rdata == NULL) { 1450 printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit); 1451 error = ENOMEM; 1452 goto fail; 1453 } 1454 1455 /* Try to allocate memory for jumbo buffers. */ 1456 if (sk_alloc_jumbo_mem(sc_if)) { 1457 printf("sk%d: jumbo buffer allocation failed\n", 1458 sc_if->sk_unit); 1459 error = ENOMEM; 1460 goto fail; 1461 } 1462 1463 ifp = sc_if->sk_ifp = if_alloc(IFT_ETHER); 1464 if (ifp == NULL) { 1465 printf("sk%d: can not if_alloc()\n", sc_if->sk_unit); 1466 error = ENOSPC; 1467 goto fail; 1468 } 1469 ifp->if_softc = sc_if; 1470 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1471 ifp->if_mtu = ETHERMTU; 1472 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1473 /* 1474 * The hardware should be ready for VLAN_MTU by default: 1475 * XMAC II has 0x8100 in VLAN Tag Level 1 register initially; 1476 * YU_SMR_MFL_VLAN is set by this driver in Yukon. 1477 */ 1478 ifp->if_capabilities = ifp->if_capenable = IFCAP_VLAN_MTU; 1479 ifp->if_ioctl = sk_ioctl; 1480 ifp->if_start = sk_start; 1481 ifp->if_watchdog = sk_watchdog; 1482 ifp->if_init = sk_init; 1483 ifp->if_baudrate = 1000000000; 1484 IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1); 1485 ifp->if_snd.ifq_drv_maxlen = SK_TX_RING_CNT - 1; 1486 IFQ_SET_READY(&ifp->if_snd); 1487 1488 callout_handle_init(&sc_if->sk_tick_ch); 1489 1490 /* 1491 * Get station address for this interface. Note that 1492 * dual port cards actually come with three station 1493 * addresses: one for each port, plus an extra. The 1494 * extra one is used by the SysKonnect driver software 1495 * as a 'virtual' station address for when both ports 1496 * are operating in failover mode. Currently we don't 1497 * use this extra address. 1498 */ 1499 SK_LOCK(sc); 1500 for (i = 0; i < ETHER_ADDR_LEN; i++) 1501 eaddr[i] = 1502 sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i); 1503 1504 /* 1505 * Set up RAM buffer addresses. The NIC will have a certain 1506 * amount of SRAM on it, somewhere between 512K and 2MB. We 1507 * need to divide this up a) between the transmitter and 1508 * receiver and b) between the two XMACs, if this is a 1509 * dual port NIC. Our algotithm is to divide up the memory 1510 * evenly so that everyone gets a fair share. 1511 */ 1512 if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) { 1513 u_int32_t chunk, val; 1514 1515 chunk = sc->sk_ramsize / 2; 1516 val = sc->sk_rboff / sizeof(u_int64_t); 1517 sc_if->sk_rx_ramstart = val; 1518 val += (chunk / sizeof(u_int64_t)); 1519 sc_if->sk_rx_ramend = val - 1; 1520 sc_if->sk_tx_ramstart = val; 1521 val += (chunk / sizeof(u_int64_t)); 1522 sc_if->sk_tx_ramend = val - 1; 1523 } else { 1524 u_int32_t chunk, val; 1525 1526 chunk = sc->sk_ramsize / 4; 1527 val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) / 1528 sizeof(u_int64_t); 1529 sc_if->sk_rx_ramstart = val; 1530 val += (chunk / sizeof(u_int64_t)); 1531 sc_if->sk_rx_ramend = val - 1; 1532 sc_if->sk_tx_ramstart = val; 1533 val += (chunk / sizeof(u_int64_t)); 1534 sc_if->sk_tx_ramend = val - 1; 1535 } 1536 1537 /* Read and save PHY type and set PHY address */ 1538 sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF; 1539 switch(sc_if->sk_phytype) { 1540 case SK_PHYTYPE_XMAC: 1541 sc_if->sk_phyaddr = SK_PHYADDR_XMAC; 1542 break; 1543 case SK_PHYTYPE_BCOM: 1544 sc_if->sk_phyaddr = SK_PHYADDR_BCOM; 1545 break; 1546 case SK_PHYTYPE_MARV_COPPER: 1547 sc_if->sk_phyaddr = SK_PHYADDR_MARV; 1548 break; 1549 default: 1550 printf("skc%d: unsupported PHY type: %d\n", 1551 sc->sk_unit, sc_if->sk_phytype); 1552 error = ENODEV; 1553 SK_UNLOCK(sc); 1554 goto fail; 1555 } 1556 1557 1558 /* 1559 * Call MI attach routine. Can't hold locks when calling into ether_*. 1560 */ 1561 SK_UNLOCK(sc); 1562 ether_ifattach(ifp, eaddr); 1563 SK_LOCK(sc); 1564 1565 /* 1566 * Do miibus setup. 1567 */ 1568 switch (sc->sk_type) { 1569 case SK_GENESIS: 1570 sk_init_xmac(sc_if); 1571 break; 1572 case SK_YUKON: 1573 case SK_YUKON_LITE: 1574 case SK_YUKON_LP: 1575 sk_init_yukon(sc_if); 1576 break; 1577 } 1578 1579 SK_UNLOCK(sc); 1580 if (mii_phy_probe(dev, &sc_if->sk_miibus, 1581 sk_ifmedia_upd, sk_ifmedia_sts)) { 1582 printf("skc%d: no PHY found!\n", sc_if->sk_unit); 1583 ether_ifdetach(ifp); 1584 error = ENXIO; 1585 goto fail; 1586 } 1587 1588 fail: 1589 if (error) { 1590 /* Access should be ok even though lock has been dropped */ 1591 sc->sk_if[port] = NULL; 1592 sk_detach(dev); 1593 } 1594 1595 return(error); 1596 } 1597 1598 /* 1599 * Attach the interface. Allocate softc structures, do ifmedia 1600 * setup and ethernet/BPF attach. 1601 */ 1602 static int 1603 skc_attach(dev) 1604 device_t dev; 1605 { 1606 struct sk_softc *sc; 1607 int unit, error = 0, rid, *port; 1608 uint8_t skrs; 1609 char *pname, *revstr; 1610 1611 sc = device_get_softc(dev); 1612 unit = device_get_unit(dev); 1613 1614 mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 1615 MTX_DEF | MTX_RECURSE); 1616 /* 1617 * Map control/status registers. 1618 */ 1619 pci_enable_busmaster(dev); 1620 1621 rid = SK_RID; 1622 sc->sk_res = bus_alloc_resource_any(dev, SK_RES, &rid, RF_ACTIVE); 1623 1624 if (sc->sk_res == NULL) { 1625 printf("sk%d: couldn't map ports/memory\n", unit); 1626 error = ENXIO; 1627 goto fail; 1628 } 1629 1630 sc->sk_btag = rman_get_bustag(sc->sk_res); 1631 sc->sk_bhandle = rman_get_bushandle(sc->sk_res); 1632 1633 sc->sk_type = sk_win_read_1(sc, SK_CHIPVER); 1634 sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4) & 0xf; 1635 1636 /* Bail out if chip is not recognized. */ 1637 if (sc->sk_type != SK_GENESIS && !SK_YUKON_FAMILY(sc->sk_type)) { 1638 printf("skc%d: unknown device: chipver=%02x, rev=%x\n", 1639 unit, sc->sk_type, sc->sk_rev); 1640 error = ENXIO; 1641 goto fail; 1642 } 1643 1644 /* Allocate interrupt */ 1645 rid = 0; 1646 sc->sk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1647 RF_SHAREABLE | RF_ACTIVE); 1648 1649 if (sc->sk_irq == NULL) { 1650 printf("skc%d: couldn't map interrupt\n", unit); 1651 error = ENXIO; 1652 goto fail; 1653 } 1654 1655 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 1656 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 1657 OID_AUTO, "int_mod", CTLTYPE_INT|CTLFLAG_RW, 1658 &sc->sk_int_mod, 0, sysctl_hw_sk_int_mod, "I", 1659 "SK interrupt moderation"); 1660 1661 /* Pull in device tunables. */ 1662 sc->sk_int_mod = SK_IM_DEFAULT; 1663 error = resource_int_value(device_get_name(dev), unit, 1664 "int_mod", &sc->sk_int_mod); 1665 if (error == 0) { 1666 if (sc->sk_int_mod < SK_IM_MIN || 1667 sc->sk_int_mod > SK_IM_MAX) { 1668 printf("skc%d: int_mod value out of range; " 1669 "using default: %d\n", unit, SK_IM_DEFAULT); 1670 sc->sk_int_mod = SK_IM_DEFAULT; 1671 } 1672 } 1673 1674 /* Reset the adapter. */ 1675 sk_reset(sc); 1676 1677 sc->sk_unit = unit; 1678 1679 /* Read and save vital product data from EEPROM. */ 1680 sk_vpd_read(sc); 1681 1682 skrs = sk_win_read_1(sc, SK_EPROM0); 1683 if (sc->sk_type == SK_GENESIS) { 1684 /* Read and save RAM size and RAMbuffer offset */ 1685 switch(skrs) { 1686 case SK_RAMSIZE_512K_64: 1687 sc->sk_ramsize = 0x80000; 1688 sc->sk_rboff = SK_RBOFF_0; 1689 break; 1690 case SK_RAMSIZE_1024K_64: 1691 sc->sk_ramsize = 0x100000; 1692 sc->sk_rboff = SK_RBOFF_80000; 1693 break; 1694 case SK_RAMSIZE_1024K_128: 1695 sc->sk_ramsize = 0x100000; 1696 sc->sk_rboff = SK_RBOFF_0; 1697 break; 1698 case SK_RAMSIZE_2048K_128: 1699 sc->sk_ramsize = 0x200000; 1700 sc->sk_rboff = SK_RBOFF_0; 1701 break; 1702 default: 1703 printf("skc%d: unknown ram size: %d\n", 1704 sc->sk_unit, skrs); 1705 error = ENXIO; 1706 goto fail; 1707 } 1708 } else { /* SK_YUKON_FAMILY */ 1709 if (skrs == 0x00) 1710 sc->sk_ramsize = 0x20000; 1711 else 1712 sc->sk_ramsize = skrs * (1<<12); 1713 sc->sk_rboff = SK_RBOFF_0; 1714 } 1715 1716 /* Read and save physical media type */ 1717 switch(sk_win_read_1(sc, SK_PMDTYPE)) { 1718 case SK_PMD_1000BASESX: 1719 sc->sk_pmd = IFM_1000_SX; 1720 break; 1721 case SK_PMD_1000BASELX: 1722 sc->sk_pmd = IFM_1000_LX; 1723 break; 1724 case SK_PMD_1000BASECX: 1725 sc->sk_pmd = IFM_1000_CX; 1726 break; 1727 case SK_PMD_1000BASETX: 1728 sc->sk_pmd = IFM_1000_T; 1729 break; 1730 default: 1731 printf("skc%d: unknown media type: 0x%x\n", 1732 sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE)); 1733 error = ENXIO; 1734 goto fail; 1735 } 1736 1737 /* Determine whether to name it with VPD PN or just make it up. 1738 * Marvell Yukon VPD PN seems to freqently be bogus. */ 1739 switch (pci_get_device(dev)) { 1740 case DEVICEID_SK_V1: 1741 case DEVICEID_BELKIN_5005: 1742 case DEVICEID_3COM_3C940: 1743 case DEVICEID_LINKSYS_EG1032: 1744 case DEVICEID_DLINK_DGE530T: 1745 /* Stay with VPD PN. */ 1746 pname = sc->sk_vpd_prodname; 1747 break; 1748 case DEVICEID_SK_V2: 1749 /* YUKON VPD PN might bear no resemblance to reality. */ 1750 switch (sc->sk_type) { 1751 case SK_GENESIS: 1752 /* Stay with VPD PN. */ 1753 pname = sc->sk_vpd_prodname; 1754 break; 1755 case SK_YUKON: 1756 pname = "Marvell Yukon Gigabit Ethernet"; 1757 break; 1758 case SK_YUKON_LITE: 1759 pname = "Marvell Yukon Lite Gigabit Ethernet"; 1760 break; 1761 case SK_YUKON_LP: 1762 pname = "Marvell Yukon LP Gigabit Ethernet"; 1763 break; 1764 default: 1765 pname = "Marvell Yukon (Unknown) Gigabit Ethernet"; 1766 break; 1767 } 1768 1769 /* Yukon Lite Rev. A0 needs special test. */ 1770 if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) { 1771 u_int32_t far; 1772 u_int8_t testbyte; 1773 1774 /* Save flash address register before testing. */ 1775 far = sk_win_read_4(sc, SK_EP_ADDR); 1776 1777 sk_win_write_1(sc, SK_EP_ADDR+0x03, 0xff); 1778 testbyte = sk_win_read_1(sc, SK_EP_ADDR+0x03); 1779 1780 if (testbyte != 0x00) { 1781 /* Yukon Lite Rev. A0 detected. */ 1782 sc->sk_type = SK_YUKON_LITE; 1783 sc->sk_rev = SK_YUKON_LITE_REV_A0; 1784 /* Restore flash address register. */ 1785 sk_win_write_4(sc, SK_EP_ADDR, far); 1786 } 1787 } 1788 break; 1789 default: 1790 device_printf(dev, "unknown device: vendor=%04x, device=%04x, " 1791 "chipver=%02x, rev=%x\n", 1792 pci_get_vendor(dev), pci_get_device(dev), 1793 sc->sk_type, sc->sk_rev); 1794 error = ENXIO; 1795 goto fail; 1796 } 1797 1798 if (sc->sk_type == SK_YUKON_LITE) { 1799 switch (sc->sk_rev) { 1800 case SK_YUKON_LITE_REV_A0: 1801 revstr = "A0"; 1802 break; 1803 case SK_YUKON_LITE_REV_A1: 1804 revstr = "A1"; 1805 break; 1806 case SK_YUKON_LITE_REV_A3: 1807 revstr = "A3"; 1808 break; 1809 default: 1810 revstr = ""; 1811 break; 1812 } 1813 } else { 1814 revstr = ""; 1815 } 1816 1817 /* Announce the product name and more VPD data if there. */ 1818 device_printf(dev, "%s rev. %s(0x%x)\n", 1819 pname != NULL ? pname : "<unknown>", revstr, sc->sk_rev); 1820 1821 if (bootverbose) { 1822 if (sc->sk_vpd_readonly != NULL && 1823 sc->sk_vpd_readonly_len != 0) { 1824 char buf[256]; 1825 char *dp = sc->sk_vpd_readonly; 1826 uint16_t l, len = sc->sk_vpd_readonly_len; 1827 1828 while (len >= 3) { 1829 if ((*dp == 'P' && *(dp+1) == 'N') || 1830 (*dp == 'E' && *(dp+1) == 'C') || 1831 (*dp == 'M' && *(dp+1) == 'N') || 1832 (*dp == 'S' && *(dp+1) == 'N')) { 1833 l = 0; 1834 while (l < *(dp+2)) { 1835 buf[l] = *(dp+3+l); 1836 ++l; 1837 } 1838 buf[l] = '\0'; 1839 device_printf(dev, "%c%c: %s\n", 1840 *dp, *(dp+1), buf); 1841 len -= (3 + l); 1842 dp += (3 + l); 1843 } else { 1844 len -= (3 + *(dp+2)); 1845 dp += (3 + *(dp+2)); 1846 } 1847 } 1848 } 1849 device_printf(dev, "chip ver = 0x%02x\n", sc->sk_type); 1850 device_printf(dev, "chip rev = 0x%02x\n", sc->sk_rev); 1851 device_printf(dev, "SK_EPROM0 = 0x%02x\n", skrs); 1852 device_printf(dev, "SRAM size = 0x%06x\n", sc->sk_ramsize); 1853 } 1854 1855 sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1); 1856 if (sc->sk_devs[SK_PORT_A] == NULL) { 1857 device_printf(dev, "failed to add child for PORT_A\n"); 1858 error = ENXIO; 1859 goto fail; 1860 } 1861 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT); 1862 if (port == NULL) { 1863 device_printf(dev, "failed to allocate memory for " 1864 "ivars of PORT_A\n"); 1865 error = ENXIO; 1866 goto fail; 1867 } 1868 *port = SK_PORT_A; 1869 device_set_ivars(sc->sk_devs[SK_PORT_A], port); 1870 1871 if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) { 1872 sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1); 1873 if (sc->sk_devs[SK_PORT_B] == NULL) { 1874 device_printf(dev, "failed to add child for PORT_B\n"); 1875 error = ENXIO; 1876 goto fail; 1877 } 1878 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT); 1879 if (port == NULL) { 1880 device_printf(dev, "failed to allocate memory for " 1881 "ivars of PORT_B\n"); 1882 error = ENXIO; 1883 goto fail; 1884 } 1885 *port = SK_PORT_B; 1886 device_set_ivars(sc->sk_devs[SK_PORT_B], port); 1887 } 1888 1889 /* Turn on the 'driver is loaded' LED. */ 1890 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON); 1891 1892 error = bus_generic_attach(dev); 1893 if (error) { 1894 device_printf(dev, "failed to attach port(s)\n"); 1895 goto fail; 1896 } 1897 1898 /* Hook interrupt last to avoid having to lock softc */ 1899 error = bus_setup_intr(dev, sc->sk_irq, INTR_TYPE_NET|INTR_MPSAFE, 1900 sk_intr, sc, &sc->sk_intrhand); 1901 1902 if (error) { 1903 printf("skc%d: couldn't set up irq\n", unit); 1904 goto fail; 1905 } 1906 1907 fail: 1908 if (error) 1909 skc_detach(dev); 1910 1911 return(error); 1912 } 1913 1914 /* 1915 * Shutdown hardware and free up resources. This can be called any 1916 * time after the mutex has been initialized. It is called in both 1917 * the error case in attach and the normal detach case so it needs 1918 * to be careful about only freeing resources that have actually been 1919 * allocated. 1920 */ 1921 static int 1922 sk_detach(dev) 1923 device_t dev; 1924 { 1925 struct sk_if_softc *sc_if; 1926 struct ifnet *ifp; 1927 1928 sc_if = device_get_softc(dev); 1929 KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx), 1930 ("sk mutex not initialized in sk_detach")); 1931 SK_IF_LOCK(sc_if); 1932 1933 ifp = sc_if->sk_ifp; 1934 /* These should only be active if attach_xmac succeeded */ 1935 if (device_is_attached(dev)) { 1936 sk_stop(sc_if); 1937 /* Can't hold locks while calling detach */ 1938 SK_IF_UNLOCK(sc_if); 1939 ether_ifdetach(ifp); 1940 SK_IF_LOCK(sc_if); 1941 } 1942 if (ifp) 1943 if_free(ifp); 1944 /* 1945 * We're generally called from skc_detach() which is using 1946 * device_delete_child() to get to here. It's already trashed 1947 * miibus for us, so don't do it here or we'll panic. 1948 */ 1949 /* 1950 if (sc_if->sk_miibus != NULL) 1951 device_delete_child(dev, sc_if->sk_miibus); 1952 */ 1953 bus_generic_detach(dev); 1954 if (sc_if->sk_cdata.sk_jumbo_buf != NULL) 1955 sk_free_jumbo_mem(sc_if); 1956 if (sc_if->sk_rdata != NULL) { 1957 contigfree(sc_if->sk_rdata, sizeof(struct sk_ring_data), 1958 M_DEVBUF); 1959 } 1960 SK_IF_UNLOCK(sc_if); 1961 1962 return(0); 1963 } 1964 1965 static int 1966 skc_detach(dev) 1967 device_t dev; 1968 { 1969 struct sk_softc *sc; 1970 1971 sc = device_get_softc(dev); 1972 KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized")); 1973 1974 if (device_is_alive(dev)) { 1975 if (sc->sk_devs[SK_PORT_A] != NULL) { 1976 free(device_get_ivars(sc->sk_devs[SK_PORT_A]), M_DEVBUF); 1977 device_delete_child(dev, sc->sk_devs[SK_PORT_A]); 1978 } 1979 if (sc->sk_devs[SK_PORT_B] != NULL) { 1980 free(device_get_ivars(sc->sk_devs[SK_PORT_B]), M_DEVBUF); 1981 device_delete_child(dev, sc->sk_devs[SK_PORT_B]); 1982 } 1983 bus_generic_detach(dev); 1984 } 1985 1986 if (sc->sk_vpd_prodname != NULL) 1987 free(sc->sk_vpd_prodname, M_DEVBUF); 1988 if (sc->sk_vpd_readonly != NULL) 1989 free(sc->sk_vpd_readonly, M_DEVBUF); 1990 1991 if (sc->sk_intrhand) 1992 bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand); 1993 if (sc->sk_irq) 1994 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq); 1995 if (sc->sk_res) 1996 bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res); 1997 1998 mtx_destroy(&sc->sk_mtx); 1999 2000 return(0); 2001 } 2002 2003 static int 2004 sk_encap(sc_if, m_head, txidx) 2005 struct sk_if_softc *sc_if; 2006 struct mbuf *m_head; 2007 u_int32_t *txidx; 2008 { 2009 struct sk_tx_desc *f = NULL; 2010 struct mbuf *m; 2011 u_int32_t frag, cur, cnt = 0; 2012 2013 SK_IF_LOCK_ASSERT(sc_if); 2014 2015 m = m_head; 2016 cur = frag = *txidx; 2017 2018 /* 2019 * Start packing the mbufs in this chain into 2020 * the fragment pointers. Stop when we run out 2021 * of fragments or hit the end of the mbuf chain. 2022 */ 2023 for (m = m_head; m != NULL; m = m->m_next) { 2024 if (m->m_len != 0) { 2025 if ((SK_TX_RING_CNT - 2026 (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2) 2027 return(ENOBUFS); 2028 f = &sc_if->sk_rdata->sk_tx_ring[frag]; 2029 f->sk_data_lo = vtophys(mtod(m, vm_offset_t)); 2030 f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT; 2031 if (cnt == 0) 2032 f->sk_ctl |= SK_TXCTL_FIRSTFRAG; 2033 else 2034 f->sk_ctl |= SK_TXCTL_OWN; 2035 cur = frag; 2036 SK_INC(frag, SK_TX_RING_CNT); 2037 cnt++; 2038 } 2039 } 2040 2041 if (m != NULL) 2042 return(ENOBUFS); 2043 2044 sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |= 2045 SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR; 2046 sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head; 2047 sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN; 2048 sc_if->sk_cdata.sk_tx_cnt += cnt; 2049 2050 *txidx = frag; 2051 2052 return(0); 2053 } 2054 2055 static void 2056 sk_start(ifp) 2057 struct ifnet *ifp; 2058 { 2059 struct sk_if_softc *sc_if; 2060 2061 sc_if = ifp->if_softc; 2062 2063 SK_IF_LOCK(sc_if); 2064 sk_start_locked(ifp); 2065 SK_IF_UNLOCK(sc_if); 2066 2067 return; 2068 } 2069 2070 static void 2071 sk_start_locked(ifp) 2072 struct ifnet *ifp; 2073 { 2074 struct sk_softc *sc; 2075 struct sk_if_softc *sc_if; 2076 struct mbuf *m_head = NULL; 2077 u_int32_t idx; 2078 2079 sc_if = ifp->if_softc; 2080 sc = sc_if->sk_softc; 2081 2082 SK_IF_LOCK_ASSERT(sc_if); 2083 2084 idx = sc_if->sk_cdata.sk_tx_prod; 2085 2086 while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) { 2087 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 2088 if (m_head == NULL) 2089 break; 2090 2091 /* 2092 * Pack the data into the transmit ring. If we 2093 * don't have room, set the OACTIVE flag and wait 2094 * for the NIC to drain the ring. 2095 */ 2096 if (sk_encap(sc_if, m_head, &idx)) { 2097 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 2098 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2099 break; 2100 } 2101 2102 /* 2103 * If there's a BPF listener, bounce a copy of this frame 2104 * to him. 2105 */ 2106 BPF_MTAP(ifp, m_head); 2107 } 2108 2109 /* Transmit */ 2110 if (idx != sc_if->sk_cdata.sk_tx_prod) { 2111 sc_if->sk_cdata.sk_tx_prod = idx; 2112 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START); 2113 2114 /* Set a timeout in case the chip goes out to lunch. */ 2115 ifp->if_timer = 5; 2116 } 2117 2118 return; 2119 } 2120 2121 2122 static void 2123 sk_watchdog(ifp) 2124 struct ifnet *ifp; 2125 { 2126 struct sk_if_softc *sc_if; 2127 2128 sc_if = ifp->if_softc; 2129 2130 printf("sk%d: watchdog timeout\n", sc_if->sk_unit); 2131 SK_IF_LOCK(sc_if); 2132 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2133 sk_init_locked(sc_if); 2134 SK_IF_UNLOCK(sc_if); 2135 2136 return; 2137 } 2138 2139 static void 2140 skc_shutdown(dev) 2141 device_t dev; 2142 { 2143 struct sk_softc *sc; 2144 2145 sc = device_get_softc(dev); 2146 SK_LOCK(sc); 2147 2148 /* Turn off the 'driver is loaded' LED. */ 2149 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF); 2150 2151 /* 2152 * Reset the GEnesis controller. Doing this should also 2153 * assert the resets on the attached XMAC(s). 2154 */ 2155 sk_reset(sc); 2156 SK_UNLOCK(sc); 2157 2158 return; 2159 } 2160 2161 static void 2162 sk_rxeof(sc_if) 2163 struct sk_if_softc *sc_if; 2164 { 2165 struct sk_softc *sc; 2166 struct mbuf *m; 2167 struct ifnet *ifp; 2168 struct sk_chain *cur_rx; 2169 int total_len = 0; 2170 int i; 2171 u_int32_t rxstat; 2172 2173 sc = sc_if->sk_softc; 2174 ifp = sc_if->sk_ifp; 2175 i = sc_if->sk_cdata.sk_rx_prod; 2176 cur_rx = &sc_if->sk_cdata.sk_rx_chain[i]; 2177 2178 SK_LOCK_ASSERT(sc); 2179 2180 while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) { 2181 2182 cur_rx = &sc_if->sk_cdata.sk_rx_chain[i]; 2183 rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat; 2184 m = cur_rx->sk_mbuf; 2185 cur_rx->sk_mbuf = NULL; 2186 total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl); 2187 SK_INC(i, SK_RX_RING_CNT); 2188 2189 if (rxstat & XM_RXSTAT_ERRFRAME) { 2190 ifp->if_ierrors++; 2191 sk_newbuf(sc_if, cur_rx, m); 2192 continue; 2193 } 2194 2195 /* 2196 * Try to allocate a new jumbo buffer. If that 2197 * fails, copy the packet to mbufs and put the 2198 * jumbo buffer back in the ring so it can be 2199 * re-used. If allocating mbufs fails, then we 2200 * have to drop the packet. 2201 */ 2202 if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) { 2203 struct mbuf *m0; 2204 m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, 2205 ifp, NULL); 2206 sk_newbuf(sc_if, cur_rx, m); 2207 if (m0 == NULL) { 2208 printf("sk%d: no receive buffers " 2209 "available -- packet dropped!\n", 2210 sc_if->sk_unit); 2211 ifp->if_ierrors++; 2212 continue; 2213 } 2214 m = m0; 2215 } else { 2216 m->m_pkthdr.rcvif = ifp; 2217 m->m_pkthdr.len = m->m_len = total_len; 2218 } 2219 2220 ifp->if_ipackets++; 2221 SK_UNLOCK(sc); 2222 (*ifp->if_input)(ifp, m); 2223 SK_LOCK(sc); 2224 } 2225 2226 sc_if->sk_cdata.sk_rx_prod = i; 2227 2228 return; 2229 } 2230 2231 static void 2232 sk_txeof(sc_if) 2233 struct sk_if_softc *sc_if; 2234 { 2235 struct sk_softc *sc; 2236 struct sk_tx_desc *cur_tx; 2237 struct ifnet *ifp; 2238 u_int32_t idx; 2239 2240 sc = sc_if->sk_softc; 2241 ifp = sc_if->sk_ifp; 2242 2243 /* 2244 * Go through our tx ring and free mbufs for those 2245 * frames that have been sent. 2246 */ 2247 idx = sc_if->sk_cdata.sk_tx_cons; 2248 while(idx != sc_if->sk_cdata.sk_tx_prod) { 2249 cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx]; 2250 if (cur_tx->sk_ctl & SK_TXCTL_OWN) 2251 break; 2252 if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG) 2253 ifp->if_opackets++; 2254 if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) { 2255 m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf); 2256 sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL; 2257 } 2258 sc_if->sk_cdata.sk_tx_cnt--; 2259 SK_INC(idx, SK_TX_RING_CNT); 2260 } 2261 2262 if (sc_if->sk_cdata.sk_tx_cnt == 0) { 2263 ifp->if_timer = 0; 2264 } else /* nudge chip to keep tx ring moving */ 2265 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START); 2266 2267 if (sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 2) 2268 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2269 2270 sc_if->sk_cdata.sk_tx_cons = idx; 2271 } 2272 2273 static void 2274 sk_tick(xsc_if) 2275 void *xsc_if; 2276 { 2277 struct sk_if_softc *sc_if; 2278 struct mii_data *mii; 2279 struct ifnet *ifp; 2280 int i; 2281 2282 sc_if = xsc_if; 2283 SK_IF_LOCK(sc_if); 2284 ifp = sc_if->sk_ifp; 2285 mii = device_get_softc(sc_if->sk_miibus); 2286 2287 if (!(ifp->if_flags & IFF_UP)) { 2288 SK_IF_UNLOCK(sc_if); 2289 return; 2290 } 2291 2292 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { 2293 sk_intr_bcom(sc_if); 2294 SK_IF_UNLOCK(sc_if); 2295 return; 2296 } 2297 2298 /* 2299 * According to SysKonnect, the correct way to verify that 2300 * the link has come back up is to poll bit 0 of the GPIO 2301 * register three times. This pin has the signal from the 2302 * link_sync pin connected to it; if we read the same link 2303 * state 3 times in a row, we know the link is up. 2304 */ 2305 for (i = 0; i < 3; i++) { 2306 if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET) 2307 break; 2308 } 2309 2310 if (i != 3) { 2311 sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz); 2312 SK_IF_UNLOCK(sc_if); 2313 return; 2314 } 2315 2316 /* Turn the GP0 interrupt back on. */ 2317 SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET); 2318 SK_XM_READ_2(sc_if, XM_ISR); 2319 mii_tick(mii); 2320 untimeout(sk_tick, sc_if, sc_if->sk_tick_ch); 2321 2322 SK_IF_UNLOCK(sc_if); 2323 return; 2324 } 2325 2326 static void 2327 sk_intr_bcom(sc_if) 2328 struct sk_if_softc *sc_if; 2329 { 2330 struct mii_data *mii; 2331 struct ifnet *ifp; 2332 int status; 2333 mii = device_get_softc(sc_if->sk_miibus); 2334 ifp = sc_if->sk_ifp; 2335 2336 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); 2337 2338 /* 2339 * Read the PHY interrupt register to make sure 2340 * we clear any pending interrupts. 2341 */ 2342 status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR); 2343 2344 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 2345 sk_init_xmac(sc_if); 2346 return; 2347 } 2348 2349 if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) { 2350 int lstat; 2351 lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 2352 BRGPHY_MII_AUXSTS); 2353 2354 if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) { 2355 mii_mediachg(mii); 2356 /* Turn off the link LED. */ 2357 SK_IF_WRITE_1(sc_if, 0, 2358 SK_LINKLED1_CTL, SK_LINKLED_OFF); 2359 sc_if->sk_link = 0; 2360 } else if (status & BRGPHY_ISR_LNK_CHG) { 2361 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, 2362 BRGPHY_MII_IMR, 0xFF00); 2363 mii_tick(mii); 2364 sc_if->sk_link = 1; 2365 /* Turn on the link LED. */ 2366 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, 2367 SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF| 2368 SK_LINKLED_BLINK_OFF); 2369 } else { 2370 mii_tick(mii); 2371 sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz); 2372 } 2373 } 2374 2375 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); 2376 2377 return; 2378 } 2379 2380 static void 2381 sk_intr_xmac(sc_if) 2382 struct sk_if_softc *sc_if; 2383 { 2384 struct sk_softc *sc; 2385 u_int16_t status; 2386 2387 sc = sc_if->sk_softc; 2388 status = SK_XM_READ_2(sc_if, XM_ISR); 2389 2390 /* 2391 * Link has gone down. Start MII tick timeout to 2392 * watch for link resync. 2393 */ 2394 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) { 2395 if (status & XM_ISR_GP0_SET) { 2396 SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET); 2397 sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz); 2398 } 2399 2400 if (status & XM_ISR_AUTONEG_DONE) { 2401 sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz); 2402 } 2403 } 2404 2405 if (status & XM_IMR_TX_UNDERRUN) 2406 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO); 2407 2408 if (status & XM_IMR_RX_OVERRUN) 2409 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO); 2410 2411 status = SK_XM_READ_2(sc_if, XM_ISR); 2412 2413 return; 2414 } 2415 2416 static void 2417 sk_intr_yukon(sc_if) 2418 struct sk_if_softc *sc_if; 2419 { 2420 int status; 2421 2422 status = SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR); 2423 2424 return; 2425 } 2426 2427 static void 2428 sk_intr(xsc) 2429 void *xsc; 2430 { 2431 struct sk_softc *sc = xsc; 2432 struct sk_if_softc *sc_if0 = NULL, *sc_if1 = NULL; 2433 struct ifnet *ifp0 = NULL, *ifp1 = NULL; 2434 u_int32_t status; 2435 2436 SK_LOCK(sc); 2437 2438 sc_if0 = sc->sk_if[SK_PORT_A]; 2439 sc_if1 = sc->sk_if[SK_PORT_B]; 2440 2441 if (sc_if0 != NULL) 2442 ifp0 = sc_if0->sk_ifp; 2443 if (sc_if1 != NULL) 2444 ifp1 = sc_if1->sk_ifp; 2445 2446 for (;;) { 2447 status = CSR_READ_4(sc, SK_ISSR); 2448 if (!(status & sc->sk_intrmask)) 2449 break; 2450 2451 /* Handle receive interrupts first. */ 2452 if (status & SK_ISR_RX1_EOF) { 2453 sk_rxeof(sc_if0); 2454 CSR_WRITE_4(sc, SK_BMU_RX_CSR0, 2455 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START); 2456 } 2457 if (status & SK_ISR_RX2_EOF) { 2458 sk_rxeof(sc_if1); 2459 CSR_WRITE_4(sc, SK_BMU_RX_CSR1, 2460 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START); 2461 } 2462 2463 /* Then transmit interrupts. */ 2464 if (status & SK_ISR_TX1_S_EOF) { 2465 sk_txeof(sc_if0); 2466 CSR_WRITE_4(sc, SK_BMU_TXS_CSR0, 2467 SK_TXBMU_CLR_IRQ_EOF); 2468 } 2469 if (status & SK_ISR_TX2_S_EOF) { 2470 sk_txeof(sc_if1); 2471 CSR_WRITE_4(sc, SK_BMU_TXS_CSR1, 2472 SK_TXBMU_CLR_IRQ_EOF); 2473 } 2474 2475 /* Then MAC interrupts. */ 2476 if (status & SK_ISR_MAC1 && 2477 ifp0->if_drv_flags & IFF_DRV_RUNNING) { 2478 if (sc->sk_type == SK_GENESIS) 2479 sk_intr_xmac(sc_if0); 2480 else 2481 sk_intr_yukon(sc_if0); 2482 } 2483 2484 if (status & SK_ISR_MAC2 && 2485 ifp1->if_drv_flags & IFF_DRV_RUNNING) { 2486 if (sc->sk_type == SK_GENESIS) 2487 sk_intr_xmac(sc_if1); 2488 else 2489 sk_intr_yukon(sc_if1); 2490 } 2491 2492 if (status & SK_ISR_EXTERNAL_REG) { 2493 if (ifp0 != NULL && 2494 sc_if0->sk_phytype == SK_PHYTYPE_BCOM) 2495 sk_intr_bcom(sc_if0); 2496 if (ifp1 != NULL && 2497 sc_if1->sk_phytype == SK_PHYTYPE_BCOM) 2498 sk_intr_bcom(sc_if1); 2499 } 2500 } 2501 2502 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); 2503 2504 if (ifp0 != NULL && !IFQ_DRV_IS_EMPTY(&ifp0->if_snd)) 2505 sk_start_locked(ifp0); 2506 if (ifp1 != NULL && !IFQ_DRV_IS_EMPTY(&ifp1->if_snd)) 2507 sk_start_locked(ifp1); 2508 2509 SK_UNLOCK(sc); 2510 2511 return; 2512 } 2513 2514 static void 2515 sk_init_xmac(sc_if) 2516 struct sk_if_softc *sc_if; 2517 { 2518 struct sk_softc *sc; 2519 struct ifnet *ifp; 2520 struct sk_bcom_hack bhack[] = { 2521 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 }, 2522 { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 }, 2523 { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 }, 2524 { 0, 0 } }; 2525 2526 sc = sc_if->sk_softc; 2527 ifp = sc_if->sk_ifp; 2528 2529 /* Unreset the XMAC. */ 2530 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET); 2531 DELAY(1000); 2532 2533 /* Reset the XMAC's internal state. */ 2534 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC); 2535 2536 /* Save the XMAC II revision */ 2537 sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID)); 2538 2539 /* 2540 * Perform additional initialization for external PHYs, 2541 * namely for the 1000baseTX cards that use the XMAC's 2542 * GMII mode. 2543 */ 2544 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { 2545 int i = 0; 2546 u_int32_t val; 2547 2548 /* Take PHY out of reset. */ 2549 val = sk_win_read_4(sc, SK_GPIO); 2550 if (sc_if->sk_port == SK_PORT_A) 2551 val |= SK_GPIO_DIR0|SK_GPIO_DAT0; 2552 else 2553 val |= SK_GPIO_DIR2|SK_GPIO_DAT2; 2554 sk_win_write_4(sc, SK_GPIO, val); 2555 2556 /* Enable GMII mode on the XMAC. */ 2557 SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE); 2558 2559 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, 2560 BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET); 2561 DELAY(10000); 2562 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, 2563 BRGPHY_MII_IMR, 0xFFF0); 2564 2565 /* 2566 * Early versions of the BCM5400 apparently have 2567 * a bug that requires them to have their reserved 2568 * registers initialized to some magic values. I don't 2569 * know what the numbers do, I'm just the messenger. 2570 */ 2571 if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03) 2572 == 0x6041) { 2573 while(bhack[i].reg) { 2574 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, 2575 bhack[i].reg, bhack[i].val); 2576 i++; 2577 } 2578 } 2579 } 2580 2581 /* Set station address */ 2582 SK_XM_WRITE_2(sc_if, XM_PAR0, 2583 *(u_int16_t *)(&IFP2ENADDR(sc_if->sk_ifp)[0])); 2584 SK_XM_WRITE_2(sc_if, XM_PAR1, 2585 *(u_int16_t *)(&IFP2ENADDR(sc_if->sk_ifp)[2])); 2586 SK_XM_WRITE_2(sc_if, XM_PAR2, 2587 *(u_int16_t *)(&IFP2ENADDR(sc_if->sk_ifp)[4])); 2588 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION); 2589 2590 if (ifp->if_flags & IFF_BROADCAST) { 2591 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD); 2592 } else { 2593 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD); 2594 } 2595 2596 /* We don't need the FCS appended to the packet. */ 2597 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS); 2598 2599 /* We want short frames padded to 60 bytes. */ 2600 SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD); 2601 2602 /* 2603 * Enable the reception of all error frames. This is is 2604 * a necessary evil due to the design of the XMAC. The 2605 * XMAC's receive FIFO is only 8K in size, however jumbo 2606 * frames can be up to 9000 bytes in length. When bad 2607 * frame filtering is enabled, the XMAC's RX FIFO operates 2608 * in 'store and forward' mode. For this to work, the 2609 * entire frame has to fit into the FIFO, but that means 2610 * that jumbo frames larger than 8192 bytes will be 2611 * truncated. Disabling all bad frame filtering causes 2612 * the RX FIFO to operate in streaming mode, in which 2613 * case the XMAC will start transfering frames out of the 2614 * RX FIFO as soon as the FIFO threshold is reached. 2615 */ 2616 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES| 2617 XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS| 2618 XM_MODE_RX_INRANGELEN); 2619 2620 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 2621 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK); 2622 else 2623 SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK); 2624 2625 /* 2626 * Bump up the transmit threshold. This helps hold off transmit 2627 * underruns when we're blasting traffic from both ports at once. 2628 */ 2629 SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH); 2630 2631 /* Set promiscuous mode */ 2632 sk_setpromisc(sc_if); 2633 2634 /* Set multicast filter */ 2635 sk_setmulti(sc_if); 2636 2637 /* Clear and enable interrupts */ 2638 SK_XM_READ_2(sc_if, XM_ISR); 2639 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) 2640 SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS); 2641 else 2642 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF); 2643 2644 /* Configure MAC arbiter */ 2645 switch(sc_if->sk_xmac_rev) { 2646 case XM_XMAC_REV_B2: 2647 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2); 2648 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2); 2649 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2); 2650 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2); 2651 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2); 2652 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2); 2653 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2); 2654 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2); 2655 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2); 2656 break; 2657 case XM_XMAC_REV_C1: 2658 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1); 2659 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1); 2660 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1); 2661 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1); 2662 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1); 2663 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1); 2664 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1); 2665 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1); 2666 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2); 2667 break; 2668 default: 2669 break; 2670 } 2671 sk_win_write_2(sc, SK_MACARB_CTL, 2672 SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF); 2673 2674 sc_if->sk_link = 1; 2675 2676 return; 2677 } 2678 2679 static void 2680 sk_init_yukon(sc_if) 2681 struct sk_if_softc *sc_if; 2682 { 2683 u_int32_t phy; 2684 u_int16_t reg; 2685 struct sk_softc *sc; 2686 struct ifnet *ifp; 2687 int i; 2688 2689 sc = sc_if->sk_softc; 2690 ifp = sc_if->sk_ifp; 2691 2692 if (sc->sk_type == SK_YUKON_LITE && 2693 sc->sk_rev >= SK_YUKON_LITE_REV_A3) { 2694 /* Take PHY out of reset. */ 2695 sk_win_write_4(sc, SK_GPIO, 2696 (sk_win_read_4(sc, SK_GPIO) | SK_GPIO_DIR9) & ~SK_GPIO_DAT9); 2697 } 2698 2699 /* GMAC and GPHY Reset */ 2700 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET); 2701 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET); 2702 DELAY(1000); 2703 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_CLEAR); 2704 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET); 2705 DELAY(1000); 2706 2707 phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP | 2708 SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE; 2709 2710 switch(sc_if->sk_softc->sk_pmd) { 2711 case IFM_1000_SX: 2712 case IFM_1000_LX: 2713 phy |= SK_GPHY_FIBER; 2714 break; 2715 2716 case IFM_1000_CX: 2717 case IFM_1000_T: 2718 phy |= SK_GPHY_COPPER; 2719 break; 2720 } 2721 2722 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET); 2723 DELAY(1000); 2724 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR); 2725 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF | 2726 SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR); 2727 2728 /* unused read of the interrupt source register */ 2729 SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR); 2730 2731 reg = SK_YU_READ_2(sc_if, YUKON_PAR); 2732 2733 /* MIB Counter Clear Mode set */ 2734 reg |= YU_PAR_MIB_CLR; 2735 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg); 2736 2737 /* MIB Counter Clear Mode clear */ 2738 reg &= ~YU_PAR_MIB_CLR; 2739 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg); 2740 2741 /* receive control reg */ 2742 SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR); 2743 2744 /* transmit parameter register */ 2745 SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) | 2746 YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) ); 2747 2748 /* serial mode register */ 2749 reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e); 2750 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 2751 reg |= YU_SMR_MFL_JUMBO; 2752 SK_YU_WRITE_2(sc_if, YUKON_SMR, reg); 2753 2754 /* Setup Yukon's address */ 2755 for (i = 0; i < 3; i++) { 2756 /* Write Source Address 1 (unicast filter) */ 2757 SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4, 2758 IFP2ENADDR(sc_if->sk_ifp)[i * 2] | 2759 IFP2ENADDR(sc_if->sk_ifp)[i * 2 + 1] << 8); 2760 } 2761 2762 for (i = 0; i < 3; i++) { 2763 reg = sk_win_read_2(sc_if->sk_softc, 2764 SK_MAC1_0 + i * 2 + sc_if->sk_port * 8); 2765 SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg); 2766 } 2767 2768 /* Set promiscuous mode */ 2769 sk_setpromisc(sc_if); 2770 2771 /* Set multicast filter */ 2772 sk_setmulti(sc_if); 2773 2774 /* enable interrupt mask for counter overflows */ 2775 SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0); 2776 SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0); 2777 SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0); 2778 2779 /* Configure RX MAC FIFO */ 2780 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR); 2781 SK_IF_WRITE_4(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON); 2782 2783 /* Configure TX MAC FIFO */ 2784 SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR); 2785 SK_IF_WRITE_4(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON); 2786 } 2787 2788 /* 2789 * Note that to properly initialize any part of the GEnesis chip, 2790 * you first have to take it out of reset mode. 2791 */ 2792 static void 2793 sk_init(xsc) 2794 void *xsc; 2795 { 2796 struct sk_if_softc *sc_if = xsc; 2797 2798 SK_IF_LOCK(sc_if); 2799 sk_init_locked(sc_if); 2800 SK_IF_UNLOCK(sc_if); 2801 2802 return; 2803 } 2804 2805 static void 2806 sk_init_locked(sc_if) 2807 struct sk_if_softc *sc_if; 2808 { 2809 struct sk_softc *sc; 2810 struct ifnet *ifp; 2811 struct mii_data *mii; 2812 u_int16_t reg; 2813 u_int32_t imr; 2814 2815 SK_IF_LOCK_ASSERT(sc_if); 2816 2817 ifp = sc_if->sk_ifp; 2818 sc = sc_if->sk_softc; 2819 mii = device_get_softc(sc_if->sk_miibus); 2820 2821 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2822 return; 2823 2824 /* Cancel pending I/O and free all RX/TX buffers. */ 2825 sk_stop(sc_if); 2826 2827 if (sc->sk_type == SK_GENESIS) { 2828 /* Configure LINK_SYNC LED */ 2829 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON); 2830 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, 2831 SK_LINKLED_LINKSYNC_ON); 2832 2833 /* Configure RX LED */ 2834 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, 2835 SK_RXLEDCTL_COUNTER_START); 2836 2837 /* Configure TX LED */ 2838 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, 2839 SK_TXLEDCTL_COUNTER_START); 2840 } 2841 2842 /* Configure I2C registers */ 2843 2844 /* Configure XMAC(s) */ 2845 switch (sc->sk_type) { 2846 case SK_GENESIS: 2847 sk_init_xmac(sc_if); 2848 break; 2849 case SK_YUKON: 2850 case SK_YUKON_LITE: 2851 case SK_YUKON_LP: 2852 sk_init_yukon(sc_if); 2853 break; 2854 } 2855 mii_mediachg(mii); 2856 2857 if (sc->sk_type == SK_GENESIS) { 2858 /* Configure MAC FIFOs */ 2859 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET); 2860 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END); 2861 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON); 2862 2863 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET); 2864 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END); 2865 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON); 2866 } 2867 2868 /* Configure transmit arbiter(s) */ 2869 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, 2870 SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON); 2871 2872 /* Configure RAMbuffers */ 2873 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET); 2874 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart); 2875 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart); 2876 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart); 2877 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend); 2878 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON); 2879 2880 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET); 2881 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON); 2882 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart); 2883 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart); 2884 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart); 2885 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend); 2886 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON); 2887 2888 /* Configure BMUs */ 2889 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE); 2890 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO, 2891 vtophys(&sc_if->sk_rdata->sk_rx_ring[0])); 2892 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0); 2893 2894 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE); 2895 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO, 2896 vtophys(&sc_if->sk_rdata->sk_tx_ring[0])); 2897 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0); 2898 2899 /* Init descriptors */ 2900 if (sk_init_rx_ring(sc_if) == ENOBUFS) { 2901 printf("sk%d: initialization failed: no " 2902 "memory for rx buffers\n", sc_if->sk_unit); 2903 sk_stop(sc_if); 2904 return; 2905 } 2906 sk_init_tx_ring(sc_if); 2907 2908 /* Set interrupt moderation if changed via sysctl. */ 2909 /* SK_LOCK(sc); */ 2910 imr = sk_win_read_4(sc, SK_IMTIMERINIT); 2911 if (imr != SK_IM_USECS(sc->sk_int_mod)) { 2912 sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod)); 2913 if (bootverbose) 2914 printf("skc%d: interrupt moderation is %d us\n", 2915 sc->sk_unit, sc->sk_int_mod); 2916 } 2917 /* SK_UNLOCK(sc); */ 2918 2919 /* Configure interrupt handling */ 2920 CSR_READ_4(sc, SK_ISSR); 2921 if (sc_if->sk_port == SK_PORT_A) 2922 sc->sk_intrmask |= SK_INTRS1; 2923 else 2924 sc->sk_intrmask |= SK_INTRS2; 2925 2926 sc->sk_intrmask |= SK_ISR_EXTERNAL_REG; 2927 2928 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); 2929 2930 /* Start BMUs. */ 2931 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START); 2932 2933 switch(sc->sk_type) { 2934 case SK_GENESIS: 2935 /* Enable XMACs TX and RX state machines */ 2936 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE); 2937 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); 2938 break; 2939 case SK_YUKON: 2940 case SK_YUKON_LITE: 2941 case SK_YUKON_LP: 2942 reg = SK_YU_READ_2(sc_if, YUKON_GPCR); 2943 reg |= YU_GPCR_TXEN | YU_GPCR_RXEN; 2944 reg &= ~(YU_GPCR_SPEED_EN | YU_GPCR_DPLX_EN); 2945 SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg); 2946 } 2947 2948 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2949 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2950 2951 return; 2952 } 2953 2954 static void 2955 sk_stop(sc_if) 2956 struct sk_if_softc *sc_if; 2957 { 2958 int i; 2959 struct sk_softc *sc; 2960 struct ifnet *ifp; 2961 2962 SK_IF_LOCK_ASSERT(sc_if); 2963 sc = sc_if->sk_softc; 2964 ifp = sc_if->sk_ifp; 2965 2966 untimeout(sk_tick, sc_if, sc_if->sk_tick_ch); 2967 2968 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { 2969 u_int32_t val; 2970 2971 /* Put PHY back into reset. */ 2972 val = sk_win_read_4(sc, SK_GPIO); 2973 if (sc_if->sk_port == SK_PORT_A) { 2974 val |= SK_GPIO_DIR0; 2975 val &= ~SK_GPIO_DAT0; 2976 } else { 2977 val |= SK_GPIO_DIR2; 2978 val &= ~SK_GPIO_DAT2; 2979 } 2980 sk_win_write_4(sc, SK_GPIO, val); 2981 } 2982 2983 /* Turn off various components of this interface. */ 2984 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC); 2985 switch (sc->sk_type) { 2986 case SK_GENESIS: 2987 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET); 2988 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET); 2989 break; 2990 case SK_YUKON: 2991 case SK_YUKON_LITE: 2992 case SK_YUKON_LP: 2993 SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET); 2994 SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET); 2995 break; 2996 } 2997 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE); 2998 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF); 2999 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE); 3000 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF); 3001 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF); 3002 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP); 3003 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP); 3004 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF); 3005 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF); 3006 3007 /* Disable interrupts */ 3008 if (sc_if->sk_port == SK_PORT_A) 3009 sc->sk_intrmask &= ~SK_INTRS1; 3010 else 3011 sc->sk_intrmask &= ~SK_INTRS2; 3012 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); 3013 3014 SK_XM_READ_2(sc_if, XM_ISR); 3015 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF); 3016 3017 /* Free RX and TX mbufs still in the queues. */ 3018 for (i = 0; i < SK_RX_RING_CNT; i++) { 3019 if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) { 3020 m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf); 3021 sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL; 3022 } 3023 } 3024 3025 for (i = 0; i < SK_TX_RING_CNT; i++) { 3026 if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) { 3027 m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf); 3028 sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL; 3029 } 3030 } 3031 3032 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE); 3033 3034 return; 3035 } 3036 3037 static int 3038 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 3039 { 3040 int error, value; 3041 3042 if (!arg1) 3043 return (EINVAL); 3044 value = *(int *)arg1; 3045 error = sysctl_handle_int(oidp, &value, 0, req); 3046 if (error || !req->newptr) 3047 return (error); 3048 if (value < low || value > high) 3049 return (EINVAL); 3050 *(int *)arg1 = value; 3051 return (0); 3052 } 3053 3054 static int 3055 sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS) 3056 { 3057 return (sysctl_int_range(oidp, arg1, arg2, req, SK_IM_MIN, SK_IM_MAX)); 3058 } 3059