1 /* $OpenBSD: if_sk.c,v 2.33 2003/08/12 05:23:06 nate Exp $ */ 2 3 /*- 4 * Copyright (c) 1997, 1998, 1999, 2000 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 /*- 35 * Copyright (c) 2003 Nathan L. 
Binkert <binkertn@umich.edu> 36 * 37 * Permission to use, copy, modify, and distribute this software for any 38 * purpose with or without fee is hereby granted, provided that the above 39 * copyright notice and this permission notice appear in all copies. 40 * 41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 48 */ 49 50 #include <sys/cdefs.h> 51 __FBSDID("$FreeBSD$"); 52 53 /* 54 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports 55 * the SK-984x series adapters, both single port and dual port. 56 * References: 57 * The XaQti XMAC II datasheet, 58 * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf 59 * The SysKonnect GEnesis manual, http://www.syskonnect.com 60 * 61 * Note: XaQti has been aquired by Vitesse, and Vitesse does not have the 62 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a 63 * convenience to others until Vitesse corrects this problem: 64 * 65 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf 66 * 67 * Written by Bill Paul <wpaul@ee.columbia.edu> 68 * Department of Electrical Engineering 69 * Columbia University, New York City 70 */ 71 /* 72 * The SysKonnect gigabit ethernet adapters consist of two main 73 * components: the SysKonnect GEnesis controller chip and the XaQti Corp. 74 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC 75 * components and a PHY while the GEnesis controller provides a PCI 76 * interface with DMA support. 
Each card may have between 512K and 77 * 2MB of SRAM on board depending on the configuration. 78 * 79 * The SysKonnect GEnesis controller can have either one or two XMAC 80 * chips connected to it, allowing single or dual port NIC configurations. 81 * SysKonnect has the distinction of being the only vendor on the market 82 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs, 83 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the 84 * XMAC registers. This driver takes advantage of these features to allow 85 * both XMACs to operate as independent interfaces. 86 */ 87 88 #include <sys/param.h> 89 #include <sys/systm.h> 90 #include <sys/sockio.h> 91 #include <sys/mbuf.h> 92 #include <sys/malloc.h> 93 #include <sys/kernel.h> 94 #include <sys/module.h> 95 #include <sys/socket.h> 96 #include <sys/queue.h> 97 #include <sys/sysctl.h> 98 99 #include <net/if.h> 100 #include <net/if_arp.h> 101 #include <net/ethernet.h> 102 #include <net/if_dl.h> 103 #include <net/if_media.h> 104 105 #include <net/bpf.h> 106 107 #include <vm/vm.h> /* for vtophys */ 108 #include <vm/pmap.h> /* for vtophys */ 109 #include <machine/bus_pio.h> 110 #include <machine/bus_memio.h> 111 #include <machine/bus.h> 112 #include <machine/resource.h> 113 #include <sys/bus.h> 114 #include <sys/rman.h> 115 116 #include <dev/mii/mii.h> 117 #include <dev/mii/miivar.h> 118 #include <dev/mii/brgphyreg.h> 119 120 #include <dev/pci/pcireg.h> 121 #include <dev/pci/pcivar.h> 122 123 #if 0 124 #define SK_USEIOSPACE 125 #endif 126 127 #include <pci/if_skreg.h> 128 #include <pci/xmaciireg.h> 129 #include <pci/yukonreg.h> 130 131 MODULE_DEPEND(sk, pci, 1, 1, 1); 132 MODULE_DEPEND(sk, ether, 1, 1, 1); 133 MODULE_DEPEND(sk, miibus, 1, 1, 1); 134 135 /* "controller miibus0" required. See GENERIC if you get errors here. 
*/ 136 #include "miibus_if.h" 137 138 #ifndef lint 139 static const char rcsid[] = 140 "$FreeBSD$"; 141 #endif 142 143 static struct sk_type sk_devs[] = { 144 { 145 VENDORID_SK, 146 DEVICEID_SK_V1, 147 "SysKonnect Gigabit Ethernet (V1.0)" 148 }, 149 { 150 VENDORID_SK, 151 DEVICEID_SK_V2, 152 "SysKonnect Gigabit Ethernet (V2.0)" 153 }, 154 { 155 VENDORID_MARVELL, 156 DEVICEID_SK_V2, 157 "Marvell Gigabit Ethernet" 158 }, 159 { 160 VENDORID_MARVELL, 161 DEVICEID_BELKIN_5005, 162 "Belkin F5D5005 Gigabit Ethernet" 163 }, 164 { 165 VENDORID_3COM, 166 DEVICEID_3COM_3C940, 167 "3Com 3C940 Gigabit Ethernet" 168 }, 169 { 170 VENDORID_LINKSYS, 171 DEVICEID_LINKSYS_EG1032, 172 "Linksys EG1032 Gigabit Ethernet" 173 }, 174 { 175 VENDORID_DLINK, 176 DEVICEID_DLINK_DGE530T, 177 "D-Link DGE-530T Gigabit Ethernet" 178 }, 179 { 0, 0, NULL } 180 }; 181 182 static int skc_probe(device_t); 183 static int skc_attach(device_t); 184 static int skc_detach(device_t); 185 static void skc_shutdown(device_t); 186 static int sk_detach(device_t); 187 static int sk_probe(device_t); 188 static int sk_attach(device_t); 189 static void sk_tick(void *); 190 static void sk_intr(void *); 191 static void sk_intr_xmac(struct sk_if_softc *); 192 static void sk_intr_bcom(struct sk_if_softc *); 193 static void sk_intr_yukon(struct sk_if_softc *); 194 static void sk_rxeof(struct sk_if_softc *); 195 static void sk_txeof(struct sk_if_softc *); 196 static int sk_encap(struct sk_if_softc *, struct mbuf *, 197 u_int32_t *); 198 static void sk_start(struct ifnet *); 199 static int sk_ioctl(struct ifnet *, u_long, caddr_t); 200 static void sk_init(void *); 201 static void sk_init_xmac(struct sk_if_softc *); 202 static void sk_init_yukon(struct sk_if_softc *); 203 static void sk_stop(struct sk_if_softc *); 204 static void sk_watchdog(struct ifnet *); 205 static int sk_ifmedia_upd(struct ifnet *); 206 static void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *); 207 static void sk_reset(struct sk_softc *); 208 
static int sk_newbuf(struct sk_if_softc *, 209 struct sk_chain *, struct mbuf *); 210 static int sk_alloc_jumbo_mem(struct sk_if_softc *); 211 static void sk_free_jumbo_mem(struct sk_if_softc *); 212 static void *sk_jalloc(struct sk_if_softc *); 213 static void sk_jfree(void *, void *); 214 static int sk_init_rx_ring(struct sk_if_softc *); 215 static void sk_init_tx_ring(struct sk_if_softc *); 216 static u_int32_t sk_win_read_4(struct sk_softc *, int); 217 static u_int16_t sk_win_read_2(struct sk_softc *, int); 218 static u_int8_t sk_win_read_1(struct sk_softc *, int); 219 static void sk_win_write_4(struct sk_softc *, int, u_int32_t); 220 static void sk_win_write_2(struct sk_softc *, int, u_int32_t); 221 static void sk_win_write_1(struct sk_softc *, int, u_int32_t); 222 static u_int8_t sk_vpd_readbyte(struct sk_softc *, int); 223 static void sk_vpd_read_res(struct sk_softc *, struct vpd_res *, int); 224 static void sk_vpd_read(struct sk_softc *); 225 226 static int sk_miibus_readreg(device_t, int, int); 227 static int sk_miibus_writereg(device_t, int, int, int); 228 static void sk_miibus_statchg(device_t); 229 230 static int sk_xmac_miibus_readreg(struct sk_if_softc *, int, int); 231 static int sk_xmac_miibus_writereg(struct sk_if_softc *, int, int, 232 int); 233 static void sk_xmac_miibus_statchg(struct sk_if_softc *); 234 235 static int sk_marv_miibus_readreg(struct sk_if_softc *, int, int); 236 static int sk_marv_miibus_writereg(struct sk_if_softc *, int, int, 237 int); 238 static void sk_marv_miibus_statchg(struct sk_if_softc *); 239 240 static uint32_t sk_xmchash(const uint8_t *); 241 static uint32_t sk_gmchash(const uint8_t *); 242 static void sk_setfilt(struct sk_if_softc *, caddr_t, int); 243 static void sk_setmulti(struct sk_if_softc *); 244 static void sk_setpromisc(struct sk_if_softc *); 245 246 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high); 247 static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS); 248 249 #ifdef SK_USEIOSPACE 250 
#define SK_RES SYS_RES_IOPORT 251 #define SK_RID SK_PCI_LOIO 252 #else 253 #define SK_RES SYS_RES_MEMORY 254 #define SK_RID SK_PCI_LOMEM 255 #endif 256 257 /* 258 * Note that we have newbus methods for both the GEnesis controller 259 * itself and the XMAC(s). The XMACs are children of the GEnesis, and 260 * the miibus code is a child of the XMACs. We need to do it this way 261 * so that the miibus drivers can access the PHY registers on the 262 * right PHY. It's not quite what I had in mind, but it's the only 263 * design that achieves the desired effect. 264 */ 265 static device_method_t skc_methods[] = { 266 /* Device interface */ 267 DEVMETHOD(device_probe, skc_probe), 268 DEVMETHOD(device_attach, skc_attach), 269 DEVMETHOD(device_detach, skc_detach), 270 DEVMETHOD(device_shutdown, skc_shutdown), 271 272 /* bus interface */ 273 DEVMETHOD(bus_print_child, bus_generic_print_child), 274 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 275 276 { 0, 0 } 277 }; 278 279 static driver_t skc_driver = { 280 "skc", 281 skc_methods, 282 sizeof(struct sk_softc) 283 }; 284 285 static devclass_t skc_devclass; 286 287 static device_method_t sk_methods[] = { 288 /* Device interface */ 289 DEVMETHOD(device_probe, sk_probe), 290 DEVMETHOD(device_attach, sk_attach), 291 DEVMETHOD(device_detach, sk_detach), 292 DEVMETHOD(device_shutdown, bus_generic_shutdown), 293 294 /* bus interface */ 295 DEVMETHOD(bus_print_child, bus_generic_print_child), 296 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 297 298 /* MII interface */ 299 DEVMETHOD(miibus_readreg, sk_miibus_readreg), 300 DEVMETHOD(miibus_writereg, sk_miibus_writereg), 301 DEVMETHOD(miibus_statchg, sk_miibus_statchg), 302 303 { 0, 0 } 304 }; 305 306 static driver_t sk_driver = { 307 "sk", 308 sk_methods, 309 sizeof(struct sk_if_softc) 310 }; 311 312 static devclass_t sk_devclass; 313 314 DRIVER_MODULE(sk, pci, skc_driver, skc_devclass, 0, 0); 315 DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0); 316 
DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0); 317 318 #define SK_SETBIT(sc, reg, x) \ 319 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x) 320 321 #define SK_CLRBIT(sc, reg, x) \ 322 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x) 323 324 #define SK_WIN_SETBIT_4(sc, reg, x) \ 325 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x) 326 327 #define SK_WIN_CLRBIT_4(sc, reg, x) \ 328 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x) 329 330 #define SK_WIN_SETBIT_2(sc, reg, x) \ 331 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x) 332 333 #define SK_WIN_CLRBIT_2(sc, reg, x) \ 334 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x) 335 336 static u_int32_t 337 sk_win_read_4(sc, reg) 338 struct sk_softc *sc; 339 int reg; 340 { 341 #ifdef SK_USEIOSPACE 342 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 343 return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg))); 344 #else 345 return(CSR_READ_4(sc, reg)); 346 #endif 347 } 348 349 static u_int16_t 350 sk_win_read_2(sc, reg) 351 struct sk_softc *sc; 352 int reg; 353 { 354 #ifdef SK_USEIOSPACE 355 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 356 return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg))); 357 #else 358 return(CSR_READ_2(sc, reg)); 359 #endif 360 } 361 362 static u_int8_t 363 sk_win_read_1(sc, reg) 364 struct sk_softc *sc; 365 int reg; 366 { 367 #ifdef SK_USEIOSPACE 368 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 369 return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg))); 370 #else 371 return(CSR_READ_1(sc, reg)); 372 #endif 373 } 374 375 static void 376 sk_win_write_4(sc, reg, val) 377 struct sk_softc *sc; 378 int reg; 379 u_int32_t val; 380 { 381 #ifdef SK_USEIOSPACE 382 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 383 CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val); 384 #else 385 CSR_WRITE_4(sc, reg, val); 386 #endif 387 return; 388 } 389 390 static void 391 sk_win_write_2(sc, reg, val) 392 struct sk_softc *sc; 393 int reg; 394 u_int32_t val; 395 { 396 #ifdef SK_USEIOSPACE 397 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 398 CSR_WRITE_2(sc, 
SK_WIN_BASE + SK_REG(reg), val); 399 #else 400 CSR_WRITE_2(sc, reg, val); 401 #endif 402 return; 403 } 404 405 static void 406 sk_win_write_1(sc, reg, val) 407 struct sk_softc *sc; 408 int reg; 409 u_int32_t val; 410 { 411 #ifdef SK_USEIOSPACE 412 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 413 CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val); 414 #else 415 CSR_WRITE_1(sc, reg, val); 416 #endif 417 return; 418 } 419 420 /* 421 * The VPD EEPROM contains Vital Product Data, as suggested in 422 * the PCI 2.1 specification. The VPD data is separared into areas 423 * denoted by resource IDs. The SysKonnect VPD contains an ID string 424 * resource (the name of the adapter), a read-only area resource 425 * containing various key/data fields and a read/write area which 426 * can be used to store asset management information or log messages. 427 * We read the ID string and read-only into buffers attached to 428 * the controller softc structure for later use. At the moment, 429 * we only use the ID string during skc_attach(). 
430 */ 431 static u_int8_t 432 sk_vpd_readbyte(sc, addr) 433 struct sk_softc *sc; 434 int addr; 435 { 436 int i; 437 438 sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr); 439 for (i = 0; i < SK_TIMEOUT; i++) { 440 DELAY(1); 441 if (sk_win_read_2(sc, 442 SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG) 443 break; 444 } 445 446 if (i == SK_TIMEOUT) 447 return(0); 448 449 return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA))); 450 } 451 452 static void 453 sk_vpd_read_res(sc, res, addr) 454 struct sk_softc *sc; 455 struct vpd_res *res; 456 int addr; 457 { 458 int i; 459 u_int8_t *ptr; 460 461 ptr = (u_int8_t *)res; 462 for (i = 0; i < sizeof(struct vpd_res); i++) 463 ptr[i] = sk_vpd_readbyte(sc, i + addr); 464 465 return; 466 } 467 468 static void 469 sk_vpd_read(sc) 470 struct sk_softc *sc; 471 { 472 int pos = 0, i; 473 struct vpd_res res; 474 475 if (sc->sk_vpd_prodname != NULL) 476 free(sc->sk_vpd_prodname, M_DEVBUF); 477 if (sc->sk_vpd_readonly != NULL) 478 free(sc->sk_vpd_readonly, M_DEVBUF); 479 sc->sk_vpd_prodname = NULL; 480 sc->sk_vpd_readonly = NULL; 481 sc->sk_vpd_readonly_len = 0; 482 483 sk_vpd_read_res(sc, &res, pos); 484 485 /* 486 * Bail out quietly if the eeprom appears to be missing or empty. 
487 */ 488 if (res.vr_id == 0xff && res.vr_len == 0xff && res.vr_pad == 0xff) 489 return; 490 491 if (res.vr_id != VPD_RES_ID) { 492 printf("skc%d: bad VPD resource id: expected %x got %x\n", 493 sc->sk_unit, VPD_RES_ID, res.vr_id); 494 return; 495 } 496 497 pos += sizeof(res); 498 sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT); 499 for (i = 0; i < res.vr_len; i++) 500 sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos); 501 sc->sk_vpd_prodname[i] = '\0'; 502 pos += i; 503 504 sk_vpd_read_res(sc, &res, pos); 505 506 if (res.vr_id != VPD_RES_READ) { 507 printf("skc%d: bad VPD resource id: expected %x got %x\n", 508 sc->sk_unit, VPD_RES_READ, res.vr_id); 509 return; 510 } 511 512 pos += sizeof(res); 513 sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT); 514 for (i = 0; i < res.vr_len; i++) 515 sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos); 516 sc->sk_vpd_readonly_len = res.vr_len; 517 518 return; 519 } 520 521 static int 522 sk_miibus_readreg(dev, phy, reg) 523 device_t dev; 524 int phy, reg; 525 { 526 struct sk_if_softc *sc_if; 527 528 sc_if = device_get_softc(dev); 529 530 switch(sc_if->sk_softc->sk_type) { 531 case SK_GENESIS: 532 return(sk_xmac_miibus_readreg(sc_if, phy, reg)); 533 case SK_YUKON: 534 case SK_YUKON_LITE: 535 case SK_YUKON_LP: 536 return(sk_marv_miibus_readreg(sc_if, phy, reg)); 537 } 538 539 return(0); 540 } 541 542 static int 543 sk_miibus_writereg(dev, phy, reg, val) 544 device_t dev; 545 int phy, reg, val; 546 { 547 struct sk_if_softc *sc_if; 548 549 sc_if = device_get_softc(dev); 550 551 switch(sc_if->sk_softc->sk_type) { 552 case SK_GENESIS: 553 return(sk_xmac_miibus_writereg(sc_if, phy, reg, val)); 554 case SK_YUKON: 555 case SK_YUKON_LITE: 556 case SK_YUKON_LP: 557 return(sk_marv_miibus_writereg(sc_if, phy, reg, val)); 558 } 559 560 return(0); 561 } 562 563 static void 564 sk_miibus_statchg(dev) 565 device_t dev; 566 { 567 struct sk_if_softc *sc_if; 568 569 sc_if = device_get_softc(dev); 570 571 
switch(sc_if->sk_softc->sk_type) { 572 case SK_GENESIS: 573 sk_xmac_miibus_statchg(sc_if); 574 break; 575 case SK_YUKON: 576 case SK_YUKON_LITE: 577 case SK_YUKON_LP: 578 sk_marv_miibus_statchg(sc_if); 579 break; 580 } 581 582 return; 583 } 584 585 static int 586 sk_xmac_miibus_readreg(sc_if, phy, reg) 587 struct sk_if_softc *sc_if; 588 int phy, reg; 589 { 590 int i; 591 592 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0) 593 return(0); 594 595 SK_IF_LOCK(sc_if); 596 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8)); 597 SK_XM_READ_2(sc_if, XM_PHY_DATA); 598 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) { 599 for (i = 0; i < SK_TIMEOUT; i++) { 600 DELAY(1); 601 if (SK_XM_READ_2(sc_if, XM_MMUCMD) & 602 XM_MMUCMD_PHYDATARDY) 603 break; 604 } 605 606 if (i == SK_TIMEOUT) { 607 printf("sk%d: phy failed to come ready\n", 608 sc_if->sk_unit); 609 SK_IF_UNLOCK(sc_if); 610 return(0); 611 } 612 } 613 DELAY(1); 614 i = SK_XM_READ_2(sc_if, XM_PHY_DATA); 615 SK_IF_UNLOCK(sc_if); 616 return(i); 617 } 618 619 static int 620 sk_xmac_miibus_writereg(sc_if, phy, reg, val) 621 struct sk_if_softc *sc_if; 622 int phy, reg, val; 623 { 624 int i; 625 626 SK_IF_LOCK(sc_if); 627 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8)); 628 for (i = 0; i < SK_TIMEOUT; i++) { 629 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY)) 630 break; 631 } 632 633 if (i == SK_TIMEOUT) { 634 printf("sk%d: phy failed to come ready\n", sc_if->sk_unit); 635 SK_IF_UNLOCK(sc_if); 636 return(ETIMEDOUT); 637 } 638 639 SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val); 640 for (i = 0; i < SK_TIMEOUT; i++) { 641 DELAY(1); 642 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY)) 643 break; 644 } 645 SK_IF_UNLOCK(sc_if); 646 if (i == SK_TIMEOUT) 647 printf("sk%d: phy write timed out\n", sc_if->sk_unit); 648 649 return(0); 650 } 651 652 static void 653 sk_xmac_miibus_statchg(sc_if) 654 struct sk_if_softc *sc_if; 655 { 656 struct mii_data *mii; 657 658 mii = device_get_softc(sc_if->sk_miibus); 659 660 
SK_IF_LOCK(sc_if); 661 /* 662 * If this is a GMII PHY, manually set the XMAC's 663 * duplex mode accordingly. 664 */ 665 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) { 666 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 667 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX); 668 } else { 669 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX); 670 } 671 } 672 SK_IF_UNLOCK(sc_if); 673 674 return; 675 } 676 677 static int 678 sk_marv_miibus_readreg(sc_if, phy, reg) 679 struct sk_if_softc *sc_if; 680 int phy, reg; 681 { 682 u_int16_t val; 683 int i; 684 685 if (phy != 0 || 686 (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER && 687 sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) { 688 return(0); 689 } 690 691 SK_IF_LOCK(sc_if); 692 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) | 693 YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ); 694 695 for (i = 0; i < SK_TIMEOUT; i++) { 696 DELAY(1); 697 val = SK_YU_READ_2(sc_if, YUKON_SMICR); 698 if (val & YU_SMICR_READ_VALID) 699 break; 700 } 701 702 if (i == SK_TIMEOUT) { 703 printf("sk%d: phy failed to come ready\n", 704 sc_if->sk_unit); 705 SK_IF_UNLOCK(sc_if); 706 return(0); 707 } 708 709 val = SK_YU_READ_2(sc_if, YUKON_SMIDR); 710 SK_IF_UNLOCK(sc_if); 711 712 return(val); 713 } 714 715 static int 716 sk_marv_miibus_writereg(sc_if, phy, reg, val) 717 struct sk_if_softc *sc_if; 718 int phy, reg, val; 719 { 720 int i; 721 722 SK_IF_LOCK(sc_if); 723 SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val); 724 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) | 725 YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE); 726 727 for (i = 0; i < SK_TIMEOUT; i++) { 728 DELAY(1); 729 if (SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY) 730 break; 731 } 732 SK_IF_UNLOCK(sc_if); 733 734 return(0); 735 } 736 737 static void 738 sk_marv_miibus_statchg(sc_if) 739 struct sk_if_softc *sc_if; 740 { 741 return; 742 } 743 744 #define HASH_BITS 6 745 746 static u_int32_t 747 sk_xmchash(addr) 748 const uint8_t *addr; 749 { 750 uint32_t crc; 751 752 /* Compute CRC for the 
address value. */ 753 crc = ether_crc32_le(addr, ETHER_ADDR_LEN); 754 755 return (~crc & ((1 << HASH_BITS) - 1)); 756 } 757 758 /* gmchash is just a big endian crc */ 759 static u_int32_t 760 sk_gmchash(addr) 761 const uint8_t *addr; 762 { 763 uint32_t crc; 764 765 /* Compute CRC for the address value. */ 766 crc = ether_crc32_be(addr, ETHER_ADDR_LEN); 767 768 return (crc & ((1 << HASH_BITS) - 1)); 769 } 770 771 static void 772 sk_setfilt(sc_if, addr, slot) 773 struct sk_if_softc *sc_if; 774 caddr_t addr; 775 int slot; 776 { 777 int base; 778 779 base = XM_RXFILT_ENTRY(slot); 780 781 SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0])); 782 SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2])); 783 SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4])); 784 785 return; 786 } 787 788 static void 789 sk_setmulti(sc_if) 790 struct sk_if_softc *sc_if; 791 { 792 struct sk_softc *sc = sc_if->sk_softc; 793 struct ifnet *ifp = &sc_if->arpcom.ac_if; 794 u_int32_t hashes[2] = { 0, 0 }; 795 int h = 0, i; 796 struct ifmultiaddr *ifma; 797 u_int8_t dummy[] = { 0, 0, 0, 0, 0 ,0 }; 798 799 800 /* First, zot all the existing filters. */ 801 switch(sc->sk_type) { 802 case SK_GENESIS: 803 for (i = 1; i < XM_RXFILT_MAX; i++) 804 sk_setfilt(sc_if, (caddr_t)&dummy, i); 805 806 SK_XM_WRITE_4(sc_if, XM_MAR0, 0); 807 SK_XM_WRITE_4(sc_if, XM_MAR2, 0); 808 break; 809 case SK_YUKON: 810 case SK_YUKON_LITE: 811 case SK_YUKON_LP: 812 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0); 813 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0); 814 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0); 815 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0); 816 break; 817 } 818 819 /* Now program new ones. 
*/ 820 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 821 hashes[0] = 0xFFFFFFFF; 822 hashes[1] = 0xFFFFFFFF; 823 } else { 824 i = 1; 825 TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) { 826 if (ifma->ifma_addr->sa_family != AF_LINK) 827 continue; 828 /* 829 * Program the first XM_RXFILT_MAX multicast groups 830 * into the perfect filter. For all others, 831 * use the hash table. 832 */ 833 if (sc->sk_type == SK_GENESIS && i < XM_RXFILT_MAX) { 834 sk_setfilt(sc_if, 835 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i); 836 i++; 837 continue; 838 } 839 840 switch(sc->sk_type) { 841 case SK_GENESIS: 842 h = sk_xmchash( 843 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 844 break; 845 case SK_YUKON: 846 case SK_YUKON_LITE: 847 case SK_YUKON_LP: 848 h = sk_gmchash( 849 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 850 break; 851 } 852 if (h < 32) 853 hashes[0] |= (1 << h); 854 else 855 hashes[1] |= (1 << (h - 32)); 856 } 857 } 858 859 switch(sc->sk_type) { 860 case SK_GENESIS: 861 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH| 862 XM_MODE_RX_USE_PERFECT); 863 SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]); 864 SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]); 865 break; 866 case SK_YUKON: 867 case SK_YUKON_LITE: 868 case SK_YUKON_LP: 869 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff); 870 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff); 871 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff); 872 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff); 873 break; 874 } 875 876 return; 877 } 878 879 static void 880 sk_setpromisc(sc_if) 881 struct sk_if_softc *sc_if; 882 { 883 struct sk_softc *sc = sc_if->sk_softc; 884 struct ifnet *ifp = &sc_if->arpcom.ac_if; 885 886 switch(sc->sk_type) { 887 case SK_GENESIS: 888 if (ifp->if_flags & IFF_PROMISC) { 889 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC); 890 } else { 891 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC); 892 } 893 break; 894 case 
SK_YUKON: 895 case SK_YUKON_LITE: 896 case SK_YUKON_LP: 897 if (ifp->if_flags & IFF_PROMISC) { 898 SK_YU_CLRBIT_2(sc_if, YUKON_RCR, 899 YU_RCR_UFLEN | YU_RCR_MUFLEN); 900 } else { 901 SK_YU_SETBIT_2(sc_if, YUKON_RCR, 902 YU_RCR_UFLEN | YU_RCR_MUFLEN); 903 } 904 break; 905 } 906 907 return; 908 } 909 910 static int 911 sk_init_rx_ring(sc_if) 912 struct sk_if_softc *sc_if; 913 { 914 struct sk_chain_data *cd = &sc_if->sk_cdata; 915 struct sk_ring_data *rd = sc_if->sk_rdata; 916 int i; 917 918 bzero((char *)rd->sk_rx_ring, 919 sizeof(struct sk_rx_desc) * SK_RX_RING_CNT); 920 921 for (i = 0; i < SK_RX_RING_CNT; i++) { 922 cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i]; 923 if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS) 924 return(ENOBUFS); 925 if (i == (SK_RX_RING_CNT - 1)) { 926 cd->sk_rx_chain[i].sk_next = 927 &cd->sk_rx_chain[0]; 928 rd->sk_rx_ring[i].sk_next = 929 vtophys(&rd->sk_rx_ring[0]); 930 } else { 931 cd->sk_rx_chain[i].sk_next = 932 &cd->sk_rx_chain[i + 1]; 933 rd->sk_rx_ring[i].sk_next = 934 vtophys(&rd->sk_rx_ring[i + 1]); 935 } 936 } 937 938 sc_if->sk_cdata.sk_rx_prod = 0; 939 sc_if->sk_cdata.sk_rx_cons = 0; 940 941 return(0); 942 } 943 944 static void 945 sk_init_tx_ring(sc_if) 946 struct sk_if_softc *sc_if; 947 { 948 struct sk_chain_data *cd = &sc_if->sk_cdata; 949 struct sk_ring_data *rd = sc_if->sk_rdata; 950 int i; 951 952 bzero((char *)sc_if->sk_rdata->sk_tx_ring, 953 sizeof(struct sk_tx_desc) * SK_TX_RING_CNT); 954 955 for (i = 0; i < SK_TX_RING_CNT; i++) { 956 cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i]; 957 if (i == (SK_TX_RING_CNT - 1)) { 958 cd->sk_tx_chain[i].sk_next = 959 &cd->sk_tx_chain[0]; 960 rd->sk_tx_ring[i].sk_next = 961 vtophys(&rd->sk_tx_ring[0]); 962 } else { 963 cd->sk_tx_chain[i].sk_next = 964 &cd->sk_tx_chain[i + 1]; 965 rd->sk_tx_ring[i].sk_next = 966 vtophys(&rd->sk_tx_ring[i + 1]); 967 } 968 } 969 970 sc_if->sk_cdata.sk_tx_prod = 0; 971 sc_if->sk_cdata.sk_tx_cons = 0; 972 sc_if->sk_cdata.sk_tx_cnt = 0; 973 
974 return; 975 } 976 977 static int 978 sk_newbuf(sc_if, c, m) 979 struct sk_if_softc *sc_if; 980 struct sk_chain *c; 981 struct mbuf *m; 982 { 983 struct mbuf *m_new = NULL; 984 struct sk_rx_desc *r; 985 986 if (m == NULL) { 987 caddr_t *buf = NULL; 988 989 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 990 if (m_new == NULL) 991 return(ENOBUFS); 992 993 /* Allocate the jumbo buffer */ 994 buf = sk_jalloc(sc_if); 995 if (buf == NULL) { 996 m_freem(m_new); 997 #ifdef SK_VERBOSE 998 printf("sk%d: jumbo allocation failed " 999 "-- packet dropped!\n", sc_if->sk_unit); 1000 #endif 1001 return(ENOBUFS); 1002 } 1003 1004 /* Attach the buffer to the mbuf */ 1005 MEXTADD(m_new, buf, SK_JLEN, sk_jfree, 1006 (struct sk_if_softc *)sc_if, 0, EXT_NET_DRV); 1007 m_new->m_data = (void *)buf; 1008 m_new->m_pkthdr.len = m_new->m_len = SK_JLEN; 1009 } else { 1010 /* 1011 * We're re-using a previously allocated mbuf; 1012 * be sure to re-init pointers and lengths to 1013 * default values. 1014 */ 1015 m_new = m; 1016 m_new->m_len = m_new->m_pkthdr.len = SK_JLEN; 1017 m_new->m_data = m_new->m_ext.ext_buf; 1018 } 1019 1020 /* 1021 * Adjust alignment so packet payload begins on a 1022 * longword boundary. Mandatory for Alpha, useful on 1023 * x86 too. 1024 */ 1025 m_adj(m_new, ETHER_ALIGN); 1026 1027 r = c->sk_desc; 1028 c->sk_mbuf = m_new; 1029 r->sk_data_lo = vtophys(mtod(m_new, caddr_t)); 1030 r->sk_ctl = m_new->m_len | SK_RXSTAT; 1031 1032 return(0); 1033 } 1034 1035 /* 1036 * Allocate jumbo buffer storage. The SysKonnect adapters support 1037 * "jumbograms" (9K frames), although SysKonnect doesn't currently 1038 * use them in their drivers. In order for us to use them, we need 1039 * large 9K receive buffers, however standard mbuf clusters are only 1040 * 2048 bytes in size. Consequently, we need to allocate and manage 1041 * our own jumbo buffer pool. Fortunately, this does not require an 1042 * excessive amount of additional code. 
 */
/*
 * Allocate one physically contiguous slab (SK_JMEM bytes) for jumbo (9K)
 * receive buffers and carve it into SK_JSLOTS slots of SK_JLEN bytes each.
 * Every slot gets an sk_jpool_entry placed on the free list; sk_jalloc()
 * and sk_jfree() move entries between the free and in-use lists under
 * sk_jlist_mtx.  Returns 0 on success or ENOBUFS on allocation failure.
 */
static int
sk_alloc_jumbo_mem(sc_if)
	struct sk_if_softc	*sc_if;
{
	caddr_t			ptr;
	register int		i;
	struct sk_jpool_entry	*entry;

	/* Grab a big chunk o' storage. */
	sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc_if->sk_cdata.sk_jumbo_buf == NULL) {
		printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit);
		return(ENOBUFS);
	}

	mtx_init(&sc_if->sk_jlist_mtx, "sk_jlist_mtx", NULL, MTX_DEF);

	SLIST_INIT(&sc_if->sk_jfree_listhead);
	SLIST_INIT(&sc_if->sk_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc_if->sk_cdata.sk_jumbo_buf;
	for (i = 0; i < SK_JSLOTS; i++) {
		sc_if->sk_cdata.sk_jslots[i] = ptr;
		ptr += SK_JLEN;
		entry = malloc(sizeof(struct sk_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			/*
			 * Roll back: sk_free_jumbo_mem() frees the entries
			 * queued so far, destroys the mutex and releases the
			 * slab, so the slab pointer must be cleared here to
			 * keep sk_detach() from freeing it a second time.
			 */
			sk_free_jumbo_mem(sc_if);
			sc_if->sk_cdata.sk_jumbo_buf = NULL;
			printf("sk%d: no memory for jumbo "
			    "buffer queue!\n", sc_if->sk_unit);
			return(ENOBUFS);
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
		    entry, jpool_entries);
	}

	return(0);
}

/*
 * Tear the jumbo pool down again: free all bookkeeping entries on the
 * free list, destroy sk_jlist_mtx and release the contiguous slab.  If
 * any buffer is still lent out to an mbuf (in-use list non-empty) the
 * memory is deliberately leaked instead of being freed out from under
 * the network stack.
 */
static void
sk_free_jumbo_mem(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_jpool_entry	*entry;

	SK_JLIST_LOCK(sc_if);

	/* We cannot release external mbuf storage while in use. */
	if (!SLIST_EMPTY(&sc_if->sk_jinuse_listhead)) {
		printf("sk%d: will leak jumbo buffer memory!\n", sc_if->sk_unit);
		SK_JLIST_UNLOCK(sc_if);
		return;
	}

	while (!SLIST_EMPTY(&sc_if->sk_jfree_listhead)) {
		entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
		SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
		free(entry, M_DEVBUF);
	}

	SK_JLIST_UNLOCK(sc_if);

	mtx_destroy(&sc_if->sk_jlist_mtx);

	contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF);

	return;
}

/*
 * Allocate a jumbo buffer.
 *
 * Pops an entry off the free list, moves it to the in-use list and
 * returns the corresponding 9K slot address, or NULL if the pool is
 * exhausted.  Runs under sk_jlist_mtx.
 */
static void *
sk_jalloc(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_jpool_entry	*entry;

	SK_JLIST_LOCK(sc_if);

	entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);

	if (entry == NULL) {
#ifdef SK_VERBOSE
		printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit);
#endif
		SK_JLIST_UNLOCK(sc_if);
		return(NULL);
	}

	SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);

	SK_JLIST_UNLOCK(sc_if);

	return(sc_if->sk_cdata.sk_jslots[entry->slot]);
}

/*
 * Release a jumbo buffer.
 *
 * External-storage free routine handed to the mbuf code: 'buf' is the
 * slot address being returned, 'args' is the owning sk_if_softc.  The
 * slot index is recomputed from the buffer address; an arbitrary entry
 * is recycled off the in-use list and retagged with that index (entries
 * are interchangeable bookkeeping tokens, not tied to a fixed slot).
 */
static void
sk_jfree(buf, args)
	void			*buf;
	void			*args;
{
	struct sk_if_softc	*sc_if;
	int			i;
	struct sk_jpool_entry	*entry;

	/* Extract the softc struct pointer. */
	sc_if = (struct sk_if_softc *)args;
	if (sc_if == NULL)
		panic("sk_jfree: didn't get softc pointer!");

	SK_JLIST_LOCK(sc_if);

	/* calculate the slot this buffer belongs to */
	i = ((vm_offset_t)buf
	    - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;

	if ((i < 0) || (i >= SK_JSLOTS))
		panic("sk_jfree: asked to free buffer that we don't manage!");

	entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
	if (entry == NULL)
		panic("sk_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries);
	/* Wake anyone waiting for the last outstanding buffer to return. */
	if (SLIST_EMPTY(&sc_if->sk_jinuse_listhead))
		wakeup(sc_if);

	SK_JLIST_UNLOCK(sc_if);
	return;
}

/*
 * Set media options.
 *
 * ifmedia "change" callback: reinitialize the chip, then let the MII
 * layer program the PHY for the newly selected media.
 */
static int
sk_ifmedia_upd(ifp)
	struct ifnet		*ifp;
{
	struct sk_if_softc	*sc_if = ifp->if_softc;
	struct mii_data		*mii;

	mii = device_get_softc(sc_if->sk_miibus);
	sk_init(sc_if);
	mii_mediachg(mii);

	return(0);
}

/*
 * Report current media status.
1208 */ 1209 static void 1210 sk_ifmedia_sts(ifp, ifmr) 1211 struct ifnet *ifp; 1212 struct ifmediareq *ifmr; 1213 { 1214 struct sk_if_softc *sc_if; 1215 struct mii_data *mii; 1216 1217 sc_if = ifp->if_softc; 1218 mii = device_get_softc(sc_if->sk_miibus); 1219 1220 mii_pollstat(mii); 1221 ifmr->ifm_active = mii->mii_media_active; 1222 ifmr->ifm_status = mii->mii_media_status; 1223 1224 return; 1225 } 1226 1227 static int 1228 sk_ioctl(ifp, command, data) 1229 struct ifnet *ifp; 1230 u_long command; 1231 caddr_t data; 1232 { 1233 struct sk_if_softc *sc_if = ifp->if_softc; 1234 struct ifreq *ifr = (struct ifreq *) data; 1235 int error = 0; 1236 struct mii_data *mii; 1237 1238 switch(command) { 1239 case SIOCSIFMTU: 1240 if (ifr->ifr_mtu > SK_JUMBO_MTU) 1241 error = EINVAL; 1242 else { 1243 ifp->if_mtu = ifr->ifr_mtu; 1244 ifp->if_flags &= ~IFF_RUNNING; 1245 sk_init(sc_if); 1246 } 1247 break; 1248 case SIOCSIFFLAGS: 1249 SK_IF_LOCK(sc_if); 1250 if (ifp->if_flags & IFF_UP) { 1251 if (ifp->if_flags & IFF_RUNNING) { 1252 if ((ifp->if_flags ^ sc_if->sk_if_flags) 1253 & IFF_PROMISC) { 1254 sk_setpromisc(sc_if); 1255 sk_setmulti(sc_if); 1256 } 1257 } else 1258 sk_init(sc_if); 1259 } else { 1260 if (ifp->if_flags & IFF_RUNNING) 1261 sk_stop(sc_if); 1262 } 1263 sc_if->sk_if_flags = ifp->if_flags; 1264 SK_IF_UNLOCK(sc_if); 1265 error = 0; 1266 break; 1267 case SIOCADDMULTI: 1268 case SIOCDELMULTI: 1269 if (ifp->if_flags & IFF_RUNNING) { 1270 SK_IF_LOCK(sc_if); 1271 sk_setmulti(sc_if); 1272 SK_IF_UNLOCK(sc_if); 1273 error = 0; 1274 } 1275 break; 1276 case SIOCGIFMEDIA: 1277 case SIOCSIFMEDIA: 1278 mii = device_get_softc(sc_if->sk_miibus); 1279 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 1280 break; 1281 default: 1282 error = ether_ioctl(ifp, command, data); 1283 break; 1284 } 1285 1286 return(error); 1287 } 1288 1289 /* 1290 * Probe for a SysKonnect GEnesis chip. 
Check the PCI vendor and
 * IDs against our list and return a device name if we find a match.
 */
static int
skc_probe(dev)
	device_t		dev;
{
	struct sk_softc		*sc;
	struct sk_type		*t = sk_devs;

	sc = device_get_softc(dev);

	/* Walk the sk_devs table until the NULL sentinel entry. */
	while(t->sk_name != NULL) {
		if ((pci_get_vendor(dev) == t->sk_vid) &&
		    (pci_get_device(dev) == t->sk_did)) {
			device_set_desc(dev, t->sk_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return(ENXIO);
}

/*
 * Force the GEnesis into reset, then bring it out of reset.
 */
static void
sk_reset(sc)
	struct sk_softc		*sc;
{
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
	if (SK_YUKON_FAMILY(sc->sk_type))
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);

	DELAY(1000);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
	DELAY(2);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
	if (SK_YUKON_FAMILY(sc->sk_type))
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure packet arbiter */
		sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
		sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
	}

	/* Enable RAM interface */
	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);

	/*
	 * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents 18.825ns, so to specify a timeout in
	 * microseconds, we have to multiply by 54.
	 */
	printf("skc%d: interrupt moderation is %d us\n",
	    sc->sk_unit, sc->sk_int_mod);
	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod));
	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);

	return;
}

/*
 * Probe routine for the logical sk child devices created by skc_attach().
 * Always succeeds; it only picks a human-readable description based on
 * the parent controller's chip type.
 */
static int
sk_probe(dev)
	device_t		dev;
{
	struct sk_softc		*sc;

	sc = device_get_softc(device_get_parent(dev));

	/*
	 * Not much to do here. We always know there will be
	 * at least one XMAC present, and if there are two,
	 * skc_attach() will create a second device instance
	 * for us.
	 */
	switch (sc->sk_type) {
	case SK_GENESIS:
		device_set_desc(dev, "XaQti Corp. XMAC II");
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon");
		break;
	}

	return (BUS_PROBE_DEFAULT);
}

/*
 * Each XMAC chip is attached as a separate logical IP interface.
 * Single port cards will have only one logical interface of course.
 */
static int
sk_attach(dev)
	device_t		dev;
{
	struct sk_softc		*sc;
	struct sk_if_softc	*sc_if;
	struct ifnet		*ifp;
	int			i, port, error;

	if (dev == NULL)
		return(EINVAL);

	error = 0;
	sc_if = device_get_softc(dev);
	sc = device_get_softc(device_get_parent(dev));
	/* The port number (SK_PORT_A/B) was stashed as ivars by skc_attach. */
	port = *(int *)device_get_ivars(dev);

	sc_if->sk_dev = dev;
	sc_if->sk_unit = device_get_unit(dev);
	sc_if->sk_port = port;
	sc_if->sk_softc = sc;
	sc->sk_if[port] = sc_if;
	if (port == SK_PORT_A)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
	if (port == SK_PORT_B)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;

	/* Allocate the descriptor queues. */
	/*
	 * NOTE(review): M_ZERO here occupies contigmalloc()'s 'low'
	 * physical-address argument rather than being OR'd into the flags,
	 * so the ring memory is presumably NOT zeroed by this call —
	 * likely intended M_NOWAIT | M_ZERO, 0, ... — confirm against
	 * contigmalloc(9) and the ring init code.
	 */
	sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF,
	    M_NOWAIT, M_ZERO, 0xffffffff, PAGE_SIZE, 0);

	if (sc_if->sk_rdata == NULL) {
		printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit);
		error = ENOMEM;
		goto fail;
	}

	/* Try to allocate memory for jumbo buffers. */
	if (sk_alloc_jumbo_mem(sc_if)) {
		printf("sk%d: jumbo buffer allocation failed\n",
		    sc_if->sk_unit);
		error = ENOMEM;
		goto fail;
	}

	ifp = &sc_if->arpcom.ac_if;
	ifp->if_softc = sc_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sk_ioctl;
	ifp->if_start = sk_start;
	ifp->if_watchdog = sk_watchdog;
	ifp->if_init = sk_init;
	ifp->if_baudrate = 1000000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = SK_TX_RING_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	callout_handle_init(&sc_if->sk_tick_ch);

	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	SK_LOCK(sc);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sc_if->arpcom.ac_enaddr[i] =
		    sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);

	/*
	 * Set up RAM buffer addresses. The NIC will have a certain
	 * amount of SRAM on it, somewhere between 512K and 2MB. We
	 * need to divide this up a) between the transmitter and
	 * receiver and b) between the two XMACs, if this is a
	 * dual port NIC. Our algotithm is to divide up the memory
	 * evenly so that everyone gets a fair share.
	 */
	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
		u_int32_t		chunk, val;

		chunk = sc->sk_ramsize / 2;
		val = sc->sk_rboff / sizeof(u_int64_t);
		sc_if->sk_rx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_rx_ramend = val - 1;
		sc_if->sk_tx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_tx_ramend = val - 1;
	} else {
		u_int32_t		chunk, val;

		chunk = sc->sk_ramsize / 4;
		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
		    sizeof(u_int64_t);
		sc_if->sk_rx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_rx_ramend = val - 1;
		sc_if->sk_tx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_tx_ramend = val - 1;
	}

	/* Read and save PHY type and set PHY address */
	sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
	switch(sc_if->sk_phytype) {
	case SK_PHYTYPE_XMAC:
		sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
		break;
	case SK_PHYTYPE_BCOM:
		sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
		break;
	case SK_PHYTYPE_MARV_COPPER:
		sc_if->sk_phyaddr = SK_PHYADDR_MARV;
		break;
	default:
		printf("skc%d: unsupported PHY type: %d\n",
		    sc->sk_unit, sc_if->sk_phytype);
		error = ENODEV;
		SK_UNLOCK(sc);
		goto fail;
	}


	/*
	 * Call MI attach routine.  Can't hold locks when calling into ether_*.
	 */
	SK_UNLOCK(sc);
	ether_ifattach(ifp, sc_if->arpcom.ac_enaddr);
	SK_LOCK(sc);

	/*
	 * Do miibus setup.
	 */
	switch (sc->sk_type) {
	case SK_GENESIS:
		sk_init_xmac(sc_if);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		sk_init_yukon(sc_if);
		break;
	}

	SK_UNLOCK(sc);
	if (mii_phy_probe(dev, &sc_if->sk_miibus,
	    sk_ifmedia_upd, sk_ifmedia_sts)) {
		printf("skc%d: no PHY found!\n", sc_if->sk_unit);
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}

fail:
	if (error) {
		/* Access should be ok even though lock has been dropped */
		sc->sk_if[port] = NULL;
		sk_detach(dev);
	}

	return(error);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
skc_attach(dev)
	device_t		dev;
{
	struct sk_softc		*sc;
	int			unit, error = 0, rid, *port;
	uint8_t			skrs;
	char			*pname, *revstr;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);

	/* Recursable: sk_init() may be entered with the lock already held. */
	mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF | MTX_RECURSE);
	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = SK_RID;
	sc->sk_res = bus_alloc_resource_any(dev, SK_RES, &rid, RF_ACTIVE);

	if (sc->sk_res == NULL) {
		printf("sk%d: couldn't map ports/memory\n", unit);
		error = ENXIO;
		goto fail;
	}

	sc->sk_btag = rman_get_bustag(sc->sk_res);
	sc->sk_bhandle = rman_get_bushandle(sc->sk_res);

	sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
	sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4) & 0xf;

	/* Bail out if chip is not recognized. */
	if (sc->sk_type != SK_GENESIS && !SK_YUKON_FAMILY(sc->sk_type)) {
		printf("skc%d: unknown device: chipver=%02x, rev=%x\n",
		    unit, sc->sk_type, sc->sk_rev);
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupt */
	rid = 0;
	sc->sk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->sk_irq == NULL) {
		printf("skc%d: couldn't map interrupt\n", unit);
		error = ENXIO;
		goto fail;
	}

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "int_mod", CTLTYPE_INT|CTLFLAG_RW,
	    &sc->sk_int_mod, 0, sysctl_hw_sk_int_mod, "I",
	    "SK interrupt moderation");

	/* Pull in device tunables. */
	sc->sk_int_mod = SK_IM_DEFAULT;
	/*
	 * NOTE(review): if the tunable is absent, resource_int_value()
	 * returns non-zero and 'error' stays set until bus_setup_intr()
	 * overwrites it below; harmless today, but fragile if code is
	 * inserted in between that tests 'error'.
	 */
	error = resource_int_value(device_get_name(dev), unit,
	    "int_mod", &sc->sk_int_mod);
	if (error == 0) {
		if (sc->sk_int_mod < SK_IM_MIN ||
		    sc->sk_int_mod > SK_IM_MAX) {
			printf("skc%d: int_mod value out of range; "
			    "using default: %d\n", unit, SK_IM_DEFAULT);
			sc->sk_int_mod = SK_IM_DEFAULT;
		}
	}

	/* Reset the adapter. */
	sk_reset(sc);

	sc->sk_unit = unit;

	/* Read and save vital product data from EEPROM. */
	sk_vpd_read(sc);

	skrs = sk_win_read_1(sc, SK_EPROM0);
	if (sc->sk_type == SK_GENESIS) {
		/* Read and save RAM size and RAMbuffer offset */
		switch(skrs) {
		case SK_RAMSIZE_512K_64:
			sc->sk_ramsize = 0x80000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		case SK_RAMSIZE_1024K_64:
			sc->sk_ramsize = 0x100000;
			sc->sk_rboff = SK_RBOFF_80000;
			break;
		case SK_RAMSIZE_1024K_128:
			sc->sk_ramsize = 0x100000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		case SK_RAMSIZE_2048K_128:
			sc->sk_ramsize = 0x200000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		default:
			printf("skc%d: unknown ram size: %d\n",
			    sc->sk_unit, sk_win_read_1(sc, SK_EPROM0));
			error = ENXIO;
			goto fail;
		}
	} else { /* SK_YUKON_FAMILY */
		/* Yukon encodes the RAM size in 4K units; 0 means 128K. */
		if (skrs == 0x00)
			sc->sk_ramsize = 0x20000;
		else
			sc->sk_ramsize = skrs * (1<<12);
		sc->sk_rboff = SK_RBOFF_0;
	}

	/* Read and save physical media type */
	switch(sk_win_read_1(sc, SK_PMDTYPE)) {
	case SK_PMD_1000BASESX:
		sc->sk_pmd = IFM_1000_SX;
		break;
	case SK_PMD_1000BASELX:
		sc->sk_pmd = IFM_1000_LX;
		break;
	case SK_PMD_1000BASECX:
		sc->sk_pmd = IFM_1000_CX;
		break;
	case SK_PMD_1000BASETX:
		sc->sk_pmd = IFM_1000_T;
		break;
	default:
		printf("skc%d: unknown media type: 0x%x\n",
		    sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE));
		error = ENXIO;
		goto fail;
	}

	/* Determine whether to name it with VPD PN or just make it up.
	 * Marvell Yukon VPD PN seems to freqently be bogus. */
	switch (pci_get_device(dev)) {
	case DEVICEID_SK_V1:
	case DEVICEID_BELKIN_5005:
	case DEVICEID_3COM_3C940:
	case DEVICEID_LINKSYS_EG1032:
	case DEVICEID_DLINK_DGE530T:
		/* Stay with VPD PN. */
		pname = sc->sk_vpd_prodname;
		break;
	case DEVICEID_SK_V2:
		/* YUKON VPD PN might bear no resemblance to reality. */
		switch (sc->sk_type) {
		case SK_GENESIS:
			/* Stay with VPD PN. */
			pname = sc->sk_vpd_prodname;
			break;
		case SK_YUKON:
			pname = "Marvell Yukon Gigabit Ethernet";
			break;
		case SK_YUKON_LITE:
			pname = "Marvell Yukon Lite Gigabit Ethernet";
			break;
		case SK_YUKON_LP:
			pname = "Marvell Yukon LP Gigabit Ethernet";
			break;
		default:
			pname = "Marvell Yukon (Unknown) Gigabit Ethernet";
			break;
		}

		/* Yukon Lite Rev. A0 needs special test. */
		if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) {
			u_int32_t far;
			u_int8_t testbyte;

			/* Save flash address register before testing. */
			far = sk_win_read_4(sc, SK_EP_ADDR);

			/* Probe a scratch byte: only Rev. A0 retains it. */
			sk_win_write_1(sc, SK_EP_ADDR+0x03, 0xff);
			testbyte = sk_win_read_1(sc, SK_EP_ADDR+0x03);

			if (testbyte != 0x00) {
				/* Yukon Lite Rev. A0 detected. */
				sc->sk_type = SK_YUKON_LITE;
				sc->sk_rev = SK_YUKON_LITE_REV_A0;
				/* Restore flash address register. */
				sk_win_write_4(sc, SK_EP_ADDR, far);
			}
		}
		break;
	default:
		device_printf(dev, "unknown device: vendor=%04x, device=%04x, "
		    "chipver=%02x, rev=%x\n",
		    pci_get_vendor(dev), pci_get_device(dev),
		    sc->sk_type, sc->sk_rev);
		error = ENXIO;
		goto fail;
	}

	if (sc->sk_type == SK_YUKON_LITE) {
		switch (sc->sk_rev) {
		case SK_YUKON_LITE_REV_A0:
			revstr = "A0";
			break;
		case SK_YUKON_LITE_REV_A1:
			revstr = "A1";
			break;
		case SK_YUKON_LITE_REV_A3:
			revstr = "A3";
			break;
		default:
			revstr = "";
			break;
		}
	} else {
		revstr = "";
	}

	/* Announce the product name and more VPD data if there. */
	device_printf(dev, "%s rev. %s(0x%x)\n", pname, revstr, sc->sk_rev);

	if (bootverbose) {
		/* Dump selected VPD read-only keywords (PN/EC/MN/SN). */
		if (sc->sk_vpd_readonly != NULL &&
		    sc->sk_vpd_readonly_len != 0) {
			char buf[256];
			char *dp = sc->sk_vpd_readonly;
			uint16_t l, len = sc->sk_vpd_readonly_len;

			while (len >= 3) {
				if ((*dp == 'P' && *(dp+1) == 'N') ||
				    (*dp == 'E' && *(dp+1) == 'C') ||
				    (*dp == 'M' && *(dp+1) == 'N') ||
				    (*dp == 'S' && *(dp+1) == 'N')) {
					l = 0;
					while (l < *(dp+2)) {
						buf[l] = *(dp+3+l);
						++l;
					}
					buf[l] = '\0';
					device_printf(dev, "%c%c: %s\n",
					    *dp, *(dp+1), buf);
					len -= (3 + l);
					dp += (3 + l);
				} else {
					/* Skip keywords we don't print. */
					len -= (3 + *(dp+2));
					dp += (3 + *(dp+2));
				}
			}
		}
		device_printf(dev, "chip ver = 0x%02x\n", sc->sk_type);
		device_printf(dev, "chip rev = 0x%02x\n", sc->sk_rev);
		device_printf(dev, "SK_EPROM0 = 0x%02x\n", skrs);
		device_printf(dev, "SRAM size = 0x%06x\n", sc->sk_ramsize);
	}

	/* Create the logical sk child for port A (and B on dual-MAC cards). */
	sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
	port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
	*port = SK_PORT_A;
	device_set_ivars(sc->sk_devs[SK_PORT_A], port);

	if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
		sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
		port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
		*port = SK_PORT_B;
		device_set_ivars(sc->sk_devs[SK_PORT_B], port);
	}

	/* Turn on the 'driver is loaded' LED. */
	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);

	bus_generic_attach(dev);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->sk_irq, INTR_TYPE_NET|INTR_MPSAFE,
	    sk_intr, sc, &sc->sk_intrhand);

	if (error) {
		printf("skc%d: couldn't set up irq\n", unit);
		goto fail;
	}

fail:
	if (error)
		skc_detach(dev);

	return(error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
sk_detach(dev)
	device_t		dev;
{
	struct sk_if_softc	*sc_if;
	struct ifnet		*ifp;

	sc_if = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx),
	    ("sk mutex not initialized in sk_detach"));
	SK_IF_LOCK(sc_if);

	ifp = &sc_if->arpcom.ac_if;
	/* These should only be active if attach_xmac succeeded */
	if (device_is_attached(dev)) {
		sk_stop(sc_if);
		/* Can't hold locks while calling detach */
		SK_IF_UNLOCK(sc_if);
		ether_ifdetach(ifp);
		SK_IF_LOCK(sc_if);
	}
	/*
	 * We're generally called from skc_detach() which is using
	 * device_delete_child() to get to here. It's already trashed
	 * miibus for us, so don't do it here or we'll panic.
	 */
	/*
	if (sc_if->sk_miibus != NULL)
		device_delete_child(dev, sc_if->sk_miibus);
	*/
	bus_generic_detach(dev);
	if (sc_if->sk_cdata.sk_jumbo_buf != NULL)
		sk_free_jumbo_mem(sc_if);
	if (sc_if->sk_rdata != NULL) {
		contigfree(sc_if->sk_rdata, sizeof(struct sk_ring_data),
		    M_DEVBUF);
	}
	SK_IF_UNLOCK(sc_if);

	return(0);
}

/*
 * Detach the controller: delete the per-port child devices (and their
 * ivars), free VPD strings, tear down the interrupt and bus resources,
 * then destroy the softc mutex.  Safe to call from the attach failure
 * path, so every release is guarded by an allocation check.
 */
static int
skc_detach(dev)
	device_t		dev;
{
	struct sk_softc		*sc;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized"));

	if (device_is_alive(dev)) {
		if (sc->sk_devs[SK_PORT_A] != NULL) {
			free(device_get_ivars(sc->sk_devs[SK_PORT_A]), M_DEVBUF);
			device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
		}
		if (sc->sk_devs[SK_PORT_B] != NULL) {
			free(device_get_ivars(sc->sk_devs[SK_PORT_B]), M_DEVBUF);
			device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
		}
		bus_generic_detach(dev);
	}

	if (sc->sk_vpd_prodname != NULL)
		free(sc->sk_vpd_prodname, M_DEVBUF);
	if (sc->sk_vpd_readonly != NULL)
		free(sc->sk_vpd_readonly, M_DEVBUF);

	if (sc->sk_intrhand)
		bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
	if (sc->sk_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
	if (sc->sk_res)
		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);

	mtx_destroy(&sc->sk_mtx);

	return(0);
}

/*
 * Encapsulate an mbuf chain into the TX descriptor ring starting at
 * *txidx.  On success, *txidx is advanced past the last descriptor used
 * and ownership of the first descriptor is handed to the NIC last so
 * the chip never sees a partially built chain.  Returns ENOBUFS if the
 * ring lacks room (caller requeues the mbuf).
 */
static int
sk_encap(sc_if, m_head, txidx)
	struct sk_if_softc	*sc_if;
	struct mbuf		*m_head;
	u_int32_t		*txidx;
{
	struct sk_tx_desc	*f = NULL;
	struct mbuf		*m;
	u_int32_t		frag, cur, cnt = 0;

	SK_IF_LOCK_ASSERT(sc_if);

	m = m_head;
	cur = frag = *txidx;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			/* Keep at least 2 descriptors free at all times. */
			if ((SK_TX_RING_CNT -
			    (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2)
				return(ENOBUFS);
			f = &sc_if->sk_rdata->sk_tx_ring[frag];
			f->sk_data_lo = vtophys(mtod(m, vm_offset_t));
			f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT;
			if (cnt == 0)
				f->sk_ctl |= SK_TXCTL_FIRSTFRAG;
			else
				f->sk_ctl |= SK_TXCTL_OWN;
			cur = frag;
			SK_INC(frag, SK_TX_RING_CNT);
			cnt++;
		}
	}

	/*
	 * NOTE(review): the loop above only exits with m == NULL, so this
	 * check appears to be dead code (a leftover from a break-on-full
	 * loop structure).
	 */
	if (m != NULL)
		return(ENOBUFS);

	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
	    SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR;
	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
	/* Hand the first descriptor to the NIC only after the rest are set. */
	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN;
	sc_if->sk_cdata.sk_tx_cnt += cnt;

	*txidx = frag;

	return(0);
}

/*
 * ifnet if_start handler: drain the send queue into the TX ring and
 * kick the transmit BMU if any frames were queued.
 */
static void
sk_start(ifp)
	struct ifnet		*ifp;
{
	struct sk_softc		*sc;
	struct sk_if_softc	*sc_if;
	struct mbuf		*m_head = NULL;
	u_int32_t		idx;

	sc_if = ifp->if_softc;
	sc = sc_if->sk_softc;

	SK_IF_LOCK(sc_if);

	idx = sc_if->sk_cdata.sk_tx_prod;

	while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (sk_encap(sc_if, m_head, &idx)) {
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	/* Transmit */
	if (idx != sc_if->sk_cdata.sk_tx_prod) {
		sc_if->sk_cdata.sk_tx_prod = idx;
		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
	SK_IF_UNLOCK(sc_if);

	return;
}


/*
 * ifnet watchdog: the TX timer set in sk_start() expired without
 * sk_txeof() clearing it, so assume the chip wedged and reinitialize.
 */
static void
sk_watchdog(ifp)
	struct ifnet		*ifp;
{
	struct sk_if_softc	*sc_if;

	sc_if = ifp->if_softc;

	printf("sk%d: watchdog timeout\n", sc_if->sk_unit);
	ifp->if_flags &= ~IFF_RUNNING;
	sk_init(sc_if);

	return;
}

/*
 * Device shutdown method: turn the LED off and put the controller
 * (and its XMACs) back into reset for a clean system shutdown.
 */
static void
skc_shutdown(dev)
	device_t		dev;
{
	struct sk_softc		*sc;

	sc = device_get_softc(dev);
	SK_LOCK(sc);

	/* Turn off the 'driver is loaded' LED. */
	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);

	/*
	 * Reset the GEnesis controller. Doing this should also
	 * assert the resets on the attached XMAC(s).
	 */
	sk_reset(sc);
	SK_UNLOCK(sc);

	return;
}

/*
 * Process received frames: walk the RX ring from sk_rx_prod until we
 * reach a descriptor the NIC still owns, replacing or recycling the
 * jumbo buffer for each completed frame and passing good packets up
 * the stack (with the softc lock dropped around if_input).
 */
static void
sk_rxeof(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct sk_chain		*cur_rx;
	int			total_len = 0;
	int			i;
	u_int32_t		rxstat;

	sc = sc_if->sk_softc;
	ifp = &sc_if->arpcom.ac_if;
	i = sc_if->sk_cdata.sk_rx_prod;
	cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];

	SK_LOCK_ASSERT(sc);

	while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) {

		cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
		rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat;
		m = cur_rx->sk_mbuf;
		cur_rx->sk_mbuf = NULL;
		total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl);
		SK_INC(i, SK_RX_RING_CNT);

		if (rxstat & XM_RXSTAT_ERRFRAME) {
			/* Bad frame: count it and recycle the old buffer. */
			ifp->if_ierrors++;
			sk_newbuf(sc_if, cur_rx, m);
			continue;
		}

		/*
		 * Try to allocate a new jumbo buffer. If that
		 * fails, copy the packet to mbufs and put the
		 * jumbo buffer back in the ring so it can be
		 * re-used. If allocating mbufs fails, then we
		 * have to drop the packet.
		 */
		if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) {
			struct mbuf		*m0;
			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN,
			    ifp, NULL);
			sk_newbuf(sc_if, cur_rx, m);
			if (m0 == NULL) {
				printf("sk%d: no receive buffers "
				    "available -- packet dropped!\n",
				    sc_if->sk_unit);
				ifp->if_ierrors++;
				continue;
			}
			m = m0;
		} else {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}

		ifp->if_ipackets++;
		/* Drop the lock across the stack input call. */
		SK_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		SK_LOCK(sc);
	}

	sc_if->sk_cdata.sk_rx_prod = i;

	return;
}

/*
 * Reclaim transmitted descriptors: free the mbufs for frames the NIC
 * has finished with, clear the watchdog when the ring drains, and lift
 * IFF_OACTIVE once there is room to queue again.
 */
static void
sk_txeof(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct sk_tx_desc	*cur_tx;
	struct ifnet		*ifp;
	u_int32_t		idx;

	sc = sc_if->sk_softc;
	ifp = &sc_if->arpcom.ac_if;

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	idx = sc_if->sk_cdata.sk_tx_cons;
	while(idx != sc_if->sk_cdata.sk_tx_prod) {
		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
		if (cur_tx->sk_ctl & SK_TXCTL_OWN)
			break;
		if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG)
			ifp->if_opackets++;
		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;
		}
		sc_if->sk_cdata.sk_tx_cnt--;
		SK_INC(idx, SK_TX_RING_CNT);
	}

	if (sc_if->sk_cdata.sk_tx_cnt == 0) {
		ifp->if_timer = 0;
	} else /* nudge chip to keep tx ring moving */
		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

	if (sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 2)
		ifp->if_flags &= ~IFF_OACTIVE;

	sc_if->sk_cdata.sk_tx_cons = idx;
}

/*
 * Periodic timer used while waiting for XMAC link resync: poll the
 * GP0 link_sync pin, and once a stable link is seen, re-enable the GP0
 * interrupt, run the MII tick and cancel ourselves.  For Broadcom PHYs
 * the work is delegated to sk_intr_bcom() instead.
 */
static void
sk_tick(xsc_if)
	void			*xsc_if;
{
	struct sk_if_softc	*sc_if;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	int			i;

	sc_if = xsc_if;
	SK_IF_LOCK(sc_if);
	ifp = &sc_if->arpcom.ac_if;
	mii = device_get_softc(sc_if->sk_miibus);

	if (!(ifp->if_flags & IFF_UP)) {
		SK_IF_UNLOCK(sc_if);
		return;
	}

	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		sk_intr_bcom(sc_if);
		SK_IF_UNLOCK(sc_if);
		return;
	}

	/*
	 * According to SysKonnect, the correct way to verify that
	 * the link has come back up is to poll bit 0 of the GPIO
	 * register three times. This pin has the signal from the
	 * link_sync pin connected to it; if we read the same link
	 * state 3 times in a row, we know the link is up.
	 */
	for (i = 0; i < 3; i++) {
		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
			break;
	}

	if (i != 3) {
		/* Not stable yet: rearm ourselves and try again later. */
		sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		SK_IF_UNLOCK(sc_if);
		return;
	}

	/* Turn the GP0 interrupt back on. */
	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
	SK_XM_READ_2(sc_if, XM_ISR);
	mii_tick(mii);
	untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);

	SK_IF_UNLOCK(sc_if);
	return;
}

/*
 * Service a Broadcom PHY interrupt: read (and thereby ack) the PHY ISR,
 * then track link-state transitions, driving the link LED and the MII
 * layer accordingly.  RX/TX are disabled on the XMAC for the duration.
 */
static void
sk_intr_bcom(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct mii_data		*mii;
	struct ifnet		*ifp;
	int			status;
	mii = device_get_softc(sc_if->sk_miibus);
	ifp = &sc_if->arpcom.ac_if;

	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	/*
	 * Read the PHY interrupt register to make sure
	 * we clear any pending interrupts.
	 */
	status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR);

	if (!(ifp->if_flags & IFF_RUNNING)) {
		sk_init_xmac(sc_if);
		return;
	}

	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
		int			lstat;
		lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_AUXSTS);

		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
			/* Link lost: renegotiate and turn the LED off. */
			mii_mediachg(mii);
			/* Turn off the link LED. */
			SK_IF_WRITE_1(sc_if, 0,
			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
			sc_if->sk_link = 0;
		} else if (status & BRGPHY_ISR_LNK_CHG) {
			sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
			    BRGPHY_MII_IMR, 0xFF00);
			mii_tick(mii);
			sc_if->sk_link = 1;
			/* Turn on the link LED. */
			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
			    SK_LINKLED_BLINK_OFF);
		} else {
			mii_tick(mii);
			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		}
	}

	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	return;
}

/*
 * Service an XMAC interrupt: on GP0 (link down) or autoneg-done events
 * start the sk_tick() link-resync poller; flush the TX/RX FIFOs on
 * underrun/overrun.  The final ISR read acks remaining conditions.
 */
static void
sk_intr_xmac(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	u_int16_t		status;

	sc = sc_if->sk_softc;
	status = SK_XM_READ_2(sc_if, XM_ISR);

	/*
	 * Link has gone down. Start MII tick timeout to
	 * watch for link resync.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
		if (status & XM_ISR_GP0_SET) {
			SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		}

		if (status & XM_ISR_AUTONEG_DONE) {
			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		}
	}

	if (status & XM_IMR_TX_UNDERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);

	if (status & XM_IMR_RX_OVERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);

	status = SK_XM_READ_2(sc_if, XM_ISR);

	return;
}

/*
 * Service a Yukon GMAC interrupt.  The status is read (which
 * acknowledges it) and otherwise discarded — no per-event handling
 * is implemented for the Yukon MAC here.
 */
static void
sk_intr_yukon(sc_if)
	struct sk_if_softc	*sc_if;
{
	int			status;

	status = SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);

	return;
}

/*
 * Shared interrupt handler for the controller: loop on the interrupt
 * source register, dispatching RX/TX completions and MAC/PHY events
 * for both ports, then restart any stalled transmit queues.
 */
static void
sk_intr(xsc)
	void			*xsc;
{
	struct sk_softc		*sc = xsc;
	struct sk_if_softc	*sc_if0 = NULL, *sc_if1 = NULL;
	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
	u_int32_t		status;

	SK_LOCK(sc);

	sc_if0 = sc->sk_if[SK_PORT_A];
	sc_if1 = sc->sk_if[SK_PORT_B];

	if (sc_if0 != NULL)
		ifp0 = &sc_if0->arpcom.ac_if;
	if (sc_if1 != NULL)
		ifp1 = &sc_if1->arpcom.ac_if;

	for (;;) {
		status = CSR_READ_4(sc, SK_ISSR);
		if (!(status & sc->sk_intrmask))
			break;

		/* Handle receive interrupts first. */
		if (status & SK_ISR_RX1_EOF) {
			sk_rxeof(sc_if0);
			CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
		}
		if (status & SK_ISR_RX2_EOF) {
			sk_rxeof(sc_if1);
			CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
		}

		/* Then transmit interrupts. */
		if (status & SK_ISR_TX1_S_EOF) {
			sk_txeof(sc_if0);
			CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
			    SK_TXBMU_CLR_IRQ_EOF);
		}
		if (status & SK_ISR_TX2_S_EOF) {
			sk_txeof(sc_if1);
			CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
			    SK_TXBMU_CLR_IRQ_EOF);
		}

		/* Then MAC interrupts. */
		/*
		 * NOTE(review): unlike the SK_ISR_EXTERNAL_REG case below,
		 * these do not check ifp0/ifp1 for NULL before dereferencing;
		 * presumably the interrupt mask prevents MAC2 events on
		 * single-port cards — verify.
		 */
		if (status & SK_ISR_MAC1 && ifp0->if_flags & IFF_RUNNING) {
			if (sc->sk_type == SK_GENESIS)
				sk_intr_xmac(sc_if0);
			else
				sk_intr_yukon(sc_if0);
		}

		if (status & SK_ISR_MAC2 && ifp1->if_flags & IFF_RUNNING) {
			if (sc->sk_type == SK_GENESIS)
				sk_intr_xmac(sc_if1);
			else
				sk_intr_yukon(sc_if1);
		}

		if (status & SK_ISR_EXTERNAL_REG) {
			if (ifp0 != NULL &&
			    sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
				sk_intr_bcom(sc_if0);
			if (ifp1 != NULL &&
			    sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
				sk_intr_bcom(sc_if1);
		}
	}

	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Restart output if anything queued up while we were busy. */
	if (ifp0 != NULL && !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
		sk_start(ifp0);
	if (ifp1 != NULL && !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
		sk_start(ifp1);

	SK_UNLOCK(sc);

	return;
}

static void
sk_init_xmac(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct ifnet		*ifp;
	struct sk_bcom_hack bhack[] = {
	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
	{ 0, 0 } };

	sc = sc_if->sk_softc;
	ifp = &sc_if->arpcom.ac_if;

	/* Unreset the XMAC. */
	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
	DELAY(1000);

	/* Reset the XMAC's internal state.
*/ 2453 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC); 2454 2455 /* Save the XMAC II revision */ 2456 sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID)); 2457 2458 /* 2459 * Perform additional initialization for external PHYs, 2460 * namely for the 1000baseTX cards that use the XMAC's 2461 * GMII mode. 2462 */ 2463 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { 2464 int i = 0; 2465 u_int32_t val; 2466 2467 /* Take PHY out of reset. */ 2468 val = sk_win_read_4(sc, SK_GPIO); 2469 if (sc_if->sk_port == SK_PORT_A) 2470 val |= SK_GPIO_DIR0|SK_GPIO_DAT0; 2471 else 2472 val |= SK_GPIO_DIR2|SK_GPIO_DAT2; 2473 sk_win_write_4(sc, SK_GPIO, val); 2474 2475 /* Enable GMII mode on the XMAC. */ 2476 SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE); 2477 2478 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, 2479 BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET); 2480 DELAY(10000); 2481 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, 2482 BRGPHY_MII_IMR, 0xFFF0); 2483 2484 /* 2485 * Early versions of the BCM5400 apparently have 2486 * a bug that requires them to have their reserved 2487 * registers initialized to some magic values. I don't 2488 * know what the numbers do, I'm just the messenger. 
2489 */ 2490 if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03) 2491 == 0x6041) { 2492 while(bhack[i].reg) { 2493 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, 2494 bhack[i].reg, bhack[i].val); 2495 i++; 2496 } 2497 } 2498 } 2499 2500 /* Set station address */ 2501 SK_XM_WRITE_2(sc_if, XM_PAR0, 2502 *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0])); 2503 SK_XM_WRITE_2(sc_if, XM_PAR1, 2504 *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2])); 2505 SK_XM_WRITE_2(sc_if, XM_PAR2, 2506 *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4])); 2507 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION); 2508 2509 if (ifp->if_flags & IFF_BROADCAST) { 2510 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD); 2511 } else { 2512 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD); 2513 } 2514 2515 /* We don't need the FCS appended to the packet. */ 2516 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS); 2517 2518 /* We want short frames padded to 60 bytes. */ 2519 SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD); 2520 2521 /* 2522 * Enable the reception of all error frames. This is is 2523 * a necessary evil due to the design of the XMAC. The 2524 * XMAC's receive FIFO is only 8K in size, however jumbo 2525 * frames can be up to 9000 bytes in length. When bad 2526 * frame filtering is enabled, the XMAC's RX FIFO operates 2527 * in 'store and forward' mode. For this to work, the 2528 * entire frame has to fit into the FIFO, but that means 2529 * that jumbo frames larger than 8192 bytes will be 2530 * truncated. Disabling all bad frame filtering causes 2531 * the RX FIFO to operate in streaming mode, in which 2532 * case the XMAC will start transfering frames out of the 2533 * RX FIFO as soon as the FIFO threshold is reached. 
2534 */ 2535 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES| 2536 XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS| 2537 XM_MODE_RX_INRANGELEN); 2538 2539 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 2540 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK); 2541 else 2542 SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK); 2543 2544 /* 2545 * Bump up the transmit threshold. This helps hold off transmit 2546 * underruns when we're blasting traffic from both ports at once. 2547 */ 2548 SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH); 2549 2550 /* Set promiscuous mode */ 2551 sk_setpromisc(sc_if); 2552 2553 /* Set multicast filter */ 2554 sk_setmulti(sc_if); 2555 2556 /* Clear and enable interrupts */ 2557 SK_XM_READ_2(sc_if, XM_ISR); 2558 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) 2559 SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS); 2560 else 2561 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF); 2562 2563 /* Configure MAC arbiter */ 2564 switch(sc_if->sk_xmac_rev) { 2565 case XM_XMAC_REV_B2: 2566 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2); 2567 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2); 2568 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2); 2569 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2); 2570 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2); 2571 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2); 2572 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2); 2573 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2); 2574 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2); 2575 break; 2576 case XM_XMAC_REV_C1: 2577 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1); 2578 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1); 2579 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1); 2580 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1); 2581 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1); 2582 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1); 2583 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1); 2584 
sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1); 2585 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2); 2586 break; 2587 default: 2588 break; 2589 } 2590 sk_win_write_2(sc, SK_MACARB_CTL, 2591 SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF); 2592 2593 sc_if->sk_link = 1; 2594 2595 return; 2596 } 2597 2598 static void 2599 sk_init_yukon(sc_if) 2600 struct sk_if_softc *sc_if; 2601 { 2602 u_int32_t phy; 2603 u_int16_t reg; 2604 struct sk_softc *sc; 2605 struct ifnet *ifp; 2606 int i; 2607 2608 sc = sc_if->sk_softc; 2609 ifp = &sc_if->arpcom.ac_if; 2610 2611 if (sc->sk_type == SK_YUKON_LITE && 2612 sc->sk_rev == SK_YUKON_LITE_REV_A3) { 2613 /* Take PHY out of reset. */ 2614 sk_win_write_4(sc, SK_GPIO, 2615 (sk_win_read_4(sc, SK_GPIO) | SK_GPIO_DIR9) & ~SK_GPIO_DAT9); 2616 } 2617 2618 /* GMAC and GPHY Reset */ 2619 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET); 2620 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET); 2621 DELAY(1000); 2622 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_CLEAR); 2623 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET); 2624 DELAY(1000); 2625 2626 phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP | 2627 SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE; 2628 2629 switch(sc_if->sk_softc->sk_pmd) { 2630 case IFM_1000_SX: 2631 case IFM_1000_LX: 2632 phy |= SK_GPHY_FIBER; 2633 break; 2634 2635 case IFM_1000_CX: 2636 case IFM_1000_T: 2637 phy |= SK_GPHY_COPPER; 2638 break; 2639 } 2640 2641 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET); 2642 DELAY(1000); 2643 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR); 2644 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF | 2645 SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR); 2646 2647 /* unused read of the interrupt source register */ 2648 SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR); 2649 2650 reg = SK_YU_READ_2(sc_if, YUKON_PAR); 2651 2652 /* MIB Counter Clear Mode set */ 2653 reg |= YU_PAR_MIB_CLR; 2654 SK_YU_WRITE_2(sc_if, 
YUKON_PAR, reg); 2655 2656 /* MIB Counter Clear Mode clear */ 2657 reg &= ~YU_PAR_MIB_CLR; 2658 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg); 2659 2660 /* receive control reg */ 2661 SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR); 2662 2663 /* transmit parameter register */ 2664 SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) | 2665 YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) ); 2666 2667 /* serial mode register */ 2668 reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e); 2669 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 2670 reg |= YU_SMR_MFL_JUMBO; 2671 SK_YU_WRITE_2(sc_if, YUKON_SMR, reg); 2672 2673 /* Setup Yukon's address */ 2674 for (i = 0; i < 3; i++) { 2675 /* Write Source Address 1 (unicast filter) */ 2676 SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4, 2677 sc_if->arpcom.ac_enaddr[i * 2] | 2678 sc_if->arpcom.ac_enaddr[i * 2 + 1] << 8); 2679 } 2680 2681 for (i = 0; i < 3; i++) { 2682 reg = sk_win_read_2(sc_if->sk_softc, 2683 SK_MAC1_0 + i * 2 + sc_if->sk_port * 8); 2684 SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg); 2685 } 2686 2687 /* Set promiscuous mode */ 2688 sk_setpromisc(sc_if); 2689 2690 /* Set multicast filter */ 2691 sk_setmulti(sc_if); 2692 2693 /* enable interrupt mask for counter overflows */ 2694 SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0); 2695 SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0); 2696 SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0); 2697 2698 /* Configure RX MAC FIFO */ 2699 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR); 2700 SK_IF_WRITE_4(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON); 2701 2702 /* Configure TX MAC FIFO */ 2703 SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR); 2704 SK_IF_WRITE_4(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON); 2705 } 2706 2707 /* 2708 * Note that to properly initialize any part of the GEnesis chip, 2709 * you first have to take it out of reset mode. 
2710 */ 2711 static void 2712 sk_init(xsc) 2713 void *xsc; 2714 { 2715 struct sk_if_softc *sc_if = xsc; 2716 struct sk_softc *sc; 2717 struct ifnet *ifp; 2718 struct mii_data *mii; 2719 u_int16_t reg; 2720 u_int32_t imr; 2721 2722 SK_IF_LOCK(sc_if); 2723 2724 ifp = &sc_if->arpcom.ac_if; 2725 sc = sc_if->sk_softc; 2726 mii = device_get_softc(sc_if->sk_miibus); 2727 2728 if (ifp->if_flags & IFF_RUNNING) { 2729 SK_IF_UNLOCK(sc_if); 2730 return; 2731 } 2732 2733 /* Cancel pending I/O and free all RX/TX buffers. */ 2734 sk_stop(sc_if); 2735 2736 if (sc->sk_type == SK_GENESIS) { 2737 /* Configure LINK_SYNC LED */ 2738 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON); 2739 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, 2740 SK_LINKLED_LINKSYNC_ON); 2741 2742 /* Configure RX LED */ 2743 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, 2744 SK_RXLEDCTL_COUNTER_START); 2745 2746 /* Configure TX LED */ 2747 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, 2748 SK_TXLEDCTL_COUNTER_START); 2749 } 2750 2751 /* Configure I2C registers */ 2752 2753 /* Configure XMAC(s) */ 2754 switch (sc->sk_type) { 2755 case SK_GENESIS: 2756 sk_init_xmac(sc_if); 2757 break; 2758 case SK_YUKON: 2759 case SK_YUKON_LITE: 2760 case SK_YUKON_LP: 2761 sk_init_yukon(sc_if); 2762 break; 2763 } 2764 mii_mediachg(mii); 2765 2766 if (sc->sk_type == SK_GENESIS) { 2767 /* Configure MAC FIFOs */ 2768 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET); 2769 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END); 2770 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON); 2771 2772 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET); 2773 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END); 2774 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON); 2775 } 2776 2777 /* Configure transmit arbiter(s) */ 2778 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, 2779 SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON); 2780 2781 /* Configure RAMbuffers */ 2782 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET); 2783 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, 
sc_if->sk_rx_ramstart); 2784 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart); 2785 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart); 2786 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend); 2787 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON); 2788 2789 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET); 2790 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON); 2791 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart); 2792 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart); 2793 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart); 2794 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend); 2795 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON); 2796 2797 /* Configure BMUs */ 2798 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE); 2799 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO, 2800 vtophys(&sc_if->sk_rdata->sk_rx_ring[0])); 2801 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0); 2802 2803 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE); 2804 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO, 2805 vtophys(&sc_if->sk_rdata->sk_tx_ring[0])); 2806 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0); 2807 2808 /* Init descriptors */ 2809 if (sk_init_rx_ring(sc_if) == ENOBUFS) { 2810 printf("sk%d: initialization failed: no " 2811 "memory for rx buffers\n", sc_if->sk_unit); 2812 sk_stop(sc_if); 2813 SK_IF_UNLOCK(sc_if); 2814 return; 2815 } 2816 sk_init_tx_ring(sc_if); 2817 2818 /* Set interrupt moderation if changed via sysctl. 
*/ 2819 /* SK_LOCK(sc); */ 2820 imr = sk_win_read_4(sc, SK_IMTIMERINIT); 2821 if (imr != SK_IM_USECS(sc->sk_int_mod)) { 2822 sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod)); 2823 printf("skc%d: interrupt moderation is %d us\n", 2824 sc->sk_unit, sc->sk_int_mod); 2825 } 2826 /* SK_UNLOCK(sc); */ 2827 2828 /* Configure interrupt handling */ 2829 CSR_READ_4(sc, SK_ISSR); 2830 if (sc_if->sk_port == SK_PORT_A) 2831 sc->sk_intrmask |= SK_INTRS1; 2832 else 2833 sc->sk_intrmask |= SK_INTRS2; 2834 2835 sc->sk_intrmask |= SK_ISR_EXTERNAL_REG; 2836 2837 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); 2838 2839 /* Start BMUs. */ 2840 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START); 2841 2842 switch(sc->sk_type) { 2843 case SK_GENESIS: 2844 /* Enable XMACs TX and RX state machines */ 2845 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE); 2846 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); 2847 break; 2848 case SK_YUKON: 2849 case SK_YUKON_LITE: 2850 case SK_YUKON_LP: 2851 reg = SK_YU_READ_2(sc_if, YUKON_GPCR); 2852 reg |= YU_GPCR_TXEN | YU_GPCR_RXEN; 2853 reg &= ~(YU_GPCR_SPEED_EN | YU_GPCR_DPLX_EN); 2854 SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg); 2855 } 2856 2857 ifp->if_flags |= IFF_RUNNING; 2858 ifp->if_flags &= ~IFF_OACTIVE; 2859 2860 SK_IF_UNLOCK(sc_if); 2861 2862 return; 2863 } 2864 2865 static void 2866 sk_stop(sc_if) 2867 struct sk_if_softc *sc_if; 2868 { 2869 int i; 2870 struct sk_softc *sc; 2871 struct ifnet *ifp; 2872 2873 SK_IF_LOCK(sc_if); 2874 sc = sc_if->sk_softc; 2875 ifp = &sc_if->arpcom.ac_if; 2876 2877 untimeout(sk_tick, sc_if, sc_if->sk_tick_ch); 2878 2879 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { 2880 u_int32_t val; 2881 2882 /* Put PHY back into reset. 
*/ 2883 val = sk_win_read_4(sc, SK_GPIO); 2884 if (sc_if->sk_port == SK_PORT_A) { 2885 val |= SK_GPIO_DIR0; 2886 val &= ~SK_GPIO_DAT0; 2887 } else { 2888 val |= SK_GPIO_DIR2; 2889 val &= ~SK_GPIO_DAT2; 2890 } 2891 sk_win_write_4(sc, SK_GPIO, val); 2892 } 2893 2894 /* Turn off various components of this interface. */ 2895 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC); 2896 switch (sc->sk_type) { 2897 case SK_GENESIS: 2898 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET); 2899 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET); 2900 break; 2901 case SK_YUKON: 2902 case SK_YUKON_LITE: 2903 case SK_YUKON_LP: 2904 SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET); 2905 SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET); 2906 break; 2907 } 2908 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE); 2909 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF); 2910 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE); 2911 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF); 2912 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF); 2913 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP); 2914 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP); 2915 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF); 2916 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF); 2917 2918 /* Disable interrupts */ 2919 if (sc_if->sk_port == SK_PORT_A) 2920 sc->sk_intrmask &= ~SK_INTRS1; 2921 else 2922 sc->sk_intrmask &= ~SK_INTRS2; 2923 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); 2924 2925 SK_XM_READ_2(sc_if, XM_ISR); 2926 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF); 2927 2928 /* Free RX and TX mbufs still in the queues. 
*/ 2929 for (i = 0; i < SK_RX_RING_CNT; i++) { 2930 if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) { 2931 m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf); 2932 sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL; 2933 } 2934 } 2935 2936 for (i = 0; i < SK_TX_RING_CNT; i++) { 2937 if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) { 2938 m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf); 2939 sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL; 2940 } 2941 } 2942 2943 ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE); 2944 SK_IF_UNLOCK(sc_if); 2945 return; 2946 } 2947 2948 static int 2949 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 2950 { 2951 int error, value; 2952 2953 if (!arg1) 2954 return (EINVAL); 2955 value = *(int *)arg1; 2956 error = sysctl_handle_int(oidp, &value, 0, req); 2957 if (error || !req->newptr) 2958 return (error); 2959 if (value < low || value > high) 2960 return (EINVAL); 2961 *(int *)arg1 = value; 2962 return (0); 2963 } 2964 2965 static int 2966 sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS) 2967 { 2968 return (sysctl_int_range(oidp, arg1, arg2, req, SK_IM_MIN, SK_IM_MAX)); 2969 } 2970