/* $OpenBSD: if_sk.c,v 1.33 2003/08/12 05:23:06 nate Exp $ */

/*-
 * Copyright (c) 1997, 1998, 1999, 2000
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD.  Supports
 * the SK-984x series adapters, both single port and dual port.
 * References:
 *	The XaQti XMAC II datasheet,
 *	http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
 *	The SysKonnect GEnesis manual, http://www.syskonnect.com
 *
 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
 * XMAC II datasheet online.  I have put my copy at people.freebsd.org as a
 * convenience to others until Vitesse corrects this problem:
 *
 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
 *
 * Written by Bill Paul <wpaul@ee.columbia.edu>
 * Department of Electrical Engineering
 * Columbia University, New York City
 */
/*
 * The SysKonnect gigabit ethernet adapters consist of two main
 * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
 * XMAC II gigabit ethernet MAC.  The XMAC provides all of the MAC
 * components and a PHY while the GEnesis controller provides a PCI
 * interface with DMA support.  Each card may have between 512K and
 * 2MB of SRAM on board depending on the configuration.
 *
 * The SysKonnect GEnesis controller can have either one or two XMAC
 * chips connected to it, allowing single or dual port NIC configurations.
 * SysKonnect has the distinction of being the only vendor on the market
 * with a dual port gigabit ethernet NIC.  The GEnesis provides dual FIFOs,
 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
 * XMAC registers.  This driver takes advantage of these features to allow
 * both XMACs to operate as independent interfaces.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/queue.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#if 0
#define SK_USEIOSPACE
#endif

#include <dev/sk/if_skreg.h>
#include <dev/sk/xmaciireg.h>
#include <dev/sk/yukonreg.h>

MODULE_DEPEND(sk, pci, 1, 1, 1);
MODULE_DEPEND(sk, ether, 1, 1, 1);
MODULE_DEPEND(sk, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#ifndef lint
static const char rcsid[] =
  "$FreeBSD$";
#endif

static struct sk_type sk_devs[] = {
	{
		VENDORID_SK,
		DEVICEID_SK_V1,
		"SysKonnect Gigabit Ethernet (V1.0)"
	},
	{
		VENDORID_SK,
		DEVICEID_SK_V2,
		"SysKonnect Gigabit Ethernet (V2.0)"
	},
	{
		VENDORID_MARVELL,
		DEVICEID_SK_V2,
		"Marvell Gigabit Ethernet"
	},
#ifdef not_yet
	{
		VENDORID_MARVELL,
		DEVICEID_MRVL_4360,
		"Marvell 88E8052 Gigabit Ethernet Controller"
	},
	{
		VENDORID_MARVELL,
		DEVICEID_MRVL_4361,
		"Marvell 88E8050 Gigabit Ethernet Controller"
	},
	{
		VENDORID_MARVELL,
		DEVICEID_MRVL_4362,
		"Marvell 88E8053 Gigabit Ethernet Controller"
	},
#endif
	{
		VENDORID_MARVELL,
		DEVICEID_BELKIN_5005,
		"Belkin F5D5005 Gigabit Ethernet"
	},
	{
		VENDORID_3COM,
		DEVICEID_3COM_3C940,
		"3Com 3C940 Gigabit Ethernet"
	},
	{
		VENDORID_LINKSYS,
		DEVICEID_LINKSYS_EG1032,
		"Linksys EG1032 Gigabit Ethernet"
	},
	{
		VENDORID_DLINK,
		DEVICEID_DLINK_DGE530T,
		"D-Link DGE-530T Gigabit Ethernet"
	},
	{ 0, 0, NULL }
};

static int skc_probe(device_t);
static int skc_attach(device_t);
static int skc_detach(device_t);
static void skc_shutdown(device_t);
static int skc_suspend(device_t);
static int skc_resume(device_t);
static int sk_detach(device_t);
static int sk_probe(device_t);
static int sk_attach(device_t);
static void sk_tick(void *);
static void sk_yukon_tick(void *);
static void sk_intr(void *);
static void sk_intr_xmac(struct sk_if_softc *);
static void sk_intr_bcom(struct sk_if_softc *);
static void sk_intr_yukon(struct sk_if_softc *);
static __inline void sk_rxcksum(struct ifnet *, struct mbuf *, u_int32_t);
static __inline int sk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t);
static void sk_rxeof(struct sk_if_softc *);
static void sk_jumbo_rxeof(struct sk_if_softc *);
static void sk_txeof(struct sk_if_softc *);
static void sk_txcksum(struct ifnet *, struct mbuf *, struct sk_tx_desc *);
static int sk_encap(struct sk_if_softc *, struct mbuf **);
static void sk_start(struct ifnet *);
static void sk_start_locked(struct ifnet *);
static int sk_ioctl(struct ifnet *, u_long, caddr_t);
static void sk_init(void *);
static void sk_init_locked(struct sk_if_softc *);
static void sk_init_xmac(struct sk_if_softc *);
static void sk_init_yukon(struct sk_if_softc *);
static void sk_stop(struct sk_if_softc *);
static void sk_watchdog(struct ifnet *);
static int sk_ifmedia_upd(struct ifnet *);
static void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void sk_reset(struct sk_softc *);
static __inline void sk_discard_rxbuf(struct sk_if_softc *, int);
static __inline void sk_discard_jumbo_rxbuf(struct sk_if_softc *, int);
static int sk_newbuf(struct sk_if_softc *, int);
static int sk_jumbo_newbuf(struct sk_if_softc *, int);
static void sk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int sk_dma_alloc(struct sk_if_softc *);
static void sk_dma_free(struct sk_if_softc *);
static void *sk_jalloc(struct sk_if_softc *);
static void sk_jfree(void *, void *);
static int sk_init_rx_ring(struct sk_if_softc *);
static int sk_init_jumbo_rx_ring(struct sk_if_softc *);
static void sk_init_tx_ring(struct sk_if_softc *);
static u_int32_t sk_win_read_4(struct sk_softc *, int);
static u_int16_t sk_win_read_2(struct sk_softc *, int);
static u_int8_t sk_win_read_1(struct sk_softc *, int);
static void sk_win_write_4(struct sk_softc *, int, u_int32_t);
static void sk_win_write_2(struct sk_softc *, int, u_int32_t);
static void sk_win_write_1(struct sk_softc *, int, u_int32_t);
static u_int8_t sk_vpd_readbyte(struct sk_softc *, int);
static void sk_vpd_read_res(struct sk_softc *, struct vpd_res *, int);
static void sk_vpd_read(struct sk_softc *);

static int sk_miibus_readreg(device_t, int, int);
static int sk_miibus_writereg(device_t, int, int, int);
static void sk_miibus_statchg(device_t);

static int sk_xmac_miibus_readreg(struct sk_if_softc *, int, int);
static int sk_xmac_miibus_writereg(struct sk_if_softc *, int, int, int);
static void sk_xmac_miibus_statchg(struct sk_if_softc *);

static int sk_marv_miibus_readreg(struct sk_if_softc *, int, int);
static int sk_marv_miibus_writereg(struct sk_if_softc *, int, int, int);
static void sk_marv_miibus_statchg(struct sk_if_softc *);

static uint32_t sk_xmchash(const uint8_t *);
static uint32_t sk_gmchash(const uint8_t *);
static void sk_setfilt(struct sk_if_softc *, u_int16_t *, int);
static void sk_setmulti(struct sk_if_softc *);
static void sk_setpromisc(struct sk_if_softc *);

static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high);
static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS);

#ifdef SK_USEIOSPACE
#define SK_RES		SYS_RES_IOPORT
#define SK_RID		SK_PCI_LOIO
#else
#define SK_RES		SYS_RES_MEMORY
#define SK_RID		SK_PCI_LOMEM
#endif

/*
 * It seems that SK-NET GENESIS supports very simple checksum offload
 * capability for Tx and I believe it can generate a 0 checksum value for
 * UDP packets in Tx as the hardware can't differentiate UDP packets from
 * TCP packets.  A 0 checksum value for a UDP packet is invalid, as it
 * means the sender didn't perform checksum computation.  For safety I
 * disabled UDP checksum offload capability at the moment.  Alternatively
 * we could introduce a LINK0/LINK1 flag as hme(4) did in its Tx checksum
 * offload routine (see the illustrative sketch below).
 */
#define SK_CSUM_FEATURES	(CSUM_TCP)
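/*
 * Illustrative sketch (not driver code, not compiled): why a raw zero
 * checksum from the hardware is a problem for UDP.  RFC 768 reserves the
 * value 0 to mean "no checksum computed", so software checksumming folds
 * a computed 0 to 0xffff; an offload engine that cannot tell UDP and TCP
 * apart may emit the raw 0 instead.  The helper name below is made up
 * for the example.
 */
#if 0
static uint16_t
example_cksum_fold(uint32_t sum)
{
	/* Fold the 32-bit ones-complement sum down to 16 bits. */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	sum = ~sum & 0xffff;
	/* RFC 768: a computed checksum of 0 is transmitted as 0xffff. */
	return (sum == 0 ? 0xffff : sum);
}
#endif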
/*
 * Note that we have newbus methods for both the GEnesis controller
 * itself and the XMAC(s).  The XMACs are children of the GEnesis, and
 * the miibus code is a child of the XMACs.  We need to do it this way
 * so that the miibus drivers can access the PHY registers on the
 * right PHY.  It's not quite what I had in mind, but it's the only
 * design that achieves the desired effect.
 */
static device_method_t skc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		skc_probe),
	DEVMETHOD(device_attach,	skc_attach),
	DEVMETHOD(device_detach,	skc_detach),
	DEVMETHOD(device_suspend,	skc_suspend),
	DEVMETHOD(device_resume,	skc_resume),
	DEVMETHOD(device_shutdown,	skc_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ 0, 0 }
};

static driver_t skc_driver = {
	"skc",
	skc_methods,
	sizeof(struct sk_softc)
};

static devclass_t skc_devclass;

static device_method_t sk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sk_probe),
	DEVMETHOD(device_attach,	sk_attach),
	DEVMETHOD(device_detach,	sk_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	sk_miibus_readreg),
	DEVMETHOD(miibus_writereg,	sk_miibus_writereg),
	DEVMETHOD(miibus_statchg,	sk_miibus_statchg),

	{ 0, 0 }
};

static driver_t sk_driver = {
	"sk",
	sk_methods,
	sizeof(struct sk_if_softc)
};

static devclass_t sk_devclass;

DRIVER_MODULE(skc, pci, skc_driver, skc_devclass, 0, 0);
DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0);
DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0);

#define SK_SETBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)

#define SK_CLRBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)

#define SK_WIN_SETBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)

#define SK_WIN_CLRBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)

#define SK_WIN_SETBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)

#define SK_WIN_CLRBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
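/*
 * The accessors below hide the chip's register window scheme.  A sketch
 * of the pattern, assuming SK_WIN()/SK_REG() (from if_skreg.h) split a
 * flat register offset into a window number and an offset within the
 * window region at SK_WIN_BASE; memory-mapped mode is flat, so the
 * offset is used directly there.
 */
#if 0
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));		 /* select window */
	val = CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)); /* access within it */
#endif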
static u_int32_t
sk_win_read_4(sc, reg)
	struct sk_softc *sc;
	int reg;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
#else
	return(CSR_READ_4(sc, reg));
#endif
}

static u_int16_t
sk_win_read_2(sc, reg)
	struct sk_softc *sc;
	int reg;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
#else
	return(CSR_READ_2(sc, reg));
#endif
}

static u_int8_t
sk_win_read_1(sc, reg)
	struct sk_softc *sc;
	int reg;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
#else
	return(CSR_READ_1(sc, reg));
#endif
}

static void
sk_win_write_4(sc, reg, val)
	struct sk_softc *sc;
	int reg;
	u_int32_t val;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
#else
	CSR_WRITE_4(sc, reg, val);
#endif
	return;
}

static void
sk_win_write_2(sc, reg, val)
	struct sk_softc *sc;
	int reg;
	u_int32_t val;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val);
#else
	CSR_WRITE_2(sc, reg, val);
#endif
	return;
}

static void
sk_win_write_1(sc, reg, val)
	struct sk_softc *sc;
	int reg;
	u_int32_t val;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
#else
	CSR_WRITE_1(sc, reg, val);
#endif
	return;
}

/*
 * The VPD EEPROM contains Vital Product Data, as suggested in
 * the PCI 2.1 specification.  The VPD data is separated into areas
 * denoted by resource IDs.  The SysKonnect VPD contains an ID string
 * resource (the name of the adapter), a read-only area resource
 * containing various key/data fields and a read/write area which
 * can be used to store asset management information or log messages.
 * We read the ID string and read-only area into buffers attached to
 * the controller softc structure for later use.  At the moment,
 * we only use the ID string during skc_attach().
 */
static u_int8_t
sk_vpd_readbyte(sc, addr)
	struct sk_softc *sc;
	int addr;
{
	int i;

	sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr);
	for (i = 0; i < SK_TIMEOUT; i++) {
		/* ASUS LOM takes a very long time to read VPD. */
		DELAY(100);
		if (sk_win_read_2(sc,
		    SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG)
			break;
	}

	if (i == SK_TIMEOUT)
		return(0);

	return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA)));
}

static void
sk_vpd_read_res(sc, res, addr)
	struct sk_softc *sc;
	struct vpd_res *res;
	int addr;
{
	int i;
	u_int8_t *ptr;

	ptr = (u_int8_t *)res;
	for (i = 0; i < sizeof(struct vpd_res); i++)
		ptr[i] = sk_vpd_readbyte(sc, i + addr);

	return;
}
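/*
 * Sketch of the walk sk_vpd_read() below performs (field names as used
 * by this file; struct vpd_res itself is declared in a header): each
 * VPD resource begins with a small header, and its payload follows
 * immediately after the header.
 */
#if 0
	struct vpd_res res;

	sk_vpd_read_res(sc, &res, pos);	/* header at byte offset 'pos' */
	/* res.vr_id:  resource type, e.g. VPD_RES_ID or VPD_RES_READ */
	/* res.vr_len: payload length; data starts at pos + sizeof(res) */
	pos += sizeof(res) + res.vr_len;	/* step to the next resource */
#endif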
static void
sk_vpd_read(sc)
	struct sk_softc *sc;
{
	int pos = 0, i;
	struct vpd_res res;

	/* Check VPD capability */
	if (sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_CAPID)) != PCIY_VPD)
		return;
	if (sc->sk_vpd_prodname != NULL)
		free(sc->sk_vpd_prodname, M_DEVBUF);
	if (sc->sk_vpd_readonly != NULL)
		free(sc->sk_vpd_readonly, M_DEVBUF);
	sc->sk_vpd_prodname = NULL;
	sc->sk_vpd_readonly = NULL;
	sc->sk_vpd_readonly_len = 0;

	sk_vpd_read_res(sc, &res, pos);

	/*
	 * Bail out quietly if the eeprom appears to be missing or empty.
	 */
	if (res.vr_id == 0xff && res.vr_len == 0xff && res.vr_pad == 0xff)
		return;

	if (res.vr_id != VPD_RES_ID) {
		device_printf(sc->sk_dev, "bad VPD resource id: expected %x "
		    "got %x\n", VPD_RES_ID, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
	if (sc->sk_vpd_prodname != NULL) {
		for (i = 0; i < res.vr_len; i++)
			sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos);
		sc->sk_vpd_prodname[i] = '\0';
	}
	pos += res.vr_len;

	sk_vpd_read_res(sc, &res, pos);

	if (res.vr_id != VPD_RES_READ) {
		device_printf(sc->sk_dev, "bad VPD resource id: expected %x "
		    "got %x\n", VPD_RES_READ, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
	/* M_NOWAIT allocations can fail; don't dereference a NULL buffer. */
	if (sc->sk_vpd_readonly == NULL)
		return;
	for (i = 0; i < res.vr_len; i++)
		sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos);
	sc->sk_vpd_readonly_len = res.vr_len;

	return;
}

static int
sk_miibus_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{
	struct sk_if_softc *sc_if;
	int v;

	sc_if = device_get_softc(dev);

	SK_IF_MII_LOCK(sc_if);
	switch(sc_if->sk_softc->sk_type) {
	case SK_GENESIS:
		v = sk_xmac_miibus_readreg(sc_if, phy, reg);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
	case SK_YUKON_EC:
		v = sk_marv_miibus_readreg(sc_if, phy, reg);
		break;
	default:
		v = 0;
		break;
	}
	SK_IF_MII_UNLOCK(sc_if);

	return (v);
}

static int
sk_miibus_writereg(dev, phy, reg, val)
	device_t dev;
	int phy, reg, val;
{
	struct sk_if_softc *sc_if;
	int v;

	sc_if = device_get_softc(dev);

	SK_IF_MII_LOCK(sc_if);
	switch(sc_if->sk_softc->sk_type) {
	case SK_GENESIS:
		v = sk_xmac_miibus_writereg(sc_if, phy, reg, val);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
	case SK_YUKON_EC:
		v = sk_marv_miibus_writereg(sc_if, phy, reg, val);
		break;
	default:
		v = 0;
		break;
	}
	SK_IF_MII_UNLOCK(sc_if);

	return (v);
}

static void
sk_miibus_statchg(dev)
	device_t dev;
{
	struct sk_if_softc *sc_if;

	sc_if = device_get_softc(dev);

	SK_IF_MII_LOCK(sc_if);
	switch(sc_if->sk_softc->sk_type) {
	case SK_GENESIS:
		sk_xmac_miibus_statchg(sc_if);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
	case SK_YUKON_EC:
		sk_marv_miibus_statchg(sc_if);
		break;
	}
	SK_IF_MII_UNLOCK(sc_if);

	return;
}

static int
sk_xmac_miibus_readreg(sc_if, phy, reg)
	struct sk_if_softc *sc_if;
	int phy, reg;
{
	int i;

	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
		return(0);

	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	SK_XM_READ_2(sc_if, XM_PHY_DATA);
	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
		for (i = 0; i < SK_TIMEOUT; i++) {
			DELAY(1);
			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
			    XM_MMUCMD_PHYDATARDY)
				break;
		}

		if (i == SK_TIMEOUT) {
			if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
			return(0);
		}
	}
	DELAY(1);
	i = SK_XM_READ_2(sc_if, XM_PHY_DATA);

	return(i);
}

static int
sk_xmac_miibus_writereg(sc_if, phy, reg, val)
	struct sk_if_softc *sc_if;
	int phy, reg, val;
{
	int i;

	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	for (i = 0; i < SK_TIMEOUT; i++) {
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}

	if (i == SK_TIMEOUT) {
		if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
		return (ETIMEDOUT);
	}

	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}
	if (i == SK_TIMEOUT)
		if_printf(sc_if->sk_ifp, "phy write timed out\n");

	return(0);
}
static void
sk_xmac_miibus_statchg(sc_if)
	struct sk_if_softc *sc_if;
{
	struct mii_data *mii;

	mii = device_get_softc(sc_if->sk_miibus);

	/*
	 * If this is a GMII PHY, manually set the XMAC's
	 * duplex mode accordingly.
	 */
	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
			SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
		} else {
			SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
		}
	}
}

static int
sk_marv_miibus_readreg(sc_if, phy, reg)
	struct sk_if_softc *sc_if;
	int phy, reg;
{
	u_int16_t val;
	int i;

	if (phy != 0 ||
	    (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
	     sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) {
		return(0);
	}

	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
	    YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);

	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
		if (val & YU_SMICR_READ_VALID)
			break;
	}

	if (i == SK_TIMEOUT) {
		if_printf(sc_if->sk_ifp, "phy failed to come ready\n");
		return(0);
	}

	val = SK_YU_READ_2(sc_if, YUKON_SMIDR);

	return(val);
}

static int
sk_marv_miibus_writereg(sc_if, phy, reg, val)
	struct sk_if_softc *sc_if;
	int phy, reg, val;
{
	int i;

	SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
	    YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);

	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		/* Wait for the write to complete, i.e. for BUSY to clear. */
		if ((SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY) == 0)
			break;
	}
	if (i == SK_TIMEOUT) {
		if_printf(sc_if->sk_ifp, "phy write timeout\n");
		return (0);
	}

	return(0);
}

static void
sk_marv_miibus_statchg(sc_if)
	struct sk_if_softc *sc_if;
{
	return;
}

#define HASH_BITS	6

static u_int32_t
sk_xmchash(addr)
	const uint8_t *addr;
{
	uint32_t crc;

	/* Compute CRC for the address value. */
	crc = ether_crc32_le(addr, ETHER_ADDR_LEN);

	return (~crc & ((1 << HASH_BITS) - 1));
}

/* gmchash is just a big endian crc */
static u_int32_t
sk_gmchash(addr)
	const uint8_t *addr;
{
	uint32_t crc;

	/* Compute CRC for the address value. */
	crc = ether_crc32_be(addr, ETHER_ADDR_LEN);

	return (crc & ((1 << HASH_BITS) - 1));
}
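/*
 * Usage sketch (not driver code): both hash functions return the low
 * HASH_BITS bits of an Ethernet CRC, i.e. an index in 0..63 selecting
 * one bit of the 64-bit multicast hash filter, exactly as sk_setmulti()
 * below programs it.
 */
#if 0
	uint8_t mac[ETHER_ADDR_LEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t hashes[2] = { 0, 0 };
	uint32_t h;

	h = sk_gmchash(mac);			/* 0..63 */
	if (h < 32)
		hashes[0] |= (1 << h);
	else
		hashes[1] |= (1 << (h - 32));
#endif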
static void
sk_setfilt(sc_if, addr, slot)
	struct sk_if_softc *sc_if;
	u_int16_t *addr;
	int slot;
{
	int base;

	base = XM_RXFILT_ENTRY(slot);

	SK_XM_WRITE_2(sc_if, base, addr[0]);
	SK_XM_WRITE_2(sc_if, base + 2, addr[1]);
	SK_XM_WRITE_2(sc_if, base + 4, addr[2]);

	return;
}

static void
sk_setmulti(sc_if)
	struct sk_if_softc *sc_if;
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = sc_if->sk_ifp;
	u_int32_t hashes[2] = { 0, 0 };
	int h = 0, i;
	struct ifmultiaddr *ifma;
	u_int16_t dummy[] = { 0, 0, 0 };
	u_int16_t maddr[(ETHER_ADDR_LEN+1)/2];

	SK_IF_LOCK_ASSERT(sc_if);

	/* First, zot all the existing filters. */
	switch(sc->sk_type) {
	case SK_GENESIS:
		for (i = 1; i < XM_RXFILT_MAX; i++)
			sk_setfilt(sc_if, dummy, i);

		SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
		SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
	case SK_YUKON_EC:
		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);
		break;
	}

	/* Now program new ones. */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		i = 1;
		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead,
		    ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			/*
			 * Program the first XM_RXFILT_MAX multicast groups
			 * into the perfect filter.  For all others,
			 * use the hash table.
			 */
			if (sc->sk_type == SK_GENESIS && i < XM_RXFILT_MAX) {
				bcopy(LLADDR(
				    (struct sockaddr_dl *)ifma->ifma_addr),
				    maddr, ETHER_ADDR_LEN);
				sk_setfilt(sc_if, maddr, i);
				i++;
				continue;
			}

			switch(sc->sk_type) {
			case SK_GENESIS:
				bcopy(LLADDR(
				    (struct sockaddr_dl *)ifma->ifma_addr),
				    maddr, ETHER_ADDR_LEN);
				h = sk_xmchash((const uint8_t *)maddr);
				break;
			case SK_YUKON:
			case SK_YUKON_LITE:
			case SK_YUKON_LP:
			case SK_YUKON_EC:
				bcopy(LLADDR(
				    (struct sockaddr_dl *)ifma->ifma_addr),
				    maddr, ETHER_ADDR_LEN);
				h = sk_gmchash((const uint8_t *)maddr);
				break;
			}
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}
		IF_ADDR_UNLOCK(ifp);
	}

	switch(sc->sk_type) {
	case SK_GENESIS:
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
		    XM_MODE_RX_USE_PERFECT);
		SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
		SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
	case SK_YUKON_EC:
		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
		break;
	}

	return;
}

static void
sk_setpromisc(sc_if)
	struct sk_if_softc *sc_if;
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = sc_if->sk_ifp;

	SK_IF_LOCK_ASSERT(sc_if);

	switch(sc->sk_type) {
	case SK_GENESIS:
		if (ifp->if_flags & IFF_PROMISC) {
			SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
		} else {
			SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
		}
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
	case SK_YUKON_EC:
		if (ifp->if_flags & IFF_PROMISC) {
			SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
			    YU_RCR_UFLEN | YU_RCR_MUFLEN);
		} else {
			SK_YU_SETBIT_2(sc_if, YUKON_RCR,
			    YU_RCR_UFLEN | YU_RCR_MUFLEN);
		}
		break;
	}

	return;
}

static int
sk_init_rx_ring(sc_if)
	struct sk_if_softc *sc_if;
{
	struct sk_ring_data *rd;
	bus_addr_t addr;
	u_int32_t csum_start;
	int i;

	sc_if->sk_cdata.sk_rx_cons = 0;

	csum_start = ((ETHER_HDR_LEN + sizeof(struct ip)) << 16) |
	    ETHER_HDR_LEN;
	rd = &sc_if->sk_rdata;
	bzero(rd->sk_rx_ring, sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
	for (i = 0; i < SK_RX_RING_CNT; i++) {
		if (sk_newbuf(sc_if, i) != 0)
			return (ENOBUFS);
		if (i == (SK_RX_RING_CNT - 1))
			addr = SK_RX_RING_ADDR(sc_if, 0);
		else
			addr = SK_RX_RING_ADDR(sc_if, i + 1);
		rd->sk_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
		rd->sk_rx_ring[i].sk_csum_start = htole32(csum_start);
	}

	bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag,
	    sc_if->sk_cdata.sk_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return(0);
}

static int
sk_init_jumbo_rx_ring(sc_if)
	struct sk_if_softc *sc_if;
{
	struct sk_ring_data *rd;
	bus_addr_t addr;
	u_int32_t csum_start;
	int i;

	sc_if->sk_cdata.sk_jumbo_rx_cons = 0;

	csum_start = ((ETHER_HDR_LEN + sizeof(struct ip)) << 16) |
	    ETHER_HDR_LEN;
	rd = &sc_if->sk_rdata;
	bzero(rd->sk_jumbo_rx_ring,
	    sizeof(struct sk_rx_desc) * SK_JUMBO_RX_RING_CNT);
	for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
		if (sk_jumbo_newbuf(sc_if, i) != 0)
			return (ENOBUFS);
		if (i == (SK_JUMBO_RX_RING_CNT - 1))
			addr = SK_JUMBO_RX_RING_ADDR(sc_if, 0);
		else
			addr = SK_JUMBO_RX_RING_ADDR(sc_if, i + 1);
		rd->sk_jumbo_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
		rd->sk_jumbo_rx_ring[i].sk_csum_start = htole32(csum_start);
	}

	bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag,
	    sc_if->sk_cdata.sk_jumbo_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
sk_init_tx_ring(sc_if)
	struct sk_if_softc *sc_if;
{
	struct sk_ring_data *rd;
	struct sk_txdesc *txd;
	bus_addr_t addr;
	int i;

	STAILQ_INIT(&sc_if->sk_cdata.sk_txfreeq);
	STAILQ_INIT(&sc_if->sk_cdata.sk_txbusyq);

	sc_if->sk_cdata.sk_tx_prod = 0;
	sc_if->sk_cdata.sk_tx_cons = 0;
	sc_if->sk_cdata.sk_tx_cnt = 0;

	rd = &sc_if->sk_rdata;
	bzero(rd->sk_tx_ring, sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
	for (i = 0; i < SK_TX_RING_CNT; i++) {
		if (i == (SK_TX_RING_CNT - 1))
			addr = SK_TX_RING_ADDR(sc_if, 0);
		else
			addr = SK_TX_RING_ADDR(sc_if, i + 1);
		rd->sk_tx_ring[i].sk_next = htole32(SK_ADDR_LO(addr));
		txd = &sc_if->sk_cdata.sk_txdesc[i];
		STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q);
	}

	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
	    sc_if->sk_cdata.sk_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static __inline void
sk_discard_rxbuf(sc_if, idx)
	struct sk_if_softc *sc_if;
	int idx;
{
	struct sk_rx_desc *r;
	struct sk_rxdesc *rxd;
	struct mbuf *m;

	r = &sc_if->sk_rdata.sk_rx_ring[idx];
	rxd = &sc_if->sk_cdata.sk_rxdesc[idx];
	m = rxd->rx_m;
	r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM);
}

static __inline void
sk_discard_jumbo_rxbuf(sc_if, idx)
	struct sk_if_softc *sc_if;
	int idx;
{
	struct sk_rx_desc *r;
	struct sk_rxdesc *rxd;
	struct mbuf *m;

	r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx];
	rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx];
	m = rxd->rx_m;
	r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM);
}
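/*
 * Sketch of how the discard helpers above pair with the newbuf routines
 * below in the receive path (variables as in the rxeof handlers later
 * in this file): when no replacement mbuf can be loaded, the old one is
 * re-armed in place so the ring never holds an empty slot.
 */
#if 0
	if (sk_newbuf(sc_if, cons) != 0) {
		ifp->if_iqdrops++;		/* count the lost frame */
		sk_discard_rxbuf(sc_if, cons);	/* recycle the old mbuf */
		continue;
	}
#endif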
static int
sk_newbuf(sc_if, idx)
	struct sk_if_softc *sc_if;
	int idx;
{
	struct sk_rx_desc *r;
	struct sk_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_rx_tag,
	    sc_if->sk_cdata.sk_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc_if->sk_cdata.sk_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->sk_cdata.sk_rx_sparemap;
	sc_if->sk_cdata.sk_rx_sparemap = map;
	bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	r = &sc_if->sk_rdata.sk_rx_ring[idx];
	r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr));
	r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr));
	r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM);

	return (0);
}

static int
sk_jumbo_newbuf(sc_if, idx)
	struct sk_if_softc *sc_if;
	int idx;
{
	struct sk_rx_desc *r;
	struct sk_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;
	void *buf;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	buf = sk_jalloc(sc_if);
	if (buf == NULL) {
		m_freem(m);
		return (ENOBUFS);
	}
	/* Attach the buffer to the mbuf */
	MEXTADD(m, buf, SK_JLEN, sk_jfree, (struct sk_if_softc *)sc_if, 0,
	    EXT_NET_DRV);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_pkthdr.len = m->m_len = SK_JLEN;
	/*
	 * Adjust alignment so packet payload begins on a
	 * longword boundary.  Mandatory for Alpha, useful on
	 * x86 too.
	 */
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_jumbo_rx_tag,
	    sc_if->sk_cdata.sk_jumbo_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag,
		    rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->sk_cdata.sk_jumbo_rx_sparemap;
	sc_if->sk_cdata.sk_jumbo_rx_sparemap = map;
	bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx];
	r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr));
	r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr));
	r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM);

	return (0);
}
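/*
 * Jumbo buffer lifecycle sketch: sk_jumbo_newbuf() above pairs
 * sk_jalloc() with MEXTADD(), so when the stack drops the last
 * reference to the mbuf, sk_jfree() returns the 9K slab to the
 * driver's private free list rather than to the mbuf allocator.
 */
#if 0
	buf = sk_jalloc(sc_if);			/* slab off the free list */
	MEXTADD(m, buf, SK_JLEN, sk_jfree, sc_if, 0, EXT_NET_DRV);
	m_freem(m);				/* last ref -> sk_jfree() */
#endif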
/*
 * Set media options.
 */
static int
sk_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc_if->sk_miibus);
	sk_init(sc_if);
	mii_mediachg(mii);

	return(0);
}

/*
 * Report current media status.
 */
static void
sk_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct sk_if_softc *sc_if;
	struct mii_data *mii;

	sc_if = ifp->if_softc;
	mii = device_get_softc(sc_if->sk_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

static int
sk_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int error, mask;
	struct mii_data *mii;

	error = 0;
	switch(command) {
	case SIOCSIFMTU:
		SK_IF_LOCK(sc_if);
		if (ifr->ifr_mtu > SK_JUMBO_MTU)
			error = EINVAL;
		else {
			ifp->if_mtu = ifr->ifr_mtu;
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			sk_init_locked(sc_if);
		}
		SK_IF_UNLOCK(sc_if);
		break;
	case SIOCSIFFLAGS:
		SK_IF_LOCK(sc_if);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ sc_if->sk_if_flags)
				    & IFF_PROMISC) {
					sk_setpromisc(sc_if);
					sk_setmulti(sc_if);
				}
			} else
				sk_init_locked(sc_if);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				sk_stop(sc_if);
		}
		sc_if->sk_if_flags = ifp->if_flags;
		SK_IF_UNLOCK(sc_if);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		SK_IF_LOCK(sc_if);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			sk_setmulti(sc_if);
		SK_IF_UNLOCK(sc_if);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc_if->sk_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		SK_IF_LOCK(sc_if);
		if (sc_if->sk_softc->sk_type == SK_GENESIS) {
			SK_IF_UNLOCK(sc_if);
			break;
		}
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			if (IFCAP_HWCSUM & ifp->if_capenable &&
			    IFCAP_HWCSUM & ifp->if_capabilities)
				ifp->if_hwassist = SK_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
		SK_IF_UNLOCK(sc_if);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*
 * Probe for a SysKonnect GEnesis chip.  Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
skc_probe(dev)
	device_t dev;
{
	struct sk_type *t = sk_devs;

	while(t->sk_name != NULL) {
		if ((pci_get_vendor(dev) == t->sk_vid) &&
		    (pci_get_device(dev) == t->sk_did)) {
			/*
			 * Only attach to rev. 2 of the Linksys EG1032 adapter.
			 * Rev. 3 is supported by re(4).
			 */
			if ((t->sk_vid == VENDORID_LINKSYS) &&
			    (t->sk_did == DEVICEID_LINKSYS_EG1032) &&
			    (pci_get_subdevice(dev) !=
			     SUBDEVICEID_LINKSYS_EG1032_REV2)) {
				t++;
				continue;
			}
			device_set_desc(dev, t->sk_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return(ENXIO);
}

/*
 * Force the GEnesis into reset, then bring it out of reset.
 */
static void
sk_reset(sc)
	struct sk_softc *sc;
{

	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
	if (SK_YUKON_FAMILY(sc->sk_type))
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);

	DELAY(1000);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
	DELAY(2);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
	if (SK_YUKON_FAMILY(sc->sk_type))
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure packet arbiter */
		sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
		sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
	}

	/* Enable RAM interface */
	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);

	/*
	 * Configure interrupt moderation.  The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register.  Each bit in the timer
	 * register represents one tick, so to specify a timeout in
	 * microseconds, we have to multiply by the correct number of
	 * ticks-per-microsecond.
	 */
	switch (sc->sk_type) {
	case SK_GENESIS:
		sc->sk_int_ticks = SK_IMTIMER_TICKS_GENESIS;
		break;
	case SK_YUKON_EC:
		sc->sk_int_ticks = SK_IMTIMER_TICKS_YUKON_EC;
		break;
	default:
		sc->sk_int_ticks = SK_IMTIMER_TICKS_YUKON;
		break;
	}
	if (bootverbose)
		device_printf(sc->sk_dev, "interrupt moderation is %d us\n",
		    sc->sk_int_mod);
	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod,
	    sc->sk_int_ticks));
	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);

	return;
}
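/*
 * Worked example for the moderation math above (a fragment, assuming a
 * struct sk_softc *sc and that SK_IM_USECS() from if_skreg.h simply
 * multiplies microseconds by the chip's ticks-per-microsecond rate): a
 * 100 us setting on a GEnesis loads 100 * SK_IMTIMER_TICKS_GENESIS
 * ticks into the init register.
 */
#if 0
	u_int32_t usecs = 100;			/* hypothetical setting */
	u_int32_t ticks = usecs * sc->sk_int_ticks;

	sk_win_write_4(sc, SK_IMTIMERINIT, ticks);
#endif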
static int
sk_probe(dev)
	device_t dev;
{
	struct sk_softc *sc;

	sc = device_get_softc(device_get_parent(dev));

	/*
	 * Not much to do here.  We always know there will be
	 * at least one XMAC present, and if there are two,
	 * skc_attach() will create a second device instance
	 * for us.
	 */
	switch (sc->sk_type) {
	case SK_GENESIS:
		device_set_desc(dev, "XaQti Corp. XMAC II");
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
	case SK_YUKON_EC:
		device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon");
		break;
	}

	return (BUS_PROBE_DEFAULT);
}

/*
 * Each XMAC chip is attached as a separate logical IP interface.
 * Single port cards will have only one logical interface of course.
 */
static int
sk_attach(dev)
	device_t dev;
{
	struct sk_softc *sc;
	struct sk_if_softc *sc_if;
	struct ifnet *ifp;
	int i, port, error;
	u_char eaddr[6];

	if (dev == NULL)
		return(EINVAL);

	error = 0;
	sc_if = device_get_softc(dev);
	sc = device_get_softc(device_get_parent(dev));
	port = *(int *)device_get_ivars(dev);

	sc_if->sk_if_dev = dev;
	sc_if->sk_port = port;
	sc_if->sk_softc = sc;
	sc->sk_if[port] = sc_if;
	if (port == SK_PORT_A)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
	if (port == SK_PORT_B)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;

	callout_init_mtx(&sc_if->sk_tick_ch, &sc_if->sk_softc->sk_mtx, 0);

	if (sk_dma_alloc(sc_if) != 0) {
		error = ENOMEM;
		goto fail;
	}

	ifp = sc_if->sk_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc_if->sk_if_dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	/*
	 * SK_GENESIS has a bug in checksum offload - from Linux.
	 */
	if (sc_if->sk_softc->sk_type != SK_GENESIS) {
		ifp->if_capabilities = IFCAP_HWCSUM;
		ifp->if_hwassist = SK_CSUM_FEATURES;
	} else {
		ifp->if_capabilities = 0;
		ifp->if_hwassist = 0;
	}
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_ioctl = sk_ioctl;
	ifp->if_start = sk_start;
	ifp->if_watchdog = sk_watchdog;
	ifp->if_init = sk_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = SK_TX_RING_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Get station address for this interface.  Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra.  The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode.  Currently we don't
	 * use this extra address.
	 */
	SK_IF_LOCK(sc_if);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] =
		    sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);

	/*
	 * Set up RAM buffer addresses.  The NIC will have a certain
	 * amount of SRAM on it, somewhere between 512K and 2MB.  We
	 * need to divide this up a) between the transmitter and
	 * receiver and b) between the two XMACs, if this is a
	 * dual port NIC.  Our algorithm is to divide up the memory
	 * evenly so that everyone gets a fair share (a worked example
	 * follows sk_attach()).
	 *
	 * Just to be contrary, Yukon2 appears to have separate memory
	 * for each MAC.
	 */
	if (SK_IS_YUKON2(sc) ||
	    sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
		u_int32_t chunk, val;

		chunk = sc->sk_ramsize / 2;
		val = sc->sk_rboff / sizeof(u_int64_t);
		sc_if->sk_rx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_rx_ramend = val - 1;
		sc_if->sk_tx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_tx_ramend = val - 1;
	} else {
		u_int32_t chunk, val;

		chunk = sc->sk_ramsize / 4;
		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
		    sizeof(u_int64_t);
		sc_if->sk_rx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_rx_ramend = val - 1;
		sc_if->sk_tx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_tx_ramend = val - 1;
	}

	/* Read and save PHY type and set PHY address */
	sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
	if (!SK_YUKON_FAMILY(sc->sk_type)) {
		switch(sc_if->sk_phytype) {
		case SK_PHYTYPE_XMAC:
			sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
			break;
		case SK_PHYTYPE_BCOM:
			sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
			break;
		default:
			device_printf(sc->sk_dev, "unsupported PHY type: %d\n",
			    sc_if->sk_phytype);
			error = ENODEV;
			SK_IF_UNLOCK(sc_if);
			goto fail;
		}
	} else {
		if (sc_if->sk_phytype < SK_PHYTYPE_MARV_COPPER &&
		    sc->sk_pmd == IFM_1000_T) {
			/* not initialized, punt */
			sc_if->sk_phytype = SK_PHYTYPE_MARV_COPPER;
		}

		sc_if->sk_phyaddr = SK_PHYADDR_MARV;

		if (sc->sk_pmd != IFM_1000_T && sc->sk_pmd != IFM_1000_CX)
			sc_if->sk_phytype = SK_PHYTYPE_MARV_FIBER;
	}

	/*
	 * Call MI attach routine.  Can't hold locks when calling into ether_*.
	 */
	SK_IF_UNLOCK(sc_if);
	ether_ifattach(ifp, eaddr);
	SK_IF_LOCK(sc_if);

	/*
	 * The hardware should be ready for VLAN_MTU by default:
	 * XMAC II has 0x8100 in VLAN Tag Level 1 register initially;
	 * YU_SMR_MFL_VLAN is set by this driver in Yukon.
	 */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * Do miibus setup.
	 */
	switch (sc->sk_type) {
	case SK_GENESIS:
		sk_init_xmac(sc_if);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
	case SK_YUKON_EC:
		sk_init_yukon(sc_if);
		break;
	}

	SK_IF_UNLOCK(sc_if);
	if (mii_phy_probe(dev, &sc_if->sk_miibus,
	    sk_ifmedia_upd, sk_ifmedia_sts)) {
		device_printf(sc_if->sk_if_dev, "no PHY found!\n");
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}

fail:
	if (error) {
		/* Access should be ok even though lock has been dropped */
		sc->sk_if[port] = NULL;
		sk_detach(dev);
	}

	return(error);
}
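/*
 * Worked example for the RAM buffer split in sk_attach() above (a
 * sketch, not driver code): a 1MB single-MAC board with sk_rboff == 0
 * gives chunk == 512K, so in 8-byte units the Rx buffer occupies
 * [0, 65535] and the Tx buffer [65536, 131071].
 */
#if 0
	u_int32_t chunk = 0x100000 / 2;			/* 512K each way */
	u_int32_t rx_start = 0;				/* sk_rboff / 8 */
	u_int32_t rx_end = rx_start + chunk / 8 - 1;	/* 65535 */
	u_int32_t tx_start = rx_end + 1;		/* 65536 */
	u_int32_t tx_end = tx_start + chunk / 8 - 1;	/* 131071 */
#endif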
/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
skc_attach(dev)
	device_t dev;
{
	struct sk_softc *sc;
	int error = 0, rid, *port, sk_macs;
	uint8_t skrs;
	char *pname, *revstr;

	sc = device_get_softc(dev);
	sc->sk_dev = dev;

	mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	mtx_init(&sc->sk_mii_mtx, "sk_mii_mutex", NULL, MTX_DEF);
	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = SK_RID;
	sc->sk_res = bus_alloc_resource_any(dev, SK_RES, &rid, RF_ACTIVE);

	if (sc->sk_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->sk_btag = rman_get_bustag(sc->sk_res);
	sc->sk_bhandle = rman_get_bushandle(sc->sk_res);

	sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
	sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4) & 0xf;

	/* Bail out if chip is not recognized. */
	if (sc->sk_type != SK_GENESIS && !SK_YUKON_FAMILY(sc->sk_type)) {
		device_printf(dev, "unknown device: chipver=%02x, rev=%x\n",
		    sc->sk_type, sc->sk_rev);
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupt */
	rid = 0;
	sc->sk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->sk_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "int_mod", CTLTYPE_INT|CTLFLAG_RW,
	    &sc->sk_int_mod, 0, sysctl_hw_sk_int_mod, "I",
	    "SK interrupt moderation");

	/* Pull in device tunables. */
	sc->sk_int_mod = SK_IM_DEFAULT;
	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "int_mod", &sc->sk_int_mod);
	if (error == 0) {
		if (sc->sk_int_mod < SK_IM_MIN ||
		    sc->sk_int_mod > SK_IM_MAX) {
			device_printf(dev, "int_mod value out of range; "
			    "using default: %d\n", SK_IM_DEFAULT);
			sc->sk_int_mod = SK_IM_DEFAULT;
		}
	}
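	/*
	 * Note: the tunable can also be set at boot via device hints,
	 * e.g. hint.skc.0.int_mod="75" (microseconds; out-of-range
	 * values fall back to SK_IM_DEFAULT as above).
	 */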
	/* Reset the adapter. */
	sk_reset(sc);

	/* Read and save vital product data from EEPROM. */
	sk_vpd_read(sc);

	skrs = sk_win_read_1(sc, SK_EPROM0);
	if (sc->sk_type == SK_GENESIS) {
		/* Read and save RAM size and RAMbuffer offset */
		switch(skrs) {
		case SK_RAMSIZE_512K_64:
			sc->sk_ramsize = 0x80000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		case SK_RAMSIZE_1024K_64:
			sc->sk_ramsize = 0x100000;
			sc->sk_rboff = SK_RBOFF_80000;
			break;
		case SK_RAMSIZE_1024K_128:
			sc->sk_ramsize = 0x100000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		case SK_RAMSIZE_2048K_128:
			sc->sk_ramsize = 0x200000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		default:
			device_printf(dev, "unknown ram size: %d\n", skrs);
			error = ENXIO;
			goto fail;
		}
	} else { /* SK_YUKON_FAMILY */
		if (skrs == 0x00)
			sc->sk_ramsize = 0x20000;
		else
			sc->sk_ramsize = skrs * (1<<12);
		sc->sk_rboff = SK_RBOFF_0;
	}

	/* Read and save physical media type */
	switch(sk_win_read_1(sc, SK_PMDTYPE)) {
	case SK_PMD_1000BASESX:
		sc->sk_pmd = IFM_1000_SX;
		break;
	case SK_PMD_1000BASELX:
		sc->sk_pmd = IFM_1000_LX;
		break;
	case SK_PMD_1000BASECX:
		sc->sk_pmd = IFM_1000_CX;
		break;
	case SK_PMD_1000BASETX:
		sc->sk_pmd = IFM_1000_T;
		break;
	default:
		if (SK_YUKON_FAMILY(sc->sk_type) && (sk_win_read_1(sc, SK_EPROM1)
		    & 0xF) < SK_PHYTYPE_MARV_COPPER) {
			/* not initialized, punt */
			sc->sk_pmd = IFM_1000_T;
			break;
		}
		device_printf(dev, "unknown media type: 0x%x\n",
		    sk_win_read_1(sc, SK_PMDTYPE));
		error = ENXIO;
		goto fail;
	}

	/*
	 * Determine whether to name it with VPD PN or just make it up.
	 * Marvell Yukon VPD PN seems to frequently be bogus.
	 */
	switch (pci_get_device(dev)) {
	case DEVICEID_SK_V1:
	case DEVICEID_BELKIN_5005:
	case DEVICEID_3COM_3C940:
	case DEVICEID_LINKSYS_EG1032:
	case DEVICEID_DLINK_DGE530T:
		/* Stay with VPD PN. */
		pname = sc->sk_vpd_prodname;
		break;
	case DEVICEID_SK_V2:
	case DEVICEID_MRVL_4360:
	case DEVICEID_MRVL_4361:
	case DEVICEID_MRVL_4362:
		/* YUKON VPD PN might bear no resemblance to reality. */
		switch (sc->sk_type) {
		case SK_GENESIS:
			/* Stay with VPD PN. */
			pname = sc->sk_vpd_prodname;
			break;
		case SK_YUKON:
			pname = "Marvell Yukon Gigabit Ethernet";
			break;
		case SK_YUKON_LITE:
			pname = "Marvell Yukon Lite Gigabit Ethernet";
			break;
		case SK_YUKON_LP:
			pname = "Marvell Yukon LP Gigabit Ethernet";
			break;
		case SK_YUKON_EC:
			pname = "Marvell Yukon-2 EC Gigabit Ethernet";
			break;
		default:
			pname = "Marvell Yukon (Unknown) Gigabit Ethernet";
			break;
		}

		/* Yukon Lite Rev. A0 needs special test. */
		if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) {
			u_int32_t far;
			u_int8_t testbyte;

			/* Save flash address register before testing. */
			far = sk_win_read_4(sc, SK_EP_ADDR);

			sk_win_write_1(sc, SK_EP_ADDR+0x03, 0xff);
			testbyte = sk_win_read_1(sc, SK_EP_ADDR+0x03);

			if (testbyte != 0x00) {
				/* Yukon Lite Rev. A0 detected. */
				sc->sk_type = SK_YUKON_LITE;
				sc->sk_rev = SK_YUKON_LITE_REV_A0;
				/* Restore flash address register. */
				sk_win_write_4(sc, SK_EP_ADDR, far);
			}
		}
		break;
	default:
		device_printf(dev, "unknown device: vendor=%04x, device=%04x, "
		    "chipver=%02x, rev=%x\n",
		    pci_get_vendor(dev), pci_get_device(dev),
		    sc->sk_type, sc->sk_rev);
		error = ENXIO;
		goto fail;
	}

	if (sc->sk_type == SK_YUKON_LITE) {
		switch (sc->sk_rev) {
		case SK_YUKON_LITE_REV_A0:
			revstr = "A0";
			break;
		case SK_YUKON_LITE_REV_A1:
			revstr = "A1";
			break;
		case SK_YUKON_LITE_REV_A3:
			revstr = "A3";
			break;
		default:
			revstr = "";
			break;
		}
	} else if (sc->sk_type == SK_YUKON_EC) {
		switch (sc->sk_rev) {
		case SK_YUKON_EC_REV_A1:
			revstr = "A1";
			break;
		case SK_YUKON_EC_REV_A2:
			revstr = "A2";
			break;
		case SK_YUKON_EC_REV_A3:
			revstr = "A3";
			break;
		default:
			revstr = "";
			break;
		}
	} else {
		revstr = "";
	}

	/* Announce the product name and more VPD data if there. */
	device_printf(dev, "%s rev. %s(0x%x)\n",
	    pname != NULL ? pname : "<unknown>", revstr, sc->sk_rev);

	if (bootverbose) {
		if (sc->sk_vpd_readonly != NULL &&
		    sc->sk_vpd_readonly_len != 0) {
			char buf[256];
			char *dp = sc->sk_vpd_readonly;
			uint16_t l, len = sc->sk_vpd_readonly_len;

			while (len >= 3) {
				if ((*dp == 'P' && *(dp+1) == 'N') ||
				    (*dp == 'E' && *(dp+1) == 'C') ||
				    (*dp == 'M' && *(dp+1) == 'N') ||
				    (*dp == 'S' && *(dp+1) == 'N')) {
					l = 0;
					while (l < *(dp+2)) {
						buf[l] = *(dp+3+l);
						++l;
					}
					buf[l] = '\0';
					device_printf(dev, "%c%c: %s\n",
					    *dp, *(dp+1), buf);
					len -= (3 + l);
					dp += (3 + l);
				} else {
					len -= (3 + *(dp+2));
					dp += (3 + *(dp+2));
				}
			}
		}
		device_printf(dev, "chip ver  = 0x%02x\n", sc->sk_type);
		device_printf(dev, "chip rev  = 0x%02x\n", sc->sk_rev);
		device_printf(dev, "SK_EPROM0 = 0x%02x\n", skrs);
		device_printf(dev, "SRAM size = 0x%06x\n", sc->sk_ramsize);
	}

	sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
	if (sc->sk_devs[SK_PORT_A] == NULL) {
		device_printf(dev, "failed to add child for PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
	if (port == NULL) {
		device_printf(dev, "failed to allocate memory for "
		    "ivars of PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	*port = SK_PORT_A;
	device_set_ivars(sc->sk_devs[SK_PORT_A], port);

	sk_macs = 1;

	if (SK_IS_YUKON2(sc)) {
		u_int8_t hw;

		hw = sk_win_read_1(sc, SK_Y2_HWRES);
		if ((hw & SK_Y2_HWRES_LINK_MASK) == SK_Y2_HWRES_LINK_DUAL) {
			if ((sk_win_read_1(sc, SK_Y2_CLKGATE) &
			    SK_Y2_CLKGATE_LINK2_INACTIVE) == 0)
				sk_macs++;
		}
	} else {
		if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC))
			sk_macs++;
	}

	if (sk_macs > 1) {
		sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
		if (sc->sk_devs[SK_PORT_B] == NULL) {
			device_printf(dev, "failed to add child for PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
		if (port == NULL) {
			device_printf(dev, "failed to allocate memory for "
			    "ivars of PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		*port = SK_PORT_B;
		device_set_ivars(sc->sk_devs[SK_PORT_B], port);
	}
loaded' LED. */ 2007 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON); 2008 2009 error = bus_generic_attach(dev); 2010 if (error) { 2011 device_printf(dev, "failed to attach port(s)\n"); 2012 goto fail; 2013 } 2014 2015 /* Hook interrupt last to avoid having to lock softc */ 2016 error = bus_setup_intr(dev, sc->sk_irq, INTR_TYPE_NET|INTR_MPSAFE, 2017 sk_intr, sc, &sc->sk_intrhand); 2018 2019 if (error) { 2020 device_printf(dev, "couldn't set up irq\n"); 2021 goto fail; 2022 } 2023 2024 fail: 2025 if (error) 2026 skc_detach(dev); 2027 2028 return(error); 2029 } 2030 2031 /* 2032 * Shutdown hardware and free up resources. This can be called any 2033 * time after the mutex has been initialized. It is called in both 2034 * the error case in attach and the normal detach case so it needs 2035 * to be careful about only freeing resources that have actually been 2036 * allocated. 2037 */ 2038 static int 2039 sk_detach(dev) 2040 device_t dev; 2041 { 2042 struct sk_if_softc *sc_if; 2043 struct ifnet *ifp; 2044 2045 sc_if = device_get_softc(dev); 2046 KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx), 2047 ("sk mutex not initialized in sk_detach")); 2048 SK_IF_LOCK(sc_if); 2049 2050 ifp = sc_if->sk_ifp; 2051 /* These should only be active if attach_xmac succeeded */ 2052 if (device_is_attached(dev)) { 2053 sk_stop(sc_if); 2054 /* Can't hold locks while calling detach */ 2055 SK_IF_UNLOCK(sc_if); 2056 callout_drain(&sc_if->sk_tick_ch); 2057 ether_ifdetach(ifp); 2058 SK_IF_LOCK(sc_if); 2059 } 2060 if (ifp) 2061 if_free(ifp); 2062 /* 2063 * We're generally called from skc_detach() which is using 2064 * device_delete_child() to get to here. It's already trashed 2065 * miibus for us, so don't do it here or we'll panic. 2066 */ 2067 /* 2068 if (sc_if->sk_miibus != NULL) 2069 device_delete_child(dev, sc_if->sk_miibus); 2070 */ 2071 bus_generic_detach(dev); 2072 sk_dma_free(sc_if); 2073 SK_IF_UNLOCK(sc_if); 2074 2075 return(0); 2076 } 2077 2078 static int 2079 skc_detach(dev) 2080 device_t dev; 2081 { 2082 struct sk_softc *sc; 2083 2084 sc = device_get_softc(dev); 2085 KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized")); 2086 2087 if (device_is_alive(dev)) { 2088 if (sc->sk_devs[SK_PORT_A] != NULL) { 2089 free(device_get_ivars(sc->sk_devs[SK_PORT_A]), M_DEVBUF); 2090 device_delete_child(dev, sc->sk_devs[SK_PORT_A]); 2091 } 2092 if (sc->sk_devs[SK_PORT_B] != NULL) { 2093 free(device_get_ivars(sc->sk_devs[SK_PORT_B]), M_DEVBUF); 2094 device_delete_child(dev, sc->sk_devs[SK_PORT_B]); 2095 } 2096 bus_generic_detach(dev); 2097 } 2098 2099 if (sc->sk_vpd_prodname != NULL) 2100 free(sc->sk_vpd_prodname, M_DEVBUF); 2101 if (sc->sk_vpd_readonly != NULL) 2102 free(sc->sk_vpd_readonly, M_DEVBUF); 2103 2104 if (sc->sk_intrhand) 2105 bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand); 2106 if (sc->sk_irq) 2107 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq); 2108 if (sc->sk_res) 2109 bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res); 2110 2111 mtx_destroy(&sc->sk_mii_mtx); 2112 mtx_destroy(&sc->sk_mtx); 2113 2114 return(0); 2115 } 2116 2117 struct sk_dmamap_arg { 2118 bus_addr_t sk_busaddr; 2119 }; 2120 2121 static void 2122 sk_dmamap_cb(arg, segs, nseg, error) 2123 void *arg; 2124 bus_dma_segment_t *segs; 2125 int nseg; 2126 int error; 2127 { 2128 struct sk_dmamap_arg *ctx; 2129 2130 if (error != 0) 2131 return; 2132 2133 ctx = arg; 2134 ctx->sk_busaddr = segs[0].ds_addr; 2135 } 2136 2137 /* 2138 * Allocate jumbo buffer storage. 
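(The scheme, in brief: one contiguous SK_JMEM DMA block is carved into SK_JSLOTS slots of SK_JLEN bytes each, and sk_jalloc()/sk_jfree() below hand slots out and reclaim them through the free/in-use SLISTs.)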
The SysKonnect adapters support 2139 * "jumbograms" (9K frames), although SysKonnect doesn't currently 2140 * use them in their drivers. In order for us to use them, we need 2141 * large 9K receive buffers, however standard mbuf clusters are only 2142 * 2048 bytes in size. Consequently, we need to allocate and manage 2143 * our own jumbo buffer pool. Fortunately, this does not require an 2144 * excessive amount of additional code. 2145 */ 2146 static int 2147 sk_dma_alloc(sc_if) 2148 struct sk_if_softc *sc_if; 2149 { 2150 struct sk_dmamap_arg ctx; 2151 struct sk_txdesc *txd; 2152 struct sk_rxdesc *rxd; 2153 struct sk_rxdesc *jrxd; 2154 u_int8_t *ptr; 2155 struct sk_jpool_entry *entry; 2156 int error, i; 2157 2158 mtx_init(&sc_if->sk_jlist_mtx, "sk_jlist_mtx", NULL, MTX_DEF); 2159 SLIST_INIT(&sc_if->sk_jfree_listhead); 2160 SLIST_INIT(&sc_if->sk_jinuse_listhead); 2161 2162 /* create parent tag */ 2163 /* 2164 * XXX 2165 * This driver should use BUS_SPACE_MAXADDR for lowaddr argument 2166 * in bus_dma_tag_create(9) as the NIC would support DAC mode. 2167 * However bz@ reported that it does not work on amd64 with > 4GB 2168 * RAM. Until we have more clues of the breakage, disable DAC mode 2169 * by limiting DMA address to be in 32bit address space. 2170 */ 2171 error = bus_dma_tag_create(NULL, /* parent */ 2172 1, 0, /* algnmnt, boundary */ 2173 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 2174 BUS_SPACE_MAXADDR, /* highaddr */ 2175 NULL, NULL, /* filter, filterarg */ 2176 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 2177 0, /* nsegments */ 2178 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 2179 0, /* flags */ 2180 NULL, NULL, /* lockfunc, lockarg */ 2181 &sc_if->sk_cdata.sk_parent_tag); 2182 if (error != 0) { 2183 device_printf(sc_if->sk_if_dev, 2184 "failed to create parent DMA tag\n"); 2185 goto fail; 2186 } 2187 /* create tag for Tx ring */ 2188 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ 2189 SK_RING_ALIGN, 0, /* algnmnt, boundary */ 2190 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 2191 BUS_SPACE_MAXADDR, /* highaddr */ 2192 NULL, NULL, /* filter, filterarg */ 2193 SK_TX_RING_SZ, /* maxsize */ 2194 1, /* nsegments */ 2195 SK_TX_RING_SZ, /* maxsegsize */ 2196 0, /* flags */ 2197 NULL, NULL, /* lockfunc, lockarg */ 2198 &sc_if->sk_cdata.sk_tx_ring_tag); 2199 if (error != 0) { 2200 device_printf(sc_if->sk_if_dev, 2201 "failed to allocate Tx ring DMA tag\n"); 2202 goto fail; 2203 } 2204 2205 /* create tag for Rx ring */ 2206 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ 2207 SK_RING_ALIGN, 0, /* algnmnt, boundary */ 2208 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 2209 BUS_SPACE_MAXADDR, /* highaddr */ 2210 NULL, NULL, /* filter, filterarg */ 2211 SK_RX_RING_SZ, /* maxsize */ 2212 1, /* nsegments */ 2213 SK_RX_RING_SZ, /* maxsegsize */ 2214 0, /* flags */ 2215 NULL, NULL, /* lockfunc, lockarg */ 2216 &sc_if->sk_cdata.sk_rx_ring_tag); 2217 if (error != 0) { 2218 device_printf(sc_if->sk_if_dev, 2219 "failed to allocate Rx ring DMA tag\n"); 2220 goto fail; 2221 } 2222 2223 /* create tag for jumbo Rx ring */ 2224 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ 2225 SK_RING_ALIGN, 0, /* algnmnt, boundary */ 2226 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 2227 BUS_SPACE_MAXADDR, /* highaddr */ 2228 NULL, NULL, /* filter, filterarg */ 2229 SK_JUMBO_RX_RING_SZ, /* maxsize */ 2230 1, /* nsegments */ 2231 SK_JUMBO_RX_RING_SZ, /* maxsegsize */ 2232 0, /* flags */ 2233 NULL, NULL, /* lockfunc, lockarg */ 2234 &sc_if->sk_cdata.sk_jumbo_rx_ring_tag); 2235 if 
(error != 0) { 2236 device_printf(sc_if->sk_if_dev, 2237 "failed to allocate jumbo Rx ring DMA tag\n"); 2238 goto fail; 2239 } 2240 2241 /* create tag for jumbo buffer blocks */ 2242 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ 2243 PAGE_SIZE, 0, /* algnmnt, boundary */ 2244 BUS_SPACE_MAXADDR, /* lowaddr */ 2245 BUS_SPACE_MAXADDR, /* highaddr */ 2246 NULL, NULL, /* filter, filterarg */ 2247 SK_JMEM, /* maxsize */ 2248 1, /* nsegments */ 2249 SK_JMEM, /* maxsegsize */ 2250 0, /* flags */ 2251 NULL, NULL, /* lockfunc, lockarg */ 2252 &sc_if->sk_cdata.sk_jumbo_tag); 2253 if (error != 0) { 2254 device_printf(sc_if->sk_if_dev, 2255 "failed to allocate jumbo Rx buffer block DMA tag\n"); 2256 goto fail; 2257 } 2258 2259 /* create tag for Tx buffers */ 2260 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ 2261 1, 0, /* algnmnt, boundary */ 2262 BUS_SPACE_MAXADDR, /* lowaddr */ 2263 BUS_SPACE_MAXADDR, /* highaddr */ 2264 NULL, NULL, /* filter, filterarg */ 2265 MCLBYTES * SK_MAXTXSEGS, /* maxsize */ 2266 SK_MAXTXSEGS, /* nsegments */ 2267 MCLBYTES, /* maxsegsize */ 2268 0, /* flags */ 2269 NULL, NULL, /* lockfunc, lockarg */ 2270 &sc_if->sk_cdata.sk_tx_tag); 2271 if (error != 0) { 2272 device_printf(sc_if->sk_if_dev, 2273 "failed to allocate Tx DMA tag\n"); 2274 goto fail; 2275 } 2276 2277 /* create tag for Rx buffers */ 2278 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ 2279 1, 0, /* algnmnt, boundary */ 2280 BUS_SPACE_MAXADDR, /* lowaddr */ 2281 BUS_SPACE_MAXADDR, /* highaddr */ 2282 NULL, NULL, /* filter, filterarg */ 2283 MCLBYTES, /* maxsize */ 2284 1, /* nsegments */ 2285 MCLBYTES, /* maxsegsize */ 2286 0, /* flags */ 2287 NULL, NULL, /* lockfunc, lockarg */ 2288 &sc_if->sk_cdata.sk_rx_tag); 2289 if (error != 0) { 2290 device_printf(sc_if->sk_if_dev, 2291 "failed to allocate Rx DMA tag\n"); 2292 goto fail; 2293 } 2294 2295 /* create tag for jumbo Rx buffers */ 2296 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ 2297 PAGE_SIZE, 0, /* algnmnt, boundary */ 2298 BUS_SPACE_MAXADDR, /* lowaddr */ 2299 BUS_SPACE_MAXADDR, /* highaddr */ 2300 NULL, NULL, /* filter, filterarg */ 2301 MCLBYTES * SK_MAXRXSEGS, /* maxsize */ 2302 SK_MAXRXSEGS, /* nsegments */ 2303 SK_JLEN, /* maxsegsize */ 2304 0, /* flags */ 2305 NULL, NULL, /* lockfunc, lockarg */ 2306 &sc_if->sk_cdata.sk_jumbo_rx_tag); 2307 if (error != 0) { 2308 device_printf(sc_if->sk_if_dev, 2309 "failed to allocate jumbo Rx DMA tag\n"); 2310 goto fail; 2311 } 2312 2313 /* allocate DMA'able memory and load the DMA map for Tx ring */ 2314 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_tx_ring_tag, 2315 (void **)&sc_if->sk_rdata.sk_tx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO, 2316 &sc_if->sk_cdata.sk_tx_ring_map); 2317 if (error != 0) { 2318 device_printf(sc_if->sk_if_dev, 2319 "failed to allocate DMA'able memory for Tx ring\n"); 2320 goto fail; 2321 } 2322 2323 ctx.sk_busaddr = 0; 2324 error = bus_dmamap_load(sc_if->sk_cdata.sk_tx_ring_tag, 2325 sc_if->sk_cdata.sk_tx_ring_map, sc_if->sk_rdata.sk_tx_ring, 2326 SK_TX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 2327 if (error != 0) { 2328 device_printf(sc_if->sk_if_dev, 2329 "failed to load DMA'able memory for Tx ring\n"); 2330 goto fail; 2331 } 2332 sc_if->sk_rdata.sk_tx_ring_paddr = ctx.sk_busaddr; 2333 2334 /* allocate DMA'able memory and load the DMA map for Rx ring */ 2335 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_rx_ring_tag, 2336 (void **)&sc_if->sk_rdata.sk_rx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO, 2337 
&sc_if->sk_cdata.sk_rx_ring_map); 2338 if (error != 0) { 2339 device_printf(sc_if->sk_if_dev, 2340 "failed to allocate DMA'able memory for Rx ring\n"); 2341 goto fail; 2342 } 2343 2344 ctx.sk_busaddr = 0; 2345 error = bus_dmamap_load(sc_if->sk_cdata.sk_rx_ring_tag, 2346 sc_if->sk_cdata.sk_rx_ring_map, sc_if->sk_rdata.sk_rx_ring, 2347 SK_RX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 2348 if (error != 0) { 2349 device_printf(sc_if->sk_if_dev, 2350 "failed to load DMA'able memory for Rx ring\n"); 2351 goto fail; 2352 } 2353 sc_if->sk_rdata.sk_rx_ring_paddr = ctx.sk_busaddr; 2354 2355 /* allocate DMA'able memory and load the DMA map for jumbo Rx ring */ 2356 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, 2357 (void **)&sc_if->sk_rdata.sk_jumbo_rx_ring, 2358 BUS_DMA_NOWAIT|BUS_DMA_ZERO, &sc_if->sk_cdata.sk_jumbo_rx_ring_map); 2359 if (error != 0) { 2360 device_printf(sc_if->sk_if_dev, 2361 "failed to allocate DMA'able memory for jumbo Rx ring\n"); 2362 goto fail; 2363 } 2364 2365 ctx.sk_busaddr = 0; 2366 error = bus_dmamap_load(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, 2367 sc_if->sk_cdata.sk_jumbo_rx_ring_map, 2368 sc_if->sk_rdata.sk_jumbo_rx_ring, SK_JUMBO_RX_RING_SZ, sk_dmamap_cb, 2369 &ctx, BUS_DMA_NOWAIT); 2370 if (error != 0) { 2371 device_printf(sc_if->sk_if_dev, 2372 "failed to load DMA'able memory for jumbo Rx ring\n"); 2373 goto fail; 2374 } 2375 sc_if->sk_rdata.sk_jumbo_rx_ring_paddr = ctx.sk_busaddr; 2376 2377 /* create DMA maps for Tx buffers */ 2378 for (i = 0; i < SK_TX_RING_CNT; i++) { 2379 txd = &sc_if->sk_cdata.sk_txdesc[i]; 2380 txd->tx_m = NULL; 2381 txd->tx_dmamap = 0; 2382 error = bus_dmamap_create(sc_if->sk_cdata.sk_tx_tag, 0, 2383 &txd->tx_dmamap); 2384 if (error != 0) { 2385 device_printf(sc_if->sk_if_dev, 2386 "failed to create Tx dmamap\n"); 2387 goto fail; 2388 } 2389 } 2390 /* create DMA maps for Rx buffers */ 2391 if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0, 2392 &sc_if->sk_cdata.sk_rx_sparemap)) != 0) { 2393 device_printf(sc_if->sk_if_dev, 2394 "failed to create spare Rx dmamap\n"); 2395 goto fail; 2396 } 2397 for (i = 0; i < SK_RX_RING_CNT; i++) { 2398 rxd = &sc_if->sk_cdata.sk_rxdesc[i]; 2399 rxd->rx_m = NULL; 2400 rxd->rx_dmamap = 0; 2401 error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0, 2402 &rxd->rx_dmamap); 2403 if (error != 0) { 2404 device_printf(sc_if->sk_if_dev, 2405 "failed to create Rx dmamap\n"); 2406 goto fail; 2407 } 2408 } 2409 /* create DMA maps for jumbo Rx buffers */ 2410 if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0, 2411 &sc_if->sk_cdata.sk_jumbo_rx_sparemap)) != 0) { 2412 device_printf(sc_if->sk_if_dev, 2413 "failed to create spare jumbo Rx dmamap\n"); 2414 goto fail; 2415 } 2416 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) { 2417 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i]; 2418 jrxd->rx_m = NULL; 2419 jrxd->rx_dmamap = 0; 2420 error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0, 2421 &jrxd->rx_dmamap); 2422 if (error != 0) { 2423 device_printf(sc_if->sk_if_dev, 2424 "failed to create jumbo Rx dmamap\n"); 2425 goto fail; 2426 } 2427 } 2428 2429 /* allocate DMA'able memory and load the DMA map for jumbo buf */ 2430 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_jumbo_tag, 2431 (void **)&sc_if->sk_rdata.sk_jumbo_buf, 2432 BUS_DMA_NOWAIT|BUS_DMA_ZERO, &sc_if->sk_cdata.sk_jumbo_map); 2433 if (error != 0) { 2434 device_printf(sc_if->sk_if_dev, 2435 "failed to allocate DMA'able memory for jumbo buf\n"); 2436 goto fail; 2437 } 2438 2439 ctx.sk_busaddr = 0; 2440 error = 
bus_dmamap_load(sc_if->sk_cdata.sk_jumbo_tag, 2441 sc_if->sk_cdata.sk_jumbo_map, 2442 sc_if->sk_rdata.sk_jumbo_buf, SK_JMEM, sk_dmamap_cb, 2443 &ctx, BUS_DMA_NOWAIT); 2444 if (error != 0) { 2445 device_printf(sc_if->sk_if_dev, 2446 "failed to load DMA'able memory for jumbobuf\n"); 2447 goto fail; 2448 } 2449 sc_if->sk_rdata.sk_jumbo_buf_paddr = ctx.sk_busaddr; 2450 2451 /* 2452 * Now divide it up into 9K pieces and save the addresses 2453 * in an array. 2454 */ 2455 ptr = sc_if->sk_rdata.sk_jumbo_buf; 2456 for (i = 0; i < SK_JSLOTS; i++) { 2457 sc_if->sk_cdata.sk_jslots[i] = ptr; 2458 ptr += SK_JLEN; 2459 entry = malloc(sizeof(struct sk_jpool_entry), 2460 M_DEVBUF, M_NOWAIT); 2461 if (entry == NULL) { 2462 device_printf(sc_if->sk_if_dev, 2463 "no memory for jumbo buffers!\n"); 2464 error = ENOMEM; 2465 goto fail; 2466 } 2467 entry->slot = i; 2468 SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, 2469 jpool_entries); 2470 } 2471 2472 fail: 2473 return (error); 2474 } 2475 2476 static void 2477 sk_dma_free(sc_if) 2478 struct sk_if_softc *sc_if; 2479 { 2480 struct sk_txdesc *txd; 2481 struct sk_rxdesc *rxd; 2482 struct sk_rxdesc *jrxd; 2483 struct sk_jpool_entry *entry; 2484 int i; 2485 2486 SK_JLIST_LOCK(sc_if); 2487 while ((entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead))) { 2488 device_printf(sc_if->sk_if_dev, 2489 "asked to free buffer that is in use!\n"); 2490 SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries); 2491 SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, 2492 jpool_entries); 2493 } 2494 2495 while (!SLIST_EMPTY(&sc_if->sk_jfree_listhead)) { 2496 entry = SLIST_FIRST(&sc_if->sk_jfree_listhead); 2497 SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries); 2498 free(entry, M_DEVBUF); 2499 } 2500 SK_JLIST_UNLOCK(sc_if); 2501 2502 /* destroy jumbo buffer block */ 2503 if (sc_if->sk_cdata.sk_jumbo_map) 2504 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_tag, 2505 sc_if->sk_cdata.sk_jumbo_map); 2506 2507 if (sc_if->sk_rdata.sk_jumbo_buf) { 2508 bus_dmamem_free(sc_if->sk_cdata.sk_jumbo_tag, 2509 sc_if->sk_rdata.sk_jumbo_buf, 2510 sc_if->sk_cdata.sk_jumbo_map); 2511 sc_if->sk_rdata.sk_jumbo_buf = NULL; 2512 sc_if->sk_cdata.sk_jumbo_map = 0; 2513 } 2514 2515 /* Tx ring */ 2516 if (sc_if->sk_cdata.sk_tx_ring_tag) { 2517 if (sc_if->sk_cdata.sk_tx_ring_map) 2518 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_ring_tag, 2519 sc_if->sk_cdata.sk_tx_ring_map); 2520 if (sc_if->sk_cdata.sk_tx_ring_map && 2521 sc_if->sk_rdata.sk_tx_ring) 2522 bus_dmamem_free(sc_if->sk_cdata.sk_tx_ring_tag, 2523 sc_if->sk_rdata.sk_tx_ring, 2524 sc_if->sk_cdata.sk_tx_ring_map); 2525 sc_if->sk_rdata.sk_tx_ring = NULL; 2526 sc_if->sk_cdata.sk_tx_ring_map = 0; 2527 bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_ring_tag); 2528 sc_if->sk_cdata.sk_tx_ring_tag = NULL; 2529 } 2530 /* Rx ring */ 2531 if (sc_if->sk_cdata.sk_rx_ring_tag) { 2532 if (sc_if->sk_cdata.sk_rx_ring_map) 2533 bus_dmamap_unload(sc_if->sk_cdata.sk_rx_ring_tag, 2534 sc_if->sk_cdata.sk_rx_ring_map); 2535 if (sc_if->sk_cdata.sk_rx_ring_map && 2536 sc_if->sk_rdata.sk_rx_ring) 2537 bus_dmamem_free(sc_if->sk_cdata.sk_rx_ring_tag, 2538 sc_if->sk_rdata.sk_rx_ring, 2539 sc_if->sk_cdata.sk_rx_ring_map); 2540 sc_if->sk_rdata.sk_rx_ring = NULL; 2541 sc_if->sk_cdata.sk_rx_ring_map = 0; 2542 bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_ring_tag); 2543 sc_if->sk_cdata.sk_rx_ring_tag = NULL; 2544 } 2545 /* jumbo Rx ring */ 2546 if (sc_if->sk_cdata.sk_jumbo_rx_ring_tag) { 2547 if (sc_if->sk_cdata.sk_jumbo_rx_ring_map) 2548 
bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, 2549 sc_if->sk_cdata.sk_jumbo_rx_ring_map); 2550 if (sc_if->sk_cdata.sk_jumbo_rx_ring_map && 2551 sc_if->sk_rdata.sk_jumbo_rx_ring) 2552 bus_dmamem_free(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, 2553 sc_if->sk_rdata.sk_jumbo_rx_ring, 2554 sc_if->sk_cdata.sk_jumbo_rx_ring_map); 2555 sc_if->sk_rdata.sk_jumbo_rx_ring = NULL; 2556 sc_if->sk_cdata.sk_jumbo_rx_ring_map = 0; 2557 bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_ring_tag); 2558 sc_if->sk_cdata.sk_jumbo_rx_ring_tag = NULL; 2559 } 2560 /* Tx buffers */ 2561 if (sc_if->sk_cdata.sk_tx_tag) { 2562 for (i = 0; i < SK_TX_RING_CNT; i++) { 2563 txd = &sc_if->sk_cdata.sk_txdesc[i]; 2564 if (txd->tx_dmamap) { 2565 bus_dmamap_destroy(sc_if->sk_cdata.sk_tx_tag, 2566 txd->tx_dmamap); 2567 txd->tx_dmamap = 0; 2568 } 2569 } 2570 bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_tag); 2571 sc_if->sk_cdata.sk_tx_tag = NULL; 2572 } 2573 /* Rx buffers */ 2574 if (sc_if->sk_cdata.sk_rx_tag) { 2575 for (i = 0; i < SK_RX_RING_CNT; i++) { 2576 rxd = &sc_if->sk_cdata.sk_rxdesc[i]; 2577 if (rxd->rx_dmamap) { 2578 bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag, 2579 rxd->rx_dmamap); 2580 rxd->rx_dmamap = 0; 2581 } 2582 } 2583 if (sc_if->sk_cdata.sk_rx_sparemap) { 2584 bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag, 2585 sc_if->sk_cdata.sk_rx_sparemap); 2586 sc_if->sk_cdata.sk_rx_sparemap = 0; 2587 } 2588 bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_tag); 2589 sc_if->sk_cdata.sk_rx_tag = NULL; 2590 } 2591 /* jumbo Rx buffers */ 2592 if (sc_if->sk_cdata.sk_jumbo_rx_tag) { 2593 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) { 2594 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i]; 2595 if (jrxd->rx_dmamap) { 2596 bus_dmamap_destroy( 2597 sc_if->sk_cdata.sk_jumbo_rx_tag, 2598 jrxd->rx_dmamap); 2599 jrxd->rx_dmamap = 0; 2600 } 2601 } 2602 if (sc_if->sk_cdata.sk_jumbo_rx_sparemap) { 2603 bus_dmamap_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag, 2604 sc_if->sk_cdata.sk_jumbo_rx_sparemap); 2605 sc_if->sk_cdata.sk_jumbo_rx_sparemap = 0; 2606 } 2607 bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag); 2608 sc_if->sk_cdata.sk_jumbo_rx_tag = NULL; 2609 } 2610 2611 if (sc_if->sk_cdata.sk_parent_tag) { 2612 bus_dma_tag_destroy(sc_if->sk_cdata.sk_parent_tag); 2613 sc_if->sk_cdata.sk_parent_tag = NULL; 2614 } 2615 mtx_destroy(&sc_if->sk_jlist_mtx); 2616 } 2617 2618 /* 2619 * Allocate a jumbo buffer. 2620 */ 2621 static void * 2622 sk_jalloc(sc_if) 2623 struct sk_if_softc *sc_if; 2624 { 2625 struct sk_jpool_entry *entry; 2626 2627 SK_JLIST_LOCK(sc_if); 2628 2629 entry = SLIST_FIRST(&sc_if->sk_jfree_listhead); 2630 2631 if (entry == NULL) { 2632 SK_JLIST_UNLOCK(sc_if); 2633 return (NULL); 2634 } 2635 2636 SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries); 2637 SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries); 2638 2639 SK_JLIST_UNLOCK(sc_if); 2640 2641 return (sc_if->sk_cdata.sk_jslots[entry->slot]); 2642 } 2643 2644 /* 2645 * Release a jumbo buffer. 2646 */ 2647 static void 2648 sk_jfree(buf, args) 2649 void *buf; 2650 void *args; 2651 { 2652 struct sk_if_softc *sc_if; 2653 struct sk_jpool_entry *entry; 2654 int i; 2655 2656 /* Extract the softc struct pointer. 
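sk_jfree() is installed as the external-storage free routine for jumbo mbufs, so the only context it gets back is the (buf, args) pair; args carries the softc, and the slot index is recovered from buf by pointer arithmetic below.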
*/ 2657 sc_if = (struct sk_if_softc *)args; 2658 KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__)); 2659 2660 SK_JLIST_LOCK(sc_if); 2661 /* calculate the slot this buffer belongs to */ 2662 i = ((vm_offset_t)buf 2663 - (vm_offset_t)sc_if->sk_rdata.sk_jumbo_buf) / SK_JLEN; 2664 KASSERT(i >= 0 && i < SK_JSLOTS, 2665 ("%s: asked to free buffer that we don't manage!", __func__)); 2666 2667 entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead); 2668 KASSERT(entry != NULL, ("%s: buffer not in use!", __func__)); 2669 entry->slot = i; 2670 SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries); 2671 SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries); 2672 if (SLIST_EMPTY(&sc_if->sk_jinuse_listhead)) 2673 wakeup(sc_if); 2674 2675 SK_JLIST_UNLOCK(sc_if); 2676 } 2677 2678 static void 2679 sk_txcksum(ifp, m, f) 2680 struct ifnet *ifp; 2681 struct mbuf *m; 2682 struct sk_tx_desc *f; 2683 { 2684 struct ip *ip; 2685 u_int16_t offset; 2686 u_int8_t *p; 2687 2688 offset = sizeof(struct ip) + ETHER_HDR_LEN; 2689 for(; m && m->m_len == 0; m = m->m_next) 2690 ; 2691 if (m == NULL || m->m_len < ETHER_HDR_LEN) { 2692 if_printf(ifp, "%s: m_len < ETHER_HDR_LEN\n", __func__); 2693 /* checksum may be corrupted */ 2694 goto sendit; 2695 } 2696 if (m->m_len < ETHER_HDR_LEN + sizeof(u_int32_t)) { 2697 if (m->m_len != ETHER_HDR_LEN) { 2698 if_printf(ifp, "%s: m_len != ETHER_HDR_LEN\n", 2699 __func__); 2700 /* checksum may be corrupted */ 2701 goto sendit; 2702 } 2703 for(m = m->m_next; m && m->m_len == 0; m = m->m_next) 2704 ; 2705 if (m == NULL) { 2706 offset = sizeof(struct ip) + ETHER_HDR_LEN; 2707 /* checksum may be corrupted */ 2708 goto sendit; 2709 } 2710 ip = mtod(m, struct ip *); 2711 } else { 2712 p = mtod(m, u_int8_t *); 2713 p += ETHER_HDR_LEN; 2714 ip = (struct ip *)p; 2715 } 2716 offset = (ip->ip_hl << 2) + ETHER_HDR_LEN; 2717 2718 sendit: 2719 f->sk_csum_startval = 0; 2720 f->sk_csum_start = htole32(((offset + m->m_pkthdr.csum_data) & 0xffff) | 2721 (offset << 16)); 2722 } 2723 2724 static int 2725 sk_encap(sc_if, m_head) 2726 struct sk_if_softc *sc_if; 2727 struct mbuf **m_head; 2728 { 2729 struct sk_txdesc *txd; 2730 struct sk_tx_desc *f = NULL; 2731 struct mbuf *m, *n; 2732 bus_dma_segment_t txsegs[SK_MAXTXSEGS]; 2733 u_int32_t cflags, frag, si, sk_ctl; 2734 int error, i, nseg; 2735 2736 SK_IF_LOCK_ASSERT(sc_if); 2737 2738 if ((txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txfreeq)) == NULL) 2739 return (ENOBUFS); 2740 2741 m = *m_head; 2742 error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag, 2743 txd->tx_dmamap, m, txsegs, &nseg, 0); 2744 if (error == EFBIG) { 2745 n = m_defrag(m, M_DONTWAIT); 2746 if (n == NULL) { 2747 m_freem(m); 2748 m = NULL; 2749 return (ENOMEM); 2750 } 2751 m = n; 2752 error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag, 2753 txd->tx_dmamap, m, txsegs, &nseg, 0); 2754 if (error != 0) { 2755 m_freem(m); 2756 m = NULL; 2757 return (error); 2758 } 2759 } else if (error != 0) 2760 return (error); 2761 if (nseg == 0) { 2762 m_freem(m); 2763 m = NULL; 2764 return (EIO); 2765 } 2766 if (sc_if->sk_cdata.sk_tx_cnt + nseg >= SK_TX_RING_CNT) { 2767 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap); 2768 return (ENOBUFS); 2769 } 2770 2771 if ((m->m_pkthdr.csum_flags & sc_if->sk_ifp->if_hwassist) != 0) 2772 cflags = SK_OPCODE_CSUM; 2773 else 2774 cflags = SK_OPCODE_DEFAULT; 2775 si = frag = sc_if->sk_cdata.sk_tx_prod; 2776 for (i = 0; i < nseg; i++) { 2777 f = &sc_if->sk_rdata.sk_tx_ring[frag]; 2778 f->sk_data_lo = 
htole32(SK_ADDR_LO(txsegs[i].ds_addr)); 2779 f->sk_data_hi = htole32(SK_ADDR_HI(txsegs[i].ds_addr)); 2780 sk_ctl = txsegs[i].ds_len | cflags; 2781 if (i == 0) { 2782 if (cflags == SK_OPCODE_CSUM) 2783 sk_txcksum(sc_if->sk_ifp, m, f); 2784 sk_ctl |= SK_TXCTL_FIRSTFRAG; 2785 } else 2786 sk_ctl |= SK_TXCTL_OWN; 2787 f->sk_ctl = htole32(sk_ctl); 2788 sc_if->sk_cdata.sk_tx_cnt++; 2789 SK_INC(frag, SK_TX_RING_CNT); 2790 } 2791 sc_if->sk_cdata.sk_tx_prod = frag; 2792 2793 /* set EOF on the last descriptor */ 2794 frag = (frag + SK_TX_RING_CNT - 1) % SK_TX_RING_CNT; 2795 f = &sc_if->sk_rdata.sk_tx_ring[frag]; 2796 f->sk_ctl |= htole32(SK_TXCTL_LASTFRAG | SK_TXCTL_EOF_INTR); 2797 2798 /* turn ownership of the first descriptor over to the NIC */ 2799 f = &sc_if->sk_rdata.sk_tx_ring[si]; 2800 f->sk_ctl |= htole32(SK_TXCTL_OWN); 2801 2802 STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txfreeq, tx_q); 2803 STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txbusyq, txd, tx_q); 2804 txd->tx_m = m; 2805 2806 /* sync descriptors */ 2807 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap, 2808 BUS_DMASYNC_PREWRITE); 2809 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag, 2810 sc_if->sk_cdata.sk_tx_ring_map, 2811 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2812 2813 return (0); 2814 } 2815 2816 static void 2817 sk_start(ifp) 2818 struct ifnet *ifp; 2819 { 2820 struct sk_if_softc *sc_if; 2821 2822 sc_if = ifp->if_softc; 2823 2824 SK_IF_LOCK(sc_if); 2825 sk_start_locked(ifp); 2826 SK_IF_UNLOCK(sc_if); 2827 2828 return; 2829 } 2830 2831 static void 2832 sk_start_locked(ifp) 2833 struct ifnet *ifp; 2834 { 2835 struct sk_softc *sc; 2836 struct sk_if_softc *sc_if; 2837 struct mbuf *m_head; 2838 int enq; 2839 2840 sc_if = ifp->if_softc; 2841 sc = sc_if->sk_softc; 2842 2843 SK_IF_LOCK_ASSERT(sc_if); 2844 2845 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 2846 sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 1; ) { 2847 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 2848 if (m_head == NULL) 2849 break; 2850 2851 /* 2852 * Pack the data into the transmit ring. If we 2853 * don't have room, set the OACTIVE flag and wait 2854 * for the NIC to drain the ring. 2855 */ 2856 if (sk_encap(sc_if, &m_head)) { 2857 if (m_head == NULL) 2858 break; 2859 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 2860 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2861 break; 2862 } 2863 2864 enq++; 2865 /* 2866 * If there's a BPF listener, bounce a copy of this frame 2867 * to him. 2868 */ 2869 BPF_MTAP(ifp, m_head); 2870 } 2871 2872 if (enq > 0) { 2873 /* Transmit */ 2874 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START); 2875 2876 /* Set a timeout in case the chip goes out to lunch. */ 2877 ifp->if_timer = 5; 2878 } 2879 } 2880 2881 2882 static void 2883 sk_watchdog(ifp) 2884 struct ifnet *ifp; 2885 { 2886 struct sk_if_softc *sc_if; 2887 2888 sc_if = ifp->if_softc; 2889 2890 SK_IF_LOCK(sc_if); 2891 if_printf(sc_if->sk_ifp, "watchdog timeout\n"); 2892 ifp->if_oerrors++; 2893 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2894 sk_init_locked(sc_if); 2895 SK_IF_UNLOCK(sc_if); 2896 2897 return; 2898 } 2899 2900 static void 2901 skc_shutdown(dev) 2902 device_t dev; 2903 { 2904 struct sk_softc *sc; 2905 2906 sc = device_get_softc(dev); 2907 SK_LOCK(sc); 2908 2909 /* Turn off the 'driver is loaded' LED. */ 2910 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF); 2911 2912 /* 2913 * Reset the GEnesis controller. Doing this should also 2914 * assert the resets on the attached XMAC(s).
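Each port's MAC sits behind the controller's reset logic, so a single controller reset should quiesce both interfaces on dual port cards.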
2915 */ 2916 sk_reset(sc); 2917 SK_UNLOCK(sc); 2918 2919 return; 2920 } 2921 2922 static int 2923 skc_suspend(dev) 2924 device_t dev; 2925 { 2926 struct sk_softc *sc; 2927 struct sk_if_softc *sc_if0, *sc_if1; 2928 struct ifnet *ifp0 = NULL, *ifp1 = NULL; 2929 2930 sc = device_get_softc(dev); 2931 2932 SK_LOCK(sc); 2933 2934 sc_if0 = sc->sk_if[SK_PORT_A]; 2935 sc_if1 = sc->sk_if[SK_PORT_B]; 2936 if (sc_if0 != NULL) 2937 ifp0 = sc_if0->sk_ifp; 2938 if (sc_if1 != NULL) 2939 ifp1 = sc_if1->sk_ifp; 2940 if (ifp0 != NULL) 2941 sk_stop(sc_if0); 2942 if (ifp1 != NULL) 2943 sk_stop(sc_if1); 2944 sc->sk_suspended = 1; 2945 2946 SK_UNLOCK(sc); 2947 2948 return (0); 2949 } 2950 2951 static int 2952 skc_resume(dev) 2953 device_t dev; 2954 { 2955 struct sk_softc *sc; 2956 struct sk_if_softc *sc_if0, *sc_if1; 2957 struct ifnet *ifp0 = NULL, *ifp1 = NULL; 2958 2959 sc = device_get_softc(dev); 2960 2961 SK_LOCK(sc); 2962 2963 sc_if0 = sc->sk_if[SK_PORT_A]; 2964 sc_if1 = sc->sk_if[SK_PORT_B]; 2965 if (sc_if0 != NULL) 2966 ifp0 = sc_if0->sk_ifp; 2967 if (sc_if1 != NULL) 2968 ifp1 = sc_if1->sk_ifp; 2969 if (ifp0 != NULL && ifp0->if_flags & IFF_UP) 2970 sk_init_locked(sc_if0); 2971 if (ifp1 != NULL && ifp1->if_flags & IFF_UP) 2972 sk_init_locked(sc_if1); 2973 sc->sk_suspended = 0; 2974 2975 SK_UNLOCK(sc); 2976 2977 return (0); 2978 } 2979 2980 /* 2981 * According to the SK-NET GENESIS data sheet the hardware can compute 2982 * two Rx checksums at the same time (each checksum start position is 2983 * programmed in the Rx descriptors). However it seems that TCP/UDP checksum 2984 * offload does not work, at least on my Yukon hardware. I tried every 2985 * possible way to get a correct checksum value but couldn't get one. So 2986 * TCP/UDP checksum offload is disabled for the moment and only IP checksum 2987 * offload is enabled. 2988 * As the normal IP header size is 20 bytes I don't expect this to give much 2989 * of an increase in throughput. However it doesn't seem to hurt performance 2990 * in my testing. If there is more detailed information on the checksum 2991 * secrets of the hardware in question please contact yongari@FreeBSD.org 2992 * so that TCP/UDP checksum offload support can be added. 2993 */ 2994 static __inline void 2995 sk_rxcksum(ifp, m, csum) 2996 struct ifnet *ifp; 2997 struct mbuf *m; 2998 u_int32_t csum; 2999 { 3000 struct ether_header *eh; 3001 struct ip *ip; 3002 int32_t hlen, len, pktlen; 3003 u_int16_t csum1, csum2, ipcsum; 3004 3005 pktlen = m->m_pkthdr.len; 3006 if (pktlen < sizeof(struct ether_header) + sizeof(struct ip)) 3007 return; 3008 eh = mtod(m, struct ether_header *); 3009 if (eh->ether_type != htons(ETHERTYPE_IP)) 3010 return; 3011 ip = (struct ip *)(eh + 1); 3012 if (ip->ip_v != IPVERSION) 3013 return; 3014 hlen = ip->ip_hl << 2; 3015 pktlen -= sizeof(struct ether_header); 3016 if (hlen < sizeof(struct ip)) 3017 return; 3018 if (ntohs(ip->ip_len) < hlen) 3019 return; 3020 if (ntohs(ip->ip_len) != pktlen) 3021 return; 3022 3023 csum1 = htons(csum & 0xffff); 3024 csum2 = htons((csum >> 16) & 0xffff); 3025 ipcsum = in_addword(csum1, ~csum2 & 0xffff); 3026 /* checksum fixup for IP options */ 3027 len = hlen - sizeof(struct ip); 3028 if (len > 0) { 3029 /* 3030 * If the second checksum value were correct we could compute 3031 * the IP checksum with simple math. Unfortunately that value 3032 * comes back wrong, so we can't verify the checksum from it 3033 * (it seems some magic is needed to derive the correct 3034 * value).
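(For reference: csum packs the two 16-bit hardware sums, and in_addword(csum1, ~csum2) is one's-complement subtraction, which isolates the bytes lying between the two programmed start positions. With the start positions 20 bytes apart that remainder is the bare IP header, and an intact header folds to 0xffff; hence the ipcsum == 0xffff test below. This is an educated reading of the code, not something the data sheet spells out.)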
If the second checksum value were correct it would also 3035 * mean we could get the TCP/UDP checksum here. However, it would 3036 * still need a pseudo header checksum calculation due to hardware 3037 * limitations. 3038 */ 3039 return; 3040 } 3041 m->m_pkthdr.csum_flags = CSUM_IP_CHECKED; 3042 if (ipcsum == 0xffff) 3043 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 3044 } 3045 3046 static __inline int 3047 sk_rxvalid(sc, stat, len) 3048 struct sk_softc *sc; 3049 u_int32_t stat, len; 3050 { 3051 3052 if (sc->sk_type == SK_GENESIS) { 3053 if ((stat & XM_RXSTAT_ERRFRAME) == XM_RXSTAT_ERRFRAME || 3054 XM_RXSTAT_BYTES(stat) != len) 3055 return (0); 3056 } else { 3057 if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR | 3058 YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | 3059 YU_RXSTAT_JABBER)) != 0 || 3060 (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK || 3061 YU_RXSTAT_BYTES(stat) != len) 3062 return (0); 3063 } 3064 3065 return (1); 3066 } 3067 3068 static void 3069 sk_rxeof(sc_if) 3070 struct sk_if_softc *sc_if; 3071 { 3072 struct sk_softc *sc; 3073 struct mbuf *m; 3074 struct ifnet *ifp; 3075 struct sk_rx_desc *cur_rx; 3076 struct sk_rxdesc *rxd; 3077 int cons, prog; 3078 u_int32_t csum, rxstat, sk_ctl; 3079 3080 sc = sc_if->sk_softc; 3081 ifp = sc_if->sk_ifp; 3082 3083 SK_IF_LOCK_ASSERT(sc_if); 3084 3085 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag, 3086 sc_if->sk_cdata.sk_rx_ring_map, BUS_DMASYNC_POSTREAD); 3087 3088 prog = 0; 3089 for (cons = sc_if->sk_cdata.sk_rx_cons; prog < SK_RX_RING_CNT; 3090 prog++, SK_INC(cons, SK_RX_RING_CNT)) { 3091 cur_rx = &sc_if->sk_rdata.sk_rx_ring[cons]; 3092 sk_ctl = le32toh(cur_rx->sk_ctl); 3093 if ((sk_ctl & SK_RXCTL_OWN) != 0) 3094 break; 3095 rxd = &sc_if->sk_cdata.sk_rxdesc[cons]; 3096 rxstat = le32toh(cur_rx->sk_xmac_rxstat); 3097 3098 if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG | 3099 SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID | 3100 SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) || 3101 SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN || 3102 SK_RXBYTES(sk_ctl) > SK_MAX_FRAMELEN || 3103 sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) { 3104 ifp->if_ierrors++; 3105 sk_discard_rxbuf(sc_if, cons); 3106 continue; 3107 } 3108 3109 m = rxd->rx_m; 3110 csum = le32toh(cur_rx->sk_csum); 3111 if (sk_newbuf(sc_if, cons) != 0) { 3112 ifp->if_iqdrops++; 3113 /* reuse old buffer */ 3114 sk_discard_rxbuf(sc_if, cons); 3115 continue; 3116 } 3117 m->m_pkthdr.rcvif = ifp; 3118 m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl); 3119 ifp->if_ipackets++; 3120 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 3121 sk_rxcksum(ifp, m, csum); 3122 SK_IF_UNLOCK(sc_if); 3123 (*ifp->if_input)(ifp, m); 3124 SK_IF_LOCK(sc_if); 3125 } 3126 3127 if (prog > 0) { 3128 sc_if->sk_cdata.sk_rx_cons = cons; 3129 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag, 3130 sc_if->sk_cdata.sk_rx_ring_map, 3131 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3132 } 3133 } 3134 3135 static void 3136 sk_jumbo_rxeof(sc_if) 3137 struct sk_if_softc *sc_if; 3138 { 3139 struct sk_softc *sc; 3140 struct mbuf *m; 3141 struct ifnet *ifp; 3142 struct sk_rx_desc *cur_rx; 3143 struct sk_rxdesc *jrxd; 3144 int cons, prog; 3145 u_int32_t csum, rxstat, sk_ctl; 3146 3147 sc = sc_if->sk_softc; 3148 ifp = sc_if->sk_ifp; 3149 3150 SK_IF_LOCK_ASSERT(sc_if); 3151 3152 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, 3153 sc_if->sk_cdata.sk_jumbo_rx_ring_map, BUS_DMASYNC_POSTREAD); 3154 3155 prog = 0; 3156 for (cons = sc_if->sk_cdata.sk_jumbo_rx_cons; 3157 prog < SK_JUMBO_RX_RING_CNT; 3158 prog++, SK_INC(cons, SK_JUMBO_RX_RING_CNT)) { 3159
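/* This mirrors the sk_rxeof() loop above, but walks the jumbo ring and accepts frames up to SK_JUMBO_FRAMELEN. */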
cur_rx = &sc_if->sk_rdata.sk_jumbo_rx_ring[cons]; 3160 sk_ctl = le32toh(cur_rx->sk_ctl); 3161 if ((sk_ctl & SK_RXCTL_OWN) != 0) 3162 break; 3163 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[cons]; 3164 rxstat = le32toh(cur_rx->sk_xmac_rxstat); 3165 3166 if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG | 3167 SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID | 3168 SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) || 3169 SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN || 3170 SK_RXBYTES(sk_ctl) > SK_JUMBO_FRAMELEN || 3171 sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) { 3172 ifp->if_ierrors++; 3173 sk_discard_jumbo_rxbuf(sc_if, cons); 3174 continue; 3175 } 3176 3177 m = jrxd->rx_m; 3178 csum = le32toh(cur_rx->sk_csum); 3179 if (sk_jumbo_newbuf(sc_if, cons) != 0) { 3180 ifp->if_iqdrops++; 3181 /* reuse old buffer */ 3182 sk_discard_jumbo_rxbuf(sc_if, cons); 3183 continue; 3184 } 3185 m->m_pkthdr.rcvif = ifp; 3186 m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl); 3187 ifp->if_ipackets++; 3188 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 3189 sk_rxcksum(ifp, m, csum); 3190 SK_IF_UNLOCK(sc_if); 3191 (*ifp->if_input)(ifp, m); 3192 SK_IF_LOCK(sc_if); 3193 } 3194 3195 if (prog > 0) { 3196 sc_if->sk_cdata.sk_jumbo_rx_cons = cons; 3197 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, 3198 sc_if->sk_cdata.sk_jumbo_rx_ring_map, 3199 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3200 } 3201 } 3202 3203 static void 3204 sk_txeof(sc_if) 3205 struct sk_if_softc *sc_if; 3206 { 3207 struct sk_softc *sc; 3208 struct sk_txdesc *txd; 3209 struct sk_tx_desc *cur_tx; 3210 struct ifnet *ifp; 3211 u_int32_t idx, sk_ctl; 3212 3213 sc = sc_if->sk_softc; 3214 ifp = sc_if->sk_ifp; 3215 3216 txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq); 3217 if (txd == NULL) 3218 return; 3219 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag, 3220 sc_if->sk_cdata.sk_tx_ring_map, BUS_DMASYNC_POSTREAD); 3221 /* 3222 * Go through our tx ring and free mbufs for those 3223 * frames that have been sent. 3224 */ 3225 for (idx = sc_if->sk_cdata.sk_tx_cons;; SK_INC(idx, SK_TX_RING_CNT)) { 3226 if (sc_if->sk_cdata.sk_tx_cnt <= 0) 3227 break; 3228 cur_tx = &sc_if->sk_rdata.sk_tx_ring[idx]; 3229 sk_ctl = le32toh(cur_tx->sk_ctl); 3230 if (sk_ctl & SK_TXCTL_OWN) 3231 break; 3232 sc_if->sk_cdata.sk_tx_cnt--; 3233 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3234 if ((sk_ctl & SK_TXCTL_LASTFRAG) == 0) 3235 continue; 3236 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap, 3237 BUS_DMASYNC_POSTWRITE); 3238 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap); 3239 3240 ifp->if_opackets++; 3241 m_freem(txd->tx_m); 3242 txd->tx_m = NULL; 3243 STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txbusyq, tx_q); 3244 STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q); 3245 txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq); 3246 } 3247 sc_if->sk_cdata.sk_tx_cons = idx; 3248 ifp->if_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 
5 : 0; 3249 3250 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag, 3251 sc_if->sk_cdata.sk_tx_ring_map, 3252 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3253 } 3254 3255 static void 3256 sk_tick(xsc_if) 3257 void *xsc_if; 3258 { 3259 struct sk_if_softc *sc_if; 3260 struct mii_data *mii; 3261 struct ifnet *ifp; 3262 int i; 3263 3264 sc_if = xsc_if; 3265 ifp = sc_if->sk_ifp; 3266 mii = device_get_softc(sc_if->sk_miibus); 3267 3268 if (!(ifp->if_flags & IFF_UP)) 3269 return; 3270 3271 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { 3272 sk_intr_bcom(sc_if); 3273 return; 3274 } 3275 3276 /* 3277 * According to SysKonnect, the correct way to verify that 3278 * the link has come back up is to poll bit 0 of the GPIO 3279 * register three times. This pin has the signal from the 3280 * link_sync pin connected to it; if we read the same link 3281 * state 3 times in a row, we know the link is up. 3282 */ 3283 for (i = 0; i < 3; i++) { 3284 if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET) 3285 break; 3286 } 3287 3288 if (i != 3) { 3289 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if); 3290 return; 3291 } 3292 3293 /* Turn the GP0 interrupt back on. */ 3294 SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET); 3295 SK_XM_READ_2(sc_if, XM_ISR); 3296 mii_tick(mii); 3297 callout_stop(&sc_if->sk_tick_ch); 3298 } 3299 3300 static void 3301 sk_yukon_tick(xsc_if) 3302 void *xsc_if; 3303 { 3304 struct sk_if_softc *sc_if; 3305 struct mii_data *mii; 3306 3307 sc_if = xsc_if; 3308 mii = device_get_softc(sc_if->sk_miibus); 3309 3310 mii_tick(mii); 3311 callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if); 3312 } 3313 3314 static void 3315 sk_intr_bcom(sc_if) 3316 struct sk_if_softc *sc_if; 3317 { 3318 struct mii_data *mii; 3319 struct ifnet *ifp; 3320 int status; 3321 mii = device_get_softc(sc_if->sk_miibus); 3322 ifp = sc_if->sk_ifp; 3323 3324 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); 3325 3326 /* 3327 * Read the PHY interrupt register to make sure 3328 * we clear any pending interrupts. 3329 */ 3330 status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR); 3331 3332 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 3333 sk_init_xmac(sc_if); 3334 return; 3335 } 3336 3337 if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) { 3338 int lstat; 3339 lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 3340 BRGPHY_MII_AUXSTS); 3341 3342 if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) { 3343 mii_mediachg(mii); 3344 /* Turn off the link LED. */ 3345 SK_IF_WRITE_1(sc_if, 0, 3346 SK_LINKLED1_CTL, SK_LINKLED_OFF); 3347 sc_if->sk_link = 0; 3348 } else if (status & BRGPHY_ISR_LNK_CHG) { 3349 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, 3350 BRGPHY_MII_IMR, 0xFF00); 3351 mii_tick(mii); 3352 sc_if->sk_link = 1; 3353 /* Turn on the link LED. */ 3354 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, 3355 SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF| 3356 SK_LINKLED_BLINK_OFF); 3357 } else { 3358 mii_tick(mii); 3359 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if); 3360 } 3361 } 3362 3363 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); 3364 3365 return; 3366 } 3367 3368 static void 3369 sk_intr_xmac(sc_if) 3370 struct sk_if_softc *sc_if; 3371 { 3372 struct sk_softc *sc; 3373 u_int16_t status; 3374 3375 sc = sc_if->sk_softc; 3376 status = SK_XM_READ_2(sc_if, XM_ISR); 3377 3378 /* 3379 * Link has gone down. Start MII tick timeout to 3380 * watch for link resync. 
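sk_tick() then samples the GP0 link_sync bit and re-enables the GP0 interrupt only after the pin reads clear three times in a row.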
3381 */ 3382 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) { 3383 if (status & XM_ISR_GP0_SET) { 3384 SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET); 3385 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if); 3386 } 3387 3388 if (status & XM_ISR_AUTONEG_DONE) { 3389 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if); 3390 } 3391 } 3392 3393 if (status & XM_IMR_TX_UNDERRUN) 3394 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO); 3395 3396 if (status & XM_IMR_RX_OVERRUN) 3397 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO); 3398 3399 status = SK_XM_READ_2(sc_if, XM_ISR); 3400 3401 return; 3402 } 3403 3404 static void 3405 sk_intr_yukon(sc_if) 3406 struct sk_if_softc *sc_if; 3407 { 3408 u_int8_t status; 3409 3410 status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR); 3411 /* RX overrun */ 3412 if ((status & SK_GMAC_INT_RX_OVER) != 0) { 3413 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, 3414 SK_RFCTL_RX_FIFO_OVER); 3415 } 3416 /* TX underrun */ 3417 if ((status & SK_GMAC_INT_TX_UNDER) != 0) { 3418 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, 3419 SK_TFCTL_TX_FIFO_UNDER); 3420 } 3421 } 3422 3423 static void 3424 sk_intr(xsc) 3425 void *xsc; 3426 { 3427 struct sk_softc *sc = xsc; 3428 struct sk_if_softc *sc_if0, *sc_if1; 3429 struct ifnet *ifp0 = NULL, *ifp1 = NULL; 3430 u_int32_t status; 3431 3432 SK_LOCK(sc); 3433 3434 status = CSR_READ_4(sc, SK_ISSR); 3435 if (status == 0 || status == 0xffffffff || sc->sk_suspended) 3436 goto done_locked; 3437 3438 sc_if0 = sc->sk_if[SK_PORT_A]; 3439 sc_if1 = sc->sk_if[SK_PORT_B]; 3440 3441 if (sc_if0 != NULL) 3442 ifp0 = sc_if0->sk_ifp; 3443 if (sc_if1 != NULL) 3444 ifp1 = sc_if1->sk_ifp; 3445 3446 status &= sc->sk_intrmask; 3447 if ((status & sc->sk_intrmask) != 0) { 3448 /* Handle receive interrupts first. */ 3449 if (status & SK_ISR_RX1_EOF) { 3450 if (ifp0->if_mtu > SK_MAX_FRAMELEN) 3451 sk_jumbo_rxeof(sc_if0); 3452 else 3453 sk_rxeof(sc_if0); 3454 CSR_WRITE_4(sc, SK_BMU_RX_CSR0, 3455 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START); 3456 } 3457 if (status & SK_ISR_RX2_EOF) { 3458 if (ifp1->if_mtu > SK_MAX_FRAMELEN) 3459 sk_jumbo_rxeof(sc_if1); 3460 else 3461 sk_rxeof(sc_if1); 3462 CSR_WRITE_4(sc, SK_BMU_RX_CSR1, 3463 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START); 3464 } 3465 3466 /* Then transmit interrupts. */ 3467 if (status & SK_ISR_TX1_S_EOF) { 3468 sk_txeof(sc_if0); 3469 CSR_WRITE_4(sc, SK_BMU_TXS_CSR0, SK_TXBMU_CLR_IRQ_EOF); 3470 } 3471 if (status & SK_ISR_TX2_S_EOF) { 3472 sk_txeof(sc_if1); 3473 CSR_WRITE_4(sc, SK_BMU_TXS_CSR1, SK_TXBMU_CLR_IRQ_EOF); 3474 } 3475 3476 /* Then MAC interrupts. 
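(link and FIFO events, dispatched to the XMAC or Yukon handler depending on the chip type)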
*/ 3477 if (status & SK_ISR_MAC1 && 3478 ifp0->if_drv_flags & IFF_DRV_RUNNING) { 3479 if (sc->sk_type == SK_GENESIS) 3480 sk_intr_xmac(sc_if0); 3481 else 3482 sk_intr_yukon(sc_if0); 3483 } 3484 3485 if (status & SK_ISR_MAC2 && 3486 ifp1->if_drv_flags & IFF_DRV_RUNNING) { 3487 if (sc->sk_type == SK_GENESIS) 3488 sk_intr_xmac(sc_if1); 3489 else 3490 sk_intr_yukon(sc_if1); 3491 } 3492 3493 if (status & SK_ISR_EXTERNAL_REG) { 3494 if (ifp0 != NULL && 3495 sc_if0->sk_phytype == SK_PHYTYPE_BCOM) 3496 sk_intr_bcom(sc_if0); 3497 if (ifp1 != NULL && 3498 sc_if1->sk_phytype == SK_PHYTYPE_BCOM) 3499 sk_intr_bcom(sc_if1); 3500 } 3501 } 3502 3503 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); 3504 3505 if (ifp0 != NULL && !IFQ_DRV_IS_EMPTY(&ifp0->if_snd)) 3506 sk_start_locked(ifp0); 3507 if (ifp1 != NULL && !IFQ_DRV_IS_EMPTY(&ifp1->if_snd)) 3508 sk_start_locked(ifp1); 3509 3510 done_locked: 3511 SK_UNLOCK(sc); 3512 } 3513 3514 static void 3515 sk_init_xmac(sc_if) 3516 struct sk_if_softc *sc_if; 3517 { 3518 struct sk_softc *sc; 3519 struct ifnet *ifp; 3520 u_int16_t eaddr[(ETHER_ADDR_LEN+1)/2]; 3521 struct sk_bcom_hack bhack[] = { 3522 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 }, 3523 { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 }, 3524 { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 }, 3525 { 0, 0 } }; 3526 3527 SK_IF_LOCK_ASSERT(sc_if); 3528 3529 sc = sc_if->sk_softc; 3530 ifp = sc_if->sk_ifp; 3531 3532 /* Unreset the XMAC. */ 3533 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET); 3534 DELAY(1000); 3535 3536 /* Reset the XMAC's internal state. */ 3537 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC); 3538 3539 /* Save the XMAC II revision */ 3540 sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID)); 3541 3542 /* 3543 * Perform additional initialization for external PHYs, 3544 * namely for the 1000baseTX cards that use the XMAC's 3545 * GMII mode. 3546 */ 3547 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { 3548 int i = 0; 3549 u_int32_t val; 3550 3551 /* Take PHY out of reset. */ 3552 val = sk_win_read_4(sc, SK_GPIO); 3553 if (sc_if->sk_port == SK_PORT_A) 3554 val |= SK_GPIO_DIR0|SK_GPIO_DAT0; 3555 else 3556 val |= SK_GPIO_DIR2|SK_GPIO_DAT2; 3557 sk_win_write_4(sc, SK_GPIO, val); 3558 3559 /* Enable GMII mode on the XMAC. */ 3560 SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE); 3561 3562 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, 3563 BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET); 3564 DELAY(10000); 3565 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, 3566 BRGPHY_MII_IMR, 0xFFF0); 3567 3568 /* 3569 * Early versions of the BCM5400 apparently have 3570 * a bug that requires them to have their reserved 3571 * registers initialized to some magic values. I don't 3572 * know what the numbers do, I'm just the messenger. 
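A plausible reading is that the 0x17/0x15 and 0x18 register pairs written below are Broadcom's expansion/shadow register select and data windows, which would explain why the raw values look like line noise; treat that as a guess, not documentation.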
3573 */ 3574 if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03) 3575 == 0x6041) { 3576 while(bhack[i].reg) { 3577 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, 3578 bhack[i].reg, bhack[i].val); 3579 i++; 3580 } 3581 } 3582 } 3583 3584 /* Set station address */ 3585 bcopy(IF_LLADDR(sc_if->sk_ifp), eaddr, ETHER_ADDR_LEN); 3586 SK_XM_WRITE_2(sc_if, XM_PAR0, eaddr[0]); 3587 SK_XM_WRITE_2(sc_if, XM_PAR1, eaddr[1]); 3588 SK_XM_WRITE_2(sc_if, XM_PAR2, eaddr[2]); 3589 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION); 3590 3591 if (ifp->if_flags & IFF_BROADCAST) { 3592 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD); 3593 } else { 3594 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD); 3595 } 3596 3597 /* We don't need the FCS appended to the packet. */ 3598 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS); 3599 3600 /* We want short frames padded to 60 bytes. */ 3601 SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD); 3602 3603 /* 3604 * Enable the reception of all error frames. This is 3605 * a necessary evil due to the design of the XMAC. The 3606 * XMAC's receive FIFO is only 8K in size; however, jumbo 3607 * frames can be up to 9000 bytes in length. When bad 3608 * frame filtering is enabled, the XMAC's RX FIFO operates 3609 * in 'store and forward' mode. For this to work, the 3610 * entire frame has to fit into the FIFO, but that means 3611 * that jumbo frames larger than 8192 bytes will be 3612 * truncated. Disabling all bad frame filtering causes 3613 * the RX FIFO to operate in streaming mode, in which 3614 * case the XMAC will start transferring frames out of the 3615 * RX FIFO as soon as the FIFO threshold is reached. 3616 */ 3617 if (ifp->if_mtu > SK_MAX_FRAMELEN) { 3618 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES| 3619 XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS| 3620 XM_MODE_RX_INRANGELEN); 3621 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK); 3622 } else 3623 SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK); 3624 3625 /* 3626 * Bump up the transmit threshold. This helps hold off transmit 3627 * underruns when we're blasting traffic from both ports at once.
3628 */ 3629 SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH); 3630 3631 /* Set promiscuous mode */ 3632 sk_setpromisc(sc_if); 3633 3634 /* Set multicast filter */ 3635 sk_setmulti(sc_if); 3636 3637 /* Clear and enable interrupts */ 3638 SK_XM_READ_2(sc_if, XM_ISR); 3639 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) 3640 SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS); 3641 else 3642 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF); 3643 3644 /* Configure MAC arbiter */ 3645 switch(sc_if->sk_xmac_rev) { 3646 case XM_XMAC_REV_B2: 3647 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2); 3648 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2); 3649 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2); 3650 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2); 3651 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2); 3652 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2); 3653 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2); 3654 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2); 3655 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2); 3656 break; 3657 case XM_XMAC_REV_C1: 3658 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1); 3659 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1); 3660 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1); 3661 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1); 3662 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1); 3663 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1); 3664 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1); 3665 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1); 3666 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2); 3667 break; 3668 default: 3669 break; 3670 } 3671 sk_win_write_2(sc, SK_MACARB_CTL, 3672 SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF); 3673 3674 sc_if->sk_link = 1; 3675 3676 return; 3677 } 3678 3679 static void 3680 sk_init_yukon(sc_if) 3681 struct sk_if_softc *sc_if; 3682 { 3683 u_int32_t phy, v; 3684 u_int16_t reg; 3685 struct sk_softc *sc; 3686 struct ifnet *ifp; 3687 int i; 3688 3689 SK_IF_LOCK_ASSERT(sc_if); 3690 3691 sc = sc_if->sk_softc; 3692 ifp = sc_if->sk_ifp; 3693 3694 if (sc->sk_type == SK_YUKON_LITE && 3695 sc->sk_rev >= SK_YUKON_LITE_REV_A3) { 3696 /* 3697 * Workaround code for COMA mode, set PHY reset. 
3698 * Otherwise it will not correctly take chip out of 3699 * powerdown (coma) 3700 */ 3701 v = sk_win_read_4(sc, SK_GPIO); 3702 v |= SK_GPIO_DIR9 | SK_GPIO_DAT9; 3703 sk_win_write_4(sc, SK_GPIO, v); 3704 } 3705 3706 /* GMAC and GPHY Reset */ 3707 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET); 3708 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET); 3709 DELAY(1000); 3710 3711 if (sc->sk_type == SK_YUKON_LITE && 3712 sc->sk_rev >= SK_YUKON_LITE_REV_A3) { 3713 /* 3714 * Workaround code for COMA mode, clear PHY reset 3715 */ 3716 v = sk_win_read_4(sc, SK_GPIO); 3717 v |= SK_GPIO_DIR9; 3718 v &= ~SK_GPIO_DAT9; 3719 sk_win_write_4(sc, SK_GPIO, v); 3720 } 3721 3722 phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP | 3723 SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE; 3724 3725 switch(sc_if->sk_softc->sk_pmd) { 3726 case IFM_1000_SX: 3727 case IFM_1000_LX: 3728 phy |= SK_GPHY_FIBER; 3729 break; 3730 3731 case IFM_1000_CX: 3732 case IFM_1000_T: 3733 phy |= SK_GPHY_COPPER; 3734 break; 3735 } 3736 3737 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET); 3738 DELAY(1000); 3739 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR); 3740 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF | 3741 SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR); 3742 3743 /* unused read of the interrupt source register */ 3744 SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR); 3745 3746 reg = SK_YU_READ_2(sc_if, YUKON_PAR); 3747 3748 /* MIB Counter Clear Mode set */ 3749 reg |= YU_PAR_MIB_CLR; 3750 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg); 3751 3752 /* MIB Counter Clear Mode clear */ 3753 reg &= ~YU_PAR_MIB_CLR; 3754 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg); 3755 3756 /* receive control reg */ 3757 SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR); 3758 3759 /* transmit parameter register */ 3760 SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) | 3761 YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) ); 3762 3763 /* serial mode register */ 3764 reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e); 3765 if (ifp->if_mtu > SK_MAX_FRAMELEN) 3766 reg |= YU_SMR_MFL_JUMBO; 3767 SK_YU_WRITE_2(sc_if, YUKON_SMR, reg); 3768 3769 /* Setup Yukon's address */ 3770 for (i = 0; i < 3; i++) { 3771 /* Write Source Address 1 (unicast filter) */ 3772 SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4, 3773 IF_LLADDR(sc_if->sk_ifp)[i * 2] | 3774 IF_LLADDR(sc_if->sk_ifp)[i * 2 + 1] << 8); 3775 } 3776 3777 for (i = 0; i < 3; i++) { 3778 reg = sk_win_read_2(sc_if->sk_softc, 3779 SK_MAC1_0 + i * 2 + sc_if->sk_port * 8); 3780 SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg); 3781 } 3782 3783 /* Set promiscuous mode */ 3784 sk_setpromisc(sc_if); 3785 3786 /* Set multicast filter */ 3787 sk_setmulti(sc_if); 3788 3789 /* enable interrupt mask for counter overflows */ 3790 SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0); 3791 SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0); 3792 SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0); 3793 3794 /* Configure RX MAC FIFO Flush Mask */ 3795 v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR | 3796 YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT | 3797 YU_RXSTAT_JABBER; 3798 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v); 3799 3800 /* Disable RX MAC FIFO Flush for YUKON-Lite Rev. 
A0 only */ 3801 if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev == SK_YUKON_LITE_REV_A0) 3802 v = SK_TFCTL_OPERATION_ON; 3803 else 3804 v = SK_TFCTL_OPERATION_ON | SK_RFCTL_FIFO_FLUSH_ON; 3805 /* Configure RX MAC FIFO */ 3806 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR); 3807 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, v); 3808 3809 /* Increase flush threshold to 64 bytes */ 3810 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD, 3811 SK_RFCTL_FIFO_THRESHOLD + 1); 3812 3813 /* Configure TX MAC FIFO */ 3814 SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR); 3815 SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON); 3816 } 3817 3818 /* 3819 * Note that to properly initialize any part of the GEnesis chip, 3820 * you first have to take it out of reset mode. 3821 */ 3822 static void 3823 sk_init(xsc) 3824 void *xsc; 3825 { 3826 struct sk_if_softc *sc_if = xsc; 3827 3828 SK_IF_LOCK(sc_if); 3829 sk_init_locked(sc_if); 3830 SK_IF_UNLOCK(sc_if); 3831 3832 return; 3833 } 3834 3835 static void 3836 sk_init_locked(sc_if) 3837 struct sk_if_softc *sc_if; 3838 { 3839 struct sk_softc *sc; 3840 struct ifnet *ifp; 3841 struct mii_data *mii; 3842 u_int16_t reg; 3843 u_int32_t imr; 3844 int error; 3845 3846 SK_IF_LOCK_ASSERT(sc_if); 3847 3848 ifp = sc_if->sk_ifp; 3849 sc = sc_if->sk_softc; 3850 mii = device_get_softc(sc_if->sk_miibus); 3851 3852 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 3853 return; 3854 3855 /* Cancel pending I/O and free all RX/TX buffers. */ 3856 sk_stop(sc_if); 3857 3858 if (sc->sk_type == SK_GENESIS) { 3859 /* Configure LINK_SYNC LED */ 3860 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON); 3861 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, 3862 SK_LINKLED_LINKSYNC_ON); 3863 3864 /* Configure RX LED */ 3865 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, 3866 SK_RXLEDCTL_COUNTER_START); 3867 3868 /* Configure TX LED */ 3869 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, 3870 SK_TXLEDCTL_COUNTER_START); 3871 } 3872 3873 /* 3874 * Configure descriptor poll timer 3875 * 3876 * The SK-NET GENESIS data sheet says that the Start transmit 3877 * command can be lost due to CPU/cache related interim storage 3878 * problems under certain conditions. The document recommends a 3879 * polling mechanism to send a Start transmit command to initiate 3880 * transfer of ready descriptors regularly. To cope with this issue 3881 * sk(4) now enables the descriptor poll timer to initiate descriptor 3882 * processing periodically as defined by SK_DPT_TIMER_MAX. However 3883 * sk(4) still issues SK_TXBMU_TX_START to the Tx BMU to get fast 3884 * execution of the Tx command instead of waiting for the next 3885 * descriptor polling time. The same rule may apply to the Rx side 3886 * too but it seems that is not needed at the moment. 3887 * Since sk(4) uses descriptor polling as a last resort there is no 3888 * need to set a smaller polling time than the maximum allowable one.
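In other words the poll timer is only a safety net: sk_start_locked() still kicks the Tx BMU with SK_TXBMU_TX_START for every batch of frames it queues.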
	SK_IF_WRITE_4(sc_if, 0, SK_DPT_INIT, SK_DPT_TIMER_MAX);

	/* Configure I2C registers */

	/* Configure XMAC(s) */
	switch (sc->sk_type) {
	case SK_GENESIS:
		sk_init_xmac(sc_if);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
	case SK_YUKON_EC:
		sk_init_yukon(sc_if);
		break;
	}
	mii_mediachg(mii);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure MAC FIFOs */
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);

		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
	}

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
	    SK_TXARCTL_ON | SK_TXARCTL_FSYNC_ON);

	/* Configure RAMbuffers */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);

	/* Configure BMUs */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
	if (ifp->if_mtu > SK_MAX_FRAMELEN) {
		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
		    SK_ADDR_LO(SK_JUMBO_RX_RING_ADDR(sc_if, 0)));
		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI,
		    SK_ADDR_HI(SK_JUMBO_RX_RING_ADDR(sc_if, 0)));
	} else {
		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
		    SK_ADDR_LO(SK_RX_RING_ADDR(sc_if, 0)));
		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI,
		    SK_ADDR_HI(SK_RX_RING_ADDR(sc_if, 0)));
	}

	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
	    SK_ADDR_LO(SK_TX_RING_ADDR(sc_if, 0)));
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI,
	    SK_ADDR_HI(SK_TX_RING_ADDR(sc_if, 0)));

	/* Init descriptors */
	if (ifp->if_mtu > SK_MAX_FRAMELEN)
		error = sk_init_jumbo_rx_ring(sc_if);
	else
		error = sk_init_rx_ring(sc_if);
	if (error != 0) {
		device_printf(sc_if->sk_if_dev,
		    "initialization failed: no memory for rx buffers\n");
		sk_stop(sc_if);
		return;
	}
	sk_init_tx_ring(sc_if);
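
	/*
	 * The interrupt moderation timer counts in chip clock ticks rather
	 * than microseconds, so SK_IM_USECS() scales the microsecond value
	 * set via sysctl (sk_int_mod) by the per-chip tick rate cached in
	 * sk_int_ticks before the result is written to the timer's init
	 * register.
	 */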
	/* Set interrupt moderation if changed via sysctl. */
	imr = sk_win_read_4(sc, SK_IMTIMERINIT);
	if (imr != SK_IM_USECS(sc->sk_int_mod, sc->sk_int_ticks)) {
		sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod,
		    sc->sk_int_ticks));
		if (bootverbose)
			device_printf(sc_if->sk_if_dev,
			    "interrupt moderation is %d us.\n",
			    sc->sk_int_mod);
	}

	/* Configure interrupt handling */
	CSR_READ_4(sc, SK_ISSR);
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask |= SK_INTRS1;
	else
		sc->sk_intrmask |= SK_INTRS2;

	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;

	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Start BMUs. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);

	switch (sc->sk_type) {
	case SK_GENESIS:
		/* Enable the XMAC's TX and RX state machines. */
		SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
		SK_XM_SETBIT_2(sc_if, XM_MMUCMD,
		    XM_MMUCMD_TX_ENB | XM_MMUCMD_RX_ENB);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
	case SK_YUKON_EC:
		reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
		reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
#if 0
		/* XXX disable 100Mbps and full duplex mode? */
		reg &= ~(YU_GPCR_SPEED | YU_GPCR_DPLX_DIS);
#endif
		SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
	}

	/* Activate the descriptor polling timer. */
	SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_START);
	/* Start the transfer of Tx descriptors. */
	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	switch (sc->sk_type) {
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
	case SK_YUKON_EC:
		callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if);
		break;
	}

	return;
}

static void
sk_stop(sc_if)
	struct sk_if_softc	*sc_if;
{
	int			i;
	struct sk_softc		*sc;
	struct sk_txdesc	*txd;
	struct sk_rxdesc	*rxd;
	struct sk_rxdesc	*jrxd;
	struct ifnet		*ifp;
	u_int32_t		val;

	SK_IF_LOCK_ASSERT(sc_if);
	sc = sc_if->sk_softc;
	ifp = sc_if->sk_ifp;

	callout_stop(&sc_if->sk_tick_ch);

	/* Stop the Tx descriptor polling timer. */
	SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP);
	/* Stop the transfer of Tx descriptors. */
	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_STOP);
	for (i = 0; i < SK_TIMEOUT; i++) {
		val = CSR_READ_4(sc, sc_if->sk_tx_bmu);
		if ((val & SK_TXBMU_TX_STOP) == 0)
			break;
		DELAY(1);
	}
	if (i == SK_TIMEOUT)
		device_printf(sc_if->sk_if_dev,
		    "cannot stop transfer of Tx descriptors\n");
	/* Stop the transfer of Rx descriptors. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_STOP);
	for (i = 0; i < SK_TIMEOUT; i++) {
		val = SK_IF_READ_4(sc_if, 0, SK_RXQ1_BMU_CSR);
		if ((val & SK_RXBMU_RX_STOP) == 0)
			break;
		DELAY(1);
	}
	if (i == SK_TIMEOUT)
		device_printf(sc_if->sk_if_dev,
		    "cannot stop transfer of Rx descriptors\n");

	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		/* Put the PHY back into reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A) {
			val |= SK_GPIO_DIR0;
			val &= ~SK_GPIO_DAT0;
		} else {
			val |= SK_GPIO_DIR2;
			val &= ~SK_GPIO_DAT2;
		}
		sk_win_write_4(sc, SK_GPIO, val);
	}

	/* Turn off the various components of this interface. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
	switch (sc->sk_type) {
	case SK_GENESIS:
		SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
	case SK_YUKON_EC:
		SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
		SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
		break;
	}
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET | SK_RBCTL_OFF);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET | SK_RBCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_TXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);

	/* Disable interrupts. */
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask &= ~SK_INTRS1;
	else
		sc->sk_intrmask &= ~SK_INTRS2;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Clear any latched XMAC interrupts, then mask them all. */
	SK_XM_READ_2(sc_if, XM_ISR);
	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < SK_RX_RING_CNT; i++) {
		rxd = &sc_if->sk_cdata.sk_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) {
		jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i];
		if (jrxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag,
			    jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag,
			    jrxd->rx_dmamap);
			m_freem(jrxd->rx_m);
			jrxd->rx_m = NULL;
		}
	}
	for (i = 0; i < SK_TX_RING_CNT; i++) {
		txd = &sc_if->sk_cdata.sk_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	return;
}

/*
 * Range-checked integer sysctl handler: reject any new value outside
 * [low, high] with EINVAL.
 */
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (!arg1)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || !req->newptr)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;
	return (0);
}

static int
sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req, SK_IM_MIN, SK_IM_MAX));
}
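
/*
 * For illustration only: a handler like sysctl_hw_sk_int_mod() is
 * typically registered on the device's private sysctl tree during attach
 * with SYSCTL_ADD_PROC(9), along the lines of the sketch below. The node
 * name and description here are assumptions, not taken from this section
 * of the driver.
 *
 *	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
 *	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
 *	    "int_mod", CTLTYPE_INT | CTLFLAG_RW, &sc->sk_int_mod, 0,
 *	    sysctl_hw_sk_int_mod, "I", "SK interrupt moderation");
 *
 * On a write to the node, sysctl_int_range() rejects any value outside
 * [SK_IM_MIN, SK_IM_MAX] before it is stored in sc->sk_int_mod; the new
 * setting then takes effect on the next sk_init_locked() pass.
 */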