1 /* $OpenBSD: if_sk.c,v 2.33 2003/08/12 05:23:06 nate Exp $ */ 2 3 /*- 4 * Copyright (c) 1997, 1998, 1999, 2000 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 /*- 35 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu> 36 * 37 * Permission to use, copy, modify, and distribute this software for any 38 * purpose with or without fee is hereby granted, provided that the above 39 * copyright notice and this permission notice appear in all copies. 40 * 41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 48 */ 49 50 #include <sys/cdefs.h> 51 __FBSDID("$FreeBSD$"); 52 53 /* 54 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports 55 * the SK-984x series adapters, both single port and dual port. 56 * References: 57 * The XaQti XMAC II datasheet, 58 * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf 59 * The SysKonnect GEnesis manual, http://www.syskonnect.com 60 * 61 * Note: XaQti has been aquired by Vitesse, and Vitesse does not have the 62 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a 63 * convenience to others until Vitesse corrects this problem: 64 * 65 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf 66 * 67 * Written by Bill Paul <wpaul@ee.columbia.edu> 68 * Department of Electrical Engineering 69 * Columbia University, New York City 70 */ 71 /* 72 * The SysKonnect gigabit ethernet adapters consist of two main 73 * components: the SysKonnect GEnesis controller chip and the XaQti Corp. 74 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC 75 * components and a PHY while the GEnesis controller provides a PCI 76 * interface with DMA support. Each card may have between 512K and 77 * 2MB of SRAM on board depending on the configuration. 78 * 79 * The SysKonnect GEnesis controller can have either one or two XMAC 80 * chips connected to it, allowing single or dual port NIC configurations. 81 * SysKonnect has the distinction of being the only vendor on the market 82 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs, 83 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the 84 * XMAC registers. This driver takes advantage of these features to allow 85 * both XMACs to operate as independent interfaces. 86 */ 87 88 #include <sys/param.h> 89 #include <sys/systm.h> 90 #include <sys/bus.h> 91 #include <sys/endian.h> 92 #include <sys/mbuf.h> 93 #include <sys/malloc.h> 94 #include <sys/kernel.h> 95 #include <sys/module.h> 96 #include <sys/socket.h> 97 #include <sys/sockio.h> 98 #include <sys/queue.h> 99 #include <sys/sysctl.h> 100 101 #include <net/bpf.h> 102 #include <net/ethernet.h> 103 #include <net/if.h> 104 #include <net/if_arp.h> 105 #include <net/if_dl.h> 106 #include <net/if_media.h> 107 #include <net/if_types.h> 108 #include <net/if_vlan_var.h> 109 110 #include <netinet/in.h> 111 #include <netinet/in_systm.h> 112 #include <netinet/ip.h> 113 114 #include <machine/bus.h> 115 #include <machine/in_cksum.h> 116 #include <machine/resource.h> 117 #include <sys/rman.h> 118 119 #include <dev/mii/mii.h> 120 #include <dev/mii/miivar.h> 121 #include <dev/mii/brgphyreg.h> 122 123 #include <dev/pci/pcireg.h> 124 #include <dev/pci/pcivar.h> 125 126 #if 0 127 #define SK_USEIOSPACE 128 #endif 129 130 #include <dev/sk/if_skreg.h> 131 #include <dev/sk/xmaciireg.h> 132 #include <dev/sk/yukonreg.h> 133 134 MODULE_DEPEND(sk, pci, 1, 1, 1); 135 MODULE_DEPEND(sk, ether, 1, 1, 1); 136 MODULE_DEPEND(sk, miibus, 1, 1, 1); 137 138 /* "device miibus" required. See GENERIC if you get errors here. */ 139 #include "miibus_if.h" 140 141 #ifndef lint 142 static const char rcsid[] = 143 "$FreeBSD$"; 144 #endif 145 146 static struct sk_type sk_devs[] = { 147 { 148 VENDORID_SK, 149 DEVICEID_SK_V1, 150 "SysKonnect Gigabit Ethernet (V1.0)" 151 }, 152 { 153 VENDORID_SK, 154 DEVICEID_SK_V2, 155 "SysKonnect Gigabit Ethernet (V2.0)" 156 }, 157 { 158 VENDORID_MARVELL, 159 DEVICEID_SK_V2, 160 "Marvell Gigabit Ethernet" 161 }, 162 #ifdef not_yet 163 { 164 VENDORID_MARVELL, 165 DEVICEID_MRVL_4360, 166 "Marvell 88E8052 Gigabit Ethernet Controller" 167 }, 168 { 169 VENDORID_MARVELL, 170 DEVICEID_MRVL_4361, 171 "Marvell 88E8050 Gigabit Ethernet Controller" 172 }, 173 { 174 VENDORID_MARVELL, 175 DEVICEID_MRVL_4362, 176 "Marvell 88E8053 Gigabit Ethernet Controller" 177 }, 178 #endif 179 { 180 VENDORID_MARVELL, 181 DEVICEID_BELKIN_5005, 182 "Belkin F5D5005 Gigabit Ethernet" 183 }, 184 { 185 VENDORID_3COM, 186 DEVICEID_3COM_3C940, 187 "3Com 3C940 Gigabit Ethernet" 188 }, 189 { 190 VENDORID_LINKSYS, 191 DEVICEID_LINKSYS_EG1032, 192 "Linksys EG1032 Gigabit Ethernet" 193 }, 194 { 195 VENDORID_DLINK, 196 DEVICEID_DLINK_DGE530T_A1, 197 "D-Link DGE-530T Gigabit Ethernet" 198 }, 199 { 200 VENDORID_DLINK, 201 DEVICEID_DLINK_DGE530T_B1, 202 "D-Link DGE-530T Gigabit Ethernet" 203 }, 204 { 0, 0, NULL } 205 }; 206 207 static int skc_probe(device_t); 208 static int skc_attach(device_t); 209 static int skc_detach(device_t); 210 static void skc_shutdown(device_t); 211 static int skc_suspend(device_t); 212 static int skc_resume(device_t); 213 static int sk_detach(device_t); 214 static int sk_probe(device_t); 215 static int sk_attach(device_t); 216 static void sk_tick(void *); 217 static void sk_yukon_tick(void *); 218 static void sk_intr(void *); 219 static void sk_intr_xmac(struct sk_if_softc *); 220 static void sk_intr_bcom(struct sk_if_softc *); 221 static void sk_intr_yukon(struct sk_if_softc *); 222 static __inline void sk_rxcksum(struct ifnet *, struct mbuf *, u_int32_t); 223 static __inline int sk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t); 224 static void sk_rxeof(struct sk_if_softc *); 225 static void sk_jumbo_rxeof(struct sk_if_softc *); 226 static void sk_txeof(struct sk_if_softc *); 227 static void sk_txcksum(struct ifnet *, struct mbuf *, struct sk_tx_desc *); 228 static int sk_encap(struct sk_if_softc *, struct mbuf **); 229 static void sk_start(struct ifnet *); 230 static void sk_start_locked(struct ifnet *); 231 static int sk_ioctl(struct ifnet *, u_long, caddr_t); 232 static void sk_init(void *); 233 static void sk_init_locked(struct sk_if_softc *); 234 static void sk_init_xmac(struct sk_if_softc *); 235 static void sk_init_yukon(struct sk_if_softc *); 236 static void sk_stop(struct sk_if_softc *); 237 static void sk_watchdog(struct ifnet *); 238 static int sk_ifmedia_upd(struct ifnet *); 239 static void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *); 240 static void sk_reset(struct sk_softc *); 241 static __inline void sk_discard_rxbuf(struct sk_if_softc *, int); 242 static __inline void sk_discard_jumbo_rxbuf(struct sk_if_softc *, int); 243 static int sk_newbuf(struct sk_if_softc *, int); 244 static int sk_jumbo_newbuf(struct sk_if_softc *, int); 245 static void sk_dmamap_cb(void *, bus_dma_segment_t *, int, int); 246 static int sk_dma_alloc(struct sk_if_softc *); 247 static void sk_dma_free(struct sk_if_softc *); 248 static void *sk_jalloc(struct sk_if_softc *); 249 static void sk_jfree(void *, void *); 250 static int sk_init_rx_ring(struct sk_if_softc *); 251 static int sk_init_jumbo_rx_ring(struct sk_if_softc *); 252 static void sk_init_tx_ring(struct sk_if_softc *); 253 static u_int32_t sk_win_read_4(struct sk_softc *, int); 254 static u_int16_t sk_win_read_2(struct sk_softc *, int); 255 static u_int8_t sk_win_read_1(struct sk_softc *, int); 256 static void sk_win_write_4(struct sk_softc *, int, u_int32_t); 257 static void sk_win_write_2(struct sk_softc *, int, u_int32_t); 258 static void sk_win_write_1(struct sk_softc *, int, u_int32_t); 259 static u_int8_t sk_vpd_readbyte(struct sk_softc *, int); 260 static void sk_vpd_read_res(struct sk_softc *, struct vpd_res *, int); 261 static void sk_vpd_read(struct sk_softc *); 262 263 static int sk_miibus_readreg(device_t, int, int); 264 static int sk_miibus_writereg(device_t, int, int, int); 265 static void sk_miibus_statchg(device_t); 266 267 static int sk_xmac_miibus_readreg(struct sk_if_softc *, int, int); 268 static int sk_xmac_miibus_writereg(struct sk_if_softc *, int, int, 269 int); 270 static void sk_xmac_miibus_statchg(struct sk_if_softc *); 271 272 static int sk_marv_miibus_readreg(struct sk_if_softc *, int, int); 273 static int sk_marv_miibus_writereg(struct sk_if_softc *, int, int, 274 int); 275 static void sk_marv_miibus_statchg(struct sk_if_softc *); 276 277 static uint32_t sk_xmchash(const uint8_t *); 278 static uint32_t sk_gmchash(const uint8_t *); 279 static void sk_setfilt(struct sk_if_softc *, u_int16_t *, int); 280 static void sk_setmulti(struct sk_if_softc *); 281 static void sk_setpromisc(struct sk_if_softc *); 282 283 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high); 284 static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS); 285 286 /* 287 * It seems that SK-NET GENESIS supports very simple checksum offload 288 * capability for Tx and I believe it can generate 0 checksum value for 289 * UDP packets in Tx as the hardware can't differenciate UDP packets from 290 * TCP packets. 0 chcecksum value for UDP packet is an invalid one as it 291 * means sender didn't perforam checksum computation. For the safety I 292 * disabled UDP checksum offload capability at the moment. Alternatively 293 * we can intrduce a LINK0/LINK1 flag as hme(4) did in its Tx checksum 294 * offload routine. 295 */ 296 #define SK_CSUM_FEATURES (CSUM_TCP) 297 298 /* 299 * Note that we have newbus methods for both the GEnesis controller 300 * itself and the XMAC(s). The XMACs are children of the GEnesis, and 301 * the miibus code is a child of the XMACs. We need to do it this way 302 * so that the miibus drivers can access the PHY registers on the 303 * right PHY. It's not quite what I had in mind, but it's the only 304 * design that achieves the desired effect. 305 */ 306 static device_method_t skc_methods[] = { 307 /* Device interface */ 308 DEVMETHOD(device_probe, skc_probe), 309 DEVMETHOD(device_attach, skc_attach), 310 DEVMETHOD(device_detach, skc_detach), 311 DEVMETHOD(device_suspend, skc_suspend), 312 DEVMETHOD(device_resume, skc_resume), 313 DEVMETHOD(device_shutdown, skc_shutdown), 314 315 /* bus interface */ 316 DEVMETHOD(bus_print_child, bus_generic_print_child), 317 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 318 319 { 0, 0 } 320 }; 321 322 static driver_t skc_driver = { 323 "skc", 324 skc_methods, 325 sizeof(struct sk_softc) 326 }; 327 328 static devclass_t skc_devclass; 329 330 static device_method_t sk_methods[] = { 331 /* Device interface */ 332 DEVMETHOD(device_probe, sk_probe), 333 DEVMETHOD(device_attach, sk_attach), 334 DEVMETHOD(device_detach, sk_detach), 335 DEVMETHOD(device_shutdown, bus_generic_shutdown), 336 337 /* bus interface */ 338 DEVMETHOD(bus_print_child, bus_generic_print_child), 339 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 340 341 /* MII interface */ 342 DEVMETHOD(miibus_readreg, sk_miibus_readreg), 343 DEVMETHOD(miibus_writereg, sk_miibus_writereg), 344 DEVMETHOD(miibus_statchg, sk_miibus_statchg), 345 346 { 0, 0 } 347 }; 348 349 static driver_t sk_driver = { 350 "sk", 351 sk_methods, 352 sizeof(struct sk_if_softc) 353 }; 354 355 static devclass_t sk_devclass; 356 357 DRIVER_MODULE(skc, pci, skc_driver, skc_devclass, 0, 0); 358 DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0); 359 DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0); 360 361 static struct resource_spec sk_res_spec_io[] = { 362 { SYS_RES_IOPORT, PCIR_BAR(1), RF_ACTIVE }, 363 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, 364 { -1, 0, 0 } 365 }; 366 367 static struct resource_spec sk_res_spec_mem[] = { 368 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE }, 369 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, 370 { -1, 0, 0 } 371 }; 372 373 #define SK_SETBIT(sc, reg, x) \ 374 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x) 375 376 #define SK_CLRBIT(sc, reg, x) \ 377 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x) 378 379 #define SK_WIN_SETBIT_4(sc, reg, x) \ 380 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x) 381 382 #define SK_WIN_CLRBIT_4(sc, reg, x) \ 383 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x) 384 385 #define SK_WIN_SETBIT_2(sc, reg, x) \ 386 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x) 387 388 #define SK_WIN_CLRBIT_2(sc, reg, x) \ 389 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x) 390 391 static u_int32_t 392 sk_win_read_4(sc, reg) 393 struct sk_softc *sc; 394 int reg; 395 { 396 #ifdef SK_USEIOSPACE 397 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 398 return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg))); 399 #else 400 return(CSR_READ_4(sc, reg)); 401 #endif 402 } 403 404 static u_int16_t 405 sk_win_read_2(sc, reg) 406 struct sk_softc *sc; 407 int reg; 408 { 409 #ifdef SK_USEIOSPACE 410 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 411 return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg))); 412 #else 413 return(CSR_READ_2(sc, reg)); 414 #endif 415 } 416 417 static u_int8_t 418 sk_win_read_1(sc, reg) 419 struct sk_softc *sc; 420 int reg; 421 { 422 #ifdef SK_USEIOSPACE 423 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 424 return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg))); 425 #else 426 return(CSR_READ_1(sc, reg)); 427 #endif 428 } 429 430 static void 431 sk_win_write_4(sc, reg, val) 432 struct sk_softc *sc; 433 int reg; 434 u_int32_t val; 435 { 436 #ifdef SK_USEIOSPACE 437 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 438 CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val); 439 #else 440 CSR_WRITE_4(sc, reg, val); 441 #endif 442 return; 443 } 444 445 static void 446 sk_win_write_2(sc, reg, val) 447 struct sk_softc *sc; 448 int reg; 449 u_int32_t val; 450 { 451 #ifdef SK_USEIOSPACE 452 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 453 CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val); 454 #else 455 CSR_WRITE_2(sc, reg, val); 456 #endif 457 return; 458 } 459 460 static void 461 sk_win_write_1(sc, reg, val) 462 struct sk_softc *sc; 463 int reg; 464 u_int32_t val; 465 { 466 #ifdef SK_USEIOSPACE 467 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 468 CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val); 469 #else 470 CSR_WRITE_1(sc, reg, val); 471 #endif 472 return; 473 } 474 475 /* 476 * The VPD EEPROM contains Vital Product Data, as suggested in 477 * the PCI 2.1 specification. The VPD data is separared into areas 478 * denoted by resource IDs. The SysKonnect VPD contains an ID string 479 * resource (the name of the adapter), a read-only area resource 480 * containing various key/data fields and a read/write area which 481 * can be used to store asset management information or log messages. 482 * We read the ID string and read-only into buffers attached to 483 * the controller softc structure for later use. At the moment, 484 * we only use the ID string during skc_attach(). 485 */ 486 static u_int8_t 487 sk_vpd_readbyte(sc, addr) 488 struct sk_softc *sc; 489 int addr; 490 { 491 int i; 492 493 sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr); 494 for (i = 0; i < SK_TIMEOUT; i++) { 495 /* ASUS LOM takes a very long time to read VPD. */ 496 DELAY(100); 497 if (sk_win_read_2(sc, 498 SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG) 499 break; 500 } 501 502 if (i == SK_TIMEOUT) 503 return(0); 504 505 return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA))); 506 } 507 508 static void 509 sk_vpd_read_res(sc, res, addr) 510 struct sk_softc *sc; 511 struct vpd_res *res; 512 int addr; 513 { 514 int i; 515 u_int8_t *ptr; 516 517 ptr = (u_int8_t *)res; 518 for (i = 0; i < sizeof(struct vpd_res); i++) 519 ptr[i] = sk_vpd_readbyte(sc, i + addr); 520 521 return; 522 } 523 524 static void 525 sk_vpd_read(sc) 526 struct sk_softc *sc; 527 { 528 int pos = 0, i; 529 struct vpd_res res; 530 531 /* Check VPD capability */ 532 if (sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_CAPID)) != PCIY_VPD) 533 return; 534 if (sc->sk_vpd_prodname != NULL) 535 free(sc->sk_vpd_prodname, M_DEVBUF); 536 if (sc->sk_vpd_readonly != NULL) 537 free(sc->sk_vpd_readonly, M_DEVBUF); 538 sc->sk_vpd_prodname = NULL; 539 sc->sk_vpd_readonly = NULL; 540 sc->sk_vpd_readonly_len = 0; 541 542 sk_vpd_read_res(sc, &res, pos); 543 544 /* 545 * Bail out quietly if the eeprom appears to be missing or empty. 546 */ 547 if (res.vr_id == 0xff && res.vr_len == 0xff && res.vr_pad == 0xff) 548 return; 549 550 if (res.vr_id != VPD_RES_ID) { 551 device_printf(sc->sk_dev, "bad VPD resource id: expected %x " 552 "got %x\n", VPD_RES_ID, res.vr_id); 553 return; 554 } 555 556 pos += sizeof(res); 557 sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT); 558 if (sc->sk_vpd_prodname != NULL) { 559 for (i = 0; i < res.vr_len; i++) 560 sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos); 561 sc->sk_vpd_prodname[i] = '\0'; 562 } 563 pos += res.vr_len; 564 565 sk_vpd_read_res(sc, &res, pos); 566 567 if (res.vr_id != VPD_RES_READ) { 568 device_printf(sc->sk_dev, "bad VPD resource id: expected %x " 569 "got %x\n", VPD_RES_READ, res.vr_id); 570 return; 571 } 572 573 pos += sizeof(res); 574 sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT); 575 for (i = 0; i < res.vr_len; i++) 576 sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos); 577 sc->sk_vpd_readonly_len = res.vr_len; 578 579 return; 580 } 581 582 static int 583 sk_miibus_readreg(dev, phy, reg) 584 device_t dev; 585 int phy, reg; 586 { 587 struct sk_if_softc *sc_if; 588 int v; 589 590 sc_if = device_get_softc(dev); 591 592 SK_IF_MII_LOCK(sc_if); 593 switch(sc_if->sk_softc->sk_type) { 594 case SK_GENESIS: 595 v = sk_xmac_miibus_readreg(sc_if, phy, reg); 596 break; 597 case SK_YUKON: 598 case SK_YUKON_LITE: 599 case SK_YUKON_LP: 600 case SK_YUKON_EC: 601 v = sk_marv_miibus_readreg(sc_if, phy, reg); 602 break; 603 default: 604 v = 0; 605 break; 606 } 607 SK_IF_MII_UNLOCK(sc_if); 608 609 return (v); 610 } 611 612 static int 613 sk_miibus_writereg(dev, phy, reg, val) 614 device_t dev; 615 int phy, reg, val; 616 { 617 struct sk_if_softc *sc_if; 618 int v; 619 620 sc_if = device_get_softc(dev); 621 622 SK_IF_MII_LOCK(sc_if); 623 switch(sc_if->sk_softc->sk_type) { 624 case SK_GENESIS: 625 v = sk_xmac_miibus_writereg(sc_if, phy, reg, val); 626 break; 627 case SK_YUKON: 628 case SK_YUKON_LITE: 629 case SK_YUKON_LP: 630 case SK_YUKON_EC: 631 v = sk_marv_miibus_writereg(sc_if, phy, reg, val); 632 break; 633 default: 634 v = 0; 635 break; 636 } 637 SK_IF_MII_UNLOCK(sc_if); 638 639 return (v); 640 } 641 642 static void 643 sk_miibus_statchg(dev) 644 device_t dev; 645 { 646 struct sk_if_softc *sc_if; 647 648 sc_if = device_get_softc(dev); 649 650 SK_IF_MII_LOCK(sc_if); 651 switch(sc_if->sk_softc->sk_type) { 652 case SK_GENESIS: 653 sk_xmac_miibus_statchg(sc_if); 654 break; 655 case SK_YUKON: 656 case SK_YUKON_LITE: 657 case SK_YUKON_LP: 658 case SK_YUKON_EC: 659 sk_marv_miibus_statchg(sc_if); 660 break; 661 } 662 SK_IF_MII_UNLOCK(sc_if); 663 664 return; 665 } 666 667 static int 668 sk_xmac_miibus_readreg(sc_if, phy, reg) 669 struct sk_if_softc *sc_if; 670 int phy, reg; 671 { 672 int i; 673 674 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0) 675 return(0); 676 677 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8)); 678 SK_XM_READ_2(sc_if, XM_PHY_DATA); 679 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) { 680 for (i = 0; i < SK_TIMEOUT; i++) { 681 DELAY(1); 682 if (SK_XM_READ_2(sc_if, XM_MMUCMD) & 683 XM_MMUCMD_PHYDATARDY) 684 break; 685 } 686 687 if (i == SK_TIMEOUT) { 688 if_printf(sc_if->sk_ifp, "phy failed to come ready\n"); 689 return(0); 690 } 691 } 692 DELAY(1); 693 i = SK_XM_READ_2(sc_if, XM_PHY_DATA); 694 695 return(i); 696 } 697 698 static int 699 sk_xmac_miibus_writereg(sc_if, phy, reg, val) 700 struct sk_if_softc *sc_if; 701 int phy, reg, val; 702 { 703 int i; 704 705 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8)); 706 for (i = 0; i < SK_TIMEOUT; i++) { 707 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY)) 708 break; 709 } 710 711 if (i == SK_TIMEOUT) { 712 if_printf(sc_if->sk_ifp, "phy failed to come ready\n"); 713 return (ETIMEDOUT); 714 } 715 716 SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val); 717 for (i = 0; i < SK_TIMEOUT; i++) { 718 DELAY(1); 719 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY)) 720 break; 721 } 722 if (i == SK_TIMEOUT) 723 if_printf(sc_if->sk_ifp, "phy write timed out\n"); 724 725 return(0); 726 } 727 728 static void 729 sk_xmac_miibus_statchg(sc_if) 730 struct sk_if_softc *sc_if; 731 { 732 struct mii_data *mii; 733 734 mii = device_get_softc(sc_if->sk_miibus); 735 736 /* 737 * If this is a GMII PHY, manually set the XMAC's 738 * duplex mode accordingly. 739 */ 740 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) { 741 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 742 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX); 743 } else { 744 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX); 745 } 746 } 747 } 748 749 static int 750 sk_marv_miibus_readreg(sc_if, phy, reg) 751 struct sk_if_softc *sc_if; 752 int phy, reg; 753 { 754 u_int16_t val; 755 int i; 756 757 if (phy != 0 || 758 (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER && 759 sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) { 760 return(0); 761 } 762 763 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) | 764 YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ); 765 766 for (i = 0; i < SK_TIMEOUT; i++) { 767 DELAY(1); 768 val = SK_YU_READ_2(sc_if, YUKON_SMICR); 769 if (val & YU_SMICR_READ_VALID) 770 break; 771 } 772 773 if (i == SK_TIMEOUT) { 774 if_printf(sc_if->sk_ifp, "phy failed to come ready\n"); 775 return(0); 776 } 777 778 val = SK_YU_READ_2(sc_if, YUKON_SMIDR); 779 780 return(val); 781 } 782 783 static int 784 sk_marv_miibus_writereg(sc_if, phy, reg, val) 785 struct sk_if_softc *sc_if; 786 int phy, reg, val; 787 { 788 int i; 789 790 SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val); 791 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) | 792 YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE); 793 794 for (i = 0; i < SK_TIMEOUT; i++) { 795 DELAY(1); 796 if ((SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY) == 0) 797 break; 798 } 799 if (i == SK_TIMEOUT) 800 if_printf(sc_if->sk_ifp, "phy write timeout\n"); 801 802 return(0); 803 } 804 805 static void 806 sk_marv_miibus_statchg(sc_if) 807 struct sk_if_softc *sc_if; 808 { 809 return; 810 } 811 812 #define HASH_BITS 6 813 814 static u_int32_t 815 sk_xmchash(addr) 816 const uint8_t *addr; 817 { 818 uint32_t crc; 819 820 /* Compute CRC for the address value. */ 821 crc = ether_crc32_le(addr, ETHER_ADDR_LEN); 822 823 return (~crc & ((1 << HASH_BITS) - 1)); 824 } 825 826 /* gmchash is just a big endian crc */ 827 static u_int32_t 828 sk_gmchash(addr) 829 const uint8_t *addr; 830 { 831 uint32_t crc; 832 833 /* Compute CRC for the address value. */ 834 crc = ether_crc32_be(addr, ETHER_ADDR_LEN); 835 836 return (crc & ((1 << HASH_BITS) - 1)); 837 } 838 839 static void 840 sk_setfilt(sc_if, addr, slot) 841 struct sk_if_softc *sc_if; 842 u_int16_t *addr; 843 int slot; 844 { 845 int base; 846 847 base = XM_RXFILT_ENTRY(slot); 848 849 SK_XM_WRITE_2(sc_if, base, addr[0]); 850 SK_XM_WRITE_2(sc_if, base + 2, addr[1]); 851 SK_XM_WRITE_2(sc_if, base + 4, addr[2]); 852 853 return; 854 } 855 856 static void 857 sk_setmulti(sc_if) 858 struct sk_if_softc *sc_if; 859 { 860 struct sk_softc *sc = sc_if->sk_softc; 861 struct ifnet *ifp = sc_if->sk_ifp; 862 u_int32_t hashes[2] = { 0, 0 }; 863 int h = 0, i; 864 struct ifmultiaddr *ifma; 865 u_int16_t dummy[] = { 0, 0, 0 }; 866 u_int16_t maddr[(ETHER_ADDR_LEN+1)/2]; 867 868 SK_IF_LOCK_ASSERT(sc_if); 869 870 /* First, zot all the existing filters. */ 871 switch(sc->sk_type) { 872 case SK_GENESIS: 873 for (i = 1; i < XM_RXFILT_MAX; i++) 874 sk_setfilt(sc_if, dummy, i); 875 876 SK_XM_WRITE_4(sc_if, XM_MAR0, 0); 877 SK_XM_WRITE_4(sc_if, XM_MAR2, 0); 878 break; 879 case SK_YUKON: 880 case SK_YUKON_LITE: 881 case SK_YUKON_LP: 882 case SK_YUKON_EC: 883 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0); 884 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0); 885 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0); 886 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0); 887 break; 888 } 889 890 /* Now program new ones. */ 891 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 892 hashes[0] = 0xFFFFFFFF; 893 hashes[1] = 0xFFFFFFFF; 894 } else { 895 i = 1; 896 IF_ADDR_LOCK(ifp); 897 TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) { 898 if (ifma->ifma_addr->sa_family != AF_LINK) 899 continue; 900 /* 901 * Program the first XM_RXFILT_MAX multicast groups 902 * into the perfect filter. For all others, 903 * use the hash table. 904 */ 905 if (sc->sk_type == SK_GENESIS && i < XM_RXFILT_MAX) { 906 bcopy(LLADDR( 907 (struct sockaddr_dl *)ifma->ifma_addr), 908 maddr, ETHER_ADDR_LEN); 909 sk_setfilt(sc_if, maddr, i); 910 i++; 911 continue; 912 } 913 914 switch(sc->sk_type) { 915 case SK_GENESIS: 916 bcopy(LLADDR( 917 (struct sockaddr_dl *)ifma->ifma_addr), 918 maddr, ETHER_ADDR_LEN); 919 h = sk_xmchash((const uint8_t *)maddr); 920 break; 921 case SK_YUKON: 922 case SK_YUKON_LITE: 923 case SK_YUKON_LP: 924 case SK_YUKON_EC: 925 bcopy(LLADDR( 926 (struct sockaddr_dl *)ifma->ifma_addr), 927 maddr, ETHER_ADDR_LEN); 928 h = sk_gmchash((const uint8_t *)maddr); 929 break; 930 } 931 if (h < 32) 932 hashes[0] |= (1 << h); 933 else 934 hashes[1] |= (1 << (h - 32)); 935 } 936 IF_ADDR_UNLOCK(ifp); 937 } 938 939 switch(sc->sk_type) { 940 case SK_GENESIS: 941 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH| 942 XM_MODE_RX_USE_PERFECT); 943 SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]); 944 SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]); 945 break; 946 case SK_YUKON: 947 case SK_YUKON_LITE: 948 case SK_YUKON_LP: 949 case SK_YUKON_EC: 950 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff); 951 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff); 952 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff); 953 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff); 954 break; 955 } 956 957 return; 958 } 959 960 static void 961 sk_setpromisc(sc_if) 962 struct sk_if_softc *sc_if; 963 { 964 struct sk_softc *sc = sc_if->sk_softc; 965 struct ifnet *ifp = sc_if->sk_ifp; 966 967 SK_IF_LOCK_ASSERT(sc_if); 968 969 switch(sc->sk_type) { 970 case SK_GENESIS: 971 if (ifp->if_flags & IFF_PROMISC) { 972 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC); 973 } else { 974 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC); 975 } 976 break; 977 case SK_YUKON: 978 case SK_YUKON_LITE: 979 case SK_YUKON_LP: 980 case SK_YUKON_EC: 981 if (ifp->if_flags & IFF_PROMISC) { 982 SK_YU_CLRBIT_2(sc_if, YUKON_RCR, 983 YU_RCR_UFLEN | YU_RCR_MUFLEN); 984 } else { 985 SK_YU_SETBIT_2(sc_if, YUKON_RCR, 986 YU_RCR_UFLEN | YU_RCR_MUFLEN); 987 } 988 break; 989 } 990 991 return; 992 } 993 994 static int 995 sk_init_rx_ring(sc_if) 996 struct sk_if_softc *sc_if; 997 { 998 struct sk_ring_data *rd; 999 bus_addr_t addr; 1000 u_int32_t csum_start; 1001 int i; 1002 1003 sc_if->sk_cdata.sk_rx_cons = 0; 1004 1005 csum_start = (ETHER_HDR_LEN + sizeof(struct ip)) << 16 | 1006 ETHER_HDR_LEN; 1007 rd = &sc_if->sk_rdata; 1008 bzero(rd->sk_rx_ring, sizeof(struct sk_rx_desc) * SK_RX_RING_CNT); 1009 for (i = 0; i < SK_RX_RING_CNT; i++) { 1010 if (sk_newbuf(sc_if, i) != 0) 1011 return (ENOBUFS); 1012 if (i == (SK_RX_RING_CNT - 1)) 1013 addr = SK_RX_RING_ADDR(sc_if, 0); 1014 else 1015 addr = SK_RX_RING_ADDR(sc_if, i + 1); 1016 rd->sk_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr)); 1017 rd->sk_rx_ring[i].sk_csum_start = htole32(csum_start); 1018 } 1019 1020 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag, 1021 sc_if->sk_cdata.sk_rx_ring_map, 1022 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1023 1024 return(0); 1025 } 1026 1027 static int 1028 sk_init_jumbo_rx_ring(sc_if) 1029 struct sk_if_softc *sc_if; 1030 { 1031 struct sk_ring_data *rd; 1032 bus_addr_t addr; 1033 u_int32_t csum_start; 1034 int i; 1035 1036 sc_if->sk_cdata.sk_jumbo_rx_cons = 0; 1037 1038 csum_start = ((ETHER_HDR_LEN + sizeof(struct ip)) << 16) | 1039 ETHER_HDR_LEN; 1040 rd = &sc_if->sk_rdata; 1041 bzero(rd->sk_jumbo_rx_ring, 1042 sizeof(struct sk_rx_desc) * SK_JUMBO_RX_RING_CNT); 1043 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) { 1044 if (sk_jumbo_newbuf(sc_if, i) != 0) 1045 return (ENOBUFS); 1046 if (i == (SK_JUMBO_RX_RING_CNT - 1)) 1047 addr = SK_JUMBO_RX_RING_ADDR(sc_if, 0); 1048 else 1049 addr = SK_JUMBO_RX_RING_ADDR(sc_if, i + 1); 1050 rd->sk_jumbo_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr)); 1051 rd->sk_jumbo_rx_ring[i].sk_csum_start = htole32(csum_start); 1052 } 1053 1054 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, 1055 sc_if->sk_cdata.sk_jumbo_rx_ring_map, 1056 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1057 1058 return (0); 1059 } 1060 1061 static void 1062 sk_init_tx_ring(sc_if) 1063 struct sk_if_softc *sc_if; 1064 { 1065 struct sk_ring_data *rd; 1066 struct sk_txdesc *txd; 1067 bus_addr_t addr; 1068 int i; 1069 1070 STAILQ_INIT(&sc_if->sk_cdata.sk_txfreeq); 1071 STAILQ_INIT(&sc_if->sk_cdata.sk_txbusyq); 1072 1073 sc_if->sk_cdata.sk_tx_prod = 0; 1074 sc_if->sk_cdata.sk_tx_cons = 0; 1075 sc_if->sk_cdata.sk_tx_cnt = 0; 1076 1077 rd = &sc_if->sk_rdata; 1078 bzero(rd->sk_tx_ring, sizeof(struct sk_tx_desc) * SK_TX_RING_CNT); 1079 for (i = 0; i < SK_TX_RING_CNT; i++) { 1080 if (i == (SK_TX_RING_CNT - 1)) 1081 addr = SK_TX_RING_ADDR(sc_if, 0); 1082 else 1083 addr = SK_TX_RING_ADDR(sc_if, i + 1); 1084 rd->sk_tx_ring[i].sk_next = htole32(SK_ADDR_LO(addr)); 1085 txd = &sc_if->sk_cdata.sk_txdesc[i]; 1086 STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q); 1087 } 1088 1089 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag, 1090 sc_if->sk_cdata.sk_tx_ring_map, 1091 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1092 } 1093 1094 static __inline void 1095 sk_discard_rxbuf(sc_if, idx) 1096 struct sk_if_softc *sc_if; 1097 int idx; 1098 { 1099 struct sk_rx_desc *r; 1100 struct sk_rxdesc *rxd; 1101 struct mbuf *m; 1102 1103 1104 r = &sc_if->sk_rdata.sk_rx_ring[idx]; 1105 rxd = &sc_if->sk_cdata.sk_rxdesc[idx]; 1106 m = rxd->rx_m; 1107 r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM); 1108 } 1109 1110 static __inline void 1111 sk_discard_jumbo_rxbuf(sc_if, idx) 1112 struct sk_if_softc *sc_if; 1113 int idx; 1114 { 1115 struct sk_rx_desc *r; 1116 struct sk_rxdesc *rxd; 1117 struct mbuf *m; 1118 1119 r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx]; 1120 rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx]; 1121 m = rxd->rx_m; 1122 r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM); 1123 } 1124 1125 static int 1126 sk_newbuf(sc_if, idx) 1127 struct sk_if_softc *sc_if; 1128 int idx; 1129 { 1130 struct sk_rx_desc *r; 1131 struct sk_rxdesc *rxd; 1132 struct mbuf *m; 1133 bus_dma_segment_t segs[1]; 1134 bus_dmamap_t map; 1135 int nsegs; 1136 1137 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1138 if (m == NULL) 1139 return (ENOBUFS); 1140 m->m_len = m->m_pkthdr.len = MCLBYTES; 1141 m_adj(m, ETHER_ALIGN); 1142 1143 if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_rx_tag, 1144 sc_if->sk_cdata.sk_rx_sparemap, m, segs, &nsegs, 0) != 0) { 1145 m_freem(m); 1146 return (ENOBUFS); 1147 } 1148 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1149 1150 rxd = &sc_if->sk_cdata.sk_rxdesc[idx]; 1151 if (rxd->rx_m != NULL) { 1152 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap, 1153 BUS_DMASYNC_POSTREAD); 1154 bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap); 1155 } 1156 map = rxd->rx_dmamap; 1157 rxd->rx_dmamap = sc_if->sk_cdata.sk_rx_sparemap; 1158 sc_if->sk_cdata.sk_rx_sparemap = map; 1159 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap, 1160 BUS_DMASYNC_PREREAD); 1161 rxd->rx_m = m; 1162 r = &sc_if->sk_rdata.sk_rx_ring[idx]; 1163 r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr)); 1164 r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr)); 1165 r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM); 1166 1167 return (0); 1168 } 1169 1170 static int 1171 sk_jumbo_newbuf(sc_if, idx) 1172 struct sk_if_softc *sc_if; 1173 int idx; 1174 { 1175 struct sk_rx_desc *r; 1176 struct sk_rxdesc *rxd; 1177 struct mbuf *m; 1178 bus_dma_segment_t segs[1]; 1179 bus_dmamap_t map; 1180 int nsegs; 1181 void *buf; 1182 1183 MGETHDR(m, M_DONTWAIT, MT_DATA); 1184 if (m == NULL) 1185 return (ENOBUFS); 1186 buf = sk_jalloc(sc_if); 1187 if (buf == NULL) { 1188 m_freem(m); 1189 return (ENOBUFS); 1190 } 1191 /* Attach the buffer to the mbuf */ 1192 MEXTADD(m, buf, SK_JLEN, sk_jfree, (struct sk_if_softc *)sc_if, 0, 1193 EXT_NET_DRV); 1194 if ((m->m_flags & M_EXT) == 0) { 1195 m_freem(m); 1196 return (ENOBUFS); 1197 } 1198 m->m_pkthdr.len = m->m_len = SK_JLEN; 1199 /* 1200 * Adjust alignment so packet payload begins on a 1201 * longword boundary. Mandatory for Alpha, useful on 1202 * x86 too. 1203 */ 1204 m_adj(m, ETHER_ALIGN); 1205 1206 if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_jumbo_rx_tag, 1207 sc_if->sk_cdata.sk_jumbo_rx_sparemap, m, segs, &nsegs, 0) != 0) { 1208 m_freem(m); 1209 return (ENOBUFS); 1210 } 1211 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1212 1213 rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx]; 1214 if (rxd->rx_m != NULL) { 1215 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap, 1216 BUS_DMASYNC_POSTREAD); 1217 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag, 1218 rxd->rx_dmamap); 1219 } 1220 map = rxd->rx_dmamap; 1221 rxd->rx_dmamap = sc_if->sk_cdata.sk_jumbo_rx_sparemap; 1222 sc_if->sk_cdata.sk_jumbo_rx_sparemap = map; 1223 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap, 1224 BUS_DMASYNC_PREREAD); 1225 rxd->rx_m = m; 1226 r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx]; 1227 r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr)); 1228 r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr)); 1229 r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM); 1230 1231 return (0); 1232 } 1233 1234 /* 1235 * Set media options. 1236 */ 1237 static int 1238 sk_ifmedia_upd(ifp) 1239 struct ifnet *ifp; 1240 { 1241 struct sk_if_softc *sc_if = ifp->if_softc; 1242 struct mii_data *mii; 1243 1244 mii = device_get_softc(sc_if->sk_miibus); 1245 sk_init(sc_if); 1246 mii_mediachg(mii); 1247 1248 return(0); 1249 } 1250 1251 /* 1252 * Report current media status. 1253 */ 1254 static void 1255 sk_ifmedia_sts(ifp, ifmr) 1256 struct ifnet *ifp; 1257 struct ifmediareq *ifmr; 1258 { 1259 struct sk_if_softc *sc_if; 1260 struct mii_data *mii; 1261 1262 sc_if = ifp->if_softc; 1263 mii = device_get_softc(sc_if->sk_miibus); 1264 1265 mii_pollstat(mii); 1266 ifmr->ifm_active = mii->mii_media_active; 1267 ifmr->ifm_status = mii->mii_media_status; 1268 1269 return; 1270 } 1271 1272 static int 1273 sk_ioctl(ifp, command, data) 1274 struct ifnet *ifp; 1275 u_long command; 1276 caddr_t data; 1277 { 1278 struct sk_if_softc *sc_if = ifp->if_softc; 1279 struct ifreq *ifr = (struct ifreq *) data; 1280 int error, mask; 1281 struct mii_data *mii; 1282 1283 error = 0; 1284 switch(command) { 1285 case SIOCSIFMTU: 1286 SK_IF_LOCK(sc_if); 1287 if (ifr->ifr_mtu > SK_JUMBO_MTU) 1288 error = EINVAL; 1289 else { 1290 ifp->if_mtu = ifr->ifr_mtu; 1291 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1292 sk_init_locked(sc_if); 1293 } 1294 SK_IF_UNLOCK(sc_if); 1295 break; 1296 case SIOCSIFFLAGS: 1297 SK_IF_LOCK(sc_if); 1298 if (ifp->if_flags & IFF_UP) { 1299 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1300 if ((ifp->if_flags ^ sc_if->sk_if_flags) 1301 & IFF_PROMISC) { 1302 sk_setpromisc(sc_if); 1303 sk_setmulti(sc_if); 1304 } 1305 } else 1306 sk_init_locked(sc_if); 1307 } else { 1308 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1309 sk_stop(sc_if); 1310 } 1311 sc_if->sk_if_flags = ifp->if_flags; 1312 SK_IF_UNLOCK(sc_if); 1313 break; 1314 case SIOCADDMULTI: 1315 case SIOCDELMULTI: 1316 SK_IF_LOCK(sc_if); 1317 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1318 sk_setmulti(sc_if); 1319 SK_IF_UNLOCK(sc_if); 1320 break; 1321 case SIOCGIFMEDIA: 1322 case SIOCSIFMEDIA: 1323 mii = device_get_softc(sc_if->sk_miibus); 1324 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 1325 break; 1326 case SIOCSIFCAP: 1327 SK_IF_LOCK(sc_if); 1328 if (sc_if->sk_softc->sk_type == SK_GENESIS) { 1329 SK_IF_UNLOCK(sc_if); 1330 break; 1331 } 1332 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1333 if (mask & IFCAP_HWCSUM) { 1334 ifp->if_capenable ^= IFCAP_HWCSUM; 1335 if (IFCAP_HWCSUM & ifp->if_capenable && 1336 IFCAP_HWCSUM & ifp->if_capabilities) 1337 ifp->if_hwassist = SK_CSUM_FEATURES; 1338 else 1339 ifp->if_hwassist = 0; 1340 } 1341 SK_IF_UNLOCK(sc_if); 1342 break; 1343 default: 1344 error = ether_ioctl(ifp, command, data); 1345 break; 1346 } 1347 1348 return (error); 1349 } 1350 1351 /* 1352 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device 1353 * IDs against our list and return a device name if we find a match. 1354 */ 1355 static int 1356 skc_probe(dev) 1357 device_t dev; 1358 { 1359 struct sk_type *t = sk_devs; 1360 1361 while(t->sk_name != NULL) { 1362 if ((pci_get_vendor(dev) == t->sk_vid) && 1363 (pci_get_device(dev) == t->sk_did)) { 1364 /* 1365 * Only attach to rev. 2 of the Linksys EG1032 adapter. 1366 * Rev. 3 is supported by re(4). 1367 */ 1368 if ((t->sk_vid == VENDORID_LINKSYS) && 1369 (t->sk_did == DEVICEID_LINKSYS_EG1032) && 1370 (pci_get_subdevice(dev) != 1371 SUBDEVICEID_LINKSYS_EG1032_REV2)) { 1372 t++; 1373 continue; 1374 } 1375 device_set_desc(dev, t->sk_name); 1376 return (BUS_PROBE_DEFAULT); 1377 } 1378 t++; 1379 } 1380 1381 return(ENXIO); 1382 } 1383 1384 /* 1385 * Force the GEnesis into reset, then bring it out of reset. 1386 */ 1387 static void 1388 sk_reset(sc) 1389 struct sk_softc *sc; 1390 { 1391 1392 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET); 1393 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET); 1394 if (SK_YUKON_FAMILY(sc->sk_type)) 1395 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET); 1396 1397 DELAY(1000); 1398 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET); 1399 DELAY(2); 1400 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET); 1401 if (SK_YUKON_FAMILY(sc->sk_type)) 1402 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR); 1403 1404 if (sc->sk_type == SK_GENESIS) { 1405 /* Configure packet arbiter */ 1406 sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET); 1407 sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT); 1408 sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT); 1409 sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT); 1410 sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT); 1411 } 1412 1413 /* Enable RAM interface */ 1414 sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET); 1415 1416 /* 1417 * Configure interrupt moderation. The moderation timer 1418 * defers interrupts specified in the interrupt moderation 1419 * timer mask based on the timeout specified in the interrupt 1420 * moderation timer init register. Each bit in the timer 1421 * register represents one tick, so to specify a timeout in 1422 * microseconds, we have to multiply by the correct number of 1423 * ticks-per-microsecond. 1424 */ 1425 switch (sc->sk_type) { 1426 case SK_GENESIS: 1427 sc->sk_int_ticks = SK_IMTIMER_TICKS_GENESIS; 1428 break; 1429 case SK_YUKON_EC: 1430 sc->sk_int_ticks = SK_IMTIMER_TICKS_YUKON_EC; 1431 break; 1432 default: 1433 sc->sk_int_ticks = SK_IMTIMER_TICKS_YUKON; 1434 break; 1435 } 1436 if (bootverbose) 1437 device_printf(sc->sk_dev, "interrupt moderation is %d us\n", 1438 sc->sk_int_mod); 1439 sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod, 1440 sc->sk_int_ticks)); 1441 sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF| 1442 SK_ISR_RX1_EOF|SK_ISR_RX2_EOF); 1443 sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START); 1444 1445 return; 1446 } 1447 1448 static int 1449 sk_probe(dev) 1450 device_t dev; 1451 { 1452 struct sk_softc *sc; 1453 1454 sc = device_get_softc(device_get_parent(dev)); 1455 1456 /* 1457 * Not much to do here. We always know there will be 1458 * at least one XMAC present, and if there are two, 1459 * skc_attach() will create a second device instance 1460 * for us. 1461 */ 1462 switch (sc->sk_type) { 1463 case SK_GENESIS: 1464 device_set_desc(dev, "XaQti Corp. XMAC II"); 1465 break; 1466 case SK_YUKON: 1467 case SK_YUKON_LITE: 1468 case SK_YUKON_LP: 1469 case SK_YUKON_EC: 1470 device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon"); 1471 break; 1472 } 1473 1474 return (BUS_PROBE_DEFAULT); 1475 } 1476 1477 /* 1478 * Each XMAC chip is attached as a separate logical IP interface. 1479 * Single port cards will have only one logical interface of course. 1480 */ 1481 static int 1482 sk_attach(dev) 1483 device_t dev; 1484 { 1485 struct sk_softc *sc; 1486 struct sk_if_softc *sc_if; 1487 struct ifnet *ifp; 1488 int i, port, error; 1489 u_char eaddr[6]; 1490 1491 if (dev == NULL) 1492 return(EINVAL); 1493 1494 error = 0; 1495 sc_if = device_get_softc(dev); 1496 sc = device_get_softc(device_get_parent(dev)); 1497 port = *(int *)device_get_ivars(dev); 1498 1499 sc_if->sk_if_dev = dev; 1500 sc_if->sk_port = port; 1501 sc_if->sk_softc = sc; 1502 sc->sk_if[port] = sc_if; 1503 if (port == SK_PORT_A) 1504 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0; 1505 if (port == SK_PORT_B) 1506 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1; 1507 1508 callout_init_mtx(&sc_if->sk_tick_ch, &sc_if->sk_softc->sk_mtx, 0); 1509 1510 if (sk_dma_alloc(sc_if) != 0) { 1511 error = ENOMEM; 1512 goto fail; 1513 } 1514 1515 ifp = sc_if->sk_ifp = if_alloc(IFT_ETHER); 1516 if (ifp == NULL) { 1517 device_printf(sc_if->sk_if_dev, "can not if_alloc()\n"); 1518 error = ENOSPC; 1519 goto fail; 1520 } 1521 ifp->if_softc = sc_if; 1522 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1523 ifp->if_mtu = ETHERMTU; 1524 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1525 /* 1526 * SK_GENESIS has a bug in checksum offload - From linux. 1527 */ 1528 if (sc_if->sk_softc->sk_type != SK_GENESIS) { 1529 ifp->if_capabilities = IFCAP_HWCSUM; 1530 ifp->if_hwassist = SK_CSUM_FEATURES; 1531 } else { 1532 ifp->if_capabilities = 0; 1533 ifp->if_hwassist = 0; 1534 } 1535 ifp->if_capenable = ifp->if_capabilities; 1536 ifp->if_ioctl = sk_ioctl; 1537 ifp->if_start = sk_start; 1538 ifp->if_watchdog = sk_watchdog; 1539 ifp->if_init = sk_init; 1540 IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1); 1541 ifp->if_snd.ifq_drv_maxlen = SK_TX_RING_CNT - 1; 1542 IFQ_SET_READY(&ifp->if_snd); 1543 1544 /* 1545 * Get station address for this interface. Note that 1546 * dual port cards actually come with three station 1547 * addresses: one for each port, plus an extra. The 1548 * extra one is used by the SysKonnect driver software 1549 * as a 'virtual' station address for when both ports 1550 * are operating in failover mode. Currently we don't 1551 * use this extra address. 1552 */ 1553 SK_IF_LOCK(sc_if); 1554 for (i = 0; i < ETHER_ADDR_LEN; i++) 1555 eaddr[i] = 1556 sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i); 1557 1558 /* 1559 * Set up RAM buffer addresses. The NIC will have a certain 1560 * amount of SRAM on it, somewhere between 512K and 2MB. We 1561 * need to divide this up a) between the transmitter and 1562 * receiver and b) between the two XMACs, if this is a 1563 * dual port NIC. Our algotithm is to divide up the memory 1564 * evenly so that everyone gets a fair share. 1565 * 1566 * Just to be contrary, Yukon2 appears to have separate memory 1567 * for each MAC. 1568 */ 1569 if (SK_IS_YUKON2(sc) || 1570 sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) { 1571 u_int32_t chunk, val; 1572 1573 chunk = sc->sk_ramsize / 2; 1574 val = sc->sk_rboff / sizeof(u_int64_t); 1575 sc_if->sk_rx_ramstart = val; 1576 val += (chunk / sizeof(u_int64_t)); 1577 sc_if->sk_rx_ramend = val - 1; 1578 sc_if->sk_tx_ramstart = val; 1579 val += (chunk / sizeof(u_int64_t)); 1580 sc_if->sk_tx_ramend = val - 1; 1581 } else { 1582 u_int32_t chunk, val; 1583 1584 chunk = sc->sk_ramsize / 4; 1585 val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) / 1586 sizeof(u_int64_t); 1587 sc_if->sk_rx_ramstart = val; 1588 val += (chunk / sizeof(u_int64_t)); 1589 sc_if->sk_rx_ramend = val - 1; 1590 sc_if->sk_tx_ramstart = val; 1591 val += (chunk / sizeof(u_int64_t)); 1592 sc_if->sk_tx_ramend = val - 1; 1593 } 1594 1595 /* Read and save PHY type and set PHY address */ 1596 sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF; 1597 if (!SK_YUKON_FAMILY(sc->sk_type)) { 1598 switch(sc_if->sk_phytype) { 1599 case SK_PHYTYPE_XMAC: 1600 sc_if->sk_phyaddr = SK_PHYADDR_XMAC; 1601 break; 1602 case SK_PHYTYPE_BCOM: 1603 sc_if->sk_phyaddr = SK_PHYADDR_BCOM; 1604 break; 1605 default: 1606 device_printf(sc->sk_dev, "unsupported PHY type: %d\n", 1607 sc_if->sk_phytype); 1608 error = ENODEV; 1609 SK_IF_UNLOCK(sc_if); 1610 goto fail; 1611 } 1612 } else { 1613 if (sc_if->sk_phytype < SK_PHYTYPE_MARV_COPPER && 1614 sc->sk_pmd != 'S') { 1615 /* not initialized, punt */ 1616 sc_if->sk_phytype = SK_PHYTYPE_MARV_COPPER; 1617 sc->sk_coppertype = 1; 1618 } 1619 1620 sc_if->sk_phyaddr = SK_PHYADDR_MARV; 1621 1622 if (!(sc->sk_coppertype)) 1623 sc_if->sk_phytype = SK_PHYTYPE_MARV_FIBER; 1624 } 1625 1626 /* 1627 * Call MI attach routine. Can't hold locks when calling into ether_*. 1628 */ 1629 SK_IF_UNLOCK(sc_if); 1630 ether_ifattach(ifp, eaddr); 1631 SK_IF_LOCK(sc_if); 1632 1633 /* 1634 * The hardware should be ready for VLAN_MTU by default: 1635 * XMAC II has 0x8100 in VLAN Tag Level 1 register initially; 1636 * YU_SMR_MFL_VLAN is set by this driver in Yukon. 1637 * 1638 */ 1639 ifp->if_capabilities |= IFCAP_VLAN_MTU; 1640 ifp->if_capenable |= IFCAP_VLAN_MTU; 1641 /* 1642 * Tell the upper layer(s) we support long frames. 1643 * Must appear after the call to ether_ifattach() because 1644 * ether_ifattach() sets ifi_hdrlen to the default value. 1645 */ 1646 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1647 1648 /* 1649 * Do miibus setup. 1650 */ 1651 switch (sc->sk_type) { 1652 case SK_GENESIS: 1653 sk_init_xmac(sc_if); 1654 break; 1655 case SK_YUKON: 1656 case SK_YUKON_LITE: 1657 case SK_YUKON_LP: 1658 case SK_YUKON_EC: 1659 sk_init_yukon(sc_if); 1660 break; 1661 } 1662 1663 SK_IF_UNLOCK(sc_if); 1664 if (mii_phy_probe(dev, &sc_if->sk_miibus, 1665 sk_ifmedia_upd, sk_ifmedia_sts)) { 1666 device_printf(sc_if->sk_if_dev, "no PHY found!\n"); 1667 ether_ifdetach(ifp); 1668 error = ENXIO; 1669 goto fail; 1670 } 1671 1672 fail: 1673 if (error) { 1674 /* Access should be ok even though lock has been dropped */ 1675 sc->sk_if[port] = NULL; 1676 sk_detach(dev); 1677 } 1678 1679 return(error); 1680 } 1681 1682 /* 1683 * Attach the interface. Allocate softc structures, do ifmedia 1684 * setup and ethernet/BPF attach. 1685 */ 1686 static int 1687 skc_attach(dev) 1688 device_t dev; 1689 { 1690 struct sk_softc *sc; 1691 int error = 0, *port, sk_macs; 1692 uint8_t skrs; 1693 char *pname, *revstr; 1694 1695 sc = device_get_softc(dev); 1696 sc->sk_dev = dev; 1697 1698 mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 1699 MTX_DEF); 1700 mtx_init(&sc->sk_mii_mtx, "sk_mii_mutex", NULL, MTX_DEF); 1701 /* 1702 * Map control/status registers. 1703 */ 1704 pci_enable_busmaster(dev); 1705 1706 /* Allocate resources */ 1707 #ifdef SK_USEIOSPACE 1708 sc->sk_res_spec = sk_res_spec_io; 1709 #else 1710 sc->sk_res_spec = sk_res_spec_mem; 1711 #endif 1712 error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res); 1713 if (error) { 1714 if (sc->sk_res_spec == sk_res_spec_mem) 1715 sc->sk_res_spec = sk_res_spec_io; 1716 else 1717 sc->sk_res_spec = sk_res_spec_mem; 1718 error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res); 1719 if (error) { 1720 device_printf(dev, "couldn't allocate %s resources\n", 1721 sc->sk_res_spec == sk_res_spec_mem ? "memory" : 1722 "I/O"); 1723 goto fail; 1724 } 1725 } 1726 1727 sc->sk_type = sk_win_read_1(sc, SK_CHIPVER); 1728 sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4) & 0xf; 1729 1730 /* Bail out if chip is not recognized. */ 1731 if (sc->sk_type != SK_GENESIS && !SK_YUKON_FAMILY(sc->sk_type)) { 1732 device_printf(dev, "unknown device: chipver=%02x, rev=%x\n", 1733 sc->sk_type, sc->sk_rev); 1734 error = ENXIO; 1735 goto fail; 1736 } 1737 1738 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 1739 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 1740 OID_AUTO, "int_mod", CTLTYPE_INT|CTLFLAG_RW, 1741 &sc->sk_int_mod, 0, sysctl_hw_sk_int_mod, "I", 1742 "SK interrupt moderation"); 1743 1744 /* Pull in device tunables. */ 1745 sc->sk_int_mod = SK_IM_DEFAULT; 1746 error = resource_int_value(device_get_name(dev), device_get_unit(dev), 1747 "int_mod", &sc->sk_int_mod); 1748 if (error == 0) { 1749 if (sc->sk_int_mod < SK_IM_MIN || 1750 sc->sk_int_mod > SK_IM_MAX) { 1751 device_printf(dev, "int_mod value out of range; " 1752 "using default: %d\n", SK_IM_DEFAULT); 1753 sc->sk_int_mod = SK_IM_DEFAULT; 1754 } 1755 } 1756 1757 /* Reset the adapter. */ 1758 sk_reset(sc); 1759 1760 /* Read and save vital product data from EEPROM. */ 1761 sk_vpd_read(sc); 1762 1763 skrs = sk_win_read_1(sc, SK_EPROM0); 1764 if (sc->sk_type == SK_GENESIS) { 1765 /* Read and save RAM size and RAMbuffer offset */ 1766 switch(skrs) { 1767 case SK_RAMSIZE_512K_64: 1768 sc->sk_ramsize = 0x80000; 1769 sc->sk_rboff = SK_RBOFF_0; 1770 break; 1771 case SK_RAMSIZE_1024K_64: 1772 sc->sk_ramsize = 0x100000; 1773 sc->sk_rboff = SK_RBOFF_80000; 1774 break; 1775 case SK_RAMSIZE_1024K_128: 1776 sc->sk_ramsize = 0x100000; 1777 sc->sk_rboff = SK_RBOFF_0; 1778 break; 1779 case SK_RAMSIZE_2048K_128: 1780 sc->sk_ramsize = 0x200000; 1781 sc->sk_rboff = SK_RBOFF_0; 1782 break; 1783 default: 1784 device_printf(dev, "unknown ram size: %d\n", skrs); 1785 error = ENXIO; 1786 goto fail; 1787 } 1788 } else { /* SK_YUKON_FAMILY */ 1789 if (skrs == 0x00) 1790 sc->sk_ramsize = 0x20000; 1791 else 1792 sc->sk_ramsize = skrs * (1<<12); 1793 sc->sk_rboff = SK_RBOFF_0; 1794 } 1795 1796 /* Read and save physical media type */ 1797 sc->sk_pmd = sk_win_read_1(sc, SK_PMDTYPE); 1798 1799 if (sc->sk_pmd == 'T' || sc->sk_pmd == '1') 1800 sc->sk_coppertype = 1; 1801 else 1802 sc->sk_coppertype = 0; 1803 1804 /* Determine whether to name it with VPD PN or just make it up. 1805 * Marvell Yukon VPD PN seems to freqently be bogus. */ 1806 switch (pci_get_device(dev)) { 1807 case DEVICEID_SK_V1: 1808 case DEVICEID_BELKIN_5005: 1809 case DEVICEID_3COM_3C940: 1810 case DEVICEID_LINKSYS_EG1032: 1811 case DEVICEID_DLINK_DGE530T_A1: 1812 case DEVICEID_DLINK_DGE530T_B1: 1813 /* Stay with VPD PN. */ 1814 pname = sc->sk_vpd_prodname; 1815 break; 1816 case DEVICEID_SK_V2: 1817 case DEVICEID_MRVL_4360: 1818 case DEVICEID_MRVL_4361: 1819 case DEVICEID_MRVL_4362: 1820 /* YUKON VPD PN might bear no resemblance to reality. */ 1821 switch (sc->sk_type) { 1822 case SK_GENESIS: 1823 /* Stay with VPD PN. */ 1824 pname = sc->sk_vpd_prodname; 1825 break; 1826 case SK_YUKON: 1827 pname = "Marvell Yukon Gigabit Ethernet"; 1828 break; 1829 case SK_YUKON_LITE: 1830 pname = "Marvell Yukon Lite Gigabit Ethernet"; 1831 break; 1832 case SK_YUKON_LP: 1833 pname = "Marvell Yukon LP Gigabit Ethernet"; 1834 break; 1835 case SK_YUKON_EC: 1836 pname = "Marvell Yukon-2 EC Gigabit Ethernet"; 1837 break; 1838 default: 1839 pname = "Marvell Yukon (Unknown) Gigabit Ethernet"; 1840 break; 1841 } 1842 1843 /* Yukon Lite Rev. A0 needs special test. */ 1844 if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) { 1845 u_int32_t far; 1846 u_int8_t testbyte; 1847 1848 /* Save flash address register before testing. */ 1849 far = sk_win_read_4(sc, SK_EP_ADDR); 1850 1851 sk_win_write_1(sc, SK_EP_ADDR+0x03, 0xff); 1852 testbyte = sk_win_read_1(sc, SK_EP_ADDR+0x03); 1853 1854 if (testbyte != 0x00) { 1855 /* Yukon Lite Rev. A0 detected. */ 1856 sc->sk_type = SK_YUKON_LITE; 1857 sc->sk_rev = SK_YUKON_LITE_REV_A0; 1858 /* Restore flash address register. */ 1859 sk_win_write_4(sc, SK_EP_ADDR, far); 1860 } 1861 } 1862 break; 1863 default: 1864 device_printf(dev, "unknown device: vendor=%04x, device=%04x, " 1865 "chipver=%02x, rev=%x\n", 1866 pci_get_vendor(dev), pci_get_device(dev), 1867 sc->sk_type, sc->sk_rev); 1868 error = ENXIO; 1869 goto fail; 1870 } 1871 1872 if (sc->sk_type == SK_YUKON_LITE) { 1873 switch (sc->sk_rev) { 1874 case SK_YUKON_LITE_REV_A0: 1875 revstr = "A0"; 1876 break; 1877 case SK_YUKON_LITE_REV_A1: 1878 revstr = "A1"; 1879 break; 1880 case SK_YUKON_LITE_REV_A3: 1881 revstr = "A3"; 1882 break; 1883 default: 1884 revstr = ""; 1885 break; 1886 } 1887 } else if (sc->sk_type == SK_YUKON_EC) { 1888 switch (sc->sk_rev) { 1889 case SK_YUKON_EC_REV_A1: 1890 revstr = "A1"; 1891 break; 1892 case SK_YUKON_EC_REV_A2: 1893 revstr = "A2"; 1894 break; 1895 case SK_YUKON_EC_REV_A3: 1896 revstr = "A3"; 1897 break; 1898 default: 1899 revstr = ""; 1900 break; 1901 } 1902 } else { 1903 revstr = ""; 1904 } 1905 1906 /* Announce the product name and more VPD data if there. */ 1907 device_printf(dev, "%s rev. %s(0x%x)\n", 1908 pname != NULL ? pname : "<unknown>", revstr, sc->sk_rev); 1909 1910 if (bootverbose) { 1911 if (sc->sk_vpd_readonly != NULL && 1912 sc->sk_vpd_readonly_len != 0) { 1913 char buf[256]; 1914 char *dp = sc->sk_vpd_readonly; 1915 uint16_t l, len = sc->sk_vpd_readonly_len; 1916 1917 while (len >= 3) { 1918 if ((*dp == 'P' && *(dp+1) == 'N') || 1919 (*dp == 'E' && *(dp+1) == 'C') || 1920 (*dp == 'M' && *(dp+1) == 'N') || 1921 (*dp == 'S' && *(dp+1) == 'N')) { 1922 l = 0; 1923 while (l < *(dp+2)) { 1924 buf[l] = *(dp+3+l); 1925 ++l; 1926 } 1927 buf[l] = '\0'; 1928 device_printf(dev, "%c%c: %s\n", 1929 *dp, *(dp+1), buf); 1930 len -= (3 + l); 1931 dp += (3 + l); 1932 } else { 1933 len -= (3 + *(dp+2)); 1934 dp += (3 + *(dp+2)); 1935 } 1936 } 1937 } 1938 device_printf(dev, "chip ver = 0x%02x\n", sc->sk_type); 1939 device_printf(dev, "chip rev = 0x%02x\n", sc->sk_rev); 1940 device_printf(dev, "SK_EPROM0 = 0x%02x\n", skrs); 1941 device_printf(dev, "SRAM size = 0x%06x\n", sc->sk_ramsize); 1942 } 1943 1944 sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1); 1945 if (sc->sk_devs[SK_PORT_A] == NULL) { 1946 device_printf(dev, "failed to add child for PORT_A\n"); 1947 error = ENXIO; 1948 goto fail; 1949 } 1950 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT); 1951 if (port == NULL) { 1952 device_printf(dev, "failed to allocate memory for " 1953 "ivars of PORT_A\n"); 1954 error = ENXIO; 1955 goto fail; 1956 } 1957 *port = SK_PORT_A; 1958 device_set_ivars(sc->sk_devs[SK_PORT_A], port); 1959 1960 sk_macs = 1; 1961 1962 if (SK_IS_YUKON2(sc)) { 1963 u_int8_t hw; 1964 1965 hw = sk_win_read_1(sc, SK_Y2_HWRES); 1966 if ((hw & SK_Y2_HWRES_LINK_MASK) == SK_Y2_HWRES_LINK_DUAL) { 1967 if ((sk_win_read_1(sc, SK_Y2_CLKGATE) & 1968 SK_Y2_CLKGATE_LINK2_INACTIVE) == 0) 1969 sk_macs++; 1970 } 1971 } else { 1972 if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) 1973 sk_macs++; 1974 } 1975 1976 if (sk_macs > 1) { 1977 sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1); 1978 if (sc->sk_devs[SK_PORT_B] == NULL) { 1979 device_printf(dev, "failed to add child for PORT_B\n"); 1980 error = ENXIO; 1981 goto fail; 1982 } 1983 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT); 1984 if (port == NULL) { 1985 device_printf(dev, "failed to allocate memory for " 1986 "ivars of PORT_B\n"); 1987 error = ENXIO; 1988 goto fail; 1989 } 1990 *port = SK_PORT_B; 1991 device_set_ivars(sc->sk_devs[SK_PORT_B], port); 1992 } 1993 1994 /* Turn on the 'driver is loaded' LED. */ 1995 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON); 1996 1997 error = bus_generic_attach(dev); 1998 if (error) { 1999 device_printf(dev, "failed to attach port(s)\n"); 2000 goto fail; 2001 } 2002 2003 /* Hook interrupt last to avoid having to lock softc */ 2004 error = bus_setup_intr(dev, sc->sk_res[1], INTR_TYPE_NET|INTR_MPSAFE, 2005 sk_intr, sc, &sc->sk_intrhand); 2006 2007 if (error) { 2008 device_printf(dev, "couldn't set up irq\n"); 2009 goto fail; 2010 } 2011 2012 fail: 2013 if (error) 2014 skc_detach(dev); 2015 2016 return(error); 2017 } 2018 2019 /* 2020 * Shutdown hardware and free up resources. This can be called any 2021 * time after the mutex has been initialized. It is called in both 2022 * the error case in attach and the normal detach case so it needs 2023 * to be careful about only freeing resources that have actually been 2024 * allocated. 2025 */ 2026 static int 2027 sk_detach(dev) 2028 device_t dev; 2029 { 2030 struct sk_if_softc *sc_if; 2031 struct ifnet *ifp; 2032 2033 sc_if = device_get_softc(dev); 2034 KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx), 2035 ("sk mutex not initialized in sk_detach")); 2036 SK_IF_LOCK(sc_if); 2037 2038 ifp = sc_if->sk_ifp; 2039 /* These should only be active if attach_xmac succeeded */ 2040 if (device_is_attached(dev)) { 2041 sk_stop(sc_if); 2042 /* Can't hold locks while calling detach */ 2043 SK_IF_UNLOCK(sc_if); 2044 callout_drain(&sc_if->sk_tick_ch); 2045 ether_ifdetach(ifp); 2046 SK_IF_LOCK(sc_if); 2047 } 2048 if (ifp) 2049 if_free(ifp); 2050 /* 2051 * We're generally called from skc_detach() which is using 2052 * device_delete_child() to get to here. It's already trashed 2053 * miibus for us, so don't do it here or we'll panic. 2054 */ 2055 /* 2056 if (sc_if->sk_miibus != NULL) 2057 device_delete_child(dev, sc_if->sk_miibus); 2058 */ 2059 bus_generic_detach(dev); 2060 sk_dma_free(sc_if); 2061 SK_IF_UNLOCK(sc_if); 2062 2063 return(0); 2064 } 2065 2066 static int 2067 skc_detach(dev) 2068 device_t dev; 2069 { 2070 struct sk_softc *sc; 2071 2072 sc = device_get_softc(dev); 2073 KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized")); 2074 2075 if (device_is_alive(dev)) { 2076 if (sc->sk_devs[SK_PORT_A] != NULL) { 2077 free(device_get_ivars(sc->sk_devs[SK_PORT_A]), M_DEVBUF); 2078 device_delete_child(dev, sc->sk_devs[SK_PORT_A]); 2079 } 2080 if (sc->sk_devs[SK_PORT_B] != NULL) { 2081 free(device_get_ivars(sc->sk_devs[SK_PORT_B]), M_DEVBUF); 2082 device_delete_child(dev, sc->sk_devs[SK_PORT_B]); 2083 } 2084 bus_generic_detach(dev); 2085 } 2086 2087 if (sc->sk_vpd_prodname != NULL) 2088 free(sc->sk_vpd_prodname, M_DEVBUF); 2089 if (sc->sk_vpd_readonly != NULL) 2090 free(sc->sk_vpd_readonly, M_DEVBUF); 2091 2092 if (sc->sk_intrhand) 2093 bus_teardown_intr(dev, sc->sk_res[1], sc->sk_intrhand); 2094 bus_release_resources(dev, sc->sk_res_spec, sc->sk_res); 2095 2096 mtx_destroy(&sc->sk_mii_mtx); 2097 mtx_destroy(&sc->sk_mtx); 2098 2099 return(0); 2100 } 2101 2102 struct sk_dmamap_arg { 2103 bus_addr_t sk_busaddr; 2104 }; 2105 2106 static void 2107 sk_dmamap_cb(arg, segs, nseg, error) 2108 void *arg; 2109 bus_dma_segment_t *segs; 2110 int nseg; 2111 int error; 2112 { 2113 struct sk_dmamap_arg *ctx; 2114 2115 if (error != 0) 2116 return; 2117 2118 ctx = arg; 2119 ctx->sk_busaddr = segs[0].ds_addr; 2120 } 2121 2122 /* 2123 * Allocate jumbo buffer storage. The SysKonnect adapters support 2124 * "jumbograms" (9K frames), although SysKonnect doesn't currently 2125 * use them in their drivers. In order for us to use them, we need 2126 * large 9K receive buffers, however standard mbuf clusters are only 2127 * 2048 bytes in size. Consequently, we need to allocate and manage 2128 * our own jumbo buffer pool. Fortunately, this does not require an 2129 * excessive amount of additional code. 2130 */ 2131 static int 2132 sk_dma_alloc(sc_if) 2133 struct sk_if_softc *sc_if; 2134 { 2135 struct sk_dmamap_arg ctx; 2136 struct sk_txdesc *txd; 2137 struct sk_rxdesc *rxd; 2138 struct sk_rxdesc *jrxd; 2139 u_int8_t *ptr; 2140 struct sk_jpool_entry *entry; 2141 int error, i; 2142 2143 mtx_init(&sc_if->sk_jlist_mtx, "sk_jlist_mtx", NULL, MTX_DEF); 2144 SLIST_INIT(&sc_if->sk_jfree_listhead); 2145 SLIST_INIT(&sc_if->sk_jinuse_listhead); 2146 2147 /* create parent tag */ 2148 /* 2149 * XXX 2150 * This driver should use BUS_SPACE_MAXADDR for lowaddr argument 2151 * in bus_dma_tag_create(9) as the NIC would support DAC mode. 2152 * However bz@ reported that it does not work on amd64 with > 4GB 2153 * RAM. Until we have more clues of the breakage, disable DAC mode 2154 * by limiting DMA address to be in 32bit address space. 2155 */ 2156 error = bus_dma_tag_create(NULL, /* parent */ 2157 1, 0, /* algnmnt, boundary */ 2158 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 2159 BUS_SPACE_MAXADDR, /* highaddr */ 2160 NULL, NULL, /* filter, filterarg */ 2161 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 2162 0, /* nsegments */ 2163 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 2164 0, /* flags */ 2165 NULL, NULL, /* lockfunc, lockarg */ 2166 &sc_if->sk_cdata.sk_parent_tag); 2167 if (error != 0) { 2168 device_printf(sc_if->sk_if_dev, 2169 "failed to create parent DMA tag\n"); 2170 goto fail; 2171 } 2172 /* create tag for Tx ring */ 2173 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ 2174 SK_RING_ALIGN, 0, /* algnmnt, boundary */ 2175 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 2176 BUS_SPACE_MAXADDR, /* highaddr */ 2177 NULL, NULL, /* filter, filterarg */ 2178 SK_TX_RING_SZ, /* maxsize */ 2179 1, /* nsegments */ 2180 SK_TX_RING_SZ, /* maxsegsize */ 2181 0, /* flags */ 2182 NULL, NULL, /* lockfunc, lockarg */ 2183 &sc_if->sk_cdata.sk_tx_ring_tag); 2184 if (error != 0) { 2185 device_printf(sc_if->sk_if_dev, 2186 "failed to allocate Tx ring DMA tag\n"); 2187 goto fail; 2188 } 2189 2190 /* create tag for Rx ring */ 2191 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ 2192 SK_RING_ALIGN, 0, /* algnmnt, boundary */ 2193 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 2194 BUS_SPACE_MAXADDR, /* highaddr */ 2195 NULL, NULL, /* filter, filterarg */ 2196 SK_RX_RING_SZ, /* maxsize */ 2197 1, /* nsegments */ 2198 SK_RX_RING_SZ, /* maxsegsize */ 2199 0, /* flags */ 2200 NULL, NULL, /* lockfunc, lockarg */ 2201 &sc_if->sk_cdata.sk_rx_ring_tag); 2202 if (error != 0) { 2203 device_printf(sc_if->sk_if_dev, 2204 "failed to allocate Rx ring DMA tag\n"); 2205 goto fail; 2206 } 2207 2208 /* create tag for jumbo Rx ring */ 2209 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ 2210 SK_RING_ALIGN, 0, /* algnmnt, boundary */ 2211 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 2212 BUS_SPACE_MAXADDR, /* highaddr */ 2213 NULL, NULL, /* filter, filterarg */ 2214 SK_JUMBO_RX_RING_SZ, /* maxsize */ 2215 1, /* nsegments */ 2216 SK_JUMBO_RX_RING_SZ, /* maxsegsize */ 2217 0, /* flags */ 2218 NULL, NULL, /* lockfunc, lockarg */ 2219 &sc_if->sk_cdata.sk_jumbo_rx_ring_tag); 2220 if (error != 0) { 2221 device_printf(sc_if->sk_if_dev, 2222 "failed to allocate jumbo Rx ring DMA tag\n"); 2223 goto fail; 2224 } 2225 2226 /* create tag for jumbo buffer blocks */ 2227 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ 2228 PAGE_SIZE, 0, /* algnmnt, boundary */ 2229 BUS_SPACE_MAXADDR, /* lowaddr */ 2230 BUS_SPACE_MAXADDR, /* highaddr */ 2231 NULL, NULL, /* filter, filterarg */ 2232 SK_JMEM, /* maxsize */ 2233 1, /* nsegments */ 2234 SK_JMEM, /* maxsegsize */ 2235 0, /* flags */ 2236 NULL, NULL, /* lockfunc, lockarg */ 2237 &sc_if->sk_cdata.sk_jumbo_tag); 2238 if (error != 0) { 2239 device_printf(sc_if->sk_if_dev, 2240 "failed to allocate jumbo Rx buffer block DMA tag\n"); 2241 goto fail; 2242 } 2243 2244 /* create tag for Tx buffers */ 2245 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ 2246 1, 0, /* algnmnt, boundary */ 2247 BUS_SPACE_MAXADDR, /* lowaddr */ 2248 BUS_SPACE_MAXADDR, /* highaddr */ 2249 NULL, NULL, /* filter, filterarg */ 2250 MCLBYTES * SK_MAXTXSEGS, /* maxsize */ 2251 SK_MAXTXSEGS, /* nsegments */ 2252 MCLBYTES, /* maxsegsize */ 2253 0, /* flags */ 2254 NULL, NULL, /* lockfunc, lockarg */ 2255 &sc_if->sk_cdata.sk_tx_tag); 2256 if (error != 0) { 2257 device_printf(sc_if->sk_if_dev, 2258 "failed to allocate Tx DMA tag\n"); 2259 goto fail; 2260 } 2261 2262 /* create tag for Rx buffers */ 2263 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ 2264 1, 0, /* algnmnt, boundary */ 2265 BUS_SPACE_MAXADDR, /* lowaddr */ 2266 BUS_SPACE_MAXADDR, /* highaddr */ 2267 NULL, NULL, /* filter, filterarg */ 2268 MCLBYTES, /* maxsize */ 2269 1, /* nsegments */ 2270 MCLBYTES, /* maxsegsize */ 2271 0, /* flags */ 2272 NULL, NULL, /* lockfunc, lockarg */ 2273 &sc_if->sk_cdata.sk_rx_tag); 2274 if (error != 0) { 2275 device_printf(sc_if->sk_if_dev, 2276 "failed to allocate Rx DMA tag\n"); 2277 goto fail; 2278 } 2279 2280 /* create tag for jumbo Rx buffers */ 2281 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ 2282 PAGE_SIZE, 0, /* algnmnt, boundary */ 2283 BUS_SPACE_MAXADDR, /* lowaddr */ 2284 BUS_SPACE_MAXADDR, /* highaddr */ 2285 NULL, NULL, /* filter, filterarg */ 2286 MCLBYTES * SK_MAXRXSEGS, /* maxsize */ 2287 SK_MAXRXSEGS, /* nsegments */ 2288 SK_JLEN, /* maxsegsize */ 2289 0, /* flags */ 2290 NULL, NULL, /* lockfunc, lockarg */ 2291 &sc_if->sk_cdata.sk_jumbo_rx_tag); 2292 if (error != 0) { 2293 device_printf(sc_if->sk_if_dev, 2294 "failed to allocate jumbo Rx DMA tag\n"); 2295 goto fail; 2296 } 2297 2298 /* allocate DMA'able memory and load the DMA map for Tx ring */ 2299 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_tx_ring_tag, 2300 (void **)&sc_if->sk_rdata.sk_tx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO, 2301 &sc_if->sk_cdata.sk_tx_ring_map); 2302 if (error != 0) { 2303 device_printf(sc_if->sk_if_dev, 2304 "failed to allocate DMA'able memory for Tx ring\n"); 2305 goto fail; 2306 } 2307 2308 ctx.sk_busaddr = 0; 2309 error = bus_dmamap_load(sc_if->sk_cdata.sk_tx_ring_tag, 2310 sc_if->sk_cdata.sk_tx_ring_map, sc_if->sk_rdata.sk_tx_ring, 2311 SK_TX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 2312 if (error != 0) { 2313 device_printf(sc_if->sk_if_dev, 2314 "failed to load DMA'able memory for Tx ring\n"); 2315 goto fail; 2316 } 2317 sc_if->sk_rdata.sk_tx_ring_paddr = ctx.sk_busaddr; 2318 2319 /* allocate DMA'able memory and load the DMA map for Rx ring */ 2320 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_rx_ring_tag, 2321 (void **)&sc_if->sk_rdata.sk_rx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO, 2322 &sc_if->sk_cdata.sk_rx_ring_map); 2323 if (error != 0) { 2324 device_printf(sc_if->sk_if_dev, 2325 "failed to allocate DMA'able memory for Rx ring\n"); 2326 goto fail; 2327 } 2328 2329 ctx.sk_busaddr = 0; 2330 error = bus_dmamap_load(sc_if->sk_cdata.sk_rx_ring_tag, 2331 sc_if->sk_cdata.sk_rx_ring_map, sc_if->sk_rdata.sk_rx_ring, 2332 SK_RX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 2333 if (error != 0) { 2334 device_printf(sc_if->sk_if_dev, 2335 "failed to load DMA'able memory for Rx ring\n"); 2336 goto fail; 2337 } 2338 sc_if->sk_rdata.sk_rx_ring_paddr = ctx.sk_busaddr; 2339 2340 /* allocate DMA'able memory and load the DMA map for jumbo Rx ring */ 2341 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, 2342 (void **)&sc_if->sk_rdata.sk_jumbo_rx_ring, 2343 BUS_DMA_NOWAIT|BUS_DMA_ZERO, &sc_if->sk_cdata.sk_jumbo_rx_ring_map); 2344 if (error != 0) { 2345 device_printf(sc_if->sk_if_dev, 2346 "failed to allocate DMA'able memory for jumbo Rx ring\n"); 2347 goto fail; 2348 } 2349 2350 ctx.sk_busaddr = 0; 2351 error = bus_dmamap_load(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, 2352 sc_if->sk_cdata.sk_jumbo_rx_ring_map, 2353 sc_if->sk_rdata.sk_jumbo_rx_ring, SK_JUMBO_RX_RING_SZ, sk_dmamap_cb, 2354 &ctx, BUS_DMA_NOWAIT); 2355 if (error != 0) { 2356 device_printf(sc_if->sk_if_dev, 2357 "failed to load DMA'able memory for jumbo Rx ring\n"); 2358 goto fail; 2359 } 2360 sc_if->sk_rdata.sk_jumbo_rx_ring_paddr = ctx.sk_busaddr; 2361 2362 /* create DMA maps for Tx buffers */ 2363 for (i = 0; i < SK_TX_RING_CNT; i++) { 2364 txd = &sc_if->sk_cdata.sk_txdesc[i]; 2365 txd->tx_m = NULL; 2366 txd->tx_dmamap = 0; 2367 error = bus_dmamap_create(sc_if->sk_cdata.sk_tx_tag, 0, 2368 &txd->tx_dmamap); 2369 if (error != 0) { 2370 device_printf(sc_if->sk_if_dev, 2371 "failed to create Tx dmamap\n"); 2372 goto fail; 2373 } 2374 } 2375 /* create DMA maps for Rx buffers */ 2376 if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0, 2377 &sc_if->sk_cdata.sk_rx_sparemap)) != 0) { 2378 device_printf(sc_if->sk_if_dev, 2379 "failed to create spare Rx dmamap\n"); 2380 goto fail; 2381 } 2382 for (i = 0; i < SK_RX_RING_CNT; i++) { 2383 rxd = &sc_if->sk_cdata.sk_rxdesc[i]; 2384 rxd->rx_m = NULL; 2385 rxd->rx_dmamap = 0; 2386 error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0, 2387 &rxd->rx_dmamap); 2388 if (error != 0) { 2389 device_printf(sc_if->sk_if_dev, 2390 "failed to create Rx dmamap\n"); 2391 goto fail; 2392 } 2393 } 2394 /* create DMA maps for jumbo Rx buffers */ 2395 if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0, 2396 &sc_if->sk_cdata.sk_jumbo_rx_sparemap)) != 0) { 2397 device_printf(sc_if->sk_if_dev, 2398 "failed to create spare jumbo Rx dmamap\n"); 2399 goto fail; 2400 } 2401 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) { 2402 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i]; 2403 jrxd->rx_m = NULL; 2404 jrxd->rx_dmamap = 0; 2405 error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0, 2406 &jrxd->rx_dmamap); 2407 if (error != 0) { 2408 device_printf(sc_if->sk_if_dev, 2409 "failed to create jumbo Rx dmamap\n"); 2410 goto fail; 2411 } 2412 } 2413 2414 /* allocate DMA'able memory and load the DMA map for jumbo buf */ 2415 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_jumbo_tag, 2416 (void **)&sc_if->sk_rdata.sk_jumbo_buf, 2417 BUS_DMA_NOWAIT|BUS_DMA_ZERO, &sc_if->sk_cdata.sk_jumbo_map); 2418 if (error != 0) { 2419 device_printf(sc_if->sk_if_dev, 2420 "failed to allocate DMA'able memory for jumbo buf\n"); 2421 goto fail; 2422 } 2423 2424 ctx.sk_busaddr = 0; 2425 error = bus_dmamap_load(sc_if->sk_cdata.sk_jumbo_tag, 2426 sc_if->sk_cdata.sk_jumbo_map, 2427 sc_if->sk_rdata.sk_jumbo_buf, SK_JMEM, sk_dmamap_cb, 2428 &ctx, BUS_DMA_NOWAIT); 2429 if (error != 0) { 2430 device_printf(sc_if->sk_if_dev, 2431 "failed to load DMA'able memory for jumbobuf\n"); 2432 goto fail; 2433 } 2434 sc_if->sk_rdata.sk_jumbo_buf_paddr = ctx.sk_busaddr; 2435 2436 /* 2437 * Now divide it up into 9K pieces and save the addresses 2438 * in an array. 2439 */ 2440 ptr = sc_if->sk_rdata.sk_jumbo_buf; 2441 for (i = 0; i < SK_JSLOTS; i++) { 2442 sc_if->sk_cdata.sk_jslots[i] = ptr; 2443 ptr += SK_JLEN; 2444 entry = malloc(sizeof(struct sk_jpool_entry), 2445 M_DEVBUF, M_NOWAIT); 2446 if (entry == NULL) { 2447 device_printf(sc_if->sk_if_dev, 2448 "no memory for jumbo buffers!\n"); 2449 error = ENOMEM; 2450 goto fail; 2451 } 2452 entry->slot = i; 2453 SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, 2454 jpool_entries); 2455 } 2456 2457 fail: 2458 return (error); 2459 } 2460 2461 static void 2462 sk_dma_free(sc_if) 2463 struct sk_if_softc *sc_if; 2464 { 2465 struct sk_txdesc *txd; 2466 struct sk_rxdesc *rxd; 2467 struct sk_rxdesc *jrxd; 2468 struct sk_jpool_entry *entry; 2469 int i; 2470 2471 SK_JLIST_LOCK(sc_if); 2472 while ((entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead))) { 2473 device_printf(sc_if->sk_if_dev, 2474 "asked to free buffer that is in use!\n"); 2475 SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries); 2476 SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, 2477 jpool_entries); 2478 } 2479 2480 while (!SLIST_EMPTY(&sc_if->sk_jfree_listhead)) { 2481 entry = SLIST_FIRST(&sc_if->sk_jfree_listhead); 2482 SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries); 2483 free(entry, M_DEVBUF); 2484 } 2485 SK_JLIST_UNLOCK(sc_if); 2486 2487 /* destroy jumbo buffer block */ 2488 if (sc_if->sk_cdata.sk_jumbo_map) 2489 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_tag, 2490 sc_if->sk_cdata.sk_jumbo_map); 2491 2492 if (sc_if->sk_rdata.sk_jumbo_buf) { 2493 bus_dmamem_free(sc_if->sk_cdata.sk_jumbo_tag, 2494 sc_if->sk_rdata.sk_jumbo_buf, 2495 sc_if->sk_cdata.sk_jumbo_map); 2496 sc_if->sk_rdata.sk_jumbo_buf = NULL; 2497 sc_if->sk_cdata.sk_jumbo_map = 0; 2498 } 2499 2500 /* Tx ring */ 2501 if (sc_if->sk_cdata.sk_tx_ring_tag) { 2502 if (sc_if->sk_cdata.sk_tx_ring_map) 2503 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_ring_tag, 2504 sc_if->sk_cdata.sk_tx_ring_map); 2505 if (sc_if->sk_cdata.sk_tx_ring_map && 2506 sc_if->sk_rdata.sk_tx_ring) 2507 bus_dmamem_free(sc_if->sk_cdata.sk_tx_ring_tag, 2508 sc_if->sk_rdata.sk_tx_ring, 2509 sc_if->sk_cdata.sk_tx_ring_map); 2510 sc_if->sk_rdata.sk_tx_ring = NULL; 2511 sc_if->sk_cdata.sk_tx_ring_map = 0; 2512 bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_ring_tag); 2513 sc_if->sk_cdata.sk_tx_ring_tag = NULL; 2514 } 2515 /* Rx ring */ 2516 if (sc_if->sk_cdata.sk_rx_ring_tag) { 2517 if (sc_if->sk_cdata.sk_rx_ring_map) 2518 bus_dmamap_unload(sc_if->sk_cdata.sk_rx_ring_tag, 2519 sc_if->sk_cdata.sk_rx_ring_map); 2520 if (sc_if->sk_cdata.sk_rx_ring_map && 2521 sc_if->sk_rdata.sk_rx_ring) 2522 bus_dmamem_free(sc_if->sk_cdata.sk_rx_ring_tag, 2523 sc_if->sk_rdata.sk_rx_ring, 2524 sc_if->sk_cdata.sk_rx_ring_map); 2525 sc_if->sk_rdata.sk_rx_ring = NULL; 2526 sc_if->sk_cdata.sk_rx_ring_map = 0; 2527 bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_ring_tag); 2528 sc_if->sk_cdata.sk_rx_ring_tag = NULL; 2529 } 2530 /* jumbo Rx ring */ 2531 if (sc_if->sk_cdata.sk_jumbo_rx_ring_tag) { 2532 if (sc_if->sk_cdata.sk_jumbo_rx_ring_map) 2533 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, 2534 sc_if->sk_cdata.sk_jumbo_rx_ring_map); 2535 if (sc_if->sk_cdata.sk_jumbo_rx_ring_map && 2536 sc_if->sk_rdata.sk_jumbo_rx_ring) 2537 bus_dmamem_free(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, 2538 sc_if->sk_rdata.sk_jumbo_rx_ring, 2539 sc_if->sk_cdata.sk_jumbo_rx_ring_map); 2540 sc_if->sk_rdata.sk_jumbo_rx_ring = NULL; 2541 sc_if->sk_cdata.sk_jumbo_rx_ring_map = 0; 2542 bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_ring_tag); 2543 sc_if->sk_cdata.sk_jumbo_rx_ring_tag = NULL; 2544 } 2545 /* Tx buffers */ 2546 if (sc_if->sk_cdata.sk_tx_tag) { 2547 for (i = 0; i < SK_TX_RING_CNT; i++) { 2548 txd = &sc_if->sk_cdata.sk_txdesc[i]; 2549 if (txd->tx_dmamap) { 2550 bus_dmamap_destroy(sc_if->sk_cdata.sk_tx_tag, 2551 txd->tx_dmamap); 2552 txd->tx_dmamap = 0; 2553 } 2554 } 2555 bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_tag); 2556 sc_if->sk_cdata.sk_tx_tag = NULL; 2557 } 2558 /* Rx buffers */ 2559 if (sc_if->sk_cdata.sk_rx_tag) { 2560 for (i = 0; i < SK_RX_RING_CNT; i++) { 2561 rxd = &sc_if->sk_cdata.sk_rxdesc[i]; 2562 if (rxd->rx_dmamap) { 2563 bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag, 2564 rxd->rx_dmamap); 2565 rxd->rx_dmamap = 0; 2566 } 2567 } 2568 if (sc_if->sk_cdata.sk_rx_sparemap) { 2569 bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag, 2570 sc_if->sk_cdata.sk_rx_sparemap); 2571 sc_if->sk_cdata.sk_rx_sparemap = 0; 2572 } 2573 bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_tag); 2574 sc_if->sk_cdata.sk_rx_tag = NULL; 2575 } 2576 /* jumbo Rx buffers */ 2577 if (sc_if->sk_cdata.sk_jumbo_rx_tag) { 2578 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) { 2579 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i]; 2580 if (jrxd->rx_dmamap) { 2581 bus_dmamap_destroy( 2582 sc_if->sk_cdata.sk_jumbo_rx_tag, 2583 jrxd->rx_dmamap); 2584 jrxd->rx_dmamap = 0; 2585 } 2586 } 2587 if (sc_if->sk_cdata.sk_jumbo_rx_sparemap) { 2588 bus_dmamap_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag, 2589 sc_if->sk_cdata.sk_jumbo_rx_sparemap); 2590 sc_if->sk_cdata.sk_jumbo_rx_sparemap = 0; 2591 } 2592 bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag); 2593 sc_if->sk_cdata.sk_jumbo_rx_tag = NULL; 2594 } 2595 2596 if (sc_if->sk_cdata.sk_parent_tag) { 2597 bus_dma_tag_destroy(sc_if->sk_cdata.sk_parent_tag); 2598 sc_if->sk_cdata.sk_parent_tag = NULL; 2599 } 2600 mtx_destroy(&sc_if->sk_jlist_mtx); 2601 } 2602 2603 /* 2604 * Allocate a jumbo buffer. 2605 */ 2606 static void * 2607 sk_jalloc(sc_if) 2608 struct sk_if_softc *sc_if; 2609 { 2610 struct sk_jpool_entry *entry; 2611 2612 SK_JLIST_LOCK(sc_if); 2613 2614 entry = SLIST_FIRST(&sc_if->sk_jfree_listhead); 2615 2616 if (entry == NULL) { 2617 SK_JLIST_UNLOCK(sc_if); 2618 return (NULL); 2619 } 2620 2621 SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries); 2622 SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries); 2623 2624 SK_JLIST_UNLOCK(sc_if); 2625 2626 return (sc_if->sk_cdata.sk_jslots[entry->slot]); 2627 } 2628 2629 /* 2630 * Release a jumbo buffer. 2631 */ 2632 static void 2633 sk_jfree(buf, args) 2634 void *buf; 2635 void *args; 2636 { 2637 struct sk_if_softc *sc_if; 2638 struct sk_jpool_entry *entry; 2639 int i; 2640 2641 /* Extract the softc struct pointer. */ 2642 sc_if = (struct sk_if_softc *)args; 2643 KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__)); 2644 2645 SK_JLIST_LOCK(sc_if); 2646 /* calculate the slot this buffer belongs to */ 2647 i = ((vm_offset_t)buf 2648 - (vm_offset_t)sc_if->sk_rdata.sk_jumbo_buf) / SK_JLEN; 2649 KASSERT(i >= 0 && i < SK_JSLOTS, 2650 ("%s: asked to free buffer that we don't manage!", __func__)); 2651 2652 entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead); 2653 KASSERT(entry != NULL, ("%s: buffer not in use!", __func__)); 2654 entry->slot = i; 2655 SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries); 2656 SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries); 2657 if (SLIST_EMPTY(&sc_if->sk_jinuse_listhead)) 2658 wakeup(sc_if); 2659 2660 SK_JLIST_UNLOCK(sc_if); 2661 } 2662 2663 static void 2664 sk_txcksum(ifp, m, f) 2665 struct ifnet *ifp; 2666 struct mbuf *m; 2667 struct sk_tx_desc *f; 2668 { 2669 struct ip *ip; 2670 u_int16_t offset; 2671 u_int8_t *p; 2672 2673 offset = sizeof(struct ip) + ETHER_HDR_LEN; 2674 for(; m && m->m_len == 0; m = m->m_next) 2675 ; 2676 if (m == NULL || m->m_len < ETHER_HDR_LEN) { 2677 if_printf(ifp, "%s: m_len < ETHER_HDR_LEN\n", __func__); 2678 /* checksum may be corrupted */ 2679 goto sendit; 2680 } 2681 if (m->m_len < ETHER_HDR_LEN + sizeof(u_int32_t)) { 2682 if (m->m_len != ETHER_HDR_LEN) { 2683 if_printf(ifp, "%s: m_len != ETHER_HDR_LEN\n", 2684 __func__); 2685 /* checksum may be corrupted */ 2686 goto sendit; 2687 } 2688 for(m = m->m_next; m && m->m_len == 0; m = m->m_next) 2689 ; 2690 if (m == NULL) { 2691 offset = sizeof(struct ip) + ETHER_HDR_LEN; 2692 /* checksum may be corrupted */ 2693 goto sendit; 2694 } 2695 ip = mtod(m, struct ip *); 2696 } else { 2697 p = mtod(m, u_int8_t *); 2698 p += ETHER_HDR_LEN; 2699 ip = (struct ip *)p; 2700 } 2701 offset = (ip->ip_hl << 2) + ETHER_HDR_LEN; 2702 2703 sendit: 2704 f->sk_csum_startval = 0; 2705 f->sk_csum_start = htole32(((offset + m->m_pkthdr.csum_data) & 0xffff) | 2706 (offset << 16)); 2707 } 2708 2709 static int 2710 sk_encap(sc_if, m_head) 2711 struct sk_if_softc *sc_if; 2712 struct mbuf **m_head; 2713 { 2714 struct sk_txdesc *txd; 2715 struct sk_tx_desc *f = NULL; 2716 struct mbuf *m, *n; 2717 bus_dma_segment_t txsegs[SK_MAXTXSEGS]; 2718 u_int32_t cflags, frag, si, sk_ctl; 2719 int error, i, nseg; 2720 2721 SK_IF_LOCK_ASSERT(sc_if); 2722 2723 if ((txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txfreeq)) == NULL) 2724 return (ENOBUFS); 2725 2726 m = *m_head; 2727 error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag, 2728 txd->tx_dmamap, m, txsegs, &nseg, 0); 2729 if (error == EFBIG) { 2730 n = m_defrag(m, M_DONTWAIT); 2731 if (n == NULL) { 2732 m_freem(m); 2733 m = NULL; 2734 return (ENOMEM); 2735 } 2736 m = n; 2737 error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag, 2738 txd->tx_dmamap, m, txsegs, &nseg, 0); 2739 if (error != 0) { 2740 m_freem(m); 2741 m = NULL; 2742 return (error); 2743 } 2744 } else if (error != 0) 2745 return (error); 2746 if (nseg == 0) { 2747 m_freem(m); 2748 m = NULL; 2749 return (EIO); 2750 } 2751 if (sc_if->sk_cdata.sk_tx_cnt + nseg >= SK_TX_RING_CNT) { 2752 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap); 2753 return (ENOBUFS); 2754 } 2755 2756 if ((m->m_pkthdr.csum_flags & sc_if->sk_ifp->if_hwassist) != 0) 2757 cflags = SK_OPCODE_CSUM; 2758 else 2759 cflags = SK_OPCODE_DEFAULT; 2760 si = frag = sc_if->sk_cdata.sk_tx_prod; 2761 for (i = 0; i < nseg; i++) { 2762 f = &sc_if->sk_rdata.sk_tx_ring[frag]; 2763 f->sk_data_lo = htole32(SK_ADDR_LO(txsegs[i].ds_addr)); 2764 f->sk_data_hi = htole32(SK_ADDR_HI(txsegs[i].ds_addr)); 2765 sk_ctl = txsegs[i].ds_len | cflags; 2766 if (i == 0) { 2767 if (cflags == SK_OPCODE_CSUM) 2768 sk_txcksum(sc_if->sk_ifp, m, f); 2769 sk_ctl |= SK_TXCTL_FIRSTFRAG; 2770 } else 2771 sk_ctl |= SK_TXCTL_OWN; 2772 f->sk_ctl = htole32(sk_ctl); 2773 sc_if->sk_cdata.sk_tx_cnt++; 2774 SK_INC(frag, SK_TX_RING_CNT); 2775 } 2776 sc_if->sk_cdata.sk_tx_prod = frag; 2777 2778 /* set EOF on the last desciptor */ 2779 frag = (frag + SK_TX_RING_CNT - 1) % SK_TX_RING_CNT; 2780 f = &sc_if->sk_rdata.sk_tx_ring[frag]; 2781 f->sk_ctl |= htole32(SK_TXCTL_LASTFRAG | SK_TXCTL_EOF_INTR); 2782 2783 /* turn the first descriptor ownership to NIC */ 2784 f = &sc_if->sk_rdata.sk_tx_ring[si]; 2785 f->sk_ctl |= htole32(SK_TXCTL_OWN); 2786 2787 STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txfreeq, tx_q); 2788 STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txbusyq, txd, tx_q); 2789 txd->tx_m = m; 2790 2791 /* sync descriptors */ 2792 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap, 2793 BUS_DMASYNC_PREWRITE); 2794 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag, 2795 sc_if->sk_cdata.sk_tx_ring_map, 2796 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2797 2798 return (0); 2799 } 2800 2801 static void 2802 sk_start(ifp) 2803 struct ifnet *ifp; 2804 { 2805 struct sk_if_softc *sc_if; 2806 2807 sc_if = ifp->if_softc; 2808 2809 SK_IF_LOCK(sc_if); 2810 sk_start_locked(ifp); 2811 SK_IF_UNLOCK(sc_if); 2812 2813 return; 2814 } 2815 2816 static void 2817 sk_start_locked(ifp) 2818 struct ifnet *ifp; 2819 { 2820 struct sk_softc *sc; 2821 struct sk_if_softc *sc_if; 2822 struct mbuf *m_head; 2823 int enq; 2824 2825 sc_if = ifp->if_softc; 2826 sc = sc_if->sk_softc; 2827 2828 SK_IF_LOCK_ASSERT(sc_if); 2829 2830 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 2831 sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 1; ) { 2832 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 2833 if (m_head == NULL) 2834 break; 2835 2836 /* 2837 * Pack the data into the transmit ring. If we 2838 * don't have room, set the OACTIVE flag and wait 2839 * for the NIC to drain the ring. 2840 */ 2841 if (sk_encap(sc_if, &m_head)) { 2842 if (m_head == NULL) 2843 break; 2844 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 2845 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2846 break; 2847 } 2848 2849 enq++; 2850 /* 2851 * If there's a BPF listener, bounce a copy of this frame 2852 * to him. 2853 */ 2854 BPF_MTAP(ifp, m_head); 2855 } 2856 2857 if (enq > 0) { 2858 /* Transmit */ 2859 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START); 2860 2861 /* Set a timeout in case the chip goes out to lunch. */ 2862 ifp->if_timer = 5; 2863 } 2864 } 2865 2866 2867 static void 2868 sk_watchdog(ifp) 2869 struct ifnet *ifp; 2870 { 2871 struct sk_if_softc *sc_if; 2872 2873 sc_if = ifp->if_softc; 2874 2875 SK_IF_LOCK(sc_if); 2876 /* 2877 * Reclaim first as there is a possibility of losing Tx completion 2878 * interrupts. 2879 */ 2880 sk_txeof(sc_if); 2881 if (sc_if->sk_cdata.sk_tx_cnt != 0) { 2882 if_printf(sc_if->sk_ifp, "watchdog timeout\n"); 2883 ifp->if_oerrors++; 2884 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2885 sk_init_locked(sc_if); 2886 } 2887 SK_IF_UNLOCK(sc_if); 2888 2889 return; 2890 } 2891 2892 static void 2893 skc_shutdown(dev) 2894 device_t dev; 2895 { 2896 struct sk_softc *sc; 2897 2898 sc = device_get_softc(dev); 2899 SK_LOCK(sc); 2900 2901 /* Turn off the 'driver is loaded' LED. */ 2902 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF); 2903 2904 /* 2905 * Reset the GEnesis controller. Doing this should also 2906 * assert the resets on the attached XMAC(s). 2907 */ 2908 sk_reset(sc); 2909 SK_UNLOCK(sc); 2910 2911 return; 2912 } 2913 2914 static int 2915 skc_suspend(dev) 2916 device_t dev; 2917 { 2918 struct sk_softc *sc; 2919 struct sk_if_softc *sc_if0, *sc_if1; 2920 struct ifnet *ifp0 = NULL, *ifp1 = NULL; 2921 2922 sc = device_get_softc(dev); 2923 2924 SK_LOCK(sc); 2925 2926 sc_if0 = sc->sk_if[SK_PORT_A]; 2927 sc_if1 = sc->sk_if[SK_PORT_B]; 2928 if (sc_if0 != NULL) 2929 ifp0 = sc_if0->sk_ifp; 2930 if (sc_if1 != NULL) 2931 ifp1 = sc_if1->sk_ifp; 2932 if (ifp0 != NULL) 2933 sk_stop(sc_if0); 2934 if (ifp1 != NULL) 2935 sk_stop(sc_if1); 2936 sc->sk_suspended = 1; 2937 2938 SK_UNLOCK(sc); 2939 2940 return (0); 2941 } 2942 2943 static int 2944 skc_resume(dev) 2945 device_t dev; 2946 { 2947 struct sk_softc *sc; 2948 struct sk_if_softc *sc_if0, *sc_if1; 2949 struct ifnet *ifp0 = NULL, *ifp1 = NULL; 2950 2951 sc = device_get_softc(dev); 2952 2953 SK_LOCK(sc); 2954 2955 sc_if0 = sc->sk_if[SK_PORT_A]; 2956 sc_if1 = sc->sk_if[SK_PORT_B]; 2957 if (sc_if0 != NULL) 2958 ifp0 = sc_if0->sk_ifp; 2959 if (sc_if1 != NULL) 2960 ifp1 = sc_if1->sk_ifp; 2961 if (ifp0 != NULL && ifp0->if_flags & IFF_UP) 2962 sk_init_locked(sc_if0); 2963 if (ifp1 != NULL && ifp1->if_flags & IFF_UP) 2964 sk_init_locked(sc_if1); 2965 sc->sk_suspended = 0; 2966 2967 SK_UNLOCK(sc); 2968 2969 return (0); 2970 } 2971 2972 /* 2973 * According to the data sheet from SK-NET GENESIS the hardware can compute 2974 * two Rx checksums at the same time(Each checksum start position is 2975 * programmed in Rx descriptors). However it seems that TCP/UDP checksum 2976 * does not work at least on my Yukon hardware. I tried every possible ways 2977 * to get correct checksum value but couldn't get correct one. So TCP/UDP 2978 * checksum offload was disabled at the moment and only IP checksum offload 2979 * was enabled. 2980 * As nomral IP header size is 20 bytes I can't expect it would give an 2981 * increase in throughput. However it seems it doesn't hurt performance in 2982 * my testing. If there is a more detailed information for checksum secret 2983 * of the hardware in question please contact yongari@FreeBSD.org to add 2984 * TCP/UDP checksum offload support. 2985 */ 2986 static __inline void 2987 sk_rxcksum(ifp, m, csum) 2988 struct ifnet *ifp; 2989 struct mbuf *m; 2990 u_int32_t csum; 2991 { 2992 struct ether_header *eh; 2993 struct ip *ip; 2994 int32_t hlen, len, pktlen; 2995 u_int16_t csum1, csum2, ipcsum; 2996 2997 pktlen = m->m_pkthdr.len; 2998 if (pktlen < sizeof(struct ether_header) + sizeof(struct ip)) 2999 return; 3000 eh = mtod(m, struct ether_header *); 3001 if (eh->ether_type != htons(ETHERTYPE_IP)) 3002 return; 3003 ip = (struct ip *)(eh + 1); 3004 if (ip->ip_v != IPVERSION) 3005 return; 3006 hlen = ip->ip_hl << 2; 3007 pktlen -= sizeof(struct ether_header); 3008 if (hlen < sizeof(struct ip)) 3009 return; 3010 if (ntohs(ip->ip_len) < hlen) 3011 return; 3012 if (ntohs(ip->ip_len) != pktlen) 3013 return; 3014 3015 csum1 = htons(csum & 0xffff); 3016 csum2 = htons((csum >> 16) & 0xffff); 3017 ipcsum = in_addword(csum1, ~csum2 & 0xffff); 3018 /* checksum fixup for IP options */ 3019 len = hlen - sizeof(struct ip); 3020 if (len > 0) { 3021 /* 3022 * If the second checksum value is correct we can compute IP 3023 * checksum with simple math. Unfortunately the second checksum 3024 * value is wrong so we can't verify the checksum from the 3025 * value(It seems there is some magic here to get correct 3026 * value). If the second checksum value is correct it also 3027 * means we can get TCP/UDP checksum) here. However, it still 3028 * needs pseudo header checksum calculation due to hardware 3029 * limitations. 3030 */ 3031 return; 3032 } 3033 m->m_pkthdr.csum_flags = CSUM_IP_CHECKED; 3034 if (ipcsum == 0xffff) 3035 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 3036 } 3037 3038 static __inline int 3039 sk_rxvalid(sc, stat, len) 3040 struct sk_softc *sc; 3041 u_int32_t stat, len; 3042 { 3043 3044 if (sc->sk_type == SK_GENESIS) { 3045 if ((stat & XM_RXSTAT_ERRFRAME) == XM_RXSTAT_ERRFRAME || 3046 XM_RXSTAT_BYTES(stat) != len) 3047 return (0); 3048 } else { 3049 if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR | 3050 YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | 3051 YU_RXSTAT_JABBER)) != 0 || 3052 (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK || 3053 YU_RXSTAT_BYTES(stat) != len) 3054 return (0); 3055 } 3056 3057 return (1); 3058 } 3059 3060 static void 3061 sk_rxeof(sc_if) 3062 struct sk_if_softc *sc_if; 3063 { 3064 struct sk_softc *sc; 3065 struct mbuf *m; 3066 struct ifnet *ifp; 3067 struct sk_rx_desc *cur_rx; 3068 struct sk_rxdesc *rxd; 3069 int cons, prog; 3070 u_int32_t csum, rxstat, sk_ctl; 3071 3072 sc = sc_if->sk_softc; 3073 ifp = sc_if->sk_ifp; 3074 3075 SK_IF_LOCK_ASSERT(sc_if); 3076 3077 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag, 3078 sc_if->sk_cdata.sk_rx_ring_map, BUS_DMASYNC_POSTREAD); 3079 3080 prog = 0; 3081 for (cons = sc_if->sk_cdata.sk_rx_cons; prog < SK_RX_RING_CNT; 3082 prog++, SK_INC(cons, SK_RX_RING_CNT)) { 3083 cur_rx = &sc_if->sk_rdata.sk_rx_ring[cons]; 3084 sk_ctl = le32toh(cur_rx->sk_ctl); 3085 if ((sk_ctl & SK_RXCTL_OWN) != 0) 3086 break; 3087 rxd = &sc_if->sk_cdata.sk_rxdesc[cons]; 3088 rxstat = le32toh(cur_rx->sk_xmac_rxstat); 3089 3090 if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG | 3091 SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID | 3092 SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) || 3093 SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN || 3094 SK_RXBYTES(sk_ctl) > SK_MAX_FRAMELEN || 3095 sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) { 3096 ifp->if_ierrors++; 3097 sk_discard_rxbuf(sc_if, cons); 3098 continue; 3099 } 3100 3101 m = rxd->rx_m; 3102 csum = le32toh(cur_rx->sk_csum); 3103 if (sk_newbuf(sc_if, cons) != 0) { 3104 ifp->if_iqdrops++; 3105 /* reuse old buffer */ 3106 sk_discard_rxbuf(sc_if, cons); 3107 continue; 3108 } 3109 m->m_pkthdr.rcvif = ifp; 3110 m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl); 3111 ifp->if_ipackets++; 3112 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 3113 sk_rxcksum(ifp, m, csum); 3114 SK_IF_UNLOCK(sc_if); 3115 (*ifp->if_input)(ifp, m); 3116 SK_IF_LOCK(sc_if); 3117 } 3118 3119 if (prog > 0) { 3120 sc_if->sk_cdata.sk_rx_cons = cons; 3121 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag, 3122 sc_if->sk_cdata.sk_rx_ring_map, 3123 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3124 } 3125 } 3126 3127 static void 3128 sk_jumbo_rxeof(sc_if) 3129 struct sk_if_softc *sc_if; 3130 { 3131 struct sk_softc *sc; 3132 struct mbuf *m; 3133 struct ifnet *ifp; 3134 struct sk_rx_desc *cur_rx; 3135 struct sk_rxdesc *jrxd; 3136 int cons, prog; 3137 u_int32_t csum, rxstat, sk_ctl; 3138 3139 sc = sc_if->sk_softc; 3140 ifp = sc_if->sk_ifp; 3141 3142 SK_IF_LOCK_ASSERT(sc_if); 3143 3144 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, 3145 sc_if->sk_cdata.sk_jumbo_rx_ring_map, BUS_DMASYNC_POSTREAD); 3146 3147 prog = 0; 3148 for (cons = sc_if->sk_cdata.sk_jumbo_rx_cons; 3149 prog < SK_JUMBO_RX_RING_CNT; 3150 prog++, SK_INC(cons, SK_JUMBO_RX_RING_CNT)) { 3151 cur_rx = &sc_if->sk_rdata.sk_jumbo_rx_ring[cons]; 3152 sk_ctl = le32toh(cur_rx->sk_ctl); 3153 if ((sk_ctl & SK_RXCTL_OWN) != 0) 3154 break; 3155 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[cons]; 3156 rxstat = le32toh(cur_rx->sk_xmac_rxstat); 3157 3158 if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG | 3159 SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID | 3160 SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) || 3161 SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN || 3162 SK_RXBYTES(sk_ctl) > SK_JUMBO_FRAMELEN || 3163 sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) { 3164 ifp->if_ierrors++; 3165 sk_discard_jumbo_rxbuf(sc_if, cons); 3166 continue; 3167 } 3168 3169 m = jrxd->rx_m; 3170 csum = le32toh(cur_rx->sk_csum); 3171 if (sk_jumbo_newbuf(sc_if, cons) != 0) { 3172 ifp->if_iqdrops++; 3173 /* reuse old buffer */ 3174 sk_discard_jumbo_rxbuf(sc_if, cons); 3175 continue; 3176 } 3177 m->m_pkthdr.rcvif = ifp; 3178 m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl); 3179 ifp->if_ipackets++; 3180 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 3181 sk_rxcksum(ifp, m, csum); 3182 SK_IF_UNLOCK(sc_if); 3183 (*ifp->if_input)(ifp, m); 3184 SK_IF_LOCK(sc_if); 3185 } 3186 3187 if (prog > 0) { 3188 sc_if->sk_cdata.sk_jumbo_rx_cons = cons; 3189 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, 3190 sc_if->sk_cdata.sk_jumbo_rx_ring_map, 3191 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3192 } 3193 } 3194 3195 static void 3196 sk_txeof(sc_if) 3197 struct sk_if_softc *sc_if; 3198 { 3199 struct sk_softc *sc; 3200 struct sk_txdesc *txd; 3201 struct sk_tx_desc *cur_tx; 3202 struct ifnet *ifp; 3203 u_int32_t idx, sk_ctl; 3204 3205 sc = sc_if->sk_softc; 3206 ifp = sc_if->sk_ifp; 3207 3208 txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq); 3209 if (txd == NULL) 3210 return; 3211 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag, 3212 sc_if->sk_cdata.sk_tx_ring_map, BUS_DMASYNC_POSTREAD); 3213 /* 3214 * Go through our tx ring and free mbufs for those 3215 * frames that have been sent. 3216 */ 3217 for (idx = sc_if->sk_cdata.sk_tx_cons;; SK_INC(idx, SK_TX_RING_CNT)) { 3218 if (sc_if->sk_cdata.sk_tx_cnt <= 0) 3219 break; 3220 cur_tx = &sc_if->sk_rdata.sk_tx_ring[idx]; 3221 sk_ctl = le32toh(cur_tx->sk_ctl); 3222 if (sk_ctl & SK_TXCTL_OWN) 3223 break; 3224 sc_if->sk_cdata.sk_tx_cnt--; 3225 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3226 if ((sk_ctl & SK_TXCTL_LASTFRAG) == 0) 3227 continue; 3228 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap, 3229 BUS_DMASYNC_POSTWRITE); 3230 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap); 3231 3232 ifp->if_opackets++; 3233 m_freem(txd->tx_m); 3234 txd->tx_m = NULL; 3235 STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txbusyq, tx_q); 3236 STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q); 3237 txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq); 3238 } 3239 sc_if->sk_cdata.sk_tx_cons = idx; 3240 ifp->if_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 5 : 0; 3241 3242 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag, 3243 sc_if->sk_cdata.sk_tx_ring_map, 3244 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3245 } 3246 3247 static void 3248 sk_tick(xsc_if) 3249 void *xsc_if; 3250 { 3251 struct sk_if_softc *sc_if; 3252 struct mii_data *mii; 3253 struct ifnet *ifp; 3254 int i; 3255 3256 sc_if = xsc_if; 3257 ifp = sc_if->sk_ifp; 3258 mii = device_get_softc(sc_if->sk_miibus); 3259 3260 if (!(ifp->if_flags & IFF_UP)) 3261 return; 3262 3263 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { 3264 sk_intr_bcom(sc_if); 3265 return; 3266 } 3267 3268 /* 3269 * According to SysKonnect, the correct way to verify that 3270 * the link has come back up is to poll bit 0 of the GPIO 3271 * register three times. This pin has the signal from the 3272 * link_sync pin connected to it; if we read the same link 3273 * state 3 times in a row, we know the link is up. 3274 */ 3275 for (i = 0; i < 3; i++) { 3276 if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET) 3277 break; 3278 } 3279 3280 if (i != 3) { 3281 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if); 3282 return; 3283 } 3284 3285 /* Turn the GP0 interrupt back on. */ 3286 SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET); 3287 SK_XM_READ_2(sc_if, XM_ISR); 3288 mii_tick(mii); 3289 callout_stop(&sc_if->sk_tick_ch); 3290 } 3291 3292 static void 3293 sk_yukon_tick(xsc_if) 3294 void *xsc_if; 3295 { 3296 struct sk_if_softc *sc_if; 3297 struct mii_data *mii; 3298 3299 sc_if = xsc_if; 3300 mii = device_get_softc(sc_if->sk_miibus); 3301 3302 mii_tick(mii); 3303 callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if); 3304 } 3305 3306 static void 3307 sk_intr_bcom(sc_if) 3308 struct sk_if_softc *sc_if; 3309 { 3310 struct mii_data *mii; 3311 struct ifnet *ifp; 3312 int status; 3313 mii = device_get_softc(sc_if->sk_miibus); 3314 ifp = sc_if->sk_ifp; 3315 3316 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); 3317 3318 /* 3319 * Read the PHY interrupt register to make sure 3320 * we clear any pending interrupts. 3321 */ 3322 status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR); 3323 3324 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 3325 sk_init_xmac(sc_if); 3326 return; 3327 } 3328 3329 if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) { 3330 int lstat; 3331 lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 3332 BRGPHY_MII_AUXSTS); 3333 3334 if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) { 3335 mii_mediachg(mii); 3336 /* Turn off the link LED. */ 3337 SK_IF_WRITE_1(sc_if, 0, 3338 SK_LINKLED1_CTL, SK_LINKLED_OFF); 3339 sc_if->sk_link = 0; 3340 } else if (status & BRGPHY_ISR_LNK_CHG) { 3341 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, 3342 BRGPHY_MII_IMR, 0xFF00); 3343 mii_tick(mii); 3344 sc_if->sk_link = 1; 3345 /* Turn on the link LED. */ 3346 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, 3347 SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF| 3348 SK_LINKLED_BLINK_OFF); 3349 } else { 3350 mii_tick(mii); 3351 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if); 3352 } 3353 } 3354 3355 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); 3356 3357 return; 3358 } 3359 3360 static void 3361 sk_intr_xmac(sc_if) 3362 struct sk_if_softc *sc_if; 3363 { 3364 struct sk_softc *sc; 3365 u_int16_t status; 3366 3367 sc = sc_if->sk_softc; 3368 status = SK_XM_READ_2(sc_if, XM_ISR); 3369 3370 /* 3371 * Link has gone down. Start MII tick timeout to 3372 * watch for link resync. 3373 */ 3374 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) { 3375 if (status & XM_ISR_GP0_SET) { 3376 SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET); 3377 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if); 3378 } 3379 3380 if (status & XM_ISR_AUTONEG_DONE) { 3381 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if); 3382 } 3383 } 3384 3385 if (status & XM_IMR_TX_UNDERRUN) 3386 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO); 3387 3388 if (status & XM_IMR_RX_OVERRUN) 3389 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO); 3390 3391 status = SK_XM_READ_2(sc_if, XM_ISR); 3392 3393 return; 3394 } 3395 3396 static void 3397 sk_intr_yukon(sc_if) 3398 struct sk_if_softc *sc_if; 3399 { 3400 u_int8_t status; 3401 3402 status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR); 3403 /* RX overrun */ 3404 if ((status & SK_GMAC_INT_RX_OVER) != 0) { 3405 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, 3406 SK_RFCTL_RX_FIFO_OVER); 3407 } 3408 /* TX underrun */ 3409 if ((status & SK_GMAC_INT_TX_UNDER) != 0) { 3410 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, 3411 SK_TFCTL_TX_FIFO_UNDER); 3412 } 3413 } 3414 3415 static void 3416 sk_intr(xsc) 3417 void *xsc; 3418 { 3419 struct sk_softc *sc = xsc; 3420 struct sk_if_softc *sc_if0, *sc_if1; 3421 struct ifnet *ifp0 = NULL, *ifp1 = NULL; 3422 u_int32_t status; 3423 3424 SK_LOCK(sc); 3425 3426 status = CSR_READ_4(sc, SK_ISSR); 3427 if (status == 0 || status == 0xffffffff || sc->sk_suspended) 3428 goto done_locked; 3429 3430 sc_if0 = sc->sk_if[SK_PORT_A]; 3431 sc_if1 = sc->sk_if[SK_PORT_B]; 3432 3433 if (sc_if0 != NULL) 3434 ifp0 = sc_if0->sk_ifp; 3435 if (sc_if1 != NULL) 3436 ifp1 = sc_if1->sk_ifp; 3437 3438 for (; (status &= sc->sk_intrmask) != 0;) { 3439 /* Handle receive interrupts first. */ 3440 if (status & SK_ISR_RX1_EOF) { 3441 if (ifp0->if_mtu > SK_MAX_FRAMELEN) 3442 sk_jumbo_rxeof(sc_if0); 3443 else 3444 sk_rxeof(sc_if0); 3445 CSR_WRITE_4(sc, SK_BMU_RX_CSR0, 3446 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START); 3447 } 3448 if (status & SK_ISR_RX2_EOF) { 3449 if (ifp1->if_mtu > SK_MAX_FRAMELEN) 3450 sk_jumbo_rxeof(sc_if1); 3451 else 3452 sk_rxeof(sc_if1); 3453 CSR_WRITE_4(sc, SK_BMU_RX_CSR1, 3454 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START); 3455 } 3456 3457 /* Then transmit interrupts. */ 3458 if (status & SK_ISR_TX1_S_EOF) { 3459 sk_txeof(sc_if0); 3460 CSR_WRITE_4(sc, SK_BMU_TXS_CSR0, SK_TXBMU_CLR_IRQ_EOF); 3461 } 3462 if (status & SK_ISR_TX2_S_EOF) { 3463 sk_txeof(sc_if1); 3464 CSR_WRITE_4(sc, SK_BMU_TXS_CSR1, SK_TXBMU_CLR_IRQ_EOF); 3465 } 3466 3467 /* Then MAC interrupts. */ 3468 if (status & SK_ISR_MAC1 && 3469 ifp0->if_drv_flags & IFF_DRV_RUNNING) { 3470 if (sc->sk_type == SK_GENESIS) 3471 sk_intr_xmac(sc_if0); 3472 else 3473 sk_intr_yukon(sc_if0); 3474 } 3475 3476 if (status & SK_ISR_MAC2 && 3477 ifp1->if_drv_flags & IFF_DRV_RUNNING) { 3478 if (sc->sk_type == SK_GENESIS) 3479 sk_intr_xmac(sc_if1); 3480 else 3481 sk_intr_yukon(sc_if1); 3482 } 3483 3484 if (status & SK_ISR_EXTERNAL_REG) { 3485 if (ifp0 != NULL && 3486 sc_if0->sk_phytype == SK_PHYTYPE_BCOM) 3487 sk_intr_bcom(sc_if0); 3488 if (ifp1 != NULL && 3489 sc_if1->sk_phytype == SK_PHYTYPE_BCOM) 3490 sk_intr_bcom(sc_if1); 3491 } 3492 status = CSR_READ_4(sc, SK_ISSR); 3493 } 3494 3495 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); 3496 3497 if (ifp0 != NULL && !IFQ_DRV_IS_EMPTY(&ifp0->if_snd)) 3498 sk_start_locked(ifp0); 3499 if (ifp1 != NULL && !IFQ_DRV_IS_EMPTY(&ifp1->if_snd)) 3500 sk_start_locked(ifp1); 3501 3502 done_locked: 3503 SK_UNLOCK(sc); 3504 } 3505 3506 static void 3507 sk_init_xmac(sc_if) 3508 struct sk_if_softc *sc_if; 3509 { 3510 struct sk_softc *sc; 3511 struct ifnet *ifp; 3512 u_int16_t eaddr[(ETHER_ADDR_LEN+1)/2]; 3513 struct sk_bcom_hack bhack[] = { 3514 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 }, 3515 { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 }, 3516 { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 }, 3517 { 0, 0 } }; 3518 3519 SK_IF_LOCK_ASSERT(sc_if); 3520 3521 sc = sc_if->sk_softc; 3522 ifp = sc_if->sk_ifp; 3523 3524 /* Unreset the XMAC. */ 3525 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET); 3526 DELAY(1000); 3527 3528 /* Reset the XMAC's internal state. */ 3529 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC); 3530 3531 /* Save the XMAC II revision */ 3532 sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID)); 3533 3534 /* 3535 * Perform additional initialization for external PHYs, 3536 * namely for the 1000baseTX cards that use the XMAC's 3537 * GMII mode. 3538 */ 3539 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { 3540 int i = 0; 3541 u_int32_t val; 3542 3543 /* Take PHY out of reset. */ 3544 val = sk_win_read_4(sc, SK_GPIO); 3545 if (sc_if->sk_port == SK_PORT_A) 3546 val |= SK_GPIO_DIR0|SK_GPIO_DAT0; 3547 else 3548 val |= SK_GPIO_DIR2|SK_GPIO_DAT2; 3549 sk_win_write_4(sc, SK_GPIO, val); 3550 3551 /* Enable GMII mode on the XMAC. */ 3552 SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE); 3553 3554 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, 3555 BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET); 3556 DELAY(10000); 3557 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, 3558 BRGPHY_MII_IMR, 0xFFF0); 3559 3560 /* 3561 * Early versions of the BCM5400 apparently have 3562 * a bug that requires them to have their reserved 3563 * registers initialized to some magic values. I don't 3564 * know what the numbers do, I'm just the messenger. 3565 */ 3566 if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03) 3567 == 0x6041) { 3568 while(bhack[i].reg) { 3569 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, 3570 bhack[i].reg, bhack[i].val); 3571 i++; 3572 } 3573 } 3574 } 3575 3576 /* Set station address */ 3577 bcopy(IF_LLADDR(sc_if->sk_ifp), eaddr, ETHER_ADDR_LEN); 3578 SK_XM_WRITE_2(sc_if, XM_PAR0, eaddr[0]); 3579 SK_XM_WRITE_2(sc_if, XM_PAR1, eaddr[1]); 3580 SK_XM_WRITE_2(sc_if, XM_PAR2, eaddr[2]); 3581 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION); 3582 3583 if (ifp->if_flags & IFF_BROADCAST) { 3584 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD); 3585 } else { 3586 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD); 3587 } 3588 3589 /* We don't need the FCS appended to the packet. */ 3590 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS); 3591 3592 /* We want short frames padded to 60 bytes. */ 3593 SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD); 3594 3595 /* 3596 * Enable the reception of all error frames. This is is 3597 * a necessary evil due to the design of the XMAC. The 3598 * XMAC's receive FIFO is only 8K in size, however jumbo 3599 * frames can be up to 9000 bytes in length. When bad 3600 * frame filtering is enabled, the XMAC's RX FIFO operates 3601 * in 'store and forward' mode. For this to work, the 3602 * entire frame has to fit into the FIFO, but that means 3603 * that jumbo frames larger than 8192 bytes will be 3604 * truncated. Disabling all bad frame filtering causes 3605 * the RX FIFO to operate in streaming mode, in which 3606 * case the XMAC will start transfering frames out of the 3607 * RX FIFO as soon as the FIFO threshold is reached. 3608 */ 3609 if (ifp->if_mtu > SK_MAX_FRAMELEN) { 3610 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES| 3611 XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS| 3612 XM_MODE_RX_INRANGELEN); 3613 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK); 3614 } else 3615 SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK); 3616 3617 /* 3618 * Bump up the transmit threshold. This helps hold off transmit 3619 * underruns when we're blasting traffic from both ports at once. 3620 */ 3621 SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH); 3622 3623 /* Set promiscuous mode */ 3624 sk_setpromisc(sc_if); 3625 3626 /* Set multicast filter */ 3627 sk_setmulti(sc_if); 3628 3629 /* Clear and enable interrupts */ 3630 SK_XM_READ_2(sc_if, XM_ISR); 3631 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) 3632 SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS); 3633 else 3634 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF); 3635 3636 /* Configure MAC arbiter */ 3637 switch(sc_if->sk_xmac_rev) { 3638 case XM_XMAC_REV_B2: 3639 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2); 3640 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2); 3641 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2); 3642 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2); 3643 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2); 3644 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2); 3645 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2); 3646 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2); 3647 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2); 3648 break; 3649 case XM_XMAC_REV_C1: 3650 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1); 3651 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1); 3652 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1); 3653 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1); 3654 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1); 3655 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1); 3656 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1); 3657 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1); 3658 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2); 3659 break; 3660 default: 3661 break; 3662 } 3663 sk_win_write_2(sc, SK_MACARB_CTL, 3664 SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF); 3665 3666 sc_if->sk_link = 1; 3667 3668 return; 3669 } 3670 3671 static void 3672 sk_init_yukon(sc_if) 3673 struct sk_if_softc *sc_if; 3674 { 3675 u_int32_t phy, v; 3676 u_int16_t reg; 3677 struct sk_softc *sc; 3678 struct ifnet *ifp; 3679 int i; 3680 3681 SK_IF_LOCK_ASSERT(sc_if); 3682 3683 sc = sc_if->sk_softc; 3684 ifp = sc_if->sk_ifp; 3685 3686 if (sc->sk_type == SK_YUKON_LITE && 3687 sc->sk_rev >= SK_YUKON_LITE_REV_A3) { 3688 /* 3689 * Workaround code for COMA mode, set PHY reset. 3690 * Otherwise it will not correctly take chip out of 3691 * powerdown (coma) 3692 */ 3693 v = sk_win_read_4(sc, SK_GPIO); 3694 v |= SK_GPIO_DIR9 | SK_GPIO_DAT9; 3695 sk_win_write_4(sc, SK_GPIO, v); 3696 } 3697 3698 /* GMAC and GPHY Reset */ 3699 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET); 3700 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET); 3701 DELAY(1000); 3702 3703 if (sc->sk_type == SK_YUKON_LITE && 3704 sc->sk_rev >= SK_YUKON_LITE_REV_A3) { 3705 /* 3706 * Workaround code for COMA mode, clear PHY reset 3707 */ 3708 v = sk_win_read_4(sc, SK_GPIO); 3709 v |= SK_GPIO_DIR9; 3710 v &= ~SK_GPIO_DAT9; 3711 sk_win_write_4(sc, SK_GPIO, v); 3712 } 3713 3714 phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP | 3715 SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE; 3716 3717 if (sc->sk_coppertype) 3718 phy |= SK_GPHY_COPPER; 3719 else 3720 phy |= SK_GPHY_FIBER; 3721 3722 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET); 3723 DELAY(1000); 3724 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR); 3725 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF | 3726 SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR); 3727 3728 /* unused read of the interrupt source register */ 3729 SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR); 3730 3731 reg = SK_YU_READ_2(sc_if, YUKON_PAR); 3732 3733 /* MIB Counter Clear Mode set */ 3734 reg |= YU_PAR_MIB_CLR; 3735 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg); 3736 3737 /* MIB Counter Clear Mode clear */ 3738 reg &= ~YU_PAR_MIB_CLR; 3739 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg); 3740 3741 /* receive control reg */ 3742 SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR); 3743 3744 /* transmit parameter register */ 3745 SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) | 3746 YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) ); 3747 3748 /* serial mode register */ 3749 reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e); 3750 if (ifp->if_mtu > SK_MAX_FRAMELEN) 3751 reg |= YU_SMR_MFL_JUMBO; 3752 SK_YU_WRITE_2(sc_if, YUKON_SMR, reg); 3753 3754 /* Setup Yukon's address */ 3755 for (i = 0; i < 3; i++) { 3756 /* Write Source Address 1 (unicast filter) */ 3757 SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4, 3758 IF_LLADDR(sc_if->sk_ifp)[i * 2] | 3759 IF_LLADDR(sc_if->sk_ifp)[i * 2 + 1] << 8); 3760 } 3761 3762 for (i = 0; i < 3; i++) { 3763 reg = sk_win_read_2(sc_if->sk_softc, 3764 SK_MAC1_0 + i * 2 + sc_if->sk_port * 8); 3765 SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg); 3766 } 3767 3768 /* Set promiscuous mode */ 3769 sk_setpromisc(sc_if); 3770 3771 /* Set multicast filter */ 3772 sk_setmulti(sc_if); 3773 3774 /* enable interrupt mask for counter overflows */ 3775 SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0); 3776 SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0); 3777 SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0); 3778 3779 /* Configure RX MAC FIFO Flush Mask */ 3780 v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR | 3781 YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT | 3782 YU_RXSTAT_JABBER; 3783 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v); 3784 3785 /* Disable RX MAC FIFO Flush for YUKON-Lite Rev. A0 only */ 3786 if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev == SK_YUKON_LITE_REV_A0) 3787 v = SK_TFCTL_OPERATION_ON; 3788 else 3789 v = SK_TFCTL_OPERATION_ON | SK_RFCTL_FIFO_FLUSH_ON; 3790 /* Configure RX MAC FIFO */ 3791 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR); 3792 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, v); 3793 3794 /* Increase flush threshould to 64 bytes */ 3795 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD, 3796 SK_RFCTL_FIFO_THRESHOLD + 1); 3797 3798 /* Configure TX MAC FIFO */ 3799 SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR); 3800 SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON); 3801 } 3802 3803 /* 3804 * Note that to properly initialize any part of the GEnesis chip, 3805 * you first have to take it out of reset mode. 3806 */ 3807 static void 3808 sk_init(xsc) 3809 void *xsc; 3810 { 3811 struct sk_if_softc *sc_if = xsc; 3812 3813 SK_IF_LOCK(sc_if); 3814 sk_init_locked(sc_if); 3815 SK_IF_UNLOCK(sc_if); 3816 3817 return; 3818 } 3819 3820 static void 3821 sk_init_locked(sc_if) 3822 struct sk_if_softc *sc_if; 3823 { 3824 struct sk_softc *sc; 3825 struct ifnet *ifp; 3826 struct mii_data *mii; 3827 u_int16_t reg; 3828 u_int32_t imr; 3829 int error; 3830 3831 SK_IF_LOCK_ASSERT(sc_if); 3832 3833 ifp = sc_if->sk_ifp; 3834 sc = sc_if->sk_softc; 3835 mii = device_get_softc(sc_if->sk_miibus); 3836 3837 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 3838 return; 3839 3840 /* Cancel pending I/O and free all RX/TX buffers. */ 3841 sk_stop(sc_if); 3842 3843 if (sc->sk_type == SK_GENESIS) { 3844 /* Configure LINK_SYNC LED */ 3845 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON); 3846 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, 3847 SK_LINKLED_LINKSYNC_ON); 3848 3849 /* Configure RX LED */ 3850 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, 3851 SK_RXLEDCTL_COUNTER_START); 3852 3853 /* Configure TX LED */ 3854 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, 3855 SK_TXLEDCTL_COUNTER_START); 3856 } 3857 3858 /* 3859 * Configure descriptor poll timer 3860 * 3861 * SK-NET GENESIS data sheet says that possibility of losing Start 3862 * transmit command due to CPU/cache related interim storage problems 3863 * under certain conditions. The document recommends a polling 3864 * mechanism to send a Start transmit command to initiate transfer 3865 * of ready descriptors regulary. To cope with this issue sk(4) now 3866 * enables descriptor poll timer to initiate descriptor processing 3867 * periodically as defined by SK_DPT_TIMER_MAX. However sk(4) still 3868 * issue SK_TXBMU_TX_START to Tx BMU to get fast execution of Tx 3869 * command instead of waiting for next descriptor polling time. 3870 * The same rule may apply to Rx side too but it seems that is not 3871 * needed at the moment. 3872 * Since sk(4) uses descriptor polling as a last resort there is no 3873 * need to set smaller polling time than maximum allowable one. 3874 */ 3875 SK_IF_WRITE_4(sc_if, 0, SK_DPT_INIT, SK_DPT_TIMER_MAX); 3876 3877 /* Configure I2C registers */ 3878 3879 /* Configure XMAC(s) */ 3880 switch (sc->sk_type) { 3881 case SK_GENESIS: 3882 sk_init_xmac(sc_if); 3883 break; 3884 case SK_YUKON: 3885 case SK_YUKON_LITE: 3886 case SK_YUKON_LP: 3887 case SK_YUKON_EC: 3888 sk_init_yukon(sc_if); 3889 break; 3890 } 3891 mii_mediachg(mii); 3892 3893 if (sc->sk_type == SK_GENESIS) { 3894 /* Configure MAC FIFOs */ 3895 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET); 3896 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END); 3897 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON); 3898 3899 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET); 3900 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END); 3901 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON); 3902 } 3903 3904 /* Configure transmit arbiter(s) */ 3905 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, 3906 SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON); 3907 3908 /* Configure RAMbuffers */ 3909 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET); 3910 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart); 3911 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart); 3912 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart); 3913 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend); 3914 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON); 3915 3916 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET); 3917 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON); 3918 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart); 3919 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart); 3920 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart); 3921 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend); 3922 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON); 3923 3924 /* Configure BMUs */ 3925 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE); 3926 if (ifp->if_mtu > SK_MAX_FRAMELEN) { 3927 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO, 3928 SK_ADDR_LO(SK_JUMBO_RX_RING_ADDR(sc_if, 0))); 3929 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 3930 SK_ADDR_HI(SK_JUMBO_RX_RING_ADDR(sc_if, 0))); 3931 } else { 3932 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO, 3933 SK_ADDR_LO(SK_RX_RING_ADDR(sc_if, 0))); 3934 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 3935 SK_ADDR_HI(SK_RX_RING_ADDR(sc_if, 0))); 3936 } 3937 3938 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE); 3939 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO, 3940 SK_ADDR_LO(SK_TX_RING_ADDR(sc_if, 0))); 3941 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 3942 SK_ADDR_HI(SK_TX_RING_ADDR(sc_if, 0))); 3943 3944 /* Init descriptors */ 3945 if (ifp->if_mtu > SK_MAX_FRAMELEN) 3946 error = sk_init_jumbo_rx_ring(sc_if); 3947 else 3948 error = sk_init_rx_ring(sc_if); 3949 if (error != 0) { 3950 device_printf(sc_if->sk_if_dev, 3951 "initialization failed: no memory for rx buffers\n"); 3952 sk_stop(sc_if); 3953 return; 3954 } 3955 sk_init_tx_ring(sc_if); 3956 3957 /* Set interrupt moderation if changed via sysctl. */ 3958 imr = sk_win_read_4(sc, SK_IMTIMERINIT); 3959 if (imr != SK_IM_USECS(sc->sk_int_mod, sc->sk_int_ticks)) { 3960 sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod, 3961 sc->sk_int_ticks)); 3962 if (bootverbose) 3963 device_printf(sc_if->sk_if_dev, 3964 "interrupt moderation is %d us.\n", 3965 sc->sk_int_mod); 3966 } 3967 3968 /* Configure interrupt handling */ 3969 CSR_READ_4(sc, SK_ISSR); 3970 if (sc_if->sk_port == SK_PORT_A) 3971 sc->sk_intrmask |= SK_INTRS1; 3972 else 3973 sc->sk_intrmask |= SK_INTRS2; 3974 3975 sc->sk_intrmask |= SK_ISR_EXTERNAL_REG; 3976 3977 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); 3978 3979 /* Start BMUs. */ 3980 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START); 3981 3982 switch(sc->sk_type) { 3983 case SK_GENESIS: 3984 /* Enable XMACs TX and RX state machines */ 3985 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE); 3986 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); 3987 break; 3988 case SK_YUKON: 3989 case SK_YUKON_LITE: 3990 case SK_YUKON_LP: 3991 case SK_YUKON_EC: 3992 reg = SK_YU_READ_2(sc_if, YUKON_GPCR); 3993 reg |= YU_GPCR_TXEN | YU_GPCR_RXEN; 3994 #if 0 3995 /* XXX disable 100Mbps and full duplex mode? */ 3996 reg &= ~(YU_GPCR_SPEED | YU_GPCR_DPLX_DIS); 3997 #endif 3998 SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg); 3999 } 4000 4001 /* Activate descriptor polling timer */ 4002 SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_START); 4003 /* start transfer of Tx descriptors */ 4004 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START); 4005 4006 ifp->if_drv_flags |= IFF_DRV_RUNNING; 4007 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 4008 4009 switch (sc->sk_type) { 4010 case SK_YUKON: 4011 case SK_YUKON_LITE: 4012 case SK_YUKON_LP: 4013 case SK_YUKON_EC: 4014 callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if); 4015 break; 4016 } 4017 4018 return; 4019 } 4020 4021 static void 4022 sk_stop(sc_if) 4023 struct sk_if_softc *sc_if; 4024 { 4025 int i; 4026 struct sk_softc *sc; 4027 struct sk_txdesc *txd; 4028 struct sk_rxdesc *rxd; 4029 struct sk_rxdesc *jrxd; 4030 struct ifnet *ifp; 4031 u_int32_t val; 4032 4033 SK_IF_LOCK_ASSERT(sc_if); 4034 sc = sc_if->sk_softc; 4035 ifp = sc_if->sk_ifp; 4036 4037 callout_stop(&sc_if->sk_tick_ch); 4038 4039 /* stop Tx descriptor polling timer */ 4040 SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP); 4041 /* stop transfer of Tx descriptors */ 4042 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_STOP); 4043 for (i = 0; i < SK_TIMEOUT; i++) { 4044 val = CSR_READ_4(sc, sc_if->sk_tx_bmu); 4045 if ((val & SK_TXBMU_TX_STOP) == 0) 4046 break; 4047 DELAY(1); 4048 } 4049 if (i == SK_TIMEOUT) 4050 device_printf(sc_if->sk_if_dev, 4051 "can not stop transfer of Tx descriptor\n"); 4052 /* stop transfer of Rx descriptors */ 4053 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_STOP); 4054 for (i = 0; i < SK_TIMEOUT; i++) { 4055 val = SK_IF_READ_4(sc_if, 0, SK_RXQ1_BMU_CSR); 4056 if ((val & SK_RXBMU_RX_STOP) == 0) 4057 break; 4058 DELAY(1); 4059 } 4060 if (i == SK_TIMEOUT) 4061 device_printf(sc_if->sk_if_dev, 4062 "can not stop transfer of Rx descriptor\n"); 4063 4064 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { 4065 /* Put PHY back into reset. */ 4066 val = sk_win_read_4(sc, SK_GPIO); 4067 if (sc_if->sk_port == SK_PORT_A) { 4068 val |= SK_GPIO_DIR0; 4069 val &= ~SK_GPIO_DAT0; 4070 } else { 4071 val |= SK_GPIO_DIR2; 4072 val &= ~SK_GPIO_DAT2; 4073 } 4074 sk_win_write_4(sc, SK_GPIO, val); 4075 } 4076 4077 /* Turn off various components of this interface. */ 4078 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC); 4079 switch (sc->sk_type) { 4080 case SK_GENESIS: 4081 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET); 4082 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET); 4083 break; 4084 case SK_YUKON: 4085 case SK_YUKON_LITE: 4086 case SK_YUKON_LP: 4087 case SK_YUKON_EC: 4088 SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET); 4089 SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET); 4090 break; 4091 } 4092 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE); 4093 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF); 4094 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE); 4095 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF); 4096 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF); 4097 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP); 4098 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP); 4099 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF); 4100 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF); 4101 4102 /* Disable interrupts */ 4103 if (sc_if->sk_port == SK_PORT_A) 4104 sc->sk_intrmask &= ~SK_INTRS1; 4105 else 4106 sc->sk_intrmask &= ~SK_INTRS2; 4107 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); 4108 4109 SK_XM_READ_2(sc_if, XM_ISR); 4110 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF); 4111 4112 /* Free RX and TX mbufs still in the queues. */ 4113 for (i = 0; i < SK_RX_RING_CNT; i++) { 4114 rxd = &sc_if->sk_cdata.sk_rxdesc[i]; 4115 if (rxd->rx_m != NULL) { 4116 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, 4117 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 4118 bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag, 4119 rxd->rx_dmamap); 4120 m_freem(rxd->rx_m); 4121 rxd->rx_m = NULL; 4122 } 4123 } 4124 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) { 4125 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i]; 4126 if (jrxd->rx_m != NULL) { 4127 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, 4128 jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 4129 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag, 4130 jrxd->rx_dmamap); 4131 m_freem(jrxd->rx_m); 4132 jrxd->rx_m = NULL; 4133 } 4134 } 4135 for (i = 0; i < SK_TX_RING_CNT; i++) { 4136 txd = &sc_if->sk_cdata.sk_txdesc[i]; 4137 if (txd->tx_m != NULL) { 4138 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, 4139 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 4140 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, 4141 txd->tx_dmamap); 4142 m_freem(txd->tx_m); 4143 txd->tx_m = NULL; 4144 } 4145 } 4146 4147 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE); 4148 4149 return; 4150 } 4151 4152 static int 4153 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 4154 { 4155 int error, value; 4156 4157 if (!arg1) 4158 return (EINVAL); 4159 value = *(int *)arg1; 4160 error = sysctl_handle_int(oidp, &value, 0, req); 4161 if (error || !req->newptr) 4162 return (error); 4163 if (value < low || value > high) 4164 return (EINVAL); 4165 *(int *)arg1 = value; 4166 return (0); 4167 } 4168 4169 static int 4170 sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS) 4171 { 4172 return (sysctl_int_range(oidp, arg1, arg2, req, SK_IM_MIN, SK_IM_MAX)); 4173 } 4174