1 /* $OpenBSD: if_sk.c,v 2.33 2003/08/12 05:23:06 nate Exp $ */ 2 3 /*- 4 * SPDX-License-Identifier: BSD-4-Clause 5 * 6 * Copyright (c) 1997, 1998, 1999, 2000 7 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed by Bill Paul. 20 * 4. Neither the name of the author nor the names of any co-contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 34 * THE POSSIBILITY OF SUCH DAMAGE. 35 */ 36 /*- 37 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu> 38 * 39 * Permission to use, copy, modify, and distribute this software for any 40 * purpose with or without fee is hereby granted, provided that the above 41 * copyright notice and this permission notice appear in all copies. 42 * 43 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 44 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 45 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 46 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 47 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 48 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 49 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 50 */ 51 52 #include <sys/cdefs.h> 53 __FBSDID("$FreeBSD$"); 54 55 /* 56 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports 57 * the SK-984x series adapters, both single port and dual port. 58 * References: 59 * The XaQti XMAC II datasheet, 60 * https://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf 61 * The SysKonnect GEnesis manual, http://www.syskonnect.com 62 * 63 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the 64 * XMAC II datasheet online. 
I have put my copy at people.freebsd.org as a 65 * convenience to others until Vitesse corrects this problem: 66 * 67 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf 68 * 69 * Written by Bill Paul <wpaul@ee.columbia.edu> 70 * Department of Electrical Engineering 71 * Columbia University, New York City 72 */ 73 /* 74 * The SysKonnect gigabit ethernet adapters consist of two main 75 * components: the SysKonnect GEnesis controller chip and the XaQti Corp. 76 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC 77 * components and a PHY while the GEnesis controller provides a PCI 78 * interface with DMA support. Each card may have between 512K and 79 * 2MB of SRAM on board depending on the configuration. 80 * 81 * The SysKonnect GEnesis controller can have either one or two XMAC 82 * chips connected to it, allowing single or dual port NIC configurations. 83 * SysKonnect has the distinction of being the only vendor on the market 84 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs, 85 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the 86 * XMAC registers. This driver takes advantage of these features to allow 87 * both XMACs to operate as independent interfaces. 88 */ 89 90 #include <sys/param.h> 91 #include <sys/systm.h> 92 #include <sys/bus.h> 93 #include <sys/endian.h> 94 #include <sys/mbuf.h> 95 #include <sys/malloc.h> 96 #include <sys/kernel.h> 97 #include <sys/module.h> 98 #include <sys/socket.h> 99 #include <sys/sockio.h> 100 #include <sys/queue.h> 101 #include <sys/sysctl.h> 102 103 #include <net/bpf.h> 104 #include <net/ethernet.h> 105 #include <net/if.h> 106 #include <net/if_var.h> 107 #include <net/if_arp.h> 108 #include <net/if_dl.h> 109 #include <net/if_media.h> 110 #include <net/if_types.h> 111 #include <net/if_vlan_var.h> 112 113 #include <netinet/in.h> 114 #include <netinet/in_systm.h> 115 #include <netinet/ip.h> 116 117 #include <machine/bus.h> 118 #include <machine/in_cksum.h> 119 #include <machine/resource.h> 120 #include <sys/rman.h> 121 122 #include <dev/mii/mii.h> 123 #include <dev/mii/miivar.h> 124 #include <dev/mii/brgphyreg.h> 125 126 #include <dev/pci/pcireg.h> 127 #include <dev/pci/pcivar.h> 128 129 #if 0 130 #define SK_USEIOSPACE 131 #endif 132 133 #include <dev/sk/if_skreg.h> 134 #include <dev/sk/xmaciireg.h> 135 #include <dev/sk/yukonreg.h> 136 137 MODULE_DEPEND(sk, pci, 1, 1, 1); 138 MODULE_DEPEND(sk, ether, 1, 1, 1); 139 MODULE_DEPEND(sk, miibus, 1, 1, 1); 140 141 /* "device miibus" required. See GENERIC if you get errors here. 
*/ 142 #include "miibus_if.h" 143 144 static const struct sk_type sk_devs[] = { 145 { 146 VENDORID_SK, 147 DEVICEID_SK_V1, 148 "SysKonnect Gigabit Ethernet (V1.0)" 149 }, 150 { 151 VENDORID_SK, 152 DEVICEID_SK_V2, 153 "SysKonnect Gigabit Ethernet (V2.0)" 154 }, 155 { 156 VENDORID_MARVELL, 157 DEVICEID_SK_V2, 158 "Marvell Gigabit Ethernet" 159 }, 160 { 161 VENDORID_MARVELL, 162 DEVICEID_BELKIN_5005, 163 "Belkin F5D5005 Gigabit Ethernet" 164 }, 165 { 166 VENDORID_3COM, 167 DEVICEID_3COM_3C940, 168 "3Com 3C940 Gigabit Ethernet" 169 }, 170 { 171 VENDORID_LINKSYS, 172 DEVICEID_LINKSYS_EG1032, 173 "Linksys EG1032 Gigabit Ethernet" 174 }, 175 { 176 VENDORID_DLINK, 177 DEVICEID_DLINK_DGE530T_A1, 178 "D-Link DGE-530T Gigabit Ethernet" 179 }, 180 { 181 VENDORID_DLINK, 182 DEVICEID_DLINK_DGE530T_B1, 183 "D-Link DGE-530T Gigabit Ethernet" 184 }, 185 { 0, 0, NULL } 186 }; 187 188 static int skc_probe(device_t); 189 static int skc_attach(device_t); 190 static int skc_detach(device_t); 191 static int skc_shutdown(device_t); 192 static int skc_suspend(device_t); 193 static int skc_resume(device_t); 194 static bus_dma_tag_t skc_get_dma_tag(device_t, device_t); 195 static int sk_detach(device_t); 196 static int sk_probe(device_t); 197 static int sk_attach(device_t); 198 static void sk_tick(void *); 199 static void sk_yukon_tick(void *); 200 static void sk_intr(void *); 201 static void sk_intr_xmac(struct sk_if_softc *); 202 static void sk_intr_bcom(struct sk_if_softc *); 203 static void sk_intr_yukon(struct sk_if_softc *); 204 static __inline void sk_rxcksum(struct ifnet *, struct mbuf *, u_int32_t); 205 static __inline int sk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t); 206 static void sk_rxeof(struct sk_if_softc *); 207 static void sk_jumbo_rxeof(struct sk_if_softc *); 208 static void sk_txeof(struct sk_if_softc *); 209 static void sk_txcksum(struct ifnet *, struct mbuf *, struct sk_tx_desc *); 210 static int sk_encap(struct sk_if_softc *, struct mbuf **); 211 static void sk_start(struct ifnet *); 212 static void sk_start_locked(struct ifnet *); 213 static int sk_ioctl(struct ifnet *, u_long, caddr_t); 214 static void sk_init(void *); 215 static void sk_init_locked(struct sk_if_softc *); 216 static void sk_init_xmac(struct sk_if_softc *); 217 static void sk_init_yukon(struct sk_if_softc *); 218 static void sk_stop(struct sk_if_softc *); 219 static void sk_watchdog(void *); 220 static int sk_ifmedia_upd(struct ifnet *); 221 static void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *); 222 static void sk_reset(struct sk_softc *); 223 static __inline void sk_discard_rxbuf(struct sk_if_softc *, int); 224 static __inline void sk_discard_jumbo_rxbuf(struct sk_if_softc *, int); 225 static int sk_newbuf(struct sk_if_softc *, int); 226 static int sk_jumbo_newbuf(struct sk_if_softc *, int); 227 static void sk_dmamap_cb(void *, bus_dma_segment_t *, int, int); 228 static int sk_dma_alloc(struct sk_if_softc *); 229 static int sk_dma_jumbo_alloc(struct sk_if_softc *); 230 static void sk_dma_free(struct sk_if_softc *); 231 static void sk_dma_jumbo_free(struct sk_if_softc *); 232 static int sk_init_rx_ring(struct sk_if_softc *); 233 static int sk_init_jumbo_rx_ring(struct sk_if_softc *); 234 static void sk_init_tx_ring(struct sk_if_softc *); 235 static u_int32_t sk_win_read_4(struct sk_softc *, int); 236 static u_int16_t sk_win_read_2(struct sk_softc *, int); 237 static u_int8_t sk_win_read_1(struct sk_softc *, int); 238 static void sk_win_write_4(struct sk_softc *, int, u_int32_t); 239 static void 
sk_win_write_2(struct sk_softc *, int, u_int32_t);
240 static void sk_win_write_1(struct sk_softc *, int, u_int32_t);
241
242 static int sk_miibus_readreg(device_t, int, int);
243 static int sk_miibus_writereg(device_t, int, int, int);
244 static void sk_miibus_statchg(device_t);
245
246 static int sk_xmac_miibus_readreg(struct sk_if_softc *, int, int);
247 static int sk_xmac_miibus_writereg(struct sk_if_softc *, int, int,
248 int);
249 static void sk_xmac_miibus_statchg(struct sk_if_softc *);
250
251 static int sk_marv_miibus_readreg(struct sk_if_softc *, int, int);
252 static int sk_marv_miibus_writereg(struct sk_if_softc *, int, int,
253 int);
254 static void sk_marv_miibus_statchg(struct sk_if_softc *);
255
256 static uint32_t sk_xmchash(const uint8_t *);
257 static void sk_setfilt(struct sk_if_softc *, u_int16_t *, int);
258 static void sk_rxfilter(struct sk_if_softc *);
259 static void sk_rxfilter_genesis(struct sk_if_softc *);
260 static void sk_rxfilter_yukon(struct sk_if_softc *);
261
262 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high);
263 static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS);
264
265 /* Tunables. */
266 static int jumbo_disable = 0;
267 TUNABLE_INT("hw.skc.jumbo_disable", &jumbo_disable);
268
269 /*
270 * It seems that the SK-NET GENESIS supports only a very simple
271 * checksum offload capability for Tx, and I believe it can generate a
272 * checksum value of 0 for UDP packets on Tx as the hardware can't
273 * differentiate UDP packets from TCP packets. A checksum value of 0
274 * for a UDP packet is invalid, as it means the sender didn't perform
275 * the checksum computation. For safety I have disabled UDP checksum
276 * offload for the moment. Alternatively, we could introduce a
277 * LINK0/LINK1 flag as hme(4) did in its Tx checksum offload routine.
278 */
279 #define SK_CSUM_FEATURES (CSUM_TCP)
280
281 /*
282 * Note that we have newbus methods for both the GEnesis controller
283 * itself and the XMAC(s). The XMACs are children of the GEnesis, and
284 * the miibus code is a child of the XMACs. We need to do it this way
285 * so that the miibus drivers can access the PHY registers on the
286 * right PHY. It's not quite what I had in mind, but it's the only
287 * design that achieves the desired effect.
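 *
 * The resulting hierarchy, per the DRIVER_MODULE() declarations below, is:
 *
 *	skc (GEnesis/Yukon controller, child of pci)
 *	    sk (one per port/XMAC)
 *		miibus
 *		    PHY driver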
288 */ 289 static device_method_t skc_methods[] = { 290 /* Device interface */ 291 DEVMETHOD(device_probe, skc_probe), 292 DEVMETHOD(device_attach, skc_attach), 293 DEVMETHOD(device_detach, skc_detach), 294 DEVMETHOD(device_suspend, skc_suspend), 295 DEVMETHOD(device_resume, skc_resume), 296 DEVMETHOD(device_shutdown, skc_shutdown), 297 298 DEVMETHOD(bus_get_dma_tag, skc_get_dma_tag), 299 300 DEVMETHOD_END 301 }; 302 303 static driver_t skc_driver = { 304 "skc", 305 skc_methods, 306 sizeof(struct sk_softc) 307 }; 308 309 static devclass_t skc_devclass; 310 311 static device_method_t sk_methods[] = { 312 /* Device interface */ 313 DEVMETHOD(device_probe, sk_probe), 314 DEVMETHOD(device_attach, sk_attach), 315 DEVMETHOD(device_detach, sk_detach), 316 DEVMETHOD(device_shutdown, bus_generic_shutdown), 317 318 /* MII interface */ 319 DEVMETHOD(miibus_readreg, sk_miibus_readreg), 320 DEVMETHOD(miibus_writereg, sk_miibus_writereg), 321 DEVMETHOD(miibus_statchg, sk_miibus_statchg), 322 323 DEVMETHOD_END 324 }; 325 326 static driver_t sk_driver = { 327 "sk", 328 sk_methods, 329 sizeof(struct sk_if_softc) 330 }; 331 332 static devclass_t sk_devclass; 333 334 DRIVER_MODULE(skc, pci, skc_driver, skc_devclass, NULL, NULL); 335 DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, NULL, NULL); 336 DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, NULL, NULL); 337 338 static struct resource_spec sk_res_spec_io[] = { 339 { SYS_RES_IOPORT, PCIR_BAR(1), RF_ACTIVE }, 340 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, 341 { -1, 0, 0 } 342 }; 343 344 static struct resource_spec sk_res_spec_mem[] = { 345 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE }, 346 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, 347 { -1, 0, 0 } 348 }; 349 350 #define SK_SETBIT(sc, reg, x) \ 351 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x) 352 353 #define SK_CLRBIT(sc, reg, x) \ 354 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x) 355 356 #define SK_WIN_SETBIT_4(sc, reg, x) \ 357 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x) 358 359 #define SK_WIN_CLRBIT_4(sc, reg, x) \ 360 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x) 361 362 #define SK_WIN_SETBIT_2(sc, reg, x) \ 363 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x) 364 365 #define SK_WIN_CLRBIT_2(sc, reg, x) \ 366 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x) 367 368 static u_int32_t 369 sk_win_read_4(sc, reg) 370 struct sk_softc *sc; 371 int reg; 372 { 373 #ifdef SK_USEIOSPACE 374 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 375 return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg))); 376 #else 377 return(CSR_READ_4(sc, reg)); 378 #endif 379 } 380 381 static u_int16_t 382 sk_win_read_2(sc, reg) 383 struct sk_softc *sc; 384 int reg; 385 { 386 #ifdef SK_USEIOSPACE 387 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 388 return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg))); 389 #else 390 return(CSR_READ_2(sc, reg)); 391 #endif 392 } 393 394 static u_int8_t 395 sk_win_read_1(sc, reg) 396 struct sk_softc *sc; 397 int reg; 398 { 399 #ifdef SK_USEIOSPACE 400 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 401 return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg))); 402 #else 403 return(CSR_READ_1(sc, reg)); 404 #endif 405 } 406 407 static void 408 sk_win_write_4(sc, reg, val) 409 struct sk_softc *sc; 410 int reg; 411 u_int32_t val; 412 { 413 #ifdef SK_USEIOSPACE 414 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 415 CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val); 416 #else 417 CSR_WRITE_4(sc, reg, val); 418 #endif 419 return; 420 } 421 422 static void 423 sk_win_write_2(sc, reg, val) 424 struct sk_softc *sc; 425 int reg; 426 
u_int32_t val; 427 { 428 #ifdef SK_USEIOSPACE 429 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 430 CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val); 431 #else 432 CSR_WRITE_2(sc, reg, val); 433 #endif 434 return; 435 } 436 437 static void 438 sk_win_write_1(sc, reg, val) 439 struct sk_softc *sc; 440 int reg; 441 u_int32_t val; 442 { 443 #ifdef SK_USEIOSPACE 444 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 445 CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val); 446 #else 447 CSR_WRITE_1(sc, reg, val); 448 #endif 449 return; 450 } 451 452 static int 453 sk_miibus_readreg(dev, phy, reg) 454 device_t dev; 455 int phy, reg; 456 { 457 struct sk_if_softc *sc_if; 458 int v; 459 460 sc_if = device_get_softc(dev); 461 462 SK_IF_MII_LOCK(sc_if); 463 switch(sc_if->sk_softc->sk_type) { 464 case SK_GENESIS: 465 v = sk_xmac_miibus_readreg(sc_if, phy, reg); 466 break; 467 case SK_YUKON: 468 case SK_YUKON_LITE: 469 case SK_YUKON_LP: 470 v = sk_marv_miibus_readreg(sc_if, phy, reg); 471 break; 472 default: 473 v = 0; 474 break; 475 } 476 SK_IF_MII_UNLOCK(sc_if); 477 478 return (v); 479 } 480 481 static int 482 sk_miibus_writereg(dev, phy, reg, val) 483 device_t dev; 484 int phy, reg, val; 485 { 486 struct sk_if_softc *sc_if; 487 int v; 488 489 sc_if = device_get_softc(dev); 490 491 SK_IF_MII_LOCK(sc_if); 492 switch(sc_if->sk_softc->sk_type) { 493 case SK_GENESIS: 494 v = sk_xmac_miibus_writereg(sc_if, phy, reg, val); 495 break; 496 case SK_YUKON: 497 case SK_YUKON_LITE: 498 case SK_YUKON_LP: 499 v = sk_marv_miibus_writereg(sc_if, phy, reg, val); 500 break; 501 default: 502 v = 0; 503 break; 504 } 505 SK_IF_MII_UNLOCK(sc_if); 506 507 return (v); 508 } 509 510 static void 511 sk_miibus_statchg(dev) 512 device_t dev; 513 { 514 struct sk_if_softc *sc_if; 515 516 sc_if = device_get_softc(dev); 517 518 SK_IF_MII_LOCK(sc_if); 519 switch(sc_if->sk_softc->sk_type) { 520 case SK_GENESIS: 521 sk_xmac_miibus_statchg(sc_if); 522 break; 523 case SK_YUKON: 524 case SK_YUKON_LITE: 525 case SK_YUKON_LP: 526 sk_marv_miibus_statchg(sc_if); 527 break; 528 } 529 SK_IF_MII_UNLOCK(sc_if); 530 531 return; 532 } 533 534 static int 535 sk_xmac_miibus_readreg(sc_if, phy, reg) 536 struct sk_if_softc *sc_if; 537 int phy, reg; 538 { 539 int i; 540 541 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8)); 542 SK_XM_READ_2(sc_if, XM_PHY_DATA); 543 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) { 544 for (i = 0; i < SK_TIMEOUT; i++) { 545 DELAY(1); 546 if (SK_XM_READ_2(sc_if, XM_MMUCMD) & 547 XM_MMUCMD_PHYDATARDY) 548 break; 549 } 550 551 if (i == SK_TIMEOUT) { 552 if_printf(sc_if->sk_ifp, "phy failed to come ready\n"); 553 return(0); 554 } 555 } 556 DELAY(1); 557 i = SK_XM_READ_2(sc_if, XM_PHY_DATA); 558 559 return(i); 560 } 561 562 static int 563 sk_xmac_miibus_writereg(sc_if, phy, reg, val) 564 struct sk_if_softc *sc_if; 565 int phy, reg, val; 566 { 567 int i; 568 569 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8)); 570 for (i = 0; i < SK_TIMEOUT; i++) { 571 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY)) 572 break; 573 } 574 575 if (i == SK_TIMEOUT) { 576 if_printf(sc_if->sk_ifp, "phy failed to come ready\n"); 577 return (ETIMEDOUT); 578 } 579 580 SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val); 581 for (i = 0; i < SK_TIMEOUT; i++) { 582 DELAY(1); 583 if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY)) 584 break; 585 } 586 if (i == SK_TIMEOUT) 587 if_printf(sc_if->sk_ifp, "phy write timed out\n"); 588 589 return(0); 590 } 591 592 static void 593 sk_xmac_miibus_statchg(sc_if) 594 struct sk_if_softc *sc_if; 595 { 596 struct mii_data *mii; 597 598 mii = 
device_get_softc(sc_if->sk_miibus); 599 600 /* 601 * If this is a GMII PHY, manually set the XMAC's 602 * duplex mode accordingly. 603 */ 604 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) { 605 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 606 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX); 607 } else { 608 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX); 609 } 610 } 611 } 612 613 static int 614 sk_marv_miibus_readreg(sc_if, phy, reg) 615 struct sk_if_softc *sc_if; 616 int phy, reg; 617 { 618 u_int16_t val; 619 int i; 620 621 if (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER && 622 sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER) { 623 return(0); 624 } 625 626 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) | 627 YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ); 628 629 for (i = 0; i < SK_TIMEOUT; i++) { 630 DELAY(1); 631 val = SK_YU_READ_2(sc_if, YUKON_SMICR); 632 if (val & YU_SMICR_READ_VALID) 633 break; 634 } 635 636 if (i == SK_TIMEOUT) { 637 if_printf(sc_if->sk_ifp, "phy failed to come ready\n"); 638 return(0); 639 } 640 641 val = SK_YU_READ_2(sc_if, YUKON_SMIDR); 642 643 return(val); 644 } 645 646 static int 647 sk_marv_miibus_writereg(sc_if, phy, reg, val) 648 struct sk_if_softc *sc_if; 649 int phy, reg, val; 650 { 651 int i; 652 653 SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val); 654 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) | 655 YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE); 656 657 for (i = 0; i < SK_TIMEOUT; i++) { 658 DELAY(1); 659 if ((SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY) == 0) 660 break; 661 } 662 if (i == SK_TIMEOUT) 663 if_printf(sc_if->sk_ifp, "phy write timeout\n"); 664 665 return(0); 666 } 667 668 static void 669 sk_marv_miibus_statchg(sc_if) 670 struct sk_if_softc *sc_if; 671 { 672 return; 673 } 674 675 #define HASH_BITS 6 676 677 static u_int32_t 678 sk_xmchash(addr) 679 const uint8_t *addr; 680 { 681 uint32_t crc; 682 683 /* Compute CRC for the address value. */ 684 crc = ether_crc32_le(addr, ETHER_ADDR_LEN); 685 686 return (~crc & ((1 << HASH_BITS) - 1)); 687 } 688 689 static void 690 sk_setfilt(sc_if, addr, slot) 691 struct sk_if_softc *sc_if; 692 u_int16_t *addr; 693 int slot; 694 { 695 int base; 696 697 base = XM_RXFILT_ENTRY(slot); 698 699 SK_XM_WRITE_2(sc_if, base, addr[0]); 700 SK_XM_WRITE_2(sc_if, base + 2, addr[1]); 701 SK_XM_WRITE_2(sc_if, base + 4, addr[2]); 702 703 return; 704 } 705 706 static void 707 sk_rxfilter(sc_if) 708 struct sk_if_softc *sc_if; 709 { 710 struct sk_softc *sc; 711 712 SK_IF_LOCK_ASSERT(sc_if); 713 714 sc = sc_if->sk_softc; 715 if (sc->sk_type == SK_GENESIS) 716 sk_rxfilter_genesis(sc_if); 717 else 718 sk_rxfilter_yukon(sc_if); 719 } 720 721 static void 722 sk_rxfilter_genesis(sc_if) 723 struct sk_if_softc *sc_if; 724 { 725 struct ifnet *ifp = sc_if->sk_ifp; 726 u_int32_t hashes[2] = { 0, 0 }, mode; 727 int h = 0, i; 728 struct ifmultiaddr *ifma; 729 u_int16_t dummy[] = { 0, 0, 0 }; 730 u_int16_t maddr[(ETHER_ADDR_LEN+1)/2]; 731 732 SK_IF_LOCK_ASSERT(sc_if); 733 734 mode = SK_XM_READ_4(sc_if, XM_MODE); 735 mode &= ~(XM_MODE_RX_PROMISC | XM_MODE_RX_USE_HASH | 736 XM_MODE_RX_USE_PERFECT); 737 /* First, zot all the existing perfect filters. */ 738 for (i = 1; i < XM_RXFILT_MAX; i++) 739 sk_setfilt(sc_if, dummy, i); 740 741 /* Now program new ones. 
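 * Up to XM_RXFILT_MAX - 1 groups are loaded into perfect filter slots
 * 1 through XM_RXFILT_MAX - 1 (slot 0 is not touched here); any
 * remaining groups are folded into the 64-bit hash filter instead.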
*/
742 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
743 if (ifp->if_flags & IFF_ALLMULTI)
744 mode |= XM_MODE_RX_USE_HASH;
745 if (ifp->if_flags & IFF_PROMISC)
746 mode |= XM_MODE_RX_PROMISC;
747 hashes[0] = 0xFFFFFFFF;
748 hashes[1] = 0xFFFFFFFF;
749 } else {
750 i = 1;
751 if_maddr_rlock(ifp);
752 TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead,
753 ifma_link) {
754 if (ifma->ifma_addr->sa_family != AF_LINK)
755 continue;
756 /*
757 * Program the first XM_RXFILT_MAX - 1 multicast groups
758 * into the perfect filter.
759 */
760 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
761 maddr, ETHER_ADDR_LEN);
762 if (i < XM_RXFILT_MAX) {
763 sk_setfilt(sc_if, maddr, i);
764 mode |= XM_MODE_RX_USE_PERFECT;
765 i++;
766 continue;
767 }
768 h = sk_xmchash((const uint8_t *)maddr);
769 if (h < 32)
770 hashes[0] |= (1 << h);
771 else
772 hashes[1] |= (1 << (h - 32));
773 mode |= XM_MODE_RX_USE_HASH;
774 }
775 if_maddr_runlock(ifp);
776 }
777
778 SK_XM_WRITE_4(sc_if, XM_MODE, mode);
779 SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
780 SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
781 }
782
783 static void
784 sk_rxfilter_yukon(sc_if)
785 struct sk_if_softc *sc_if;
786 {
787 struct ifnet *ifp;
788 u_int32_t crc, hashes[2] = { 0, 0 }, mode;
789 struct ifmultiaddr *ifma;
790
791 SK_IF_LOCK_ASSERT(sc_if);
792
793 ifp = sc_if->sk_ifp;
794 mode = SK_YU_READ_2(sc_if, YUKON_RCR);
795 if (ifp->if_flags & IFF_PROMISC)
796 mode &= ~(YU_RCR_UFLEN | YU_RCR_MUFLEN);
797 else if (ifp->if_flags & IFF_ALLMULTI) {
798 mode |= YU_RCR_UFLEN | YU_RCR_MUFLEN;
799 hashes[0] = 0xFFFFFFFF;
800 hashes[1] = 0xFFFFFFFF;
801 } else {
802 mode |= YU_RCR_UFLEN;
803 if_maddr_rlock(ifp);
804 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
805 if (ifma->ifma_addr->sa_family != AF_LINK)
806 continue;
807 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
808 ifma->ifma_addr), ETHER_ADDR_LEN);
809 /* Just want the 6 least significant bits. */
810 crc &= 0x3f;
811 /* Set the corresponding bit in the hash table.
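 * Bit 5 of the 6-bit value selects one of the two 32-bit hash words;
 * the low five bits select the bit within that word.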
*/ 812 hashes[crc >> 5] |= 1 << (crc & 0x1f); 813 } 814 if_maddr_runlock(ifp); 815 if (hashes[0] != 0 || hashes[1] != 0) 816 mode |= YU_RCR_MUFLEN; 817 } 818 819 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff); 820 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff); 821 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff); 822 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff); 823 SK_YU_WRITE_2(sc_if, YUKON_RCR, mode); 824 } 825 826 static int 827 sk_init_rx_ring(sc_if) 828 struct sk_if_softc *sc_if; 829 { 830 struct sk_ring_data *rd; 831 bus_addr_t addr; 832 u_int32_t csum_start; 833 int i; 834 835 sc_if->sk_cdata.sk_rx_cons = 0; 836 837 csum_start = (ETHER_HDR_LEN + sizeof(struct ip)) << 16 | 838 ETHER_HDR_LEN; 839 rd = &sc_if->sk_rdata; 840 bzero(rd->sk_rx_ring, sizeof(struct sk_rx_desc) * SK_RX_RING_CNT); 841 for (i = 0; i < SK_RX_RING_CNT; i++) { 842 if (sk_newbuf(sc_if, i) != 0) 843 return (ENOBUFS); 844 if (i == (SK_RX_RING_CNT - 1)) 845 addr = SK_RX_RING_ADDR(sc_if, 0); 846 else 847 addr = SK_RX_RING_ADDR(sc_if, i + 1); 848 rd->sk_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr)); 849 rd->sk_rx_ring[i].sk_csum_start = htole32(csum_start); 850 } 851 852 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag, 853 sc_if->sk_cdata.sk_rx_ring_map, 854 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 855 856 return(0); 857 } 858 859 static int 860 sk_init_jumbo_rx_ring(sc_if) 861 struct sk_if_softc *sc_if; 862 { 863 struct sk_ring_data *rd; 864 bus_addr_t addr; 865 u_int32_t csum_start; 866 int i; 867 868 sc_if->sk_cdata.sk_jumbo_rx_cons = 0; 869 870 csum_start = ((ETHER_HDR_LEN + sizeof(struct ip)) << 16) | 871 ETHER_HDR_LEN; 872 rd = &sc_if->sk_rdata; 873 bzero(rd->sk_jumbo_rx_ring, 874 sizeof(struct sk_rx_desc) * SK_JUMBO_RX_RING_CNT); 875 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) { 876 if (sk_jumbo_newbuf(sc_if, i) != 0) 877 return (ENOBUFS); 878 if (i == (SK_JUMBO_RX_RING_CNT - 1)) 879 addr = SK_JUMBO_RX_RING_ADDR(sc_if, 0); 880 else 881 addr = SK_JUMBO_RX_RING_ADDR(sc_if, i + 1); 882 rd->sk_jumbo_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr)); 883 rd->sk_jumbo_rx_ring[i].sk_csum_start = htole32(csum_start); 884 } 885 886 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, 887 sc_if->sk_cdata.sk_jumbo_rx_ring_map, 888 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 889 890 return (0); 891 } 892 893 static void 894 sk_init_tx_ring(sc_if) 895 struct sk_if_softc *sc_if; 896 { 897 struct sk_ring_data *rd; 898 struct sk_txdesc *txd; 899 bus_addr_t addr; 900 int i; 901 902 STAILQ_INIT(&sc_if->sk_cdata.sk_txfreeq); 903 STAILQ_INIT(&sc_if->sk_cdata.sk_txbusyq); 904 905 sc_if->sk_cdata.sk_tx_prod = 0; 906 sc_if->sk_cdata.sk_tx_cons = 0; 907 sc_if->sk_cdata.sk_tx_cnt = 0; 908 909 rd = &sc_if->sk_rdata; 910 bzero(rd->sk_tx_ring, sizeof(struct sk_tx_desc) * SK_TX_RING_CNT); 911 for (i = 0; i < SK_TX_RING_CNT; i++) { 912 if (i == (SK_TX_RING_CNT - 1)) 913 addr = SK_TX_RING_ADDR(sc_if, 0); 914 else 915 addr = SK_TX_RING_ADDR(sc_if, i + 1); 916 rd->sk_tx_ring[i].sk_next = htole32(SK_ADDR_LO(addr)); 917 txd = &sc_if->sk_cdata.sk_txdesc[i]; 918 STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q); 919 } 920 921 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag, 922 sc_if->sk_cdata.sk_tx_ring_map, 923 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 924 } 925 926 static __inline void 927 sk_discard_rxbuf(sc_if, idx) 928 struct sk_if_softc *sc_if; 929 int idx; 930 { 931 struct sk_rx_desc *r; 932 struct sk_rxdesc *rxd; 933 struct mbuf *m; 934 935 936 r = &sc_if->sk_rdata.sk_rx_ring[idx]; 
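/*
 * Recycle the mbuf that is already loaded at this slot: rewriting
 * sk_ctl below hands the descriptor back to the NIC, so dropping a
 * frame never leaves an empty slot in the ring.
 */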
937 rxd = &sc_if->sk_cdata.sk_rxdesc[idx]; 938 m = rxd->rx_m; 939 r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM); 940 } 941 942 static __inline void 943 sk_discard_jumbo_rxbuf(sc_if, idx) 944 struct sk_if_softc *sc_if; 945 int idx; 946 { 947 struct sk_rx_desc *r; 948 struct sk_rxdesc *rxd; 949 struct mbuf *m; 950 951 r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx]; 952 rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx]; 953 m = rxd->rx_m; 954 r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM); 955 } 956 957 static int 958 sk_newbuf(sc_if, idx) 959 struct sk_if_softc *sc_if; 960 int idx; 961 { 962 struct sk_rx_desc *r; 963 struct sk_rxdesc *rxd; 964 struct mbuf *m; 965 bus_dma_segment_t segs[1]; 966 bus_dmamap_t map; 967 int nsegs; 968 969 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 970 if (m == NULL) 971 return (ENOBUFS); 972 m->m_len = m->m_pkthdr.len = MCLBYTES; 973 m_adj(m, ETHER_ALIGN); 974 975 if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_rx_tag, 976 sc_if->sk_cdata.sk_rx_sparemap, m, segs, &nsegs, 0) != 0) { 977 m_freem(m); 978 return (ENOBUFS); 979 } 980 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 981 982 rxd = &sc_if->sk_cdata.sk_rxdesc[idx]; 983 if (rxd->rx_m != NULL) { 984 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap, 985 BUS_DMASYNC_POSTREAD); 986 bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap); 987 } 988 map = rxd->rx_dmamap; 989 rxd->rx_dmamap = sc_if->sk_cdata.sk_rx_sparemap; 990 sc_if->sk_cdata.sk_rx_sparemap = map; 991 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap, 992 BUS_DMASYNC_PREREAD); 993 rxd->rx_m = m; 994 r = &sc_if->sk_rdata.sk_rx_ring[idx]; 995 r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr)); 996 r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr)); 997 r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM); 998 999 return (0); 1000 } 1001 1002 static int 1003 sk_jumbo_newbuf(sc_if, idx) 1004 struct sk_if_softc *sc_if; 1005 int idx; 1006 { 1007 struct sk_rx_desc *r; 1008 struct sk_rxdesc *rxd; 1009 struct mbuf *m; 1010 bus_dma_segment_t segs[1]; 1011 bus_dmamap_t map; 1012 int nsegs; 1013 1014 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES); 1015 if (m == NULL) 1016 return (ENOBUFS); 1017 m->m_pkthdr.len = m->m_len = MJUM9BYTES; 1018 /* 1019 * Adjust alignment so packet payload begins on a 1020 * longword boundary. Mandatory for Alpha, useful on 1021 * x86 too. 1022 */ 1023 m_adj(m, ETHER_ALIGN); 1024 1025 if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_jumbo_rx_tag, 1026 sc_if->sk_cdata.sk_jumbo_rx_sparemap, m, segs, &nsegs, 0) != 0) { 1027 m_freem(m); 1028 return (ENOBUFS); 1029 } 1030 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1031 1032 rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx]; 1033 if (rxd->rx_m != NULL) { 1034 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap, 1035 BUS_DMASYNC_POSTREAD); 1036 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag, 1037 rxd->rx_dmamap); 1038 } 1039 map = rxd->rx_dmamap; 1040 rxd->rx_dmamap = sc_if->sk_cdata.sk_jumbo_rx_sparemap; 1041 sc_if->sk_cdata.sk_jumbo_rx_sparemap = map; 1042 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap, 1043 BUS_DMASYNC_PREREAD); 1044 rxd->rx_m = m; 1045 r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx]; 1046 r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr)); 1047 r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr)); 1048 r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM); 1049 1050 return (0); 1051 } 1052 1053 /* 1054 * Set media options. 
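 * Called through ifmedia when the user changes the media setting;
 * reinitialize the interface and let the PHY renegotiate.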
1055 */ 1056 static int 1057 sk_ifmedia_upd(ifp) 1058 struct ifnet *ifp; 1059 { 1060 struct sk_if_softc *sc_if = ifp->if_softc; 1061 struct mii_data *mii; 1062 1063 mii = device_get_softc(sc_if->sk_miibus); 1064 sk_init(sc_if); 1065 mii_mediachg(mii); 1066 1067 return(0); 1068 } 1069 1070 /* 1071 * Report current media status. 1072 */ 1073 static void 1074 sk_ifmedia_sts(ifp, ifmr) 1075 struct ifnet *ifp; 1076 struct ifmediareq *ifmr; 1077 { 1078 struct sk_if_softc *sc_if; 1079 struct mii_data *mii; 1080 1081 sc_if = ifp->if_softc; 1082 mii = device_get_softc(sc_if->sk_miibus); 1083 1084 mii_pollstat(mii); 1085 ifmr->ifm_active = mii->mii_media_active; 1086 ifmr->ifm_status = mii->mii_media_status; 1087 1088 return; 1089 } 1090 1091 static int 1092 sk_ioctl(ifp, command, data) 1093 struct ifnet *ifp; 1094 u_long command; 1095 caddr_t data; 1096 { 1097 struct sk_if_softc *sc_if = ifp->if_softc; 1098 struct ifreq *ifr = (struct ifreq *) data; 1099 int error, mask; 1100 struct mii_data *mii; 1101 1102 error = 0; 1103 switch(command) { 1104 case SIOCSIFMTU: 1105 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > SK_JUMBO_MTU) 1106 error = EINVAL; 1107 else if (ifp->if_mtu != ifr->ifr_mtu) { 1108 if (sc_if->sk_jumbo_disable != 0 && 1109 ifr->ifr_mtu > SK_MAX_FRAMELEN) 1110 error = EINVAL; 1111 else { 1112 SK_IF_LOCK(sc_if); 1113 ifp->if_mtu = ifr->ifr_mtu; 1114 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1115 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1116 sk_init_locked(sc_if); 1117 } 1118 SK_IF_UNLOCK(sc_if); 1119 } 1120 } 1121 break; 1122 case SIOCSIFFLAGS: 1123 SK_IF_LOCK(sc_if); 1124 if (ifp->if_flags & IFF_UP) { 1125 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1126 if ((ifp->if_flags ^ sc_if->sk_if_flags) 1127 & (IFF_PROMISC | IFF_ALLMULTI)) 1128 sk_rxfilter(sc_if); 1129 } else 1130 sk_init_locked(sc_if); 1131 } else { 1132 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1133 sk_stop(sc_if); 1134 } 1135 sc_if->sk_if_flags = ifp->if_flags; 1136 SK_IF_UNLOCK(sc_if); 1137 break; 1138 case SIOCADDMULTI: 1139 case SIOCDELMULTI: 1140 SK_IF_LOCK(sc_if); 1141 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1142 sk_rxfilter(sc_if); 1143 SK_IF_UNLOCK(sc_if); 1144 break; 1145 case SIOCGIFMEDIA: 1146 case SIOCSIFMEDIA: 1147 mii = device_get_softc(sc_if->sk_miibus); 1148 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 1149 break; 1150 case SIOCSIFCAP: 1151 SK_IF_LOCK(sc_if); 1152 if (sc_if->sk_softc->sk_type == SK_GENESIS) { 1153 SK_IF_UNLOCK(sc_if); 1154 break; 1155 } 1156 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1157 if ((mask & IFCAP_TXCSUM) != 0 && 1158 (IFCAP_TXCSUM & ifp->if_capabilities) != 0) { 1159 ifp->if_capenable ^= IFCAP_TXCSUM; 1160 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 1161 ifp->if_hwassist |= SK_CSUM_FEATURES; 1162 else 1163 ifp->if_hwassist &= ~SK_CSUM_FEATURES; 1164 } 1165 if ((mask & IFCAP_RXCSUM) != 0 && 1166 (IFCAP_RXCSUM & ifp->if_capabilities) != 0) 1167 ifp->if_capenable ^= IFCAP_RXCSUM; 1168 SK_IF_UNLOCK(sc_if); 1169 break; 1170 default: 1171 error = ether_ioctl(ifp, command, data); 1172 break; 1173 } 1174 1175 return (error); 1176 } 1177 1178 /* 1179 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device 1180 * IDs against our list and return a device name if we find a match. 1181 */ 1182 static int 1183 skc_probe(dev) 1184 device_t dev; 1185 { 1186 const struct sk_type *t = sk_devs; 1187 1188 while(t->sk_name != NULL) { 1189 if ((pci_get_vendor(dev) == t->sk_vid) && 1190 (pci_get_device(dev) == t->sk_did)) { 1191 /* 1192 * Only attach to rev. 
2 of the Linksys EG1032 adapter. 1193 * Rev. 3 is supported by re(4). 1194 */ 1195 if ((t->sk_vid == VENDORID_LINKSYS) && 1196 (t->sk_did == DEVICEID_LINKSYS_EG1032) && 1197 (pci_get_subdevice(dev) != 1198 SUBDEVICEID_LINKSYS_EG1032_REV2)) { 1199 t++; 1200 continue; 1201 } 1202 device_set_desc(dev, t->sk_name); 1203 return (BUS_PROBE_DEFAULT); 1204 } 1205 t++; 1206 } 1207 1208 return(ENXIO); 1209 } 1210 1211 /* 1212 * Force the GEnesis into reset, then bring it out of reset. 1213 */ 1214 static void 1215 sk_reset(sc) 1216 struct sk_softc *sc; 1217 { 1218 1219 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET); 1220 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET); 1221 if (SK_YUKON_FAMILY(sc->sk_type)) 1222 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET); 1223 1224 DELAY(1000); 1225 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET); 1226 DELAY(2); 1227 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET); 1228 if (SK_YUKON_FAMILY(sc->sk_type)) 1229 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR); 1230 1231 if (sc->sk_type == SK_GENESIS) { 1232 /* Configure packet arbiter */ 1233 sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET); 1234 sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT); 1235 sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT); 1236 sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT); 1237 sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT); 1238 } 1239 1240 /* Enable RAM interface */ 1241 sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET); 1242 1243 /* 1244 * Configure interrupt moderation. The moderation timer 1245 * defers interrupts specified in the interrupt moderation 1246 * timer mask based on the timeout specified in the interrupt 1247 * moderation timer init register. Each bit in the timer 1248 * register represents one tick, so to specify a timeout in 1249 * microseconds, we have to multiply by the correct number of 1250 * ticks-per-microsecond. 1251 */ 1252 switch (sc->sk_type) { 1253 case SK_GENESIS: 1254 sc->sk_int_ticks = SK_IMTIMER_TICKS_GENESIS; 1255 break; 1256 default: 1257 sc->sk_int_ticks = SK_IMTIMER_TICKS_YUKON; 1258 break; 1259 } 1260 if (bootverbose) 1261 device_printf(sc->sk_dev, "interrupt moderation is %d us\n", 1262 sc->sk_int_mod); 1263 sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod, 1264 sc->sk_int_ticks)); 1265 sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF| 1266 SK_ISR_RX1_EOF|SK_ISR_RX2_EOF); 1267 sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START); 1268 1269 return; 1270 } 1271 1272 static int 1273 sk_probe(dev) 1274 device_t dev; 1275 { 1276 struct sk_softc *sc; 1277 1278 sc = device_get_softc(device_get_parent(dev)); 1279 1280 /* 1281 * Not much to do here. We always know there will be 1282 * at least one XMAC present, and if there are two, 1283 * skc_attach() will create a second device instance 1284 * for us. 1285 */ 1286 switch (sc->sk_type) { 1287 case SK_GENESIS: 1288 device_set_desc(dev, "XaQti Corp. XMAC II"); 1289 break; 1290 case SK_YUKON: 1291 case SK_YUKON_LITE: 1292 case SK_YUKON_LP: 1293 device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon"); 1294 break; 1295 } 1296 1297 return (BUS_PROBE_DEFAULT); 1298 } 1299 1300 /* 1301 * Each XMAC chip is attached as a separate logical IP interface. 1302 * Single port cards will have only one logical interface of course. 
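 * Each instance allocates its own DMA rings, claims its share of the
 * controller's SRAM, reads its own station address and attaches its
 * PHY via miibus.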
1303 */
1304 static int
1305 sk_attach(dev)
1306 device_t dev;
1307 {
1308 struct sk_softc *sc;
1309 struct sk_if_softc *sc_if;
1310 struct ifnet *ifp;
1311 u_int32_t r;
1312 int error, i, phy, port;
1313 u_char eaddr[6];
1314 u_char inv_mac[] = {0, 0, 0, 0, 0, 0};
1315
1316 if (dev == NULL)
1317 return(EINVAL);
1318
1319 error = 0;
1320 sc_if = device_get_softc(dev);
1321 sc = device_get_softc(device_get_parent(dev));
1322 port = *(int *)device_get_ivars(dev);
1323
1324 sc_if->sk_if_dev = dev;
1325 sc_if->sk_port = port;
1326 sc_if->sk_softc = sc;
1327 sc->sk_if[port] = sc_if;
1328 if (port == SK_PORT_A)
1329 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
1330 if (port == SK_PORT_B)
1331 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
1332
1333 callout_init_mtx(&sc_if->sk_tick_ch, &sc_if->sk_softc->sk_mtx, 0);
1334 callout_init_mtx(&sc_if->sk_watchdog_ch, &sc_if->sk_softc->sk_mtx, 0);
1335
1336 if (sk_dma_alloc(sc_if) != 0) {
1337 error = ENOMEM;
1338 goto fail;
1339 }
1340 sk_dma_jumbo_alloc(sc_if);
1341
1342 ifp = sc_if->sk_ifp = if_alloc(IFT_ETHER);
1343 if (ifp == NULL) {
1344 device_printf(sc_if->sk_if_dev, "can not if_alloc()\n");
1345 error = ENOSPC;
1346 goto fail;
1347 }
1348 ifp->if_softc = sc_if;
1349 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1350 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1351 /*
1352 * SK_GENESIS has a bug in checksum offload (noted in the Linux driver).
1353 */
1354 if (sc_if->sk_softc->sk_type != SK_GENESIS) {
1355 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_RXCSUM;
1356 ifp->if_hwassist = 0;
1357 } else {
1358 ifp->if_capabilities = 0;
1359 ifp->if_hwassist = 0;
1360 }
1361 ifp->if_capenable = ifp->if_capabilities;
1362 /*
1363 * Some revisions of the Yukon controller generate corrupted
1364 * frames when TX checksum offloading is enabled. The frames
1365 * carry a valid checksum value, so the payload might be
1366 * modified during the TX checksum calculation. Disable TX
1367 * checksum offloading by default, but give users a chance to
1368 * enable it when they know their controller works correctly
1369 * with TX checksum offloading.
1370 */
1371 ifp->if_capenable &= ~IFCAP_TXCSUM;
1372 ifp->if_ioctl = sk_ioctl;
1373 ifp->if_start = sk_start;
1374 ifp->if_init = sk_init;
1375 IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1);
1376 ifp->if_snd.ifq_drv_maxlen = SK_TX_RING_CNT - 1;
1377 IFQ_SET_READY(&ifp->if_snd);
1378
1379 /*
1380 * Get station address for this interface. Note that
1381 * dual port cards actually come with three station
1382 * addresses: one for each port, plus an extra. The
1383 * extra one is used by the SysKonnect driver software
1384 * as a 'virtual' station address for when both ports
1385 * are operating in failover mode. Currently we don't
1386 * use this extra address.
1387 */
1388 SK_IF_LOCK(sc_if);
1389 for (i = 0; i < ETHER_ADDR_LEN; i++)
1390 eaddr[i] =
1391 sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);
1392
1393 /* Check whether the station address is invalid (all zeros). */
1394 if (bcmp(eaddr, inv_mac, sizeof(inv_mac)) == 0) {
1395 device_printf(sc_if->sk_if_dev,
1396 "Generating random ethernet address\n");
1397 r = arc4random();
1398 /*
1399 * Set the OUI to a convenient locally assigned address.
1400 * 'b' is 0x62, which has the locally assigned bit set, and
1401 * the broadcast/multicast bit clear.
1402 */
1403 eaddr[0] = 'b';
1404 eaddr[1] = 's';
1405 eaddr[2] = 'd';
1406 eaddr[3] = (r >> 16) & 0xff;
1407 eaddr[4] = (r >> 8) & 0xff;
1408 eaddr[5] = (r >> 0) & 0xff;
1409 }
1410 /*
1411 * Set up RAM buffer addresses.
The NIC will have a certain
1412 * amount of SRAM on it, somewhere between 512K and 2MB. We
1413 * need to divide this up a) between the transmitter and
1414 * receiver and b) between the two XMACs, if this is a
1415 * dual port NIC. Our algorithm is to divide up the memory
1416 * evenly so that everyone gets a fair share.
1417 *
1418 * Just to be contrary, Yukon2 appears to have separate memory
1419 * for each MAC.
1420 */
1421 if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
1422 u_int32_t chunk, val;
1423
1424 chunk = sc->sk_ramsize / 2;
1425 val = sc->sk_rboff / sizeof(u_int64_t);
1426 sc_if->sk_rx_ramstart = val;
1427 val += (chunk / sizeof(u_int64_t));
1428 sc_if->sk_rx_ramend = val - 1;
1429 sc_if->sk_tx_ramstart = val;
1430 val += (chunk / sizeof(u_int64_t));
1431 sc_if->sk_tx_ramend = val - 1;
1432 } else {
1433 u_int32_t chunk, val;
1434
1435 chunk = sc->sk_ramsize / 4;
1436 val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
1437 sizeof(u_int64_t);
1438 sc_if->sk_rx_ramstart = val;
1439 val += (chunk / sizeof(u_int64_t));
1440 sc_if->sk_rx_ramend = val - 1;
1441 sc_if->sk_tx_ramstart = val;
1442 val += (chunk / sizeof(u_int64_t));
1443 sc_if->sk_tx_ramend = val - 1;
1444 }
1445
1446 /* Read and save PHY type and set PHY address */
1447 sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
1448 if (!SK_YUKON_FAMILY(sc->sk_type)) {
1449 switch(sc_if->sk_phytype) {
1450 case SK_PHYTYPE_XMAC:
1451 sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
1452 break;
1453 case SK_PHYTYPE_BCOM:
1454 sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
1455 break;
1456 default:
1457 device_printf(sc->sk_dev, "unsupported PHY type: %d\n",
1458 sc_if->sk_phytype);
1459 error = ENODEV;
1460 SK_IF_UNLOCK(sc_if);
1461 goto fail;
1462 }
1463 } else {
1464 if (sc_if->sk_phytype < SK_PHYTYPE_MARV_COPPER &&
1465 sc->sk_pmd != 'S') {
1466 /* not initialized, punt */
1467 sc_if->sk_phytype = SK_PHYTYPE_MARV_COPPER;
1468 sc->sk_coppertype = 1;
1469 }
1470
1471 sc_if->sk_phyaddr = SK_PHYADDR_MARV;
1472
1473 if (!(sc->sk_coppertype))
1474 sc_if->sk_phytype = SK_PHYTYPE_MARV_FIBER;
1475 }
1476
1477 /*
1478 * Call MI attach routine. Can't hold locks when calling into ether_*.
1479 */
1480 SK_IF_UNLOCK(sc_if);
1481 ether_ifattach(ifp, eaddr);
1482 SK_IF_LOCK(sc_if);
1483
1484 /*
1485 * The hardware should be ready for VLAN_MTU by default:
1486 * XMAC II has 0x8100 in VLAN Tag Level 1 register initially;
1487 * YU_SMR_MFL_VLAN is set by this driver in Yukon.
1488 *
1489 */
1490 ifp->if_capabilities |= IFCAP_VLAN_MTU;
1491 ifp->if_capenable |= IFCAP_VLAN_MTU;
1492 /*
1493 * Tell the upper layer(s) we support long frames.
1494 * Must appear after the call to ether_ifattach() because
1495 * ether_ifattach() sets ifi_hdrlen to the default value.
1496 */
1497 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1498
1499 /*
1500 * Do miibus setup.
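 * The PHY address is fixed: 0 for the XMAC's internal PHY and for the
 * Yukon family; for other Genesis PHYs, miibus probes any address.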
1501 */ 1502 phy = MII_PHY_ANY; 1503 switch (sc->sk_type) { 1504 case SK_GENESIS: 1505 sk_init_xmac(sc_if); 1506 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) 1507 phy = 0; 1508 break; 1509 case SK_YUKON: 1510 case SK_YUKON_LITE: 1511 case SK_YUKON_LP: 1512 sk_init_yukon(sc_if); 1513 phy = 0; 1514 break; 1515 } 1516 1517 SK_IF_UNLOCK(sc_if); 1518 error = mii_attach(dev, &sc_if->sk_miibus, ifp, sk_ifmedia_upd, 1519 sk_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0); 1520 if (error != 0) { 1521 device_printf(sc_if->sk_if_dev, "attaching PHYs failed\n"); 1522 ether_ifdetach(ifp); 1523 goto fail; 1524 } 1525 1526 fail: 1527 if (error) { 1528 /* Access should be ok even though lock has been dropped */ 1529 sc->sk_if[port] = NULL; 1530 sk_detach(dev); 1531 } 1532 1533 return(error); 1534 } 1535 1536 /* 1537 * Attach the interface. Allocate softc structures, do ifmedia 1538 * setup and ethernet/BPF attach. 1539 */ 1540 static int 1541 skc_attach(dev) 1542 device_t dev; 1543 { 1544 struct sk_softc *sc; 1545 int error = 0, *port; 1546 uint8_t skrs; 1547 const char *pname = NULL; 1548 char *revstr; 1549 1550 sc = device_get_softc(dev); 1551 sc->sk_dev = dev; 1552 1553 mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 1554 MTX_DEF); 1555 mtx_init(&sc->sk_mii_mtx, "sk_mii_mutex", NULL, MTX_DEF); 1556 /* 1557 * Map control/status registers. 1558 */ 1559 pci_enable_busmaster(dev); 1560 1561 /* Allocate resources */ 1562 #ifdef SK_USEIOSPACE 1563 sc->sk_res_spec = sk_res_spec_io; 1564 #else 1565 sc->sk_res_spec = sk_res_spec_mem; 1566 #endif 1567 error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res); 1568 if (error) { 1569 if (sc->sk_res_spec == sk_res_spec_mem) 1570 sc->sk_res_spec = sk_res_spec_io; 1571 else 1572 sc->sk_res_spec = sk_res_spec_mem; 1573 error = bus_alloc_resources(dev, sc->sk_res_spec, sc->sk_res); 1574 if (error) { 1575 device_printf(dev, "couldn't allocate %s resources\n", 1576 sc->sk_res_spec == sk_res_spec_mem ? "memory" : 1577 "I/O"); 1578 goto fail; 1579 } 1580 } 1581 1582 sc->sk_type = sk_win_read_1(sc, SK_CHIPVER); 1583 sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4) & 0xf; 1584 1585 /* Bail out if chip is not recognized. */ 1586 if (sc->sk_type != SK_GENESIS && !SK_YUKON_FAMILY(sc->sk_type)) { 1587 device_printf(dev, "unknown device: chipver=%02x, rev=%x\n", 1588 sc->sk_type, sc->sk_rev); 1589 error = ENXIO; 1590 goto fail; 1591 } 1592 1593 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 1594 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 1595 OID_AUTO, "int_mod", CTLTYPE_INT|CTLFLAG_RW, 1596 &sc->sk_int_mod, 0, sysctl_hw_sk_int_mod, "I", 1597 "SK interrupt moderation"); 1598 1599 /* Pull in device tunables. */ 1600 sc->sk_int_mod = SK_IM_DEFAULT; 1601 error = resource_int_value(device_get_name(dev), device_get_unit(dev), 1602 "int_mod", &sc->sk_int_mod); 1603 if (error == 0) { 1604 if (sc->sk_int_mod < SK_IM_MIN || 1605 sc->sk_int_mod > SK_IM_MAX) { 1606 device_printf(dev, "int_mod value out of range; " 1607 "using default: %d\n", SK_IM_DEFAULT); 1608 sc->sk_int_mod = SK_IM_DEFAULT; 1609 } 1610 } 1611 1612 /* Reset the adapter. 
*/
1613 sk_reset(sc);
1614
1615 skrs = sk_win_read_1(sc, SK_EPROM0);
1616 if (sc->sk_type == SK_GENESIS) {
1617 /* Read and save RAM size and RAMbuffer offset */
1618 switch(skrs) {
1619 case SK_RAMSIZE_512K_64:
1620 sc->sk_ramsize = 0x80000;
1621 sc->sk_rboff = SK_RBOFF_0;
1622 break;
1623 case SK_RAMSIZE_1024K_64:
1624 sc->sk_ramsize = 0x100000;
1625 sc->sk_rboff = SK_RBOFF_80000;
1626 break;
1627 case SK_RAMSIZE_1024K_128:
1628 sc->sk_ramsize = 0x100000;
1629 sc->sk_rboff = SK_RBOFF_0;
1630 break;
1631 case SK_RAMSIZE_2048K_128:
1632 sc->sk_ramsize = 0x200000;
1633 sc->sk_rboff = SK_RBOFF_0;
1634 break;
1635 default:
1636 device_printf(dev, "unknown ram size: %d\n", skrs);
1637 error = ENXIO;
1638 goto fail;
1639 }
1640 } else { /* SK_YUKON_FAMILY */
1641 if (skrs == 0x00)
1642 sc->sk_ramsize = 0x20000;
1643 else
1644 sc->sk_ramsize = skrs * (1<<12);
1645 sc->sk_rboff = SK_RBOFF_0;
1646 }
1647
1648 /* Read and save physical media type */
1649 sc->sk_pmd = sk_win_read_1(sc, SK_PMDTYPE);
1650
1651 if (sc->sk_pmd == 'T' || sc->sk_pmd == '1')
1652 sc->sk_coppertype = 1;
1653 else
1654 sc->sk_coppertype = 0;
1655
1656 /* Determine whether to name it with VPD PN or just make it up.
1657 * Marvell Yukon VPD PN seems to frequently be bogus. */
1658 switch (pci_get_device(dev)) {
1659 case DEVICEID_SK_V1:
1660 case DEVICEID_BELKIN_5005:
1661 case DEVICEID_3COM_3C940:
1662 case DEVICEID_LINKSYS_EG1032:
1663 case DEVICEID_DLINK_DGE530T_A1:
1664 case DEVICEID_DLINK_DGE530T_B1:
1665 /* Stay with VPD PN. */
1666 (void) pci_get_vpd_ident(dev, &pname);
1667 break;
1668 case DEVICEID_SK_V2:
1669 /* YUKON VPD PN might bear no resemblance to reality. */
1670 switch (sc->sk_type) {
1671 case SK_GENESIS:
1672 /* Stay with VPD PN. */
1673 (void) pci_get_vpd_ident(dev, &pname);
1674 break;
1675 case SK_YUKON:
1676 pname = "Marvell Yukon Gigabit Ethernet";
1677 break;
1678 case SK_YUKON_LITE:
1679 pname = "Marvell Yukon Lite Gigabit Ethernet";
1680 break;
1681 case SK_YUKON_LP:
1682 pname = "Marvell Yukon LP Gigabit Ethernet";
1683 break;
1684 default:
1685 pname = "Marvell Yukon (Unknown) Gigabit Ethernet";
1686 break;
1687 }
1688
1689 /* Yukon Lite Rev. A0 needs special test. */
1690 if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) {
1691 u_int32_t far;
1692 u_int8_t testbyte;
1693
1694 /* Save flash address register before testing. */
1695 far = sk_win_read_4(sc, SK_EP_ADDR);
1696
1697 sk_win_write_1(sc, SK_EP_ADDR+0x03, 0xff);
1698 testbyte = sk_win_read_1(sc, SK_EP_ADDR+0x03);
1699
1700 if (testbyte != 0x00) {
1701 /* Yukon Lite Rev. A0 detected. */
1702 sc->sk_type = SK_YUKON_LITE;
1703 sc->sk_rev = SK_YUKON_LITE_REV_A0;
1704 /* Restore flash address register. */
1705 sk_win_write_4(sc, SK_EP_ADDR, far);
1706 }
1707 }
1708 break;
1709 default:
1710 device_printf(dev, "unknown device: vendor=%04x, device=%04x, "
1711 "chipver=%02x, rev=%x\n",
1712 pci_get_vendor(dev), pci_get_device(dev),
1713 sc->sk_type, sc->sk_rev);
1714 error = ENXIO;
1715 goto fail;
1716 }
1717
1718 if (sc->sk_type == SK_YUKON_LITE) {
1719 switch (sc->sk_rev) {
1720 case SK_YUKON_LITE_REV_A0:
1721 revstr = "A0";
1722 break;
1723 case SK_YUKON_LITE_REV_A1:
1724 revstr = "A1";
1725 break;
1726 case SK_YUKON_LITE_REV_A3:
1727 revstr = "A3";
1728 break;
1729 default:
1730 revstr = "";
1731 break;
1732 }
1733 } else {
1734 revstr = "";
1735 }
1736
1737 /* Announce the product name and more VPD data if available. */
1738 if (pname != NULL)
1739 device_printf(dev, "%s rev.
%s(0x%x)\n",
1740 pname, revstr, sc->sk_rev);
1741
1742 if (bootverbose) {
1743 device_printf(dev, "chip ver = 0x%02x\n", sc->sk_type);
1744 device_printf(dev, "chip rev = 0x%02x\n", sc->sk_rev);
1745 device_printf(dev, "SK_EPROM0 = 0x%02x\n", skrs);
1746 device_printf(dev, "SRAM size = 0x%06x\n", sc->sk_ramsize);
1747 }
1748
1749 sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
1750 if (sc->sk_devs[SK_PORT_A] == NULL) {
1751 device_printf(dev, "failed to add child for PORT_A\n");
1752 error = ENXIO;
1753 goto fail;
1754 }
1755 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1756 if (port == NULL) {
1757 device_printf(dev, "failed to allocate memory for "
1758 "ivars of PORT_A\n");
1759 error = ENXIO;
1760 goto fail;
1761 }
1762 *port = SK_PORT_A;
1763 device_set_ivars(sc->sk_devs[SK_PORT_A], port);
1764
1765 if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
1766 sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
1767 if (sc->sk_devs[SK_PORT_B] == NULL) {
1768 device_printf(dev, "failed to add child for PORT_B\n");
1769 error = ENXIO;
1770 goto fail;
1771 }
1772 port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1773 if (port == NULL) {
1774 device_printf(dev, "failed to allocate memory for "
1775 "ivars of PORT_B\n");
1776 error = ENXIO;
1777 goto fail;
1778 }
1779 *port = SK_PORT_B;
1780 device_set_ivars(sc->sk_devs[SK_PORT_B], port);
1781 }
1782
1783 /* Turn on the 'driver is loaded' LED. */
1784 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1785
1786 error = bus_generic_attach(dev);
1787 if (error) {
1788 device_printf(dev, "failed to attach port(s)\n");
1789 goto fail;
1790 }
1791
1792 /* Hook interrupt last to avoid having to lock softc */
1793 error = bus_setup_intr(dev, sc->sk_res[1], INTR_TYPE_NET|INTR_MPSAFE,
1794 NULL, sk_intr, sc, &sc->sk_intrhand);
1795
1796 if (error) {
1797 device_printf(dev, "couldn't set up irq\n");
1798 goto fail;
1799 }
1800
1801 fail:
1802 if (error)
1803 skc_detach(dev);
1804
1805 return(error);
1806 }
1807
1808 /*
1809 * Shut down hardware and free up resources. This can be called any
1810 * time after the mutex has been initialized. It is called in both
1811 * the error case in attach and the normal detach case so it needs
1812 * to be careful about only freeing resources that have actually been
1813 * allocated.
1814 */
1815 static int
1816 sk_detach(dev)
1817 device_t dev;
1818 {
1819 struct sk_if_softc *sc_if;
1820 struct ifnet *ifp;
1821
1822 sc_if = device_get_softc(dev);
1823 KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx),
1824 ("sk mutex not initialized in sk_detach"));
1825 SK_IF_LOCK(sc_if);
1826
1827 ifp = sc_if->sk_ifp;
1828 /* These should only be active if sk_attach() succeeded */
1829 if (device_is_attached(dev)) {
1830 sk_stop(sc_if);
1831 /* Can't hold locks while calling detach */
1832 SK_IF_UNLOCK(sc_if);
1833 callout_drain(&sc_if->sk_tick_ch);
1834 callout_drain(&sc_if->sk_watchdog_ch);
1835 ether_ifdetach(ifp);
1836 SK_IF_LOCK(sc_if);
1837 }
1838 /*
1839 * We're generally called from skc_detach(), which uses
1840 * device_delete_child() to get here. It has already torn down
1841 * miibus for us, so don't do it here or we'll panic.
1842 */
1843 /*
1844 if (sc_if->sk_miibus != NULL)
1845 device_delete_child(dev, sc_if->sk_miibus);
1846 */
1847 bus_generic_detach(dev);
1848 sk_dma_jumbo_free(sc_if);
1849 sk_dma_free(sc_if);
1850 SK_IF_UNLOCK(sc_if);
1851 if (ifp)
1852 if_free(ifp);
1853
1854 return(0);
1855 }
1856
1857 static int
1858 skc_detach(dev)
1859 device_t dev;
1860 {
1861 struct sk_softc *sc;
1862
1863 sc = device_get_softc(dev);
1864 KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized"));
1865
1866 if (device_is_alive(dev)) {
1867 if (sc->sk_devs[SK_PORT_A] != NULL) {
1868 free(device_get_ivars(sc->sk_devs[SK_PORT_A]), M_DEVBUF);
1869 device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
1870 }
1871 if (sc->sk_devs[SK_PORT_B] != NULL) {
1872 free(device_get_ivars(sc->sk_devs[SK_PORT_B]), M_DEVBUF);
1873 device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
1874 }
1875 bus_generic_detach(dev);
1876 }
1877
1878 if (sc->sk_intrhand)
1879 bus_teardown_intr(dev, sc->sk_res[1], sc->sk_intrhand);
1880 bus_release_resources(dev, sc->sk_res_spec, sc->sk_res);
1881
1882 mtx_destroy(&sc->sk_mii_mtx);
1883 mtx_destroy(&sc->sk_mtx);
1884
1885 return(0);
1886 }
1887
1888 static bus_dma_tag_t
1889 skc_get_dma_tag(device_t bus, device_t child __unused)
1890 {
1891
1892 return (bus_get_dma_tag(bus));
1893 }
1894
1895 struct sk_dmamap_arg {
1896 bus_addr_t sk_busaddr;
1897 };
1898
1899 static void
1900 sk_dmamap_cb(arg, segs, nseg, error)
1901 void *arg;
1902 bus_dma_segment_t *segs;
1903 int nseg;
1904 int error;
1905 {
1906 struct sk_dmamap_arg *ctx;
1907
1908 if (error != 0)
1909 return;
1910
1911 ctx = arg;
1912 ctx->sk_busaddr = segs[0].ds_addr;
1913 }
1914
1915 /*
1916 * Allocate jumbo buffer storage. The SysKonnect adapters support
1917 * "jumbograms" (9K frames), although SysKonnect doesn't currently
1918 * use them in their drivers. In order for us to use them, we need
1919 * large 9K receive buffers; however, standard mbuf clusters are only
1920 * 2048 bytes in size. Consequently, we need to allocate and manage
1921 * our own jumbo buffer pool. Fortunately, this does not require an
1922 * excessive amount of additional code.
1923 */
1924 static int
1925 sk_dma_alloc(sc_if)
1926 struct sk_if_softc *sc_if;
1927 {
1928 struct sk_dmamap_arg ctx;
1929 struct sk_txdesc *txd;
1930 struct sk_rxdesc *rxd;
1931 int error, i;
1932
1933 /* create parent tag */
1934 /*
1935 * XXX
1936 * This driver should use BUS_SPACE_MAXADDR for the lowaddr argument
1937 * in bus_dma_tag_create(9), as the NIC should support DAC mode.
1938 * However, bz@ reported that it does not work on amd64 with > 4GB
1939 * RAM. Until we have more clues about the breakage, disable DAC mode
1940 * by limiting DMA addresses to the 32-bit address space.
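 * Hence BUS_SPACE_MAXADDR_32BIT is used as the lowaddr of the parent
 * tag created below; busdma bounces any buffer above 4GB for us.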
1941 */ 1942 error = bus_dma_tag_create( 1943 bus_get_dma_tag(sc_if->sk_if_dev),/* parent */ 1944 1, 0, /* algnmnt, boundary */ 1945 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1946 BUS_SPACE_MAXADDR, /* highaddr */ 1947 NULL, NULL, /* filter, filterarg */ 1948 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1949 0, /* nsegments */ 1950 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1951 0, /* flags */ 1952 NULL, NULL, /* lockfunc, lockarg */ 1953 &sc_if->sk_cdata.sk_parent_tag); 1954 if (error != 0) { 1955 device_printf(sc_if->sk_if_dev, 1956 "failed to create parent DMA tag\n"); 1957 goto fail; 1958 } 1959 1960 /* create tag for Tx ring */ 1961 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ 1962 SK_RING_ALIGN, 0, /* algnmnt, boundary */ 1963 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1964 BUS_SPACE_MAXADDR, /* highaddr */ 1965 NULL, NULL, /* filter, filterarg */ 1966 SK_TX_RING_SZ, /* maxsize */ 1967 1, /* nsegments */ 1968 SK_TX_RING_SZ, /* maxsegsize */ 1969 0, /* flags */ 1970 NULL, NULL, /* lockfunc, lockarg */ 1971 &sc_if->sk_cdata.sk_tx_ring_tag); 1972 if (error != 0) { 1973 device_printf(sc_if->sk_if_dev, 1974 "failed to allocate Tx ring DMA tag\n"); 1975 goto fail; 1976 } 1977 1978 /* create tag for Rx ring */ 1979 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ 1980 SK_RING_ALIGN, 0, /* algnmnt, boundary */ 1981 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1982 BUS_SPACE_MAXADDR, /* highaddr */ 1983 NULL, NULL, /* filter, filterarg */ 1984 SK_RX_RING_SZ, /* maxsize */ 1985 1, /* nsegments */ 1986 SK_RX_RING_SZ, /* maxsegsize */ 1987 0, /* flags */ 1988 NULL, NULL, /* lockfunc, lockarg */ 1989 &sc_if->sk_cdata.sk_rx_ring_tag); 1990 if (error != 0) { 1991 device_printf(sc_if->sk_if_dev, 1992 "failed to allocate Rx ring DMA tag\n"); 1993 goto fail; 1994 } 1995 1996 /* create tag for Tx buffers */ 1997 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ 1998 1, 0, /* algnmnt, boundary */ 1999 BUS_SPACE_MAXADDR, /* lowaddr */ 2000 BUS_SPACE_MAXADDR, /* highaddr */ 2001 NULL, NULL, /* filter, filterarg */ 2002 MCLBYTES * SK_MAXTXSEGS, /* maxsize */ 2003 SK_MAXTXSEGS, /* nsegments */ 2004 MCLBYTES, /* maxsegsize */ 2005 0, /* flags */ 2006 NULL, NULL, /* lockfunc, lockarg */ 2007 &sc_if->sk_cdata.sk_tx_tag); 2008 if (error != 0) { 2009 device_printf(sc_if->sk_if_dev, 2010 "failed to allocate Tx DMA tag\n"); 2011 goto fail; 2012 } 2013 2014 /* create tag for Rx buffers */ 2015 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ 2016 1, 0, /* algnmnt, boundary */ 2017 BUS_SPACE_MAXADDR, /* lowaddr */ 2018 BUS_SPACE_MAXADDR, /* highaddr */ 2019 NULL, NULL, /* filter, filterarg */ 2020 MCLBYTES, /* maxsize */ 2021 1, /* nsegments */ 2022 MCLBYTES, /* maxsegsize */ 2023 0, /* flags */ 2024 NULL, NULL, /* lockfunc, lockarg */ 2025 &sc_if->sk_cdata.sk_rx_tag); 2026 if (error != 0) { 2027 device_printf(sc_if->sk_if_dev, 2028 "failed to allocate Rx DMA tag\n"); 2029 goto fail; 2030 } 2031 2032 /* allocate DMA'able memory and load the DMA map for Tx ring */ 2033 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_tx_ring_tag, 2034 (void **)&sc_if->sk_rdata.sk_tx_ring, BUS_DMA_NOWAIT | 2035 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->sk_cdata.sk_tx_ring_map); 2036 if (error != 0) { 2037 device_printf(sc_if->sk_if_dev, 2038 "failed to allocate DMA'able memory for Tx ring\n"); 2039 goto fail; 2040 } 2041 2042 ctx.sk_busaddr = 0; 2043 error = bus_dmamap_load(sc_if->sk_cdata.sk_tx_ring_tag, 2044 sc_if->sk_cdata.sk_tx_ring_map, sc_if->sk_rdata.sk_tx_ring, 2045 
SK_TX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 2046 if (error != 0) { 2047 device_printf(sc_if->sk_if_dev, 2048 "failed to load DMA'able memory for Tx ring\n"); 2049 goto fail; 2050 } 2051 sc_if->sk_rdata.sk_tx_ring_paddr = ctx.sk_busaddr; 2052 2053 /* allocate DMA'able memory and load the DMA map for Rx ring */ 2054 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_rx_ring_tag, 2055 (void **)&sc_if->sk_rdata.sk_rx_ring, BUS_DMA_NOWAIT | 2056 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc_if->sk_cdata.sk_rx_ring_map); 2057 if (error != 0) { 2058 device_printf(sc_if->sk_if_dev, 2059 "failed to allocate DMA'able memory for Rx ring\n"); 2060 goto fail; 2061 } 2062 2063 ctx.sk_busaddr = 0; 2064 error = bus_dmamap_load(sc_if->sk_cdata.sk_rx_ring_tag, 2065 sc_if->sk_cdata.sk_rx_ring_map, sc_if->sk_rdata.sk_rx_ring, 2066 SK_RX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 2067 if (error != 0) { 2068 device_printf(sc_if->sk_if_dev, 2069 "failed to load DMA'able memory for Rx ring\n"); 2070 goto fail; 2071 } 2072 sc_if->sk_rdata.sk_rx_ring_paddr = ctx.sk_busaddr; 2073 2074 /* create DMA maps for Tx buffers */ 2075 for (i = 0; i < SK_TX_RING_CNT; i++) { 2076 txd = &sc_if->sk_cdata.sk_txdesc[i]; 2077 txd->tx_m = NULL; 2078 txd->tx_dmamap = NULL; 2079 error = bus_dmamap_create(sc_if->sk_cdata.sk_tx_tag, 0, 2080 &txd->tx_dmamap); 2081 if (error != 0) { 2082 device_printf(sc_if->sk_if_dev, 2083 "failed to create Tx dmamap\n"); 2084 goto fail; 2085 } 2086 } 2087 2088 /* create DMA maps for Rx buffers */ 2089 if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0, 2090 &sc_if->sk_cdata.sk_rx_sparemap)) != 0) { 2091 device_printf(sc_if->sk_if_dev, 2092 "failed to create spare Rx dmamap\n"); 2093 goto fail; 2094 } 2095 for (i = 0; i < SK_RX_RING_CNT; i++) { 2096 rxd = &sc_if->sk_cdata.sk_rxdesc[i]; 2097 rxd->rx_m = NULL; 2098 rxd->rx_dmamap = NULL; 2099 error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0, 2100 &rxd->rx_dmamap); 2101 if (error != 0) { 2102 device_printf(sc_if->sk_if_dev, 2103 "failed to create Rx dmamap\n"); 2104 goto fail; 2105 } 2106 } 2107 2108 fail: 2109 return (error); 2110 } 2111 2112 static int 2113 sk_dma_jumbo_alloc(sc_if) 2114 struct sk_if_softc *sc_if; 2115 { 2116 struct sk_dmamap_arg ctx; 2117 struct sk_rxdesc *jrxd; 2118 int error, i; 2119 2120 if (jumbo_disable != 0) { 2121 device_printf(sc_if->sk_if_dev, "disabling jumbo frame support\n"); 2122 sc_if->sk_jumbo_disable = 1; 2123 return (0); 2124 } 2125 /* create tag for jumbo Rx ring */ 2126 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ 2127 SK_RING_ALIGN, 0, /* algnmnt, boundary */ 2128 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 2129 BUS_SPACE_MAXADDR, /* highaddr */ 2130 NULL, NULL, /* filter, filterarg */ 2131 SK_JUMBO_RX_RING_SZ, /* maxsize */ 2132 1, /* nsegments */ 2133 SK_JUMBO_RX_RING_SZ, /* maxsegsize */ 2134 0, /* flags */ 2135 NULL, NULL, /* lockfunc, lockarg */ 2136 &sc_if->sk_cdata.sk_jumbo_rx_ring_tag); 2137 if (error != 0) { 2138 device_printf(sc_if->sk_if_dev, 2139 "failed to allocate jumbo Rx ring DMA tag\n"); 2140 goto jumbo_fail; 2141 } 2142 2143 /* create tag for jumbo Rx buffers */ 2144 error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ 2145 1, 0, /* algnmnt, boundary */ 2146 BUS_SPACE_MAXADDR, /* lowaddr */ 2147 BUS_SPACE_MAXADDR, /* highaddr */ 2148 NULL, NULL, /* filter, filterarg */ 2149 MJUM9BYTES, /* maxsize */ 2150 1, /* nsegments */ 2151 MJUM9BYTES, /* maxsegsize */ 2152 0, /* flags */ 2153 NULL, NULL, /* lockfunc, lockarg */ 2154 
&sc_if->sk_cdata.sk_jumbo_rx_tag); 2155 if (error != 0) { 2156 device_printf(sc_if->sk_if_dev, 2157 "failed to allocate jumbo Rx DMA tag\n"); 2158 goto jumbo_fail; 2159 } 2160 2161 /* allocate DMA'able memory and load the DMA map for jumbo Rx ring */ 2162 error = bus_dmamem_alloc(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, 2163 (void **)&sc_if->sk_rdata.sk_jumbo_rx_ring, BUS_DMA_NOWAIT | 2164 BUS_DMA_COHERENT | BUS_DMA_ZERO, 2165 &sc_if->sk_cdata.sk_jumbo_rx_ring_map); 2166 if (error != 0) { 2167 device_printf(sc_if->sk_if_dev, 2168 "failed to allocate DMA'able memory for jumbo Rx ring\n"); 2169 goto jumbo_fail; 2170 } 2171 2172 ctx.sk_busaddr = 0; 2173 error = bus_dmamap_load(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, 2174 sc_if->sk_cdata.sk_jumbo_rx_ring_map, 2175 sc_if->sk_rdata.sk_jumbo_rx_ring, SK_JUMBO_RX_RING_SZ, sk_dmamap_cb, 2176 &ctx, BUS_DMA_NOWAIT); 2177 if (error != 0) { 2178 device_printf(sc_if->sk_if_dev, 2179 "failed to load DMA'able memory for jumbo Rx ring\n"); 2180 goto jumbo_fail; 2181 } 2182 sc_if->sk_rdata.sk_jumbo_rx_ring_paddr = ctx.sk_busaddr; 2183 2184 /* create DMA maps for jumbo Rx buffers */ 2185 if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0, 2186 &sc_if->sk_cdata.sk_jumbo_rx_sparemap)) != 0) { 2187 device_printf(sc_if->sk_if_dev, 2188 "failed to create spare jumbo Rx dmamap\n"); 2189 goto jumbo_fail; 2190 } 2191 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) { 2192 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i]; 2193 jrxd->rx_m = NULL; 2194 jrxd->rx_dmamap = NULL; 2195 error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0, 2196 &jrxd->rx_dmamap); 2197 if (error != 0) { 2198 device_printf(sc_if->sk_if_dev, 2199 "failed to create jumbo Rx dmamap\n"); 2200 goto jumbo_fail; 2201 } 2202 } 2203 2204 return (0); 2205 2206 jumbo_fail: 2207 sk_dma_jumbo_free(sc_if); 2208 device_printf(sc_if->sk_if_dev, "disabling jumbo frame support due to " 2209 "resource shortage\n"); 2210 sc_if->sk_jumbo_disable = 1; 2211 return (0); 2212 } 2213 2214 static void 2215 sk_dma_free(sc_if) 2216 struct sk_if_softc *sc_if; 2217 { 2218 struct sk_txdesc *txd; 2219 struct sk_rxdesc *rxd; 2220 int i; 2221 2222 /* Tx ring */ 2223 if (sc_if->sk_cdata.sk_tx_ring_tag) { 2224 if (sc_if->sk_rdata.sk_tx_ring_paddr) 2225 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_ring_tag, 2226 sc_if->sk_cdata.sk_tx_ring_map); 2227 if (sc_if->sk_rdata.sk_tx_ring) 2228 bus_dmamem_free(sc_if->sk_cdata.sk_tx_ring_tag, 2229 sc_if->sk_rdata.sk_tx_ring, 2230 sc_if->sk_cdata.sk_tx_ring_map); 2231 sc_if->sk_rdata.sk_tx_ring = NULL; 2232 sc_if->sk_rdata.sk_tx_ring_paddr = 0; 2233 bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_ring_tag); 2234 sc_if->sk_cdata.sk_tx_ring_tag = NULL; 2235 } 2236 /* Rx ring */ 2237 if (sc_if->sk_cdata.sk_rx_ring_tag) { 2238 if (sc_if->sk_rdata.sk_rx_ring_paddr) 2239 bus_dmamap_unload(sc_if->sk_cdata.sk_rx_ring_tag, 2240 sc_if->sk_cdata.sk_rx_ring_map); 2241 if (sc_if->sk_rdata.sk_rx_ring) 2242 bus_dmamem_free(sc_if->sk_cdata.sk_rx_ring_tag, 2243 sc_if->sk_rdata.sk_rx_ring, 2244 sc_if->sk_cdata.sk_rx_ring_map); 2245 sc_if->sk_rdata.sk_rx_ring = NULL; 2246 sc_if->sk_rdata.sk_rx_ring_paddr = 0; 2247 bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_ring_tag); 2248 sc_if->sk_cdata.sk_rx_ring_tag = NULL; 2249 } 2250 /* Tx buffers */ 2251 if (sc_if->sk_cdata.sk_tx_tag) { 2252 for (i = 0; i < SK_TX_RING_CNT; i++) { 2253 txd = &sc_if->sk_cdata.sk_txdesc[i]; 2254 if (txd->tx_dmamap) { 2255 bus_dmamap_destroy(sc_if->sk_cdata.sk_tx_tag, 2256 txd->tx_dmamap); 2257 txd->tx_dmamap = NULL; 2258 } 2259 } 2260 
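		/*
		 * Note: bus_dma(9) requires that all maps created from a
		 * tag be destroyed before the tag itself, which is why the
		 * per-descriptor maps above are torn down first.
		 */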
bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_tag); 2261 sc_if->sk_cdata.sk_tx_tag = NULL; 2262 } 2263 /* Rx buffers */ 2264 if (sc_if->sk_cdata.sk_rx_tag) { 2265 for (i = 0; i < SK_RX_RING_CNT; i++) { 2266 rxd = &sc_if->sk_cdata.sk_rxdesc[i]; 2267 if (rxd->rx_dmamap) { 2268 bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag, 2269 rxd->rx_dmamap); 2270 rxd->rx_dmamap = NULL; 2271 } 2272 } 2273 if (sc_if->sk_cdata.sk_rx_sparemap) { 2274 bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag, 2275 sc_if->sk_cdata.sk_rx_sparemap); 2276 sc_if->sk_cdata.sk_rx_sparemap = NULL; 2277 } 2278 bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_tag); 2279 sc_if->sk_cdata.sk_rx_tag = NULL; 2280 } 2281 2282 if (sc_if->sk_cdata.sk_parent_tag) { 2283 bus_dma_tag_destroy(sc_if->sk_cdata.sk_parent_tag); 2284 sc_if->sk_cdata.sk_parent_tag = NULL; 2285 } 2286 } 2287 2288 static void 2289 sk_dma_jumbo_free(sc_if) 2290 struct sk_if_softc *sc_if; 2291 { 2292 struct sk_rxdesc *jrxd; 2293 int i; 2294 2295 /* jumbo Rx ring */ 2296 if (sc_if->sk_cdata.sk_jumbo_rx_ring_tag) { 2297 if (sc_if->sk_rdata.sk_jumbo_rx_ring_paddr) 2298 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, 2299 sc_if->sk_cdata.sk_jumbo_rx_ring_map); 2300 if (sc_if->sk_rdata.sk_jumbo_rx_ring) 2301 bus_dmamem_free(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, 2302 sc_if->sk_rdata.sk_jumbo_rx_ring, 2303 sc_if->sk_cdata.sk_jumbo_rx_ring_map); 2304 sc_if->sk_rdata.sk_jumbo_rx_ring = NULL; 2305 sc_if->sk_rdata.sk_jumbo_rx_ring_paddr = 0; 2306 bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_ring_tag); 2307 sc_if->sk_cdata.sk_jumbo_rx_ring_tag = NULL; 2308 } 2309 2310 /* jumbo Rx buffers */ 2311 if (sc_if->sk_cdata.sk_jumbo_rx_tag) { 2312 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) { 2313 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i]; 2314 if (jrxd->rx_dmamap) { 2315 bus_dmamap_destroy( 2316 sc_if->sk_cdata.sk_jumbo_rx_tag, 2317 jrxd->rx_dmamap); 2318 jrxd->rx_dmamap = NULL; 2319 } 2320 } 2321 if (sc_if->sk_cdata.sk_jumbo_rx_sparemap) { 2322 bus_dmamap_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag, 2323 sc_if->sk_cdata.sk_jumbo_rx_sparemap); 2324 sc_if->sk_cdata.sk_jumbo_rx_sparemap = NULL; 2325 } 2326 bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag); 2327 sc_if->sk_cdata.sk_jumbo_rx_tag = NULL; 2328 } 2329 } 2330 2331 static void 2332 sk_txcksum(ifp, m, f) 2333 struct ifnet *ifp; 2334 struct mbuf *m; 2335 struct sk_tx_desc *f; 2336 { 2337 struct ip *ip; 2338 u_int16_t offset; 2339 u_int8_t *p; 2340 2341 offset = sizeof(struct ip) + ETHER_HDR_LEN; 2342 for(; m && m->m_len == 0; m = m->m_next) 2343 ; 2344 if (m == NULL || m->m_len < ETHER_HDR_LEN) { 2345 if_printf(ifp, "%s: m_len < ETHER_HDR_LEN\n", __func__); 2346 /* checksum may be corrupted */ 2347 goto sendit; 2348 } 2349 if (m->m_len < ETHER_HDR_LEN + sizeof(u_int32_t)) { 2350 if (m->m_len != ETHER_HDR_LEN) { 2351 if_printf(ifp, "%s: m_len != ETHER_HDR_LEN\n", 2352 __func__); 2353 /* checksum may be corrupted */ 2354 goto sendit; 2355 } 2356 for(m = m->m_next; m && m->m_len == 0; m = m->m_next) 2357 ; 2358 if (m == NULL) { 2359 offset = sizeof(struct ip) + ETHER_HDR_LEN; 2360 /* checksum may be corrupted */ 2361 goto sendit; 2362 } 2363 ip = mtod(m, struct ip *); 2364 } else { 2365 p = mtod(m, u_int8_t *); 2366 p += ETHER_HDR_LEN; 2367 ip = (struct ip *)p; 2368 } 2369 offset = (ip->ip_hl << 2) + ETHER_HDR_LEN; 2370 2371 sendit: 2372 f->sk_csum_startval = 0; 2373 f->sk_csum_start = htole32(((offset + m->m_pkthdr.csum_data) & 0xffff) | 2374 (offset << 16)); 2375 } 2376 2377 static int 2378 sk_encap(sc_if, m_head) 2379 struct sk_if_softc 
	*sc_if;
	struct mbuf		**m_head;
{
	struct sk_txdesc	*txd;
	struct sk_tx_desc	*f = NULL;
	struct mbuf		*m;
	bus_dma_segment_t	txsegs[SK_MAXTXSEGS];
	u_int32_t		cflags, frag, si, sk_ctl;
	int			error, i, nseg;

	SK_IF_LOCK_ASSERT(sc_if);

	if ((txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txfreeq)) == NULL)
		return (ENOBUFS);

	error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
	if (error == EFBIG) {
		m = m_defrag(*m_head, M_NOWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nseg == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}
	if (sc_if->sk_cdata.sk_tx_cnt + nseg >= SK_TX_RING_CNT) {
		bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}

	m = *m_head;
	if ((m->m_pkthdr.csum_flags & sc_if->sk_ifp->if_hwassist) != 0)
		cflags = SK_OPCODE_CSUM;
	else
		cflags = SK_OPCODE_DEFAULT;
	si = frag = sc_if->sk_cdata.sk_tx_prod;
	for (i = 0; i < nseg; i++) {
		f = &sc_if->sk_rdata.sk_tx_ring[frag];
		f->sk_data_lo = htole32(SK_ADDR_LO(txsegs[i].ds_addr));
		f->sk_data_hi = htole32(SK_ADDR_HI(txsegs[i].ds_addr));
		sk_ctl = txsegs[i].ds_len | cflags;
		if (i == 0) {
			if (cflags == SK_OPCODE_CSUM)
				sk_txcksum(sc_if->sk_ifp, m, f);
			sk_ctl |= SK_TXCTL_FIRSTFRAG;
		} else
			sk_ctl |= SK_TXCTL_OWN;
		f->sk_ctl = htole32(sk_ctl);
		sc_if->sk_cdata.sk_tx_cnt++;
		SK_INC(frag, SK_TX_RING_CNT);
	}
	sc_if->sk_cdata.sk_tx_prod = frag;

	/* set EOF on the last descriptor */
	frag = (frag + SK_TX_RING_CNT - 1) % SK_TX_RING_CNT;
	f = &sc_if->sk_rdata.sk_tx_ring[frag];
	f->sk_ctl |= htole32(SK_TXCTL_LASTFRAG | SK_TXCTL_EOF_INTR);

	/* turn ownership of the first descriptor over to the NIC */
	f = &sc_if->sk_rdata.sk_tx_ring[si];
	f->sk_ctl |= htole32(SK_TXCTL_OWN);

	STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txfreeq, tx_q);
	STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txbusyq, txd, tx_q);
	txd->tx_m = m;

	/* sync descriptors */
	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag,
	    sc_if->sk_cdata.sk_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
sk_start(ifp)
	struct ifnet		*ifp;
{
	struct sk_if_softc	*sc_if;

	sc_if = ifp->if_softc;

	SK_IF_LOCK(sc_if);
	sk_start_locked(ifp);
	SK_IF_UNLOCK(sc_if);

	return;
}

static void
sk_start_locked(ifp)
	struct ifnet		*ifp;
{
	struct sk_softc		*sc;
	struct sk_if_softc	*sc_if;
	struct mbuf		*m_head;
	int			enq;

	sc_if = ifp->if_softc;
	sc = sc_if->sk_softc;

	SK_IF_LOCK_ASSERT(sc_if);

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 1; ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (sk_encap(sc_if, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Transmit */
		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

		/* Set a timeout in case the chip goes out to lunch. */
		sc_if->sk_watchdog_timer = 5;
	}
}

static void
sk_watchdog(arg)
	void			*arg;
{
	struct sk_if_softc	*sc_if;
	struct ifnet		*ifp;

	ifp = arg;
	sc_if = ifp->if_softc;

	SK_IF_LOCK_ASSERT(sc_if);

	if (sc_if->sk_watchdog_timer == 0 || --sc_if->sk_watchdog_timer)
		goto done;

	/*
	 * Reclaim first as there is a possibility of losing Tx completion
	 * interrupts.
	 */
	sk_txeof(sc_if);
	if (sc_if->sk_cdata.sk_tx_cnt != 0) {
		if_printf(sc_if->sk_ifp, "watchdog timeout\n");
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		sk_init_locked(sc_if);
	}

done:
	callout_reset(&sc_if->sk_watchdog_ch, hz, sk_watchdog, ifp);

	return;
}

static int
skc_shutdown(dev)
	device_t		dev;
{
	struct sk_softc		*sc;

	sc = device_get_softc(dev);
	SK_LOCK(sc);

	/* Turn off the 'driver is loaded' LED. */
	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);

	/*
	 * Reset the GEnesis controller. Doing this should also
	 * assert the resets on the attached XMAC(s).
	 */
	sk_reset(sc);
	SK_UNLOCK(sc);

	return (0);
}

static int
skc_suspend(dev)
	device_t		dev;
{
	struct sk_softc		*sc;
	struct sk_if_softc	*sc_if0, *sc_if1;
	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;

	sc = device_get_softc(dev);

	SK_LOCK(sc);

	sc_if0 = sc->sk_if[SK_PORT_A];
	sc_if1 = sc->sk_if[SK_PORT_B];
	if (sc_if0 != NULL)
		ifp0 = sc_if0->sk_ifp;
	if (sc_if1 != NULL)
		ifp1 = sc_if1->sk_ifp;
	if (ifp0 != NULL)
		sk_stop(sc_if0);
	if (ifp1 != NULL)
		sk_stop(sc_if1);
	sc->sk_suspended = 1;

	SK_UNLOCK(sc);

	return (0);
}

static int
skc_resume(dev)
	device_t		dev;
{
	struct sk_softc		*sc;
	struct sk_if_softc	*sc_if0, *sc_if1;
	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;

	sc = device_get_softc(dev);

	SK_LOCK(sc);

	sc_if0 = sc->sk_if[SK_PORT_A];
	sc_if1 = sc->sk_if[SK_PORT_B];
	if (sc_if0 != NULL)
		ifp0 = sc_if0->sk_ifp;
	if (sc_if1 != NULL)
		ifp1 = sc_if1->sk_ifp;
	if (ifp0 != NULL && ifp0->if_flags & IFF_UP)
		sk_init_locked(sc_if0);
	if (ifp1 != NULL && ifp1->if_flags & IFF_UP)
		sk_init_locked(sc_if1);
	sc->sk_suspended = 0;

	SK_UNLOCK(sc);

	return (0);
}

/*
 * According to the data sheet for the SK-NET GENESIS, the hardware can
 * compute two Rx checksums at the same time (each checksum start position
 * is programmed in the Rx descriptors). However, it seems that the TCP/UDP
 * checksum does not work, at least on my Yukon hardware. I tried every
 * possible way to get a correct checksum value but couldn't get one.  So TCP/UDP
 * checksum offload is disabled for the moment and only IP checksum offload
 * is enabled.
 * As the normal IP header size is 20 bytes I can't expect it to give much
 * of an increase in throughput. However, it doesn't seem to hurt
 * performance in my testing. If there is more detailed information on the
 * checksum secrets of the hardware in question, please contact
 * yongari@FreeBSD.org to add TCP/UDP checksum offload support.
 */
static __inline void
sk_rxcksum(ifp, m, csum)
	struct ifnet		*ifp;
	struct mbuf		*m;
	u_int32_t		csum;
{
	struct ether_header	*eh;
	struct ip		*ip;
	int32_t			hlen, len, pktlen;
	u_int16_t		csum1, csum2, ipcsum;

	pktlen = m->m_pkthdr.len;
	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
		return;
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type != htons(ETHERTYPE_IP))
		return;
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)
		return;
	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	if (hlen < sizeof(struct ip))
		return;
	if (ntohs(ip->ip_len) < hlen)
		return;
	if (ntohs(ip->ip_len) != pktlen)
		return;

	csum1 = htons(csum & 0xffff);
	csum2 = htons((csum >> 16) & 0xffff);
	ipcsum = in_addword(csum1, ~csum2 & 0xffff);
	/* checksum fixup for IP options */
	len = hlen - sizeof(struct ip);
	if (len > 0) {
		/*
		 * If the second checksum value were correct we could compute
		 * the IP checksum with simple math. Unfortunately the second
		 * checksum value is wrong, so we can't verify the checksum
		 * from that value (it seems there is some magic involved in
		 * getting the correct value). If the second checksum value
		 * were correct it would also mean we could get the TCP/UDP
		 * checksum here. However, that would still require a pseudo
		 * header checksum calculation due to hardware
		 * limitations.
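		 *
		 * (For the option-less case handled below the arithmetic
		 * does work out: assuming the two hardware start positions
		 * are the IP header and the IP payload, csum1 - csum2 in
		 * one's complement arithmetic, i.e. in_addword(csum1,
		 * ~csum2 & 0xffff), is the sum over the header alone, which
		 * must be 0xffff for a valid header checksum.)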
2706 */ 2707 return; 2708 } 2709 m->m_pkthdr.csum_flags = CSUM_IP_CHECKED; 2710 if (ipcsum == 0xffff) 2711 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2712 } 2713 2714 static __inline int 2715 sk_rxvalid(sc, stat, len) 2716 struct sk_softc *sc; 2717 u_int32_t stat, len; 2718 { 2719 2720 if (sc->sk_type == SK_GENESIS) { 2721 if ((stat & XM_RXSTAT_ERRFRAME) == XM_RXSTAT_ERRFRAME || 2722 XM_RXSTAT_BYTES(stat) != len) 2723 return (0); 2724 } else { 2725 if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR | 2726 YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | 2727 YU_RXSTAT_JABBER)) != 0 || 2728 (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK || 2729 YU_RXSTAT_BYTES(stat) != len) 2730 return (0); 2731 } 2732 2733 return (1); 2734 } 2735 2736 static void 2737 sk_rxeof(sc_if) 2738 struct sk_if_softc *sc_if; 2739 { 2740 struct sk_softc *sc; 2741 struct mbuf *m; 2742 struct ifnet *ifp; 2743 struct sk_rx_desc *cur_rx; 2744 struct sk_rxdesc *rxd; 2745 int cons, prog; 2746 u_int32_t csum, rxstat, sk_ctl; 2747 2748 sc = sc_if->sk_softc; 2749 ifp = sc_if->sk_ifp; 2750 2751 SK_IF_LOCK_ASSERT(sc_if); 2752 2753 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag, 2754 sc_if->sk_cdata.sk_rx_ring_map, BUS_DMASYNC_POSTREAD); 2755 2756 prog = 0; 2757 for (cons = sc_if->sk_cdata.sk_rx_cons; prog < SK_RX_RING_CNT; 2758 prog++, SK_INC(cons, SK_RX_RING_CNT)) { 2759 cur_rx = &sc_if->sk_rdata.sk_rx_ring[cons]; 2760 sk_ctl = le32toh(cur_rx->sk_ctl); 2761 if ((sk_ctl & SK_RXCTL_OWN) != 0) 2762 break; 2763 rxd = &sc_if->sk_cdata.sk_rxdesc[cons]; 2764 rxstat = le32toh(cur_rx->sk_xmac_rxstat); 2765 2766 if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG | 2767 SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID | 2768 SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) || 2769 SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN || 2770 SK_RXBYTES(sk_ctl) > SK_MAX_FRAMELEN || 2771 sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) { 2772 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 2773 sk_discard_rxbuf(sc_if, cons); 2774 continue; 2775 } 2776 2777 m = rxd->rx_m; 2778 csum = le32toh(cur_rx->sk_csum); 2779 if (sk_newbuf(sc_if, cons) != 0) { 2780 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 2781 /* reuse old buffer */ 2782 sk_discard_rxbuf(sc_if, cons); 2783 continue; 2784 } 2785 m->m_pkthdr.rcvif = ifp; 2786 m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl); 2787 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 2788 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 2789 sk_rxcksum(ifp, m, csum); 2790 SK_IF_UNLOCK(sc_if); 2791 (*ifp->if_input)(ifp, m); 2792 SK_IF_LOCK(sc_if); 2793 } 2794 2795 if (prog > 0) { 2796 sc_if->sk_cdata.sk_rx_cons = cons; 2797 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag, 2798 sc_if->sk_cdata.sk_rx_ring_map, 2799 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2800 } 2801 } 2802 2803 static void 2804 sk_jumbo_rxeof(sc_if) 2805 struct sk_if_softc *sc_if; 2806 { 2807 struct sk_softc *sc; 2808 struct mbuf *m; 2809 struct ifnet *ifp; 2810 struct sk_rx_desc *cur_rx; 2811 struct sk_rxdesc *jrxd; 2812 int cons, prog; 2813 u_int32_t csum, rxstat, sk_ctl; 2814 2815 sc = sc_if->sk_softc; 2816 ifp = sc_if->sk_ifp; 2817 2818 SK_IF_LOCK_ASSERT(sc_if); 2819 2820 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, 2821 sc_if->sk_cdata.sk_jumbo_rx_ring_map, BUS_DMASYNC_POSTREAD); 2822 2823 prog = 0; 2824 for (cons = sc_if->sk_cdata.sk_jumbo_rx_cons; 2825 prog < SK_JUMBO_RX_RING_CNT; 2826 prog++, SK_INC(cons, SK_JUMBO_RX_RING_CNT)) { 2827 cur_rx = &sc_if->sk_rdata.sk_jumbo_rx_ring[cons]; 2828 sk_ctl = le32toh(cur_rx->sk_ctl); 2829 if ((sk_ctl & SK_RXCTL_OWN) != 0) 
2830 break; 2831 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[cons]; 2832 rxstat = le32toh(cur_rx->sk_xmac_rxstat); 2833 2834 if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG | 2835 SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID | 2836 SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) || 2837 SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN || 2838 SK_RXBYTES(sk_ctl) > SK_JUMBO_FRAMELEN || 2839 sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) { 2840 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 2841 sk_discard_jumbo_rxbuf(sc_if, cons); 2842 continue; 2843 } 2844 2845 m = jrxd->rx_m; 2846 csum = le32toh(cur_rx->sk_csum); 2847 if (sk_jumbo_newbuf(sc_if, cons) != 0) { 2848 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 2849 /* reuse old buffer */ 2850 sk_discard_jumbo_rxbuf(sc_if, cons); 2851 continue; 2852 } 2853 m->m_pkthdr.rcvif = ifp; 2854 m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl); 2855 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 2856 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 2857 sk_rxcksum(ifp, m, csum); 2858 SK_IF_UNLOCK(sc_if); 2859 (*ifp->if_input)(ifp, m); 2860 SK_IF_LOCK(sc_if); 2861 } 2862 2863 if (prog > 0) { 2864 sc_if->sk_cdata.sk_jumbo_rx_cons = cons; 2865 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, 2866 sc_if->sk_cdata.sk_jumbo_rx_ring_map, 2867 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2868 } 2869 } 2870 2871 static void 2872 sk_txeof(sc_if) 2873 struct sk_if_softc *sc_if; 2874 { 2875 struct sk_txdesc *txd; 2876 struct sk_tx_desc *cur_tx; 2877 struct ifnet *ifp; 2878 u_int32_t idx, sk_ctl; 2879 2880 ifp = sc_if->sk_ifp; 2881 2882 txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq); 2883 if (txd == NULL) 2884 return; 2885 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag, 2886 sc_if->sk_cdata.sk_tx_ring_map, BUS_DMASYNC_POSTREAD); 2887 /* 2888 * Go through our tx ring and free mbufs for those 2889 * frames that have been sent. 2890 */ 2891 for (idx = sc_if->sk_cdata.sk_tx_cons;; SK_INC(idx, SK_TX_RING_CNT)) { 2892 if (sc_if->sk_cdata.sk_tx_cnt <= 0) 2893 break; 2894 cur_tx = &sc_if->sk_rdata.sk_tx_ring[idx]; 2895 sk_ctl = le32toh(cur_tx->sk_ctl); 2896 if (sk_ctl & SK_TXCTL_OWN) 2897 break; 2898 sc_if->sk_cdata.sk_tx_cnt--; 2899 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2900 if ((sk_ctl & SK_TXCTL_LASTFRAG) == 0) 2901 continue; 2902 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap, 2903 BUS_DMASYNC_POSTWRITE); 2904 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap); 2905 2906 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 2907 m_freem(txd->tx_m); 2908 txd->tx_m = NULL; 2909 STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txbusyq, tx_q); 2910 STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q); 2911 txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq); 2912 } 2913 sc_if->sk_cdata.sk_tx_cons = idx; 2914 sc_if->sk_watchdog_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 
5 : 0; 2915 2916 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag, 2917 sc_if->sk_cdata.sk_tx_ring_map, 2918 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2919 } 2920 2921 static void 2922 sk_tick(xsc_if) 2923 void *xsc_if; 2924 { 2925 struct sk_if_softc *sc_if; 2926 struct mii_data *mii; 2927 struct ifnet *ifp; 2928 int i; 2929 2930 sc_if = xsc_if; 2931 ifp = sc_if->sk_ifp; 2932 mii = device_get_softc(sc_if->sk_miibus); 2933 2934 if (!(ifp->if_flags & IFF_UP)) 2935 return; 2936 2937 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { 2938 sk_intr_bcom(sc_if); 2939 return; 2940 } 2941 2942 /* 2943 * According to SysKonnect, the correct way to verify that 2944 * the link has come back up is to poll bit 0 of the GPIO 2945 * register three times. This pin has the signal from the 2946 * link_sync pin connected to it; if we read the same link 2947 * state 3 times in a row, we know the link is up. 2948 */ 2949 for (i = 0; i < 3; i++) { 2950 if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET) 2951 break; 2952 } 2953 2954 if (i != 3) { 2955 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if); 2956 return; 2957 } 2958 2959 /* Turn the GP0 interrupt back on. */ 2960 SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET); 2961 SK_XM_READ_2(sc_if, XM_ISR); 2962 mii_tick(mii); 2963 callout_stop(&sc_if->sk_tick_ch); 2964 } 2965 2966 static void 2967 sk_yukon_tick(xsc_if) 2968 void *xsc_if; 2969 { 2970 struct sk_if_softc *sc_if; 2971 struct mii_data *mii; 2972 2973 sc_if = xsc_if; 2974 mii = device_get_softc(sc_if->sk_miibus); 2975 2976 mii_tick(mii); 2977 callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if); 2978 } 2979 2980 static void 2981 sk_intr_bcom(sc_if) 2982 struct sk_if_softc *sc_if; 2983 { 2984 struct mii_data *mii; 2985 struct ifnet *ifp; 2986 int status; 2987 mii = device_get_softc(sc_if->sk_miibus); 2988 ifp = sc_if->sk_ifp; 2989 2990 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); 2991 2992 /* 2993 * Read the PHY interrupt register to make sure 2994 * we clear any pending interrupts. 2995 */ 2996 status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR); 2997 2998 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 2999 sk_init_xmac(sc_if); 3000 return; 3001 } 3002 3003 if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) { 3004 int lstat; 3005 lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 3006 BRGPHY_MII_AUXSTS); 3007 3008 if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) { 3009 mii_mediachg(mii); 3010 /* Turn off the link LED. */ 3011 SK_IF_WRITE_1(sc_if, 0, 3012 SK_LINKLED1_CTL, SK_LINKLED_OFF); 3013 sc_if->sk_link = 0; 3014 } else if (status & BRGPHY_ISR_LNK_CHG) { 3015 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, 3016 BRGPHY_MII_IMR, 0xFF00); 3017 mii_tick(mii); 3018 sc_if->sk_link = 1; 3019 /* Turn on the link LED. */ 3020 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, 3021 SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF| 3022 SK_LINKLED_BLINK_OFF); 3023 } else { 3024 mii_tick(mii); 3025 callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if); 3026 } 3027 } 3028 3029 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); 3030 3031 return; 3032 } 3033 3034 static void 3035 sk_intr_xmac(sc_if) 3036 struct sk_if_softc *sc_if; 3037 { 3038 struct sk_softc *sc; 3039 u_int16_t status; 3040 3041 sc = sc_if->sk_softc; 3042 status = SK_XM_READ_2(sc_if, XM_ISR); 3043 3044 /* 3045 * Link has gone down. Start MII tick timeout to 3046 * watch for link resync. 
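	 * (The actual three-in-a-row GPIO link poll is done by sk_tick()
	 * above; here GP0 is masked off until sk_tick() sees a stable
	 * link and re-enables it.)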
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
		if (status & XM_ISR_GP0_SET) {
			SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
			callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
		}

		if (status & XM_ISR_AUTONEG_DONE) {
			callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if);
		}
	}

	if (status & XM_IMR_TX_UNDERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);

	if (status & XM_IMR_RX_OVERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);

	status = SK_XM_READ_2(sc_if, XM_ISR);

	return;
}

static void
sk_intr_yukon(sc_if)
	struct sk_if_softc	*sc_if;
{
	u_int8_t status;

	status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR);
	/* RX overrun */
	if ((status & SK_GMAC_INT_RX_OVER) != 0) {
		SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
		    SK_RFCTL_RX_FIFO_OVER);
	}
	/* TX underrun */
	if ((status & SK_GMAC_INT_TX_UNDER) != 0) {
		SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST,
		    SK_TFCTL_TX_FIFO_UNDER);
	}
}

static void
sk_intr(xsc)
	void			*xsc;
{
	struct sk_softc		*sc = xsc;
	struct sk_if_softc	*sc_if0, *sc_if1;
	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
	u_int32_t		status;

	SK_LOCK(sc);

	status = CSR_READ_4(sc, SK_ISSR);
	if (status == 0 || status == 0xffffffff || sc->sk_suspended)
		goto done_locked;

	sc_if0 = sc->sk_if[SK_PORT_A];
	sc_if1 = sc->sk_if[SK_PORT_B];

	if (sc_if0 != NULL)
		ifp0 = sc_if0->sk_ifp;
	if (sc_if1 != NULL)
		ifp1 = sc_if1->sk_ifp;

	for (; (status &= sc->sk_intrmask) != 0;) {
		/* Handle receive interrupts first. */
		if (status & SK_ISR_RX1_EOF) {
			if (ifp0->if_mtu > SK_MAX_FRAMELEN)
				sk_jumbo_rxeof(sc_if0);
			else
				sk_rxeof(sc_if0);
			CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
		}
		if (status & SK_ISR_RX2_EOF) {
			if (ifp1->if_mtu > SK_MAX_FRAMELEN)
				sk_jumbo_rxeof(sc_if1);
			else
				sk_rxeof(sc_if1);
			CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
		}

		/* Then transmit interrupts. */
		if (status & SK_ISR_TX1_S_EOF) {
			sk_txeof(sc_if0);
			CSR_WRITE_4(sc, SK_BMU_TXS_CSR0, SK_TXBMU_CLR_IRQ_EOF);
		}
		if (status & SK_ISR_TX2_S_EOF) {
			sk_txeof(sc_if1);
			CSR_WRITE_4(sc, SK_BMU_TXS_CSR1, SK_TXBMU_CLR_IRQ_EOF);
		}

		/* Then MAC interrupts.
*/ 3142 if (status & SK_ISR_MAC1 && 3143 ifp0->if_drv_flags & IFF_DRV_RUNNING) { 3144 if (sc->sk_type == SK_GENESIS) 3145 sk_intr_xmac(sc_if0); 3146 else 3147 sk_intr_yukon(sc_if0); 3148 } 3149 3150 if (status & SK_ISR_MAC2 && 3151 ifp1->if_drv_flags & IFF_DRV_RUNNING) { 3152 if (sc->sk_type == SK_GENESIS) 3153 sk_intr_xmac(sc_if1); 3154 else 3155 sk_intr_yukon(sc_if1); 3156 } 3157 3158 if (status & SK_ISR_EXTERNAL_REG) { 3159 if (ifp0 != NULL && 3160 sc_if0->sk_phytype == SK_PHYTYPE_BCOM) 3161 sk_intr_bcom(sc_if0); 3162 if (ifp1 != NULL && 3163 sc_if1->sk_phytype == SK_PHYTYPE_BCOM) 3164 sk_intr_bcom(sc_if1); 3165 } 3166 status = CSR_READ_4(sc, SK_ISSR); 3167 } 3168 3169 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); 3170 3171 if (ifp0 != NULL && !IFQ_DRV_IS_EMPTY(&ifp0->if_snd)) 3172 sk_start_locked(ifp0); 3173 if (ifp1 != NULL && !IFQ_DRV_IS_EMPTY(&ifp1->if_snd)) 3174 sk_start_locked(ifp1); 3175 3176 done_locked: 3177 SK_UNLOCK(sc); 3178 } 3179 3180 static void 3181 sk_init_xmac(sc_if) 3182 struct sk_if_softc *sc_if; 3183 { 3184 struct sk_softc *sc; 3185 struct ifnet *ifp; 3186 u_int16_t eaddr[(ETHER_ADDR_LEN+1)/2]; 3187 static const struct sk_bcom_hack bhack[] = { 3188 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 }, 3189 { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 }, 3190 { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 }, 3191 { 0, 0 } }; 3192 3193 SK_IF_LOCK_ASSERT(sc_if); 3194 3195 sc = sc_if->sk_softc; 3196 ifp = sc_if->sk_ifp; 3197 3198 /* Unreset the XMAC. */ 3199 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET); 3200 DELAY(1000); 3201 3202 /* Reset the XMAC's internal state. */ 3203 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC); 3204 3205 /* Save the XMAC II revision */ 3206 sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID)); 3207 3208 /* 3209 * Perform additional initialization for external PHYs, 3210 * namely for the 1000baseTX cards that use the XMAC's 3211 * GMII mode. 3212 */ 3213 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { 3214 int i = 0; 3215 u_int32_t val; 3216 3217 /* Take PHY out of reset. */ 3218 val = sk_win_read_4(sc, SK_GPIO); 3219 if (sc_if->sk_port == SK_PORT_A) 3220 val |= SK_GPIO_DIR0|SK_GPIO_DAT0; 3221 else 3222 val |= SK_GPIO_DIR2|SK_GPIO_DAT2; 3223 sk_win_write_4(sc, SK_GPIO, val); 3224 3225 /* Enable GMII mode on the XMAC. */ 3226 SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE); 3227 3228 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, 3229 BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET); 3230 DELAY(10000); 3231 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, 3232 BRGPHY_MII_IMR, 0xFFF0); 3233 3234 /* 3235 * Early versions of the BCM5400 apparently have 3236 * a bug that requires them to have their reserved 3237 * registers initialized to some magic values. I don't 3238 * know what the numbers do, I'm just the messenger. 
		 */
		if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
		    == 0x6041) {
			while(bhack[i].reg) {
				sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
				    bhack[i].reg, bhack[i].val);
				i++;
			}
		}
	}

	/* Set station address */
	bcopy(IF_LLADDR(sc_if->sk_ifp), eaddr, ETHER_ADDR_LEN);
	SK_XM_WRITE_2(sc_if, XM_PAR0, eaddr[0]);
	SK_XM_WRITE_2(sc_if, XM_PAR1, eaddr[1]);
	SK_XM_WRITE_2(sc_if, XM_PAR2, eaddr[2]);
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);

	if (ifp->if_flags & IFF_BROADCAST) {
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	} else {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	}

	/* We don't need the FCS appended to the packet. */
	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);

	/* We want short frames padded to 60 bytes. */
	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);

	/*
	 * Enable the reception of all error frames. This is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transferring frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	if (ifp->if_mtu > SK_MAX_FRAMELEN) {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
		    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
		    XM_MODE_RX_INRANGELEN);
		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
	} else
		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);

	/*
	 * Bump up the transmit threshold. This helps hold off transmit
	 * underruns when we're blasting traffic from both ports at once.
3294 */ 3295 SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH); 3296 3297 /* Set Rx filter */ 3298 sk_rxfilter_genesis(sc_if); 3299 3300 /* Clear and enable interrupts */ 3301 SK_XM_READ_2(sc_if, XM_ISR); 3302 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) 3303 SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS); 3304 else 3305 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF); 3306 3307 /* Configure MAC arbiter */ 3308 switch(sc_if->sk_xmac_rev) { 3309 case XM_XMAC_REV_B2: 3310 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2); 3311 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2); 3312 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2); 3313 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2); 3314 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2); 3315 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2); 3316 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2); 3317 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2); 3318 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2); 3319 break; 3320 case XM_XMAC_REV_C1: 3321 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1); 3322 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1); 3323 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1); 3324 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1); 3325 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1); 3326 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1); 3327 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1); 3328 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1); 3329 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2); 3330 break; 3331 default: 3332 break; 3333 } 3334 sk_win_write_2(sc, SK_MACARB_CTL, 3335 SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF); 3336 3337 sc_if->sk_link = 1; 3338 3339 return; 3340 } 3341 3342 static void 3343 sk_init_yukon(sc_if) 3344 struct sk_if_softc *sc_if; 3345 { 3346 u_int32_t phy, v; 3347 u_int16_t reg; 3348 struct sk_softc *sc; 3349 struct ifnet *ifp; 3350 u_int8_t *eaddr; 3351 int i; 3352 3353 SK_IF_LOCK_ASSERT(sc_if); 3354 3355 sc = sc_if->sk_softc; 3356 ifp = sc_if->sk_ifp; 3357 3358 if (sc->sk_type == SK_YUKON_LITE && 3359 sc->sk_rev >= SK_YUKON_LITE_REV_A3) { 3360 /* 3361 * Workaround code for COMA mode, set PHY reset. 
3362 * Otherwise it will not correctly take chip out of 3363 * powerdown (coma) 3364 */ 3365 v = sk_win_read_4(sc, SK_GPIO); 3366 v |= SK_GPIO_DIR9 | SK_GPIO_DAT9; 3367 sk_win_write_4(sc, SK_GPIO, v); 3368 } 3369 3370 /* GMAC and GPHY Reset */ 3371 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET); 3372 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET); 3373 DELAY(1000); 3374 3375 if (sc->sk_type == SK_YUKON_LITE && 3376 sc->sk_rev >= SK_YUKON_LITE_REV_A3) { 3377 /* 3378 * Workaround code for COMA mode, clear PHY reset 3379 */ 3380 v = sk_win_read_4(sc, SK_GPIO); 3381 v |= SK_GPIO_DIR9; 3382 v &= ~SK_GPIO_DAT9; 3383 sk_win_write_4(sc, SK_GPIO, v); 3384 } 3385 3386 phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP | 3387 SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE; 3388 3389 if (sc->sk_coppertype) 3390 phy |= SK_GPHY_COPPER; 3391 else 3392 phy |= SK_GPHY_FIBER; 3393 3394 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET); 3395 DELAY(1000); 3396 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR); 3397 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF | 3398 SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR); 3399 3400 /* unused read of the interrupt source register */ 3401 SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR); 3402 3403 reg = SK_YU_READ_2(sc_if, YUKON_PAR); 3404 3405 /* MIB Counter Clear Mode set */ 3406 reg |= YU_PAR_MIB_CLR; 3407 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg); 3408 3409 /* MIB Counter Clear Mode clear */ 3410 reg &= ~YU_PAR_MIB_CLR; 3411 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg); 3412 3413 /* receive control reg */ 3414 SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR); 3415 3416 /* transmit parameter register */ 3417 SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) | 3418 YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) ); 3419 3420 /* serial mode register */ 3421 reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e); 3422 if (ifp->if_mtu > SK_MAX_FRAMELEN) 3423 reg |= YU_SMR_MFL_JUMBO; 3424 SK_YU_WRITE_2(sc_if, YUKON_SMR, reg); 3425 3426 /* Setup Yukon's station address */ 3427 eaddr = IF_LLADDR(sc_if->sk_ifp); 3428 for (i = 0; i < 3; i++) 3429 SK_YU_WRITE_2(sc_if, SK_MAC0_0 + i * 4, 3430 eaddr[i * 2] | eaddr[i * 2 + 1] << 8); 3431 /* Set GMAC source address of flow control. */ 3432 for (i = 0; i < 3; i++) 3433 SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4, 3434 eaddr[i * 2] | eaddr[i * 2 + 1] << 8); 3435 /* Set GMAC virtual address. */ 3436 for (i = 0; i < 3; i++) 3437 SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, 3438 eaddr[i * 2] | eaddr[i * 2 + 1] << 8); 3439 3440 /* Set Rx filter */ 3441 sk_rxfilter_yukon(sc_if); 3442 3443 /* enable interrupt mask for counter overflows */ 3444 SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0); 3445 SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0); 3446 SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0); 3447 3448 /* Configure RX MAC FIFO Flush Mask */ 3449 v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR | 3450 YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT | 3451 YU_RXSTAT_JABBER; 3452 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v); 3453 3454 /* Disable RX MAC FIFO Flush for YUKON-Lite Rev. 
A0 only */
	if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev == SK_YUKON_LITE_REV_A0)
		v = SK_TFCTL_OPERATION_ON;
	else
		v = SK_TFCTL_OPERATION_ON | SK_RFCTL_FIFO_FLUSH_ON;
	/* Configure RX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, v);

	/* Increase flush threshold to 64 bytes */
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,
	    SK_RFCTL_FIFO_THRESHOLD + 1);

	/* Configure TX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
}

/*
 * Note that to properly initialize any part of the GEnesis chip,
 * you first have to take it out of reset mode.
 */
static void
sk_init(xsc)
	void			*xsc;
{
	struct sk_if_softc	*sc_if = xsc;

	SK_IF_LOCK(sc_if);
	sk_init_locked(sc_if);
	SK_IF_UNLOCK(sc_if);

	return;
}

static void
sk_init_locked(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct ifnet		*ifp;
	struct mii_data		*mii;
	u_int16_t		reg;
	u_int32_t		imr;
	int			error;

	SK_IF_LOCK_ASSERT(sc_if);

	ifp = sc_if->sk_ifp;
	sc = sc_if->sk_softc;
	mii = device_get_softc(sc_if->sk_miibus);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	/* Cancel pending I/O and free all RX/TX buffers. */
	sk_stop(sc_if);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure LINK_SYNC LED */
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
		    SK_LINKLED_LINKSYNC_ON);

		/* Configure RX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
		    SK_RXLEDCTL_COUNTER_START);

		/* Configure TX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
		    SK_TXLEDCTL_COUNTER_START);
	}

	/*
	 * Configure the descriptor poll timer.
	 *
	 * The SK-NET GENESIS data sheet says that the chip may lose a Start
	 * transmit command due to CPU/cache related interim storage problems
	 * under certain conditions. The document recommends a polling
	 * mechanism that sends a Start transmit command periodically to
	 * initiate transfer of ready descriptors. To cope with this issue
	 * sk(4) now enables the descriptor poll timer to initiate descriptor
	 * processing periodically, as defined by SK_DPT_TIMER_MAX. However,
	 * sk(4) still issues SK_TXBMU_TX_START to the Tx BMU to get fast
	 * execution of Tx commands instead of waiting for the next
	 * descriptor polling time. The same rule may apply to the Rx side
	 * too, but it seems that it is not needed at the moment.
	 * Since sk(4) uses descriptor polling only as a last resort there
	 * is no need to set a polling time smaller than the maximum
	 * allowable one.
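	 *
	 * (The Rx BMU is instead restarted explicitly from sk_intr() after
	 * each Rx EOF interrupt, which appears to be sufficient in
	 * practice.)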
3543 */ 3544 SK_IF_WRITE_4(sc_if, 0, SK_DPT_INIT, SK_DPT_TIMER_MAX); 3545 3546 /* Configure I2C registers */ 3547 3548 /* Configure XMAC(s) */ 3549 switch (sc->sk_type) { 3550 case SK_GENESIS: 3551 sk_init_xmac(sc_if); 3552 break; 3553 case SK_YUKON: 3554 case SK_YUKON_LITE: 3555 case SK_YUKON_LP: 3556 sk_init_yukon(sc_if); 3557 break; 3558 } 3559 mii_mediachg(mii); 3560 3561 if (sc->sk_type == SK_GENESIS) { 3562 /* Configure MAC FIFOs */ 3563 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET); 3564 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END); 3565 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON); 3566 3567 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET); 3568 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END); 3569 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON); 3570 } 3571 3572 /* Configure transmit arbiter(s) */ 3573 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, 3574 SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON); 3575 3576 /* Configure RAMbuffers */ 3577 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET); 3578 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart); 3579 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart); 3580 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart); 3581 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend); 3582 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON); 3583 3584 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET); 3585 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON); 3586 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart); 3587 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart); 3588 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart); 3589 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend); 3590 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON); 3591 3592 /* Configure BMUs */ 3593 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE); 3594 if (ifp->if_mtu > SK_MAX_FRAMELEN) { 3595 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO, 3596 SK_ADDR_LO(SK_JUMBO_RX_RING_ADDR(sc_if, 0))); 3597 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 3598 SK_ADDR_HI(SK_JUMBO_RX_RING_ADDR(sc_if, 0))); 3599 } else { 3600 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO, 3601 SK_ADDR_LO(SK_RX_RING_ADDR(sc_if, 0))); 3602 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 3603 SK_ADDR_HI(SK_RX_RING_ADDR(sc_if, 0))); 3604 } 3605 3606 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE); 3607 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO, 3608 SK_ADDR_LO(SK_TX_RING_ADDR(sc_if, 0))); 3609 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 3610 SK_ADDR_HI(SK_TX_RING_ADDR(sc_if, 0))); 3611 3612 /* Init descriptors */ 3613 if (ifp->if_mtu > SK_MAX_FRAMELEN) 3614 error = sk_init_jumbo_rx_ring(sc_if); 3615 else 3616 error = sk_init_rx_ring(sc_if); 3617 if (error != 0) { 3618 device_printf(sc_if->sk_if_dev, 3619 "initialization failed: no memory for rx buffers\n"); 3620 sk_stop(sc_if); 3621 return; 3622 } 3623 sk_init_tx_ring(sc_if); 3624 3625 /* Set interrupt moderation if changed via sysctl. 
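	 * (SK_IM_USECS() presumably scales the microsecond value by the
	 * per-chip timer tick rate cached in sc->sk_int_ticks; this is an
	 * inference from its arguments, not from the data sheet.)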
*/ 3626 imr = sk_win_read_4(sc, SK_IMTIMERINIT); 3627 if (imr != SK_IM_USECS(sc->sk_int_mod, sc->sk_int_ticks)) { 3628 sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod, 3629 sc->sk_int_ticks)); 3630 if (bootverbose) 3631 device_printf(sc_if->sk_if_dev, 3632 "interrupt moderation is %d us.\n", 3633 sc->sk_int_mod); 3634 } 3635 3636 /* Configure interrupt handling */ 3637 CSR_READ_4(sc, SK_ISSR); 3638 if (sc_if->sk_port == SK_PORT_A) 3639 sc->sk_intrmask |= SK_INTRS1; 3640 else 3641 sc->sk_intrmask |= SK_INTRS2; 3642 3643 sc->sk_intrmask |= SK_ISR_EXTERNAL_REG; 3644 3645 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); 3646 3647 /* Start BMUs. */ 3648 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START); 3649 3650 switch(sc->sk_type) { 3651 case SK_GENESIS: 3652 /* Enable XMACs TX and RX state machines */ 3653 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE); 3654 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); 3655 break; 3656 case SK_YUKON: 3657 case SK_YUKON_LITE: 3658 case SK_YUKON_LP: 3659 reg = SK_YU_READ_2(sc_if, YUKON_GPCR); 3660 reg |= YU_GPCR_TXEN | YU_GPCR_RXEN; 3661 #if 0 3662 /* XXX disable 100Mbps and full duplex mode? */ 3663 reg &= ~(YU_GPCR_SPEED | YU_GPCR_DPLX_DIS); 3664 #endif 3665 SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg); 3666 } 3667 3668 /* Activate descriptor polling timer */ 3669 SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_START); 3670 /* start transfer of Tx descriptors */ 3671 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START); 3672 3673 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3674 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3675 3676 switch (sc->sk_type) { 3677 case SK_YUKON: 3678 case SK_YUKON_LITE: 3679 case SK_YUKON_LP: 3680 callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if); 3681 break; 3682 } 3683 3684 callout_reset(&sc_if->sk_watchdog_ch, hz, sk_watchdog, ifp); 3685 3686 return; 3687 } 3688 3689 static void 3690 sk_stop(sc_if) 3691 struct sk_if_softc *sc_if; 3692 { 3693 int i; 3694 struct sk_softc *sc; 3695 struct sk_txdesc *txd; 3696 struct sk_rxdesc *rxd; 3697 struct sk_rxdesc *jrxd; 3698 struct ifnet *ifp; 3699 u_int32_t val; 3700 3701 SK_IF_LOCK_ASSERT(sc_if); 3702 sc = sc_if->sk_softc; 3703 ifp = sc_if->sk_ifp; 3704 3705 callout_stop(&sc_if->sk_tick_ch); 3706 callout_stop(&sc_if->sk_watchdog_ch); 3707 3708 /* stop Tx descriptor polling timer */ 3709 SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP); 3710 /* stop transfer of Tx descriptors */ 3711 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_STOP); 3712 for (i = 0; i < SK_TIMEOUT; i++) { 3713 val = CSR_READ_4(sc, sc_if->sk_tx_bmu); 3714 if ((val & SK_TXBMU_TX_STOP) == 0) 3715 break; 3716 DELAY(1); 3717 } 3718 if (i == SK_TIMEOUT) 3719 device_printf(sc_if->sk_if_dev, 3720 "can not stop transfer of Tx descriptor\n"); 3721 /* stop transfer of Rx descriptors */ 3722 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_STOP); 3723 for (i = 0; i < SK_TIMEOUT; i++) { 3724 val = SK_IF_READ_4(sc_if, 0, SK_RXQ1_BMU_CSR); 3725 if ((val & SK_RXBMU_RX_STOP) == 0) 3726 break; 3727 DELAY(1); 3728 } 3729 if (i == SK_TIMEOUT) 3730 device_printf(sc_if->sk_if_dev, 3731 "can not stop transfer of Rx descriptor\n"); 3732 3733 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { 3734 /* Put PHY back into reset. 
*/ 3735 val = sk_win_read_4(sc, SK_GPIO); 3736 if (sc_if->sk_port == SK_PORT_A) { 3737 val |= SK_GPIO_DIR0; 3738 val &= ~SK_GPIO_DAT0; 3739 } else { 3740 val |= SK_GPIO_DIR2; 3741 val &= ~SK_GPIO_DAT2; 3742 } 3743 sk_win_write_4(sc, SK_GPIO, val); 3744 } 3745 3746 /* Turn off various components of this interface. */ 3747 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC); 3748 switch (sc->sk_type) { 3749 case SK_GENESIS: 3750 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET); 3751 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET); 3752 break; 3753 case SK_YUKON: 3754 case SK_YUKON_LITE: 3755 case SK_YUKON_LP: 3756 SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET); 3757 SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET); 3758 break; 3759 } 3760 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE); 3761 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF); 3762 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE); 3763 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF); 3764 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF); 3765 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP); 3766 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP); 3767 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF); 3768 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF); 3769 3770 /* Disable interrupts */ 3771 if (sc_if->sk_port == SK_PORT_A) 3772 sc->sk_intrmask &= ~SK_INTRS1; 3773 else 3774 sc->sk_intrmask &= ~SK_INTRS2; 3775 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); 3776 3777 SK_XM_READ_2(sc_if, XM_ISR); 3778 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF); 3779 3780 /* Free RX and TX mbufs still in the queues. */ 3781 for (i = 0; i < SK_RX_RING_CNT; i++) { 3782 rxd = &sc_if->sk_cdata.sk_rxdesc[i]; 3783 if (rxd->rx_m != NULL) { 3784 bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, 3785 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 3786 bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag, 3787 rxd->rx_dmamap); 3788 m_freem(rxd->rx_m); 3789 rxd->rx_m = NULL; 3790 } 3791 } 3792 for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) { 3793 jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i]; 3794 if (jrxd->rx_m != NULL) { 3795 bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, 3796 jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 3797 bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag, 3798 jrxd->rx_dmamap); 3799 m_freem(jrxd->rx_m); 3800 jrxd->rx_m = NULL; 3801 } 3802 } 3803 for (i = 0; i < SK_TX_RING_CNT; i++) { 3804 txd = &sc_if->sk_cdata.sk_txdesc[i]; 3805 if (txd->tx_m != NULL) { 3806 bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, 3807 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 3808 bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, 3809 txd->tx_dmamap); 3810 m_freem(txd->tx_m); 3811 txd->tx_m = NULL; 3812 } 3813 } 3814 3815 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE); 3816 3817 return; 3818 } 3819 3820 static int 3821 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 3822 { 3823 int error, value; 3824 3825 if (!arg1) 3826 return (EINVAL); 3827 value = *(int *)arg1; 3828 error = sysctl_handle_int(oidp, &value, 0, req); 3829 if (error || !req->newptr) 3830 return (error); 3831 if (value < low || value > high) 3832 return (EINVAL); 3833 *(int *)arg1 = value; 3834 return (0); 3835 } 3836 3837 static int 3838 sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS) 3839 { 3840 return (sysctl_int_range(oidp, arg1, arg2, req, SK_IM_MIN, SK_IM_MAX)); 3841 } 3842
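
/*
 * For reference, the interrupt moderation handler above is hooked up
 * from skc_attach() with sysctl(9), roughly like the following sketch
 * (the exact node name and description live earlier in this file):
 *
 *	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
 *	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
 *	    "int_mod", CTLTYPE_INT | CTLFLAG_RW, &sc->sk_int_mod, 0,
 *	    sysctl_hw_sk_int_mod, "I", "SK interrupt moderation");
 *
 * sysctl_int_range() then clamps writes to [SK_IM_MIN, SK_IM_MAX], and
 * sk_init_locked() picks the new value up on the next reinitialization.
 */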