/*
 * Copyright (C) 2001 Eduardo Horvath.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
 *
 * $FreeBSD$
 */

/*
 * Driver for Sun GEM ethernet controllers.
 */

#define	GEM_DEBUG

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <gem/if_gemreg.h>
#include <gem/if_gemvar.h>

#define	TRIES	10000

static void	gem_start(struct ifnet *);
static void	gem_stop(struct ifnet *, int);
static int	gem_ioctl(struct ifnet *, u_long, caddr_t);
static void	gem_cddma_callback(void *, bus_dma_segment_t *, int, int);
static void	gem_rxdma_callback(void *, bus_dma_segment_t *, int, int);
static void	gem_txdma_callback(void *, bus_dma_segment_t *, int, int);
static void	gem_tick(void *);
static void	gem_watchdog(struct ifnet *);
static void	gem_init(void *);
static void	gem_init_regs(struct gem_softc *sc);
static int	gem_ringsize(int sz);
static int	gem_meminit(struct gem_softc *);
static int	gem_dmamap_load_mbuf(struct gem_softc *, struct mbuf *,
    bus_dmamap_callback_t *, struct gem_txjob *, int);
static void	gem_dmamap_unload_mbuf(struct gem_softc *, struct gem_txjob *);
static void	gem_dmamap_commit_mbuf(struct gem_softc *, struct gem_txjob *);
static void	gem_mifinit(struct gem_softc *);
static int	gem_bitwait(struct gem_softc *sc, bus_addr_t r,
    u_int32_t clr, u_int32_t set);
static int	gem_reset_rx(struct gem_softc *);
static int	gem_reset_tx(struct gem_softc *);
static int	gem_disable_rx(struct gem_softc *);
static int	gem_disable_tx(struct gem_softc *);
static void	gem_rxdrain(struct gem_softc *);
static int	gem_add_rxbuf(struct gem_softc *, int);
static void	gem_setladrf(struct gem_softc *);

struct mbuf	*gem_get(struct gem_softc *, int, int);
static void	gem_eint(struct gem_softc *, u_int);
static void	gem_rint(struct gem_softc *);
#if 0
static void	gem_rint_timeout(void *);
#endif
static void	gem_tint(struct gem_softc *);
#ifdef notyet
static void	gem_power(int, void *);
#endif

devclass_t gem_devclass;
DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(gem, miibus, 1, 1, 1);

#ifdef GEM_DEBUG
#define	DPRINTF(sc, x)	if ((sc)->sc_arpcom.ac_if.if_flags & IFF_DEBUG) \
				printf x
#include <sys/ktr.h>
#define	KTR_GEM		KTR_CT2
#else
#define	DPRINTF(sc, x)	/* nothing */
#endif

#define	GEM_NSEGS GEM_NTXSEGS

/*
 * gem_attach:
 *
 *	Attach a Gem interface to the system.
 */
int
gem_attach(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_softc *child;
	int i, error;
	u_int32_t v;

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	gem_reset(sc);

	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, GEM_NSEGS,
	    BUS_SPACE_MAXSIZE_32BIT, 0, &sc->sc_pdmatag);
	if (error)
		return (error);

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MAXBSIZE,
	    GEM_NSEGS, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
	    &sc->sc_dmatag);
	if (error)
		goto fail_0;

	error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), BUS_DMA_ALLOCNOW,
	    &sc->sc_cdmatag);
	if (error)
		goto fail_1;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
	    (void **)&sc->sc_control_data, 0, &sc->sc_cddmamap))) {
		device_printf(sc->sc_dev, "unable to allocate control data,"
		    " error = %d\n", error);
		goto fail_2;
	}

	sc->sc_cddma = 0;
	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct gem_control_data),
	    gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
		device_printf(sc->sc_dev, "unable to load control data DMA "
		    "map, error = %d\n", error);
		goto fail_3;
	}

	/*
	 * Initialize the transmit job descriptors.
	 */
	STAILQ_INIT(&sc->sc_txfreeq);
	STAILQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Create the transmit buffer DMA maps.
	 */
	error = ENOMEM;
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		struct gem_txsoft *txs;

		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		txs->txs_ndescs = 0;
		if ((error = bus_dmamap_create(sc->sc_dmatag, 0,
		    &txs->txs_dmamap)) != 0) {
			device_printf(sc->sc_dev, "unable to create tx DMA map "
			    "%d, error = %d\n", i, error);
			goto fail_4;
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmatag, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			device_printf(sc->sc_dev, "unable to create rx DMA map "
			    "%d, error = %d\n", i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	gem_mifinit(sc);

	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, gem_mediachange,
	    gem_mediastatus)) != 0) {
		device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
		goto fail_5;
	}
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	/*
	 * From this point forward, the attachment cannot fail. A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Announce ourselves. */
	device_printf(sc->sc_dev, "Ethernet address:");
	for (i = 0; i < 6; i++)
		printf("%c%02x", i > 0 ? ':' : ' ', sc->sc_arpcom.ac_enaddr[i]);

	/* Get RX FIFO size */
	sc->sc_rxfifosize = 64 *
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_FIFO_SIZE);
	printf(", %uKB RX fifo", sc->sc_rxfifosize / 1024);

	/* Get TX FIFO size */
	v = bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_FIFO_SIZE);
	printf(", %uKB TX fifo\n", v / 16);

	/* Initialize ifnet structure. */
	ifp->if_softc = sc;
	ifp->if_unit = device_get_unit(sc->sc_dev);
	ifp->if_name = "gem";
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = gem_start;
	ifp->if_ioctl = gem_ioctl;
	ifp->if_watchdog = gem_watchdog;
	ifp->if_init = gem_init;
	ifp->if_output = ether_output;
	ifp->if_snd.ifq_maxlen = GEM_TXQUEUELEN;
	/*
	 * Walk along the list of attached MII devices and
	 * establish an `MII instance' to `phy number'
	 * mapping. We'll use this mapping in media change
	 * requests to determine which phy to use to program
	 * the MIF configuration register.
	 */
	for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;
	     child = LIST_NEXT(child, mii_list)) {
		/*
		 * Note: we support just two PHYs: the built-in
		 * internal device and an external on the MII
		 * connector.
		 */
		if (child->mii_phy > 1 || child->mii_inst > 1) {
			device_printf(sc->sc_dev, "cannot accommodate "
			    "MII device %s at phy %d, instance %d\n",
			    device_get_name(child->mii_dev),
			    child->mii_phy, child->mii_inst);
			continue;
		}

		sc->sc_phys[child->mii_inst] = child->mii_phy;
	}

	/*
	 * Now select and activate the PHY we will use.
	 *
	 * The order of preference is External (MDI1),
	 * Internal (MDI0), Serial Link (no MII).
	 */
	if (sc->sc_phys[1]) {
#ifdef GEM_DEBUG
		printf("using external phy\n");
#endif
		sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
	} else {
#ifdef GEM_DEBUG
		printf("using internal phy\n");
#endif
		sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
	}
	bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_MIF_CONFIG,
	    sc->sc_mif_config);
	/* Attach the interface. */
	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);

#ifdef notyet
	/*
	 * Add a suspend hook to make sure we come back up after a
	 * resume.
	 */
	sc->sc_powerhook = powerhook_establish(gem_power, sc);
	if (sc->sc_powerhook == NULL)
		device_printf(sc->sc_dev, "WARNING: unable to establish power "
		    "hook\n");
#endif

	callout_init(&sc->sc_tick_ch, 0);
	callout_init(&sc->sc_rx_ch, 0);
	return (0);

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt. Do this in reverse order and fall through.
	 */
fail_5:
	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
fail_4:
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
fail_3:
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
fail_2:
	bus_dma_tag_destroy(sc->sc_cdmatag);
fail_1:
	bus_dma_tag_destroy(sc->sc_dmatag);
fail_0:
	bus_dma_tag_destroy(sc->sc_pdmatag);
	return (error);
}

static void
gem_cddma_callback(xsc, segs, nsegs, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	int error;
{
	struct gem_softc *sc = (struct gem_softc *)xsc;

	if (error != 0)
		return;
	if (nsegs != 1) {
		/* can't happen... */
		panic("gem_cddma_callback: bad control buffer segment count");
	}
	sc->sc_cddma = segs[0].ds_addr;
}

static void
gem_rxdma_callback(xsc, segs, nsegs, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	int error;
{
	struct gem_rxsoft *rxs = (struct gem_rxsoft *)xsc;

	if (error != 0)
		return;
	if (nsegs != 1) {
		/* can't happen... */
		panic("gem_rxdma_callback: bad receive buffer segment count");
	}
	rxs->rxs_paddr = segs[0].ds_addr;
}

/*
 * This is called multiple times in our version of dmamap_load_mbuf, but should
 * be fit for a generic version that only calls it once.
 */
static void
gem_txdma_callback(xsc, segs, nsegs, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	int error;
{
	struct gem_txdma *tx = (struct gem_txdma *)xsc;
	int seg;

	tx->txd_error = error;
	if (error != 0)
		return;
	tx->txd_nsegs = nsegs;

	/*
	 * Initialize the transmit descriptors.
	 */
	for (seg = 0; seg < nsegs;
	     seg++, tx->txd_nexttx = GEM_NEXTTX(tx->txd_nexttx)) {
		uint64_t flags;

		DPRINTF(tx->txd_sc, ("txdma_cb: mapping seg %d (txd %d), len "
		    "%lx, addr %#lx (%#lx)\n", seg, tx->txd_nexttx,
		    segs[seg].ds_len, segs[seg].ds_addr,
		    GEM_DMA_WRITE(tx->txd_sc, segs[seg].ds_addr)));
		CTR5(KTR_GEM, "txdma_cb: mapping seg %d (txd %d), len "
		    "%lx, addr %#lx (%#lx)", seg, tx->txd_nexttx,
		    segs[seg].ds_len, segs[seg].ds_addr,
		    GEM_DMA_WRITE(tx->txd_sc, segs[seg].ds_addr));
		/*
		 * If this is the first descriptor we're
		 * enqueueing, set the start of packet flag,
		 * and the checksum stuff if we want the hardware
		 * to do it.
		 */
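		/*
		 * Note on the sc_txwin logic just below: a Tx completion
		 * interrupt (GEM_TD_INTERRUPT_ME) is requested only about
		 * once every GEM_NTXSEGS * 2 / 3 packets, so completions
		 * are batched rather than interrupting per packet.
		 */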
		tx->txd_sc->sc_txdescs[tx->txd_nexttx].gd_addr =
		    GEM_DMA_WRITE(tx->txd_sc, segs[seg].ds_addr);
		flags = segs[seg].ds_len & GEM_TD_BUFSIZE;
		if ((tx->txd_flags & GTXD_FIRST) != 0 && seg == 0) {
			CTR2(KTR_GEM, "txdma_cb: start of packet at seg %d, "
			    "tx %d", seg, tx->txd_nexttx);
			flags |= GEM_TD_START_OF_PACKET;
			if (++tx->txd_sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
				tx->txd_sc->sc_txwin = 0;
				flags |= GEM_TD_INTERRUPT_ME;
			}
		}
		if ((tx->txd_flags & GTXD_LAST) != 0 && seg == nsegs - 1) {
			CTR2(KTR_GEM, "txdma_cb: end of packet at seg %d, "
			    "tx %d", seg, tx->txd_nexttx);
			flags |= GEM_TD_END_OF_PACKET;
		}
		tx->txd_sc->sc_txdescs[tx->txd_nexttx].gd_flags =
		    GEM_DMA_WRITE(tx->txd_sc, flags);
		tx->txd_lasttx = tx->txd_nexttx;
	}
}

static void
gem_tick(arg)
	void *arg;
{
	struct gem_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(sc->sc_mii);
	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}

static int
gem_bitwait(sc, r, clr, set)
	struct gem_softc *sc;
	bus_addr_t r;
	u_int32_t clr;
	u_int32_t set;
{
	int i;
	u_int32_t reg;

	for (i = TRIES; i--; DELAY(100)) {
		reg = bus_space_read_4(sc->sc_bustag, sc->sc_h, r);
		if ((reg & clr) == 0 && (reg & set) == set)
			return (1);
	}
	return (0);
}

void
gem_reset(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int s;

	s = splnet();
	DPRINTF(sc, ("%s: gem_reset\n", device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_reset", device_get_name(sc->sc_dev));
	gem_reset_rx(sc);
	gem_reset_tx(sc);

	/* Do a full reset */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
		device_printf(sc->sc_dev, "cannot reset device\n");
	splx(s);
}

/*
 * gem_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
gem_rxdrain(sc)
	struct gem_softc *sc;
{
	struct gem_rxsoft *rxs;
	int i;

	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/*
 * Reset the whole thing.
 */
static void
gem_stop(ifp, disable)
	struct ifnet *ifp;
	int disable;
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct gem_txsoft *txs;

	DPRINTF(sc, ("%s: gem_stop\n", device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_stop", device_get_name(sc->sc_dev));

	callout_stop(&sc->sc_tick_ch);

	/* XXX - Should we reset these instead? */
	gem_disable_tx(sc);
	gem_disable_rx(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		if (txs->txs_ndescs != 0) {
			bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
			if (txs->txs_mbuf != NULL) {
				m_freem(txs->txs_mbuf);
				txs->txs_mbuf = NULL;
			}
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	if (disable)
		gem_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/*
 * Reset the receiver
 */
static int
gem_reset_rx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_rx(sc);
	bus_space_write_4(t, h, GEM_RX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_RX_CONFIG, 1, 0))
		device_printf(sc->sc_dev, "cannot disable rx dma\n");

	/* Wait 5ms extra. */
	DELAY(5000);

	/* Finally, reset the ERX */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX, 0)) {
		device_printf(sc->sc_dev, "cannot reset receiver\n");
		return (1);
	}
	return (0);
}

/*
 * Reset the transmitter
 */
static int
gem_reset_tx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int i;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_tx(sc);
	bus_space_write_4(t, h, GEM_TX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_TX_CONFIG, 1, 0))
		device_printf(sc->sc_dev, "cannot disable tx dma\n");

	/* Wait 5ms extra. */
	DELAY(5000);

	/* Finally, reset the ETX */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_TX);
	/* Wait till it finishes */
	for (i = TRIES; i--; DELAY(100))
		if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_TX) == 0)
			break;
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) {
		device_printf(sc->sc_dev, "cannot reset transmitter\n");
		return (1);
	}
	return (0);
}

/*
 * disable receiver.
 */
static int
gem_disable_rx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	cfg &= ~GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
}

/*
 * disable transmitter.
 */
static int
gem_disable_tx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG);
	cfg &= ~GEM_MAC_TX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
}

/*
 * Initialize interface.
 */
static int
gem_meminit(sc)
	struct gem_softc *sc;
{
	struct gem_rxsoft *rxs;
	int i, error;

	/*
	 * Initialize the transmit descriptor ring.
	 */
	memset((void *)sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	for (i = 0; i < GEM_NTXDESC; i++) {
		sc->sc_txdescs[i].gd_flags = 0;
		sc->sc_txdescs[i].gd_addr = 0;
	}
	GEM_CDTXSYNC(sc, 0, GEM_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = GEM_NTXDESC - 1;
	sc->sc_txnext = 0;
	sc->sc_txwin = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = gem_add_rxbuf(sc, i)) != 0) {
				device_printf(sc->sc_dev, "unable to "
				    "allocate or map rx buffer %d, error = "
				    "%d\n", i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				gem_rxdrain(sc);
				return (1);
			}
		} else
			GEM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	return (0);
}

static int
gem_ringsize(sz)
	int sz;
{
	int v = 0;

	switch (sz) {
	case 32:
		v = GEM_RING_SZ_32;
		break;
	case 64:
		v = GEM_RING_SZ_64;
		break;
	case 128:
		v = GEM_RING_SZ_128;
		break;
	case 256:
		v = GEM_RING_SZ_256;
		break;
	case 512:
		v = GEM_RING_SZ_512;
		break;
	case 1024:
		v = GEM_RING_SZ_1024;
		break;
	case 2048:
		v = GEM_RING_SZ_2048;
		break;
	case 4096:
		v = GEM_RING_SZ_4096;
		break;
	case 8192:
		v = GEM_RING_SZ_8192;
		break;
	default:
		printf("gem: invalid Receive Descriptor ring size\n");
		break;
	}
	return (v);
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
static void
gem_init(xsc)
	void *xsc;
{
	struct gem_softc *sc = (struct gem_softc *)xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int s;
	u_int32_t v;

	s = splnet();

	DPRINTF(sc, ("%s: gem_init: calling stop\n", device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_init: calling stop", device_get_name(sc->sc_dev));
	/*
	 * Initialization sequence. The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	gem_stop(&sc->sc_arpcom.ac_if, 0);
	gem_reset(sc);
	DPRINTF(sc, ("%s: gem_init: restarting\n", device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_init: restarting", device_get_name(sc->sc_dev));

	/* Re-initialize the MIF */
	gem_mifinit(sc);

	/* Call MI reset function if any */
	if (sc->sc_hwreset)
		(*sc->sc_hwreset)(sc);

	/* step 3. Setup data structures in host memory */
	gem_meminit(sc);

	/* step 4. TX MAC registers & counters */
	gem_init_regs(sc);
	/* XXX: VLAN code from NetBSD temporarily removed. */
	bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
	    (ETHER_MAX_LEN + sizeof(struct ether_header)) | (0x2000 << 16));

	/* step 5. RX MAC registers & counters */
	gem_setladrf(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	/* NOTE: we use only 32-bit DMA addresses here. */
	bus_space_write_4(t, h, GEM_TX_RING_PTR_HI, 0);
	bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

	bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 0);
	bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
	DPRINTF(sc, ("loading rx ring %lx, tx ring %lx, cddma %lx\n",
	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma));
	CTR3(KTR_GEM, "loading rx ring %lx, tx ring %lx, cddma %lx",
	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);

	/* step 8. Global Configuration & Interrupt Mask */
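	/*
	 * GEM_INTMASK is a mask: a set bit disables the corresponding
	 * interrupt source, so writing the complement of the flags below
	 * enables exactly those interrupts. MAC-level events are masked
	 * separately through the MAC mask registers that follow.
	 */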
	bus_space_write_4(t, h, GEM_INTMASK,
	    ~(GEM_INTR_TX_INTME|
	    GEM_INTR_TX_EMPTY|
	    GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF|
	    GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS|
	    GEM_INTR_MAC_CONTROL|GEM_INTR_MIF|
	    GEM_INTR_BERR));
	bus_space_write_4(t, h, GEM_MAC_RX_MASK,
	    GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT);
	bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXXX */
	bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0); /* XXXX */

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = gem_ringsize(GEM_NTXDESC /*XXX*/);
	bus_space_write_4(t, h, GEM_TX_CONFIG,
	    v|GEM_TX_CONFIG_TXDMA_EN|
	    ((0x400 << 10) & GEM_TX_CONFIG_TXFIFO_TH));

	/* step 10. ERX Configuration */

	/* Encode Receive Descriptor ring size: four possible values */
	v = gem_ringsize(GEM_NRXDESC /*XXX*/);

	/* Enable DMA */
	bus_space_write_4(t, h, GEM_RX_CONFIG,
	    v|(GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
	    (2 << GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN|
	    (0 << GEM_RX_CONFIG_CXM_START_SHFT));
	/*
	 * The following value is for an OFF Threshold of about 3/4 full
	 * and an ON Threshold of 1/4 full.
	 */
	bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));
	bus_space_write_4(t, h, GEM_RX_BLANKING, (6 << 12) | 6);

	/* step 11. Configure Media */
	mii_mediachg(sc->sc_mii);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	v |= GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);

	/* step 14. Issue Transmit Pending command */

	/* Call MI initialization function if any */
	if (sc->sc_hwinit)
		(*sc->sc_hwinit)(sc);

	/* step 15. Give the receiver a swift kick */
	bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC - 4);

	/* Start the one second timer. */
	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;
	sc->sc_ifflags = ifp->if_flags;
	splx(s);
}

/*
 * XXX: This is really a substitute for bus_dmamap_load_mbuf(), which FreeBSD
 * does not yet have, with some adaptations for this driver.
 * Some changes are mandated by the fact that multiple maps may be needed
 * to map a single mbuf.
 * It should be removed once generic support is available.
 *
 * This is derived from NetBSD (syssrc/sys/arch/sparc64/sparc64/machdep.c), for
 * a copyright notice see sparc64/sparc64/bus_machdep.c.
 *
 * Not every error condition is passed to the callback in this version, and the
 * callback may be called more than once.
 * It also gropes in the entrails of the callback arg...
 */
static int
gem_dmamap_load_mbuf(sc, m0, cb, txj, flags)
	struct gem_softc *sc;
	struct mbuf *m0;
	bus_dmamap_callback_t *cb;
	struct gem_txjob *txj;
	int flags;
{
	struct gem_txdma txd;
	struct gem_txsoft *txs;
	struct mbuf *m;
	void *vaddr;
	int error, first = 1, len, totlen;

	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("gem_dmamap_load_mbuf: no packet header");
	totlen = m0->m_pkthdr.len;
	len = 0;
	txd.txd_sc = sc;
	txd.txd_nexttx = txj->txj_nexttx;
	txj->txj_nsegs = 0;
	STAILQ_INIT(&txj->txj_txsq);
	m = m0;
	while (m != NULL && len < totlen) {
		/* Skip empty mbufs, advancing so we do not loop forever. */
		if (m->m_len == 0) {
			m = m->m_next;
			continue;
		}
		/* Get a work queue entry. */
		if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
			/*
			 * Ran out of descriptors, return a value that
			 * cannot be returned by bus_dmamap_load to notify
			 * the caller.
			 */
			error = -1;
			goto fail;
		}
		len += m->m_len;
		txd.txd_flags = first ? GTXD_FIRST : 0;
		if (m->m_next == NULL || len >= totlen)
			txd.txd_flags |= GTXD_LAST;
		vaddr = mtod(m, void *);
		error = bus_dmamap_load(sc->sc_dmatag, txs->txs_dmamap, vaddr,
		    m->m_len, cb, &txd, flags);
		if (error != 0 || txd.txd_error != 0)
			goto fail;
		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmatag, txs->txs_dmamap,
		    BUS_DMASYNC_PREWRITE);
		m = m->m_next;
		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		txs->txs_mbuf = first ? m0 : NULL;
		txs->txs_firstdesc = txj->txj_nexttx;
		txs->txs_lastdesc = txd.txd_lasttx;
		txs->txs_ndescs = txd.txd_nsegs;
		CTR3(KTR_GEM, "load_mbuf: setting firstdesc=%d, lastdesc=%d, "
		    "ndescs=%d", txs->txs_firstdesc, txs->txs_lastdesc,
		    txs->txs_ndescs);
		STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
		STAILQ_INSERT_TAIL(&txj->txj_txsq, txs, txs_q);
		txj->txj_nexttx = txd.txd_nexttx;
		txj->txj_nsegs += txd.txd_nsegs;
		first = 0;
	}
	txj->txj_lasttx = txd.txd_lasttx;
	return (0);

fail:
	CTR1(KTR_GEM, "gem_dmamap_load_mbuf failed (%d)", error);
	gem_dmamap_unload_mbuf(sc, txj);
	return (error);
}

/*
 * Unload an mbuf using the txd the information was placed in.
 * The tx interrupt code frees the tx segments one by one, because the txd is
 * not available any more.
 */
static void
gem_dmamap_unload_mbuf(sc, txj)
	struct gem_softc *sc;
	struct gem_txjob *txj;
{
	struct gem_txsoft *txs;

	/* Re-add the removed descriptors and unload the segments. */
	while ((txs = STAILQ_FIRST(&txj->txj_txsq)) != NULL) {
		bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
		STAILQ_REMOVE_HEAD(&txj->txj_txsq, txs_q);
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}
}

static void
gem_dmamap_commit_mbuf(sc, txj)
	struct gem_softc *sc;
	struct gem_txjob *txj;
{
	struct gem_txsoft *txs;

	/* Commit the txjob by transferring the txsofts to the txdirtyq. */
	while ((txs = STAILQ_FIRST(&txj->txj_txsq)) != NULL) {
		STAILQ_REMOVE_HEAD(&txj->txj_txsq, txs_q);
		STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
	}
}

static void
gem_init_regs(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	const u_char *laddr = sc->sc_arpcom.ac_enaddr;
	u_int32_t v;

	/* These regs are not cleared on reset */
	if (!sc->sc_inited) {

		/* Wooo. Magic values. */
		bus_space_write_4(t, h, GEM_MAC_IPG0, 0);
		bus_space_write_4(t, h, GEM_MAC_IPG1, 8);
		bus_space_write_4(t, h, GEM_MAC_IPG2, 4);

		bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* Max frame and max burst size */
		bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
		    ETHER_MAX_LEN | (0x2000 << 16));

		bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x7);
		bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x4);
		bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10);
		/* Dunno.... */
		bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
		bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
		    ((laddr[5] << 8) | laddr[4]) & 0x3ff);

		/* Secondary MAC addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);

		/* MAC control addr set to 01:80:c2:00:00:01 */
		bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
		bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
		bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);

		/* MAC filter addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);

		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);

		sc->sc_inited = 1;
	}

	/* Counters need to be zeroed */
	bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);

	/* Un-pause stuff */
#if 0
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);
#else
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0);
#endif

	/*
	 * Set the station address.
	 */
	bus_space_write_4(t, h, GEM_MAC_ADDR0, (laddr[4] << 8) | laddr[5]);
	bus_space_write_4(t, h, GEM_MAC_ADDR1, (laddr[2] << 8) | laddr[3]);
	bus_space_write_4(t, h, GEM_MAC_ADDR2, (laddr[0] << 8) | laddr[1]);

	/*
	 * Enable MII outputs. Enable GMII if there is a gigabit PHY.
	 */
	sc->sc_mif_config = bus_space_read_4(t, h, GEM_MIF_CONFIG);
	v = GEM_MAC_XIF_TX_MII_ENA;
	if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
		v |= GEM_MAC_XIF_FDPLX_LED;
		if (sc->sc_flags & GEM_GIGABIT)
			v |= GEM_MAC_XIF_GMII_MODE;
	}
	bus_space_write_4(t, h, GEM_MAC_XIF_CONFIG, v);
}

static void
gem_start(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct mbuf *m0 = NULL, *m;
	struct gem_txjob txj;
	int firsttx, ofree, seg, ntx, txmfail;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;
	firsttx = sc->sc_txnext;

	DPRINTF(sc, ("%s: gem_start: txfree %d, txnext %d\n",
	    device_get_name(sc->sc_dev), ofree, firsttx));
	CTR3(KTR_GEM, "%s: gem_start: txfree %d, txnext %d",
	    device_get_name(sc->sc_dev), ofree, firsttx);

	txj.txj_nexttx = firsttx;
	txj.txj_lasttx = 0;
	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	txmfail = 0;
	for (ntx = 0;; ntx++) {
		/*
		 * Grab a packet off the queue.
		 */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Load the DMA map. If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we were
		 * short on resources. In this case, we'll copy and try
		 * again.
		 */
		txmfail = gem_dmamap_load_mbuf(sc, m0,
		    gem_txdma_callback, &txj, BUS_DMA_NOWAIT);
		if (txmfail == -1) {
			IF_PREPEND(&ifp->if_snd, m0);
			break;
		}
		if (txmfail > 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				device_printf(sc->sc_dev, "unable to "
				    "allocate Tx mbuf\n");
				/* Failed; requeue. */
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					device_printf(sc->sc_dev, "unable to "
					    "allocate Tx cluster\n");
					IF_PREPEND(&ifp->if_snd, m0);
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			txmfail = gem_dmamap_load_mbuf(sc, m,
			    gem_txdma_callback, &txj, BUS_DMA_NOWAIT);
			if (txmfail != 0) {
				if (txmfail > 0) {
					device_printf(sc->sc_dev, "unable to "
					    "load Tx buffer, error = %d\n",
					    txmfail);
				}
				m_freem(m);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet. Note, we always reserve one descriptor
		 * at the end of the ring as a termination point, to
		 * prevent wrap-around.
		 */
		if (txj.txj_nsegs > (sc->sc_txfree - 1)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet. We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt. Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			gem_dmamap_unload_mbuf(sc, &txj);
			if (m != NULL)
				m_freem(m);
			IF_PREPEND(&ifp->if_snd, m0);
			break;
		}

		if (m != NULL)
			m_freem(m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf("     gem_start %p transmit chain:\n",
			    STAILQ_FIRST(&txj.txj_txsq));
			for (seg = sc->sc_txnext;; seg = GEM_NEXTTX(seg)) {
				printf("descriptor %d:\t", seg);
				printf("gd_flags: 0x%016llx\t", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[seg].gd_flags));
				printf("gd_addr: 0x%016llx\n", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[seg].gd_addr));
				if (seg == txj.txj_lasttx)
					break;
			}
		}
#endif

		/* Sync the descriptors we're using. */
		GEM_CDTXSYNC(sc, sc->sc_txnext, txj.txj_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->sc_txfree -= txj.txj_nsegs;
		sc->sc_txnext = txj.txj_nexttx;

		gem_dmamap_commit_mbuf(sc, &txj);
	}

	if (txmfail == -1 || sc->sc_txfree == 0) {
		ifp->if_flags |= IFF_OACTIVE;
		/* No more slots left; notify upper layer. */
	}

	if (ntx > 0) {
		DPRINTF(sc, ("%s: packets enqueued, IC on %d, OWN on %d\n",
		    device_get_name(sc->sc_dev), txj.txj_lasttx, firsttx));
		CTR3(KTR_GEM, "%s: packets enqueued, IC on %d, OWN on %d",
		    device_get_name(sc->sc_dev), txj.txj_lasttx, firsttx);
		/*
		 * The entire packet chain is set up.
		 * Kick the transmitter.
		 */
		DPRINTF(sc, ("%s: gem_start: kicking tx %d\n",
		    device_get_name(sc->sc_dev), txj.txj_nexttx));
		CTR3(KTR_GEM, "%s: gem_start: kicking tx %d=%d",
		    device_get_name(sc->sc_dev), txj.txj_nexttx,
		    sc->sc_txnext);
		bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_TX_KICK,
		    sc->sc_txnext);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
		DPRINTF(sc, ("%s: gem_start: watchdog %d\n",
		    device_get_name(sc->sc_dev), ifp->if_timer));
		CTR2(KTR_GEM, "%s: gem_start: watchdog %d",
		    device_get_name(sc->sc_dev), ifp->if_timer);
	}
}

/*
 * Transmit interrupt.
 */
static void
gem_tint(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h;
	struct gem_txsoft *txs;
	int txlast;
	int progress = 0;

	DPRINTF(sc, ("%s: gem_tint\n", device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_tint", device_get_name(sc->sc_dev));

	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
	    bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);

	/*
	 * then clear the hardware counters.
	 */
	bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		GEM_CDTXSYNC(sc, txs->txs_lastdesc,
		    txs->txs_ndescs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			int i;
			printf("    txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
				printf("descriptor %d: ", i);
				printf("gd_flags: 0x%016llx\t", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags));
				printf("gd_addr: 0x%016llx\n", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr));
				if (i == txs->txs_lastdesc)
					break;
			}
		}
#endif

		/*
		 * In theory, we could harvest some descriptors before
		 * the ring is empty, but that's a bit complicated.
		 *
		 * GEM_TX_COMPLETION points to the last descriptor
		 * processed + 1.
		 */
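		/*
		 * Worked example of the wrap test below: for a job with
		 * firstdesc = 510 and lastdesc = 2 on, say, a 512-entry
		 * ring, the job is still pending while txlast >= 510 or
		 * txlast <= 2, and complete once txlast lies in 3..509
		 * (the hardware reports last processed + 1, so
		 * txlast == 3 means descriptor 2 has been handled).
		 */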
		txlast = bus_space_read_4(t, mac, GEM_TX_COMPLETION);
		DPRINTF(sc,
		    ("gem_tint: txs->txs_lastdesc = %d, txlast = %d\n",
		    txs->txs_lastdesc, txlast));
		CTR3(KTR_GEM, "gem_tint: txs->txs_firstdesc = %d, "
		    "txs->txs_lastdesc = %d, txlast = %d",
		    txs->txs_firstdesc, txs->txs_lastdesc, txlast);
		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
			if ((txlast >= txs->txs_firstdesc) &&
			    (txlast <= txs->txs_lastdesc))
				break;
		} else {
			/* Ick -- this command wraps */
			if ((txlast >= txs->txs_firstdesc) ||
			    (txlast <= txs->txs_lastdesc))
				break;
		}

		DPRINTF(sc, ("gem_tint: releasing a desc\n"));
		CTR0(KTR_GEM, "gem_tint: releasing a desc");
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		sc->sc_txfree += txs->txs_ndescs;

		bus_dmamap_sync(sc->sc_dmatag, txs->txs_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
		if (txs->txs_mbuf != NULL) {
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		ifp->if_opackets++;
		progress = 1;
	}

	DPRINTF(sc, ("gem_tint: GEM_TX_STATE_MACHINE %x "
	    "GEM_TX_DATA_PTR %llx "
	    "GEM_TX_COMPLETION %x\n",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_STATE_MACHINE),
	    ((long long) bus_space_read_4(sc->sc_bustag, sc->sc_h,
		GEM_TX_DATA_PTR_HI) << 32) |
	    bus_space_read_4(sc->sc_bustag, sc->sc_h,
		GEM_TX_DATA_PTR_LO),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_COMPLETION)));
	CTR3(KTR_GEM, "gem_tint: GEM_TX_STATE_MACHINE %x "
	    "GEM_TX_DATA_PTR %llx "
	    "GEM_TX_COMPLETION %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_STATE_MACHINE),
	    ((long long) bus_space_read_4(sc->sc_bustag, sc->sc_h,
		GEM_TX_DATA_PTR_HI) << 32) |
	    bus_space_read_4(sc->sc_bustag, sc->sc_h,
		GEM_TX_DATA_PTR_LO),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_COMPLETION));

	if (progress) {
		if (sc->sc_txfree == GEM_NTXDESC - 1)
			sc->sc_txwin = 0;

		/* Freed some descriptors, so reset IFF_OACTIVE and restart. */
		ifp->if_flags &= ~IFF_OACTIVE;
		gem_start(ifp);

		if (STAILQ_EMPTY(&sc->sc_txdirtyq))
			ifp->if_timer = 0;
	}

	DPRINTF(sc, ("%s: gem_tint: watchdog %d\n",
	    device_get_name(sc->sc_dev), ifp->if_timer));
	CTR2(KTR_GEM, "%s: gem_tint: watchdog %d",
	    device_get_name(sc->sc_dev), ifp->if_timer);
}

#if 0
static void
gem_rint_timeout(arg)
	void *arg;
{

	gem_rint((struct gem_softc *)arg);
}
#endif

/*
 * Receive interrupt.
 */
static void
gem_rint(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	struct ether_header *eh;
	struct gem_rxsoft *rxs;
	struct mbuf *m;
	u_int64_t rxstat;
	u_int32_t rxcomp;
	int i, len, progress = 0;

	callout_stop(&sc->sc_rx_ch);
	DPRINTF(sc, ("%s: gem_rint\n", device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_rint", device_get_name(sc->sc_dev));

	/*
	 * Read the completion register once. This limits
	 * how long the following loop can execute.
	 */
	rxcomp = bus_space_read_4(t, h, GEM_RX_COMPLETION);

	/*
	 * XXXX Read the lastrx only once at the top for speed.
	 */
	DPRINTF(sc, ("gem_rint: sc->rxptr %d, complete %d\n",
	    sc->sc_rxptr, rxcomp));
	CTR2(KTR_GEM, "gem_rint: sc->rxptr %d, complete %d",
	    sc->sc_rxptr, rxcomp);
	for (i = sc->sc_rxptr; i != rxcomp;
	     i = GEM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		GEM_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);

		if (rxstat & GEM_RD_OWN) {
#if 0 /* XXX: In case of emergency, re-enable this. */
			/*
			 * The descriptor is still marked as owned, although
			 * it is supposed to have completed. This has been
			 * observed on some machines. Just exiting here
			 * might leave the packet sitting around until another
			 * one arrives to trigger a new interrupt, which is
			 * generally undesirable, so set up a timeout.
			 */
			callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
			    gem_rint_timeout, sc);
#endif
			break;
		}

		progress++;
		ifp->if_ipackets++;

		if (rxstat & GEM_RD_BAD_CRC) {
			ifp->if_ierrors++;
			device_printf(sc->sc_dev, "receive error: CRC error\n");
			GEM_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap,
		    BUS_DMASYNC_POSTREAD);
#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf("    rxsoft %p descriptor %d: ", rxs, i);
			printf("gd_flags: 0x%016llx\t", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
			printf("gd_addr: 0x%016llx\n", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
		}
#endif

		/*
		 * No errors; receive the packet. Note the Gem
		 * includes the CRC with every packet.
		 */
		len = GEM_RD_BUFLEN(rxstat);

		/*
		 * Allocate a new mbuf cluster. If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = rxs->rxs_mbuf;
		if (gem_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			GEM_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap,
			    BUS_DMASYNC_PREREAD);
			continue;
		}
		m->m_data += 2; /* We're already off by two */

		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len - ETHER_CRC_LEN;
		m_adj(m, sizeof(struct ether_header));

		/* Pass it on. */
		ether_input(ifp, eh, m);
	}

	if (progress) {
		/* Update the receive pointer. */
		if (i == sc->sc_rxptr) {
			device_printf(sc->sc_dev, "rint: ring wrap\n");
		}
		sc->sc_rxptr = i;
		bus_space_write_4(t, h, GEM_RX_KICK, GEM_PREVRX(i));
	}

	DPRINTF(sc, ("gem_rint: done sc->rxptr %d, complete %d\n",
	    sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION)));
	CTR2(KTR_GEM, "gem_rint: done sc->rxptr %d, complete %d",
	    sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION));
}

/*
 * gem_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
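/*
 * On mbuf or cluster allocation failure this returns ENOBUFS without
 * touching the ring entry; gem_rint then recycles the old mbuf for the
 * slot via GEM_INIT_RXDESC and drops the received packet instead.
 */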
static int
gem_add_rxbuf(sc, idx)
	struct gem_softc *sc;
	int idx;
{
	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

#ifdef GEM_DEBUG
	/* bzero the packet to check dma */
	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
#endif

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmatag, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, gem_rxdma_callback, rxs,
	    BUS_DMA_NOWAIT);
	if (error != 0 || rxs->rxs_paddr == 0) {
		device_printf(sc->sc_dev, "can't load rx DMA map %d, error = "
		    "%d\n", idx, error);
		panic("gem_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD);

	GEM_INIT_RXDESC(sc, idx);

	return (0);
}

static void
gem_eint(sc, status)
	struct gem_softc *sc;
	u_int status;
{

	if ((status & GEM_INTR_MIF) != 0) {
		device_printf(sc->sc_dev, "XXXlink status changed\n");
		return;
	}

	device_printf(sc->sc_dev, "status=%x\n", status);
}

void
gem_intr(v)
	void *v;
{
	struct gem_softc *sc = (struct gem_softc *)v;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_h;
	u_int32_t status;

	status = bus_space_read_4(t, seb, GEM_STATUS);
	DPRINTF(sc, ("%s: gem_intr: cplt %x, status %x\n",
	    device_get_name(sc->sc_dev), (status >> 19),
	    (u_int)status));
	CTR3(KTR_GEM, "%s: gem_intr: cplt %x, status %x",
	    device_get_name(sc->sc_dev), (status >> 19),
	    (u_int)status);

	if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
		gem_eint(sc, status);

	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
		gem_tint(sc);

	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
		gem_rint(sc);

	/* We should eventually do more than just print out error stats. */
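	/*
	 * A Tx underrun or too-long-packet fault, and an Rx overflow,
	 * can leave the respective DMA engine in a bad state, so the
	 * handlers below reinitialize the whole chip via gem_init()
	 * rather than trying to resume in place.
	 */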
	if (status & GEM_INTR_TX_MAC) {
		int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS);
		if (txstat & ~GEM_MAC_TX_XMIT_DONE)
			device_printf(sc->sc_dev, "MAC tx fault, status %x\n",
			    txstat);
		if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG))
			gem_init(sc);
	}
	if (status & GEM_INTR_RX_MAC) {
		int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS);
		if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
			device_printf(sc->sc_dev, "MAC rx fault, status %x\n",
			    rxstat);
		if ((rxstat & GEM_MAC_RX_OVERFLOW) != 0)
			gem_init(sc);
	}
}

static void
gem_watchdog(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = ifp->if_softc;

	DPRINTF(sc, ("gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
	    "GEM_MAC_RX_CONFIG %x\n",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG)));
	CTR3(KTR_GEM, "gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
	    "GEM_MAC_RX_CONFIG %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG));
	CTR3(KTR_GEM, "gem_watchdog: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x "
	    "GEM_MAC_TX_CONFIG %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_CONFIG));

	device_printf(sc->sc_dev, "device timeout\n");
	++ifp->if_oerrors;

	/* Try to get more packets going. */
	gem_start(ifp);
}

/*
 * Initialize the MII Management Interface
 */
static void
gem_mifinit(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;

	/* Configure the MIF in frame mode */
	sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config);
}

/*
 * MII interface
 *
 * The GEM MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
 *
 */
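/*
 * This driver uses frame mode exclusively: gem_mifinit() clears
 * GEM_MIF_CONFIG_BB_ENA, and the access routines below busy-wait on
 * GEM_MIF_FRAME_TA0 for completion instead of taking an interrupt.
 */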
int
gem_mii_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{
	struct gem_softc *sc = device_get_softc(dev);
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG_PHY
	printf("gem_mii_readreg: phy %d reg %d\n", phy, reg);
#endif

#if 0
	/* Select the desired PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	/* Clear PHY select bit */
	v &= ~GEM_MIF_CONFIG_PHY_SEL;
	if (phy == GEM_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= GEM_MIF_CONFIG_PHY_SEL;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, v);
#endif

	/* Construct the frame command */
	v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) |
	    GEM_MIF_FRAME_READ;

	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (v & GEM_MIF_FRAME_DATA);
	}

	device_printf(sc->sc_dev, "mii_read timeout\n");
	return (0);
}

int
gem_mii_writereg(dev, phy, reg, val)
	device_t dev;
	int phy, reg, val;
{
	struct gem_softc *sc = device_get_softc(dev);
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG_PHY
	printf("gem_mii_writereg: phy %d reg %d val %x\n", phy, reg, val);
#endif

#if 0
	/* Select the desired PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	/* Clear PHY select bit */
	v &= ~GEM_MIF_CONFIG_PHY_SEL;
	if (phy == GEM_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= GEM_MIF_CONFIG_PHY_SEL;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, v);
#endif
	/* Construct the frame command */
	v = GEM_MIF_FRAME_WRITE |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT) |
	    (val & GEM_MIF_FRAME_DATA);

	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (1);
	}

	device_printf(sc->sc_dev, "mii_write timeout\n");
	return (0);
}

void
gem_mii_statchg(dev)
	device_t dev;
{
	struct gem_softc *sc = device_get_softc(dev);
#ifdef GEM_DEBUG
	int instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media);
#endif
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h;
	u_int32_t v;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_mii_statchg: status change: phy = %d\n",
		    sc->sc_phys[instance]);
#endif

	/* Set tx full duplex options */
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0);
	DELAY(10000); /* reg must be cleared and delay before changing. */
	v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT|
	    GEM_MAC_TX_ENABLE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) {
		v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS;
	}
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v);

	/* XIF Configuration */
	/* We should really calculate all this rather than rely on defaults */
	v = bus_space_read_4(t, mac, GEM_MAC_XIF_CONFIG);
	v = GEM_MAC_XIF_LINK_LED;
	v |= GEM_MAC_XIF_TX_MII_ENA;

	/* If an external transceiver is connected, enable its MII drivers */
	sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG);
	if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) {
		/* External MII needs echo disable if half duplex. */
		if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
			/* turn on full duplex LED */
			v |= GEM_MAC_XIF_FDPLX_LED;
		else
			/* half duplex -- disable echo */
			v |= GEM_MAC_XIF_ECHO_DISABL;

		if (IFM_SUBTYPE(sc->sc_mii->mii_media_active) == IFM_1000_T)
			v |= GEM_MAC_XIF_GMII_MODE;
		else
			v &= ~GEM_MAC_XIF_GMII_MODE;
	} else {
		/* Internal MII needs buf enable */
		v |= GEM_MAC_XIF_MII_BUF_ENA;
	}
	bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v);
}

int
gem_mediachange(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = ifp->if_softc;

	/* XXX Add support for serial media. */

	return (mii_mediachg(sc->sc_mii));
}

void
gem_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct gem_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii->mii_media_active;
	ifmr->ifm_status = sc->sc_mii->mii_media_status;
}

/*
 * Process an ioctl request.
 */
static int
gem_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct gem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();
	switch (cmd) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
	case SIOCSIFMTU:
		error = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((sc->sc_ifflags ^ ifp->if_flags) == IFF_PROMISC)
				gem_setladrf(sc);
			else
				gem_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				gem_stop(ifp, 0);
		}
		sc->sc_ifflags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		gem_setladrf(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	default:
		error = ENOTTY;
		break;
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		gem_start(ifp);
	splx(s);
	return (error);
}

/*
 * Set up the logical address filter.
 */
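/*
 * Filter layout sketch: the 256-bit filter is spread across 16 16-bit
 * hash registers, indexed by the top 8 bits of the little-endian
 * CRC-32 of each multicast address. For example, crc >> 24 == 0x12
 * selects word hash[1] (0x12 >> 4) and sets bit 1 << 13
 * (15 - (0x12 & 15)), since bit 0 is the MSB of the word.
 */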
static void
gem_setladrf(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ifmultiaddr *inm;
	struct sockaddr_dl *sdl;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_char *cp;
	u_int32_t crc;
	u_int32_t hash[16];
	u_int32_t v;
	int len;
	int i;

	/* Get current RX configuration */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);

	/*
	 * Turn off promiscuous mode, promiscuous group mode (all multicast),
	 * and hash filter. Depending on the case, the right bit will be
	 * enabled.
	 */
	v &= ~(GEM_MAC_RX_PROMISCUOUS|GEM_MAC_RX_HASH_FILTER|
	    GEM_MAC_RX_PROMISC_GRP);

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		/* Turn on promiscuous mode */
		v |= GEM_MAC_RX_PROMISCUOUS;
		goto chipit;
	}
	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
		ifp->if_flags |= IFF_ALLMULTI;
		v |= GEM_MAC_RX_PROMISC_GRP;
		goto chipit;
	}

	/*
	 * Set up multicast address filter by passing all multicast addresses
	 * through a crc generator, and then using the high order 8 bits as an
	 * index into the 256 bit logical address filter. The high order 4
	 * bits selects the word, while the other 4 bits select the bit within
	 * the word (where bit 0 is the MSB).
	 */

	/* Clear hash table */
	memset(hash, 0, sizeof(hash));

	TAILQ_FOREACH(inm, &sc->sc_arpcom.ac_if.if_multiaddrs, ifma_link) {
		if (inm->ifma_addr->sa_family != AF_LINK)
			continue;
		sdl = (struct sockaddr_dl *)inm->ifma_addr;
		cp = LLADDR(sdl);
		crc = 0xffffffff;
		for (len = sdl->sdl_alen; --len >= 0;) {
			int octet = *cp++;
			int i;

#define	MC_POLY_LE	0xedb88320UL	/* mcast crc, little endian */
			for (i = 0; i < 8; i++) {
				if ((crc & 1) ^ (octet & 1)) {
					crc >>= 1;
					crc ^= MC_POLY_LE;
				} else {
					crc >>= 1;
				}
				octet >>= 1;
			}
		}
		/* Just want the 8 most significant bits. */
		crc >>= 24;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (15 - (crc & 15));
	}

	v |= GEM_MAC_RX_HASH_FILTER;
	ifp->if_flags &= ~IFF_ALLMULTI;

	/* Now load the hash table into the chip (if we are using it) */
	for (i = 0; i < 16; i++) {
		bus_space_write_4(t, h,
		    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0),
		    hash[i]);
	}

chipit:
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);
}

#ifdef notyet

/*
 * gem_power:
 *
 *	Power management (suspend/resume) hook.
 */
static void
gem_power(why, arg)
	int why;
	void *arg;
{
	struct gem_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	switch (why) {
	case PWR_SUSPEND:
	case PWR_STANDBY:
		gem_stop(ifp, 1);
		if (sc->sc_power != NULL)
			(*sc->sc_power)(sc, why);
		break;
	case PWR_RESUME:
		if (ifp->if_flags & IFF_UP) {
			if (sc->sc_power != NULL)
				(*sc->sc_power)(sc, why);
			gem_init(ifp);
		}
		break;
	case PWR_SOFTSUSPEND:
	case PWR_SOFTSTANDBY:
	case PWR_SOFTRESUME:
		break;
	}
	splx(s);
}
#endif