/*
 * Copyright (C) 2001 Eduardo Horvath.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
 *
 * $FreeBSD$
 */

/*
 * Driver for Sun GEM ethernet controllers.
 */

#define	GEM_DEBUG

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <gem/if_gemreg.h>
#include <gem/if_gemvar.h>

#define	TRIES	10000

static void	gem_start(struct ifnet *);
static void	gem_stop(struct ifnet *, int);
static int	gem_ioctl(struct ifnet *, u_long, caddr_t);
static void	gem_cddma_callback(void *, bus_dma_segment_t *, int, int);
static void	gem_rxdma_callback(void *, bus_dma_segment_t *, int, int);
static void	gem_txdma_callback(void *, bus_dma_segment_t *, int, int);
static void	gem_tick(void *);
static void	gem_watchdog(struct ifnet *);
static void	gem_init(void *);
static void	gem_init_regs(struct gem_softc *sc);
static int	gem_ringsize(int sz);
static int	gem_meminit(struct gem_softc *);
static int	gem_dmamap_load_mbuf(struct gem_softc *, struct mbuf *,
    bus_dmamap_callback_t *, struct gem_txjob *, int);
static void	gem_dmamap_unload_mbuf(struct gem_softc *, struct gem_txjob *);
static void	gem_dmamap_commit_mbuf(struct gem_softc *, struct gem_txjob *);
static void	gem_mifinit(struct gem_softc *);
static int	gem_bitwait(struct gem_softc *sc, bus_addr_t r,
    u_int32_t clr, u_int32_t set);
static int	gem_reset_rx(struct gem_softc *);
static int	gem_reset_tx(struct gem_softc *);
static int	gem_disable_rx(struct gem_softc *);
static int	gem_disable_tx(struct gem_softc *);
static void	gem_rxdrain(struct gem_softc *);
static int	gem_add_rxbuf(struct gem_softc *, int);
static void	gem_setladrf(struct gem_softc *);

struct mbuf	*gem_get(struct gem_softc *, int, int);
static void	gem_eint(struct gem_softc *, u_int);
static void	gem_rint(struct gem_softc *);
static void	gem_rint_timeout(void *);
static void	gem_tint(struct gem_softc *);
#ifdef notyet
static void	gem_power(int, void *);
#endif

devclass_t gem_devclass;
DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(gem, miibus, 1, 1, 1);

#ifdef GEM_DEBUG
#define	DPRINTF(sc, x)	if ((sc)->sc_arpcom.ac_if.if_flags & IFF_DEBUG) \
				printf x
#include <sys/ktr.h>
#define	KTR_GEM		KTR_CT2
#else
#define	DPRINTF(sc, x)	/* nothing */
#endif

#define	GEM_NSEGS GEM_NTXSEGS

/*
 * gem_attach:
 *
 *	Attach a Gem interface to the system.
 */
int
gem_attach(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_softc *child;
	int i, error;
	u_int32_t v;

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	gem_reset(sc);

	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, GEM_NSEGS,
	    BUS_SPACE_MAXSIZE_32BIT, 0, &sc->sc_pdmatag);
	if (error)
		return (error);

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MAXBSIZE,
	    GEM_NSEGS, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
	    &sc->sc_dmatag);
	if (error)
		goto fail_0;

	error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), BUS_DMA_ALLOCNOW,
	    &sc->sc_cdmatag);
	if (error)
		goto fail_1;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
	    (void **)&sc->sc_control_data, 0, &sc->sc_cddmamap))) {
		device_printf(sc->sc_dev, "unable to allocate control data,"
		    " error = %d\n", error);
		goto fail_2;
	}

	sc->sc_cddma = 0;
	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct gem_control_data),
	    gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
		device_printf(sc->sc_dev, "unable to load control data DMA "
		    "map, error = %d\n", error);
		goto fail_3;
	}
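	/*
	 * Note on the tag hierarchy above (commentary added for clarity,
	 * not in the original): sc_pdmatag is the parent, restricting all
	 * DMA to 32-bit bus addresses (the ring base registers are only
	 * programmed with 32-bit addresses in gem_init()); sc_dmatag hangs
	 * off it for the per-mbuf transmit/receive maps, and sc_cdmatag
	 * maps the single page-aligned control-data block that holds both
	 * descriptor rings.
	 */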
	/*
	 * Initialize the transmit job descriptors.
	 */
	STAILQ_INIT(&sc->sc_txfreeq);
	STAILQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Create the transmit buffer DMA maps.
	 */
	error = ENOMEM;
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		struct gem_txsoft *txs;

		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		txs->txs_ndescs = 0;
		if ((error = bus_dmamap_create(sc->sc_dmatag, 0,
		    &txs->txs_dmamap)) != 0) {
			device_printf(sc->sc_dev, "unable to create tx DMA map "
			    "%d, error = %d\n", i, error);
			goto fail_4;
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmatag, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			device_printf(sc->sc_dev, "unable to create rx DMA map "
			    "%d, error = %d\n", i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	gem_mifinit(sc);

	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, gem_mediachange,
	    gem_mediastatus)) != 0) {
		device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
		goto fail_5;
	}
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Announce ourselves. */
	device_printf(sc->sc_dev, "Ethernet address:");
	for (i = 0; i < 6; i++)
		printf("%c%02x", i > 0 ? ':' : ' ', sc->sc_arpcom.ac_enaddr[i]);

	/* Get RX FIFO size */
	sc->sc_rxfifosize = 64 *
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_FIFO_SIZE);
	printf(", %uKB RX fifo", sc->sc_rxfifosize / 1024);

	/* Get TX FIFO size */
	v = bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_FIFO_SIZE);
	printf(", %uKB TX fifo\n", v / 16);

	/* Initialize ifnet structure. */
	ifp->if_softc = sc;
	ifp->if_unit = device_get_unit(sc->sc_dev);
	ifp->if_name = "gem";
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = gem_start;
	ifp->if_ioctl = gem_ioctl;
	ifp->if_watchdog = gem_watchdog;
	ifp->if_init = gem_init;
	ifp->if_output = ether_output;
	ifp->if_snd.ifq_maxlen = GEM_TXQUEUELEN;
	/*
	 * Walk along the list of attached MII devices and
	 * establish an `MII instance' to `phy number'
	 * mapping.  We'll use this mapping in media change
	 * requests to determine which phy to use to program
	 * the MIF configuration register.
	 */
	for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;
	    child = LIST_NEXT(child, mii_list)) {
		/*
		 * Note: we support just two PHYs: the built-in
		 * internal device and an external on the MII
		 * connector.
		 */
		if (child->mii_phy > 1 || child->mii_inst > 1) {
			device_printf(sc->sc_dev, "cannot accommodate "
			    "MII device %s at phy %d, instance %d\n",
			    device_get_name(child->mii_dev),
			    child->mii_phy, child->mii_inst);
			continue;
		}

		sc->sc_phys[child->mii_inst] = child->mii_phy;
	}

	/*
	 * Now select and activate the PHY we will use.
	 *
	 * The order of preference is External (MDI1),
	 * Internal (MDI0), Serial Link (no MII).
	 */
	if (sc->sc_phys[1]) {
#ifdef GEM_DEBUG
		printf("using external phy\n");
#endif
		sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
	} else {
#ifdef GEM_DEBUG
		printf("using internal phy\n");
#endif
		sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
	}
	bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_MIF_CONFIG,
	    sc->sc_mif_config);
	/* Attach the interface. */
	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
#ifdef notyet
	/*
	 * Add a suspend hook to make sure we come back up after a
	 * resume.
	 */
	sc->sc_powerhook = powerhook_establish(gem_power, sc);
	if (sc->sc_powerhook == NULL)
		device_printf(sc->sc_dev, "WARNING: unable to establish power "
		    "hook\n");
#endif

	callout_init(&sc->sc_tick_ch, 0);
	callout_init(&sc->sc_rx_ch, 0);
	return (0);

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_5:
	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
fail_4:
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
fail_3:
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
fail_2:
	bus_dma_tag_destroy(sc->sc_cdmatag);
fail_1:
	bus_dma_tag_destroy(sc->sc_dmatag);
fail_0:
	bus_dma_tag_destroy(sc->sc_pdmatag);
	return (error);
}

static void
gem_cddma_callback(xsc, segs, nsegs, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	int error;
{
	struct gem_softc *sc = (struct gem_softc *)xsc;

	if (error != 0)
		return;
	if (nsegs != 1) {
		/* can't happen... */
		panic("gem_cddma_callback: bad control buffer segment count");
	}
	sc->sc_cddma = segs[0].ds_addr;
}

static void
gem_rxdma_callback(xsc, segs, nsegs, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	int error;
{
	struct gem_rxsoft *rxs = (struct gem_rxsoft *)xsc;

	if (error != 0)
		return;
	if (nsegs != 1) {
		/* can't happen... */
		panic("gem_rxdma_callback: bad rx buffer segment count");
	}
	rxs->rxs_paddr = segs[0].ds_addr;
}

/*
 * This is called multiple times in our version of dmamap_load_mbuf, but should
 * be fit for a generic version that only calls it once.
 */
static void
gem_txdma_callback(xsc, segs, nsegs, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	int error;
{
	struct gem_txdma *tx = (struct gem_txdma *)xsc;
	int seg;

	tx->txd_error = error;
	if (error != 0)
		return;
	tx->txd_nsegs = nsegs;

	/*
	 * Initialize the transmit descriptors.
	 */
	for (seg = 0; seg < nsegs;
	    seg++, tx->txd_nexttx = GEM_NEXTTX(tx->txd_nexttx)) {
		uint64_t flags;

		DPRINTF(tx->txd_sc, ("txdma_cb: mapping seg %d (txd %d), len "
		    "%lx, addr %#lx (%#lx)\n", seg, tx->txd_nexttx,
		    segs[seg].ds_len, segs[seg].ds_addr,
		    GEM_DMA_WRITE(tx->txd_sc, segs[seg].ds_addr)));
		CTR5(KTR_GEM, "txdma_cb: mapping seg %d (txd %d), len "
		    "%lx, addr %#lx (%#lx)", seg, tx->txd_nexttx,
		    segs[seg].ds_len, segs[seg].ds_addr,
		    GEM_DMA_WRITE(tx->txd_sc, segs[seg].ds_addr));
		/*
		 * If this is the first descriptor we're
		 * enqueueing, set the start of packet flag,
		 * and the checksum stuff if we want the hardware
		 * to do it.
		 */
		tx->txd_sc->sc_txdescs[tx->txd_nexttx].gd_addr =
		    GEM_DMA_WRITE(tx->txd_sc, segs[seg].ds_addr);
		flags = segs[seg].ds_len & GEM_TD_BUFSIZE;
		if ((tx->txd_flags & GTXD_FIRST) != 0 && seg == 0) {
			CTR2(KTR_GEM, "txdma_cb: start of packet at seg %d, "
			    "tx %d", seg, tx->txd_nexttx);
			flags |= GEM_TD_START_OF_PACKET;
			if (++tx->txd_sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
				tx->txd_sc->sc_txwin = 0;
				flags |= GEM_TD_INTERRUPT_ME;
			}
		}
		if ((tx->txd_flags & GTXD_LAST) != 0 && seg == nsegs - 1) {
			CTR2(KTR_GEM, "txdma_cb: end of packet at seg %d, "
			    "tx %d", seg, tx->txd_nexttx);
			flags |= GEM_TD_END_OF_PACKET;
		}
		tx->txd_sc->sc_txdescs[tx->txd_nexttx].gd_flags =
		    GEM_DMA_WRITE(tx->txd_sc, flags);
		tx->txd_lasttx = tx->txd_nexttx;
	}
}
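/*
 * Illustration of the result (added commentary, not from the original
 * source): an mbuf chain that maps to two DMA segments ends up as two
 * ring entries,
 *
 *	desc n:   gd_flags = GEM_TD_START_OF_PACKET | len0, gd_addr = addr0
 *	desc n+1: gd_flags = GEM_TD_END_OF_PACKET   | len1, gd_addr = addr1
 *
 * with GEM_TD_INTERRUPT_ME also set on the first descriptor whenever
 * sc_txwin overflows, so completion interrupts are batched over roughly
 * GEM_NTXSEGS * 2 / 3 descriptors instead of being taken per packet.
 */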
static void
gem_tick(arg)
	void *arg;
{
	struct gem_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(sc->sc_mii);
	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}

static int
gem_bitwait(sc, r, clr, set)
	struct gem_softc *sc;
	bus_addr_t r;
	u_int32_t clr;
	u_int32_t set;
{
	int i;
	u_int32_t reg;

	for (i = TRIES; i--; DELAY(100)) {
		reg = bus_space_read_4(sc->sc_bustag, sc->sc_h, r);
		if ((reg & clr) == 0 && (reg & set) == set)
			return (1);
	}
	return (0);
}

void
gem_reset(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int s;

	s = splnet();
	DPRINTF(sc, ("%s: gem_reset\n", device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_reset", device_get_name(sc->sc_dev));
	gem_reset_rx(sc);
	gem_reset_tx(sc);

	/* Do a full reset */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
		device_printf(sc->sc_dev, "cannot reset device\n");
	splx(s);
}

/*
 * gem_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
gem_rxdrain(sc)
	struct gem_softc *sc;
{
	struct gem_rxsoft *rxs;
	int i;

	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}
/*
 * Reset the whole thing.
 */
static void
gem_stop(ifp, disable)
	struct ifnet *ifp;
	int disable;
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct gem_txsoft *txs;

	DPRINTF(sc, ("%s: gem_stop\n", device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_stop", device_get_name(sc->sc_dev));

	callout_stop(&sc->sc_tick_ch);

	/* XXX - Should we reset these instead? */
	gem_disable_tx(sc);
	gem_disable_rx(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		if (txs->txs_ndescs != 0) {
			bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
			if (txs->txs_mbuf != NULL) {
				m_freem(txs->txs_mbuf);
				txs->txs_mbuf = NULL;
			}
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	if (disable)
		gem_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/*
 * Reset the receiver
 */
int
gem_reset_rx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_rx(sc);
	bus_space_write_4(t, h, GEM_RX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_RX_CONFIG, 1, 0))
		device_printf(sc->sc_dev, "cannot disable rx dma\n");

	/* Wait 5ms extra. */
	DELAY(5000);

	/* Finally, reset the ERX */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX, 0)) {
		device_printf(sc->sc_dev, "cannot reset receiver\n");
		return (1);
	}
	return (0);
}

/*
 * Reset the transmitter
 */
static int
gem_reset_tx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int i;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_tx(sc);
	bus_space_write_4(t, h, GEM_TX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_TX_CONFIG, 1, 0))
		device_printf(sc->sc_dev, "cannot disable tx dma\n");

	/* Wait 5ms extra. */
	DELAY(5000);

	/* Finally, reset the ETX */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_TX);
	/* Wait till it finishes */
	for (i = TRIES; i--; DELAY(100))
		if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_TX) == 0)
			break;
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) {
		device_printf(sc->sc_dev, "cannot reset transmitter\n");
		return (1);
	}
	return (0);
}

/*
 * disable receiver.
 */
static int
gem_disable_rx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	cfg &= ~GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
}

/*
 * disable transmitter.
 */
static int
gem_disable_tx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG);
	cfg &= ~GEM_MAC_TX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
}
/*
 * Initialize interface.
 */
static int
gem_meminit(sc)
	struct gem_softc *sc;
{
	struct gem_rxsoft *rxs;
	int i, error;

	/*
	 * Initialize the transmit descriptor ring.
	 */
	memset((void *)sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	for (i = 0; i < GEM_NTXDESC; i++) {
		sc->sc_txdescs[i].gd_flags = 0;
		sc->sc_txdescs[i].gd_addr = 0;
	}
	GEM_CDTXSYNC(sc, 0, GEM_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = GEM_NTXDESC-1;
	sc->sc_txnext = 0;
	sc->sc_txwin = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = gem_add_rxbuf(sc, i)) != 0) {
				device_printf(sc->sc_dev, "unable to "
				    "allocate or map rx buffer %d, error = "
				    "%d\n", i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				gem_rxdrain(sc);
				return (1);
			}
		} else
			GEM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	return (0);
}

static int
gem_ringsize(sz)
	int sz;
{
	int v = 0;

	switch (sz) {
	case 32:
		v = GEM_RING_SZ_32;
		break;
	case 64:
		v = GEM_RING_SZ_64;
		break;
	case 128:
		v = GEM_RING_SZ_128;
		break;
	case 256:
		v = GEM_RING_SZ_256;
		break;
	case 512:
		v = GEM_RING_SZ_512;
		break;
	case 1024:
		v = GEM_RING_SZ_1024;
		break;
	case 2048:
		v = GEM_RING_SZ_2048;
		break;
	case 4096:
		v = GEM_RING_SZ_4096;
		break;
	case 8192:
		v = GEM_RING_SZ_8192;
		break;
	default:
		printf("gem: invalid Receive Descriptor ring size\n");
		break;
	}
	return (v);
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
static void
gem_init(xsc)
	void *xsc;
{
	struct gem_softc *sc = (struct gem_softc *)xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int s;
	u_int32_t v;

	s = splnet();

	DPRINTF(sc, ("%s: gem_init: calling stop\n",
	    device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_init: calling stop",
	    device_get_name(sc->sc_dev));
	/*
	 * Initialization sequence.  The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2.  Reset the Ethernet Channel */
	gem_stop(&sc->sc_arpcom.ac_if, 0);
	gem_reset(sc);
	DPRINTF(sc, ("%s: gem_init: restarting\n",
	    device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_init: restarting",
	    device_get_name(sc->sc_dev));

	/* Re-initialize the MIF */
	gem_mifinit(sc);

	/* Call MI reset function if any */
	if (sc->sc_hwreset)
		(*sc->sc_hwreset)(sc);

	/* step 3.  Setup data structures in host memory */
	gem_meminit(sc);

	/* step 4.  TX MAC registers & counters */
	gem_init_regs(sc);
	/* XXX: VLAN code from NetBSD temporarily removed. */
	bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
	    (ETHER_MAX_LEN + sizeof(struct ether_header)) | (0x2000<<16));

	/* step 5.  RX MAC registers & counters */
	gem_setladrf(sc);

	/* step 6 & 7.  Program Descriptor Ring Base Addresses */
	/* NOTE: we use only 32-bit DMA addresses here. */
	bus_space_write_4(t, h, GEM_TX_RING_PTR_HI, 0);
	bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

	bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 0);
	bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
	DPRINTF(sc, ("loading rx ring %lx, tx ring %lx, cddma %lx\n",
	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma));
	CTR3(KTR_GEM, "loading rx ring %lx, tx ring %lx, cddma %lx",
	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);

	/* step 8.  Global Configuration & Interrupt Mask */
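	/*
	 * Bits set in GEM_INTMASK disable the corresponding interrupt
	 * source, so writing the complement of the list below enables
	 * exactly those sources (tx done/empty, rx done/nobuf, and the
	 * various error, PCS and MIF events) and masks everything else.
	 */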
	bus_space_write_4(t, h, GEM_INTMASK,
	    ~(GEM_INTR_TX_INTME|
	    GEM_INTR_TX_EMPTY|
	    GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF|
	    GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS|
	    GEM_INTR_MAC_CONTROL|GEM_INTR_MIF|
	    GEM_INTR_BERR));
	bus_space_write_4(t, h, GEM_MAC_RX_MASK,
	    GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT);
	bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXXX */
	bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0); /* XXXX */

	/* step 9.  ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = gem_ringsize(GEM_NTXDESC /*XXX*/);
	bus_space_write_4(t, h, GEM_TX_CONFIG,
	    v|GEM_TX_CONFIG_TXDMA_EN|
	    ((0x400<<10)&GEM_TX_CONFIG_TXFIFO_TH));

	/* step 10.  ERX Configuration */

	/* Encode Receive Descriptor ring size */
	v = gem_ringsize(GEM_NRXDESC /*XXX*/);

	/* Enable DMA */
	bus_space_write_4(t, h, GEM_RX_CONFIG,
	    v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
	    (2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN|
	    (0<<GEM_RX_CONFIG_CXM_START_SHFT));
	/*
	 * The following value is for an OFF Threshold of about 3/4 full
	 * and an ON Threshold of 1/4 full.
	 */
	bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));
	bus_space_write_4(t, h, GEM_RX_BLANKING, (6<<12)|6);

	/* step 11.  Configure Media */
	mii_mediachg(sc->sc_mii);

	/* step 12.  RX_MAC Configuration Register */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	v |= GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);

	/* step 14.  Issue Transmit Pending command */

	/* Call MI initialization function if any */
	if (sc->sc_hwinit)
		(*sc->sc_hwinit)(sc);

	/* step 15.  Give the receiver a swift kick */
	bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC-4);

	/* Start the one second timer. */
	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;
	sc->sc_ifflags = ifp->if_flags;
	splx(s);
}
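/*
 * Calling protocol for the gem_txjob helpers below (summary added for
 * clarity; it mirrors what gem_start() does): load an mbuf chain with
 * gem_dmamap_load_mbuf(), check that enough descriptors were free, and
 * then either commit the job to the dirty queue or roll it back, e.g.:
 *
 *	if (gem_dmamap_load_mbuf(sc, m0, gem_txdma_callback, &txj,
 *	    BUS_DMA_NOWAIT) == 0) {
 *		if (txj.txj_nsegs <= sc->sc_txfree - 1)
 *			gem_dmamap_commit_mbuf(sc, &txj);
 *		else
 *			gem_dmamap_unload_mbuf(sc, &txj);
 *	}
 *
 * On a load failure the helper has already unloaded the partial job.
 */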
/*
 * XXX: This is really a substitute for bus_dmamap_load_mbuf(), which FreeBSD
 * does not yet have, with some adaptations for this driver.
 * Some changes are mandated by the fact that multiple maps may be needed
 * to map a single mbuf chain.
 * It should be removed once generic support is available.
 *
 * This is derived from NetBSD (syssrc/sys/arch/sparc64/sparc64/machdep.c), for
 * a copyright notice see sparc64/sparc64/bus_machdep.c.
 *
 * Not every error condition is passed to the callback in this version, and the
 * callback may be called more than once.
 * It also gropes in the entrails of the callback arg...
 */
static int
gem_dmamap_load_mbuf(sc, m0, cb, txj, flags)
	struct gem_softc *sc;
	struct mbuf *m0;
	bus_dmamap_callback_t *cb;
	struct gem_txjob *txj;
	int flags;
{
	struct gem_txdma txd;
	struct gem_txsoft *txs;
	struct mbuf *m;
	void *vaddr;
	int error, first = 1, len, totlen;

	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("gem_dmamap_load_mbuf: no packet header");
	totlen = m0->m_pkthdr.len;
	len = 0;
	txd.txd_sc = sc;
	txd.txd_nexttx = txj->txj_nexttx;
	txj->txj_nsegs = 0;
	STAILQ_INIT(&txj->txj_txsq);
	m = m0;
	while (m != NULL && len < totlen) {
		if (m->m_len == 0) {
			/* Skip empty mbufs; stalling here would loop. */
			m = m->m_next;
			continue;
		}
		/* Get a work queue entry. */
		if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
			/*
			 * Ran out of descriptors, return a value that
			 * cannot be returned by bus_dmamap_load to notify
			 * the caller.
			 */
			error = -1;
			goto fail;
		}
		len += m->m_len;
		txd.txd_flags = first ? GTXD_FIRST : 0;
		if (m->m_next == NULL || len >= totlen)
			txd.txd_flags |= GTXD_LAST;
		vaddr = mtod(m, void *);
		error = bus_dmamap_load(sc->sc_dmatag, txs->txs_dmamap, vaddr,
		    m->m_len, cb, &txd, flags);
		if (error != 0 || txd.txd_error != 0)
			goto fail;
		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmatag, txs->txs_dmamap,
		    BUS_DMASYNC_PREWRITE);
		m = m->m_next;
		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		txs->txs_mbuf = first ? m0 : NULL;
		txs->txs_firstdesc = txj->txj_nexttx;
		txs->txs_lastdesc = txd.txd_lasttx;
		txs->txs_ndescs = txd.txd_nsegs;
		CTR3(KTR_GEM, "load_mbuf: setting firstdesc=%d, lastdesc=%d, "
		    "ndescs=%d", txs->txs_firstdesc, txs->txs_lastdesc,
		    txs->txs_ndescs);
		STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
		STAILQ_INSERT_TAIL(&txj->txj_txsq, txs, txs_q);
		txj->txj_nexttx = txd.txd_nexttx;
		txj->txj_nsegs += txd.txd_nsegs;
		first = 0;
	}
	txj->txj_lasttx = txd.txd_lasttx;
	return (0);

fail:
	CTR1(KTR_GEM, "gem_dmamap_load_mbuf failed (%d)", error);
	gem_dmamap_unload_mbuf(sc, txj);
	return (error);
}

/*
 * Unload an mbuf using the txd the information was placed in.
 * The tx interrupt code frees the tx segments one by one, because the txd is
 * not available any more.
 */
static void
gem_dmamap_unload_mbuf(sc, txj)
	struct gem_softc *sc;
	struct gem_txjob *txj;
{
	struct gem_txsoft *txs;

	/* Re-add the removed descriptors and unload the segments. */
	while ((txs = STAILQ_FIRST(&txj->txj_txsq)) != NULL) {
		bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
		STAILQ_REMOVE_HEAD(&txj->txj_txsq, txs_q);
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}
}

static void
gem_dmamap_commit_mbuf(sc, txj)
	struct gem_softc *sc;
	struct gem_txjob *txj;
{
	struct gem_txsoft *txs;

	/* Commit the txjob by transferring the txsofts to the txdirtyq. */
	while ((txs = STAILQ_FIRST(&txj->txj_txsq)) != NULL) {
		STAILQ_REMOVE_HEAD(&txj->txj_txsq, txs_q);
		STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
	}
}
static void
gem_init_regs(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	const u_char *laddr = sc->sc_arpcom.ac_enaddr;
	u_int32_t v;

	/* These regs are not cleared on reset */
	if (!sc->sc_inited) {

		/* Wooo.  Magic values. */
		bus_space_write_4(t, h, GEM_MAC_IPG0, 0);
		bus_space_write_4(t, h, GEM_MAC_IPG1, 8);
		bus_space_write_4(t, h, GEM_MAC_IPG2, 4);

		bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* Max frame and max burst size */
		bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
		    ETHER_MAX_LEN | (0x2000<<16));

		bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x7);
		bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x4);
		bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10);
		/* Dunno.... */
		bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
		bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
		    ((laddr[5]<<8)|laddr[4])&0x3ff);

		/* Secondary MAC addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);

		/* MAC control addr set to 01:80:c2:00:00:01 */
		bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
		bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
		bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);

		/* MAC filter addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);

		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);

		sc->sc_inited = 1;
	}

	/* Counters need to be zeroed */
	bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);

	/* Un-pause stuff */
#if 0
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);
#else
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0);
#endif

	/*
	 * Set the station address.
	 */
	bus_space_write_4(t, h, GEM_MAC_ADDR0, (laddr[4]<<8)|laddr[5]);
	bus_space_write_4(t, h, GEM_MAC_ADDR1, (laddr[2]<<8)|laddr[3]);
	bus_space_write_4(t, h, GEM_MAC_ADDR2, (laddr[0]<<8)|laddr[1]);

	/*
	 * Enable MII outputs.  Enable GMII if there is a gigabit PHY.
	 */
	sc->sc_mif_config = bus_space_read_4(t, h, GEM_MIF_CONFIG);
	v = GEM_MAC_XIF_TX_MII_ENA;
	if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
		v |= GEM_MAC_XIF_FDPLX_LED;
		if (sc->sc_flags & GEM_GIGABIT)
			v |= GEM_MAC_XIF_GMII_MODE;
	}
	bus_space_write_4(t, h, GEM_MAC_XIF_CONFIG, v);
}
static void
gem_start(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct mbuf *m0 = NULL, *m;
	struct gem_txjob txj;
	int firsttx, ofree, seg, ntx, txmfail;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;
	firsttx = sc->sc_txnext;

	DPRINTF(sc, ("%s: gem_start: txfree %d, txnext %d\n",
	    device_get_name(sc->sc_dev), ofree, firsttx));
	CTR3(KTR_GEM, "%s: gem_start: txfree %d, txnext %d",
	    device_get_name(sc->sc_dev), ofree, firsttx);

	txj.txj_nexttx = firsttx;
	txj.txj_lasttx = 0;
	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	txmfail = 0;
	for (ntx = 0;; ntx++) {
		/*
		 * Grab a packet off the queue.
		 */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		txmfail = gem_dmamap_load_mbuf(sc, m0,
		    gem_txdma_callback, &txj, BUS_DMA_NOWAIT);
		if (txmfail == -1) {
			IF_PREPEND(&ifp->if_snd, m0);
			break;
		}
		if (txmfail > 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				device_printf(sc->sc_dev, "unable to "
				    "allocate Tx mbuf\n");
				/* Failed; requeue. */
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					device_printf(sc->sc_dev, "unable to "
					    "allocate Tx cluster\n");
					IF_PREPEND(&ifp->if_snd, m0);
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			txmfail = gem_dmamap_load_mbuf(sc, m,
			    gem_txdma_callback, &txj, BUS_DMA_NOWAIT);
			if (txmfail != 0) {
				if (txmfail > 0) {
					device_printf(sc->sc_dev, "unable to "
					    "load Tx buffer, error = %d\n",
					    txmfail);
				}
				m_freem(m);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring as a termination point, to
		 * prevent wrap-around.
		 */
		if (txj.txj_nsegs > (sc->sc_txfree - 1)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			gem_dmamap_unload_mbuf(sc, &txj);
			if (m != NULL)
				m_freem(m);
			IF_PREPEND(&ifp->if_snd, m0);
			break;
		}

		if (m != NULL)
			m_freem(m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf("     gem_start %p transmit chain:\n",
			    STAILQ_FIRST(&txj.txj_txsq));
			for (seg = sc->sc_txnext;; seg = GEM_NEXTTX(seg)) {
				printf("descriptor %d:\t", seg);
				printf("gd_flags: 0x%016llx\t", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[seg].gd_flags));
				printf("gd_addr: 0x%016llx\n", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[seg].gd_addr));
				if (seg == txj.txj_lasttx)
					break;
			}
		}
#endif

		/* Sync the descriptors we're using. */
		GEM_CDTXSYNC(sc, sc->sc_txnext, txj.txj_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->sc_txfree -= txj.txj_nsegs;
		sc->sc_txnext = txj.txj_nexttx;

		gem_dmamap_commit_mbuf(sc, &txj);
	}
	if (txmfail == -1 || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (ntx > 0) {
		DPRINTF(sc, ("%s: packets enqueued, IC on %d, OWN on %d\n",
		    device_get_name(sc->sc_dev), txj.txj_lasttx, firsttx));
		CTR3(KTR_GEM, "%s: packets enqueued, IC on %d, OWN on %d",
		    device_get_name(sc->sc_dev), txj.txj_lasttx, firsttx);
		/*
		 * The entire packet chain is set up.
		 * Kick the transmitter.
		 */
		DPRINTF(sc, ("%s: gem_start: kicking tx %d\n",
		    device_get_name(sc->sc_dev), txj.txj_nexttx));
		CTR3(KTR_GEM, "%s: gem_start: kicking tx %d=%d",
		    device_get_name(sc->sc_dev), txj.txj_nexttx,
		    sc->sc_txnext);
		bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_TX_KICK,
		    sc->sc_txnext);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
		DPRINTF(sc, ("%s: gem_start: watchdog %d\n",
		    device_get_name(sc->sc_dev), ifp->if_timer));
		CTR2(KTR_GEM, "%s: gem_start: watchdog %d",
		    device_get_name(sc->sc_dev), ifp->if_timer);
	}
}

/*
 * Transmit interrupt.
 */
static void
gem_tint(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h;
	struct gem_txsoft *txs;
	int txlast;
	int progress = 0;

	DPRINTF(sc, ("%s: gem_tint\n", device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_tint", device_get_name(sc->sc_dev));

	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
	    bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);

	/*
	 * then clear the hardware counters.
	 */
	bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		GEM_CDTXSYNC(sc, txs->txs_lastdesc,
		    txs->txs_ndescs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			int i;
			printf("    txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
				printf("descriptor %d: ", i);
				printf("gd_flags: 0x%016llx\t", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags));
				printf("gd_addr: 0x%016llx\n", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr));
				if (i == txs->txs_lastdesc)
					break;
			}
		}
#endif

		/*
		 * In theory, we could harvest some descriptors before
		 * the ring is empty, but that's a bit complicated.
		 *
		 * GEM_TX_COMPLETION points to the last descriptor
		 * processed +1.
		 */
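		/*
		 * For example, with firstdesc = 1020 and lastdesc = 2 in a
		 * 1024-entry ring, the packet is still in flight as long as
		 * txlast >= 1020 or txlast <= 2; the wrapped comparison in
		 * the else branch below handles exactly that case.
		 */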
		txlast = bus_space_read_4(t, mac, GEM_TX_COMPLETION);
		DPRINTF(sc,
		    ("gem_tint: txs->txs_lastdesc = %d, txlast = %d\n",
		    txs->txs_lastdesc, txlast));
		CTR3(KTR_GEM, "gem_tint: txs->txs_firstdesc = %d, "
		    "txs->txs_lastdesc = %d, txlast = %d",
		    txs->txs_firstdesc, txs->txs_lastdesc, txlast);
		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
			if ((txlast >= txs->txs_firstdesc) &&
			    (txlast <= txs->txs_lastdesc))
				break;
		} else {
			/* Ick -- this command wraps */
			if ((txlast >= txs->txs_firstdesc) ||
			    (txlast <= txs->txs_lastdesc))
				break;
		}

		DPRINTF(sc, ("gem_tint: releasing a desc\n"));
		CTR0(KTR_GEM, "gem_tint: releasing a desc");
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		sc->sc_txfree += txs->txs_ndescs;

		bus_dmamap_sync(sc->sc_dmatag, txs->txs_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
		if (txs->txs_mbuf != NULL) {
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		ifp->if_opackets++;
		progress = 1;
	}

	DPRINTF(sc, ("gem_tint: GEM_TX_STATE_MACHINE %x "
	    "GEM_TX_DATA_PTR %llx "
	    "GEM_TX_COMPLETION %x\n",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_STATE_MACHINE),
	    ((long long) bus_space_read_4(sc->sc_bustag, sc->sc_h,
	    GEM_TX_DATA_PTR_HI) << 32) |
	    bus_space_read_4(sc->sc_bustag, sc->sc_h,
	    GEM_TX_DATA_PTR_LO),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_COMPLETION)));
	CTR3(KTR_GEM, "gem_tint: GEM_TX_STATE_MACHINE %x "
	    "GEM_TX_DATA_PTR %llx "
	    "GEM_TX_COMPLETION %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_STATE_MACHINE),
	    ((long long) bus_space_read_4(sc->sc_bustag, sc->sc_h,
	    GEM_TX_DATA_PTR_HI) << 32) |
	    bus_space_read_4(sc->sc_bustag, sc->sc_h,
	    GEM_TX_DATA_PTR_LO),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_COMPLETION));

	if (progress) {
		if (sc->sc_txfree == GEM_NTXDESC - 1)
			sc->sc_txwin = 0;

		/* Freed some descriptors, so reset IFF_OACTIVE and restart. */
		ifp->if_flags &= ~IFF_OACTIVE;
		gem_start(ifp);

		if (STAILQ_EMPTY(&sc->sc_txdirtyq))
			ifp->if_timer = 0;
	}

	DPRINTF(sc, ("%s: gem_tint: watchdog %d\n",
	    device_get_name(sc->sc_dev), ifp->if_timer));
	CTR2(KTR_GEM, "%s: gem_tint: watchdog %d",
	    device_get_name(sc->sc_dev), ifp->if_timer);
}

static void
gem_rint_timeout(arg)
	void *arg;
{

	gem_rint((struct gem_softc *)arg);
}
/*
 * Receive interrupt.
 */
static void
gem_rint(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	struct ether_header *eh;
	struct gem_rxsoft *rxs;
	struct mbuf *m;
	u_int64_t rxstat;
	u_int32_t rxcomp;
	int i, len, progress = 0;

	callout_stop(&sc->sc_rx_ch);
	DPRINTF(sc, ("%s: gem_rint\n", device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_rint", device_get_name(sc->sc_dev));

	/*
	 * Read the completion register once.  This limits
	 * how long the following loop can execute.
	 */
	rxcomp = bus_space_read_4(t, h, GEM_RX_COMPLETION);

	/*
	 * XXXX Read the lastrx only once at the top for speed.
	 */
	DPRINTF(sc, ("gem_rint: sc->rxptr %d, complete %d\n",
	    sc->sc_rxptr, rxcomp));
	CTR2(KTR_GEM, "gem_rint: sc->rxptr %d, complete %d",
	    sc->sc_rxptr, rxcomp);
	for (i = sc->sc_rxptr; i != rxcomp;
	    i = GEM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		GEM_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);

		if (rxstat & GEM_RD_OWN) {
#if 0 /* XXX: In case of emergency, re-enable this. */
			/*
			 * The descriptor is still marked as owned, although
			 * it is supposed to have completed.  This has been
			 * observed on some machines.  Just exiting here
			 * might leave the packet sitting around until another
			 * one arrives to trigger a new interrupt, which is
			 * generally undesirable, so set up a timeout.
			 */
			callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
			    gem_rint_timeout, sc);
#endif
			break;
		}

		progress++;
		ifp->if_ipackets++;

		if (rxstat & GEM_RD_BAD_CRC) {
			ifp->if_ierrors++;
			device_printf(sc->sc_dev, "receive error: CRC error\n");
			GEM_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap,
		    BUS_DMASYNC_POSTREAD);
#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf("    rxsoft %p descriptor %d: ", rxs, i);
			printf("gd_flags: 0x%016llx\t", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
			printf("gd_addr: 0x%016llx\n", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
		}
#endif

		/*
		 * No errors; receive the packet.  Note the Gem
		 * includes the CRC with every packet.
		 */
		len = GEM_RD_BUFLEN(rxstat);

		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = rxs->rxs_mbuf;
		if (gem_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			GEM_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap,
			    BUS_DMASYNC_PREREAD);
			continue;
		}
		/*
		 * We're already off by two: skip the 2-byte pad the chip
		 * was told to DMA in front of the frame (first-byte offset
		 * 2 in GEM_RX_CONFIG), which leaves the IP header aligned.
		 */
		m->m_data += 2;

		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len - ETHER_CRC_LEN;
		m_adj(m, sizeof(struct ether_header));

		/* Pass it on. */
		ether_input(ifp, eh, m);
	}

	if (progress) {
		/* Update the receive pointer. */
		if (i == sc->sc_rxptr) {
			device_printf(sc->sc_dev, "rint: ring wrap\n");
		}
		sc->sc_rxptr = i;
		bus_space_write_4(t, h, GEM_RX_KICK, GEM_PREVRX(i));
	}

	DPRINTF(sc, ("gem_rint: done sc->rxptr %d, complete %d\n",
	    sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION)));
	CTR2(KTR_GEM, "gem_rint: done sc->rxptr %d, complete %d",
	    sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION));
}
/*
 * gem_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
gem_add_rxbuf(sc, idx)
	struct gem_softc *sc;
	int idx;
{
	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

#ifdef GEM_DEBUG
	/* bzero the packet to check dma */
	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
#endif

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmatag, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, gem_rxdma_callback, rxs,
	    BUS_DMA_NOWAIT);
	if (error != 0 || rxs->rxs_paddr == 0) {
		device_printf(sc->sc_dev, "can't load rx DMA map %d, error = "
		    "%d\n", idx, error);
		panic("gem_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD);

	GEM_INIT_RXDESC(sc, idx);

	return (0);
}

static void
gem_eint(sc, status)
	struct gem_softc *sc;
	u_int status;
{

	if ((status & GEM_INTR_MIF) != 0) {
		device_printf(sc->sc_dev, "XXXlink status changed\n");
		return;
	}

	device_printf(sc->sc_dev, "status=%x\n", status);
}
void
gem_intr(v)
	void *v;
{
	struct gem_softc *sc = (struct gem_softc *)v;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_h;
	u_int32_t status;

	status = bus_space_read_4(t, seb, GEM_STATUS);
	DPRINTF(sc, ("%s: gem_intr: cplt %x, status %x\n",
	    device_get_name(sc->sc_dev), (status>>19),
	    (u_int)status));
	CTR3(KTR_GEM, "%s: gem_intr: cplt %x, status %x",
	    device_get_name(sc->sc_dev), (status>>19),
	    (u_int)status);

	if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
		gem_eint(sc, status);

	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
		gem_tint(sc);

	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
		gem_rint(sc);

	/* We should eventually do more than just print out error stats. */
	if (status & GEM_INTR_TX_MAC) {
		int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS);
		if (txstat & ~GEM_MAC_TX_XMIT_DONE)
			device_printf(sc->sc_dev, "MAC tx fault, status %x\n",
			    txstat);
		if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG))
			gem_init(sc);
	}
	if (status & GEM_INTR_RX_MAC) {
		int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS);
		if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
			device_printf(sc->sc_dev, "MAC rx fault, status %x\n",
			    rxstat);
		if ((rxstat & GEM_MAC_RX_OVERFLOW) != 0)
			gem_init(sc);
	}
}

static void
gem_watchdog(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = ifp->if_softc;

	DPRINTF(sc, ("gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
	    "GEM_MAC_RX_CONFIG %x\n",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG)));
	CTR3(KTR_GEM, "gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
	    "GEM_MAC_RX_CONFIG %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG));
	CTR3(KTR_GEM, "gem_watchdog: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x "
	    "GEM_MAC_TX_CONFIG %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_CONFIG));

	device_printf(sc->sc_dev, "device timeout\n");
	++ifp->if_oerrors;

	/* Try to get more packets going. */
	gem_start(ifp);
}

/*
 * Initialize the MII Management Interface
 */
static void
gem_mifinit(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;

	/* Configure the MIF in frame mode */
	sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config);
}

/*
 * MII interface
 *
 * The GEM MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
 */
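/*
 * Frame-mode transactions, as used by gem_mii_readreg() and
 * gem_mii_writereg() below, follow the standard IEEE 802.3 clause 22
 * MII framing: an opcode (GEM_MIF_FRAME_READ/WRITE), the PHY address
 * (GEM_MIF_PHY_SHIFT), the register number (GEM_MIF_REG_SHIFT) and a
 * turnaround/data field.  Completion is signalled when the chip sets
 * the turnaround bit GEM_MIF_FRAME_TA0; for reads, the result is then
 * in the low 16 data bits, GEM_MIF_FRAME_DATA.
 */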
1737 * 1738 */ 1739 int 1740 gem_mii_readreg(dev, phy, reg) 1741 device_t dev; 1742 int phy, reg; 1743 { 1744 struct gem_softc *sc = device_get_softc(dev); 1745 bus_space_tag_t t = sc->sc_bustag; 1746 bus_space_handle_t mif = sc->sc_h; 1747 int n; 1748 u_int32_t v; 1749 1750 #ifdef GEM_DEBUG_PHY 1751 printf("gem_mii_readreg: phy %d reg %d\n", phy, reg); 1752 #endif 1753 1754 #if 0 1755 /* Select the desired PHY in the MIF configuration register */ 1756 v = bus_space_read_4(t, mif, GEM_MIF_CONFIG); 1757 /* Clear PHY select bit */ 1758 v &= ~GEM_MIF_CONFIG_PHY_SEL; 1759 if (phy == GEM_PHYAD_EXTERNAL) 1760 /* Set PHY select bit to get at external device */ 1761 v |= GEM_MIF_CONFIG_PHY_SEL; 1762 bus_space_write_4(t, mif, GEM_MIF_CONFIG, v); 1763 #endif 1764 1765 /* Construct the frame command */ 1766 v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) | 1767 GEM_MIF_FRAME_READ; 1768 1769 bus_space_write_4(t, mif, GEM_MIF_FRAME, v); 1770 for (n = 0; n < 100; n++) { 1771 DELAY(1); 1772 v = bus_space_read_4(t, mif, GEM_MIF_FRAME); 1773 if (v & GEM_MIF_FRAME_TA0) 1774 return (v & GEM_MIF_FRAME_DATA); 1775 } 1776 1777 device_printf(sc->sc_dev, "mii_read timeout\n"); 1778 return (0); 1779 } 1780 1781 int 1782 gem_mii_writereg(dev, phy, reg, val) 1783 device_t dev; 1784 int phy, reg, val; 1785 { 1786 struct gem_softc *sc = device_get_softc(dev); 1787 bus_space_tag_t t = sc->sc_bustag; 1788 bus_space_handle_t mif = sc->sc_h; 1789 int n; 1790 u_int32_t v; 1791 1792 #ifdef GEM_DEBUG_PHY 1793 printf("gem_mii_writereg: phy %d reg %d val %x\n", phy, reg, val); 1794 #endif 1795 1796 #if 0 1797 /* Select the desired PHY in the MIF configuration register */ 1798 v = bus_space_read_4(t, mif, GEM_MIF_CONFIG); 1799 /* Clear PHY select bit */ 1800 v &= ~GEM_MIF_CONFIG_PHY_SEL; 1801 if (phy == GEM_PHYAD_EXTERNAL) 1802 /* Set PHY select bit to get at external device */ 1803 v |= GEM_MIF_CONFIG_PHY_SEL; 1804 bus_space_write_4(t, mif, GEM_MIF_CONFIG, v); 1805 #endif 1806 /* Construct the frame command */ 1807 v = GEM_MIF_FRAME_WRITE | 1808 (phy << GEM_MIF_PHY_SHIFT) | 1809 (reg << GEM_MIF_REG_SHIFT) | 1810 (val & GEM_MIF_FRAME_DATA); 1811 1812 bus_space_write_4(t, mif, GEM_MIF_FRAME, v); 1813 for (n = 0; n < 100; n++) { 1814 DELAY(1); 1815 v = bus_space_read_4(t, mif, GEM_MIF_FRAME); 1816 if (v & GEM_MIF_FRAME_TA0) 1817 return (1); 1818 } 1819 1820 device_printf(sc->sc_dev, "mii_write timeout\n"); 1821 return (0); 1822 } 1823 1824 void 1825 gem_mii_statchg(dev) 1826 device_t dev; 1827 { 1828 struct gem_softc *sc = device_get_softc(dev); 1829 #ifdef GEM_DEBUG 1830 int instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media); 1831 #endif 1832 bus_space_tag_t t = sc->sc_bustag; 1833 bus_space_handle_t mac = sc->sc_h; 1834 u_int32_t v; 1835 1836 #ifdef GEM_DEBUG 1837 if (sc->sc_debug) 1838 printf("gem_mii_statchg: status change: phy = %d\n", 1839 sc->sc_phys[instance]); 1840 #endif 1841 1842 /* Set tx full duplex options */ 1843 bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0); 1844 DELAY(10000); /* reg must be cleared and delay before changing. 
	v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT|
	    GEM_MAC_TX_ENABLE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) {
		v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS;
	}
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v);

	/* XIF Configuration */
	/* We should really calculate all this rather than rely on defaults */
	v = bus_space_read_4(t, mac, GEM_MAC_XIF_CONFIG);
	v = GEM_MAC_XIF_LINK_LED;
	v |= GEM_MAC_XIF_TX_MII_ENA;

	/* If an external transceiver is connected, enable its MII drivers */
	sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG);
	if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) {
		/* External MII needs echo disable if half duplex. */
		if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
			/* turn on full duplex LED */
			v |= GEM_MAC_XIF_FDPLX_LED;
		else
			/* half duplex -- disable echo */
			v |= GEM_MAC_XIF_ECHO_DISABL;

		if (IFM_SUBTYPE(sc->sc_mii->mii_media_active) == IFM_1000_T)
			v |= GEM_MAC_XIF_GMII_MODE;
		else
			v &= ~GEM_MAC_XIF_GMII_MODE;
	} else {
		/* Internal MII needs buf enable */
		v |= GEM_MAC_XIF_MII_BUF_ENA;
	}
	bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v);
}

int
gem_mediachange(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = ifp->if_softc;

	/* XXX Add support for serial media. */

	return (mii_mediachg(sc->sc_mii));
}

void
gem_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct gem_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii->mii_media_active;
	ifmr->ifm_status = sc->sc_mii->mii_media_status;
}

/*
 * Process an ioctl request.
 */
static int
gem_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct gem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();	/* Block interrupts; the splx(s) below undoes this. */
	switch (cmd) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
	case SIOCSIFMTU:
		error = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((sc->sc_ifflags ^ ifp->if_flags) == IFF_PROMISC)
				gem_setladrf(sc);
			else
				gem_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				gem_stop(ifp, 0);
		}
		sc->sc_ifflags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		gem_setladrf(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	default:
		error = ENOTTY;
		break;
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		gem_start(ifp);
	splx(s);
	return (error);
}
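/*
 * Worked example for the hash computation in gem_setladrf() below
 * (illustrative arithmetic only): if the little-endian CRC of a
 * multicast address comes out so that (crc >> 24) == 0xa7, then the
 * filter word is 0xa7 >> 4 = 10 and the bit is 15 - (0xa7 & 15) = 8,
 * i.e. hash[10] |= 1 << 8.  The 16 16-bit words together form the
 * 256-bit logical address filter.
 */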
/*
 * Set up the logical address filter.
 */
static void
gem_setladrf(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ifmultiaddr *inm;
	struct sockaddr_dl *sdl;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_char *cp;
	u_int32_t crc;
	u_int32_t hash[16];
	u_int32_t v;
	int len;
	int i;

	/* Get current RX configuration */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);

	/*
	 * Turn off promiscuous mode, promiscuous group mode (all multicast),
	 * and hash filter.  Depending on the case, the right bit will be
	 * enabled.
	 */
	v &= ~(GEM_MAC_RX_PROMISCUOUS|GEM_MAC_RX_HASH_FILTER|
	    GEM_MAC_RX_PROMISC_GRP);

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		/* Turn on promiscuous mode */
		v |= GEM_MAC_RX_PROMISCUOUS;
		goto chipit;
	}
	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
		ifp->if_flags |= IFF_ALLMULTI;
		v |= GEM_MAC_RX_PROMISC_GRP;
		goto chipit;
	}

	/*
	 * Set up multicast address filter by passing all multicast addresses
	 * through a crc generator, and then using the high order 8 bits as an
	 * index into the 256 bit logical address filter.  The high order 4
	 * bits selects the word, while the other 4 bits select the bit within
	 * the word (where bit 0 is the MSB).
	 */

	/* Clear hash table */
	memset(hash, 0, sizeof(hash));

	TAILQ_FOREACH(inm, &sc->sc_arpcom.ac_if.if_multiaddrs, ifma_link) {
		if (inm->ifma_addr->sa_family != AF_LINK)
			continue;
		sdl = (struct sockaddr_dl *)inm->ifma_addr;
		cp = LLADDR(sdl);
		crc = 0xffffffff;
		for (len = sdl->sdl_alen; --len >= 0;) {
			int octet = *cp++;
			int i;

#define	MC_POLY_LE	0xedb88320UL	/* mcast crc, little endian */
			for (i = 0; i < 8; i++) {
				if ((crc & 1) ^ (octet & 1)) {
					crc >>= 1;
					crc ^= MC_POLY_LE;
				} else {
					crc >>= 1;
				}
				octet >>= 1;
			}
		}
		/* Just want the 8 most significant bits. */
		crc >>= 24;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (15 - (crc & 15));
	}

	v |= GEM_MAC_RX_HASH_FILTER;
	ifp->if_flags &= ~IFF_ALLMULTI;

	/* Now load the hash table into the chip (if we are using it) */
	for (i = 0; i < 16; i++) {
		bus_space_write_4(t, h,
		    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1-GEM_MAC_HASH0),
		    hash[i]);
	}

chipit:
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);
}

#ifdef notyet

/*
 * gem_power:
 *
 *	Power management (suspend/resume) hook.
 */
static void
gem_power(why, arg)
	int why;
	void *arg;
{
	struct gem_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	switch (why) {
	case PWR_SUSPEND:
	case PWR_STANDBY:
		gem_stop(ifp, 1);
		if (sc->sc_power != NULL)
			(*sc->sc_power)(sc, why);
		break;
	case PWR_RESUME:
		if (ifp->if_flags & IFF_UP) {
			if (sc->sc_power != NULL)
				(*sc->sc_power)(sc, why);
			gem_init(sc);
		}
		break;
	case PWR_SOFTSUSPEND:
	case PWR_SOFTSTANDBY:
	case PWR_SOFTRESUME:
		break;
	}
	splx(s);
}
#endif