/*
 * Copyright (C) 2001 Eduardo Horvath.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
 *
 * $FreeBSD$
 */

/*
 * Driver for Sun GEM ethernet controllers.
 */

#define	GEM_DEBUG

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <gem/if_gemreg.h>
#include <gem/if_gemvar.h>

#define	TRIES	10000

static void	gem_start(struct ifnet *);
static void	gem_stop(struct ifnet *, int);
static int	gem_ioctl(struct ifnet *, u_long, caddr_t);
static void	gem_cddma_callback(void *, bus_dma_segment_t *, int, int);
static void	gem_rxdma_callback(void *, bus_dma_segment_t *, int, int);
static void	gem_txdma_callback(void *, bus_dma_segment_t *, int, int);
static void	gem_tick(void *);
static void	gem_watchdog(struct ifnet *);
static void	gem_init(void *);
static void	gem_init_regs(struct gem_softc *sc);
static int	gem_ringsize(int sz);
static int	gem_meminit(struct gem_softc *);
static int	gem_dmamap_load_mbuf(struct gem_softc *, struct mbuf *,
    bus_dmamap_callback_t *, struct gem_txjob *, int);
static void	gem_dmamap_unload_mbuf(struct gem_softc *, struct gem_txjob *);
static void	gem_dmamap_commit_mbuf(struct gem_softc *, struct gem_txjob *);
static void	gem_mifinit(struct gem_softc *);
static int	gem_bitwait(struct gem_softc *sc, bus_addr_t r,
    u_int32_t clr, u_int32_t set);
static int	gem_reset_rx(struct gem_softc *);
static int	gem_reset_tx(struct gem_softc *);
static int	gem_disable_rx(struct gem_softc *);
static int	gem_disable_tx(struct gem_softc *);
static void	gem_rxdrain(struct gem_softc *);
static int	gem_add_rxbuf(struct gem_softc *, int);
static void	gem_setladrf(struct gem_softc *);

struct mbuf	*gem_get(struct gem_softc *, int, int);
static void	gem_eint(struct gem_softc *, u_int);
static void	gem_rint(struct gem_softc *);
#if 0
static void	gem_rint_timeout(void *);
#endif
static void	gem_tint(struct gem_softc *);
#ifdef notyet
static void	gem_power(int, void *);
#endif

devclass_t gem_devclass;
DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(gem, miibus, 1, 1, 1);

#ifdef GEM_DEBUG
#define	DPRINTF(sc, x)	if ((sc)->sc_arpcom.ac_if.if_flags & IFF_DEBUG) \
				printf x
#include <sys/ktr.h>
#define	KTR_GEM		KTR_CT2
#else
#define	DPRINTF(sc, x)	/* nothing */
#endif

#define	GEM_NSEGS GEM_NTXSEGS

/*
 * gem_attach:
 *
 *	Attach a Gem interface to the system.
 */
int
gem_attach(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_softc *child;
	int i, error;
	u_int32_t v;

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	gem_reset(sc);

	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, GEM_NSEGS,
	    BUS_SPACE_MAXSIZE_32BIT, 0, &sc->sc_pdmatag);
	if (error)
		return (error);

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MAXBSIZE,
	    GEM_NSEGS, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
	    &sc->sc_dmatag);
	if (error)
		goto fail_0;

	error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), BUS_DMA_ALLOCNOW,
	    &sc->sc_cdmatag);
	if (error)
		goto fail_1;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
	    (void **)&sc->sc_control_data, 0, &sc->sc_cddmamap))) {
		device_printf(sc->sc_dev, "unable to allocate control data,"
		    " error = %d\n", error);
		goto fail_2;
	}

	sc->sc_cddma = 0;
	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct gem_control_data),
	    gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
		device_printf(sc->sc_dev, "unable to load control data DMA "
		    "map, error = %d\n", error);
		goto fail_3;
	}

	/*
	 * Initialize the transmit job descriptors.
	 */
	STAILQ_INIT(&sc->sc_txfreeq);
	STAILQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Create the transmit buffer DMA maps.
	 */
	error = ENOMEM;
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		struct gem_txsoft *txs;

		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		txs->txs_ndescs = 0;
		if ((error = bus_dmamap_create(sc->sc_dmatag, 0,
		    &txs->txs_dmamap)) != 0) {
			device_printf(sc->sc_dev, "unable to create tx DMA map "
			    "%d, error = %d\n", i, error);
			goto fail_4;
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmatag, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			device_printf(sc->sc_dev, "unable to create rx DMA map "
			    "%d, error = %d\n", i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	gem_mifinit(sc);

	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, gem_mediachange,
	    gem_mediastatus)) != 0) {
		device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
		goto fail_5;
	}
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	/*
	 * From this point forward, the attachment cannot fail. A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Announce ourselves. */
	device_printf(sc->sc_dev, "Ethernet address:");
	for (i = 0; i < 6; i++)
		printf("%c%02x", i > 0 ? ':' : ' ', sc->sc_arpcom.ac_enaddr[i]);

	/* Get RX FIFO size */
	sc->sc_rxfifosize = 64 *
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_FIFO_SIZE);
	printf(", %uKB RX fifo", sc->sc_rxfifosize / 1024);

	/* Get TX FIFO size */
	v = bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_FIFO_SIZE);
	printf(", %uKB TX fifo\n", v / 16);

	/* Initialize ifnet structure. */
	ifp->if_softc = sc;
	ifp->if_unit = device_get_unit(sc->sc_dev);
	ifp->if_name = "gem";
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = gem_start;
	ifp->if_ioctl = gem_ioctl;
	ifp->if_watchdog = gem_watchdog;
	ifp->if_init = gem_init;
	ifp->if_output = ether_output;
	ifp->if_snd.ifq_maxlen = GEM_TXQUEUELEN;
	/*
	 * Walk along the list of attached MII devices and
	 * establish an `MII instance' to `phy number'
	 * mapping. We'll use this mapping in media change
	 * requests to determine which phy to use to program
	 * the MIF configuration register.
	 */
	for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;
	     child = LIST_NEXT(child, mii_list)) {
		/*
		 * Note: we support just two PHYs: the built-in
		 * internal device and an external on the MII
		 * connector.
		 */
		if (child->mii_phy > 1 || child->mii_inst > 1) {
			device_printf(sc->sc_dev, "cannot accommodate "
			    "MII device %s at phy %d, instance %d\n",
			    device_get_name(child->mii_dev),
			    child->mii_phy, child->mii_inst);
			continue;
		}

		sc->sc_phys[child->mii_inst] = child->mii_phy;
	}

	/*
	 * Now select and activate the PHY we will use.
	 *
	 * The order of preference is External (MDI1),
	 * Internal (MDI0), Serial Link (no MII).
	 */
	if (sc->sc_phys[1]) {
#ifdef GEM_DEBUG
		printf("using external phy\n");
#endif
		sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
	} else {
#ifdef GEM_DEBUG
		printf("using internal phy\n");
#endif
		sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
	}
	bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_MIF_CONFIG,
	    sc->sc_mif_config);
	/* Attach the interface. */
	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);

#ifdef notyet
	/*
	 * Add a suspend hook to make sure we come back up after a
	 * resume.
	 */
	sc->sc_powerhook = powerhook_establish(gem_power, sc);
	if (sc->sc_powerhook == NULL)
		device_printf(sc->sc_dev, "WARNING: unable to establish power "
		    "hook\n");
#endif

	callout_init(&sc->sc_tick_ch, 0);
	callout_init(&sc->sc_rx_ch, 0);
	return (0);

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_5:
	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
fail_4:
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
fail_3:
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
fail_2:
	bus_dma_tag_destroy(sc->sc_cdmatag);
fail_1:
	bus_dma_tag_destroy(sc->sc_dmatag);
fail_0:
	bus_dma_tag_destroy(sc->sc_pdmatag);
	return (error);
}

static void
gem_cddma_callback(xsc, segs, nsegs, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	int error;
{
	struct gem_softc *sc = (struct gem_softc *)xsc;

	if (error != 0)
		return;
	if (nsegs != 1) {
		/* can't happen... */
		panic("gem_cddma_callback: bad control buffer segment count");
	}
	sc->sc_cddma = segs[0].ds_addr;
}

static void
gem_rxdma_callback(xsc, segs, nsegs, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	int error;
{
	struct gem_rxsoft *rxs = (struct gem_rxsoft *)xsc;

	if (error != 0)
		return;
	if (nsegs != 1) {
		/* can't happen... */
		panic("gem_rxdma_callback: bad rx buffer segment count");
	}
	rxs->rxs_paddr = segs[0].ds_addr;
}

/*
 * This is called multiple times in our version of dmamap_load_mbuf, but should
 * also fit a generic version that only calls it once.
 */
static void
gem_txdma_callback(xsc, segs, nsegs, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	int error;
{
	struct gem_txdma *tx = (struct gem_txdma *)xsc;
	int seg;

	tx->txd_error = error;
	if (error != 0)
		return;
	tx->txd_nsegs = nsegs;

	/*
	 * Initialize the transmit descriptors.
	 */
	for (seg = 0; seg < nsegs;
	     seg++, tx->txd_nexttx = GEM_NEXTTX(tx->txd_nexttx)) {
		uint64_t flags;

		DPRINTF(tx->txd_sc, ("txdma_cb: mapping seg %d (txd %d), len "
		    "%lx, addr %#lx (%#lx)\n", seg, tx->txd_nexttx,
		    segs[seg].ds_len, segs[seg].ds_addr,
		    GEM_DMA_WRITE(tx->txd_sc, segs[seg].ds_addr)));
		CTR5(KTR_GEM, "txdma_cb: mapping seg %d (txd %d), len "
		    "%lx, addr %#lx (%#lx)", seg, tx->txd_nexttx,
		    segs[seg].ds_len, segs[seg].ds_addr,
		    GEM_DMA_WRITE(tx->txd_sc, segs[seg].ds_addr));
		/*
		 * If this is the first descriptor we're
		 * enqueueing, set the start of packet flag,
		 * and the checksum stuff if we want the hardware
		 * to do it.
		 */
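		/*
		 * Layout of the descriptor flag word built below (a sketch
		 * taken from this code rather than the chip manual): the
		 * low bits (GEM_TD_BUFSIZE) hold the segment length, and
		 * GEM_TD_START_OF_PACKET/GEM_TD_END_OF_PACKET frame the
		 * packet.  GEM_TD_INTERRUPT_ME is requested only on roughly
		 * every (2 * GEM_NTXSEGS / 3)th packet start, so that tx
		 * completion interrupts are batched instead of taken per
		 * packet.
		 */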
		tx->txd_sc->sc_txdescs[tx->txd_nexttx].gd_addr =
		    GEM_DMA_WRITE(tx->txd_sc, segs[seg].ds_addr);
		flags = segs[seg].ds_len & GEM_TD_BUFSIZE;
		if ((tx->txd_flags & GTXD_FIRST) != 0 && seg == 0) {
			CTR2(KTR_GEM, "txdma_cb: start of packet at seg %d, "
			    "tx %d", seg, tx->txd_nexttx);
			flags |= GEM_TD_START_OF_PACKET;
			if (++tx->txd_sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
				tx->txd_sc->sc_txwin = 0;
				flags |= GEM_TD_INTERRUPT_ME;
			}
		}
		if ((tx->txd_flags & GTXD_LAST) != 0 && seg == nsegs - 1) {
			CTR2(KTR_GEM, "txdma_cb: end of packet at seg %d, "
			    "tx %d", seg, tx->txd_nexttx);
			flags |= GEM_TD_END_OF_PACKET;
		}
		tx->txd_sc->sc_txdescs[tx->txd_nexttx].gd_flags =
		    GEM_DMA_WRITE(tx->txd_sc, flags);
		tx->txd_lasttx = tx->txd_nexttx;
	}
}

static void
gem_tick(arg)
	void *arg;
{
	struct gem_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(sc->sc_mii);
	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}

static int
gem_bitwait(sc, r, clr, set)
	struct gem_softc *sc;
	bus_addr_t r;
	u_int32_t clr;
	u_int32_t set;
{
	int i;
	u_int32_t reg;

	for (i = TRIES; i--; DELAY(100)) {
		reg = bus_space_read_4(sc->sc_bustag, sc->sc_h, r);
		if ((reg & clr) == 0 && (reg & set) == set)
			return (1);
	}
	return (0);
}

void
gem_reset(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int s;

	s = splnet();
	DPRINTF(sc, ("%s: gem_reset\n", device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_reset", device_get_name(sc->sc_dev));
	gem_reset_rx(sc);
	gem_reset_tx(sc);

	/* Do a full reset */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
		device_printf(sc->sc_dev, "cannot reset device\n");
	splx(s);
}

/*
 * gem_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
gem_rxdrain(sc)
	struct gem_softc *sc;
{
	struct gem_rxsoft *rxs;
	int i;

	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/*
 * Reset the whole thing.
 */
static void
gem_stop(ifp, disable)
	struct ifnet *ifp;
	int disable;
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct gem_txsoft *txs;

	DPRINTF(sc, ("%s: gem_stop\n", device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_stop", device_get_name(sc->sc_dev));

	callout_stop(&sc->sc_tick_ch);

	/* XXX - Should we reset these instead? */
	gem_disable_tx(sc);
	gem_disable_rx(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		if (txs->txs_ndescs != 0) {
			bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
			if (txs->txs_mbuf != NULL) {
				m_freem(txs->txs_mbuf);
				txs->txs_mbuf = NULL;
			}
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	if (disable)
		gem_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/*
 * Reset the receiver
 */
int
gem_reset_rx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_rx(sc);
	bus_space_write_4(t, h, GEM_RX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_RX_CONFIG, 1, 0))
		device_printf(sc->sc_dev, "cannot disable rx dma\n");

	/* Wait 5ms extra. */
	DELAY(5000);

	/* Finally, reset the ERX */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX, 0)) {
		device_printf(sc->sc_dev, "cannot reset receiver\n");
		return (1);
	}
	return (0);
}

/*
 * Reset the transmitter
 */
static int
gem_reset_tx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int i;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_tx(sc);
	bus_space_write_4(t, h, GEM_TX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_TX_CONFIG, 1, 0))
		device_printf(sc->sc_dev, "cannot disable tx dma\n");

	/* Wait 5ms extra. */
	DELAY(5000);

	/* Finally, reset the ETX */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_TX);
	/* Wait till it finishes */
	for (i = TRIES; i--; DELAY(100))
		if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_TX) == 0)
			break;
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) {
		device_printf(sc->sc_dev, "cannot reset transmitter\n");
		return (1);
	}
	return (0);
}

/*
 * disable receiver.
 */
static int
gem_disable_rx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	cfg &= ~GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
}

/*
 * disable transmitter.
 */
static int
gem_disable_tx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG);
	cfg &= ~GEM_MAC_TX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
}

/*
 * Initialize interface.
 */
static int
gem_meminit(sc)
	struct gem_softc *sc;
{
	struct gem_rxsoft *rxs;
	int i, error;

	/*
	 * Initialize the transmit descriptor ring.
	 */
	memset((void *)sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	for (i = 0; i < GEM_NTXDESC; i++) {
		sc->sc_txdescs[i].gd_flags = 0;
		sc->sc_txdescs[i].gd_addr = 0;
	}
	GEM_CDTXSYNC(sc, 0, GEM_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = GEM_NTXDESC-1;
	sc->sc_txnext = 0;
	sc->sc_txwin = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = gem_add_rxbuf(sc, i)) != 0) {
				device_printf(sc->sc_dev, "unable to "
				    "allocate or map rx buffer %d, error = "
				    "%d\n", i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				gem_rxdrain(sc);
				return (1);
			}
		} else
			GEM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	return (0);
}

static int
gem_ringsize(sz)
	int sz;
{
	int v = 0;

	switch (sz) {
	case 32:
		v = GEM_RING_SZ_32;
		break;
	case 64:
		v = GEM_RING_SZ_64;
		break;
	case 128:
		v = GEM_RING_SZ_128;
		break;
	case 256:
		v = GEM_RING_SZ_256;
		break;
	case 512:
		v = GEM_RING_SZ_512;
		break;
	case 1024:
		v = GEM_RING_SZ_1024;
		break;
	case 2048:
		v = GEM_RING_SZ_2048;
		break;
	case 4096:
		v = GEM_RING_SZ_4096;
		break;
	case 8192:
		v = GEM_RING_SZ_8192;
		break;
	default:
		printf("gem: invalid Receive Descriptor ring size\n");
		break;
	}
	return (v);
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
static void
gem_init(xsc)
	void *xsc;
{
	struct gem_softc *sc = (struct gem_softc *)xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int s;
	u_int32_t v;

	s = splnet();

	DPRINTF(sc, ("%s: gem_init: calling stop\n", device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_init: calling stop", device_get_name(sc->sc_dev));
	/*
	 * Initialization sequence. The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	gem_stop(&sc->sc_arpcom.ac_if, 0);
	gem_reset(sc);
	DPRINTF(sc, ("%s: gem_init: restarting\n", device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_init: restarting", device_get_name(sc->sc_dev));

	/* Re-initialize the MIF */
	gem_mifinit(sc);

	/* Call MI reset function if any */
	if (sc->sc_hwreset)
		(*sc->sc_hwreset)(sc);

	/* step 3. Setup data structures in host memory */
	gem_meminit(sc);

	/* step 4. TX MAC registers & counters */
	gem_init_regs(sc);
	/* XXX: VLAN code from NetBSD temporarily removed. */
	bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
	    (ETHER_MAX_LEN + sizeof(struct ether_header)) | (0x2000<<16));

	/* step 5. RX MAC registers & counters */
	gem_setladrf(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	/* NOTE: we use only 32-bit DMA addresses here. */
	bus_space_write_4(t, h, GEM_TX_RING_PTR_HI, 0);
	bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

	bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 0);
	bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
	DPRINTF(sc, ("loading rx ring %lx, tx ring %lx, cddma %lx\n",
	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma));
	CTR3(KTR_GEM, "loading rx ring %lx, tx ring %lx, cddma %lx",
	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);

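	/*
	 * Note on the mask written below: bits set in GEM_INTMASK disable
	 * the corresponding interrupt sources, so the complement of the set
	 * of events we actually want to see is what gets written.
	 */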
	/* step 8. Global Configuration & Interrupt Mask */
	bus_space_write_4(t, h, GEM_INTMASK,
	    ~(GEM_INTR_TX_INTME|
	    GEM_INTR_TX_EMPTY|
	    GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF|
	    GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS|
	    GEM_INTR_MAC_CONTROL|GEM_INTR_MIF|
	    GEM_INTR_BERR));
	bus_space_write_4(t, h, GEM_MAC_RX_MASK,
	    GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT);
	bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff);	/* XXXX */
	bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0);	/* XXXX */

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = gem_ringsize(GEM_NTXDESC /*XXX*/);
	bus_space_write_4(t, h, GEM_TX_CONFIG,
	    v|GEM_TX_CONFIG_TXDMA_EN|
	    ((0x400<<10)&GEM_TX_CONFIG_TXFIFO_TH));

	/* step 10. ERX Configuration */

	/* Encode Receive Descriptor ring size */
	v = gem_ringsize(GEM_NRXDESC /*XXX*/);

	/* Enable DMA */
	bus_space_write_4(t, h, GEM_RX_CONFIG,
	    v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
	    (2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN|
	    (0<<GEM_RX_CONFIG_CXM_START_SHFT));
	/*
	 * The following value is for an OFF Threshold of about 3/4 full
	 * and an ON Threshold of 1/4 full.
	 */
	bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));
	bus_space_write_4(t, h, GEM_RX_BLANKING, (6<<12)|6);

	/* step 11. Configure Media */
	mii_mediachg(sc->sc_mii);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	v |= GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);

	/* step 14. Issue Transmit Pending command */

	/* Call MI initialization function if any */
	if (sc->sc_hwinit)
		(*sc->sc_hwinit)(sc);

	/* step 15. Give the receiver a swift kick */
	bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC-4);

	/* Start the one second timer. */
	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;
	sc->sc_ifflags = ifp->if_flags;
	splx(s);
}

/*
 * XXX: This is really a substitute for bus_dmamap_load_mbuf(), which FreeBSD
 * does not yet have, with some adaptations for this driver.
 * Some changes are mandated by the fact that multiple maps may be needed
 * to map a single mbuf.
 * It should be removed once generic support is available.
 *
 * This is derived from NetBSD (syssrc/sys/arch/sparc64/sparc64/machdep.c), for
 * a copyright notice see sparc64/sparc64/bus_machdep.c.
 *
 * Not every error condition is passed to the callback in this version, and the
 * callback may be called more than once.
 * It also gropes in the entrails of the callback arg...
 */
static int
gem_dmamap_load_mbuf(sc, m0, cb, txj, flags)
	struct gem_softc *sc;
	struct mbuf *m0;
	bus_dmamap_callback_t *cb;
	struct gem_txjob *txj;
	int flags;
{
	struct gem_txdma txd;
	struct gem_txsoft *txs;
	struct mbuf *m;
	void *vaddr;
	int error, first = 1, len, totlen;

	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("gem_dmamap_load_mbuf: no packet header");
	totlen = m0->m_pkthdr.len;
	len = 0;
	txd.txd_sc = sc;
	txd.txd_nexttx = txj->txj_nexttx;
	txj->txj_nsegs = 0;
	STAILQ_INIT(&txj->txj_txsq);
	m = m0;
	while (m != NULL && len < totlen) {
		/* Skip empty mbufs, advancing so we cannot loop forever. */
		if (m->m_len == 0) {
			m = m->m_next;
			continue;
		}
		/* Get a work queue entry. */
		if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
			/*
			 * Ran out of descriptors, return a value that
			 * cannot be returned by bus_dmamap_load to notify
			 * the caller.
			 */
			error = -1;
			goto fail;
		}
		len += m->m_len;
		txd.txd_flags = first ? GTXD_FIRST : 0;
		if (m->m_next == NULL || len >= totlen)
			txd.txd_flags |= GTXD_LAST;
		vaddr = mtod(m, void *);
		error = bus_dmamap_load(sc->sc_dmatag, txs->txs_dmamap, vaddr,
		    m->m_len, cb, &txd, flags);
		if (error != 0 || txd.txd_error != 0)
			goto fail;
		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmatag, txs->txs_dmamap,
		    BUS_DMASYNC_PREWRITE);
		m = m->m_next;
		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		txs->txs_mbuf = first ? m0 : NULL;
		txs->txs_firstdesc = txj->txj_nexttx;
		txs->txs_lastdesc = txd.txd_lasttx;
		txs->txs_ndescs = txd.txd_nsegs;
		CTR3(KTR_GEM, "load_mbuf: setting firstdesc=%d, lastdesc=%d, "
		    "ndescs=%d", txs->txs_firstdesc, txs->txs_lastdesc,
		    txs->txs_ndescs);
		STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
		STAILQ_INSERT_TAIL(&txj->txj_txsq, txs, txs_q);
		txj->txj_nexttx = txd.txd_nexttx;
		txj->txj_nsegs += txd.txd_nsegs;
		first = 0;
	}
	txj->txj_lasttx = txd.txd_lasttx;
	return (0);

fail:
	CTR1(KTR_GEM, "gem_dmamap_load_mbuf failed (%d)", error);
	gem_dmamap_unload_mbuf(sc, txj);
	return (error);
}

/*
 * Unload an mbuf using the txd the information was placed in.
 * The tx interrupt code frees the tx segments one by one, because the txd is
 * not available any more.
 */
static void
gem_dmamap_unload_mbuf(sc, txj)
	struct gem_softc *sc;
	struct gem_txjob *txj;
{
	struct gem_txsoft *txs;

	/* Re-add the removed descriptors and unload the segments. */
	while ((txs = STAILQ_FIRST(&txj->txj_txsq)) != NULL) {
		bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
		STAILQ_REMOVE_HEAD(&txj->txj_txsq, txs_q);
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}
}

static void
gem_dmamap_commit_mbuf(sc, txj)
	struct gem_softc *sc;
	struct gem_txjob *txj;
{
	struct gem_txsoft *txs;

	/* Commit the txjob by transferring the txsofts to the txdirtyq. */
	while ((txs = STAILQ_FIRST(&txj->txj_txsq)) != NULL) {
		STAILQ_REMOVE_HEAD(&txj->txj_txsq, txs_q);
		STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
	}
}

static void
gem_init_regs(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	const u_char *laddr = sc->sc_arpcom.ac_enaddr;
	u_int32_t v;

	/* These regs are not cleared on reset */
	if (!sc->sc_inited) {

		/* Wooo. Magic values. */
		bus_space_write_4(t, h, GEM_MAC_IPG0, 0);
		bus_space_write_4(t, h, GEM_MAC_IPG1, 8);
		bus_space_write_4(t, h, GEM_MAC_IPG2, 4);

		bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* Max frame and max burst size */
		bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
		    ETHER_MAX_LEN | (0x2000<<16));

		bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x7);
		bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x4);
		bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10);
		/* Dunno.... */
		bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
		bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
		    ((laddr[5]<<8)|laddr[4])&0x3ff);

		/* Secondary MAC addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);

		/* MAC control addr set to 01:80:c2:00:00:01 */
		bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
		bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
		bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);

		/* MAC filter addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);

		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);

		sc->sc_inited = 1;
	}

	/* Counters need to be zeroed */
	bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);

	/* Un-pause stuff */
#if 0
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);
#else
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0);
#endif

	/*
	 * Set the station address.
	 */
	bus_space_write_4(t, h, GEM_MAC_ADDR0, (laddr[4]<<8)|laddr[5]);
	bus_space_write_4(t, h, GEM_MAC_ADDR1, (laddr[2]<<8)|laddr[3]);
	bus_space_write_4(t, h, GEM_MAC_ADDR2, (laddr[0]<<8)|laddr[1]);

	/*
	 * Enable MII outputs. Enable GMII if there is a gigabit PHY.
	 */
	sc->sc_mif_config = bus_space_read_4(t, h, GEM_MIF_CONFIG);
	v = GEM_MAC_XIF_TX_MII_ENA;
	if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
		v |= GEM_MAC_XIF_FDPLX_LED;
		if (sc->sc_flags & GEM_GIGABIT)
			v |= GEM_MAC_XIF_GMII_MODE;
	}
	bus_space_write_4(t, h, GEM_MAC_XIF_CONFIG, v);
}

static void
gem_start(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct mbuf *m0 = NULL, *m;
	struct gem_txjob txj;
	int firsttx, ofree, seg, ntx, txmfail;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;
	firsttx = sc->sc_txnext;

	DPRINTF(sc, ("%s: gem_start: txfree %d, txnext %d\n",
	    device_get_name(sc->sc_dev), ofree, firsttx));
	CTR3(KTR_GEM, "%s: gem_start: txfree %d, txnext %d",
	    device_get_name(sc->sc_dev), ofree, firsttx);

	txj.txj_nexttx = firsttx;
	txj.txj_lasttx = 0;
	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	txmfail = 0;
	for (ntx = 0;; ntx++) {
		/*
		 * Grab a packet off the queue.
		 */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Load the DMA map. If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we were
		 * short on resources. In this case, we'll copy and try
		 * again.
		 */
		txmfail = gem_dmamap_load_mbuf(sc, m0,
		    gem_txdma_callback, &txj, BUS_DMA_NOWAIT);
		if (txmfail == -1) {
			IF_PREPEND(&ifp->if_snd, m0);
			break;
		}
		if (txmfail > 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				device_printf(sc->sc_dev, "unable to "
				    "allocate Tx mbuf\n");
				/* Failed; requeue. */
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					device_printf(sc->sc_dev, "unable to "
					    "allocate Tx cluster\n");
					IF_PREPEND(&ifp->if_snd, m0);
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			txmfail = gem_dmamap_load_mbuf(sc, m,
			    gem_txdma_callback, &txj, BUS_DMA_NOWAIT);
			if (txmfail != 0) {
				if (txmfail > 0) {
					device_printf(sc->sc_dev, "unable to "
					    "load Tx buffer, error = %d\n",
					    txmfail);
				}
				m_freem(m);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet. Note, we always reserve one descriptor
		 * at the end of the ring as a termination point, to
		 * prevent wrap-around.
		 */
		if (txj.txj_nsegs > (sc->sc_txfree - 1)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet. We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt. Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			gem_dmamap_unload_mbuf(sc, &txj);
			if (m != NULL)
				m_freem(m);
			IF_PREPEND(&ifp->if_snd, m0);
			break;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp, m0);

		if (m != NULL)
			m_freem(m0);

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf(" gem_start %p transmit chain:\n",
			    STAILQ_FIRST(&txj.txj_txsq));
			for (seg = sc->sc_txnext;; seg = GEM_NEXTTX(seg)) {
				printf("descriptor %d:\t", seg);
				printf("gd_flags: 0x%016llx\t", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[seg].gd_flags));
				printf("gd_addr: 0x%016llx\n", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[seg].gd_addr));
				if (seg == txj.txj_lasttx)
					break;
			}
		}
#endif

		/* Sync the descriptors we're using. */
		GEM_CDTXSYNC(sc, sc->sc_txnext, txj.txj_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->sc_txfree -= txj.txj_nsegs;
		sc->sc_txnext = txj.txj_nexttx;

		gem_dmamap_commit_mbuf(sc, &txj);
	}

	if (txmfail == -1 || sc->sc_txfree == 0) {
		ifp->if_flags |= IFF_OACTIVE;
		/* No more slots left; notify upper layer. */
	}

	if (ntx > 0) {
		DPRINTF(sc, ("%s: packets enqueued, IC on %d, OWN on %d\n",
		    device_get_name(sc->sc_dev), txj.txj_lasttx, firsttx));
		CTR3(KTR_GEM, "%s: packets enqueued, IC on %d, OWN on %d",
		    device_get_name(sc->sc_dev), txj.txj_lasttx, firsttx);
		/*
		 * The entire packet chain is set up.
		 * Kick the transmitter.
		 */
		DPRINTF(sc, ("%s: gem_start: kicking tx %d\n",
		    device_get_name(sc->sc_dev), txj.txj_nexttx));
		CTR3(KTR_GEM, "%s: gem_start: kicking tx %d=%d",
		    device_get_name(sc->sc_dev), txj.txj_nexttx,
		    sc->sc_txnext);
		bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_TX_KICK,
		    sc->sc_txnext);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
		DPRINTF(sc, ("%s: gem_start: watchdog %d\n",
		    device_get_name(sc->sc_dev), ifp->if_timer));
		CTR2(KTR_GEM, "%s: gem_start: watchdog %d",
		    device_get_name(sc->sc_dev), ifp->if_timer);
	}
}

/*
 * Transmit interrupt.
 */
static void
gem_tint(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h;
	struct gem_txsoft *txs;
	int txlast;
	int progress = 0;

	DPRINTF(sc, ("%s: gem_tint\n", device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_tint", device_get_name(sc->sc_dev));

	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
	    bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);

	/*
	 * then clear the hardware counters.
	 */
	bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		GEM_CDTXSYNC(sc, txs->txs_lastdesc,
		    txs->txs_ndescs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			int i;
			printf(" txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
				printf("descriptor %d: ", i);
				printf("gd_flags: 0x%016llx\t", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags));
				printf("gd_addr: 0x%016llx\n", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr));
				if (i == txs->txs_lastdesc)
					break;
			}
		}
#endif

		/*
		 * In theory, we could harvest some descriptors before
		 * the ring is empty, but that's a bit complicated.
		 *
		 * GEM_TX_COMPLETION points to the last descriptor
		 * processed +1.
		 */
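		/*
		 * Worked example for the range checks below: with a job
		 * occupying descriptors 510 through 2 of, say, a 512-entry
		 * ring (firstdesc > lastdesc, the wrapped case), the job is
		 * still in flight while txlast lies in [510, 511] or in
		 * [0, 2], hence the "or" form of the second comparison.
		 */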
		txlast = bus_space_read_4(t, mac, GEM_TX_COMPLETION);
		DPRINTF(sc,
		    ("gem_tint: txs->txs_lastdesc = %d, txlast = %d\n",
		    txs->txs_lastdesc, txlast));
		CTR3(KTR_GEM, "gem_tint: txs->txs_firstdesc = %d, "
		    "txs->txs_lastdesc = %d, txlast = %d",
		    txs->txs_firstdesc, txs->txs_lastdesc, txlast);
		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
			if ((txlast >= txs->txs_firstdesc) &&
			    (txlast <= txs->txs_lastdesc))
				break;
		} else {
			/* Ick -- this command wraps */
			if ((txlast >= txs->txs_firstdesc) ||
			    (txlast <= txs->txs_lastdesc))
				break;
		}

		DPRINTF(sc, ("gem_tint: releasing a desc\n"));
		CTR0(KTR_GEM, "gem_tint: releasing a desc");
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		sc->sc_txfree += txs->txs_ndescs;

		bus_dmamap_sync(sc->sc_dmatag, txs->txs_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
		if (txs->txs_mbuf != NULL) {
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		ifp->if_opackets++;
		progress = 1;
	}

	DPRINTF(sc, ("gem_tint: GEM_TX_STATE_MACHINE %x "
	    "GEM_TX_DATA_PTR %llx "
	    "GEM_TX_COMPLETION %x\n",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_STATE_MACHINE),
	    ((long long) bus_space_read_4(sc->sc_bustag, sc->sc_h,
	    GEM_TX_DATA_PTR_HI) << 32) |
	    bus_space_read_4(sc->sc_bustag, sc->sc_h,
	    GEM_TX_DATA_PTR_LO),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_COMPLETION)));
	CTR3(KTR_GEM, "gem_tint: GEM_TX_STATE_MACHINE %x "
	    "GEM_TX_DATA_PTR %llx "
	    "GEM_TX_COMPLETION %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_STATE_MACHINE),
	    ((long long) bus_space_read_4(sc->sc_bustag, sc->sc_h,
	    GEM_TX_DATA_PTR_HI) << 32) |
	    bus_space_read_4(sc->sc_bustag, sc->sc_h,
	    GEM_TX_DATA_PTR_LO),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_COMPLETION));

	if (progress) {
		if (sc->sc_txfree == GEM_NTXDESC - 1)
			sc->sc_txwin = 0;

		/* Freed some descriptors, so reset IFF_OACTIVE and restart. */
		ifp->if_flags &= ~IFF_OACTIVE;
		gem_start(ifp);

		if (STAILQ_EMPTY(&sc->sc_txdirtyq))
			ifp->if_timer = 0;
	}

	DPRINTF(sc, ("%s: gem_tint: watchdog %d\n",
	    device_get_name(sc->sc_dev), ifp->if_timer));
	CTR2(KTR_GEM, "%s: gem_tint: watchdog %d",
	    device_get_name(sc->sc_dev), ifp->if_timer);
}

#if 0
static void
gem_rint_timeout(arg)
	void *arg;
{

	gem_rint((struct gem_softc *)arg);
}
#endif

/*
 * Receive interrupt.
 */
static void
gem_rint(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	struct ether_header *eh;
	struct gem_rxsoft *rxs;
	struct mbuf *m;
	u_int64_t rxstat;
	u_int32_t rxcomp;
	int i, len, progress = 0;

	callout_stop(&sc->sc_rx_ch);
	DPRINTF(sc, ("%s: gem_rint\n", device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_rint", device_get_name(sc->sc_dev));

	/*
	 * Read the completion register once. This limits
	 * how long the following loop can execute.
	 */
	rxcomp = bus_space_read_4(t, h, GEM_RX_COMPLETION);

	/*
	 * XXXX Read the lastrx only once at the top for speed.
	 */
	DPRINTF(sc, ("gem_rint: sc->rxptr %d, complete %d\n",
	    sc->sc_rxptr, rxcomp));
	CTR2(KTR_GEM, "gem_rint: sc->rxptr %d, complete %d",
	    sc->sc_rxptr, rxcomp);
	for (i = sc->sc_rxptr; i != rxcomp;
	     i = GEM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		GEM_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);

		if (rxstat & GEM_RD_OWN) {
#if 0 /* XXX: In case of emergency, re-enable this. */
			/*
			 * The descriptor is still marked as owned, although
			 * it is supposed to have completed. This has been
			 * observed on some machines. Just exiting here
			 * might leave the packet sitting around until another
			 * one arrives to trigger a new interrupt, which is
			 * generally undesirable, so set up a timeout.
			 */
			callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
			    gem_rint_timeout, sc);
#endif
			break;
		}

		progress++;
		ifp->if_ipackets++;

		if (rxstat & GEM_RD_BAD_CRC) {
			ifp->if_ierrors++;
			device_printf(sc->sc_dev, "receive error: CRC error\n");
			GEM_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap,
		    BUS_DMASYNC_POSTREAD);
#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf(" rxsoft %p descriptor %d: ", rxs, i);
			printf("gd_flags: 0x%016llx\t", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
			printf("gd_addr: 0x%016llx\n", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
		}
#endif

		/*
		 * No errors; receive the packet. Note the Gem
		 * includes the CRC with every packet.
		 */
		len = GEM_RD_BUFLEN(rxstat);

		/*
		 * Allocate a new mbuf cluster. If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = rxs->rxs_mbuf;
		if (gem_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			GEM_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap,
			    BUS_DMASYNC_PREREAD);
			continue;
		}
		m->m_data += 2; /* We're already off by two */

		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len - ETHER_CRC_LEN;
		m_adj(m, sizeof(struct ether_header));

		/* Pass it on. */
		ether_input(ifp, eh, m);
	}

	if (progress) {
		/* Update the receive pointer. */
		if (i == sc->sc_rxptr) {
			device_printf(sc->sc_dev, "rint: ring wrap\n");
		}
		sc->sc_rxptr = i;
		bus_space_write_4(t, h, GEM_RX_KICK, GEM_PREVRX(i));
	}

	DPRINTF(sc, ("gem_rint: done sc->rxptr %d, complete %d\n",
	    sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION)));
	CTR2(KTR_GEM, "gem_rint: done sc->rxptr %d, complete %d",
	    sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION));
}

/*
 * gem_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
gem_add_rxbuf(sc, idx)
	struct gem_softc *sc;
	int idx;
{
	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

#ifdef GEM_DEBUG
	/* bzero the packet to check dma */
	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
#endif

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmatag, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, gem_rxdma_callback, rxs,
	    BUS_DMA_NOWAIT);
	if (error != 0 || rxs->rxs_paddr == 0) {
		device_printf(sc->sc_dev, "can't load rx DMA map %d, error = "
		    "%d\n", idx, error);
		panic("gem_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD);

	GEM_INIT_RXDESC(sc, idx);

	return (0);
}

static void
gem_eint(sc, status)
	struct gem_softc *sc;
	u_int status;
{

	if ((status & GEM_INTR_MIF) != 0) {
		device_printf(sc->sc_dev, "XXXlink status changed\n");
		return;
	}

	device_printf(sc->sc_dev, "status=%x\n", status);
}

void
gem_intr(v)
	void *v;
{
	struct gem_softc *sc = (struct gem_softc *)v;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_h;
	u_int32_t status;

	status = bus_space_read_4(t, seb, GEM_STATUS);
	DPRINTF(sc, ("%s: gem_intr: cplt %x, status %x\n",
	    device_get_name(sc->sc_dev), (status>>19),
	    (u_int)status));
	CTR3(KTR_GEM, "%s: gem_intr: cplt %x, status %x",
	    device_get_name(sc->sc_dev), (status>>19),
	    (u_int)status);

	if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
		gem_eint(sc, status);

	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
		gem_tint(sc);

	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
		gem_rint(sc);

	/* We should eventually do more than just print out error stats. */
	if (status & GEM_INTR_TX_MAC) {
		int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS);
		if (txstat & ~GEM_MAC_TX_XMIT_DONE)
			device_printf(sc->sc_dev, "MAC tx fault, status %x\n",
			    txstat);
		if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG))
			gem_init(sc);
	}
	if (status & GEM_INTR_RX_MAC) {
		int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS);
		if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
			device_printf(sc->sc_dev, "MAC rx fault, status %x\n",
			    rxstat);
		if ((rxstat & GEM_MAC_RX_OVERFLOW) != 0)
			gem_init(sc);
	}
}

static void
gem_watchdog(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = ifp->if_softc;

	DPRINTF(sc, ("gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
	    "GEM_MAC_RX_CONFIG %x\n",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG)));
	CTR3(KTR_GEM, "gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
	    "GEM_MAC_RX_CONFIG %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG));
	CTR3(KTR_GEM, "gem_watchdog: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x "
	    "GEM_MAC_TX_CONFIG %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_CONFIG));

	device_printf(sc->sc_dev, "device timeout\n");
	++ifp->if_oerrors;

	/* Try to get more packets going. */
	gem_start(ifp);
}

/*
 * Initialize the MII Management Interface
 */
static void
gem_mifinit(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;

	/* Configure the MIF in frame mode */
	sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config);
}

/*
 * MII interface
 *
 * The GEM MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
 */
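/*
 * A rough sketch of frame mode as used by the two functions below: the
 * opcode, PHY address and register number (plus the data, for writes) are
 * packed into a single frame word, e.g.
 *
 *	v = GEM_MIF_FRAME_READ |
 *	    (phy << GEM_MIF_PHY_SHIFT) | (reg << GEM_MIF_REG_SHIFT);
 *
 * which is written to GEM_MIF_FRAME.  The MIF then shifts the frame out to
 * the PHY; completion is signalled by the turnaround bit (GEM_MIF_FRAME_TA0)
 * reading back as one, at which point GEM_MIF_FRAME_DATA holds the result
 * of a read.
 */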
int
gem_mii_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{
	struct gem_softc *sc = device_get_softc(dev);
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG_PHY
	printf("gem_mii_readreg: phy %d reg %d\n", phy, reg);
#endif

#if 0
	/* Select the desired PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	/* Clear PHY select bit */
	v &= ~GEM_MIF_CONFIG_PHY_SEL;
	if (phy == GEM_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= GEM_MIF_CONFIG_PHY_SEL;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, v);
#endif

	/* Construct the frame command */
	v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) |
	    GEM_MIF_FRAME_READ;

	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (v & GEM_MIF_FRAME_DATA);
	}

	device_printf(sc->sc_dev, "mii_read timeout\n");
	return (0);
}

int
gem_mii_writereg(dev, phy, reg, val)
	device_t dev;
	int phy, reg, val;
{
	struct gem_softc *sc = device_get_softc(dev);
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG_PHY
	printf("gem_mii_writereg: phy %d reg %d val %x\n", phy, reg, val);
#endif

#if 0
	/* Select the desired PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	/* Clear PHY select bit */
	v &= ~GEM_MIF_CONFIG_PHY_SEL;
	if (phy == GEM_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= GEM_MIF_CONFIG_PHY_SEL;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, v);
#endif
	/* Construct the frame command */
	v = GEM_MIF_FRAME_WRITE |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT) |
	    (val & GEM_MIF_FRAME_DATA);

	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (1);
	}

	device_printf(sc->sc_dev, "mii_write timeout\n");
	return (0);
}

void
gem_mii_statchg(dev)
	device_t dev;
{
	struct gem_softc *sc = device_get_softc(dev);
#ifdef GEM_DEBUG
	int instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media);
#endif
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h;
	u_int32_t v;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_mii_statchg: status change: phy = %d\n",
		    sc->sc_phys[instance]);
#endif

	/* Set tx full duplex options */
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0);
	DELAY(10000); /* reg must be cleared and delay before changing. */
	v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT|
	    GEM_MAC_TX_ENABLE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) {
		v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS;
	}
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v);

	/* XIF Configuration */
	/* We should really calculate all this rather than rely on defaults */
	v = bus_space_read_4(t, mac, GEM_MAC_XIF_CONFIG);
	v = GEM_MAC_XIF_LINK_LED;
	v |= GEM_MAC_XIF_TX_MII_ENA;

	/* If an external transceiver is connected, enable its MII drivers */
	sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG);
	if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) {
		/* External MII needs echo disable if half duplex. */
		if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
			/* turn on full duplex LED */
			v |= GEM_MAC_XIF_FDPLX_LED;
		else
			/* half duplex -- disable echo */
			v |= GEM_MAC_XIF_ECHO_DISABL;

		if (IFM_SUBTYPE(sc->sc_mii->mii_media_active) == IFM_1000_T)
			v |= GEM_MAC_XIF_GMII_MODE;
		else
			v &= ~GEM_MAC_XIF_GMII_MODE;
	} else {
		/* Internal MII needs buf enable */
		v |= GEM_MAC_XIF_MII_BUF_ENA;
	}
	bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v);
}

int
gem_mediachange(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = ifp->if_softc;

	/* XXX Add support for serial media. */

	return (mii_mediachg(sc->sc_mii));
}

void
gem_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct gem_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii->mii_media_active;
	ifmr->ifm_status = sc->sc_mii->mii_media_status;
}

/*
 * Process an ioctl request.
 */
static int
gem_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct gem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();
	switch (cmd) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
	case SIOCSIFMTU:
		error = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((sc->sc_ifflags ^ ifp->if_flags) == IFF_PROMISC)
				gem_setladrf(sc);
			else
				gem_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				gem_stop(ifp, 0);
		}
		sc->sc_ifflags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		gem_setladrf(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	default:
		error = ENOTTY;
		break;
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		gem_start(ifp);
	splx(s);
	return (error);
}

/*
 * Set up the logical address filter.
 */
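/*
 * A worked example of the hash computation done below: if the CRC of a
 * multicast address comes out as 0x84xxxxxx, only crc >> 24 = 0x84 is kept;
 * its upper four bits (0x8) select hash[8] and its lower four (0x4) select
 * bit 15 - 4 = 11 of that word, following the convention that bit 0 of the
 * filter is the MSB.
 */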
static void
gem_setladrf(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ifmultiaddr *inm;
	struct sockaddr_dl *sdl;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_char *cp;
	u_int32_t crc;
	u_int32_t hash[16];
	u_int32_t v;
	int len;
	int i;

	/* Get current RX configuration */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);

	/*
	 * Turn off promiscuous mode, promiscuous group mode (all multicast),
	 * and hash filter. Depending on the case, the right bit will be
	 * enabled.
	 */
	v &= ~(GEM_MAC_RX_PROMISCUOUS|GEM_MAC_RX_HASH_FILTER|
	    GEM_MAC_RX_PROMISC_GRP);

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		/* Turn on promiscuous mode */
		v |= GEM_MAC_RX_PROMISCUOUS;
		goto chipit;
	}
	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
		ifp->if_flags |= IFF_ALLMULTI;
		v |= GEM_MAC_RX_PROMISC_GRP;
		goto chipit;
	}

	/*
	 * Set up multicast address filter by passing all multicast addresses
	 * through a crc generator, and then using the high order 8 bits as an
	 * index into the 256 bit logical address filter. The high order 4
	 * bits selects the word, while the other 4 bits select the bit within
	 * the word (where bit 0 is the MSB).
	 */

	/* Clear hash table */
	memset(hash, 0, sizeof(hash));

	TAILQ_FOREACH(inm, &sc->sc_arpcom.ac_if.if_multiaddrs, ifma_link) {
		if (inm->ifma_addr->sa_family != AF_LINK)
			continue;
		sdl = (struct sockaddr_dl *)inm->ifma_addr;
		cp = LLADDR(sdl);
		crc = 0xffffffff;
		for (len = sdl->sdl_alen; --len >= 0;) {
			int octet = *cp++;
			int i;

#define	MC_POLY_LE	0xedb88320UL	/* mcast crc, little endian */
			for (i = 0; i < 8; i++) {
				if ((crc & 1) ^ (octet & 1)) {
					crc >>= 1;
					crc ^= MC_POLY_LE;
				} else {
					crc >>= 1;
				}
				octet >>= 1;
			}
		}
		/* Just want the 8 most significant bits. */
		crc >>= 24;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (15 - (crc & 15));
	}

	v |= GEM_MAC_RX_HASH_FILTER;
	ifp->if_flags &= ~IFF_ALLMULTI;

	/* Now load the hash table into the chip (if we are using it) */
	for (i = 0; i < 16; i++) {
		bus_space_write_4(t, h,
		    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1-GEM_MAC_HASH0),
		    hash[i]);
	}

chipit:
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);
}

#ifdef notyet

/*
 * gem_power:
 *
 *	Power management (suspend/resume) hook.
 */
static void
gem_power(why, arg)
	int why;
	void *arg;
{
	struct gem_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	switch (why) {
	case PWR_SUSPEND:
	case PWR_STANDBY:
		gem_stop(ifp, 1);
		if (sc->sc_power != NULL)
			(*sc->sc_power)(sc, why);
		break;
	case PWR_RESUME:
		if (ifp->if_flags & IFF_UP) {
			if (sc->sc_power != NULL)
				(*sc->sc_power)(sc, why);
			gem_init(sc);
		}
		break;
	case PWR_SOFTSUSPEND:
	case PWR_SOFTSTANDBY:
	case PWR_SOFTRESUME:
		break;
	}
	splx(s);
}
#endif