/*
 * Copyright (C) 2001 Eduardo Horvath.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: NetBSD: gem.c,v 1.9 2001/10/21 20:45:15 thorpej Exp
 *
 * $FreeBSD$
 */

/*
 * Driver for Sun GEM ethernet controllers.
 */

#define GEM_DEBUG

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <gem/if_gemreg.h>
#include <gem/if_gemvar.h>

#define TRIES	10000

static void	gem_start(struct ifnet *);
static void	gem_stop(struct ifnet *, int);
static int	gem_ioctl(struct ifnet *, u_long, caddr_t);
static void	gem_cddma_callback(void *, bus_dma_segment_t *, int, int);
static void	gem_rxdma_callback(void *, bus_dma_segment_t *, int, int);
static void	gem_txdma_callback(void *, bus_dma_segment_t *, int, int);
static void	gem_tick(void *);
static void	gem_watchdog(struct ifnet *);
static void	gem_init(void *);
static void	gem_init_regs(struct gem_softc *sc);
static int	gem_ringsize(int sz);
static int	gem_meminit(struct gem_softc *);
static int	gem_dmamap_load_mbuf(struct gem_softc *, struct mbuf *,
    bus_dmamap_callback_t *, struct gem_txjob *, int);
static void	gem_dmamap_unload_mbuf(struct gem_softc *, struct gem_txjob *);
static void	gem_dmamap_commit_mbuf(struct gem_softc *, struct gem_txjob *);
static void	gem_mifinit(struct gem_softc *);
static int	gem_bitwait(struct gem_softc *sc, bus_addr_t r,
    u_int32_t clr, u_int32_t set);
static int	gem_reset_rx(struct gem_softc *);
static int	gem_reset_tx(struct gem_softc *);
static int	gem_disable_rx(struct gem_softc *);
static int	gem_disable_tx(struct gem_softc *);
static void	gem_rxdrain(struct gem_softc *);
static int	gem_add_rxbuf(struct gem_softc *, int);
static void	gem_setladrf(struct gem_softc *);
struct mbuf	*gem_get(struct gem_softc *, int, int);
static void	gem_eint(struct gem_softc *, u_int);
static void	gem_rint(struct gem_softc *);
static void	gem_rint_timeout(void *);
static void	gem_tint(struct gem_softc *);
#ifdef notyet
static void	gem_power(int, void *);
#endif

devclass_t gem_devclass;
DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(gem, miibus, 1, 1, 1);

#ifdef GEM_DEBUG
#define DPRINTF(sc, x)	if ((sc)->sc_arpcom.ac_if.if_flags & IFF_DEBUG) \
				printf x
#include <sys/ktr.h>
#define KTR_GEM		KTR_CT2
#else
#define DPRINTF(sc, x)	/* nothing */
#endif

#define GEM_NSEGS GEM_NTXSEGS

/*
 * gem_attach:
 *
 *	Attach a Gem interface to the system.
 */
int
gem_attach(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_softc *child;
	int i, error;

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	gem_reset(sc);

	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, GEM_NSEGS,
	    BUS_SPACE_MAXSIZE_32BIT, 0, &sc->sc_pdmatag);
	if (error)
		return (error);

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MAXBSIZE,
	    GEM_NSEGS, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
	    &sc->sc_dmatag);
	if (error)
		goto fail_0;

	error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), BUS_DMA_ALLOCNOW,
	    &sc->sc_cdmatag);
	if (error)
		goto fail_1;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
	    (void **)&sc->sc_control_data, 0, &sc->sc_cddmamap))) {
		device_printf(sc->sc_dev, "unable to allocate control data,"
		    " error = %d\n", error);
		goto fail_2;
	}

	sc->sc_cddma = 0;
	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct gem_control_data),
	    gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
		device_printf(sc->sc_dev, "unable to load control data DMA "
		    "map, error = %d\n", error);
		goto fail_3;
	}

	/*
	 * Initialize the transmit job descriptors.
	 */
	STAILQ_INIT(&sc->sc_txfreeq);
	STAILQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Create the transmit buffer DMA maps.
	 */
	error = ENOMEM;
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		struct gem_txsoft *txs;

		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		txs->txs_ndescs = 0;
		if ((error = bus_dmamap_create(sc->sc_dmatag, 0,
		    &txs->txs_dmamap)) != 0) {
			device_printf(sc->sc_dev, "unable to create tx DMA map "
			    "%d, error = %d\n", i, error);
			goto fail_4;
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}
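
	/*
	 * All transmit and receive DMA maps are created up front; since the
	 * buffer tag above was created with BUS_DMA_ALLOCNOW, the busdma
	 * resources needed to load these maps are reserved here at attach
	 * time, so a later bus_dmamap_load() in the transmit or receive
	 * path should not fail for lack of resources.
	 */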
	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmatag, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			device_printf(sc->sc_dev, "unable to create rx DMA map "
			    "%d, error = %d\n", i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	gem_mifinit(sc);

	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, gem_mediachange,
	    gem_mediastatus)) != 0) {
		device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
		goto fail_5;
	}
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Announce ourselves. */
	device_printf(sc->sc_dev, "Ethernet address:");
	for (i = 0; i < 6; i++)
		printf("%c%02x", i > 0 ? ':' : ' ', sc->sc_arpcom.ac_enaddr[i]);
	printf("\n");

	/* Initialize ifnet structure. */
	ifp->if_softc = sc;
	ifp->if_unit = device_get_unit(sc->sc_dev);
	ifp->if_name = "gem";
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = gem_start;
	ifp->if_ioctl = gem_ioctl;
	ifp->if_watchdog = gem_watchdog;
	ifp->if_init = gem_init;
	ifp->if_output = ether_output;
	ifp->if_snd.ifq_maxlen = GEM_TXQUEUELEN;

	/*
	 * Walk along the list of attached MII devices and
	 * establish an `MII instance' to `phy number'
	 * mapping.  We'll use this mapping in media change
	 * requests to determine which phy to use to program
	 * the MIF configuration register.
	 */
	for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;
	    child = LIST_NEXT(child, mii_list)) {
		/*
		 * Note: we support just two PHYs: the built-in
		 * internal device and an external on the MII
		 * connector.
		 */
		if (child->mii_phy > 1 || child->mii_inst > 1) {
			device_printf(sc->sc_dev, "cannot accommodate "
			    "MII device %s at phy %d, instance %d\n",
			    device_get_name(child->mii_dev),
			    child->mii_phy, child->mii_inst);
			continue;
		}

		sc->sc_phys[child->mii_inst] = child->mii_phy;
	}

	/*
	 * Now select and activate the PHY we will use.
	 *
	 * The order of preference is External (MDI1),
	 * Internal (MDI0), Serial Link (no MII).
	 */
	if (sc->sc_phys[1]) {
#ifdef GEM_DEBUG
		printf("using external phy\n");
#endif
		sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
	} else {
#ifdef GEM_DEBUG
		printf("using internal phy\n");
#endif
		sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
	}
	bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_MIF_CONFIG,
	    sc->sc_mif_config);

	/* Attach the interface. */
	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);

#ifdef notyet
	/*
	 * Add a suspend hook to make sure we come back up after a
	 * resume.
	 */
	sc->sc_powerhook = powerhook_establish(gem_power, sc);
	if (sc->sc_powerhook == NULL)
		device_printf(sc->sc_dev, "WARNING: unable to establish power "
		    "hook\n");
#endif

	callout_init(&sc->sc_tick_ch, 0);
	callout_init(&sc->sc_rx_ch, 0);
	return (0);
	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_5:
	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
fail_4:
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
fail_3:
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
fail_2:
	bus_dma_tag_destroy(sc->sc_cdmatag);
fail_1:
	bus_dma_tag_destroy(sc->sc_dmatag);
fail_0:
	bus_dma_tag_destroy(sc->sc_pdmatag);
	return (error);
}

static void
gem_cddma_callback(xsc, segs, nsegs, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	int error;
{
	struct gem_softc *sc = (struct gem_softc *)xsc;

	if (error != 0)
		return;
	if (nsegs != 1) {
		/* can't happen... */
		panic("gem_cddma_callback: bad control buffer segment count");
	}
	sc->sc_cddma = segs[0].ds_addr;
}

static void
gem_rxdma_callback(xsc, segs, nsegs, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	int error;
{
	struct gem_rxsoft *rxs = (struct gem_rxsoft *)xsc;

	if (error != 0)
		return;
	if (nsegs != 1) {
		/* can't happen... */
		panic("gem_rxdma_callback: bad receive buffer segment count");
	}
	rxs->rxs_paddr = segs[0].ds_addr;
}
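
/*
 * Both callbacks above rely on getting exactly one DMA segment: the control
 * data tag was created with a segment count of 1, and each receive buffer
 * is a single physically contiguous mbuf cluster, so a multi-segment
 * mapping would indicate a busdma setup error.  The panics document that
 * invariant rather than handle a case that is expected to be impossible.
 */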
/*
 * This is called multiple times in our version of dmamap_load_mbuf, but should
 * be suitable for a generic version that only calls it once.
 */
static void
gem_txdma_callback(xsc, segs, nsegs, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	int error;
{
	struct gem_txdma *tx = (struct gem_txdma *)xsc;
	int seg;

	tx->txd_error = error;
	if (error != 0)
		return;
	tx->txd_nsegs = nsegs;

	/*
	 * Initialize the transmit descriptors.
	 */
	for (seg = 0; seg < nsegs;
	    seg++, tx->txd_nexttx = GEM_NEXTTX(tx->txd_nexttx)) {
		uint64_t flags;

		DPRINTF(tx->txd_sc, ("txdma_cb: mapping seg %d (txd %d), len "
		    "%lx, addr %#lx (%#lx)\n", seg, tx->txd_nexttx,
		    segs[seg].ds_len, segs[seg].ds_addr,
		    GEM_DMA_WRITE(tx->txd_sc, segs[seg].ds_addr)));
		CTR5(KTR_GEM, "txdma_cb: mapping seg %d (txd %d), len "
		    "%lx, addr %#lx (%#lx)", seg, tx->txd_nexttx,
		    segs[seg].ds_len, segs[seg].ds_addr,
		    GEM_DMA_WRITE(tx->txd_sc, segs[seg].ds_addr));
		/*
		 * If this is the first descriptor we're
		 * enqueueing, set the start of packet flag,
		 * and the checksum stuff if we want the hardware
		 * to do it.
		 */
		tx->txd_sc->sc_txdescs[tx->txd_nexttx].gd_addr =
		    GEM_DMA_WRITE(tx->txd_sc, segs[seg].ds_addr);
		flags = segs[seg].ds_len & GEM_TD_BUFSIZE;
		if ((tx->txd_flags & GTXD_FIRST) != 0 && seg == 0) {
			CTR2(KTR_GEM, "txdma_cb: start of packet at seg %d, "
			    "tx %d", seg, tx->txd_nexttx);
			flags |= GEM_TD_START_OF_PACKET;
		}
		if ((tx->txd_flags & GTXD_LAST) != 0 && seg == nsegs - 1) {
			CTR2(KTR_GEM, "txdma_cb: end of packet at seg %d, "
			    "tx %d", seg, tx->txd_nexttx);
			flags |= GEM_TD_END_OF_PACKET;
		}
		tx->txd_sc->sc_txdescs[tx->txd_nexttx].gd_flags =
		    GEM_DMA_WRITE(tx->txd_sc, flags);
		tx->txd_lasttx = tx->txd_nexttx;
	}
}

static void
gem_tick(arg)
	void *arg;
{
	struct gem_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(sc->sc_mii);
	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}

/*
 * Poll register r until the bits in clr are clear and the bits in set are
 * set, giving up after TRIES iterations of DELAY(100) (roughly one second).
 * Returns nonzero on success, zero on timeout.
 */
static int
gem_bitwait(sc, r, clr, set)
	struct gem_softc *sc;
	bus_addr_t r;
	u_int32_t clr;
	u_int32_t set;
{
	int i;
	u_int32_t reg;

	for (i = TRIES; i--; DELAY(100)) {
		reg = bus_space_read_4(sc->sc_bustag, sc->sc_h, r);
		if ((reg & clr) == 0 && (reg & set) == set)
			return (1);
	}
	return (0);
}

void
gem_reset(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int s;

	s = splnet();
	DPRINTF(sc, ("%s: gem_reset\n", device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_reset", device_get_name(sc->sc_dev));
	gem_reset_rx(sc);
	gem_reset_tx(sc);

	/* Do a full reset */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
		device_printf(sc->sc_dev, "cannot reset device\n");
	splx(s);
}

/*
 * gem_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
gem_rxdrain(sc)
	struct gem_softc *sc;
{
	struct gem_rxsoft *rxs;
	int i;

	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/*
 * Reset the whole thing.
 */
static void
gem_stop(ifp, disable)
	struct ifnet *ifp;
	int disable;
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct gem_txsoft *txs;

	DPRINTF(sc, ("%s: gem_stop\n", device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_stop", device_get_name(sc->sc_dev));

	callout_stop(&sc->sc_tick_ch);

	/* XXX - Should we reset these instead? */
	gem_disable_tx(sc);
	gem_disable_rx(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		if (txs->txs_ndescs != 0) {
			bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
			if (txs->txs_mbuf != NULL) {
				m_freem(txs->txs_mbuf);
				txs->txs_mbuf = NULL;
			}
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	if (disable)
		gem_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
/*
 * Reset the receiver
 */
static int
gem_reset_rx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_rx(sc);
	bus_space_write_4(t, h, GEM_RX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_RX_CONFIG, 1, 0))
		device_printf(sc->sc_dev, "cannot disable rx dma\n");

	/* Wait 5ms extra. */
	DELAY(5000);

	/* Finally, reset the ERX */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX, 0)) {
		device_printf(sc->sc_dev, "cannot reset receiver\n");
		return (1);
	}
	return (0);
}

/*
 * Reset the transmitter
 */
static int
gem_reset_tx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int i;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_tx(sc);
	bus_space_write_4(t, h, GEM_TX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_TX_CONFIG, 1, 0))
		device_printf(sc->sc_dev, "cannot disable tx dma\n");

	/* Wait 5ms extra. */
	DELAY(5000);

	/* Finally, reset the ETX */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_TX);
	/* Wait till it finishes */
	for (i = TRIES; i--; DELAY(100))
		if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_TX) == 0)
			break;
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) {
		device_printf(sc->sc_dev, "cannot reset transmitter\n");
		return (1);
	}
	return (0);
}

/*
 * disable receiver.
 */
static int
gem_disable_rx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	cfg &= ~GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
}

/*
 * disable transmitter.
 */
static int
gem_disable_tx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG);
	cfg &= ~GEM_MAC_TX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
}
/*
 * Initialize the transmit and receive descriptor rings.
 */
static int
gem_meminit(sc)
	struct gem_softc *sc;
{
	struct gem_rxsoft *rxs;
	int i, error;

	/*
	 * Initialize the transmit descriptor ring.
	 */
	memset((void *)sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	for (i = 0; i < GEM_NTXDESC; i++) {
		sc->sc_txdescs[i].gd_flags = 0;
		sc->sc_txdescs[i].gd_addr = 0;
	}
	GEM_CDTXSYNC(sc, 0, GEM_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = GEM_NTXDESC;
	sc->sc_txnext = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = gem_add_rxbuf(sc, i)) != 0) {
				device_printf(sc->sc_dev, "unable to "
				    "allocate or map rx buffer %d, error = "
				    "%d\n", i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				gem_rxdrain(sc);
				return (1);
			}
		} else
			GEM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	return (0);
}

static int
gem_ringsize(sz)
	int sz;
{
	int v = 0;

	switch (sz) {
	case 32:
		v = GEM_RING_SZ_32;
		break;
	case 64:
		v = GEM_RING_SZ_64;
		break;
	case 128:
		v = GEM_RING_SZ_128;
		break;
	case 256:
		v = GEM_RING_SZ_256;
		break;
	case 512:
		v = GEM_RING_SZ_512;
		break;
	case 1024:
		v = GEM_RING_SZ_1024;
		break;
	case 2048:
		v = GEM_RING_SZ_2048;
		break;
	case 4096:
		v = GEM_RING_SZ_4096;
		break;
	case 8192:
		v = GEM_RING_SZ_8192;
		break;
	default:
		printf("gem: invalid Receive Descriptor ring size\n");
		break;
	}
	return (v);
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
static void
gem_init(xsc)
	void *xsc;
{
	struct gem_softc *sc = (struct gem_softc *)xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int s;
	u_int32_t v;

	s = splnet();

	DPRINTF(sc, ("%s: gem_init: calling stop\n",
	    device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_init: calling stop",
	    device_get_name(sc->sc_dev));
	/*
	 * Initialization sequence.  The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2.  Reset the Ethernet Channel */
	gem_stop(&sc->sc_arpcom.ac_if, 0);
	gem_reset(sc);
	DPRINTF(sc, ("%s: gem_init: restarting\n",
	    device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_init: restarting",
	    device_get_name(sc->sc_dev));

	/* Re-initialize the MIF */
	gem_mifinit(sc);

	/* Call MI reset function if any */
	if (sc->sc_hwreset)
		(*sc->sc_hwreset)(sc);

	/* step 3.  Setup data structures in host memory */
	gem_meminit(sc);

	/* step 4.  TX MAC registers & counters */
	gem_init_regs(sc);
	/* XXX: VLAN code from NetBSD temporarily removed. */
	bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
	    (ETHER_MAX_LEN + sizeof(struct ether_header)) | (0x2000<<16));

	/* step 5.  RX MAC registers & counters */
	gem_setladrf(sc);

	/* step 6 & 7.  Program Descriptor Ring Base Addresses */
	/* NOTE: we use only 32-bit DMA addresses here. */
	bus_space_write_4(t, h, GEM_TX_RING_PTR_HI, 0);
	bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

	bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 0);
	bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
	DPRINTF(sc, ("loading rx ring %lx, tx ring %lx, cddma %lx\n",
	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma));
	CTR3(KTR_GEM, "loading rx ring %lx, tx ring %lx, cddma %lx",
	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);
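
	/*
	 * Writing zero to the _HI halves is safe here: the control data
	 * DMA tag was created with a lowaddr of BUS_SPACE_MAXADDR_32BIT,
	 * so both rings live below 4GB and their bus addresses fit
	 * entirely in the _LO registers.
	 */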
	/* step 8.  Global Configuration & Interrupt Mask */
	bus_space_write_4(t, h, GEM_INTMASK,
	    ~(GEM_INTR_TX_INTME|
	    GEM_INTR_TX_EMPTY|
	    GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF|
	    GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS|
	    GEM_INTR_MAC_CONTROL|GEM_INTR_MIF|
	    GEM_INTR_BERR));
	bus_space_write_4(t, h, GEM_MAC_RX_MASK, 0); /* XXXX */
	bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXXX */
	bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0); /* XXXX */

	/* step 9.  ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = gem_ringsize(GEM_NTXDESC /*XXX*/);
	bus_space_write_4(t, h, GEM_TX_CONFIG,
	    v|GEM_TX_CONFIG_TXDMA_EN|
	    ((0x400<<10)&GEM_TX_CONFIG_TXFIFO_TH));

	/* step 10.  ERX Configuration */

	/* Encode Receive Descriptor ring size: four possible values */
	v = gem_ringsize(GEM_NRXDESC /*XXX*/);

	/* Enable DMA */
	bus_space_write_4(t, h, GEM_RX_CONFIG,
	    v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
	    (2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN|
	    (0<<GEM_RX_CONFIG_CXM_START_SHFT));
	/*
	 * The following value is for an OFF Threshold of about 15.5 Kbytes
	 * and an ON Threshold of 4K bytes; both fields count in units of
	 * 64 bytes (0xf8 * 64 = 15872 and 0x40 * 64 = 4096).
	 */
	bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH, 0xf8 | (0x40 << 12));
	bus_space_write_4(t, h, GEM_RX_BLANKING, (2<<12)|6);

	/* step 11.  Configure Media */
	(void)gem_mii_statchg(sc->sc_dev);

	/* step 12.  RX_MAC Configuration Register */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	v |= GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);

	/* step 14.  Issue Transmit Pending command */

	/* Call MI initialization function if any */
	if (sc->sc_hwinit)
		(*sc->sc_hwinit)(sc);

	/* step 15.  Give the receiver a swift kick */
	bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC-4);

	/* Start the one second timer. */
	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;
	sc->sc_flags = ifp->if_flags;
	splx(s);
}
/*
 * XXX: This is really a substitute for bus_dmamap_load_mbuf(), which FreeBSD
 * does not yet have, with some adaptations for this driver.
 * Some changes are mandated by the fact that multiple maps may be needed
 * to map a single mbuf.
 * It should be removed once generic support is available.
 *
 * This is derived from NetBSD (syssrc/sys/arch/sparc64/sparc64/machdep.c), for
 * a copyright notice see sparc64/sparc64/bus_machdep.c.
 *
 * Not every error condition is passed to the callback in this version, and the
 * callback may be called more than once.
 * It also gropes in the entrails of the callback arg...
 */
static int
gem_dmamap_load_mbuf(sc, m0, cb, txj, flags)
	struct gem_softc *sc;
	struct mbuf *m0;
	bus_dmamap_callback_t *cb;
	struct gem_txjob *txj;
	int flags;
{
	struct gem_txdma txd;
	struct gem_txsoft *txs;
	struct mbuf *m;
	void *vaddr;
	int error, first = 1, len, totlen;

	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("gem_dmamap_load_mbuf: no packet header");
	totlen = m0->m_pkthdr.len;
	len = 0;
	txd.txd_sc = sc;
	txd.txd_nexttx = txj->txj_nexttx;
	txj->txj_nsegs = 0;
	STAILQ_INIT(&txj->txj_txsq);
	m = m0;
	while (m != NULL && len < totlen) {
		if (m->m_len == 0) {
			/* Skip empty mbufs; advance first or we loop forever. */
			m = m->m_next;
			continue;
		}
		/* Get a work queue entry. */
		if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
			/*
			 * Ran out of descriptors, return a value that
			 * cannot be returned by bus_dmamap_load to notify
			 * the caller.
			 */
			error = -1;
			goto fail;
		}
		len += m->m_len;
		txd.txd_flags = first ? GTXD_FIRST : 0;
		if (m->m_next == NULL || len >= totlen)
			txd.txd_flags |= GTXD_LAST;
		vaddr = mtod(m, void *);
		error = bus_dmamap_load(sc->sc_dmatag, txs->txs_dmamap, vaddr,
		    m->m_len, cb, &txd, flags);
		if (error != 0 || txd.txd_error != 0)
			goto fail;
		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmatag, txs->txs_dmamap,
		    BUS_DMASYNC_PREWRITE);
		m = m->m_next;
		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		txs->txs_mbuf = first ? m0 : NULL;
		txs->txs_firstdesc = txj->txj_nexttx;
		txs->txs_lastdesc = txd.txd_lasttx;
		txs->txs_ndescs = txd.txd_nsegs;
		CTR3(KTR_GEM, "load_mbuf: setting firstdesc=%d, lastdesc=%d, "
		    "ndescs=%d", txs->txs_firstdesc, txs->txs_lastdesc,
		    txs->txs_ndescs);
		STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
		STAILQ_INSERT_TAIL(&txj->txj_txsq, txs, txs_q);
		txj->txj_nexttx = txd.txd_nexttx;
		txj->txj_nsegs += txd.txd_nsegs;
		first = 0;
	}
	txj->txj_lasttx = txd.txd_lasttx;
	return (0);

fail:
	CTR1(KTR_GEM, "gem_dmamap_load_mbuf failed (%d)", error);
	gem_dmamap_unload_mbuf(sc, txj);
	return (error);
}

/*
 * Unload an mbuf using the txd the information was placed in.
 * The tx interrupt code frees the tx segments one by one, because the txd is
 * not available any more.
 */
static void
gem_dmamap_unload_mbuf(sc, txj)
	struct gem_softc *sc;
	struct gem_txjob *txj;
{
	struct gem_txsoft *txs;

	/* Re-add the removed descriptors and unload the segments. */
	while ((txs = STAILQ_FIRST(&txj->txj_txsq)) != NULL) {
		bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
		STAILQ_REMOVE_HEAD(&txj->txj_txsq, txs_q);
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}
}

static void
gem_dmamap_commit_mbuf(sc, txj)
	struct gem_softc *sc;
	struct gem_txjob *txj;
{
	struct gem_txsoft *txs;

	/* Commit the txjob by transferring the txsofts to the txdirtyq. */
	while ((txs = STAILQ_FIRST(&txj->txj_txsq)) != NULL) {
		STAILQ_REMOVE_HEAD(&txj->txj_txsq, txs_q);
		STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
	}
}
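
/*
 * A gem_txjob thus follows a simple two-phase protocol: the loader above
 * collects one txsoft per mbuf on the private txj_txsq list, and the caller
 * then either commits the whole job to sc_txdirtyq (once it knows enough
 * descriptors are free) or unloads it, returning every txsoft to
 * sc_txfreeq.  Nothing becomes visible to the transmit interrupt handler
 * until the commit, so a failed load has no side effects.
 */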
static void
gem_init_regs(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;

	/* These regs are not cleared on reset */
	sc->sc_inited = 0;
	if (!sc->sc_inited) {

		/* Wooo.  Magic values. */
		bus_space_write_4(t, h, GEM_MAC_IPG0, 0);
		bus_space_write_4(t, h, GEM_MAC_IPG1, 8);
		bus_space_write_4(t, h, GEM_MAC_IPG2, 4);

		bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* Max frame and max burst size */
		bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
		    (ifp->if_mtu+18) | (0x2000<<16)/* Burst size */);
		bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x7);
		bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x4);
		bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10);
		/* Dunno.... */
		bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
		bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
		    ((sc->sc_arpcom.ac_enaddr[5]<<8)|
		    sc->sc_arpcom.ac_enaddr[4])&0x3ff);
		/* Secondary MAC addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);
		/* MAC control addr set to 0:1:c2:0:1:80 */
		bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
		bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
		bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);

		/* MAC filter addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);

		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);

		sc->sc_inited = 1;
	}

	/* Counters need to be zeroed */
	bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);

	/* Un-pause stuff */
#if 0
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);
#else
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0);
#endif

	/*
	 * Set the station address.
	 */
	bus_space_write_4(t, h, GEM_MAC_ADDR0,
	    (sc->sc_arpcom.ac_enaddr[4]<<8) | sc->sc_arpcom.ac_enaddr[5]);
	bus_space_write_4(t, h, GEM_MAC_ADDR1,
	    (sc->sc_arpcom.ac_enaddr[2]<<8) | sc->sc_arpcom.ac_enaddr[3]);
	bus_space_write_4(t, h, GEM_MAC_ADDR2,
	    (sc->sc_arpcom.ac_enaddr[0]<<8) | sc->sc_arpcom.ac_enaddr[1]);
}
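
/*
 * The station address registers hold the 48-bit MAC address as three 16-bit
 * words, least significant word in GEM_MAC_ADDR0.  For example (a made-up
 * address, purely for illustration), 08:00:20:ab:cd:ef is programmed as
 * ADDR0 = 0xcdef, ADDR1 = 0x20ab, ADDR2 = 0x0800.
 */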
static void
gem_start(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct mbuf *m0 = NULL, *m;
	struct gem_txjob txj;
	int firsttx, ofree, seg, ntx, txmfail;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;
	firsttx = sc->sc_txnext;

	DPRINTF(sc, ("%s: gem_start: txfree %d, txnext %d\n",
	    device_get_name(sc->sc_dev), ofree, firsttx));
	CTR3(KTR_GEM, "%s: gem_start: txfree %d, txnext %d",
	    device_get_name(sc->sc_dev), ofree, firsttx);

	txj.txj_nexttx = firsttx;
	txj.txj_lasttx = 0;
	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	txmfail = 0;
	for (ntx = 0;; ntx++) {
		/*
		 * Grab a packet off the queue.
		 */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		txmfail = gem_dmamap_load_mbuf(sc, m0,
		    gem_txdma_callback, &txj, BUS_DMA_NOWAIT);
		if (txmfail == -1) {
			IF_PREPEND(&ifp->if_snd, m0);
			break;
		}
		if (txmfail > 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				device_printf(sc->sc_dev, "unable to "
				    "allocate Tx mbuf\n");
				/* Failed; requeue. */
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					device_printf(sc->sc_dev, "unable to "
					    "allocate Tx cluster\n");
					IF_PREPEND(&ifp->if_snd, m0);
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			txmfail = gem_dmamap_load_mbuf(sc, m,
			    gem_txdma_callback, &txj, BUS_DMA_NOWAIT);
			if (txmfail != 0) {
				if (txmfail > 0) {
					device_printf(sc->sc_dev, "unable to "
					    "load Tx buffer, error = %d\n",
					    txmfail);
				}
				m_freem(m);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring as a termination point, to
		 * prevent wrap-around.
		 */
		if (txj.txj_nsegs > (sc->sc_txfree - 1)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			gem_dmamap_unload_mbuf(sc, &txj);
			if (m != NULL)
				m_freem(m);
			IF_PREPEND(&ifp->if_snd, m0);
			break;
		}

		if (m != NULL)
			m_freem(m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf("     gem_start %p transmit chain:\n",
			    STAILQ_FIRST(&txj.txj_txsq));
			for (seg = sc->sc_txnext;; seg = GEM_NEXTTX(seg)) {
				printf("descriptor %d:\t", seg);
				printf("gd_flags: 0x%016llx\t", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[seg].gd_flags));
				printf("gd_addr: 0x%016llx\n", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[seg].gd_addr));
				if (seg == txj.txj_lasttx)
					break;
			}
		}
#endif

		/* Sync the descriptors we're using. */
		GEM_CDTXSYNC(sc, sc->sc_txnext, txj.txj_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->sc_txfree -= txj.txj_nsegs;
		sc->sc_txnext = txj.txj_nexttx;

		gem_dmamap_commit_mbuf(sc, &txj);
	}

	if (txmfail == -1 || sc->sc_txfree == 0) {
		ifp->if_flags |= IFF_OACTIVE;
		/* No more slots left; notify upper layer. */
	}

	if (ntx > 0) {
		DPRINTF(sc, ("%s: packets enqueued, IC on %d, OWN on %d\n",
		    device_get_name(sc->sc_dev), txj.txj_lasttx, firsttx));
		CTR3(KTR_GEM, "%s: packets enqueued, IC on %d, OWN on %d",
		    device_get_name(sc->sc_dev), txj.txj_lasttx, firsttx);
		/*
		 * The entire packet chain is set up.
		 * Kick the transmitter.
		 */
		DPRINTF(sc, ("%s: gem_start: kicking tx %d\n",
		    device_get_name(sc->sc_dev), txj.txj_nexttx));
		CTR3(KTR_GEM, "%s: gem_start: kicking tx %d=%d",
		    device_get_name(sc->sc_dev), txj.txj_nexttx,
		    sc->sc_txnext);
		bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_TX_KICK,
		    sc->sc_txnext);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
		DPRINTF(sc, ("%s: gem_start: watchdog %d\n",
		    device_get_name(sc->sc_dev), ifp->if_timer));
		CTR2(KTR_GEM, "%s: gem_start: watchdog %d",
		    device_get_name(sc->sc_dev), ifp->if_timer);
	}
}
/*
 * Transmit interrupt.
 */
static void
gem_tint(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h;
	struct gem_txsoft *txs;
	int txlast;

	DPRINTF(sc, ("%s: gem_tint\n", device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_tint", device_get_name(sc->sc_dev));

	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
	    bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);

	/*
	 * Then clear the hardware counters.
	 */
	bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		GEM_CDTXSYNC(sc, txs->txs_lastdesc,
		    txs->txs_ndescs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			int i;
			printf("    txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
				printf("descriptor %d: ", i);
				printf("gd_flags: 0x%016llx\t", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags));
				printf("gd_addr: 0x%016llx\n", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr));
				if (i == txs->txs_lastdesc)
					break;
			}
		}
#endif

		/*
		 * In theory, we could harvest some descriptors before
		 * the ring is empty, but that's a bit complicated.
		 *
		 * GEM_TX_COMPLETION points to the last descriptor
		 * processed +1.
		 */
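		/*
		 * A concrete illustration of the wrap handling below
		 * (ring size chosen only for the example): on a
		 * 32-descriptor ring, a packet with firstdesc = 30 and
		 * lastdesc = 1 occupies descriptors 30, 31, 0 and 1.
		 * While the completion pointer lies within that span the
		 * packet is still in flight; only when it reaches
		 * lastdesc + 1 (here, 2) has the hardware consumed all
		 * of its descriptors.
		 */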
		txlast = bus_space_read_4(t, mac, GEM_TX_COMPLETION);
		DPRINTF(sc,
		    ("gem_tint: txs->txs_lastdesc = %d, txlast = %d\n",
		    txs->txs_lastdesc, txlast));
		CTR3(KTR_GEM, "gem_tint: txs->txs_firstdesc = %d, "
		    "txs->txs_lastdesc = %d, txlast = %d",
		    txs->txs_firstdesc, txs->txs_lastdesc, txlast);
		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
			if ((txlast >= txs->txs_firstdesc) &&
			    (txlast <= txs->txs_lastdesc))
				break;
		} else {
			/* Ick -- this command wraps */
			if ((txlast >= txs->txs_firstdesc) ||
			    (txlast <= txs->txs_lastdesc))
				break;
		}

		DPRINTF(sc, ("gem_tint: releasing a desc\n"));
		CTR0(KTR_GEM, "gem_tint: releasing a desc");
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		sc->sc_txfree += txs->txs_ndescs;

		bus_dmamap_sync(sc->sc_dmatag, txs->txs_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
		if (txs->txs_mbuf != NULL) {
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		ifp->if_opackets++;
	}

	DPRINTF(sc, ("gem_tint: GEM_TX_STATE_MACHINE %x "
	    "GEM_TX_DATA_PTR %llx "
	    "GEM_TX_COMPLETION %x\n",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_STATE_MACHINE),
	    ((long long) bus_space_read_4(sc->sc_bustag, sc->sc_h,
	    GEM_TX_DATA_PTR_HI) << 32) |
	    bus_space_read_4(sc->sc_bustag, sc->sc_h,
	    GEM_TX_DATA_PTR_LO),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_COMPLETION)));
	CTR3(KTR_GEM, "gem_tint: GEM_TX_STATE_MACHINE %x "
	    "GEM_TX_DATA_PTR %llx "
	    "GEM_TX_COMPLETION %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_STATE_MACHINE),
	    ((long long) bus_space_read_4(sc->sc_bustag, sc->sc_h,
	    GEM_TX_DATA_PTR_HI) << 32) |
	    bus_space_read_4(sc->sc_bustag, sc->sc_h,
	    GEM_TX_DATA_PTR_LO),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_COMPLETION));

	if (STAILQ_FIRST(&sc->sc_txdirtyq) == NULL)
		ifp->if_timer = 0;

	DPRINTF(sc, ("%s: gem_tint: watchdog %d\n",
	    device_get_name(sc->sc_dev), ifp->if_timer));
	CTR2(KTR_GEM, "%s: gem_tint: watchdog %d",
	    device_get_name(sc->sc_dev), ifp->if_timer);

	/* Freed some descriptors, so reset IFF_OACTIVE and restart. */
	ifp->if_flags &= ~IFF_OACTIVE;
	gem_start(ifp);
}

static void
gem_rint_timeout(arg)
	void *arg;
{

	gem_rint((struct gem_softc *)arg);
}
/*
 * Receive interrupt.
 */
static void
gem_rint(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	struct ether_header *eh;
	struct gem_rxsoft *rxs;
	struct mbuf *m;
	u_int64_t rxstat;
	int i, len;

	callout_stop(&sc->sc_rx_ch);
	DPRINTF(sc, ("%s: gem_rint\n", device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_rint", device_get_name(sc->sc_dev));
	/*
	 * XXXX Read the lastrx only once at the top for speed.
	 */
	DPRINTF(sc, ("gem_rint: sc->rxptr %d, complete %d\n",
	    sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION)));
	CTR2(KTR_GEM, "gem_rint: sc->rxptr %d, complete %d",
	    sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION));
	for (i = sc->sc_rxptr; i != bus_space_read_4(t, h, GEM_RX_COMPLETION);
	    i = GEM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		GEM_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);

		if (rxstat & GEM_RD_OWN) {
			/*
			 * The descriptor is still marked as owned, although
			 * it is supposed to have completed.  This has been
			 * observed on some machines.  Just exiting here
			 * might leave the packet sitting around until another
			 * one arrives to trigger a new interrupt, which is
			 * generally undesirable, so set up a timeout.
			 */
			callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
			    gem_rint_timeout, sc);
			break;
		}

		if (rxstat & GEM_RD_BAD_CRC) {
			device_printf(sc->sc_dev, "receive error: CRC error\n");
			GEM_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap,
		    BUS_DMASYNC_POSTREAD);
#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf("    rxsoft %p descriptor %d: ", rxs, i);
			printf("gd_flags: 0x%016llx\t", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
			printf("gd_addr: 0x%016llx\n", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
		}
#endif

		/*
		 * No errors; receive the packet.  Note the Gem
		 * includes the CRC with every packet.
		 */
		len = GEM_RD_BUFLEN(rxstat);

		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = rxs->rxs_mbuf;
		if (gem_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			GEM_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap,
			    BUS_DMASYNC_PREREAD);
			continue;
		}
		/*
		 * We're already off by two: the RX DMA was configured in
		 * gem_init() with a first byte offset of 2, which long-word
		 * aligns the IP header.
		 */
		m->m_data += 2;

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len - ETHER_CRC_LEN;
		m_adj(m, sizeof(struct ether_header));

		/* Pass it on. */
		ether_input(ifp, eh, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
	bus_space_write_4(t, h, GEM_RX_KICK, i);

	DPRINTF(sc, ("gem_rint: done sc->rxptr %d, complete %d\n",
	    sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION)));
	CTR2(KTR_GEM, "gem_rint: done sc->rxptr %d, complete %d",
	    sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION));
}
/*
 * gem_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
gem_add_rxbuf(sc, idx)
	struct gem_softc *sc;
	int idx;
{
	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

#ifdef GEM_DEBUG
	/* bzero the packet to check dma */
	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
#endif

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmatag, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, gem_rxdma_callback, rxs,
	    BUS_DMA_NOWAIT);
	if (error != 0 || rxs->rxs_paddr == 0) {
		device_printf(sc->sc_dev, "can't load rx DMA map %d, error = "
		    "%d\n", idx, error);
		panic("gem_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD);

	GEM_INIT_RXDESC(sc, idx);

	return (0);
}

static void
gem_eint(sc, status)
	struct gem_softc *sc;
	u_int status;
{

	if ((status & GEM_INTR_MIF) != 0) {
		device_printf(sc->sc_dev, "XXX link status changed\n");
		return;
	}

	device_printf(sc->sc_dev, "status=%x\n", status);
}
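
/*
 * The interrupt handler below reads GEM_STATUS once and dispatches on its
 * bits; the "cplt" value it logs appears to be the TX completion index
 * that the chip packs into the upper bits of the status register (hence
 * the >> 19), saving a separate GEM_TX_COMPLETION read in the debug path.
 */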
void
gem_intr(v)
	void *v;
{
	struct gem_softc *sc = (struct gem_softc *)v;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_h;
	u_int32_t status;

	status = bus_space_read_4(t, seb, GEM_STATUS);
	DPRINTF(sc, ("%s: gem_intr: cplt %x, status %x\n",
	    device_get_name(sc->sc_dev), (status>>19),
	    (u_int)status));
	CTR3(KTR_GEM, "%s: gem_intr: cplt %x, status %x",
	    device_get_name(sc->sc_dev), (status>>19),
	    (u_int)status);

	if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
		gem_eint(sc, status);

	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
		gem_tint(sc);

	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
		gem_rint(sc);

	/* We should eventually do more than just print out error stats. */
	if (status & GEM_INTR_TX_MAC) {
		int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS);
		if (txstat & ~GEM_MAC_TX_XMIT_DONE)
			printf("MAC tx fault, status %x\n", txstat);
		if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG))
			gem_init(sc);
	}
	if (status & GEM_INTR_RX_MAC) {
		int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS);
		if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
			printf("MAC rx fault, status %x\n", rxstat);
		if ((rxstat & GEM_MAC_RX_OVERFLOW) != 0)
			gem_init(sc);
	}
}

static void
gem_watchdog(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = ifp->if_softc;

	DPRINTF(sc, ("gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
	    "GEM_MAC_RX_CONFIG %x\n",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG)));
	CTR3(KTR_GEM, "gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
	    "GEM_MAC_RX_CONFIG %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG));
	CTR3(KTR_GEM, "gem_watchdog: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x "
	    "GEM_MAC_TX_CONFIG %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_CONFIG));

	device_printf(sc->sc_dev, "device timeout\n");
	++ifp->if_oerrors;

	/* Try to get more packets going. */
	gem_start(ifp);
}

/*
 * Initialize the MII Management Interface
 */
static void
gem_mifinit(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;

	/* Configure the MIF in frame mode */
	sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config);
}

/*
 * MII interface
 *
 * The GEM MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
 *
 */
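
/*
 * This driver uses frame mode only.  Roughly, a frame is built by OR-ing an
 * operation code (GEM_MIF_FRAME_READ or GEM_MIF_FRAME_WRITE) with the PHY
 * and register numbers shifted into their fields (GEM_MIF_PHY_SHIFT and
 * GEM_MIF_REG_SHIFT), plus, for writes, the data in GEM_MIF_FRAME_DATA.
 * The frame is written to GEM_MIF_FRAME, and the chip sets the turnaround
 * bit GEM_MIF_FRAME_TA0 in the same register once it has executed the
 * operation, as the two access routines below illustrate.
 */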
int
gem_mii_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{
	struct gem_softc *sc = device_get_softc(dev);
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG_PHY
	printf("gem_mii_readreg: phy %d reg %d\n", phy, reg);
#endif

#if 0
	/* Select the desired PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	/* Clear PHY select bit */
	v &= ~GEM_MIF_CONFIG_PHY_SEL;
	if (phy == GEM_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= GEM_MIF_CONFIG_PHY_SEL;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, v);
#endif

	/* Construct the frame command */
	v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) |
	    GEM_MIF_FRAME_READ;

	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (v & GEM_MIF_FRAME_DATA);
	}

	device_printf(sc->sc_dev, "mii_read timeout\n");
	return (0);
}

int
gem_mii_writereg(dev, phy, reg, val)
	device_t dev;
	int phy, reg, val;
{
	struct gem_softc *sc = device_get_softc(dev);
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG_PHY
	printf("gem_mii_writereg: phy %d reg %d val %x\n", phy, reg, val);
#endif

#if 0
	/* Select the desired PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	/* Clear PHY select bit */
	v &= ~GEM_MIF_CONFIG_PHY_SEL;
	if (phy == GEM_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= GEM_MIF_CONFIG_PHY_SEL;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, v);
#endif
	/* Construct the frame command */
	v = GEM_MIF_FRAME_WRITE |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT) |
	    (val & GEM_MIF_FRAME_DATA);

	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (1);
	}

	device_printf(sc->sc_dev, "mii_write timeout\n");
	return (0);
}
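
/*
 * Note that both routines above poll for at most 100 iterations of
 * DELAY(1), so an unresponsive PHY makes an access fail after roughly 100
 * microseconds, and a read that times out is indistinguishable from a
 * read that legitimately returned 0.
 */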
void
gem_mii_statchg(dev)
	device_t dev;
{
	struct gem_softc *sc = device_get_softc(dev);
#ifdef GEM_DEBUG
	int instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media);
#endif
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h;
	u_int32_t v;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_mii_statchg: status change: phy = %d\n",
		    sc->sc_phys[instance]);
#endif

	/*
	 * Set tx full duplex options.  The register must be cleared, and
	 * we must delay, before changing it.
	 */
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0);
	DELAY(10000);
	v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT|
	    GEM_MAC_TX_ENABLE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) {
		v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS;
	}
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v);

	/* XIF Configuration */
	/* We should really calculate all this rather than rely on defaults */
	v = bus_space_read_4(t, mac, GEM_MAC_XIF_CONFIG);
	v = GEM_MAC_XIF_LINK_LED;
	v |= GEM_MAC_XIF_TX_MII_ENA;
	/* If an external transceiver is connected, enable its MII drivers */
	sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG);
	if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) {
		/* External MII needs echo disable if half duplex. */
		if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
			/* turn on full duplex LED */
			v |= GEM_MAC_XIF_FDPLX_LED;
		else
			/* half duplex -- disable echo */
			v |= GEM_MAC_XIF_ECHO_DISABL;
	} else {
		/* Internal MII needs buf enable */
		v |= GEM_MAC_XIF_MII_BUF_ENA;
	}
	bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v);
}

int
gem_mediachange(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = ifp->if_softc;

	/* XXX Add support for serial media. */

	return (mii_mediachg(sc->sc_mii));
}

void
gem_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct gem_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii->mii_media_active;
	ifmr->ifm_status = sc->sc_mii->mii_media_status;
}

/*
 * Process an ioctl request.
 */
static int
gem_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct gem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();
	switch (cmd) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
	case SIOCSIFMTU:
		error = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((sc->sc_flags ^ ifp->if_flags) == IFF_PROMISC)
				gem_setladrf(sc);
			else
				gem_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				gem_stop(ifp, 0);
		}
		sc->sc_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		gem_setladrf(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	default:
		error = ENOTTY;
		break;
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		gem_start(ifp);
	splx(s);
	return (error);
}
/*
 * Set up the logical address filter.
 */
static void
gem_setladrf(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ifmultiaddr *inm;
	struct sockaddr_dl *sdl;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_char *cp;
	u_int32_t crc;
	u_int32_t hash[16];
	u_int32_t v;
	int i, len;

	/* Clear hash table */
	memset(hash, 0, sizeof(hash));

	/* Get current RX configuration */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		/* Turn on promiscuous mode; turn off the hash filter */
		v |= GEM_MAC_RX_PROMISCUOUS;
		v &= ~GEM_MAC_RX_HASH_FILTER;
		goto chipit;
	}
	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		/* Accept all multicast: set all 16 hash words. */
		for (i = 0; i < 16; i++)
			hash[i] = 0xffff;
		ifp->if_flags |= IFF_ALLMULTI;
		goto chipit;
	}

	/* Turn off promiscuous mode; turn on the hash filter */
	v &= ~GEM_MAC_RX_PROMISCUOUS;
	v |= GEM_MAC_RX_HASH_FILTER;

	/*
	 * Set up multicast address filter by passing all multicast addresses
	 * through a crc generator, and then using the high order 8 bits as an
	 * index into the 256 bit logical address filter.  The high order 4
	 * bits select the word, while the low order 4 bits select the bit
	 * within the word.
	 */

	TAILQ_FOREACH(inm, &sc->sc_arpcom.ac_if.if_multiaddrs, ifma_link) {
		if (inm->ifma_addr->sa_family != AF_LINK)
			continue;
		sdl = (struct sockaddr_dl *)inm->ifma_addr;
		cp = LLADDR(sdl);
		crc = 0xffffffff;
		for (len = sdl->sdl_alen; --len >= 0;) {
			int octet = *cp++;

#define MC_POLY_LE	0xedb88320UL	/* mcast crc, little endian */
			for (i = 0; i < 8; i++) {
				if ((crc & 1) ^ (octet & 1)) {
					crc >>= 1;
					crc ^= MC_POLY_LE;
				} else {
					crc >>= 1;
				}
				octet >>= 1;
			}
		}
		/* Just want the 8 most significant bits. */
		crc >>= 24;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (crc & 0xf);
	}

chipit:
	/* Now load the hash table into the chip */
	bus_space_write_4(t, h, GEM_MAC_HASH0, hash[0]);
	bus_space_write_4(t, h, GEM_MAC_HASH1, hash[1]);
	bus_space_write_4(t, h, GEM_MAC_HASH2, hash[2]);
	bus_space_write_4(t, h, GEM_MAC_HASH3, hash[3]);
	bus_space_write_4(t, h, GEM_MAC_HASH4, hash[4]);
	bus_space_write_4(t, h, GEM_MAC_HASH5, hash[5]);
	bus_space_write_4(t, h, GEM_MAC_HASH6, hash[6]);
	bus_space_write_4(t, h, GEM_MAC_HASH7, hash[7]);
	bus_space_write_4(t, h, GEM_MAC_HASH8, hash[8]);
	bus_space_write_4(t, h, GEM_MAC_HASH9, hash[9]);
	bus_space_write_4(t, h, GEM_MAC_HASH10, hash[10]);
	bus_space_write_4(t, h, GEM_MAC_HASH11, hash[11]);
	bus_space_write_4(t, h, GEM_MAC_HASH12, hash[12]);
	bus_space_write_4(t, h, GEM_MAC_HASH13, hash[13]);
	bus_space_write_4(t, h, GEM_MAC_HASH14, hash[14]);
	bus_space_write_4(t, h, GEM_MAC_HASH15, hash[15]);

	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);
}
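
/*
 * A worked example of the hash computation above, with a hypothetical CRC
 * result purely for illustration: if hashing a multicast address leaves
 * 0x47 in the top 8 bits after the crc >>= 24 step, then bit 7
 * (0x47 & 0xf) of hash word 4 (0x47 >> 4) is set, and frames to that
 * address pass the filter.  Different addresses can collide on the same
 * bit, so the filter is a superset match and exact filtering is left to
 * the upper layers.
 */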
#ifdef notyet

/*
 * gem_power:
 *
 *	Power management (suspend/resume) hook.
 */
static void
gem_power(why, arg)
	int why;
	void *arg;
{
	struct gem_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	switch (why) {
	case PWR_SUSPEND:
	case PWR_STANDBY:
		gem_stop(ifp, 1);
		if (sc->sc_power != NULL)
			(*sc->sc_power)(sc, why);
		break;
	case PWR_RESUME:
		if (ifp->if_flags & IFF_UP) {
			if (sc->sc_power != NULL)
				(*sc->sc_power)(sc, why);
			gem_init(sc);
		}
		break;
	case PWR_SOFTSUSPEND:
	case PWR_SOFTSTANDBY:
	case PWR_SOFTRESUME:
		break;
	}
	splx(s);
}
#endif