/*
 * Copyright (C) 2001 Eduardo Horvath.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: NetBSD: gem.c,v 1.9 2001/10/21 20:45:15 thorpej Exp
 *
 * $FreeBSD$
 */

/*
 * Driver for Sun GEM ethernet controllers.
 */

#define	GEM_DEBUG

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <gem/if_gemreg.h>
#include <gem/if_gemvar.h>

#define	TRIES	10000

static void	gem_start(struct ifnet *);
static void	gem_stop(struct ifnet *, int);
static int	gem_ioctl(struct ifnet *, u_long, caddr_t);
static void	gem_cddma_callback(void *, bus_dma_segment_t *, int, int);
static void	gem_rxdma_callback(void *, bus_dma_segment_t *, int, int);
static void	gem_txdma_callback(void *, bus_dma_segment_t *, int, int);
static void	gem_tick(void *);
static void	gem_watchdog(struct ifnet *);
static void	gem_init(void *);
static void	gem_init_regs(struct gem_softc *sc);
static int	gem_ringsize(int sz);
static int	gem_meminit(struct gem_softc *);
static int	gem_dmamap_load_mbuf(struct gem_softc *, struct mbuf *,
    bus_dmamap_callback_t *, struct gem_txjob *, int);
static void	gem_dmamap_unload_mbuf(struct gem_softc *, struct gem_txjob *);
static void	gem_dmamap_commit_mbuf(struct gem_softc *, struct gem_txjob *);
static void	gem_mifinit(struct gem_softc *);
static int	gem_bitwait(struct gem_softc *sc, bus_addr_t r,
    u_int32_t clr, u_int32_t set);
static int	gem_reset_rx(struct gem_softc *);
static int	gem_reset_tx(struct gem_softc *);
static int	gem_disable_rx(struct gem_softc *);
static int	gem_disable_tx(struct gem_softc *);
static void	gem_rxdrain(struct gem_softc *);
static int	gem_add_rxbuf(struct gem_softc *, int);
static void	gem_setladrf(struct gem_softc *);

struct mbuf	*gem_get(struct gem_softc *, int, int);
static void	gem_eint(struct gem_softc *, u_int);
static void	gem_rint(struct gem_softc *);
static void	gem_rint_timeout(void *);
static void	gem_tint(struct gem_softc *);
#ifdef notyet
static void	gem_power(int, void *);
#endif

devclass_t gem_devclass;
DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(gem, miibus, 1, 1, 1);

#ifdef GEM_DEBUG
#define	DPRINTF(sc, x)	if ((sc)->sc_arpcom.ac_if.if_flags & IFF_DEBUG) \
				printf x
#include <sys/ktr.h>
#define	KTR_GEM		KTR_CT2
#else
#define	DPRINTF(sc, x)	/* nothing */
#endif

#define	GEM_NSEGS GEM_NTXSEGS

/*
 * gem_attach:
 *
 *	Attach a Gem interface to the system.
 */
int
gem_attach(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_softc *child;
	int i, error;

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	gem_reset(sc);

	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, GEM_NSEGS,
	    BUS_SPACE_MAXSIZE_32BIT, 0, &sc->sc_pdmatag);
	if (error)
		return (error);

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MAXBSIZE,
	    GEM_NSEGS, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
	    &sc->sc_dmatag);
	if (error)
		goto fail_0;

	error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), BUS_DMA_ALLOCNOW,
	    &sc->sc_cdmatag);
	if (error)
		goto fail_1;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
	    (void **)&sc->sc_control_data, 0, &sc->sc_cddmamap))) {
		device_printf(sc->sc_dev, "unable to allocate control data,"
		    " error = %d\n", error);
		goto fail_2;
	}

	sc->sc_cddma = 0;
	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct gem_control_data),
	    gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
		device_printf(sc->sc_dev, "unable to load control data DMA "
		    "map, error = %d\n", error);
		goto fail_3;
	}

	/*
	 * Initialize the transmit job descriptors.
	 */
	STAILQ_INIT(&sc->sc_txfreeq);
	STAILQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Create the transmit buffer DMA maps.
	 */
	error = ENOMEM;
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		struct gem_txsoft *txs;

		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		txs->txs_ndescs = 0;
		if ((error = bus_dmamap_create(sc->sc_dmatag, 0,
		    &txs->txs_dmamap)) != 0) {
			device_printf(sc->sc_dev, "unable to create tx DMA map "
			    "%d, error = %d\n", i, error);
			goto fail_4;
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmatag, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			device_printf(sc->sc_dev, "unable to create rx DMA map "
			    "%d, error = %d\n", i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	gem_mifinit(sc);

	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, gem_mediachange,
	    gem_mediastatus)) != 0) {
		device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
		goto fail_5;
	}
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	/*
	 * From this point forward, the attachment cannot fail. A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Announce ourselves. */
	device_printf(sc->sc_dev, "Ethernet address:");
	for (i = 0; i < 6; i++)
		printf("%c%02x", i > 0 ? ':' : ' ', sc->sc_arpcom.ac_enaddr[i]);
	printf("\n");

	/* Initialize ifnet structure. */
	ifp->if_softc = sc;
	ifp->if_unit = device_get_unit(sc->sc_dev);
	ifp->if_name = "gem";
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = gem_start;
	ifp->if_ioctl = gem_ioctl;
	ifp->if_watchdog = gem_watchdog;
	ifp->if_init = gem_init;
	ifp->if_output = ether_output;
	ifp->if_snd.ifq_maxlen = GEM_TXQUEUELEN;
	/*
	 * Walk along the list of attached MII devices and
	 * establish an `MII instance' to `phy number'
	 * mapping. We'll use this mapping in media change
	 * requests to determine which phy to use to program
	 * the MIF configuration register.
	 */
	for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;
	    child = LIST_NEXT(child, mii_list)) {
		/*
		 * Note: we support just two PHYs: the built-in
		 * internal device and an external on the MII
		 * connector.
		 */
		if (child->mii_phy > 1 || child->mii_inst > 1) {
			device_printf(sc->sc_dev, "cannot accommodate "
			    "MII device %s at phy %d, instance %d\n",
			    device_get_name(child->mii_dev),
			    child->mii_phy, child->mii_inst);
			continue;
		}

		sc->sc_phys[child->mii_inst] = child->mii_phy;
	}

	/*
	 * Now select and activate the PHY we will use.
	 *
	 * The order of preference is External (MDI1),
	 * Internal (MDI0), Serial Link (no MII).
	 */
	if (sc->sc_phys[1]) {
#ifdef GEM_DEBUG
		printf("using external phy\n");
#endif
		sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
	} else {
#ifdef GEM_DEBUG
		printf("using internal phy\n");
#endif
		sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
	}
	bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_MIF_CONFIG,
	    sc->sc_mif_config);
	/* Attach the interface. */
	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);

#ifdef notyet
	/*
	 * Add a suspend hook to make sure we come back up after a
	 * resume.
	 */
	sc->sc_powerhook = powerhook_establish(gem_power, sc);
	if (sc->sc_powerhook == NULL)
		device_printf(sc->sc_dev, "WARNING: unable to establish power "
		    "hook\n");
#endif

	callout_init(&sc->sc_tick_ch, 0);
	callout_init(&sc->sc_rx_ch, 0);
	return (0);

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt. Do this in reverse order and fall through.
	 */
fail_5:
	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
fail_4:
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
fail_3:
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
fail_2:
	bus_dma_tag_destroy(sc->sc_cdmatag);
fail_1:
	bus_dma_tag_destroy(sc->sc_dmatag);
fail_0:
	bus_dma_tag_destroy(sc->sc_pdmatag);
	return (error);
}

static void
gem_cddma_callback(xsc, segs, nsegs, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	int error;
{
	struct gem_softc *sc = (struct gem_softc *)xsc;

	if (error != 0)
		return;
	if (nsegs != 1) {
		/* can't happen... */
		panic("gem_cddma_callback: bad control buffer segment count");
	}
	sc->sc_cddma = segs[0].ds_addr;
}

static void
gem_rxdma_callback(xsc, segs, nsegs, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	int error;
{
	struct gem_rxsoft *rxs = (struct gem_rxsoft *)xsc;

	if (error != 0)
		return;
	if (nsegs != 1) {
		/* can't happen... */
		panic("gem_rxdma_callback: bad rx buffer segment count");
	}
	rxs->rxs_paddr = segs[0].ds_addr;
}

/*
 * This is called multiple times in our version of dmamap_load_mbuf, but should
 * be fit for a generic version that only calls it once.
 */
static void
gem_txdma_callback(xsc, segs, nsegs, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	int error;
{
	struct gem_txdma *tx = (struct gem_txdma *)xsc;
	int seg;

	tx->txd_error = error;
	if (error != 0)
		return;
	tx->txd_nsegs = nsegs;

	/*
	 * Initialize the transmit descriptors.
	 */
	for (seg = 0; seg < nsegs;
	    seg++, tx->txd_nexttx = GEM_NEXTTX(tx->txd_nexttx)) {
		uint64_t flags;

		DPRINTF(tx->txd_sc, ("txdma_cb: mapping seg %d (txd %d), len "
		    "%lx, addr %#lx (%#lx)\n", seg, tx->txd_nexttx,
		    segs[seg].ds_len, segs[seg].ds_addr,
		    GEM_DMA_WRITE(tx->txd_sc, segs[seg].ds_addr)));
		CTR5(KTR_GEM, "txdma_cb: mapping seg %d (txd %d), len "
		    "%lx, addr %#lx (%#lx)", seg, tx->txd_nexttx,
		    segs[seg].ds_len, segs[seg].ds_addr,
		    GEM_DMA_WRITE(tx->txd_sc, segs[seg].ds_addr));
		/*
		 * If this is the first descriptor we're
		 * enqueueing, set the start of packet flag,
		 * and the checksum stuff if we want the hardware
		 * to do it.
		 */
		tx->txd_sc->sc_txdescs[tx->txd_nexttx].gd_addr =
		    GEM_DMA_WRITE(tx->txd_sc, segs[seg].ds_addr);
		flags = segs[seg].ds_len & GEM_TD_BUFSIZE;
		if ((tx->txd_flags & GTXD_FIRST) != 0 && seg == 0) {
			CTR2(KTR_GEM, "txdma_cb: start of packet at seg %d, "
			    "tx %d", seg, tx->txd_nexttx);
			flags |= GEM_TD_START_OF_PACKET;
		}
		if ((tx->txd_flags & GTXD_LAST) != 0 && seg == nsegs - 1) {
			CTR2(KTR_GEM, "txdma_cb: end of packet at seg %d, "
			    "tx %d", seg, tx->txd_nexttx);
			flags |= GEM_TD_END_OF_PACKET;
		}
		tx->txd_sc->sc_txdescs[tx->txd_nexttx].gd_flags =
		    GEM_DMA_WRITE(tx->txd_sc, flags);
		tx->txd_lasttx = tx->txd_nexttx;
	}
}

static void
gem_tick(arg)
	void *arg;
{
	struct gem_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(sc->sc_mii);
	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}

static int
gem_bitwait(sc, r, clr, set)
	struct gem_softc *sc;
	bus_addr_t r;
	u_int32_t clr;
	u_int32_t set;
{
	int i;
	u_int32_t reg;

	for (i = TRIES; i--; DELAY(100)) {
		reg = bus_space_read_4(sc->sc_bustag, sc->sc_h, r);
		/* Done when all clr bits are clear and all set bits are set. */
		if ((reg & clr) == 0 && (reg & set) == set)
			return (1);
	}
	return (0);
}

void
gem_reset(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int s;

	s = splnet();
	DPRINTF(sc, ("%s: gem_reset\n", device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_reset", device_get_name(sc->sc_dev));
	gem_reset_rx(sc);
	gem_reset_tx(sc);

	/* Do a full reset */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
		device_printf(sc->sc_dev, "cannot reset device\n");
	splx(s);
}

/*
 * gem_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
gem_rxdrain(sc)
	struct gem_softc *sc;
{
	struct gem_rxsoft *rxs;
	int i;

	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/*
 * Reset the whole thing.
 */
static void
gem_stop(ifp, disable)
	struct ifnet *ifp;
	int disable;
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct gem_txsoft *txs;

	DPRINTF(sc, ("%s: gem_stop\n", device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_stop", device_get_name(sc->sc_dev));

	callout_stop(&sc->sc_tick_ch);

	/* XXX - Should we reset these instead? */
	gem_disable_tx(sc);
	gem_disable_rx(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		if (txs->txs_ndescs != 0) {
			bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
			if (txs->txs_mbuf != NULL) {
				m_freem(txs->txs_mbuf);
				txs->txs_mbuf = NULL;
			}
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	if (disable)
		gem_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/*
 * Reset the receiver.
 */
static int
gem_reset_rx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_rx(sc);
	bus_space_write_4(t, h, GEM_RX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_RX_CONFIG, 1, 0))
		device_printf(sc->sc_dev, "cannot disable rx dma\n");

	/* Wait 5ms extra. */
	DELAY(5000);

	/* Finally, reset the ERX */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX, 0)) {
		device_printf(sc->sc_dev, "cannot reset receiver\n");
		return (1);
	}
	return (0);
}

/*
 * Reset the transmitter.
 */
static int
gem_reset_tx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_tx(sc);
	bus_space_write_4(t, h, GEM_TX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_TX_CONFIG, 1, 0))
		device_printf(sc->sc_dev, "cannot disable tx dma\n");

	/* Wait 5ms extra. */
	DELAY(5000);

	/* Finally, reset the ETX */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_TX);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) {
		device_printf(sc->sc_dev, "cannot reset transmitter\n");
		return (1);
	}
	return (0);
}

/*
 * Disable receiver.
 */
static int
gem_disable_rx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	cfg &= ~GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
}

/*
 * Disable transmitter.
 */
static int
gem_disable_tx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG);
	cfg &= ~GEM_MAC_TX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
}
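/*
 * The four functions above share one shutdown pattern: clear the enable
 * bit, poll with gem_bitwait() until the hardware acknowledges, then hit
 * the reset bit and poll until it self-clears. A possible refactoring
 * (illustrative sketch only, not part of the driver; gem_reset_dma_unit()
 * is a hypothetical helper) could fold gem_reset_rx() and gem_reset_tx()
 * into one routine:
 */
#if 0
static int
gem_reset_dma_unit(struct gem_softc *sc, bus_addr_t cfg_reg,
    u_int32_t rst_bit, int (*disable)(struct gem_softc *), const char *name)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;

	/* Disable DMA before resetting to avoid hanging the bus. */
	(*disable)(sc);
	bus_space_write_4(t, h, cfg_reg, 0);
	if (!gem_bitwait(sc, cfg_reg, 1, 0))
		device_printf(sc->sc_dev, "cannot disable %s dma\n", name);
	DELAY(5000);

	/* Reset the unit and wait for the reset bit to self-clear. */
	bus_space_write_4(t, h, GEM_RESET, rst_bit);
	if (!gem_bitwait(sc, GEM_RESET, rst_bit, 0)) {
		device_printf(sc->sc_dev, "cannot reset %s\n", name);
		return (1);
	}
	return (0);
}
#endif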
/*
 * Initialize interface.
 */
static int
gem_meminit(sc)
	struct gem_softc *sc;
{
	struct gem_rxsoft *rxs;
	int i, error;

	/*
	 * Initialize the transmit descriptor ring.
	 */
	memset((void *)sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	for (i = 0; i < GEM_NTXDESC; i++) {
		sc->sc_txdescs[i].gd_flags = 0;
		sc->sc_txdescs[i].gd_addr = 0;
	}
	GEM_CDTXSYNC(sc, 0, GEM_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = GEM_NTXDESC;
	sc->sc_txnext = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = gem_add_rxbuf(sc, i)) != 0) {
				device_printf(sc->sc_dev, "unable to "
				    "allocate or map rx buffer %d, error = "
				    "%d\n", i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				gem_rxdrain(sc);
				return (1);
			}
		} else
			GEM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	return (0);
}

static int
gem_ringsize(sz)
	int sz;
{
	int v = 0;

	switch (sz) {
	case 32:
		v = GEM_RING_SZ_32;
		break;
	case 64:
		v = GEM_RING_SZ_64;
		break;
	case 128:
		v = GEM_RING_SZ_128;
		break;
	case 256:
		v = GEM_RING_SZ_256;
		break;
	case 512:
		v = GEM_RING_SZ_512;
		break;
	case 1024:
		v = GEM_RING_SZ_1024;
		break;
	case 2048:
		v = GEM_RING_SZ_2048;
		break;
	case 4096:
		v = GEM_RING_SZ_4096;
		break;
	case 8192:
		v = GEM_RING_SZ_8192;
		break;
	default:
		printf("gem: invalid Receive Descriptor ring size\n");
		break;
	}
	return (v);
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
static void
gem_init(xsc)
	void *xsc;
{
	struct gem_softc *sc = (struct gem_softc *)xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int s;
	u_int32_t v;

	s = splnet();

	DPRINTF(sc, ("%s: gem_init: calling stop\n",
	    device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_init: calling stop",
	    device_get_name(sc->sc_dev));
	/*
	 * Initialization sequence. The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	gem_stop(&sc->sc_arpcom.ac_if, 0);
	gem_reset(sc);
	DPRINTF(sc, ("%s: gem_init: restarting\n",
	    device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_init: restarting",
	    device_get_name(sc->sc_dev));

	/* Re-initialize the MIF */
	gem_mifinit(sc);

	/* Call MI reset function if any */
	if (sc->sc_hwreset)
		(*sc->sc_hwreset)(sc);

	/* step 3. Setup data structures in host memory */
	gem_meminit(sc);

	/* step 4. TX MAC registers & counters */
	gem_init_regs(sc);
	/* XXX: VLAN code from NetBSD temporarily removed. */
	bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
	    (ETHER_MAX_LEN + sizeof(struct ether_header)) | (0x2000<<16));

	/* step 5. RX MAC registers & counters */
	gem_setladrf(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	/* NOTE: we use only 32-bit DMA addresses here. */
	bus_space_write_4(t, h, GEM_TX_RING_PTR_HI, 0);
	bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

	bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 0);
	bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
	DPRINTF(sc, ("loading rx ring %lx, tx ring %lx, cddma %lx\n",
	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma));
	CTR3(KTR_GEM, "loading rx ring %lx, tx ring %lx, cddma %lx",
	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);

	/* step 8. Global Configuration & Interrupt Mask */
	bus_space_write_4(t, h, GEM_INTMASK,
	    ~(GEM_INTR_TX_INTME|
	    GEM_INTR_TX_EMPTY|
	    GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF|
	    GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS|
	    GEM_INTR_MAC_CONTROL|GEM_INTR_MIF|
	    GEM_INTR_BERR));
	bus_space_write_4(t, h, GEM_MAC_RX_MASK, 0); /* XXXX */
	bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXXX */
	bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0); /* XXXX */

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = gem_ringsize(GEM_NTXDESC /*XXX*/);
	bus_space_write_4(t, h, GEM_TX_CONFIG,
	    v|GEM_TX_CONFIG_TXDMA_EN|
	    ((0x400<<10)&GEM_TX_CONFIG_TXFIFO_TH));

	/* step 10. ERX Configuration */

	/* Encode Receive Descriptor ring size */
	v = gem_ringsize(GEM_NRXDESC /*XXX*/);

	/* Enable DMA */
	bus_space_write_4(t, h, GEM_RX_CONFIG,
	    v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
	    (2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN|
	    (0<<GEM_RX_CONFIG_CXM_START_SHFT));
	/*
	 * The following value is for an OFF Threshold of about 15.5 Kbytes
	 * and an ON Threshold of 4K bytes.
	 */
	bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH, 0xf8 | (0x40 << 12));
	bus_space_write_4(t, h, GEM_RX_BLANKING, (2<<12)|6);

	/* step 11. Configure Media */
	gem_mii_statchg(sc->sc_dev);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	v |= GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);

	/* step 14. Issue Transmit Pending command */

	/* Call MI initialization function if any */
	if (sc->sc_hwinit)
		(*sc->sc_hwinit)(sc);

	/* step 15. Give the receiver a swift kick */
	bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC-4);

	/* Start the one second timer. */
	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;
	sc->sc_flags = ifp->if_flags;
	splx(s);
}

/*
 * XXX: This is really a substitute for bus_dmamap_load_mbuf(), which FreeBSD
 * does not yet have, with some adaptions for this driver.
 * Some changes are mandated by the fact that multiple maps may be needed to
 * map a single mbuf.
 * It should be removed once generic support is available.
 *
 * This is derived from NetBSD (syssrc/sys/arch/sparc64/sparc64/machdep.c), for
 * a copyright notice see sparc64/sparc64/bus_machdep.c.
 *
 * Not every error condition is passed to the callback in this version, and the
 * callback may be called more than once.
 * It also gropes in the entrails of the callback arg...
 */
static int
gem_dmamap_load_mbuf(sc, m0, cb, txj, flags)
	struct gem_softc *sc;
	struct mbuf *m0;
	bus_dmamap_callback_t *cb;
	struct gem_txjob *txj;
	int flags;
{
	struct gem_txdma txd;
	struct gem_txsoft *txs;
	struct mbuf *m;
	void *vaddr;
	int error, first = 1, len, totlen;

	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("gem_dmamap_load_mbuf: no packet header");
	totlen = m0->m_pkthdr.len;
	len = 0;
	txd.txd_sc = sc;
	txd.txd_nexttx = txj->txj_nexttx;
	txj->txj_nsegs = 0;
	STAILQ_INIT(&txj->txj_txsq);
	m = m0;
	while (m != NULL && len < totlen) {
		if (m->m_len == 0) {
			/* Skip empty mbufs; do not spin on them forever. */
			m = m->m_next;
			continue;
		}
		/* Get a work queue entry. */
		if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
			/*
			 * Ran out of descriptors, return a value that
			 * cannot be returned by bus_dmamap_load to notify
			 * the caller.
			 */
			error = -1;
			goto fail;
		}
		len += m->m_len;
		txd.txd_flags = first ? GTXD_FIRST : 0;
		if (m->m_next == NULL || len >= totlen)
			txd.txd_flags |= GTXD_LAST;
		vaddr = mtod(m, void *);
		error = bus_dmamap_load(sc->sc_dmatag, txs->txs_dmamap, vaddr,
		    m->m_len, cb, &txd, flags);
		if (error != 0 || txd.txd_error != 0)
			goto fail;
		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmatag, txs->txs_dmamap,
		    BUS_DMASYNC_PREWRITE);
		m = m->m_next;
		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		txs->txs_mbuf = first ? m0 : NULL;
		txs->txs_firstdesc = txj->txj_nexttx;
		txs->txs_lastdesc = txd.txd_lasttx;
		txs->txs_ndescs = txd.txd_nsegs;
		CTR3(KTR_GEM, "load_mbuf: setting firstdesc=%d, lastdesc=%d, "
		    "ndescs=%d", txs->txs_firstdesc, txs->txs_lastdesc,
		    txs->txs_ndescs);
		STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
		STAILQ_INSERT_TAIL(&txj->txj_txsq, txs, txs_q);
		txj->txj_nexttx = txd.txd_nexttx;
		txj->txj_nsegs += txd.txd_nsegs;
		first = 0;
	}
	txj->txj_lasttx = txd.txd_lasttx;
	return (0);

fail:
	CTR1(KTR_GEM, "gem_dmamap_load_mbuf failed (%d)", error);
	gem_dmamap_unload_mbuf(sc, txj);
	return (error);
}

/*
 * Unload an mbuf using the txd the information was placed in.
 * The tx interrupt code frees the tx segments one by one, because the txd is
 * not available any more.
 */
static void
gem_dmamap_unload_mbuf(sc, txj)
	struct gem_softc *sc;
	struct gem_txjob *txj;
{
	struct gem_txsoft *txs;

	/* Re-add the removed descriptors and unload the segments. */
	while ((txs = STAILQ_FIRST(&txj->txj_txsq)) != NULL) {
		bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
		STAILQ_REMOVE_HEAD(&txj->txj_txsq, txs_q);
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}
}

static void
gem_dmamap_commit_mbuf(sc, txj)
	struct gem_softc *sc;
	struct gem_txjob *txj;
{
	struct gem_txsoft *txs;

	/* Commit the txjob by transferring the txsofts to the txdirtyq. */
	while ((txs = STAILQ_FIRST(&txj->txj_txsq)) != NULL) {
		STAILQ_REMOVE_HEAD(&txj->txj_txsq, txs_q);
		STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
	}
}
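/*
 * Taken together, the three functions above form a small transaction API:
 * the caller loads a packet into a gem_txjob, checks that the ring has
 * room, and then either commits the job or rolls it back. The fragment
 * below is an illustrative sketch of that lifecycle, modeled on what
 * gem_start() does further down; it is not an additional code path:
 */
#if 0
	struct gem_txjob txj;

	txj.txj_nexttx = sc->sc_txnext;
	if (gem_dmamap_load_mbuf(sc, m0, gem_txdma_callback, &txj,
	    BUS_DMA_NOWAIT) != 0)
		return;		/* nothing to undo; load rolled itself back */
	if (txj.txj_nsegs > sc->sc_txfree - 1) {
		/* Not enough descriptors: undo the load, requeue the mbuf. */
		gem_dmamap_unload_mbuf(sc, &txj);
	} else {
		/* Enough room: advance the ring and commit the segments. */
		sc->sc_txfree -= txj.txj_nsegs;
		sc->sc_txnext = txj.txj_nexttx;
		gem_dmamap_commit_mbuf(sc, &txj);
	}
#endif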
static void
gem_init_regs(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;

	/* These regs are not cleared on reset */
	if (!sc->sc_inited) {

		/* Wooo. Magic values. */
		bus_space_write_4(t, h, GEM_MAC_IPG0, 0);
		bus_space_write_4(t, h, GEM_MAC_IPG1, 8);
		bus_space_write_4(t, h, GEM_MAC_IPG2, 4);

		bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* Max frame and max burst size */
		bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
		    (ifp->if_mtu+18) | (0x2000<<16)/* Burst size */);
		bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x7);
		bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x4);
		bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10);
		/* Dunno.... */
		bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
		bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
		    ((sc->sc_arpcom.ac_enaddr[5]<<8)|
		    sc->sc_arpcom.ac_enaddr[4])&0x3ff);
		/* Secondary MAC addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);
		/* MAC control addr set to 0:1:c2:0:1:80 */
		bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
		bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
		bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);

		/* MAC filter addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);

		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);

		sc->sc_inited = 1;
	}

	/* Counters need to be zeroed */
	bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);

	/* Un-pause stuff */
#if 0
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);
#else
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0);
#endif

	/*
	 * Set the station address.
	 */
	bus_space_write_4(t, h, GEM_MAC_ADDR0,
	    (sc->sc_arpcom.ac_enaddr[4]<<8) | sc->sc_arpcom.ac_enaddr[5]);
	bus_space_write_4(t, h, GEM_MAC_ADDR1,
	    (sc->sc_arpcom.ac_enaddr[2]<<8) | sc->sc_arpcom.ac_enaddr[3]);
	bus_space_write_4(t, h, GEM_MAC_ADDR2,
	    (sc->sc_arpcom.ac_enaddr[0]<<8) | sc->sc_arpcom.ac_enaddr[1]);
}

static void
gem_start(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct mbuf *m0 = NULL, *m;
	struct gem_txjob txj;
	int firsttx, ofree, seg, ntx, txmfail;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;
	firsttx = sc->sc_txnext;

	DPRINTF(sc, ("%s: gem_start: txfree %d, txnext %d\n",
	    device_get_name(sc->sc_dev), ofree, firsttx));
	CTR3(KTR_GEM, "%s: gem_start: txfree %d, txnext %d",
	    device_get_name(sc->sc_dev), ofree, firsttx);

	txj.txj_nexttx = firsttx;
	txj.txj_lasttx = 0;
	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	txmfail = 0;
	for (ntx = 0;; ntx++) {
		/*
		 * Grab a packet off the queue.
		 */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Load the DMA map. If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we were
		 * short on resources. In this case, we'll copy and try
		 * again.
		 */
		txmfail = gem_dmamap_load_mbuf(sc, m0,
		    gem_txdma_callback, &txj, BUS_DMA_NOWAIT);
		if (txmfail == -1) {
			IF_PREPEND(&ifp->if_snd, m0);
			break;
		}
		if (txmfail > 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				device_printf(sc->sc_dev, "unable to "
				    "allocate Tx mbuf\n");
				/* Failed; requeue. */
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					device_printf(sc->sc_dev, "unable to "
					    "allocate Tx cluster\n");
					IF_PREPEND(&ifp->if_snd, m0);
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			txmfail = gem_dmamap_load_mbuf(sc, m,
			    gem_txdma_callback, &txj, BUS_DMA_NOWAIT);
			if (txmfail != 0) {
				if (txmfail > 0) {
					device_printf(sc->sc_dev, "unable to "
					    "load Tx buffer, error = %d\n",
					    txmfail);
				}
				m_freem(m);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet. Note, we always reserve one descriptor
		 * at the end of the ring as a termination point, to
		 * prevent wrap-around.
		 */
		if (txj.txj_nsegs > (sc->sc_txfree - 1)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet. We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt. Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			gem_dmamap_unload_mbuf(sc, &txj);
			if (m != NULL)
				m_freem(m);
			IF_PREPEND(&ifp->if_snd, m0);
			break;
		}

		/* If we copied the packet, free the original. */
		if (m != NULL)
			m_freem(m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf("     gem_start %p transmit chain:\n",
			    STAILQ_FIRST(&txj.txj_txsq));
			for (seg = sc->sc_txnext;; seg = GEM_NEXTTX(seg)) {
				printf("descriptor %d:\t", seg);
				printf("gd_flags: 0x%016llx\t", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[seg].gd_flags));
				printf("gd_addr: 0x%016llx\n", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[seg].gd_addr));
				if (seg == txj.txj_lasttx)
					break;
			}
		}
#endif

		/* Sync the descriptors we're using. */
		GEM_CDTXSYNC(sc, sc->sc_txnext, txj.txj_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->sc_txfree -= txj.txj_nsegs;
		sc->sc_txnext = txj.txj_nexttx;

		gem_dmamap_commit_mbuf(sc, &txj);
	}

	if (txmfail == -1 || sc->sc_txfree == 0) {
		ifp->if_flags |= IFF_OACTIVE;
		/* No more slots left; notify upper layer. */
	}

	if (ntx > 0) {
		DPRINTF(sc, ("%s: packets enqueued, IC on %d, OWN on %d\n",
		    device_get_name(sc->sc_dev), txj.txj_lasttx, firsttx));
		CTR3(KTR_GEM, "%s: packets enqueued, IC on %d, OWN on %d",
		    device_get_name(sc->sc_dev), txj.txj_lasttx, firsttx);
		/*
		 * The entire packet chain is set up.
		 * Kick the transmitter.
		 */
		DPRINTF(sc, ("%s: gem_start: kicking tx %d\n",
		    device_get_name(sc->sc_dev), txj.txj_nexttx));
		CTR3(KTR_GEM, "%s: gem_start: kicking tx %d=%d",
		    device_get_name(sc->sc_dev), txj.txj_nexttx,
		    sc->sc_txnext);
		bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_TX_KICK,
		    sc->sc_txnext);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
		DPRINTF(sc, ("%s: gem_start: watchdog %d\n",
		    device_get_name(sc->sc_dev), ifp->if_timer));
		CTR2(KTR_GEM, "%s: gem_start: watchdog %d",
		    device_get_name(sc->sc_dev), ifp->if_timer);
	}
}

/*
 * Transmit interrupt.
 */
static void
gem_tint(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h;
	struct gem_txsoft *txs;
	int txlast;

	DPRINTF(sc, ("%s: gem_tint\n", device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_tint", device_get_name(sc->sc_dev));

	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
	    bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);

	/*
	 * then clear the hardware counters.
	 */
	bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		GEM_CDTXSYNC(sc, txs->txs_lastdesc,
		    txs->txs_ndescs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			int i;
			printf("    txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
				printf("descriptor %d: ", i);
				printf("gd_flags: 0x%016llx\t", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags));
				printf("gd_addr: 0x%016llx\n", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr));
				if (i == txs->txs_lastdesc)
					break;
			}
		}
#endif

		/*
		 * In theory, we could harvest some descriptors before
		 * the ring is empty, but that's a bit complicated.
		 *
		 * GEM_TX_COMPLETION points to the last descriptor
		 * processed +1.
		 */
		txlast = bus_space_read_4(t, mac, GEM_TX_COMPLETION);
		DPRINTF(sc,
		    ("gem_tint: txs->txs_lastdesc = %d, txlast = %d\n",
		    txs->txs_lastdesc, txlast));
		CTR3(KTR_GEM, "gem_tint: txs->txs_firstdesc = %d, "
		    "txs->txs_lastdesc = %d, txlast = %d",
		    txs->txs_firstdesc, txs->txs_lastdesc, txlast);
		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
			if ((txlast >= txs->txs_firstdesc) &&
			    (txlast <= txs->txs_lastdesc))
				break;
		} else {
			/* Ick -- this command wraps */
			if ((txlast >= txs->txs_firstdesc) ||
			    (txlast <= txs->txs_lastdesc))
				break;
		}

		DPRINTF(sc, ("gem_tint: releasing a desc\n"));
		CTR0(KTR_GEM, "gem_tint: releasing a desc");
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		sc->sc_txfree += txs->txs_ndescs;

		bus_dmamap_sync(sc->sc_dmatag, txs->txs_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
		if (txs->txs_mbuf != NULL) {
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		ifp->if_opackets++;
	}

	DPRINTF(sc, ("gem_tint: GEM_TX_STATE_MACHINE %x "
	    "GEM_TX_DATA_PTR %llx "
	    "GEM_TX_COMPLETION %x\n",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_STATE_MACHINE),
	    ((long long) bus_space_read_4(sc->sc_bustag, sc->sc_h,
	    GEM_TX_DATA_PTR_HI) << 32) |
	    bus_space_read_4(sc->sc_bustag, sc->sc_h,
	    GEM_TX_DATA_PTR_LO),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_COMPLETION)));
	CTR3(KTR_GEM, "gem_tint: GEM_TX_STATE_MACHINE %x "
	    "GEM_TX_DATA_PTR %llx "
	    "GEM_TX_COMPLETION %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_STATE_MACHINE),
	    ((long long) bus_space_read_4(sc->sc_bustag, sc->sc_h,
	    GEM_TX_DATA_PTR_HI) << 32) |
	    bus_space_read_4(sc->sc_bustag, sc->sc_h,
	    GEM_TX_DATA_PTR_LO),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_COMPLETION));

	if (STAILQ_FIRST(&sc->sc_txdirtyq) == NULL)
		ifp->if_timer = 0;

	DPRINTF(sc, ("%s: gem_tint: watchdog %d\n",
	    device_get_name(sc->sc_dev), ifp->if_timer));
	CTR2(KTR_GEM, "%s: gem_tint: watchdog %d",
	    device_get_name(sc->sc_dev), ifp->if_timer);

	/* Freed some descriptors, so reset IFF_OACTIVE and restart. */
	ifp->if_flags &= ~IFF_OACTIVE;
	gem_start(ifp);
}
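/*
 * The completion test above is subtle: GEM_TX_COMPLETION yields the index
 * one past the last descriptor the chip has processed, and a packet's
 * descriptor range [txs_firstdesc, txs_lastdesc] may wrap around the end
 * of the ring. An equivalent standalone predicate (illustrative sketch
 * only; gem_txdesc_pending() is a hypothetical helper, not driver code):
 */
#if 0
static int
gem_txdesc_pending(int firstdesc, int lastdesc, int txlast)
{

	if (firstdesc <= lastdesc)
		/* Contiguous range: pending if txlast lies inside it. */
		return (txlast >= firstdesc && txlast <= lastdesc);
	/* Wrapped range: pending if txlast lies in either piece. */
	return (txlast >= firstdesc || txlast <= lastdesc);
}
#endif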
static void
gem_rint_timeout(arg)
	void *arg;
{

	gem_rint((struct gem_softc *)arg);
}

/*
 * Receive interrupt.
 */
static void
gem_rint(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	struct ether_header *eh;
	struct gem_rxsoft *rxs;
	struct mbuf *m;
	u_int64_t rxstat;
	int i, len;

	callout_stop(&sc->sc_rx_ch);
	DPRINTF(sc, ("%s: gem_rint\n", device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_rint", device_get_name(sc->sc_dev));
	/*
	 * XXXX Read the lastrx only once at the top for speed.
	 */
	DPRINTF(sc, ("gem_rint: sc->rxptr %d, complete %d\n",
	    sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION)));
	CTR2(KTR_GEM, "gem_rint: sc->rxptr %d, complete %d",
	    sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION));
	for (i = sc->sc_rxptr; i != bus_space_read_4(t, h, GEM_RX_COMPLETION);
	    i = GEM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		GEM_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);

		if (rxstat & GEM_RD_OWN) {
			/*
			 * The descriptor is still marked as owned, although
			 * it is supposed to have completed. This has been
			 * observed on some machines. Just exiting here
			 * might leave the packet sitting around until another
			 * one arrives to trigger a new interrupt, which is
			 * generally undesirable, so set up a timeout.
			 */
			callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
			    gem_rint_timeout, sc);
			break;
		}

		if (rxstat & GEM_RD_BAD_CRC) {
			device_printf(sc->sc_dev, "receive error: CRC error\n");
			GEM_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap,
		    BUS_DMASYNC_POSTREAD);
#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf("    rxsoft %p descriptor %d: ", rxs, i);
			printf("gd_flags: 0x%016llx\t", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
			printf("gd_addr: 0x%016llx\n", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
		}
#endif

		/*
		 * No errors; receive the packet. Note the Gem
		 * includes the CRC with every packet.
		 */
		len = GEM_RD_BUFLEN(rxstat);

		/*
		 * Allocate a new mbuf cluster. If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = rxs->rxs_mbuf;
		if (gem_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			GEM_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap,
			    BUS_DMASYNC_PREREAD);
			continue;
		}
		m->m_data += 2; /* We're already off by two */

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len - ETHER_CRC_LEN;
		m_adj(m, sizeof(struct ether_header));

		/* Pass it on. */
		ether_input(ifp, eh, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
	bus_space_write_4(t, h, GEM_RX_KICK, i);

	DPRINTF(sc, ("gem_rint: done sc->rxptr %d, complete %d\n",
	    sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION)));
	CTR2(KTR_GEM, "gem_rint: done sc->rxptr %d, complete %d",
	    sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION));
}

/*
 * gem_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
gem_add_rxbuf(sc, idx)
	struct gem_softc *sc;
	int idx;
{
	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

#ifdef GEM_DEBUG
	/* bzero the packet to check dma */
	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
#endif

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmatag, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, gem_rxdma_callback, rxs,
	    BUS_DMA_NOWAIT);
	if (error != 0 || rxs->rxs_paddr == 0) {
		device_printf(sc->sc_dev, "can't load rx DMA map %d, error = "
		    "%d\n", idx, error);
		panic("gem_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD);

	GEM_INIT_RXDESC(sc, idx);

	return (0);
}

static void
gem_eint(sc, status)
	struct gem_softc *sc;
	u_int status;
{

	if ((status & GEM_INTR_MIF) != 0) {
		device_printf(sc->sc_dev, "XXXlink status changed\n");
		return;
	}

	device_printf(sc->sc_dev, "status=%x\n", status);
}

void
gem_intr(v)
	void *v;
{
	struct gem_softc *sc = (struct gem_softc *)v;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_h;
	u_int32_t status;

	status = bus_space_read_4(t, seb, GEM_STATUS);
	DPRINTF(sc, ("%s: gem_intr: cplt %x, status %x\n",
	    device_get_name(sc->sc_dev), (status>>19),
	    (u_int)status));
	CTR3(KTR_GEM, "%s: gem_intr: cplt %x, status %x",
	    device_get_name(sc->sc_dev), (status>>19),
	    (u_int)status);

	if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
		gem_eint(sc, status);

	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
		gem_tint(sc);

	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
		gem_rint(sc);

	/* We should eventually do more than just print out error stats. */
	if (status & GEM_INTR_TX_MAC) {
		int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS);
		if (txstat & ~GEM_MAC_TX_XMIT_DONE)
			printf("MAC tx fault, status %x\n", txstat);
	}
	if (status & GEM_INTR_RX_MAC) {
		int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS);
		if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
			printf("MAC rx fault, status %x\n", rxstat);
	}
}

static void
gem_watchdog(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = ifp->if_softc;

	DPRINTF(sc, ("gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
	    "GEM_MAC_RX_CONFIG %x\n",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG)));
	CTR3(KTR_GEM, "gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
	    "GEM_MAC_RX_CONFIG %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG));
	CTR3(KTR_GEM, "gem_watchdog: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x "
	    "GEM_MAC_TX_CONFIG %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_CONFIG));

	device_printf(sc->sc_dev, "device timeout\n");
	++ifp->if_oerrors;

	/* Try to get more packets going. */
	gem_start(ifp);
}

/*
 * Initialize the MII Management Interface.
 */
static void
gem_mifinit(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;

	/* Configure the MIF in frame mode */
	sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config);
}

/*
 * MII interface
 *
 * The GEM MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
 */
int
gem_mii_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{
	struct gem_softc *sc = device_get_softc(dev);
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG_PHY
	printf("gem_mii_readreg: phy %d reg %d\n", phy, reg);
#endif

#if 0
	/* Select the desired PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	/* Clear PHY select bit */
	v &= ~GEM_MIF_CONFIG_PHY_SEL;
	if (phy == GEM_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= GEM_MIF_CONFIG_PHY_SEL;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, v);
#endif

	/* Construct the frame command */
	v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) |
	    GEM_MIF_FRAME_READ;

	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (v & GEM_MIF_FRAME_DATA);
	}

	device_printf(sc->sc_dev, "mii_read timeout\n");
	return (0);
}

int
gem_mii_writereg(dev, phy, reg, val)
	device_t dev;
	int phy, reg, val;
{
	struct gem_softc *sc = device_get_softc(dev);
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;
	int n;
	u_int32_t v;

#ifdef GEM_DEBUG_PHY
	printf("gem_mii_writereg: phy %d reg %d val %x\n", phy, reg, val);
#endif

#if 0
	/* Select the desired PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	/* Clear PHY select bit */
	v &= ~GEM_MIF_CONFIG_PHY_SEL;
	if (phy == GEM_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= GEM_MIF_CONFIG_PHY_SEL;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, v);
#endif
	/* Construct the frame command */
	v = GEM_MIF_FRAME_WRITE |
	    (phy << GEM_MIF_PHY_SHIFT) |
	    (reg << GEM_MIF_REG_SHIFT) |
	    (val & GEM_MIF_FRAME_DATA);

	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
		if (v & GEM_MIF_FRAME_TA0)
			return (1);
	}

	device_printf(sc->sc_dev, "mii_write timeout\n");
	return (0);
}
void
gem_mii_statchg(dev)
	device_t dev;
{
	struct gem_softc *sc = device_get_softc(dev);
#ifdef GEM_DEBUG
	int instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media);
#endif
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h;
	u_int32_t v;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_mii_statchg: status change: phy = %d\n",
		    sc->sc_phys[instance]);
#endif

	/* Set tx full duplex options */
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0);
	DELAY(10000); /* reg must be cleared and delay before changing. */
	v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT|
	    GEM_MAC_TX_ENABLE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) {
		v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS;
	}
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v);

	/* XIF Configuration */
	/* We should really calculate all this rather than rely on defaults */
	v = bus_space_read_4(t, mac, GEM_MAC_XIF_CONFIG);
	v = GEM_MAC_XIF_LINK_LED;
	v |= GEM_MAC_XIF_TX_MII_ENA;
	/* If an external transceiver is connected, enable its MII drivers */
	sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG);
	if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) {
		/* External MII needs echo disable if half duplex. */
		if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
			/* turn on full duplex LED */
			v |= GEM_MAC_XIF_FDPLX_LED;
		else
			/* half duplex -- disable echo */
			v |= GEM_MAC_XIF_ECHO_DISABL;
	} else {
		/* Internal MII needs buf enable */
		v |= GEM_MAC_XIF_MII_BUF_ENA;
	}
	bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v);
}

int
gem_mediachange(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = ifp->if_softc;

	/* XXX Add support for serial media. */

	return (mii_mediachg(sc->sc_mii));
}

void
gem_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct gem_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii->mii_media_active;
	ifmr->ifm_status = sc->sc_mii->mii_media_status;
}

/*
 * Process an ioctl request.
 */
static int
gem_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct gem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();
	switch (cmd) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
	case SIOCSIFMTU:
		error = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((sc->sc_flags ^ ifp->if_flags) == IFF_PROMISC)
				gem_setladrf(sc);
			else
				gem_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				gem_stop(ifp, 0);
		}
		sc->sc_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		gem_setladrf(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	default:
		error = ENOTTY;
		break;
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		gem_start(ifp);
	splx(s);
	return (error);
}

/*
 * Set up the logical address filter.
 */
static void
gem_setladrf(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ifmultiaddr *inm;
	struct sockaddr_dl *sdl;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_char *cp;
	u_int32_t crc;
	u_int32_t hash[16];
	u_int32_t v;
	int i, len;

	/* Clear hash table */
	memset(hash, 0, sizeof(hash));

	/* Get current RX configuration */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		/* Turn on promiscuous mode; turn off the hash filter */
		v |= GEM_MAC_RX_PROMISCUOUS;
		v &= ~GEM_MAC_RX_HASH_FILTER;
		goto chipit;
	}
	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		/* Accept all multicast: set all 16 filter words to ones. */
		for (i = 0; i < 16; i++)
			hash[i] = 0xffff;
		ifp->if_flags |= IFF_ALLMULTI;
		goto chipit;
	}

	/* Turn off promiscuous mode; turn on the hash filter */
	v &= ~GEM_MAC_RX_PROMISCUOUS;
	v |= GEM_MAC_RX_HASH_FILTER;

	/*
	 * Set up multicast address filter by passing all multicast addresses
	 * through a crc generator, and then using the high order 8 bits as an
	 * index into the 256 bit logical address filter. The high order 4
	 * bits select the word, while the low order 4 bits select the bit
	 * within the word.
	 */

	TAILQ_FOREACH(inm, &sc->sc_arpcom.ac_if.if_multiaddrs, ifma_link) {
		if (inm->ifma_addr->sa_family != AF_LINK)
			continue;
		sdl = (struct sockaddr_dl *)inm->ifma_addr;
		cp = LLADDR(sdl);
		crc = 0xffffffff;
		for (len = sdl->sdl_alen; --len >= 0;) {
			int octet = *cp++;
			int i;

#define	MC_POLY_LE	0xedb88320UL	/* mcast crc, little endian */
			for (i = 0; i < 8; i++) {
				if ((crc & 1) ^ (octet & 1)) {
					crc >>= 1;
					crc ^= MC_POLY_LE;
				} else {
					crc >>= 1;
				}
				octet >>= 1;
			}
		}
		/* Just want the 8 most significant bits. */
		crc >>= 24;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (crc & 0xf);
	}

chipit:
	/* Now load the hash table into the chip */
	bus_space_write_4(t, h, GEM_MAC_HASH0, hash[0]);
	bus_space_write_4(t, h, GEM_MAC_HASH1, hash[1]);
	bus_space_write_4(t, h, GEM_MAC_HASH2, hash[2]);
	bus_space_write_4(t, h, GEM_MAC_HASH3, hash[3]);
	bus_space_write_4(t, h, GEM_MAC_HASH4, hash[4]);
	bus_space_write_4(t, h, GEM_MAC_HASH5, hash[5]);
	bus_space_write_4(t, h, GEM_MAC_HASH6, hash[6]);
	bus_space_write_4(t, h, GEM_MAC_HASH7, hash[7]);
	bus_space_write_4(t, h, GEM_MAC_HASH8, hash[8]);
	bus_space_write_4(t, h, GEM_MAC_HASH9, hash[9]);
	bus_space_write_4(t, h, GEM_MAC_HASH10, hash[10]);
	bus_space_write_4(t, h, GEM_MAC_HASH11, hash[11]);
	bus_space_write_4(t, h, GEM_MAC_HASH12, hash[12]);
	bus_space_write_4(t, h, GEM_MAC_HASH13, hash[13]);
	bus_space_write_4(t, h, GEM_MAC_HASH14, hash[14]);
	bus_space_write_4(t, h, GEM_MAC_HASH15, hash[15]);

	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);
}
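/*
 * To make the hashing above concrete: for each multicast address the loop
 * computes a little-endian CRC-32 and keeps the top 8 bits, which then
 * address one bit in the 16 x 16-bit filter words. A hypothetical
 * standalone helper doing the same computation could look like this
 * (illustrative sketch only; gem_mchash() is not driver code):
 */
#if 0
static u_int32_t
gem_mchash(const u_char *addr, int alen)
{
	u_int32_t crc = 0xffffffff;
	int i, j, octet;

	for (i = 0; i < alen; i++) {
		octet = addr[i];
		/* Bitwise little-endian CRC-32, same as the loop above. */
		for (j = 0; j < 8; j++) {
			if ((crc ^ octet) & 1)
				crc = (crc >> 1) ^ MC_POLY_LE;
			else
				crc >>= 1;
			octet >>= 1;
		}
	}
	/* Caller does: hash[result >> 4] |= 1 << (result & 0xf); */
	return (crc >> 24);
}
#endif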
#ifdef notyet

/*
 * gem_power:
 *
 *	Power management (suspend/resume) hook.
 */
static void
gem_power(why, arg)
	int why;
	void *arg;
{
	struct gem_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	switch (why) {
	case PWR_SUSPEND:
	case PWR_STANDBY:
		gem_stop(ifp, 1);
		if (sc->sc_power != NULL)
			(*sc->sc_power)(sc, why);
		break;
	case PWR_RESUME:
		if (ifp->if_flags & IFF_UP) {
			if (sc->sc_power != NULL)
				(*sc->sc_power)(sc, why);
			gem_init(sc);
		}
		break;
	case PWR_SOFTSUSPEND:
	case PWR_SOFTSTANDBY:
	case PWR_SOFTRESUME:
		break;
	}
	splx(s);
}
#endif