/*
 * Copyright (C) 2001 Eduardo Horvath.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: NetBSD: gem.c,v 1.9 2001/10/21 20:45:15 thorpej Exp
 *
 * $FreeBSD$
 */

/*
 * Driver for Sun GEM ethernet controllers.
 */

#define GEM_DEBUG

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <gem/if_gemreg.h>
#include <gem/if_gemvar.h>

#define TRIES	10000

static void	gem_start(struct ifnet *);
static void	gem_stop(struct ifnet *, int);
static int	gem_ioctl(struct ifnet *, u_long, caddr_t);
static void	gem_cddma_callback(void *, bus_dma_segment_t *, int, int);
static void	gem_rxdma_callback(void *, bus_dma_segment_t *, int, int);
static void	gem_txdma_callback(void *, bus_dma_segment_t *, int, int);
static void	gem_tick(void *);
static void	gem_watchdog(struct ifnet *);
static void	gem_init(void *);
static void	gem_init_regs(struct gem_softc *sc);
static int	gem_ringsize(int sz);
static int	gem_meminit(struct gem_softc *);
static int	gem_dmamap_load_mbuf(struct gem_softc *, struct mbuf *,
    bus_dmamap_callback_t *, struct gem_txjob *, int);
static void	gem_dmamap_unload_mbuf(struct gem_softc *, struct gem_txjob *);
static void	gem_dmamap_commit_mbuf(struct gem_softc *, struct gem_txjob *);
static void	gem_mifinit(struct gem_softc *);
static int	gem_bitwait(struct gem_softc *sc, bus_addr_t r,
    u_int32_t clr, u_int32_t set);
static int	gem_reset_rx(struct gem_softc *);
static int	gem_reset_tx(struct gem_softc *);
static int	gem_disable_rx(struct gem_softc *);
static int	gem_disable_tx(struct gem_softc *);
static void	gem_rxdrain(struct gem_softc *);
static int	gem_add_rxbuf(struct gem_softc *, int);
static void	gem_setladrf(struct gem_softc *);

struct mbuf	*gem_get(struct gem_softc *, int, int);
static void	gem_eint(struct gem_softc *, u_int);
static void	gem_rint(struct gem_softc *);
static void	gem_tint(struct gem_softc *);
#ifdef notyet
static void	gem_power(int, void *);
#endif

devclass_t gem_devclass;
DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(gem, miibus, 1, 1, 1);

#ifdef GEM_DEBUG
#define DPRINTF(sc, x)	if ((sc)->sc_arpcom.ac_if.if_flags & IFF_DEBUG) \
				printf x
#include <sys/ktr.h>
#define KTR_GEM		KTR_CT2
#else
#define DPRINTF(sc, x)	/* nothing */
#endif

#define GEM_NSEGS GEM_NTXSEGS

/*
 * gem_attach:
 *
 *	Attach a Gem interface to the system.
 */
int
gem_attach(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_softc *child;
	int i, error;

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	gem_reset(sc);

	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, GEM_NSEGS,
	    BUS_SPACE_MAXSIZE_32BIT, 0, &sc->sc_pdmatag);
	if (error)
		return (error);

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MAXBSIZE,
	    GEM_NSEGS, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
	    &sc->sc_dmatag);
	if (error)
		goto fail_0;

	error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), BUS_DMA_ALLOCNOW,
	    &sc->sc_cdmatag);
	if (error)
		goto fail_1;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
	    (void **)&sc->sc_control_data, 0, &sc->sc_cddmamap))) {
		device_printf(sc->sc_dev, "unable to allocate control data,"
		    " error = %d\n", error);
		goto fail_2;
	}

	sc->sc_cddma = 0;
	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct gem_control_data),
	    gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
		device_printf(sc->sc_dev, "unable to load control data DMA "
		    "map, error = %d\n", error);
		goto fail_3;
	}
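
	/*
	 * Note (descriptive): each gem_txsoft pairs one tx DMA map with the
	 * mbuf loaded into it.  Unused entries sit on sc_txfreeq; once a
	 * packet is committed to the ring they move to sc_txdirtyq, where
	 * gem_tint() reclaims them in FIFO order.
	 */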

	/*
	 * Initialize the transmit job descriptors.
	 */
	STAILQ_INIT(&sc->sc_txfreeq);
	STAILQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Create the transmit buffer DMA maps.
	 */
	error = ENOMEM;
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		struct gem_txsoft *txs;

		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		txs->txs_ndescs = 0;
		if ((error = bus_dmamap_create(sc->sc_dmatag, 0,
		    &txs->txs_dmamap)) != 0) {
			device_printf(sc->sc_dev, "unable to create tx DMA map "
			    "%d, error = %d\n", i, error);
			goto fail_4;
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmatag, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			device_printf(sc->sc_dev, "unable to create rx DMA map "
			    "%d, error = %d\n", i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	gem_mifinit(sc);

	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, gem_mediachange,
	    gem_mediastatus)) != 0) {
		device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
		goto fail_5;
	}
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Announce ourselves. */
	device_printf(sc->sc_dev, "Ethernet address:");
	for (i = 0; i < 6; i++)
		printf("%c%02x", i > 0 ? ':' : ' ', sc->sc_arpcom.ac_enaddr[i]);
	printf("\n");

	/* Initialize ifnet structure. */
	ifp->if_softc = sc;
	ifp->if_unit = device_get_unit(sc->sc_dev);
	ifp->if_name = "gem";
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = gem_start;
	ifp->if_ioctl = gem_ioctl;
	ifp->if_watchdog = gem_watchdog;
	ifp->if_init = gem_init;
	ifp->if_output = ether_output;
	ifp->if_snd.ifq_maxlen = GEM_TXQUEUELEN;
	/*
	 * Walk along the list of attached MII devices and
	 * establish an `MII instance' to `phy number'
	 * mapping.  We'll use this mapping in media change
	 * requests to determine which phy to use to program
	 * the MIF configuration register.
	 */
	for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;
	    child = LIST_NEXT(child, mii_list)) {
		/*
		 * Note: we support just two PHYs: the built-in
		 * internal device and an external on the MII
		 * connector.
		 */
		if (child->mii_phy > 1 || child->mii_inst > 1) {
			device_printf(sc->sc_dev, "cannot accommodate "
			    "MII device %s at phy %d, instance %d\n",
			    device_get_name(child->mii_dev),
			    child->mii_phy, child->mii_inst);
			continue;
		}

		sc->sc_phys[child->mii_inst] = child->mii_phy;
	}

	/*
	 * Now select and activate the PHY we will use.
	 *
	 * The order of preference is External (MDI1),
	 * Internal (MDI0), Serial Link (no MII).
	 */
	if (sc->sc_phys[1]) {
#ifdef GEM_DEBUG
		printf("using external phy\n");
#endif
		sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
	} else {
#ifdef GEM_DEBUG
		printf("using internal phy\n");
#endif
		sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
	}
	bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_MIF_CONFIG,
	    sc->sc_mif_config);
	/* Attach the interface. */
	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);

#ifdef notyet
	/*
	 * Add a suspend hook to make sure we come back up after a
	 * resume.
	 */
	sc->sc_powerhook = powerhook_establish(gem_power, sc);
	if (sc->sc_powerhook == NULL)
		device_printf(sc->sc_dev, "WARNING: unable to establish power "
		    "hook\n");
#endif

	callout_init(&sc->sc_tick_ch, 0);
	return (0);

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_5:
	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
fail_4:
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
fail_3:
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
fail_2:
	bus_dma_tag_destroy(sc->sc_cdmatag);
fail_1:
	bus_dma_tag_destroy(sc->sc_dmatag);
fail_0:
	bus_dma_tag_destroy(sc->sc_pdmatag);
	return (error);
}

static void
gem_cddma_callback(xsc, segs, nsegs, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	int error;
{
	struct gem_softc *sc = (struct gem_softc *)xsc;

	if (error != 0)
		return;
	if (nsegs != 1) {
		/* can't happen... */
		panic("gem_cddma_callback: bad control buffer segment count");
	}
	sc->sc_cddma = segs[0].ds_addr;
}

static void
gem_rxdma_callback(xsc, segs, nsegs, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	int error;
{
	struct gem_rxsoft *rxs = (struct gem_rxsoft *)xsc;

	if (error != 0)
		return;
	if (nsegs != 1) {
		/* can't happen... */
		panic("gem_rxdma_callback: bad rx buffer segment count");
	}
	rxs->rxs_paddr = segs[0].ds_addr;
}
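
/*
 * Note on the gem_txdma cookie used by the following callback (descriptive):
 * the caller seeds txd_nexttx with the first free ring index and txd_flags
 * with GTXD_FIRST/GTXD_LAST; the callback writes one descriptor per DMA
 * segment and records the index of the last descriptor used in txd_lasttx.
 */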

/*
 * This is called multiple times in our version of dmamap_load_mbuf, but
 * should be suitable for a generic version that calls it only once.
 */
static void
gem_txdma_callback(xsc, segs, nsegs, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	int error;
{
	struct gem_txdma *tx = (struct gem_txdma *)xsc;
	int seg;

	tx->txd_error = error;
	if (error != 0)
		return;
	tx->txd_nsegs = nsegs;

	/*
	 * Initialize the transmit descriptors.
	 */
	for (seg = 0; seg < nsegs;
	    seg++, tx->txd_nexttx = GEM_NEXTTX(tx->txd_nexttx)) {
		uint64_t flags;

		DPRINTF(tx->txd_sc, ("txdma_cb: mapping seg %d (txd %d), len "
		    "%lx, addr %#lx (%#lx)\n", seg, tx->txd_nexttx,
		    segs[seg].ds_len, segs[seg].ds_addr,
		    GEM_DMA_WRITE(tx->txd_sc, segs[seg].ds_addr)));
		CTR5(KTR_GEM, "txdma_cb: mapping seg %d (txd %d), len "
		    "%lx, addr %#lx (%#lx)", seg, tx->txd_nexttx,
		    segs[seg].ds_len, segs[seg].ds_addr,
		    GEM_DMA_WRITE(tx->txd_sc, segs[seg].ds_addr));
		/*
		 * If this is the first descriptor we're
		 * enqueueing, set the start of packet flag,
		 * and the checksum stuff if we want the hardware
		 * to do it.
		 */
		tx->txd_sc->sc_txdescs[tx->txd_nexttx].gd_addr =
		    GEM_DMA_WRITE(tx->txd_sc, segs[seg].ds_addr);
		flags = segs[seg].ds_len & GEM_TD_BUFSIZE;
		if ((tx->txd_flags & GTXD_FIRST) != 0 && seg == 0) {
			CTR2(KTR_GEM, "txdma_cb: start of packet at seg %d, "
			    "tx %d", seg, tx->txd_nexttx);
			flags |= GEM_TD_START_OF_PACKET;
		}
		if ((tx->txd_flags & GTXD_LAST) != 0 && seg == nsegs - 1) {
			CTR2(KTR_GEM, "txdma_cb: end of packet at seg %d, "
			    "tx %d", seg, tx->txd_nexttx);
			flags |= GEM_TD_END_OF_PACKET;
		}
		tx->txd_sc->sc_txdescs[tx->txd_nexttx].gd_flags =
		    GEM_DMA_WRITE(tx->txd_sc, flags);
		tx->txd_lasttx = tx->txd_nexttx;
	}
}

static void
gem_tick(arg)
	void *arg;
{
	struct gem_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(sc->sc_mii);
	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}

/*
 * Poll register r until the bits in clr read back as zero and the bits in
 * set read back as set, or the poll times out.
 */
static int
gem_bitwait(sc, r, clr, set)
	struct gem_softc *sc;
	bus_addr_t r;
	u_int32_t clr;
	u_int32_t set;
{
	int i;
	u_int32_t reg;

	for (i = TRIES; i--; DELAY(100)) {
		reg = bus_space_read_4(sc->sc_bustag, sc->sc_h, r);
		if ((reg & clr) == 0 && (reg & set) == set)
			return (1);
	}
	return (0);
}

void
gem_reset(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int s;

	s = splnet();
	DPRINTF(sc, ("%s: gem_reset\n", device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_reset", device_get_name(sc->sc_dev));
	gem_reset_rx(sc);
	gem_reset_tx(sc);

	/* Do a full reset */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
		device_printf(sc->sc_dev, "cannot reset device\n");
	splx(s);
}

/*
 * gem_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
gem_rxdrain(sc)
	struct gem_softc *sc;
{
	struct gem_rxsoft *rxs;
	int i;

	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/*
 * Reset the whole thing.
 */
static void
gem_stop(ifp, disable)
	struct ifnet *ifp;
	int disable;
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct gem_txsoft *txs;

	DPRINTF(sc, ("%s: gem_stop\n", device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_stop", device_get_name(sc->sc_dev));

	callout_stop(&sc->sc_tick_ch);

	/* XXX - Should we reset these instead? */
	gem_disable_tx(sc);
	gem_disable_rx(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		if (txs->txs_ndescs != 0) {
			bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
			if (txs->txs_mbuf != NULL) {
				m_freem(txs->txs_mbuf);
				txs->txs_mbuf = NULL;
			}
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	if (disable)
		gem_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/*
 * Reset the receiver
 */
static int
gem_reset_rx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_rx(sc);
	bus_space_write_4(t, h, GEM_RX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_RX_CONFIG, 1, 0))
		device_printf(sc->sc_dev, "cannot disable rx dma\n");

	/* Wait 5ms extra. */
	DELAY(5000);

	/* Finally, reset the ERX */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX, 0)) {
		device_printf(sc->sc_dev, "cannot reset receiver\n");
		return (1);
	}
	return (0);
}

/*
 * Reset the transmitter
 */
static int
gem_reset_tx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int i;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	gem_disable_tx(sc);
	bus_space_write_4(t, h, GEM_TX_CONFIG, 0);
	/* Wait till it finishes */
	if (!gem_bitwait(sc, GEM_TX_CONFIG, 1, 0))
		device_printf(sc->sc_dev, "cannot disable tx dma\n");

	/* Wait 5ms extra. */
	DELAY(5000);

	/* Finally, reset the ETX */
	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_TX);
	/* Wait till it finishes */
	for (i = TRIES; i--; DELAY(100))
		if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_TX) == 0)
			break;
	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) {
		device_printf(sc->sc_dev, "cannot reset transmitter\n");
		return (1);
	}
	return (0);
}

/*
 * disable receiver.
 */
static int
gem_disable_rx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	cfg &= ~GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
}

/*
 * disable transmitter.
 */
static int
gem_disable_tx(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG);
	cfg &= ~GEM_MAC_TX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg);

	/* Wait for it to finish */
	return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
}
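
/*
 * Note on descriptor ownership (descriptive): gem_meminit() below leaves all
 * transmit descriptors zeroed, i.e. owned by the host, while each receive
 * descriptor is handed to the chip via GEM_INIT_RXDESC(), which is expected
 * to set GEM_RD_OWN -- gem_rint() later polls that bit to decide whether the
 * chip is done with a buffer.
 */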

/*
 * Initialize the transmit and receive descriptor rings.
 */
static int
gem_meminit(sc)
	struct gem_softc *sc;
{
	struct gem_rxsoft *rxs;
	int i, error;

	/*
	 * Initialize the transmit descriptor ring.
	 */
	memset((void *)sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	for (i = 0; i < GEM_NTXDESC; i++) {
		sc->sc_txdescs[i].gd_flags = 0;
		sc->sc_txdescs[i].gd_addr = 0;
	}
	GEM_CDTXSYNC(sc, 0, GEM_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = GEM_NTXDESC;
	sc->sc_txnext = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = gem_add_rxbuf(sc, i)) != 0) {
				device_printf(sc->sc_dev, "unable to "
				    "allocate or map rx buffer %d, error = "
				    "%d\n", i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				gem_rxdrain(sc);
				return (1);
			}
		} else
			GEM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	return (0);
}

static int
gem_ringsize(sz)
	int sz;
{
	int v = 0;

	switch (sz) {
	case 32:
		v = GEM_RING_SZ_32;
		break;
	case 64:
		v = GEM_RING_SZ_64;
		break;
	case 128:
		v = GEM_RING_SZ_128;
		break;
	case 256:
		v = GEM_RING_SZ_256;
		break;
	case 512:
		v = GEM_RING_SZ_512;
		break;
	case 1024:
		v = GEM_RING_SZ_1024;
		break;
	case 2048:
		v = GEM_RING_SZ_2048;
		break;
	case 4096:
		v = GEM_RING_SZ_4096;
		break;
	case 8192:
		v = GEM_RING_SZ_8192;
		break;
	default:
		printf("gem: invalid descriptor ring size\n");
		break;
	}
	return (v);
}
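
/*
 * Note: for an unsupported size, gem_ringsize() warns and returns 0, which
 * would program an unspecified ring size; both callers in gem_init() pass
 * the compile-time GEM_NTXDESC/GEM_NRXDESC, which are expected to be one of
 * the supported powers of two.
 */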

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
static void
gem_init(xsc)
	void *xsc;
{
	struct gem_softc *sc = (struct gem_softc *)xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int s;
	u_int32_t v;

	s = splnet();

	DPRINTF(sc, ("%s: gem_init: calling stop\n",
	    device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_init: calling stop",
	    device_get_name(sc->sc_dev));
	/*
	 * Initialization sequence.  The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2.  Reset the Ethernet Channel */
	gem_stop(&sc->sc_arpcom.ac_if, 0);
	gem_reset(sc);
	DPRINTF(sc, ("%s: gem_init: restarting\n",
	    device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_init: restarting",
	    device_get_name(sc->sc_dev));

	/* Re-initialize the MIF */
	gem_mifinit(sc);

	/* Call MI reset function if any */
	if (sc->sc_hwreset)
		(*sc->sc_hwreset)(sc);

	/* step 3.  Setup data structures in host memory */
	gem_meminit(sc);

	/* step 4.  TX MAC registers & counters */
	gem_init_regs(sc);
	/* XXX: VLAN code from NetBSD temporarily removed. */
	bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
	    (ETHER_MAX_LEN + sizeof(struct ether_header)) | (0x2000<<16));

	/* step 5.  RX MAC registers & counters */
	gem_setladrf(sc);

	/* step 6 & 7.  Program Descriptor Ring Base Addresses */
	/* NOTE: we use only 32-bit DMA addresses here. */
	bus_space_write_4(t, h, GEM_TX_RING_PTR_HI, 0);
	bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

	bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 0);
	bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
	DPRINTF(sc, ("loading rx ring %lx, tx ring %lx, cddma %lx\n",
	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma));
	CTR3(KTR_GEM, "loading rx ring %lx, tx ring %lx, cddma %lx",
	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);

	/* step 8.  Global Configuration & Interrupt Mask */
	bus_space_write_4(t, h, GEM_INTMASK,
	    ~(GEM_INTR_TX_INTME|
	    GEM_INTR_TX_EMPTY|
	    GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF|
	    GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS|
	    GEM_INTR_MAC_CONTROL|GEM_INTR_MIF|
	    GEM_INTR_BERR));
	bus_space_write_4(t, h, GEM_MAC_RX_MASK, 0);		/* XXXX */
	bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff);	/* XXXX */
	bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0);	/* XXXX */

	/* step 9.  ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = gem_ringsize(GEM_NTXDESC /*XXX*/);
	bus_space_write_4(t, h, GEM_TX_CONFIG,
	    v|GEM_TX_CONFIG_TXDMA_EN|
	    ((0x400<<10)&GEM_TX_CONFIG_TXFIFO_TH));

	/* step 10.  ERX Configuration */

	/* Encode the receive descriptor ring size */
	v = gem_ringsize(GEM_NRXDESC /*XXX*/);

	/* Enable DMA */
	bus_space_write_4(t, h, GEM_RX_CONFIG,
	    v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
	    (2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN|
	    (0<<GEM_RX_CONFIG_CXM_START_SHFT));
	/*
	 * The following value is for an OFF Threshold of about 15.5 Kbytes
	 * and an ON Threshold of 4K bytes.
	 */
	bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH, 0xf8 | (0x40 << 12));
	bus_space_write_4(t, h, GEM_RX_BLANKING, (2<<12)|6);

	/* step 11.  Configure Media */
	(void)gem_mii_statchg(sc->sc_dev);

	/* step 12.  RX_MAC Configuration Register */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	v |= GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);

	/* step 14.  Issue Transmit Pending command */

	/* Call MI initialization function if any */
	if (sc->sc_hwinit)
		(*sc->sc_hwinit)(sc);

	/* step 15.  Give the receiver a swift kick */
	bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC-4);

	/* Start the one second timer. */
	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;
	sc->sc_flags = ifp->if_flags;
	splx(s);
}

/*
 * XXX: This is really a substitute for bus_dmamap_load_mbuf(), which FreeBSD
 * does not yet have, with some adaptations for this driver.
 * Some changes are mandated by the fact that multiple maps may be needed
 * to map a single mbuf.
 * It should be removed once generic support is available.
 *
 * This is derived from NetBSD (syssrc/sys/arch/sparc64/sparc64/machdep.c), for
 * a copyright notice see sparc64/sparc64/bus_machdep.c.
 *
 * Not every error condition is passed to the callback in this version, and the
 * callback may be called more than once.
 * It also gropes in the entrails of the callback arg...
 */
static int
gem_dmamap_load_mbuf(sc, m0, cb, txj, flags)
	struct gem_softc *sc;
	struct mbuf *m0;
	bus_dmamap_callback_t *cb;
	struct gem_txjob *txj;
	int flags;
{
	struct gem_txdma txd;
	struct gem_txsoft *txs;
	struct mbuf *m;
	void *vaddr;
	int error, first = 1, len, totlen;

	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("gem_dmamap_load_mbuf: no packet header");
	totlen = m0->m_pkthdr.len;
	len = 0;
	txd.txd_sc = sc;
	txd.txd_nexttx = txj->txj_nexttx;
	txj->txj_nsegs = 0;
	STAILQ_INIT(&txj->txj_txsq);
	m = m0;
	while (m != NULL && len < totlen) {
		if (m->m_len == 0) {
			/* Skip empty mbufs; remember to advance. */
			m = m->m_next;
			continue;
		}
		/* Get a work queue entry. */
		if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
			/*
			 * Ran out of descriptors, return a value that
			 * cannot be returned by bus_dmamap_load to notify
			 * the caller.
			 */
			error = -1;
			goto fail;
		}
		len += m->m_len;
		txd.txd_flags = first ? GTXD_FIRST : 0;
		if (m->m_next == NULL || len >= totlen)
			txd.txd_flags |= GTXD_LAST;
		vaddr = mtod(m, void *);
		error = bus_dmamap_load(sc->sc_dmatag, txs->txs_dmamap, vaddr,
		    m->m_len, cb, &txd, flags);
		if (error != 0 || txd.txd_error != 0)
			goto fail;
		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmatag, txs->txs_dmamap,
		    BUS_DMASYNC_PREWRITE);
		m = m->m_next;
		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		txs->txs_mbuf = first ? m0 : NULL;
		txs->txs_firstdesc = txj->txj_nexttx;
		txs->txs_lastdesc = txd.txd_lasttx;
		txs->txs_ndescs = txd.txd_nsegs;
		CTR3(KTR_GEM, "load_mbuf: setting firstdesc=%d, lastdesc=%d, "
		    "ndescs=%d", txs->txs_firstdesc, txs->txs_lastdesc,
		    txs->txs_ndescs);
		STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
		STAILQ_INSERT_TAIL(&txj->txj_txsq, txs, txs_q);
		txj->txj_nexttx = txd.txd_nexttx;
		txj->txj_nsegs += txd.txd_nsegs;
		first = 0;
	}
	txj->txj_lasttx = txd.txd_lasttx;
	return (0);

fail:
	CTR1(KTR_GEM, "gem_dmamap_load_mbuf failed (%d)", error);
	gem_dmamap_unload_mbuf(sc, txj);
	return (error);
}

/*
 * Unload an mbuf using the txd the information was placed in.
 * The tx interrupt code frees the tx segments one by one, because the txd is
 * not available any more.
 */
static void
gem_dmamap_unload_mbuf(sc, txj)
	struct gem_softc *sc;
	struct gem_txjob *txj;
{
	struct gem_txsoft *txs;

	/* Re-add the removed txsofts and unload the segments. */
	while ((txs = STAILQ_FIRST(&txj->txj_txsq)) != NULL) {
		bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
		STAILQ_REMOVE_HEAD(&txj->txj_txsq, txs_q);
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}
}

static void
gem_dmamap_commit_mbuf(sc, txj)
	struct gem_softc *sc;
	struct gem_txjob *txj;
{
	struct gem_txsoft *txs;

	/* Commit the txjob by transferring the txsofts to the txdirtyq. */
	while ((txs = STAILQ_FIRST(&txj->txj_txsq)) != NULL) {
		STAILQ_REMOVE_HEAD(&txj->txj_txsq, txs_q);
		STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
	}
}
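
/*
 * Note on the txjob lifecycle (descriptive): gem_dmamap_load_mbuf() collects
 * the txsofts it consumes on the private txj_txsq list, so a caller that
 * runs out of ring space can still back out with gem_dmamap_unload_mbuf();
 * only gem_dmamap_commit_mbuf() makes the job visible to gem_tint() by
 * moving the txsofts to sc_txdirtyq.
 */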

static void
gem_init_regs(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;

	/* These regs are not cleared on reset */
	sc->sc_inited = 0;
	if (!sc->sc_inited) {

		/* Wooo.  Magic values. */
		bus_space_write_4(t, h, GEM_MAC_IPG0, 0);
		bus_space_write_4(t, h, GEM_MAC_IPG1, 8);
		bus_space_write_4(t, h, GEM_MAC_IPG2, 4);

		bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* Max frame and max burst size */
		bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
		    (ifp->if_mtu+18) | (0x2000<<16)/* Burst size */);
		bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x7);
		bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x4);
		bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10);
		/* Dunno.... */
		bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
		bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
		    ((sc->sc_arpcom.ac_enaddr[5]<<8)|
		    sc->sc_arpcom.ac_enaddr[4])&0x3ff);
		/* Secondary MAC addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);
		/* MAC control addr set to 01:80:c2:00:00:01 */
		bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
		bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
		bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);

		/* MAC filter addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);

		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);

		sc->sc_inited = 1;
	}

	/* Counters need to be zeroed */
	bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);

	/* Un-pause stuff */
#if 0
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);
#else
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0);
#endif

	/*
	 * Set the station address.
	 */
	bus_space_write_4(t, h, GEM_MAC_ADDR0,
	    (sc->sc_arpcom.ac_enaddr[4]<<8) | sc->sc_arpcom.ac_enaddr[5]);
	bus_space_write_4(t, h, GEM_MAC_ADDR1,
	    (sc->sc_arpcom.ac_enaddr[2]<<8) | sc->sc_arpcom.ac_enaddr[3]);
	bus_space_write_4(t, h, GEM_MAC_ADDR2,
	    (sc->sc_arpcom.ac_enaddr[0]<<8) | sc->sc_arpcom.ac_enaddr[1]);
}

static void
gem_start(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct mbuf *m0 = NULL, *m;
	struct gem_txjob txj;
	int firsttx, ofree, seg, ntx, txmfail;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;
	firsttx = sc->sc_txnext;

	DPRINTF(sc, ("%s: gem_start: txfree %d, txnext %d\n",
	    device_get_name(sc->sc_dev), ofree, firsttx));
	CTR3(KTR_GEM, "%s: gem_start: txfree %d, txnext %d",
	    device_get_name(sc->sc_dev), ofree, firsttx);

	txj.txj_nexttx = firsttx;
	txj.txj_lasttx = 0;
	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	txmfail = 0;
	for (ntx = 0;; ntx++) {
		/*
		 * Grab a packet off the queue.
		 */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		txmfail = gem_dmamap_load_mbuf(sc, m0,
		    gem_txdma_callback, &txj, BUS_DMA_NOWAIT);
		if (txmfail == -1) {
			IF_PREPEND(&ifp->if_snd, m0);
			break;
		}
		if (txmfail > 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				device_printf(sc->sc_dev, "unable to "
				    "allocate Tx mbuf\n");
				/* Failed; requeue. */
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					device_printf(sc->sc_dev, "unable to "
					    "allocate Tx cluster\n");
					IF_PREPEND(&ifp->if_snd, m0);
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			txmfail = gem_dmamap_load_mbuf(sc, m,
			    gem_txdma_callback, &txj, BUS_DMA_NOWAIT);
			if (txmfail != 0) {
				if (txmfail > 0) {
					device_printf(sc->sc_dev, "unable to "
					    "load Tx buffer, error = %d\n",
					    txmfail);
				}
				m_freem(m);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring as a termination point, to
		 * prevent wrap-around.
		 */
		if (txj.txj_nsegs > (sc->sc_txfree - 1)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			gem_dmamap_unload_mbuf(sc, &txj);
			if (m != NULL)
				m_freem(m);
			IF_PREPEND(&ifp->if_snd, m0);
			break;
		}

		if (m != NULL)
			m_freem(m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf("    gem_start %p transmit chain:\n",
			    STAILQ_FIRST(&txj.txj_txsq));
			for (seg = sc->sc_txnext;; seg = GEM_NEXTTX(seg)) {
				printf("descriptor %d:\t", seg);
				printf("gd_flags: 0x%016llx\t", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[seg].gd_flags));
				printf("gd_addr: 0x%016llx\n", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[seg].gd_addr));
				if (seg == txj.txj_lasttx)
					break;
			}
		}
#endif

		/* Sync the descriptors we're using. */
		GEM_CDTXSYNC(sc, sc->sc_txnext, txj.txj_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->sc_txfree -= txj.txj_nsegs;
		sc->sc_txnext = txj.txj_nexttx;

		gem_dmamap_commit_mbuf(sc, &txj);
	}
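
	/*
	 * The loop above exits early with txmfail == -1 when the free txsoft
	 * list runs dry; in that case (or when the ring itself is full) we
	 * raise IFF_OACTIVE below so the stack holds further packets until
	 * gem_tint() frees descriptors and restarts us.
	 */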

	if (txmfail == -1 || sc->sc_txfree == 0) {
		ifp->if_flags |= IFF_OACTIVE;
		/* No more slots left; notify upper layer. */
	}

	if (ntx > 0) {
		DPRINTF(sc, ("%s: packets enqueued, IC on %d, OWN on %d\n",
		    device_get_name(sc->sc_dev), txj.txj_lasttx, firsttx));
		CTR3(KTR_GEM, "%s: packets enqueued, IC on %d, OWN on %d",
		    device_get_name(sc->sc_dev), txj.txj_lasttx, firsttx);
		/*
		 * The entire packet chain is set up.
		 * Kick the transmitter.
		 */
		DPRINTF(sc, ("%s: gem_start: kicking tx %d\n",
		    device_get_name(sc->sc_dev), txj.txj_nexttx));
		CTR3(KTR_GEM, "%s: gem_start: kicking tx %d=%d",
		    device_get_name(sc->sc_dev), txj.txj_nexttx,
		    sc->sc_txnext);
		bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_TX_KICK,
		    sc->sc_txnext);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
		DPRINTF(sc, ("%s: gem_start: watchdog %d\n",
		    device_get_name(sc->sc_dev), ifp->if_timer));
		CTR2(KTR_GEM, "%s: gem_start: watchdog %d",
		    device_get_name(sc->sc_dev), ifp->if_timer);
	}
}

/*
 * Transmit interrupt.
 */
static void
gem_tint(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h;
	struct gem_txsoft *txs;
	int txlast;

	DPRINTF(sc, ("%s: gem_tint\n", device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_tint", device_get_name(sc->sc_dev));

	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
	    bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);

	/*
	 * then clear the hardware counters.
	 */
	bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		GEM_CDTXSYNC(sc, txs->txs_lastdesc,
		    txs->txs_ndescs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			int i;
			printf("    txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
				printf("descriptor %d: ", i);
				printf("gd_flags: 0x%016llx\t", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags));
				printf("gd_addr: 0x%016llx\n", (long long)
				    GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr));
				if (i == txs->txs_lastdesc)
					break;
			}
		}
#endif

		/*
		 * In theory, we could harvest some descriptors before
		 * the ring is empty, but that's a bit complicated.
		 *
		 * GEM_TX_COMPLETION points to the last descriptor
		 * processed +1.
		 */
		txlast = bus_space_read_4(t, mac, GEM_TX_COMPLETION);
		DPRINTF(sc,
		    ("gem_tint: txs->txs_lastdesc = %d, txlast = %d\n",
		    txs->txs_lastdesc, txlast));
		CTR3(KTR_GEM, "gem_tint: txs->txs_firstdesc = %d, "
		    "txs->txs_lastdesc = %d, txlast = %d",
		    txs->txs_firstdesc, txs->txs_lastdesc, txlast);
		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
			if ((txlast >= txs->txs_firstdesc) &&
			    (txlast <= txs->txs_lastdesc))
				break;
		} else {
			/* Ick -- this command wraps */
			if ((txlast >= txs->txs_firstdesc) ||
			    (txlast <= txs->txs_lastdesc))
				break;
		}

		DPRINTF(sc, ("gem_tint: releasing a desc\n"));
		CTR0(KTR_GEM, "gem_tint: releasing a desc");
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		sc->sc_txfree += txs->txs_ndescs;

		bus_dmamap_sync(sc->sc_dmatag, txs->txs_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
		if (txs->txs_mbuf != NULL) {
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		ifp->if_opackets++;
	}

	DPRINTF(sc, ("gem_tint: GEM_TX_STATE_MACHINE %x "
	    "GEM_TX_DATA_PTR %llx "
	    "GEM_TX_COMPLETION %x\n",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_STATE_MACHINE),
	    ((long long) bus_space_read_4(sc->sc_bustag, sc->sc_h,
	    GEM_TX_DATA_PTR_HI) << 32) |
	    bus_space_read_4(sc->sc_bustag, sc->sc_h,
	    GEM_TX_DATA_PTR_LO),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_COMPLETION)));
	CTR3(KTR_GEM, "gem_tint: GEM_TX_STATE_MACHINE %x "
	    "GEM_TX_DATA_PTR %llx "
	    "GEM_TX_COMPLETION %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_STATE_MACHINE),
	    ((long long) bus_space_read_4(sc->sc_bustag, sc->sc_h,
	    GEM_TX_DATA_PTR_HI) << 32) |
	    bus_space_read_4(sc->sc_bustag, sc->sc_h,
	    GEM_TX_DATA_PTR_LO),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_COMPLETION));

	if (STAILQ_FIRST(&sc->sc_txdirtyq) == NULL)
		ifp->if_timer = 0;

	DPRINTF(sc, ("%s: gem_tint: watchdog %d\n",
	    device_get_name(sc->sc_dev), ifp->if_timer));
	CTR2(KTR_GEM, "%s: gem_tint: watchdog %d",
	    device_get_name(sc->sc_dev), ifp->if_timer);

	/* Freed some descriptors, so reset IFF_OACTIVE and restart. */
	ifp->if_flags &= ~IFF_OACTIVE;
	gem_start(ifp);
}
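
/*
 * Note on the receive handshake (descriptive): the chip advances
 * GEM_RX_COMPLETION past each descriptor it has filled; gem_rint() below
 * walks sc_rxptr up to that index, refills the buffers, and returns them to
 * the chip by writing the new index to GEM_RX_KICK.
 */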

/*
 * Receive interrupt.
 */
static void
gem_rint(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	struct ether_header *eh;
	struct gem_rxsoft *rxs;
	struct mbuf *m;
	u_int64_t rxstat;
	int i, len;

	DPRINTF(sc, ("%s: gem_rint\n", device_get_name(sc->sc_dev)));
	CTR1(KTR_GEM, "%s: gem_rint", device_get_name(sc->sc_dev));
	/*
	 * XXXX Read the lastrx only once at the top for speed.
	 */
	DPRINTF(sc, ("gem_rint: sc->rxptr %d, complete %d\n",
	    sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION)));
	CTR2(KTR_GEM, "gem_rint: sc->rxptr %d, complete %d",
	    sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION));
	for (i = sc->sc_rxptr; i != bus_space_read_4(t, h, GEM_RX_COMPLETION);
	    i = GEM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		GEM_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);

		if (rxstat & GEM_RD_OWN) {
			printf("gem_rint: completed descriptor "
			    "still owned %d\n", i);
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		if (rxstat & GEM_RD_BAD_CRC) {
			device_printf(sc->sc_dev, "receive error: CRC error\n");
			GEM_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap,
		    BUS_DMASYNC_POSTREAD);
#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf("    rxsoft %p descriptor %d: ", rxs, i);
			printf("gd_flags: 0x%016llx\t", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
			printf("gd_addr: 0x%016llx\n", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
		}
#endif

		/*
		 * No errors; receive the packet.  Note the Gem
		 * includes the CRC with every packet.
		 */
		len = GEM_RD_BUFLEN(rxstat);

		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = rxs->rxs_mbuf;
		if (gem_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			GEM_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap,
			    BUS_DMASYNC_PREREAD);
			continue;
		}
		m->m_data += 2; /* We're already off by two */

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len - ETHER_CRC_LEN;
		m_adj(m, sizeof(struct ether_header));

		/* Pass it on. */
		ether_input(ifp, eh, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
	bus_space_write_4(t, h, GEM_RX_KICK, i);

	DPRINTF(sc, ("gem_rint: done sc->rxptr %d, complete %d\n",
	    sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION)));
	CTR2(KTR_GEM, "gem_rint: done sc->rxptr %d, complete %d",
	    sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION));
}
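
/*
 * Note (descriptive): gem_rint() above allocates the replacement cluster
 * before handing the old mbuf up the stack; when gem_add_rxbuf() fails, the
 * packet is dropped and the old buffer is recycled into the ring, so an
 * mbuf shortage can never leave a descriptor without a buffer.
 */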

/*
 * gem_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
gem_add_rxbuf(sc, idx)
	struct gem_softc *sc;
	int idx;
{
	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

#ifdef GEM_DEBUG
	/* bzero the packet to check dma */
	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
#endif

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmatag, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, gem_rxdma_callback, rxs,
	    BUS_DMA_NOWAIT);
	if (error != 0 || rxs->rxs_paddr == 0) {
		device_printf(sc->sc_dev, "can't load rx DMA map %d, error = "
		    "%d\n", idx, error);
		panic("gem_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD);

	GEM_INIT_RXDESC(sc, idx);

	return (0);
}

static void
gem_eint(sc, status)
	struct gem_softc *sc;
	u_int status;
{

	if ((status & GEM_INTR_MIF) != 0) {
		device_printf(sc->sc_dev, "XXX: link status changed\n");
		return;
	}

	device_printf(sc->sc_dev, "status=%x\n", status);
}

void
gem_intr(v)
	void *v;
{
	struct gem_softc *sc = (struct gem_softc *)v;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_h;
	u_int32_t status;

	status = bus_space_read_4(t, seb, GEM_STATUS);
	DPRINTF(sc, ("%s: gem_intr: cplt %x, status %x\n",
	    device_get_name(sc->sc_dev), (status>>19),
	    (u_int)status));
	CTR3(KTR_GEM, "%s: gem_intr: cplt %x, status %x",
	    device_get_name(sc->sc_dev), (status>>19),
	    (u_int)status);

	if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
		gem_eint(sc, status);

	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
		gem_tint(sc);

	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
		gem_rint(sc);

	/* We should eventually do more than just print out error stats. */
	if (status & GEM_INTR_TX_MAC) {
		int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS);
		if (txstat & ~GEM_MAC_TX_XMIT_DONE)
			printf("MAC tx fault, status %x\n", txstat);
	}
	if (status & GEM_INTR_RX_MAC) {
		int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS);
		if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
			printf("MAC rx fault, status %x\n", rxstat);
	}
}

static void
gem_watchdog(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = ifp->if_softc;

	DPRINTF(sc, ("gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
	    "GEM_MAC_RX_CONFIG %x\n",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG)));
	CTR3(KTR_GEM, "gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
	    "GEM_MAC_RX_CONFIG %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG));
	CTR3(KTR_GEM, "gem_watchdog: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x "
	    "GEM_MAC_TX_CONFIG %x",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_CONFIG));

	device_printf(sc->sc_dev, "device timeout\n");
	++ifp->if_oerrors;

	/* Try to get more packets going. */
	gem_start(ifp);
}

/*
 * Initialize the MII Management Interface
 */
static void
gem_mifinit(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h;

	/* Configure the MIF in frame mode */
	sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config);
}

/*
 * MII interface
 *
 * The GEM MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
 */
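
/*
 * A frame-mode transaction, as built by gem_mii_readreg()/gem_mii_writereg()
 * below, is essentially an IEEE 802.3 clause 22 management frame packed into
 * the 32-bit GEM_MIF_FRAME register: an opcode (GEM_MIF_FRAME_READ or
 * GEM_MIF_FRAME_WRITE), the PHY and register addresses shifted into place,
 * and 16 bits of data.  Completion is detected by polling the turnaround
 * bit, GEM_MIF_FRAME_TA0.
 */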
1662 * 1663 */ 1664 int 1665 gem_mii_readreg(dev, phy, reg) 1666 device_t dev; 1667 int phy, reg; 1668 { 1669 struct gem_softc *sc = device_get_softc(dev); 1670 bus_space_tag_t t = sc->sc_bustag; 1671 bus_space_handle_t mif = sc->sc_h; 1672 int n; 1673 u_int32_t v; 1674 1675 #ifdef GEM_DEBUG_PHY 1676 printf("gem_mii_readreg: phy %d reg %d\n", phy, reg); 1677 #endif 1678 1679 #if 0 1680 /* Select the desired PHY in the MIF configuration register */ 1681 v = bus_space_read_4(t, mif, GEM_MIF_CONFIG); 1682 /* Clear PHY select bit */ 1683 v &= ~GEM_MIF_CONFIG_PHY_SEL; 1684 if (phy == GEM_PHYAD_EXTERNAL) 1685 /* Set PHY select bit to get at external device */ 1686 v |= GEM_MIF_CONFIG_PHY_SEL; 1687 bus_space_write_4(t, mif, GEM_MIF_CONFIG, v); 1688 #endif 1689 1690 /* Construct the frame command */ 1691 v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) | 1692 GEM_MIF_FRAME_READ; 1693 1694 bus_space_write_4(t, mif, GEM_MIF_FRAME, v); 1695 for (n = 0; n < 100; n++) { 1696 DELAY(1); 1697 v = bus_space_read_4(t, mif, GEM_MIF_FRAME); 1698 if (v & GEM_MIF_FRAME_TA0) 1699 return (v & GEM_MIF_FRAME_DATA); 1700 } 1701 1702 device_printf(sc->sc_dev, "mii_read timeout\n"); 1703 return (0); 1704 } 1705 1706 int 1707 gem_mii_writereg(dev, phy, reg, val) 1708 device_t dev; 1709 int phy, reg, val; 1710 { 1711 struct gem_softc *sc = device_get_softc(dev); 1712 bus_space_tag_t t = sc->sc_bustag; 1713 bus_space_handle_t mif = sc->sc_h; 1714 int n; 1715 u_int32_t v; 1716 1717 #ifdef GEM_DEBUG_PHY 1718 printf("gem_mii_writereg: phy %d reg %d val %x\n", phy, reg, val); 1719 #endif 1720 1721 #if 0 1722 /* Select the desired PHY in the MIF configuration register */ 1723 v = bus_space_read_4(t, mif, GEM_MIF_CONFIG); 1724 /* Clear PHY select bit */ 1725 v &= ~GEM_MIF_CONFIG_PHY_SEL; 1726 if (phy == GEM_PHYAD_EXTERNAL) 1727 /* Set PHY select bit to get at external device */ 1728 v |= GEM_MIF_CONFIG_PHY_SEL; 1729 bus_space_write_4(t, mif, GEM_MIF_CONFIG, v); 1730 #endif 1731 /* Construct the frame command */ 1732 v = GEM_MIF_FRAME_WRITE | 1733 (phy << GEM_MIF_PHY_SHIFT) | 1734 (reg << GEM_MIF_REG_SHIFT) | 1735 (val & GEM_MIF_FRAME_DATA); 1736 1737 bus_space_write_4(t, mif, GEM_MIF_FRAME, v); 1738 for (n = 0; n < 100; n++) { 1739 DELAY(1); 1740 v = bus_space_read_4(t, mif, GEM_MIF_FRAME); 1741 if (v & GEM_MIF_FRAME_TA0) 1742 return (1); 1743 } 1744 1745 device_printf(sc->sc_dev, "mii_write timeout\n"); 1746 return (0); 1747 } 1748 1749 void 1750 gem_mii_statchg(dev) 1751 device_t dev; 1752 { 1753 struct gem_softc *sc = device_get_softc(dev); 1754 #ifdef GEM_DEBUG 1755 int instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media); 1756 #endif 1757 bus_space_tag_t t = sc->sc_bustag; 1758 bus_space_handle_t mac = sc->sc_h; 1759 u_int32_t v; 1760 1761 #ifdef GEM_DEBUG 1762 if (sc->sc_debug) 1763 printf("gem_mii_statchg: status change: phy = %d\n", 1764 sc->sc_phys[instance]); 1765 #endif 1766 1767 /* Set tx full duplex options */ 1768 bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0); 1769 DELAY(10000); /* reg must be cleared and delay before changing. 

void
gem_mii_statchg(dev)
	device_t dev;
{
	struct gem_softc *sc = device_get_softc(dev);
#ifdef GEM_DEBUG
	int instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media);
#endif
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h;
	u_int32_t v;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_mii_statchg: status change: phy = %d\n",
		    sc->sc_phys[instance]);
#endif

	/* Set tx full duplex options */
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0);
	DELAY(10000); /* the register must be cleared and a delay observed
			 before it is rewritten */
	v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT|
	    GEM_MAC_TX_ENABLE;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) {
		v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS;
	}
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v);

	/* XIF Configuration */
	/* We should really calculate all this rather than rely on defaults */
	v = bus_space_read_4(t, mac, GEM_MAC_XIF_CONFIG);
	v = GEM_MAC_XIF_LINK_LED;
	v |= GEM_MAC_XIF_TX_MII_ENA;
	/* If an external transceiver is connected, enable its MII drivers */
	sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG);
	if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) {
		/* External MII needs echo disable if half duplex. */
		if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
			/* turn on full duplex LED */
			v |= GEM_MAC_XIF_FDPLX_LED;
		else
			/* half duplex -- disable echo */
			v |= GEM_MAC_XIF_ECHO_DISABL;
	} else {
		/* Internal MII needs buf enable */
		v |= GEM_MAC_XIF_MII_BUF_ENA;
	}
	bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v);
}

int
gem_mediachange(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = ifp->if_softc;

	/* XXX Add support for serial media. */

	return (mii_mediachg(sc->sc_mii));
}

void
gem_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct gem_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii->mii_media_active;
	ifmr->ifm_status = sc->sc_mii->mii_media_status;
}

/*
 * Process an ioctl request.
 */
static int
gem_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct gem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();
	switch (cmd) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
	case SIOCSIFMTU:
		error = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((sc->sc_flags ^ ifp->if_flags) == IFF_PROMISC)
				gem_setladrf(sc);
			else
				gem_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				gem_stop(ifp, 0);
		}
		sc->sc_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		gem_setladrf(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	default:
		error = ENOTTY;
		break;
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		gem_start(ifp);
	splx(s);
	return (error);
}
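
/*
 * Note on the hash filter layout (descriptive): the logical address filter
 * is 256 bits wide, presented as sixteen 16-bit registers
 * (GEM_MAC_HASH0..GEM_MAC_HASH15).  gem_setladrf() computes an 8-bit hash
 * from the Ethernet CRC of each multicast address; the upper four bits pick
 * the register and the lower four bits pick the bit within it.
 */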

/*
 * Set up the logical address filter.
 */
static void
gem_setladrf(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ifmultiaddr *inm;
	struct sockaddr_dl *sdl;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_char *cp;
	u_int32_t crc;
	u_int32_t hash[16];
	u_int32_t v;
	int i, len;

	/* Clear hash table */
	memset(hash, 0, sizeof(hash));

	/* Get current RX configuration */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		/* Turn on promiscuous mode; turn off the hash filter */
		v |= GEM_MAC_RX_PROMISCUOUS;
		v &= ~GEM_MAC_RX_HASH_FILTER;
		goto chipit;
	}
	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		/* Accept all multicast: set every bit in the hash table. */
		for (i = 0; i < 16; i++)
			hash[i] = 0xffff;
		ifp->if_flags |= IFF_ALLMULTI;
		goto chipit;
	}

	/* Turn off promiscuous mode; turn on the hash filter */
	v &= ~GEM_MAC_RX_PROMISCUOUS;
	v |= GEM_MAC_RX_HASH_FILTER;

	/*
	 * Set up multicast address filter by passing all multicast addresses
	 * through a crc generator, and then using the high order 8 bits as
	 * an index into the 256 bit logical address filter.  The high order
	 * four bits select the word, while the low order four bits select
	 * the bit within the word.
	 */

	TAILQ_FOREACH(inm, &sc->sc_arpcom.ac_if.if_multiaddrs, ifma_link) {
		if (inm->ifma_addr->sa_family != AF_LINK)
			continue;
		sdl = (struct sockaddr_dl *)inm->ifma_addr;
		cp = LLADDR(sdl);
		crc = 0xffffffff;
		for (len = sdl->sdl_alen; --len >= 0;) {
			int octet = *cp++;
			int i;

#define MC_POLY_LE	0xedb88320UL	/* mcast crc, little endian */
			for (i = 0; i < 8; i++) {
				if ((crc & 1) ^ (octet & 1)) {
					crc >>= 1;
					crc ^= MC_POLY_LE;
				} else {
					crc >>= 1;
				}
				octet >>= 1;
			}
		}
		/* Just want the 8 most significant bits. */
		crc >>= 24;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (crc & 0xf);
	}

chipit:
	/* Now load the hash table into the chip */
	bus_space_write_4(t, h, GEM_MAC_HASH0, hash[0]);
	bus_space_write_4(t, h, GEM_MAC_HASH1, hash[1]);
	bus_space_write_4(t, h, GEM_MAC_HASH2, hash[2]);
	bus_space_write_4(t, h, GEM_MAC_HASH3, hash[3]);
	bus_space_write_4(t, h, GEM_MAC_HASH4, hash[4]);
	bus_space_write_4(t, h, GEM_MAC_HASH5, hash[5]);
	bus_space_write_4(t, h, GEM_MAC_HASH6, hash[6]);
	bus_space_write_4(t, h, GEM_MAC_HASH7, hash[7]);
	bus_space_write_4(t, h, GEM_MAC_HASH8, hash[8]);
	bus_space_write_4(t, h, GEM_MAC_HASH9, hash[9]);
	bus_space_write_4(t, h, GEM_MAC_HASH10, hash[10]);
	bus_space_write_4(t, h, GEM_MAC_HASH11, hash[11]);
	bus_space_write_4(t, h, GEM_MAC_HASH12, hash[12]);
	bus_space_write_4(t, h, GEM_MAC_HASH13, hash[13]);
	bus_space_write_4(t, h, GEM_MAC_HASH14, hash[14]);
	bus_space_write_4(t, h, GEM_MAC_HASH15, hash[15]);

	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);
}

#ifdef notyet

/*
 * gem_power:
 *
 *	Power management (suspend/resume) hook.
 */
static void
gem_power(why, arg)
	int why;
	void *arg;
{
	struct gem_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	switch (why) {
	case PWR_SUSPEND:
	case PWR_STANDBY:
		gem_stop(ifp, 1);
		if (sc->sc_power != NULL)
			(*sc->sc_power)(sc, why);
		break;
	case PWR_RESUME:
		if (ifp->if_flags & IFF_UP) {
			if (sc->sc_power != NULL)
				(*sc->sc_power)(sc, why);
			gem_init(sc);
		}
		break;
	case PWR_SOFTSUSPEND:
	case PWR_SOFTSTANDBY:
	case PWR_SOFTRESUME:
		break;
	}
	splx(s);
}
#endif