1 /* 2 * Copyright (c) 1995, David Greenman 3 * All rights reserved. 4 * 5 * Modifications to support NetBSD and media selection: 6 * Copyright (c) 1997 Jason R. Thorpe. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice unmodified, this list of conditions, and the following 13 * disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 
29 * 30 * $FreeBSD$ 31 */ 32 33 /* 34 * Intel EtherExpress Pro/100B PCI Fast Ethernet driver 35 */ 36 37 #include <sys/param.h> 38 #include <sys/systm.h> 39 #include <sys/mbuf.h> 40 #include <sys/malloc.h> 41 #include <sys/kernel.h> 42 #include <sys/socket.h> 43 44 #include <net/if.h> 45 #include <net/if_dl.h> 46 #include <net/if_media.h> 47 48 #ifdef NS 49 #include <netns/ns.h> 50 #include <netns/ns_if.h> 51 #endif 52 53 #include <net/bpf.h> 54 55 #if defined(__NetBSD__) 56 57 #include <sys/ioctl.h> 58 #include <sys/errno.h> 59 #include <sys/device.h> 60 61 #include <net/if_dl.h> 62 #include <net/if_ether.h> 63 64 #include <netinet/if_inarp.h> 65 66 #include <vm/vm.h> 67 68 #include <machine/cpu.h> 69 #include <machine/bus.h> 70 #include <machine/intr.h> 71 72 #include <dev/pci/if_fxpreg.h> 73 #include <dev/pci/if_fxpvar.h> 74 75 #include <dev/pci/pcivar.h> 76 #include <dev/pci/pcireg.h> 77 #include <dev/pci/pcidevs.h> 78 79 80 #else /* __FreeBSD__ */ 81 82 #include <sys/sockio.h> 83 #include <sys/bus.h> 84 #include <machine/bus.h> 85 #include <sys/rman.h> 86 #include <machine/resource.h> 87 88 #include <net/ethernet.h> 89 #include <net/if_arp.h> 90 91 #include <vm/vm.h> /* for vtophys */ 92 #include <vm/pmap.h> /* for vtophys */ 93 #include <machine/clock.h> /* for DELAY */ 94 95 #include <pci/pcivar.h> 96 #include <pci/pcireg.h> /* for PCIM_CMD_xxx */ 97 #include <pci/if_fxpreg.h> 98 #include <pci/if_fxpvar.h> 99 100 #endif /* __NetBSD__ */ 101 102 #ifdef __alpha__ /* XXX */ 103 /* XXX XXX NEED REAL DMA MAPPING SUPPORT XXX XXX */ 104 #undef vtophys 105 #define vtophys(va) alpha_XXX_dmamap((vm_offset_t)(va)) 106 #endif /* __alpha__ */ 107 108 109 #include "opt_bdg.h" 110 #ifdef BRIDGE 111 #include <net/if_types.h> 112 #include <net/bridge.h> 113 #endif 114 115 /* 116 * NOTE! On the Alpha, we have an alignment constraint. The 117 * card DMAs the packet immediately following the RFA. However, 118 * the first thing in the packet is a 14-byte Ethernet header. 
 * This means that the packet is misaligned. To compensate,
 * we actually offset the RFA 2 bytes into the cluster. This
 * aligns the packet after the Ethernet header at a 32-bit
 * boundary. HOWEVER! This means that the RFA is misaligned!
 */
#define RFA_ALIGNMENT_FUDGE	2	/* bytes the RFA is offset into the cluster */

/*
 * Inline function to copy a 16-bit aligned 32-bit quantity.
 *
 * Performs the 32-bit copy as two 16-bit halves so that neither
 * pointer ever needs to be 32-bit aligned (see the RFA alignment
 * fudge above); both sides are volatile because they may refer to
 * descriptor memory shared with the NIC.
 */
static __inline void fxp_lwcopy __P((volatile u_int32_t *,
	volatile u_int32_t *));
static __inline void
fxp_lwcopy(src, dst)
	volatile u_int32_t *src, *dst;
{
	volatile u_int16_t *a = (volatile u_int16_t *)src;
	volatile u_int16_t *b = (volatile u_int16_t *)dst;

	/* Two 16-bit stores, low half first (host byte layout). */
	b[0] = a[0];
	b[1] = a[1];
}

/*
 * Template for default configuration parameters.
 * See struct fxp_cb_config for the bit definitions.
 * The numbered bytes below are the raw config-command bytes that are
 * copied into the config CB and then patched at init time.
 */
static u_char fxp_cb_config_template[] = {
	0x0, 0x0,		/* cb_status */
	0x80, 0x2,		/* cb_command */
	0xff, 0xff, 0xff, 0xff,	/* link_addr */
	0x16,	/* 0 */
	0x8,	/* 1 */
	0x0,	/* 2 */
	0x0,	/* 3 */
	0x0,	/* 4 */
	0x80,	/* 5 */
	0xb2,	/* 6 */
	0x3,	/* 7 */
	0x1,	/* 8 */
	0x0,	/* 9 */
	0x26,	/* 10 */
	0x0,	/* 11 */
	0x60,	/* 12 */
	0x0,	/* 13 */
	0xf2,	/* 14 */
	0x48,	/* 15 */
	0x0,	/* 16 */
	0x40,	/* 17 */
	0xf3,	/* 18 */
	0x0,	/* 19 */
	0x3f,	/* 20 */
	0x5	/* 21 */
};

/*
 * Supported media types.
 *
 * Each table entry maps a PHY type (as read from the EEPROM) to the
 * ifmedia words that PHY supports and its default media selection.
 */
struct fxp_supported_media {
	const int	fsm_phy;	/* PHY type */
	const int	*fsm_media;	/* the media array */
	const int	fsm_nmedia;	/* the number of supported media */
	const int	fsm_defmedia;	/* default media for this PHY */
};

static const int fxp_media_standard[] = {
	IFM_ETHER|IFM_10_T,
	IFM_ETHER|IFM_10_T|IFM_FDX,
	IFM_ETHER|IFM_100_TX,
	IFM_ETHER|IFM_100_TX|IFM_FDX,
	IFM_ETHER|IFM_AUTO,
};
#define FXP_MEDIA_STANDARD_DEFMEDIA	(IFM_ETHER|IFM_AUTO)

static const int fxp_media_default[] = {
	IFM_ETHER|IFM_MANUAL,		/* XXX IFM_AUTO ? */
};
#define FXP_MEDIA_DEFAULT_DEFMEDIA	(IFM_ETHER|IFM_MANUAL)

static const struct fxp_supported_media fxp_media[] = {
	{ FXP_PHY_DP83840, fxp_media_standard,
	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
	  FXP_MEDIA_STANDARD_DEFMEDIA },
	{ FXP_PHY_DP83840A, fxp_media_standard,
	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
	  FXP_MEDIA_STANDARD_DEFMEDIA },
	{ FXP_PHY_82553A, fxp_media_standard,
	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
	  FXP_MEDIA_STANDARD_DEFMEDIA },
	{ FXP_PHY_82553C, fxp_media_standard,
	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
	  FXP_MEDIA_STANDARD_DEFMEDIA },
	{ FXP_PHY_82555, fxp_media_standard,
	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
	  FXP_MEDIA_STANDARD_DEFMEDIA },
	{ FXP_PHY_82555B, fxp_media_standard,
	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
	  FXP_MEDIA_STANDARD_DEFMEDIA },
	{ FXP_PHY_80C24, fxp_media_default,
	  sizeof(fxp_media_default) / sizeof(fxp_media_default[0]),
	  FXP_MEDIA_DEFAULT_DEFMEDIA },
};
#define NFXPMEDIA (sizeof(fxp_media) / sizeof(fxp_media[0]))

static int fxp_mediachange	__P((struct ifnet *));
static void fxp_mediastatus	__P((struct ifnet *, struct ifmediareq *));
static void fxp_set_media	__P((struct fxp_softc *, int));

static __inline void fxp_scb_wait	__P((struct fxp_softc *));
static FXP_INTR_TYPE fxp_intr		__P((void *));
static void fxp_start			__P((struct ifnet *));
static int fxp_ioctl			__P((struct ifnet *,
					    FXP_IOCTLCMD_TYPE, caddr_t));
static void fxp_init			__P((void *));
static void fxp_stop			__P((struct fxp_softc *));
static void fxp_watchdog		__P((struct ifnet *));
static int fxp_add_rfabuf		__P((struct fxp_softc *, struct mbuf *));
static int fxp_mdi_read			__P((struct fxp_softc *, int, int));
static void fxp_mdi_write		__P((struct fxp_softc *, int, int, int));
static void fxp_read_eeprom		__P((struct fxp_softc *, u_int16_t *,
					    int, int));
static int fxp_attach_common		__P((struct fxp_softc *, u_int8_t *));
static void fxp_stats_update		__P((void *));
static void fxp_mc_setup		__P((struct fxp_softc *));

/*
 * Set initial transmit threshold at 64 (512 bytes). This is
 * increased by 64 (512 bytes) at a time, to maximum of 192
 * (1536 bytes), if an underrun occurs.
 * NOTE(review): this is a driver-global, shared by all fxp units.
 */
static int tx_threshold = 64;

/*
 * Number of transmit control blocks. This determines the number
 * of transmit buffers that can be chained in the CB list.
 * This must be a power of two.
 */
#define FXP_NTXCB	128

/*
 * Number of completed TX commands at which point an interrupt
 * will be generated to garbage collect the attached buffers.
 * Must be at least one less than FXP_NTXCB, and should be
 * enough less so that the transmitter doesn't become idle
 * during the buffer rundown (which would reduce performance).
 */
#define FXP_CXINT_THRESH 120

/*
 * TxCB list index mask. This is used to do list wrap-around.
 */
#define FXP_TXCB_MASK	(FXP_NTXCB - 1)

/*
 * Number of receive frame area buffers. These are large so choose
 * wisely.
 */
#define FXP_NRFABUFS	64

/*
 * Maximum number of seconds that the receiver can be idle before we
 * assume it's dead and attempt to reset it by reprogramming the
 * multicast filter. This is part of a work-around for a bug in the
 * NIC. See fxp_stats_update().
 */
#define FXP_MAX_RX_IDLE	15

/*
 * Wait for the previous command to be accepted (but not necessarily
 * completed).
 *
 * Spins (bounded to 10000 reads) until the SCB command byte reads
 * zero, meaning the chip has accepted the previously issued command.
 * NOTE(review): if the bound is exhausted the timeout is silently
 * ignored and the caller proceeds anyway.
 */
static __inline void
fxp_scb_wait(sc)
	struct fxp_softc *sc;
{
	int i = 10000;

	while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i);
}

/*************************************************************
 * Operating system-specific autoconfiguration glue
 *************************************************************/

#if defined(__NetBSD__)

#ifdef __BROKEN_INDIRECT_CONFIG
static int fxp_match __P((struct device *, void *, void *));
#else
static int fxp_match __P((struct device *, struct cfdata *, void *));
#endif
static void fxp_attach __P((struct device *, struct device *, void *));

static void fxp_shutdown __P((void *));

/* Compensate for lack of a generic ether_ioctl() */
static int fxp_ether_ioctl __P((struct ifnet *,
				    FXP_IOCTLCMD_TYPE, caddr_t));
#define	ether_ioctl	fxp_ether_ioctl

/* NetBSD autoconfiguration attachment and driver glue. */
struct cfattach fxp_ca = {
	sizeof(struct fxp_softc), fxp_match, fxp_attach
};

struct cfdriver fxp_cd = {
	NULL, "fxp", DV_IFNET
};

/*
 * Check if a device is an 82557.
 */
static int
fxp_match(parent, match, aux)
	struct device *parent;
#ifdef __BROKEN_INDIRECT_CONFIG
	void *match;
#else
	struct cfdata *match;
#endif
	void *aux;
{
	struct pci_attach_args *pa = aux;

	/* Only Intel parts are of interest. */
	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
		return (0);

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_INTEL_82557:
		return (1);
	}

	return (0);
}

/*
 * NetBSD attach: map registers, hook up the interrupt, run the
 * OS-independent attach (fxp_attach_common), then attach the network
 * interface and register a shutdown hook.
 */
static void
fxp_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct fxp_softc *sc = (struct fxp_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	u_int8_t enaddr[6];
	struct ifnet *ifp;

	/*
	 * Map control/status registers.
	 */
	if (pci_mapreg_map(pa, FXP_PCI_MMBA, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st, &sc->sc_sh, NULL, NULL)) {
		printf(": can't map registers\n");
		return;
	}
	printf(": Intel EtherExpress Pro 10/100B Ethernet\n");

	/*
	 * Allocate our interrupt.
	 */
	if (pci_intr_map(pc, pa->pa_intrtag, pa->pa_intrpin,
	    pa->pa_intrline, &ih)) {
		printf("%s: couldn't map interrupt\n", sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, fxp_intr, sc);
	if (sc->sc_ih == NULL) {
		printf("%s: couldn't establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/* Do generic parts of attach. */
	if (fxp_attach_common(sc, enaddr)) {
		/* Failed! */
		return;
	}

	printf("%s: Ethernet address %s%s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr), sc->phy_10Mbps_only ?
	    ", 10Mbps" : "");

	ifp = &sc->sc_ethercom.ec_if;
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = fxp_ioctl;
	ifp->if_start = fxp_start;
	ifp->if_watchdog = fxp_watchdog;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	/*
	 * Let the system queue as many packets as we have available
	 * TX descriptors.
	 */
	ifp->if_snd.ifq_maxlen = FXP_NTXCB - 1;
	ether_ifattach(ifp, enaddr);
	bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
	    sizeof(struct ether_header));

	/*
	 * Add shutdown hook so that DMA is disabled prior to reboot. Not
	 * doing so could allow DMA to corrupt kernel memory during the
	 * reboot before the driver initializes.
	 */
	shutdownhook_establish(fxp_shutdown, sc);
}

/*
 * Device shutdown routine. Called at system shutdown after sync. The
 * main purpose of this routine is to shut off receiver DMA so that
 * kernel memory doesn't get clobbered during warmboot.
 */
static void
fxp_shutdown(sc)
	void *sc;
{
	fxp_stop((struct fxp_softc *) sc);
}

/*
 * Minimal ioctl handler standing in for a generic ether_ioctl():
 * only SIOCSIFADDR is handled (marks the interface up, reinitializes
 * the chip, and performs per-address-family setup); everything else
 * returns EINVAL.
 */
static int
fxp_ether_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	FXP_IOCTLCMD_TYPE cmd;
	caddr_t data;
{
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct fxp_softc *sc = ifp->if_softc;

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			fxp_init(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
#ifdef NS
		case AF_NS:
		    {
			register struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			if (ns_nullhost(*ina))
				ina->x_host = *(union ns_host *)
				    LLADDR(ifp->if_sadl);
			else
				bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
				    ifp->if_addrlen);
			/* Set new address.
			 */
			fxp_init(sc);
			break;
		    }
#endif
		default:
			fxp_init(sc);
			break;
		}
		break;

	default:
		return (EINVAL);
	}

	return (0);
}

#else /* __FreeBSD__ */

/*
 * Return identification string if this device is ours.
 */
static int
fxp_probe(device_t dev)
{
	if ((pci_get_vendor(dev) == FXP_VENDORID_INTEL) &&
	    (pci_get_device(dev) == FXP_DEVICEID_i82557)) {
		device_set_desc(dev, "Intel EtherExpress Pro 10/100B Ethernet");
		return 0;
	}
	if ((pci_get_vendor(dev) == FXP_VENDORID_INTEL) &&
	    (pci_get_device(dev) == FXP_DEVICEID_i82559)) {
		device_set_desc(dev, "Intel InBusiness 10/100 Ethernet");
		return 0;
	}

	return ENXIO;
}

/*
 * FreeBSD attach: enable bus mastering, map the memory BAR, allocate
 * and hook up the IRQ, run fxp_attach_common(), then attach the
 * network interface.
 *
 * NOTE(review): the plain "goto fail" paths (e.g. bus_setup_intr
 * failure) do not release sc->mem/sc->irq; only the
 * fxp_attach_common() failure path tears them down. Resource leak on
 * those error paths -- verify against newlock/newbus conventions.
 */
static int
fxp_attach(device_t dev)
{
	int error = 0;
	struct fxp_softc *sc = device_get_softc(dev);
	struct ifnet *ifp;
	int s;
	u_long val;
	int rid;

	callout_handle_init(&sc->stat_ch);

	s = splimp();

	/*
	 * Enable bus mastering.
	 */
	val = pci_read_config(dev, PCIR_COMMAND, 2);
	val |= (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
	pci_write_config(dev, PCIR_COMMAND, val, 2);

	/*
	 * Map control/status registers.
	 */
	rid = FXP_PCI_MMBA;
	sc->mem = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
	    0, ~0, 1, RF_ACTIVE);
	if (!sc->mem) {
		device_printf(dev, "could not map memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->sc_st = rman_get_bustag(sc->mem);
	sc->sc_sh = rman_get_bushandle(sc->mem);

	/*
	 * Allocate our interrupt.
	 */
	rid = 0;
	sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->irq == NULL) {
		device_printf(dev, "could not map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET,
	    fxp_intr, sc, &sc->ih);
	if (error) {
		device_printf(dev, "could not setup irq\n");
		goto fail;
	}

	/* Do generic parts of attach. */
	if (fxp_attach_common(sc, sc->arpcom.ac_enaddr)) {
		/* Failed! */
		bus_teardown_intr(dev, sc->irq, sc->ih);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
		bus_release_resource(dev, SYS_RES_MEMORY, FXP_PCI_MMBA, sc->mem);
		error = ENXIO;
		goto fail;
	}

	device_printf(dev, "Ethernet address %6D%s\n",
	    sc->arpcom.ac_enaddr, ":", sc->phy_10Mbps_only ? ", 10Mbps" : "");

	ifp = &sc->arpcom.ac_if;
	ifp->if_unit = device_get_unit(dev);
	ifp->if_name = "fxp";
	ifp->if_output = ether_output;
	ifp->if_baudrate = 100000000;
	ifp->if_init = fxp_init;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = fxp_ioctl;
	ifp->if_start = fxp_start;
	ifp->if_watchdog = fxp_watchdog;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	/*
	 * Let the system queue as many packets as we have available
	 * TX descriptors.
	 */
	ifp->if_snd.ifq_maxlen = FXP_NTXCB - 1;
	ether_ifattach(ifp);
	bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));

	splx(s);
	return 0;

fail:
	splx(s);
	return error;
}

/*
 * Detach interface.
 */
static int
fxp_detach(device_t dev)
{
	struct fxp_softc *sc = device_get_softc(dev);
	int s;

	s = splimp();

	/*
	 * Close down routes etc.
	 */
	if_detach(&sc->arpcom.ac_if);

	/*
	 * Stop DMA and drop transmit queue.
	 */
	fxp_stop(sc);

	/*
	 * Deallocate resources.
	 */
	bus_teardown_intr(dev, sc->irq, sc->ih);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
	bus_release_resource(dev, SYS_RES_MEMORY, FXP_PCI_MMBA, sc->mem);

	/*
	 * Free all the receive buffers.
	 */
	if (sc->rfa_headm != NULL)
		m_freem(sc->rfa_headm);

	/*
	 * Free all media structures.
	 */
	ifmedia_removeall(&sc->sc_media);

	/*
	 * Free ancillary structures.
	 */
	free(sc->cbl_base, M_DEVBUF);
	free(sc->fxp_stats, M_DEVBUF);
	free(sc->mcsp, M_DEVBUF);

	splx(s);

	return 0;
}

/*
 * Device shutdown routine. Called at system shutdown after sync. The
 * main purpose of this routine is to shut off receiver DMA so that
 * kernel memory doesn't get clobbered during warmboot.
 */
static int
fxp_shutdown(device_t dev)
{
	/*
	 * Make sure that DMA is disabled prior to reboot. Not doing
	 * so could allow DMA to corrupt kernel memory during the
	 * reboot before the driver initializes.
	 */
	fxp_stop((struct fxp_softc *) device_get_softc(dev));
	return 0;
}

/* newbus method table / driver registration for FreeBSD. */
static device_method_t fxp_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		fxp_probe),
	DEVMETHOD(device_attach,	fxp_attach),
	DEVMETHOD(device_detach,	fxp_detach),
	DEVMETHOD(device_shutdown,	fxp_shutdown),

	{ 0, 0 }
};

static driver_t fxp_driver = {
	"fxp",
	fxp_methods,
	sizeof(struct fxp_softc),
};

static devclass_t fxp_devclass;

DRIVER_MODULE(if_fxp, pci, fxp_driver, fxp_devclass, 0, 0);

#endif /* __NetBSD__ */

/*************************************************************
 * End of operating system-specific autoconfiguration glue
 *************************************************************/

/*
 * Do generic parts of attach.
 *
 * OS-independent attach work: resets the chip, allocates the TxCB
 * list, statistics buffer, multicast-setup CB and receive buffers,
 * reads the PHY info and MAC address from the EEPROM, and sets up
 * the ifmedia structures. Writes the 6-byte MAC address into
 * 'enaddr'. Returns 0 on success or ENOMEM (after freeing whatever
 * was allocated) on failure.
 */
static int
fxp_attach_common(sc, enaddr)
	struct fxp_softc *sc;
	u_int8_t *enaddr;
{
	u_int16_t data;
	int i, nmedia, defmedia;
	const int *media;

	/*
	 * Reset to a stable state.
	 */
	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
	DELAY(10);

	sc->cbl_base = malloc(sizeof(struct fxp_cb_tx) * FXP_NTXCB,
	    M_DEVBUF, M_NOWAIT);
	if (sc->cbl_base == NULL)
		goto fail;
	bzero(sc->cbl_base, sizeof(struct fxp_cb_tx) * FXP_NTXCB);

	sc->fxp_stats = malloc(sizeof(struct fxp_stats), M_DEVBUF, M_NOWAIT);
	if (sc->fxp_stats == NULL)
		goto fail;
	bzero(sc->fxp_stats, sizeof(struct fxp_stats));

	sc->mcsp = malloc(sizeof(struct fxp_cb_mcs), M_DEVBUF, M_NOWAIT);
	if (sc->mcsp == NULL)
		goto fail;

	/*
	 * Pre-allocate our receive buffers.
	 */
	for (i = 0; i < FXP_NRFABUFS; i++) {
		if (fxp_add_rfabuf(sc, NULL) != 0) {
			goto fail;
		}
	}

	/*
	 * Get info about the primary PHY
	 * (EEPROM word 6: addr in bits 0-7, device in bits 8-13,
	 * 10Mbps-only flag in bit 15).
	 */
	fxp_read_eeprom(sc, (u_int16_t *)&data, 6, 1);
	sc->phy_primary_addr = data & 0xff;
	sc->phy_primary_device = (data >> 8) & 0x3f;
	sc->phy_10Mbps_only = data >> 15;

	/*
	 * Read MAC address (EEPROM words 0-2).
	 */
	fxp_read_eeprom(sc, (u_int16_t *)enaddr, 0, 3);

	/*
	 * Initialize the media structures.
	 */

	media = fxp_media_default;
	nmedia = sizeof(fxp_media_default) / sizeof(fxp_media_default[0]);
	defmedia = FXP_MEDIA_DEFAULT_DEFMEDIA;

	/* Look up the media supported by the detected primary PHY. */
	for (i = 0; i < NFXPMEDIA; i++) {
		if (sc->phy_primary_device == fxp_media[i].fsm_phy) {
			media = fxp_media[i].fsm_media;
			nmedia = fxp_media[i].fsm_nmedia;
			defmedia = fxp_media[i].fsm_defmedia;
		}
	}

	ifmedia_init(&sc->sc_media, 0, fxp_mediachange, fxp_mediastatus);
	for (i = 0; i < nmedia; i++) {
		/* Skip 100Mbps media on a 10Mbps-only part. */
		if (IFM_SUBTYPE(media[i]) == IFM_100_TX && sc->phy_10Mbps_only)
			continue;
		ifmedia_add(&sc->sc_media, media[i], 0, NULL);
	}
	ifmedia_set(&sc->sc_media, defmedia);

	return (0);

 fail:
	printf(FXP_FORMAT ": Failed to malloc memory\n", FXP_ARGS(sc));
	if (sc->cbl_base)
		free(sc->cbl_base, M_DEVBUF);
	if (sc->fxp_stats)
		free(sc->fxp_stats, M_DEVBUF);
	if (sc->mcsp)
		free(sc->mcsp, M_DEVBUF);
	/* frees entire chain */
	if (sc->rfa_headm)
		m_freem(sc->rfa_headm);

	return (ENOMEM);
}

/*
 * Read from the serial EEPROM. Basically, you manually shift in
 * the read opcode (one bit at a time) and then shift in the address,
 * and then you shift out the data (all of this one bit at a time).
 * The word size is 16 bits, so you have to provide the address for
 * every 16 bits of data.
 */
static void
fxp_read_eeprom(sc, data, offset, words)
	struct fxp_softc *sc;
	u_short *data;
	int offset;
	int words;
{
	u_int16_t reg;
	int i, x;

	for (i = 0; i < words; i++) {
		/* Assert chip select before clocking in the opcode. */
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
		/*
		 * Shift in read opcode.
		 * (3 bits, MSB first; EESK is pulsed to clock each bit.)
		 */
		for (x = 3; x > 0; x--) {
			if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) {
				reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
			} else {
				reg = FXP_EEPROM_EECS;
			}
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
			    reg | FXP_EEPROM_EESK);
			DELAY(1);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			DELAY(1);
		}
		/*
		 * Shift in address (6 bits, MSB first).
		 */
		for (x = 6; x > 0; x--) {
			if ((i + offset) & (1 << (x - 1))) {
				reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
			} else {
				reg = FXP_EEPROM_EECS;
			}
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
			    reg | FXP_EEPROM_EESK);
			DELAY(1);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			DELAY(1);
		}
		reg = FXP_EEPROM_EECS;
		data[i] = 0;
		/*
		 * Shift out data (16 bits, MSB first, sampled on EEDO).
		 */
		for (x = 16; x > 0; x--) {
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
			    reg | FXP_EEPROM_EESK);
			DELAY(1);
			if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) &
			    FXP_EEPROM_EEDO)
				data[i] |= (1 << (x - 1));
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			DELAY(1);
		}
		/* Deassert chip select between words. */
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
		DELAY(1);
	}
}

/*
 * Start packet transmission on the interface.
 *
 * Dequeues packets from the interface send queue, loads each mbuf
 * chain into the next free TxCB's buffer descriptors (recopying the
 * chain into a single cluster when it has more than FXP_NTXSEG
 * segments), links the TxCB into the active list, and finally issues
 * a CU RESUME so the chip picks up the new work.
 */
static void
fxp_start(ifp)
	struct ifnet *ifp;
{
	struct fxp_softc *sc = ifp->if_softc;
	struct fxp_cb_tx *txp;

	/*
	 * See if we need to suspend xmit until the multicast filter
	 * has been reprogrammed (which can only be done at the head
	 * of the command chain).
	 */
	if (sc->need_mcsetup)
		return;

	txp = NULL;

	/*
	 * We're finished if there is nothing more to add to the list or if
	 * we're all filled up with buffers to transmit.
	 * NOTE: One TxCB is reserved to guarantee that fxp_mc_setup() can add
	 * a NOP command when needed.
	 */
	while (ifp->if_snd.ifq_head != NULL && sc->tx_queued < FXP_NTXCB - 1) {
		struct mbuf *m, *mb_head;
		int segment;

		/*
		 * Grab a packet to transmit.
		 */
		IF_DEQUEUE(&ifp->if_snd, mb_head);

		/*
		 * Get pointer to next available tx desc.
		 */
		txp = sc->cbl_last->next;

		/*
		 * Go through each of the mbufs in the chain and initialize
		 * the transmit buffer descriptors with the physical address
		 * and size of the mbuf.
		 */
tbdinit:
		for (m = mb_head, segment = 0; m != NULL; m = m->m_next) {
			if (m->m_len != 0) {
				if (segment == FXP_NTXSEG)
					break;
				txp->tbd[segment].tb_addr =
				    vtophys(mtod(m, vm_offset_t));
				txp->tbd[segment].tb_size = m->m_len;
				segment++;
			}
		}
		if (m != NULL) {
			struct mbuf *mn;

			/*
			 * We ran out of segments. We have to recopy this mbuf
			 * chain first. Bail out if we can't get the new buffers.
			 */
			MGETHDR(mn, M_DONTWAIT, MT_DATA);
			if (mn == NULL) {
				m_freem(mb_head);
				break;
			}
			if (mb_head->m_pkthdr.len > MHLEN) {
				MCLGET(mn, M_DONTWAIT);
				if ((mn->m_flags & M_EXT) == 0) {
					m_freem(mn);
					m_freem(mb_head);
					break;
				}
			}
			m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
			    mtod(mn, caddr_t));
			mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
			m_freem(mb_head);
			mb_head = mn;
			/* Retry descriptor load with the compacted chain. */
			goto tbdinit;
		}

		txp->tbd_number = segment;
		txp->mb_head = mb_head;
		txp->cb_status = 0;
		if (sc->tx_queued != FXP_CXINT_THRESH - 1) {
			txp->cb_command =
			    FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF | FXP_CB_COMMAND_S;
		} else {
			/* Request a completion interrupt at the threshold. */
			txp->cb_command =
			    FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I;
			/*
			 * Set a 5 second timer just in case we don't hear from the
			 * card again.
			 */
			ifp->if_timer = 5;
		}
		txp->tx_threshold = tx_threshold;

		/*
		 * Advance the end of list forward.
		 * (Clear the Suspend bit on the previous tail so the chip
		 * runs through to the new TxCB.)
		 */
		sc->cbl_last->cb_command &= ~FXP_CB_COMMAND_S;
		sc->cbl_last = txp;

		/*
		 * Advance the beginning of the list forward if there are
		 * no other packets queued (when nothing is queued, cbl_first
		 * sits on the last TxCB that was sent out).
		 */
		if (sc->tx_queued == 0)
			sc->cbl_first = txp;

		sc->tx_queued++;

		/*
		 * Pass packet to bpf if there is a listener.
		 */
		if (ifp->if_bpf)
			bpf_mtap(FXP_BPFTAP_ARG(ifp), mb_head);
	}

	/*
	 * We're finished. If we added to the list, issue a RESUME to get DMA
	 * going again if suspended.
	 */
	if (txp != NULL) {
		fxp_scb_wait(sc);
		CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_RESUME);
	}
}

/*
 * Process interface interrupts.
 *
 * Loops while the SCB status-acknowledge byte is non-zero: ACKs the
 * pending causes, reclaims completed TxCBs, drains completed receive
 * frames, and restarts the receive unit after a no-resource (RNR)
 * condition. Under NetBSD, returns whether the interrupt was claimed.
 */
static FXP_INTR_TYPE
fxp_intr(arg)
	void *arg;
{
	struct fxp_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_if;
	u_int8_t statack;
#if defined(__NetBSD__)
	int claimed = 0;
#endif

	while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) {
#if defined(__NetBSD__)
		claimed = 1;
#endif
		/*
		 * First ACK all the interrupts in this pass.
		 */
		CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack);

		/*
		 * Free any finished transmit mbuf chains.
		 */
		if (statack & FXP_SCB_STATACK_CXTNO) {
			struct fxp_cb_tx *txp;

			for (txp = sc->cbl_first; sc->tx_queued &&
			    (txp->cb_status & FXP_CB_STATUS_C) != 0;
			    txp = txp->next) {
				if (txp->mb_head != NULL) {
					m_freem(txp->mb_head);
					txp->mb_head = NULL;
				}
				sc->tx_queued--;
			}
			sc->cbl_first = txp;
			ifp->if_timer = 0;
			if (sc->tx_queued == 0) {
				/* Deferred multicast setup can run now. */
				if (sc->need_mcsetup)
					fxp_mc_setup(sc);
			}
			/*
			 * Try to start more packets transmitting.
			 */
			if (ifp->if_snd.ifq_head != NULL)
				fxp_start(ifp);
		}
		/*
		 * Process receiver interrupts. If a no-resource (RNR)
		 * condition exists, get whatever packets we can and
		 * re-start the receiver.
		 */
		if (statack & (FXP_SCB_STATACK_FR | FXP_SCB_STATACK_RNR)) {
			struct mbuf *m;
			struct fxp_rfa *rfa;
rcvloop:
			/* The RFA lives just past the fudge offset in the cluster. */
			m = sc->rfa_headm;
			rfa = (struct fxp_rfa *)(m->m_ext.ext_buf +
			    RFA_ALIGNMENT_FUDGE);

			if (rfa->rfa_status & FXP_RFA_STATUS_C) {
				/*
				 * Remove first packet from the chain.
				 */
				sc->rfa_headm = m->m_next;
				m->m_next = NULL;

				/*
				 * Add a new buffer to the receive chain.
				 * If this fails, the old buffer is recycled
				 * instead.
				 */
				if (fxp_add_rfabuf(sc, m) == 0) {
					struct ether_header *eh;
					u_int16_t total_len;

					total_len = rfa->actual_size &
					    (MCLBYTES - 1);
					/* Runt frames are dropped. */
					if (total_len <
					    sizeof(struct ether_header)) {
						m_freem(m);
						goto rcvloop;
					}
					m->m_pkthdr.rcvif = ifp;
					m->m_pkthdr.len = m->m_len =
					    total_len ;
					eh = mtod(m, struct ether_header *);
					if (ifp->if_bpf)
						bpf_tap(FXP_BPFTAP_ARG(ifp),
						    mtod(m, caddr_t),
						    total_len);
#ifdef BRIDGE
					if (do_bridge) {
						struct ifnet *bdg_ifp ;
						bdg_ifp = bridge_in(m);
						if (bdg_ifp == BDG_DROP)
							goto dropit ;
						if (bdg_ifp != BDG_LOCAL)
							bdg_forward(&m, bdg_ifp);
						if (bdg_ifp != BDG_LOCAL &&
						    bdg_ifp != BDG_BCAST &&
						    bdg_ifp != BDG_MCAST)
							goto dropit ;
						goto getit ;
					}
#endif
					/*
					 * Only pass this packet up
					 * if it is for us.
					 * (In promiscuous mode, drop unicast
					 * frames whose destination neither
					 * matched our individual address nor
					 * is multicast/broadcast --
					 * NOTE(review): IAMATCH semantics
					 * assumed from this use; confirm
					 * against if_fxpreg.h.)
					 */
					if ((ifp->if_flags &
					    IFF_PROMISC) &&
					    (rfa->rfa_status &
					    FXP_RFA_STATUS_IAMATCH) &&
					    (eh->ether_dhost[0] & 1)
					    == 0) {
#ifdef BRIDGE
dropit:
#endif
						if (m)
							m_freem(m);
						goto rcvloop;
					}
#ifdef BRIDGE
getit:
#endif
					/* Strip the Ethernet header before handing up. */
					m->m_data +=
					    sizeof(struct ether_header);
					m->m_len -=
					    sizeof(struct ether_header);
					m->m_pkthdr.len = m->m_len ;
					ether_input(ifp, eh, m);
				}
				goto rcvloop;
			}
			if (statack & FXP_SCB_STATACK_RNR) {
				/* Point the RU at the head RFA and restart it. */
				fxp_scb_wait(sc);
				CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
				    vtophys(sc->rfa_headm->m_ext.ext_buf) +
				    RFA_ALIGNMENT_FUDGE);
				CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND,
				    FXP_SCB_COMMAND_RU_START);
			}
		}
	}
#if defined(__NetBSD__)
	return (claimed);
#endif
}

/*
 * Update packet in/out/collision statistics. The i82557 doesn't
 * allow you to access these counters without doing a fairly
 * expensive DMA to get _all_ of the statistics it maintains, so
 * we do this operation here only once per second. The statistics
 * counters in the kernel are updated from the previous dump-stats
 * DMA and then a new dump-stats DMA is started. The on-chip
 * counters are zeroed when the DMA completes. If we can't start
 * the DMA immediately, we don't wait - we just prepare to read
 * them again next time.
 */
static void
fxp_stats_update(arg)
	void *arg;
{
	struct fxp_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_if;
	struct fxp_stats *sp = sc->fxp_stats;
	struct fxp_cb_tx *txp;
	int s;

	ifp->if_opackets += sp->tx_good;
	ifp->if_collisions += sp->tx_total_collisions;
	if (sp->rx_good) {
		ifp->if_ipackets += sp->rx_good;
		sc->rx_idle_secs = 0;
	} else {
		/*
		 * Receiver's been idle for another second.
		 */
		sc->rx_idle_secs++;
	}
	ifp->if_ierrors +=
	    sp->rx_crc_errors +
	    sp->rx_alignment_errors +
	    sp->rx_rnr_errors +
	    sp->rx_overrun_errors;
	/*
	 * If any transmit underruns occurred, bump up the transmit
	 * threshold by another 512 bytes (64 * 8).
	 */
	if (sp->tx_underruns) {
		ifp->if_oerrors += sp->tx_underruns;
		if (tx_threshold < 192)
			tx_threshold += 64;
	}
	s = splimp();
	/*
	 * Release any xmit buffers that have completed DMA. This isn't
	 * strictly necessary to do here, but it's advantageous for mbufs
	 * with external storage to be released in a timely manner rather
	 * than being deferred for a potentially long time. This limits
	 * the delay to a maximum of one second.
	 */
	for (txp = sc->cbl_first; sc->tx_queued &&
	    (txp->cb_status & FXP_CB_STATUS_C) != 0;
	    txp = txp->next) {
		if (txp->mb_head != NULL) {
			m_freem(txp->mb_head);
			txp->mb_head = NULL;
		}
		sc->tx_queued--;
	}
	sc->cbl_first = txp;
	/*
	 * If we haven't received any packets in FXP_MAX_RX_IDLE seconds,
	 * then assume the receiver has locked up and attempt to clear
	 * the condition by reprogramming the multicast filter. This is
	 * a work-around for a bug in the 82557 where the receiver locks
	 * up if it gets certain types of garbage in the synchronization
	 * bits prior to the packet header. This bug is supposed to only
	 * occur in 10Mbps mode, but has been seen to occur in 100Mbps
	 * mode as well (perhaps due to a 10/100 speed transition).
	 */
	if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) {
		sc->rx_idle_secs = 0;
		fxp_mc_setup(sc);
	}
	/*
	 * If there is no pending command, start another stats
	 * dump. Otherwise punt for now.
	 */
	if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) {
		/*
		 * Start another stats dump.
1247 */ 1248 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, 1249 FXP_SCB_COMMAND_CU_DUMPRESET); 1250 } else { 1251 /* 1252 * A previous command is still waiting to be accepted. 1253 * Just zero our copy of the stats and wait for the 1254 * next timer event to update them. 1255 */ 1256 sp->tx_good = 0; 1257 sp->tx_underruns = 0; 1258 sp->tx_total_collisions = 0; 1259 1260 sp->rx_good = 0; 1261 sp->rx_crc_errors = 0; 1262 sp->rx_alignment_errors = 0; 1263 sp->rx_rnr_errors = 0; 1264 sp->rx_overrun_errors = 0; 1265 } 1266 splx(s); 1267 /* 1268 * Schedule another timeout one second from now. 1269 */ 1270 sc->stat_ch = timeout(fxp_stats_update, sc, hz); 1271 } 1272 1273 /* 1274 * Stop the interface. Cancels the statistics updater and resets 1275 * the interface. 1276 */ 1277 static void 1278 fxp_stop(sc) 1279 struct fxp_softc *sc; 1280 { 1281 struct ifnet *ifp = &sc->sc_if; 1282 struct fxp_cb_tx *txp; 1283 int i; 1284 1285 /* 1286 * Cancel stats updater. 1287 */ 1288 untimeout(fxp_stats_update, sc, sc->stat_ch); 1289 1290 /* 1291 * Issue software reset 1292 */ 1293 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET); 1294 DELAY(10); 1295 1296 /* 1297 * Release any xmit buffers. 1298 */ 1299 txp = sc->cbl_base; 1300 if (txp != NULL) { 1301 for (i = 0; i < FXP_NTXCB; i++) { 1302 if (txp[i].mb_head != NULL) { 1303 m_freem(txp[i].mb_head); 1304 txp[i].mb_head = NULL; 1305 } 1306 } 1307 } 1308 sc->tx_queued = 0; 1309 1310 /* 1311 * Free all the receive buffers then reallocate/reinitialize 1312 */ 1313 if (sc->rfa_headm != NULL) 1314 m_freem(sc->rfa_headm); 1315 sc->rfa_headm = NULL; 1316 sc->rfa_tailm = NULL; 1317 for (i = 0; i < FXP_NRFABUFS; i++) { 1318 if (fxp_add_rfabuf(sc, NULL) != 0) { 1319 /* 1320 * This "can't happen" - we're at splimp() 1321 * and we just freed all the buffers we need 1322 * above. 
			 */
			panic("fxp_stop: no buffers!");
		}
	}

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/*
 * Watchdog/transmission transmit timeout handler. Called when a
 * transmission is started on the interface, but no interrupt is
 * received before the timeout. This usually indicates that the
 * card has wedged for some reason.
 */
static void
fxp_watchdog(ifp)
	struct ifnet *ifp;
{
	struct fxp_softc *sc = ifp->if_softc;

	printf(FXP_FORMAT ": device timeout\n", FXP_ARGS(sc));
	ifp->if_oerrors++;

	/* Full reinitialization is the recovery path for a wedged chip. */
	fxp_init(sc);
}

/*
 * Reset and (re)initialize the interface: stop pending I/O, load the
 * configuration and station address into the chip, rebuild the TxCB
 * ring, start the receive unit on the RFA list, program the media,
 * and mark the interface running. Also serves as the recovery path
 * from fxp_watchdog() and fxp_ioctl(SIOCSIFFLAGS).
 */
static void
fxp_init(xsc)
	void *xsc;
{
	struct fxp_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_if;
	struct fxp_cb_config *cbp;
	struct fxp_cb_ias *cb_ias;
	struct fxp_cb_tx *txp;
	int i, s, prm;

	s = splimp();
	/*
	 * Cancel any pending I/O
	 */
	fxp_stop(sc);

	prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0;

	/*
	 * Initialize base of CBL and RFA memory. Loading with zero
	 * sets it up for regular linear addressing.
	 */
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_BASE);

	fxp_scb_wait(sc);
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_RU_BASE);

	/*
	 * Initialize base of dump-stats buffer.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(sc->fxp_stats));
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_DUMP_ADR);

	/*
	 * We temporarily use memory that contains the TxCB list to
	 * construct the config CB. The TxCB list memory is rebuilt
	 * later.
	 */
	cbp = (struct fxp_cb_config *) sc->cbl_base;

	/*
	 * This bcopy is kind of disgusting, but there are a bunch of must be
	 * zero and must be one bits in this structure and this is the easiest
	 * way to initialize them all to proper values.
	 */
	bcopy(fxp_cb_config_template, (volatile void *)&cbp->cb_status,
	    sizeof(fxp_cb_config_template));

	cbp->cb_status =	0;
	cbp->cb_command =	FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL;
	cbp->link_addr =	-1;	/* (no) next command */
	cbp->byte_count =	22;	/* (22) bytes to config */
	cbp->rx_fifo_limit =	8;	/* rx fifo threshold (32 bytes) */
	cbp->tx_fifo_limit =	0;	/* tx fifo threshold (0 bytes) */
	cbp->adaptive_ifs =	0;	/* (no) adaptive interframe spacing */
	cbp->rx_dma_bytecount =	0;	/* (no) rx DMA max */
	cbp->tx_dma_bytecount =	0;	/* (no) tx DMA max */
	cbp->dma_bce =		0;	/* (disable) dma max counters */
	cbp->late_scb =		0;	/* (don't) defer SCB update */
	cbp->tno_int =		0;	/* (disable) tx not okay interrupt */
	cbp->ci_int =		1;	/* interrupt on CU idle */
	cbp->save_bf =		prm;	/* save bad frames */
	cbp->disc_short_rx =	!prm;	/* discard short packets */
	cbp->underrun_retry =	1;	/* retry mode (1) on DMA underrun */
	cbp->mediatype =	!sc->phy_10Mbps_only; /* interface mode */
	cbp->nsai =		1;	/* (don't) disable source addr insert */
	cbp->preamble_length =	2;	/* (7 byte) preamble */
	cbp->loopback =		0;	/* (don't) loopback */
	cbp->linear_priority =	0;	/* (normal CSMA/CD operation) */
	cbp->linear_pri_mode =	0;	/* (wait after xmit only) */
	cbp->interfrm_spacing =	6;	/* (96 bits of) interframe spacing */
	cbp->promiscuous =	prm;	/* promiscuous mode */
	cbp->bcast_disable =	0;	/* (don't) disable broadcasts */
	cbp->crscdt =		0;	/* (CRS only) */
	cbp->stripping =	!prm;	/* truncate rx packet to byte count */
	cbp->padding =		1;	/* (do) pad short tx packets */
	cbp->rcv_crc_xfer =	0;	/* (don't) xfer CRC to host */
	cbp->force_fdx =	0;	/* (don't) force full duplex */
	cbp->fdx_pin_en =	1;	/* (enable) FDX# pin */
	cbp->multi_ia =		0;	/* (don't) accept multiple IAs */
	cbp->mc_all =		sc->all_mcasts;	/* accept all multicasts */

	/*
	 * Start the config command/DMA.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(&cbp->cb_status));
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);
	/*
	 * ...and wait for it to complete.
	 * NOTE(review): busy-wait with no bound; a wedged chip that
	 * never sets the C bit would hang here - confirm acceptable.
	 */
	while (!(cbp->cb_status & FXP_CB_STATUS_C));

	/*
	 * Now initialize the station address. Temporarily use the TxCB
	 * memory area like we did above for the config CB.
	 */
	cb_ias = (struct fxp_cb_ias *) sc->cbl_base;
	cb_ias->cb_status = 0;
	cb_ias->cb_command = FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL;
	cb_ias->link_addr = -1;
#if defined(__NetBSD__)
	bcopy(LLADDR(ifp->if_sadl), (void *)cb_ias->macaddr, 6);
#else
	bcopy(sc->arpcom.ac_enaddr, (volatile void *)cb_ias->macaddr,
	    sizeof(sc->arpcom.ac_enaddr));
#endif /* __NetBSD__ */

	/*
	 * Start the IAS (Individual Address Setup) command/DMA.
	 * The SCB GENERAL pointer still holds the CB address loaded
	 * for the config command above.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);
	/* ...and wait for it to complete (unbounded, as above). */
	while (!(cb_ias->cb_status & FXP_CB_STATUS_C));

	/*
	 * Initialize transmit control block (TxCB) list: a circular
	 * ring of NOPs, each linked (both by physical address for the
	 * chip and 'next' pointer for the driver) to its successor.
	 */

	txp = sc->cbl_base;
	bzero(txp, sizeof(struct fxp_cb_tx) * FXP_NTXCB);
	for (i = 0; i < FXP_NTXCB; i++) {
		txp[i].cb_status = FXP_CB_STATUS_C | FXP_CB_STATUS_OK;
		txp[i].cb_command = FXP_CB_COMMAND_NOP;
		txp[i].link_addr = vtophys(&txp[(i + 1) & FXP_TXCB_MASK].cb_status);
		txp[i].tbd_array_addr = vtophys(&txp[i].tbd[0]);
		txp[i].next = &txp[(i + 1) & FXP_TXCB_MASK];
	}
	/*
	 * Set the suspend flag on the first TxCB and start the control
	 * unit. It will execute the NOP and then suspend.
	 */
	txp->cb_command = FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S;
	sc->cbl_first = sc->cbl_last = txp;
	sc->tx_queued = 1;

	fxp_scb_wait(sc);
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);

	/*
	 * Initialize receiver buffer area - RFA.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
	    vtophys(sc->rfa_headm->m_ext.ext_buf) + RFA_ALIGNMENT_FUDGE);
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_RU_START);

	/*
	 * Set current media.
	 */
	fxp_set_media(sc, sc->sc_media.ifm_cur->ifm_media);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	splx(s);

	/*
	 * Start stats updater.
	 */
	sc->stat_ch = timeout(fxp_stats_update, sc, hz);
}

/*
 * Program the PHY for the given ifmedia word. Helper for fxp_init()
 * and fxp_mediachange(); what is written depends on which PHY device
 * was detected at attach time.
 */
static void
fxp_set_media(sc, media)
	struct fxp_softc *sc;
	int media;
{

	switch (sc->phy_primary_device) {
	case FXP_PHY_DP83840:
	case FXP_PHY_DP83840A:
		fxp_mdi_write(sc, sc->phy_primary_addr, FXP_DP83840_PCR,
		    fxp_mdi_read(sc, sc->phy_primary_addr, FXP_DP83840_PCR) |
		    FXP_DP83840_PCR_LED4_MODE |	/* LED4 always indicates duplex */
		    FXP_DP83840_PCR_F_CONNECT |	/* force link disconnect bypass */
		    FXP_DP83840_PCR_BIT10);	/* XXX I have no idea */
		/* fall through */
	case FXP_PHY_82553A:
	case FXP_PHY_82553C: /* untested */
	case FXP_PHY_82555:
	case FXP_PHY_82555B:
		if (IFM_SUBTYPE(media) != IFM_AUTO) {
			int flags;

			/* Manual selection: force speed/duplex in the BMCR. */
			flags = (IFM_SUBTYPE(media) == IFM_100_TX) ?
			    FXP_PHY_BMCR_SPEED_100M : 0;
			flags |= (media & IFM_FDX) ?
			    FXP_PHY_BMCR_FULLDUPLEX : 0;
			fxp_mdi_write(sc, sc->phy_primary_addr,
			    FXP_PHY_BMCR,
			    (fxp_mdi_read(sc, sc->phy_primary_addr,
			    FXP_PHY_BMCR) &
			    ~(FXP_PHY_BMCR_AUTOEN | FXP_PHY_BMCR_SPEED_100M |
			    FXP_PHY_BMCR_FULLDUPLEX)) | flags);
		} else {
			/* Autoselect: just turn on auto-negotiation. */
			fxp_mdi_write(sc, sc->phy_primary_addr,
			    FXP_PHY_BMCR,
			    (fxp_mdi_read(sc, sc->phy_primary_addr,
			    FXP_PHY_BMCR) | FXP_PHY_BMCR_AUTOEN));
		}
		break;
	/*
	 * The Seeq 80c24 doesn't have a PHY programming interface, so do
	 * nothing.
	 */
	case FXP_PHY_80C24:
		break;
	default:
		printf(FXP_FORMAT
		    ": warning: unsupported PHY, type = %d, addr = %d\n",
		    FXP_ARGS(sc), sc->phy_primary_device,
		    sc->phy_primary_addr);
	}
}

/*
 * Change media according to request.
 * ifmedia callback; returns EINVAL for non-Ethernet media words.
 */
int
fxp_mediachange(ifp)
	struct ifnet *ifp;
{
	struct fxp_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	fxp_set_media(sc, ifm->ifm_media);
	return (0);
}

/*
 * Notify the world which media we're using.
 * ifmedia status callback; fills in ifmr from the PHY registers.
 */
void
fxp_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct fxp_softc *sc = ifp->if_softc;
	int flags, stsflags;

	switch (sc->phy_primary_device) {
	case FXP_PHY_82555:
	case FXP_PHY_82555B:
	case FXP_PHY_DP83840:
	case FXP_PHY_DP83840A:
		ifmr->ifm_status = IFM_AVALID; /* IFM_ACTIVE will be valid */
		ifmr->ifm_active = IFM_ETHER;
		/*
		 * The following is not an error.
		 * You need to read this register twice to get current
		 * status. This is correct documented behaviour, the
		 * first read gets latched values.
		 */
		stsflags = fxp_mdi_read(sc, sc->phy_primary_addr, FXP_PHY_STS);
		stsflags = fxp_mdi_read(sc, sc->phy_primary_addr, FXP_PHY_STS);
		if (stsflags & FXP_PHY_STS_LINK_STS)
			ifmr->ifm_status |= IFM_ACTIVE;

		/*
		 * If we are in auto mode, then try report the result.
		 */
		flags = fxp_mdi_read(sc, sc->phy_primary_addr, FXP_PHY_BMCR);
		if (flags & FXP_PHY_BMCR_AUTOEN) {
			ifmr->ifm_active |= IFM_AUTO; /* XXX presently 0 */
			if (stsflags & FXP_PHY_STS_AUTO_DONE) {
				/*
				 * Intel and National parts report
				 * differently on what they found.
				 */
				if ((sc->phy_primary_device == FXP_PHY_82555)
				    || (sc->phy_primary_device == FXP_PHY_82555B)) {
					flags = fxp_mdi_read(sc,
					    sc->phy_primary_addr,
					    FXP_PHY_USC);

					if (flags & FXP_PHY_USC_SPEED)
						ifmr->ifm_active |= IFM_100_TX;
					else
						ifmr->ifm_active |= IFM_10_T;

					if (flags & FXP_PHY_USC_DUPLEX)
						ifmr->ifm_active |= IFM_FDX;
				} else { /* it's National. only know speed */
					flags = fxp_mdi_read(sc,
					    sc->phy_primary_addr,
					    FXP_DP83840_PAR);

					if (flags & FXP_DP83840_PAR_SPEED_10)
						ifmr->ifm_active |= IFM_10_T;
					else
						ifmr->ifm_active |= IFM_100_TX;
				}
			}
		} else { /* in manual mode.. just report what we were set to */
			if (flags & FXP_PHY_BMCR_SPEED_100M)
				ifmr->ifm_active |= IFM_100_TX;
			else
				ifmr->ifm_active |= IFM_10_T;

			if (flags & FXP_PHY_BMCR_FULLDUPLEX)
				ifmr->ifm_active |= IFM_FDX;
		}
		break;

	case FXP_PHY_80C24:
	default:
		ifmr->ifm_active = IFM_ETHER|IFM_MANUAL; /* XXX IFM_AUTO ? */
	}
}

/*
 * Add a buffer to the end of the RFA buffer list.
 * Return 0 if successful, 1 for failure. A failure results in
 * adding the 'oldm' (if non-NULL) on to the end of the list -
 * tossing out its old contents and recycling it.
 * The RFA struct is stuck at the beginning of mbuf cluster and the
 * data pointer is fixed up to point just past it.
 */
static int
fxp_add_rfabuf(sc, oldm)
	struct fxp_softc *sc;
	struct mbuf *oldm;
{
	u_int32_t v;
	struct mbuf *m;
	struct fxp_rfa *rfa, *p_rfa;

	/*
	 * Try to allocate a fresh cluster mbuf; on any failure fall
	 * back to recycling 'oldm' if the caller supplied one.
	 */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			if (oldm == NULL)
				return 1;
			m = oldm;
			m->m_data = m->m_ext.ext_buf;
		}
	} else {
		if (oldm == NULL)
			return 1;
		m = oldm;
		m->m_data = m->m_ext.ext_buf;
	}

	/*
	 * Move the data pointer up so that the incoming data packet
	 * will be 32-bit aligned.
	 */
	m->m_data += RFA_ALIGNMENT_FUDGE;

	/*
	 * Get a pointer to the base of the mbuf cluster and move
	 * data start past it.
	 */
	rfa = mtod(m, struct fxp_rfa *);
	m->m_data += sizeof(struct fxp_rfa);
	rfa->size = (u_int16_t)(MCLBYTES - sizeof(struct fxp_rfa) - RFA_ALIGNMENT_FUDGE);

	/*
	 * Initialize the rest of the RFA. Note that since the RFA
	 * is misaligned, we cannot store values directly. Instead,
	 * we use an optimized, inline copy.
	 */

	rfa->rfa_status = 0;
	rfa->rfa_control = FXP_RFA_CONTROL_EL;
	rfa->actual_size = 0;

	/* Null (all-ones) link and RBD pointers, copied byte-wise. */
	v = -1;
	fxp_lwcopy(&v, (volatile u_int32_t *) rfa->link_addr);
	fxp_lwcopy(&v, (volatile u_int32_t *) rfa->rbd_addr);

	/*
	 * If there are other buffers already on the list, attach this
	 * one to the end by fixing up the tail to point to this one.
	 */
	if (sc->rfa_headm != NULL) {
		p_rfa = (struct fxp_rfa *) (sc->rfa_tailm->m_ext.ext_buf +
		    RFA_ALIGNMENT_FUDGE);
		sc->rfa_tailm->m_next = m;
		v = vtophys(rfa);
		fxp_lwcopy(&v, (volatile u_int32_t *) p_rfa->link_addr);
		/* The old tail is no longer end-of-list. */
		p_rfa->rfa_control &= ~FXP_RFA_CONTROL_EL;
	} else {
		sc->rfa_headm = m;
	}
	sc->rfa_tailm = m;

	/* 0 = fresh buffer added, 1 = recycled the caller's buffer. */
	return (m == oldm);
}

/*
 * Read a PHY register through the i82557 MDI control register.
 * Busy-waits (up to 10000 * 10us) for the ready bit before returning
 * the low 16 data bits.
 * NOTE(review): the 'volatile' qualifier on the return type has no
 * effect in C; left as-is since this is a comment-only pass.
 */
static volatile int
fxp_mdi_read(sc, phy, reg)
	struct fxp_softc *sc;
	int phy;
	int reg;
{
	int count = 10000;
	int value;

	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
	    (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21));

	while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0
	    && count--)
		DELAY(10);

	if (count <= 0)
		printf(FXP_FORMAT ": fxp_mdi_read: timed out\n",
		    FXP_ARGS(sc));

	return (value & 0xffff);
}

/*
 * Write a PHY register through the i82557 MDI control register,
 * then busy-wait (up to 10000 * 10us) for the ready bit.
 */
static void
fxp_mdi_write(sc, phy, reg, value)
	struct fxp_softc *sc;
	int phy;
	int reg;
	int value;
{
	int count = 10000;

	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
	    (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) |
	    (value & 0xffff));

	while((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 &&
	    count--)
		DELAY(10);

	if (count <= 0)
		printf(FXP_FORMAT ": fxp_mdi_write: timed out\n",
		    FXP_ARGS(sc));
}

/*
 * Handle interface ioctls: addresses, flags, multicast lists and
 * media requests. Runs at splimp() to exclude the interrupt handler.
 */
static int
fxp_ioctl(ifp, command, data)
	struct ifnet *ifp;
	FXP_IOCTLCMD_TYPE command;
	caddr_t data;
{
	struct fxp_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splimp();

	switch (command) {

	case SIOCSIFADDR:
#if !defined(__NetBSD__)
	case SIOCGIFADDR:
	case SIOCSIFMTU:
#endif
		error = ether_ioctl(ifp, command, data);
		break;

	case SIOCSIFFLAGS:
		sc->all_mcasts = (ifp->if_flags & IFF_ALLMULTI) ?
		    1 : 0;

		/*
		 * If interface is marked up and not running, then start it.
		 * If it is marked down and running, stop it.
		 * XXX If it's up then re-initialize it. This is so flags
		 * such as IFF_PROMISC are handled.
		 */
		if (ifp->if_flags & IFF_UP) {
			fxp_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				fxp_stop(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		sc->all_mcasts = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0;
#if defined(__NetBSD__)
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ethercom) :
		    ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			if (!sc->all_mcasts)
				fxp_mc_setup(sc);
			/*
			 * fxp_mc_setup() can turn on all_mcasts if we run
			 * out of space, so check it again rather than else {}.
			 */
			if (sc->all_mcasts)
				fxp_init(sc);
			error = 0;
		}
#else /* __FreeBSD__ */
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		if (!sc->all_mcasts)
			fxp_mc_setup(sc);
		/*
		 * fxp_mc_setup() can turn on sc->all_mcasts, so check it
		 * again rather than else {}.
		 */
		if (sc->all_mcasts)
			fxp_init(sc);
		error = 0;
#endif /* __NetBSD__ */
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
		break;

	default:
		/* NOTE(review): ENOTTY is the conventional errno here. */
		error = EINVAL;
	}
	(void) splx(s);
	return (error);
}

/*
 * Program the multicast filter.
 *
 * We have an artificial restriction that the multicast setup command
 * must be the first command in the chain, so we take steps to ensure
 * this. By requiring this, it allows us to keep up the performance of
 * the pre-initialized command ring (esp. link pointers) by not actually
 * inserting the mcsetup command in the ring - i.e. its link pointer
 * points to the TxCB ring, but the mcsetup descriptor itself is not part
 * of it. We then can do 'CU_START' on the mcsetup descriptor and have it
 * lead into the regular TxCB ring when it completes.
 *
 * This function must be called at splimp.
 */
static void
fxp_mc_setup(sc)
	struct fxp_softc *sc;
{
	struct fxp_cb_mcs *mcsp = sc->mcsp;
	struct ifnet *ifp = &sc->sc_if;
	struct ifmultiaddr *ifma;
	int nmcasts;

	/*
	 * If there are queued commands, we must wait until they are all
	 * completed. If we are already waiting, then add a NOP command
	 * with interrupt option so that we're notified when all commands
	 * have been completed - fxp_start() ensures that no additional
	 * TX commands will be added when need_mcsetup is true.
	 */
	if (sc->tx_queued) {
		struct fxp_cb_tx *txp;

		/*
		 * need_mcsetup will be true if we are already waiting for the
		 * NOP command to be completed (see below). In this case, bail.
		 */
		if (sc->need_mcsetup)
			return;
		sc->need_mcsetup = 1;

		/*
		 * Add a NOP command with interrupt so that we are notified when all
		 * TX commands have been processed.
		 */
		txp = sc->cbl_last->next;
		txp->mb_head = NULL;
		txp->cb_status = 0;
		txp->cb_command = FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I;
		/*
		 * Advance the end of list forward.
		 */
		sc->cbl_last->cb_command &= ~FXP_CB_COMMAND_S;
		sc->cbl_last = txp;
		sc->tx_queued++;
		/*
		 * Issue a resume in case the CU has just suspended.
		 */
		fxp_scb_wait(sc);
		CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_RESUME);
		/*
		 * Set a 5 second timer just in case we don't hear from the
		 * card again.
		 */
		ifp->if_timer = 5;

		return;
	}
	sc->need_mcsetup = 0;

	/*
	 * Initialize multicast setup descriptor. Its link pointer leads
	 * into the regular TxCB ring (see the function comment above).
	 */
	mcsp->next = sc->cbl_base;
	mcsp->mb_head = NULL;
	mcsp->cb_status = 0;
	mcsp->cb_command = FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I;
	mcsp->link_addr = vtophys(&sc->cbl_base->cb_status);

	/*
	 * Copy up to MAXMCADDR link-level multicast addresses into the
	 * descriptor; overflow falls back to all_mcasts (empty filter).
	 */
	nmcasts = 0;
	if (!sc->all_mcasts) {
		for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
		    ifma = ifma->ifma_link.le_next) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (nmcasts >= MAXMCADDR) {
				sc->all_mcasts = 1;
				nmcasts = 0;
				break;
			}
			bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
			    (volatile void *) &sc->mcsp->mc_addr[nmcasts][0], 6);
			nmcasts++;
		}
	}
	mcsp->mc_cnt = nmcasts * 6;	/* byte count, 6 bytes per address */
	sc->cbl_first = sc->cbl_last = (struct fxp_cb_tx *) mcsp;
	sc->tx_queued = 1;

	/*
	 * Wait until command unit is not active. This should never
	 * be the case when nothing is queued, but make sure anyway.
	 * NOTE(review): unbounded busy-wait, matching the other SCB
	 * handshakes in this driver.
	 */
	while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) ==
	    FXP_SCB_CUS_ACTIVE) ;

	/*
	 * Start the multicast setup command.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(&mcsp->cb_status));
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);

	ifp->if_timer = 2;
	return;
}