1 /* 2 * Copyright (c) 1995, David Greenman 3 * All rights reserved. 4 * 5 * Modifications to support NetBSD and media selection: 6 * Copyright (c) 1997 Jason R. Thorpe. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice unmodified, this list of conditions, and the following 13 * disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 
29 * 30 * $Id: if_fxp.c,v 1.68 1999/05/08 21:59:39 dfr Exp $ 31 */ 32 33 /* 34 * Intel EtherExpress Pro/100B PCI Fast Ethernet driver 35 */ 36 37 #include "bpfilter.h" 38 39 #include <sys/param.h> 40 #include <sys/systm.h> 41 #include <sys/mbuf.h> 42 #include <sys/malloc.h> 43 #include <sys/kernel.h> 44 #include <sys/socket.h> 45 46 #include <net/if.h> 47 #include <net/if_dl.h> 48 #include <net/if_media.h> 49 50 #ifdef NS 51 #include <netns/ns.h> 52 #include <netns/ns_if.h> 53 #endif 54 55 #if NBPFILTER > 0 56 #include <net/bpf.h> 57 #endif 58 59 #if defined(__NetBSD__) 60 61 #include <sys/ioctl.h> 62 #include <sys/errno.h> 63 #include <sys/device.h> 64 65 #include <net/if_dl.h> 66 #include <net/if_ether.h> 67 68 #include <netinet/if_inarp.h> 69 70 #include <vm/vm.h> 71 72 #include <machine/cpu.h> 73 #include <machine/bus.h> 74 #include <machine/intr.h> 75 76 #include <dev/pci/if_fxpreg.h> 77 #include <dev/pci/if_fxpvar.h> 78 79 #include <dev/pci/pcivar.h> 80 #include <dev/pci/pcireg.h> 81 #include <dev/pci/pcidevs.h> 82 83 #ifdef __alpha__ /* XXX */ 84 /* XXX XXX NEED REAL DMA MAPPING SUPPORT XXX XXX */ 85 #undef vtophys 86 #define vtophys(va) alpha_XXX_dmamap((vm_offset_t)(va)) 87 #endif /* __alpha__ */ 88 89 #else /* __FreeBSD__ */ 90 91 #include <sys/sockio.h> 92 #include <sys/bus.h> 93 #include <machine/bus.h> 94 #include <sys/rman.h> 95 #include <machine/resource.h> 96 97 #include <net/ethernet.h> 98 #include <net/if_arp.h> 99 100 #include <vm/vm.h> /* for vtophys */ 101 #include <vm/pmap.h> /* for vtophys */ 102 #include <machine/clock.h> /* for DELAY */ 103 104 #include <pci/pcivar.h> 105 #include <pci/pcireg.h> /* for PCIM_CMD_xxx */ 106 #include <pci/if_fxpreg.h> 107 #include <pci/if_fxpvar.h> 108 109 #endif /* __NetBSD__ */ 110 111 #include "opt_bdg.h" 112 #ifdef BRIDGE 113 #include <net/if_types.h> 114 #include <net/bridge.h> 115 #endif 116 117 /* 118 * NOTE! On the Alpha, we have an alignment constraint. 
The
 * card DMAs the packet immediately following the RFA.  However,
 * the first thing in the packet is a 14-byte Ethernet header.
 * This means that the packet is misaligned.  To compensate,
 * we actually offset the RFA 2 bytes into the cluster.  This
 * aligns the packet after the Ethernet header at a 32-bit
 * boundary.  HOWEVER!  This means that the RFA is misaligned!
 */
#define RFA_ALIGNMENT_FUDGE	2

/*
 * Inline function to copy a 16-bit aligned 32-bit quantity.
 * The copy is performed as two 16-bit halves so that it never
 * assumes more than 16-bit alignment of either pointer.
 */
static __inline void fxp_lwcopy __P((volatile u_int32_t *,
	volatile u_int32_t *));
static __inline void
fxp_lwcopy(src, dst)
	volatile u_int32_t *src, *dst;
{
	volatile u_int16_t *a = (volatile u_int16_t *)src;
	volatile u_int16_t *b = (volatile u_int16_t *)dst;

	b[0] = a[0];
	b[1] = a[1];
}

/*
 * Template for default configuration parameters.
 * See struct fxp_cb_config for the bit definitions.
 * Bytes 0..21 below are the configuration bytes downloaded to the
 * chip by the CONFIGURE command.
 */
static u_char fxp_cb_config_template[] = {
	0x0, 0x0,		/* cb_status */
	0x80, 0x2,		/* cb_command */
	0xff, 0xff, 0xff, 0xff,	/* link_addr */
	0x16,	/*  0 */
	0x8,	/*  1 */
	0x0,	/*  2 */
	0x0,	/*  3 */
	0x0,	/*  4 */
	0x80,	/*  5 */
	0xb2,	/*  6 */
	0x3,	/*  7 */
	0x1,	/*  8 */
	0x0,	/*  9 */
	0x26,	/* 10 */
	0x0,	/* 11 */
	0x60,	/* 12 */
	0x0,	/* 13 */
	0xf2,	/* 14 */
	0x48,	/* 15 */
	0x0,	/* 16 */
	0x40,	/* 17 */
	0xf3,	/* 18 */
	0x0,	/* 19 */
	0x3f,	/* 20 */
	0x5	/* 21 */
};

/*
 * Supported media types.  Each table maps a primary PHY type to the
 * list of ifmedia words that may be selected for it, plus the default.
 */
struct fxp_supported_media {
	const int	fsm_phy;	/* PHY type */
	const int	*fsm_media;	/* the media array */
	const int	fsm_nmedia;	/* the number of supported media */
	const int	fsm_defmedia;	/* default media for this PHY */
};

static const int fxp_media_standard[] = {
	IFM_ETHER|IFM_10_T,
	IFM_ETHER|IFM_10_T|IFM_FDX,
	IFM_ETHER|IFM_100_TX,
	IFM_ETHER|IFM_100_TX|IFM_FDX,
	IFM_ETHER|IFM_AUTO,
};
#define FXP_MEDIA_STANDARD_DEFMEDIA	(IFM_ETHER|IFM_AUTO)

static const int fxp_media_default[] = {
	IFM_ETHER|IFM_MANUAL,		/* XXX IFM_AUTO ? */
};
#define FXP_MEDIA_DEFAULT_DEFMEDIA	(IFM_ETHER|IFM_MANUAL)

static const struct fxp_supported_media fxp_media[] = {
	{ FXP_PHY_DP83840, fxp_media_standard,
	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
	  FXP_MEDIA_STANDARD_DEFMEDIA },
	{ FXP_PHY_DP83840A, fxp_media_standard,
	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
	  FXP_MEDIA_STANDARD_DEFMEDIA },
	{ FXP_PHY_82553A, fxp_media_standard,
	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
	  FXP_MEDIA_STANDARD_DEFMEDIA },
	{ FXP_PHY_82553C, fxp_media_standard,
	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
	  FXP_MEDIA_STANDARD_DEFMEDIA },
	{ FXP_PHY_82555, fxp_media_standard,
	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
	  FXP_MEDIA_STANDARD_DEFMEDIA },
	{ FXP_PHY_82555B, fxp_media_standard,
	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
	  FXP_MEDIA_STANDARD_DEFMEDIA },
	{ FXP_PHY_80C24, fxp_media_default,
	  sizeof(fxp_media_default) / sizeof(fxp_media_default[0]),
	  FXP_MEDIA_DEFAULT_DEFMEDIA },
};
#define NFXPMEDIA (sizeof(fxp_media) / sizeof(fxp_media[0]))

static int fxp_mediachange	__P((struct ifnet *));
static void fxp_mediastatus	__P((struct ifnet *, struct ifmediareq *));
static void fxp_set_media	__P((struct fxp_softc *, int));
static __inline void fxp_scb_wait __P((struct fxp_softc *));
static FXP_INTR_TYPE fxp_intr	__P((void *));
static void fxp_start		__P((struct ifnet *));
static int fxp_ioctl		__P((struct ifnet *,
				     FXP_IOCTLCMD_TYPE, caddr_t));
static void fxp_init		__P((void *));
static void fxp_stop		__P((struct fxp_softc *));
static void fxp_watchdog	__P((struct ifnet *));
static int fxp_add_rfabuf	__P((struct fxp_softc *, struct mbuf *));
static int fxp_mdi_read		__P((struct fxp_softc *, int, int));
static void fxp_mdi_write	__P((struct fxp_softc *, int, int, int));
static void fxp_read_eeprom	__P((struct fxp_softc *, u_int16_t *,
				     int, int));
static int fxp_attach_common	__P((struct fxp_softc *, u_int8_t *));
static void fxp_stats_update	__P((void *));
static void fxp_mc_setup	__P((struct fxp_softc *));

/*
 * Set initial transmit threshold at 64 (512 bytes). This is
 * increased by 64 (512 bytes) at a time, to maximum of 192
 * (1536 bytes), if an underrun occurs.
 * (Shared by all instances; see fxp_stats_update() for the bump.)
 */
static int tx_threshold = 64;

/*
 * Number of transmit control blocks. This determines the number
 * of transmit buffers that can be chained in the CB list.
 * This must be a power of two.
 */
#define FXP_NTXCB	128

/*
 * Number of completed TX commands at which point an interrupt
 * will be generated to garbage collect the attached buffers.
 * Must be at least one less than FXP_NTXCB, and should be
 * enough less so that the transmitter doesn't become idle
 * during the buffer rundown (which would reduce performance).
 */
#define FXP_CXINT_THRESH 120

/*
 * TxCB list index mask. This is used to do list wrap-around.
 */
#define FXP_TXCB_MASK	(FXP_NTXCB - 1)

/*
 * Number of receive frame area buffers. These are large so choose
 * wisely.
 */
#define FXP_NRFABUFS	64

/*
 * Maximum number of seconds that the receiver can be idle before we
 * assume it's dead and attempt to reset it by reprogramming the
 * multicast filter. This is part of a work-around for a bug in the
 * NIC. See fxp_stats_update().
 */
#define FXP_MAX_RX_IDLE	15

/*
 * Wait for the previous command to be accepted (but not necessarily
 * completed).  Spins on the SCB command byte for at most 10000 reads
 * and gives up silently if it never clears; there is no DELAY() here,
 * so the bound is CPU-speed dependent.
 */
static __inline void
fxp_scb_wait(sc)
	struct fxp_softc *sc;
{
	int i = 10000;

	while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i);
}

/*************************************************************
 * Operating system-specific autoconfiguration glue
 *************************************************************/

#if defined(__NetBSD__)

#ifdef __BROKEN_INDIRECT_CONFIG
static int fxp_match __P((struct device *, void *, void *));
#else
static int fxp_match __P((struct device *, struct cfdata *, void *));
#endif
static void fxp_attach __P((struct device *, struct device *, void *));

static void fxp_shutdown __P((void *));

/* Compensate for lack of a generic ether_ioctl() */
static int fxp_ether_ioctl __P((struct ifnet *,
				FXP_IOCTLCMD_TYPE, caddr_t));
#define	ether_ioctl	fxp_ether_ioctl

struct cfattach fxp_ca = {
	sizeof(struct fxp_softc), fxp_match, fxp_attach
};

struct cfdriver fxp_cd = {
	NULL, "fxp", DV_IFNET
};

/*
 * Check if a device is an 82557.
 * Returns nonzero if the PCI vendor/product IDs in the attach args
 * identify an Intel 82557; zero otherwise.
 */
static int
fxp_match(parent, match, aux)
	struct device *parent;
#ifdef __BROKEN_INDIRECT_CONFIG
	void *match;
#else
	struct cfdata *match;
#endif
	void *aux;
{
	struct pci_attach_args *pa = aux;

	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
		return (0);

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_INTEL_82557:
		return (1);
	}

	return (0);
}

/*
 * NetBSD attach: map the memory-mapped registers, hook up the
 * interrupt, do the OS-independent attach work, and register the
 * network interface.  Errors print a diagnostic and abandon the
 * device (no resources are torn down on partial failure).
 */
static void
fxp_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct fxp_softc *sc = (struct fxp_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	u_int8_t enaddr[6];
	struct ifnet *ifp;

	/*
	 * Map control/status registers.
	 */
	if (pci_mapreg_map(pa, FXP_PCI_MMBA, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st, &sc->sc_sh, NULL, NULL)) {
		printf(": can't map registers\n");
		return;
	}
	printf(": Intel EtherExpress Pro 10/100B Ethernet\n");

	/*
	 * Allocate our interrupt.
	 */
	if (pci_intr_map(pc, pa->pa_intrtag, pa->pa_intrpin,
	    pa->pa_intrline, &ih)) {
		printf("%s: couldn't map interrupt\n", sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, fxp_intr, sc);
	if (sc->sc_ih == NULL) {
		printf("%s: couldn't establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/* Do generic parts of attach. */
	if (fxp_attach_common(sc, enaddr)) {
		/* Failed! */
		return;
	}

	printf("%s: Ethernet address %s%s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr), sc->phy_10Mbps_only ? ", 10Mbps" : "");

	ifp = &sc->sc_ethercom.ec_if;
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = fxp_ioctl;
	ifp->if_start = fxp_start;
	ifp->if_watchdog = fxp_watchdog;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	/*
	 * Let the system queue as many packets as we have available
	 * TX descriptors.
	 */
	ifp->if_snd.ifq_maxlen = FXP_NTXCB - 1;
	ether_ifattach(ifp, enaddr);
#if NBPFILTER > 0
	bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
	    sizeof(struct ether_header));
#endif

	/*
	 * Add shutdown hook so that DMA is disabled prior to reboot. Not
	 * doing so could allow DMA to corrupt kernel memory during the
	 * reboot before the driver initializes.
	 */
	shutdownhook_establish(fxp_shutdown, sc);
}

/*
 * Device shutdown routine. Called at system shutdown after sync. The
 * main purpose of this routine is to shut off receiver DMA so that
 * kernel memory doesn't get clobbered during warmboot.
 */
static void
fxp_shutdown(sc)
	void *sc;
{
	fxp_stop((struct fxp_softc *) sc);
}

/*
 * Minimal replacement for the missing generic ether_ioctl():
 * handles only SIOCSIFADDR by marking the interface up,
 * (re)initializing the chip, and doing per-address-family setup.
 * All other commands return EINVAL.
 */
static int
fxp_ether_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	FXP_IOCTLCMD_TYPE cmd;
	caddr_t data;
{
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct fxp_softc *sc = ifp->if_softc;

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			fxp_init(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
#ifdef NS
		case AF_NS:
		    {
			register struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			if (ns_nullhost(*ina))
				ina->x_host = *(union ns_host *)
				    LLADDR(ifp->if_sadl);
			else
				bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
				    ifp->if_addrlen);
			/* Set new address.
 */
			fxp_init(sc);
			break;
		    }
#endif
		default:
			fxp_init(sc);
			break;
		}
		break;

	default:
		return (EINVAL);
	}

	return (0);
}

#else /* __FreeBSD__ */

/*
 * Return identification string if this device is ours.
 */
static int
fxp_probe(device_t dev)
{
	if ((pci_get_vendor(dev) == FXP_VENDORID_INTEL) &&
	    (pci_get_device(dev) == FXP_DEVICEID_i82557)) {
		device_set_desc(dev, "Intel EtherExpress Pro 10/100B Ethernet");
		return 0;
	}

	return ENXIO;
}

/*
 * FreeBSD (newbus) attach: enable bus mastering, map the memory BAR,
 * allocate and hook up the interrupt, run the OS-independent attach,
 * then register the network interface.  On failure all resources
 * acquired so far are released before returning an error.
 */
static int
fxp_attach(device_t dev)
{
	int error = 0;
	struct fxp_softc *sc = device_get_softc(dev);
	struct ifnet *ifp;
	int s;
	u_long val;
	int rid;

	callout_handle_init(&sc->stat_ch);

	s = splimp();

	/*
	 * Enable bus mastering.
	 */
	val = pci_read_config(dev, PCIR_COMMAND, 2);
	val |= (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
	pci_write_config(dev, PCIR_COMMAND, val, 2);

	/*
	 * Map control/status registers.
	 */
	rid = FXP_PCI_MMBA;
	sc->mem = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
	    0, ~0, 1, RF_ACTIVE);
	if (!sc->mem) {
		device_printf(dev, "could not map memory\n");
		error = ENXIO;
		goto fail;
	}
	sc->csr = rman_get_virtual(sc->mem);	/* XXX use bus_space */

	/*
	 * Allocate our interrupt.
	 */
	rid = 0;
	sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->irq == NULL) {
		device_printf(dev, "could not map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET,
	    fxp_intr, sc, &sc->ih);
	if (error) {
		device_printf(dev, "could not setup irq\n");
		goto fail;
	}

	/* Do generic parts of attach. */
	if (fxp_attach_common(sc, sc->arpcom.ac_enaddr)) {
		/* Failed! */
		bus_teardown_intr(dev, sc->irq, sc->ih);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
		bus_release_resource(dev, SYS_RES_MEMORY, FXP_PCI_MMBA, sc->mem);
		error = ENXIO;
		goto fail;
	}

	device_printf(dev, "Ethernet address %6D%s\n",
	    sc->arpcom.ac_enaddr, ":", sc->phy_10Mbps_only ? ", 10Mbps" : "");

	ifp = &sc->arpcom.ac_if;
	ifp->if_unit = device_get_unit(dev);
	ifp->if_name = "fxp";
	ifp->if_output = ether_output;
	ifp->if_baudrate = 100000000;
	ifp->if_init = fxp_init;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = fxp_ioctl;
	ifp->if_start = fxp_start;
	ifp->if_watchdog = fxp_watchdog;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	/*
	 * Let the system queue as many packets as we have available
	 * TX descriptors.
	 */
	ifp->if_snd.ifq_maxlen = FXP_NTXCB - 1;
	ether_ifattach(ifp);
#if NBPFILTER > 0
	bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
#endif

	splx(s);
	return 0;

fail:
	splx(s);
	return error;
}

/*
 * Detach interface: unhook it from the network stack, stop DMA,
 * release bus resources, and free driver-allocated memory.
 */
static int
fxp_detach(device_t dev)
{
	struct fxp_softc *sc = device_get_softc(dev);
	int s;

	s = splimp();

	/*
	 * Close down routes etc.
	 */
	if_detach(&sc->arpcom.ac_if);

	/*
	 * Stop DMA and drop transmit queue.
	 */
	fxp_stop(sc);

	/*
	 * Deallocate resources.
	 */
	bus_teardown_intr(dev, sc->irq, sc->ih);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
	bus_release_resource(dev, SYS_RES_MEMORY, FXP_PCI_MMBA, sc->mem);

	/*
	 * Free all the receive buffers.
	 */
	if (sc->rfa_headm != NULL)
		m_freem(sc->rfa_headm);

	/*
	 * Free all media structures.
	 */
	ifmedia_removeall(&sc->sc_media);

	/*
	 * Free ancillary structures.
	 */
	free(sc->cbl_base, M_DEVBUF);
	free(sc->fxp_stats, M_DEVBUF);
	free(sc->mcsp, M_DEVBUF);

	splx(s);

	return 0;
}

/*
 * Device shutdown routine. Called at system shutdown after sync. The
 * main purpose of this routine is to shut off receiver DMA so that
 * kernel memory doesn't get clobbered during warmboot.
 */
static int
fxp_shutdown(device_t dev)
{
	/*
	 * Make sure that DMA is disabled prior to reboot. Not doing
	 * so could allow DMA to corrupt kernel memory during the
	 * reboot before the driver initializes.
	 */
	fxp_stop((struct fxp_softc *) device_get_softc(dev));
	return 0;
}

static device_method_t fxp_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		fxp_probe),
	DEVMETHOD(device_attach,	fxp_attach),
	DEVMETHOD(device_detach,	fxp_detach),
	DEVMETHOD(device_shutdown,	fxp_shutdown),

	{ 0, 0 }
};

static driver_t fxp_driver = {
	"fxp",
	fxp_methods,
	sizeof(struct fxp_softc),
};

static devclass_t fxp_devclass;

DRIVER_MODULE(fxp, pci, fxp_driver, fxp_devclass, 0, 0);

#endif /* __NetBSD__ */

/*************************************************************
 * End of operating system-specific autoconfiguration glue
 *************************************************************/

/*
 * Do generic parts of attach: reset the chip, allocate the TxCB
 * list, statistics block, multicast setup block and receive buffers,
 * read the PHY information and MAC address from the EEPROM, and set
 * up the ifmedia tables.  Returns 0 on success, ENOMEM on allocation
 * failure (with everything allocated so far freed).  The Ethernet
 * address is written into enaddr (6 bytes).
 */
static int
fxp_attach_common(sc, enaddr)
	struct fxp_softc *sc;
	u_int8_t *enaddr;
{
	u_int16_t data;
	int i, nmedia, defmedia;
	const int *media;

	/*
	 * Reset to a stable state.
 */
	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
	DELAY(10);

	sc->cbl_base = malloc(sizeof(struct fxp_cb_tx) * FXP_NTXCB,
	    M_DEVBUF, M_NOWAIT);
	if (sc->cbl_base == NULL)
		goto fail;
	bzero(sc->cbl_base, sizeof(struct fxp_cb_tx) * FXP_NTXCB);

	sc->fxp_stats = malloc(sizeof(struct fxp_stats), M_DEVBUF, M_NOWAIT);
	if (sc->fxp_stats == NULL)
		goto fail;
	bzero(sc->fxp_stats, sizeof(struct fxp_stats));

	sc->mcsp = malloc(sizeof(struct fxp_cb_mcs), M_DEVBUF, M_NOWAIT);
	if (sc->mcsp == NULL)
		goto fail;

	/*
	 * Pre-allocate our receive buffers.
	 */
	for (i = 0; i < FXP_NRFABUFS; i++) {
		if (fxp_add_rfabuf(sc, NULL) != 0) {
			goto fail;
		}
	}

	/*
	 * Get info about the primary PHY from EEPROM word 6:
	 * low byte is the PHY address, bits 8-13 the device type,
	 * and bit 15 flags a 10Mbps-only PHY.
	 */
	fxp_read_eeprom(sc, (u_int16_t *)&data, 6, 1);
	sc->phy_primary_addr = data & 0xff;
	sc->phy_primary_device = (data >> 8) & 0x3f;
	sc->phy_10Mbps_only = data >> 15;

	/*
	 * Read MAC address (EEPROM words 0-2).
	 */
	fxp_read_eeprom(sc, (u_int16_t *)enaddr, 0, 3);

	/*
	 * Initialize the media structures.  Start from the catch-all
	 * default table, then override with the table matching the
	 * primary PHY, if any.
	 */

	media = fxp_media_default;
	nmedia = sizeof(fxp_media_default) / sizeof(fxp_media_default[0]);
	defmedia = FXP_MEDIA_DEFAULT_DEFMEDIA;

	for (i = 0; i < NFXPMEDIA; i++) {
		if (sc->phy_primary_device == fxp_media[i].fsm_phy) {
			media = fxp_media[i].fsm_media;
			nmedia = fxp_media[i].fsm_nmedia;
			defmedia = fxp_media[i].fsm_defmedia;
		}
	}

	ifmedia_init(&sc->sc_media, 0, fxp_mediachange, fxp_mediastatus);
	for (i = 0; i < nmedia; i++) {
		/* Skip 100Mbps media on a 10Mbps-only PHY. */
		if (IFM_SUBTYPE(media[i]) == IFM_100_TX && sc->phy_10Mbps_only)
			continue;
		ifmedia_add(&sc->sc_media, media[i], 0, NULL);
	}
	ifmedia_set(&sc->sc_media, defmedia);

	return (0);

 fail:
	printf(FXP_FORMAT ": Failed to malloc memory\n", FXP_ARGS(sc));
	if (sc->cbl_base)
		free(sc->cbl_base, M_DEVBUF);
	if (sc->fxp_stats)
		free(sc->fxp_stats, M_DEVBUF);
	if (sc->mcsp)
		free(sc->mcsp, M_DEVBUF);
	/* frees entire chain */
	if (sc->rfa_headm)
		m_freem(sc->rfa_headm);

	return (ENOMEM);
}

/*
 * Read from the serial EEPROM. Basically, you manually shift in
 * the read opcode (one bit at a time) and then shift in the address,
 * and then you shift out the data (all of this one bit at a time).
 * The word size is 16 bits, so you have to provide the address for
 * every 16 bits of data.
 */
static void
fxp_read_eeprom(sc, data, offset, words)
	struct fxp_softc *sc;
	u_short *data;
	int offset;
	int words;
{
	u_int16_t reg;
	int i, x;

	for (i = 0; i < words; i++) {
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
		/*
		 * Shift in read opcode.
		 * Each bit is clocked by raising then lowering EESK
		 * with the bit value on EEDI.
		 */
		for (x = 3; x > 0; x--) {
			if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) {
				reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
			} else {
				reg = FXP_EEPROM_EECS;
			}
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
			    reg | FXP_EEPROM_EESK);
			DELAY(1);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			DELAY(1);
		}
		/*
		 * Shift in address (6 bits, MSB first).
		 */
		for (x = 6; x > 0; x--) {
			if ((i + offset) & (1 << (x - 1))) {
				reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
			} else {
				reg = FXP_EEPROM_EECS;
			}
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
			    reg | FXP_EEPROM_EESK);
			DELAY(1);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			DELAY(1);
		}
		reg = FXP_EEPROM_EECS;
		data[i] = 0;
		/*
		 * Shift out data (16 bits, MSB first) on EEDO.
		 */
		for (x = 16; x > 0; x--) {
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
			    reg | FXP_EEPROM_EESK);
			DELAY(1);
			if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) &
			    FXP_EEPROM_EEDO)
				data[i] |= (1 << (x - 1));
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			DELAY(1);
		}
		/* Deassert chip select between words. */
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
		DELAY(1);
	}
}

/*
 * Start packet transmission on the interface.  Dequeues packets from
 * the interface send queue, maps each mbuf chain into the next free
 * TxCB's buffer descriptors (recopying the chain into a single mbuf
 * if it has more than FXP_NTXSEG segments), links the TxCB onto the
 * command list, and finally kicks the command unit with CU_RESUME.
 */
static void
fxp_start(ifp)
	struct ifnet *ifp;
{
	struct fxp_softc *sc = ifp->if_softc;
	struct fxp_cb_tx *txp;

	/*
	 * See if we need to suspend xmit until the multicast filter
	 * has been reprogrammed (which can only be done at the head
	 * of the command chain).
	 */
	if (sc->need_mcsetup)
		return;

	txp = NULL;

	/*
	 * We're finished if there is nothing more to add to the list or if
	 * we're all filled up with buffers to transmit.
	 * NOTE: One TxCB is reserved to guarantee that fxp_mc_setup() can add
	 * a NOP command when needed.
	 */
	while (ifp->if_snd.ifq_head != NULL && sc->tx_queued < FXP_NTXCB - 1) {
		struct mbuf *m, *mb_head;
		int segment;

		/*
		 * Grab a packet to transmit.
		 */
		IF_DEQUEUE(&ifp->if_snd, mb_head);

		/*
		 * Get pointer to next available tx desc.
		 */
		txp = sc->cbl_last->next;

		/*
		 * Go through each of the mbufs in the chain and initialize
		 * the transmit buffer descriptors with the physical address
		 * and size of the mbuf.
		 */
tbdinit:
		for (m = mb_head, segment = 0; m != NULL; m = m->m_next) {
			if (m->m_len != 0) {
				if (segment == FXP_NTXSEG)
					break;
				txp->tbd[segment].tb_addr =
				    vtophys(mtod(m, vm_offset_t));
				txp->tbd[segment].tb_size = m->m_len;
				segment++;
			}
		}
		if (m != NULL) {
			struct mbuf *mn;

			/*
			 * We ran out of segments. We have to recopy this mbuf
			 * chain first. Bail out if we can't get the new buffers.
			 */
			MGETHDR(mn, M_DONTWAIT, MT_DATA);
			if (mn == NULL) {
				m_freem(mb_head);
				break;
			}
			if (mb_head->m_pkthdr.len > MHLEN) {
				MCLGET(mn, M_DONTWAIT);
				if ((mn->m_flags & M_EXT) == 0) {
					m_freem(mn);
					m_freem(mb_head);
					break;
				}
			}
			m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
			    mtod(mn, caddr_t));
			mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
			m_freem(mb_head);
			mb_head = mn;
			goto tbdinit;
		}

		txp->tbd_number = segment;
		txp->mb_head = mb_head;
		txp->cb_status = 0;
		if (sc->tx_queued != FXP_CXINT_THRESH - 1) {
			txp->cb_command =
			    FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF | FXP_CB_COMMAND_S;
		} else {
			/*
			 * Request a completion interrupt (I bit) on this
			 * TxCB so the attached buffers get collected.
			 */
			txp->cb_command =
			    FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I;
			/*
			 * Set a 5 second timer just in case we don't hear from the
			 * card again.
			 */
			ifp->if_timer = 5;
		}
		txp->tx_threshold = tx_threshold;

		/*
		 * Advance the end of list forward.
		 */
		sc->cbl_last->cb_command &= ~FXP_CB_COMMAND_S;
		sc->cbl_last = txp;

		/*
		 * Advance the beginning of the list forward if there are
		 * no other packets queued (when nothing is queued, cbl_first
		 * sits on the last TxCB that was sent out).
		 */
		if (sc->tx_queued == 0)
			sc->cbl_first = txp;

		sc->tx_queued++;

#if NBPFILTER > 0
		/*
		 * Pass packet to bpf if there is a listener.
		 */
		if (ifp->if_bpf)
			bpf_mtap(FXP_BPFTAP_ARG(ifp), mb_head);
#endif
	}

	/*
	 * We're finished. If we added to the list, issue a RESUME to get DMA
	 * going again if suspended.
	 */
	if (txp != NULL) {
		fxp_scb_wait(sc);
		CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_RESUME);
	}
}

/*
 * Process interface interrupts: acknowledge all pending status bits,
 * collect completed transmit buffers, and drain received frames.
 * On NetBSD the return value indicates whether the interrupt was
 * claimed by this device (for shared interrupt lines).
 */
static FXP_INTR_TYPE
fxp_intr(arg)
	void *arg;
{
	struct fxp_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_if;
	u_int8_t statack;
#if defined(__NetBSD__)
	int claimed = 0;
#endif

	while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) {
#if defined(__NetBSD__)
		claimed = 1;
#endif
		/*
		 * First ACK all the interrupts in this pass.
		 */
		CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack);

		/*
		 * Free any finished transmit mbuf chains.
		 */
		if (statack & FXP_SCB_STATACK_CXTNO) {
			struct fxp_cb_tx *txp;

			for (txp = sc->cbl_first; sc->tx_queued &&
			    (txp->cb_status & FXP_CB_STATUS_C) != 0;
			    txp = txp->next) {
				if (txp->mb_head != NULL) {
					m_freem(txp->mb_head);
					txp->mb_head = NULL;
				}
				sc->tx_queued--;
			}
			sc->cbl_first = txp;
			ifp->if_timer = 0;
			if (sc->tx_queued == 0) {
				/* Chain is idle; safe to reprogram the filter. */
				if (sc->need_mcsetup)
					fxp_mc_setup(sc);
			}
			/*
			 * Try to start more packets transmitting.
			 */
			if (ifp->if_snd.ifq_head != NULL)
				fxp_start(ifp);
		}
		/*
		 * Process receiver interrupts. If a no-resource (RNR)
		 * condition exists, get whatever packets we can and
		 * re-start the receiver.
		 */
		if (statack & (FXP_SCB_STATACK_FR | FXP_SCB_STATACK_RNR)) {
			struct mbuf *m;
			struct fxp_rfa *rfa;
rcvloop:
			m = sc->rfa_headm;
			/* RFA sits RFA_ALIGNMENT_FUDGE bytes into the cluster. */
			rfa = (struct fxp_rfa *)(m->m_ext.ext_buf +
			    RFA_ALIGNMENT_FUDGE);

			if (rfa->rfa_status & FXP_RFA_STATUS_C) {
				/*
				 * Remove first packet from the chain.
				 */
				sc->rfa_headm = m->m_next;
				m->m_next = NULL;

				/*
				 * Add a new buffer to the receive chain.
				 * If this fails, the old buffer is recycled
				 * instead.
				 */
				if (fxp_add_rfabuf(sc, m) == 0) {
					struct ether_header *eh;
					u_int16_t total_len;

					total_len = rfa->actual_size &
					    (MCLBYTES - 1);
					if (total_len <
					    sizeof(struct ether_header)) {
						/* Runt frame; discard it. */
						m_freem(m);
						goto rcvloop;
					}
					m->m_pkthdr.rcvif = ifp;
					m->m_pkthdr.len = m->m_len =
					    total_len;
					eh = mtod(m, struct ether_header *);
#if NBPFILTER > 0
					if (ifp->if_bpf)
						bpf_tap(FXP_BPFTAP_ARG(ifp),
						    mtod(m, caddr_t),
						    total_len);
#endif /* NBPFILTER > 0 */
#ifdef BRIDGE
					if (do_bridge) {
						struct ifnet *bdg_ifp;
						bdg_ifp = bridge_in(m);
						if (bdg_ifp == BDG_DROP)
							goto dropit;
						if (bdg_ifp != BDG_LOCAL)
							bdg_forward(&m, bdg_ifp);
						if (bdg_ifp != BDG_LOCAL &&
						    bdg_ifp != BDG_BCAST &&
						    bdg_ifp != BDG_MCAST)
							goto dropit;
						goto getit;
					}
#endif
					/*
					 * Only pass this packet up
					 * if it is for us.
					 */
					if ((ifp->if_flags &
					    IFF_PROMISC) &&
					    (rfa->rfa_status &
					    FXP_RFA_STATUS_IAMATCH) &&
					    (eh->ether_dhost[0] & 1)
					    == 0) {
#ifdef BRIDGE
dropit:
#endif
						if (m)
							m_freem(m);
						goto rcvloop;
					}
#ifdef BRIDGE
getit:
#endif
					/* Strip the Ethernet header. */
					m->m_data +=
					    sizeof(struct ether_header);
					m->m_len -=
					    sizeof(struct ether_header);
					m->m_pkthdr.len = m->m_len;
					ether_input(ifp, eh, m);
				}
				goto rcvloop;
			}
			if (statack & FXP_SCB_STATACK_RNR) {
				/* Restart the receive unit at the list head. */
				fxp_scb_wait(sc);
				CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
				    vtophys(sc->rfa_headm->m_ext.ext_buf) +
				    RFA_ALIGNMENT_FUDGE);
				CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND,
				    FXP_SCB_COMMAND_RU_START);
			}
		}
	}
#if defined(__NetBSD__)
	return (claimed);
#endif
}

/*
 * Update packet in/out/collision statistics. The i82557 doesn't
 * allow you to access these counters without doing a fairly
 * expensive DMA to get _all_ of the statistics it maintains, so
 * we do this operation here only once per second. The statistics
 * counters in the kernel are updated from the previous dump-stats
 * DMA and then a new dump-stats DMA is started. The on-chip
 * counters are zeroed when the DMA completes. If we can't start
 * the DMA immediately, we don't wait - we just prepare to read
 * them again next time.
 */
static void
fxp_stats_update(arg)
	void *arg;
{
	struct fxp_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_if;
	struct fxp_stats *sp = sc->fxp_stats;
	struct fxp_cb_tx *txp;
	int s;

	ifp->if_opackets += sp->tx_good;
	ifp->if_collisions += sp->tx_total_collisions;
	if (sp->rx_good) {
		ifp->if_ipackets += sp->rx_good;
		sc->rx_idle_secs = 0;
	} else {
		/*
		 * Receiver's been idle for another second.
1194 */ 1195 sc->rx_idle_secs++; 1196 } 1197 ifp->if_ierrors += 1198 sp->rx_crc_errors + 1199 sp->rx_alignment_errors + 1200 sp->rx_rnr_errors + 1201 sp->rx_overrun_errors; 1202 /* 1203 * If any transmit underruns occured, bump up the transmit 1204 * threshold by another 512 bytes (64 * 8). 1205 */ 1206 if (sp->tx_underruns) { 1207 ifp->if_oerrors += sp->tx_underruns; 1208 if (tx_threshold < 192) 1209 tx_threshold += 64; 1210 } 1211 s = splimp(); 1212 /* 1213 * Release any xmit buffers that have completed DMA. This isn't 1214 * strictly necessary to do here, but it's advantagous for mbufs 1215 * with external storage to be released in a timely manner rather 1216 * than being defered for a potentially long time. This limits 1217 * the delay to a maximum of one second. 1218 */ 1219 for (txp = sc->cbl_first; sc->tx_queued && 1220 (txp->cb_status & FXP_CB_STATUS_C) != 0; 1221 txp = txp->next) { 1222 if (txp->mb_head != NULL) { 1223 m_freem(txp->mb_head); 1224 txp->mb_head = NULL; 1225 } 1226 sc->tx_queued--; 1227 } 1228 sc->cbl_first = txp; 1229 /* 1230 * If we haven't received any packets in FXP_MAC_RX_IDLE seconds, 1231 * then assume the receiver has locked up and attempt to clear 1232 * the condition by reprogramming the multicast filter. This is 1233 * a work-around for a bug in the 82557 where the receiver locks 1234 * up if it gets certain types of garbage in the syncronization 1235 * bits prior to the packet header. This bug is supposed to only 1236 * occur in 10Mbps mode, but has been seen to occur in 100Mbps 1237 * mode as well (perhaps due to a 10/100 speed transition). 1238 */ 1239 if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) { 1240 sc->rx_idle_secs = 0; 1241 fxp_mc_setup(sc); 1242 } 1243 /* 1244 * If there is no pending command, start another stats 1245 * dump. Otherwise punt for now. 1246 */ 1247 if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) { 1248 /* 1249 * Start another stats dump. 
1250 */ 1251 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, 1252 FXP_SCB_COMMAND_CU_DUMPRESET); 1253 } else { 1254 /* 1255 * A previous command is still waiting to be accepted. 1256 * Just zero our copy of the stats and wait for the 1257 * next timer event to update them. 1258 */ 1259 sp->tx_good = 0; 1260 sp->tx_underruns = 0; 1261 sp->tx_total_collisions = 0; 1262 1263 sp->rx_good = 0; 1264 sp->rx_crc_errors = 0; 1265 sp->rx_alignment_errors = 0; 1266 sp->rx_rnr_errors = 0; 1267 sp->rx_overrun_errors = 0; 1268 } 1269 splx(s); 1270 /* 1271 * Schedule another timeout one second from now. 1272 */ 1273 sc->stat_ch = timeout(fxp_stats_update, sc, hz); 1274 } 1275 1276 /* 1277 * Stop the interface. Cancels the statistics updater and resets 1278 * the interface. 1279 */ 1280 static void 1281 fxp_stop(sc) 1282 struct fxp_softc *sc; 1283 { 1284 struct ifnet *ifp = &sc->sc_if; 1285 struct fxp_cb_tx *txp; 1286 int i; 1287 1288 /* 1289 * Cancel stats updater. 1290 */ 1291 untimeout(fxp_stats_update, sc, sc->stat_ch); 1292 1293 /* 1294 * Issue software reset 1295 */ 1296 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET); 1297 DELAY(10); 1298 1299 /* 1300 * Release any xmit buffers. 1301 */ 1302 txp = sc->cbl_base; 1303 if (txp != NULL) { 1304 for (i = 0; i < FXP_NTXCB; i++) { 1305 if (txp[i].mb_head != NULL) { 1306 m_freem(txp[i].mb_head); 1307 txp[i].mb_head = NULL; 1308 } 1309 } 1310 } 1311 sc->tx_queued = 0; 1312 1313 /* 1314 * Free all the receive buffers then reallocate/reinitialize 1315 */ 1316 if (sc->rfa_headm != NULL) 1317 m_freem(sc->rfa_headm); 1318 sc->rfa_headm = NULL; 1319 sc->rfa_tailm = NULL; 1320 for (i = 0; i < FXP_NRFABUFS; i++) { 1321 if (fxp_add_rfabuf(sc, NULL) != 0) { 1322 /* 1323 * This "can't happen" - we're at splimp() 1324 * and we just freed all the buffers we need 1325 * above. 
			 */
			panic("fxp_stop: no buffers!");
		}
	}

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/*
 * Watchdog/transmission transmit timeout handler. Called when a
 * transmission is started on the interface, but no interrupt is
 * received before the timeout. This usually indicates that the
 * card has wedged for some reason.
 *
 * Recovery is a full re-init of the interface (fxp_init below).
 */
static void
fxp_watchdog(ifp)
	struct ifnet *ifp;
{
	struct fxp_softc *sc = ifp->if_softc;

	printf(FXP_FORMAT ": device timeout\n", FXP_ARGS(sc));
	ifp->if_oerrors++;

	fxp_init(sc);
}

/*
 * (Re)initialize the interface: stop pending I/O, program the
 * configure/IAS commands, rebuild the TxCB ring and RFA, set the
 * media, mark the interface running, and start the stats updater.
 * xsc is the softc (void * so this can also serve as an if_init hook).
 */
static void
fxp_init(xsc)
	void *xsc;
{
	struct fxp_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_if;
	struct fxp_cb_config *cbp;
	struct fxp_cb_ias *cb_ias;
	struct fxp_cb_tx *txp;
	int i, s, prm;

	s = splimp();
	/*
	 * Cancel any pending I/O
	 */
	fxp_stop(sc);

	prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0;

	/*
	 * Initialize base of CBL and RFA memory. Loading with zero
	 * sets it up for regular linear addressing.
	 */
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_BASE);

	fxp_scb_wait(sc);
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_RU_BASE);

	/*
	 * Initialize base of dump-stats buffer.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(sc->fxp_stats));
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_DUMP_ADR);

	/*
	 * We temporarily use memory that contains the TxCB list to
	 * construct the config CB. The TxCB list memory is rebuilt
	 * later.
	 */
	cbp = (struct fxp_cb_config *) sc->cbl_base;

	/*
	 * This bcopy is kind of disgusting, but there are a bunch of must be
	 * zero and must be one bits in this structure and this is the easiest
	 * way to initialize them all to proper values.
	 */
	bcopy(fxp_cb_config_template, (volatile void *)&cbp->cb_status,
	    sizeof(fxp_cb_config_template));

	cbp->cb_status =	0;
	cbp->cb_command =	FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL;
	cbp->link_addr =	-1;	/* (no) next command */
	cbp->byte_count =	22;	/* (22) bytes to config */
	cbp->rx_fifo_limit =	8;	/* rx fifo threshold (32 bytes) */
	cbp->tx_fifo_limit =	0;	/* tx fifo threshold (0 bytes) */
	cbp->adaptive_ifs =	0;	/* (no) adaptive interframe spacing */
	cbp->rx_dma_bytecount =	0;	/* (no) rx DMA max */
	cbp->tx_dma_bytecount =	0;	/* (no) tx DMA max */
	cbp->dma_bce =		0;	/* (disable) dma max counters */
	cbp->late_scb =		0;	/* (don't) defer SCB update */
	cbp->tno_int =		0;	/* (disable) tx not okay interrupt */
	cbp->ci_int =		1;	/* interrupt on CU idle */
	cbp->save_bf =		prm;	/* save bad frames */
	cbp->disc_short_rx =	!prm;	/* discard short packets */
	cbp->underrun_retry =	1;	/* retry mode (1) on DMA underrun */
	cbp->mediatype =	!sc->phy_10Mbps_only; /* interface mode */
	cbp->nsai =		1;	/* (don't) disable source addr insert */
	cbp->preamble_length =	2;	/* (7 byte) preamble */
	cbp->loopback =		0;	/* (don't) loopback */
	cbp->linear_priority =	0;	/* (normal CSMA/CD operation) */
	cbp->linear_pri_mode =	0;	/* (wait after xmit only) */
	cbp->interfrm_spacing =	6;	/* (96 bits of) interframe spacing */
	cbp->promiscuous =	prm;	/* promiscuous mode */
	cbp->bcast_disable =	0;	/* (don't) disable broadcasts */
	cbp->crscdt =		0;	/* (CRS only) */
	cbp->stripping =	!prm;	/* truncate rx packet to byte count */
	cbp->padding =		1;	/* (do) pad short tx packets */
	cbp->rcv_crc_xfer =	0;	/* (don't) xfer CRC to host */
	cbp->force_fdx =	0;	/* (don't) force full duplex */
	cbp->fdx_pin_en =	1;	/* (enable) FDX# pin */
	cbp->multi_ia =		0;	/* (don't) accept multiple IAs */
	cbp->mc_all =		sc->all_mcasts;/* accept all multicasts */

	/*
	 * Start the config command/DMA.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(&cbp->cb_status));
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);
	/*
	 * ...and wait for it to complete.
	 * NOTE(review): unbounded busy-wait with no DELAY() or timeout;
	 * a wedged chip would hang the system here at splimp.  Consider
	 * bounding the spin as fxp_mdi_read() does.
	 */
	while (!(cbp->cb_status & FXP_CB_STATUS_C));

	/*
	 * Now initialize the station address. Temporarily use the TxCB
	 * memory area like we did above for the config CB.
	 */
	cb_ias = (struct fxp_cb_ias *) sc->cbl_base;
	cb_ias->cb_status = 0;
	cb_ias->cb_command = FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL;
	cb_ias->link_addr = -1;
#if defined(__NetBSD__)
	bcopy(LLADDR(ifp->if_sadl), (void *)cb_ias->macaddr, 6);
#else
	bcopy(sc->arpcom.ac_enaddr, (volatile void *)cb_ias->macaddr,
	    sizeof(sc->arpcom.ac_enaddr));
#endif /* __NetBSD__ */

	/*
	 * Start the IAS (Individual Address Setup) command/DMA.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);
	/* ...and wait for it to complete (same unbounded spin as above). */
	while (!(cb_ias->cb_status & FXP_CB_STATUS_C));

	/*
	 * Initialize transmit control block (TxCB) list.
	 * Each TxCB is linked (both by physical address for the chip and
	 * by the 'next' software pointer) to its successor, modulo
	 * FXP_TXCB_MASK, forming a ring.
	 */

	txp = sc->cbl_base;
	bzero(txp, sizeof(struct fxp_cb_tx) * FXP_NTXCB);
	for (i = 0; i < FXP_NTXCB; i++) {
		txp[i].cb_status = FXP_CB_STATUS_C | FXP_CB_STATUS_OK;
		txp[i].cb_command = FXP_CB_COMMAND_NOP;
		txp[i].link_addr = vtophys(&txp[(i + 1) & FXP_TXCB_MASK].cb_status);
		txp[i].tbd_array_addr = vtophys(&txp[i].tbd[0]);
		txp[i].next = &txp[(i + 1) & FXP_TXCB_MASK];
	}
	/*
	 * Set the suspend flag on the first TxCB and start the control
	 * unit. It will execute the NOP and then suspend.
	 */
	txp->cb_command = FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S;
	sc->cbl_first = sc->cbl_last = txp;
	sc->tx_queued = 1;

	fxp_scb_wait(sc);
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);

	/*
	 * Initialize receiver buffer area - RFA.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
	    vtophys(sc->rfa_headm->m_ext.ext_buf) + RFA_ALIGNMENT_FUDGE);
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_RU_START);

	/*
	 * Set current media.
	 */
	fxp_set_media(sc, sc->sc_media.ifm_cur->ifm_media);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	splx(s);

	/*
	 * Start stats updater.
	 */
	sc->stat_ch = timeout(fxp_stats_update, sc, hz);
}

/*
 * Program the PHY for the given ifmedia word, dispatching on the
 * primary PHY device type.
 */
static void
fxp_set_media(sc, media)
	struct fxp_softc *sc;
	int media;
{

	switch (sc->phy_primary_device) {
	case FXP_PHY_DP83840:
	case FXP_PHY_DP83840A:
		fxp_mdi_write(sc, sc->phy_primary_addr, FXP_DP83840_PCR,
		    fxp_mdi_read(sc, sc->phy_primary_addr, FXP_DP83840_PCR) |
		    FXP_DP83840_PCR_LED4_MODE |	/* LED4 always indicates duplex */
		    FXP_DP83840_PCR_F_CONNECT |	/* force link disconnect bypass */
		    FXP_DP83840_PCR_BIT10);	/* XXX I have no idea */
		/* fall through */
	case FXP_PHY_82553A:
	case FXP_PHY_82553C: /* untested */
	case FXP_PHY_82555:
	case FXP_PHY_82555B:
		if (IFM_SUBTYPE(media) != IFM_AUTO) {
			int flags;

			/* Build BMCR speed/duplex bits from the media word. */
			flags = (IFM_SUBTYPE(media) == IFM_100_TX) ?
			    FXP_PHY_BMCR_SPEED_100M : 0;
			flags |= (media & IFM_FDX) ?
			    FXP_PHY_BMCR_FULLDUPLEX : 0;
			/*
			 * Manual mode: clear autoneg/speed/duplex in the
			 * BMCR and set the requested speed/duplex bits.
			 */
			fxp_mdi_write(sc, sc->phy_primary_addr,
			    FXP_PHY_BMCR,
			    (fxp_mdi_read(sc, sc->phy_primary_addr,
			    FXP_PHY_BMCR) &
			    ~(FXP_PHY_BMCR_AUTOEN | FXP_PHY_BMCR_SPEED_100M |
			    FXP_PHY_BMCR_FULLDUPLEX)) | flags);
		} else {
			/* Auto mode: just (re)enable autonegotiation. */
			fxp_mdi_write(sc, sc->phy_primary_addr,
			    FXP_PHY_BMCR,
			    (fxp_mdi_read(sc, sc->phy_primary_addr,
			    FXP_PHY_BMCR) | FXP_PHY_BMCR_AUTOEN));
		}
		break;
	/*
	 * The Seeq 80c24 doesn't have a PHY programming interface, so do
	 * nothing.
	 */
	case FXP_PHY_80C24:
		break;
	default:
		printf(FXP_FORMAT
		    ": warning: unsupported PHY, type = %d, addr = %d\n",
		     FXP_ARGS(sc), sc->phy_primary_device,
		     sc->phy_primary_addr);
	}
}

/*
 * Change media according to request.
 * ifmedia callback; returns EINVAL for non-Ethernet media words.
 */
int
fxp_mediachange(ifp)
	struct ifnet *ifp;
{
	struct fxp_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	fxp_set_media(sc, ifm->ifm_media);
	return (0);
}

/*
 * Notify the world which media we're using.
 * Fills in ifmr->ifm_status/ifm_active from the PHY registers.
 */
void
fxp_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct fxp_softc *sc = ifp->if_softc;
	int flags, stsflags;

	switch (sc->phy_primary_device) {
	case FXP_PHY_82555:
	case FXP_PHY_82555B:
	case FXP_PHY_DP83840:
	case FXP_PHY_DP83840A:
		ifmr->ifm_status = IFM_AVALID; /* IFM_ACTIVE will be valid */
		ifmr->ifm_active = IFM_ETHER;
		/*
		 * the following is not an error.
		 * You need to read this register twice to get current
		 * status. This is correct documented behaviour, the
		 * first read gets latched values.
		 */
		stsflags = fxp_mdi_read(sc, sc->phy_primary_addr, FXP_PHY_STS);
		stsflags = fxp_mdi_read(sc, sc->phy_primary_addr, FXP_PHY_STS);
		if (stsflags & FXP_PHY_STS_LINK_STS)
			ifmr->ifm_status |= IFM_ACTIVE;

		/*
		 * If we are in auto mode, then try report the result.
		 */
		flags = fxp_mdi_read(sc, sc->phy_primary_addr, FXP_PHY_BMCR);
		if (flags & FXP_PHY_BMCR_AUTOEN) {
			ifmr->ifm_active |= IFM_AUTO; /* XXX presently 0 */
			if (stsflags & FXP_PHY_STS_AUTO_DONE) {
				/*
				 * Intel and National parts report
				 * differently on what they found.
				 */
				if ((sc->phy_primary_device == FXP_PHY_82555)
				    || (sc->phy_primary_device == FXP_PHY_82555B)) {
					flags = fxp_mdi_read(sc,
					    sc->phy_primary_addr,
					    FXP_PHY_USC);

					if (flags & FXP_PHY_USC_SPEED)
						ifmr->ifm_active |= IFM_100_TX;
					else
						ifmr->ifm_active |= IFM_10_T;

					if (flags & FXP_PHY_USC_DUPLEX)
						ifmr->ifm_active |= IFM_FDX;
				} else { /* it's National. only know speed */
					flags = fxp_mdi_read(sc,
					    sc->phy_primary_addr,
					    FXP_DP83840_PAR);

					if (flags & FXP_DP83840_PAR_SPEED_10)
						ifmr->ifm_active |= IFM_10_T;
					else
						ifmr->ifm_active |= IFM_100_TX;
				}
			}
		} else { /* in manual mode.. just report what we were set to */
			if (flags & FXP_PHY_BMCR_SPEED_100M)
				ifmr->ifm_active |= IFM_100_TX;
			else
				ifmr->ifm_active |= IFM_10_T;

			if (flags & FXP_PHY_BMCR_FULLDUPLEX)
				ifmr->ifm_active |= IFM_FDX;
		}
		break;

	case FXP_PHY_80C24:
	default:
		ifmr->ifm_active = IFM_ETHER|IFM_MANUAL; /* XXX IFM_AUTO ? */
	}
}

/*
 * Add a buffer to the end of the RFA buffer list.
 * Return 0 if successful, 1 for failure. A failure results in
 * adding the 'oldm' (if non-NULL) on to the end of the list -
 * tossing out its old contents and recycling it.
 * The RFA struct is stuck at the beginning of mbuf cluster and the
 * data pointer is fixed up to point just past it.
 */
static int
fxp_add_rfabuf(sc, oldm)
	struct fxp_softc *sc;
	struct mbuf *oldm;
{
	u_int32_t v;
	struct mbuf *m;
	struct fxp_rfa *rfa, *p_rfa;

	/* Try to allocate a fresh mbuf + cluster; fall back to oldm. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			if (oldm == NULL)
				return 1;
			m = oldm;
			m->m_data = m->m_ext.ext_buf;
		}
	} else {
		if (oldm == NULL)
			return 1;
		m = oldm;
		m->m_data = m->m_ext.ext_buf;
	}

	/*
	 * Move the data pointer up so that the incoming data packet
	 * will be 32-bit aligned.
	 */
	m->m_data += RFA_ALIGNMENT_FUDGE;

	/*
	 * Get a pointer to the base of the mbuf cluster and move
	 * data start past it.
	 */
	rfa = mtod(m, struct fxp_rfa *);
	m->m_data += sizeof(struct fxp_rfa);
	rfa->size = MCLBYTES - sizeof(struct fxp_rfa) - RFA_ALIGNMENT_FUDGE;

	/*
	 * Initialize the rest of the RFA.  Note that since the RFA
	 * is misaligned, we cannot store values directly.  Instead,
	 * we use an optimized, inline copy.
	 */
	rfa->rfa_status = 0;
	rfa->rfa_control = FXP_RFA_CONTROL_EL;
	rfa->actual_size = 0;

	v = -1;
	fxp_lwcopy(&v, &rfa->link_addr);
	fxp_lwcopy(&v, &rfa->rbd_addr);

	/*
	 * If there are other buffers already on the list, attach this
	 * one to the end by fixing up the tail to point to this one.
1732 */ 1733 if (sc->rfa_headm != NULL) { 1734 p_rfa = (struct fxp_rfa *) (sc->rfa_tailm->m_ext.ext_buf + 1735 RFA_ALIGNMENT_FUDGE); 1736 sc->rfa_tailm->m_next = m; 1737 v = vtophys(rfa); 1738 fxp_lwcopy(&v, &p_rfa->link_addr); 1739 p_rfa->rfa_control &= ~FXP_RFA_CONTROL_EL; 1740 } else { 1741 sc->rfa_headm = m; 1742 } 1743 sc->rfa_tailm = m; 1744 1745 return (m == oldm); 1746 } 1747 1748 static volatile int 1749 fxp_mdi_read(sc, phy, reg) 1750 struct fxp_softc *sc; 1751 int phy; 1752 int reg; 1753 { 1754 int count = 10000; 1755 int value; 1756 1757 CSR_WRITE_4(sc, FXP_CSR_MDICONTROL, 1758 (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21)); 1759 1760 while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0 1761 && count--) 1762 DELAY(10); 1763 1764 if (count <= 0) 1765 printf(FXP_FORMAT ": fxp_mdi_read: timed out\n", 1766 FXP_ARGS(sc)); 1767 1768 return (value & 0xffff); 1769 } 1770 1771 static void 1772 fxp_mdi_write(sc, phy, reg, value) 1773 struct fxp_softc *sc; 1774 int phy; 1775 int reg; 1776 int value; 1777 { 1778 int count = 10000; 1779 1780 CSR_WRITE_4(sc, FXP_CSR_MDICONTROL, 1781 (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) | 1782 (value & 0xffff)); 1783 1784 while((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 && 1785 count--) 1786 DELAY(10); 1787 1788 if (count <= 0) 1789 printf(FXP_FORMAT ": fxp_mdi_write: timed out\n", 1790 FXP_ARGS(sc)); 1791 } 1792 1793 static int 1794 fxp_ioctl(ifp, command, data) 1795 struct ifnet *ifp; 1796 FXP_IOCTLCMD_TYPE command; 1797 caddr_t data; 1798 { 1799 struct fxp_softc *sc = ifp->if_softc; 1800 struct ifreq *ifr = (struct ifreq *)data; 1801 int s, error = 0; 1802 1803 s = splimp(); 1804 1805 switch (command) { 1806 1807 case SIOCSIFADDR: 1808 #if !defined(__NetBSD__) 1809 case SIOCGIFADDR: 1810 case SIOCSIFMTU: 1811 #endif 1812 error = ether_ioctl(ifp, command, data); 1813 break; 1814 1815 case SIOCSIFFLAGS: 1816 sc->all_mcasts = (ifp->if_flags & IFF_ALLMULTI) ? 
1 : 0; 1817 1818 /* 1819 * If interface is marked up and not running, then start it. 1820 * If it is marked down and running, stop it. 1821 * XXX If it's up then re-initialize it. This is so flags 1822 * such as IFF_PROMISC are handled. 1823 */ 1824 if (ifp->if_flags & IFF_UP) { 1825 fxp_init(sc); 1826 } else { 1827 if (ifp->if_flags & IFF_RUNNING) 1828 fxp_stop(sc); 1829 } 1830 break; 1831 1832 case SIOCADDMULTI: 1833 case SIOCDELMULTI: 1834 sc->all_mcasts = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0; 1835 #if defined(__NetBSD__) 1836 error = (command == SIOCADDMULTI) ? 1837 ether_addmulti(ifr, &sc->sc_ethercom) : 1838 ether_delmulti(ifr, &sc->sc_ethercom); 1839 1840 if (error == ENETRESET) { 1841 /* 1842 * Multicast list has changed; set the hardware 1843 * filter accordingly. 1844 */ 1845 if (!sc->all_mcasts) 1846 fxp_mc_setup(sc); 1847 /* 1848 * fxp_mc_setup() can turn on all_mcasts if we run 1849 * out of space, so check it again rather than else {}. 1850 */ 1851 if (sc->all_mcasts) 1852 fxp_init(sc); 1853 error = 0; 1854 } 1855 #else /* __FreeBSD__ */ 1856 /* 1857 * Multicast list has changed; set the hardware filter 1858 * accordingly. 1859 */ 1860 if (!sc->all_mcasts) 1861 fxp_mc_setup(sc); 1862 /* 1863 * fxp_mc_setup() can turn on sc->all_mcasts, so check it 1864 * again rather than else {}. 1865 */ 1866 if (sc->all_mcasts) 1867 fxp_init(sc); 1868 error = 0; 1869 #endif /* __NetBSD__ */ 1870 break; 1871 1872 case SIOCSIFMEDIA: 1873 case SIOCGIFMEDIA: 1874 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command); 1875 break; 1876 1877 default: 1878 error = EINVAL; 1879 } 1880 (void) splx(s); 1881 return (error); 1882 } 1883 1884 /* 1885 * Program the multicast filter. 1886 * 1887 * We have an artificial restriction that the multicast setup command 1888 * must be the first command in the chain, so we take steps to ensure 1889 * this. By requiring this, it allows us to keep up the performance of 1890 * the pre-initialized command ring (esp. 
 * link pointers) by not actually
 * inserting the mcsetup command in the ring - i.e. its link pointer
 * points to the TxCB ring, but the mcsetup descriptor itself is not part
 * of it. We then can do 'CU_START' on the mcsetup descriptor and have it
 * lead into the regular TxCB ring when it completes.
 *
 * This function must be called at splimp.
 */
static void
fxp_mc_setup(sc)
	struct fxp_softc *sc;
{
	struct fxp_cb_mcs *mcsp = sc->mcsp;
	struct ifnet *ifp = &sc->sc_if;
	struct ifmultiaddr *ifma;
	int nmcasts;

	/*
	 * If there are queued commands, we must wait until they are all
	 * completed. If we are already waiting, then add a NOP command
	 * with interrupt option so that we're notified when all commands
	 * have been completed - fxp_start() ensures that no additional
	 * TX commands will be added when need_mcsetup is true.
	 */
	if (sc->tx_queued) {
		struct fxp_cb_tx *txp;

		/*
		 * need_mcsetup will be true if we are already waiting for the
		 * NOP command to be completed (see below). In this case, bail.
		 */
		if (sc->need_mcsetup)
			return;
		sc->need_mcsetup = 1;

		/*
		 * Add a NOP command with interrupt so that we are notified when all
		 * TX commands have been processed.
		 */
		txp = sc->cbl_last->next;
		txp->mb_head = NULL;
		txp->cb_status = 0;
		txp->cb_command = FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I;
		/*
		 * Advance the end of list forward.
		 */
		sc->cbl_last->cb_command &= ~FXP_CB_COMMAND_S;
		sc->cbl_last = txp;
		sc->tx_queued++;
		/*
		 * Issue a resume in case the CU has just suspended.
		 */
		fxp_scb_wait(sc);
		CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_RESUME);
		/*
		 * Set a 5 second timer just in case we don't hear from the
		 * card again.
		 */
		ifp->if_timer = 5;

		return;
	}
	sc->need_mcsetup = 0;

	/*
	 * Initialize multicast setup descriptor.  Its link pointer leads
	 * into the regular TxCB ring (see the big comment above).
	 */
	mcsp->next = sc->cbl_base;
	mcsp->mb_head = NULL;
	mcsp->cb_status = 0;
	mcsp->cb_command = FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I;
	mcsp->link_addr = vtophys(&sc->cbl_base->cb_status);

	/*
	 * Copy each AF_LINK multicast address into the descriptor.  If
	 * the list overflows MAXMCADDR, fall back to all_mcasts mode
	 * with an empty filter (the caller re-inits in that case).
	 */
	nmcasts = 0;
	if (!sc->all_mcasts) {
		for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
		    ifma = ifma->ifma_link.le_next) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (nmcasts >= MAXMCADDR) {
				sc->all_mcasts = 1;
				nmcasts = 0;
				break;
			}
			bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
			    (volatile void *) &sc->mcsp->mc_addr[nmcasts][0], 6);
			nmcasts++;
		}
	}
	mcsp->mc_cnt = nmcasts * 6;	/* byte count, 6 bytes per address */
	sc->cbl_first = sc->cbl_last = (struct fxp_cb_tx *) mcsp;
	sc->tx_queued = 1;

	/*
	 * Wait until command unit is not active. This should never
	 * be the case when nothing is queued, but make sure anyway.
	 */
	while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) ==
	    FXP_SCB_CUS_ACTIVE) ;

	/*
	 * Start the multicast setup command.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(&mcsp->cb_status));
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);

	ifp->if_timer = 2;
	return;
}