1 /* 2 * Copyright (c) 1995, David Greenman 3 * All rights reserved. 4 * 5 * Modifications to support NetBSD and media selection: 6 * Copyright (c) 1997 Jason R. Thorpe. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice unmodified, this list of conditions, and the following 13 * disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 * 30 * $FreeBSD$ 31 */ 32 33 /* 34 * Intel EtherExpress Pro/100B PCI Fast Ethernet driver 35 */ 36 37 #include <sys/param.h> 38 #include <sys/systm.h> 39 #include <sys/mbuf.h> 40 #include <sys/malloc.h> 41 #include <sys/kernel.h> 42 #include <sys/socket.h> 43 44 #include <net/if.h> 45 #include <net/if_dl.h> 46 #include <net/if_media.h> 47 48 #ifdef NS 49 #include <netns/ns.h> 50 #include <netns/ns_if.h> 51 #endif 52 53 #include <net/bpf.h> 54 55 #if defined(__NetBSD__) 56 57 #include <sys/ioctl.h> 58 #include <sys/errno.h> 59 #include <sys/device.h> 60 61 #include <net/if_dl.h> 62 #include <net/if_ether.h> 63 64 #include <netinet/if_inarp.h> 65 66 #include <vm/vm.h> 67 68 #include <machine/cpu.h> 69 #include <machine/bus.h> 70 #include <machine/intr.h> 71 72 #include <dev/pci/if_fxpreg.h> 73 #include <dev/pci/if_fxpvar.h> 74 75 #include <dev/pci/pcivar.h> 76 #include <dev/pci/pcireg.h> 77 #include <dev/pci/pcidevs.h> 78 79 80 #else /* __FreeBSD__ */ 81 82 #include <sys/sockio.h> 83 #include <sys/bus.h> 84 #include <machine/bus.h> 85 #include <sys/rman.h> 86 #include <machine/resource.h> 87 88 #include <net/ethernet.h> 89 #include <net/if_arp.h> 90 91 #include <vm/vm.h> /* for vtophys */ 92 #include <vm/pmap.h> /* for vtophys */ 93 #include <machine/clock.h> /* for DELAY */ 94 95 #include <pci/pcivar.h> 96 #include <pci/pcireg.h> /* for PCIM_CMD_xxx */ 97 #include <pci/if_fxpreg.h> 98 #include <pci/if_fxpvar.h> 99 100 #endif /* __NetBSD__ */ 101 102 #ifdef __alpha__ /* XXX */ 103 /* XXX XXX NEED REAL DMA MAPPING SUPPORT XXX XXX */ 104 #undef vtophys 105 #define vtophys(va) alpha_XXX_dmamap((vm_offset_t)(va)) 106 #endif /* __alpha__ */ 107 108 109 #include "opt_bdg.h" 110 #ifdef BRIDGE 111 #include <net/if_types.h> 112 #include <net/bridge.h> 113 #endif 114 115 /* 116 * NOTE! On the Alpha, we have an alignment constraint. The 117 * card DMAs the packet immediately following the RFA. However, 118 * the first thing in the packet is a 14-byte Ethernet header. 119 * This means that the packet is misaligned. To compensate, 120 * we actually offset the RFA 2 bytes into the cluster. This 121 * alignes the packet after the Ethernet header at a 32-bit 122 * boundary. HOWEVER! This means that the RFA is misaligned! 123 */ 124 #define RFA_ALIGNMENT_FUDGE 2 125 126 /* 127 * Inline function to copy a 16-bit aligned 32-bit quantity. 128 */ 129 static __inline void fxp_lwcopy __P((volatile u_int32_t *, 130 volatile u_int32_t *)); 131 static __inline void 132 fxp_lwcopy(src, dst) 133 volatile u_int32_t *src, *dst; 134 { 135 volatile u_int16_t *a = (volatile u_int16_t *)src; 136 volatile u_int16_t *b = (volatile u_int16_t *)dst; 137 138 b[0] = a[0]; 139 b[1] = a[1]; 140 } 141 142 /* 143 * Template for default configuration parameters. 144 * See struct fxp_cb_config for the bit definitions. 145 */ 146 static u_char fxp_cb_config_template[] = { 147 0x0, 0x0, /* cb_status */ 148 0x80, 0x2, /* cb_command */ 149 0xff, 0xff, 0xff, 0xff, /* link_addr */ 150 0x16, /* 0 */ 151 0x8, /* 1 */ 152 0x0, /* 2 */ 153 0x0, /* 3 */ 154 0x0, /* 4 */ 155 0x80, /* 5 */ 156 0xb2, /* 6 */ 157 0x3, /* 7 */ 158 0x1, /* 8 */ 159 0x0, /* 9 */ 160 0x26, /* 10 */ 161 0x0, /* 11 */ 162 0x60, /* 12 */ 163 0x0, /* 13 */ 164 0xf2, /* 14 */ 165 0x48, /* 15 */ 166 0x0, /* 16 */ 167 0x40, /* 17 */ 168 0xf3, /* 18 */ 169 0x0, /* 19 */ 170 0x3f, /* 20 */ 171 0x5 /* 21 */ 172 }; 173 174 /* Supported media types. */ 175 struct fxp_supported_media { 176 const int fsm_phy; /* PHY type */ 177 const int *fsm_media; /* the media array */ 178 const int fsm_nmedia; /* the number of supported media */ 179 const int fsm_defmedia; /* default media for this PHY */ 180 }; 181 182 static const int fxp_media_standard[] = { 183 IFM_ETHER|IFM_10_T, 184 IFM_ETHER|IFM_10_T|IFM_FDX, 185 IFM_ETHER|IFM_100_TX, 186 IFM_ETHER|IFM_100_TX|IFM_FDX, 187 IFM_ETHER|IFM_AUTO, 188 }; 189 #define FXP_MEDIA_STANDARD_DEFMEDIA (IFM_ETHER|IFM_AUTO) 190 191 static const int fxp_media_default[] = { 192 IFM_ETHER|IFM_MANUAL, /* XXX IFM_AUTO ? */ 193 }; 194 #define FXP_MEDIA_DEFAULT_DEFMEDIA (IFM_ETHER|IFM_MANUAL) 195 196 static const struct fxp_supported_media fxp_media[] = { 197 { FXP_PHY_DP83840, fxp_media_standard, 198 sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]), 199 FXP_MEDIA_STANDARD_DEFMEDIA }, 200 { FXP_PHY_DP83840A, fxp_media_standard, 201 sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]), 202 FXP_MEDIA_STANDARD_DEFMEDIA }, 203 { FXP_PHY_82553A, fxp_media_standard, 204 sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]), 205 FXP_MEDIA_STANDARD_DEFMEDIA }, 206 { FXP_PHY_82553C, fxp_media_standard, 207 sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]), 208 FXP_MEDIA_STANDARD_DEFMEDIA }, 209 { FXP_PHY_82555, fxp_media_standard, 210 sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]), 211 FXP_MEDIA_STANDARD_DEFMEDIA }, 212 { FXP_PHY_82555B, fxp_media_standard, 213 sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]), 214 FXP_MEDIA_STANDARD_DEFMEDIA }, 215 { FXP_PHY_80C24, fxp_media_default, 216 sizeof(fxp_media_default) / sizeof(fxp_media_default[0]), 217 FXP_MEDIA_DEFAULT_DEFMEDIA }, 218 }; 219 #define NFXPMEDIA (sizeof(fxp_media) / sizeof(fxp_media[0])) 220 221 static int fxp_mediachange __P((struct ifnet *)); 222 static void fxp_mediastatus __P((struct ifnet *, struct ifmediareq *)); 223 static void fxp_set_media __P((struct fxp_softc *, int)); 224 static __inline void fxp_scb_wait __P((struct fxp_softc *)); 225 static FXP_INTR_TYPE fxp_intr __P((void *)); 226 static void fxp_start __P((struct ifnet *)); 227 static int fxp_ioctl __P((struct ifnet *, 228 FXP_IOCTLCMD_TYPE, caddr_t)); 229 static void fxp_init __P((void *)); 230 static void fxp_stop __P((struct fxp_softc *)); 231 static void fxp_watchdog __P((struct ifnet *)); 232 static int fxp_add_rfabuf __P((struct fxp_softc *, struct mbuf *)); 233 static int fxp_mdi_read __P((struct fxp_softc *, int, int)); 234 static void fxp_mdi_write __P((struct fxp_softc *, int, int, int)); 235 static void fxp_autosize_eeprom __P((struct fxp_softc *)); 236 static void fxp_read_eeprom __P((struct fxp_softc *, u_int16_t *, 237 int, int)); 238 static int fxp_attach_common __P((struct fxp_softc *, u_int8_t *)); 239 static void fxp_stats_update __P((void *)); 240 static void fxp_mc_setup __P((struct fxp_softc *)); 241 242 /* 243 * Set initial transmit threshold at 64 (512 bytes). This is 244 * increased by 64 (512 bytes) at a time, to maximum of 192 245 * (1536 bytes), if an underrun occurs. 246 */ 247 static int tx_threshold = 64; 248 249 /* 250 * Number of transmit control blocks. This determines the number 251 * of transmit buffers that can be chained in the CB list. 252 * This must be a power of two. 253 */ 254 #define FXP_NTXCB 128 255 256 /* 257 * Number of completed TX commands at which point an interrupt 258 * will be generated to garbage collect the attached buffers. 259 * Must be at least one less than FXP_NTXCB, and should be 260 * enough less so that the transmitter doesn't becomes idle 261 * during the buffer rundown (which would reduce performance). 262 */ 263 #define FXP_CXINT_THRESH 120 264 265 /* 266 * TxCB list index mask. This is used to do list wrap-around. 267 */ 268 #define FXP_TXCB_MASK (FXP_NTXCB - 1) 269 270 /* 271 * Number of receive frame area buffers. These are large so chose 272 * wisely. 273 */ 274 #define FXP_NRFABUFS 64 275 276 /* 277 * Maximum number of seconds that the receiver can be idle before we 278 * assume it's dead and attempt to reset it by reprogramming the 279 * multicast filter. This is part of a work-around for a bug in the 280 * NIC. See fxp_stats_update(). 281 */ 282 #define FXP_MAX_RX_IDLE 15 283 284 /* 285 * Wait for the previous command to be accepted (but not necessarily 286 * completed). 287 */ 288 static __inline void 289 fxp_scb_wait(sc) 290 struct fxp_softc *sc; 291 { 292 int i = 10000; 293 294 while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i); 295 } 296 297 /************************************************************* 298 * Operating system-specific autoconfiguration glue 299 *************************************************************/ 300 301 #if defined(__NetBSD__) 302 303 #ifdef __BROKEN_INDIRECT_CONFIG 304 static int fxp_match __P((struct device *, void *, void *)); 305 #else 306 static int fxp_match __P((struct device *, struct cfdata *, void *)); 307 #endif 308 static void fxp_attach __P((struct device *, struct device *, void *)); 309 310 static void fxp_shutdown __P((void *)); 311 312 /* Compensate for lack of a generic ether_ioctl() */ 313 static int fxp_ether_ioctl __P((struct ifnet *, 314 FXP_IOCTLCMD_TYPE, caddr_t)); 315 #define ether_ioctl fxp_ether_ioctl 316 317 struct cfattach fxp_ca = { 318 sizeof(struct fxp_softc), fxp_match, fxp_attach 319 }; 320 321 struct cfdriver fxp_cd = { 322 NULL, "fxp", DV_IFNET 323 }; 324 325 /* 326 * Check if a device is an 82557. 327 */ 328 static int 329 fxp_match(parent, match, aux) 330 struct device *parent; 331 #ifdef __BROKEN_INDIRECT_CONFIG 332 void *match; 333 #else 334 struct cfdata *match; 335 #endif 336 void *aux; 337 { 338 struct pci_attach_args *pa = aux; 339 340 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL) 341 return (0); 342 343 switch (PCI_PRODUCT(pa->pa_id)) { 344 case PCI_PRODUCT_INTEL_82557: 345 return (1); 346 } 347 348 return (0); 349 } 350 351 static void 352 fxp_attach(parent, self, aux) 353 struct device *parent, *self; 354 void *aux; 355 { 356 struct fxp_softc *sc = (struct fxp_softc *)self; 357 struct pci_attach_args *pa = aux; 358 pci_chipset_tag_t pc = pa->pa_pc; 359 pci_intr_handle_t ih; 360 const char *intrstr = NULL; 361 u_int8_t enaddr[6]; 362 struct ifnet *ifp; 363 364 /* 365 * Map control/status registers. 366 */ 367 if (pci_mapreg_map(pa, FXP_PCI_MMBA, PCI_MAPREG_TYPE_MEM, 0, 368 &sc->sc_st, &sc->sc_sh, NULL, NULL)) { 369 printf(": can't map registers\n"); 370 return; 371 } 372 printf(": Intel EtherExpress Pro 10/100B Ethernet\n"); 373 374 /* 375 * Allocate our interrupt. 376 */ 377 if (pci_intr_map(pc, pa->pa_intrtag, pa->pa_intrpin, 378 pa->pa_intrline, &ih)) { 379 printf("%s: couldn't map interrupt\n", sc->sc_dev.dv_xname); 380 return; 381 } 382 intrstr = pci_intr_string(pc, ih); 383 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, fxp_intr, sc); 384 if (sc->sc_ih == NULL) { 385 printf("%s: couldn't establish interrupt", 386 sc->sc_dev.dv_xname); 387 if (intrstr != NULL) 388 printf(" at %s", intrstr); 389 printf("\n"); 390 return; 391 } 392 printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr); 393 394 /* Do generic parts of attach. */ 395 if (fxp_attach_common(sc, enaddr)) { 396 /* Failed! */ 397 return; 398 } 399 400 printf("%s: Ethernet address %s%s\n", sc->sc_dev.dv_xname, 401 ether_sprintf(enaddr), sc->phy_10Mbps_only ? ", 10Mbps" : ""); 402 403 ifp = &sc->sc_ethercom.ec_if; 404 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 405 ifp->if_softc = sc; 406 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 407 ifp->if_ioctl = fxp_ioctl; 408 ifp->if_start = fxp_start; 409 ifp->if_watchdog = fxp_watchdog; 410 411 /* 412 * Attach the interface. 413 */ 414 if_attach(ifp); 415 /* 416 * Let the system queue as many packets as we have available 417 * TX descriptors. 418 */ 419 ifp->if_snd.ifq_maxlen = FXP_NTXCB - 1; 420 ether_ifattach(ifp, enaddr); 421 bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB, 422 sizeof(struct ether_header)); 423 424 /* 425 * Add shutdown hook so that DMA is disabled prior to reboot. Not 426 * doing do could allow DMA to corrupt kernel memory during the 427 * reboot before the driver initializes. 428 */ 429 shutdownhook_establish(fxp_shutdown, sc); 430 } 431 432 /* 433 * Device shutdown routine. Called at system shutdown after sync. The 434 * main purpose of this routine is to shut off receiver DMA so that 435 * kernel memory doesn't get clobbered during warmboot. 436 */ 437 static void 438 fxp_shutdown(sc) 439 void *sc; 440 { 441 fxp_stop((struct fxp_softc *) sc); 442 } 443 444 static int 445 fxp_ether_ioctl(ifp, cmd, data) 446 struct ifnet *ifp; 447 FXP_IOCTLCMD_TYPE cmd; 448 caddr_t data; 449 { 450 struct ifaddr *ifa = (struct ifaddr *) data; 451 struct fxp_softc *sc = ifp->if_softc; 452 453 switch (cmd) { 454 case SIOCSIFADDR: 455 ifp->if_flags |= IFF_UP; 456 457 switch (ifa->ifa_addr->sa_family) { 458 #ifdef INET 459 case AF_INET: 460 fxp_init(sc); 461 arp_ifinit(ifp, ifa); 462 break; 463 #endif 464 #ifdef NS 465 case AF_NS: 466 { 467 register struct ns_addr *ina = &IA_SNS(ifa)->sns_addr; 468 469 if (ns_nullhost(*ina)) 470 ina->x_host = *(union ns_host *) 471 LLADDR(ifp->if_sadl); 472 else 473 bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl), 474 ifp->if_addrlen); 475 /* Set new address. */ 476 fxp_init(sc); 477 break; 478 } 479 #endif 480 default: 481 fxp_init(sc); 482 break; 483 } 484 break; 485 486 default: 487 return (EINVAL); 488 } 489 490 return (0); 491 } 492 493 #else /* __FreeBSD__ */ 494 495 /* 496 * Return identification string if this is device is ours. 497 */ 498 static int 499 fxp_probe(device_t dev) 500 { 501 if ((pci_get_vendor(dev) == FXP_VENDORID_INTEL) && 502 (pci_get_device(dev) == FXP_DEVICEID_i82557)) { 503 device_set_desc(dev, "Intel EtherExpress Pro 10/100B Ethernet"); 504 return 0; 505 } 506 if ((pci_get_vendor(dev) == FXP_VENDORID_INTEL) && 507 (pci_get_device(dev) == FXP_DEVICEID_i82559)) { 508 device_set_desc(dev, "Intel InBusiness 10/100 Ethernet"); 509 return 0; 510 } 511 512 return ENXIO; 513 } 514 515 static int 516 fxp_attach(device_t dev) 517 { 518 int error = 0; 519 struct fxp_softc *sc = device_get_softc(dev); 520 struct ifnet *ifp; 521 int s; 522 u_long val; 523 int rid; 524 525 callout_handle_init(&sc->stat_ch); 526 527 s = splimp(); 528 529 /* 530 * Enable bus mastering. 531 */ 532 val = pci_read_config(dev, PCIR_COMMAND, 2); 533 val |= (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN); 534 pci_write_config(dev, PCIR_COMMAND, val, 2); 535 536 /* 537 * Map control/status registers. 538 */ 539 rid = FXP_PCI_MMBA; 540 sc->mem = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 541 0, ~0, 1, RF_ACTIVE); 542 if (!sc->mem) { 543 device_printf(dev, "could not map memory\n"); 544 error = ENXIO; 545 goto fail; 546 } 547 548 sc->sc_st = rman_get_bustag(sc->mem); 549 sc->sc_sh = rman_get_bushandle(sc->mem); 550 551 /* 552 * Allocate our interrupt. 553 */ 554 rid = 0; 555 sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, 556 RF_SHAREABLE | RF_ACTIVE); 557 if (sc->irq == NULL) { 558 device_printf(dev, "could not map interrupt\n"); 559 error = ENXIO; 560 goto fail; 561 } 562 563 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET, 564 fxp_intr, sc, &sc->ih); 565 if (error) { 566 device_printf(dev, "could not setup irq\n"); 567 goto fail; 568 } 569 570 /* Do generic parts of attach. */ 571 if (fxp_attach_common(sc, sc->arpcom.ac_enaddr)) { 572 /* Failed! */ 573 bus_teardown_intr(dev, sc->irq, sc->ih); 574 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq); 575 bus_release_resource(dev, SYS_RES_MEMORY, FXP_PCI_MMBA, sc->mem); 576 error = ENXIO; 577 goto fail; 578 } 579 580 device_printf(dev, "Ethernet address %6D%s\n", 581 sc->arpcom.ac_enaddr, ":", sc->phy_10Mbps_only ? ", 10Mbps" : ""); 582 583 ifp = &sc->arpcom.ac_if; 584 ifp->if_unit = device_get_unit(dev); 585 ifp->if_name = "fxp"; 586 ifp->if_output = ether_output; 587 ifp->if_baudrate = 100000000; 588 ifp->if_init = fxp_init; 589 ifp->if_softc = sc; 590 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 591 ifp->if_ioctl = fxp_ioctl; 592 ifp->if_start = fxp_start; 593 ifp->if_watchdog = fxp_watchdog; 594 595 /* 596 * Attach the interface. 597 */ 598 if_attach(ifp); 599 /* 600 * Let the system queue as many packets as we have available 601 * TX descriptors. 602 */ 603 ifp->if_snd.ifq_maxlen = FXP_NTXCB - 1; 604 ether_ifattach(ifp); 605 bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header)); 606 607 splx(s); 608 return 0; 609 610 fail: 611 splx(s); 612 return error; 613 } 614 615 /* 616 * Detach interface. 617 */ 618 static int 619 fxp_detach(device_t dev) 620 { 621 struct fxp_softc *sc = device_get_softc(dev); 622 int s; 623 624 s = splimp(); 625 626 /* 627 * Close down routes etc. 628 */ 629 if_detach(&sc->arpcom.ac_if); 630 631 /* 632 * Stop DMA and drop transmit queue. 633 */ 634 fxp_stop(sc); 635 636 /* 637 * Deallocate resources. 638 */ 639 bus_teardown_intr(dev, sc->irq, sc->ih); 640 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq); 641 bus_release_resource(dev, SYS_RES_MEMORY, FXP_PCI_MMBA, sc->mem); 642 643 /* 644 * Free all the receive buffers. 645 */ 646 if (sc->rfa_headm != NULL) 647 m_freem(sc->rfa_headm); 648 649 /* 650 * Free all media structures. 651 */ 652 ifmedia_removeall(&sc->sc_media); 653 654 /* 655 * Free anciliary structures. 656 */ 657 free(sc->cbl_base, M_DEVBUF); 658 free(sc->fxp_stats, M_DEVBUF); 659 free(sc->mcsp, M_DEVBUF); 660 661 splx(s); 662 663 return 0; 664 } 665 666 /* 667 * Device shutdown routine. Called at system shutdown after sync. The 668 * main purpose of this routine is to shut off receiver DMA so that 669 * kernel memory doesn't get clobbered during warmboot. 670 */ 671 static int 672 fxp_shutdown(device_t dev) 673 { 674 /* 675 * Make sure that DMA is disabled prior to reboot. Not doing 676 * do could allow DMA to corrupt kernel memory during the 677 * reboot before the driver initializes. 678 */ 679 fxp_stop((struct fxp_softc *) device_get_softc(dev)); 680 return 0; 681 } 682 683 static device_method_t fxp_methods[] = { 684 /* Device interface */ 685 DEVMETHOD(device_probe, fxp_probe), 686 DEVMETHOD(device_attach, fxp_attach), 687 DEVMETHOD(device_detach, fxp_detach), 688 DEVMETHOD(device_shutdown, fxp_shutdown), 689 690 { 0, 0 } 691 }; 692 693 static driver_t fxp_driver = { 694 "fxp", 695 fxp_methods, 696 sizeof(struct fxp_softc), 697 }; 698 699 static devclass_t fxp_devclass; 700 701 DRIVER_MODULE(if_fxp, pci, fxp_driver, fxp_devclass, 0, 0); 702 703 #endif /* __NetBSD__ */ 704 705 /************************************************************* 706 * End of operating system-specific autoconfiguration glue 707 *************************************************************/ 708 709 /* 710 * Do generic parts of attach. 711 */ 712 static int 713 fxp_attach_common(sc, enaddr) 714 struct fxp_softc *sc; 715 u_int8_t *enaddr; 716 { 717 u_int16_t data; 718 int i, nmedia, defmedia; 719 const int *media; 720 721 /* 722 * Reset to a stable state. 723 */ 724 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET); 725 DELAY(10); 726 727 sc->cbl_base = malloc(sizeof(struct fxp_cb_tx) * FXP_NTXCB, 728 M_DEVBUF, M_NOWAIT); 729 if (sc->cbl_base == NULL) 730 goto fail; 731 bzero(sc->cbl_base, sizeof(struct fxp_cb_tx) * FXP_NTXCB); 732 733 sc->fxp_stats = malloc(sizeof(struct fxp_stats), M_DEVBUF, M_NOWAIT); 734 if (sc->fxp_stats == NULL) 735 goto fail; 736 bzero(sc->fxp_stats, sizeof(struct fxp_stats)); 737 738 sc->mcsp = malloc(sizeof(struct fxp_cb_mcs), M_DEVBUF, M_NOWAIT); 739 if (sc->mcsp == NULL) 740 goto fail; 741 742 /* 743 * Pre-allocate our receive buffers. 744 */ 745 for (i = 0; i < FXP_NRFABUFS; i++) { 746 if (fxp_add_rfabuf(sc, NULL) != 0) { 747 goto fail; 748 } 749 } 750 751 /* 752 * Find out how large of an SEEPROM we have. 753 */ 754 fxp_autosize_eeprom(sc); 755 756 /* 757 * Get info about the primary PHY 758 */ 759 fxp_read_eeprom(sc, (u_int16_t *)&data, 6, 1); 760 sc->phy_primary_addr = data & 0xff; 761 sc->phy_primary_device = (data >> 8) & 0x3f; 762 sc->phy_10Mbps_only = data >> 15; 763 764 /* 765 * Read MAC address. 766 */ 767 fxp_read_eeprom(sc, (u_int16_t *)enaddr, 0, 3); 768 769 /* 770 * Initialize the media structures. 771 */ 772 773 media = fxp_media_default; 774 nmedia = sizeof(fxp_media_default) / sizeof(fxp_media_default[0]); 775 defmedia = FXP_MEDIA_DEFAULT_DEFMEDIA; 776 777 for (i = 0; i < NFXPMEDIA; i++) { 778 if (sc->phy_primary_device == fxp_media[i].fsm_phy) { 779 media = fxp_media[i].fsm_media; 780 nmedia = fxp_media[i].fsm_nmedia; 781 defmedia = fxp_media[i].fsm_defmedia; 782 } 783 } 784 785 ifmedia_init(&sc->sc_media, 0, fxp_mediachange, fxp_mediastatus); 786 for (i = 0; i < nmedia; i++) { 787 if (IFM_SUBTYPE(media[i]) == IFM_100_TX && sc->phy_10Mbps_only) 788 continue; 789 ifmedia_add(&sc->sc_media, media[i], 0, NULL); 790 } 791 ifmedia_set(&sc->sc_media, defmedia); 792 793 return (0); 794 795 fail: 796 printf(FXP_FORMAT ": Failed to malloc memory\n", FXP_ARGS(sc)); 797 if (sc->cbl_base) 798 free(sc->cbl_base, M_DEVBUF); 799 if (sc->fxp_stats) 800 free(sc->fxp_stats, M_DEVBUF); 801 if (sc->mcsp) 802 free(sc->mcsp, M_DEVBUF); 803 /* frees entire chain */ 804 if (sc->rfa_headm) 805 m_freem(sc->rfa_headm); 806 807 return (ENOMEM); 808 } 809 810 /* 811 * From NetBSD: 812 * 813 * Figure out EEPROM size. 814 * 815 * 559's can have either 64-word or 256-word EEPROMs, the 558 816 * datasheet only talks about 64-word EEPROMs, and the 557 datasheet 817 * talks about the existance of 16 to 256 word EEPROMs. 818 * 819 * The only known sizes are 64 and 256, where the 256 version is used 820 * by CardBus cards to store CIS information. 821 * 822 * The address is shifted in msb-to-lsb, and after the last 823 * address-bit the EEPROM is supposed to output a `dummy zero' bit, 824 * after which follows the actual data. We try to detect this zero, by 825 * probing the data-out bit in the EEPROM control register just after 826 * having shifted in a bit. If the bit is zero, we assume we've 827 * shifted enough address bits. The data-out should be tri-state, 828 * before this, which should translate to a logical one. 829 * 830 * Other ways to do this would be to try to read a register with known 831 * contents with a varying number of address bits, but no such 832 * register seem to be available. The high bits of register 10 are 01 833 * on the 558 and 559, but apparently not on the 557. 834 * 835 * The Linux driver computes a checksum on the EEPROM data, but the 836 * value of this checksum is not very well documented. 837 */ 838 static void 839 fxp_autosize_eeprom(sc) 840 struct fxp_softc *sc; 841 { 842 u_int16_t reg; 843 int x; 844 845 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); 846 /* 847 * Shift in read opcode. 848 */ 849 for (x = 3; x > 0; x--) { 850 if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) { 851 reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI; 852 } else { 853 reg = FXP_EEPROM_EECS; 854 } 855 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 856 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 857 reg | FXP_EEPROM_EESK); 858 DELAY(1); 859 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 860 DELAY(1); 861 } 862 /* 863 * Shift in address. 864 * Wait for the dummy zero following a correct address shift. 865 */ 866 for (x = 1; x <= 8; x++) { 867 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); 868 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 869 FXP_EEPROM_EECS | FXP_EEPROM_EESK); 870 DELAY(1); 871 if ((CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO) == 0) 872 break; 873 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); 874 DELAY(1); 875 } 876 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); 877 DELAY(1); 878 sc->eeprom_size = x; 879 } 880 /* 881 * Read from the serial EEPROM. Basically, you manually shift in 882 * the read opcode (one bit at a time) and then shift in the address, 883 * and then you shift out the data (all of this one bit at a time). 884 * The word size is 16 bits, so you have to provide the address for 885 * every 16 bits of data. 886 */ 887 static void 888 fxp_read_eeprom(sc, data, offset, words) 889 struct fxp_softc *sc; 890 u_short *data; 891 int offset; 892 int words; 893 { 894 u_int16_t reg; 895 int i, x; 896 897 for (i = 0; i < words; i++) { 898 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); 899 /* 900 * Shift in read opcode. 901 */ 902 for (x = 3; x > 0; x--) { 903 if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) { 904 reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI; 905 } else { 906 reg = FXP_EEPROM_EECS; 907 } 908 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 909 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 910 reg | FXP_EEPROM_EESK); 911 DELAY(1); 912 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 913 DELAY(1); 914 } 915 /* 916 * Shift in address. 917 */ 918 for (x = sc->eeprom_size; x > 0; x--) { 919 if ((i + offset) & (1 << (x - 1))) { 920 reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI; 921 } else { 922 reg = FXP_EEPROM_EECS; 923 } 924 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 925 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 926 reg | FXP_EEPROM_EESK); 927 DELAY(1); 928 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 929 DELAY(1); 930 } 931 reg = FXP_EEPROM_EECS; 932 data[i] = 0; 933 /* 934 * Shift out data. 935 */ 936 for (x = 16; x > 0; x--) { 937 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 938 reg | FXP_EEPROM_EESK); 939 DELAY(1); 940 if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & 941 FXP_EEPROM_EEDO) 942 data[i] |= (1 << (x - 1)); 943 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 944 DELAY(1); 945 } 946 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); 947 DELAY(1); 948 } 949 } 950 951 /* 952 * Start packet transmission on the interface. 953 */ 954 static void 955 fxp_start(ifp) 956 struct ifnet *ifp; 957 { 958 struct fxp_softc *sc = ifp->if_softc; 959 struct fxp_cb_tx *txp; 960 961 /* 962 * See if we need to suspend xmit until the multicast filter 963 * has been reprogrammed (which can only be done at the head 964 * of the command chain). 965 */ 966 if (sc->need_mcsetup) 967 return; 968 969 txp = NULL; 970 971 /* 972 * We're finished if there is nothing more to add to the list or if 973 * we're all filled up with buffers to transmit. 974 * NOTE: One TxCB is reserved to guarantee that fxp_mc_setup() can add 975 * a NOP command when needed. 976 */ 977 while (ifp->if_snd.ifq_head != NULL && sc->tx_queued < FXP_NTXCB - 1) { 978 struct mbuf *m, *mb_head; 979 int segment; 980 981 /* 982 * Grab a packet to transmit. 983 */ 984 IF_DEQUEUE(&ifp->if_snd, mb_head); 985 986 /* 987 * Get pointer to next available tx desc. 988 */ 989 txp = sc->cbl_last->next; 990 991 /* 992 * Go through each of the mbufs in the chain and initialize 993 * the transmit buffer descriptors with the physical address 994 * and size of the mbuf. 995 */ 996 tbdinit: 997 for (m = mb_head, segment = 0; m != NULL; m = m->m_next) { 998 if (m->m_len != 0) { 999 if (segment == FXP_NTXSEG) 1000 break; 1001 txp->tbd[segment].tb_addr = 1002 vtophys(mtod(m, vm_offset_t)); 1003 txp->tbd[segment].tb_size = m->m_len; 1004 segment++; 1005 } 1006 } 1007 if (m != NULL) { 1008 struct mbuf *mn; 1009 1010 /* 1011 * We ran out of segments. We have to recopy this mbuf 1012 * chain first. Bail out if we can't get the new buffers. 1013 */ 1014 MGETHDR(mn, M_DONTWAIT, MT_DATA); 1015 if (mn == NULL) { 1016 m_freem(mb_head); 1017 break; 1018 } 1019 if (mb_head->m_pkthdr.len > MHLEN) { 1020 MCLGET(mn, M_DONTWAIT); 1021 if ((mn->m_flags & M_EXT) == 0) { 1022 m_freem(mn); 1023 m_freem(mb_head); 1024 break; 1025 } 1026 } 1027 m_copydata(mb_head, 0, mb_head->m_pkthdr.len, 1028 mtod(mn, caddr_t)); 1029 mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len; 1030 m_freem(mb_head); 1031 mb_head = mn; 1032 goto tbdinit; 1033 } 1034 1035 txp->tbd_number = segment; 1036 txp->mb_head = mb_head; 1037 txp->cb_status = 0; 1038 if (sc->tx_queued != FXP_CXINT_THRESH - 1) { 1039 txp->cb_command = 1040 FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF | FXP_CB_COMMAND_S; 1041 } else { 1042 txp->cb_command = 1043 FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I; 1044 /* 1045 * Set a 5 second timer just in case we don't hear from the 1046 * card again. 1047 */ 1048 ifp->if_timer = 5; 1049 } 1050 txp->tx_threshold = tx_threshold; 1051 1052 /* 1053 * Advance the end of list forward. 1054 */ 1055 sc->cbl_last->cb_command &= ~FXP_CB_COMMAND_S; 1056 sc->cbl_last = txp; 1057 1058 /* 1059 * Advance the beginning of the list forward if there are 1060 * no other packets queued (when nothing is queued, cbl_first 1061 * sits on the last TxCB that was sent out). 1062 */ 1063 if (sc->tx_queued == 0) 1064 sc->cbl_first = txp; 1065 1066 sc->tx_queued++; 1067 1068 /* 1069 * Pass packet to bpf if there is a listener. 1070 */ 1071 if (ifp->if_bpf) 1072 bpf_mtap(FXP_BPFTAP_ARG(ifp), mb_head); 1073 } 1074 1075 /* 1076 * We're finished. If we added to the list, issue a RESUME to get DMA 1077 * going again if suspended. 1078 */ 1079 if (txp != NULL) { 1080 fxp_scb_wait(sc); 1081 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_RESUME); 1082 } 1083 } 1084 1085 /* 1086 * Process interface interrupts. 1087 */ 1088 static FXP_INTR_TYPE 1089 fxp_intr(arg) 1090 void *arg; 1091 { 1092 struct fxp_softc *sc = arg; 1093 struct ifnet *ifp = &sc->sc_if; 1094 u_int8_t statack; 1095 #if defined(__NetBSD__) 1096 int claimed = 0; 1097 #endif 1098 1099 while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) { 1100 #if defined(__NetBSD__) 1101 claimed = 1; 1102 #endif 1103 /* 1104 * First ACK all the interrupts in this pass. 1105 */ 1106 CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack); 1107 1108 /* 1109 * Free any finished transmit mbuf chains. 1110 */ 1111 if (statack & FXP_SCB_STATACK_CXTNO) { 1112 struct fxp_cb_tx *txp; 1113 1114 for (txp = sc->cbl_first; sc->tx_queued && 1115 (txp->cb_status & FXP_CB_STATUS_C) != 0; 1116 txp = txp->next) { 1117 if (txp->mb_head != NULL) { 1118 m_freem(txp->mb_head); 1119 txp->mb_head = NULL; 1120 } 1121 sc->tx_queued--; 1122 } 1123 sc->cbl_first = txp; 1124 ifp->if_timer = 0; 1125 if (sc->tx_queued == 0) { 1126 if (sc->need_mcsetup) 1127 fxp_mc_setup(sc); 1128 } 1129 /* 1130 * Try to start more packets transmitting. 1131 */ 1132 if (ifp->if_snd.ifq_head != NULL) 1133 fxp_start(ifp); 1134 } 1135 /* 1136 * Process receiver interrupts. If a no-resource (RNR) 1137 * condition exists, get whatever packets we can and 1138 * re-start the receiver. 1139 */ 1140 if (statack & (FXP_SCB_STATACK_FR | FXP_SCB_STATACK_RNR)) { 1141 struct mbuf *m; 1142 struct fxp_rfa *rfa; 1143 rcvloop: 1144 m = sc->rfa_headm; 1145 rfa = (struct fxp_rfa *)(m->m_ext.ext_buf + 1146 RFA_ALIGNMENT_FUDGE); 1147 1148 if (rfa->rfa_status & FXP_RFA_STATUS_C) { 1149 /* 1150 * Remove first packet from the chain. 1151 */ 1152 sc->rfa_headm = m->m_next; 1153 m->m_next = NULL; 1154 1155 /* 1156 * Add a new buffer to the receive chain. 1157 * If this fails, the old buffer is recycled 1158 * instead. 1159 */ 1160 if (fxp_add_rfabuf(sc, m) == 0) { 1161 struct ether_header *eh; 1162 u_int16_t total_len; 1163 1164 total_len = rfa->actual_size & 1165 (MCLBYTES - 1); 1166 if (total_len < 1167 sizeof(struct ether_header)) { 1168 m_freem(m); 1169 goto rcvloop; 1170 } 1171 m->m_pkthdr.rcvif = ifp; 1172 m->m_pkthdr.len = m->m_len = 1173 total_len ; 1174 eh = mtod(m, struct ether_header *); 1175 if (ifp->if_bpf) 1176 bpf_tap(FXP_BPFTAP_ARG(ifp), 1177 mtod(m, caddr_t), 1178 total_len); 1179 #ifdef BRIDGE 1180 if (do_bridge) { 1181 struct ifnet *bdg_ifp ; 1182 bdg_ifp = bridge_in(m); 1183 if (bdg_ifp == BDG_DROP) 1184 goto dropit ; 1185 if (bdg_ifp != BDG_LOCAL) 1186 bdg_forward(&m, bdg_ifp); 1187 if (bdg_ifp != BDG_LOCAL && 1188 bdg_ifp != BDG_BCAST && 1189 bdg_ifp != BDG_MCAST) 1190 goto dropit ; 1191 goto getit ; 1192 } 1193 #endif 1194 /* 1195 * Only pass this packet up 1196 * if it is for us. 1197 */ 1198 if ((ifp->if_flags & 1199 IFF_PROMISC) && 1200 (rfa->rfa_status & 1201 FXP_RFA_STATUS_IAMATCH) && 1202 (eh->ether_dhost[0] & 1) 1203 == 0) { 1204 #ifdef BRIDGE 1205 dropit: 1206 #endif 1207 if (m) 1208 m_freem(m); 1209 goto rcvloop; 1210 } 1211 #ifdef BRIDGE 1212 getit: 1213 #endif 1214 m->m_data += 1215 sizeof(struct ether_header); 1216 m->m_len -= 1217 sizeof(struct ether_header); 1218 m->m_pkthdr.len = m->m_len ; 1219 ether_input(ifp, eh, m); 1220 } 1221 goto rcvloop; 1222 } 1223 if (statack & FXP_SCB_STATACK_RNR) { 1224 fxp_scb_wait(sc); 1225 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 1226 vtophys(sc->rfa_headm->m_ext.ext_buf) + 1227 RFA_ALIGNMENT_FUDGE); 1228 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, 1229 FXP_SCB_COMMAND_RU_START); 1230 } 1231 } 1232 } 1233 #if defined(__NetBSD__) 1234 return (claimed); 1235 #endif 1236 } 1237 1238 /* 1239 * Update packet in/out/collision statistics. The i82557 doesn't 1240 * allow you to access these counters without doing a fairly 1241 * expensive DMA to get _all_ of the statistics it maintains, so 1242 * we do this operation here only once per second. The statistics 1243 * counters in the kernel are updated from the previous dump-stats 1244 * DMA and then a new dump-stats DMA is started. The on-chip 1245 * counters are zeroed when the DMA completes. If we can't start 1246 * the DMA immediately, we don't wait - we just prepare to read 1247 * them again next time. 1248 */ 1249 static void 1250 fxp_stats_update(arg) 1251 void *arg; 1252 { 1253 struct fxp_softc *sc = arg; 1254 struct ifnet *ifp = &sc->sc_if; 1255 struct fxp_stats *sp = sc->fxp_stats; 1256 struct fxp_cb_tx *txp; 1257 int s; 1258 1259 ifp->if_opackets += sp->tx_good; 1260 ifp->if_collisions += sp->tx_total_collisions; 1261 if (sp->rx_good) { 1262 ifp->if_ipackets += sp->rx_good; 1263 sc->rx_idle_secs = 0; 1264 } else { 1265 /* 1266 * Receiver's been idle for another second. 1267 */ 1268 sc->rx_idle_secs++; 1269 } 1270 ifp->if_ierrors += 1271 sp->rx_crc_errors + 1272 sp->rx_alignment_errors + 1273 sp->rx_rnr_errors + 1274 sp->rx_overrun_errors; 1275 /* 1276 * If any transmit underruns occured, bump up the transmit 1277 * threshold by another 512 bytes (64 * 8). 1278 */ 1279 if (sp->tx_underruns) { 1280 ifp->if_oerrors += sp->tx_underruns; 1281 if (tx_threshold < 192) 1282 tx_threshold += 64; 1283 } 1284 s = splimp(); 1285 /* 1286 * Release any xmit buffers that have completed DMA. This isn't 1287 * strictly necessary to do here, but it's advantagous for mbufs 1288 * with external storage to be released in a timely manner rather 1289 * than being defered for a potentially long time. This limits 1290 * the delay to a maximum of one second. 1291 */ 1292 for (txp = sc->cbl_first; sc->tx_queued && 1293 (txp->cb_status & FXP_CB_STATUS_C) != 0; 1294 txp = txp->next) { 1295 if (txp->mb_head != NULL) { 1296 m_freem(txp->mb_head); 1297 txp->mb_head = NULL; 1298 } 1299 sc->tx_queued--; 1300 } 1301 sc->cbl_first = txp; 1302 /* 1303 * If we haven't received any packets in FXP_MAC_RX_IDLE seconds, 1304 * then assume the receiver has locked up and attempt to clear 1305 * the condition by reprogramming the multicast filter. This is 1306 * a work-around for a bug in the 82557 where the receiver locks 1307 * up if it gets certain types of garbage in the syncronization 1308 * bits prior to the packet header. This bug is supposed to only 1309 * occur in 10Mbps mode, but has been seen to occur in 100Mbps 1310 * mode as well (perhaps due to a 10/100 speed transition). 1311 */ 1312 if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) { 1313 sc->rx_idle_secs = 0; 1314 fxp_mc_setup(sc); 1315 } 1316 /* 1317 * If there is no pending command, start another stats 1318 * dump. Otherwise punt for now. 1319 */ 1320 if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) { 1321 /* 1322 * Start another stats dump. 1323 */ 1324 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, 1325 FXP_SCB_COMMAND_CU_DUMPRESET); 1326 } else { 1327 /* 1328 * A previous command is still waiting to be accepted. 1329 * Just zero our copy of the stats and wait for the 1330 * next timer event to update them. 1331 */ 1332 sp->tx_good = 0; 1333 sp->tx_underruns = 0; 1334 sp->tx_total_collisions = 0; 1335 1336 sp->rx_good = 0; 1337 sp->rx_crc_errors = 0; 1338 sp->rx_alignment_errors = 0; 1339 sp->rx_rnr_errors = 0; 1340 sp->rx_overrun_errors = 0; 1341 } 1342 splx(s); 1343 /* 1344 * Schedule another timeout one second from now. 1345 */ 1346 sc->stat_ch = timeout(fxp_stats_update, sc, hz); 1347 } 1348 1349 /* 1350 * Stop the interface. Cancels the statistics updater and resets 1351 * the interface. 1352 */ 1353 static void 1354 fxp_stop(sc) 1355 struct fxp_softc *sc; 1356 { 1357 struct ifnet *ifp = &sc->sc_if; 1358 struct fxp_cb_tx *txp; 1359 int i; 1360 1361 /* 1362 * Cancel stats updater. 1363 */ 1364 untimeout(fxp_stats_update, sc, sc->stat_ch); 1365 1366 /* 1367 * Issue software reset 1368 */ 1369 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET); 1370 DELAY(10); 1371 1372 /* 1373 * Release any xmit buffers. 1374 */ 1375 txp = sc->cbl_base; 1376 if (txp != NULL) { 1377 for (i = 0; i < FXP_NTXCB; i++) { 1378 if (txp[i].mb_head != NULL) { 1379 m_freem(txp[i].mb_head); 1380 txp[i].mb_head = NULL; 1381 } 1382 } 1383 } 1384 sc->tx_queued = 0; 1385 1386 /* 1387 * Free all the receive buffers then reallocate/reinitialize 1388 */ 1389 if (sc->rfa_headm != NULL) 1390 m_freem(sc->rfa_headm); 1391 sc->rfa_headm = NULL; 1392 sc->rfa_tailm = NULL; 1393 for (i = 0; i < FXP_NRFABUFS; i++) { 1394 if (fxp_add_rfabuf(sc, NULL) != 0) { 1395 /* 1396 * This "can't happen" - we're at splimp() 1397 * and we just freed all the buffers we need 1398 * above. 1399 */ 1400 panic("fxp_stop: no buffers!"); 1401 } 1402 } 1403 1404 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1405 ifp->if_timer = 0; 1406 } 1407 1408 /* 1409 * Watchdog/transmission transmit timeout handler. Called when a 1410 * transmission is started on the interface, but no interrupt is 1411 * received before the timeout. This usually indicates that the 1412 * card has wedged for some reason. 1413 */ 1414 static void 1415 fxp_watchdog(ifp) 1416 struct ifnet *ifp; 1417 { 1418 struct fxp_softc *sc = ifp->if_softc; 1419 1420 printf(FXP_FORMAT ": device timeout\n", FXP_ARGS(sc)); 1421 ifp->if_oerrors++; 1422 1423 fxp_init(sc); 1424 } 1425 1426 static void 1427 fxp_init(xsc) 1428 void *xsc; 1429 { 1430 struct fxp_softc *sc = xsc; 1431 struct ifnet *ifp = &sc->sc_if; 1432 struct fxp_cb_config *cbp; 1433 struct fxp_cb_ias *cb_ias; 1434 struct fxp_cb_tx *txp; 1435 int i, s, prm; 1436 1437 s = splimp(); 1438 /* 1439 * Cancel any pending I/O 1440 */ 1441 fxp_stop(sc); 1442 1443 prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0; 1444 1445 /* 1446 * Initialize base of CBL and RFA memory. Loading with zero 1447 * sets it up for regular linear addressing. 1448 */ 1449 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0); 1450 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_BASE); 1451 1452 fxp_scb_wait(sc); 1453 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_RU_BASE); 1454 1455 /* 1456 * Initialize base of dump-stats buffer. 1457 */ 1458 fxp_scb_wait(sc); 1459 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(sc->fxp_stats)); 1460 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_DUMP_ADR); 1461 1462 /* 1463 * We temporarily use memory that contains the TxCB list to 1464 * construct the config CB. The TxCB list memory is rebuilt 1465 * later. 1466 */ 1467 cbp = (struct fxp_cb_config *) sc->cbl_base; 1468 1469 /* 1470 * This bcopy is kind of disgusting, but there are a bunch of must be 1471 * zero and must be one bits in this structure and this is the easiest 1472 * way to initialize them all to proper values. 1473 */ 1474 bcopy(fxp_cb_config_template, (volatile void *)&cbp->cb_status, 1475 sizeof(fxp_cb_config_template)); 1476 1477 cbp->cb_status = 0; 1478 cbp->cb_command = FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL; 1479 cbp->link_addr = -1; /* (no) next command */ 1480 cbp->byte_count = 22; /* (22) bytes to config */ 1481 cbp->rx_fifo_limit = 8; /* rx fifo threshold (32 bytes) */ 1482 cbp->tx_fifo_limit = 0; /* tx fifo threshold (0 bytes) */ 1483 cbp->adaptive_ifs = 0; /* (no) adaptive interframe spacing */ 1484 cbp->rx_dma_bytecount = 0; /* (no) rx DMA max */ 1485 cbp->tx_dma_bytecount = 0; /* (no) tx DMA max */ 1486 cbp->dma_bce = 0; /* (disable) dma max counters */ 1487 cbp->late_scb = 0; /* (don't) defer SCB update */ 1488 cbp->tno_int = 0; /* (disable) tx not okay interrupt */ 1489 cbp->ci_int = 1; /* interrupt on CU idle */ 1490 cbp->save_bf = prm; /* save bad frames */ 1491 cbp->disc_short_rx = !prm; /* discard short packets */ 1492 cbp->underrun_retry = 1; /* retry mode (1) on DMA underrun */ 1493 cbp->mediatype = !sc->phy_10Mbps_only; /* interface mode */ 1494 cbp->nsai = 1; /* (don't) disable source addr insert */ 1495 cbp->preamble_length = 2; /* (7 byte) preamble */ 1496 cbp->loopback = 0; /* (don't) loopback */ 1497 cbp->linear_priority = 0; /* (normal CSMA/CD operation) */ 1498 cbp->linear_pri_mode = 0; /* (wait after xmit only) */ 1499 cbp->interfrm_spacing = 6; /* (96 bits of) interframe spacing */ 1500 cbp->promiscuous = prm; /* promiscuous mode */ 1501 cbp->bcast_disable = 0; /* (don't) disable broadcasts */ 1502 cbp->crscdt = 0; /* (CRS only) */ 1503 cbp->stripping = !prm; /* truncate rx packet to byte count */ 1504 cbp->padding = 1; /* (do) pad short tx packets */ 1505 cbp->rcv_crc_xfer = 0; /* (don't) xfer CRC to host */ 1506 cbp->force_fdx = 0; /* (don't) force full duplex */ 1507 cbp->fdx_pin_en = 1; /* (enable) FDX# pin */ 1508 cbp->multi_ia = 0; /* (don't) accept multiple IAs */ 1509 cbp->mc_all = sc->all_mcasts;/* accept all multicasts */ 1510 1511 /* 1512 * Start the config command/DMA. 1513 */ 1514 fxp_scb_wait(sc); 1515 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(&cbp->cb_status)); 1516 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START); 1517 /* ...and wait for it to complete. */ 1518 while (!(cbp->cb_status & FXP_CB_STATUS_C)); 1519 1520 /* 1521 * Now initialize the station address. Temporarily use the TxCB 1522 * memory area like we did above for the config CB. 1523 */ 1524 cb_ias = (struct fxp_cb_ias *) sc->cbl_base; 1525 cb_ias->cb_status = 0; 1526 cb_ias->cb_command = FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL; 1527 cb_ias->link_addr = -1; 1528 #if defined(__NetBSD__) 1529 bcopy(LLADDR(ifp->if_sadl), (void *)cb_ias->macaddr, 6); 1530 #else 1531 bcopy(sc->arpcom.ac_enaddr, (volatile void *)cb_ias->macaddr, 1532 sizeof(sc->arpcom.ac_enaddr)); 1533 #endif /* __NetBSD__ */ 1534 1535 /* 1536 * Start the IAS (Individual Address Setup) command/DMA. 1537 */ 1538 fxp_scb_wait(sc); 1539 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START); 1540 /* ...and wait for it to complete. */ 1541 while (!(cb_ias->cb_status & FXP_CB_STATUS_C)); 1542 1543 /* 1544 * Initialize transmit control block (TxCB) list. 1545 */ 1546 1547 txp = sc->cbl_base; 1548 bzero(txp, sizeof(struct fxp_cb_tx) * FXP_NTXCB); 1549 for (i = 0; i < FXP_NTXCB; i++) { 1550 txp[i].cb_status = FXP_CB_STATUS_C | FXP_CB_STATUS_OK; 1551 txp[i].cb_command = FXP_CB_COMMAND_NOP; 1552 txp[i].link_addr = vtophys(&txp[(i + 1) & FXP_TXCB_MASK].cb_status); 1553 txp[i].tbd_array_addr = vtophys(&txp[i].tbd[0]); 1554 txp[i].next = &txp[(i + 1) & FXP_TXCB_MASK]; 1555 } 1556 /* 1557 * Set the suspend flag on the first TxCB and start the control 1558 * unit. It will execute the NOP and then suspend. 1559 */ 1560 txp->cb_command = FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S; 1561 sc->cbl_first = sc->cbl_last = txp; 1562 sc->tx_queued = 1; 1563 1564 fxp_scb_wait(sc); 1565 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START); 1566 1567 /* 1568 * Initialize receiver buffer area - RFA. 1569 */ 1570 fxp_scb_wait(sc); 1571 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 1572 vtophys(sc->rfa_headm->m_ext.ext_buf) + RFA_ALIGNMENT_FUDGE); 1573 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_RU_START); 1574 1575 /* 1576 * Set current media. 1577 */ 1578 fxp_set_media(sc, sc->sc_media.ifm_cur->ifm_media); 1579 1580 ifp->if_flags |= IFF_RUNNING; 1581 ifp->if_flags &= ~IFF_OACTIVE; 1582 splx(s); 1583 1584 /* 1585 * Start stats updater. 1586 */ 1587 sc->stat_ch = timeout(fxp_stats_update, sc, hz); 1588 } 1589 1590 static void 1591 fxp_set_media(sc, media) 1592 struct fxp_softc *sc; 1593 int media; 1594 { 1595 1596 switch (sc->phy_primary_device) { 1597 case FXP_PHY_DP83840: 1598 case FXP_PHY_DP83840A: 1599 fxp_mdi_write(sc, sc->phy_primary_addr, FXP_DP83840_PCR, 1600 fxp_mdi_read(sc, sc->phy_primary_addr, FXP_DP83840_PCR) | 1601 FXP_DP83840_PCR_LED4_MODE | /* LED4 always indicates duplex */ 1602 FXP_DP83840_PCR_F_CONNECT | /* force link disconnect bypass */ 1603 FXP_DP83840_PCR_BIT10); /* XXX I have no idea */ 1604 /* fall through */ 1605 case FXP_PHY_82553A: 1606 case FXP_PHY_82553C: /* untested */ 1607 case FXP_PHY_82555: 1608 case FXP_PHY_82555B: 1609 if (IFM_SUBTYPE(media) != IFM_AUTO) { 1610 int flags; 1611 1612 flags = (IFM_SUBTYPE(media) == IFM_100_TX) ? 1613 FXP_PHY_BMCR_SPEED_100M : 0; 1614 flags |= (media & IFM_FDX) ? 1615 FXP_PHY_BMCR_FULLDUPLEX : 0; 1616 fxp_mdi_write(sc, sc->phy_primary_addr, 1617 FXP_PHY_BMCR, 1618 (fxp_mdi_read(sc, sc->phy_primary_addr, 1619 FXP_PHY_BMCR) & 1620 ~(FXP_PHY_BMCR_AUTOEN | FXP_PHY_BMCR_SPEED_100M | 1621 FXP_PHY_BMCR_FULLDUPLEX)) | flags); 1622 } else { 1623 fxp_mdi_write(sc, sc->phy_primary_addr, 1624 FXP_PHY_BMCR, 1625 (fxp_mdi_read(sc, sc->phy_primary_addr, 1626 FXP_PHY_BMCR) | FXP_PHY_BMCR_AUTOEN)); 1627 } 1628 break; 1629 /* 1630 * The Seeq 80c24 doesn't have a PHY programming interface, so do 1631 * nothing. 1632 */ 1633 case FXP_PHY_80C24: 1634 break; 1635 default: 1636 printf(FXP_FORMAT 1637 ": warning: unsupported PHY, type = %d, addr = %d\n", 1638 FXP_ARGS(sc), sc->phy_primary_device, 1639 sc->phy_primary_addr); 1640 } 1641 } 1642 1643 /* 1644 * Change media according to request. 1645 */ 1646 int 1647 fxp_mediachange(ifp) 1648 struct ifnet *ifp; 1649 { 1650 struct fxp_softc *sc = ifp->if_softc; 1651 struct ifmedia *ifm = &sc->sc_media; 1652 1653 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1654 return (EINVAL); 1655 1656 fxp_set_media(sc, ifm->ifm_media); 1657 return (0); 1658 } 1659 1660 /* 1661 * Notify the world which media we're using. 1662 */ 1663 void 1664 fxp_mediastatus(ifp, ifmr) 1665 struct ifnet *ifp; 1666 struct ifmediareq *ifmr; 1667 { 1668 struct fxp_softc *sc = ifp->if_softc; 1669 int flags, stsflags; 1670 1671 switch (sc->phy_primary_device) { 1672 case FXP_PHY_82555: 1673 case FXP_PHY_82555B: 1674 case FXP_PHY_DP83840: 1675 case FXP_PHY_DP83840A: 1676 ifmr->ifm_status = IFM_AVALID; /* IFM_ACTIVE will be valid */ 1677 ifmr->ifm_active = IFM_ETHER; 1678 /* 1679 * the following is not an error. 1680 * You need to read this register twice to get current 1681 * status. This is correct documented behaviour, the 1682 * first read gets latched values. 1683 */ 1684 stsflags = fxp_mdi_read(sc, sc->phy_primary_addr, FXP_PHY_STS); 1685 stsflags = fxp_mdi_read(sc, sc->phy_primary_addr, FXP_PHY_STS); 1686 if (stsflags & FXP_PHY_STS_LINK_STS) 1687 ifmr->ifm_status |= IFM_ACTIVE; 1688 1689 /* 1690 * If we are in auto mode, then try report the result. 1691 */ 1692 flags = fxp_mdi_read(sc, sc->phy_primary_addr, FXP_PHY_BMCR); 1693 if (flags & FXP_PHY_BMCR_AUTOEN) { 1694 ifmr->ifm_active |= IFM_AUTO; /* XXX presently 0 */ 1695 if (stsflags & FXP_PHY_STS_AUTO_DONE) { 1696 /* 1697 * Intel and National parts report 1698 * differently on what they found. 1699 */ 1700 if ((sc->phy_primary_device == FXP_PHY_82555) 1701 || (sc->phy_primary_device == FXP_PHY_82555B)) { 1702 flags = fxp_mdi_read(sc, 1703 sc->phy_primary_addr, 1704 FXP_PHY_USC); 1705 1706 if (flags & FXP_PHY_USC_SPEED) 1707 ifmr->ifm_active |= IFM_100_TX; 1708 else 1709 ifmr->ifm_active |= IFM_10_T; 1710 1711 if (flags & FXP_PHY_USC_DUPLEX) 1712 ifmr->ifm_active |= IFM_FDX; 1713 } else { /* it's National. only know speed */ 1714 flags = fxp_mdi_read(sc, 1715 sc->phy_primary_addr, 1716 FXP_DP83840_PAR); 1717 1718 if (flags & FXP_DP83840_PAR_SPEED_10) 1719 ifmr->ifm_active |= IFM_10_T; 1720 else 1721 ifmr->ifm_active |= IFM_100_TX; 1722 } 1723 } 1724 } else { /* in manual mode.. just report what we were set to */ 1725 if (flags & FXP_PHY_BMCR_SPEED_100M) 1726 ifmr->ifm_active |= IFM_100_TX; 1727 else 1728 ifmr->ifm_active |= IFM_10_T; 1729 1730 if (flags & FXP_PHY_BMCR_FULLDUPLEX) 1731 ifmr->ifm_active |= IFM_FDX; 1732 } 1733 break; 1734 1735 case FXP_PHY_80C24: 1736 default: 1737 ifmr->ifm_active = IFM_ETHER|IFM_MANUAL; /* XXX IFM_AUTO ? */ 1738 } 1739 } 1740 1741 /* 1742 * Add a buffer to the end of the RFA buffer list. 1743 * Return 0 if successful, 1 for failure. A failure results in 1744 * adding the 'oldm' (if non-NULL) on to the end of the list - 1745 * tossing out its old contents and recycling it. 1746 * The RFA struct is stuck at the beginning of mbuf cluster and the 1747 * data pointer is fixed up to point just past it. 1748 */ 1749 static int 1750 fxp_add_rfabuf(sc, oldm) 1751 struct fxp_softc *sc; 1752 struct mbuf *oldm; 1753 { 1754 u_int32_t v; 1755 struct mbuf *m; 1756 struct fxp_rfa *rfa, *p_rfa; 1757 1758 MGETHDR(m, M_DONTWAIT, MT_DATA); 1759 if (m != NULL) { 1760 MCLGET(m, M_DONTWAIT); 1761 if ((m->m_flags & M_EXT) == 0) { 1762 m_freem(m); 1763 if (oldm == NULL) 1764 return 1; 1765 m = oldm; 1766 m->m_data = m->m_ext.ext_buf; 1767 } 1768 } else { 1769 if (oldm == NULL) 1770 return 1; 1771 m = oldm; 1772 m->m_data = m->m_ext.ext_buf; 1773 } 1774 1775 /* 1776 * Move the data pointer up so that the incoming data packet 1777 * will be 32-bit aligned. 1778 */ 1779 m->m_data += RFA_ALIGNMENT_FUDGE; 1780 1781 /* 1782 * Get a pointer to the base of the mbuf cluster and move 1783 * data start past it. 1784 */ 1785 rfa = mtod(m, struct fxp_rfa *); 1786 m->m_data += sizeof(struct fxp_rfa); 1787 rfa->size = (u_int16_t)(MCLBYTES - sizeof(struct fxp_rfa) - RFA_ALIGNMENT_FUDGE); 1788 1789 /* 1790 * Initialize the rest of the RFA. Note that since the RFA 1791 * is misaligned, we cannot store values directly. Instead, 1792 * we use an optimized, inline copy. 1793 */ 1794 1795 rfa->rfa_status = 0; 1796 rfa->rfa_control = FXP_RFA_CONTROL_EL; 1797 rfa->actual_size = 0; 1798 1799 v = -1; 1800 fxp_lwcopy(&v, (volatile u_int32_t *) rfa->link_addr); 1801 fxp_lwcopy(&v, (volatile u_int32_t *) rfa->rbd_addr); 1802 1803 /* 1804 * If there are other buffers already on the list, attach this 1805 * one to the end by fixing up the tail to point to this one. 1806 */ 1807 if (sc->rfa_headm != NULL) { 1808 p_rfa = (struct fxp_rfa *) (sc->rfa_tailm->m_ext.ext_buf + 1809 RFA_ALIGNMENT_FUDGE); 1810 sc->rfa_tailm->m_next = m; 1811 v = vtophys(rfa); 1812 fxp_lwcopy(&v, (volatile u_int32_t *) p_rfa->link_addr); 1813 p_rfa->rfa_control &= ~FXP_RFA_CONTROL_EL; 1814 } else { 1815 sc->rfa_headm = m; 1816 } 1817 sc->rfa_tailm = m; 1818 1819 return (m == oldm); 1820 } 1821 1822 static volatile int 1823 fxp_mdi_read(sc, phy, reg) 1824 struct fxp_softc *sc; 1825 int phy; 1826 int reg; 1827 { 1828 int count = 10000; 1829 int value; 1830 1831 CSR_WRITE_4(sc, FXP_CSR_MDICONTROL, 1832 (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21)); 1833 1834 while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0 1835 && count--) 1836 DELAY(10); 1837 1838 if (count <= 0) 1839 printf(FXP_FORMAT ": fxp_mdi_read: timed out\n", 1840 FXP_ARGS(sc)); 1841 1842 return (value & 0xffff); 1843 } 1844 1845 static void 1846 fxp_mdi_write(sc, phy, reg, value) 1847 struct fxp_softc *sc; 1848 int phy; 1849 int reg; 1850 int value; 1851 { 1852 int count = 10000; 1853 1854 CSR_WRITE_4(sc, FXP_CSR_MDICONTROL, 1855 (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) | 1856 (value & 0xffff)); 1857 1858 while((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 && 1859 count--) 1860 DELAY(10); 1861 1862 if (count <= 0) 1863 printf(FXP_FORMAT ": fxp_mdi_write: timed out\n", 1864 FXP_ARGS(sc)); 1865 } 1866 1867 static int 1868 fxp_ioctl(ifp, command, data) 1869 struct ifnet *ifp; 1870 FXP_IOCTLCMD_TYPE command; 1871 caddr_t data; 1872 { 1873 struct fxp_softc *sc = ifp->if_softc; 1874 struct ifreq *ifr = (struct ifreq *)data; 1875 int s, error = 0; 1876 1877 s = splimp(); 1878 1879 switch (command) { 1880 1881 case SIOCSIFADDR: 1882 #if !defined(__NetBSD__) 1883 case SIOCGIFADDR: 1884 case SIOCSIFMTU: 1885 #endif 1886 error = ether_ioctl(ifp, command, data); 1887 break; 1888 1889 case SIOCSIFFLAGS: 1890 sc->all_mcasts = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0; 1891 1892 /* 1893 * If interface is marked up and not running, then start it. 1894 * If it is marked down and running, stop it. 1895 * XXX If it's up then re-initialize it. This is so flags 1896 * such as IFF_PROMISC are handled. 1897 */ 1898 if (ifp->if_flags & IFF_UP) { 1899 fxp_init(sc); 1900 } else { 1901 if (ifp->if_flags & IFF_RUNNING) 1902 fxp_stop(sc); 1903 } 1904 break; 1905 1906 case SIOCADDMULTI: 1907 case SIOCDELMULTI: 1908 sc->all_mcasts = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0; 1909 #if defined(__NetBSD__) 1910 error = (command == SIOCADDMULTI) ? 1911 ether_addmulti(ifr, &sc->sc_ethercom) : 1912 ether_delmulti(ifr, &sc->sc_ethercom); 1913 1914 if (error == ENETRESET) { 1915 /* 1916 * Multicast list has changed; set the hardware 1917 * filter accordingly. 1918 */ 1919 if (!sc->all_mcasts) 1920 fxp_mc_setup(sc); 1921 /* 1922 * fxp_mc_setup() can turn on all_mcasts if we run 1923 * out of space, so check it again rather than else {}. 1924 */ 1925 if (sc->all_mcasts) 1926 fxp_init(sc); 1927 error = 0; 1928 } 1929 #else /* __FreeBSD__ */ 1930 /* 1931 * Multicast list has changed; set the hardware filter 1932 * accordingly. 1933 */ 1934 if (!sc->all_mcasts) 1935 fxp_mc_setup(sc); 1936 /* 1937 * fxp_mc_setup() can turn on sc->all_mcasts, so check it 1938 * again rather than else {}. 1939 */ 1940 if (sc->all_mcasts) 1941 fxp_init(sc); 1942 error = 0; 1943 #endif /* __NetBSD__ */ 1944 break; 1945 1946 case SIOCSIFMEDIA: 1947 case SIOCGIFMEDIA: 1948 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command); 1949 break; 1950 1951 default: 1952 error = EINVAL; 1953 } 1954 (void) splx(s); 1955 return (error); 1956 } 1957 1958 /* 1959 * Program the multicast filter. 1960 * 1961 * We have an artificial restriction that the multicast setup command 1962 * must be the first command in the chain, so we take steps to ensure 1963 * this. By requiring this, it allows us to keep up the performance of 1964 * the pre-initialized command ring (esp. link pointers) by not actually 1965 * inserting the mcsetup command in the ring - i.e. its link pointer 1966 * points to the TxCB ring, but the mcsetup descriptor itself is not part 1967 * of it. We then can do 'CU_START' on the mcsetup descriptor and have it 1968 * lead into the regular TxCB ring when it completes. 1969 * 1970 * This function must be called at splimp. 1971 */ 1972 static void 1973 fxp_mc_setup(sc) 1974 struct fxp_softc *sc; 1975 { 1976 struct fxp_cb_mcs *mcsp = sc->mcsp; 1977 struct ifnet *ifp = &sc->sc_if; 1978 struct ifmultiaddr *ifma; 1979 int nmcasts; 1980 1981 /* 1982 * If there are queued commands, we must wait until they are all 1983 * completed. If we are already waiting, then add a NOP command 1984 * with interrupt option so that we're notified when all commands 1985 * have been completed - fxp_start() ensures that no additional 1986 * TX commands will be added when need_mcsetup is true. 1987 */ 1988 if (sc->tx_queued) { 1989 struct fxp_cb_tx *txp; 1990 1991 /* 1992 * need_mcsetup will be true if we are already waiting for the 1993 * NOP command to be completed (see below). In this case, bail. 1994 */ 1995 if (sc->need_mcsetup) 1996 return; 1997 sc->need_mcsetup = 1; 1998 1999 /* 2000 * Add a NOP command with interrupt so that we are notified when all 2001 * TX commands have been processed. 2002 */ 2003 txp = sc->cbl_last->next; 2004 txp->mb_head = NULL; 2005 txp->cb_status = 0; 2006 txp->cb_command = FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I; 2007 /* 2008 * Advance the end of list forward. 2009 */ 2010 sc->cbl_last->cb_command &= ~FXP_CB_COMMAND_S; 2011 sc->cbl_last = txp; 2012 sc->tx_queued++; 2013 /* 2014 * Issue a resume in case the CU has just suspended. 2015 */ 2016 fxp_scb_wait(sc); 2017 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_RESUME); 2018 /* 2019 * Set a 5 second timer just in case we don't hear from the 2020 * card again. 2021 */ 2022 ifp->if_timer = 5; 2023 2024 return; 2025 } 2026 sc->need_mcsetup = 0; 2027 2028 /* 2029 * Initialize multicast setup descriptor. 2030 */ 2031 mcsp->next = sc->cbl_base; 2032 mcsp->mb_head = NULL; 2033 mcsp->cb_status = 0; 2034 mcsp->cb_command = FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I; 2035 mcsp->link_addr = vtophys(&sc->cbl_base->cb_status); 2036 2037 nmcasts = 0; 2038 if (!sc->all_mcasts) { 2039 for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL; 2040 ifma = ifma->ifma_link.le_next) { 2041 if (ifma->ifma_addr->sa_family != AF_LINK) 2042 continue; 2043 if (nmcasts >= MAXMCADDR) { 2044 sc->all_mcasts = 1; 2045 nmcasts = 0; 2046 break; 2047 } 2048 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 2049 (volatile void *) &sc->mcsp->mc_addr[nmcasts][0], 6); 2050 nmcasts++; 2051 } 2052 } 2053 mcsp->mc_cnt = nmcasts * 6; 2054 sc->cbl_first = sc->cbl_last = (struct fxp_cb_tx *) mcsp; 2055 sc->tx_queued = 1; 2056 2057 /* 2058 * Wait until command unit is not active. This should never 2059 * be the case when nothing is queued, but make sure anyway. 2060 */ 2061 while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) == 2062 FXP_SCB_CUS_ACTIVE) ; 2063 2064 /* 2065 * Start the multicast setup command. 2066 */ 2067 fxp_scb_wait(sc); 2068 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(&mcsp->cb_status)); 2069 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START); 2070 2071 ifp->if_timer = 2; 2072 return; 2073 } 2074