1 /* 2 * Copyright (c) 1995, David Greenman 3 * All rights reserved. 4 * 5 * Modifications to support NetBSD and media selection: 6 * Copyright (c) 1997 Jason R. Thorpe. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice unmodified, this list of conditions, and the following 13 * disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 * 30 * $FreeBSD$ 31 */ 32 33 /* 34 * Intel EtherExpress Pro/100B PCI Fast Ethernet driver 35 */ 36 37 #include <sys/param.h> 38 #include <sys/systm.h> 39 #include <sys/mbuf.h> 40 #include <sys/malloc.h> 41 #include <sys/kernel.h> 42 #include <sys/socket.h> 43 44 #include <net/if.h> 45 #include <net/if_dl.h> 46 #include <net/if_media.h> 47 48 #ifdef NS 49 #include <netns/ns.h> 50 #include <netns/ns_if.h> 51 #endif 52 53 #include <net/bpf.h> 54 55 #if defined(__NetBSD__) 56 57 #include <sys/ioctl.h> 58 #include <sys/errno.h> 59 #include <sys/device.h> 60 61 #include <net/if_dl.h> 62 #include <net/if_ether.h> 63 64 #include <netinet/if_inarp.h> 65 66 #include <vm/vm.h> 67 68 #include <machine/cpu.h> 69 #include <machine/bus.h> 70 #include <machine/intr.h> 71 72 #include <dev/pci/if_fxpreg.h> 73 #include <dev/pci/if_fxpvar.h> 74 75 #include <dev/pci/pcivar.h> 76 #include <dev/pci/pcireg.h> 77 #include <dev/pci/pcidevs.h> 78 79 80 #else /* __FreeBSD__ */ 81 82 #include <sys/sockio.h> 83 #include <sys/bus.h> 84 #include <machine/bus.h> 85 #include <sys/rman.h> 86 #include <machine/resource.h> 87 88 #include <net/ethernet.h> 89 #include <net/if_arp.h> 90 91 #include <vm/vm.h> /* for vtophys */ 92 #include <vm/pmap.h> /* for vtophys */ 93 #include <machine/clock.h> /* for DELAY */ 94 95 #include <pci/pcivar.h> 96 #include <pci/pcireg.h> /* for PCIM_CMD_xxx */ 97 #include <pci/if_fxpreg.h> 98 #include <pci/if_fxpvar.h> 99 100 #endif /* __NetBSD__ */ 101 102 #ifdef __alpha__ /* XXX */ 103 /* XXX XXX NEED REAL DMA MAPPING SUPPORT XXX XXX */ 104 #undef vtophys 105 #define vtophys(va) alpha_XXX_dmamap((vm_offset_t)(va)) 106 #endif /* __alpha__ */ 107 108 /* 109 * NOTE! On the Alpha, we have an alignment constraint. The 110 * card DMAs the packet immediately following the RFA. However, 111 * the first thing in the packet is a 14-byte Ethernet header. 112 * This means that the packet is misaligned. To compensate, 113 * we actually offset the RFA 2 bytes into the cluster. 
This
 * aligns the packet after the Ethernet header at a 32-bit
 * boundary. HOWEVER! This means that the RFA is misaligned!
 */
#define RFA_ALIGNMENT_FUDGE	2
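/*
 * For reference (assuming the 16-byte RFA layout from if_fxpreg.h):
 * bytes 0-1 of the cluster are the fudge, the RFA sits at offset 2,
 * the received frame starts right after it at offset 18, and the
 * 14-byte Ethernet header therefore ends at offset 32, so the payload
 * is 32-bit aligned at the cost of a misaligned RFA.
 */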

/*
 * Inline function to copy a 16-bit aligned 32-bit quantity.
 */
static __inline void fxp_lwcopy __P((volatile u_int32_t *,
	volatile u_int32_t *));
static __inline void
fxp_lwcopy(src, dst)
	volatile u_int32_t *src, *dst;
{
#ifdef __i386__
	*dst = *src;
#else
	volatile u_int16_t *a = (volatile u_int16_t *)src;
	volatile u_int16_t *b = (volatile u_int16_t *)dst;

	b[0] = a[0];
	b[1] = a[1];
#endif
}

/*
 * Template for default configuration parameters.
 * See struct fxp_cb_config for the bit definitions.
 */
static u_char fxp_cb_config_template[] = {
	0x0, 0x0,		/* cb_status */
	0x80, 0x2,		/* cb_command */
	0xff, 0xff, 0xff, 0xff,	/* link_addr */
	0x16,	/*  0 */
	0x8,	/*  1 */
	0x0,	/*  2 */
	0x0,	/*  3 */
	0x0,	/*  4 */
	0x80,	/*  5 */
	0xb2,	/*  6 */
	0x3,	/*  7 */
	0x1,	/*  8 */
	0x0,	/*  9 */
	0x26,	/* 10 */
	0x0,	/* 11 */
	0x60,	/* 12 */
	0x0,	/* 13 */
	0xf2,	/* 14 */
	0x48,	/* 15 */
	0x0,	/* 16 */
	0x40,	/* 17 */
	0xf3,	/* 18 */
	0x0,	/* 19 */
	0x3f,	/* 20 */
	0x5	/* 21 */
};

/* Supported media types. */
struct fxp_supported_media {
	const int	fsm_phy;	/* PHY type */
	const int	*fsm_media;	/* the media array */
	const int	fsm_nmedia;	/* the number of supported media */
	const int	fsm_defmedia;	/* default media for this PHY */
};

static const int fxp_media_standard[] = {
	IFM_ETHER|IFM_10_T,
	IFM_ETHER|IFM_10_T|IFM_FDX,
	IFM_ETHER|IFM_100_TX,
	IFM_ETHER|IFM_100_TX|IFM_FDX,
	IFM_ETHER|IFM_AUTO,
};
#define FXP_MEDIA_STANDARD_DEFMEDIA	(IFM_ETHER|IFM_AUTO)

static const int fxp_media_default[] = {
	IFM_ETHER|IFM_MANUAL,		/* XXX IFM_AUTO ? */
};
#define FXP_MEDIA_DEFAULT_DEFMEDIA	(IFM_ETHER|IFM_MANUAL)

static const struct fxp_supported_media fxp_media[] = {
	{ FXP_PHY_DP83840, fxp_media_standard,
	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
	  FXP_MEDIA_STANDARD_DEFMEDIA },
	{ FXP_PHY_DP83840A, fxp_media_standard,
	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
	  FXP_MEDIA_STANDARD_DEFMEDIA },
	{ FXP_PHY_82553A, fxp_media_standard,
	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
	  FXP_MEDIA_STANDARD_DEFMEDIA },
	{ FXP_PHY_82553C, fxp_media_standard,
	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
	  FXP_MEDIA_STANDARD_DEFMEDIA },
	{ FXP_PHY_82555, fxp_media_standard,
	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
	  FXP_MEDIA_STANDARD_DEFMEDIA },
	{ FXP_PHY_82555B, fxp_media_standard,
	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
	  FXP_MEDIA_STANDARD_DEFMEDIA },
	{ FXP_PHY_80C24, fxp_media_default,
	  sizeof(fxp_media_default) / sizeof(fxp_media_default[0]),
	  FXP_MEDIA_DEFAULT_DEFMEDIA },
};
#define NFXPMEDIA (sizeof(fxp_media) / sizeof(fxp_media[0]))

static int fxp_mediachange __P((struct ifnet *));
static void fxp_mediastatus __P((struct ifnet *, struct ifmediareq *));
static void fxp_set_media __P((struct fxp_softc *, int));
static __inline void fxp_scb_wait __P((struct fxp_softc *));
static FXP_INTR_TYPE fxp_intr __P((void *));
static void fxp_start __P((struct ifnet *));
static int fxp_ioctl __P((struct ifnet *,
	FXP_IOCTLCMD_TYPE, caddr_t));
static void fxp_init __P((void *));
static void fxp_stop __P((struct fxp_softc *));
static void fxp_watchdog __P((struct ifnet *));
static int fxp_add_rfabuf __P((struct fxp_softc *, struct mbuf *));
static int fxp_mdi_read __P((struct fxp_softc *, int, int));
static void fxp_mdi_write __P((struct fxp_softc *, int, int, int));
static void fxp_autosize_eeprom __P((struct fxp_softc *));
static void fxp_read_eeprom __P((struct fxp_softc *, u_int16_t *,
	int, int));
static int fxp_attach_common __P((struct fxp_softc *, u_int8_t *));
static void fxp_stats_update __P((void *));
static void fxp_mc_setup __P((struct fxp_softc *));

/*
 * Set initial transmit threshold at 64 (512 bytes). This is
 * increased by 64 (512 bytes) at a time, to a maximum of 192
 * (1536 bytes), if an underrun occurs.
 */
static int tx_threshold = 64;

/*
 * Number of transmit control blocks. This determines the number
 * of transmit buffers that can be chained in the CB list.
 * This must be a power of two.
 */
#define FXP_NTXCB	128

/*
 * Number of completed TX commands at which point an interrupt
 * will be generated to garbage collect the attached buffers.
 * Must be at least one less than FXP_NTXCB, and should be
 * enough less so that the transmitter doesn't become idle
 * during the buffer rundown (which would reduce performance).
 */
#define FXP_CXINT_THRESH 120

/*
 * TxCB list index mask. This is used to do list wrap-around.
 */
#define FXP_TXCB_MASK	(FXP_NTXCB - 1)

/*
 * Number of receive frame area buffers. These are large so choose
 * wisely.
 */
#define FXP_NRFABUFS	64

/*
 * Maximum number of seconds that the receiver can be idle before we
 * assume it's dead and attempt to reset it by reprogramming the
 * multicast filter.
This is part of a work-around for a bug in the 277 * NIC. See fxp_stats_update(). 278 */ 279 #define FXP_MAX_RX_IDLE 15 280 281 /* 282 * Wait for the previous command to be accepted (but not necessarily 283 * completed). 284 */ 285 static __inline void 286 fxp_scb_wait(sc) 287 struct fxp_softc *sc; 288 { 289 int i = 10000; 290 291 while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i); 292 } 293 294 /************************************************************* 295 * Operating system-specific autoconfiguration glue 296 *************************************************************/ 297 298 #if defined(__NetBSD__) 299 300 #ifdef __BROKEN_INDIRECT_CONFIG 301 static int fxp_match __P((struct device *, void *, void *)); 302 #else 303 static int fxp_match __P((struct device *, struct cfdata *, void *)); 304 #endif 305 static void fxp_attach __P((struct device *, struct device *, void *)); 306 307 static void fxp_shutdown __P((void *)); 308 309 /* Compensate for lack of a generic ether_ioctl() */ 310 static int fxp_ether_ioctl __P((struct ifnet *, 311 FXP_IOCTLCMD_TYPE, caddr_t)); 312 #define ether_ioctl fxp_ether_ioctl 313 314 struct cfattach fxp_ca = { 315 sizeof(struct fxp_softc), fxp_match, fxp_attach 316 }; 317 318 struct cfdriver fxp_cd = { 319 NULL, "fxp", DV_IFNET 320 }; 321 322 /* 323 * Check if a device is an 82557. 324 */ 325 static int 326 fxp_match(parent, match, aux) 327 struct device *parent; 328 #ifdef __BROKEN_INDIRECT_CONFIG 329 void *match; 330 #else 331 struct cfdata *match; 332 #endif 333 void *aux; 334 { 335 struct pci_attach_args *pa = aux; 336 337 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL) 338 return (0); 339 340 switch (PCI_PRODUCT(pa->pa_id)) { 341 case PCI_PRODUCT_INTEL_82557: 342 return (1); 343 } 344 345 return (0); 346 } 347 348 static void 349 fxp_attach(parent, self, aux) 350 struct device *parent, *self; 351 void *aux; 352 { 353 struct fxp_softc *sc = (struct fxp_softc *)self; 354 struct pci_attach_args *pa = aux; 355 pci_chipset_tag_t pc = pa->pa_pc; 356 pci_intr_handle_t ih; 357 const char *intrstr = NULL; 358 u_int8_t enaddr[6]; 359 struct ifnet *ifp; 360 361 /* 362 * Map control/status registers. 363 */ 364 if (pci_mapreg_map(pa, FXP_PCI_MMBA, PCI_MAPREG_TYPE_MEM, 0, 365 &sc->sc_st, &sc->sc_sh, NULL, NULL)) { 366 printf(": can't map registers\n"); 367 return; 368 } 369 printf(": Intel EtherExpress Pro 10/100B Ethernet\n"); 370 371 /* 372 * Allocate our interrupt. 373 */ 374 if (pci_intr_map(pc, pa->pa_intrtag, pa->pa_intrpin, 375 pa->pa_intrline, &ih)) { 376 printf("%s: couldn't map interrupt\n", sc->sc_dev.dv_xname); 377 return; 378 } 379 intrstr = pci_intr_string(pc, ih); 380 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, fxp_intr, sc); 381 if (sc->sc_ih == NULL) { 382 printf("%s: couldn't establish interrupt", 383 sc->sc_dev.dv_xname); 384 if (intrstr != NULL) 385 printf(" at %s", intrstr); 386 printf("\n"); 387 return; 388 } 389 printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr); 390 391 /* Do generic parts of attach. */ 392 if (fxp_attach_common(sc, enaddr)) { 393 /* Failed! */ 394 return; 395 } 396 397 printf("%s: Ethernet address %s%s\n", sc->sc_dev.dv_xname, 398 ether_sprintf(enaddr), sc->phy_10Mbps_only ? 
", 10Mbps" : ""); 399 400 ifp = &sc->sc_ethercom.ec_if; 401 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 402 ifp->if_softc = sc; 403 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 404 ifp->if_ioctl = fxp_ioctl; 405 ifp->if_start = fxp_start; 406 ifp->if_watchdog = fxp_watchdog; 407 408 /* 409 * Attach the interface. 410 */ 411 if_attach(ifp); 412 /* 413 * Let the system queue as many packets as we have available 414 * TX descriptors. 415 */ 416 ifp->if_snd.ifq_maxlen = FXP_NTXCB - 1; 417 ether_ifattach(ifp, enaddr); 418 bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB, 419 sizeof(struct ether_header)); 420 421 /* 422 * Add shutdown hook so that DMA is disabled prior to reboot. Not 423 * doing do could allow DMA to corrupt kernel memory during the 424 * reboot before the driver initializes. 425 */ 426 shutdownhook_establish(fxp_shutdown, sc); 427 } 428 429 /* 430 * Device shutdown routine. Called at system shutdown after sync. The 431 * main purpose of this routine is to shut off receiver DMA so that 432 * kernel memory doesn't get clobbered during warmboot. 433 */ 434 static void 435 fxp_shutdown(sc) 436 void *sc; 437 { 438 fxp_stop((struct fxp_softc *) sc); 439 } 440 441 static int 442 fxp_ether_ioctl(ifp, cmd, data) 443 struct ifnet *ifp; 444 FXP_IOCTLCMD_TYPE cmd; 445 caddr_t data; 446 { 447 struct ifaddr *ifa = (struct ifaddr *) data; 448 struct fxp_softc *sc = ifp->if_softc; 449 450 switch (cmd) { 451 case SIOCSIFADDR: 452 ifp->if_flags |= IFF_UP; 453 454 switch (ifa->ifa_addr->sa_family) { 455 #ifdef INET 456 case AF_INET: 457 fxp_init(sc); 458 arp_ifinit(ifp, ifa); 459 break; 460 #endif 461 #ifdef NS 462 case AF_NS: 463 { 464 register struct ns_addr *ina = &IA_SNS(ifa)->sns_addr; 465 466 if (ns_nullhost(*ina)) 467 ina->x_host = *(union ns_host *) 468 LLADDR(ifp->if_sadl); 469 else 470 bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl), 471 ifp->if_addrlen); 472 /* Set new address. */ 473 fxp_init(sc); 474 break; 475 } 476 #endif 477 default: 478 fxp_init(sc); 479 break; 480 } 481 break; 482 483 default: 484 return (EINVAL); 485 } 486 487 return (0); 488 } 489 490 #else /* __FreeBSD__ */ 491 492 /* 493 * Return identification string if this is device is ours. 494 */ 495 static int 496 fxp_probe(device_t dev) 497 { 498 if (pci_get_vendor(dev) == FXP_VENDORID_INTEL) { 499 switch (pci_get_device(dev)) { 500 501 case FXP_DEVICEID_i82557: 502 device_set_desc(dev, "Intel Pro 10/100B/100+ Ethernet"); 503 return 0; 504 case FXP_DEVICEID_i82559: 505 device_set_desc(dev, "Intel InBusiness 10/100 Ethernet"); 506 return 0; 507 case FXP_DEVICEID_i82559ER: 508 device_set_desc(dev, "Intel Embedded 10/100 Ethernet"); 509 return 0; 510 default: 511 break; 512 } 513 } 514 515 return ENXIO; 516 } 517 518 static int 519 fxp_attach(device_t dev) 520 { 521 int error = 0; 522 struct fxp_softc *sc = device_get_softc(dev); 523 struct ifnet *ifp; 524 int s; 525 u_long val; 526 int rid; 527 528 callout_handle_init(&sc->stat_ch); 529 530 s = splimp(); 531 532 /* 533 * Enable bus mastering. 534 */ 535 val = pci_read_config(dev, PCIR_COMMAND, 2); 536 val |= (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN); 537 pci_write_config(dev, PCIR_COMMAND, val, 2); 538 539 /* 540 * Map control/status registers. 
	 */
	rid = FXP_PCI_MMBA;
	sc->mem = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
	    0, ~0, 1, RF_ACTIVE);
	if (!sc->mem) {
		device_printf(dev, "could not map memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->sc_st = rman_get_bustag(sc->mem);
	sc->sc_sh = rman_get_bushandle(sc->mem);

	/*
	 * Allocate our interrupt.
	 */
	rid = 0;
	sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->irq == NULL) {
		device_printf(dev, "could not map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET,
	    fxp_intr, sc, &sc->ih);
	if (error) {
		device_printf(dev, "could not setup irq\n");
		goto fail;
	}

	/* Do generic parts of attach. */
	if (fxp_attach_common(sc, sc->arpcom.ac_enaddr)) {
		/* Failed! */
		bus_teardown_intr(dev, sc->irq, sc->ih);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
		bus_release_resource(dev, SYS_RES_MEMORY, FXP_PCI_MMBA, sc->mem);
		error = ENXIO;
		goto fail;
	}

	device_printf(dev, "Ethernet address %6D%s\n",
	    sc->arpcom.ac_enaddr, ":", sc->phy_10Mbps_only ? ", 10Mbps" : "");

	ifp = &sc->arpcom.ac_if;
	ifp->if_unit = device_get_unit(dev);
	ifp->if_name = "fxp";
	ifp->if_output = ether_output;
	ifp->if_baudrate = 100000000;
	ifp->if_init = fxp_init;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = fxp_ioctl;
	ifp->if_start = fxp_start;
	ifp->if_watchdog = fxp_watchdog;

	/*
	 * Attach the interface.
	 */
	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
	/*
	 * Let the system queue as many packets as we have available
	 * TX descriptors.
	 */
	ifp->if_snd.ifq_maxlen = FXP_NTXCB - 1;

	splx(s);
	return 0;

fail:
	splx(s);
	return error;
}

/*
 * Detach interface.
 */
static int
fxp_detach(device_t dev)
{
	struct fxp_softc *sc = device_get_softc(dev);
	int s;

	s = splimp();

	/*
	 * Close down routes etc.
	 */
	ether_ifdetach(&sc->arpcom.ac_if, ETHER_BPF_SUPPORTED);

	/*
	 * Stop DMA and drop transmit queue.
	 */
	fxp_stop(sc);

	/*
	 * Deallocate resources.
	 */
	bus_teardown_intr(dev, sc->irq, sc->ih);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
	bus_release_resource(dev, SYS_RES_MEMORY, FXP_PCI_MMBA, sc->mem);

	/*
	 * Free all the receive buffers.
	 */
	if (sc->rfa_headm != NULL)
		m_freem(sc->rfa_headm);

	/*
	 * Free all media structures.
	 */
	ifmedia_removeall(&sc->sc_media);

	/*
	 * Free ancillary structures.
	 */
	free(sc->cbl_base, M_DEVBUF);
	free(sc->fxp_stats, M_DEVBUF);
	free(sc->mcsp, M_DEVBUF);

	splx(s);

	return 0;
}

/*
 * Device shutdown routine. Called at system shutdown after sync. The
 * main purpose of this routine is to shut off receiver DMA so that
 * kernel memory doesn't get clobbered during warmboot.
 */
static int
fxp_shutdown(device_t dev)
{
	/*
	 * Make sure that DMA is disabled prior to reboot. Not doing
	 * so could allow DMA to corrupt kernel memory during the
	 * reboot before the driver initializes.
679 */ 680 fxp_stop((struct fxp_softc *) device_get_softc(dev)); 681 return 0; 682 } 683 684 static device_method_t fxp_methods[] = { 685 /* Device interface */ 686 DEVMETHOD(device_probe, fxp_probe), 687 DEVMETHOD(device_attach, fxp_attach), 688 DEVMETHOD(device_detach, fxp_detach), 689 DEVMETHOD(device_shutdown, fxp_shutdown), 690 691 { 0, 0 } 692 }; 693 694 static driver_t fxp_driver = { 695 "fxp", 696 fxp_methods, 697 sizeof(struct fxp_softc), 698 }; 699 700 static devclass_t fxp_devclass; 701 702 DRIVER_MODULE(if_fxp, pci, fxp_driver, fxp_devclass, 0, 0); 703 704 #endif /* __NetBSD__ */ 705 706 /************************************************************* 707 * End of operating system-specific autoconfiguration glue 708 *************************************************************/ 709 710 /* 711 * Do generic parts of attach. 712 */ 713 static int 714 fxp_attach_common(sc, enaddr) 715 struct fxp_softc *sc; 716 u_int8_t *enaddr; 717 { 718 u_int16_t data; 719 int i, nmedia, defmedia; 720 const int *media; 721 722 /* 723 * Reset to a stable state. 724 */ 725 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET); 726 DELAY(10); 727 728 sc->cbl_base = malloc(sizeof(struct fxp_cb_tx) * FXP_NTXCB, 729 M_DEVBUF, M_NOWAIT); 730 if (sc->cbl_base == NULL) 731 goto fail; 732 bzero(sc->cbl_base, sizeof(struct fxp_cb_tx) * FXP_NTXCB); 733 734 sc->fxp_stats = malloc(sizeof(struct fxp_stats), M_DEVBUF, M_NOWAIT); 735 if (sc->fxp_stats == NULL) 736 goto fail; 737 bzero(sc->fxp_stats, sizeof(struct fxp_stats)); 738 739 sc->mcsp = malloc(sizeof(struct fxp_cb_mcs), M_DEVBUF, M_NOWAIT); 740 if (sc->mcsp == NULL) 741 goto fail; 742 743 /* 744 * Pre-allocate our receive buffers. 745 */ 746 for (i = 0; i < FXP_NRFABUFS; i++) { 747 if (fxp_add_rfabuf(sc, NULL) != 0) { 748 goto fail; 749 } 750 } 751 752 /* 753 * Find out how large of an SEEPROM we have. 754 */ 755 fxp_autosize_eeprom(sc); 756 757 /* 758 * Get info about the primary PHY 759 */ 760 fxp_read_eeprom(sc, (u_int16_t *)&data, 6, 1); 761 sc->phy_primary_addr = data & 0xff; 762 sc->phy_primary_device = (data >> 8) & 0x3f; 763 sc->phy_10Mbps_only = data >> 15; 764 765 /* 766 * Read MAC address. 767 */ 768 fxp_read_eeprom(sc, (u_int16_t *)enaddr, 0, 3); 769 770 /* 771 * Initialize the media structures. 772 */ 773 774 media = fxp_media_default; 775 nmedia = sizeof(fxp_media_default) / sizeof(fxp_media_default[0]); 776 defmedia = FXP_MEDIA_DEFAULT_DEFMEDIA; 777 778 for (i = 0; i < NFXPMEDIA; i++) { 779 if (sc->phy_primary_device == fxp_media[i].fsm_phy) { 780 media = fxp_media[i].fsm_media; 781 nmedia = fxp_media[i].fsm_nmedia; 782 defmedia = fxp_media[i].fsm_defmedia; 783 } 784 } 785 786 ifmedia_init(&sc->sc_media, 0, fxp_mediachange, fxp_mediastatus); 787 for (i = 0; i < nmedia; i++) { 788 if (IFM_SUBTYPE(media[i]) == IFM_100_TX && sc->phy_10Mbps_only) 789 continue; 790 ifmedia_add(&sc->sc_media, media[i], 0, NULL); 791 } 792 ifmedia_set(&sc->sc_media, defmedia); 793 794 return (0); 795 796 fail: 797 printf(FXP_FORMAT ": Failed to malloc memory\n", FXP_ARGS(sc)); 798 if (sc->cbl_base) 799 free(sc->cbl_base, M_DEVBUF); 800 if (sc->fxp_stats) 801 free(sc->fxp_stats, M_DEVBUF); 802 if (sc->mcsp) 803 free(sc->mcsp, M_DEVBUF); 804 /* frees entire chain */ 805 if (sc->rfa_headm) 806 m_freem(sc->rfa_headm); 807 808 return (ENOMEM); 809 } 810 811 /* 812 * From NetBSD: 813 * 814 * Figure out EEPROM size. 
 *
 * 559's can have either 64-word or 256-word EEPROMs, the 558
 * datasheet only talks about 64-word EEPROMs, and the 557 datasheet
 * talks about the existence of 16 to 256 word EEPROMs.
 *
 * The only known sizes are 64 and 256, where the 256 version is used
 * by CardBus cards to store CIS information.
 *
 * The address is shifted in msb-to-lsb, and after the last
 * address-bit the EEPROM is supposed to output a `dummy zero' bit,
 * after which follows the actual data. We try to detect this zero by
 * probing the data-out bit in the EEPROM control register just after
 * having shifted in a bit. If the bit is zero, we assume we've
 * shifted enough address bits. The data-out should be tri-stated
 * before this, which should translate to a logical one.
 *
 * Other ways to do this would be to try to read a register with known
 * contents with a varying number of address bits, but no such
 * register seems to be available. The high bits of register 10 are 01
 * on the 558 and 559, but apparently not on the 557.
 *
 * The Linux driver computes a checksum on the EEPROM data, but the
 * value of this checksum is not very well documented.
 */
static void
fxp_autosize_eeprom(sc)
	struct fxp_softc *sc;
{
	u_int16_t reg;
	int x;

	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	/*
	 * Shift in read opcode.
	 */
	for (x = 3; x > 0; x--) {
		if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) {
			reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
		} else {
			reg = FXP_EEPROM_EECS;
		}
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
		    reg | FXP_EEPROM_EESK);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
	}
	/*
	 * Shift in address.
	 * Wait for the dummy zero following a correct address shift.
	 */
	for (x = 1; x <= 8; x++) {
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
		    FXP_EEPROM_EECS | FXP_EEPROM_EESK);
		DELAY(1);
		if ((CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO) == 0)
			break;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
		DELAY(1);
	}
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	sc->eeprom_size = x;
}
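/*
 * Note: sc->eeprom_size ends up holding the number of address bits
 * that were shifted in before the dummy zero appeared (6 for a
 * 64-word part, 8 for a 256-word part); fxp_read_eeprom() below
 * shifts exactly that many address bits per access.
 */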
/*
 * Read from the serial EEPROM. Basically, you manually shift in
 * the read opcode (one bit at a time) and then shift in the address,
 * and then you shift out the data (all of this one bit at a time).
 * The word size is 16 bits, so you have to provide the address for
 * every 16 bits of data.
 */
static void
fxp_read_eeprom(sc, data, offset, words)
	struct fxp_softc *sc;
	u_short *data;
	int offset;
	int words;
{
	u_int16_t reg;
	int i, x;

	for (i = 0; i < words; i++) {
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
		/*
		 * Shift in read opcode.
		 */
		for (x = 3; x > 0; x--) {
			if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) {
				reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
			} else {
				reg = FXP_EEPROM_EECS;
			}
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
			    reg | FXP_EEPROM_EESK);
			DELAY(1);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			DELAY(1);
		}
		/*
		 * Shift in address.
		 */
		for (x = sc->eeprom_size; x > 0; x--) {
			if ((i + offset) & (1 << (x - 1))) {
				reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
			} else {
				reg = FXP_EEPROM_EECS;
			}
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
			    reg | FXP_EEPROM_EESK);
			DELAY(1);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			DELAY(1);
		}
		reg = FXP_EEPROM_EECS;
		data[i] = 0;
		/*
		 * Shift out data.
		 */
		for (x = 16; x > 0; x--) {
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
			    reg | FXP_EEPROM_EESK);
			DELAY(1);
			if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) &
			    FXP_EEPROM_EEDO)
				data[i] |= (1 << (x - 1));
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			DELAY(1);
		}
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
		DELAY(1);
	}
}

/*
 * Start packet transmission on the interface.
 */
static void
fxp_start(ifp)
	struct ifnet *ifp;
{
	struct fxp_softc *sc = ifp->if_softc;
	struct fxp_cb_tx *txp;

	/*
	 * See if we need to suspend xmit until the multicast filter
	 * has been reprogrammed (which can only be done at the head
	 * of the command chain).
	 */
	if (sc->need_mcsetup)
		return;

	txp = NULL;

	/*
	 * We're finished if there is nothing more to add to the list or if
	 * we're all filled up with buffers to transmit.
	 * NOTE: One TxCB is reserved to guarantee that fxp_mc_setup() can add
	 * a NOP command when needed.
	 */
	while (ifp->if_snd.ifq_head != NULL && sc->tx_queued < FXP_NTXCB - 1) {
		struct mbuf *m, *mb_head;
		int segment;

		/*
		 * Grab a packet to transmit.
		 */
		IF_DEQUEUE(&ifp->if_snd, mb_head);

		/*
		 * Get pointer to next available tx desc.
		 */
		txp = sc->cbl_last->next;

		/*
		 * Go through each of the mbufs in the chain and initialize
		 * the transmit buffer descriptors with the physical address
		 * and size of the mbuf.
		 */
tbdinit:
		for (m = mb_head, segment = 0; m != NULL; m = m->m_next) {
			if (m->m_len != 0) {
				if (segment == FXP_NTXSEG)
					break;
				txp->tbd[segment].tb_addr =
				    vtophys(mtod(m, vm_offset_t));
				txp->tbd[segment].tb_size = m->m_len;
				segment++;
			}
		}
		if (m != NULL) {
			struct mbuf *mn;

			/*
			 * We ran out of segments. We have to recopy this mbuf
			 * chain first. Bail out if we can't get the new buffers.
			 */
			MGETHDR(mn, M_DONTWAIT, MT_DATA);
			if (mn == NULL) {
				m_freem(mb_head);
				break;
			}
			if (mb_head->m_pkthdr.len > MHLEN) {
				MCLGET(mn, M_DONTWAIT);
				if ((mn->m_flags & M_EXT) == 0) {
					m_freem(mn);
					m_freem(mb_head);
					break;
				}
			}
			m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
			    mtod(mn, caddr_t));
			mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
			m_freem(mb_head);
			mb_head = mn;
			goto tbdinit;
		}

		txp->tbd_number = segment;
		txp->mb_head = mb_head;
		txp->cb_status = 0;
		if (sc->tx_queued != FXP_CXINT_THRESH - 1) {
			txp->cb_command =
			    FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF | FXP_CB_COMMAND_S;
		} else {
			txp->cb_command =
			    FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I;
			/*
			 * Set a 5 second timer just in case we don't hear from the
			 * card again.
			 */
			ifp->if_timer = 5;
		}
		txp->tx_threshold = tx_threshold;

		/*
		 * Advance the end of list forward.
1055 */ 1056 1057 #ifdef __alpha__ 1058 /* 1059 * On platforms which can't access memory in 16-bit 1060 * granularities, we must prevent the card from DMA'ing 1061 * up the status while we update the command field. 1062 * This could cause us to overwrite the completion status. 1063 */ 1064 atomic_clear_short(&sc->cbl_last->cb_command, 1065 FXP_CB_COMMAND_S); 1066 #else 1067 sc->cbl_last->cb_command &= ~FXP_CB_COMMAND_S; 1068 #endif /*__alpha__*/ 1069 sc->cbl_last = txp; 1070 1071 /* 1072 * Advance the beginning of the list forward if there are 1073 * no other packets queued (when nothing is queued, cbl_first 1074 * sits on the last TxCB that was sent out). 1075 */ 1076 if (sc->tx_queued == 0) 1077 sc->cbl_first = txp; 1078 1079 sc->tx_queued++; 1080 1081 /* 1082 * Pass packet to bpf if there is a listener. 1083 */ 1084 if (ifp->if_bpf) 1085 bpf_mtap(FXP_BPFTAP_ARG(ifp), mb_head); 1086 } 1087 1088 /* 1089 * We're finished. If we added to the list, issue a RESUME to get DMA 1090 * going again if suspended. 1091 */ 1092 if (txp != NULL) { 1093 fxp_scb_wait(sc); 1094 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_RESUME); 1095 } 1096 } 1097 1098 /* 1099 * Process interface interrupts. 1100 */ 1101 static FXP_INTR_TYPE 1102 fxp_intr(arg) 1103 void *arg; 1104 { 1105 struct fxp_softc *sc = arg; 1106 struct ifnet *ifp = &sc->sc_if; 1107 u_int8_t statack; 1108 #if defined(__NetBSD__) 1109 int claimed = 0; 1110 #endif 1111 1112 while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) { 1113 #if defined(__NetBSD__) 1114 claimed = 1; 1115 #endif 1116 /* 1117 * First ACK all the interrupts in this pass. 1118 */ 1119 CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack); 1120 1121 /* 1122 * Free any finished transmit mbuf chains. 1123 */ 1124 if (statack & FXP_SCB_STATACK_CXTNO) { 1125 struct fxp_cb_tx *txp; 1126 1127 for (txp = sc->cbl_first; sc->tx_queued && 1128 (txp->cb_status & FXP_CB_STATUS_C) != 0; 1129 txp = txp->next) { 1130 if (txp->mb_head != NULL) { 1131 m_freem(txp->mb_head); 1132 txp->mb_head = NULL; 1133 } 1134 sc->tx_queued--; 1135 } 1136 sc->cbl_first = txp; 1137 ifp->if_timer = 0; 1138 if (sc->tx_queued == 0) { 1139 if (sc->need_mcsetup) 1140 fxp_mc_setup(sc); 1141 } 1142 /* 1143 * Try to start more packets transmitting. 1144 */ 1145 if (ifp->if_snd.ifq_head != NULL) 1146 fxp_start(ifp); 1147 } 1148 /* 1149 * Process receiver interrupts. If a no-resource (RNR) 1150 * condition exists, get whatever packets we can and 1151 * re-start the receiver. 1152 */ 1153 if (statack & (FXP_SCB_STATACK_FR | FXP_SCB_STATACK_RNR)) { 1154 struct mbuf *m; 1155 struct fxp_rfa *rfa; 1156 rcvloop: 1157 m = sc->rfa_headm; 1158 rfa = (struct fxp_rfa *)(m->m_ext.ext_buf + 1159 RFA_ALIGNMENT_FUDGE); 1160 1161 if (rfa->rfa_status & FXP_RFA_STATUS_C) { 1162 /* 1163 * Remove first packet from the chain. 1164 */ 1165 sc->rfa_headm = m->m_next; 1166 m->m_next = NULL; 1167 1168 /* 1169 * Add a new buffer to the receive chain. 1170 * If this fails, the old buffer is recycled 1171 * instead. 
				 */
				if (fxp_add_rfabuf(sc, m) == 0) {
					struct ether_header *eh;
					int total_len;

					total_len = rfa->actual_size &
					    (MCLBYTES - 1);
					if (total_len <
					    sizeof(struct ether_header)) {
						m_freem(m);
						goto rcvloop;
					}
					m->m_pkthdr.rcvif = ifp;
					m->m_pkthdr.len = m->m_len = total_len;
					eh = mtod(m, struct ether_header *);
					m->m_data +=
					    sizeof(struct ether_header);
					m->m_len -=
					    sizeof(struct ether_header);
					m->m_pkthdr.len = m->m_len;
					ether_input(ifp, eh, m);
				}
				goto rcvloop;
			}
			if (statack & FXP_SCB_STATACK_RNR) {
				fxp_scb_wait(sc);
				CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
				    vtophys(sc->rfa_headm->m_ext.ext_buf) +
				    RFA_ALIGNMENT_FUDGE);
				CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND,
				    FXP_SCB_COMMAND_RU_START);
			}
		}
	}
#if defined(__NetBSD__)
	return (claimed);
#endif
}

/*
 * Update packet in/out/collision statistics. The i82557 doesn't
 * allow you to access these counters without doing a fairly
 * expensive DMA to get _all_ of the statistics it maintains, so
 * we do this operation here only once per second. The statistics
 * counters in the kernel are updated from the previous dump-stats
 * DMA and then a new dump-stats DMA is started. The on-chip
 * counters are zeroed when the DMA completes. If we can't start
 * the DMA immediately, we don't wait - we just prepare to read
 * them again next time.
 */
static void
fxp_stats_update(arg)
	void *arg;
{
	struct fxp_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_if;
	struct fxp_stats *sp = sc->fxp_stats;
	struct fxp_cb_tx *txp;
	int s;

	ifp->if_opackets += sp->tx_good;
	ifp->if_collisions += sp->tx_total_collisions;
	if (sp->rx_good) {
		ifp->if_ipackets += sp->rx_good;
		sc->rx_idle_secs = 0;
	} else {
		/*
		 * Receiver's been idle for another second.
		 */
		sc->rx_idle_secs++;
	}
	ifp->if_ierrors +=
	    sp->rx_crc_errors +
	    sp->rx_alignment_errors +
	    sp->rx_rnr_errors +
	    sp->rx_overrun_errors;
	/*
	 * If any transmit underruns occurred, bump up the transmit
	 * threshold by another 512 bytes (64 * 8).
	 */
	if (sp->tx_underruns) {
		ifp->if_oerrors += sp->tx_underruns;
		if (tx_threshold < 192)
			tx_threshold += 64;
	}
	s = splimp();
	/*
	 * Release any xmit buffers that have completed DMA. This isn't
	 * strictly necessary to do here, but it's advantageous for mbufs
	 * with external storage to be released in a timely manner rather
	 * than being deferred for a potentially long time. This limits
	 * the delay to a maximum of one second.
	 */
	for (txp = sc->cbl_first; sc->tx_queued &&
	    (txp->cb_status & FXP_CB_STATUS_C) != 0;
	    txp = txp->next) {
		if (txp->mb_head != NULL) {
			m_freem(txp->mb_head);
			txp->mb_head = NULL;
		}
		sc->tx_queued--;
	}
	sc->cbl_first = txp;
	/*
	 * If we haven't received any packets in FXP_MAX_RX_IDLE seconds,
	 * then assume the receiver has locked up and attempt to clear
	 * the condition by reprogramming the multicast filter. This is
	 * a work-around for a bug in the 82557 where the receiver locks
	 * up if it gets certain types of garbage in the synchronization
	 * bits prior to the packet header.
This bug is supposed to only 1282 * occur in 10Mbps mode, but has been seen to occur in 100Mbps 1283 * mode as well (perhaps due to a 10/100 speed transition). 1284 */ 1285 if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) { 1286 sc->rx_idle_secs = 0; 1287 fxp_mc_setup(sc); 1288 } 1289 /* 1290 * If there is no pending command, start another stats 1291 * dump. Otherwise punt for now. 1292 */ 1293 if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) { 1294 /* 1295 * Start another stats dump. 1296 */ 1297 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, 1298 FXP_SCB_COMMAND_CU_DUMPRESET); 1299 } else { 1300 /* 1301 * A previous command is still waiting to be accepted. 1302 * Just zero our copy of the stats and wait for the 1303 * next timer event to update them. 1304 */ 1305 sp->tx_good = 0; 1306 sp->tx_underruns = 0; 1307 sp->tx_total_collisions = 0; 1308 1309 sp->rx_good = 0; 1310 sp->rx_crc_errors = 0; 1311 sp->rx_alignment_errors = 0; 1312 sp->rx_rnr_errors = 0; 1313 sp->rx_overrun_errors = 0; 1314 } 1315 splx(s); 1316 /* 1317 * Schedule another timeout one second from now. 1318 */ 1319 sc->stat_ch = timeout(fxp_stats_update, sc, hz); 1320 } 1321 1322 /* 1323 * Stop the interface. Cancels the statistics updater and resets 1324 * the interface. 1325 */ 1326 static void 1327 fxp_stop(sc) 1328 struct fxp_softc *sc; 1329 { 1330 struct ifnet *ifp = &sc->sc_if; 1331 struct fxp_cb_tx *txp; 1332 int i; 1333 1334 /* 1335 * Cancel stats updater. 1336 */ 1337 untimeout(fxp_stats_update, sc, sc->stat_ch); 1338 1339 /* 1340 * Issue software reset 1341 */ 1342 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET); 1343 DELAY(10); 1344 1345 /* 1346 * Release any xmit buffers. 1347 */ 1348 txp = sc->cbl_base; 1349 if (txp != NULL) { 1350 for (i = 0; i < FXP_NTXCB; i++) { 1351 if (txp[i].mb_head != NULL) { 1352 m_freem(txp[i].mb_head); 1353 txp[i].mb_head = NULL; 1354 } 1355 } 1356 } 1357 sc->tx_queued = 0; 1358 1359 /* 1360 * Free all the receive buffers then reallocate/reinitialize 1361 */ 1362 if (sc->rfa_headm != NULL) 1363 m_freem(sc->rfa_headm); 1364 sc->rfa_headm = NULL; 1365 sc->rfa_tailm = NULL; 1366 for (i = 0; i < FXP_NRFABUFS; i++) { 1367 if (fxp_add_rfabuf(sc, NULL) != 0) { 1368 /* 1369 * This "can't happen" - we're at splimp() 1370 * and we just freed all the buffers we need 1371 * above. 1372 */ 1373 panic("fxp_stop: no buffers!"); 1374 } 1375 } 1376 1377 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1378 ifp->if_timer = 0; 1379 } 1380 1381 /* 1382 * Watchdog/transmission transmit timeout handler. Called when a 1383 * transmission is started on the interface, but no interrupt is 1384 * received before the timeout. This usually indicates that the 1385 * card has wedged for some reason. 1386 */ 1387 static void 1388 fxp_watchdog(ifp) 1389 struct ifnet *ifp; 1390 { 1391 struct fxp_softc *sc = ifp->if_softc; 1392 1393 printf(FXP_FORMAT ": device timeout\n", FXP_ARGS(sc)); 1394 ifp->if_oerrors++; 1395 1396 fxp_init(sc); 1397 } 1398 1399 static void 1400 fxp_init(xsc) 1401 void *xsc; 1402 { 1403 struct fxp_softc *sc = xsc; 1404 struct ifnet *ifp = &sc->sc_if; 1405 struct fxp_cb_config *cbp; 1406 struct fxp_cb_ias *cb_ias; 1407 struct fxp_cb_tx *txp; 1408 int i, s, prm; 1409 1410 s = splimp(); 1411 /* 1412 * Cancel any pending I/O 1413 */ 1414 fxp_stop(sc); 1415 1416 prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0; 1417 1418 /* 1419 * Initialize base of CBL and RFA memory. Loading with zero 1420 * sets it up for regular linear addressing. 
1421 */ 1422 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0); 1423 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_BASE); 1424 1425 fxp_scb_wait(sc); 1426 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_RU_BASE); 1427 1428 /* 1429 * Initialize base of dump-stats buffer. 1430 */ 1431 fxp_scb_wait(sc); 1432 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(sc->fxp_stats)); 1433 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_DUMP_ADR); 1434 1435 /* 1436 * We temporarily use memory that contains the TxCB list to 1437 * construct the config CB. The TxCB list memory is rebuilt 1438 * later. 1439 */ 1440 cbp = (struct fxp_cb_config *) sc->cbl_base; 1441 1442 /* 1443 * This bcopy is kind of disgusting, but there are a bunch of must be 1444 * zero and must be one bits in this structure and this is the easiest 1445 * way to initialize them all to proper values. 1446 */ 1447 bcopy(fxp_cb_config_template, (volatile void *)&cbp->cb_status, 1448 sizeof(fxp_cb_config_template)); 1449 1450 cbp->cb_status = 0; 1451 cbp->cb_command = FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL; 1452 cbp->link_addr = -1; /* (no) next command */ 1453 cbp->byte_count = 22; /* (22) bytes to config */ 1454 cbp->rx_fifo_limit = 8; /* rx fifo threshold (32 bytes) */ 1455 cbp->tx_fifo_limit = 0; /* tx fifo threshold (0 bytes) */ 1456 cbp->adaptive_ifs = 0; /* (no) adaptive interframe spacing */ 1457 cbp->rx_dma_bytecount = 0; /* (no) rx DMA max */ 1458 cbp->tx_dma_bytecount = 0; /* (no) tx DMA max */ 1459 cbp->dma_bce = 0; /* (disable) dma max counters */ 1460 cbp->late_scb = 0; /* (don't) defer SCB update */ 1461 cbp->tno_int = 0; /* (disable) tx not okay interrupt */ 1462 cbp->ci_int = 1; /* interrupt on CU idle */ 1463 cbp->save_bf = prm; /* save bad frames */ 1464 cbp->disc_short_rx = !prm; /* discard short packets */ 1465 cbp->underrun_retry = 1; /* retry mode (1) on DMA underrun */ 1466 cbp->mediatype = !sc->phy_10Mbps_only; /* interface mode */ 1467 cbp->nsai = 1; /* (don't) disable source addr insert */ 1468 cbp->preamble_length = 2; /* (7 byte) preamble */ 1469 cbp->loopback = 0; /* (don't) loopback */ 1470 cbp->linear_priority = 0; /* (normal CSMA/CD operation) */ 1471 cbp->linear_pri_mode = 0; /* (wait after xmit only) */ 1472 cbp->interfrm_spacing = 6; /* (96 bits of) interframe spacing */ 1473 cbp->promiscuous = prm; /* promiscuous mode */ 1474 cbp->bcast_disable = 0; /* (don't) disable broadcasts */ 1475 cbp->crscdt = 0; /* (CRS only) */ 1476 cbp->stripping = !prm; /* truncate rx packet to byte count */ 1477 cbp->padding = 1; /* (do) pad short tx packets */ 1478 cbp->rcv_crc_xfer = 0; /* (don't) xfer CRC to host */ 1479 cbp->force_fdx = 0; /* (don't) force full duplex */ 1480 cbp->fdx_pin_en = 1; /* (enable) FDX# pin */ 1481 cbp->multi_ia = 0; /* (don't) accept multiple IAs */ 1482 cbp->mc_all = sc->all_mcasts;/* accept all multicasts */ 1483 1484 /* 1485 * Start the config command/DMA. 1486 */ 1487 fxp_scb_wait(sc); 1488 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(&cbp->cb_status)); 1489 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START); 1490 /* ...and wait for it to complete. */ 1491 while (!(cbp->cb_status & FXP_CB_STATUS_C)); 1492 1493 /* 1494 * Now initialize the station address. Temporarily use the TxCB 1495 * memory area like we did above for the config CB. 
1496 */ 1497 cb_ias = (struct fxp_cb_ias *) sc->cbl_base; 1498 cb_ias->cb_status = 0; 1499 cb_ias->cb_command = FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL; 1500 cb_ias->link_addr = -1; 1501 #if defined(__NetBSD__) 1502 bcopy(LLADDR(ifp->if_sadl), (void *)cb_ias->macaddr, 6); 1503 #else 1504 bcopy(sc->arpcom.ac_enaddr, (volatile void *)cb_ias->macaddr, 1505 sizeof(sc->arpcom.ac_enaddr)); 1506 #endif /* __NetBSD__ */ 1507 1508 /* 1509 * Start the IAS (Individual Address Setup) command/DMA. 1510 */ 1511 fxp_scb_wait(sc); 1512 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START); 1513 /* ...and wait for it to complete. */ 1514 while (!(cb_ias->cb_status & FXP_CB_STATUS_C)); 1515 1516 /* 1517 * Initialize transmit control block (TxCB) list. 1518 */ 1519 1520 txp = sc->cbl_base; 1521 bzero(txp, sizeof(struct fxp_cb_tx) * FXP_NTXCB); 1522 for (i = 0; i < FXP_NTXCB; i++) { 1523 txp[i].cb_status = FXP_CB_STATUS_C | FXP_CB_STATUS_OK; 1524 txp[i].cb_command = FXP_CB_COMMAND_NOP; 1525 txp[i].link_addr = vtophys(&txp[(i + 1) & FXP_TXCB_MASK].cb_status); 1526 txp[i].tbd_array_addr = vtophys(&txp[i].tbd[0]); 1527 txp[i].next = &txp[(i + 1) & FXP_TXCB_MASK]; 1528 } 1529 /* 1530 * Set the suspend flag on the first TxCB and start the control 1531 * unit. It will execute the NOP and then suspend. 1532 */ 1533 txp->cb_command = FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S; 1534 sc->cbl_first = sc->cbl_last = txp; 1535 sc->tx_queued = 1; 1536 1537 fxp_scb_wait(sc); 1538 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START); 1539 1540 /* 1541 * Initialize receiver buffer area - RFA. 1542 */ 1543 fxp_scb_wait(sc); 1544 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 1545 vtophys(sc->rfa_headm->m_ext.ext_buf) + RFA_ALIGNMENT_FUDGE); 1546 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_RU_START); 1547 1548 /* 1549 * Set current media. 1550 */ 1551 fxp_set_media(sc, sc->sc_media.ifm_cur->ifm_media); 1552 1553 ifp->if_flags |= IFF_RUNNING; 1554 ifp->if_flags &= ~IFF_OACTIVE; 1555 splx(s); 1556 1557 /* 1558 * Start stats updater. 1559 */ 1560 sc->stat_ch = timeout(fxp_stats_update, sc, hz); 1561 } 1562 1563 static void 1564 fxp_set_media(sc, media) 1565 struct fxp_softc *sc; 1566 int media; 1567 { 1568 1569 switch (sc->phy_primary_device) { 1570 case FXP_PHY_DP83840: 1571 case FXP_PHY_DP83840A: 1572 fxp_mdi_write(sc, sc->phy_primary_addr, FXP_DP83840_PCR, 1573 fxp_mdi_read(sc, sc->phy_primary_addr, FXP_DP83840_PCR) | 1574 FXP_DP83840_PCR_LED4_MODE | /* LED4 always indicates duplex */ 1575 FXP_DP83840_PCR_F_CONNECT | /* force link disconnect bypass */ 1576 FXP_DP83840_PCR_BIT10); /* XXX I have no idea */ 1577 /* fall through */ 1578 case FXP_PHY_82553A: 1579 case FXP_PHY_82553C: /* untested */ 1580 case FXP_PHY_82555: 1581 case FXP_PHY_82555B: 1582 if (IFM_SUBTYPE(media) != IFM_AUTO) { 1583 int flags; 1584 1585 flags = (IFM_SUBTYPE(media) == IFM_100_TX) ? 1586 FXP_PHY_BMCR_SPEED_100M : 0; 1587 flags |= (media & IFM_FDX) ? 1588 FXP_PHY_BMCR_FULLDUPLEX : 0; 1589 fxp_mdi_write(sc, sc->phy_primary_addr, 1590 FXP_PHY_BMCR, 1591 (fxp_mdi_read(sc, sc->phy_primary_addr, 1592 FXP_PHY_BMCR) & 1593 ~(FXP_PHY_BMCR_AUTOEN | FXP_PHY_BMCR_SPEED_100M | 1594 FXP_PHY_BMCR_FULLDUPLEX)) | flags); 1595 } else { 1596 fxp_mdi_write(sc, sc->phy_primary_addr, 1597 FXP_PHY_BMCR, 1598 (fxp_mdi_read(sc, sc->phy_primary_addr, 1599 FXP_PHY_BMCR) | FXP_PHY_BMCR_AUTOEN)); 1600 } 1601 break; 1602 /* 1603 * The Seeq 80c24 doesn't have a PHY programming interface, so do 1604 * nothing. 
	 */
	case FXP_PHY_80C24:
		break;
	default:
		printf(FXP_FORMAT
		    ": warning: unsupported PHY, type = %d, addr = %d\n",
		     FXP_ARGS(sc), sc->phy_primary_device,
		     sc->phy_primary_addr);
	}
}

/*
 * Change media according to request.
 */
int
fxp_mediachange(ifp)
	struct ifnet *ifp;
{
	struct fxp_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	fxp_set_media(sc, ifm->ifm_media);
	return (0);
}

/*
 * Notify the world which media we're using.
 */
void
fxp_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct fxp_softc *sc = ifp->if_softc;
	int flags, stsflags;

	switch (sc->phy_primary_device) {
	case FXP_PHY_82555:
	case FXP_PHY_82555B:
	case FXP_PHY_DP83840:
	case FXP_PHY_DP83840A:
		ifmr->ifm_status = IFM_AVALID; /* IFM_ACTIVE will be valid */
		ifmr->ifm_active = IFM_ETHER;
		/*
		 * The following is not an error.
		 * You need to read this register twice to get current
		 * status. This is correct, documented behaviour; the
		 * first read gets latched values.
		 */
		stsflags = fxp_mdi_read(sc, sc->phy_primary_addr, FXP_PHY_STS);
		stsflags = fxp_mdi_read(sc, sc->phy_primary_addr, FXP_PHY_STS);
		if (stsflags & FXP_PHY_STS_LINK_STS)
			ifmr->ifm_status |= IFM_ACTIVE;

		/*
		 * If we are in auto mode, then try to report the result.
		 */
		flags = fxp_mdi_read(sc, sc->phy_primary_addr, FXP_PHY_BMCR);
		if (flags & FXP_PHY_BMCR_AUTOEN) {
			ifmr->ifm_active |= IFM_AUTO; /* XXX presently 0 */
			if (stsflags & FXP_PHY_STS_AUTO_DONE) {
				/*
				 * Intel and National parts report
				 * differently on what they found.
				 */
				if ((sc->phy_primary_device == FXP_PHY_82555)
				    || (sc->phy_primary_device == FXP_PHY_82555B)) {
					flags = fxp_mdi_read(sc,
					    sc->phy_primary_addr,
					    FXP_PHY_USC);

					if (flags & FXP_PHY_USC_SPEED)
						ifmr->ifm_active |= IFM_100_TX;
					else
						ifmr->ifm_active |= IFM_10_T;

					if (flags & FXP_PHY_USC_DUPLEX)
						ifmr->ifm_active |= IFM_FDX;
				} else { /* it's National; we only know speed */
					flags = fxp_mdi_read(sc,
					    sc->phy_primary_addr,
					    FXP_DP83840_PAR);

					if (flags & FXP_DP83840_PAR_SPEED_10)
						ifmr->ifm_active |= IFM_10_T;
					else
						ifmr->ifm_active |= IFM_100_TX;
				}
			}
		} else { /* in manual mode; just report what we were set to */
			if (flags & FXP_PHY_BMCR_SPEED_100M)
				ifmr->ifm_active |= IFM_100_TX;
			else
				ifmr->ifm_active |= IFM_10_T;

			if (flags & FXP_PHY_BMCR_FULLDUPLEX)
				ifmr->ifm_active |= IFM_FDX;
		}
		break;

	case FXP_PHY_80C24:
	default:
		ifmr->ifm_active = IFM_ETHER|IFM_MANUAL; /* XXX IFM_AUTO ? */
	}
}

/*
 * Add a buffer to the end of the RFA buffer list.
 * Return 0 if successful, 1 for failure. A failure results in
 * adding the 'oldm' (if non-NULL) on to the end of the list -
 * tossing out its old contents and recycling it.
 * The RFA struct is stuck at the beginning of the mbuf cluster and the
 * data pointer is fixed up to point just past it.
1721 */ 1722 static int 1723 fxp_add_rfabuf(sc, oldm) 1724 struct fxp_softc *sc; 1725 struct mbuf *oldm; 1726 { 1727 u_int32_t v; 1728 struct mbuf *m; 1729 struct fxp_rfa *rfa, *p_rfa; 1730 1731 MGETHDR(m, M_DONTWAIT, MT_DATA); 1732 if (m != NULL) { 1733 MCLGET(m, M_DONTWAIT); 1734 if ((m->m_flags & M_EXT) == 0) { 1735 m_freem(m); 1736 if (oldm == NULL) 1737 return 1; 1738 m = oldm; 1739 m->m_data = m->m_ext.ext_buf; 1740 } 1741 } else { 1742 if (oldm == NULL) 1743 return 1; 1744 m = oldm; 1745 m->m_data = m->m_ext.ext_buf; 1746 } 1747 1748 /* 1749 * Move the data pointer up so that the incoming data packet 1750 * will be 32-bit aligned. 1751 */ 1752 m->m_data += RFA_ALIGNMENT_FUDGE; 1753 1754 /* 1755 * Get a pointer to the base of the mbuf cluster and move 1756 * data start past it. 1757 */ 1758 rfa = mtod(m, struct fxp_rfa *); 1759 m->m_data += sizeof(struct fxp_rfa); 1760 rfa->size = (u_int16_t)(MCLBYTES - sizeof(struct fxp_rfa) - RFA_ALIGNMENT_FUDGE); 1761 1762 /* 1763 * Initialize the rest of the RFA. Note that since the RFA 1764 * is misaligned, we cannot store values directly. Instead, 1765 * we use an optimized, inline copy. 1766 */ 1767 1768 rfa->rfa_status = 0; 1769 rfa->rfa_control = FXP_RFA_CONTROL_EL; 1770 rfa->actual_size = 0; 1771 1772 v = -1; 1773 fxp_lwcopy(&v, (volatile u_int32_t *) rfa->link_addr); 1774 fxp_lwcopy(&v, (volatile u_int32_t *) rfa->rbd_addr); 1775 1776 /* 1777 * If there are other buffers already on the list, attach this 1778 * one to the end by fixing up the tail to point to this one. 1779 */ 1780 if (sc->rfa_headm != NULL) { 1781 p_rfa = (struct fxp_rfa *) (sc->rfa_tailm->m_ext.ext_buf + 1782 RFA_ALIGNMENT_FUDGE); 1783 sc->rfa_tailm->m_next = m; 1784 v = vtophys(rfa); 1785 fxp_lwcopy(&v, (volatile u_int32_t *) p_rfa->link_addr); 1786 p_rfa->rfa_control = 0; 1787 } else { 1788 sc->rfa_headm = m; 1789 } 1790 sc->rfa_tailm = m; 1791 1792 return (m == oldm); 1793 } 1794 1795 static volatile int 1796 fxp_mdi_read(sc, phy, reg) 1797 struct fxp_softc *sc; 1798 int phy; 1799 int reg; 1800 { 1801 int count = 10000; 1802 int value; 1803 1804 CSR_WRITE_4(sc, FXP_CSR_MDICONTROL, 1805 (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21)); 1806 1807 while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0 1808 && count--) 1809 DELAY(10); 1810 1811 if (count <= 0) 1812 printf(FXP_FORMAT ": fxp_mdi_read: timed out\n", 1813 FXP_ARGS(sc)); 1814 1815 return (value & 0xffff); 1816 } 1817 1818 static void 1819 fxp_mdi_write(sc, phy, reg, value) 1820 struct fxp_softc *sc; 1821 int phy; 1822 int reg; 1823 int value; 1824 { 1825 int count = 10000; 1826 1827 CSR_WRITE_4(sc, FXP_CSR_MDICONTROL, 1828 (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) | 1829 (value & 0xffff)); 1830 1831 while((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 && 1832 count--) 1833 DELAY(10); 1834 1835 if (count <= 0) 1836 printf(FXP_FORMAT ": fxp_mdi_write: timed out\n", 1837 FXP_ARGS(sc)); 1838 } 1839 1840 static int 1841 fxp_ioctl(ifp, command, data) 1842 struct ifnet *ifp; 1843 FXP_IOCTLCMD_TYPE command; 1844 caddr_t data; 1845 { 1846 struct fxp_softc *sc = ifp->if_softc; 1847 struct ifreq *ifr = (struct ifreq *)data; 1848 int s, error = 0; 1849 1850 s = splimp(); 1851 1852 switch (command) { 1853 1854 case SIOCSIFADDR: 1855 #if !defined(__NetBSD__) 1856 case SIOCGIFADDR: 1857 case SIOCSIFMTU: 1858 #endif 1859 error = ether_ioctl(ifp, command, data); 1860 break; 1861 1862 case SIOCSIFFLAGS: 1863 sc->all_mcasts = (ifp->if_flags & IFF_ALLMULTI) ? 
1 : 0; 1864 1865 /* 1866 * If interface is marked up and not running, then start it. 1867 * If it is marked down and running, stop it. 1868 * XXX If it's up then re-initialize it. This is so flags 1869 * such as IFF_PROMISC are handled. 1870 */ 1871 if (ifp->if_flags & IFF_UP) { 1872 fxp_init(sc); 1873 } else { 1874 if (ifp->if_flags & IFF_RUNNING) 1875 fxp_stop(sc); 1876 } 1877 break; 1878 1879 case SIOCADDMULTI: 1880 case SIOCDELMULTI: 1881 sc->all_mcasts = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0; 1882 #if defined(__NetBSD__) 1883 error = (command == SIOCADDMULTI) ? 1884 ether_addmulti(ifr, &sc->sc_ethercom) : 1885 ether_delmulti(ifr, &sc->sc_ethercom); 1886 1887 if (error == ENETRESET) { 1888 /* 1889 * Multicast list has changed; set the hardware 1890 * filter accordingly. 1891 */ 1892 if (!sc->all_mcasts) 1893 fxp_mc_setup(sc); 1894 /* 1895 * fxp_mc_setup() can turn on all_mcasts if we run 1896 * out of space, so check it again rather than else {}. 1897 */ 1898 if (sc->all_mcasts) 1899 fxp_init(sc); 1900 error = 0; 1901 } 1902 #else /* __FreeBSD__ */ 1903 /* 1904 * Multicast list has changed; set the hardware filter 1905 * accordingly. 1906 */ 1907 if (!sc->all_mcasts) 1908 fxp_mc_setup(sc); 1909 /* 1910 * fxp_mc_setup() can turn on sc->all_mcasts, so check it 1911 * again rather than else {}. 1912 */ 1913 if (sc->all_mcasts) 1914 fxp_init(sc); 1915 error = 0; 1916 #endif /* __NetBSD__ */ 1917 break; 1918 1919 case SIOCSIFMEDIA: 1920 case SIOCGIFMEDIA: 1921 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command); 1922 break; 1923 1924 default: 1925 error = EINVAL; 1926 } 1927 (void) splx(s); 1928 return (error); 1929 } 1930 1931 /* 1932 * Program the multicast filter. 1933 * 1934 * We have an artificial restriction that the multicast setup command 1935 * must be the first command in the chain, so we take steps to ensure 1936 * this. By requiring this, it allows us to keep up the performance of 1937 * the pre-initialized command ring (esp. link pointers) by not actually 1938 * inserting the mcsetup command in the ring - i.e. its link pointer 1939 * points to the TxCB ring, but the mcsetup descriptor itself is not part 1940 * of it. We then can do 'CU_START' on the mcsetup descriptor and have it 1941 * lead into the regular TxCB ring when it completes. 1942 * 1943 * This function must be called at splimp. 1944 */ 1945 static void 1946 fxp_mc_setup(sc) 1947 struct fxp_softc *sc; 1948 { 1949 struct fxp_cb_mcs *mcsp = sc->mcsp; 1950 struct ifnet *ifp = &sc->sc_if; 1951 struct ifmultiaddr *ifma; 1952 int nmcasts; 1953 1954 /* 1955 * If there are queued commands, we must wait until they are all 1956 * completed. If we are already waiting, then add a NOP command 1957 * with interrupt option so that we're notified when all commands 1958 * have been completed - fxp_start() ensures that no additional 1959 * TX commands will be added when need_mcsetup is true. 1960 */ 1961 if (sc->tx_queued) { 1962 struct fxp_cb_tx *txp; 1963 1964 /* 1965 * need_mcsetup will be true if we are already waiting for the 1966 * NOP command to be completed (see below). In this case, bail. 1967 */ 1968 if (sc->need_mcsetup) 1969 return; 1970 sc->need_mcsetup = 1; 1971 1972 /* 1973 * Add a NOP command with interrupt so that we are notified when all 1974 * TX commands have been processed. 1975 */ 1976 txp = sc->cbl_last->next; 1977 txp->mb_head = NULL; 1978 txp->cb_status = 0; 1979 txp->cb_command = FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I; 1980 /* 1981 * Advance the end of list forward. 
1982 */ 1983 sc->cbl_last->cb_command &= ~FXP_CB_COMMAND_S; 1984 sc->cbl_last = txp; 1985 sc->tx_queued++; 1986 /* 1987 * Issue a resume in case the CU has just suspended. 1988 */ 1989 fxp_scb_wait(sc); 1990 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_RESUME); 1991 /* 1992 * Set a 5 second timer just in case we don't hear from the 1993 * card again. 1994 */ 1995 ifp->if_timer = 5; 1996 1997 return; 1998 } 1999 sc->need_mcsetup = 0; 2000 2001 /* 2002 * Initialize multicast setup descriptor. 2003 */ 2004 mcsp->next = sc->cbl_base; 2005 mcsp->mb_head = NULL; 2006 mcsp->cb_status = 0; 2007 mcsp->cb_command = FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I; 2008 mcsp->link_addr = vtophys(&sc->cbl_base->cb_status); 2009 2010 nmcasts = 0; 2011 if (!sc->all_mcasts) { 2012 for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL; 2013 ifma = ifma->ifma_link.le_next) { 2014 if (ifma->ifma_addr->sa_family != AF_LINK) 2015 continue; 2016 if (nmcasts >= MAXMCADDR) { 2017 sc->all_mcasts = 1; 2018 nmcasts = 0; 2019 break; 2020 } 2021 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 2022 (volatile void *) &sc->mcsp->mc_addr[nmcasts][0], 6); 2023 nmcasts++; 2024 } 2025 } 2026 mcsp->mc_cnt = nmcasts * 6; 2027 sc->cbl_first = sc->cbl_last = (struct fxp_cb_tx *) mcsp; 2028 sc->tx_queued = 1; 2029 2030 /* 2031 * Wait until command unit is not active. This should never 2032 * be the case when nothing is queued, but make sure anyway. 2033 */ 2034 while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) == 2035 FXP_SCB_CUS_ACTIVE) ; 2036 2037 /* 2038 * Start the multicast setup command. 2039 */ 2040 fxp_scb_wait(sc); 2041 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(&mcsp->cb_status)); 2042 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START); 2043 2044 ifp->if_timer = 2; 2045 return; 2046 } 2047