xref: /freebsd/sys/dev/fxp/if_fxp.c (revision 380a989b3223d455375b4fae70fd0b9bdd43bafb)
1 /*
2  * Copyright (c) 1995, David Greenman
3  * All rights reserved.
4  *
5  * Modifications to support NetBSD and media selection:
6  * Copyright (c) 1997 Jason R. Thorpe.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice unmodified, this list of conditions, and the following
13  *    disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  *	$Id: if_fxp.c,v 1.58 1998/10/22 02:00:49 dg Exp $
31  */
32 
33 /*
34  * Intel EtherExpress Pro/100B PCI Fast Ethernet driver
35  */
36 
37 #include "bpfilter.h"
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/mbuf.h>
42 #include <sys/malloc.h>
43 #include <sys/kernel.h>
44 #include <sys/socket.h>
45 
46 #include <net/if.h>
47 #include <net/if_dl.h>
48 #include <net/if_media.h>
49 
50 #ifdef NS
51 #include <netns/ns.h>
52 #include <netns/ns_if.h>
53 #endif
54 
55 #if NBPFILTER > 0
56 #include <net/bpf.h>
57 #endif
58 
59 #if defined(__NetBSD__)
60 
61 #include <sys/ioctl.h>
62 #include <sys/errno.h>
63 #include <sys/device.h>
64 
65 #include <net/if_dl.h>
66 #include <net/if_ether.h>
67 
68 #include <netinet/if_inarp.h>
69 
70 #include <vm/vm.h>
71 
72 #include <machine/cpu.h>
73 #include <machine/bus.h>
74 #include <machine/intr.h>
75 
76 #include <dev/pci/if_fxpreg.h>
77 #include <dev/pci/if_fxpvar.h>
78 
79 #include <dev/pci/pcivar.h>
80 #include <dev/pci/pcireg.h>
81 #include <dev/pci/pcidevs.h>
82 
83 #ifdef __alpha__		/* XXX */
84 /* XXX XXX NEED REAL DMA MAPPING SUPPORT XXX XXX */
85 #undef vtophys
86 #define	vtophys(va)	alpha_XXX_dmamap((vm_offset_t)(va))
87 #endif /* __alpha__ */
88 
89 #else /* __FreeBSD__ */
90 
91 #include <sys/sockio.h>
92 
93 #include <net/ethernet.h>
94 #include <net/if_arp.h>
95 
96 #include <vm/vm.h>		/* for vtophys */
97 #include <vm/pmap.h>		/* for vtophys */
98 #include <machine/clock.h>	/* for DELAY */
99 
100 #include <pci/pcivar.h>
101 #include <pci/if_fxpreg.h>
102 #include <pci/if_fxpvar.h>
103 
104 #endif /* __NetBSD__ */
105 
106 /*
107  * NOTE!  On the Alpha, we have an alignment constraint.  The
108  * card DMAs the packet immediately following the RFA.  However,
109  * the first thing in the packet is a 14-byte Ethernet header.
110  * This means that the packet is misaligned.  To compensate,
111  * we actually offset the RFA 2 bytes into the cluster.  This
112  * aligns the packet after the Ethernet header at a 32-bit
113  * boundary.  HOWEVER!  This means that the RFA is misaligned!
114  */
115 #define	RFA_ALIGNMENT_FUDGE	2
116 
117 /*
118  * Inline function to copy a 16-bit aligned 32-bit quantity.
119  */
120 static __inline void fxp_lwcopy __P((volatile u_int32_t *,
121 	volatile u_int32_t *));
122 static __inline void
123 fxp_lwcopy(src, dst)
124 	volatile u_int32_t *src, *dst;
125 {
126 	volatile u_int16_t *a = (u_int16_t *)src;
127 	volatile u_int16_t *b = (u_int16_t *)dst;
128 
129 	b[0] = a[0];
130 	b[1] = a[1];
131 }
132 
133 /*
134  * Template for default configuration parameters.
135  * See struct fxp_cb_config for the bit definitions.
136  */
137 static u_char fxp_cb_config_template[] = {
138 	0x0, 0x0,		/* cb_status */
139 	0x80, 0x2,		/* cb_command */
140 	0xff, 0xff, 0xff, 0xff,	/* link_addr */
141 	0x16,	/*  0 */
142 	0x8,	/*  1 */
143 	0x0,	/*  2 */
144 	0x0,	/*  3 */
145 	0x0,	/*  4 */
146 	0x80,	/*  5 */
147 	0xb2,	/*  6 */
148 	0x3,	/*  7 */
149 	0x1,	/*  8 */
150 	0x0,	/*  9 */
151 	0x26,	/* 10 */
152 	0x0,	/* 11 */
153 	0x60,	/* 12 */
154 	0x0,	/* 13 */
155 	0xf2,	/* 14 */
156 	0x48,	/* 15 */
157 	0x0,	/* 16 */
158 	0x40,	/* 17 */
159 	0xf3,	/* 18 */
160 	0x0,	/* 19 */
161 	0x3f,	/* 20 */
162 	0x5	/* 21 */
163 };
164 
165 /* Supported media types. */
166 struct fxp_supported_media {
167 	const int	fsm_phy;	/* PHY type */
168 	const int	*fsm_media;	/* the media array */
169 	const int	fsm_nmedia;	/* the number of supported media */
170 	const int	fsm_defmedia;	/* default media for this PHY */
171 };
172 
173 static const int fxp_media_standard[] = {
174 	IFM_ETHER|IFM_10_T,
175 	IFM_ETHER|IFM_10_T|IFM_FDX,
176 	IFM_ETHER|IFM_100_TX,
177 	IFM_ETHER|IFM_100_TX|IFM_FDX,
178 	IFM_ETHER|IFM_AUTO,
179 };
180 #define	FXP_MEDIA_STANDARD_DEFMEDIA	(IFM_ETHER|IFM_AUTO)
181 
182 static const int fxp_media_default[] = {
183 	IFM_ETHER|IFM_MANUAL,		/* XXX IFM_AUTO ? */
184 };
185 #define	FXP_MEDIA_DEFAULT_DEFMEDIA	(IFM_ETHER|IFM_MANUAL)
186 
187 static const struct fxp_supported_media fxp_media[] = {
188 	{ FXP_PHY_DP83840, fxp_media_standard,
189 	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
190 	  FXP_MEDIA_STANDARD_DEFMEDIA },
191 	{ FXP_PHY_DP83840A, fxp_media_standard,
192 	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
193 	  FXP_MEDIA_STANDARD_DEFMEDIA },
194 	{ FXP_PHY_82553A, fxp_media_standard,
195 	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
196 	  FXP_MEDIA_STANDARD_DEFMEDIA },
197 	{ FXP_PHY_82553C, fxp_media_standard,
198 	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
199 	  FXP_MEDIA_STANDARD_DEFMEDIA },
200 	{ FXP_PHY_82555, fxp_media_standard,
201 	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
202 	  FXP_MEDIA_STANDARD_DEFMEDIA },
203 	{ FXP_PHY_82555B, fxp_media_standard,
204 	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
205 	  FXP_MEDIA_STANDARD_DEFMEDIA },
206 	{ FXP_PHY_80C24, fxp_media_default,
207 	  sizeof(fxp_media_default) / sizeof(fxp_media_default[0]),
208 	  FXP_MEDIA_DEFAULT_DEFMEDIA },
209 };
210 #define	NFXPMEDIA (sizeof(fxp_media) / sizeof(fxp_media[0]))
211 
212 static int fxp_mediachange	__P((struct ifnet *));
213 static void fxp_mediastatus	__P((struct ifnet *, struct ifmediareq *));
214 static void fxp_set_media	__P((struct fxp_softc *, int));
215 static __inline void fxp_scb_wait __P((struct fxp_softc *));
216 static FXP_INTR_TYPE fxp_intr	__P((void *));
217 static void fxp_start		__P((struct ifnet *));
218 static int fxp_ioctl		__P((struct ifnet *,
219 				     FXP_IOCTLCMD_TYPE, caddr_t));
220 static void fxp_init		__P((void *));
221 static void fxp_stop		__P((struct fxp_softc *));
222 static void fxp_watchdog	__P((struct ifnet *));
223 static int fxp_add_rfabuf	__P((struct fxp_softc *, struct mbuf *));
224 static int fxp_mdi_read		__P((struct fxp_softc *, int, int));
225 static void fxp_mdi_write	__P((struct fxp_softc *, int, int, int));
226 static void fxp_read_eeprom	__P((struct fxp_softc *, u_int16_t *,
227 				     int, int));
228 static int fxp_attach_common	__P((struct fxp_softc *, u_int8_t *));
229 static void fxp_stats_update	__P((void *));
230 static void fxp_mc_setup	__P((struct fxp_softc *));
231 
232 /*
233  * Set initial transmit threshold at 64 (512 bytes). This is
234  * increased by 64 (512 bytes) at a time, to a maximum of 192
235  * (1536 bytes), if an underrun occurs.
236  */
237 static int tx_threshold = 64;
238 
239 /*
240  * Number of transmit control blocks. This determines the number
241  * of transmit buffers that can be chained in the CB list.
242  * This must be a power of two.
243  */
244 #define FXP_NTXCB	128
245 
246 /*
247  * Number of completed TX commands at which point an interrupt
248  * will be generated to garbage collect the attached buffers.
249  * Must be at least one less than FXP_NTXCB, and should be
250  * enough less so that the transmitter doesn't become idle
251  * during the buffer rundown (which would reduce performance).
252  */
253 #define FXP_CXINT_THRESH 120
254 
255 /*
256  * TxCB list index mask. This is used to do list wrap-around.
257  */
258 #define FXP_TXCB_MASK	(FXP_NTXCB - 1)
259 
260 /*
261  * Number of receive frame area buffers. These are large so choose
262  * wisely.
263  */
264 #define FXP_NRFABUFS	64
265 
266 /*
267  * Maximum number of seconds that the receiver can be idle before we
268  * assume it's dead and attempt to reset it by reprogramming the
269  * multicast filter. This is part of a work-around for a bug in the
270  * NIC. See fxp_stats_update().
271  */
272 #define FXP_MAX_RX_IDLE	15
273 
274 /*
275  * Wait for the previous command to be accepted (but not necessarily
276  * completed).
277  */
278 static __inline void
279 fxp_scb_wait(sc)
280 	struct fxp_softc *sc;
281 {
282 	int i = 10000;
283 
284 	while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i);
285 }
286 
287 /*************************************************************
288  * Operating system-specific autoconfiguration glue
289  *************************************************************/
290 
291 #if defined(__NetBSD__)
292 
293 #ifdef __BROKEN_INDIRECT_CONFIG
294 static int fxp_match __P((struct device *, void *, void *));
295 #else
296 static int fxp_match __P((struct device *, struct cfdata *, void *));
297 #endif
298 static void fxp_attach __P((struct device *, struct device *, void *));
299 
300 static void	fxp_shutdown __P((void *));
301 
302 /* Compensate for lack of a generic ether_ioctl() */
303 static int	fxp_ether_ioctl __P((struct ifnet *,
304 				    FXP_IOCTLCMD_TYPE, caddr_t));
305 #define	ether_ioctl	fxp_ether_ioctl
306 
307 struct cfattach fxp_ca = {
308 	sizeof(struct fxp_softc), fxp_match, fxp_attach
309 };
310 
311 struct cfdriver fxp_cd = {
312 	NULL, "fxp", DV_IFNET
313 };
314 
315 /*
316  * Check if a device is an 82557.
317  */
318 static int
319 fxp_match(parent, match, aux)
320 	struct device *parent;
321 #ifdef __BROKEN_INDIRECT_CONFIG
322 	void *match;
323 #else
324 	struct cfdata *match;
325 #endif
326 	void *aux;
327 {
328 	struct pci_attach_args *pa = aux;
329 
330 	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
331 		return (0);
332 
333 	switch (PCI_PRODUCT(pa->pa_id)) {
334 	case PCI_PRODUCT_INTEL_82557:
335 		return (1);
336 	}
337 
338 	return (0);
339 }
340 
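/*
 * NetBSD attach routine: map the control/status registers, establish the
 * interrupt, run the common attach code and register the network interface.
 */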
341 static void
342 fxp_attach(parent, self, aux)
343 	struct device *parent, *self;
344 	void *aux;
345 {
346 	struct fxp_softc *sc = (struct fxp_softc *)self;
347 	struct pci_attach_args *pa = aux;
348 	pci_chipset_tag_t pc = pa->pa_pc;
349 	pci_intr_handle_t ih;
350 	const char *intrstr = NULL;
351 	u_int8_t enaddr[6];
352 	struct ifnet *ifp;
353 
354 	/*
355 	 * Map control/status registers.
356 	 */
357 	if (pci_mapreg_map(pa, FXP_PCI_MMBA, PCI_MAPREG_TYPE_MEM, 0,
358 	    &sc->sc_st, &sc->sc_sh, NULL, NULL)) {
359 		printf(": can't map registers\n");
360 		return;
361 	}
362 	printf(": Intel EtherExpress Pro 10/100B Ethernet\n");
363 
364 	/*
365 	 * Allocate our interrupt.
366 	 */
367 	if (pci_intr_map(pc, pa->pa_intrtag, pa->pa_intrpin,
368 	    pa->pa_intrline, &ih)) {
369 		printf("%s: couldn't map interrupt\n", sc->sc_dev.dv_xname);
370 		return;
371 	}
372 	intrstr = pci_intr_string(pc, ih);
373 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, fxp_intr, sc);
374 	if (sc->sc_ih == NULL) {
375 		printf("%s: couldn't establish interrupt",
376 		    sc->sc_dev.dv_xname);
377 		if (intrstr != NULL)
378 			printf(" at %s", intrstr);
379 		printf("\n");
380 		return;
381 	}
382 	printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
383 
384 	/* Do generic parts of attach. */
385 	if (fxp_attach_common(sc, enaddr)) {
386 		/* Failed! */
387 		return;
388 	}
389 
390 	printf("%s: Ethernet address %s%s\n", sc->sc_dev.dv_xname,
391 	    ether_sprintf(enaddr), sc->phy_10Mbps_only ? ", 10Mbps" : "");
392 
393 	ifp = &sc->sc_ethercom.ec_if;
394 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
395 	ifp->if_softc = sc;
396 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
397 	ifp->if_ioctl = fxp_ioctl;
398 	ifp->if_start = fxp_start;
399 	ifp->if_watchdog = fxp_watchdog;
400 
401 	/*
402 	 * Attach the interface.
403 	 */
404 	if_attach(ifp);
405 	/*
406 	 * Let the system queue as many packets as we have available
407 	 * TX descriptors.
408 	 */
409 	ifp->if_snd.ifq_maxlen = FXP_NTXCB - 1;
410 	ether_ifattach(ifp, enaddr);
411 #if NBPFILTER > 0
412 	bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
413 	    sizeof(struct ether_header));
414 #endif
415 
416 	/*
417 	 * Add shutdown hook so that DMA is disabled prior to reboot. Not
418 	 * doing so could allow DMA to corrupt kernel memory during the
419 	 * reboot before the driver initializes.
420 	 */
421 	shutdownhook_establish(fxp_shutdown, sc);
422 }
423 
424 /*
425  * Device shutdown routine. Called at system shutdown after sync. The
426  * main purpose of this routine is to shut off receiver DMA so that
427  * kernel memory doesn't get clobbered during warmboot.
428  */
429 static void
430 fxp_shutdown(sc)
431 	void *sc;
432 {
433 	fxp_stop((struct fxp_softc *) sc);
434 }
435 
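/*
 * Minimal SIOCSIFADDR handler used in place of a generic ether_ioctl()
 * on NetBSD (see the ether_ioctl define above).
 */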
436 static int
437 fxp_ether_ioctl(ifp, cmd, data)
438 	struct ifnet *ifp;
439 	FXP_IOCTLCMD_TYPE cmd;
440 	caddr_t data;
441 {
442 	struct ifaddr *ifa = (struct ifaddr *) data;
443 	struct fxp_softc *sc = ifp->if_softc;
444 
445 	switch (cmd) {
446 	case SIOCSIFADDR:
447 		ifp->if_flags |= IFF_UP;
448 
449 		switch (ifa->ifa_addr->sa_family) {
450 #ifdef INET
451 		case AF_INET:
452 			fxp_init(sc);
453 			arp_ifinit(ifp, ifa);
454 			break;
455 #endif
456 #ifdef NS
457 		case AF_NS:
458 		    {
459 			 register struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;
460 
461 			 if (ns_nullhost(*ina))
462 				ina->x_host = *(union ns_host *)
463 				    LLADDR(ifp->if_sadl);
464 			 else
465 				bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
466 				    ifp->if_addrlen);
467 			 /* Set new address. */
468 			 fxp_init(sc);
469 			 break;
470 		    }
471 #endif
472 		default:
473 			fxp_init(sc);
474 			break;
475 		}
476 		break;
477 
478 	default:
479 		return (EINVAL);
480 	}
481 
482 	return (0);
483 }
484 
485 #else /* __FreeBSD__ */
486 
487 static u_long fxp_count;
488 static const char *fxp_probe		__P((pcici_t, pcidi_t));
489 static void fxp_attach		__P((pcici_t, int));
490 
491 static void fxp_shutdown	__P((int, void *));
492 
493 static struct pci_device fxp_device = {
494 	"fxp",
495 	fxp_probe,
496 	fxp_attach,
497 	&fxp_count,
498 	NULL
499 };
500 DATA_SET(pcidevice_set, fxp_device);
501 
502 /*
503  * Return identification string if this device is ours.
504  */
505 static const char *
506 fxp_probe(config_id, device_id)
507 	pcici_t config_id;
508 	pcidi_t device_id;
509 {
510 	if (((device_id & 0xffff) == FXP_VENDORID_INTEL) &&
511 	    ((device_id >> 16) & 0xffff) == FXP_DEVICEID_i82557)
512 		return ("Intel EtherExpress Pro 10/100B Ethernet");
513 
514 	return NULL;
515 }
516 
517 static void
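/*
 * FreeBSD attach routine: allocate and zero the softc, map memory and the
 * interrupt, run the common attach code and register the network interface.
 */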
518 fxp_attach(config_id, unit)
519 	pcici_t config_id;
520 	int unit;
521 {
522 	struct fxp_softc *sc;
523 	vm_offset_t pbase;
524 	struct ifnet *ifp;
525 	int s;
526 
527 	sc = malloc(sizeof(struct fxp_softc), M_DEVBUF, M_NOWAIT);
528 	if (sc == NULL)
529 		return;
530 	bzero(sc, sizeof(struct fxp_softc));
531 	callout_handle_init(&sc->stat_ch);
532 
533 	s = splimp();
534 
535 	/*
536 	 * Map control/status registers.
537 	 */
538 	if (!pci_map_mem(config_id, FXP_PCI_MMBA,
539 	    (vm_offset_t *)&sc->csr, &pbase)) {
540 		printf("fxp%d: couldn't map memory\n", unit);
541 		goto fail;
542 	}
543 
544 	/*
545 	 * Allocate our interrupt.
546 	 */
547 	if (!pci_map_int(config_id, fxp_intr, sc, &net_imask)) {
548 		printf("fxp%d: couldn't map interrupt\n", unit);
549 		goto fail;
550 	}
551 
552 	/* Do generic parts of attach. */
553 	if (fxp_attach_common(sc, sc->arpcom.ac_enaddr)) {
554 		/* Failed! */
555 		(void) pci_unmap_int(config_id);
556 		goto fail;
557 	}
558 
559 	printf("fxp%d: Ethernet address %6D%s\n", unit,
560 	    sc->arpcom.ac_enaddr, ":", sc->phy_10Mbps_only ? ", 10Mbps" : "");
561 
562 	ifp = &sc->arpcom.ac_if;
563 	ifp->if_unit = unit;
564 	ifp->if_name = "fxp";
565 	ifp->if_output = ether_output;
566 	ifp->if_baudrate = 100000000;
567 	ifp->if_init = fxp_init;
568 	ifp->if_softc = sc;
569 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
570 	ifp->if_ioctl = fxp_ioctl;
571 	ifp->if_start = fxp_start;
572 	ifp->if_watchdog = fxp_watchdog;
573 
574 	/*
575 	 * Attach the interface.
576 	 */
577 	if_attach(ifp);
578 	/*
579 	 * Let the system queue as many packets as we have available
580 	 * TX descriptors.
581 	 */
582 	ifp->if_snd.ifq_maxlen = FXP_NTXCB - 1;
583 	ether_ifattach(ifp);
584 #if NBPFILTER > 0
585 	bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
586 #endif
587 
588 	/*
589 	 * Add shutdown hook so that DMA is disabled prior to reboot. Not
590 	 * doing so could allow DMA to corrupt kernel memory during the
591 	 * reboot before the driver initializes.
592 	 */
593 	at_shutdown(fxp_shutdown, sc, SHUTDOWN_POST_SYNC);
594 
595 	splx(s);
596 	return;
597 
598  fail:
599 	free(sc, M_DEVBUF);
600 	splx(s);
601 }
602 
603 /*
604  * Device shutdown routine. Called at system shutdown after sync. The
605  * main purpose of this routine is to shut off receiver DMA so that
606  * kernel memory doesn't get clobbered during warmboot.
607  */
608 static void
609 fxp_shutdown(howto, sc)
610 	int howto;
611 	void *sc;
612 {
613 	fxp_stop((struct fxp_softc *) sc);
614 }
615 
616 #endif /* __NetBSD__ */
617 
618 /*************************************************************
619  * End of operating system-specific autoconfiguration glue
620  *************************************************************/
621 
622 /*
623  * Do generic parts of attach.
624  */
625 static int
626 fxp_attach_common(sc, enaddr)
627 	struct fxp_softc *sc;
628 	u_int8_t *enaddr;
629 {
630 	u_int16_t data;
631 	int i, nmedia, defmedia;
632 	const int *media;
633 
634 	/*
635 	 * Reset to a stable state.
636 	 */
637 	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
638 	DELAY(10);
639 
640 	sc->cbl_base = malloc(sizeof(struct fxp_cb_tx) * FXP_NTXCB,
641 	    M_DEVBUF, M_NOWAIT);
642 	if (sc->cbl_base == NULL)
643 		goto fail;
644 	bzero(sc->cbl_base, sizeof(struct fxp_cb_tx) * FXP_NTXCB);
645 
646 	sc->fxp_stats = malloc(sizeof(struct fxp_stats), M_DEVBUF, M_NOWAIT);
647 	if (sc->fxp_stats == NULL)
648 		goto fail;
649 	bzero(sc->fxp_stats, sizeof(struct fxp_stats));
650 
651 	sc->mcsp = malloc(sizeof(struct fxp_cb_mcs), M_DEVBUF, M_NOWAIT);
652 	if (sc->mcsp == NULL)
653 		goto fail;
654 
655 	/*
656 	 * Pre-allocate our receive buffers.
657 	 */
658 	for (i = 0; i < FXP_NRFABUFS; i++) {
659 		if (fxp_add_rfabuf(sc, NULL) != 0) {
660 			goto fail;
661 		}
662 	}
663 
664 	/*
665 	 * Get info about the primary PHY
666 	 */
667 	fxp_read_eeprom(sc, (u_int16_t *)&data, 6, 1);
668 	sc->phy_primary_addr = data & 0xff;
669 	sc->phy_primary_device = (data >> 8) & 0x3f;
670 	sc->phy_10Mbps_only = data >> 15;
671 
672 	/*
673 	 * Read MAC address.
674 	 */
675 	fxp_read_eeprom(sc, (u_int16_t *)enaddr, 0, 3);
676 
677 	/*
678 	 * Initialize the media structures.
679 	 */
680 
681 	media = fxp_media_default;
682 	nmedia = sizeof(fxp_media_default) / sizeof(fxp_media_default[0]);
683 	defmedia = FXP_MEDIA_DEFAULT_DEFMEDIA;
684 
685 	for (i = 0; i < NFXPMEDIA; i++) {
686 		if (sc->phy_primary_device == fxp_media[i].fsm_phy) {
687 			media = fxp_media[i].fsm_media;
688 			nmedia = fxp_media[i].fsm_nmedia;
689 			defmedia = fxp_media[i].fsm_defmedia;
690 		}
691 	}
692 
693 	ifmedia_init(&sc->sc_media, 0, fxp_mediachange, fxp_mediastatus);
694 	for (i = 0; i < nmedia; i++) {
695 		if (IFM_SUBTYPE(media[i]) == IFM_100_TX && sc->phy_10Mbps_only)
696 			continue;
697 		ifmedia_add(&sc->sc_media, media[i], 0, NULL);
698 	}
699 	ifmedia_set(&sc->sc_media, defmedia);
700 
701 	return (0);
702 
703  fail:
704 	printf(FXP_FORMAT ": Failed to malloc memory\n", FXP_ARGS(sc));
705 	if (sc->cbl_base)
706 		free(sc->cbl_base, M_DEVBUF);
707 	if (sc->fxp_stats)
708 		free(sc->fxp_stats, M_DEVBUF);
709 	if (sc->mcsp)
710 		free(sc->mcsp, M_DEVBUF);
711 	/* frees entire chain */
712 	if (sc->rfa_headm)
713 		m_freem(sc->rfa_headm);
714 
715 	return (ENOMEM);
716 }
717 
718 /*
719  * Read from the serial EEPROM. Basically, you manually shift in
720  * the read opcode (one bit at a time) and then shift in the address,
721  * and then you shift out the data (all of this one bit at a time).
722  * The word size is 16 bits, so you have to provide the address for
723  * every 16 bits of data.
724  */
725 static void
726 fxp_read_eeprom(sc, data, offset, words)
727 	struct fxp_softc *sc;
728 	u_short *data;
729 	int offset;
730 	int words;
731 {
732 	u_int16_t reg;
733 	int i, x;
734 
735 	for (i = 0; i < words; i++) {
736 		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
737 		/*
738 		 * Shift in read opcode.
739 		 */
740 		for (x = 3; x > 0; x--) {
741 			if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) {
742 				reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
743 			} else {
744 				reg = FXP_EEPROM_EECS;
745 			}
746 			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
747 			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
748 			    reg | FXP_EEPROM_EESK);
749 			DELAY(1);
750 			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
751 			DELAY(1);
752 		}
753 		/*
754 		 * Shift in address.
755 		 */
756 		for (x = 6; x > 0; x--) {
757 			if ((i + offset) & (1 << (x - 1))) {
758 				reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
759 			} else {
760 				reg = FXP_EEPROM_EECS;
761 			}
762 			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
763 			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
764 			    reg | FXP_EEPROM_EESK);
765 			DELAY(1);
766 			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
767 			DELAY(1);
768 		}
769 		reg = FXP_EEPROM_EECS;
770 		data[i] = 0;
771 		/*
772 		 * Shift out data.
773 		 */
774 		for (x = 16; x > 0; x--) {
775 			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
776 			    reg | FXP_EEPROM_EESK);
777 			DELAY(1);
778 			if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) &
779 			    FXP_EEPROM_EEDO)
780 				data[i] |= (1 << (x - 1));
781 			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
782 			DELAY(1);
783 		}
784 		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
785 		DELAY(1);
786 	}
787 }
788 
789 /*
790  * Start packet transmission on the interface.
791  */
792 static void
793 fxp_start(ifp)
794 	struct ifnet *ifp;
795 {
796 	struct fxp_softc *sc = ifp->if_softc;
797 	struct fxp_cb_tx *txp;
798 
799 	/*
800 	 * See if we need to suspend xmit until the multicast filter
801 	 * has been reprogrammed (which can only be done at the head
802 	 * of the command chain).
803 	 */
804 	if (sc->need_mcsetup)
805 		return;
806 
807 	txp = NULL;
808 
809 	/*
810 	 * We're finished if there is nothing more to add to the list or if
811 	 * we're all filled up with buffers to transmit.
812 	 * NOTE: One TxCB is reserved to guarantee that fxp_mc_setup() can add
813 	 *       a NOP command when needed.
814 	 */
815 	while (ifp->if_snd.ifq_head != NULL && sc->tx_queued < FXP_NTXCB - 1) {
816 		struct mbuf *m, *mb_head;
817 		int segment;
818 
819 		/*
820 		 * Grab a packet to transmit.
821 		 */
822 		IF_DEQUEUE(&ifp->if_snd, mb_head);
823 
824 		/*
825 		 * Get pointer to next available tx desc.
826 		 */
827 		txp = sc->cbl_last->next;
828 
829 		/*
830 		 * Go through each of the mbufs in the chain and initialize
831 		 * the transmit buffer descriptors with the physical address
832 		 * and size of the mbuf.
833 		 */
834 tbdinit:
835 		for (m = mb_head, segment = 0; m != NULL; m = m->m_next) {
836 			if (m->m_len != 0) {
837 				if (segment == FXP_NTXSEG)
838 					break;
839 				txp->tbd[segment].tb_addr =
840 				    vtophys(mtod(m, vm_offset_t));
841 				txp->tbd[segment].tb_size = m->m_len;
842 				segment++;
843 			}
844 		}
845 		if (m != NULL) {
846 			struct mbuf *mn;
847 
848 			/*
849 			 * We ran out of segments. We have to recopy this mbuf
850 			 * chain first. Bail out if we can't get the new buffers.
851 			 */
852 			MGETHDR(mn, M_DONTWAIT, MT_DATA);
853 			if (mn == NULL) {
854 				m_freem(mb_head);
855 				break;
856 			}
857 			if (mb_head->m_pkthdr.len > MHLEN) {
858 				MCLGET(mn, M_DONTWAIT);
859 				if ((mn->m_flags & M_EXT) == 0) {
860 					m_freem(mn);
861 					m_freem(mb_head);
862 					break;
863 				}
864 			}
865 			m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
866 			    mtod(mn, caddr_t));
867 			mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
868 			m_freem(mb_head);
869 			mb_head = mn;
870 			goto tbdinit;
871 		}
872 
873 		txp->tbd_number = segment;
874 		txp->mb_head = mb_head;
875 		txp->cb_status = 0;
876 		if (sc->tx_queued != FXP_CXINT_THRESH - 1) {
877 			txp->cb_command =
878 			    FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF | FXP_CB_COMMAND_S;
879 		} else {
880 			txp->cb_command =
881 			    FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I;
882 			/*
883 			 * Set a 5 second timer just in case we don't hear from the
884 			 * card again.
885 			 */
886 			ifp->if_timer = 5;
887 		}
888 		txp->tx_threshold = tx_threshold;
889 
890 		/*
891 		 * Advance the end of list forward.
892 		 */
893 		sc->cbl_last->cb_command &= ~FXP_CB_COMMAND_S;
894 		sc->cbl_last = txp;
895 
896 		/*
897 		 * Advance the beginning of the list forward if there are
898 		 * no other packets queued (when nothing is queued, cbl_first
899 		 * sits on the last TxCB that was sent out).
900 		 */
901 		if (sc->tx_queued == 0)
902 			sc->cbl_first = txp;
903 
904 		sc->tx_queued++;
905 
906 #if NBPFILTER > 0
907 		/*
908 		 * Pass packet to bpf if there is a listener.
909 		 */
910 		if (ifp->if_bpf)
911 			bpf_mtap(FXP_BPFTAP_ARG(ifp), mb_head);
912 #endif
913 	}
914 
915 	/*
916 	 * We're finished. If we added to the list, issue a RESUME to get DMA
917 	 * going again if suspended.
918 	 */
919 	if (txp != NULL) {
920 		fxp_scb_wait(sc);
921 		CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_RESUME);
922 	}
923 }
924 
925 /*
926  * Process interface interrupts.
927  */
928 static FXP_INTR_TYPE
929 fxp_intr(arg)
930 	void *arg;
931 {
932 	struct fxp_softc *sc = arg;
933 	struct ifnet *ifp = &sc->sc_if;
934 	u_int8_t statack;
935 #if defined(__NetBSD__)
936 	int claimed = 0;
937 #endif
938 
939 	while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) {
940 #if defined(__NetBSD__)
941 		claimed = 1;
942 #endif
943 		/*
944 		 * First ACK all the interrupts in this pass.
945 		 */
946 		CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack);
947 
948 		/*
949 		 * Free any finished transmit mbuf chains.
950 		 */
951 		if (statack & FXP_SCB_STATACK_CXTNO) {
952 			struct fxp_cb_tx *txp;
953 
954 			for (txp = sc->cbl_first; sc->tx_queued &&
955 			    (txp->cb_status & FXP_CB_STATUS_C) != 0;
956 			    txp = txp->next) {
957 				if (txp->mb_head != NULL) {
958 					m_freem(txp->mb_head);
959 					txp->mb_head = NULL;
960 				}
961 				sc->tx_queued--;
962 			}
963 			sc->cbl_first = txp;
964 			ifp->if_timer = 0;
965 			if (sc->tx_queued == 0) {
966 				if (sc->need_mcsetup)
967 					fxp_mc_setup(sc);
968 			}
969 			/*
970 			 * Try to start more packets transmitting.
971 			 */
972 			if (ifp->if_snd.ifq_head != NULL)
973 				fxp_start(ifp);
974 		}
975 		/*
976 		 * Process receiver interrupts. If a no-resource (RNR)
977 		 * condition exists, get whatever packets we can and
978 		 * re-start the receiver.
979 		 */
980 		if (statack & (FXP_SCB_STATACK_FR | FXP_SCB_STATACK_RNR)) {
981 			struct mbuf *m;
982 			struct fxp_rfa *rfa;
983 rcvloop:
984 			m = sc->rfa_headm;
985 			rfa = (struct fxp_rfa *)(m->m_ext.ext_buf +
986 			    RFA_ALIGNMENT_FUDGE);
987 
988 			if (rfa->rfa_status & FXP_RFA_STATUS_C) {
989 				/*
990 				 * Remove first packet from the chain.
991 				 */
992 				sc->rfa_headm = m->m_next;
993 				m->m_next = NULL;
994 
995 				/*
996 				 * Add a new buffer to the receive chain.
997 				 * If this fails, the old buffer is recycled
998 				 * instead.
999 				 */
1000 				if (fxp_add_rfabuf(sc, m) == 0) {
1001 					struct ether_header *eh;
1002 					u_int16_t total_len;
1003 
1004 					total_len = rfa->actual_size &
1005 					    (MCLBYTES - 1);
1006 					if (total_len <
1007 					    sizeof(struct ether_header)) {
1008 						m_freem(m);
1009 						goto rcvloop;
1010 					}
1011 					m->m_pkthdr.rcvif = ifp;
1012 					m->m_pkthdr.len = m->m_len =
1013 					    total_len -
1014 					    sizeof(struct ether_header);
1015 					eh = mtod(m, struct ether_header *);
1016 #if NBPFILTER > 0
1017 					if (ifp->if_bpf) {
1018 						bpf_tap(FXP_BPFTAP_ARG(ifp),
1019 						    mtod(m, caddr_t),
1020 						    total_len);
1021 						/*
1022 						 * Only pass this packet up
1023 						 * if it is for us.
1024 						 */
1025 						if ((ifp->if_flags &
1026 						    IFF_PROMISC) &&
1027 						    (rfa->rfa_status &
1028 						    FXP_RFA_STATUS_IAMATCH) &&
1029 						    (eh->ether_dhost[0] & 1)
1030 						    == 0) {
1031 							m_freem(m);
1032 							goto rcvloop;
1033 						}
1034 					}
1035 #endif /* NBPFILTER > 0 */
1036 					m->m_data +=
1037 					    sizeof(struct ether_header);
1038 					ether_input(ifp, eh, m);
1039 				}
1040 				goto rcvloop;
1041 			}
1042 			if (statack & FXP_SCB_STATACK_RNR) {
1043 				fxp_scb_wait(sc);
1044 				CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
1045 				    vtophys(sc->rfa_headm->m_ext.ext_buf) +
1046 					RFA_ALIGNMENT_FUDGE);
1047 				CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND,
1048 				    FXP_SCB_COMMAND_RU_START);
1049 			}
1050 		}
1051 	}
1052 #if defined(__NetBSD__)
1053 	return (claimed);
1054 #endif
1055 }
1056 
1057 /*
1058  * Update packet in/out/collision statistics. The i82557 doesn't
1059  * allow you to access these counters without doing a fairly
1060  * expensive DMA to get _all_ of the statistics it maintains, so
1061  * we do this operation here only once per second. The statistics
1062  * counters in the kernel are updated from the previous dump-stats
1063  * DMA and then a new dump-stats DMA is started. The on-chip
1064  * counters are zeroed when the DMA completes. If we can't start
1065  * the DMA immediately, we don't wait - we just prepare to read
1066  * them again next time.
1067  */
1068 static void
1069 fxp_stats_update(arg)
1070 	void *arg;
1071 {
1072 	struct fxp_softc *sc = arg;
1073 	struct ifnet *ifp = &sc->sc_if;
1074 	struct fxp_stats *sp = sc->fxp_stats;
1075 	struct fxp_cb_tx *txp;
1076 	int s;
1077 
1078 	ifp->if_opackets += sp->tx_good;
1079 	ifp->if_collisions += sp->tx_total_collisions;
1080 	if (sp->rx_good) {
1081 		ifp->if_ipackets += sp->rx_good;
1082 		sc->rx_idle_secs = 0;
1083 	} else {
1084 		/*
1085 		 * Receiver's been idle for another second.
1086 		 */
1087 		sc->rx_idle_secs++;
1088 	}
1089 	ifp->if_ierrors +=
1090 	    sp->rx_crc_errors +
1091 	    sp->rx_alignment_errors +
1092 	    sp->rx_rnr_errors +
1093 	    sp->rx_overrun_errors;
1094 	/*
1095 	 * If any transmit underruns occurred, bump up the transmit
1096 	 * threshold by another 512 bytes (64 * 8).
1097 	 */
1098 	if (sp->tx_underruns) {
1099 		ifp->if_oerrors += sp->tx_underruns;
1100 		if (tx_threshold < 192)
1101 			tx_threshold += 64;
1102 	}
1103 	s = splimp();
1104 	/*
1105 	 * Release any xmit buffers that have completed DMA. This isn't
1106 	 * strictly necessary to do here, but it's advantageous for mbufs
1107 	 * with external storage to be released in a timely manner rather
1108 	 * than being deferred for a potentially long time. This limits
1109 	 * the delay to a maximum of one second.
1110 	 */
1111 	for (txp = sc->cbl_first; sc->tx_queued &&
1112 	    (txp->cb_status & FXP_CB_STATUS_C) != 0;
1113 	    txp = txp->next) {
1114 		if (txp->mb_head != NULL) {
1115 			m_freem(txp->mb_head);
1116 			txp->mb_head = NULL;
1117 		}
1118 		sc->tx_queued--;
1119 	}
1120 	sc->cbl_first = txp;
1121 	/*
1122 	 * If we haven't received any packets in FXP_MAX_RX_IDLE seconds,
1123 	 * then assume the receiver has locked up and attempt to clear
1124 	 * the condition by reprogramming the multicast filter. This is
1125 	 * a work-around for a bug in the 82557 where the receiver locks
1126 	 * up if it gets certain types of garbage in the synchronization
1127 	 * bits prior to the packet header. This bug is supposed to only
1128 	 * occur in 10Mbps mode, but has been seen to occur in 100Mbps
1129 	 * mode as well (perhaps due to a 10/100 speed transition).
1130 	 */
1131 	if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) {
1132 		sc->rx_idle_secs = 0;
1133 		fxp_mc_setup(sc);
1134 	}
1135 	/*
1136 	 * If there is no pending command, start another stats
1137 	 * dump. Otherwise punt for now.
1138 	 */
1139 	if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) {
1140 		/*
1141 		 * Start another stats dump.
1142 		 */
1143 		CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND,
1144 		    FXP_SCB_COMMAND_CU_DUMPRESET);
1145 	} else {
1146 		/*
1147 		 * A previous command is still waiting to be accepted.
1148 		 * Just zero our copy of the stats and wait for the
1149 		 * next timer event to update them.
1150 		 */
1151 		sp->tx_good = 0;
1152 		sp->tx_underruns = 0;
1153 		sp->tx_total_collisions = 0;
1154 
1155 		sp->rx_good = 0;
1156 		sp->rx_crc_errors = 0;
1157 		sp->rx_alignment_errors = 0;
1158 		sp->rx_rnr_errors = 0;
1159 		sp->rx_overrun_errors = 0;
1160 	}
1161 	splx(s);
1162 	/*
1163 	 * Schedule another timeout one second from now.
1164 	 */
1165 	sc->stat_ch = timeout(fxp_stats_update, sc, hz);
1166 }
1167 
1168 /*
1169  * Stop the interface. Cancels the statistics updater and resets
1170  * the interface.
1171  */
1172 static void
1173 fxp_stop(sc)
1174 	struct fxp_softc *sc;
1175 {
1176 	struct ifnet *ifp = &sc->sc_if;
1177 	struct fxp_cb_tx *txp;
1178 	int i;
1179 
1180 	/*
1181 	 * Cancel stats updater.
1182 	 */
1183 	untimeout(fxp_stats_update, sc, sc->stat_ch);
1184 
1185 	/*
1186 	 * Issue software reset
1187 	 */
1188 	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
1189 	DELAY(10);
1190 
1191 	/*
1192 	 * Release any xmit buffers.
1193 	 */
1194 	txp = sc->cbl_base;
1195 	if (txp != NULL) {
1196 		for (i = 0; i < FXP_NTXCB; i++) {
1197 			if (txp[i].mb_head != NULL) {
1198 				m_freem(txp[i].mb_head);
1199 				txp[i].mb_head = NULL;
1200 			}
1201 		}
1202 	}
1203 	sc->tx_queued = 0;
1204 
1205 	/*
1206 	 * Free all the receive buffers, then reallocate/reinitialize them.
1207 	 */
1208 	if (sc->rfa_headm != NULL)
1209 		m_freem(sc->rfa_headm);
1210 	sc->rfa_headm = NULL;
1211 	sc->rfa_tailm = NULL;
1212 	for (i = 0; i < FXP_NRFABUFS; i++) {
1213 		if (fxp_add_rfabuf(sc, NULL) != 0) {
1214 			/*
1215 			 * This "can't happen" - we're at splimp()
1216 			 * and we just freed all the buffers we need
1217 			 * above.
1218 			 */
1219 			panic("fxp_stop: no buffers!");
1220 		}
1221 	}
1222 
1223 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1224 	ifp->if_timer = 0;
1225 }
1226 
1227 /*
1228  * Watchdog/transmit timeout handler. Called when a
1229  * transmission is started on the interface, but no interrupt is
1230  * received before the timeout. This usually indicates that the
1231  * card has wedged for some reason.
1232  */
1233 static void
1234 fxp_watchdog(ifp)
1235 	struct ifnet *ifp;
1236 {
1237 	struct fxp_softc *sc = ifp->if_softc;
1238 
1239 	printf(FXP_FORMAT ": device timeout\n", FXP_ARGS(sc));
1240 	ifp->if_oerrors++;
1241 
1242 	fxp_init(sc);
1243 }
1244 
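/*
 * Initialize the hardware: reset the chip, download the configuration and
 * individual address commands, rebuild the TxCB list, hand the RFA chain to
 * the receiver, program the current media and restart the statistics timer.
 */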
1245 static void
1246 fxp_init(xsc)
1247 	void *xsc;
1248 {
1249 	struct fxp_softc *sc = xsc;
1250 	struct ifnet *ifp = &sc->sc_if;
1251 	struct fxp_cb_config *cbp;
1252 	struct fxp_cb_ias *cb_ias;
1253 	struct fxp_cb_tx *txp;
1254 	int i, s, prm;
1255 
1256 	s = splimp();
1257 	/*
1258 	 * Cancel any pending I/O
1259 	 */
1260 	fxp_stop(sc);
1261 
1262 	prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0;
1263 
1264 	/*
1265 	 * Initialize base of CBL and RFA memory. Loading with zero
1266 	 * sets it up for regular linear addressing.
1267 	 */
1268 	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
1269 	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_BASE);
1270 
1271 	fxp_scb_wait(sc);
1272 	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_RU_BASE);
1273 
1274 	/*
1275 	 * Initialize base of dump-stats buffer.
1276 	 */
1277 	fxp_scb_wait(sc);
1278 	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(sc->fxp_stats));
1279 	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_DUMP_ADR);
1280 
1281 	/*
1282 	 * We temporarily use memory that contains the TxCB list to
1283 	 * construct the config CB. The TxCB list memory is rebuilt
1284 	 * later.
1285 	 */
1286 	cbp = (struct fxp_cb_config *) sc->cbl_base;
1287 
1288 	/*
1289 	 * This bcopy is kind of disgusting, but there are a bunch of
1290 	 * must-be-zero and must-be-one bits in this structure and this is the
1291 	 * easiest way to initialize them all to proper values.
1292 	 */
1293 	bcopy(fxp_cb_config_template, (void *)&cbp->cb_status,
1294 		sizeof(fxp_cb_config_template));
1295 
1296 	cbp->cb_status =	0;
1297 	cbp->cb_command =	FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL;
1298 	cbp->link_addr =	-1;	/* (no) next command */
1299 	cbp->byte_count =	22;	/* (22) bytes to config */
1300 	cbp->rx_fifo_limit =	8;	/* rx fifo threshold (32 bytes) */
1301 	cbp->tx_fifo_limit =	0;	/* tx fifo threshold (0 bytes) */
1302 	cbp->adaptive_ifs =	0;	/* (no) adaptive interframe spacing */
1303 	cbp->rx_dma_bytecount =	0;	/* (no) rx DMA max */
1304 	cbp->tx_dma_bytecount =	0;	/* (no) tx DMA max */
1305 	cbp->dma_bce =		0;	/* (disable) dma max counters */
1306 	cbp->late_scb =		0;	/* (don't) defer SCB update */
1307 	cbp->tno_int =		0;	/* (disable) tx not okay interrupt */
1308 	cbp->ci_int =		1;	/* interrupt on CU idle */
1309 	cbp->save_bf =		prm;	/* save bad frames */
1310 	cbp->disc_short_rx =	!prm;	/* discard short packets */
1311 	cbp->underrun_retry =	1;	/* retry mode (1) on DMA underrun */
1312 	cbp->mediatype =	!sc->phy_10Mbps_only; /* interface mode */
1313 	cbp->nsai =		1;	/* (don't) disable source addr insert */
1314 	cbp->preamble_length =	2;	/* (7 byte) preamble */
1315 	cbp->loopback =		0;	/* (don't) loopback */
1316 	cbp->linear_priority =	0;	/* (normal CSMA/CD operation) */
1317 	cbp->linear_pri_mode =	0;	/* (wait after xmit only) */
1318 	cbp->interfrm_spacing =	6;	/* (96 bits of) interframe spacing */
1319 	cbp->promiscuous =	prm;	/* promiscuous mode */
1320 	cbp->bcast_disable =	0;	/* (don't) disable broadcasts */
1321 	cbp->crscdt =		0;	/* (CRS only) */
1322 	cbp->stripping =	!prm;	/* truncate rx packet to byte count */
1323 	cbp->padding =		1;	/* (do) pad short tx packets */
1324 	cbp->rcv_crc_xfer =	0;	/* (don't) xfer CRC to host */
1325 	cbp->force_fdx =	0;	/* (don't) force full duplex */
1326 	cbp->fdx_pin_en =	1;	/* (enable) FDX# pin */
1327 	cbp->multi_ia =		0;	/* (don't) accept multiple IAs */
1328 	cbp->mc_all =		sc->all_mcasts;/* accept all multicasts */
1329 
1330 	/*
1331 	 * Start the config command/DMA.
1332 	 */
1333 	fxp_scb_wait(sc);
1334 	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(&cbp->cb_status));
1335 	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);
1336 	/* ...and wait for it to complete. */
1337 	while (!(cbp->cb_status & FXP_CB_STATUS_C));
1338 
1339 	/*
1340 	 * Now initialize the station address. Temporarily use the TxCB
1341 	 * memory area like we did above for the config CB.
1342 	 */
1343 	cb_ias = (struct fxp_cb_ias *) sc->cbl_base;
1344 	cb_ias->cb_status = 0;
1345 	cb_ias->cb_command = FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL;
1346 	cb_ias->link_addr = -1;
1347 #if defined(__NetBSD__)
1348 	bcopy(LLADDR(ifp->if_sadl), (void *)cb_ias->macaddr, 6);
1349 #else
1350 	bcopy(sc->arpcom.ac_enaddr, (void *)cb_ias->macaddr,
1351 	    sizeof(sc->arpcom.ac_enaddr));
1352 #endif /* __NetBSD__ */
1353 
1354 	/*
1355 	 * Start the IAS (Individual Address Setup) command/DMA.
1356 	 */
1357 	fxp_scb_wait(sc);
1358 	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);
1359 	/* ...and wait for it to complete. */
1360 	while (!(cb_ias->cb_status & FXP_CB_STATUS_C));
1361 
1362 	/*
1363 	 * Initialize transmit control block (TxCB) list.
1364 	 */
1365 
1366 	txp = sc->cbl_base;
1367 	bzero(txp, sizeof(struct fxp_cb_tx) * FXP_NTXCB);
1368 	for (i = 0; i < FXP_NTXCB; i++) {
1369 		txp[i].cb_status = FXP_CB_STATUS_C | FXP_CB_STATUS_OK;
1370 		txp[i].cb_command = FXP_CB_COMMAND_NOP;
1371 		txp[i].link_addr = vtophys(&txp[(i + 1) & FXP_TXCB_MASK].cb_status);
1372 		txp[i].tbd_array_addr = vtophys(&txp[i].tbd[0]);
1373 		txp[i].next = &txp[(i + 1) & FXP_TXCB_MASK];
1374 	}
1375 	/*
1376 	 * Set the suspend flag on the first TxCB and start the control
1377 	 * unit. It will execute the NOP and then suspend.
1378 	 */
1379 	txp->cb_command = FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S;
1380 	sc->cbl_first = sc->cbl_last = txp;
1381 	sc->tx_queued = 1;
1382 
1383 	fxp_scb_wait(sc);
1384 	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);
1385 
1386 	/*
1387 	 * Initialize receiver buffer area - RFA.
1388 	 */
1389 	fxp_scb_wait(sc);
1390 	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
1391 	    vtophys(sc->rfa_headm->m_ext.ext_buf) + RFA_ALIGNMENT_FUDGE);
1392 	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_RU_START);
1393 
1394 	/*
1395 	 * Set current media.
1396 	 */
1397 	fxp_set_media(sc, sc->sc_media.ifm_cur->ifm_media);
1398 
1399 	ifp->if_flags |= IFF_RUNNING;
1400 	ifp->if_flags &= ~IFF_OACTIVE;
1401 	splx(s);
1402 
1403 	/*
1404 	 * Start stats updater.
1405 	 */
1406 	sc->stat_ch = timeout(fxp_stats_update, sc, hz);
1407 }
1408 
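/*
 * Program the primary PHY for the selected media. DP83840 parts get the
 * PCR workaround bits set first; the 82553/82555 PHYs have speed and duplex
 * forced through BMCR unless IFM_AUTO enables autonegotiation.
 */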
1409 static void
1410 fxp_set_media(sc, media)
1411 	struct fxp_softc *sc;
1412 	int media;
1413 {
1414 
1415 	switch (sc->phy_primary_device) {
1416 	case FXP_PHY_DP83840:
1417 	case FXP_PHY_DP83840A:
1418 		fxp_mdi_write(sc, sc->phy_primary_addr, FXP_DP83840_PCR,
1419 		    fxp_mdi_read(sc, sc->phy_primary_addr, FXP_DP83840_PCR) |
1420 		    FXP_DP83840_PCR_LED4_MODE |	/* LED4 always indicates duplex */
1421 		    FXP_DP83840_PCR_F_CONNECT |	/* force link disconnect bypass */
1422 		    FXP_DP83840_PCR_BIT10);	/* XXX I have no idea */
1423 		/* fall through */
1424 	case FXP_PHY_82553A:
1425 	case FXP_PHY_82553C: /* untested */
1426 	case FXP_PHY_82555:
1427 	case FXP_PHY_82555B:
1428 		if (IFM_SUBTYPE(media) != IFM_AUTO) {
1429 			int flags;
1430 
1431 			flags = (IFM_SUBTYPE(media) == IFM_100_TX) ?
1432 			    FXP_PHY_BMCR_SPEED_100M : 0;
1433 			flags |= (media & IFM_FDX) ?
1434 			    FXP_PHY_BMCR_FULLDUPLEX : 0;
1435 			fxp_mdi_write(sc, sc->phy_primary_addr,
1436 			    FXP_PHY_BMCR,
1437 			    (fxp_mdi_read(sc, sc->phy_primary_addr,
1438 			    FXP_PHY_BMCR) &
1439 			    ~(FXP_PHY_BMCR_AUTOEN | FXP_PHY_BMCR_SPEED_100M |
1440 			     FXP_PHY_BMCR_FULLDUPLEX)) | flags);
1441 		} else {
1442 			fxp_mdi_write(sc, sc->phy_primary_addr,
1443 			    FXP_PHY_BMCR,
1444 			    (fxp_mdi_read(sc, sc->phy_primary_addr,
1445 			    FXP_PHY_BMCR) | FXP_PHY_BMCR_AUTOEN));
1446 		}
1447 		break;
1448 	/*
1449 	 * The Seeq 80c24 doesn't have a PHY programming interface, so do
1450 	 * nothing.
1451 	 */
1452 	case FXP_PHY_80C24:
1453 		break;
1454 	default:
1455 		printf(FXP_FORMAT
1456 		    ": warning: unsupported PHY, type = %d, addr = %d\n",
1457 		     FXP_ARGS(sc), sc->phy_primary_device,
1458 		     sc->phy_primary_addr);
1459 	}
1460 }
1461 
1462 /*
1463  * Change media according to request.
1464  */
1465 int
1466 fxp_mediachange(ifp)
1467 	struct ifnet *ifp;
1468 {
1469 	struct fxp_softc *sc = ifp->if_softc;
1470 	struct ifmedia *ifm = &sc->sc_media;
1471 
1472 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1473 		return (EINVAL);
1474 
1475 	fxp_set_media(sc, ifm->ifm_media);
1476 	return (0);
1477 }
1478 
1479 /*
1480  * Notify the world which media we're using.
1481  */
1482 void
1483 fxp_mediastatus(ifp, ifmr)
1484 	struct ifnet *ifp;
1485 	struct ifmediareq *ifmr;
1486 {
1487 	struct fxp_softc *sc = ifp->if_softc;
1488 	int flags;
1489 
1490 	switch (sc->phy_primary_device) {
1491 	case FXP_PHY_DP83840:
1492 	case FXP_PHY_DP83840A:
1493 	case FXP_PHY_82555:
1494 	case FXP_PHY_82555B:
1495 		flags = fxp_mdi_read(sc, sc->phy_primary_addr, FXP_PHY_BMCR);
1496 		ifmr->ifm_active = IFM_ETHER;
1497 		if (flags & FXP_PHY_BMCR_AUTOEN)
1498 			ifmr->ifm_active |= IFM_AUTO;
1499 		else {
1500 			if (flags & FXP_PHY_BMCR_SPEED_100M)
1501 				ifmr->ifm_active |= IFM_100_TX;
1502 			else
1503 				ifmr->ifm_active |= IFM_10_T;
1504 
1505 			if (flags & FXP_PHY_BMCR_FULLDUPLEX)
1506 				ifmr->ifm_active |= IFM_FDX;
1507 		}
1508 		break;
1509 
1510 	case FXP_PHY_80C24:
1511 	default:
1512 		ifmr->ifm_active = IFM_ETHER|IFM_MANUAL; /* XXX IFM_AUTO ? */
1513 	}
1514 }
1515 
1516 /*
1517  * Add a buffer to the end of the RFA buffer list.
1518  * Return 0 if successful, 1 for failure. A failure results in
1519  * adding the 'oldm' (if non-NULL) on to the end of the list -
1520  * tossing out its old contents and recycling it.
1521  * The RFA struct is stuck at the beginning of mbuf cluster and the
1522  * data pointer is fixed up to point just past it.
1523  */
1524 static int
1525 fxp_add_rfabuf(sc, oldm)
1526 	struct fxp_softc *sc;
1527 	struct mbuf *oldm;
1528 {
1529 	u_int32_t v;
1530 	struct mbuf *m;
1531 	struct fxp_rfa *rfa, *p_rfa;
1532 
1533 	MGETHDR(m, M_DONTWAIT, MT_DATA);
1534 	if (m != NULL) {
1535 		MCLGET(m, M_DONTWAIT);
1536 		if ((m->m_flags & M_EXT) == 0) {
1537 			m_freem(m);
1538 			if (oldm == NULL)
1539 				return 1;
1540 			m = oldm;
1541 			m->m_data = m->m_ext.ext_buf;
1542 		}
1543 	} else {
1544 		if (oldm == NULL)
1545 			return 1;
1546 		m = oldm;
1547 		m->m_data = m->m_ext.ext_buf;
1548 	}
1549 
1550 	/*
1551 	 * Move the data pointer up so that the incoming data packet
1552 	 * will be 32-bit aligned.
1553 	 */
1554 	m->m_data += RFA_ALIGNMENT_FUDGE;
1555 
1556 	/*
1557 	 * Get a pointer to the base of the mbuf cluster and move
1558 	 * data start past it.
1559 	 */
1560 	rfa = mtod(m, struct fxp_rfa *);
1561 	m->m_data += sizeof(struct fxp_rfa);
1562 	rfa->size = MCLBYTES - sizeof(struct fxp_rfa) - RFA_ALIGNMENT_FUDGE;
1563 
1564 	/*
1565 	 * Initialize the rest of the RFA.  Note that since the RFA
1566 	 * is misaligned, we cannot store values directly.  Instead,
1567 	 * we use an optimized, inline copy.
1568 	 */
1569 	rfa->rfa_status = 0;
1570 	rfa->rfa_control = FXP_RFA_CONTROL_EL;
1571 	rfa->actual_size = 0;
1572 
1573 	v = -1;
1574 	fxp_lwcopy(&v, &rfa->link_addr);
1575 	fxp_lwcopy(&v, &rfa->rbd_addr);
1576 
1577 	/*
1578 	 * If there are other buffers already on the list, attach this
1579 	 * one to the end by fixing up the tail to point to this one.
1580 	 */
1581 	if (sc->rfa_headm != NULL) {
1582 		p_rfa = (struct fxp_rfa *) (sc->rfa_tailm->m_ext.ext_buf +
1583 		    RFA_ALIGNMENT_FUDGE);
1584 		sc->rfa_tailm->m_next = m;
1585 		v = vtophys(rfa);
1586 		fxp_lwcopy(&v, &p_rfa->link_addr);
1587 		p_rfa->rfa_control &= ~FXP_RFA_CONTROL_EL;
1588 	} else {
1589 		sc->rfa_headm = m;
1590 	}
1591 	sc->rfa_tailm = m;
1592 
1593 	return (m == oldm);
1594 }
1595 
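/*
 * Read a PHY register via the MDI control register, polling the ready bit
 * (with a bounded spin) before returning the 16-bit value.
 */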
1596 static volatile int
1597 fxp_mdi_read(sc, phy, reg)
1598 	struct fxp_softc *sc;
1599 	int phy;
1600 	int reg;
1601 {
1602 	int count = 10000;
1603 	int value;
1604 
1605 	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
1606 	    (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21));
1607 
1608 	while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0
1609 	    && count--)
1610 		DELAY(10);
1611 
1612 	if (count <= 0)
1613 		printf(FXP_FORMAT ": fxp_mdi_read: timed out\n",
1614 		    FXP_ARGS(sc));
1615 
1616 	return (value & 0xffff);
1617 }
1618 
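/*
 * Write a PHY register via the MDI control register and poll until the
 * chip indicates the cycle has completed.
 */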
1619 static void
1620 fxp_mdi_write(sc, phy, reg, value)
1621 	struct fxp_softc *sc;
1622 	int phy;
1623 	int reg;
1624 	int value;
1625 {
1626 	int count = 10000;
1627 
1628 	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
1629 	    (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) |
1630 	    (value & 0xffff));
1631 
1632 	while((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 &&
1633 	    count--)
1634 		DELAY(10);
1635 
1636 	if (count <= 0)
1637 		printf(FXP_FORMAT ": fxp_mdi_write: timed out\n",
1638 		    FXP_ARGS(sc));
1639 }
1640 
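/*
 * Handle interface ioctls: address (and, on FreeBSD, MTU) requests are
 * passed to ether_ioctl(), flag changes (re)initialize or stop the
 * interface, and multicast and media requests reprogram the filter or PHY.
 */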
1641 static int
1642 fxp_ioctl(ifp, command, data)
1643 	struct ifnet *ifp;
1644 	FXP_IOCTLCMD_TYPE command;
1645 	caddr_t data;
1646 {
1647 	struct fxp_softc *sc = ifp->if_softc;
1648 	struct ifreq *ifr = (struct ifreq *)data;
1649 	int s, error = 0;
1650 
1651 	s = splimp();
1652 
1653 	switch (command) {
1654 
1655 	case SIOCSIFADDR:
1656 #if !defined(__NetBSD__)
1657 	case SIOCGIFADDR:
1658 	case SIOCSIFMTU:
1659 #endif
1660 		error = ether_ioctl(ifp, command, data);
1661 		break;
1662 
1663 	case SIOCSIFFLAGS:
1664 		sc->all_mcasts = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0;
1665 
1666 		/*
1667 		 * If interface is marked up and not running, then start it.
1668 		 * If it is marked down and running, stop it.
1669 		 * XXX If it's up then re-initialize it. This is so flags
1670 		 * such as IFF_PROMISC are handled.
1671 		 */
1672 		if (ifp->if_flags & IFF_UP) {
1673 			fxp_init(sc);
1674 		} else {
1675 			if (ifp->if_flags & IFF_RUNNING)
1676 				fxp_stop(sc);
1677 		}
1678 		break;
1679 
1680 	case SIOCADDMULTI:
1681 	case SIOCDELMULTI:
1682 		sc->all_mcasts = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0;
1683 #if defined(__NetBSD__)
1684 		error = (command == SIOCADDMULTI) ?
1685 		    ether_addmulti(ifr, &sc->sc_ethercom) :
1686 		    ether_delmulti(ifr, &sc->sc_ethercom);
1687 
1688 		if (error == ENETRESET) {
1689 			/*
1690 			 * Multicast list has changed; set the hardware
1691 			 * filter accordingly.
1692 			 */
1693 			if (!sc->all_mcasts)
1694 				fxp_mc_setup(sc);
1695 			/*
1696 			 * fxp_mc_setup() can turn on all_mcasts if we run
1697 			 * out of space, so check it again rather than else {}.
1698 			 */
1699 			if (sc->all_mcasts)
1700 				fxp_init(sc);
1701 			error = 0;
1702 		}
1703 #else /* __FreeBSD__ */
1704 		/*
1705 		 * Multicast list has changed; set the hardware filter
1706 		 * accordingly.
1707 		 */
1708 		if (!sc->all_mcasts)
1709 			fxp_mc_setup(sc);
1710 		/*
1711 		 * fxp_mc_setup() can turn on sc->all_mcasts, so check it
1712 		 * again rather than else {}.
1713 		 */
1714 		if (sc->all_mcasts)
1715 			fxp_init(sc);
1716 		error = 0;
1717 #endif /* __NetBSD__ */
1718 		break;
1719 
1720 	case SIOCSIFMEDIA:
1721 	case SIOCGIFMEDIA:
1722 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
1723 		break;
1724 
1725 	default:
1726 		error = EINVAL;
1727 	}
1728 	(void) splx(s);
1729 	return (error);
1730 }
1731 
1732 /*
1733  * Program the multicast filter.
1734  *
1735  * We have an artificial restriction that the multicast setup command
1736  * must be the first command in the chain, so we take steps to ensure
1737  * this. Requiring this allows us to keep up the performance of
1738  * the pre-initialized command ring (esp. link pointers) by not actually
1739  * inserting the mcsetup command in the ring - i.e. its link pointer
1740  * points to the TxCB ring, but the mcsetup descriptor itself is not part
1741  * of it. We then can do 'CU_START' on the mcsetup descriptor and have it
1742  * lead into the regular TxCB ring when it completes.
1743  *
1744  * This function must be called at splimp.
1745  */
1746 static void
1747 fxp_mc_setup(sc)
1748 	struct fxp_softc *sc;
1749 {
1750 	struct fxp_cb_mcs *mcsp = sc->mcsp;
1751 	struct ifnet *ifp = &sc->sc_if;
1752 	struct ifmultiaddr *ifma;
1753 	int nmcasts;
1754 
1755 	/*
1756 	 * If there are queued commands, we must wait until they are all
1757 	 * completed. If we are not already waiting, then add a NOP command
1758 	 * with interrupt option so that we're notified when all commands
1759 	 * have been completed - fxp_start() ensures that no additional
1760 	 * TX commands will be added when need_mcsetup is true.
1761 	 */
1762 	if (sc->tx_queued) {
1763 		struct fxp_cb_tx *txp;
1764 
1765 		/*
1766 		 * need_mcsetup will be true if we are already waiting for the
1767 		 * NOP command to be completed (see below). In this case, bail.
1768 		 */
1769 		if (sc->need_mcsetup)
1770 			return;
1771 		sc->need_mcsetup = 1;
1772 
1773 		/*
1774 		 * Add a NOP command with interrupt so that we are notified when all
1775 		 * TX commands have been processed.
1776 		 */
1777 		txp = sc->cbl_last->next;
1778 		txp->mb_head = NULL;
1779 		txp->cb_status = 0;
1780 		txp->cb_command = FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I;
1781 		/*
1782 		 * Advance the end of list forward.
1783 		 */
1784 		sc->cbl_last->cb_command &= ~FXP_CB_COMMAND_S;
1785 		sc->cbl_last = txp;
1786 		sc->tx_queued++;
1787 		/*
1788 		 * Issue a resume in case the CU has just suspended.
1789 		 */
1790 		fxp_scb_wait(sc);
1791 		CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_RESUME);
1792 		/*
1793 		 * Set a 5 second timer just in case we don't hear from the
1794 		 * card again.
1795 		 */
1796 		ifp->if_timer = 5;
1797 
1798 		return;
1799 	}
1800 	sc->need_mcsetup = 0;
1801 
1802 	/*
1803 	 * Initialize multicast setup descriptor.
1804 	 */
1805 	mcsp->next = sc->cbl_base;
1806 	mcsp->mb_head = NULL;
1807 	mcsp->cb_status = 0;
1808 	mcsp->cb_command = FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I;
1809 	mcsp->link_addr = vtophys(&sc->cbl_base->cb_status);
1810 
1811 	nmcasts = 0;
1812 	if (!sc->all_mcasts) {
1813 		for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
1814 		    ifma = ifma->ifma_link.le_next) {
1815 			if (ifma->ifma_addr->sa_family != AF_LINK)
1816 				continue;
1817 			if (nmcasts >= MAXMCADDR) {
1818 				sc->all_mcasts = 1;
1819 				nmcasts = 0;
1820 				break;
1821 			}
1822 			bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1823 			    (void *) &sc->mcsp->mc_addr[nmcasts][0], 6);
1824 			nmcasts++;
1825 		}
1826 	}
1827 	mcsp->mc_cnt = nmcasts * 6;
1828 	sc->cbl_first = sc->cbl_last = (struct fxp_cb_tx *) mcsp;
1829 	sc->tx_queued = 1;
1830 
1831 	/*
1832 	 * Wait until command unit is not active. This should never
1833 	 * be the case when nothing is queued, but make sure anyway.
1834 	 */
1835 	while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) ==
1836 	    FXP_SCB_CUS_ACTIVE) ;
1837 
1838 	/*
1839 	 * Start the multicast setup command.
1840 	 */
1841 	fxp_scb_wait(sc);
1842 	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(&mcsp->cb_status));
1843 	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);
1844 
1845 	ifp->if_timer = 2;
1846 	return;
1847 }
1848