xref: /freebsd/sys/dev/et/if_et.c (revision 39beb93c3f8bdbf72a61fda42300b5ebed7390c8)
1 /*-
2  * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Sepherosa Ziehau <sepherosa@gmail.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.10 2008/05/18 07:47:14 sephe Exp $
35  * $FreeBSD$
36  */
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/endian.h>
41 #include <sys/kernel.h>
42 #include <sys/bus.h>
43 #include <sys/malloc.h>
44 #include <sys/mbuf.h>
45 #include <sys/proc.h>
46 #include <sys/rman.h>
47 #include <sys/module.h>
48 #include <sys/socket.h>
49 #include <sys/sockio.h>
50 #include <sys/sysctl.h>
51 
52 #include <net/ethernet.h>
53 #include <net/if.h>
54 #include <net/if_dl.h>
55 #include <net/if_types.h>
56 #include <net/bpf.h>
57 #include <net/if_arp.h>
58 #include <net/if_dl.h>
59 #include <net/if_media.h>
60 #include <net/if_vlan_var.h>
61 
62 #include <machine/bus.h>
63 
64 #include <dev/mii/miivar.h>
65 #include <dev/mii/truephyreg.h>
66 
67 #include <dev/pci/pcireg.h>
68 #include <dev/pci/pcivar.h>
69 
70 #include <dev/et/if_etreg.h>
71 #include <dev/et/if_etvar.h>
72 
73 #include "miibus_if.h"
74 
75 MODULE_DEPEND(et, pci, 1, 1, 1);
76 MODULE_DEPEND(et, ether, 1, 1, 1);
77 MODULE_DEPEND(et, miibus, 1, 1, 1);
78 
79 static int	et_probe(device_t);
80 static int	et_attach(device_t);
81 static int	et_detach(device_t);
82 static int	et_shutdown(device_t);
83 
84 static int	et_miibus_readreg(device_t, int, int);
85 static int	et_miibus_writereg(device_t, int, int, int);
86 static void	et_miibus_statchg(device_t);
87 
88 static void	et_init_locked(struct et_softc *);
89 static void	et_init(void *);
90 static int	et_ioctl(struct ifnet *, u_long, caddr_t);
91 static void	et_start_locked(struct ifnet *);
92 static void	et_start(struct ifnet *);
93 static void	et_watchdog(struct et_softc *);
94 static int	et_ifmedia_upd_locked(struct ifnet *);
95 static int	et_ifmedia_upd(struct ifnet *);
96 static void	et_ifmedia_sts(struct ifnet *, struct ifmediareq *);
97 
98 static void	et_add_sysctls(struct et_softc *);
99 static int	et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS);
100 static int	et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS);
101 
102 static void	et_intr(void *);
103 static void	et_enable_intrs(struct et_softc *, uint32_t);
104 static void	et_disable_intrs(struct et_softc *);
105 static void	et_rxeof(struct et_softc *);
106 static void	et_txeof(struct et_softc *);
107 
108 static int	et_dma_alloc(device_t);
109 static void	et_dma_free(device_t);
110 static int	et_dma_mem_create(device_t, bus_size_t, bus_dma_tag_t *,
111 				  void **, bus_addr_t *, bus_dmamap_t *);
112 static void	et_dma_mem_destroy(bus_dma_tag_t, void *, bus_dmamap_t);
113 static int	et_dma_mbuf_create(device_t);
114 static void	et_dma_mbuf_destroy(device_t, int, const int[]);
115 static void	et_dma_ring_addr(void *, bus_dma_segment_t *, int, int);
116 static void	et_dma_buf_addr(void *, bus_dma_segment_t *, int,
117 				bus_size_t, int);
118 static int	et_init_tx_ring(struct et_softc *);
119 static int	et_init_rx_ring(struct et_softc *);
120 static void	et_free_tx_ring(struct et_softc *);
121 static void	et_free_rx_ring(struct et_softc *);
122 static int	et_encap(struct et_softc *, struct mbuf **);
123 static int	et_newbuf(struct et_rxbuf_data *, int, int, int);
124 static int	et_newbuf_cluster(struct et_rxbuf_data *, int, int);
125 static int	et_newbuf_hdr(struct et_rxbuf_data *, int, int);
126 
127 static void	et_stop(struct et_softc *);
128 static int	et_chip_init(struct et_softc *);
129 static void	et_chip_attach(struct et_softc *);
130 static void	et_init_mac(struct et_softc *);
131 static void	et_init_rxmac(struct et_softc *);
132 static void	et_init_txmac(struct et_softc *);
133 static int	et_init_rxdma(struct et_softc *);
134 static int	et_init_txdma(struct et_softc *);
135 static int	et_start_rxdma(struct et_softc *);
136 static int	et_start_txdma(struct et_softc *);
137 static int	et_stop_rxdma(struct et_softc *);
138 static int	et_stop_txdma(struct et_softc *);
139 static int	et_enable_txrx(struct et_softc *, int);
140 static void	et_reset(struct et_softc *);
141 static int	et_bus_config(device_t);
142 static void	et_get_eaddr(device_t, uint8_t[]);
143 static void	et_setmulti(struct et_softc *);
144 static void	et_tick(void *);
145 static void	et_setmedia(struct et_softc *);
146 static void	et_setup_rxdesc(struct et_rxbuf_data *, int, bus_addr_t);
147 
148 static const struct et_dev {
149 	uint16_t	vid;
150 	uint16_t	did;
151 	const char	*desc;
152 } et_devices[] = {
153 	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310,
154 	  "Agere ET1310 Gigabit Ethernet" },
155 	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST,
156 	  "Agere ET1310 Fast Ethernet" },
157 	{ 0, 0, NULL }
158 };
159 
160 static device_method_t et_methods[] = {
161 	DEVMETHOD(device_probe,		et_probe),
162 	DEVMETHOD(device_attach,	et_attach),
163 	DEVMETHOD(device_detach,	et_detach),
164 	DEVMETHOD(device_shutdown,	et_shutdown),
165 
166 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
167 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
168 
169 	DEVMETHOD(miibus_readreg,	et_miibus_readreg),
170 	DEVMETHOD(miibus_writereg,	et_miibus_writereg),
171 	DEVMETHOD(miibus_statchg,	et_miibus_statchg),
172 
173 	{ 0, 0 }
174 };
175 
176 static driver_t et_driver = {
177 	"et",
178 	et_methods,
179 	sizeof(struct et_softc)
180 };
181 
182 static devclass_t et_devclass;
183 
184 DRIVER_MODULE(et, pci, et_driver, et_devclass, 0, 0);
185 DRIVER_MODULE(miibus, et, miibus_driver, miibus_devclass, 0, 0);
186 
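/*
 * Default interrupt moderation and timer settings.  They are registered
 * as loader tunables (TUNABLE_INT below), so they may be overridden from
 * loader.conf before the module is loaded, e.g.:
 *
 *	hw.et.rx_intr_npkts="64"
 *	hw.et.rx_intr_delay="40"
 *
 * (Values shown are illustrative only, not recommendations.)
 */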
187 static int	et_rx_intr_npkts = 32;
188 static int	et_rx_intr_delay = 20;		/* x10 usec */
189 static int	et_tx_intr_nsegs = 126;
190 static uint32_t	et_timer = 1000 * 1000 * 1000;	/* nanosec */
191 
192 TUNABLE_INT("hw.et.timer", &et_timer);
193 TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts);
194 TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay);
195 TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs);
196 
197 struct et_bsize {
198 	int		bufsize;
199 	et_newbuf_t	newbuf;
200 };
201 
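/*
 * Per-ring RX buffer setup: ring 0 is stocked with small (128-byte class)
 * mbufs via et_newbuf_hdr(), ring 1 with 2KB clusters via
 * et_newbuf_cluster().  The RX status entries later report which ring a
 * received frame was placed in (see et_rxeof()); presumably the chip
 * steers short frames to the small-buffer ring so they do not consume
 * full clusters.
 */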
202 static const struct et_bsize	et_bufsize_std[ET_RX_NRING] = {
203 	{ .bufsize = ET_RXDMA_CTRL_RING0_128,
204 	  .newbuf = et_newbuf_hdr },
205 	{ .bufsize = ET_RXDMA_CTRL_RING1_2048,
206 	  .newbuf = et_newbuf_cluster },
207 };
208 
209 static int
210 et_probe(device_t dev)
211 {
212 	const struct et_dev *d;
213 	uint16_t did, vid;
214 
215 	vid = pci_get_vendor(dev);
216 	did = pci_get_device(dev);
217 
218 	for (d = et_devices; d->desc != NULL; ++d) {
219 		if (vid == d->vid && did == d->did) {
220 			device_set_desc(dev, d->desc);
221 			return 0;
222 		}
223 	}
224 	return ENXIO;
225 }
226 
227 static int
228 et_attach(device_t dev)
229 {
230 	struct et_softc *sc;
231 	struct ifnet *ifp;
232 	uint8_t eaddr[ETHER_ADDR_LEN];
233 	int error;
234 
235 	sc = device_get_softc(dev);
236 	sc->dev = dev;
237 	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
238 	    MTX_DEF);
239 
240 	ifp = sc->ifp = if_alloc(IFT_ETHER);
241 	if (ifp == NULL) {
242 		device_printf(dev, "cannot if_alloc()\n");
243 		error = ENOSPC;
244 		goto fail;
245 	}
246 
247 	/*
248 	 * Initialize tunables
249 	 */
250 	sc->sc_rx_intr_npkts = et_rx_intr_npkts;
251 	sc->sc_rx_intr_delay = et_rx_intr_delay;
252 	sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
253 	sc->sc_timer = et_timer;
254 
255 	/* Enable bus mastering */
256 	pci_enable_busmaster(dev);
257 
258 	/*
259 	 * Allocate IO memory
260 	 */
261 	sc->sc_mem_rid = ET_PCIR_BAR;
262 	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
263 						&sc->sc_mem_rid, RF_ACTIVE);
264 	if (sc->sc_mem_res == NULL) {
265 		device_printf(dev, "can't allocate IO memory\n");
266 		return ENXIO;
267 	}
268 	sc->sc_mem_bt = rman_get_bustag(sc->sc_mem_res);
269 	sc->sc_mem_bh = rman_get_bushandle(sc->sc_mem_res);
270 
271 	/*
272 	 * Allocate IRQ
273 	 */
274 	sc->sc_irq_rid = 0;
275 	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
276 						&sc->sc_irq_rid,
277 						RF_SHAREABLE | RF_ACTIVE);
278 	if (sc->sc_irq_res == NULL) {
279 		device_printf(dev, "can't allocate irq\n");
280 		error = ENXIO;
281 		goto fail;
282 	}
283 
284 	error = et_bus_config(dev);
285 	if (error)
286 		goto fail;
287 
288 	et_get_eaddr(dev, eaddr);
289 
290 	CSR_WRITE_4(sc, ET_PM,
291 		    ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE);
292 
293 	et_reset(sc);
294 
295 	et_disable_intrs(sc);
296 
297 	error = et_dma_alloc(dev);
298 	if (error)
299 		goto fail;
300 
301 	ifp->if_softc = sc;
302 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
303 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
304 	ifp->if_init = et_init;
305 	ifp->if_ioctl = et_ioctl;
306 	ifp->if_start = et_start;
307 	ifp->if_mtu = ETHERMTU;
308 	ifp->if_capabilities = IFCAP_VLAN_MTU;
309 	ifp->if_capenable = ifp->if_capabilities;
310 	IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC);
311 	IFQ_SET_READY(&ifp->if_snd);
312 
313 	et_chip_attach(sc);
314 
315 	error = mii_phy_probe(dev, &sc->sc_miibus,
316 			      et_ifmedia_upd, et_ifmedia_sts);
317 	if (error) {
318 		device_printf(dev, "can't probe any PHY\n");
319 		goto fail;
320 	}
321 
322 	ether_ifattach(ifp, eaddr);
323 	callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);
324 
325 #if __FreeBSD_version > 700030
326 	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_NET | INTR_MPSAFE,
327 			       NULL, et_intr, sc, &sc->sc_irq_handle);
328 #else
329 	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_NET | INTR_MPSAFE,
330 			       et_intr, sc, &sc->sc_irq_handle);
331 #endif
332 
333 	if (error) {
334 		ether_ifdetach(ifp);
335 		device_printf(dev, "can't setup intr\n");
336 		goto fail;
337 	}
338 
339 	et_add_sysctls(sc);
340 
341 	return 0;
342 fail:
343 	et_detach(dev);
344 	return error;
345 }
346 
347 static int
348 et_detach(device_t dev)
349 {
350 	struct et_softc *sc = device_get_softc(dev);
351 
352 	if (device_is_attached(dev)) {
353 		struct ifnet *ifp = sc->ifp;
354 
355 		ET_LOCK(sc);
356 		et_stop(sc);
357 		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle);
358 		ET_UNLOCK(sc);
359 
360 		ether_ifdetach(ifp);
361 	}
362 
363 	if (sc->sc_miibus != NULL)
364 		device_delete_child(dev, sc->sc_miibus);
365 	bus_generic_detach(dev);
366 
367 	if (sc->sc_irq_res != NULL) {
368 		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
369 				     sc->sc_irq_res);
370 	}
371 
372 	if (sc->sc_mem_res != NULL) {
373 		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
374 				     sc->sc_mem_res);
375 	}
376 
377 	if (sc->ifp != NULL)
378 		if_free(sc->ifp);
379 
380 	et_dma_free(dev);
381 	mtx_destroy(&sc->sc_mtx);
382 
383 	return 0;
384 }
385 
386 static int
387 et_shutdown(device_t dev)
388 {
389 	struct et_softc *sc = device_get_softc(dev);
390 
391 	ET_LOCK(sc);
392 	et_stop(sc);
393 	ET_UNLOCK(sc);
394 	return 0;
395 }
396 
397 static int
398 et_miibus_readreg(device_t dev, int phy, int reg)
399 {
400 	struct et_softc *sc = device_get_softc(dev);
401 	uint32_t val;
402 	int i, ret;
403 
404 	/* Stop any pending operations */
405 	CSR_WRITE_4(sc, ET_MII_CMD, 0);
406 
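	/*
	 * __SHIFTIN()/__SHIFTOUT() (macros inherited from the DragonFly
	 * driver, presumably provided by the et(4) headers) move a value
	 * into, or extract it from, the bit field described by a mask; here
	 * the PHY and register numbers are packed into ET_MII_ADDR.
	 */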
407 	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
408 	      __SHIFTIN(reg, ET_MII_ADDR_REG);
409 	CSR_WRITE_4(sc, ET_MII_ADDR, val);
410 
411 	/* Start reading */
412 	CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);
413 
414 #define NRETRY	50
415 
416 	for (i = 0; i < NRETRY; ++i) {
417 		val = CSR_READ_4(sc, ET_MII_IND);
418 		if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
419 			break;
420 		DELAY(50);
421 	}
422 	if (i == NRETRY) {
423 		if_printf(sc->ifp,
424 			  "read phy %d, reg %d timed out\n", phy, reg);
425 		ret = 0;
426 		goto back;
427 	}
428 
429 #undef NRETRY
430 
431 	val = CSR_READ_4(sc, ET_MII_STAT);
432 	ret = __SHIFTOUT(val, ET_MII_STAT_VALUE);
433 
434 back:
435 	/* Make sure that the current operation is stopped */
436 	CSR_WRITE_4(sc, ET_MII_CMD, 0);
437 	return ret;
438 }
439 
440 static int
441 et_miibus_writereg(device_t dev, int phy, int reg, int val0)
442 {
443 	struct et_softc *sc = device_get_softc(dev);
444 	uint32_t val;
445 	int i;
446 
447 	/* Stop any pending operations */
448 	CSR_WRITE_4(sc, ET_MII_CMD, 0);
449 
450 	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
451 	      __SHIFTIN(reg, ET_MII_ADDR_REG);
452 	CSR_WRITE_4(sc, ET_MII_ADDR, val);
453 
454 	/* Start writing */
455 	CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val0, ET_MII_CTRL_VALUE));
456 
457 #define NRETRY 100
458 
459 	for (i = 0; i < NRETRY; ++i) {
460 		val = CSR_READ_4(sc, ET_MII_IND);
461 		if ((val & ET_MII_IND_BUSY) == 0)
462 			break;
463 		DELAY(50);
464 	}
465 	if (i == NRETRY) {
466 		if_printf(sc->ifp,
467 			  "write phy %d, reg %d timed out\n", phy, reg);
468 		et_miibus_readreg(dev, phy, reg);
469 	}
470 
471 #undef NRETRY
472 
473 	/* Make sure that the current operation is stopped */
474 	CSR_WRITE_4(sc, ET_MII_CMD, 0);
475 	return 0;
476 }
477 
478 static void
479 et_miibus_statchg(device_t dev)
480 {
481 	et_setmedia(device_get_softc(dev));
482 }
483 
484 static int
485 et_ifmedia_upd_locked(struct ifnet *ifp)
486 {
487 	struct et_softc *sc = ifp->if_softc;
488 	struct mii_data *mii = device_get_softc(sc->sc_miibus);
489 
490 	if (mii->mii_instance != 0) {
491 		struct mii_softc *miisc;
492 
493 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
494 			mii_phy_reset(miisc);
495 	}
496 	mii_mediachg(mii);
497 
498 	return 0;
499 }
500 
501 static int
502 et_ifmedia_upd(struct ifnet *ifp)
503 {
504 	struct et_softc *sc = ifp->if_softc;
505 	int res;
506 
507 	ET_LOCK(sc);
508 	res = et_ifmedia_upd_locked(ifp);
509 	ET_UNLOCK(sc);
510 
511 	return res;
512 }
513 
514 static void
515 et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
516 {
517 	struct et_softc *sc = ifp->if_softc;
518 	struct mii_data *mii = device_get_softc(sc->sc_miibus);
519 
520 	mii_pollstat(mii);
521 	ifmr->ifm_active = mii->mii_media_active;
522 	ifmr->ifm_status = mii->mii_media_status;
523 }
524 
525 static void
526 et_stop(struct et_softc *sc)
527 {
528 	struct ifnet *ifp = sc->ifp;
529 
530 	ET_LOCK_ASSERT(sc);
531 
532 	callout_stop(&sc->sc_tick);
533 
534 	et_stop_rxdma(sc);
535 	et_stop_txdma(sc);
536 
537 	et_disable_intrs(sc);
538 
539 	et_free_tx_ring(sc);
540 	et_free_rx_ring(sc);
541 
542 	et_reset(sc);
543 
544 	sc->sc_tx = 0;
545 	sc->sc_tx_intr = 0;
546 	sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;
547 
548 	sc->watchdog_timer = 0;
549 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
550 }
551 
552 static int
553 et_bus_config(device_t dev)
554 {
555 	uint32_t val, max_plsz;
556 	uint16_t ack_latency, replay_timer;
557 
558 	/*
559 	 * Test whether EEPROM is valid
560 	 * NOTE: Read twice to get the correct value
561 	 */
562 	pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1);
563 	val = pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1);
564 	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
565 		device_printf(dev, "EEPROM status error 0x%02x\n", val);
566 		return ENXIO;
567 	}
568 
569 	/* TODO: LED */
570 
571 	/*
572 	 * Configure ACK latency and replay timer according to
573 	 * max payload size
574 	 */
575 	val = pci_read_config(dev, ET_PCIR_DEVICE_CAPS, 4);
576 	max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ;
577 
578 	switch (max_plsz) {
579 	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
580 		ack_latency = ET_PCIV_ACK_LATENCY_128;
581 		replay_timer = ET_PCIV_REPLAY_TIMER_128;
582 		break;
583 
584 	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
585 		ack_latency = ET_PCIV_ACK_LATENCY_256;
586 		replay_timer = ET_PCIV_REPLAY_TIMER_256;
587 		break;
588 
589 	default:
590 		ack_latency = pci_read_config(dev, ET_PCIR_ACK_LATENCY, 2);
591 		replay_timer = pci_read_config(dev, ET_PCIR_REPLAY_TIMER, 2);
592 		device_printf(dev, "ack latency %u, replay timer %u\n",
593 			      ack_latency, replay_timer);
594 		break;
595 	}
596 	if (ack_latency != 0) {
597 		pci_write_config(dev, ET_PCIR_ACK_LATENCY, ack_latency, 2);
598 		pci_write_config(dev, ET_PCIR_REPLAY_TIMER, replay_timer, 2);
599 	}
600 
601 	/*
602 	 * Set L0s and L1 latency timer to 2us
603 	 */
604 	val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2);
605 	pci_write_config(dev, ET_PCIR_L0S_L1_LATENCY, val, 1);
606 
607 	/*
608 	 * Set max read request size to 2048 bytes
609 	 */
610 	val = pci_read_config(dev, ET_PCIR_DEVICE_CTRL, 2);
611 	val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ;
612 	val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
613 	pci_write_config(dev, ET_PCIR_DEVICE_CTRL, val, 2);
614 
615 	return 0;
616 }
617 
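/*
 * The station address is read from PCI configuration space: the first
 * four bytes from ET_PCIR_MAC_ADDR0 and the remaining two from
 * ET_PCIR_MAC_ADDR1, least significant byte first.
 */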
618 static void
619 et_get_eaddr(device_t dev, uint8_t eaddr[])
620 {
621 	uint32_t val;
622 	int i;
623 
624 	val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4);
625 	for (i = 0; i < 4; ++i)
626 		eaddr[i] = (val >> (8 * i)) & 0xff;
627 
628 	val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2);
629 	for (; i < ETHER_ADDR_LEN; ++i)
630 		eaddr[i] = (val >> (8 * (i - 4))) & 0xff;
631 }
632 
633 static void
634 et_reset(struct et_softc *sc)
635 {
636 	CSR_WRITE_4(sc, ET_MAC_CFG1,
637 		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
638 		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
639 		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
640 
641 	CSR_WRITE_4(sc, ET_SWRST,
642 		    ET_SWRST_TXDMA | ET_SWRST_RXDMA |
643 		    ET_SWRST_TXMAC | ET_SWRST_RXMAC |
644 		    ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);
645 
646 	CSR_WRITE_4(sc, ET_MAC_CFG1,
647 		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
648 		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
649 	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
650 }
651 
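/*
 * ET_INTR_MASK is a disable mask: a bit set to 1 masks (disables) the
 * corresponding interrupt source.  Writing 0xffffffff therefore disables
 * everything, while writing ~intrs leaves only the requested sources
 * unmasked.
 */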
652 static void
653 et_disable_intrs(struct et_softc *sc)
654 {
655 	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
656 }
657 
658 static void
659 et_enable_intrs(struct et_softc *sc, uint32_t intrs)
660 {
661 	CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs);
662 }
663 
664 static int
665 et_dma_alloc(device_t dev)
666 {
667 	struct et_softc *sc = device_get_softc(dev);
668 	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
669 	struct et_txstatus_data *txsd = &sc->sc_tx_status;
670 	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
671 	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
672 	int i, error;
673 
674 	/*
675 	 * Create top level DMA tag
676 	 */
677 	error = bus_dma_tag_create(NULL, 1, 0,
678 				   BUS_SPACE_MAXADDR_32BIT,
679 				   BUS_SPACE_MAXADDR,
680 				   NULL, NULL,
681 				   MAXBSIZE,
682 				   BUS_SPACE_UNRESTRICTED,
683 				   BUS_SPACE_MAXSIZE_32BIT,
684 				   0, NULL, NULL, &sc->sc_dtag);
685 	if (error) {
686 		device_printf(dev, "can't create DMA tag\n");
687 		return error;
688 	}
689 
690 	/*
691 	 * Create TX ring DMA resources
692 	 */
693 	error = et_dma_mem_create(dev, ET_TX_RING_SIZE, &tx_ring->tr_dtag,
694 				  (void **)&tx_ring->tr_desc,
695 				  &tx_ring->tr_paddr, &tx_ring->tr_dmap);
696 	if (error) {
697 		device_printf(dev, "can't create TX ring DMA resources\n");
698 		return error;
699 	}
700 
701 	/*
702 	 * Create TX status DMA resources
703 	 */
704 	error = et_dma_mem_create(dev, sizeof(uint32_t), &txsd->txsd_dtag,
705 				  (void **)&txsd->txsd_status,
706 				  &txsd->txsd_paddr, &txsd->txsd_dmap);
707 	if (error) {
708 		device_printf(dev, "can't create TX status DMA resources\n");
709 		return error;
710 	}
711 
712 	/*
713 	 * Create DMA resources for RX rings
714 	 */
715 	for (i = 0; i < ET_RX_NRING; ++i) {
716 		static const uint32_t rx_ring_posreg[ET_RX_NRING] =
717 		{ ET_RX_RING0_POS, ET_RX_RING1_POS };
718 
719 		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];
720 
721 		error = et_dma_mem_create(dev, ET_RX_RING_SIZE,
722 					  &rx_ring->rr_dtag,
723 					  (void **)&rx_ring->rr_desc,
724 					  &rx_ring->rr_paddr,
725 					  &rx_ring->rr_dmap);
726 		if (error) {
727 			device_printf(dev, "can't create DMA resources for "
728 				      "RX ring %d\n", i);
729 			return error;
730 		}
731 		rx_ring->rr_posreg = rx_ring_posreg[i];
732 	}
733 
734 	/*
735 	 * Create RX stat ring DMA resources
736 	 */
737 	error = et_dma_mem_create(dev, ET_RXSTAT_RING_SIZE,
738 				  &rxst_ring->rsr_dtag,
739 				  (void **)&rxst_ring->rsr_stat,
740 				  &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap);
741 	if (error) {
742 		device_printf(dev, "can't create RX stat ring DMA resources\n");
743 		return error;
744 	}
745 
746 	/*
747 	 * Create RX status DMA resources
748 	 */
749 	error = et_dma_mem_create(dev, sizeof(struct et_rxstatus),
750 				  &rxsd->rxsd_dtag,
751 				  (void **)&rxsd->rxsd_status,
752 				  &rxsd->rxsd_paddr, &rxsd->rxsd_dmap);
753 	if (error) {
754 		device_printf(dev, "can't create RX status DMA resources\n");
755 		return error;
756 	}
757 
758 	/*
759 	 * Create mbuf DMA resources
760 	 */
761 	error = et_dma_mbuf_create(dev);
762 	if (error)
763 		return error;
764 
765 	return 0;
766 }
767 
768 static void
769 et_dma_free(device_t dev)
770 {
771 	struct et_softc *sc = device_get_softc(dev);
772 	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
773 	struct et_txstatus_data *txsd = &sc->sc_tx_status;
774 	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
775 	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
776 	int i, rx_done[ET_RX_NRING];
777 
778 	/*
779 	 * Destroy TX ring DMA resources
780 	 */
781 	et_dma_mem_destroy(tx_ring->tr_dtag, tx_ring->tr_desc,
782 			   tx_ring->tr_dmap);
783 
784 	/*
785 	 * Destroy TX status DMA resources
786 	 */
787 	et_dma_mem_destroy(txsd->txsd_dtag, txsd->txsd_status,
788 			   txsd->txsd_dmap);
789 
790 	/*
791 	 * Destroy DMA resources for RX rings
792 	 */
793 	for (i = 0; i < ET_RX_NRING; ++i) {
794 		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];
795 
796 		et_dma_mem_destroy(rx_ring->rr_dtag, rx_ring->rr_desc,
797 				   rx_ring->rr_dmap);
798 	}
799 
800 	/*
801 	 * Destroy RX stat ring DMA resources
802 	 */
803 	et_dma_mem_destroy(rxst_ring->rsr_dtag, rxst_ring->rsr_stat,
804 			   rxst_ring->rsr_dmap);
805 
806 	/*
807 	 * Destroy RX status DMA resources
808 	 */
809 	et_dma_mem_destroy(rxsd->rxsd_dtag, rxsd->rxsd_status,
810 			   rxsd->rxsd_dmap);
811 
812 	/*
813 	 * Destroy mbuf DMA resources
814 	 */
815 	for (i = 0; i < ET_RX_NRING; ++i)
816 		rx_done[i] = ET_RX_NDESC;
817 	et_dma_mbuf_destroy(dev, ET_TX_NDESC, rx_done);
818 
819 	/*
820 	 * Destroy top level DMA tag
821 	 */
822 	if (sc->sc_dtag != NULL)
823 		bus_dma_tag_destroy(sc->sc_dtag);
824 }
825 
826 static int
827 et_dma_mbuf_create(device_t dev)
828 {
829 	struct et_softc *sc = device_get_softc(dev);
830 	struct et_txbuf_data *tbd = &sc->sc_tx_data;
831 	int i, error, rx_done[ET_RX_NRING];
832 
833 	/*
834 	 * Create mbuf DMA tag
835 	 */
836 	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
837 				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
838 				   NULL, NULL,
839 				   ET_JUMBO_FRAMELEN, ET_NSEG_MAX,
840 				   BUS_SPACE_MAXSIZE_32BIT,
841 				   BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_mbuf_dtag);
842 	if (error) {
843 		device_printf(dev, "can't create mbuf DMA tag\n");
844 		return error;
845 	}
846 
847 	/*
848 	 * Create spare DMA map for RX mbufs
849 	 */
850 	error = bus_dmamap_create(sc->sc_mbuf_dtag, 0, &sc->sc_mbuf_tmp_dmap);
851 	if (error) {
852 		device_printf(dev, "can't create spare mbuf DMA map\n");
853 		bus_dma_tag_destroy(sc->sc_mbuf_dtag);
854 		sc->sc_mbuf_dtag = NULL;
855 		return error;
856 	}
857 
858 	/*
859 	 * Create DMA maps for RX mbufs
860 	 */
861 	bzero(rx_done, sizeof(rx_done));
862 	for (i = 0; i < ET_RX_NRING; ++i) {
863 		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
864 		int j;
865 
866 		for (j = 0; j < ET_RX_NDESC; ++j) {
867 			error = bus_dmamap_create(sc->sc_mbuf_dtag, 0,
868 				&rbd->rbd_buf[j].rb_dmap);
869 			if (error) {
870 				device_printf(dev, "can't create DMA map for "
871 					      "RX mbuf %d in RX ring %d\n", j, i);
872 				rx_done[i] = j;
873 				et_dma_mbuf_destroy(dev, 0, rx_done);
874 				return error;
875 			}
876 		}
877 		rx_done[i] = ET_RX_NDESC;
878 
879 		rbd->rbd_softc = sc;
880 		rbd->rbd_ring = &sc->sc_rx_ring[i];
881 	}
882 
883 	/*
884 	 * Create DMA maps for TX mbufs
885 	 */
886 	for (i = 0; i < ET_TX_NDESC; ++i) {
887 		error = bus_dmamap_create(sc->sc_mbuf_dtag, 0,
888 					  &tbd->tbd_buf[i].tb_dmap);
889 		if (error) {
890 			device_printf(dev, "can't create DMA map for "
891 				      "TX mbuf %d\n", i);
892 			et_dma_mbuf_destroy(dev, i, rx_done);
893 			return error;
894 		}
895 	}
896 
897 	return 0;
898 }
899 
900 static void
901 et_dma_mbuf_destroy(device_t dev, int tx_done, const int rx_done[])
902 {
903 	struct et_softc *sc = device_get_softc(dev);
904 	struct et_txbuf_data *tbd = &sc->sc_tx_data;
905 	int i;
906 
907 	if (sc->sc_mbuf_dtag == NULL)
908 		return;
909 
910 	/*
911 	 * Destroy DMA maps for RX mbufs
912 	 */
913 	for (i = 0; i < ET_RX_NRING; ++i) {
914 		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
915 		int j;
916 
917 		for (j = 0; j < rx_done[i]; ++j) {
918 			struct et_rxbuf *rb = &rbd->rbd_buf[j];
919 
920 			KASSERT(rb->rb_mbuf == NULL,
921 			    ("RX mbuf in %d RX ring is not freed yet\n", i));
922 			bus_dmamap_destroy(sc->sc_mbuf_dtag, rb->rb_dmap);
923 		}
924 	}
925 
926 	/*
927 	 * Destroy DMA maps for TX mbufs
928 	 */
929 	for (i = 0; i < tx_done; ++i) {
930 		struct et_txbuf *tb = &tbd->tbd_buf[i];
931 
932 		KASSERT(tb->tb_mbuf == NULL, ("TX mbuf is not freed yet\n"));
933 		bus_dmamap_destroy(sc->sc_mbuf_dtag, tb->tb_dmap);
934 	}
935 
936 	/*
937 	 * Destroy spare mbuf DMA map
938 	 */
939 	bus_dmamap_destroy(sc->sc_mbuf_dtag, sc->sc_mbuf_tmp_dmap);
940 
941 	/*
942 	 * Destroy mbuf DMA tag
943 	 */
944 	bus_dma_tag_destroy(sc->sc_mbuf_dtag);
945 	sc->sc_mbuf_dtag = NULL;
946 }
947 
948 static int
949 et_dma_mem_create(device_t dev, bus_size_t size, bus_dma_tag_t *dtag,
950 		  void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap)
951 {
952 	struct et_softc *sc = device_get_softc(dev);
953 	int error;
954 
955 	error = bus_dma_tag_create(sc->sc_dtag, ET_ALIGN, 0,
956 				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
957 				   NULL, NULL,
958 				   size, 1, BUS_SPACE_MAXSIZE_32BIT,
959 				   0, NULL, NULL, dtag);
960 	if (error) {
961 		device_printf(dev, "can't create DMA tag\n");
962 		return error;
963 	}
964 
965 	error = bus_dmamem_alloc(*dtag, addr, BUS_DMA_WAITOK | BUS_DMA_ZERO,
966 				 dmap);
967 	if (error) {
968 		device_printf(dev, "can't allocate DMA mem\n");
969 		bus_dma_tag_destroy(*dtag);
970 		*dtag = NULL;
971 		return error;
972 	}
973 
974 	error = bus_dmamap_load(*dtag, *dmap, *addr, size,
975 				et_dma_ring_addr, paddr, BUS_DMA_WAITOK);
976 	if (error) {
977 		device_printf(dev, "can't load DMA mem\n");
978 		bus_dmamem_free(*dtag, *addr, *dmap);
979 		bus_dma_tag_destroy(*dtag);
980 		*dtag = NULL;
981 		return error;
982 	}
983 	return 0;
984 }
985 
986 static void
987 et_dma_mem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap)
988 {
989 	if (dtag != NULL) {
990 		bus_dmamap_unload(dtag, dmap);
991 		bus_dmamem_free(dtag, addr, dmap);
992 		bus_dma_tag_destroy(dtag);
993 	}
994 }
995 
996 static void
997 et_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
998 {
999 	KASSERT(nseg == 1, ("too many segments\n"));
1000 	*((bus_addr_t *)arg) = seg->ds_addr;
1001 }
1002 
1003 static void
1004 et_chip_attach(struct et_softc *sc)
1005 {
1006 	uint32_t val;
1007 
1008 	/*
1009 	 * Perform minimal initialization
1010 	 */
1011 
1012 	/* Disable loopback */
1013 	CSR_WRITE_4(sc, ET_LOOPBACK, 0);
1014 
1015 	/* Reset MAC */
1016 	CSR_WRITE_4(sc, ET_MAC_CFG1,
1017 		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
1018 		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
1019 		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
1020 
1021 	/*
1022 	 * Setup half duplex mode
1023 	 */
1024 	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
1025 	      __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
1026 	      __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
1027 	      ET_MAC_HDX_EXC_DEFER;
1028 	CSR_WRITE_4(sc, ET_MAC_HDX, val);
1029 
1030 	/* Clear MAC control */
1031 	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);
1032 
1033 	/* Reset MII */
1034 	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);
1035 
1036 	/* Bring MAC out of reset state */
1037 	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
1038 
1039 	/* Enable memory controllers */
1040 	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
1041 }
1042 
1043 static void
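/*
 * Interrupt handler.  All interrupt sources are masked while the handler
 * runs and re-enabled on the way out.  ET_INTR_TIMER is also treated as a
 * TX-completion hint, and the timer is re-armed with sc_timer after it
 * fires.
 */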
1044 et_intr(void *xsc)
1045 {
1046 	struct et_softc *sc = xsc;
1047 	struct ifnet *ifp;
1048 	uint32_t intrs;
1049 
1050 	ET_LOCK(sc);
1051 	ifp = sc->ifp;
1052 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1053 		ET_UNLOCK(sc);
1054 		return;
1055 	}
1056 
1057 	et_disable_intrs(sc);
1058 
1059 	intrs = CSR_READ_4(sc, ET_INTR_STATUS);
1060 	intrs &= ET_INTRS;
1061 	if (intrs == 0)	/* Not interested */
1062 		goto back;
1063 
1064 	if (intrs & ET_INTR_RXEOF)
1065 		et_rxeof(sc);
1066 	if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
1067 		et_txeof(sc);
1068 	if (intrs & ET_INTR_TIMER)
1069 		CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
1070 back:
1071 	et_enable_intrs(sc, ET_INTRS);
1072 	ET_UNLOCK(sc);
1073 }
1074 
1075 static void
1076 et_init_locked(struct et_softc *sc)
1077 {
1078 	struct ifnet *ifp = sc->ifp;
1079 	const struct et_bsize *arr;
1080 	int error, i;
1081 
1082 	ET_LOCK_ASSERT(sc);
1083 
1084 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1085 		return;
1086 
1087 	et_stop(sc);
1088 
1089 	arr = et_bufsize_std;
1090 	for (i = 0; i < ET_RX_NRING; ++i) {
1091 		sc->sc_rx_data[i].rbd_bufsize = arr[i].bufsize;
1092 		sc->sc_rx_data[i].rbd_newbuf = arr[i].newbuf;
1093 	}
1094 
1095 	error = et_init_tx_ring(sc);
1096 	if (error)
1097 		goto back;
1098 
1099 	error = et_init_rx_ring(sc);
1100 	if (error)
1101 		goto back;
1102 
1103 	error = et_chip_init(sc);
1104 	if (error)
1105 		goto back;
1106 
1107 	error = et_enable_txrx(sc, 1);
1108 	if (error)
1109 		goto back;
1110 
1111 	et_enable_intrs(sc, ET_INTRS);
1112 
1113 	callout_reset(&sc->sc_tick, hz, et_tick, sc);
1114 
1115 	CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
1116 
1117 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1118 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1119 back:
1120 	if (error)
1121 		et_stop(sc);
1122 }
1123 
1124 static void
1125 et_init(void *xsc)
1126 {
1127 	struct et_softc *sc = xsc;
1128 
1129 	ET_LOCK(sc);
1130 	et_init_locked(sc);
1131 	ET_UNLOCK(sc);
1132 }
1133 
1134 static int
1135 et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1136 {
1137 	struct et_softc *sc = ifp->if_softc;
1138 	struct mii_data *mii = device_get_softc(sc->sc_miibus);
1139 	struct ifreq *ifr = (struct ifreq *)data;
1140 	int error = 0, max_framelen;
1141 
1142 /* XXX LOCKSUSED */
1143 	switch (cmd) {
1144 	case SIOCSIFFLAGS:
1145 		ET_LOCK(sc);
1146 		if (ifp->if_flags & IFF_UP) {
1147 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1148 				if ((ifp->if_flags ^ sc->sc_if_flags) &
1149 				(IFF_ALLMULTI | IFF_PROMISC | IFF_BROADCAST))
1150 					et_setmulti(sc);
1151 			} else {
1152 				et_init_locked(sc);
1153 			}
1154 		} else {
1155 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1156 				et_stop(sc);
1157 		}
1158 		sc->sc_if_flags = ifp->if_flags;
1159 		ET_UNLOCK(sc);
1160 		break;
1161 
1162 	case SIOCSIFMEDIA:
1163 	case SIOCGIFMEDIA:
1164 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1165 		break;
1166 
1167 	case SIOCADDMULTI:
1168 	case SIOCDELMULTI:
1169 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1170 			ET_LOCK(sc);
1171 			et_setmulti(sc);
1172 			ET_UNLOCK(sc);
1173 			error = 0;
1174 		}
1175 		break;
1176 
1177 	case SIOCSIFMTU:
1178 #if 0
1179 		if (sc->sc_flags & ET_FLAG_JUMBO)
1180 			max_framelen = ET_JUMBO_FRAMELEN;
1181 		else
1182 #endif
1183 			max_framelen = MCLBYTES - 1;
1184 
1185 		if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) {
1186 			error = EOPNOTSUPP;
1187 			break;
1188 		}
1189 
1190 		if (ifp->if_mtu != ifr->ifr_mtu) {
1191 			ifp->if_mtu = ifr->ifr_mtu;
1192 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1193 			et_init(sc);
1194 		}
1195 		break;
1196 
1197 	default:
1198 		error = ether_ioctl(ifp, cmd, data);
1199 		break;
1200 	}
1201 	return error;
1202 }
1203 
1204 static void
1205 et_start_locked(struct ifnet *ifp)
1206 {
1207 	struct et_softc *sc = ifp->if_softc;
1208 	struct et_txbuf_data *tbd;
1209 	int trans;
1210 
1211 	ET_LOCK_ASSERT(sc);
1212 	tbd = &sc->sc_tx_data;
1213 
1214 	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
1215 		return;
1216 
1217 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING)
1218 		return;
1219 
1220 	trans = 0;
1221 	for (;;) {
1222 		struct mbuf *m;
1223 
1224 		if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) {
1225 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1226 			break;
1227 		}
1228 
1229 		IFQ_DEQUEUE(&ifp->if_snd, m);
1230 		if (m == NULL)
1231 			break;
1232 
1233 		if (et_encap(sc, &m)) {
1234 			ifp->if_oerrors++;
1235 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1236 			break;
1237 		}
1238 		trans = 1;
1239 
1240 		BPF_MTAP(ifp, m);
1241 	}
1242 
1243 	if (trans)
1244 		sc->watchdog_timer = 5;
1245 }
1246 
1247 static void
1248 et_start(struct ifnet *ifp)
1249 {
1250 	struct et_softc *sc = ifp->if_softc;
1251 
1252 	ET_LOCK(sc);
1253 	et_start_locked(ifp);
1254 	ET_UNLOCK(sc);
1255 }
1256 
1257 static void
1258 et_watchdog(struct et_softc *sc)
1259 {
1260 	ET_LOCK_ASSERT(sc);
1261 
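	/*
	 * watchdog_timer is armed to 5 in et_start_locked() whenever frames
	 * are queued; assuming this routine is driven by the once-per-second
	 * et_tick() callout, reaching zero here means a transmit has been
	 * outstanding for roughly five seconds.
	 */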
1262 	if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
1263 		return;
1264 
1265 	if_printf(sc->ifp, "watchdog timed out\n");
1266 
1267 	et_init_locked(sc);
1268 	et_start_locked(sc->ifp);
1269 }
1270 
1271 static int
1272 et_stop_rxdma(struct et_softc *sc)
1273 {
1274 	CSR_WRITE_4(sc, ET_RXDMA_CTRL,
1275 		    ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);
1276 
1277 	DELAY(5);
1278 	if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
1279 		if_printf(sc->ifp, "can't stop RX DMA engine\n");
1280 		return ETIMEDOUT;
1281 	}
1282 	return 0;
1283 }
1284 
1285 static int
1286 et_stop_txdma(struct et_softc *sc)
1287 {
1288 	CSR_WRITE_4(sc, ET_TXDMA_CTRL,
1289 		    ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
1290 	return 0;
1291 }
1292 
1293 static void
1294 et_free_tx_ring(struct et_softc *sc)
1295 {
1296 	struct et_txbuf_data *tbd = &sc->sc_tx_data;
1297 	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1298 	int i;
1299 
1300 	for (i = 0; i < ET_TX_NDESC; ++i) {
1301 		struct et_txbuf *tb = &tbd->tbd_buf[i];
1302 
1303 		if (tb->tb_mbuf != NULL) {
1304 			bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap);
1305 			m_freem(tb->tb_mbuf);
1306 			tb->tb_mbuf = NULL;
1307 		}
1308 	}
1309 
1310 	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
1311 	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
1312 			BUS_DMASYNC_PREWRITE);
1313 }
1314 
1315 static void
1316 et_free_rx_ring(struct et_softc *sc)
1317 {
1318 	int n;
1319 
1320 	for (n = 0; n < ET_RX_NRING; ++n) {
1321 		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
1322 		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n];
1323 		int i;
1324 
1325 		for (i = 0; i < ET_RX_NDESC; ++i) {
1326 			struct et_rxbuf *rb = &rbd->rbd_buf[i];
1327 
1328 			if (rb->rb_mbuf != NULL) {
1329 				bus_dmamap_unload(sc->sc_mbuf_dtag,
1330 				    rb->rb_dmap);
1331 				m_freem(rb->rb_mbuf);
1332 				rb->rb_mbuf = NULL;
1333 			}
1334 		}
1335 
1336 		bzero(rx_ring->rr_desc, ET_RX_RING_SIZE);
1337 		bus_dmamap_sync(rx_ring->rr_dtag, rx_ring->rr_dmap,
1338 				BUS_DMASYNC_PREWRITE);
1339 	}
1340 }
1341 
1342 static void
1343 et_setmulti(struct et_softc *sc)
1344 {
1345 	struct ifnet *ifp;
1346 	uint32_t hash[4] = { 0, 0, 0, 0 };
1347 	uint32_t rxmac_ctrl, pktfilt;
1348 	struct ifmultiaddr *ifma;
1349 	int i, count;
1350 
1351 	ET_LOCK_ASSERT(sc);
1352 	ifp = sc->ifp;
1353 
1354 	pktfilt = CSR_READ_4(sc, ET_PKTFILT);
1355 	rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);
1356 
1357 	pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
1358 	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
1359 		rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
1360 		goto back;
1361 	}
1362 
1363 	count = 0;
1364 	IF_ADDR_LOCK(ifp);
1365 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1366 		uint32_t *hp, h;
1367 
1368 		if (ifma->ifma_addr->sa_family != AF_LINK)
1369 			continue;
1370 
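		/*
		 * The controller keeps a 128-bit multicast hash filter in
		 * four 32-bit registers; bits 29-23 of the big-endian CRC
		 * of the address pick which of the 128 bits to set.
		 */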
1371 		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
1372 				   ifma->ifma_addr), ETHER_ADDR_LEN);
1373 		h = (h & 0x3f800000) >> 23;
1374 
1375 		hp = &hash[0];
1376 		if (h >= 32 && h < 64) {
1377 			h -= 32;
1378 			hp = &hash[1];
1379 		} else if (h >= 64 && h < 96) {
1380 			h -= 64;
1381 			hp = &hash[2];
1382 		} else if (h >= 96) {
1383 			h -= 96;
1384 			hp = &hash[3];
1385 		}
1386 		*hp |= (1 << h);
1387 
1388 		++count;
1389 	}
1390 	IF_ADDR_UNLOCK(ifp);
1391 
1392 	for (i = 0; i < 4; ++i)
1393 		CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);
1394 
1395 	if (count > 0)
1396 		pktfilt |= ET_PKTFILT_MCAST;
1397 	rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
1398 back:
1399 	CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
1400 	CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
1401 }
1402 
1403 static int
1404 et_chip_init(struct et_softc *sc)
1405 {
1406 	struct ifnet *ifp = sc->ifp;
1407 	uint32_t rxq_end;
1408 	int error, frame_len, rxmem_size;
1409 
1410 	/*
1411 	 * Split 16Kbytes internal memory between TX and RX
1412 	 * according to frame length.
1413 	 */
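	/*
	 * Example: with the default 1500-byte MTU, ET_FRAMELEN() stays well
	 * below 2048, so the RX queue keeps ET_MEM_RXSIZE_DEFAULT and TX
	 * gets the remainder of the 16KB queue memory.
	 */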
1414 	frame_len = ET_FRAMELEN(ifp->if_mtu);
1415 	if (frame_len < 2048) {
1416 		rxmem_size = ET_MEM_RXSIZE_DEFAULT;
1417 	} else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) {
1418 		rxmem_size = ET_MEM_SIZE / 2;
1419 	} else {
1420 		rxmem_size = ET_MEM_SIZE -
1421 		roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT);
1422 	}
1423 	rxq_end = ET_QUEUE_ADDR(rxmem_size);
1424 
1425 	CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START);
1426 	CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end);
1427 	CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1);
1428 	CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END);
1429 
1430 	/* No loopback */
1431 	CSR_WRITE_4(sc, ET_LOOPBACK, 0);
1432 
1433 	/* Clear MSI configure */
1434 	CSR_WRITE_4(sc, ET_MSI_CFG, 0);
1435 
1436 	/* Disable timer */
1437 	CSR_WRITE_4(sc, ET_TIMER, 0);
1438 
1439 	/* Initialize MAC */
1440 	et_init_mac(sc);
1441 
1442 	/* Enable memory controllers */
1443 	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
1444 
1445 	/* Initialize RX MAC */
1446 	et_init_rxmac(sc);
1447 
1448 	/* Initialize TX MAC */
1449 	et_init_txmac(sc);
1450 
1451 	/* Initialize RX DMA engine */
1452 	error = et_init_rxdma(sc);
1453 	if (error)
1454 		return error;
1455 
1456 	/* Initialize TX DMA engine */
1457 	error = et_init_txdma(sc);
1458 	if (error)
1459 		return error;
1460 
1461 	return 0;
1462 }
1463 
1464 static int
1465 et_init_tx_ring(struct et_softc *sc)
1466 {
1467 	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1468 	struct et_txstatus_data *txsd = &sc->sc_tx_status;
1469 	struct et_txbuf_data *tbd = &sc->sc_tx_data;
1470 
1471 	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
1472 	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
1473 			BUS_DMASYNC_PREWRITE);
1474 
1475 	tbd->tbd_start_index = 0;
1476 	tbd->tbd_start_wrap = 0;
1477 	tbd->tbd_used = 0;
1478 
1479 	bzero(txsd->txsd_status, sizeof(uint32_t));
1480 	bus_dmamap_sync(txsd->txsd_dtag, txsd->txsd_dmap,
1481 			BUS_DMASYNC_PREWRITE);
1482 	return 0;
1483 }
1484 
1485 static int
1486 et_init_rx_ring(struct et_softc *sc)
1487 {
1488 	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
1489 	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
1490 	int n;
1491 
1492 	for (n = 0; n < ET_RX_NRING; ++n) {
1493 		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
1494 		int i, error;
1495 
1496 		for (i = 0; i < ET_RX_NDESC; ++i) {
1497 			error = rbd->rbd_newbuf(rbd, i, 1);
1498 			if (error) {
1499 				if_printf(sc->ifp, "ring %d buf %d, "
1500 					  "newbuf failed: %d\n", n, i, error);
1501 				return error;
1502 			}
1503 		}
1504 	}
1505 
1506 	bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
1507 	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
1508 			BUS_DMASYNC_PREWRITE);
1509 
1510 	bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
1511 	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
1512 			BUS_DMASYNC_PREWRITE);
1513 
1514 	return 0;
1515 }
1516 
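/*
 * bus_dmamap_load() callback used for TX mbuf mappings: it copies the
 * segment array into the caller-supplied context and signals "too many
 * segments" by zeroing ctx->nsegs, which et_encap() then turns into EFBIG.
 */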
1517 static void
1518 et_dma_buf_addr(void *xctx, bus_dma_segment_t *segs, int nsegs,
1519 		bus_size_t mapsz __unused, int error)
1520 {
1521 	struct et_dmamap_ctx *ctx = xctx;
1522 	int i;
1523 
1524 	if (error)
1525 		return;
1526 
1527 	if (nsegs > ctx->nsegs) {
1528 		ctx->nsegs = 0;
1529 		return;
1530 	}
1531 
1532 	ctx->nsegs = nsegs;
1533 	for (i = 0; i < nsegs; ++i)
1534 		ctx->segs[i] = segs[i];
1535 }
1536 
1537 static int
1538 et_init_rxdma(struct et_softc *sc)
1539 {
1540 	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
1541 	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
1542 	struct et_rxdesc_ring *rx_ring;
1543 	int error;
1544 
1545 	error = et_stop_rxdma(sc);
1546 	if (error) {
1547 		if_printf(sc->ifp, "can't init RX DMA engine\n");
1548 		return error;
1549 	}
1550 
1551 	/*
1552 	 * Install RX status
1553 	 */
1554 	CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
1555 	CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));
1556 
1557 	/*
1558 	 * Install RX stat ring
1559 	 */
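	/*
	 * The *_MINCNT writes below set watermarks to roughly 15% of the
	 * ring size; presumably the chip flags a low-resource condition
	 * once fewer entries than that remain available.
	 */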
1560 	CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
1561 	CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
1562 	CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
1563 	CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
1564 	CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);
1565 
1566 	/* Match ET_RXSTAT_POS */
1567 	rxst_ring->rsr_index = 0;
1568 	rxst_ring->rsr_wrap = 0;
1569 
1570 	/*
1571 	 * Install the 2nd RX descriptor ring
1572 	 */
1573 	rx_ring = &sc->sc_rx_ring[1];
1574 	CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
1575 	CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
1576 	CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
1577 	CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
1578 	CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);
1579 
1580 	/* Match ET_RX_RING1_POS */
1581 	rx_ring->rr_index = 0;
1582 	rx_ring->rr_wrap = 1;
1583 
1584 	/*
1585 	 * Install the 1st RX descriptor ring
1586 	 */
1587 	rx_ring = &sc->sc_rx_ring[0];
1588 	CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
1589 	CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
1590 	CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
1591 	CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
1592 	CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);
1593 
1594 	/* Match ET_RX_RING0_POS */
1595 	rx_ring->rr_index = 0;
1596 	rx_ring->rr_wrap = 1;
1597 
1598 	/*
1599 	 * RX intr moderation
1600 	 */
1601 	CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
1602 	CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);
1603 
1604 	return 0;
1605 }
1606 
1607 static int
1608 et_init_txdma(struct et_softc *sc)
1609 {
1610 	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1611 	struct et_txstatus_data *txsd = &sc->sc_tx_status;
1612 	int error;
1613 
1614 	error = et_stop_txdma(sc);
1615 	if (error) {
1616 		if_printf(sc->ifp, "can't init TX DMA engine\n");
1617 		return error;
1618 	}
1619 
1620 	/*
1621 	 * Install TX descriptor ring
1622 	 */
1623 	CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
1624 	CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
1625 	CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);
1626 
1627 	/*
1628 	 * Install TX status
1629 	 */
1630 	CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
1631 	CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));
1632 
1633 	CSR_WRITE_4(sc, ET_TX_READY_POS, 0);
1634 
1635 	/* Match ET_TX_READY_POS */
1636 	tx_ring->tr_ready_index = 0;
1637 	tx_ring->tr_ready_wrap = 0;
1638 
1639 	return 0;
1640 }
1641 
1642 static void
1643 et_init_mac(struct et_softc *sc)
1644 {
1645 	struct ifnet *ifp = sc->ifp;
1646 	const uint8_t *eaddr = IF_LLADDR(ifp);
1647 	uint32_t val;
1648 
1649 	/* Reset MAC */
1650 	CSR_WRITE_4(sc, ET_MAC_CFG1,
1651 		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
1652 		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
1653 		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
1654 
1655 	/*
1656 	 * Setup inter packet gap
1657 	 */
1658 	val = __SHIFTIN(56, ET_IPG_NONB2B_1) |
1659 	      __SHIFTIN(88, ET_IPG_NONB2B_2) |
1660 	      __SHIFTIN(80, ET_IPG_MINIFG) |
1661 	      __SHIFTIN(96, ET_IPG_B2B);
1662 	CSR_WRITE_4(sc, ET_IPG, val);
1663 
1664 	/*
1665 	 * Setup half duplex mode
1666 	 */
1667 	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
1668 	      __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
1669 	      __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
1670 	      ET_MAC_HDX_EXC_DEFER;
1671 	CSR_WRITE_4(sc, ET_MAC_HDX, val);
1672 
1673 	/* Clear MAC control */
1674 	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);
1675 
1676 	/* Reset MII */
1677 	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);
1678 
1679 	/*
1680 	 * Set MAC address
1681 	 */
1682 	val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
1683 	CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
1684 	val = (eaddr[0] << 16) | (eaddr[1] << 24);
1685 	CSR_WRITE_4(sc, ET_MAC_ADDR2, val);
1686 
1687 	/* Set max frame length */
1688 	CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(ifp->if_mtu));
1689 
1690 	/* Bring MAC out of reset state */
1691 	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
1692 }
1693 
1694 static void
1695 et_init_rxmac(struct et_softc *sc)
1696 {
1697 	struct ifnet *ifp = sc->ifp;
1698 	const uint8_t *eaddr = IF_LLADDR(ifp);
1699 	uint32_t val;
1700 	int i;
1701 
1702 	/* Disable RX MAC and WOL */
1703 	CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);
1704 
1705 	/*
1706 	 * Clear all WOL related registers
1707 	 */
1708 	for (i = 0; i < 3; ++i)
1709 		CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
1710 	for (i = 0; i < 20; ++i)
1711 		CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);
1712 
1713 	/*
1714 	 * Set WOL source address.  XXX is this necessary?
1715 	 */
1716 	val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
1717 	CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
1718 	val = (eaddr[0] << 8) | eaddr[1];
1719 	CSR_WRITE_4(sc, ET_WOL_SA_HI, val);
1720 
1721 	/* Clear packet filters */
1722 	CSR_WRITE_4(sc, ET_PKTFILT, 0);
1723 
1724 	/* No ucast filtering */
1725 	CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
1726 	CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
1727 	CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);
1728 
1729 	if (ET_FRAMELEN(ifp->if_mtu) > ET_RXMAC_CUT_THRU_FRMLEN) {
1730 		/*
1731 		 * In order to receive jumbo packets greater than
1732 		 * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between
1733 		 * RX MAC and RX DMA needs to be reduced in size to
1734 		 * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen).  In
1735 		 * order to implement this, we must use "cut through"
1736 		 * mode in the RX MAC, which chops packets down into
1737 		 * segments.  In this case we selected 256 bytes,
1738 		 * since this is the size of the PCI-Express TLP's
1739 		 * since this is the size of the PCI-Express TLPs
1740 		 */
1741 		val = __SHIFTIN(ET_RXMAC_SEGSZ(256), ET_RXMAC_MC_SEGSZ_MAX) |
1742 		      ET_RXMAC_MC_SEGSZ_ENABLE;
1743 	} else {
1744 		val = 0;
1745 	}
1746 	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);
1747 
1748 	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);
1749 
1750 	/* Initialize RX MAC management register */
1751 	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);
1752 
1753 	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);
1754 
1755 	CSR_WRITE_4(sc, ET_RXMAC_MGT,
1756 		    ET_RXMAC_MGT_PASS_ECRC |
1757 		    ET_RXMAC_MGT_PASS_ELEN |
1758 		    ET_RXMAC_MGT_PASS_ETRUNC |
1759 		    ET_RXMAC_MGT_CHECK_PKT);
1760 
1761 	/*
1762 	 * Configure runt filtering (may not work on certain chip generations)
1763 	 */
1764 	val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG;
1765 	CSR_WRITE_4(sc, ET_PKTFILT, val);
1766 
1767 	/* Enable RX MAC but leave WOL disabled */
1768 	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
1769 		    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);
1770 
1771 	/*
1772 	 * Setup multicast hash and allmulti/promisc mode
1773 	 */
1774 	et_setmulti(sc);
1775 }
1776 
1777 static void
1778 et_init_txmac(struct et_softc *sc)
1779 {
1780 	/* Disable TX MAC and FC(?) */
1781 	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);
1782 
1783 	/* No flow control yet */
1784 	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);
1785 
1786 	/* Enable TX MAC but leave FC(?) disabled */
1787 	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
1788 		    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
1789 }
1790 
1791 static int
1792 et_start_rxdma(struct et_softc *sc)
1793 {
1794 	uint32_t val = 0;
1795 
1796 	val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize,
1797 			 ET_RXDMA_CTRL_RING0_SIZE) |
1798 	       ET_RXDMA_CTRL_RING0_ENABLE;
1799 	val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize,
1800 			 ET_RXDMA_CTRL_RING1_SIZE) |
1801 	       ET_RXDMA_CTRL_RING1_ENABLE;
1802 
1803 	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);
1804 
1805 	DELAY(5);
1806 
1807 	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
1808 		if_printf(sc->ifp, "can't start RX DMA engine\n");
1809 		return ETIMEDOUT;
1810 	}
1811 	return 0;
1812 }
1813 
1814 static int
1815 et_start_txdma(struct et_softc *sc)
1816 {
1817 	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
1818 	return 0;
1819 }
1820 
1821 static int
1822 et_enable_txrx(struct et_softc *sc, int media_upd)
1823 {
1824 	struct ifnet *ifp = sc->ifp;
1825 	uint32_t val;
1826 	int i, error;
1827 
1828 	val = CSR_READ_4(sc, ET_MAC_CFG1);
1829 	val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
1830 	val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
1831 		 ET_MAC_CFG1_LOOPBACK);
1832 	CSR_WRITE_4(sc, ET_MAC_CFG1, val);
1833 
1834 	if (media_upd)
1835 		et_ifmedia_upd_locked(ifp);
1836 	else
1837 		et_setmedia(sc);
1838 
1839 #define NRETRY	50
1840 
1841 	for (i = 0; i < NRETRY; ++i) {
1842 		val = CSR_READ_4(sc, ET_MAC_CFG1);
1843 		if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
1844 		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
1845 			break;
1846 
1847 		DELAY(100);
1848 	}
1849 	if (i == NRETRY) {
1850 		if_printf(ifp, "can't enable RX/TX\n");
1851 		return 0;
1852 	}
1853 	sc->sc_flags |= ET_FLAG_TXRX_ENABLED;
1854 
1855 #undef NRETRY
1856 
1857 	/*
1858 	 * Start TX/RX DMA engine
1859 	 */
1860 	error = et_start_rxdma(sc);
1861 	if (error)
1862 		return error;
1863 
1864 	error = et_start_txdma(sc);
1865 	if (error)
1866 		return error;
1867 
1868 	return 0;
1869 }
1870 
1871 static void
1872 et_rxeof(struct et_softc *sc)
1873 {
1874 	struct ifnet *ifp;
1875 	struct et_rxstatus_data *rxsd;
1876 	struct et_rxstat_ring *rxst_ring;
1877 	uint32_t rxs_stat_ring;
1878 	int rxst_wrap, rxst_index;
1879 
1880 	ET_LOCK_ASSERT(sc);
1881 	ifp = sc->ifp;
1882 	rxsd = &sc->sc_rx_status;
1883 	rxst_ring = &sc->sc_rxstat_ring;
1884 
1885 	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
1886 		return;
1887 
1888 	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
1889 			BUS_DMASYNC_POSTREAD);
1890 	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
1891 			BUS_DMASYNC_POSTREAD);
1892 
1893 	rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring;
1894 	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
1895 	rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX);
1896 
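	/*
	 * Walk the RX status ring until the driver's index/wrap pair catches
	 * up with the position reported by the hardware in the RX status
	 * block; the wrap bit distinguishes a full lap of the ring from an
	 * empty one.
	 */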
1897 	while (rxst_index != rxst_ring->rsr_index ||
1898 	       rxst_wrap != rxst_ring->rsr_wrap) {
1899 		struct et_rxbuf_data *rbd;
1900 		struct et_rxdesc_ring *rx_ring;
1901 		struct et_rxstat *st;
1902 		struct mbuf *m;
1903 		int buflen, buf_idx, ring_idx;
1904 		uint32_t rxstat_pos, rxring_pos;
1905 
1906 		MPASS(rxst_ring->rsr_index < ET_RX_NSTAT);
1907 		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];
1908 
1909 		buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN);
1910 		buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX);
1911 		ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX);
1912 
1913 		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
1914 			rxst_ring->rsr_index = 0;
1915 			rxst_ring->rsr_wrap ^= 1;
1916 		}
1917 		rxstat_pos = __SHIFTIN(rxst_ring->rsr_index,
1918 				       ET_RXSTAT_POS_INDEX);
1919 		if (rxst_ring->rsr_wrap)
1920 			rxstat_pos |= ET_RXSTAT_POS_WRAP;
1921 		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);
1922 
1923 		if (ring_idx >= ET_RX_NRING) {
1924 			ifp->if_ierrors++;
1925 			if_printf(ifp, "invalid ring index %d\n", ring_idx);
1926 			continue;
1927 		}
1928 		if (buf_idx >= ET_RX_NDESC) {
1929 			ifp->if_ierrors++;
1930 			if_printf(ifp, "invalid buf index %d\n", buf_idx);
1931 			continue;
1932 		}
1933 
1934 		rbd = &sc->sc_rx_data[ring_idx];
1935 		m = rbd->rbd_buf[buf_idx].rb_mbuf;
1936 
1937 		if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
1938 			if (buflen < ETHER_CRC_LEN) {
1939 				m_freem(m);
1940 				m = NULL;
1941 				ifp->if_ierrors++;
1942 			} else {
1943 				m->m_pkthdr.len = m->m_len = buflen;
1944 				m->m_pkthdr.rcvif = ifp;
1945 
1946 				m_adj(m, -ETHER_CRC_LEN);
1947 
1948 				ifp->if_ipackets++;
1949 				ET_UNLOCK(sc);
1950 				ifp->if_input(ifp, m);
1951 				ET_LOCK(sc);
1952 			}
1953 		} else {
1954 			ifp->if_ierrors++;
1955 		}
1956 		m = NULL;	/* Catch invalid reference */
1957 
1958 		rx_ring = &sc->sc_rx_ring[ring_idx];
1959 
1960 		if (buf_idx != rx_ring->rr_index) {
1961 			if_printf(ifp, "WARNING!! ring %d, "
1962 				  "buf_idx %d, rr_idx %d\n",
1963 				  ring_idx, buf_idx, rx_ring->rr_index);
1964 		}
1965 
1966 		MPASS(rx_ring->rr_index < ET_RX_NDESC);
1967 		if (++rx_ring->rr_index == ET_RX_NDESC) {
1968 			rx_ring->rr_index = 0;
1969 			rx_ring->rr_wrap ^= 1;
1970 		}
1971 		rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX);
1972 		if (rx_ring->rr_wrap)
1973 			rxring_pos |= ET_RX_RING_POS_WRAP;
1974 		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
1975 	}
1976 }
1977 
1978 static int
1979 et_encap(struct et_softc *sc, struct mbuf **m0)
1980 {
1981 	struct mbuf *m = *m0;
1982 	bus_dma_segment_t segs[ET_NSEG_MAX];
1983 	struct et_dmamap_ctx ctx;
1984 	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
1985 	struct et_txbuf_data *tbd = &sc->sc_tx_data;
1986 	struct et_txdesc *td;
1987 	bus_dmamap_t map;
1988 	int error, maxsegs, first_idx, last_idx, i;
1989 	uint32_t tx_ready_pos, last_td_ctrl2;
1990 
1991 	maxsegs = ET_TX_NDESC - tbd->tbd_used;
1992 	if (maxsegs > ET_NSEG_MAX)
1993 		maxsegs = ET_NSEG_MAX;
1994 	KASSERT(maxsegs >= ET_NSEG_SPARE,
1995 		("not enough spare TX desc (%d)\n", maxsegs));
1996 
1997 	MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
1998 	first_idx = tx_ring->tr_ready_index;
1999 	map = tbd->tbd_buf[first_idx].tb_dmap;
2000 
2001 	ctx.nsegs = maxsegs;
2002 	ctx.segs = segs;
2003 	error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, map, m,
2004 				     et_dma_buf_addr, &ctx, BUS_DMA_NOWAIT);
2005 	if (!error && ctx.nsegs == 0) {
2006 		bus_dmamap_unload(sc->sc_mbuf_dtag, map);
2007 		error = EFBIG;
2008 	}
2009 	if (error && error != EFBIG) {
2010 		if_printf(sc->ifp, "can't load TX mbuf, error %d\n",
2011 			  error);
2012 		goto back;
2013 	}
2014 	if (error) {	/* error == EFBIG */
2015 		struct mbuf *m_new;
2016 
2017 		m_new = m_defrag(m, M_DONTWAIT);
2018 		if (m_new == NULL) {
2019 			if_printf(sc->ifp, "can't defrag TX mbuf\n");
2020 			error = ENOBUFS;
2021 			goto back;
2022 		} else {
2023 			*m0 = m = m_new;
2024 		}
2025 
2026 		ctx.nsegs = maxsegs;
2027 		ctx.segs = segs;
2028 		error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, map, m,
2029 					     et_dma_buf_addr, &ctx,
2030 					     BUS_DMA_NOWAIT);
2031 		if (error || ctx.nsegs == 0) {
2032 			if (ctx.nsegs == 0) {
2033 				bus_dmamap_unload(sc->sc_mbuf_dtag, map);
2034 				error = EFBIG;
2035 			}
2036 			if_printf(sc->ifp,
2037 				  "can't load defragmented TX mbuf\n");
2038 			goto back;
2039 		}
2040 	}
2041 
2042 	bus_dmamap_sync(sc->sc_mbuf_dtag, map, BUS_DMASYNC_PREWRITE);
2043 
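	/*
	 * TX interrupt moderation: sc_tx counts all segments queued so
	 * far, and a completion interrupt is requested only when that
	 * count crosses another multiple of sc_tx_intr_nsegs.
	 */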
2044 	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
2045 	sc->sc_tx += ctx.nsegs;
2046 	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
2047 		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
2048 		last_td_ctrl2 |= ET_TDCTRL2_INTR;
2049 	}
2050 
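	/*
	 * Fill one TX descriptor per DMA segment.  The last segment gets
	 * the LAST_FRAG (and possibly INTR) bits; the ready index is
	 * advanced, with its wrap flag, as descriptors are consumed.
	 */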
2051 	last_idx = -1;
2052 	for (i = 0; i < ctx.nsegs; ++i) {
2053 		int idx;
2054 
2055 		idx = (first_idx + i) % ET_TX_NDESC;
2056 		td = &tx_ring->tr_desc[idx];
2057 		td->td_addr_hi = ET_ADDR_HI(segs[i].ds_addr);
2058 		td->td_addr_lo = ET_ADDR_LO(segs[i].ds_addr);
2059 		td->td_ctrl1 = __SHIFTIN(segs[i].ds_len, ET_TDCTRL1_LEN);
2060 
2061 		if (i == ctx.nsegs - 1) {	/* Last frag */
2062 			td->td_ctrl2 = last_td_ctrl2;
2063 			last_idx = idx;
2064 		}
2065 
2066 		MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
2067 		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
2068 			tx_ring->tr_ready_index = 0;
2069 			tx_ring->tr_ready_wrap ^= 1;
2070 		}
2071 	}
2072 	td = &tx_ring->tr_desc[first_idx];
2073 	td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG;	/* First frag */
2074 
2075 	MPASS(last_idx >= 0);
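	/*
	 * The DMA map loaded above started out attached to the first
	 * descriptor slot.  Park it on the last slot together with the
	 * mbuf, where et_txeof() will unload it, and move the map that
	 * used to live there to the first slot for later reuse.
	 */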
2076 	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
2077 	tbd->tbd_buf[last_idx].tb_dmap = map;
2078 	tbd->tbd_buf[last_idx].tb_mbuf = m;
2079 
2080 	tbd->tbd_used += ctx.nsegs;
2081 	MPASS(tbd->tbd_used <= ET_TX_NDESC);
2082 
2083 	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
2084 			BUS_DMASYNC_PREWRITE);
2085 
2086 	tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index,
2087 		       ET_TX_READY_POS_INDEX);
2088 	if (tx_ring->tr_ready_wrap)
2089 		tx_ready_pos |= ET_TX_READY_POS_WRAP;
2090 	CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);
2091 
2092 	error = 0;
2093 back:
2094 	if (error) {
2095 		m_freem(m);
2096 		*m0 = NULL;
2097 	}
2098 	return error;
2099 }
2100 
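/*
 * Reclaim completed TX descriptors.  The hardware done position
 * (ET_TX_DONE_POS) is compared against the software start index and
 * wrap flag; every descriptor in between is cleared and its mbuf, if
 * any, unloaded and freed.  Once enough slots are free the OACTIVE
 * flag is cleared and transmission is restarted.
 */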
2101 static void
2102 et_txeof(struct et_softc *sc)
2103 {
2104 	struct ifnet *ifp;
2105 	struct et_txdesc_ring *tx_ring;
2106 	struct et_txbuf_data *tbd;
2107 	uint32_t tx_done;
2108 	int end, wrap;
2109 
2110 	ET_LOCK_ASSERT(sc);
2111 	ifp = sc->ifp;
2112 	tx_ring = &sc->sc_tx_ring;
2113 	tbd = &sc->sc_tx_data;
2114 
2115 	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
2116 		return;
2117 
2118 	if (tbd->tbd_used == 0)
2119 		return;
2120 
2121 	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
2122 	end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX);
2123 	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;
2124 
2125 	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
2126 		struct et_txbuf *tb;
2127 
2128 		MPASS(tbd->tbd_start_index < ET_TX_NDESC);
2129 		tb = &tbd->tbd_buf[tbd->tbd_start_index];
2130 
2131 		bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
2132 		      sizeof(struct et_txdesc));
2133 		bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
2134 				BUS_DMASYNC_PREWRITE);
2135 
2136 		if (tb->tb_mbuf != NULL) {
2137 			bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap);
2138 			m_freem(tb->tb_mbuf);
2139 			tb->tb_mbuf = NULL;
2140 			ifp->if_opackets++;
2141 		}
2142 
2143 		if (++tbd->tbd_start_index == ET_TX_NDESC) {
2144 			tbd->tbd_start_index = 0;
2145 			tbd->tbd_start_wrap ^= 1;
2146 		}
2147 
2148 		MPASS(tbd->tbd_used > 0);
2149 		tbd->tbd_used--;
2150 	}
2151 
2152 	if (tbd->tbd_used == 0)
2153 		sc->watchdog_timer = 0;
2154 	if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
2155 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2156 
2157 	et_start_locked(ifp);
2158 }
2159 
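/*
 * Once-a-second callout: poll the PHY via mii_tick(), enable the MAC
 * TX/RX paths when the link first comes up, run the TX watchdog and
 * re-arm the callout.
 */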
2160 static void
2161 et_tick(void *xsc)
2162 {
2163 	struct et_softc *sc = xsc;
2164 	struct ifnet *ifp;
2165 	struct mii_data *mii;
2166 
2167 	ET_LOCK_ASSERT(sc);
2168 	ifp = sc->ifp;
2169 	mii = device_get_softc(sc->sc_miibus);
2170 
2171 	mii_tick(mii);
2172 	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0 &&
2173 	    (mii->mii_media_status & IFM_ACTIVE) &&
2174 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
2175 		if_printf(ifp, "Link up, enable TX/RX\n");
2176 		if (et_enable_txrx(sc, 0) == 0)
2177 			et_start_locked(ifp);
2178 	}
2179 	et_watchdog(sc);
2180 	callout_reset(&sc->sc_tick, hz, et_tick, sc);
2181 }
2182 
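/*
 * RX buffer allocation.  The two RX rings are refilled with different
 * buffer sizes: full clusters (MCLBYTES) versus small MHLEN buffers.
 * et_newbuf() allocates the mbuf, loads it through the temporary DMA
 * map and, on success, swaps that map with the slot's map.  On a
 * failure after initialization the descriptor is rewritten with the
 * old buffer's address, so the previous buffer simply stays in place.
 */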
2183 static int
2184 et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init)
2185 {
2186 	return et_newbuf(rbd, buf_idx, init, MCLBYTES);
2187 }
2188 
2189 static int
2190 et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init)
2191 {
2192 	return et_newbuf(rbd, buf_idx, init, MHLEN);
2193 }
2194 
2195 static int
2196 et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0)
2197 {
2198 	struct et_softc *sc = rbd->rbd_softc;
2199 	struct et_rxbuf *rb;
2200 	struct mbuf *m;
2201 	struct et_dmamap_ctx ctx;
2202 	bus_dma_segment_t seg;
2203 	bus_dmamap_t dmap;
2204 	int error, len;
2205 
2206 	MPASS(buf_idx < ET_RX_NDESC);
2207 	rb = &rbd->rbd_buf[buf_idx];
2208 
2209 	m = m_getl(len0, /* init ? M_WAIT :*/ M_DONTWAIT, MT_DATA, M_PKTHDR, &len);
2210 	if (m == NULL) {
2211 		error = ENOBUFS;
2212 
2213 		if (init) {
2214 			if_printf(sc->ifp,
2215 				  "m_getl failed, size %d\n", len0);
2216 			return error;
2217 		} else {
2218 			goto back;
2219 		}
2220 	}
2221 	m->m_len = m->m_pkthdr.len = len;
2222 
2223 	/*
2224 	 * Try to load the RX mbuf using the temporary DMA map.
2225 	 */
2226 	ctx.nsegs = 1;
2227 	ctx.segs = &seg;
2228 	error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, sc->sc_mbuf_tmp_dmap, m,
2229 				     et_dma_buf_addr, &ctx,
2230 				     init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
2231 	if (error || ctx.nsegs == 0) {
2232 		if (!error) {
2233 			bus_dmamap_unload(sc->sc_mbuf_dtag,
2234 					  sc->sc_mbuf_tmp_dmap);
2235 			error = EFBIG;
2236 			if_printf(sc->ifp, "too many segments?!\n");
2237 		}
2238 		m_freem(m);
2239 		m = NULL;
2240 
2241 		if (init) {
2242 			if_printf(sc->ifp, "can't load RX mbuf\n");
2243 			return error;
2244 		} else {
2245 			goto back;
2246 		}
2247 	}
2248 
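	/*
	 * The new buffer loaded successfully; sync and unload the
	 * previous buffer's map (none exists during initial setup)
	 * before recording the new mbuf and swapping DMA maps below.
	 */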
2249 	if (!init) {
2250 		bus_dmamap_sync(sc->sc_mbuf_dtag, rb->rb_dmap,
2251 				BUS_DMASYNC_POSTREAD);
2252 		bus_dmamap_unload(sc->sc_mbuf_dtag, rb->rb_dmap);
2253 	}
2254 	rb->rb_mbuf = m;
2255 	rb->rb_paddr = seg.ds_addr;
2256 
2257 	/*
2258 	 * Swap RX buf's DMA map with the loaded temporary one
2259 	 */
2260 	dmap = rb->rb_dmap;
2261 	rb->rb_dmap = sc->sc_mbuf_tmp_dmap;
2262 	sc->sc_mbuf_tmp_dmap = dmap;
2263 
2264 	error = 0;
2265 back:
2266 	et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr);
2267 	return error;
2268 }
2269 
2270 /*
2271  * Create sysctl tree
2272  */
2273 static void
2274 et_add_sysctls(struct et_softc *sc)
2275 {
2276 	struct sysctl_ctx_list *ctx;
2277 	struct sysctl_oid_list *children;
2278 
2279 	ctx = device_get_sysctl_ctx(sc->dev);
2280 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
2281 
2282 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_npkts",
2283 	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_npkts, "I",
2284 	    "RX IM, # packets per RX interrupt");
2285 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_delay",
2286 	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_delay, "I",
2287 	    "RX IM, RX interrupt delay (x10 usec)");
2288 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_intr_nsegs",
2289 	    CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0,
2290 	    "TX IM, # segments per TX interrupt");
2291 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "timer",
2292 	    CTLFLAG_RW, &sc->sc_timer, 0, "TX timer");
2293 }
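/*
 * The knobs above appear under the device's sysctl tree, for example
 * (unit number and values are illustrative only):
 *
 *	sysctl dev.et.0.rx_intr_npkts=128
 *	sysctl dev.et.0.tx_intr_nsegs=256
 */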
2294 
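/*
 * Sysctl handlers for the RX interrupt moderation knobs: read the
 * current value, let sysctl_handle_int() perform the user copy,
 * reject non-positive values, and push the new setting to the NIC
 * register only while the interface is running.
 */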
2295 static int
2296 et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS)
2297 {
2298 	struct et_softc *sc = arg1;
2299 	struct ifnet *ifp = sc->ifp;
2300 	int error = 0, v;
2301 
2302 	v = sc->sc_rx_intr_npkts;
2303 	error = sysctl_handle_int(oidp, &v, 0, req);
2304 	if (error || req->newptr == NULL)
2305 		goto back;
2306 	if (v <= 0) {
2307 		error = EINVAL;
2308 		goto back;
2309 	}
2310 
2311 	if (sc->sc_rx_intr_npkts != v) {
2312 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2313 			CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v);
2314 		sc->sc_rx_intr_npkts = v;
2315 	}
2316 back:
2317 	return error;
2318 }
2319 
2320 static int
2321 et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS)
2322 {
2323 	struct et_softc *sc = arg1;
2324 	struct ifnet *ifp = sc->ifp;
2325 	int error = 0, v;
2326 
2327 	v = sc->sc_rx_intr_delay;
2328 	error = sysctl_handle_int(oidp, &v, 0, req);
2329 	if (error || req->newptr == NULL)
2330 		goto back;
2331 	if (v <= 0) {
2332 		error = EINVAL;
2333 		goto back;
2334 	}
2335 
2336 	if (sc->sc_rx_intr_delay != v) {
2337 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2338 			CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v);
2339 		sc->sc_rx_intr_delay = v;
2340 	}
2341 back:
2342 	return error;
2343 }
2344 
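/*
 * Program MAC_CFG2/MAC_CTRL to match the media resolved by the PHY:
 * GMII mode for 1000baseT, MII mode otherwise, and full versus half
 * duplex according to the negotiated duplex setting.
 */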
2345 static void
2346 et_setmedia(struct et_softc *sc)
2347 {
2348 	struct mii_data *mii = device_get_softc(sc->sc_miibus);
2349 	uint32_t cfg2, ctrl;
2350 
2351 	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
2352 	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
2353 		  ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
2354 	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
2355 		__SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN);
2356 
2357 	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
2358 	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);
2359 
2360 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
2361 		cfg2 |= ET_MAC_CFG2_MODE_GMII;
2362 	} else {
2363 		cfg2 |= ET_MAC_CFG2_MODE_MII;
2364 		ctrl |= ET_MAC_CTRL_MODE_MII;
2365 	}
2366 
2367 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
2368 		cfg2 |= ET_MAC_CFG2_FDX;
2369 	else
2370 		ctrl |= ET_MAC_CTRL_GHDX;
2371 
2372 	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
2373 	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
2374 }
2375 
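/*
 * Write one RX descriptor: the buffer's physical address split into
 * high/low 32-bit words plus the buffer index, then sync the
 * descriptor ring so the device sees the update.
 */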
2376 static void
2377 et_setup_rxdesc(struct et_rxbuf_data *rbd, int buf_idx, bus_addr_t paddr)
2378 {
2379 	struct et_rxdesc_ring *rx_ring = rbd->rbd_ring;
2380 	struct et_rxdesc *desc;
2381 
2382 	MPASS(buf_idx < ET_RX_NDESC);
2383 	desc = &rx_ring->rr_desc[buf_idx];
2384 
2385 	desc->rd_addr_hi = ET_ADDR_HI(paddr);
2386 	desc->rd_addr_lo = ET_ADDR_LO(paddr);
2387 	desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX);
2388 
2389 	bus_dmamap_sync(rx_ring->rr_dtag, rx_ring->rr_dmap,
2390 			BUS_DMASYNC_PREWRITE);
2391 }
2392