/*	$OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $	*/

/*-
 * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/nfe/if_nfereg.h>
#include <dev/nfe/if_nfevar.h>

MODULE_DEPEND(nfe, pci, 1, 1, 1);
MODULE_DEPEND(nfe, ether, 1, 1, 1);
MODULE_DEPEND(nfe, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

static int  nfe_probe(device_t);
static int  nfe_attach(device_t);
static int  nfe_detach(device_t);
static int  nfe_suspend(device_t);
static int  nfe_resume(device_t);
static int nfe_shutdown(device_t);
static void nfe_power(struct nfe_softc *);
static int  nfe_miibus_readreg(device_t, int, int);
static int  nfe_miibus_writereg(device_t, int, int, int);
static void nfe_miibus_statchg(device_t);
static void nfe_link_task(void *, int);
static void nfe_set_intr(struct nfe_softc *);
static __inline void nfe_enable_intr(struct nfe_softc *);
static __inline void nfe_disable_intr(struct nfe_softc *);
static int  nfe_ioctl(struct ifnet *, u_long, caddr_t);
static void nfe_alloc_msix(struct nfe_softc *, int);
static int nfe_intr(void *);
static void nfe_int_task(void *, int);
static __inline void nfe_discard_rxbuf(struct nfe_softc *, int);
static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int);
static int nfe_newbuf(struct nfe_softc *, int);
static int nfe_jnewbuf(struct nfe_softc *, int);
static int  nfe_rxeof(struct nfe_softc *, int);
static int  nfe_jrxeof(struct nfe_softc *, int);
static void nfe_txeof(struct nfe_softc *);
static int  nfe_encap(struct nfe_softc *, struct mbuf **);
static void nfe_setmulti(struct nfe_softc *);
static void nfe_tx_task(void *, int);
static void nfe_start(struct ifnet *);
static void nfe_watchdog(struct ifnet *);
static void nfe_init(void *);
static void nfe_init_locked(void *);
static void nfe_stop(struct ifnet *);
static int  nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static int  nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int  nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static int  nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int  nfe_ifmedia_upd(struct ifnet *);
static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void nfe_tick(void *);
static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
static void nfe_set_macaddr(struct nfe_softc *, uint8_t *);
static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int);

static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS);
static void nfe_sysctl_node(struct nfe_softc *);
static void nfe_stats_clear(struct nfe_softc *);
static void nfe_stats_update(struct nfe_softc *);

#ifdef NFE_DEBUG
static int nfedebug = 0;
#define	DPRINTF(sc, ...)	do {				\
	if (nfedebug)						\
		device_printf((sc)->nfe_dev, __VA_ARGS__);	\
} while (0)
#define	DPRINTFN(sc, n, ...)	do {				\
	if (nfedebug >= (n))					\
		device_printf((sc)->nfe_dev, __VA_ARGS__);	\
} while (0)
#else
#define	DPRINTF(sc, ...)
#define	DPRINTFN(sc, n, ...)
#endif

#define	NFE_LOCK(_sc)		mtx_lock(&(_sc)->nfe_mtx)
#define	NFE_UNLOCK(_sc)		mtx_unlock(&(_sc)->nfe_mtx)
#define	NFE_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)

/* Tunables. */
static int msi_disable = 0;
static int msix_disable = 0;
static int jumbo_disable = 0;
TUNABLE_INT("hw.nfe.msi_disable", &msi_disable);
TUNABLE_INT("hw.nfe.msix_disable", &msix_disable);
TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable);
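/*
 * The tunables above are read from the kernel environment at boot and
 * can be set from loader.conf(5), e.g. hw.nfe.msi_disable="1".
 */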

static device_method_t nfe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nfe_probe),
	DEVMETHOD(device_attach,	nfe_attach),
	DEVMETHOD(device_detach,	nfe_detach),
	DEVMETHOD(device_suspend,	nfe_suspend),
	DEVMETHOD(device_resume,	nfe_resume),
	DEVMETHOD(device_shutdown,	nfe_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),

	{ NULL, NULL }
};

static driver_t nfe_driver = {
	"nfe",
	nfe_methods,
	sizeof(struct nfe_softc)
};

static devclass_t nfe_devclass;

DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0);
DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);

static struct nfe_type nfe_devs[] = {
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
	    "NVIDIA nForce MCP Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
	    "NVIDIA nForce2 MCP2 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
	    "NVIDIA nForce2 400 MCP4 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
	    "NVIDIA nForce2 400 MCP5 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
	    "NVIDIA nForce3 MCP3 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
	    "NVIDIA nForce3 250 MCP6 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
	    "NVIDIA nForce3 MCP7 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
	    "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
	    "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
	    "NVIDIA nForce MCP04 Networking Adapter"},		/* MCP10 */
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
	    "NVIDIA nForce MCP04 Networking Adapter"},		/* MCP11 */
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
	    "NVIDIA nForce 430 MCP12 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
	    "NVIDIA nForce 430 MCP13 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
	    "NVIDIA nForce MCP55 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
	    "NVIDIA nForce MCP55 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
	    "NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
	    "NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
	    "NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
	    "NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
	    "NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
	    "NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
	    "NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
	    "NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
	    "NVIDIA nForce MCP67 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
	    "NVIDIA nForce MCP67 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
	    "NVIDIA nForce MCP67 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
	    "NVIDIA nForce MCP67 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
	    "NVIDIA nForce MCP73 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
	    "NVIDIA nForce MCP73 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
	    "NVIDIA nForce MCP73 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
	    "NVIDIA nForce MCP73 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
	    "NVIDIA nForce MCP77 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
	    "NVIDIA nForce MCP77 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
	    "NVIDIA nForce MCP77 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
	    "NVIDIA nForce MCP77 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
	    "NVIDIA nForce MCP79 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
	    "NVIDIA nForce MCP79 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
	    "NVIDIA nForce MCP79 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
	    "NVIDIA nForce MCP79 Networking Adapter"},
	{0, 0, NULL}
};


/* Probe for supported hardware IDs */
static int
nfe_probe(device_t dev)
{
	struct nfe_type *t;

	t = nfe_devs;
	/* Check for matching PCI device IDs */
	while (t->name != NULL) {
		if ((pci_get_vendor(dev) == t->vid_id) &&
		    (pci_get_device(dev) == t->dev_id)) {
			device_set_desc(dev, t->name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

static void
nfe_alloc_msix(struct nfe_softc *sc, int count)
{
	int rid;

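	/*
	 * On these controllers the MSI-X table sits behind BAR(2) and
	 * the pending bit array behind BAR(3); both must be mapped
	 * before the vectors can be used.
	 */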
	rid = PCIR_BAR(2);
	sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (sc->nfe_msix_res == NULL) {
		device_printf(sc->nfe_dev,
		    "couldn't allocate MSIX table resource\n");
		return;
	}
	rid = PCIR_BAR(3);
	sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (sc->nfe_msix_pba_res == NULL) {
		device_printf(sc->nfe_dev,
		    "couldn't allocate MSIX PBA resource\n");
		bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2),
		    sc->nfe_msix_res);
		sc->nfe_msix_res = NULL;
		return;
	}

	if (pci_alloc_msix(sc->nfe_dev, &count) == 0) {
		if (count == NFE_MSI_MESSAGES) {
			if (bootverbose)
				device_printf(sc->nfe_dev,
				    "Using %d MSIX messages\n", count);
			sc->nfe_msix = 1;
		} else {
			if (bootverbose)
				device_printf(sc->nfe_dev,
				    "couldn't allocate MSIX\n");
			pci_release_msi(sc->nfe_dev);
			bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
			    PCIR_BAR(3), sc->nfe_msix_pba_res);
			bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
			    PCIR_BAR(2), sc->nfe_msix_res);
			sc->nfe_msix_pba_res = NULL;
			sc->nfe_msix_res = NULL;
		}
	}
}

static int
nfe_attach(device_t dev)
{
	struct nfe_softc *sc;
	struct ifnet *ifp;
	bus_addr_t dma_addr_max;
	int error = 0, i, msic, reg, rid;

	sc = device_get_softc(dev);
	sc->nfe_dev = dev;

	mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);
	TASK_INIT(&sc->nfe_link_task, 0, nfe_link_task, sc);

	pci_enable_busmaster(dev);

	rid = PCIR_BAR(0);
	sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->nfe_res[0] == NULL) {
		device_printf(dev, "couldn't map memory resources\n");
		mtx_destroy(&sc->nfe_mtx);
		return (ENXIO);
	}

	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
		uint16_t v, width;

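		/*
		 * PCIe capability layout: offset 0x08 is the Device
		 * Control register (max. read request size lives in
		 * bits 14:12; the encoded value 5 means 4096 bytes),
		 * 0x0c is Link Capabilities and 0x12 is Link Status,
		 * both carrying a link width field.
		 */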
		v = pci_read_config(dev, reg + 0x08, 2);
		/* Change max. read request size to 4096. */
		v &= ~(7 << 12);
		v |= (5 << 12);
		pci_write_config(dev, reg + 0x08, v, 2);

		v = pci_read_config(dev, reg + 0x0c, 2);
		/* link capability */
		v = (v >> 4) & 0x0f;
		width = pci_read_config(dev, reg + 0x12, 2);
		/* negotiated link width */
		width = (width >> 4) & 0x3f;
		if (v != width)
			device_printf(sc->nfe_dev,
			    "warning, negotiated width of link(x%d) != "
			    "max. width of link(x%d)\n", width, v);
	}

	/* Allocate interrupt */
	if (msix_disable == 0 || msi_disable == 0) {
		if (msix_disable == 0 &&
		    (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES)
			nfe_alloc_msix(sc, msic);
		if (msi_disable == 0 && sc->nfe_msix == 0 &&
		    (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES &&
		    pci_alloc_msi(dev, &msic) == 0) {
			if (msic == NFE_MSI_MESSAGES) {
				if (bootverbose)
					device_printf(dev,
					    "Using %d MSI messages\n", msic);
				sc->nfe_msi = 1;
			} else
				pci_release_msi(dev);
		}
	}

	if (sc->nfe_msix == 0 && sc->nfe_msi == 0) {
		rid = 0;
		sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (sc->nfe_irq[0] == NULL) {
			device_printf(dev, "couldn't allocate IRQ resources\n");
			error = ENXIO;
			goto fail;
		}
	} else {
		for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
			sc->nfe_irq[i] = bus_alloc_resource_any(dev,
			    SYS_RES_IRQ, &rid, RF_ACTIVE);
			if (sc->nfe_irq[i] == NULL) {
				device_printf(dev,
				    "couldn't allocate IRQ resources for "
				    "message %d\n", rid);
				error = ENXIO;
				goto fail;
			}
		}
		/* Map interrupts to vector 0. */
		if (sc->nfe_msix != 0) {
			NFE_WRITE(sc, NFE_MSIX_MAP0, 0);
			NFE_WRITE(sc, NFE_MSIX_MAP1, 0);
		} else if (sc->nfe_msi != 0) {
			NFE_WRITE(sc, NFE_MSI_MAP0, 0);
			NFE_WRITE(sc, NFE_MSI_MAP1, 0);
		}
	}

	/* Set IRQ status/mask register. */
	sc->nfe_irq_status = NFE_IRQ_STATUS;
	sc->nfe_irq_mask = NFE_IRQ_MASK;
	sc->nfe_intrs = NFE_IRQ_WANTED;
	sc->nfe_nointrs = 0;
	if (sc->nfe_msix != 0) {
		sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS;
		sc->nfe_nointrs = NFE_IRQ_WANTED;
	} else if (sc->nfe_msi != 0) {
		sc->nfe_irq_mask = NFE_MSI_IRQ_MASK;
		sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED;
	}

	sc->nfe_devid = pci_get_device(dev);
	sc->nfe_revid = pci_get_revid(dev);
	sc->nfe_flags = 0;

	switch (sc->nfe_devid) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_MIB_V1;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_MIB_V1;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
		break;

	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT |
		    NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
		/* XXX flow control */
		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT |
		    NFE_CORRECT_MACADDR | NFE_MIB_V3;
		break;
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
		/* XXX flow control */
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_MIB_V3;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL |
		    NFE_MIB_V2;
		break;
	}

	nfe_power(sc);
	/* Check for reversed Ethernet address */
	if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
		sc->nfe_flags |= NFE_CORRECT_MACADDR;
	nfe_get_macaddr(sc, sc->eaddr);
	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
	dma_addr_max = BUS_SPACE_MAXADDR_32BIT;
	if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0)
		dma_addr_max = NFE_DMA_MAXADDR;
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->nfe_dev),	/* parent */
	    1, 0,				/* alignment, boundary */
	    dma_addr_max,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT, 0,		/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->nfe_parent_tag);
	if (error)
		goto fail;

	ifp = sc->nfe_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	TASK_INIT(&sc->nfe_tx_task, 1, nfe_tx_task, ifp);

	/*
	 * Allocate Tx and Rx rings.
	 */
	if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0)
		goto fail;

	if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0)
		goto fail;

	nfe_alloc_jrx_ring(sc, &sc->jrxq);
	/* Create sysctl node. */
	nfe_sysctl_node(sc);

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_hwassist = 0;
	ifp->if_capabilities = 0;
	ifp->if_watchdog = NULL;
	ifp->if_init = nfe_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_TX_RING_COUNT - 1);
	ifp->if_snd.ifq_drv_maxlen = NFE_TX_RING_COUNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	if (sc->nfe_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4;
		ifp->if_hwassist |= NFE_CSUM_FEATURES | CSUM_TSO;
	}
	ifp->if_capenable = ifp->if_capabilities;

	sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	if ((sc->nfe_flags & NFE_HW_VLAN) != 0) {
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
		if ((ifp->if_capabilities & IFCAP_HWCSUM) != 0)
			ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
	}
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/* Do MII setup */
	if (mii_phy_probe(dev, &sc->nfe_miibus, nfe_ifmedia_upd,
	    nfe_ifmedia_sts)) {
		device_printf(dev, "MII without any phy!\n");
		error = ENXIO;
		goto fail;
	}
	ether_ifattach(ifp, sc->eaddr);

	TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
	sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->nfe_tq);
	taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->nfe_dev));
	error = 0;
	if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
		error = bus_setup_intr(dev, sc->nfe_irq[0],
		    INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
		    &sc->nfe_intrhand[0]);
	} else {
		for (i = 0; i < NFE_MSI_MESSAGES; i++) {
			error = bus_setup_intr(dev, sc->nfe_irq[i],
			    INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
			    &sc->nfe_intrhand[i]);
			if (error != 0)
				break;
		}
	}
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		taskqueue_free(sc->nfe_tq);
		sc->nfe_tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		nfe_detach(dev);

	return (error);
}


static int
nfe_detach(device_t dev)
{
	struct nfe_softc *sc;
	struct ifnet *ifp;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int i, rid;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
	ifp = sc->nfe_ifp;

#ifdef DEVICE_POLLING
	if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	if (device_is_attached(dev)) {
		NFE_LOCK(sc);
		nfe_stop(ifp);
		ifp->if_flags &= ~IFF_UP;
		NFE_UNLOCK(sc);
		callout_drain(&sc->nfe_stat_ch);
		taskqueue_drain(taskqueue_fast, &sc->nfe_tx_task);
		taskqueue_drain(taskqueue_swi, &sc->nfe_link_task);
		ether_ifdetach(ifp);
	}

	if (ifp) {
		/* restore Ethernet address */
		if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				eaddr[i] = sc->eaddr[5 - i];
			}
		} else
			bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN);
		nfe_set_macaddr(sc, eaddr);
		if_free(ifp);
	}
	if (sc->nfe_miibus)
		device_delete_child(dev, sc->nfe_miibus);
	bus_generic_detach(dev);
	if (sc->nfe_tq != NULL) {
		taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task);
		taskqueue_free(sc->nfe_tq);
		sc->nfe_tq = NULL;
	}

	for (i = 0; i < NFE_MSI_MESSAGES; i++) {
		if (sc->nfe_intrhand[i] != NULL) {
			bus_teardown_intr(dev, sc->nfe_irq[i],
			    sc->nfe_intrhand[i]);
			sc->nfe_intrhand[i] = NULL;
		}
	}

	if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
		if (sc->nfe_irq[0] != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, 0,
			    sc->nfe_irq[0]);
	} else {
		for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
			if (sc->nfe_irq[i] != NULL) {
				bus_release_resource(dev, SYS_RES_IRQ, rid,
				    sc->nfe_irq[i]);
				sc->nfe_irq[i] = NULL;
			}
		}
		pci_release_msi(dev);
	}
	if (sc->nfe_msix_pba_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3),
		    sc->nfe_msix_pba_res);
		sc->nfe_msix_pba_res = NULL;
	}
	if (sc->nfe_msix_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2),
		    sc->nfe_msix_res);
		sc->nfe_msix_res = NULL;
	}
	if (sc->nfe_res[0] != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
		    sc->nfe_res[0]);
		sc->nfe_res[0] = NULL;
	}

	nfe_free_tx_ring(sc, &sc->txq);
	nfe_free_rx_ring(sc, &sc->rxq);
	nfe_free_jrx_ring(sc, &sc->jrxq);

	if (sc->nfe_parent_tag) {
		bus_dma_tag_destroy(sc->nfe_parent_tag);
		sc->nfe_parent_tag = NULL;
	}

	mtx_destroy(&sc->nfe_mtx);

	return (0);
}


static int
nfe_suspend(device_t dev)
{
	struct nfe_softc *sc;

	sc = device_get_softc(dev);

	NFE_LOCK(sc);
	nfe_stop(sc->nfe_ifp);
	sc->nfe_suspended = 1;
	NFE_UNLOCK(sc);

	return (0);
}


static int
nfe_resume(device_t dev)
{
	struct nfe_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	NFE_LOCK(sc);
	ifp = sc->nfe_ifp;
	if (ifp->if_flags & IFF_UP)
		nfe_init_locked(sc);
	sc->nfe_suspended = 0;
	NFE_UNLOCK(sc);

	return (0);
}


/* Take PHY/NIC out of powerdown, from Linux */
static void
nfe_power(struct nfe_softc *sc)
{
	uint32_t pwr;

	if ((sc->nfe_flags & NFE_PWR_MGMT) == 0)
		return;
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
	NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
	DELAY(100);
	NFE_WRITE(sc, NFE_MAC_RESET, 0);
	DELAY(100);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
	pwr = NFE_READ(sc, NFE_PWR2_CTL);
	pwr &= ~NFE_PWR2_WAKEUP_MASK;
	if (sc->nfe_revid >= 0xa3 &&
	    (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 ||
	    sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2))
		pwr |= NFE_PWR2_REVA3;
	NFE_WRITE(sc, NFE_PWR2_CTL, pwr);
}


static void
nfe_miibus_statchg(device_t dev)
{
	struct nfe_softc *sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc->nfe_link_task);
}


static void
nfe_link_task(void *arg, int pending)
{
	struct nfe_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;
	uint32_t gmask, rxctl, txctl, val;

	sc = (struct nfe_softc *)arg;

	NFE_LOCK(sc);

	mii = device_get_softc(sc->nfe_miibus);
	ifp = sc->nfe_ifp;
	if (mii == NULL || ifp == NULL) {
		NFE_UNLOCK(sc);
		return;
	}

	if (mii->mii_media_status & IFM_ACTIVE) {
		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
			sc->nfe_link = 1;
	} else
		sc->nfe_link = 0;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) {
		phy  |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy  |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy  |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

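	/*
	 * The bit tested below and the NFE_SETUP_R1 magic values are
	 * undocumented; they apparently follow what the Linux forcedeth
	 * driver does for these chips.
	 */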
	if ((phy & 0x10000000) != 0) {
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
			val = NFE_R1_MAGIC_1000;
		else
			val = NFE_R1_MAGIC_10_100;
	} else
		val = NFE_R1_MAGIC_DEFAULT;
	NFE_WRITE(sc, NFE_SETUP_R1, val);

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);

	gmask = mii->mii_media_active & IFM_GMASK;
	if ((gmask & IFM_FDX) != 0) {
		/* It seems all hardware supports Rx pause frames. */
		val = NFE_READ(sc, NFE_RXFILTER);
		if ((gmask & IFM_FLAG0) != 0)
			val |= NFE_PFF_RX_PAUSE;
		else
			val &= ~NFE_PFF_RX_PAUSE;
		NFE_WRITE(sc, NFE_RXFILTER, val);
		if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
			val = NFE_READ(sc, NFE_MISC1);
			if ((gmask & IFM_FLAG1) != 0) {
				NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
				    NFE_TX_PAUSE_FRAME_ENABLE);
				val |= NFE_MISC1_TX_PAUSE;
			} else {
				val &= ~NFE_MISC1_TX_PAUSE;
				NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
				    NFE_TX_PAUSE_FRAME_DISABLE);
			}
			NFE_WRITE(sc, NFE_MISC1, val);
		}
	} else {
		/* disable rx/tx pause frames */
		val = NFE_READ(sc, NFE_RXFILTER);
		val &= ~NFE_PFF_RX_PAUSE;
		NFE_WRITE(sc, NFE_RXFILTER, val);
		if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
			NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
			    NFE_TX_PAUSE_FRAME_DISABLE);
			val = NFE_READ(sc, NFE_MISC1);
			val &= ~NFE_MISC1_TX_PAUSE;
			NFE_WRITE(sc, NFE_MISC1, val);
		}
	}

	txctl = NFE_READ(sc, NFE_TX_CTL);
	rxctl = NFE_READ(sc, NFE_RX_CTL);
	if (sc->nfe_link != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		txctl |= NFE_TX_START;
		rxctl |= NFE_RX_START;
	} else {
		txctl &= ~NFE_TX_START;
		rxctl &= ~NFE_RX_START;
	}
	NFE_WRITE(sc, NFE_TX_CTL, txctl);
	NFE_WRITE(sc, NFE_RX_CTL, rxctl);

	NFE_UNLOCK(sc);
}


static int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == NFE_TIMEOUT) {
		DPRINTFN(sc, 2, "timeout waiting for PHY\n");
		return (0);
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(sc, 2, "could not read PHY\n");
		return (0);
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);

	return (val);
}


static int
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == NFE_TIMEOUT)
		device_printf(sc->nfe_dev, "could not write to PHY\n");
#endif
	return (0);
}

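/*
 * Callback argument for nfe_dma_map_segs(), used to hand back the bus
 * address of a loaded DMA map.
 */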
struct nfe_dmamap_arg {
	bus_addr_t nfe_busaddr;
};

static int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_dmamap_arg ctx;
	struct nfe_rx_data *data;
	void *desc;
	int i, error, descsize;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    NFE_RX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
	    NFE_RX_RING_COUNT * descsize,	/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->rx_desc_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
		goto fail;
	}

	/* allocate memory for the descriptors */
	error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not allocate desc DMA memory\n");
		goto fail;
	}
	if (sc->nfe_flags & NFE_40BIT_ADDR)
		ring->desc64 = desc;
	else
		ring->desc32 = desc;

	/* map desc to device visible address space */
	ctx.nfe_busaddr = 0;
	error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc,
	    NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not load desc DMA map\n");
		goto fail;
	}
	ring->physaddr = ctx.nfe_busaddr;

	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &ring->rx_data_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create Rx DMA tag\n");
		goto fail;
	}

	error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not create Rx DMA spare map\n");
		goto fail;
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];
		data->rx_data_map = NULL;
		data->m = NULL;
		error = bus_dmamap_create(ring->rx_data_tag, 0,
		    &data->rx_data_map);
		if (error != 0) {
			device_printf(sc->nfe_dev,
			    "could not create Rx DMA map\n");
			goto fail;
		}
	}

fail:
	return (error);
}


static void
nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
{
	struct nfe_dmamap_arg ctx;
	struct nfe_rx_data *data;
	void *desc;
	int i, error, descsize;

	if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
		return;
	if (jumbo_disable != 0) {
		device_printf(sc->nfe_dev, "disabling jumbo frame support\n");
		sc->nfe_jumbo_disable = 1;
		return;
	}

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->jdesc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->jdesc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->jcur = ring->jnext = 0;

	/* Create DMA tag for jumbo Rx ring. */
	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    NFE_JUMBO_RX_RING_COUNT * descsize,	/* maxsize */
	    1, 					/* nsegments */
	    NFE_JUMBO_RX_RING_COUNT * descsize,	/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->jrx_desc_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not create jumbo ring DMA tag\n");
		goto fail;
	}

	/* Create DMA tag for jumbo Rx buffers. */
	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    PAGE_SIZE, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MJUM9BYTES,				/* maxsize */
	    1,					/* nsegments */
	    MJUM9BYTES,				/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->jrx_data_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not create jumbo Rx buffer DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
	error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not allocate DMA'able memory for jumbo Rx ring\n");
		goto fail;
	}
	if (sc->nfe_flags & NFE_40BIT_ADDR)
		ring->jdesc64 = desc;
	else
		ring->jdesc32 = desc;

	ctx.nfe_busaddr = 0;
	error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc,
	    NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not load DMA'able memory for jumbo Rx ring\n");
		goto fail;
	}
	ring->jphysaddr = ctx.nfe_busaddr;

	/* Create DMA maps for jumbo Rx buffers. */
	error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not create jumbo Rx DMA spare map\n");
		goto fail;
	}

	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
		data = &sc->jrxq.jdata[i];
		data->rx_data_map = NULL;
		data->m = NULL;
		error = bus_dmamap_create(ring->jrx_data_tag, 0,
		    &data->rx_data_map);
		if (error != 0) {
			device_printf(sc->nfe_dev,
			    "could not create jumbo Rx DMA map\n");
			goto fail;
		}
	}

	return;

fail:
	/*
	 * Running without jumbo frame support is OK for most cases, so
	 * don't fail if the DMA tags/maps for jumbo frames cannot be created.
	 */
	nfe_free_jrx_ring(sc, ring);
	device_printf(sc->nfe_dev, "disabling jumbo frame support due to "
	    "resource shortage\n");
	sc->nfe_jumbo_disable = 1;
}


static int
nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	void *desc;
	size_t descsize;
	int i;

	ring->cur = ring->next = 0;
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}
	bzero(desc, descsize * NFE_RX_RING_COUNT);
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (nfe_newbuf(sc, i) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}


static int
nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
{
	void *desc;
	size_t descsize;
	int i;

	ring->jcur = ring->jnext = 0;
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->jdesc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->jdesc32;
		descsize = sizeof (struct nfe_desc32);
	}
	bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT);
	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
		if (nfe_jnewbuf(sc, i) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}


static void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->rx_data_map != NULL) {
			bus_dmamap_destroy(ring->rx_data_tag,
			    data->rx_data_map);
			data->rx_data_map = NULL;
		}
		if (data->m != NULL) {
			m_freem(data->m);
			data->m = NULL;
		}
	}
	if (ring->rx_data_tag != NULL) {
		if (ring->rx_spare_map != NULL) {
			bus_dmamap_destroy(ring->rx_data_tag,
			    ring->rx_spare_map);
			ring->rx_spare_map = NULL;
		}
		bus_dma_tag_destroy(ring->rx_data_tag);
		ring->rx_data_tag = NULL;
	}

	if (desc != NULL) {
		bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
		bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
		ring->desc64 = NULL;
		ring->desc32 = NULL;
		ring->rx_desc_map = NULL;
	}
	if (ring->rx_desc_tag != NULL) {
		bus_dma_tag_destroy(ring->rx_desc_tag);
		ring->rx_desc_tag = NULL;
	}
}


static void
nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
		return;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->jdesc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->jdesc32;
		descsize = sizeof (struct nfe_desc32);
	}

	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
		data = &ring->jdata[i];
		if (data->rx_data_map != NULL) {
			bus_dmamap_destroy(ring->jrx_data_tag,
			    data->rx_data_map);
			data->rx_data_map = NULL;
		}
		if (data->m != NULL) {
			m_freem(data->m);
			data->m = NULL;
		}
	}
	if (ring->jrx_data_tag != NULL) {
		if (ring->jrx_spare_map != NULL) {
			bus_dmamap_destroy(ring->jrx_data_tag,
			    ring->jrx_spare_map);
			ring->jrx_spare_map = NULL;
		}
		bus_dma_tag_destroy(ring->jrx_data_tag);
		ring->jrx_data_tag = NULL;
	}

	if (desc != NULL) {
		bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map);
		bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map);
		ring->jdesc64 = NULL;
		ring->jdesc32 = NULL;
		ring->jrx_desc_map = NULL;
	}

	if (ring->jrx_desc_tag != NULL) {
		bus_dma_tag_destroy(ring->jrx_desc_tag);
		ring->jrx_desc_tag = NULL;
	}
}


static int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_dmamap_arg ctx;
	int i, error;
	void *desc;
	int descsize;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    NFE_TX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
	    NFE_TX_RING_COUNT * descsize,	/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->tx_desc_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
		goto fail;
	}

	error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not allocate desc DMA memory\n");
		goto fail;
	}
	if (sc->nfe_flags & NFE_40BIT_ADDR)
		ring->desc64 = desc;
	else
		ring->desc32 = desc;

	ctx.nfe_busaddr = 0;
	error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc,
	    NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not load desc DMA map\n");
		goto fail;
	}
	ring->physaddr = ctx.nfe_busaddr;

	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    NFE_TSO_MAXSIZE,			/* maxsize */
	    NFE_MAX_SCATTER,			/* nsegments */
	    NFE_TSO_MAXSGSIZE,			/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->tx_data_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create Tx DMA tag\n");
		goto fail;
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(ring->tx_data_tag, 0,
		    &ring->data[i].tx_data_map);
		if (error != 0) {
			device_printf(sc->nfe_dev,
			    "could not create Tx DMA map\n");
			goto fail;
		}
	}

fail:
	return (error);
}


static void
nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	void *desc;
	size_t descsize;

	sc->nfe_force_tx = 0;
	ring->queued = 0;
	ring->cur = ring->next = 0;
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}
	bzero(desc, descsize * NFE_TX_RING_COUNT);

	bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}


static void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->tx_data_map != NULL) {
			bus_dmamap_destroy(ring->tx_data_tag,
			    data->tx_data_map);
			data->tx_data_map = NULL;
		}
	}

	if (ring->tx_data_tag != NULL) {
		bus_dma_tag_destroy(ring->tx_data_tag);
		ring->tx_data_tag = NULL;
	}

	if (desc != NULL) {
		bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
		bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
		ring->desc64 = NULL;
		ring->desc32 = NULL;
		ring->tx_desc_map = NULL;
		bus_dma_tag_destroy(ring->tx_desc_tag);
		ring->tx_desc_tag = NULL;
	}
}

#ifdef DEVICE_POLLING
static poll_handler_t nfe_poll;


static void
nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t r;

	NFE_LOCK(sc);

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		NFE_UNLOCK(sc);
		return;
	}

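	/*
	 * Frames larger than a standard mbuf cluster are received on
	 * the jumbo Rx ring.
	 */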
	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
		nfe_jrxeof(sc, count);
	else
		nfe_rxeof(sc, count);
	nfe_txeof(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task);

	if (cmd == POLL_AND_CHECK_STATUS) {
		if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
			NFE_UNLOCK(sc);
			return;
		}
		NFE_WRITE(sc, sc->nfe_irq_status, r);

		if (r & NFE_IRQ_LINK) {
			NFE_READ(sc, NFE_PHY_STATUS);
			NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
			DPRINTF(sc, "link state changed\n");
		}
	}
	NFE_UNLOCK(sc);
}
#endif /* DEVICE_POLLING */

static void
nfe_set_intr(struct nfe_softc *sc)
{

	if (sc->nfe_msi != 0)
		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
}


/* In MSI-X, a write to the mask register behaves as XOR. */
static __inline void
nfe_enable_intr(struct nfe_softc *sc)
{

	if (sc->nfe_msix != 0) {
		/* XXX Should have a better way to enable interrupts! */
		if (NFE_READ(sc, sc->nfe_irq_mask) == 0)
			NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
	} else
		NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
}


static __inline void
nfe_disable_intr(struct nfe_softc *sc)
{

	if (sc->nfe_msix != 0) {
		/* XXX Should have a better way to disable interrupts! */
		if (NFE_READ(sc, sc->nfe_irq_mask) != 0)
			NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
	} else
		NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
}


static int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, init, mask;

	sc = ifp->if_softc;
	ifr = (struct ifreq *) data;
	error = 0;
	init = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) ||
			    (sc->nfe_jumbo_disable != 0)) &&
			    ifr->ifr_mtu > ETHERMTU)
				error = EINVAL;
			else {
				NFE_LOCK(sc);
				ifp->if_mtu = ifr->ifr_mtu;
				if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
					nfe_init_locked(sc);
				NFE_UNLOCK(sc);
			}
		}
		break;
	case SIOCSIFFLAGS:
		NFE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
			    ((ifp->if_flags ^ sc->nfe_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				nfe_setmulti(sc);
			else
				nfe_init_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				nfe_stop(ifp);
		}
		sc->nfe_if_flags = ifp->if_flags;
		NFE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			NFE_LOCK(sc);
			nfe_setmulti(sc);
			NFE_UNLOCK(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->nfe_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
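		/* Figure out which capability bits the request would toggle. */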
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if ((mask & IFCAP_POLLING) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
				error = ether_poll_register(nfe_poll, ifp);
				if (error)
					break;
				NFE_LOCK(sc);
				nfe_disable_intr(sc);
				ifp->if_capenable |= IFCAP_POLLING;
				NFE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				NFE_LOCK(sc);
				nfe_enable_intr(sc);
				ifp->if_capenable &= ~IFCAP_POLLING;
				NFE_UNLOCK(sc);
			}
		}
#endif /* DEVICE_POLLING */
		if ((sc->nfe_flags & NFE_HW_CSUM) != 0 &&
		    (mask & IFCAP_HWCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
			    (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
				ifp->if_hwassist |= NFE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~NFE_CSUM_FEATURES;
			init++;
		}
		if ((sc->nfe_flags & NFE_HW_VLAN) != 0 &&
		    (mask & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			init++;
		}
		/*
		 * XXX
		 * It seems that VLAN stripping requires Rx checksum offload.
		 * Unfortunately, FreeBSD has no way to disable only the Rx
		 * side of VLAN stripping, so when Rx checksum offload is
		 * known to be disabled, turn the entire hardware VLAN
		 * assist off.
		 */
		if ((sc->nfe_flags & (NFE_HW_CSUM | NFE_HW_VLAN)) ==
		    (NFE_HW_CSUM | NFE_HW_VLAN)) {
			if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
				ifp->if_capenable &= ~IFCAP_VLAN_HWTAGGING;
		}

		if ((sc->nfe_flags & NFE_HW_CSUM) != 0 &&
		    (mask & IFCAP_TSO4) != 0) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if ((IFCAP_TSO4 & ifp->if_capenable) != 0 &&
			    (IFCAP_TSO4 & ifp->if_capabilities) != 0)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (init > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			nfe_init(sc);
		}
		if ((sc->nfe_flags & NFE_HW_VLAN) != 0)
			VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}


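/*
 * Interrupt filter: runs in interrupt context, so it only checks that
 * the interrupt is really ours, masks further interrupts and defers
 * the actual work to nfe_int_task() on the driver taskqueue.
 */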
static int
nfe_intr(void *arg)
{
	struct nfe_softc *sc;
	uint32_t status;

	sc = (struct nfe_softc *)arg;

	status = NFE_READ(sc, sc->nfe_irq_status);
	if (status == 0 || status == 0xffffffff)
		return (FILTER_STRAY);
	nfe_disable_intr(sc);
	taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);

	return (FILTER_HANDLED);
}


static void
nfe_int_task(void *arg, int pending)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = sc->nfe_ifp;
	uint32_t r;
	int domore;

	NFE_LOCK(sc);

	if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
		nfe_enable_intr(sc);
		NFE_UNLOCK(sc);
		return;	/* not for us */
	}
	NFE_WRITE(sc, sc->nfe_irq_status, r);

1805 	DPRINTFN(sc, 5, "nfe_int_task: interrupt register %x\n", r);
1806 
1807 #ifdef DEVICE_POLLING
1808 	if (ifp->if_capenable & IFCAP_POLLING) {
1809 		NFE_UNLOCK(sc);
1810 		return;
1811 	}
1812 #endif
1813 
1814 	if (r & NFE_IRQ_LINK) {
1815 		NFE_READ(sc, NFE_PHY_STATUS);
1816 		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1817 		DPRINTF(sc, "link state changed\n");
1818 	}
1819 
1820 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1821 		NFE_UNLOCK(sc);
1822 		nfe_enable_intr(sc);
1823 		return;
1824 	}
1825 
1826 	domore = 0;
1827 	/* check Rx ring */
1828 	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
1829 		domore = nfe_jrxeof(sc, sc->nfe_process_limit);
1830 	else
1831 		domore = nfe_rxeof(sc, sc->nfe_process_limit);
1832 	/* check Tx ring */
1833 	nfe_txeof(sc);
1834 
1835 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1836 		taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task);
1837 
1838 	NFE_UNLOCK(sc);
1839 
1840 	if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) {
1841 		taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);
1842 		return;
1843 	}
1844 
1845 	/* Reenable interrupts. */
1846 	nfe_enable_intr(sc);
1847 }
1848 
1849 
1850 static __inline void
1851 nfe_discard_rxbuf(struct nfe_softc *sc, int idx)
1852 {
1853 	struct nfe_desc32 *desc32;
1854 	struct nfe_desc64 *desc64;
1855 	struct nfe_rx_data *data;
1856 	struct mbuf *m;
1857 
1858 	data = &sc->rxq.data[idx];
1859 	m = data->m;
1860 
1861 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1862 		desc64 = &sc->rxq.desc64[idx];
1863 		/* A returned VLAN tag may have overwritten physaddr[1]; restore it. */
1864 		desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
1865 		desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
1866 		desc64->length = htole16(m->m_len);
1867 		desc64->flags = htole16(NFE_RX_READY);
1868 	} else {
1869 		desc32 = &sc->rxq.desc32[idx];
1870 		desc32->length = htole16(m->m_len);
1871 		desc32->flags = htole16(NFE_RX_READY);
1872 	}
1873 }
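
/*
 * Note that both discard routines recycle the slot's existing mbuf: on
 * a bad frame or a failed replacement allocation the descriptor is
 * simply re-armed with its current buffer, so the ring never starves.
 */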
1874 
1875 
1876 static __inline void
1877 nfe_discard_jrxbuf(struct nfe_softc *sc, int idx)
1878 {
1879 	struct nfe_desc32 *desc32;
1880 	struct nfe_desc64 *desc64;
1881 	struct nfe_rx_data *data;
1882 	struct mbuf *m;
1883 
1884 	data = &sc->jrxq.jdata[idx];
1885 	m = data->m;
1886 
1887 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1888 		desc64 = &sc->jrxq.jdesc64[idx];
1889 		/* A returned VLAN tag may have overwritten physaddr[1]; restore it. */
1890 		desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
1891 		desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
1892 		desc64->length = htole16(m->m_len);
1893 		desc64->flags = htole16(NFE_RX_READY);
1894 	} else {
1895 		desc32 = &sc->jrxq.jdesc32[idx];
1896 		desc32->length = htole16(m->m_len);
1897 		desc32->flags = htole16(NFE_RX_READY);
1898 	}
1899 }
1900 
1901 
1902 static int
1903 nfe_newbuf(struct nfe_softc *sc, int idx)
1904 {
1905 	struct nfe_rx_data *data;
1906 	struct nfe_desc32 *desc32;
1907 	struct nfe_desc64 *desc64;
1908 	struct mbuf *m;
1909 	bus_dma_segment_t segs[1];
1910 	bus_dmamap_t map;
1911 	int nsegs;
1912 
1913 	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1914 	if (m == NULL)
1915 		return (ENOBUFS);
1916 
1917 	m->m_len = m->m_pkthdr.len = MCLBYTES;
1918 	m_adj(m, ETHER_ALIGN);
1919 
1920 	if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map,
1921 	    m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
1922 		m_freem(m);
1923 		return (ENOBUFS);
1924 	}
1925 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1926 
1927 	data = &sc->rxq.data[idx];
1928 	if (data->m != NULL) {
1929 		bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
1930 		    BUS_DMASYNC_POSTREAD);
1931 		bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map);
1932 	}
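	/*
	 * Swap maps: the spare map now carries the new mbuf's mapping, so
	 * it becomes this slot's map while the old one becomes the spare
	 * for the next replenish.
	 */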
1933 	map = data->rx_data_map;
1934 	data->rx_data_map = sc->rxq.rx_spare_map;
1935 	sc->rxq.rx_spare_map = map;
1936 	bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
1937 	    BUS_DMASYNC_PREREAD);
1938 	data->paddr = segs[0].ds_addr;
1939 	data->m = m;
1940 	/* update mapping address in h/w descriptor */
1941 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1942 		desc64 = &sc->rxq.desc64[idx];
1943 		desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
1944 		desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
1945 		desc64->length = htole16(segs[0].ds_len);
1946 		desc64->flags = htole16(NFE_RX_READY);
1947 	} else {
1948 		desc32 = &sc->rxq.desc32[idx];
1949 		desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
1950 		desc32->length = htole16(segs[0].ds_len);
1951 		desc32->flags = htole16(NFE_RX_READY);
1952 	}
1953 
1954 	return (0);
1955 }
1956 
1957 
1958 static int
1959 nfe_jnewbuf(struct nfe_softc *sc, int idx)
1960 {
1961 	struct nfe_rx_data *data;
1962 	struct nfe_desc32 *desc32;
1963 	struct nfe_desc64 *desc64;
1964 	struct mbuf *m;
1965 	bus_dma_segment_t segs[1];
1966 	bus_dmamap_t map;
1967 	int nsegs;
1968 
1969 	m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
1970 	if (m == NULL)
1971 		return (ENOBUFS);
1972 	if ((m->m_flags & M_EXT) == 0) {
1973 		m_freem(m);
1974 		return (ENOBUFS);
1975 	}
1976 	m->m_pkthdr.len = m->m_len = MJUM9BYTES;
1977 	m_adj(m, ETHER_ALIGN);
1978 
1979 	if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag,
1980 	    sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
1981 		m_freem(m);
1982 		return (ENOBUFS);
1983 	}
1984 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1985 
1986 	data = &sc->jrxq.jdata[idx];
1987 	if (data->m != NULL) {
1988 		bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
1989 		    BUS_DMASYNC_POSTREAD);
1990 		bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map);
1991 	}
1992 	map = data->rx_data_map;
1993 	data->rx_data_map = sc->jrxq.jrx_spare_map;
1994 	sc->jrxq.jrx_spare_map = map;
1995 	bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
1996 	    BUS_DMASYNC_PREREAD);
1997 	data->paddr = segs[0].ds_addr;
1998 	data->m = m;
1999 	/* update mapping address in h/w descriptor */
2000 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
2001 		desc64 = &sc->jrxq.jdesc64[idx];
2002 		desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
2003 		desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2004 		desc64->length = htole16(segs[0].ds_len);
2005 		desc64->flags = htole16(NFE_RX_READY);
2006 	} else {
2007 		desc32 = &sc->jrxq.jdesc32[idx];
2008 		desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2009 		desc32->length = htole16(segs[0].ds_len);
2010 		desc32->flags = htole16(NFE_RX_READY);
2011 	}
2012 
2013 	return (0);
2014 }
2015 
2016 
2017 static int
2018 nfe_rxeof(struct nfe_softc *sc, int count)
2019 {
2020 	struct ifnet *ifp = sc->nfe_ifp;
2021 	struct nfe_desc32 *desc32;
2022 	struct nfe_desc64 *desc64;
2023 	struct nfe_rx_data *data;
2024 	struct mbuf *m;
2025 	uint16_t flags;
2026 	int len, prog;
2027 	uint32_t vtag = 0;
2028 
2029 	NFE_LOCK_ASSERT(sc);
2030 
2031 	bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
2032 	    BUS_DMASYNC_POSTREAD);
2033 
2034 	for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) {
2035 		if (count <= 0)
2036 			break;
2037 		count--;
2038 
2039 		data = &sc->rxq.data[sc->rxq.cur];
2040 
2041 		if (sc->nfe_flags & NFE_40BIT_ADDR) {
2042 			desc64 = &sc->rxq.desc64[sc->rxq.cur];
2043 			vtag = le32toh(desc64->physaddr[1]);
2044 			flags = le16toh(desc64->flags);
2045 			len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
2046 		} else {
2047 			desc32 = &sc->rxq.desc32[sc->rxq.cur];
2048 			flags = le16toh(desc32->flags);
2049 			len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
2050 		}
2051 
2052 		if (flags & NFE_RX_READY)
2053 			break;
2054 		prog++;
2055 		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2056 			if (!(flags & NFE_RX_VALID_V1)) {
2057 				ifp->if_ierrors++;
2058 				nfe_discard_rxbuf(sc, sc->rxq.cur);
2059 				continue;
2060 			}
2061 			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
2062 				flags &= ~NFE_RX_ERROR;
2063 				len--;	/* fix buffer length */
2064 			}
2065 		} else {
2066 			if (!(flags & NFE_RX_VALID_V2)) {
2067 				ifp->if_ierrors++;
2068 				nfe_discard_rxbuf(sc, sc->rxq.cur);
2069 				continue;
2070 			}
2071 
2072 			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
2073 				flags &= ~NFE_RX_ERROR;
2074 				len--;	/* fix buffer length */
2075 			}
2076 		}
2077 
2078 		if (flags & NFE_RX_ERROR) {
2079 			ifp->if_ierrors++;
2080 			nfe_discard_rxbuf(sc, sc->rxq.cur);
2081 			continue;
2082 		}
2083 
2084 		m = data->m;
2085 		if (nfe_newbuf(sc, sc->rxq.cur) != 0) {
2086 			ifp->if_iqdrops++;
2087 			nfe_discard_rxbuf(sc, sc->rxq.cur);
2088 			continue;
2089 		}
2090 
2091 		if ((vtag & NFE_RX_VTAG) != 0 &&
2092 		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2093 			m->m_pkthdr.ether_vtag = vtag & 0xffff;
2094 			m->m_flags |= M_VLANTAG;
2095 		}
2096 
2097 		m->m_pkthdr.len = m->m_len = len;
2098 		m->m_pkthdr.rcvif = ifp;
2099 
2100 		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
2101 			if ((flags & NFE_RX_IP_CSUMOK) != 0) {
2102 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2103 				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2104 				if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
2105 				    (flags & NFE_RX_UDP_CSUMOK) != 0) {
2106 					m->m_pkthdr.csum_flags |=
2107 					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2108 					m->m_pkthdr.csum_data = 0xffff;
2109 				}
2110 			}
2111 		}
2112 
2113 		ifp->if_ipackets++;
2114 
2115 		NFE_UNLOCK(sc);
2116 		(*ifp->if_input)(ifp, m);
2117 		NFE_LOCK(sc);
2118 	}
2119 
2120 	if (prog > 0)
2121 		bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
2122 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2123 
2124 	return (count > 0 ? 0 : EAGAIN);
2125 }
2126 
2127 
2128 static int
2129 nfe_jrxeof(struct nfe_softc *sc, int count)
2130 {
2131 	struct ifnet *ifp = sc->nfe_ifp;
2132 	struct nfe_desc32 *desc32;
2133 	struct nfe_desc64 *desc64;
2134 	struct nfe_rx_data *data;
2135 	struct mbuf *m;
2136 	uint16_t flags;
2137 	int len, prog;
2138 	uint32_t vtag = 0;
2139 
2140 	NFE_LOCK_ASSERT(sc);
2141 
2142 	bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2143 	    BUS_DMASYNC_POSTREAD);
2144 
2145 	for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT),
2146 	    vtag = 0) {
2147 		if (count <= 0)
2148 			break;
2149 		count--;
2150 
2151 		data = &sc->jrxq.jdata[sc->jrxq.jcur];
2152 
2153 		if (sc->nfe_flags & NFE_40BIT_ADDR) {
2154 			desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur];
2155 			vtag = le32toh(desc64->physaddr[1]);
2156 			flags = le16toh(desc64->flags);
2157 			len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
2158 		} else {
2159 			desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur];
2160 			flags = le16toh(desc32->flags);
2161 			len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
2162 		}
2163 
2164 		if (flags & NFE_RX_READY)
2165 			break;
2166 		prog++;
2167 		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2168 			if (!(flags & NFE_RX_VALID_V1)) {
2169 				ifp->if_ierrors++;
2170 				nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2171 				continue;
2172 			}
2173 			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
2174 				flags &= ~NFE_RX_ERROR;
2175 				len--;	/* fix buffer length */
2176 			}
2177 		} else {
2178 			if (!(flags & NFE_RX_VALID_V2)) {
2179 				ifp->if_ierrors++;
2180 				nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2181 				continue;
2182 			}
2183 
2184 			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
2185 				flags &= ~NFE_RX_ERROR;
2186 				len--;	/* fix buffer length */
2187 			}
2188 		}
2189 
2190 		if (flags & NFE_RX_ERROR) {
2191 			ifp->if_ierrors++;
2192 			nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2193 			continue;
2194 		}
2195 
2196 		m = data->m;
2197 		if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) {
2198 			ifp->if_iqdrops++;
2199 			nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2200 			continue;
2201 		}
2202 
2203 		if ((vtag & NFE_RX_VTAG) != 0 &&
2204 		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2205 			m->m_pkthdr.ether_vtag = vtag & 0xffff;
2206 			m->m_flags |= M_VLANTAG;
2207 		}
2208 
2209 		m->m_pkthdr.len = m->m_len = len;
2210 		m->m_pkthdr.rcvif = ifp;
2211 
2212 		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
2213 			if ((flags & NFE_RX_IP_CSUMOK) != 0) {
2214 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2215 				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2216 				if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
2217 				    (flags & NFE_RX_UDP_CSUMOK) != 0) {
2218 					m->m_pkthdr.csum_flags |=
2219 					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2220 					m->m_pkthdr.csum_data = 0xffff;
2221 				}
2222 			}
2223 		}
2224 
2225 		ifp->if_ipackets++;
2226 
2227 		NFE_UNLOCK(sc);
2228 		(*ifp->if_input)(ifp, m);
2229 		NFE_LOCK(sc);
2230 	}
2231 
2232 	if (prog > 0)
2233 		bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2234 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2235 
2236 	return (count > 0 ? 0 : EAGAIN);
2237 }
2238 
2239 
2240 static void
2241 nfe_txeof(struct nfe_softc *sc)
2242 {
2243 	struct ifnet *ifp = sc->nfe_ifp;
2244 	struct nfe_desc32 *desc32;
2245 	struct nfe_desc64 *desc64;
2246 	struct nfe_tx_data *data = NULL;
2247 	uint16_t flags;
2248 	int cons, prog;
2249 
2250 	NFE_LOCK_ASSERT(sc);
2251 
2252 	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2253 	    BUS_DMASYNC_POSTREAD);
2254 
2255 	prog = 0;
2256 	for (cons = sc->txq.next; cons != sc->txq.cur;
2257 	    NFE_INC(cons, NFE_TX_RING_COUNT)) {
2258 		if (sc->nfe_flags & NFE_40BIT_ADDR) {
2259 			desc64 = &sc->txq.desc64[cons];
2260 			flags = le16toh(desc64->flags);
2261 		} else {
2262 			desc32 = &sc->txq.desc32[cons];
2263 			flags = le16toh(desc32->flags);
2264 		}
2265 
2266 		if (flags & NFE_TX_VALID)
2267 			break;
2268 
2269 		prog++;
2270 		sc->txq.queued--;
2271 		data = &sc->txq.data[cons];
2272 
2273 		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2274 			if ((flags & NFE_TX_LASTFRAG_V1) == 0)
2275 				continue;
2276 			if ((flags & NFE_TX_ERROR_V1) != 0) {
2277 				device_printf(sc->nfe_dev,
2278 				    "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR);
2279 
2280 				ifp->if_oerrors++;
2281 			} else
2282 				ifp->if_opackets++;
2283 		} else {
2284 			if ((flags & NFE_TX_LASTFRAG_V2) == 0)
2285 				continue;
2286 			if ((flags & NFE_TX_ERROR_V2) != 0) {
2287 				device_printf(sc->nfe_dev,
2288 				    "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR);
2289 				ifp->if_oerrors++;
2290 			} else
2291 				ifp->if_opackets++;
2292 		}
2293 
2294 		/* last fragment of the mbuf chain transmitted */
2295 		KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__));
2296 		bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map,
2297 		    BUS_DMASYNC_POSTWRITE);
2298 		bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map);
2299 		m_freem(data->m);
2300 		data->m = NULL;
2301 	}
2302 
2303 	if (prog > 0) {
2304 		sc->nfe_force_tx = 0;
2305 		sc->txq.next = cons;
2306 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2307 		if (sc->txq.queued == 0)
2308 			sc->nfe_watchdog_timer = 0;
2309 	}
2310 }
2311 
2312 static int
2313 nfe_encap(struct nfe_softc *sc, struct mbuf **m_head)
2314 {
2315 	struct nfe_desc32 *desc32 = NULL;
2316 	struct nfe_desc64 *desc64 = NULL;
2317 	bus_dmamap_t map;
2318 	bus_dma_segment_t segs[NFE_MAX_SCATTER];
2319 	int error, i, nsegs, prod, si;
2320 	uint32_t tso_segsz;
2321 	uint16_t cflags, flags;
2322 	struct mbuf *m;
2323 
2324 	prod = si = sc->txq.cur;
2325 	map = sc->txq.data[prod].tx_data_map;
2326 
2327 	error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs,
2328 	    &nsegs, BUS_DMA_NOWAIT);
2329 	if (error == EFBIG) {
2330 		m = m_collapse(*m_head, M_DONTWAIT, NFE_MAX_SCATTER);
2331 		if (m == NULL) {
2332 			m_freem(*m_head);
2333 			*m_head = NULL;
2334 			return (ENOBUFS);
2335 		}
2336 		*m_head = m;
2337 		error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map,
2338 		    *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2339 		if (error != 0) {
2340 			m_freem(*m_head);
2341 			*m_head = NULL;
2342 			return (ENOBUFS);
2343 		}
2344 	} else if (error != 0)
2345 		return (error);
2346 	if (nsegs == 0) {
2347 		m_freem(*m_head);
2348 		*m_head = NULL;
2349 		return (EIO);
2350 	}
2351 
2352 	if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) {
2353 		bus_dmamap_unload(sc->txq.tx_data_tag, map);
2354 		return (ENOBUFS);
2355 	}
2356 
2357 	m = *m_head;
2358 	cflags = flags = 0;
2359 	tso_segsz = 0;
2360 	if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) {
2361 		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2362 			cflags |= NFE_TX_IP_CSUM;
2363 		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2364 			cflags |= NFE_TX_TCP_UDP_CSUM;
2365 		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2366 			cflags |= NFE_TX_TCP_UDP_CSUM;
2367 	}
2368 	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2369 		tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz <<
2370 		    NFE_TX_TSO_SHIFT;
2371 		cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
2372 		cflags |= NFE_TX_TSO;
2373 	}
2374 
2375 	for (i = 0; i < nsegs; i++) {
2376 		if (sc->nfe_flags & NFE_40BIT_ADDR) {
2377 			desc64 = &sc->txq.desc64[prod];
2378 			desc64->physaddr[0] =
2379 			    htole32(NFE_ADDR_HI(segs[i].ds_addr));
2380 			desc64->physaddr[1] =
2381 			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
2382 			desc64->vtag = 0;
2383 			desc64->length = htole16(segs[i].ds_len - 1);
2384 			desc64->flags = htole16(flags);
2385 		} else {
2386 			desc32 = &sc->txq.desc32[prod];
2387 			desc32->physaddr =
2388 			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
2389 			desc32->length = htole16(segs[i].ds_len - 1);
2390 			desc32->flags = htole16(flags);
2391 		}
2392 
2393 		/*
2394 		 * Setting the valid bit in the first descriptor is deferred
2395 		 * until the whole chain is set up (sketched after this function).
2396 		 */
2397 		flags |= NFE_TX_VALID;
2398 
2399 		sc->txq.queued++;
2400 		NFE_INC(prod, NFE_TX_RING_COUNT);
2401 	}
2402 
2403 	/*
2404 	 * The whole mbuf chain is now DMA mapped; fix up the last and first
2405 	 * descriptors.  Csum flags, vtag and TSO apply to the first fragment.
2406 	 */
2407 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
2408 		desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
2409 		desc64 = &sc->txq.desc64[si];
2410 		if ((m->m_flags & M_VLANTAG) != 0)
2411 			desc64->vtag = htole32(NFE_TX_VTAG |
2412 			    m->m_pkthdr.ether_vtag);
2413 		if (tso_segsz != 0) {
2414 			/*
2415 			 * XXX
2416 			 * The 32 bit shifted MSS is split across the two
2417 			 * 16 bit length and flags descriptor fields.
2418 			 */
2419 			desc64->length |= htole16((uint16_t)tso_segsz);
2420 			desc64->flags |= htole16(tso_segsz >> 16);
2421 		}
2422 		/*
2423 		 * finally, set the valid/checksum/TSO bit in the first
2424 		 * descriptor.
2425 		 */
2426 		desc64->flags |= htole16(NFE_TX_VALID | cflags);
2427 	} else {
2428 		if (sc->nfe_flags & NFE_JUMBO_SUP)
2429 			desc32->flags |= htole16(NFE_TX_LASTFRAG_V2);
2430 		else
2431 			desc32->flags |= htole16(NFE_TX_LASTFRAG_V1);
2432 		desc32 = &sc->txq.desc32[si];
2433 		if (tso_segsz != 0) {
2434 			/*
2435 			 * XXX
2436 			 * The 32 bit shifted MSS is split across the two
2437 			 * 16 bit length and flags descriptor fields.
2438 			 */
2439 			desc32->length |= htole16((uint16_t)tso_segsz);
2440 			desc32->flags |= htole16(tso_segsz >> 16);
2441 		}
2442 		/*
2443 		 * finally, set the valid/checksum/TSO bit in the first
2444 		 * descriptor.
2445 		 */
2446 		desc32->flags |= htole16(NFE_TX_VALID | cflags);
2447 	}
2448 
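	/*
	 * Commit the producer index, then park the loaded DMA map and the
	 * mbuf pointer on the last fragment's slot: nfe_txeof() frees the
	 * chain when it reaps that descriptor, and the first slot inherits
	 * the last slot's unused map for a later transmit.
	 */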
2449 	sc->txq.cur = prod;
2450 	prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT;
2451 	sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map;
2452 	sc->txq.data[prod].tx_data_map = map;
2453 	sc->txq.data[prod].m = m;
2454 
2455 	bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);
2456 
2457 	return (0);
2458 }
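
/*
 * The valid-bit handoff in nfe_encap() generalizes to any ring the
 * hardware polls for an ownership bit: write every descriptor of the
 * chain first and flip the bit on the first one last, so the chip can
 * never observe a half-built chain.  A sketch of the idea, with a
 * hypothetical descriptor layout (this driver orders the stores with
 * bus_dmamap_sync(..., BUS_DMASYNC_PREWRITE) before kicking Tx):
 */
#if 0
struct desc { volatile uint16_t flags; };	/* hypothetical layout */
#define	DESC_VALID	0x8000			/* hypothetical ownership bit */

static void
ring_post_chain(struct desc *ring, int first, int nsegs, int count)
{
	uint16_t fl = 0;
	int i;

	for (i = 0; i < nsegs; i++) {
		ring[(first + i) % count].flags = fl;
		fl |= DESC_VALID;	/* all but the first get VALID now */
	}
	ring[first].flags |= DESC_VALID;	/* publish the whole chain */
}
#endif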
2459 
2460 
2461 static void
2462 nfe_setmulti(struct nfe_softc *sc)
2463 {
2464 	struct ifnet *ifp = sc->nfe_ifp;
2465 	struct ifmultiaddr *ifma;
2466 	int i;
2467 	uint32_t filter;
2468 	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
2469 	uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = {
2470 		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2471 	};
2472 
2473 	NFE_LOCK_ASSERT(sc);
2474 
2475 	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
2476 		bzero(addr, ETHER_ADDR_LEN);
2477 		bzero(mask, ETHER_ADDR_LEN);
2478 		goto done;
2479 	}
2480 
2481 	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
2482 	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
2483 
2484 	IF_ADDR_LOCK(ifp);
2485 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2486 		u_char *addrp;
2487 
2488 		if (ifma->ifma_addr->sa_family != AF_LINK)
2489 			continue;
2490 
2491 		addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
2492 		for (i = 0; i < ETHER_ADDR_LEN; i++) {
2493 			u_int8_t mcaddr = addrp[i];
2494 			addr[i] &= mcaddr;
2495 			mask[i] &= ~mcaddr;
2496 		}
2497 	}
2498 	IF_ADDR_UNLOCK(ifp);
2499 
2500 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
2501 		mask[i] |= addr[i];
2502 	}
2503 
2504 done:
2505 	addr[0] |= 0x01;	/* make sure the multicast bit is set */
2506 
2507 	NFE_WRITE(sc, NFE_MULTIADDR_HI,
2508 	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2509 	NFE_WRITE(sc, NFE_MULTIADDR_LO,
2510 	    addr[5] <<  8 | addr[4]);
2511 	NFE_WRITE(sc, NFE_MULTIMASK_HI,
2512 	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
2513 	NFE_WRITE(sc, NFE_MULTIMASK_LO,
2514 	    mask[5] <<  8 | mask[4]);
2515 
2516 	filter = NFE_READ(sc, NFE_RXFILTER);
2517 	filter &= NFE_PFF_RX_PAUSE;
2518 	filter |= NFE_RXFILTER_MAGIC;
2519 	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M;
2520 	NFE_WRITE(sc, NFE_RXFILTER, filter);
2521 }
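
/*
 * The loops above fold every joined group into one (addr, mask) pair:
 * addr keeps the bits common to all addresses and mask marks the bit
 * positions on which they all agree.  A self-contained userland sketch
 * of the computation, fed two sample IPv4 multicast MAC mappings:
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <stdint.h>

int
main(void)
{
	const uint8_t groups[2][6] = {
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },	/* 224.0.0.1 */
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb },	/* 224.0.0.251 */
	};
	uint8_t addr[6], mask[6];
	int g, i;

	memset(addr, 0xff, sizeof(addr));
	memset(mask, 0xff, sizeof(mask));
	for (g = 0; g < 2; g++) {
		for (i = 0; i < 6; i++) {
			addr[i] &= groups[g][i];	/* common 1-bits */
			mask[i] &= ~groups[g][i];	/* common 0-bits */
		}
	}
	for (i = 0; i < 6; i++)
		mask[i] |= addr[i];	/* significant = agreed-upon bits */
	/* Prints 01:00:5e:00:00:01 / ff:ff:ff:ff:ff:05. */
	for (i = 0; i < 6; i++)
		printf("%02x%c", addr[i], i < 5 ? ':' : ' ');
	printf("/ ");
	for (i = 0; i < 6; i++)
		printf("%02x%c", mask[i], i < 5 ? ':' : '\n');
	return (0);
}
#endif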
2522 
2523 
2524 static void
2525 nfe_tx_task(void *arg, int pending)
2526 {
2527 	struct ifnet *ifp;
2528 
2529 	ifp = (struct ifnet *)arg;
2530 	nfe_start(ifp);
2531 }
2532 
2533 
2534 static void
2535 nfe_start(struct ifnet *ifp)
2536 {
2537 	struct nfe_softc *sc = ifp->if_softc;
2538 	struct mbuf *m0;
2539 	int enq;
2540 
2541 	NFE_LOCK(sc);
2542 
2543 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2544 	    IFF_DRV_RUNNING || sc->nfe_link == 0) {
2545 		NFE_UNLOCK(sc);
2546 		return;
2547 	}
2548 
2549 	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
2550 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
2551 		if (m0 == NULL)
2552 			break;
2553 
2554 		if (nfe_encap(sc, &m0) != 0) {
2555 			if (m0 == NULL)
2556 				break;
2557 			IFQ_DRV_PREPEND(&ifp->if_snd, m0);
2558 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2559 			break;
2560 		}
2561 		enq++;
2562 		ETHER_BPF_MTAP(ifp, m0);
2563 	}
2564 
2565 	if (enq > 0) {
2566 		bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2567 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2568 
2569 		/* kick Tx */
2570 		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2571 
2572 		/*
2573 		 * Set a timeout in case the chip goes out to lunch.
2574 		 */
2575 		sc->nfe_watchdog_timer = 5;
2576 	}
2577 
2578 	NFE_UNLOCK(sc);
2579 }
2580 
2581 
2582 static void
2583 nfe_watchdog(struct ifnet *ifp)
2584 {
2585 	struct nfe_softc *sc = ifp->if_softc;
2586 
2587 	if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer)
2588 		return;
2589 
2590 	/* Check if we've lost Tx completion interrupt. */
2591 	nfe_txeof(sc);
2592 	if (sc->txq.queued == 0) {
2593 		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
2594 		    "-- recovering\n");
2595 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2596 			taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task);
2597 		return;
2598 	}
2599 	/* Check if we've lost start Tx command. */
2600 	/* Check if we've lost the Tx start command. */
2601 	if (sc->nfe_force_tx <= 3) {
2602 		/*
2603 		 * If lost Tx start commands turn out to be the usual cause of
2604 		 * watchdog timeouts, this kick should move to nfe_txeof().
2605 		 */
2606 		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2607 		return;
2608 	}
2609 	sc->nfe_force_tx = 0;
2610 
2611 	if_printf(ifp, "watchdog timeout\n");
2612 
2613 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2614 	ifp->if_oerrors++;
2615 	nfe_init_locked(sc);
2616 }
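
/*
 * Recovery thus escalates in three steps: reap completions whose
 * interrupt was lost, re-issue the Tx kick up to three times in case
 * the start command was lost, and only then reset the interface.
 */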
2617 
2618 
2619 static void
2620 nfe_init(void *xsc)
2621 {
2622 	struct nfe_softc *sc = xsc;
2623 
2624 	NFE_LOCK(sc);
2625 	nfe_init_locked(sc);
2626 	NFE_UNLOCK(sc);
2627 }
2628 
2629 
2630 static void
2631 nfe_init_locked(void *xsc)
2632 {
2633 	struct nfe_softc *sc = xsc;
2634 	struct ifnet *ifp = sc->nfe_ifp;
2635 	struct mii_data *mii;
2636 	uint32_t val;
2637 	int error;
2638 
2639 	NFE_LOCK_ASSERT(sc);
2640 
2641 	mii = device_get_softc(sc->nfe_miibus);
2642 
2643 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2644 		return;
2645 
2646 	nfe_stop(ifp);
2647 
2648 	sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
2649 
2650 	nfe_init_tx_ring(sc, &sc->txq);
2651 	if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN))
2652 		error = nfe_init_jrx_ring(sc, &sc->jrxq);
2653 	else
2654 		error = nfe_init_rx_ring(sc, &sc->rxq);
2655 	if (error != 0) {
2656 		device_printf(sc->nfe_dev,
2657 		    "initialization failed: no memory for rx buffers\n");
2658 		nfe_stop(ifp);
2659 		return;
2660 	}
2661 
2662 	val = 0;
2663 	if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0)
2664 		val |= NFE_MAC_ADDR_INORDER;
2665 	NFE_WRITE(sc, NFE_TX_UNK, val);
2666 	NFE_WRITE(sc, NFE_STATUS, 0);
2667 
2668 	if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0)
2669 		NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE);
2670 
2671 	sc->rxtxctl = NFE_RXTX_BIT2;
2672 	if (sc->nfe_flags & NFE_40BIT_ADDR)
2673 		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
2674 	else if (sc->nfe_flags & NFE_JUMBO_SUP)
2675 		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
2676 
2677 	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2678 		sc->rxtxctl |= NFE_RXTX_RXCSUM;
2679 	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2680 		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;
2681 
2682 	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
2683 	DELAY(10);
2684 	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2685 
2686 	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2687 		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
2688 	else
2689 		NFE_WRITE(sc, NFE_VTAG_CTL, 0);
2690 
2691 	NFE_WRITE(sc, NFE_SETUP_R6, 0);
2692 
2693 	/* set MAC address */
2694 	nfe_set_macaddr(sc, IF_LLADDR(ifp));
2695 
2696 	/* tell MAC where rings are in memory */
2697 	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) {
2698 		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2699 		    NFE_ADDR_HI(sc->jrxq.jphysaddr));
2700 		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2701 		    NFE_ADDR_LO(sc->jrxq.jphysaddr));
2702 	} else {
2703 		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2704 		    NFE_ADDR_HI(sc->rxq.physaddr));
2705 		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2706 		    NFE_ADDR_LO(sc->rxq.physaddr));
2707 	}
2708 	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr));
2709 	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));
2710 
2711 	NFE_WRITE(sc, NFE_RING_SIZE,
2712 	    (NFE_RX_RING_COUNT - 1) << 16 |
2713 	    (NFE_TX_RING_COUNT - 1));
2714 
2715 	NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize);
2716 
2717 	/* force MAC to wakeup */
2718 	val = NFE_READ(sc, NFE_PWR_STATE);
2719 	if ((val & NFE_PWR_WAKEUP) == 0)
2720 		NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP);
2721 	DELAY(10);
2722 	val = NFE_READ(sc, NFE_PWR_STATE);
2723 	NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID);
2724 
2725 #if 1
2726 	/* configure interrupts coalescing/mitigation */
2727 	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
2728 #else
2729 	/* no interrupt mitigation: one interrupt per packet */
2730 	NFE_WRITE(sc, NFE_IMTIMER, 970);
2731 #endif
2732 
2733 	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100);
2734 	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
2735 	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
2736 
2737 	/* update MAC knowledge of PHY; generates an NFE_IRQ_LINK interrupt */
2738 	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
2739 
2740 	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
2741 	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);
2742 
2743 	sc->rxtxctl &= ~NFE_RXTX_BIT2;
2744 	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2745 	DELAY(10);
2746 	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
2747 
2748 	/* set Rx filter */
2749 	nfe_setmulti(sc);
2750 
2751 	/* enable Rx */
2752 	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
2753 
2754 	/* enable Tx */
2755 	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
2756 
2757 	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
2758 
2759 	/* Clear hardware stats. */
2760 	nfe_stats_clear(sc);
2761 
2762 #ifdef DEVICE_POLLING
2763 	if (ifp->if_capenable & IFCAP_POLLING)
2764 		nfe_disable_intr(sc);
2765 	else
2766 #endif
2767 	nfe_set_intr(sc);
2768 	nfe_enable_intr(sc); /* enable interrupts */
2769 
2770 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2771 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2772 
2773 	sc->nfe_link = 0;
2774 	mii_mediachg(mii);
2775 
2776 	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2777 }
2778 
2779 
2780 static void
2781 nfe_stop(struct ifnet *ifp)
2782 {
2783 	struct nfe_softc *sc = ifp->if_softc;
2784 	struct nfe_rx_ring *rx_ring;
2785 	struct nfe_jrx_ring *jrx_ring;
2786 	struct nfe_tx_ring *tx_ring;
2787 	struct nfe_rx_data *rdata;
2788 	struct nfe_tx_data *tdata;
2789 	int i;
2790 
2791 	NFE_LOCK_ASSERT(sc);
2792 
2793 	sc->nfe_watchdog_timer = 0;
2794 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2795 
2796 	callout_stop(&sc->nfe_stat_ch);
2797 
2798 	/* abort Tx */
2799 	NFE_WRITE(sc, NFE_TX_CTL, 0);
2800 
2801 	/* disable Rx */
2802 	NFE_WRITE(sc, NFE_RX_CTL, 0);
2803 
2804 	/* disable interrupts */
2805 	nfe_disable_intr(sc);
2806 
2807 	sc->nfe_link = 0;
2808 
2809 	/* free Rx and Tx mbufs still in the queues. */
2810 	rx_ring = &sc->rxq;
2811 	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
2812 		rdata = &rx_ring->data[i];
2813 		if (rdata->m != NULL) {
2814 			bus_dmamap_sync(rx_ring->rx_data_tag,
2815 			    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2816 			bus_dmamap_unload(rx_ring->rx_data_tag,
2817 			    rdata->rx_data_map);
2818 			m_freem(rdata->m);
2819 			rdata->m = NULL;
2820 		}
2821 	}
2822 
2823 	if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) {
2824 		jrx_ring = &sc->jrxq;
2825 		for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
2826 			rdata = &jrx_ring->jdata[i];
2827 			if (rdata->m != NULL) {
2828 				bus_dmamap_sync(jrx_ring->jrx_data_tag,
2829 				    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2830 				bus_dmamap_unload(jrx_ring->jrx_data_tag,
2831 				    rdata->rx_data_map);
2832 				m_freem(rdata->m);
2833 				rdata->m = NULL;
2834 			}
2835 		}
2836 	}
2837 
2838 	tx_ring = &sc->txq;
2839 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
2840 		tdata = &tx_ring->data[i];
2841 		if (tdata->m != NULL) {
2842 			bus_dmamap_sync(tx_ring->tx_data_tag,
2843 			    tdata->tx_data_map, BUS_DMASYNC_POSTWRITE);
2844 			bus_dmamap_unload(tx_ring->tx_data_tag,
2845 			    tdata->tx_data_map);
2846 			m_freem(tdata->m);
2847 			tdata->m = NULL;
2848 		}
2849 	}
2850 	/* Update hardware stats. */
2851 	nfe_stats_update(sc);
2852 }
2853 
2854 
2855 static int
2856 nfe_ifmedia_upd(struct ifnet *ifp)
2857 {
2858 	struct nfe_softc *sc = ifp->if_softc;
2859 	struct mii_data *mii;
2860 
2861 	NFE_LOCK(sc);
2862 	mii = device_get_softc(sc->nfe_miibus);
2863 	mii_mediachg(mii);
2864 	NFE_UNLOCK(sc);
2865 
2866 	return (0);
2867 }
2868 
2869 
2870 static void
2871 nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2872 {
2873 	struct nfe_softc *sc;
2874 	struct mii_data *mii;
2875 
2876 	sc = ifp->if_softc;
2877 
2878 	NFE_LOCK(sc);
2879 	mii = device_get_softc(sc->nfe_miibus);
2880 	mii_pollstat(mii);
2881 	NFE_UNLOCK(sc);
2882 
2883 	ifmr->ifm_active = mii->mii_media_active;
2884 	ifmr->ifm_status = mii->mii_media_status;
2885 }
2886 
2887 
2888 void
2889 nfe_tick(void *xsc)
2890 {
2891 	struct nfe_softc *sc;
2892 	struct mii_data *mii;
2893 	struct ifnet *ifp;
2894 
2895 	sc = (struct nfe_softc *)xsc;
2896 
2897 	NFE_LOCK_ASSERT(sc);
2898 
2899 	ifp = sc->nfe_ifp;
2900 
2901 	mii = device_get_softc(sc->nfe_miibus);
2902 	mii_tick(mii);
2903 	nfe_stats_update(sc);
2904 	nfe_watchdog(ifp);
2905 	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2906 }
2907 
2908 
2909 static int
2910 nfe_shutdown(device_t dev)
2911 {
2912 	struct nfe_softc *sc;
2913 	struct ifnet *ifp;
2914 
2915 	sc = device_get_softc(dev);
2916 
2917 	NFE_LOCK(sc);
2918 	ifp = sc->nfe_ifp;
2919 	nfe_stop(ifp);
2920 	/* nfe_reset(sc); */
2921 	NFE_UNLOCK(sc);
2922 
2923 	return (0);
2924 }
2925 
2926 
2927 static void
2928 nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
2929 {
2930 	uint32_t val;
2931 
2932 	if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
2933 		val = NFE_READ(sc, NFE_MACADDR_LO);
2934 		addr[0] = (val >> 8) & 0xff;
2935 		addr[1] = (val & 0xff);
2936 
2937 		val = NFE_READ(sc, NFE_MACADDR_HI);
2938 		addr[2] = (val >> 24) & 0xff;
2939 		addr[3] = (val >> 16) & 0xff;
2940 		addr[4] = (val >>  8) & 0xff;
2941 		addr[5] = (val & 0xff);
2942 	} else {
2943 		val = NFE_READ(sc, NFE_MACADDR_LO);
2944 		addr[5] = (val >> 8) & 0xff;
2945 		addr[4] = (val & 0xff);
2946 
2947 		val = NFE_READ(sc, NFE_MACADDR_HI);
2948 		addr[3] = (val >> 24) & 0xff;
2949 		addr[2] = (val >> 16) & 0xff;
2950 		addr[1] = (val >>  8) & 0xff;
2951 		addr[0] = (val & 0xff);
2952 	}
2953 }
2954 
2955 
2956 static void
2957 nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr)
2958 {
2959 
2960 	NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] <<  8 | addr[4]);
2961 	NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
2962 	    addr[1] << 8 | addr[0]);
2963 }
2964 
2965 
2966 /*
2967  * Map a single buffer address.
2968  */
2969 
2970 static void
2971 nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2972 {
2973 	struct nfe_dmamap_arg *ctx;
2974 
2975 	if (error != 0)
2976 		return;
2977 
2978 	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
2979 
2980 	ctx = (struct nfe_dmamap_arg *)arg;
2981 	ctx->nfe_busaddr = segs[0].ds_addr;
2982 }
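
/*
 * The callback above is the usual busdma idiom for fetching the bus
 * address of a one-segment allocation: bus_dmamap_load() may complete
 * synchronously or be deferred, so the address comes back through the
 * callback argument.  Typical use, sketched with hypothetical tag, map,
 * size and physaddr variables and no error handling:
 */
#if 0
	struct nfe_dmamap_arg ctx;
	void *ring;

	bus_dmamem_alloc(tag, &ring, BUS_DMA_WAITOK | BUS_DMA_ZERO, &map);
	ctx.nfe_busaddr = 0;
	bus_dmamap_load(tag, map, ring, size, nfe_dma_map_segs, &ctx,
	    BUS_DMA_NOWAIT);
	physaddr = ctx.nfe_busaddr;	/* bus address of the allocation */
#endif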
2983 
2984 
2985 static int
2986 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2987 {
2988 	int error, value;
2989 
2990 	if (!arg1)
2991 		return (EINVAL);
2992 	value = *(int *)arg1;
2993 	error = sysctl_handle_int(oidp, &value, 0, req);
2994 	if (error || !req->newptr)
2995 		return (error);
2996 	if (value < low || value > high)
2997 		return (EINVAL);
2998 	*(int *)arg1 = value;
2999 
3000 	return (0);
3001 }
3002 
3003 
3004 static int
3005 sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS)
3006 {
3007 
3008 	return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN,
3009 	    NFE_PROC_MAX));
3010 }
3011 
3012 
3013 #define	NFE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
3014 	    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
3015 #define	NFE_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
3016 	    SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
3017 
3018 static void
3019 nfe_sysctl_node(struct nfe_softc *sc)
3020 {
3021 	struct sysctl_ctx_list *ctx;
3022 	struct sysctl_oid_list *child, *parent;
3023 	struct sysctl_oid *tree;
3024 	struct nfe_hw_stats *stats;
3025 	int error;
3026 
3027 	stats = &sc->nfe_stats;
3028 	ctx = device_get_sysctl_ctx(sc->nfe_dev);
3029 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nfe_dev));
3030 	SYSCTL_ADD_PROC(ctx, child,
3031 	    OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
3032 	    &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I",
3033 	    "max number of Rx events to process");
3034 
3035 	sc->nfe_process_limit = NFE_PROC_DEFAULT;
3036 	error = resource_int_value(device_get_name(sc->nfe_dev),
3037 	    device_get_unit(sc->nfe_dev), "process_limit",
3038 	    &sc->nfe_process_limit);
3039 	if (error == 0) {
3040 		if (sc->nfe_process_limit < NFE_PROC_MIN ||
3041 		    sc->nfe_process_limit > NFE_PROC_MAX) {
3042 			device_printf(sc->nfe_dev,
3043 			    "process_limit value out of range; "
3044 			    "using default: %d\n", NFE_PROC_DEFAULT);
3045 			sc->nfe_process_limit = NFE_PROC_DEFAULT;
3046 		}
3047 	}
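
	/*
	 * The limit is therefore settable both at boot, via the device
	 * hint hint.nfe.<unit>.process_limit, and at runtime, via the
	 * sysctl dev.nfe.<unit>.process_limit.
	 */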
3048 
3049 	if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
3050 		return;
3051 
3052 	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
3053 	    NULL, "NFE statistics");
3054 	parent = SYSCTL_CHILDREN(tree);
3055 
3056 	/* Rx statistics. */
3057 	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
3058 	    NULL, "Rx MAC statistics");
3059 	child = SYSCTL_CHILDREN(tree);
3060 
3061 	NFE_SYSCTL_STAT_ADD32(ctx, child, "frame_errors",
3062 	    &stats->rx_frame_errors, "Framing Errors");
3063 	NFE_SYSCTL_STAT_ADD32(ctx, child, "extra_bytes",
3064 	    &stats->rx_extra_bytes, "Extra Bytes");
3065 	NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
3066 	    &stats->rx_late_cols, "Late Collisions");
3067 	NFE_SYSCTL_STAT_ADD32(ctx, child, "runts",
3068 	    &stats->rx_runts, "Runts");
3069 	NFE_SYSCTL_STAT_ADD32(ctx, child, "jumbos",
3070 	    &stats->rx_jumbos, "Jumbos");
3071 	NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_overuns",
3072 	    &stats->rx_fifo_overuns, "FIFO Overruns");
3073 	NFE_SYSCTL_STAT_ADD32(ctx, child, "crc_errors",
3074 	    &stats->rx_crc_errors, "CRC Errors");
3075 	NFE_SYSCTL_STAT_ADD32(ctx, child, "fae",
3076 	    &stats->rx_fae, "Frame Alignment Errors");
3077 	NFE_SYSCTL_STAT_ADD32(ctx, child, "len_errors",
3078 	    &stats->rx_len_errors, "Length Errors");
3079 	NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
3080 	    &stats->rx_unicast, "Unicast Frames");
3081 	NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
3082 	    &stats->rx_multicast, "Multicast Frames");
3083 	NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
3084 	    &stats->rx_broadcast, "Broadcast Frames");
3085 	if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3086 		NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
3087 		    &stats->rx_octets, "Octets");
3088 		NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
3089 		    &stats->rx_pause, "Pause frames");
3090 		NFE_SYSCTL_STAT_ADD32(ctx, child, "drops",
3091 		    &stats->rx_drops, "Drop frames");
3092 	}
3093 
3094 	/* Tx statistics. */
3095 	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
3096 	    NULL, "Tx MAC statistics");
3097 	child = SYSCTL_CHILDREN(tree);
3098 	NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
3099 	    &stats->tx_octets, "Octets");
3100 	NFE_SYSCTL_STAT_ADD32(ctx, child, "zero_rexmits",
3101 	    &stats->tx_zero_rexmits, "Zero Retransmits");
3102 	NFE_SYSCTL_STAT_ADD32(ctx, child, "one_rexmits",
3103 	    &stats->tx_one_rexmits, "One Retransmits");
3104 	NFE_SYSCTL_STAT_ADD32(ctx, child, "multi_rexmits",
3105 	    &stats->tx_multi_rexmits, "Multiple Retransmits");
3106 	NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
3107 	    &stats->tx_late_cols, "Late Collisions");
3108 	NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_underuns",
3109 	    &stats->tx_fifo_underuns, "FIFO Underruns");
3110 	NFE_SYSCTL_STAT_ADD32(ctx, child, "carrier_losts",
3111 	    &stats->tx_carrier_losts, "Carrier Lost Errors");
3112 	NFE_SYSCTL_STAT_ADD32(ctx, child, "excess_deferrals",
3113 	    &stats->tx_excess_deferals, "Excess Deferrals");
3114 	NFE_SYSCTL_STAT_ADD32(ctx, child, "retry_errors",
3115 	    &stats->tx_retry_errors, "Retry Errors");
3116 	if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3117 		NFE_SYSCTL_STAT_ADD32(ctx, child, "deferrals",
3118 		    &stats->tx_deferals, "Deferrals");
3119 		NFE_SYSCTL_STAT_ADD32(ctx, child, "frames",
3120 		    &stats->tx_frames, "Frames");
3121 		NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
3122 		    &stats->tx_pause, "Pause Frames");
3123 	}
3124 	if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3125 		NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
3126 		    &stats->tx_unicast, "Unicast Frames");
3127 		NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
3128 		    &stats->tx_multicast, "Multicast Frames");
3129 		NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
3130 		    &stats->tx_broadcast, "Broadcast Frames");
3131 	}
3132 }
3133 
3134 #undef NFE_SYSCTL_STAT_ADD32
3135 #undef NFE_SYSCTL_STAT_ADD64
3136 
3137 static void
3138 nfe_stats_clear(struct nfe_softc *sc)
3139 {
3140 	int i, mib_cnt;
3141 
3142 	if ((sc->nfe_flags & NFE_MIB_V1) != 0)
3143 		mib_cnt = NFE_NUM_MIB_STATV1;
3144 	else if ((sc->nfe_flags & (NFE_MIB_V2 | NFE_MIB_V3)) != 0)
3145 		mib_cnt = NFE_NUM_MIB_STATV2;
3146 	else
3147 		return;
3148 
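	/* The MIB counters are clear-on-read; read and discard each one. */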
3149 	for (i = 0; i < mib_cnt; i += sizeof(uint32_t))
3150 		NFE_READ(sc, NFE_TX_OCTET + i);
3151 
3152 	if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3153 		NFE_READ(sc, NFE_TX_UNICAST);
3154 		NFE_READ(sc, NFE_TX_MULTICAST);
3155 		NFE_READ(sc, NFE_TX_BROADCAST);
3156 	}
3157 }
3158 
3159 static void
3160 nfe_stats_update(struct nfe_softc *sc)
3161 {
3162 	struct nfe_hw_stats *stats;
3163 
3164 	NFE_LOCK_ASSERT(sc);
3165 
3166 	if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
3167 		return;
3168 
3169 	stats = &sc->nfe_stats;
3170 	stats->tx_octets += NFE_READ(sc, NFE_TX_OCTET);
3171 	stats->tx_zero_rexmits += NFE_READ(sc, NFE_TX_ZERO_REXMIT);
3172 	stats->tx_one_rexmits += NFE_READ(sc, NFE_TX_ONE_REXMIT);
3173 	stats->tx_multi_rexmits += NFE_READ(sc, NFE_TX_MULTI_REXMIT);
3174 	stats->tx_late_cols += NFE_READ(sc, NFE_TX_LATE_COL);
3175 	stats->tx_fifo_underuns += NFE_READ(sc, NFE_TX_FIFO_UNDERUN);
3176 	stats->tx_carrier_losts += NFE_READ(sc, NFE_TX_CARRIER_LOST);
3177 	stats->tx_excess_deferals += NFE_READ(sc, NFE_TX_EXCESS_DEFERRAL);
3178 	stats->tx_retry_errors += NFE_READ(sc, NFE_TX_RETRY_ERROR);
3179 	stats->rx_frame_errors += NFE_READ(sc, NFE_RX_FRAME_ERROR);
3180 	stats->rx_extra_bytes += NFE_READ(sc, NFE_RX_EXTRA_BYTES);
3181 	stats->rx_late_cols += NFE_READ(sc, NFE_RX_LATE_COL);
3182 	stats->rx_runts += NFE_READ(sc, NFE_RX_RUNT);
3183 	stats->rx_jumbos += NFE_READ(sc, NFE_RX_JUMBO);
3184 	stats->rx_fifo_overuns += NFE_READ(sc, NFE_RX_FIFO_OVERUN);
3185 	stats->rx_crc_errors += NFE_READ(sc, NFE_RX_CRC_ERROR);
3186 	stats->rx_fae += NFE_READ(sc, NFE_RX_FAE);
3187 	stats->rx_len_errors += NFE_READ(sc, NFE_RX_LEN_ERROR);
3188 	stats->rx_unicast += NFE_READ(sc, NFE_RX_UNICAST);
3189 	stats->rx_multicast += NFE_READ(sc, NFE_RX_MULTICAST);
3190 	stats->rx_broadcast += NFE_READ(sc, NFE_RX_BROADCAST);
3191 
3192 	if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3193 		stats->tx_deferals += NFE_READ(sc, NFE_TX_DEFERAL);
3194 		stats->tx_frames += NFE_READ(sc, NFE_TX_FRAME);
3195 		stats->rx_octets += NFE_READ(sc, NFE_RX_OCTET);
3196 		stats->tx_pause += NFE_READ(sc, NFE_TX_PAUSE);
3197 		stats->rx_pause += NFE_READ(sc, NFE_RX_PAUSE);
3198 		stats->rx_drops += NFE_READ(sc, NFE_RX_DROP);
3199 	}
3200 
3201 	if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3202 		stats->tx_unicast += NFE_READ(sc, NFE_TX_UNICAST);
3203 		stats->tx_multicast += NFE_READ(sc, NFE_TX_MULTICAST);
3204 		stats->tx_broadcast += NFE_READ(sc, NFE_TX_BROADCAST);
3205 	}
3206 }
3207