xref: /freebsd/sys/dev/nfe/if_nfe.c (revision acd3428b7d3e94cef0e1881c868cb4b131d4ff41)
1 /*	$OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $	*/
2 
3 /*-
4  * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
5  * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
6  * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
22 
23 #include <sys/cdefs.h>
24 __FBSDID("$FreeBSD$");
25 
26 /* Uncomment the following line to enable polling. */
27 /* #define	DEVICE_POLLING */
28 
29 #define	NFE_NO_JUMBO
30 #define	NFE_CSUM
31 #define	NFE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
32 #define	NVLAN 0
33 
34 #ifdef HAVE_KERNEL_OPTION_HEADERS
35 #include "opt_device_polling.h"
36 #endif
37 
38 #include <sys/param.h>
39 #include <sys/endian.h>
40 #include <sys/systm.h>
41 #include <sys/sockio.h>
42 #include <sys/mbuf.h>
43 #include <sys/malloc.h>
44 #include <sys/module.h>
45 #include <sys/kernel.h>
46 #include <sys/socket.h>
47 #include <sys/taskqueue.h>
48 
49 #include <net/if.h>
50 #include <net/if_arp.h>
51 #include <net/ethernet.h>
52 #include <net/if_dl.h>
53 #include <net/if_media.h>
54 #include <net/if_types.h>
55 #include <net/if_vlan_var.h>
56 
57 #include <net/bpf.h>
58 
59 #include <machine/bus.h>
60 #include <machine/resource.h>
61 #include <sys/bus.h>
62 #include <sys/rman.h>
63 
64 #include <dev/mii/mii.h>
65 #include <dev/mii/miivar.h>
66 
67 #include <dev/pci/pcireg.h>
68 #include <dev/pci/pcivar.h>
69 
70 #include <dev/nfe/if_nfereg.h>
71 #include <dev/nfe/if_nfevar.h>
72 
73 MODULE_DEPEND(nfe, pci, 1, 1, 1);
74 MODULE_DEPEND(nfe, ether, 1, 1, 1);
75 MODULE_DEPEND(nfe, miibus, 1, 1, 1);
76 #include "miibus_if.h"
77 
78 static int  nfe_probe(device_t);
79 static int  nfe_attach(device_t);
80 static int  nfe_detach(device_t);
81 static void nfe_shutdown(device_t);
82 static int  nfe_miibus_readreg(device_t, int, int);
83 static int  nfe_miibus_writereg(device_t, int, int, int);
84 static void nfe_miibus_statchg(device_t);
85 static int  nfe_ioctl(struct ifnet *, u_long, caddr_t);
86 static void nfe_intr(void *);
87 static void nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
88 static void nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
89 static void nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
90 static void nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
91 static void nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
92 static void nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
93 static void nfe_rxeof(struct nfe_softc *);
94 static void nfe_txeof(struct nfe_softc *);
95 static int  nfe_encap(struct nfe_softc *, struct mbuf *);
96 static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *);
97 static void nfe_jfree(void *, void *);
98 static int  nfe_jpool_alloc(struct nfe_softc *);
99 static void nfe_jpool_free(struct nfe_softc *);
100 static void nfe_setmulti(struct nfe_softc *);
101 static void nfe_start(struct ifnet *);
102 static void nfe_start_locked(struct ifnet *);
103 static void nfe_watchdog(struct ifnet *);
104 static void nfe_init(void *);
105 static void nfe_init_locked(void *);
106 static void nfe_stop(struct ifnet *, int);
107 static int  nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
108 static void nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
109 static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
110 static int  nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
111 static void nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
112 static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
113 static int  nfe_ifmedia_upd(struct ifnet *);
114 static int  nfe_ifmedia_upd_locked(struct ifnet *);
115 static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
116 static void nfe_tick(void *);
117 static void nfe_tick_locked(struct nfe_softc *);
118 static void nfe_get_macaddr(struct nfe_softc *, u_char *);
119 static void nfe_set_macaddr(struct nfe_softc *, u_char *);
120 static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int);
121 #ifdef DEVICE_POLLING
122 static void nfe_poll_locked(struct ifnet *, enum poll_cmd, int);
123 #endif
124 
125 #ifdef NFE_DEBUG
126 int nfedebug = 0;
127 #define	DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
128 #define	DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
129 #else
130 #define	DPRINTF(x)
131 #define	DPRINTFN(n,x)
132 #endif
133 
134 #define	NFE_LOCK(_sc)		mtx_lock(&(_sc)->nfe_mtx)
135 #define	NFE_UNLOCK(_sc)		mtx_unlock(&(_sc)->nfe_mtx)
136 #define	NFE_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)
137 
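/* letoh16() is the OpenBSD spelling; map it to the native le16toh(). */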
138 #define	letoh16(x) le16toh(x)
139 
140 #define	NV_RID		0x10
141 
142 static device_method_t nfe_methods[] = {
143 	/* Device interface */
144 	DEVMETHOD(device_probe,		nfe_probe),
145 	DEVMETHOD(device_attach,	nfe_attach),
146 	DEVMETHOD(device_detach,	nfe_detach),
147 	DEVMETHOD(device_shutdown,	nfe_shutdown),
148 
149 	/* bus interface */
150 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
151 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
152 
153 	/* MII interface */
154 	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
155 	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
156 	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),
157 
158 	{ 0, 0 }
159 };
160 
161 static driver_t nfe_driver = {
162 	"nfe",
163 	nfe_methods,
164 	sizeof(struct nfe_softc)
165 };
166 
167 static devclass_t nfe_devclass;
168 
169 DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0);
170 DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
171 
172 static struct nfe_type nfe_devs[] = {
173 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
174 	    "NVIDIA nForce MCP Networking Adapter"},
175 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
176 	    "NVIDIA nForce2 MCP2 Networking Adapter"},
177 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
178 	    "NVIDIA nForce2 400 MCP4 Networking Adapter"},
179 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
180 	    "NVIDIA nForce2 400 MCP5 Networking Adapter"},
181 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
182 	    "NVIDIA nForce3 MCP3 Networking Adapter"},
183 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
184 	    "NVIDIA nForce3 250 MCP6 Networking Adapter"},
185 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
186 	    "NVIDIA nForce3 MCP7 Networking Adapter"},
187 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
188 	    "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
189 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
190 	    "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
191 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
192 	    "NVIDIA nForce MCP04 Networking Adapter"},		// MCP10
193 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
194 	    "NVIDIA nForce MCP04 Networking Adapter"},		// MCP11
195 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
196 	    "NVIDIA nForce 430 MCP12 Networking Adapter"},
197 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
198 	    "NVIDIA nForce 430 MCP13 Networking Adapter"},
199 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
200 	    "NVIDIA nForce MCP55 Networking Adapter"},
201 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
202 	    "NVIDIA nForce MCP55 Networking Adapter"},
203 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
204 	    "NVIDIA nForce MCP61 Networking Adapter"},
205 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
206 	    "NVIDIA nForce MCP61 Networking Adapter"},
207 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
208 	    "NVIDIA nForce MCP61 Networking Adapter"},
209 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
210 	    "NVIDIA nForce MCP61 Networking Adapter"},
211 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
212 	    "NVIDIA nForce MCP65 Networking Adapter"},
213 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
214 	    "NVIDIA nForce MCP65 Networking Adapter"},
215 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
216 	    "NVIDIA nForce MCP65 Networking Adapter"},
217 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
218 	    "NVIDIA nForce MCP65 Networking Adapter"},
219 	{0, 0, NULL}
220 };
221 
222 
223 /* Probe for supported hardware IDs */
224 static int
225 nfe_probe(device_t dev)
226 {
227 	struct nfe_type *t;
228 
229 	t = nfe_devs;
230 	/* Check for matching PCI device IDs */
231 	while (t->name != NULL) {
232 		if ((pci_get_vendor(dev) == t->vid_id) &&
233 		    (pci_get_device(dev) == t->dev_id)) {
234 			device_set_desc(dev, t->name);
235 			return (0);
236 		}
237 		t++;
238 	}
239 
240 	return (ENXIO);
241 }
242 
243 
244 static int
245 nfe_attach(device_t dev)
246 {
247 	struct nfe_softc *sc;
248 	struct ifnet *ifp;
249 	int unit, error = 0, rid;
250 
251 	sc = device_get_softc(dev);
252 	unit = device_get_unit(dev);
253 	sc->nfe_dev = dev;
254 	sc->nfe_unit = unit;
255 
256 	mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
257 	    MTX_DEF | MTX_RECURSE);
258 	callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);
259 
260 	pci_enable_busmaster(dev);
261 
262 	rid = NV_RID;
263 	sc->nfe_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
264 	    0, ~0, 1, RF_ACTIVE);
265 
266 	if (sc->nfe_res == NULL) {
267 		printf ("nfe%d: couldn't map ports/memory\n", unit);
268 		error = ENXIO;
269 		goto fail;
270 	}
271 
272 	sc->nfe_memt = rman_get_bustag(sc->nfe_res);
273 	sc->nfe_memh = rman_get_bushandle(sc->nfe_res);
274 
275 	/* Allocate interrupt */
276 	rid = 0;
277 	sc->nfe_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
278 	    0, ~0, 1, RF_SHAREABLE | RF_ACTIVE);
279 
280 	if (sc->nfe_irq == NULL) {
281 		printf("nfe%d: couldn't map interrupt\n", unit);
282 		error = ENXIO;
283 		goto fail;
284 	}
285 
286 	nfe_get_macaddr(sc, sc->eaddr);
287 
288 	sc->nfe_flags = 0;
289 
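	/* Select chip-specific capability flags based on the PCI device ID. */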
290 	switch (pci_get_device(dev)) {
291 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
292 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
293 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
294 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
295 		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
296 		break;
297 	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
298 	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
299 		sc->nfe_flags |= NFE_40BIT_ADDR;
300 		break;
301 	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
302 	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
303 	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
304 	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
305 		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
306 		break;
307 	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
308 	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
309 		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
310 		    NFE_HW_VLAN;
311 		break;
312 	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
313 	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
314 	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
315 	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
316 		sc->nfe_flags |= NFE_40BIT_ADDR;
317 		break;
318 	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
319 	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
320 	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
321 	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
322 		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
323 		break;
324 	}
325 
326 #ifndef NFE_NO_JUMBO
327 	/* enable jumbo frames for adapters that support it */
328 	if (sc->nfe_flags & NFE_JUMBO_SUP)
329 		sc->nfe_flags |= NFE_USE_JUMBO;
330 #endif
331 
332 	/*
333 	 * Allocate the parent bus DMA tag appropriate for PCI.
334 	 */
335 #define	NFE_NSEG_NEW 32
336 	error = bus_dma_tag_create(NULL,	/* parent */
337 	    1, 0,				/* alignment, boundary */
338 	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
339 	    BUS_SPACE_MAXADDR,			/* highaddr */
340 	    NULL, NULL,				/* filter, filterarg */
341 	    MAXBSIZE, NFE_NSEG_NEW,		/* maxsize, nsegments */
342 	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
343 	    BUS_DMA_ALLOCNOW,			/* flags */
344 	    NULL, NULL,				/* lockfunc, lockarg */
345 	    &sc->nfe_parent_tag);
346 	if (error)
347 		goto fail;
348 
349 	/*
350 	 * Allocate Tx and Rx rings.
351 	 */
352 	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
353 		printf("nfe%d: could not allocate Tx ring\n", unit);
354 		error = ENXIO;
355 		goto fail;
356 	}
357 
358 	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
359 		printf("nfe%d: could not allocate Rx ring\n", unit);
360 		nfe_free_tx_ring(sc, &sc->txq);
361 		error = ENXIO;
362 		goto fail;
363 	}
364 
365 	ifp = sc->nfe_ifp = if_alloc(IFT_ETHER);
366 	if (ifp == NULL) {
367 		printf("nfe%d: can not if_alloc()\n", unit);
368 		error = ENOSPC;
369 		goto fail;
370 	}
371 
372 	ifp->if_softc = sc;
373 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
374 	ifp->if_mtu = ETHERMTU;
375 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
376 	ifp->if_ioctl = nfe_ioctl;
377 	ifp->if_start = nfe_start;
378 	/* ifp->if_hwassist = NFE_CSUM_FEATURES; */
379 	ifp->if_watchdog = nfe_watchdog;
380 	ifp->if_init = nfe_init;
381 	ifp->if_baudrate = IF_Gbps(1);
382 	ifp->if_snd.ifq_maxlen = NFE_IFQ_MAXLEN;
383 
384 	ifp->if_capabilities = IFCAP_VLAN_MTU;
385 #if NVLAN > 0
386 	if (sc->nfe_flags & NFE_HW_VLAN)
387 		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
388 #endif
389 #ifdef NFE_CSUM
390 	if (sc->nfe_flags & NFE_HW_CSUM) {
391 		ifp->if_capabilities |= IFCAP_HWCSUM;
392 	}
393 #endif
394 	ifp->if_capenable = ifp->if_capabilities;
395 
396 #ifdef DEVICE_POLLING
397 	ifp->if_capabilities |= IFCAP_POLLING;
398 #endif
399 
400 	/* Do MII setup */
401 	if (mii_phy_probe(dev, &sc->nfe_miibus, nfe_ifmedia_upd,
402 	    nfe_ifmedia_sts)) {
403 		printf("nfe%d: MII without any phy!\n", unit);
404 		error = ENXIO;
405 		goto fail;
406 	}
407 
408 	ether_ifattach(ifp, sc->eaddr);
409 
410 	error = bus_setup_intr(dev, sc->nfe_irq, INTR_TYPE_NET | INTR_MPSAFE,
411 	    nfe_intr, sc, &sc->nfe_intrhand);
412 
413 	if (error) {
414 		printf("nfe%d: couldn't set up irq\n", unit);
415 		ether_ifdetach(ifp);
416 		goto fail;
417 	}
418 
419 fail:
420 	if (error)
421 		nfe_detach(dev);
422 
423 	return (error);
424 }
425 
426 
427 static int
428 nfe_detach(device_t dev)
429 {
430 	struct nfe_softc *sc;
431 	struct ifnet *ifp;
432 	u_char eaddr[ETHER_ADDR_LEN];
433 	int i;
434 
435 	sc = device_get_softc(dev);
436 	KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
437 	ifp = sc->nfe_ifp;
438 
439 #ifdef DEVICE_POLLING
440 	if (ifp->if_capenable & IFCAP_POLLING)
441 		ether_poll_deregister(ifp);
442 #endif
443 
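	/* Write the station address back to the chip in reversed byte order. */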
444 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
445 		eaddr[i] = sc->eaddr[5 - i];
446 	}
447 	nfe_set_macaddr(sc, eaddr);
448 
449 	if (device_is_attached(dev)) {
450 		nfe_stop(ifp, 1);
451 		ifp->if_flags &= ~IFF_UP;
452 		callout_drain(&sc->nfe_stat_ch);
453 		ether_ifdetach(ifp);
454 	}
455 
456 	if (ifp)
457 		if_free(ifp);
458 	if (sc->nfe_miibus)
459 		device_delete_child(dev, sc->nfe_miibus);
460 	bus_generic_detach(dev);
461 
462 	if (sc->nfe_intrhand)
463 		bus_teardown_intr(dev, sc->nfe_irq, sc->nfe_intrhand);
464 	if (sc->nfe_irq)
465 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nfe_irq);
466 	if (sc->nfe_res)
467 		bus_release_resource(dev, SYS_RES_MEMORY, NV_RID, sc->nfe_res);
468 
469 	nfe_free_tx_ring(sc, &sc->txq);
470 	nfe_free_rx_ring(sc, &sc->rxq);
471 
472 	if (sc->nfe_parent_tag)
473 		bus_dma_tag_destroy(sc->nfe_parent_tag);
474 
475 	mtx_destroy(&sc->nfe_mtx);
476 
477 	return (0);
478 }
479 
480 
481 static void
482 nfe_miibus_statchg(device_t dev)
483 {
484 	struct nfe_softc *sc;
485 	struct mii_data *mii;
486 	u_int32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;
487 
488 	sc = device_get_softc(dev);
489 	mii = device_get_softc(sc->nfe_miibus);
490 
491 	phy = NFE_READ(sc, NFE_PHY_IFACE);
492 	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);
493 
494 	seed = NFE_READ(sc, NFE_RNDSEED);
495 	seed &= ~NFE_SEED_MASK;
496 
497 	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
498 		phy  |= NFE_PHY_HDX;	/* half-duplex */
499 		misc |= NFE_MISC1_HDX;
500 	}
501 
502 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
503 	case IFM_1000_T:	/* full-duplex only */
504 		link |= NFE_MEDIA_1000T;
505 		seed |= NFE_SEED_1000T;
506 		phy  |= NFE_PHY_1000T;
507 		break;
508 	case IFM_100_TX:
509 		link |= NFE_MEDIA_100TX;
510 		seed |= NFE_SEED_100TX;
511 		phy  |= NFE_PHY_100TX;
512 		break;
513 	case IFM_10_T:
514 		link |= NFE_MEDIA_10T;
515 		seed |= NFE_SEED_10T;
516 		break;
517 	}
518 
519 	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */
520 
521 	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
522 	NFE_WRITE(sc, NFE_MISC1, misc);
523 	NFE_WRITE(sc, NFE_LINKSPEED, link);
524 }
525 
526 
527 static int
528 nfe_miibus_readreg(device_t dev, int phy, int reg)
529 {
530 	struct nfe_softc *sc = device_get_softc(dev);
531 	u_int32_t val;
532 	int ntries;
533 
534 	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
535 
536 	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
537 		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
538 		DELAY(100);
539 	}
540 
541 	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);
542 
543 	for (ntries = 0; ntries < 1000; ntries++) {
544 		DELAY(100);
545 		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
546 			break;
547 	}
548 	if (ntries == 1000) {
549 		DPRINTFN(2, ("nfe%d: timeout waiting for PHY\n", sc->nfe_unit));
550 		return 0;
551 	}
552 
553 	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
554 		DPRINTFN(2, ("nfe%d: could not read PHY\n", sc->nfe_unit));
555 		return 0;
556 	}
557 
558 	val = NFE_READ(sc, NFE_PHY_DATA);
559 	if (val != 0xffffffff && val != 0)
560 		sc->mii_phyaddr = phy;
561 
562 	DPRINTFN(2, ("nfe%d: mii read phy %d reg 0x%x ret 0x%x\n",
563 	    sc->nfe_unit, phy, reg, val));
564 
565 	return val;
566 }
567 
568 
569 static int
570 nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
571 {
572 	struct nfe_softc *sc = device_get_softc(dev);
573 	u_int32_t ctl;
574 	int ntries;
575 
576 	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
577 
578 	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
579 		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
580 		DELAY(100);
581 	}
582 
583 	NFE_WRITE(sc, NFE_PHY_DATA, val);
584 	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
585 	NFE_WRITE(sc, NFE_PHY_CTL, ctl);
586 
587 	for (ntries = 0; ntries < 1000; ntries++) {
588 		DELAY(100);
589 		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
590 			break;
591 	}
592 #ifdef NFE_DEBUG
593 	if (nfedebug >= 2 && ntries == 1000)
594 		printf("could not write to PHY\n");
595 #endif
596 	return 0;
597 }
598 
599 
600 static int
601 nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
602 {
603 	struct nfe_desc32 *desc32;
604 	struct nfe_desc64 *desc64;
605 	struct nfe_rx_data *data;
606 	struct nfe_jbuf *jbuf;
607 	void **desc;
608 	bus_addr_t physaddr;
609 	int i, error, descsize;
610 
611 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
612 		desc = (void **)&ring->desc64;
613 		descsize = sizeof (struct nfe_desc64);
614 	} else {
615 		desc = (void **)&ring->desc32;
616 		descsize = sizeof (struct nfe_desc32);
617 	}
618 
619 	ring->cur = ring->next = 0;
620 	ring->bufsz = MCLBYTES;
621 
622 	error = bus_dma_tag_create(sc->nfe_parent_tag,
623 	   PAGE_SIZE, 0,			/* alignment, boundary */
624 	   BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
625 	   BUS_SPACE_MAXADDR,			/* highaddr */
626 	   NULL, NULL,				/* filter, filterarg */
627 	   NFE_RX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
628 	   NFE_RX_RING_COUNT * descsize,	/* maxsegsize */
629 	   BUS_DMA_ALLOCNOW,			/* flags */
630 	   NULL, NULL,				/* lockfunc, lockarg */
631 	   &ring->rx_desc_tag);
632 	if (error != 0) {
633 		printf("nfe%d: could not create desc DMA tag\n", sc->nfe_unit);
634 		goto fail;
635 	}
636 
637 	/* allocate memory for the descriptors */
638 	error = bus_dmamem_alloc(ring->rx_desc_tag, (void **)desc,
639 	    BUS_DMA_NOWAIT, &ring->rx_desc_map);
640 	if (error != 0) {
641 		printf("nfe%d: could not create desc DMA map\n", sc->nfe_unit);
642 		goto fail;
643 	}
644 
645 	/* map desc to device visible address space */
646 	error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, *desc,
647 	    NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs,
648 	    &ring->rx_desc_segs, BUS_DMA_NOWAIT);
649 	if (error != 0) {
650 		printf("nfe%d: could not load desc DMA map\n", sc->nfe_unit);
651 		goto fail;
652 	}
653 
654 	bzero(*desc, NFE_RX_RING_COUNT * descsize);
655 	ring->rx_desc_addr = ring->rx_desc_segs.ds_addr;
656 	ring->physaddr = ring->rx_desc_addr;
657 
658 	if (sc->nfe_flags & NFE_USE_JUMBO) {
659 		ring->bufsz = NFE_JBYTES;
660 		if ((error = nfe_jpool_alloc(sc)) != 0) {
661 			printf("nfe%d: could not allocate jumbo frames\n",
662 			    sc->nfe_unit);
663 			goto fail;
664 		}
665 	}
666 
667 	/*
668 	 * Pre-allocate Rx buffers and populate Rx ring.
669 	 */
670 	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
671 		data = &sc->rxq.data[i];
672 
673 		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
674 		if (data->m == NULL) {
675 			printf("nfe%d: could not allocate rx mbuf\n",
676 			    sc->nfe_unit);
677 			error = ENOMEM;
678 			goto fail;
679 		}
680 
681 		if (sc->nfe_flags & NFE_USE_JUMBO) {
682 			if ((jbuf = nfe_jalloc(sc)) == NULL) {
683 				printf("nfe%d: could not allocate jumbo buffer\n",
684 				    sc->nfe_unit);
685 				goto fail;
686 			}
687 			data->m->m_data = (void *)jbuf->buf;
688 			data->m->m_len = data->m->m_pkthdr.len = NFE_JBYTES;
689 			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, nfe_jfree,
690 			    (struct nfe_softc *)sc, 0, EXT_NET_DRV);
691 			/* m_adj(data->m, ETHER_ALIGN); */
692 			physaddr = jbuf->physaddr;
693 		} else {
694 			error = bus_dma_tag_create(sc->nfe_parent_tag,
695 			    ETHER_ALIGN, 0,	       /* alignment, boundary */
696 			    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
697 			    BUS_SPACE_MAXADDR,		/* highaddr */
698 			    NULL, NULL,		/* filter, filterarg */
699 			    MCLBYTES, 1,		/* maxsize, nsegments */
700 			    MCLBYTES,			/* maxsegsize */
701 			    BUS_DMA_ALLOCNOW,		/* flags */
702 			    NULL, NULL,		/* lockfunc, lockarg */
703 			    &data->rx_data_tag);
704 			if (error != 0) {
705 				printf("nfe%d: could not create DMA map\n",
706 				    sc->nfe_unit);
707 				goto fail;
708 			}
709 
710 			error = bus_dmamap_create(data->rx_data_tag, 0,
711 			    &data->rx_data_map);
712 			if (error != 0) {
713 				printf("nfe%d: could not allocate mbuf cluster\n",
714 				    sc->nfe_unit);
715 				goto fail;
716 			}
717 
718 			MCLGET(data->m, M_DONTWAIT);
719 			if (!(data->m->m_flags & M_EXT)) {
720 				error = ENOMEM;
721 				goto fail;
722 			}
723 
724 			error = bus_dmamap_load(data->rx_data_tag,
725 			    data->rx_data_map, mtod(data->m, void *), MCLBYTES,
726 			    nfe_dma_map_segs, &data->rx_data_segs,
727 			    BUS_DMA_NOWAIT);
728 			if (error != 0) {
729 				printf("nfe%d: could not load rx buf DMA map\n",
730 				    sc->nfe_unit);
731 				goto fail;
732 			}
733 
734 			data->rx_data_addr = data->rx_data_segs.ds_addr;
735 			physaddr = data->rx_data_addr;
736 
737 		}
738 
739 		if (sc->nfe_flags & NFE_40BIT_ADDR) {
740 			desc64 = &sc->rxq.desc64[i];
741 #if defined(__LP64__)
742 			desc64->physaddr[0] = htole32(physaddr >> 32);
743 #endif
744 			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
745 			desc64->length = htole16(sc->rxq.bufsz);
746 			desc64->flags = htole16(NFE_RX_READY);
747 		} else {
748 			desc32 = &sc->rxq.desc32[i];
749 			desc32->physaddr = htole32(physaddr);
750 			desc32->length = htole16(sc->rxq.bufsz);
751 			desc32->flags = htole16(NFE_RX_READY);
752 		}
753 
754 	}
755 
756 	bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
757 	    BUS_DMASYNC_PREWRITE);
758 
759 	return 0;
760 
761 fail:	nfe_free_rx_ring(sc, ring);
762 
763 	return error;
764 }
765 
766 
767 static int
768 nfe_jpool_alloc(struct nfe_softc *sc)
769 {
770 	struct nfe_rx_ring *ring = &sc->rxq;
771 	struct nfe_jbuf *jbuf;
772 	bus_addr_t physaddr;
773 	caddr_t buf;
774 	int i, error;
775 
776 	/*
777 	 * Allocate a big chunk of DMA'able memory.
778 	 */
779 	error = bus_dma_tag_create(sc->nfe_parent_tag,
780 	   PAGE_SIZE, 0,		/* alignment, boundary */
781 	   BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
782 	   BUS_SPACE_MAXADDR,		/* highaddr */
783 	   NULL, NULL,			/* filter, filterarg */
784 	   NFE_JPOOL_SIZE, 1,		/* maxsize, nsegments */
785 	   NFE_JPOOL_SIZE,		/* maxsegsize */
786 	   BUS_DMA_ALLOCNOW,		/* flags */
787 	   NULL, NULL,			/* lockfunc, lockarg */
788 	   &ring->rx_jumbo_tag);
789 	if (error != 0) {
790 		printf("nfe%d: could not create jumbo DMA tag\n", sc->nfe_unit);
791 		goto fail;
792 	}
793 
794 	error = bus_dmamem_alloc(ring->rx_jumbo_tag, (void **)&ring->jpool,
795 	    BUS_DMA_NOWAIT, &ring->rx_jumbo_map);
796 	if (error != 0) {
797 		printf("nfe%d: could not create jumbo DMA memory\n",
798 		    sc->nfe_unit);
799 		goto fail;
800 	}
801 
802 	error = bus_dmamap_load(ring->rx_jumbo_tag, ring->rx_jumbo_map,
803 	    ring->jpool, NFE_JPOOL_SIZE, nfe_dma_map_segs, &ring->rx_jumbo_segs,
804 	    BUS_DMA_NOWAIT);
805 	if (error != 0) {
806 		printf("nfe%d: could not load jumbo DMA map\n", sc->nfe_unit);
807 		goto fail;
808 	}
809 
810 	/* ..and split it into 9KB chunks */
811 	SLIST_INIT(&ring->jfreelist);
812 
813 	buf = ring->jpool;
814 	ring->rx_jumbo_addr = ring->rx_jumbo_segs.ds_addr;
815 	physaddr = ring->rx_jumbo_addr;
816 
817 	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
818 		jbuf = &ring->jbuf[i];
819 
820 		jbuf->buf = buf;
821 		jbuf->physaddr = physaddr;
822 
823 		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
824 
825 		buf += NFE_JBYTES;
826 		physaddr += NFE_JBYTES;
827 	}
828 
829 	return 0;
830 
831 fail:	nfe_jpool_free(sc);
832 	return error;
833 }
834 
835 
836 static void
837 nfe_jpool_free(struct nfe_softc *sc)
838 {
839 	struct nfe_rx_ring *ring = &sc->rxq;
840 
841 	if (ring->jpool != NULL) {
842 #if 0
843 		bus_dmamem_unmap(ring->rx_jumbo_tag, ring->jpool,
844 		    NFE_JPOOL_SIZE);
845 #endif
846 		bus_dmamem_free(ring->rx_jumbo_tag, &ring->rx_jumbo_segs,
847 		    ring->rx_jumbo_map);
848 	}
849 	if (ring->rx_jumbo_map != NULL) {
850 		bus_dmamap_sync(ring->rx_jumbo_tag, ring->rx_jumbo_map,
851 		    BUS_DMASYNC_POSTWRITE);
852 		bus_dmamap_unload(ring->rx_jumbo_tag, ring->rx_jumbo_map);
853 		bus_dmamap_destroy(ring->rx_jumbo_tag, ring->rx_jumbo_map);
854 	}
855 }
856 
857 
858 static struct nfe_jbuf *
859 nfe_jalloc(struct nfe_softc *sc)
860 {
861 	struct nfe_jbuf *jbuf;
862 
863 	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
864 	if (jbuf == NULL)
865 		return NULL;
866 	SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
867 	return jbuf;
868 }
869 
870 
871 /*
872  * This is called automatically by the network stack when the mbuf is freed.
873  * Caution must be taken that the NIC might be reset by the time the mbuf is
874  * freed.
875  */
876 static void
877 nfe_jfree(void *buf, void *arg)
878 {
879 	struct nfe_softc *sc = arg;
880 	struct nfe_jbuf *jbuf;
881 	int i;
882 
883 	/* find the jbuf from the base pointer */
884 	i = ((vm_offset_t)buf - (vm_offset_t)sc->rxq.jpool) / NFE_JBYTES;
885 	if (i < 0 || i >= NFE_JPOOL_COUNT) {
886 		printf("nfe%d: request to free a buffer (%p) not managed by us\n",
887 		    sc->nfe_unit, buf);
888 		return;
889 	}
890 	jbuf = &sc->rxq.jbuf[i];
891 
892 	/* ..and put it back in the free list */
893 	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
894 }
895 
896 
897 static void
898 nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
899 {
900 	int i;
901 
902 	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
903 		if (sc->nfe_flags & NFE_40BIT_ADDR) {
904 			ring->desc64[i].length = htole16(ring->bufsz);
905 			ring->desc64[i].flags = htole16(NFE_RX_READY);
906 		} else {
907 			ring->desc32[i].length = htole16(ring->bufsz);
908 			ring->desc32[i].flags = htole16(NFE_RX_READY);
909 		}
910 	}
911 
912 	bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
913 	    BUS_DMASYNC_PREWRITE);
914 
915 	ring->cur = ring->next = 0;
916 }
917 
918 
919 static void
920 nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
921 {
922 	struct nfe_rx_data *data;
923 	void *desc;
924 	int i, descsize;
925 
926 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
927 		desc = ring->desc64;
928 		descsize = sizeof (struct nfe_desc64);
929 	} else {
930 		desc = ring->desc32;
931 		descsize = sizeof (struct nfe_desc32);
932 	}
933 
934 	if (desc != NULL) {
935 		bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
936 		    BUS_DMASYNC_POSTWRITE);
937 		bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
938 		bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
939 		bus_dma_tag_destroy(ring->rx_desc_tag);
940 	}
941 
942 	if (sc->nfe_flags & NFE_USE_JUMBO) {
943 		nfe_jpool_free(sc);
944 	} else {
945 		for (i = 0; i < NFE_RX_RING_COUNT; i++) {
946 			data = &ring->data[i];
947 
948 			if (data->rx_data_map != NULL) {
949 				bus_dmamap_sync(data->rx_data_tag,
950 				    data->rx_data_map, BUS_DMASYNC_POSTREAD);
951 				bus_dmamap_unload(data->rx_data_tag,
952 				    data->rx_data_map);
953 				bus_dmamap_destroy(data->rx_data_tag,
954 				    data->rx_data_map);
955 				bus_dma_tag_destroy(data->rx_data_tag);
956 			}
957 
958 			if (data->m != NULL)
959 				m_freem(data->m);
960 		}
961 	}
962 }
963 
964 
965 static int
966 nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
967 {
968 	int i, error;
969 	void **desc;
970 	int descsize;
971 
972 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
973 		desc = (void **)&ring->desc64;
974 		descsize = sizeof (struct nfe_desc64);
975 	} else {
976 		desc = (void **)&ring->desc32;
977 		descsize = sizeof (struct nfe_desc32);
978 	}
979 
980 	ring->queued = 0;
981 	ring->cur = ring->next = 0;
982 
983 	error = bus_dma_tag_create(sc->nfe_parent_tag,
984 	   PAGE_SIZE, 0,			/* alignment, boundary */
985 	   BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
986 	   BUS_SPACE_MAXADDR,			/* highaddr */
987 	   NULL, NULL,				/* filter, filterarg */
988 	   NFE_TX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
989 	   NFE_TX_RING_COUNT * descsize,	/* maxsegsize */
990 	   BUS_DMA_ALLOCNOW,			/* flags */
991 	   NULL, NULL,				/* lockfunc, lockarg */
992 	   &ring->tx_desc_tag);
993 	if (error != 0) {
994 		printf("nfe%d: could not create desc DMA tag\n", sc->nfe_unit);
995 		goto fail;
996 	}
997 
998 	error = bus_dmamem_alloc(ring->tx_desc_tag, (void **)desc,
999 	    BUS_DMA_NOWAIT, &ring->tx_desc_map);
1000 	if (error != 0) {
1001 		printf("nfe%d: could not create desc DMA map\n", sc->nfe_unit);
1002 		goto fail;
1003 	}
1004 
1005 	error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, *desc,
1006 	    NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ring->tx_desc_segs,
1007 	    BUS_DMA_NOWAIT);
1008 	if (error != 0) {
1009 		printf("nfe%d: could not load desc DMA map\n", sc->nfe_unit);
1010 		goto fail;
1011 	}
1012 
1013 	bzero(*desc, NFE_TX_RING_COUNT * descsize);
1014 
1015 	ring->tx_desc_addr = ring->tx_desc_segs.ds_addr;
1016 	ring->physaddr = ring->tx_desc_addr;
1017 
1018 	error = bus_dma_tag_create(sc->nfe_parent_tag,
1019 	   ETHER_ALIGN, 0,
1020 	   BUS_SPACE_MAXADDR_32BIT,
1021 	   BUS_SPACE_MAXADDR,
1022 	   NULL, NULL,
1023 	   NFE_JBYTES, NFE_MAX_SCATTER,
1024 	   NFE_JBYTES,
1025 	   BUS_DMA_ALLOCNOW,
1026 	   NULL, NULL,
1027 	   &ring->tx_data_tag);
1028 	if (error != 0) {
1029 	  printf("nfe%d: could not create DMA tag\n", sc->nfe_unit);
1030 	  goto fail;
1031 	}
1032 
1033 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1034 		error = bus_dmamap_create(ring->tx_data_tag, 0,
1035 		    &ring->data[i].tx_data_map);
1036 		if (error != 0) {
1037 			printf("nfe%d: could not create DMA map\n",
1038 			    sc->nfe_unit);
1039 			goto fail;
1040 		}
1041 	}
1042 
1043 	return 0;
1044 
1045 fail:	nfe_free_tx_ring(sc, ring);
1046 	return error;
1047 }
1048 
1049 
1050 static void
1051 nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1052 {
1053 	struct nfe_tx_data *data;
1054 	int i;
1055 
1056 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1057 		if (sc->nfe_flags & NFE_40BIT_ADDR)
1058 			ring->desc64[i].flags = 0;
1059 		else
1060 			ring->desc32[i].flags = 0;
1061 
1062 		data = &ring->data[i];
1063 
1064 		if (data->m != NULL) {
1065 			bus_dmamap_sync(ring->tx_data_tag, data->active,
1066 			    BUS_DMASYNC_POSTWRITE);
1067 			bus_dmamap_unload(ring->tx_data_tag, data->active);
1068 			m_freem(data->m);
1069 			data->m = NULL;
1070 		}
1071 	}
1072 
1073 	bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1074 	    BUS_DMASYNC_PREWRITE);
1075 
1076 	ring->queued = 0;
1077 	ring->cur = ring->next = 0;
1078 }
1079 
1080 
1081 static void
1082 nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1083 {
1084 	struct nfe_tx_data *data;
1085 	void *desc;
1086 	int i, descsize;
1087 
1088 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1089 		desc = ring->desc64;
1090 		descsize = sizeof (struct nfe_desc64);
1091 	} else {
1092 		desc = ring->desc32;
1093 		descsize = sizeof (struct nfe_desc32);
1094 	}
1095 
1096 	if (desc != NULL) {
1097 		bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1098 		    BUS_DMASYNC_POSTWRITE);
1099 		bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
1100 		bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
1101 		bus_dma_tag_destroy(ring->tx_desc_tag);
1102 	}
1103 
1104 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1105 		data = &ring->data[i];
1106 
1107 		if (data->m != NULL) {
1108 			bus_dmamap_sync(ring->tx_data_tag, data->active,
1109 			    BUS_DMASYNC_POSTWRITE);
1110 			bus_dmamap_unload(ring->tx_data_tag, data->active);
1111 			m_freem(data->m);
1112 		}
1113 	}
1114 
1115 	/* ..and now actually destroy the DMA mappings */
1116 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1117 		data = &ring->data[i];
1118 		if (data->tx_data_map == NULL)
1119 			continue;
1120 		bus_dmamap_destroy(ring->tx_data_tag, data->tx_data_map);
1121 	}
1122 
1123 	bus_dma_tag_destroy(ring->tx_data_tag);
1124 }
1125 
1126 #ifdef DEVICE_POLLING
1127 static poll_handler_t nfe_poll;
1128 
1129 
1130 static void
1131 nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1132 {
1133 	struct  nfe_softc *sc = ifp->if_softc;
1134 
1135 	NFE_LOCK(sc);
1136 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1137 		nfe_poll_locked(ifp, cmd, count);
1138 	NFE_UNLOCK(sc);
1139 }
1140 
1141 
1142 static void
1143 nfe_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
1144 {
1145 	struct  nfe_softc *sc = ifp->if_softc;
1146 	u_int32_t r;
1147 
1148 	NFE_LOCK_ASSERT(sc);
1149 
1150 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1151 		return;
1152 	}
1153 
1154 	sc->rxcycles = count;
1155 	nfe_rxeof(sc);
1156 	nfe_txeof(sc);
1157 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1158 		nfe_start_locked(ifp);
1159 
1160 	if (cmd == POLL_AND_CHECK_STATUS) {
1161 		if ((r = NFE_READ(sc, NFE_IRQ_STATUS)) == 0) {
1162 			return;
1163 		}
1164 		NFE_WRITE(sc, NFE_IRQ_STATUS, r);
1165 
1166 		if (r & NFE_IRQ_LINK) {
1167 			NFE_READ(sc, NFE_PHY_STATUS);
1168 			NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1169 			DPRINTF(("nfe%d: link state changed\n", sc->nfe_unit));
1170 		}
1171 	}
1172 }
1173 #endif /* DEVICE_POLLING */
1174 
1175 
1176 static int
1177 nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1178 {
1179 	struct nfe_softc *sc = ifp->if_softc;
1180 	struct ifreq *ifr = (struct ifreq *) data;
1181 	struct mii_data *mii;
1182 	int error = 0;
1183 
1184 	switch (cmd) {
1185 	case SIOCSIFMTU:
1186 		if (ifr->ifr_mtu < ETHERMIN ||
1187 		    ((sc->nfe_flags & NFE_USE_JUMBO) &&
1188 		    ifr->ifr_mtu > ETHERMTU_JUMBO) ||
1189 		    (!(sc->nfe_flags & NFE_USE_JUMBO) &&
1190 		    ifr->ifr_mtu > ETHERMTU)) {
1191 			error = EINVAL;
1192 		} else if (ifp->if_mtu != ifr->ifr_mtu) {
1193 			ifp->if_mtu = ifr->ifr_mtu;
1194 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1195 			nfe_init(sc);
1196 		}
1197 		break;
1198 	case SIOCSIFFLAGS:
1199 		NFE_LOCK(sc);
1200 		if (ifp->if_flags & IFF_UP) {
1201 			/*
1202 			 * If only the PROMISC or ALLMULTI flag changes, then
1203 			 * don't do a full re-init of the chip, just update
1204 			 * the Rx filter.
1205 			 */
1206 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1207 			    ((ifp->if_flags ^ sc->nfe_if_flags) &
1208 			     (IFF_ALLMULTI | IFF_PROMISC)) != 0)
1209 				nfe_setmulti(sc);
1210 			else
1211 				nfe_init_locked(sc);
1212 		} else {
1213 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1214 				nfe_stop(ifp, 1);
1215 		}
1216 		sc->nfe_if_flags = ifp->if_flags;
1217 		NFE_UNLOCK(sc);
1218 		error = 0;
1219 		break;
1220 	case SIOCADDMULTI:
1221 	case SIOCDELMULTI:
1222 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1223 			NFE_LOCK(sc);
1224 			nfe_setmulti(sc);
1225 			NFE_UNLOCK(sc);
1226 			error = 0;
1227 		}
1228 		break;
1229 	case SIOCSIFMEDIA:
1230 	case SIOCGIFMEDIA:
1231 		mii = device_get_softc(sc->nfe_miibus);
1232 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1233 		break;
1234 	case SIOCSIFCAP:
1235 	{
1236 		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1237 #ifdef DEVICE_POLLING
1238 		if (mask & IFCAP_POLLING) {
1239 			if (ifr->ifr_reqcap & IFCAP_POLLING) {
1240 				error = ether_poll_register(nfe_poll, ifp);
1241 				if (error)
1242 					return(error);
1243 				NFE_LOCK(sc);
1244 				NFE_WRITE(sc, NFE_IRQ_MASK, 0);
1245 				ifp->if_capenable |= IFCAP_POLLING;
1246 				NFE_UNLOCK(sc);
1247 			} else {
1248 				error = ether_poll_deregister(ifp);
1249 				/* Enable interrupt even in error case */
1250 				NFE_LOCK(sc);
1251 				NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
1252 				ifp->if_capenable &= ~IFCAP_POLLING;
1253 				NFE_UNLOCK(sc);
1254 			}
1255 		}
1256 #endif /* DEVICE_POLLING */
1257 		if (mask & IFCAP_HWCSUM) {
1258 			ifp->if_capenable ^= IFCAP_HWCSUM;
1259 			if (IFCAP_HWCSUM & ifp->if_capenable &&
1260 			    IFCAP_HWCSUM & ifp->if_capabilities)
1261 				ifp->if_hwassist = NFE_CSUM_FEATURES;
1262 			else
1263 				ifp->if_hwassist = 0;
1264 		}
1265 	}
1266 		break;
1267 
1268 	default:
1269 		error = ether_ioctl(ifp, cmd, data);
1270 		break;
1271 	}
1272 
1273 	return error;
1274 }
1275 
1276 
1277 static void
1278 nfe_intr(void *arg)
1279 {
1280 	struct nfe_softc *sc = arg;
1281 	struct ifnet *ifp = sc->nfe_ifp;
1282 	u_int32_t r;
1283 
1284 	NFE_LOCK(sc);
1285 
1286 #ifdef DEVICE_POLLING
1287 	if (ifp->if_capenable & IFCAP_POLLING) {
1288 		NFE_UNLOCK(sc);
1289 		return;
1290 	}
1291 #endif
1292 
1293 	if ((r = NFE_READ(sc, NFE_IRQ_STATUS)) == 0) {
1294 		NFE_UNLOCK(sc);
1295 		return;	/* not for us */
1296 	}
1297 	NFE_WRITE(sc, NFE_IRQ_STATUS, r);
1298 
1299 	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));
1300 
1301 	NFE_WRITE(sc, NFE_IRQ_MASK, 0);
1302 
1303 	if (r & NFE_IRQ_LINK) {
1304 		NFE_READ(sc, NFE_PHY_STATUS);
1305 		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1306 		DPRINTF(("nfe%d: link state changed\n", sc->nfe_unit));
1307 	}
1308 
1309 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1310 		/* check Rx ring */
1311 		nfe_rxeof(sc);
1312 		/* check Tx ring */
1313 		nfe_txeof(sc);
1314 	}
1315 
1316 	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
1317 
1318 	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1319 	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1320 		nfe_start_locked(ifp);
1321 
1322 	NFE_UNLOCK(sc);
1323 
1324 	return;
1325 }
1326 
1327 
1328 static void
1329 nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
1330 {
1331 
1332 	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
1333 }
1334 
1335 
1336 static void
1337 nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
1338 {
1339 
1340 	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
1341 }
1342 
1343 
1344 static void
1345 nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
1346 {
1347 
1348 	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
1349 }
1350 
1351 
1352 static void
1353 nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
1354 {
1355 
1356 	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
1357 }
1358 
1359 
1360 static void
1361 nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
1362 {
1363 
1364 	bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, ops);
1365 }
1366 
1367 
1368 static void
1369 nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
1370 {
1371 
1372 	bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, ops);
1373 }
1374 
1375 
1376 static void
1377 nfe_rxeof(struct nfe_softc *sc)
1378 {
1379 	struct ifnet *ifp = sc->nfe_ifp;
1380 	struct nfe_desc32 *desc32=NULL;
1381 	struct nfe_desc64 *desc64=NULL;
1382 	struct nfe_rx_data *data;
1383 	struct nfe_jbuf *jbuf;
1384 	struct mbuf *m, *mnew;
1385 	bus_addr_t physaddr;
1386 	u_int16_t flags;
1387 	int error, len;
1388 #if NVLAN > 1
1389 	u_int16_t vlan_tag = 0;
1390 	int have_tag = 0;
1391 #endif
1392 
1393 	NFE_LOCK_ASSERT(sc);
1394 
1395 	for (;;) {
1396 
1397 #ifdef DEVICE_POLLING
1398 		if (ifp->if_capenable & IFCAP_POLLING) {
1399 			if (sc->rxcycles <= 0)
1400 				break;
1401 			sc->rxcycles--;
1402 		}
1403 #endif
1404 
1405 		data = &sc->rxq.data[sc->rxq.cur];
1406 
1407 		if (sc->nfe_flags & NFE_40BIT_ADDR) {
1408 			desc64 = &sc->rxq.desc64[sc->rxq.cur];
1409 			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);
1410 
1411 			flags = letoh16(desc64->flags);
1412 			len = letoh16(desc64->length) & 0x3fff;
1413 
1414 #if NVLAN > 1
1415 			if (flags & NFE_TX_VLAN_TAG) {
1416 				have_tag = 1;
1417 				vlan_tag = desc64->vtag;
1418 			}
1419 #endif
1420 
1421 		} else {
1422 			desc32 = &sc->rxq.desc32[sc->rxq.cur];
1423 			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);
1424 
1425 			flags = letoh16(desc32->flags);
1426 			len = letoh16(desc32->length) & 0x3fff;
1427 		}
1428 
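		/* Stop at the first descriptor the chip has not filled yet. */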
1429 		if (flags & NFE_RX_READY)
1430 			break;
1431 
1432 		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
1433 			if (!(flags & NFE_RX_VALID_V1))
1434 				goto skip;
1435 			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
1436 				flags &= ~NFE_RX_ERROR;
1437 				len--;	/* fix buffer length */
1438 			}
1439 		} else {
1440 			if (!(flags & NFE_RX_VALID_V2))
1441 				goto skip;
1442 
1443 			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
1444 				flags &= ~NFE_RX_ERROR;
1445 				len--;	/* fix buffer length */
1446 			}
1447 		}
1448 
1449 		if (flags & NFE_RX_ERROR) {
1450 			ifp->if_ierrors++;
1451 			goto skip;
1452 		}
1453 
1454 		/*
1455 		 * Try to allocate a new mbuf for this ring element and load
1456 		 * it before processing the current mbuf. If the ring element
1457 		 * cannot be loaded, drop the received packet and reuse the
1458 		 * old mbuf. In the unlikely case that the old mbuf can't be
1459 		 * reloaded either, explicitly panic.
1460 		 */
1461 		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
1462 		if (mnew == NULL) {
1463 			ifp->if_ierrors++;
1464 			goto skip;
1465 		}
1466 
1467 		if (sc->nfe_flags & NFE_USE_JUMBO) {
1468 			if ((jbuf = nfe_jalloc(sc)) == NULL) {
1469 				m_freem(mnew);
1470 				ifp->if_ierrors++;
1471 				goto skip;
1472 			}
1473 			mnew->m_data = (void *)jbuf->buf;
1474 			mnew->m_len = mnew->m_pkthdr.len = NFE_JBYTES;
1475 			MEXTADD(mnew, jbuf->buf, NFE_JBYTES, nfe_jfree,
1476 			    (struct nfe_softc *)sc, 0 , EXT_NET_DRV);
1477 
1478 			bus_dmamap_sync(sc->rxq.rx_jumbo_tag,
1479 			    sc->rxq.rx_jumbo_map, BUS_DMASYNC_POSTREAD);
1480 			physaddr = jbuf->physaddr;
1481 		} else {
1482 			MCLGET(mnew, M_DONTWAIT);
1483 			if (!(mnew->m_flags & M_EXT)) {
1484 				m_freem(mnew);
1485 				ifp->if_ierrors++;
1486 				goto skip;
1487 			}
1488 
1489 			bus_dmamap_sync(data->rx_data_tag, data->rx_data_map,
1490 			    BUS_DMASYNC_POSTREAD);
1491 			bus_dmamap_unload(data->rx_data_tag, data->rx_data_map);
1492 			error = bus_dmamap_load(data->rx_data_tag,
1493 			    data->rx_data_map, mtod(mnew, void *), MCLBYTES,
1494 			    nfe_dma_map_segs, &data->rx_data_segs,
1495 			    BUS_DMA_NOWAIT);
1496 			if (error != 0) {
1497 				m_freem(mnew);
1498 
1499 				/* try to reload the old mbuf */
1500 				error = bus_dmamap_load(data->rx_data_tag,
1501 				    data->rx_data_map, mtod(data->m, void *),
1502 				    MCLBYTES, nfe_dma_map_segs,
1503 				    &data->rx_data_segs, BUS_DMA_NOWAIT);
1504 				if (error != 0) {
1505 					/* very unlikely that it will fail.. */
1506 					panic("nfe%d: could not load old rx mbuf",
1507 					    sc->nfe_unit);
1508 				}
1509 				ifp->if_ierrors++;
1510 				goto skip;
1511 			}
1512 			data->rx_data_addr = data->rx_data_segs.ds_addr;
1513 			physaddr = data->rx_data_addr;
1514 		}
1515 
1516 		/*
1517 		 * New mbuf successfully loaded, update Rx ring and continue
1518 		 * processing.
1519 		 */
1520 		m = data->m;
1521 		data->m = mnew;
1522 
1523 		/* finalize mbuf */
1524 		m->m_pkthdr.len = m->m_len = len;
1525 		m->m_pkthdr.rcvif = ifp;
1526 
1527 
1528 #if defined(NFE_CSUM)
1529 		if ((sc->nfe_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK)) {
1530 			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1531 			if (flags & NFE_RX_IP_CSUMOK_V2) {
1532 				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1533 			}
1534 			if (flags & NFE_RX_UDP_CSUMOK_V2 ||
1535 			    flags & NFE_RX_TCP_CSUMOK_V2) {
1536 				m->m_pkthdr.csum_flags |=
1537 				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1538 				m->m_pkthdr.csum_data = 0xffff;
1539 			}
1540 		}
1541 #endif
1542 
1543 #if NVLAN > 1
1544 		if (have_tag) {
1545 			m->m_pkthdr.ether_vtag = vlan_tag;
1546 			m->m_flags |= M_VLANTAG;
1547 		}
1548 #endif
1549 
1550 		ifp->if_ipackets++;
1551 
1552 		NFE_UNLOCK(sc);
1553 		(*ifp->if_input)(ifp, m);
1554 		NFE_LOCK(sc);
1555 
1556 		/* update mapping address in h/w descriptor */
1557 		if (sc->nfe_flags & NFE_40BIT_ADDR) {
1558 #if defined(__LP64__)
1559 			desc64->physaddr[0] = htole32(physaddr >> 32);
1560 #endif
1561 			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
1562 		} else {
1563 			desc32->physaddr = htole32(physaddr);
1564 		}
1565 
1566 skip:		if (sc->nfe_flags & NFE_40BIT_ADDR) {
1567 			desc64->length = htole16(sc->rxq.bufsz);
1568 			desc64->flags = htole16(NFE_RX_READY);
1569 
1570 			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
1571 		} else {
1572 			desc32->length = htole16(sc->rxq.bufsz);
1573 			desc32->flags = htole16(NFE_RX_READY);
1574 
1575 			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
1576 		}
1577 
1578 		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
1579 	}
1580 }
1581 
1582 
1583 static void
1584 nfe_txeof(struct nfe_softc *sc)
1585 {
1586 	struct ifnet *ifp = sc->nfe_ifp;
1587 	struct nfe_desc32 *desc32;
1588 	struct nfe_desc64 *desc64;
1589 	struct nfe_tx_data *data = NULL;
1590 	u_int16_t flags;
1591 
1592 	NFE_LOCK_ASSERT(sc);
1593 
1594 	while (sc->txq.next != sc->txq.cur) {
1595 		if (sc->nfe_flags & NFE_40BIT_ADDR) {
1596 			desc64 = &sc->txq.desc64[sc->txq.next];
1597 			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);
1598 
1599 			flags = letoh16(desc64->flags);
1600 		} else {
1601 			desc32 = &sc->txq.desc32[sc->txq.next];
1602 			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);
1603 
1604 			flags = letoh16(desc32->flags);
1605 		}
1606 
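		/* Stop at the first descriptor the chip has not completed yet. */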
1607 		if (flags & NFE_TX_VALID)
1608 			break;
1609 
1610 		data = &sc->txq.data[sc->txq.next];
1611 
1612 		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
1613 			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
1614 				goto skip;
1615 
1616 			if ((flags & NFE_TX_ERROR_V1) != 0) {
1617 				printf("nfe%d: tx v1 error 0x%4b\n",
1618 				    sc->nfe_unit, flags, NFE_V1_TXERR);
1619 
1620 				ifp->if_oerrors++;
1621 			} else
1622 				ifp->if_opackets++;
1623 		} else {
1624 			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
1625 				goto skip;
1626 
1627 			if ((flags & NFE_TX_ERROR_V2) != 0) {
1628 				printf("nfe%d: tx v1 error 0x%4b\n",
1629 				    sc->nfe_unit, flags, NFE_V2_TXERR);
1630 
1631 				ifp->if_oerrors++;
1632 			} else
1633 				ifp->if_opackets++;
1634 		}
1635 
1636 		if (data->m == NULL) {	/* should not get there */
1637 			printf("nfe%d: last fragment bit w/o associated mbuf!\n",
1638 			    sc->nfe_unit);
1639 			goto skip;
1640 		}
1641 
1642 		/* last fragment of the mbuf chain transmitted */
1643 		bus_dmamap_sync(sc->txq.tx_data_tag, data->active,
1644 		    BUS_DMASYNC_POSTWRITE);
1645 		bus_dmamap_unload(sc->txq.tx_data_tag, data->active);
1646 		m_freem(data->m);
1647 		data->m = NULL;
1648 
1649 		ifp->if_timer = 0;
1650 
1651 skip:		sc->txq.queued--;
1652 		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
1653 	}
1654 
1655 	if (data != NULL) {	/* at least one slot freed */
1656 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1657 		nfe_start_locked(ifp);
1658 	}
1659 }
1660 
1661 
1662 static int
1663 nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
1664 {
1665 	struct nfe_desc32 *desc32=NULL;
1666 	struct nfe_desc64 *desc64=NULL;
1667 	struct nfe_tx_data *data=NULL;
1668 	bus_dmamap_t map;
1669 	bus_dma_segment_t segs[NFE_MAX_SCATTER];
1670 	int error, i, nsegs;
1671 	u_int16_t flags = NFE_TX_VALID;
1672 
1673 	map = sc->txq.data[sc->txq.cur].tx_data_map;
1674 
1675 	error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, m0, segs,
1676 	    &nsegs, BUS_DMA_NOWAIT);
1677 
1678 	if (error != 0) {
1679 		printf("nfe%d: could not map mbuf (error %d)\n", sc->nfe_unit,
1680 		    error);
1681 		return error;
1682 	}
1683 
1684 	if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 1) {
1685 		bus_dmamap_unload(sc->txq.tx_data_tag, map);
1686 		return ENOBUFS;
1687 	}
1688 
1689 
1690 #ifdef NFE_CSUM
1691 	if (m0->m_pkthdr.csum_flags & CSUM_IP)
1692 		flags |= NFE_TX_IP_CSUM;
1693 	if (m0->m_pkthdr.csum_flags & CSUM_TCP)
1694 		flags |= NFE_TX_TCP_CSUM;
1695 	if (m0->m_pkthdr.csum_flags & CSUM_UDP)
1696 		flags |= NFE_TX_TCP_CSUM;
1697 #endif
1698 
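	/* One Tx descriptor per DMA segment; length is programmed as size minus one. */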
1699 	for (i = 0; i < nsegs; i++) {
1700 		data = &sc->txq.data[sc->txq.cur];
1701 
1702 		if (sc->nfe_flags & NFE_40BIT_ADDR) {
1703 			desc64 = &sc->txq.desc64[sc->txq.cur];
1704 #if defined(__LP64__)
1705 			desc64->physaddr[0] = htole32(segs[i].ds_addr >> 32);
1706 #endif
1707 			desc64->physaddr[1] = htole32(segs[i].ds_addr &
1708 			    0xffffffff);
1709 			desc64->length = htole16(segs[i].ds_len - 1);
1710 			desc64->flags = htole16(flags);
1711 #if NVLAN > 0
1712 			if (m0->m_flags & M_VLANTAG)
1713 				desc64->vtag = htole32(NFE_TX_VTAG |
1714 				    m0->m_pkthdr.ether_vtag);
1715 #endif
1716 		} else {
1717 			desc32 = &sc->txq.desc32[sc->txq.cur];
1718 
1719 			desc32->physaddr = htole32(segs[i].ds_addr);
1720 			desc32->length = htole16(segs[i].ds_len - 1);
1721 			desc32->flags = htole16(flags);
1722 		}
1723 
1724 		/* csum flags and vtag belong to the first fragment only */
1725 		if (nsegs > 1) {
1726 			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
1727 		}
1728 
1729 		sc->txq.queued++;
1730 		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
1731 	}
1732 
1733 	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
1734 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1735 		flags |= NFE_TX_LASTFRAG_V2;
1736 		desc64->flags = htole16(flags);
1737 	} else {
1738 		if (sc->nfe_flags & NFE_JUMBO_SUP)
1739 			flags |= NFE_TX_LASTFRAG_V2;
1740 		else
1741 			flags |= NFE_TX_LASTFRAG_V1;
1742 		desc32->flags = htole16(flags);
1743 	}
1744 
1745 	data->m = m0;
1746 	data->active = map;
1747 	data->nsegs = nsegs;
1748 
1749 	bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);
1750 
1751 	return 0;
1752 }
1753 
1754 
1755 static void
1756 nfe_setmulti(struct nfe_softc *sc)
1757 {
1758 	struct ifnet *ifp = sc->nfe_ifp;
1759 	struct ifmultiaddr *ifma;
1760 	int i;
1761 	u_int32_t filter = NFE_RXFILTER_MAGIC;
1762 	u_int8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
1763 	u_int8_t etherbroadcastaddr[ETHER_ADDR_LEN] = {
1764 		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
1765 	};
1766 
1767 	NFE_LOCK_ASSERT(sc);
1768 
1769 	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
1770 		bzero(addr, ETHER_ADDR_LEN);
1771 		bzero(mask, ETHER_ADDR_LEN);
1772 		goto done;
1773 	}
1774 
1775 	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
1776 	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
1777 
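	/*
	 * addr keeps the bits that are set in every multicast address;
	 * mask ends up marking the bit positions on which all of them agree.
	 */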
1778 	IF_ADDR_LOCK(ifp);
1779 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1780 		u_char *addrp;
1781 
1782 		if (ifma->ifma_addr->sa_family != AF_LINK)
1783 			continue;
1784 
1785 		addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
1786 		for (i = 0; i < ETHER_ADDR_LEN; i++) {
1787 			u_int8_t mcaddr = addrp[i];
1788 			addr[i] &= mcaddr;
1789 			mask[i] &= ~mcaddr;
1790 		}
1791 	}
1792 	IF_ADDR_UNLOCK(ifp);
1793 
1794 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
1795 		mask[i] |= addr[i];
1796 	}
1797 
1798 done:
1799 	addr[0] |= 0x01;	/* make sure multicast bit is set */
1800 
1801 	NFE_WRITE(sc, NFE_MULTIADDR_HI,
1802 	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1803 	NFE_WRITE(sc, NFE_MULTIADDR_LO,
1804 	    addr[5] <<  8 | addr[4]);
1805 	NFE_WRITE(sc, NFE_MULTIMASK_HI,
1806 	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
1807 	NFE_WRITE(sc, NFE_MULTIMASK_LO,
1808 	    mask[5] <<  8 | mask[4]);
1809 
1810 	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
1811 	NFE_WRITE(sc, NFE_RXFILTER, filter);
1812 }
1813 
1814 
1815 static void
1816 nfe_start(struct ifnet *ifp)
1817 {
1818 	struct nfe_softc *sc;
1819 
1820 	sc = ifp->if_softc;
1821 	NFE_LOCK(sc);
1822 	nfe_start_locked(ifp);
1823 	NFE_UNLOCK(sc);
1824 }
1825 
1826 
1827 static void
1828 nfe_start_locked(struct ifnet *ifp)
1829 {
1830 	struct nfe_softc *sc = ifp->if_softc;
1831 	struct mbuf *m0;
1832 	int old = sc->txq.cur;
1833 
1834 	if (!sc->nfe_link || ifp->if_drv_flags & IFF_DRV_OACTIVE) {
1835 		return;
1836 	}
1837 
1838 	for (;;) {
1839 		IFQ_POLL(&ifp->if_snd, m0);
1840 		if (m0 == NULL)
1841 			break;
1842 
1843 		if (nfe_encap(sc, m0) != 0) {
1844 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1845 			break;
1846 		}
1847 
1848 		/* packet put in h/w queue, remove from s/w queue */
1849 		IFQ_DEQUEUE(&ifp->if_snd, m0);
1850 
1851 		BPF_MTAP(ifp, m0);
1852 	}
1853 	if (sc->txq.cur == old)	{ /* nothing sent */
1854 		return;
1855 	}
1856 
1857 	if (sc->nfe_flags & NFE_40BIT_ADDR)
1858 		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
1859 	else
1860 		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
1861 
1862 	/* kick Tx */
1863 	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
1864 
1865 	/*
1866 	 * Set a timeout in case the chip goes out to lunch.
1867 	 */
1868 	ifp->if_timer = 5;
1869 
1870 	return;
1871 }
1872 
1873 
1874 static void
1875 nfe_watchdog(struct ifnet *ifp)
1876 {
1877 	struct nfe_softc *sc = ifp->if_softc;
1878 
1879 	printf("nfe%d: watchdog timeout\n", sc->nfe_unit);
1880 
1881 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1882 	nfe_init(sc);
1883 	ifp->if_oerrors++;
1884 
1885 	return;
1886 }
1887 
1888 
1889 static void
1890 nfe_init(void *xsc)
1891 {
1892 	struct nfe_softc *sc = xsc;
1893 
1894 	NFE_LOCK(sc);
1895 	nfe_init_locked(sc);
1896 	NFE_UNLOCK(sc);
1897 
1898 	return;
1899 }
1900 
1901 
1902 static void
1903 nfe_init_locked(void *xsc)
1904 {
1905 	struct nfe_softc *sc = xsc;
1906 	struct ifnet *ifp = sc->nfe_ifp;
1907 	struct mii_data *mii;
1908 	u_int32_t tmp;
1909 
1910 	NFE_LOCK_ASSERT(sc);
1911 
1912 	mii = device_get_softc(sc->nfe_miibus);
1913 
1914 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1915 		return;
1916 	}
1917 
1918 	nfe_stop(ifp, 0);
1919 
1920 	NFE_WRITE(sc, NFE_TX_UNK, 0);
1921 	NFE_WRITE(sc, NFE_STATUS, 0);
1922 
1923 	sc->rxtxctl = NFE_RXTX_BIT2;
1924 	if (sc->nfe_flags & NFE_40BIT_ADDR)
1925 		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
1926 	else if (sc->nfe_flags & NFE_JUMBO_SUP)
1927 		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
1928 #ifdef NFE_CSUM
1929 	if (sc->nfe_flags & NFE_HW_CSUM)
1930 		sc->rxtxctl |= NFE_RXTX_RXCSUM;
1931 #endif
1932 
1933 #if NVLAN > 0
1934 	/*
1935 	 * Although the adapter is capable of stripping VLAN tags from received
1936 	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
1937 	 * purpose.  This will be done in software by our network stack.
1938 	 */
1939 	if (sc->nfe_flags & NFE_HW_VLAN)
1940 		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;
1941 #endif
1942 
1943 	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
1944 	DELAY(10);
1945 	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
1946 
#if NVLAN > 0
1948 	if (sc->nfe_flags & NFE_HW_VLAN)
1949 		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
1950 #endif
1951 
1952 	NFE_WRITE(sc, NFE_SETUP_R6, 0);
1953 
1954 	/* set MAC address */
1955 	nfe_set_macaddr(sc, sc->eaddr);
1956 
1957 	/* tell MAC where rings are in memory */
1958 #ifdef __LP64__
1959 	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
1960 #endif
1961 	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
1962 #ifdef __LP64__
1963 	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
1964 #endif
1965 	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);
1966 
1967 	NFE_WRITE(sc, NFE_RING_SIZE,
1968 	    (NFE_RX_RING_COUNT - 1) << 16 |
1969 	    (NFE_TX_RING_COUNT - 1));
1970 
1971 	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);
1972 
1973 	/* force MAC to wakeup */
1974 	tmp = NFE_READ(sc, NFE_PWR_STATE);
1975 	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
1976 	DELAY(10);
1977 	tmp = NFE_READ(sc, NFE_PWR_STATE);
1978 	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);
1979 
1980 #if 1
1981 	/* configure interrupts coalescing/mitigation */
1982 	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
1983 #else
1984 	/* no interrupt mitigation: one interrupt per packet */
1985 	NFE_WRITE(sc, NFE_IMTIMER, 970);
1986 #endif
1987 
1988 	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
1989 	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
1990 	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
1991 
	/* update MAC knowledge of PHY; generates an NFE_IRQ_LINK interrupt */
1993 	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
1994 
1995 	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
1996 	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);
1997 
1998 	sc->rxtxctl &= ~NFE_RXTX_BIT2;
1999 	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2000 	DELAY(10);
2001 	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
2002 
2003 	/* set Rx filter */
2004 	nfe_setmulti(sc);
2005 
2006 	nfe_ifmedia_upd(ifp);
2007 
2008 	nfe_tick_locked(sc);
2009 
2010 	/* enable Rx */
2011 	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
2012 
2013 	/* enable Tx */
2014 	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
2015 
2016 	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
2017 
2018 #ifdef DEVICE_POLLING
2019 	if (ifp->if_capenable & IFCAP_POLLING)
2020 		NFE_WRITE(sc, NFE_IRQ_MASK, 0);
2021 	else
2022 #endif
2023 	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED); /* enable interrupts */
2024 
2025 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2026 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2027 
2028 	sc->nfe_link = 0;
2029 
2030 	return;
2031 }
2032 
2033 
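/*
 * Stop the transmitter and receiver, mask interrupts and reset both
 * descriptor rings so that a subsequent nfe_init_locked() starts from a
 * clean state.
 */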
2034 static void
2035 nfe_stop(struct ifnet *ifp, int disable)
2036 {
2037 	struct nfe_softc *sc = ifp->if_softc;
2038 	struct mii_data  *mii;
2039 
2040 	NFE_LOCK_ASSERT(sc);
2041 
2042 	ifp->if_timer = 0;
2043 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2044 
2045 	mii = device_get_softc(sc->nfe_miibus);
2046 
2047 	callout_stop(&sc->nfe_stat_ch);
2048 
2049 	/* abort Tx */
2050 	NFE_WRITE(sc, NFE_TX_CTL, 0);
2051 
2052 	/* disable Rx */
2053 	NFE_WRITE(sc, NFE_RX_CTL, 0);
2054 
2055 	/* disable interrupts */
2056 	NFE_WRITE(sc, NFE_IRQ_MASK, 0);
2057 
2058 	sc->nfe_link = 0;
2059 
2060 	/* reset Tx and Rx rings */
2061 	nfe_reset_tx_ring(sc, &sc->txq);
2062 	nfe_reset_rx_ring(sc, &sc->rxq);
2063 
2064 	return;
2065 }
2066 
2067 
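/*
 * ifmedia change callback: locked wrapper around
 * nfe_ifmedia_upd_locked().
 */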
2068 static int
2069 nfe_ifmedia_upd(struct ifnet *ifp)
2070 {
2071 	struct nfe_softc *sc = ifp->if_softc;
2072 
2073 	NFE_LOCK(sc);
2074 	nfe_ifmedia_upd_locked(ifp);
2075 	NFE_UNLOCK(sc);
2076 	return (0);
2077 }
2078 
2079 
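/*
 * Apply a media change request: reset every PHY instance and let the
 * MII layer program the selected media.
 */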
2080 static int
2081 nfe_ifmedia_upd_locked(struct ifnet *ifp)
2082 {
2083 	struct nfe_softc *sc = ifp->if_softc;
2084 	struct mii_data *mii;
2085 
2086 	NFE_LOCK_ASSERT(sc);
2087 
2088 	mii = device_get_softc(sc->nfe_miibus);
2089 
2090 	if (mii->mii_instance) {
2091 		struct mii_softc *miisc;
2092 		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
2093 		    miisc = LIST_NEXT(miisc, mii_list)) {
2094 			mii_phy_reset(miisc);
2095 		}
2096 	}
2097 	mii_mediachg(mii);
2098 
2099 	return (0);
2100 }
2101 
2102 
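/*
 * ifmedia status callback: poll the MII layer and report the current
 * media and link status.
 */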
2103 static void
2104 nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2105 {
2106 	struct nfe_softc *sc;
2107 	struct mii_data *mii;
2108 
2109 	sc = ifp->if_softc;
2110 
2111 	NFE_LOCK(sc);
2112 	mii = device_get_softc(sc->nfe_miibus);
2113 	mii_pollstat(mii);
2114 	NFE_UNLOCK(sc);
2115 
2116 	ifmr->ifm_active = mii->mii_media_active;
2117 	ifmr->ifm_status = mii->mii_media_status;
2118 
2119 	return;
2120 }
2121 
2122 
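/*
 * Periodic timer: locked wrapper around nfe_tick_locked().
 */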
2123 static void
2124 nfe_tick(void *xsc)
2125 {
2126 	struct nfe_softc *sc;
2127 
2128 	sc = xsc;
2129 
2130 	NFE_LOCK(sc);
2131 	nfe_tick_locked(sc);
2132 	NFE_UNLOCK(sc);
2133 }
2134 
2135 
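/*
 * Once-a-second housekeeping: poll the PHY via mii_tick(), latch
 * link-up the first time an active medium is reported (restarting
 * transmission if packets are pending) and reschedule the callout.
 */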
2136 void
2137 nfe_tick_locked(struct nfe_softc *arg)
2138 {
2139 	struct nfe_softc *sc;
2140 	struct mii_data *mii;
2141 	struct ifnet *ifp;
2142 
2143 	sc = arg;
2144 
2145 	NFE_LOCK_ASSERT(sc);
2146 
2147 	ifp = sc->nfe_ifp;
2148 
2149 	mii = device_get_softc(sc->nfe_miibus);
2150 	mii_tick(mii);
2151 
2152 	if (!sc->nfe_link) {
2153 		if (mii->mii_media_status & IFM_ACTIVE &&
2154 		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
2155 			sc->nfe_link++;
2156 			if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T
2157 			    && bootverbose)
2158 				if_printf(sc->nfe_ifp, "gigabit link up\n");
			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
				nfe_start_locked(ifp);
2161 		}
2162 	}
2163 	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2164 
2165 	return;
2166 }
2167 
2168 
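/*
 * Device shutdown method: stop the interface (Rx/Tx disabled,
 * interrupts masked) before the system goes down.
 */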
2169 static void
2170 nfe_shutdown(device_t dev)
2171 {
2172 	struct nfe_softc *sc;
2173 	struct ifnet *ifp;
2174 
2175 	sc = device_get_softc(dev);
2176 
2177 	NFE_LOCK(sc);
2178 	ifp = sc->nfe_ifp;
	nfe_stop(ifp, 0);
2180 	/* nfe_reset(sc); */
2181 	NFE_UNLOCK(sc);
2182 
2183 	return;
2184 }
2185 
2186 
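/*
 * Read the station address out of the NFE_MACADDR_LO/HI registers.
 */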
2187 static void
2188 nfe_get_macaddr(struct nfe_softc *sc, u_char *addr)
2189 {
2190 	uint32_t tmp;
2191 
2192 	tmp = NFE_READ(sc, NFE_MACADDR_LO);
2193 	addr[0] = (tmp >> 8) & 0xff;
2194 	addr[1] = (tmp & 0xff);
2195 
2196 	tmp = NFE_READ(sc, NFE_MACADDR_HI);
2197 	addr[2] = (tmp >> 24) & 0xff;
2198 	addr[3] = (tmp >> 16) & 0xff;
2199 	addr[4] = (tmp >>  8) & 0xff;
2200 	addr[5] = (tmp & 0xff);
2201 }
2202 
2203 
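/*
 * Program the station address into the NFE_MACADDR_LO/HI registers.
 * Note that the byte order used here is the reverse of the one used by
 * nfe_get_macaddr().
 */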
2204 static void
2205 nfe_set_macaddr(struct nfe_softc *sc, u_char *addr)
2206 {
2207 
2208 	NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] <<  8 | addr[4]);
2209 	NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
2210 	    addr[1] << 8 | addr[0]);
2211 }
2212 
2213 
/*
 * DMA map load callback: record the single segment that describes the
 * mapped buffer (the caller expects exactly one segment).
 */
2218 static void
nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
2224 
2225 	if (error)
2226 		return;
2227 
2228 	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
2229 
2230 	*(bus_dma_segment_t *)arg = *segs;
2231 
2232 	return;
2233 }
2234