xref: /freebsd/sys/dev/nfe/if_nfe.c (revision f3bb407b7c1b3faa88d0580541f01a8e6fb6cc68)
1 /*	$OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $	*/
2 
3 /*-
4  * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
5  * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
6  * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
22 
23 #include <sys/cdefs.h>
24 __FBSDID("$FreeBSD$");
25 
26 /* Uncomment the following line to enable polling. */
27 /* #define	DEVICE_POLLING */
28 
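/*
 * Compile-time feature switches: jumbo frame support, hardware checksum
 * offload and hardware VLAN tagging (NVLAN, disabled by default).
 */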
29 #define	NFE_JUMBO
30 #define	NFE_CSUM
31 #define	NVLAN 0
32 
33 #ifdef HAVE_KERNEL_OPTION_HEADERS
34 #include "opt_device_polling.h"
35 #endif
36 
37 #include <sys/param.h>
38 #include <sys/endian.h>
39 #include <sys/systm.h>
40 #include <sys/sockio.h>
41 #include <sys/mbuf.h>
42 #include <sys/malloc.h>
43 #include <sys/module.h>
44 #include <sys/kernel.h>
45 #include <sys/socket.h>
46 #include <sys/taskqueue.h>
47 
48 #include <net/if.h>
49 #include <net/if_arp.h>
50 #include <net/ethernet.h>
51 #include <net/if_dl.h>
52 #include <net/if_media.h>
53 #include <net/if_types.h>
54 #include <net/if_vlan_var.h>
55 
56 #include <net/bpf.h>
57 
58 #include <machine/bus.h>
59 #include <machine/resource.h>
60 #include <sys/bus.h>
61 #include <sys/rman.h>
62 
63 #include <dev/mii/mii.h>
64 #include <dev/mii/miivar.h>
65 
66 #include <dev/pci/pcireg.h>
67 #include <dev/pci/pcivar.h>
68 
69 #include <dev/nfe/if_nfereg.h>
70 #include <dev/nfe/if_nfevar.h>
71 
72 MODULE_DEPEND(nfe, pci, 1, 1, 1);
73 MODULE_DEPEND(nfe, ether, 1, 1, 1);
74 MODULE_DEPEND(nfe, miibus, 1, 1, 1);
75 #include "miibus_if.h"
76 
77 static int  nfe_probe(device_t);
78 static int  nfe_attach(device_t);
79 static int  nfe_detach(device_t);
80 static void nfe_shutdown(device_t);
81 static int  nfe_miibus_readreg(device_t, int, int);
82 static int  nfe_miibus_writereg(device_t, int, int, int);
83 static void nfe_miibus_statchg(device_t);
84 static int  nfe_ioctl(struct ifnet *, u_long, caddr_t);
85 static void nfe_intr(void *);
86 static void nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
87 static void nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
88 static void nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
89 static void nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
90 static void nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
91 static void nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
92 static void nfe_rxeof(struct nfe_softc *);
93 static void nfe_txeof(struct nfe_softc *);
94 static int  nfe_encap(struct nfe_softc *, struct mbuf *);
95 static void nfe_setmulti(struct nfe_softc *);
96 static void nfe_start(struct ifnet *);
97 static void nfe_start_locked(struct ifnet *);
98 static void nfe_watchdog(struct ifnet *);
99 static void nfe_init(void *);
100 static void nfe_init_locked(void *);
101 static void nfe_stop(struct ifnet *, int);
102 static int  nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
103 static void nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
104 static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
105 static int  nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
106 static void nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
107 static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
108 static int  nfe_ifmedia_upd(struct ifnet *);
109 static int  nfe_ifmedia_upd_locked(struct ifnet *);
110 static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
111 static void nfe_tick(void *);
112 static void nfe_tick_locked(struct nfe_softc *);
113 static void nfe_get_macaddr(struct nfe_softc *, u_char *);
114 static void nfe_set_macaddr(struct nfe_softc *, u_char *);
115 static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int);
116 #ifdef DEVICE_POLLING
117 static void nfe_poll_locked(struct ifnet *, enum poll_cmd, int);
118 #endif
119 
120 #ifdef NFE_DEBUG
121 int nfedebug = 0;
122 #define	DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
123 #define	DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
124 #else
125 #define	DPRINTF(x)
126 #define	DPRINTFN(n,x)
127 #endif
128 
129 #define	NFE_LOCK(_sc)		mtx_lock(&(_sc)->nfe_mtx)
130 #define	NFE_UNLOCK(_sc)		mtx_unlock(&(_sc)->nfe_mtx)
131 #define	NFE_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)
132 
133 #define	letoh16(x) le16toh(x)
134 
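/* PCI BAR 0 (config offset 0x10) maps the chip registers. */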
135 #define	NV_RID		0x10
136 
137 static device_method_t nfe_methods[] = {
138 	/* Device interface */
139 	DEVMETHOD(device_probe,		nfe_probe),
140 	DEVMETHOD(device_attach,	nfe_attach),
141 	DEVMETHOD(device_detach,	nfe_detach),
142 	DEVMETHOD(device_shutdown,	nfe_shutdown),
143 
144 	/* bus interface */
145 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
146 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
147 
148 	/* MII interface */
149 	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
150 	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
151 	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),
152 
153 	{ 0, 0 }
154 };
155 
156 static driver_t nfe_driver = {
157 	"nfe",
158 	nfe_methods,
159 	sizeof(struct nfe_softc)
160 };
161 
162 static devclass_t nfe_devclass;
163 
164 DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0);
165 DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
166 
167 static struct nfe_type nfe_devs[] = {
168 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
169 	    "NVIDIA nForce MCP Networking Adapter"},
170 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
171 	    "NVIDIA nForce2 MCP2 Networking Adapter"},
172 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
173 	    "NVIDIA nForce2 400 MCP4 Networking Adapter"},
174 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
175 	    "NVIDIA nForce2 400 MCP5 Networking Adapter"},
176 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
177 	    "NVIDIA nForce3 MCP3 Networking Adapter"},
178 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
179 	    "NVIDIA nForce3 250 MCP6 Networking Adapter"},
180 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
181 	    "NVIDIA nForce3 MCP7 Networking Adapter"},
182 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
183 	    "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
184 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
185 	    "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
186 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
187 	    "NVIDIA nForce MCP04 Networking Adapter"},		// MCP10
188 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
189 	    "NVIDIA nForce MCP04 Networking Adapter"},		// MCP11
190 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
191 	    "NVIDIA nForce 430 MCP12 Networking Adapter"},
192 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
193 	    "NVIDIA nForce 430 MCP13 Networking Adapter"},
194 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
195 	    "NVIDIA nForce MCP55 Networking Adapter"},
196 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
197 	    "NVIDIA nForce MCP55 Networking Adapter"},
198 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
199 	    "NVIDIA nForce MCP61 Networking Adapter"},
200 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
201 	    "NVIDIA nForce MCP61 Networking Adapter"},
202 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
203 	    "NVIDIA nForce MCP61 Networking Adapter"},
204 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
205 	    "NVIDIA nForce MCP61 Networking Adapter"},
206 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
207 	    "NVIDIA nForce MCP65 Networking Adapter"},
208 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
209 	    "NVIDIA nForce MCP65 Networking Adapter"},
210 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
211 	    "NVIDIA nForce MCP65 Networking Adapter"},
212 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
213 	    "NVIDIA nForce MCP65 Networking Adapter"},
214 	{0, 0, NULL}
215 };
216 
217 
218 /* Probe for supported hardware IDs */
219 static int
220 nfe_probe(device_t dev)
221 {
222 	struct nfe_type *t;
223 
224 	t = nfe_devs;
225 	/* Check for matching PCI device IDs */
226 	while (t->name != NULL) {
227 		if ((pci_get_vendor(dev) == t->vid_id) &&
228 		    (pci_get_device(dev) == t->dev_id)) {
229 			device_set_desc(dev, t->name);
230 			return (0);
231 		}
232 		t++;
233 	}
234 
235 	return (ENXIO);
236 }
237 
238 
239 static int
240 nfe_attach(device_t dev)
241 {
242 	struct nfe_softc *sc;
243 	struct ifnet *ifp;
244 	int unit, error = 0, rid;
245 
246 	sc = device_get_softc(dev);
247 	unit = device_get_unit(dev);
248 	sc->nfe_dev = dev;
249 	sc->nfe_unit = unit;
250 
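	/*
	 * The mutex must be recursive: nfe_init_locked() calls
	 * nfe_ifmedia_upd(), which acquires it again.
	 */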
251 	mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
252 	    MTX_DEF | MTX_RECURSE);
253 	callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);
254 
255 	pci_enable_busmaster(dev);
256 
257 	rid = NV_RID;
258 	sc->nfe_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
259 	    0, ~0, 1, RF_ACTIVE);
260 
261 	if (sc->nfe_res == NULL) {
262 		printf ("nfe%d: couldn't map ports/memory\n", unit);
263 		error = ENXIO;
264 		goto fail;
265 	}
266 
267 	sc->nfe_memt = rman_get_bustag(sc->nfe_res);
268 	sc->nfe_memh = rman_get_bushandle(sc->nfe_res);
269 
270 	/* Allocate interrupt */
271 	rid = 0;
272 	sc->nfe_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
273 	    0, ~0, 1, RF_SHAREABLE | RF_ACTIVE);
274 
275 	if (sc->nfe_irq == NULL) {
276 		printf("nfe%d: couldn't map interrupt\n", unit);
277 		error = ENXIO;
278 		goto fail;
279 	}
280 
281 	nfe_get_macaddr(sc, sc->eaddr);
282 
283 	sc->nfe_flags = 0;
284 
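	/*
	 * Set per-chip capability flags (jumbo frames, 40-bit DMA
	 * addressing, hardware checksum, hardware VLAN) based on the
	 * PCI device ID.
	 */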
285 	switch (pci_get_device(dev)) {
286 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
287 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
288 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
289 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
290 		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
291 		break;
292 	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
293 	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
294 		sc->nfe_flags |= NFE_40BIT_ADDR;
295 		break;
296 	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
297 	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
298 	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
299 	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
300 		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
301 		break;
302 	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
303 	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
304 		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
305 		    NFE_HW_VLAN;
306 		break;
307 	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
308 	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
309 	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
310 	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
311 		sc->nfe_flags |= NFE_40BIT_ADDR;
312 		break;
313 	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
314 	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
315 	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
316 	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
317 		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
318 		break;
319 	}
320 
321 	/*
322 	 * Allocate the parent bus DMA tag appropriate for PCI.
323 	 */
324 #define	NFE_NSEG_NEW 32
325 	error = bus_dma_tag_create(NULL,	/* parent */
326 	    1, 0,				/* alignment, boundary */
327 	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
328 	    BUS_SPACE_MAXADDR,			/* highaddr */
329 	    NULL, NULL,				/* filter, filterarg */
330 	    MAXBSIZE, NFE_NSEG_NEW,		/* maxsize, nsegments */
331 	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
332 	    BUS_DMA_ALLOCNOW,			/* flags */
333 	    NULL, NULL,				/* lockfunc, lockarg */
334 	    &sc->nfe_parent_tag);
335 	if (error)
336 		goto fail;
337 
338 	ifp = sc->nfe_ifp = if_alloc(IFT_ETHER);
339 	if (ifp == NULL) {
340 		printf("nfe%d: can not if_alloc()\n", unit);
341 		error = ENOSPC;
342 		goto fail;
343 	}
344 	sc->nfe_mtu = ifp->if_mtu = ETHERMTU;
345 
346 	/*
347 	 * Allocate Tx and Rx rings.
348 	 */
349 	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
350 		printf("nfe%d: could not allocate Tx ring\n", unit);
351 		error = ENXIO;
352 		goto fail;
353 	}
354 
355 	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
356 		printf("nfe%d: could not allocate Rx ring\n", unit);
357 		nfe_free_tx_ring(sc, &sc->txq);
358 		error = ENXIO;
359 		goto fail;
360 	}
361 
362 	ifp->if_softc = sc;
363 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
364 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
365 	ifp->if_ioctl = nfe_ioctl;
366 	ifp->if_start = nfe_start;
367 	/* ifp->if_hwassist = NFE_CSUM_FEATURES; */
368 	ifp->if_watchdog = nfe_watchdog;
369 	ifp->if_init = nfe_init;
370 	ifp->if_baudrate = IF_Gbps(1);
371 	ifp->if_snd.ifq_maxlen = NFE_IFQ_MAXLEN;
372 
373 	ifp->if_capabilities = IFCAP_VLAN_MTU;
374 
375 #ifdef NFE_JUMBO
376 	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
377 #else
378 	ifp->if_capabilities &= ~IFCAP_JUMBO_MTU;
379 	sc->nfe_flags &= ~NFE_JUMBO_SUP;
380 #endif
381 
382 #if NVLAN > 0
383 	if (sc->nfe_flags & NFE_HW_VLAN)
384 		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
385 #endif
386 #ifdef NFE_CSUM
387 	if (sc->nfe_flags & NFE_HW_CSUM) {
388 		ifp->if_capabilities |= IFCAP_HWCSUM;
389 		ifp->if_capenable |= IFCAP_HWCSUM;
390 		ifp->if_hwassist = NFE_CSUM_FEATURES;
391 	}
392 #else
393 	sc->nfe_flags &= ~NFE_HW_CSUM;
394 #endif
395 	ifp->if_capenable = ifp->if_capabilities;
396 
397 #ifdef DEVICE_POLLING
398 	ifp->if_capabilities |= IFCAP_POLLING;
399 #endif
400 
401 	/* Do MII setup */
402 	if (mii_phy_probe(dev, &sc->nfe_miibus, nfe_ifmedia_upd,
403 	    nfe_ifmedia_sts)) {
404 		printf("nfe%d: MII without any phy!\n", unit);
405 		error = ENXIO;
406 		goto fail;
407 	}
408 
409 	ether_ifattach(ifp, sc->eaddr);
410 
411 	error = bus_setup_intr(dev, sc->nfe_irq, INTR_TYPE_NET | INTR_MPSAFE,
412 	    NULL, nfe_intr, sc, &sc->nfe_intrhand);
413 
414 	if (error) {
415 		printf("nfe%d: couldn't set up irq\n", unit);
416 		ether_ifdetach(ifp);
417 		goto fail;
418 	}
419 
420 fail:
421 	if (error)
422 		nfe_detach(dev);
423 
424 	return (error);
425 }
426 
427 
428 static int
429 nfe_detach(device_t dev)
430 {
431 	struct nfe_softc *sc;
432 	struct ifnet *ifp;
433 	u_char eaddr[ETHER_ADDR_LEN];
434 	int i;
435 
436 	sc = device_get_softc(dev);
437 	KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
438 	ifp = sc->nfe_ifp;
439 
440 #ifdef DEVICE_POLLING
441 	if (ifp->if_capenable & IFCAP_POLLING)
442 		ether_poll_deregister(ifp);
443 #endif
444 
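	/*
	 * Restore the MAC address registers to the byte order they had at
	 * attach time (nfe_get_macaddr() and nfe_set_macaddr() use opposite
	 * orders, so write the address back reversed).
	 */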
445 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
446 		eaddr[i] = sc->eaddr[5 - i];
447 	}
448 	nfe_set_macaddr(sc, eaddr);
449 
450 	if (device_is_attached(dev)) {
451 		NFE_LOCK(sc);
452 		nfe_stop(ifp, 1);
453 		ifp->if_flags &= ~IFF_UP;
454 		NFE_UNLOCK(sc);
455 		callout_drain(&sc->nfe_stat_ch);
456 		ether_ifdetach(ifp);
457 	}
458 
459 	if (ifp)
460 		if_free(ifp);
461 	if (sc->nfe_miibus)
462 		device_delete_child(dev, sc->nfe_miibus);
463 	bus_generic_detach(dev);
464 
465 	if (sc->nfe_intrhand)
466 		bus_teardown_intr(dev, sc->nfe_irq, sc->nfe_intrhand);
467 	if (sc->nfe_irq)
468 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nfe_irq);
469 	if (sc->nfe_res)
470 		bus_release_resource(dev, SYS_RES_MEMORY, NV_RID, sc->nfe_res);
471 
472 	nfe_free_tx_ring(sc, &sc->txq);
473 	nfe_free_rx_ring(sc, &sc->rxq);
474 
475 	if (sc->nfe_parent_tag)
476 		bus_dma_tag_destroy(sc->nfe_parent_tag);
477 
478 	mtx_destroy(&sc->nfe_mtx);
479 
480 	return (0);
481 }
482 
483 
484 static void
485 nfe_miibus_statchg(device_t dev)
486 {
487 	struct nfe_softc *sc;
488 	struct mii_data *mii;
489 	u_int32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;
490 
491 	sc = device_get_softc(dev);
492 	mii = device_get_softc(sc->nfe_miibus);
493 
494 	phy = NFE_READ(sc, NFE_PHY_IFACE);
495 	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);
496 
497 	seed = NFE_READ(sc, NFE_RNDSEED);
498 	seed &= ~NFE_SEED_MASK;
499 
500 	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
501 		phy  |= NFE_PHY_HDX;	/* half-duplex */
502 		misc |= NFE_MISC1_HDX;
503 	}
504 
505 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
506 	case IFM_1000_T:	/* full-duplex only */
507 		link |= NFE_MEDIA_1000T;
508 		seed |= NFE_SEED_1000T;
509 		phy  |= NFE_PHY_1000T;
510 		break;
511 	case IFM_100_TX:
512 		link |= NFE_MEDIA_100TX;
513 		seed |= NFE_SEED_100TX;
514 		phy  |= NFE_PHY_100TX;
515 		break;
516 	case IFM_10_T:
517 		link |= NFE_MEDIA_10T;
518 		seed |= NFE_SEED_10T;
519 		break;
520 	}
521 
522 	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */
523 
524 	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
525 	NFE_WRITE(sc, NFE_MISC1, misc);
526 	NFE_WRITE(sc, NFE_LINKSPEED, link);
527 }
528 
529 
530 static int
531 nfe_miibus_readreg(device_t dev, int phy, int reg)
532 {
533 	struct nfe_softc *sc = device_get_softc(dev);
534 	u_int32_t val;
535 	int ntries;
536 
537 	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
538 
539 	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
540 		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
541 		DELAY(100);
542 	}
543 
544 	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);
545 
546 	for (ntries = 0; ntries < 1000; ntries++) {
547 		DELAY(100);
548 		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
549 			break;
550 	}
551 	if (ntries == 1000) {
552 		DPRINTFN(2, ("nfe%d: timeout waiting for PHY\n", sc->nfe_unit));
553 		return 0;
554 	}
555 
556 	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
557 		DPRINTFN(2, ("nfe%d: could not read PHY\n", sc->nfe_unit));
558 		return 0;
559 	}
560 
561 	val = NFE_READ(sc, NFE_PHY_DATA);
562 	if (val != 0xffffffff && val != 0)
563 		sc->mii_phyaddr = phy;
564 
565 	DPRINTFN(2, ("nfe%d: mii read phy %d reg 0x%x ret 0x%x\n",
566 	    sc->nfe_unit, phy, reg, val));
567 
568 	return val;
569 }
570 
571 
572 static int
573 nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
574 {
575 	struct nfe_softc *sc = device_get_softc(dev);
576 	u_int32_t ctl;
577 	int ntries;
578 
579 	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
580 
581 	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
582 		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
583 		DELAY(100);
584 	}
585 
586 	NFE_WRITE(sc, NFE_PHY_DATA, val);
587 	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
588 	NFE_WRITE(sc, NFE_PHY_CTL, ctl);
589 
590 	for (ntries = 0; ntries < 1000; ntries++) {
591 		DELAY(100);
592 		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
593 			break;
594 	}
595 #ifdef NFE_DEBUG
596 	if (nfedebug >= 2 && ntries == 1000)
597 		printf("could not write to PHY\n");
598 #endif
599 	return 0;
600 }
601 
602 
603 static int
604 nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
605 {
606 	struct nfe_desc32 *desc32;
607 	struct nfe_desc64 *desc64;
608 	struct nfe_rx_data *data;
609 	void **desc;
610 	bus_addr_t physaddr;
611 	int i, error, descsize;
612 
613 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
614 		desc = (void **)&ring->desc64;
615 		descsize = sizeof (struct nfe_desc64);
616 	} else {
617 		desc = (void **)&ring->desc32;
618 		descsize = sizeof (struct nfe_desc32);
619 	}
620 
621 	ring->cur = ring->next = 0;
622 	ring->bufsz = (sc->nfe_mtu + NFE_RX_HEADERS <= MCLBYTES) ?
623 	    MCLBYTES : MJUM9BYTES;
624 
625 	error = bus_dma_tag_create(sc->nfe_parent_tag,
626 	   PAGE_SIZE, 0,			/* alignment, boundary */
627 	   BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
628 	   BUS_SPACE_MAXADDR,			/* highaddr */
629 	   NULL, NULL,				/* filter, filterarg */
630 	   NFE_RX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
631 	   NFE_RX_RING_COUNT * descsize,	/* maxsegsize */
632 	   BUS_DMA_ALLOCNOW,			/* flags */
633 	   NULL, NULL,				/* lockfunc, lockarg */
634 	   &ring->rx_desc_tag);
635 	if (error != 0) {
636 		printf("nfe%d: could not create desc DMA tag\n", sc->nfe_unit);
637 		goto fail;
638 	}
639 
640 	/* allocate memory for the descriptors */
641 	error = bus_dmamem_alloc(ring->rx_desc_tag, (void **)desc,
642 	    BUS_DMA_NOWAIT, &ring->rx_desc_map);
643 	if (error != 0) {
644 		printf("nfe%d: could not create desc DMA map\n", sc->nfe_unit);
645 		goto fail;
646 	}
647 
648 	/* map desc to device visible address space */
649 	error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, *desc,
650 	    NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs,
651 	    &ring->rx_desc_segs, BUS_DMA_NOWAIT);
652 	if (error != 0) {
653 		printf("nfe%d: could not load desc DMA map\n", sc->nfe_unit);
654 		goto fail;
655 	}
656 
657 	bzero(*desc, NFE_RX_RING_COUNT * descsize);
658 	ring->rx_desc_addr = ring->rx_desc_segs.ds_addr;
659 	ring->physaddr = ring->rx_desc_addr;
660 
661 	/*
662 	 * Pre-allocate Rx buffers and populate Rx ring.
663 	 */
664 	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
665 		data = &sc->rxq.data[i];
666 
667 		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
668 		if (data->m == NULL) {
669 			printf("nfe%d: could not allocate rx mbuf\n",
670 			    sc->nfe_unit);
671 			error = ENOMEM;
672 			goto fail;
673 		}
674 
675 		error = bus_dma_tag_create(sc->nfe_parent_tag,
676 		    ETHER_ALIGN, 0,	       /* alignment, boundary */
677 		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
678 		    BUS_SPACE_MAXADDR,		/* highaddr */
679 		    NULL, NULL,		/* filter, filterarg */
680 		    MCLBYTES, 1,		/* maxsize, nsegments */
681 		    MCLBYTES,			/* maxsegsize */
682 		    BUS_DMA_ALLOCNOW,		/* flags */
683 		    NULL, NULL,		/* lockfunc, lockarg */
684 		    &data->rx_data_tag);
685 		if (error != 0) {
686 			printf("nfe%d: could not create DMA map\n",
687 			    sc->nfe_unit);
688 			goto fail;
689 		}
690 
691 		error = bus_dmamap_create(data->rx_data_tag, 0,
692 		    &data->rx_data_map);
693 		if (error != 0) {
694 			printf("nfe%d: could not allocate mbuf cluster\n",
695 			    sc->nfe_unit);
696 			goto fail;
697 		}
698 
699 		MCLGET(data->m, M_DONTWAIT);
700 		if (!(data->m->m_flags & M_EXT)) {
701 			error = ENOMEM;
702 			goto fail;
703 		}
704 
705 		error = bus_dmamap_load(data->rx_data_tag,
706 		    data->rx_data_map, mtod(data->m, void *),
707 		    ring->bufsz, nfe_dma_map_segs, &data->rx_data_segs,
708 		    BUS_DMA_NOWAIT);
709 		if (error != 0) {
710 			printf("nfe%d: could not load rx buf DMA map\n",
711 			    sc->nfe_unit);
712 			goto fail;
713 		}
714 
715 		data->rx_data_addr = data->rx_data_segs.ds_addr;
716 		physaddr = data->rx_data_addr;
717 
718 
719 		if (sc->nfe_flags & NFE_40BIT_ADDR) {
720 			desc64 = &sc->rxq.desc64[i];
721 #if defined(__LP64__)
722 			desc64->physaddr[0] = htole32(physaddr >> 32);
723 #endif
724 			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
725 			desc64->length = htole16(sc->rxq.bufsz);
726 			desc64->flags = htole16(NFE_RX_READY);
727 		} else {
728 			desc32 = &sc->rxq.desc32[i];
729 			desc32->physaddr = htole32(physaddr);
730 			desc32->length = htole16(sc->rxq.bufsz);
731 			desc32->flags = htole16(NFE_RX_READY);
732 		}
733 
734 	}
735 
736 	bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
737 	    BUS_DMASYNC_PREWRITE);
738 
739 	return 0;
740 
741 fail:	nfe_free_rx_ring(sc, ring);
742 
743 	return error;
744 }
745 
746 
747 static void
748 nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
749 {
750 	int i;
751 
752 	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
753 		if (sc->nfe_flags & NFE_40BIT_ADDR) {
754 			ring->desc64[i].length = htole16(ring->bufsz);
755 			ring->desc64[i].flags = htole16(NFE_RX_READY);
756 		} else {
757 			ring->desc32[i].length = htole16(ring->bufsz);
758 			ring->desc32[i].flags = htole16(NFE_RX_READY);
759 		}
760 	}
761 
762 	bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
763 	    BUS_DMASYNC_PREWRITE);
764 
765 	ring->cur = ring->next = 0;
766 }
767 
768 
769 static void
770 nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
771 {
772 	struct nfe_rx_data *data;
773 	void *desc;
774 	int i, descsize;
775 
776 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
777 		desc = ring->desc64;
778 		descsize = sizeof (struct nfe_desc64);
779 	} else {
780 		desc = ring->desc32;
781 		descsize = sizeof (struct nfe_desc32);
782 	}
783 
784 	if (desc != NULL) {
785 		bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
786 		    BUS_DMASYNC_POSTWRITE);
787 		bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
788 		bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
789 		bus_dma_tag_destroy(ring->rx_desc_tag);
790 	}
791 
792 	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
793 		data = &ring->data[i];
794 
795 		if (data->rx_data_map != NULL) {
796 			bus_dmamap_sync(data->rx_data_tag,
797 			    data->rx_data_map, BUS_DMASYNC_POSTREAD);
798 			bus_dmamap_unload(data->rx_data_tag,
799 			    data->rx_data_map);
800 			bus_dmamap_destroy(data->rx_data_tag,
801 			    data->rx_data_map);
802 			bus_dma_tag_destroy(data->rx_data_tag);
803 		}
804 
805 		if (data->m != NULL)
806 			m_freem(data->m);
807 	}
808 }
809 
810 
811 static int
812 nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
813 {
814 	int i, error;
815 	void **desc;
816 	int descsize;
817 
818 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
819 		desc = (void **)&ring->desc64;
820 		descsize = sizeof (struct nfe_desc64);
821 	} else {
822 		desc = (void **)&ring->desc32;
823 		descsize = sizeof (struct nfe_desc32);
824 	}
825 
826 	ring->queued = 0;
827 	ring->cur = ring->next = 0;
828 
829 	error = bus_dma_tag_create(sc->nfe_parent_tag,
830 	   PAGE_SIZE, 0,			/* alignment, boundary */
831 	   BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
832 	   BUS_SPACE_MAXADDR,			/* highaddr */
833 	   NULL, NULL,				/* filter, filterarg */
834 	   NFE_TX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
835 	   NFE_TX_RING_COUNT * descsize,	/* maxsegsize */
836 	   BUS_DMA_ALLOCNOW,			/* flags */
837 	   NULL, NULL,				/* lockfunc, lockarg */
838 	   &ring->tx_desc_tag);
839 	if (error != 0) {
840 		printf("nfe%d: could not create desc DMA tag\n", sc->nfe_unit);
841 		goto fail;
842 	}
843 
844 	error = bus_dmamem_alloc(ring->tx_desc_tag, (void **)desc,
845 	    BUS_DMA_NOWAIT, &ring->tx_desc_map);
846 	if (error != 0) {
847 		printf("nfe%d: could not create desc DMA map\n", sc->nfe_unit);
848 		goto fail;
849 	}
850 
851 	error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, *desc,
852 	    NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ring->tx_desc_segs,
853 	    BUS_DMA_NOWAIT);
854 	if (error != 0) {
855 		printf("nfe%d: could not load desc DMA map\n", sc->nfe_unit);
856 		goto fail;
857 	}
858 
859 	bzero(*desc, NFE_TX_RING_COUNT * descsize);
860 
861 	ring->tx_desc_addr = ring->tx_desc_segs.ds_addr;
862 	ring->physaddr = ring->tx_desc_addr;
863 
864 	error = bus_dma_tag_create(sc->nfe_parent_tag,
865 	   ETHER_ALIGN, 0,
866 	   BUS_SPACE_MAXADDR_32BIT,
867 	   BUS_SPACE_MAXADDR,
868 	   NULL, NULL,
869 	   NFE_JBYTES, NFE_MAX_SCATTER,
870 	   NFE_JBYTES,
871 	   BUS_DMA_ALLOCNOW,
872 	   NULL, NULL,
873 	   &ring->tx_data_tag);
874 	if (error != 0) {
875 	  printf("nfe%d: could not create DMA tag\n", sc->nfe_unit);
876 	  goto fail;
877 	}
878 
879 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
880 		error = bus_dmamap_create(ring->tx_data_tag, 0,
881 		    &ring->data[i].tx_data_map);
882 		if (error != 0) {
883 			printf("nfe%d: could not create DMA map\n",
884 			    sc->nfe_unit);
885 			goto fail;
886 		}
887 	}
888 
889 	return 0;
890 
891 fail:	nfe_free_tx_ring(sc, ring);
892 	return error;
893 }
894 
895 
896 static void
897 nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
898 {
899 	struct nfe_tx_data *data;
900 	int i;
901 
902 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
903 		if (sc->nfe_flags & NFE_40BIT_ADDR)
904 			ring->desc64[i].flags = 0;
905 		else
906 			ring->desc32[i].flags = 0;
907 
908 		data = &ring->data[i];
909 
910 		if (data->m != NULL) {
911 			bus_dmamap_sync(ring->tx_data_tag, data->active,
912 			    BUS_DMASYNC_POSTWRITE);
913 			bus_dmamap_unload(ring->tx_data_tag, data->active);
914 			m_freem(data->m);
915 			data->m = NULL;
916 		}
917 	}
918 
919 	bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
920 	    BUS_DMASYNC_PREWRITE);
921 
922 	ring->queued = 0;
923 	ring->cur = ring->next = 0;
924 }
925 
926 
927 static void
928 nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
929 {
930 	struct nfe_tx_data *data;
931 	void *desc;
932 	int i, descsize;
933 
934 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
935 		desc = ring->desc64;
936 		descsize = sizeof (struct nfe_desc64);
937 	} else {
938 		desc = ring->desc32;
939 		descsize = sizeof (struct nfe_desc32);
940 	}
941 
942 	if (desc != NULL) {
943 		bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
944 		    BUS_DMASYNC_POSTWRITE);
945 		bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
946 		bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
947 		bus_dma_tag_destroy(ring->tx_desc_tag);
948 	}
949 
950 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
951 		data = &ring->data[i];
952 
953 		if (data->m != NULL) {
954 			bus_dmamap_sync(ring->tx_data_tag, data->active,
955 			    BUS_DMASYNC_POSTWRITE);
956 			bus_dmamap_unload(ring->tx_data_tag, data->active);
957 			m_freem(data->m);
958 		}
959 	}
960 
961 	/* ..and now actually destroy the DMA mappings */
962 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
963 		data = &ring->data[i];
964 		if (data->tx_data_map == NULL)
965 			continue;
966 		bus_dmamap_destroy(ring->tx_data_tag, data->tx_data_map);
967 	}
968 
969 	bus_dma_tag_destroy(ring->tx_data_tag);
970 }
971 
972 #ifdef DEVICE_POLLING
973 static poll_handler_t nfe_poll;
974 
975 
976 static void
977 nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
978 {
979 	struct nfe_softc *sc = ifp->if_softc;
980 
981 	NFE_LOCK(sc);
982 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
983 		nfe_poll_locked(ifp, cmd, count);
984 	NFE_UNLOCK(sc);
985 }
986 
987 
988 static void
989 nfe_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
990 {
991 	struct nfe_softc *sc = ifp->if_softc;
992 	u_int32_t r;
993 
994 	NFE_LOCK_ASSERT(sc);
995 
996 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
997 		return;
998 	}
999 
1000 	sc->rxcycles = count;
1001 	nfe_rxeof(sc);
1002 	nfe_txeof(sc);
1003 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1004 		nfe_start_locked(ifp);
1005 
1006 	if (cmd == POLL_AND_CHECK_STATUS) {
1007 		if ((r = NFE_READ(sc, NFE_IRQ_STATUS)) == 0) {
1008 			return;
1009 		}
1010 		NFE_WRITE(sc, NFE_IRQ_STATUS, r);
1011 
1012 		if (r & NFE_IRQ_LINK) {
1013 			NFE_READ(sc, NFE_PHY_STATUS);
1014 			NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1015 			DPRINTF(("nfe%d: link state changed\n", sc->nfe_unit));
1016 		}
1017 	}
1018 }
1019 #endif /* DEVICE_POLLING */
1020 
1021 
1022 static int
1023 nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1024 {
1025 	struct nfe_softc *sc = ifp->if_softc;
1026 	struct ifreq *ifr = (struct ifreq *) data;
1027 	struct mii_data *mii;
1028 	int error = 0;
1029 
1030 	switch (cmd) {
1031 	case SIOCSIFMTU:
1032 		if (ifr->ifr_mtu == ifp->if_mtu) {
1033 			error = EINVAL;
1034 			break;
1035 		}
1036 		if ((sc->nfe_flags & NFE_JUMBO_SUP) && (ifr->ifr_mtu >=
1037 		    ETHERMIN && ifr->ifr_mtu <= NV_PKTLIMIT_2)) {
1038 			NFE_LOCK(sc);
1039 			sc->nfe_mtu = ifp->if_mtu = ifr->ifr_mtu;
1040 			nfe_stop(ifp, 1);
1041 			nfe_free_tx_ring(sc, &sc->txq);
1042 			nfe_free_rx_ring(sc, &sc->rxq);
1043 			NFE_UNLOCK(sc);
1044 
1045 			/* Reallocate Tx and Rx rings. */
1046 			if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
1047 				printf("nfe%d: could not allocate Tx ring\n",
1048 				    sc->nfe_unit);
1049 				error = ENXIO;
1050 				break;
1051 			}
1052 
1053 			if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
1054 				printf("nfe%d: could not allocate Rx ring\n",
1055 				    sc->nfe_unit);
1056 				nfe_free_tx_ring(sc, &sc->txq);
1057 				error = ENXIO;
1058 				break;
1059 			}
1060 			NFE_LOCK(sc);
1061 			nfe_init_locked(sc);
1062 			NFE_UNLOCK(sc);
1063 		} else {
1064 			error = EINVAL;
1065 		}
1066 		break;
1067 	case SIOCSIFFLAGS:
1068 		NFE_LOCK(sc);
1069 		if (ifp->if_flags & IFF_UP) {
1070 			/*
1071 			 * If only the PROMISC or ALLMULTI flag changes, then
1072 			 * don't do a full re-init of the chip, just update
1073 			 * the Rx filter.
1074 			 */
1075 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1076 			    ((ifp->if_flags ^ sc->nfe_if_flags) &
1077 			     (IFF_ALLMULTI | IFF_PROMISC)) != 0)
1078 				nfe_setmulti(sc);
1079 			else
1080 				nfe_init_locked(sc);
1081 		} else {
1082 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1083 				nfe_stop(ifp, 1);
1084 		}
1085 		sc->nfe_if_flags = ifp->if_flags;
1086 		NFE_UNLOCK(sc);
1087 		error = 0;
1088 		break;
1089 	case SIOCADDMULTI:
1090 	case SIOCDELMULTI:
1091 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1092 			NFE_LOCK(sc);
1093 			nfe_setmulti(sc);
1094 			NFE_UNLOCK(sc);
1095 			error = 0;
1096 		}
1097 		break;
1098 	case SIOCSIFMEDIA:
1099 	case SIOCGIFMEDIA:
1100 		mii = device_get_softc(sc->nfe_miibus);
1101 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1102 		break;
1103 	case SIOCSIFCAP:
1104 	{
1105 		int init = 0;
1106 		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1107 #ifdef DEVICE_POLLING
1108 		if (mask & IFCAP_POLLING) {
1109 			if (ifr->ifr_reqcap & IFCAP_POLLING) {
1110 				error = ether_poll_register(nfe_poll, ifp);
1111 				if (error)
1112 					return(error);
1113 				NFE_LOCK(sc);
1114 				NFE_WRITE(sc, NFE_IRQ_MASK, 0);
1115 				ifp->if_capenable |= IFCAP_POLLING;
1116 				NFE_UNLOCK(sc);
1117 			} else {
1118 				error = ether_poll_deregister(ifp);
1119 				/* Enable interrupt even in error case */
1120 				NFE_LOCK(sc);
1121 				NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
1122 				ifp->if_capenable &= ~IFCAP_POLLING;
1123 				NFE_UNLOCK(sc);
1124 			}
1125 		}
1126 #endif /* DEVICE_POLLING */
1127 #ifdef NFE_CSUM
1128 		if (mask & IFCAP_HWCSUM) {
1129 			ifp->if_capenable ^= IFCAP_HWCSUM;
1130 			if (IFCAP_HWCSUM & ifp->if_capenable &&
1131 			    IFCAP_HWCSUM & ifp->if_capabilities)
1132 				ifp->if_hwassist = NFE_CSUM_FEATURES;
1133 			else
1134 				ifp->if_hwassist = 0;
1135 			sc->nfe_flags ^= NFE_HW_CSUM;
1136 			init = 1;
1137 		}
1138 #endif
1139 		if (init && ifp->if_drv_flags & IFF_DRV_RUNNING)
1140 			nfe_init(sc);
1141 	}
1142 		break;
1143 
1144 	default:
1145 		error = ether_ioctl(ifp, cmd, data);
1146 		break;
1147 	}
1148 
1149 	return error;
1150 }
1151 
1152 
1153 static void
1154 nfe_intr(void *arg)
1155 {
1156 	struct nfe_softc *sc = arg;
1157 	struct ifnet *ifp = sc->nfe_ifp;
1158 	u_int32_t r;
1159 
1160 	NFE_LOCK(sc);
1161 
1162 #ifdef DEVICE_POLLING
1163 	if (ifp->if_capenable & IFCAP_POLLING) {
1164 		NFE_UNLOCK(sc);
1165 		return;
1166 	}
1167 #endif
1168 
1169 	if ((r = NFE_READ(sc, NFE_IRQ_STATUS)) == 0) {
1170 		NFE_UNLOCK(sc);
1171 		return;	/* not for us */
1172 	}
1173 	NFE_WRITE(sc, NFE_IRQ_STATUS, r);
1174 
1175 	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));
1176 
1177 	NFE_WRITE(sc, NFE_IRQ_MASK, 0);
1178 
1179 	if (r & NFE_IRQ_LINK) {
1180 		NFE_READ(sc, NFE_PHY_STATUS);
1181 		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1182 		DPRINTF(("nfe%d: link state changed\n", sc->nfe_unit));
1183 	}
1184 
1185 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1186 		/* check Rx ring */
1187 		nfe_rxeof(sc);
1188 		/* check Tx ring */
1189 		nfe_txeof(sc);
1190 	}
1191 
1192 	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
1193 
1194 	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1195 	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1196 		nfe_start_locked(ifp);
1197 
1198 	NFE_UNLOCK(sc);
1199 
1200 	return;
1201 }
1202 
1203 
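/*
 * Descriptor sync helpers.  They currently ignore their descriptor/range
 * arguments and simply sync the entire descriptor map.
 */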
1204 static void
1205 nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
1206 {
1207 
1208 	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
1209 }
1210 
1211 
1212 static void
1213 nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
1214 {
1215 
1216 	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
1217 }
1218 
1219 
1220 static void
1221 nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
1222 {
1223 
1224 	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
1225 }
1226 
1227 
1228 static void
1229 nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
1230 {
1231 
1232 	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
1233 }
1234 
1235 
1236 static void
1237 nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
1238 {
1239 
1240 	bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, ops);
1241 }
1242 
1243 
1244 static void
1245 nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
1246 {
1247 
1248 	bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, ops);
1249 }
1250 
1251 
1252 static void
1253 nfe_rxeof(struct nfe_softc *sc)
1254 {
1255 	struct ifnet *ifp = sc->nfe_ifp;
1256 	struct nfe_desc32 *desc32=NULL;
1257 	struct nfe_desc64 *desc64=NULL;
1258 	struct nfe_rx_data *data;
1259 	struct mbuf *m, *mnew;
1260 	bus_addr_t physaddr;
1261 	u_int16_t flags;
1262 	int error, len;
1263 #if NVLAN > 1
1264 	u_int16_t vlan_tag = 0;
1265 	int have_tag = 0;
1266 #endif
1267 
1268 	NFE_LOCK_ASSERT(sc);
1269 
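	/*
	 * Process received frames until we reach a descriptor that is
	 * still owned by the chip (NFE_RX_READY still set).
	 */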
1270 	for (;;) {
1271 
1272 #ifdef DEVICE_POLLING
1273 		if (ifp->if_capenable & IFCAP_POLLING) {
1274 			if (sc->rxcycles <= 0)
1275 				break;
1276 			sc->rxcycles--;
1277 		}
1278 #endif
1279 
1280 		data = &sc->rxq.data[sc->rxq.cur];
1281 
1282 		if (sc->nfe_flags & NFE_40BIT_ADDR) {
1283 			desc64 = &sc->rxq.desc64[sc->rxq.cur];
1284 			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);
1285 
1286 			flags = letoh16(desc64->flags);
1287 			len = letoh16(desc64->length) & 0x3fff;
1288 
1289 #if NVLAN > 1
1290 			if (flags & NFE_TX_VLAN_TAG) {
1291 				have_tag = 1;
1292 				vlan_tag = desc64->vtag;
1293 			}
1294 #endif
1295 
1296 		} else {
1297 			desc32 = &sc->rxq.desc32[sc->rxq.cur];
1298 			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);
1299 
1300 			flags = letoh16(desc32->flags);
1301 			len = letoh16(desc32->length) & 0x3fff;
1302 		}
1303 
1304 		if (flags & NFE_RX_READY)
1305 			break;
1306 
1307 		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
1308 			if (!(flags & NFE_RX_VALID_V1))
1309 				goto skip;
1310 			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
1311 				flags &= ~NFE_RX_ERROR;
1312 				len--;	/* fix buffer length */
1313 			}
1314 		} else {
1315 			if (!(flags & NFE_RX_VALID_V2))
1316 				goto skip;
1317 
1318 			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
1319 				flags &= ~NFE_RX_ERROR;
1320 				len--;	/* fix buffer length */
1321 			}
1322 		}
1323 
1324 		if (flags & NFE_RX_ERROR) {
1325 			ifp->if_ierrors++;
1326 			goto skip;
1327 		}
1328 
1329 		/*
1330 		 * Try to allocate a new mbuf for this ring element and load
1331 		 * it before processing the current mbuf. If the ring element
1332 		 * cannot be loaded, drop the received packet and reuse the
1333 		 * old mbuf. In the unlikely case that the old mbuf can't be
1334 		 * reloaded either, explicitly panic.
1335 		 */
1336 		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
1337 		if (mnew == NULL) {
1338 			ifp->if_ierrors++;
1339 			goto skip;
1340 		}
1341 
1342 		MCLGET(mnew, M_DONTWAIT);
1343 		if (!(mnew->m_flags & M_EXT)) {
1344 			m_freem(mnew);
1345 			ifp->if_ierrors++;
1346 			goto skip;
1347 		}
1348 
1349 		bus_dmamap_sync(data->rx_data_tag, data->rx_data_map,
1350 		    BUS_DMASYNC_POSTREAD);
1351 		bus_dmamap_unload(data->rx_data_tag, data->rx_data_map);
1352 		error = bus_dmamap_load(data->rx_data_tag,
1353 		    data->rx_data_map, mtod(mnew, void *), MCLBYTES,
1354 		    nfe_dma_map_segs, &data->rx_data_segs,
1355 		    BUS_DMA_NOWAIT);
1356 		if (error != 0) {
1357 			m_freem(mnew);
1358 
1359 			/* try to reload the old mbuf */
1360 			error = bus_dmamap_load(data->rx_data_tag,
1361 			    data->rx_data_map, mtod(data->m, void *),
1362 			    MCLBYTES, nfe_dma_map_segs,
1363 			    &data->rx_data_segs, BUS_DMA_NOWAIT);
1364 			if (error != 0) {
1365 				/* very unlikely that it will fail.. */
1366 			      panic("nfe%d: could not load old rx mbuf",
1367 				    sc->nfe_unit);
1368 			}
1369 			ifp->if_ierrors++;
1370 			goto skip;
1371 		}
1372 		data->rx_data_addr = data->rx_data_segs.ds_addr;
1373 		physaddr = data->rx_data_addr;
1374 
1375 		/*
1376 		 * New mbuf successfully loaded, update Rx ring and continue
1377 		 * processing.
1378 		 */
1379 		m = data->m;
1380 		data->m = mnew;
1381 
1382 		/* finalize mbuf */
1383 		m->m_pkthdr.len = m->m_len = len;
1384 		m->m_pkthdr.rcvif = ifp;
1385 
1386 		if ((sc->nfe_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK)) {
1387 			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1388 			if (flags & NFE_RX_IP_CSUMOK_V2) {
1389 				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1390 			}
1391 			if (flags & NFE_RX_UDP_CSUMOK_V2 ||
1392 			    flags & NFE_RX_TCP_CSUMOK_V2) {
1393 				m->m_pkthdr.csum_flags |=
1394 				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1395 				m->m_pkthdr.csum_data = 0xffff;
1396 			}
1397 		}
1398 
1399 #if NVLAN > 1
1400 		if (have_tag) {
1401 			m->m_pkthdr.ether_vtag = vlan_tag;
1402 			m->m_flags |= M_VLANTAG;
1403 		}
1404 #endif
1405 		ifp->if_ipackets++;
1406 
1407 		NFE_UNLOCK(sc);
1408 		(*ifp->if_input)(ifp, m);
1409 		NFE_LOCK(sc);
1410 
1411 		/* update mapping address in h/w descriptor */
1412 		if (sc->nfe_flags & NFE_40BIT_ADDR) {
1413 #if defined(__LP64__)
1414 			desc64->physaddr[0] = htole32(physaddr >> 32);
1415 #endif
1416 			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
1417 		} else {
1418 			desc32->physaddr = htole32(physaddr);
1419 		}
1420 
1421 skip:		if (sc->nfe_flags & NFE_40BIT_ADDR) {
1422 			desc64->length = htole16(sc->rxq.bufsz);
1423 			desc64->flags = htole16(NFE_RX_READY);
1424 			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
1425 		} else {
1426 			desc32->length = htole16(sc->rxq.bufsz);
1427 			desc32->flags = htole16(NFE_RX_READY);
1428 			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
1429 		}
1430 
1431 		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
1432 	} /* end for (;;) */
1433 }
1434 
1435 
1436 static void
1437 nfe_txeof(struct nfe_softc *sc)
1438 {
1439 	struct ifnet *ifp = sc->nfe_ifp;
1440 	struct nfe_desc32 *desc32;
1441 	struct nfe_desc64 *desc64;
1442 	struct nfe_tx_data *data = NULL;
1443 	u_int16_t flags;
1444 
1445 	NFE_LOCK_ASSERT(sc);
1446 
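	/*
	 * Reclaim completed descriptors between txq.next and txq.cur and
	 * free the mbufs of transmitted frames.
	 */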
1447 	while (sc->txq.next != sc->txq.cur) {
1448 		if (sc->nfe_flags & NFE_40BIT_ADDR) {
1449 			desc64 = &sc->txq.desc64[sc->txq.next];
1450 			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);
1451 
1452 			flags = letoh16(desc64->flags);
1453 		} else {
1454 			desc32 = &sc->txq.desc32[sc->txq.next];
1455 			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);
1456 
1457 			flags = letoh16(desc32->flags);
1458 		}
1459 
1460 		if (flags & NFE_TX_VALID)
1461 			break;
1462 
1463 		data = &sc->txq.data[sc->txq.next];
1464 
1465 		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
1466 			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
1467 				goto skip;
1468 
1469 			if ((flags & NFE_TX_ERROR_V1) != 0) {
1470 				printf("nfe%d: tx v1 error 0x%4b\n",
1471 				    sc->nfe_unit, flags, NFE_V1_TXERR);
1472 
1473 				ifp->if_oerrors++;
1474 			} else
1475 				ifp->if_opackets++;
1476 		} else {
1477 			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
1478 				goto skip;
1479 
1480 			if ((flags & NFE_TX_ERROR_V2) != 0) {
1481 				printf("nfe%d: tx v1 error 0x%4b\n",
1482 				    sc->nfe_unit, flags, NFE_V2_TXERR);
1483 
1484 				ifp->if_oerrors++;
1485 			} else
1486 				ifp->if_opackets++;
1487 		}
1488 
1489 		if (data->m == NULL) {	/* should not get there */
1490 			printf("nfe%d: last fragment bit w/o associated mbuf!\n",
1491 			    sc->nfe_unit);
1492 			goto skip;
1493 		}
1494 
1495 		/* last fragment of the mbuf chain transmitted */
1496 		bus_dmamap_sync(sc->txq.tx_data_tag, data->active,
1497 		    BUS_DMASYNC_POSTWRITE);
1498 		bus_dmamap_unload(sc->txq.tx_data_tag, data->active);
1499 		m_freem(data->m);
1500 		data->m = NULL;
1501 
1502 		ifp->if_timer = 0;
1503 
1504 skip:		sc->txq.queued--;
1505 		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
1506 	}
1507 
1508 	if (data != NULL) {	/* at least one slot freed */
1509 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1510 		nfe_start_locked(ifp);
1511 	}
1512 }
1513 
1514 
1515 static int
1516 nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
1517 {
1518 	struct nfe_desc32 *desc32=NULL;
1519 	struct nfe_desc64 *desc64=NULL;
1520 	struct nfe_tx_data *data=NULL;
1521 	bus_dmamap_t map;
1522 	bus_dma_segment_t segs[NFE_MAX_SCATTER];
1523 	int error, i, nsegs;
1524 	u_int16_t flags = NFE_TX_VALID;
1525 
1526 	map = sc->txq.data[sc->txq.cur].tx_data_map;
1527 
1528 	error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, m0, segs,
1529 	    &nsegs, BUS_DMA_NOWAIT);
1530 
1531 	if (error != 0) {
1532 		printf("nfe%d: could not map mbuf (error %d)\n", sc->nfe_unit,
1533 		    error);
1534 		return error;
1535 	}
1536 
1537 	if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 1) {
1538 		bus_dmamap_unload(sc->txq.tx_data_tag, map);
1539 		return ENOBUFS;
1540 	}
1541 
1542 	if(sc->nfe_flags & NFE_HW_CSUM){
1543 		if (m0->m_pkthdr.csum_flags & CSUM_IP)
1544 			flags |= NFE_TX_IP_CSUM;
1545 		if (m0->m_pkthdr.csum_flags & CSUM_TCP)
1546 			flags |= NFE_TX_TCP_CSUM;
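		/* The same descriptor flag also requests UDP checksum offload. */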
1547 		if (m0->m_pkthdr.csum_flags & CSUM_UDP)
1548 			flags |= NFE_TX_TCP_CSUM;
1549 	}
1550 
1551 	for (i = 0; i < nsegs; i++) {
1552 		data = &sc->txq.data[sc->txq.cur];
1553 
1554 		if (sc->nfe_flags & NFE_40BIT_ADDR) {
1555 			desc64 = &sc->txq.desc64[sc->txq.cur];
1556 #if defined(__LP64__)
1557 			desc64->physaddr[0] = htole32(segs[i].ds_addr >> 32);
1558 #endif
1559 			desc64->physaddr[1] = htole32(segs[i].ds_addr &
1560 			    0xffffffff);
1561 			desc64->length = htole16(segs[i].ds_len - 1);
1562 			desc64->flags = htole16(flags);
1563 #if NVLAN > 0
1564 			if (m0->m_flags & M_VLANTAG)
1565 				desc64->vtag = htole32(NFE_TX_VTAG |
1566 				    m0->m_pkthdr.ether_vtag);
1567 #endif
1568 		} else {
1569 			desc32 = &sc->txq.desc32[sc->txq.cur];
1570 
1571 			desc32->physaddr = htole32(segs[i].ds_addr);
1572 			desc32->length = htole16(segs[i].ds_len - 1);
1573 			desc32->flags = htole16(flags);
1574 		}
1575 
1576 		/* csum flags and vtag belong to the first fragment only */
1577 		if (nsegs > 1) {
1578 			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
1579 		}
1580 
1581 		sc->txq.queued++;
1582 		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
1583 	}
1584 
1585 	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
1586 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1587 		flags |= NFE_TX_LASTFRAG_V2;
1588 		desc64->flags = htole16(flags);
1589 	} else {
1590 		if (sc->nfe_flags & NFE_JUMBO_SUP)
1591 			flags |= NFE_TX_LASTFRAG_V2;
1592 		else
1593 			flags |= NFE_TX_LASTFRAG_V1;
1594 		desc32->flags = htole16(flags);
1595 	}
1596 
1597 	data->m = m0;
1598 	data->active = map;
1599 	data->nsegs = nsegs;
1600 
1601 	bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);
1602 
1603 	return 0;
1604 }
1605 
1606 
1607 static void
1608 nfe_setmulti(struct nfe_softc *sc)
1609 {
1610 	struct ifnet *ifp = sc->nfe_ifp;
1611 	struct ifmultiaddr *ifma;
1612 	int i;
1613 	u_int32_t filter = NFE_RXFILTER_MAGIC;
1614 	u_int8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
1615 	u_int8_t etherbroadcastaddr[ETHER_ADDR_LEN] = {
1616 		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
1617 	};
1618 
1619 	NFE_LOCK_ASSERT(sc);
1620 
1621 	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
1622 		bzero(addr, ETHER_ADDR_LEN);
1623 		bzero(mask, ETHER_ADDR_LEN);
1624 		goto done;
1625 	}
1626 
1627 	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
1628 	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
1629 
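	/*
	 * Fold all multicast addresses together: addr keeps the bits common
	 * to every address and mask ends up set in each bit position on
	 * which all of the addresses agree.
	 */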
1630 	IF_ADDR_LOCK(ifp);
1631 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1632 		u_char *addrp;
1633 
1634 		if (ifma->ifma_addr->sa_family != AF_LINK)
1635 			continue;
1636 
1637 		addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
1638 		for (i = 0; i < ETHER_ADDR_LEN; i++) {
1639 			u_int8_t mcaddr = addrp[i];
1640 			addr[i] &= mcaddr;
1641 			mask[i] &= ~mcaddr;
1642 		}
1643 	}
1644 	IF_ADDR_UNLOCK(ifp);
1645 
1646 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
1647 		mask[i] |= addr[i];
1648 	}
1649 
1650 done:
1651 	addr[0] |= 0x01;	/* make sure multicast bit is set */
1652 
1653 	NFE_WRITE(sc, NFE_MULTIADDR_HI,
1654 	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1655 	NFE_WRITE(sc, NFE_MULTIADDR_LO,
1656 	    addr[5] <<  8 | addr[4]);
1657 	NFE_WRITE(sc, NFE_MULTIMASK_HI,
1658 	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
1659 	NFE_WRITE(sc, NFE_MULTIMASK_LO,
1660 	    mask[5] <<  8 | mask[4]);
1661 
1662 	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
1663 	NFE_WRITE(sc, NFE_RXFILTER, filter);
1664 }
1665 
1666 
1667 static void
1668 nfe_start(struct ifnet *ifp)
1669 {
1670 	struct nfe_softc *sc;
1671 
1672 	sc = ifp->if_softc;
1673 	NFE_LOCK(sc);
1674 	nfe_start_locked(ifp);
1675 	NFE_UNLOCK(sc);
1676 }
1677 
1678 
1679 static void
1680 nfe_start_locked(struct ifnet *ifp)
1681 {
1682 	struct nfe_softc *sc = ifp->if_softc;
1683 	struct mbuf *m0;
1684 	int old = sc->txq.cur;
1685 
1686 	if (!sc->nfe_link || ifp->if_drv_flags & IFF_DRV_OACTIVE) {
1687 		return;
1688 	}
1689 
1690 	for (;;) {
1691 		IFQ_POLL(&ifp->if_snd, m0);
1692 		if (m0 == NULL)
1693 			break;
1694 
1695 		if (nfe_encap(sc, m0) != 0) {
1696 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1697 			break;
1698 		}
1699 
1700 		/* packet put in h/w queue, remove from s/w queue */
1701 		IFQ_DEQUEUE(&ifp->if_snd, m0);
1702 
1703 		ETHER_BPF_MTAP(ifp, m0);
1704 	}
1705 	if (sc->txq.cur == old)	{ /* nothing sent */
1706 		return;
1707 	}
1708 
1709 	if (sc->nfe_flags & NFE_40BIT_ADDR)
1710 		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
1711 	else
1712 		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
1713 
1714 	/* kick Tx */
1715 	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
1716 
1717 	/*
1718 	 * Set a timeout in case the chip goes out to lunch.
1719 	 */
1720 	ifp->if_timer = 5;
1721 
1722 	return;
1723 }
1724 
1725 
1726 static void
1727 nfe_watchdog(struct ifnet *ifp)
1728 {
1729 	struct nfe_softc *sc = ifp->if_softc;
1730 
1731 	printf("nfe%d: watchdog timeout\n", sc->nfe_unit);
1732 
1733 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1734 	nfe_init(sc);
1735 	ifp->if_oerrors++;
1736 
1737 	return;
1738 }
1739 
1740 
1741 static void
1742 nfe_init(void *xsc)
1743 {
1744 	struct nfe_softc *sc = xsc;
1745 
1746 	NFE_LOCK(sc);
1747 	nfe_init_locked(sc);
1748 	NFE_UNLOCK(sc);
1749 
1750 	return;
1751 }
1752 
1753 
1754 static void
1755 nfe_init_locked(void *xsc)
1756 {
1757 	struct nfe_softc *sc = xsc;
1758 	struct ifnet *ifp = sc->nfe_ifp;
1759 	struct mii_data *mii;
1760 	u_int32_t tmp;
1761 
1762 	NFE_LOCK_ASSERT(sc);
1763 
1764 	mii = device_get_softc(sc->nfe_miibus);
1765 
1766 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1767 		return;
1768 	}
1769 
1770 	nfe_stop(ifp, 0);
1771 
1772 	NFE_WRITE(sc, NFE_TX_UNK, 0);
1773 	NFE_WRITE(sc, NFE_STATUS, 0);
1774 
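	/*
	 * Select the descriptor format for this chip: V3 for 40-bit
	 * (64-bit descriptor) parts, V2 for jumbo-capable parts, V1
	 * otherwise.
	 */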
1775 	sc->rxtxctl = NFE_RXTX_BIT2;
1776 	if (sc->nfe_flags & NFE_40BIT_ADDR)
1777 		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
1778 	else if (sc->nfe_flags & NFE_JUMBO_SUP)
1779 		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
1780 
1781 	if (sc->nfe_flags & NFE_HW_CSUM)
1782 		sc->rxtxctl |= NFE_RXTX_RXCSUM;
1783 
1784 #if NVLAN > 0
1785 	/*
1786 	 * Although the adapter is capable of stripping VLAN tags from received
1787 	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
1788 	 * purpose.  This will be done in software by our network stack.
1789 	 */
1790 	if (sc->nfe_flags & NFE_HW_VLAN)
1791 		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;
1792 #endif
1793 
1794 	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
1795 	DELAY(10);
1796 	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
1797 
1798 #if NVLAN > 0
1799 	if (sc->nfe_flags & NFE_HW_VLAN)
1800 		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
1801 #endif
1802 
1803 	NFE_WRITE(sc, NFE_SETUP_R6, 0);
1804 
1805 	/* set MAC address */
1806 	nfe_set_macaddr(sc, sc->eaddr);
1807 
1808 	/* tell MAC where rings are in memory */
1809 #ifdef __LP64__
1810 	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
1811 #endif
1812 	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
1813 #ifdef __LP64__
1814 	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
1815 #endif
1816 	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);
1817 
1818 	NFE_WRITE(sc, NFE_RING_SIZE,
1819 	    (NFE_RX_RING_COUNT - 1) << 16 |
1820 	    (NFE_TX_RING_COUNT - 1));
1821 
1822 	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);
1823 
1824 	/* force MAC to wakeup */
1825 	tmp = NFE_READ(sc, NFE_PWR_STATE);
1826 	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
1827 	DELAY(10);
1828 	tmp = NFE_READ(sc, NFE_PWR_STATE);
1829 	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);
1830 
1831 #if 1
1832 	/* configure interrupts coalescing/mitigation */
1833 	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
1834 #else
1835 	/* no interrupt mitigation: one interrupt per packet */
1836 	NFE_WRITE(sc, NFE_IMTIMER, 970);
1837 #endif
1838 
1839 	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
1840 	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
1841 	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
1842 
1843 	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
1844 	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
1845 
1846 	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
1847 	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);
1848 
1849 	sc->rxtxctl &= ~NFE_RXTX_BIT2;
1850 	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
1851 	DELAY(10);
1852 	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
1853 
1854 	/* set Rx filter */
1855 	nfe_setmulti(sc);
1856 
1857 	nfe_ifmedia_upd(ifp);
1858 
1859 	nfe_tick_locked(sc);
1860 
1861 	/* enable Rx */
1862 	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
1863 
1864 	/* enable Tx */
1865 	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
1866 
1867 	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1868 
1869 #ifdef DEVICE_POLLING
1870 	if (ifp->if_capenable & IFCAP_POLLING)
1871 		NFE_WRITE(sc, NFE_IRQ_MASK, 0);
1872 	else
1873 #endif
1874 	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED); /* enable interrupts */
1875 
1876 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1877 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1878 
1879 	sc->nfe_link = 0;
1880 
1881 	return;
1882 }
1883 
1884 
1885 static void
1886 nfe_stop(struct ifnet *ifp, int disable)
1887 {
1888 	struct nfe_softc *sc = ifp->if_softc;
1889 	struct mii_data  *mii;
1890 
1891 	NFE_LOCK_ASSERT(sc);
1892 
1893 	ifp->if_timer = 0;
1894 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1895 
1896 	mii = device_get_softc(sc->nfe_miibus);
1897 
1898 	callout_stop(&sc->nfe_stat_ch);
1899 
1900 	/* abort Tx */
1901 	NFE_WRITE(sc, NFE_TX_CTL, 0);
1902 
1903 	/* disable Rx */
1904 	NFE_WRITE(sc, NFE_RX_CTL, 0);
1905 
1906 	/* disable interrupts */
1907 	NFE_WRITE(sc, NFE_IRQ_MASK, 0);
1908 
1909 	sc->nfe_link = 0;
1910 
1911 	/* reset Tx and Rx rings */
1912 	nfe_reset_tx_ring(sc, &sc->txq);
1913 	nfe_reset_rx_ring(sc, &sc->rxq);
1914 
1915 	return;
1916 }
1917 
1918 
1919 static int
1920 nfe_ifmedia_upd(struct ifnet *ifp)
1921 {
1922 	struct nfe_softc *sc = ifp->if_softc;
1923 
1924 	NFE_LOCK(sc);
1925 	nfe_ifmedia_upd_locked(ifp);
1926 	NFE_UNLOCK(sc);
1927 	return (0);
1928 }
1929 
1930 
1931 static int
1932 nfe_ifmedia_upd_locked(struct ifnet *ifp)
1933 {
1934 	struct nfe_softc *sc = ifp->if_softc;
1935 	struct mii_data *mii;
1936 
1937 	NFE_LOCK_ASSERT(sc);
1938 
1939 	mii = device_get_softc(sc->nfe_miibus);
1940 
1941 	if (mii->mii_instance) {
1942 		struct mii_softc *miisc;
1943 		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
1944 		    miisc = LIST_NEXT(miisc, mii_list)) {
1945 			mii_phy_reset(miisc);
1946 		}
1947 	}
1948 	mii_mediachg(mii);
1949 
1950 	return (0);
1951 }
1952 
1953 
1954 static void
1955 nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1956 {
1957 	struct nfe_softc *sc;
1958 	struct mii_data *mii;
1959 
1960 	sc = ifp->if_softc;
1961 
1962 	NFE_LOCK(sc);
1963 	mii = device_get_softc(sc->nfe_miibus);
1964 	mii_pollstat(mii);
1965 	NFE_UNLOCK(sc);
1966 
1967 	ifmr->ifm_active = mii->mii_media_active;
1968 	ifmr->ifm_status = mii->mii_media_status;
1969 
1970 	return;
1971 }
1972 
1973 
1974 static void
1975 nfe_tick(void *xsc)
1976 {
1977 	struct nfe_softc *sc;
1978 
1979 	sc = xsc;
1980 
1981 	NFE_LOCK(sc);
1982 	nfe_tick_locked(sc);
1983 	NFE_UNLOCK(sc);
1984 }
1985 
1986 
1987 void
1988 nfe_tick_locked(struct nfe_softc *arg)
1989 {
1990 	struct nfe_softc *sc;
1991 	struct mii_data *mii;
1992 	struct ifnet *ifp;
1993 
1994 	sc = arg;
1995 
1996 	NFE_LOCK_ASSERT(sc);
1997 
1998 	ifp = sc->nfe_ifp;
1999 
2000 	mii = device_get_softc(sc->nfe_miibus);
2001 	mii_tick(mii);
2002 
2003 	if (!sc->nfe_link) {
2004 		if (mii->mii_media_status & IFM_ACTIVE &&
2005 		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
2006 			sc->nfe_link++;
2007 			if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T
2008 			    && bootverbose)
2009 				if_printf(sc->nfe_ifp, "gigabit link up\n");
2010 			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2011 				nfe_start_locked(ifp);
2012 		}
2013 	}
2014 	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2015 
2016 	return;
2017 }
2018 
2019 
2020 static void
2021 nfe_shutdown(device_t dev)
2022 {
2023 	struct nfe_softc *sc;
2024 	struct ifnet *ifp;
2025 
2026 	sc = device_get_softc(dev);
2027 
2028 	NFE_LOCK(sc);
2029 	ifp = sc->nfe_ifp;
2030 	nfe_stop(ifp,0);
2031 	/* nfe_reset(sc); */
2032 	NFE_UNLOCK(sc);
2033 
2034 	return;
2035 }
2036 
2037 
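/*
 * nfe_get_macaddr() and nfe_set_macaddr() intentionally use opposite
 * register byte orders: the factory address is stored reversed in the
 * chip, while the running MAC is programmed with the canonical order.
 */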
2038 static void
2039 nfe_get_macaddr(struct nfe_softc *sc, u_char *addr)
2040 {
2041 	uint32_t tmp;
2042 
2043 	tmp = NFE_READ(sc, NFE_MACADDR_LO);
2044 	addr[0] = (tmp >> 8) & 0xff;
2045 	addr[1] = (tmp & 0xff);
2046 
2047 	tmp = NFE_READ(sc, NFE_MACADDR_HI);
2048 	addr[2] = (tmp >> 24) & 0xff;
2049 	addr[3] = (tmp >> 16) & 0xff;
2050 	addr[4] = (tmp >>  8) & 0xff;
2051 	addr[5] = (tmp & 0xff);
2052 }
2053 
2054 
2055 static void
2056 nfe_set_macaddr(struct nfe_softc *sc, u_char *addr)
2057 {
2058 
2059 	NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] <<  8 | addr[4]);
2060 	NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
2061 	    addr[1] << 8 | addr[0]);
2062 }
2063 
2064 
2065 /*
2066  * Map a single buffer address.
2067  */
2068 
2069 static void
2070 nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2071 {
2075 
2076 	if (error)
2077 		return;
2078 
2079 	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
2080 
2081 	*(bus_dma_segment_t *)arg = *segs;
2082 
2083 	return;
2084 }
2085