xref: /freebsd/sys/dev/nfe/if_nfe.c (revision d056fa046c6a91b90cd98165face0e42a33a5173)
1 /*	$OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $	*/
2 
3 /*-
4  * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
5  * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
6  * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
22 
23 #include <sys/cdefs.h>
24 __FBSDID("$FreeBSD$");
25 
26 /* Uncomment the following line to enable polling. */
27 /* #define DEVICE_POLLING */
28 
29 #define NFE_NO_JUMBO
30 #define NFE_CSUM
31 #define NFE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
32 #define NVLAN 0
33 
34 #ifdef HAVE_KERNEL_OPTION_HEADERS
35 #include "opt_device_polling.h"
36 #endif
37 
38 #include <sys/param.h>
39 #include <sys/endian.h>
40 #include <sys/systm.h>
41 #include <sys/sockio.h>
42 #include <sys/mbuf.h>
43 #include <sys/malloc.h>
44 #include <sys/module.h>
45 #include <sys/kernel.h>
46 #include <sys/socket.h>
47 #include <sys/taskqueue.h>
48 
49 #include <net/if.h>
50 #include <net/if_arp.h>
51 #include <net/ethernet.h>
52 #include <net/if_dl.h>
53 #include <net/if_media.h>
54 #include <net/if_types.h>
55 #include <net/if_vlan_var.h>
56 
57 #include <net/bpf.h>
58 
59 #include <machine/bus.h>
60 #include <machine/resource.h>
61 #include <sys/bus.h>
62 #include <sys/rman.h>
63 
64 #include <dev/mii/mii.h>
65 #include <dev/mii/miivar.h>
66 
67 #include <dev/pci/pcireg.h>
68 #include <dev/pci/pcivar.h>
69 
70 #include <dev/nfe/if_nfereg.h>
71 #include <dev/nfe/if_nfevar.h>
72 
73 MODULE_DEPEND(nfe, pci, 1, 1, 1);
74 MODULE_DEPEND(nfe, ether, 1, 1, 1);
75 MODULE_DEPEND(nfe, miibus, 1, 1, 1);
76 #include "miibus_if.h"
77 
78 static  int nfe_probe  (device_t);
79 static  int nfe_attach (device_t);
80 static  int nfe_detach (device_t);
81 static  void nfe_shutdown(device_t);
82 static  int nfe_miibus_readreg	(device_t, int, int);
83 static  int nfe_miibus_writereg	(device_t, int, int, int);
84 static  void nfe_miibus_statchg	(device_t);
85 static  int nfe_ioctl(struct ifnet *, u_long, caddr_t);
86 static  void nfe_intr(void *);
87 static void nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
88 static void nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
89 static void nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
90 static void nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
91 static void nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
92 static void nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
93 static void nfe_rxeof(struct nfe_softc *);
94 static void nfe_txeof(struct nfe_softc *);
95 static int  nfe_encap(struct nfe_softc *, struct mbuf *);
96 static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *);
97 static void nfe_jfree(void *, void *);
98 static int  nfe_jpool_alloc(struct nfe_softc *);
99 static void nfe_jpool_free(struct nfe_softc *);
100 static void nfe_setmulti(struct nfe_softc *);
101 static void nfe_start(struct ifnet *);
102 static void nfe_start_locked(struct ifnet *);
103 static void nfe_watchdog(struct ifnet *);
104 static void nfe_init(void *);
105 static void nfe_init_locked(void *);
106 static void nfe_stop(struct ifnet *, int);
107 static int  nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
108 static void nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
109 static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
110 static int  nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
111 static void nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
112 static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
113 static int  nfe_ifmedia_upd(struct ifnet *);
114 static int  nfe_ifmedia_upd_locked(struct ifnet *);
115 static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
116 static void nfe_tick(void *);
117 static void nfe_tick_locked(struct nfe_softc *);
118 static void nfe_get_macaddr(struct nfe_softc *, u_char *);
119 static void nfe_set_macaddr(struct nfe_softc *, u_char *);
120 static void nfe_dma_map_segs	(void *, bus_dma_segment_t *, int, int);
121 #ifdef DEVICE_POLLING
122 static void nfe_poll_locked(struct ifnet *, enum poll_cmd, int);
123 #endif
124 
125 #ifdef NFE_DEBUG
126 int nfedebug = 0;
127 #define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
128 #define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
129 #else
130 #define DPRINTF(x)
131 #define DPRINTFN(n,x)
132 #endif
133 
134 #define	NFE_LOCK(_sc)		mtx_lock(&(_sc)->nfe_mtx)
135 #define	NFE_UNLOCK(_sc)		mtx_unlock(&(_sc)->nfe_mtx)
136 #define	NFE_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)
137 
138 #define letoh16(x) le16toh(x)
139 
140 #define	NV_RID		0x10
141 
142 static device_method_t nfe_methods[] = {
143 	/* Device interface */
144 	DEVMETHOD(device_probe,		nfe_probe),
145 	DEVMETHOD(device_attach,	nfe_attach),
146 	DEVMETHOD(device_detach,	nfe_detach),
147 	DEVMETHOD(device_shutdown,	nfe_shutdown),
148 
149 	/* bus interface */
150 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
151 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
152 
153 	/* MII interface */
154 	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
155 	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
156 	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),
157 
158 	{ 0, 0 }
159 };
160 
161 static driver_t nfe_driver = {
162 	"nfe",
163 	nfe_methods,
164 	sizeof(struct nfe_softc)
165 };
166 
167 static devclass_t nfe_devclass;
168 
169 DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0);
170 DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
171 
172 static struct nfe_type nfe_devs[] = {
173 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
174 	"NVIDIA nForce Networking Adapter"},
175 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
176 	"NVIDIA nForce2 Networking Adapter"},
177 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
178 	"NVIDIA nForce3 Networking Adapter"},
179 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
180 	"NVIDIA nForce2 400 Networking Adapter"},
181 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
182 	"NVIDIA nForce2 400 Networking Adapter"},
183 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
184 	"NVIDIA nForce3 250 Networking Adapter"},
185 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
186 	"NVIDIA nForce3 Networking Adapter"},
187 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
188 	"NVIDIA nForce4 Networking Adapter"},
189 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
190 	"NVIDIA nForce4 Networking Adapter"},
191 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
192 	"NVIDIA nForce MCP04 Networking Adapter"},
193 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
194 	"NVIDIA nForce MCP04 Networking Adapter"},
195 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
196 	"NVIDIA nForce 430 Networking Adapter"},
197 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
198 	"NVIDIA nForce 430 Networking Adapter"},
199 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
200 	"NVIDIA nForce MCP55 Networking Adapter"},
201 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
202 	"NVIDIA nForce MCP55 Networking Adapter"},
203 	{0, 0, NULL}
204 };
205 
206 
207 /* Probe for supported hardware IDs */
208 static int
209 nfe_probe(device_t dev)
210 {
211 	struct nfe_type *t;
212 
213 	t = nfe_devs;
214 	/* Check for matching PCI device IDs */
215 	while (t->name != NULL) {
216 		if ((pci_get_vendor(dev) == t->vid_id) &&
217 		    (pci_get_device(dev) == t->dev_id)) {
218 			device_set_desc(dev, t->name);
219 			return (0);
220 		}
221 		t++;
222 	}
223 
224 	return (ENXIO);
225 }
226 
227 static int
228 nfe_attach(device_t dev)
229 {
230 	struct nfe_softc *sc;
231 	struct ifnet *ifp;
232 	int unit, error = 0, rid;
233 
234 	sc = device_get_softc(dev);
235 	unit = device_get_unit(dev);
236 	sc->nfe_dev = dev;
237 	sc->nfe_unit = unit;
238 
239 	mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
240 	    MTX_DEF | MTX_RECURSE);
241 	callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);
242 
243 
244 	pci_enable_busmaster(dev);
245 
246 	rid = NV_RID;
247 	sc->nfe_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
248 	    0, ~0, 1, RF_ACTIVE);
249 
250 	if (sc->nfe_res == NULL) {
251 		printf ("nfe%d: couldn't map ports/memory\n", unit);
252 		error = ENXIO;
253 		goto fail;
254 	}
255 
256 	sc->nfe_memt = rman_get_bustag(sc->nfe_res);
257 	sc->nfe_memh = rman_get_bushandle(sc->nfe_res);
258 
259 	/* Allocate interrupt */
260 	rid = 0;
261 	sc->nfe_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
262 	    0, ~0, 1, RF_SHAREABLE | RF_ACTIVE);
263 
264 	if (sc->nfe_irq == NULL) {
265 		printf("nfe%d: couldn't map interrupt\n", unit);
266 		error = ENXIO;
267 		goto fail;
268 	}
269 
270 	nfe_get_macaddr(sc, sc->eaddr);
271 
272 	sc->nfe_flags = 0;
273 
274 	switch (pci_get_device(dev)) {
275 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
276 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
277 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
278 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
279 		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
280 		break;
281 	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
282 	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
283 		sc->nfe_flags |= NFE_40BIT_ADDR;
284 		break;
285 	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
286 	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
287 	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
288 	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
289 		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
290 		break;
291 	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
292 	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
293 		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_HW_VLAN;
294 		break;
295 	}
296 
297 #ifndef NFE_NO_JUMBO
298 	/* enable jumbo frames for adapters that support them */
299 	if (sc->nfe_flags & NFE_JUMBO_SUP)
300 		sc->nfe_flags |= NFE_USE_JUMBO;
301 #endif
302 
303 	/*
304 	 * Allocate the parent bus DMA tag appropriate for PCI.
305 	 */
306 #define NFE_NSEG_NEW 32
307 	error = bus_dma_tag_create(NULL,	/* parent */
308 			1, 0,			/* alignment, boundary */
309 			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
310 			BUS_SPACE_MAXADDR,	/* highaddr */
311 			NULL, NULL,		/* filter, filterarg */
312 			MAXBSIZE, NFE_NSEG_NEW,	/* maxsize, nsegments */
313 			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
314 			BUS_DMA_ALLOCNOW,	/* flags */
315 			NULL, NULL,		/* lockfunc, lockarg */
316 			&sc->nfe_parent_tag);
317 	if (error)
318 		goto fail;
319 
320 	/*
321 	 * Allocate Tx and Rx rings.
322 	 */
323 	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
324 		printf("nfe%d: could not allocate Tx ring\n", unit);
325 		error = ENXIO;
326 		goto fail;
327 	}
328 
329 	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
330 		printf("nfe%d: could not allocate Rx ring\n", unit);
331 		nfe_free_tx_ring(sc, &sc->txq);
332 		error = ENXIO;
333 		goto fail;
334 	}
335 
336 	ifp = sc->nfe_ifp = if_alloc(IFT_ETHER);
337 	if (ifp == NULL) {
338 		printf("nfe%d: can not if_alloc()\n", unit);
339 		error = ENOSPC;
340 		goto fail;
341 	}
342 
343 	ifp->if_softc = sc;
344 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
345 	ifp->if_mtu = ETHERMTU;
346 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
347 	ifp->if_ioctl = nfe_ioctl;
348 	ifp->if_start = nfe_start;
349 	/* ifp->if_hwassist = NFE_CSUM_FEATURES; */
350 	ifp->if_watchdog = nfe_watchdog;
351 	ifp->if_init = nfe_init;
352 	ifp->if_baudrate = IF_Gbps(1);
353 	ifp->if_snd.ifq_maxlen = NFE_IFQ_MAXLEN;
354 
355 	ifp->if_capabilities = IFCAP_VLAN_MTU;
356 #if NVLAN > 0
357 	if (sc->nfe_flags & NFE_HW_VLAN)
358 		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
359 #endif
360 #ifdef NFE_CSUM
361 	if (sc->nfe_flags & NFE_HW_CSUM) {
362 		ifp->if_capabilities |= IFCAP_HWCSUM;
363 	}
364 #endif
365 	ifp->if_capenable = ifp->if_capabilities;
366 
367 #ifdef DEVICE_POLLING
368 	ifp->if_capabilities |= IFCAP_POLLING;
369 #endif
370 
371 	/* Do MII setup */
372 	if (mii_phy_probe(dev, &sc->nfe_miibus, nfe_ifmedia_upd, nfe_ifmedia_sts)) {
373 		printf("nfe%d: MII without any phy!\n", unit);
374 		error = ENXIO;
375 		goto fail;
376 	}
377 
378 	ether_ifattach(ifp, sc->eaddr);
379 
380 	error = bus_setup_intr(dev, sc->nfe_irq, INTR_TYPE_NET|INTR_MPSAFE,
381 	    nfe_intr, sc, &sc->nfe_intrhand);
382 
383 	if (error) {
384 		printf("nfe%d: couldn't set up irq\n", unit);
385 		ether_ifdetach(ifp);
386 		goto fail;
387 	}
388 
389 fail:
390 	if (error)
391 		nfe_detach(dev);
392 
393 	return (error);
394 }
395 
396 
397 static int
398 nfe_detach(device_t dev)
399 {
400 	struct nfe_softc	*sc;
401 	struct ifnet		*ifp;
402 	u_char			eaddr[ETHER_ADDR_LEN];
403 	int			i;
404 
405 	sc = device_get_softc(dev);
406 	KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
407 	ifp = sc->nfe_ifp;
408 
409 #ifdef DEVICE_POLLING
410 	if (ifp->if_capenable & IFCAP_POLLING)
411 		ether_poll_deregister(ifp);
412 #endif
413 
414 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
415 		eaddr[i] = sc->eaddr[5 - i];
416 	}
417 	nfe_set_macaddr(sc, eaddr);
418 
419 	if (device_is_attached(dev)) {
420 		nfe_stop(ifp, 1);
421 		ifp->if_flags &= ~IFF_UP;
422 		callout_drain(&sc->nfe_stat_ch);
423 		ether_ifdetach(ifp);
424 	}
425 
426 	if (ifp)
427 		if_free(ifp);
428 	if (sc->nfe_miibus)
429 		device_delete_child(dev, sc->nfe_miibus);
430 	bus_generic_detach(dev);
431 
432 	if (sc->nfe_intrhand)
433 		bus_teardown_intr(dev, sc->nfe_irq, sc->nfe_intrhand);
434 	if (sc->nfe_irq)
435 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nfe_irq);
436 	if (sc->nfe_res)
437 		bus_release_resource(dev, SYS_RES_MEMORY, NV_RID, sc->nfe_res);
438 
439 	nfe_free_tx_ring(sc, &sc->txq);
440 	nfe_free_rx_ring(sc, &sc->rxq);
441 
442 	if (sc->nfe_parent_tag)
443 		bus_dma_tag_destroy(sc->nfe_parent_tag);
444 
445 	mtx_destroy(&sc->nfe_mtx);
446 
447 	return (0);
448 }
449 
450 
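/*
 * MII status change callback: reprogram the MAC-side PHY interface,
 * link speed, duplex and random-seed registers to match the media the
 * MII layer reports as active.
 */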
451 static void
452 nfe_miibus_statchg(device_t dev)
453 {
454 	struct nfe_softc *sc;
455 	struct mii_data *mii;
456 	u_int32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;
457 
458 	sc = device_get_softc(dev);
459 	mii = device_get_softc(sc->nfe_miibus);
460 
461 	phy = NFE_READ(sc, NFE_PHY_IFACE);
462 	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);
463 
464 	seed = NFE_READ(sc, NFE_RNDSEED);
465 	seed &= ~NFE_SEED_MASK;
466 
467 	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
468 		phy  |= NFE_PHY_HDX;	/* half-duplex */
469 		misc |= NFE_MISC1_HDX;
470 	}
471 
472 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
473 	case IFM_1000_T:	/* full-duplex only */
474 		link |= NFE_MEDIA_1000T;
475 		seed |= NFE_SEED_1000T;
476 		phy  |= NFE_PHY_1000T;
477 		break;
478 	case IFM_100_TX:
479 		link |= NFE_MEDIA_100TX;
480 		seed |= NFE_SEED_100TX;
481 		phy  |= NFE_PHY_100TX;
482 		break;
483 	case IFM_10_T:
484 		link |= NFE_MEDIA_10T;
485 		seed |= NFE_SEED_10T;
486 		break;
487 	}
488 
489 	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */
490 
491 	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
492 	NFE_WRITE(sc, NFE_MISC1, misc);
493 	NFE_WRITE(sc, NFE_LINKSPEED, link);
494 }
495 
496 static int
497 nfe_miibus_readreg(device_t dev, int phy, int reg)
498 {
499 	struct nfe_softc *sc = device_get_softc(dev);
500 	u_int32_t val;
501 	int ntries;
502 
503 	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
504 
505 	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
506 		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
507 		DELAY(100);
508 	}
509 
510 	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);
511 
512 	for (ntries = 0; ntries < 1000; ntries++) {
513 		DELAY(100);
514 		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
515 			break;
516 	}
517 	if (ntries == 1000) {
518 		DPRINTFN(2, ("nfe%d: timeout waiting for PHY\n", sc->nfe_unit));
519 		return 0;
520 	}
521 
522 	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
523 		DPRINTFN(2, ("nfe%d: could not read PHY\n", sc->nfe_unit));
524 		return 0;
525 	}
526 
527 	val = NFE_READ(sc, NFE_PHY_DATA);
528 	if (val != 0xffffffff && val != 0)
529 		sc->mii_phyaddr = phy;
530 
531 	DPRINTFN(2, ("nfe%d: mii read phy %d reg 0x%x ret 0x%x\n", sc->nfe_unit, phy, reg, val));
532 
533 	return val;
534 }
535 
536 static int
537 nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
538 {
539 	struct nfe_softc *sc = device_get_softc(dev);
540 	u_int32_t ctl;
541 	int ntries;
542 
543 	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
544 
545 	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
546 		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
547 		DELAY(100);
548 	}
549 
550 	NFE_WRITE(sc, NFE_PHY_DATA, val);
551 	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
552 	NFE_WRITE(sc, NFE_PHY_CTL, ctl);
553 
554 	for (ntries = 0; ntries < 1000; ntries++) {
555 		DELAY(100);
556 		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
557 			break;
558 	}
559 #ifdef NFE_DEBUG
560 	if (nfedebug >= 2 && ntries == 1000)
561 		printf("could not write to PHY\n");
562 #endif
563 	return 0;
564 }
565 
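/*
 * Allocate and DMA-map the Rx descriptor ring, then pre-load every
 * ring slot with a receive buffer (an mbuf cluster, or a jumbo buffer
 * when NFE_USE_JUMBO is set).
 */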
566 static int
567 nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
568 {
569 	struct nfe_desc32 *desc32;
570 	struct nfe_desc64 *desc64;
571 	struct nfe_rx_data *data;
572 	struct nfe_jbuf *jbuf;
573 	void **desc;
574 	bus_addr_t physaddr;
575 	int i, error, descsize;
576 
577 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
578 		desc = (void **)&ring->desc64;
579 		descsize = sizeof (struct nfe_desc64);
580 	} else {
581 		desc = (void **)&ring->desc32;
582 		descsize = sizeof (struct nfe_desc32);
583 	}
584 
585 	ring->cur = ring->next = 0;
586 	ring->bufsz = MCLBYTES;
587 
588 	error = bus_dma_tag_create(sc->nfe_parent_tag,
589 				   PAGE_SIZE, 0,	/* alignment, boundary */
590 				   BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
591 				   BUS_SPACE_MAXADDR,	/* highaddr */
592 				   NULL, NULL,		/* filter, filterarg */
593 				   NFE_RX_RING_COUNT * descsize, 1, /* maxsize, nsegments */
594 				   NFE_RX_RING_COUNT * descsize,   /* maxsegsize */
595 				   BUS_DMA_ALLOCNOW,	/* flags */
596 				   NULL, NULL,		/* lockfunc, lockarg */
597 				   &ring->rx_desc_tag);
598 	if (error != 0) {
599 		printf("nfe%d: could not create desc DMA tag\n", sc->nfe_unit);
600 		goto fail;
601 	}
602 
603 	/* allocate memory for the descriptors */
604 	error = bus_dmamem_alloc(ring->rx_desc_tag, (void **)desc, BUS_DMA_NOWAIT, &ring->rx_desc_map);
605 	if (error != 0) {
606 		printf("nfe%d: could not allocate desc DMA memory\n", sc->nfe_unit);
607 		goto fail;
608 	}
609 
610 	/* map desc to device visible address space */
611 	error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, *desc,
612 	    NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ring->rx_desc_segs, BUS_DMA_NOWAIT);
613 	if (error != 0) {
614 		printf("nfe%d: could not load desc DMA map\n", sc->nfe_unit);
615 		goto fail;
616 	}
617 
618 	bzero(*desc, NFE_RX_RING_COUNT * descsize);
619 	ring->rx_desc_addr = ring->rx_desc_segs.ds_addr;
620 	ring->physaddr = ring->rx_desc_addr;
621 
622 	if (sc->nfe_flags & NFE_USE_JUMBO) {
623 		ring->bufsz = NFE_JBYTES;
624 		if ((error = nfe_jpool_alloc(sc)) != 0) {
625 			printf("nfe%d: could not allocate jumbo frames\n", sc->nfe_unit);
626 			goto fail;
627 		}
628 	}
629 
630 	/*
631 	 * Pre-allocate Rx buffers and populate Rx ring.
632 	 */
633 	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
634 		data = &sc->rxq.data[i];
635 
636 		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
637 		if (data->m == NULL) {
638 			printf("nfe%d: could not allocate rx mbuf\n", sc->nfe_unit);
639 			error = ENOMEM;
640 			goto fail;
641 		}
642 
643 		if (sc->nfe_flags & NFE_USE_JUMBO) {
644 			if ((jbuf = nfe_jalloc(sc)) == NULL) {
645 				printf("nfe%d: could not allocate jumbo buffer\n", sc->nfe_unit);
				error = ENOMEM;
646 				goto fail;
647 			}
648 			data->m->m_data = (void *)jbuf->buf;
649 			data->m->m_len = data->m->m_pkthdr.len = NFE_JBYTES;
650 			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, nfe_jfree, (struct nfe_softc *)sc, 0, EXT_NET_DRV);
651 			/* m_adj(data->m, ETHER_ALIGN); */
652 			physaddr = jbuf->physaddr;
653 		} else {
654 			error = bus_dma_tag_create(sc->nfe_parent_tag,
655 			    PAGE_SIZE, 0,		/* alignment, boundary */
656 			    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
657 			    BUS_SPACE_MAXADDR,		/* highaddr */
658 			    NULL, NULL,			/* filter, filterarg */
659 			    MCLBYTES, 1,		/* maxsize, nsegments */
660 			    MCLBYTES,			/* maxsegsize */
661 			    BUS_DMA_ALLOCNOW,		/* flags */
662 			    NULL, NULL,			/* lockfunc, lockarg */
663 			    &data->rx_data_tag);
664 			if (error != 0) {
665 				printf("nfe%d: could not create rx data DMA tag\n", sc->nfe_unit);
666 				goto fail;
667 			}
668 
669 			error = bus_dmamap_create(data->rx_data_tag, 0, &data->rx_data_map);
670 			if (error != 0) {
671 				printf("nfe%d: could not create rx data DMA map\n", sc->nfe_unit);
672 				goto fail;
673 			}
674 
675 			MCLGET(data->m, M_DONTWAIT);
676 			if (!(data->m->m_flags & M_EXT)) {
677 				error = ENOMEM;
678 				goto fail;
679 			}
680 
681 			error = bus_dmamap_load(data->rx_data_tag, data->rx_data_map, mtod(data->m, void *),
682 			    MCLBYTES, nfe_dma_map_segs, &data->rx_data_segs, BUS_DMA_NOWAIT);
683 			if (error != 0) {
684 				printf("nfe%d: could not load rx buf DMA map\n", sc->nfe_unit);
685 				goto fail;
686 			}
687 
688 			data->rx_data_addr = data->rx_data_segs.ds_addr;
689 			physaddr = data->rx_data_addr;
690 
691 		}
692 
693 		if (sc->nfe_flags & NFE_40BIT_ADDR) {
694 			desc64 = &sc->rxq.desc64[i];
695 #if defined(__LP64__)
696 			desc64->physaddr[0] = htole32(physaddr >> 32);
697 #endif
698 			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
699 			desc64->length = htole16(sc->rxq.bufsz);
700 			desc64->flags = htole16(NFE_RX_READY);
701 		} else {
702 			desc32 = &sc->rxq.desc32[i];
703 			desc32->physaddr = htole32(physaddr);
704 			desc32->length = htole16(sc->rxq.bufsz);
705 			desc32->flags = htole16(NFE_RX_READY);
706 		}
707 
708 	}
709 
710 	bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map, BUS_DMASYNC_PREWRITE);
711 
712 	return 0;
713 
714 fail:	nfe_free_rx_ring(sc, ring);
715 
716 	return error;
717 }
718 
719 static int
720 nfe_jpool_alloc(struct nfe_softc *sc)
721 {
722 	struct nfe_rx_ring *ring = &sc->rxq;
723 	struct nfe_jbuf *jbuf;
724 	bus_addr_t physaddr;
725 	caddr_t buf;
726 	int i, error;
727 
728 	/*
729 	 * Allocate a big chunk of DMA'able memory.
730 	 */
731 	error = bus_dma_tag_create(sc->nfe_parent_tag,
732 				   PAGE_SIZE, 0,	/* alignment, boundary */
733 				   BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
734 				   BUS_SPACE_MAXADDR,	/* highaddr */
735 				   NULL, NULL,		/* filter, filterarg */
736 				   NFE_JPOOL_SIZE, 1, /* maxsize, nsegments */
737 				   NFE_JPOOL_SIZE,   /* maxsegsize */
738 				   BUS_DMA_ALLOCNOW,	/* flags */
739 				   NULL, NULL,		/* lockfunc, lockarg */
740 				   &ring->rx_jumbo_tag);
741 	if (error != 0) {
742 		printf("nfe%d: could not create jumbo DMA tag\n", sc->nfe_unit);
743 		goto fail;
744 	}
745 	error = bus_dmamem_alloc(ring->rx_jumbo_tag, (void **)&ring->jpool, BUS_DMA_NOWAIT, &ring->rx_jumbo_map);
746 	if (error != 0) {
747 		printf("nfe%d: could not allocate jumbo DMA memory\n", sc->nfe_unit);
748 		goto fail;
749 	}
750 
751 	error = bus_dmamap_load(ring->rx_jumbo_tag, ring->rx_jumbo_map, ring->jpool,
752 	    NFE_JPOOL_SIZE, nfe_dma_map_segs, &ring->rx_jumbo_segs, BUS_DMA_NOWAIT);
753 	if (error != 0) {
754 		printf("nfe%d: could not load jumbo DMA map\n", sc->nfe_unit);
755 		goto fail;
756 	}
757 
758 	/* ..and split it into 9KB chunks */
759 	SLIST_INIT(&ring->jfreelist);
760 
761 	buf = ring->jpool;
762 	ring->rx_jumbo_addr = ring->rx_jumbo_segs.ds_addr;
763 	physaddr = ring->rx_jumbo_addr;
764 
765 	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
766 		jbuf = &ring->jbuf[i];
767 
768 		jbuf->buf = buf;
769 		jbuf->physaddr = physaddr;
770 
771 		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
772 
773 		buf += NFE_JBYTES;
774 		physaddr += NFE_JBYTES;
775 	}
776 
777 	return 0;
778 
779 fail:	nfe_jpool_free(sc);
780 	return error;
781 }
782 
783 
784 static void
785 nfe_jpool_free(struct nfe_softc *sc)
786 {
787 	struct nfe_rx_ring *ring = &sc->rxq;
788 
789 	if (ring->jpool != NULL) {
790 #if 0
791 		bus_dmamem_unmap(ring->rx_jumbo_tag, ring->jpool, NFE_JPOOL_SIZE);
792 #endif
793 		bus_dmamem_free(ring->rx_jumbo_tag, ring->jpool, ring->rx_jumbo_map);
794 	}
795 	if (ring->rx_jumbo_map != NULL) {
796 		bus_dmamap_sync(ring->rx_jumbo_tag, ring->rx_jumbo_map, BUS_DMASYNC_POSTWRITE);
797 		bus_dmamap_unload(ring->rx_jumbo_tag, ring->rx_jumbo_map);
798 		bus_dmamap_destroy(ring->rx_jumbo_tag, ring->rx_jumbo_map);
799 	}
800 }
801 
802 static struct nfe_jbuf *
803 nfe_jalloc(struct nfe_softc *sc)
804 {
805 	struct nfe_jbuf *jbuf;
806 
807 	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
808 	if (jbuf == NULL)
809 		return NULL;
810 	SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
811 	return jbuf;
812 }
813 
814 /*
815  * This is called automatically by the network stack when the mbuf is freed.
816  * Note that the NIC may already have been reset by the time the mbuf is
817  * freed.
818  */
819 static void
820 nfe_jfree(void *buf, void *arg)
821 {
822 	struct nfe_softc *sc = arg;
823 	struct nfe_jbuf *jbuf;
824 	int i;
825 
826 	/* find the jbuf from the base pointer */
827 	i = ((vm_offset_t)buf - (vm_offset_t)sc->rxq.jpool) / NFE_JBYTES;
828 	if (i < 0 || i >= NFE_JPOOL_COUNT) {
829 		printf("nfe%d: request to free a buffer (%p) not managed by us\n", sc->nfe_unit, buf);
830 		return;
831 	}
832 	jbuf = &sc->rxq.jbuf[i];
833 
834 	/* ..and put it back in the free list */
835 	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
836 }
837 
838 
839 static void
840 nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
841 {
842 	int i;
843 
844 	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
845 		if (sc->nfe_flags & NFE_40BIT_ADDR) {
846 			ring->desc64[i].length = htole16(ring->bufsz);
847 			ring->desc64[i].flags = htole16(NFE_RX_READY);
848 		} else {
849 			ring->desc32[i].length = htole16(ring->bufsz);
850 			ring->desc32[i].flags = htole16(NFE_RX_READY);
851 		}
852 	}
853 
854 	bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map, BUS_DMASYNC_PREWRITE);
855 
856 	ring->cur = ring->next = 0;
857 }
858 
859 
860 static void
861 nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
862 {
863 	struct nfe_rx_data *data;
864 	void *desc;
865 	int i, descsize;
866 
867 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
868 		desc = ring->desc64;
869 		descsize = sizeof (struct nfe_desc64);
870 	} else {
871 		desc = ring->desc32;
872 		descsize = sizeof (struct nfe_desc32);
873 	}
874 
875 	if (desc != NULL) {
876 		bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map, BUS_DMASYNC_POSTWRITE);
877 		bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
878 		bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
879 		bus_dma_tag_destroy(ring->rx_desc_tag);
880 	}
881 
882 
883 	if (sc->nfe_flags & NFE_USE_JUMBO) {
884 		nfe_jpool_free(sc);
885 	} else {
886 		for (i = 0; i < NFE_RX_RING_COUNT; i++) {
887 			data = &ring->data[i];
888 
889 			if (data->rx_data_map != NULL) {
890 				bus_dmamap_sync(data->rx_data_tag, data->rx_data_map, BUS_DMASYNC_POSTREAD);
891 				bus_dmamap_unload(data->rx_data_tag, data->rx_data_map);
892 				bus_dmamap_destroy(data->rx_data_tag, data->rx_data_map);
893 				bus_dma_tag_destroy(data->rx_data_tag);
894 			}
895 			if (data->m != NULL)
896 				m_freem(data->m);
897 		}
898 	}
899 }
900 
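/*
 * Allocate and DMA-map the Tx descriptor ring and create one DMA map
 * per ring slot for the outgoing mbuf chains.
 */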
901 static int
902 nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
903 {
904 	int i, error;
905 	void **desc;
906 	int descsize;
907 
908 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
909 		desc = (void **)&ring->desc64;
910 		descsize = sizeof (struct nfe_desc64);
911 	} else {
912 		desc = (void **)&ring->desc32;
913 		descsize = sizeof (struct nfe_desc32);
914 	}
915 
916 	ring->queued = 0;
917 	ring->cur = ring->next = 0;
918 
919 	error = bus_dma_tag_create(sc->nfe_parent_tag,
920 				   PAGE_SIZE, 0,	/* alignment, boundary */
921 				   BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
922 				   BUS_SPACE_MAXADDR,	/* highaddr */
923 				   NULL, NULL,		/* filter, filterarg */
924 				   NFE_TX_RING_COUNT * descsize, 1, /* maxsize, nsegments */
925 				   NFE_TX_RING_COUNT * descsize,   /* maxsegsize */
926 				   BUS_DMA_ALLOCNOW,	/* flags */
927 				   NULL, NULL,		/* lockfunc, lockarg */
928 				   &ring->tx_desc_tag);
929 	if (error != 0) {
930 		printf("nfe%d: could not create desc DMA tag\n", sc->nfe_unit);
931 		goto fail;
932 	}
933 
934 	error = bus_dmamem_alloc(ring->tx_desc_tag, (void **)desc, BUS_DMA_NOWAIT, &ring->tx_desc_map);
935 	if (error != 0) {
936 		printf("nfe%d: could not allocate desc DMA memory\n", sc->nfe_unit);
937 		goto fail;
938 	}
939 
940 	error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, *desc,
941 	    NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ring->tx_desc_segs, BUS_DMA_NOWAIT);
942 	if (error != 0) {
943 		printf("nfe%d: could not load desc DMA map\n", sc->nfe_unit);
944 		goto fail;
945 	}
946 
947 	bzero(*desc, NFE_TX_RING_COUNT * descsize);
948 
949 	ring->tx_desc_addr = ring->tx_desc_segs.ds_addr;
950 	ring->physaddr = ring->tx_desc_addr;
951 
952 	error = bus_dma_tag_create(sc->nfe_parent_tag,
953 				   ETHER_ALIGN, 0,
954 				   BUS_SPACE_MAXADDR_32BIT,
955 				   BUS_SPACE_MAXADDR,
956 				   NULL, NULL,
957 				   NFE_JBYTES, NFE_MAX_SCATTER,
958 				   NFE_JBYTES,
959 				   BUS_DMA_ALLOCNOW,
960 				   NULL, NULL,
961 				   &ring->tx_data_tag);
962 	if (error != 0) {
963 		printf("nfe%d: could not create tx data DMA tag\n", sc->nfe_unit);
964 		goto fail;
965 	}
966 
967 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
968 		error = bus_dmamap_create(ring->tx_data_tag, 0, &ring->data[i].tx_data_map);
969 		if (error != 0) {
970 			printf("nfe%d: could not create DMA map\n", sc->nfe_unit);
971 			goto fail;
972 		}
973 	}
974 
975 	return 0;
976 
977 fail:	nfe_free_tx_ring(sc, ring);
978 	return error;
979 }
980 
981 
982 static void
983 nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
984 {
985 	struct nfe_tx_data *data;
986 	int i;
987 
988 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
989 		if (sc->nfe_flags & NFE_40BIT_ADDR)
990 			ring->desc64[i].flags = 0;
991 		else
992 			ring->desc32[i].flags = 0;
993 
994 		data = &ring->data[i];
995 
996 		if (data->m != NULL) {
997 			bus_dmamap_sync(ring->tx_data_tag, data->active, BUS_DMASYNC_POSTWRITE);
998 			bus_dmamap_unload(ring->tx_data_tag, data->active);
999 			m_freem(data->m);
1000 			data->m = NULL;
1001 		}
1002 	}
1003 
1004 	bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map, BUS_DMASYNC_PREWRITE);
1005 
1006 	ring->queued = 0;
1007 	ring->cur = ring->next = 0;
1008 }
1009 
1010 static void
1011 nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1012 {
1013 	struct nfe_tx_data *data;
1014 	void *desc;
1015 	int i, descsize;
1016 
1017 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1018 		desc = ring->desc64;
1019 		descsize = sizeof (struct nfe_desc64);
1020 	} else {
1021 		desc = ring->desc32;
1022 		descsize = sizeof (struct nfe_desc32);
1023 	}
1024 
1025 	if (desc != NULL) {
1026 		bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map, BUS_DMASYNC_POSTWRITE);
1027 		bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
1028 		bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
1029 		bus_dma_tag_destroy(ring->tx_desc_tag);
1030 	}
1031 
1032 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1033 		data = &ring->data[i];
1034 
1035 		if (data->m != NULL) {
1036 			bus_dmamap_sync(ring->tx_data_tag, data->active, BUS_DMASYNC_POSTWRITE);
1037 			bus_dmamap_unload(ring->tx_data_tag, data->active);
1038 			m_freem(data->m);
1039 		}
1040 	}
1041 
1042 	/* ..and now actually destroy the DMA mappings */
1043 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1044 		data = &ring->data[i];
1045 		if (data->tx_data_map == NULL)
1046 			continue;
1047 		bus_dmamap_destroy(ring->tx_data_tag, data->tx_data_map);
1048 	}
1049 
1050 	bus_dma_tag_destroy(ring->tx_data_tag);
1051 }
1052 
1053 #ifdef DEVICE_POLLING
1054 static poll_handler_t nfe_poll;
1055 
1056 static void
1057 nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1058 {
1059 	struct  nfe_softc *sc = ifp->if_softc;
1060 
1061 	NFE_LOCK(sc);
1062 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1063 		nfe_poll_locked(ifp, cmd, count);
1064 	NFE_UNLOCK(sc);
1065 }
1066 
1067 
1068 static void
1069 nfe_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
1070 {
1071 	struct  nfe_softc *sc = ifp->if_softc;
1072 	u_int32_t r;
1073 
1074 	NFE_LOCK_ASSERT(sc);
1075 
1076 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1077 		return;
1078 	}
1079 
1080 	sc->rxcycles = count;
1081 	nfe_rxeof(sc);
1082 	nfe_txeof(sc);
1083 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1084 		nfe_start_locked(ifp);
1085 
1086 	if (cmd == POLL_AND_CHECK_STATUS) {
1087 		if ((r = NFE_READ(sc, NFE_IRQ_STATUS)) == 0) {
1088 			return;
1089 		}
1090 		NFE_WRITE(sc, NFE_IRQ_STATUS, r);
1091 
1092 		if (r & NFE_IRQ_LINK) {
1093 			NFE_READ(sc, NFE_PHY_STATUS);
1094 			NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1095 			DPRINTF(("nfe%d: link state changed\n", sc->nfe_unit));
1096 		}
1097 	}
1098 }
1099 #endif /* DEVICE_POLLING */
1100 
1101 
1102 static int
1103 nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1104 {
1105 	int			error = 0;
1106 	struct nfe_softc	*sc = ifp->if_softc;
1107 	struct ifreq		*ifr = (struct ifreq *) data;
1108 	struct mii_data		*mii;
1109 
1110 	switch (cmd) {
1111 	case SIOCSIFMTU:
1112 		if (ifr->ifr_mtu < ETHERMIN ||
1113 		    ((sc->nfe_flags & NFE_USE_JUMBO) &&
1114 		    ifr->ifr_mtu > ETHERMTU_JUMBO) ||
1115 		    (!(sc->nfe_flags & NFE_USE_JUMBO) &&
1116 		    ifr->ifr_mtu > ETHERMTU))
1117 			error = EINVAL;
1118 		else if (ifp->if_mtu != ifr->ifr_mtu) {
1119 			ifp->if_mtu = ifr->ifr_mtu;
1120 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1121 			nfe_init(sc);
1122 		}
1123 		break;
1124 	case SIOCSIFFLAGS:
1125 		NFE_LOCK(sc);
1126 		if (ifp->if_flags & IFF_UP) {
1127 			/*
1128 			 * If only the PROMISC or ALLMULTI flag changes, then
1129 			 * don't do a full re-init of the chip, just update
1130 			 * the Rx filter.
1131 			 */
1132 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1133 			    ((ifp->if_flags ^ sc->nfe_if_flags) &
1134 			     (IFF_ALLMULTI | IFF_PROMISC)) != 0)
1135 				nfe_setmulti(sc);
1136 			else
1137 				nfe_init_locked(sc);
1138 		} else {
1139 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1140 				nfe_stop(ifp, 1);
1141 		}
1142 		sc->nfe_if_flags = ifp->if_flags;
1143 		NFE_UNLOCK(sc);
1144 		error = 0;
1145 		break;
1146 	case SIOCADDMULTI:
1147 	case SIOCDELMULTI:
1148 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1149 			NFE_LOCK(sc);
1150 			nfe_setmulti(sc);
1151 			NFE_UNLOCK(sc);
1152 			error = 0;
1153 		}
1154 		break;
1155 	case SIOCSIFMEDIA:
1156 	case SIOCGIFMEDIA:
1157 		mii = device_get_softc(sc->nfe_miibus);
1158 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1159 		break;
1160 	case SIOCSIFCAP:
1161 	    {
1162 		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1163 #ifdef DEVICE_POLLING
1164 		if (mask & IFCAP_POLLING) {
1165 			if (ifr->ifr_reqcap & IFCAP_POLLING) {
1166 				error = ether_poll_register(nfe_poll, ifp);
1167 				if (error)
1168 					return(error);
1169 				NFE_LOCK(sc);
1170 				NFE_WRITE(sc, NFE_IRQ_MASK, 0);
1171 				ifp->if_capenable |= IFCAP_POLLING;
1172 				NFE_UNLOCK(sc);
1173 			} else {
1174 				error = ether_poll_deregister(ifp);
1175 				/* Enable interrupt even in error case */
1176 				NFE_LOCK(sc);
1177 				NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
1178 				ifp->if_capenable &= ~IFCAP_POLLING;
1179 				NFE_UNLOCK(sc);
1180 			}
1181 		}
1182 #endif
1183 		if (mask & IFCAP_HWCSUM) {
1184 			ifp->if_capenable ^= IFCAP_HWCSUM;
1185 			if (IFCAP_HWCSUM & ifp->if_capenable &&
1186 			    IFCAP_HWCSUM & ifp->if_capabilities)
1187 				ifp->if_hwassist = NFE_CSUM_FEATURES;
1188 			else
1189 				ifp->if_hwassist = 0;
1190 		}
1191 	    }
1192 		break;
1193 
1194 	default:
1195 		error = ether_ioctl(ifp, cmd, data);
1196 		break;
1197 	}
1198 
1199 	return error;
1200 }
1201 
1202 
1203 static void nfe_intr(void *arg)
1204 {
1205 	struct nfe_softc *sc = arg;
1206 	struct ifnet *ifp = sc->nfe_ifp;
1207 	u_int32_t r;
1208 
1209 	NFE_LOCK(sc);
1210 
1211 #ifdef DEVICE_POLLING
1212 	if (ifp->if_capenable & IFCAP_POLLING) {
1213 		NFE_UNLOCK(sc);
1214 		return;
1215 	}
1216 #endif
1217 
1218 	if ((r = NFE_READ(sc, NFE_IRQ_STATUS)) == 0) {
1219 	        NFE_UNLOCK(sc);
1220 		return;	/* not for us */
1221 	}
1222 	NFE_WRITE(sc, NFE_IRQ_STATUS, r);
1223 
1224 	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));
1225 
1226 	NFE_WRITE(sc, NFE_IRQ_MASK, 0);
1227 
1228 	if (r & NFE_IRQ_LINK) {
1229 		NFE_READ(sc, NFE_PHY_STATUS);
1230 		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1231 		DPRINTF(("nfe%d: link state changed\n", sc->nfe_unit));
1232 	}
1233 
1234 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1235 		/* check Rx ring */
1236 		nfe_rxeof(sc);
1237 		/* check Tx ring */
1238 		nfe_txeof(sc);
1239 	}
1240 
1241 	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
1242 
1243 	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1244 	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1245 		nfe_start_locked(ifp);
1246 
1247 	NFE_UNLOCK(sc);
1248 
1249 	return;
1250 }
1251 
1252 static void
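/*
 * Descriptor sync helpers.  Each ring is backed by a single DMA map,
 * so the per-descriptor and range arguments are ignored and the whole
 * map is synced.
 */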
1253 nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
1254 {
1255 	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
1256 }
1257 
1258 static void
1259 nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
1260 {
1261 	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
1262 }
1263 
1264 static void
1265 nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
1266 {
1267 	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
1268 }
1269 
1270 static void
1271 nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
1272 {
1273 	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
1274 }
1275 
1276 static void
1277 nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
1278 {
1279 	bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, ops);
1280 }
1281 
1282 static void
1283 nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
1284 {
1285 
1286 	bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, ops);
1287 }
1288 
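/*
 * Rx completion: walk the Rx ring, hand received frames to the network
 * stack and recycle each descriptor with a freshly loaded buffer.
 */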
1289 static void nfe_rxeof(struct nfe_softc *sc)
1290 {
1291 	struct ifnet *ifp = sc->nfe_ifp;
1292 	struct nfe_desc32 *desc32=NULL;
1293 	struct nfe_desc64 *desc64=NULL;
1294 	struct nfe_rx_data *data;
1295 	struct nfe_jbuf *jbuf;
1296 	struct mbuf *m, *mnew;
1297 	bus_addr_t physaddr;
1298 	u_int16_t flags;
1299 	int error, len;
1300 #if NVLAN > 1
1301 	u_int16_t vlan_tag = 0;
1302 	int have_tag = 0;
1303 #endif
1304 
1305 	NFE_LOCK_ASSERT(sc);
1306 
1307 	for (;;) {
1308 
1309 #ifdef DEVICE_POLLING
1310 		if (ifp->if_capenable & IFCAP_POLLING) {
1311 			if (sc->rxcycles <= 0)
1312 				break;
1313 			sc->rxcycles--;
1314 		}
1315 #endif
1316 
1317 		data = &sc->rxq.data[sc->rxq.cur];
1318 
1319 		if (sc->nfe_flags & NFE_40BIT_ADDR) {
1320 			desc64 = &sc->rxq.desc64[sc->rxq.cur];
1321 			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);
1322 
1323 			flags = letoh16(desc64->flags);
1324 			len = letoh16(desc64->length) & 0x3fff;
1325 
1326 #if NVLAN > 1
1327 			if (flags & NFE_TX_VLAN_TAG) {
1328 				have_tag = 1;
1329 				vlan_tag = desc64->vtag;
1330 			}
1331 #endif
1332 
1333 		} else {
1334 			desc32 = &sc->rxq.desc32[sc->rxq.cur];
1335 			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);
1336 
1337 			flags = letoh16(desc32->flags);
1338 			len = letoh16(desc32->length) & 0x3fff;
1339 		}
1340 
1341 		if (flags & NFE_RX_READY)
1342 			break;
1343 
1344 		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
1345 			if (!(flags & NFE_RX_VALID_V1))
1346 				goto skip;
1347 			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
1348 				flags &= ~NFE_RX_ERROR;
1349 				len--;	/* fix buffer length */
1350 			}
1351 		} else {
1352 			if (!(flags & NFE_RX_VALID_V2))
1353 				goto skip;
1354 
1355 			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
1356 				flags &= ~NFE_RX_ERROR;
1357 				len--;	/* fix buffer length */
1358 			}
1359 		}
1360 
1361 		if (flags & NFE_RX_ERROR) {
1362 			ifp->if_ierrors++;
1363 			goto skip;
1364 		}
1365 
1366 		/*
1367 		 * Try to allocate a new mbuf for this ring element and load
1368 		 * it before processing the current mbuf. If the ring element
1369 		 * cannot be loaded, drop the received packet and reuse the
1370 		 * old mbuf. In the unlikely case that the old mbuf can't be
1371 		 * reloaded either, explicitly panic.
1372 		 */
1373 		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
1374 		if (mnew == NULL) {
1375 			ifp->if_ierrors++;
1376 			goto skip;
1377 		}
1378 
1379 		if (sc->nfe_flags & NFE_USE_JUMBO) {
1380 			if ((jbuf = nfe_jalloc(sc)) == NULL) {
1381 				m_freem(mnew);
1382 				ifp->if_ierrors++;
1383 				goto skip;
1384 			}
1385 			mnew->m_data = (void *)jbuf->buf;
1386 			mnew->m_len = mnew->m_pkthdr.len = NFE_JBYTES;
1387 			MEXTADD(mnew, jbuf->buf, NFE_JBYTES, nfe_jfree,
1388 			    (struct nfe_softc *)sc, 0 , EXT_NET_DRV);
1389 
1390 			bus_dmamap_sync(sc->rxq.rx_jumbo_tag,
1391 			    sc->rxq.rx_jumbo_map, BUS_DMASYNC_POSTREAD);
1392 			physaddr = jbuf->physaddr;
1393 		} else {
1394 			MCLGET(mnew, M_DONTWAIT);
1395 			if (!(mnew->m_flags & M_EXT)) {
1396 				m_freem(mnew);
1397 				ifp->if_ierrors++;
1398 				goto skip;
1399 			}
1400 
1401 			bus_dmamap_sync(data->rx_data_tag, data->rx_data_map,
1402 			    BUS_DMASYNC_POSTREAD);
1403 			bus_dmamap_unload(data->rx_data_tag, data->rx_data_map);
1404 			error = bus_dmamap_load(data->rx_data_tag,
1405 			    data->rx_data_map, mtod(mnew, void *), MCLBYTES,
1406 			    nfe_dma_map_segs, &data->rx_data_segs,
1407 			    BUS_DMA_NOWAIT);
1408 			if (error != 0) {
1409 				m_freem(mnew);
1410 
1411 				/* try to reload the old mbuf */
1412 				error = bus_dmamap_load(data->rx_data_tag,
1413 				    data->rx_data_map, mtod(data->m, void *),
1414 				    MCLBYTES, nfe_dma_map_segs,
1415 				    &data->rx_data_segs, BUS_DMA_NOWAIT);
1416 				if (error != 0) {
1417 					/* very unlikely that it will fail.. */
1418 				      panic("nfe%d: could not load old rx mbuf",
1419 					    sc->nfe_unit);
1420 				}
1421 				ifp->if_ierrors++;
1422 				goto skip;
1423 			}
1424 			data->rx_data_addr = data->rx_data_segs.ds_addr;
1425 			physaddr = data->rx_data_addr;
1426 		}
1427 
1428 		/*
1429 		 * New mbuf successfully loaded, update Rx ring and continue
1430 		 * processing.
1431 		 */
1432 		m = data->m;
1433 		data->m = mnew;
1434 
1435 		/* finalize mbuf */
1436 		m->m_pkthdr.len = m->m_len = len;
1437 		m->m_pkthdr.rcvif = ifp;
1438 
1439 
1440 #if defined(NFE_CSUM)
1441 		if ((sc->nfe_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK)) {
1442 			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1443 			if (flags & NFE_RX_IP_CSUMOK_V2) {
1444 				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1445 			}
1446 			if (flags & NFE_RX_UDP_CSUMOK_V2 ||
1447 			    flags & NFE_RX_TCP_CSUMOK_V2) {
1448 				m->m_pkthdr.csum_flags |=
1449 				    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
1450 				m->m_pkthdr.csum_data = 0xffff;
1451 			}
1452 		}
1453 #endif
1454 
1455 #if NVLAN > 1
1456 		if (have_tag) {
1457 			VLAN_INPUT_TAG_NEW(ifp, m, vlan_tag);
1458 			if (m == NULL)
1459 				continue;
1460 		}
1461 #endif
1462 
1463 		ifp->if_ipackets++;
1464 
1465 		NFE_UNLOCK(sc);
1466 		(*ifp->if_input)(ifp, m);
1467 		NFE_LOCK(sc);
1468 
1469 		/* update mapping address in h/w descriptor */
1470 		if (sc->nfe_flags & NFE_40BIT_ADDR) {
1471 #if defined(__LP64__)
1472 			desc64->physaddr[0] = htole32(physaddr >> 32);
1473 #endif
1474 			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
1475 		} else {
1476 			desc32->physaddr = htole32(physaddr);
1477 		}
1478 
1479 skip:		if (sc->nfe_flags & NFE_40BIT_ADDR) {
1480 			desc64->length = htole16(sc->rxq.bufsz);
1481 			desc64->flags = htole16(NFE_RX_READY);
1482 
1483 			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
1484 		} else {
1485 			desc32->length = htole16(sc->rxq.bufsz);
1486 			desc32->flags = htole16(NFE_RX_READY);
1487 
1488 			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
1489 		}
1490 
1491 		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
1492 	}
1493 }
1494 
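/*
 * Tx completion: reclaim descriptors the hardware has finished with,
 * free the transmitted mbufs and clear OACTIVE so transmission can
 * resume.
 */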
1495 static void nfe_txeof(struct nfe_softc *sc)
1496 {
1497 	struct ifnet *ifp = sc->nfe_ifp;
1498 	struct nfe_desc32 *desc32;
1499 	struct nfe_desc64 *desc64;
1500 	struct nfe_tx_data *data = NULL;
1501 	u_int16_t flags;
1502 
1503 	NFE_LOCK_ASSERT(sc);
1504 
1505 	while (sc->txq.next != sc->txq.cur) {
1506 		if (sc->nfe_flags & NFE_40BIT_ADDR) {
1507 			desc64 = &sc->txq.desc64[sc->txq.next];
1508 			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);
1509 
1510 			flags = letoh16(desc64->flags);
1511 		} else {
1512 			desc32 = &sc->txq.desc32[sc->txq.next];
1513 			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);
1514 
1515 			flags = letoh16(desc32->flags);
1516 		}
1517 
1518 		if (flags & NFE_TX_VALID)
1519 			break;
1520 
1521 		data = &sc->txq.data[sc->txq.next];
1522 
1523 		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
1524 			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
1525 				goto skip;
1526 
1527 			if ((flags & NFE_TX_ERROR_V1) != 0) {
1528 				printf("nfe%d: tx v1 error 0x%4b\n",
1529 				    sc->nfe_unit, flags, NFE_V1_TXERR);
1530 
1531 				ifp->if_oerrors++;
1532 			} else
1533 				ifp->if_opackets++;
1534 		} else {
1535 			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
1536 				goto skip;
1537 
1538 			if ((flags & NFE_TX_ERROR_V2) != 0) {
1539 				printf("nfe%d: tx v2 error 0x%4b\n",
1540 				    sc->nfe_unit, flags, NFE_V2_TXERR);
1541 
1542 				ifp->if_oerrors++;
1543 			} else
1544 				ifp->if_opackets++;
1545 		}
1546 
1547 		if (data->m == NULL) {	/* should not get here */
1548 			printf("nfe%d: last fragment bit w/o associated mbuf!\n",
1549 			    sc->nfe_unit);
1550 			goto skip;
1551 		}
1552 
1553 		/* last fragment of the mbuf chain transmitted */
1554 		bus_dmamap_sync(sc->txq.tx_data_tag, data->active,
1555 		    BUS_DMASYNC_POSTWRITE);
1556 		bus_dmamap_unload(sc->txq.tx_data_tag, data->active);
1557 		m_freem(data->m);
1558 		data->m = NULL;
1559 
1560 		ifp->if_timer = 0;
1561 
1562 skip:		sc->txq.queued--;
1563 		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
1564 	}
1565 
1566 	if (data != NULL) {	/* at least one slot freed */
1567 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1568 		nfe_start_locked(ifp);
1569 	}
1570 }
1571 
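/*
 * Map an outgoing mbuf chain and fill one Tx descriptor per DMA
 * segment.  Checksum/VLAN bits are set on the first fragment only and
 * the "last fragment" bit on the final one.
 */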
1572 static int nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
1573 {
1574 	struct nfe_desc32 *desc32=NULL;
1575 	struct nfe_desc64 *desc64=NULL;
1576 	struct nfe_tx_data *data=NULL;
1577 	bus_dmamap_t map;
1578 	u_int16_t flags = NFE_TX_VALID;
1579 #if NVLAN > 0
1580 	struct m_tag *vtag;
1581 #endif
1582 	bus_dma_segment_t segs[NFE_MAX_SCATTER];
1583 	int nsegs;
1584 	int error, i;
1585 
1586 	map = sc->txq.data[sc->txq.cur].tx_data_map;
1587 
1588 	error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, m0, segs,
1589 	    &nsegs, BUS_DMA_NOWAIT);
1590 
1591 	if (error != 0) {
1592 		printf("nfe%d: could not map mbuf (error %d)\n", sc->nfe_unit,
1593 		    error);
1594 		return error;
1595 	}
1596 
1597 	if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 1) {
1598 		bus_dmamap_unload(sc->txq.tx_data_tag, map);
1599 		return ENOBUFS;
1600 	}
1601 
1602 
1603 #if NVLAN > 0
1604 	/* setup h/w VLAN tagging */
1605 	vtag = VLAN_OUTPUT_TAG(sc->nfe_ifp, m0);
1606 #endif
1607 
1608 #ifdef NFE_CSUM
1609 	if (m0->m_pkthdr.csum_flags & CSUM_IP)
1610 		flags |= NFE_TX_IP_CSUM;
1611 	if (m0->m_pkthdr.csum_flags & CSUM_TCP)
1612 		flags |= NFE_TX_TCP_CSUM;
1613 	if (m0->m_pkthdr.csum_flags & CSUM_UDP)
1614 		flags |= NFE_TX_TCP_CSUM;
1615 #endif
1616 
1617 	for (i = 0; i < nsegs; i++) {
1618 		data = &sc->txq.data[sc->txq.cur];
1619 
1620 		if (sc->nfe_flags & NFE_40BIT_ADDR) {
1621 			desc64 = &sc->txq.desc64[sc->txq.cur];
1622 #if defined(__LP64__)
1623 			desc64->physaddr[0] = htole32(segs[i].ds_addr >> 32);
1624 #endif
1625 			desc64->physaddr[1] = htole32(segs[i].ds_addr &
1626 			    0xffffffff);
1627 			desc64->length = htole16(segs[i].ds_len - 1);
1628 			desc64->flags = htole16(flags);
1629 #if NVLAN > 0
1630 			desc64->vtag = htole32(NFE_TX_VTAG |
1631 			    VLAN_TAG_VALUE(vtag));
1632 #endif
1633 		} else {
1634 			desc32 = &sc->txq.desc32[sc->txq.cur];
1635 
1636 			desc32->physaddr = htole32(segs[i].ds_addr);
1637 			desc32->length = htole16(segs[i].ds_len - 1);
1638 			desc32->flags = htole16(flags);
1639 		}
1640 
1641 		/* csum flags and vtag belong to the first fragment only */
1642 		if (nsegs > 1) {
1643 			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
1644 #if NVLAN > 0
1645 			vtag = 0;
1646 #endif
1647 		}
1648 
1649 		sc->txq.queued++;
1650 		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
1651 	}
1652 
1653 	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
1654 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1655 		flags |= NFE_TX_LASTFRAG_V2;
1656 		desc64->flags = htole16(flags);
1657 	} else {
1658 		if (sc->nfe_flags & NFE_JUMBO_SUP)
1659 			flags |= NFE_TX_LASTFRAG_V2;
1660 		else
1661 			flags |= NFE_TX_LASTFRAG_V1;
1662 		desc32->flags = htole16(flags);
1663 	}
1664 
1665 	data->m = m0;
1666 	data->active = map;
1667 	data->nsegs = nsegs;
1668 
1669 	bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);
1670 
1671 	return 0;
1672 }
1673 
1674 
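/*
 * Program the hardware multicast filter.  The hardware matches on an
 * address/mask pair: "addr" accumulates the bits common to all
 * multicast addresses and "mask" ends up set in every bit position on
 * which they all agree.  Promiscuous/allmulti mode zeroes both so any
 * multicast address matches.
 */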
1675 static void nfe_setmulti(struct nfe_softc *sc)
1676 {
1677 	struct ifnet *ifp = sc->nfe_ifp;
1678 	struct ifmultiaddr	*ifma;
1679 	u_int8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
1680 	u_int32_t filter = NFE_RXFILTER_MAGIC;
1681 	u_int8_t etherbroadcastaddr[ETHER_ADDR_LEN] =
1682 	    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1683 	int i;
1684 
1685 	NFE_LOCK_ASSERT(sc);
1686 
1687 	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
1688 		bzero(addr, ETHER_ADDR_LEN);
1689 		bzero(mask, ETHER_ADDR_LEN);
1690 		goto done;
1691 	}
1692 
1693 	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
1694 	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
1695 
1696 	IF_ADDR_LOCK(ifp);
1697 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1698 		u_char *addrp;
1699 
1700 		if (ifma->ifma_addr->sa_family != AF_LINK)
1701 			continue;
1702 
1703 		addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
1704 		for (i = 0; i < ETHER_ADDR_LEN; i++) {
1705 			u_int8_t mcaddr = addrp[i];
1706 			addr[i] &= mcaddr;
1707 			mask[i] &= ~mcaddr;
1708 		}
1709 	}
1710 	IF_ADDR_UNLOCK(ifp);
1711 
1712 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
1713 		mask[i] |= addr[i];
1714 	}
1715 
1716 done:
1717 	addr[0] |= 0x01;	/* make sure multicast bit is set */
1718 
1719 	NFE_WRITE(sc, NFE_MULTIADDR_HI,
1720 	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1721 	NFE_WRITE(sc, NFE_MULTIADDR_LO,
1722 	    addr[5] <<  8 | addr[4]);
1723 	NFE_WRITE(sc, NFE_MULTIMASK_HI,
1724 	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
1725 	NFE_WRITE(sc, NFE_MULTIMASK_LO,
1726 	    mask[5] <<  8 | mask[4]);
1727 
1728 	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
1729 	NFE_WRITE(sc, NFE_RXFILTER, filter);
1730 }
1731 
1732 static void nfe_start(struct ifnet *ifp)
1733 {
1734 	struct nfe_softc *sc;
1735 
1736 	sc = ifp->if_softc;
1737 	NFE_LOCK(sc);
1738 	nfe_start_locked(ifp);
1739 	NFE_UNLOCK(sc);
1740 }
1741 
1742 static void nfe_start_locked(struct ifnet *ifp)
1743 {
1744 	struct nfe_softc *sc = ifp->if_softc;
1745 	int old = sc->txq.cur;
1746 	struct mbuf *m0;
1747 
1748 	if (!sc->nfe_link || ifp->if_drv_flags & IFF_DRV_OACTIVE) {
1749 		return;
1750 	}
1751 
1752 	for (;;) {
1753 		IFQ_POLL(&ifp->if_snd, m0);
1754 		if (m0 == NULL)
1755 			break;
1756 
1757 		if (nfe_encap(sc, m0) != 0) {
1758 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1759 			break;
1760 		}
1761 
1762 		/* packet put in h/w queue, remove from s/w queue */
1763 		IFQ_DEQUEUE(&ifp->if_snd, m0);
1764 
1765 		BPF_MTAP(ifp, m0);
1766 	}
1767 	if (sc->txq.cur == old)	{ /* nothing sent */
1768 		return;
1769 	}
1770 
1771 	if (sc->nfe_flags & NFE_40BIT_ADDR)
1772 		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
1773 	else
1774 		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
1775 
1776 	/* kick Tx */
1777 	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
1778 
1779 	/*
1780 	 * Set a timeout in case the chip goes out to lunch.
1781 	 */
1782 	ifp->if_timer = 5;
1783 
1784 	return;
1785 }
1786 
1787 static void nfe_watchdog(struct ifnet *ifp)
1788 {
1789 	struct nfe_softc *sc = ifp->if_softc;
1790 
1791 	printf("nfe%d: watchdog timeout\n", sc->nfe_unit);
1792 
1793 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1794 	nfe_init(sc);
1795 
1796 	ifp->if_oerrors++;
1797 
1798 	return;
1799 }
1800 
1801 static void nfe_init(void *xsc)
1802 {
1803 	struct nfe_softc *sc = xsc;
1804 
1805 	NFE_LOCK(sc);
1806 	nfe_init_locked(sc);
1807 	NFE_UNLOCK(sc);
1808 
1809 	return;
1810 }
1811 
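/*
 * Bring the controller up: reset it, program the ring addresses and
 * station address, set up interrupt mitigation and the Rx filter, and
 * finally enable Rx, Tx and interrupts.
 */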
1812 static void nfe_init_locked(void *xsc)
1813 {
1814 	struct nfe_softc *sc = xsc;
1815 	struct ifnet *ifp = sc->nfe_ifp;
1816 	struct mii_data *mii;
1817 	u_int32_t tmp;
1818 
1819 	NFE_LOCK_ASSERT(sc);
1820 
1821 	mii = device_get_softc(sc->nfe_miibus);
1822 
1823 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1824 		return;
1825 	}
1826 
1827 	nfe_stop(ifp, 0);
1828 
1829 	NFE_WRITE(sc, NFE_TX_UNK, 0);
1830 	NFE_WRITE(sc, NFE_STATUS, 0);
1831 
1832 	sc->rxtxctl = NFE_RXTX_BIT2;
1833 	if (sc->nfe_flags & NFE_40BIT_ADDR)
1834 		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
1835 	else if (sc->nfe_flags & NFE_JUMBO_SUP)
1836 		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
1837 #ifdef NFE_CSUM
1838 	if (sc->nfe_flags & NFE_HW_CSUM)
1839 		sc->rxtxctl |= NFE_RXTX_RXCSUM;
1840 #endif
1841 
1842 #if NVLAN > 0
1843 	/*
1844 	 * Although the adapter is capable of stripping VLAN tags from received
1845 	 * frames (NFE_RXTX_VTAG_STRIP), we deliberately leave that feature
1846 	 * disabled; tag stripping is done in software by the network stack.
1847 	 */
1848 	if (sc->nfe_flags & NFE_HW_VLAN)
1849 		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;
1850 #endif
1851 
1852 	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
1853 	DELAY(10);
1854 	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
1855 
1856 #if NVLAN > 0
1857 	if (sc->nfe_flags & NFE_HW_VLAN)
1858 		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
1859 #endif
1860 
1861 	NFE_WRITE(sc, NFE_SETUP_R6, 0);
1862 
1863 	/* set MAC address */
1864 	nfe_set_macaddr(sc, sc->eaddr);
1865 
1866 	/* tell MAC where rings are in memory */
1867 #ifdef __LP64__
1868 	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
1869 #endif
1870 	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
1871 #ifdef __LP64__
1872 	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
1873 #endif
1874 	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);
1875 
1876 	NFE_WRITE(sc, NFE_RING_SIZE,
1877 	    (NFE_RX_RING_COUNT - 1) << 16 |
1878 	    (NFE_TX_RING_COUNT - 1));
1879 
1880 	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);
1881 
1882 	/* force MAC to wakeup */
1883 	tmp = NFE_READ(sc, NFE_PWR_STATE);
1884 	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
1885 	DELAY(10);
1886 	tmp = NFE_READ(sc, NFE_PWR_STATE);
1887 	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);
1888 
1889 #if 1
1890 	/* configure interrupts coalescing/mitigation */
1891 	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
1892 #else
1893 	/* no interrupt mitigation: one interrupt per packet */
1894 	NFE_WRITE(sc, NFE_IMTIMER, 970);
1895 #endif
1896 
1897 	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
1898 	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
1899 	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
1900 
1901 	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
1902 	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
1903 
1904 	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
1905 	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);
1906 
1907 	sc->rxtxctl &= ~NFE_RXTX_BIT2;
1908 	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
1909 	DELAY(10);
1910 	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
1911 
1912 	/* set Rx filter */
1913 	nfe_setmulti(sc);
1914 
1915 	nfe_ifmedia_upd(ifp);
1916 
1917 	nfe_tick_locked(sc);
1918 
1919 	/* enable Rx */
1920 	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
1921 
1922 	/* enable Tx */
1923 	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
1924 
1925 	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1926 
1927 #ifdef DEVICE_POLLING
1928 	if (ifp->if_capenable & IFCAP_POLLING)
1929 		NFE_WRITE(sc, NFE_IRQ_MASK, 0);
1930 	else
1931 #endif
1932 	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED); /* enable interrupts */
1933 
1934 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1935 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1936 
1937 	sc->nfe_link = 0;
1938 
1939 	return;
1940 }
1941 
1942 static void nfe_stop(struct ifnet *ifp, int disable)
1943 {
1944 	struct nfe_softc *sc = ifp->if_softc;
1945 	struct mii_data  *mii;
1946 
1947 	NFE_LOCK_ASSERT(sc);
1948 
1949 	ifp->if_timer = 0;
1950 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1951 
1952 	mii = device_get_softc(sc->nfe_miibus);
1953 
1954 	callout_stop(&sc->nfe_stat_ch);
1955 
1956 	/* abort Tx */
1957 	NFE_WRITE(sc, NFE_TX_CTL, 0);
1958 
1959 	/* disable Rx */
1960 	NFE_WRITE(sc, NFE_RX_CTL, 0);
1961 
1962 	/* disable interrupts */
1963 	NFE_WRITE(sc, NFE_IRQ_MASK, 0);
1964 
1965 	sc->nfe_link = 0;
1966 
1967 	/* reset Tx and Rx rings */
1968 	nfe_reset_tx_ring(sc, &sc->txq);
1969 	nfe_reset_rx_ring(sc, &sc->rxq);
1970 
1971 	return;
1972 }
1973 
1974 static int nfe_ifmedia_upd(struct ifnet *ifp)
1975 {
1976 	struct nfe_softc *sc = ifp->if_softc;
1977 
1978 	NFE_LOCK(sc);
1979 	nfe_ifmedia_upd_locked(ifp);
1980 	NFE_UNLOCK(sc);
1981 	return (0);
1982 }
1983 
1984 static int nfe_ifmedia_upd_locked(struct ifnet *ifp)
1985 {
1986 	struct nfe_softc	*sc = ifp->if_softc;
1987 	struct mii_data		*mii;
1988 
1989 	NFE_LOCK_ASSERT(sc);
1990 
1991 	mii = device_get_softc(sc->nfe_miibus);
1992 
1993 	if (mii->mii_instance) {
1994 		struct mii_softc *miisc;
1995 		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
1996 		    miisc = LIST_NEXT(miisc, mii_list)) {
1997 			mii_phy_reset(miisc);
1998 		}
1999 	}
2000 	mii_mediachg(mii);
2001 
2002 	return (0);
2003 }
2004 
2005 static void nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2006 {
2007 	struct nfe_softc	*sc;
2008 	struct mii_data		*mii;
2009 
2010 	sc = ifp->if_softc;
2011 
2012 	NFE_LOCK(sc);
2013 	mii = device_get_softc(sc->nfe_miibus);
2014 	mii_pollstat(mii);
2015 	NFE_UNLOCK(sc);
2016 
2017 	ifmr->ifm_active = mii->mii_media_active;
2018 	ifmr->ifm_status = mii->mii_media_status;
2019 
2020 	return;
2021 }
2022 
2023 static void
2024 nfe_tick(void *xsc)
2025 {
2026 	struct nfe_softc *sc;
2027 
2028 	sc = xsc;
2029 
2030 	NFE_LOCK(sc);
2031 	nfe_tick_locked(sc);
2032 	NFE_UNLOCK(sc);
2033 }
2034 
2035 
2036 static void nfe_tick_locked(struct nfe_softc *arg)
2037 {
2038 	struct nfe_softc	*sc;
2039 	struct mii_data		*mii;
2040 	struct ifnet		*ifp;
2041 
2042 	sc = arg;
2043 
2044 	NFE_LOCK_ASSERT(sc);
2045 
2046 	ifp = sc->nfe_ifp;
2047 
2048 	mii = device_get_softc(sc->nfe_miibus);
2049 	mii_tick(mii);
2050 
2051 	if (!sc->nfe_link) {
2052 		if (mii->mii_media_status & IFM_ACTIVE &&
2053 		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
2054 			sc->nfe_link++;
2055 			if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T
2056 			    && bootverbose)
2057 				if_printf(sc->nfe_ifp, "gigabit link up\n");
2058 					if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2059 						nfe_start_locked(ifp);
2060 		}
2061 	}
2062 	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2063 
2064 	return;
2065 }
2066 
2067 
2068 static void nfe_shutdown(device_t dev)
2069 {
2070 	struct nfe_softc *sc;
2071 	struct ifnet *ifp;
2072 
2073 	sc = device_get_softc(dev);
2074 
2075 	NFE_LOCK(sc);
2076 	ifp = sc->nfe_ifp;
2077 	nfe_stop(ifp,0);
2078 	/* nfe_reset(sc); */
2079 	NFE_UNLOCK(sc);
2080 
2081 	return;
2082 }
2083 
2084 
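/*
 * Note that nfe_get_macaddr() and nfe_set_macaddr() use opposite
 * register byte orders; nfe_detach() relies on this when it writes the
 * reversed address back, so the registers end up with their original
 * (factory) contents.
 */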
2085 static void nfe_get_macaddr(struct nfe_softc *sc, u_char *addr)
2086 {
2087 	uint32_t tmp;
2088 
2089 	tmp = NFE_READ(sc, NFE_MACADDR_LO);
2090 	addr[0] = (tmp >> 8) & 0xff;
2091 	addr[1] = (tmp & 0xff);
2092 
2093 	tmp = NFE_READ(sc, NFE_MACADDR_HI);
2094 	addr[2] = (tmp >> 24) & 0xff;
2095 	addr[3] = (tmp >> 16) & 0xff;
2096 	addr[4] = (tmp >>  8) & 0xff;
2097 	addr[5] = (tmp & 0xff);
2098 }
2099 
2100 static void nfe_set_macaddr(struct nfe_softc *sc, u_char *addr)
2101 {
2102 
2103 	NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] <<  8 | addr[4]);
2104 	NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
2105 	    addr[1] << 8 | addr[0]);
2106 }
2107 
2108 /*
2109  * Map a single buffer address.
2110  */
2111 
2112 static void
2113 nfe_dma_map_segs(arg, segs, nseg, error)
2114 	void *arg;
2115 	bus_dma_segment_t *segs;
2116 	int error, nseg;
2117 {
2118 
2119 	if (error)
2120 		return;
2121 
2122 	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
2123 
2124 	*(bus_dma_segment_t *)arg = *segs;
2125 
2126 	return;
2127 }
2128