xref: /freebsd/sys/dev/nfe/if_nfe.c (revision aa3860851b9f6a6002d135b1cac7736e0995eedc)
1 /*	$OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $	*/
2 
3 /*-
4  * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
5  * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
6  * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
22 
23 #include <sys/cdefs.h>
24 #ifdef HAVE_KERNEL_OPTION_HEADERS
25 #include "opt_device_polling.h"
26 #endif
27 
28 #include <sys/param.h>
29 #include <sys/endian.h>
30 #include <sys/systm.h>
31 #include <sys/sockio.h>
32 #include <sys/mbuf.h>
33 #include <sys/malloc.h>
34 #include <sys/module.h>
35 #include <sys/kernel.h>
36 #include <sys/queue.h>
37 #include <sys/socket.h>
38 #include <sys/sysctl.h>
39 #include <sys/taskqueue.h>
40 
41 #include <net/if.h>
42 #include <net/if_var.h>
43 #include <net/if_arp.h>
44 #include <net/ethernet.h>
45 #include <net/if_dl.h>
46 #include <net/if_media.h>
47 #include <net/if_types.h>
48 #include <net/if_vlan_var.h>
49 
50 #include <net/bpf.h>
51 
52 #include <machine/bus.h>
53 #include <machine/resource.h>
54 #include <sys/bus.h>
55 #include <sys/rman.h>
56 
57 #include <dev/mii/mii.h>
58 #include <dev/mii/miivar.h>
59 
60 #include <dev/pci/pcireg.h>
61 #include <dev/pci/pcivar.h>
62 
63 #include <dev/nfe/if_nfereg.h>
64 #include <dev/nfe/if_nfevar.h>
65 
66 MODULE_DEPEND(nfe, pci, 1, 1, 1);
67 MODULE_DEPEND(nfe, ether, 1, 1, 1);
68 MODULE_DEPEND(nfe, miibus, 1, 1, 1);
69 
70 /* "device miibus" required.  See GENERIC if you get errors here. */
71 #include "miibus_if.h"
72 
73 static int  nfe_probe(device_t);
74 static int  nfe_attach(device_t);
75 static int  nfe_detach(device_t);
76 static int  nfe_suspend(device_t);
77 static int  nfe_resume(device_t);
78 static int nfe_shutdown(device_t);
79 static int  nfe_can_use_msix(struct nfe_softc *);
80 static int  nfe_detect_msik9(struct nfe_softc *);
81 static void nfe_power(struct nfe_softc *);
82 static int  nfe_miibus_readreg(device_t, int, int);
83 static int  nfe_miibus_writereg(device_t, int, int, int);
84 static void nfe_miibus_statchg(device_t);
85 static void nfe_mac_config(struct nfe_softc *, struct mii_data *);
86 static void nfe_set_intr(struct nfe_softc *);
87 static __inline void nfe_enable_intr(struct nfe_softc *);
88 static __inline void nfe_disable_intr(struct nfe_softc *);
89 static int  nfe_ioctl(if_t, u_long, caddr_t);
90 static void nfe_alloc_msix(struct nfe_softc *, int);
91 static int nfe_intr(void *);
92 static void nfe_int_task(void *, int);
93 static __inline void nfe_discard_rxbuf(struct nfe_softc *, int);
94 static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int);
95 static int nfe_newbuf(struct nfe_softc *, int);
96 static int nfe_jnewbuf(struct nfe_softc *, int);
97 static int  nfe_rxeof(struct nfe_softc *, int, int *);
98 static int  nfe_jrxeof(struct nfe_softc *, int, int *);
99 static void nfe_txeof(struct nfe_softc *);
100 static int  nfe_encap(struct nfe_softc *, struct mbuf **);
101 static void nfe_setmulti(struct nfe_softc *);
102 static void nfe_start(if_t);
103 static void nfe_start_locked(if_t);
104 static void nfe_watchdog(if_t);
105 static void nfe_init(void *);
106 static void nfe_init_locked(void *);
107 static void nfe_stop(if_t);
108 static int  nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
109 static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
110 static int  nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
111 static int  nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
112 static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
113 static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
114 static int  nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
115 static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
116 static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
117 static int  nfe_ifmedia_upd(if_t);
118 static void nfe_ifmedia_sts(if_t, struct ifmediareq *);
119 static void nfe_tick(void *);
120 static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
121 static void nfe_set_macaddr(struct nfe_softc *, uint8_t *);
122 static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int);
123 
124 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
125 static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS);
126 static void nfe_sysctl_node(struct nfe_softc *);
127 static void nfe_stats_clear(struct nfe_softc *);
128 static void nfe_stats_update(struct nfe_softc *);
129 static void nfe_set_linkspeed(struct nfe_softc *);
130 static void nfe_set_wol(struct nfe_softc *);
131 
132 #ifdef NFE_DEBUG
133 static int nfedebug = 0;
134 #define	DPRINTF(sc, ...)	do {				\
135 	if (nfedebug)						\
136 		device_printf((sc)->nfe_dev, __VA_ARGS__);	\
137 } while (0)
138 #define	DPRINTFN(sc, n, ...)	do {				\
139 	if (nfedebug >= (n))					\
140 		device_printf((sc)->nfe_dev, __VA_ARGS__);	\
141 } while (0)
142 #else
143 #define	DPRINTF(sc, ...)
144 #define	DPRINTFN(sc, n, ...)
145 #endif
146 
147 #define	NFE_LOCK(_sc)		mtx_lock(&(_sc)->nfe_mtx)
148 #define	NFE_UNLOCK(_sc)		mtx_unlock(&(_sc)->nfe_mtx)
149 #define	NFE_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)
150 
151 /* Tunables. */
152 static int msi_disable = 0;
153 static int msix_disable = 0;
154 static int jumbo_disable = 0;
155 TUNABLE_INT("hw.nfe.msi_disable", &msi_disable);
156 TUNABLE_INT("hw.nfe.msix_disable", &msix_disable);
157 TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable);
158 
159 static device_method_t nfe_methods[] = {
160 	/* Device interface */
161 	DEVMETHOD(device_probe,		nfe_probe),
162 	DEVMETHOD(device_attach,	nfe_attach),
163 	DEVMETHOD(device_detach,	nfe_detach),
164 	DEVMETHOD(device_suspend,	nfe_suspend),
165 	DEVMETHOD(device_resume,	nfe_resume),
166 	DEVMETHOD(device_shutdown,	nfe_shutdown),
167 
168 	/* MII interface */
169 	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
170 	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
171 	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),
172 
173 	DEVMETHOD_END
174 };
175 
176 static driver_t nfe_driver = {
177 	"nfe",
178 	nfe_methods,
179 	sizeof(struct nfe_softc)
180 };
181 
182 DRIVER_MODULE(nfe, pci, nfe_driver, 0, 0);
183 DRIVER_MODULE(miibus, nfe, miibus_driver, 0, 0);
184 
185 static struct nfe_type nfe_devs[] = {
186 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
187 	    "NVIDIA nForce MCP Networking Adapter"},
188 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
189 	    "NVIDIA nForce2 MCP2 Networking Adapter"},
190 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
191 	    "NVIDIA nForce2 400 MCP4 Networking Adapter"},
192 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
193 	    "NVIDIA nForce2 400 MCP5 Networking Adapter"},
194 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
195 	    "NVIDIA nForce3 MCP3 Networking Adapter"},
196 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
197 	    "NVIDIA nForce3 250 MCP6 Networking Adapter"},
198 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
199 	    "NVIDIA nForce3 MCP7 Networking Adapter"},
200 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
201 	    "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
202 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
203 	    "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
204 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
205 	    "NVIDIA nForce MCP04 Networking Adapter"},		/* MCP10 */
206 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
207 	    "NVIDIA nForce MCP04 Networking Adapter"},		/* MCP11 */
208 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
209 	    "NVIDIA nForce 430 MCP12 Networking Adapter"},
210 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
211 	    "NVIDIA nForce 430 MCP13 Networking Adapter"},
212 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
213 	    "NVIDIA nForce MCP55 Networking Adapter"},
214 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
215 	    "NVIDIA nForce MCP55 Networking Adapter"},
216 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
217 	    "NVIDIA nForce MCP61 Networking Adapter"},
218 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
219 	    "NVIDIA nForce MCP61 Networking Adapter"},
220 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
221 	    "NVIDIA nForce MCP61 Networking Adapter"},
222 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
223 	    "NVIDIA nForce MCP61 Networking Adapter"},
224 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
225 	    "NVIDIA nForce MCP65 Networking Adapter"},
226 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
227 	    "NVIDIA nForce MCP65 Networking Adapter"},
228 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
229 	    "NVIDIA nForce MCP65 Networking Adapter"},
230 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
231 	    "NVIDIA nForce MCP65 Networking Adapter"},
232 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
233 	    "NVIDIA nForce MCP67 Networking Adapter"},
234 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
235 	    "NVIDIA nForce MCP67 Networking Adapter"},
236 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
237 	    "NVIDIA nForce MCP67 Networking Adapter"},
238 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
239 	    "NVIDIA nForce MCP67 Networking Adapter"},
240 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
241 	    "NVIDIA nForce MCP73 Networking Adapter"},
242 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
243 	    "NVIDIA nForce MCP73 Networking Adapter"},
244 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
245 	    "NVIDIA nForce MCP73 Networking Adapter"},
246 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
247 	    "NVIDIA nForce MCP73 Networking Adapter"},
248 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
249 	    "NVIDIA nForce MCP77 Networking Adapter"},
250 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
251 	    "NVIDIA nForce MCP77 Networking Adapter"},
252 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
253 	    "NVIDIA nForce MCP77 Networking Adapter"},
254 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
255 	    "NVIDIA nForce MCP77 Networking Adapter"},
256 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
257 	    "NVIDIA nForce MCP79 Networking Adapter"},
258 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
259 	    "NVIDIA nForce MCP79 Networking Adapter"},
260 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
261 	    "NVIDIA nForce MCP79 Networking Adapter"},
262 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
263 	    "NVIDIA nForce MCP79 Networking Adapter"},
264 	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP89_LAN,
265 	    "NVIDIA nForce MCP89 Networking Adapter"},
266 	{0, 0, NULL}
267 };
268 
269 /* Probe for supported hardware ID's */
270 static int
nfe_probe(device_t dev)271 nfe_probe(device_t dev)
272 {
273 	struct nfe_type *t;
274 
275 	t = nfe_devs;
276 	/* Check for matching PCI DEVICE ID's */
277 	while (t->name != NULL) {
278 		if ((pci_get_vendor(dev) == t->vid_id) &&
279 		    (pci_get_device(dev) == t->dev_id)) {
280 			device_set_desc(dev, t->name);
281 			return (BUS_PROBE_DEFAULT);
282 		}
283 		t++;
284 	}
285 
286 	return (ENXIO);
287 }
288 
289 static void
nfe_alloc_msix(struct nfe_softc * sc,int count)290 nfe_alloc_msix(struct nfe_softc *sc, int count)
291 {
292 	int rid;
293 
294 	rid = PCIR_BAR(2);
295 	sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY,
296 	    &rid, RF_ACTIVE);
297 	if (sc->nfe_msix_res == NULL) {
298 		device_printf(sc->nfe_dev,
299 		    "couldn't allocate MSIX table resource\n");
300 		return;
301 	}
302 	rid = PCIR_BAR(3);
303 	sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev,
304 	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
305 	if (sc->nfe_msix_pba_res == NULL) {
306 		device_printf(sc->nfe_dev,
307 		    "couldn't allocate MSIX PBA resource\n");
308 		bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2),
309 		    sc->nfe_msix_res);
310 		sc->nfe_msix_res = NULL;
311 		return;
312 	}
313 
314 	if (pci_alloc_msix(sc->nfe_dev, &count) == 0) {
315 		if (count == NFE_MSI_MESSAGES) {
316 			if (bootverbose)
317 				device_printf(sc->nfe_dev,
318 				    "Using %d MSIX messages\n", count);
319 			sc->nfe_msix = 1;
320 		} else {
321 			if (bootverbose)
322 				device_printf(sc->nfe_dev,
323 				    "couldn't allocate MSIX\n");
324 			pci_release_msi(sc->nfe_dev);
325 			bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
326 			    PCIR_BAR(3), sc->nfe_msix_pba_res);
327 			bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
328 			    PCIR_BAR(2), sc->nfe_msix_res);
329 			sc->nfe_msix_pba_res = NULL;
330 			sc->nfe_msix_res = NULL;
331 		}
332 	}
333 }
334 
335 static int
nfe_detect_msik9(struct nfe_softc * sc)336 nfe_detect_msik9(struct nfe_softc *sc)
337 {
338 	static const char *maker = "MSI";
339 	static const char *product = "K9N6PGM2-V2 (MS-7309)";
340 	char *m, *p;
341 	int found;
342 
343 	found = 0;
344 	m = kern_getenv("smbios.planar.maker");
345 	p = kern_getenv("smbios.planar.product");
346 	if (m != NULL && p != NULL) {
347 		if (strcmp(m, maker) == 0 && strcmp(p, product) == 0)
348 			found = 1;
349 	}
350 	if (m != NULL)
351 		freeenv(m);
352 	if (p != NULL)
353 		freeenv(p);
354 
355 	return (found);
356 }
357 
358 static int
nfe_attach(device_t dev)359 nfe_attach(device_t dev)
360 {
361 	struct nfe_softc *sc;
362 	if_t ifp;
363 	bus_addr_t dma_addr_max;
364 	int error = 0, i, msic, phyloc, reg, rid;
365 
366 	sc = device_get_softc(dev);
367 	sc->nfe_dev = dev;
368 
369 	mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
370 	    MTX_DEF);
371 	callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);
372 
373 	pci_enable_busmaster(dev);
374 
375 	rid = PCIR_BAR(0);
376 	sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
377 	    RF_ACTIVE);
378 	if (sc->nfe_res[0] == NULL) {
379 		device_printf(dev, "couldn't map memory resources\n");
380 		mtx_destroy(&sc->nfe_mtx);
381 		return (ENXIO);
382 	}
383 
384 	if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
385 		uint16_t v, width;
386 
387 		v = pci_read_config(dev, reg + 0x08, 2);
388 		/* Change max. read request size to 4096. */
389 		v &= ~(7 << 12);
390 		v |= (5 << 12);
391 		pci_write_config(dev, reg + 0x08, v, 2);
392 
393 		v = pci_read_config(dev, reg + 0x0c, 2);
394 		/* link capability */
395 		v = (v >> 4) & 0x0f;
396 		width = pci_read_config(dev, reg + 0x12, 2);
397 		/* negotiated link width */
398 		width = (width >> 4) & 0x3f;
399 		if (v != width)
400 			device_printf(sc->nfe_dev,
401 			    "warning, negotiated width of link(x%d) != "
402 			    "max. width of link(x%d)\n", width, v);
403 	}
404 
405 	if (nfe_can_use_msix(sc) == 0) {
406 		device_printf(sc->nfe_dev,
407 		    "MSI/MSI-X capability black-listed, will use INTx\n");
408 		msix_disable = 1;
409 		msi_disable = 1;
410 	}
411 
412 	/* Allocate interrupt */
413 	if (msix_disable == 0 || msi_disable == 0) {
414 		if (msix_disable == 0 &&
415 		    (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES)
416 			nfe_alloc_msix(sc, msic);
417 		if (msi_disable == 0 && sc->nfe_msix == 0 &&
418 		    (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES &&
419 		    pci_alloc_msi(dev, &msic) == 0) {
420 			if (msic == NFE_MSI_MESSAGES) {
421 				if (bootverbose)
422 					device_printf(dev,
423 					    "Using %d MSI messages\n", msic);
424 				sc->nfe_msi = 1;
425 			} else
426 				pci_release_msi(dev);
427 		}
428 	}
429 
430 	if (sc->nfe_msix == 0 && sc->nfe_msi == 0) {
431 		rid = 0;
432 		sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
433 		    RF_SHAREABLE | RF_ACTIVE);
434 		if (sc->nfe_irq[0] == NULL) {
435 			device_printf(dev, "couldn't allocate IRQ resources\n");
436 			error = ENXIO;
437 			goto fail;
438 		}
439 	} else {
440 		for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
441 			sc->nfe_irq[i] = bus_alloc_resource_any(dev,
442 			    SYS_RES_IRQ, &rid, RF_ACTIVE);
443 			if (sc->nfe_irq[i] == NULL) {
444 				device_printf(dev,
445 				    "couldn't allocate IRQ resources for "
446 				    "message %d\n", rid);
447 				error = ENXIO;
448 				goto fail;
449 			}
450 		}
451 		/* Map interrupts to vector 0. */
452 		if (sc->nfe_msix != 0) {
453 			NFE_WRITE(sc, NFE_MSIX_MAP0, 0);
454 			NFE_WRITE(sc, NFE_MSIX_MAP1, 0);
455 		} else if (sc->nfe_msi != 0) {
456 			NFE_WRITE(sc, NFE_MSI_MAP0, 0);
457 			NFE_WRITE(sc, NFE_MSI_MAP1, 0);
458 		}
459 	}
460 
461 	/* Set IRQ status/mask register. */
462 	sc->nfe_irq_status = NFE_IRQ_STATUS;
463 	sc->nfe_irq_mask = NFE_IRQ_MASK;
464 	sc->nfe_intrs = NFE_IRQ_WANTED;
465 	sc->nfe_nointrs = 0;
466 	if (sc->nfe_msix != 0) {
467 		sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS;
468 		sc->nfe_nointrs = NFE_IRQ_WANTED;
469 	} else if (sc->nfe_msi != 0) {
470 		sc->nfe_irq_mask = NFE_MSI_IRQ_MASK;
471 		sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED;
472 	}
473 
474 	sc->nfe_devid = pci_get_device(dev);
475 	sc->nfe_revid = pci_get_revid(dev);
476 	sc->nfe_flags = 0;
477 
478 	switch (sc->nfe_devid) {
479 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
480 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
481 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
482 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
483 		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
484 		break;
485 	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
486 	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
487 		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_MIB_V1;
488 		break;
489 	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
490 	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
491 	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
492 	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
493 		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
494 		    NFE_MIB_V1;
495 		break;
496 	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
497 	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
498 		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
499 		    NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
500 		break;
501 
502 	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
503 	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
504 	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
505 	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
506 	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
507 	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
508 	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
509 	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
510 	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
511 	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
512 	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
513 	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
514 		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT |
515 		    NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
516 		break;
517 	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
518 	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
519 	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
520 	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
521 		/* XXX flow control */
522 		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT |
523 		    NFE_CORRECT_MACADDR | NFE_MIB_V3;
524 		break;
525 	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
526 	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
527 	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
528 	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
529 	case PCI_PRODUCT_NVIDIA_MCP89_LAN:
530 		/* XXX flow control */
531 		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
532 		    NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_MIB_V3;
533 		break;
534 	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
535 	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
536 	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
537 	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
538 		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
539 		    NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL |
540 		    NFE_MIB_V2;
541 		break;
542 	}
543 
544 	nfe_power(sc);
545 	/* Check for reversed ethernet address */
546 	if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
547 		sc->nfe_flags |= NFE_CORRECT_MACADDR;
548 	nfe_get_macaddr(sc, sc->eaddr);
549 	/*
550 	 * Allocate the parent bus DMA tag appropriate for PCI.
551 	 */
552 	dma_addr_max = BUS_SPACE_MAXADDR_32BIT;
553 	if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0)
554 		dma_addr_max = NFE_DMA_MAXADDR;
555 	error = bus_dma_tag_create(
556 	    bus_get_dma_tag(sc->nfe_dev),	/* parent */
557 	    1, 0,				/* alignment, boundary */
558 	    dma_addr_max,			/* lowaddr */
559 	    BUS_SPACE_MAXADDR,			/* highaddr */
560 	    NULL, NULL,				/* filter, filterarg */
561 	    BUS_SPACE_MAXSIZE_32BIT, 0,		/* maxsize, nsegments */
562 	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
563 	    0,					/* flags */
564 	    NULL, NULL,				/* lockfunc, lockarg */
565 	    &sc->nfe_parent_tag);
566 	if (error)
567 		goto fail;
568 
569 	ifp = sc->nfe_ifp = if_gethandle(IFT_ETHER);
570 
571 	/*
572 	 * Allocate Tx and Rx rings.
573 	 */
574 	if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0)
575 		goto fail;
576 
577 	if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0)
578 		goto fail;
579 
580 	nfe_alloc_jrx_ring(sc, &sc->jrxq);
581 	/* Create sysctl node. */
582 	nfe_sysctl_node(sc);
583 
584 	if_setsoftc(ifp, sc);
585 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
586 	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
587 	if_setioctlfn(ifp, nfe_ioctl);
588 	if_setstartfn(ifp, nfe_start);
589 	if_sethwassist(ifp, 0);
590 	if_setcapabilities(ifp, 0);
591 	if_setinitfn(ifp, nfe_init);
592 	if_setsendqlen(ifp, NFE_TX_RING_COUNT - 1);
593 	if_setsendqready(ifp);
594 
595 	if (sc->nfe_flags & NFE_HW_CSUM) {
596 		if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_TSO4, 0);
597 		if_sethwassistbits(ifp, NFE_CSUM_FEATURES | CSUM_TSO, 0);
598 	}
599 	if_setcapenable(ifp, if_getcapabilities(ifp));
600 
601 	sc->nfe_framesize = if_getmtu(ifp) + NFE_RX_HEADERS;
602 	/* VLAN capability setup. */
603 	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
604 	if ((sc->nfe_flags & NFE_HW_VLAN) != 0) {
605 		if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0);
606 		if ((if_getcapabilities(ifp) & IFCAP_HWCSUM) != 0)
607 			if_setcapabilitiesbit(ifp,
608 			    (IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO), 0);
609 	}
610 
611 	if (pci_find_cap(dev, PCIY_PMG, &reg) == 0)
612 		if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0);
613 	if_setcapenable(ifp, if_getcapabilities(ifp));
614 
615 	/*
616 	 * Tell the upper layer(s) we support long frames.
617 	 * Must appear after the call to ether_ifattach() because
618 	 * ether_ifattach() sets ifi_hdrlen to the default value.
619 	 */
620 	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
621 
622 #ifdef DEVICE_POLLING
623 	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
624 #endif
625 
626 	/* Do MII setup */
627 	phyloc = MII_PHY_ANY;
628 	if (sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN1 ||
629 	    sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN2 ||
630 	    sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN3 ||
631 	    sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN4) {
632 		if (nfe_detect_msik9(sc) != 0)
633 			phyloc = 0;
634 	}
635 	error = mii_attach(dev, &sc->nfe_miibus, ifp,
636 	    (ifm_change_cb_t)nfe_ifmedia_upd, (ifm_stat_cb_t)nfe_ifmedia_sts,
637 	    BMSR_DEFCAPMASK, phyloc, MII_OFFSET_ANY, MIIF_DOPAUSE);
638 	if (error != 0) {
639 		device_printf(dev, "attaching PHYs failed\n");
640 		goto fail;
641 	}
642 	ether_ifattach(ifp, sc->eaddr);
643 
644 	NET_TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
645 	sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK,
646 	    taskqueue_thread_enqueue, &sc->nfe_tq);
647 	taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq",
648 	    device_get_nameunit(sc->nfe_dev));
649 	error = 0;
650 	if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
651 		error = bus_setup_intr(dev, sc->nfe_irq[0],
652 		    INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
653 		    &sc->nfe_intrhand[0]);
654 	} else {
655 		for (i = 0; i < NFE_MSI_MESSAGES; i++) {
656 			error = bus_setup_intr(dev, sc->nfe_irq[i],
657 			    INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
658 			    &sc->nfe_intrhand[i]);
659 			if (error != 0)
660 				break;
661 		}
662 	}
663 	if (error) {
664 		device_printf(dev, "couldn't set up irq\n");
665 		taskqueue_free(sc->nfe_tq);
666 		sc->nfe_tq = NULL;
667 		ether_ifdetach(ifp);
668 		goto fail;
669 	}
670 
671 fail:
672 	if (error)
673 		nfe_detach(dev);
674 
675 	return (error);
676 }
677 
678 static int
nfe_detach(device_t dev)679 nfe_detach(device_t dev)
680 {
681 	struct nfe_softc *sc;
682 	if_t ifp;
683 	uint8_t eaddr[ETHER_ADDR_LEN];
684 	int i, rid;
685 
686 	sc = device_get_softc(dev);
687 	KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
688 	ifp = sc->nfe_ifp;
689 
690 #ifdef DEVICE_POLLING
691 	if (ifp != NULL && if_getcapenable(ifp) & IFCAP_POLLING)
692 		ether_poll_deregister(ifp);
693 #endif
694 	if (device_is_attached(dev)) {
695 		NFE_LOCK(sc);
696 		nfe_stop(ifp);
697 		if_setflagbits(ifp, 0, IFF_UP);
698 		NFE_UNLOCK(sc);
699 		callout_drain(&sc->nfe_stat_ch);
700 		ether_ifdetach(ifp);
701 	}
702 
703 	if (ifp) {
704 		/* restore ethernet address */
705 		if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
706 			for (i = 0; i < ETHER_ADDR_LEN; i++) {
707 				eaddr[i] = sc->eaddr[5 - i];
708 			}
709 		} else
710 			bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN);
711 		nfe_set_macaddr(sc, eaddr);
712 		if_free(ifp);
713 	}
714 	if (sc->nfe_miibus)
715 		device_delete_child(dev, sc->nfe_miibus);
716 	bus_generic_detach(dev);
717 	if (sc->nfe_tq != NULL) {
718 		taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task);
719 		taskqueue_free(sc->nfe_tq);
720 		sc->nfe_tq = NULL;
721 	}
722 
723 	for (i = 0; i < NFE_MSI_MESSAGES; i++) {
724 		if (sc->nfe_intrhand[i] != NULL) {
725 			bus_teardown_intr(dev, sc->nfe_irq[i],
726 			    sc->nfe_intrhand[i]);
727 			sc->nfe_intrhand[i] = NULL;
728 		}
729 	}
730 
731 	if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
732 		if (sc->nfe_irq[0] != NULL)
733 			bus_release_resource(dev, SYS_RES_IRQ, 0,
734 			    sc->nfe_irq[0]);
735 	} else {
736 		for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
737 			if (sc->nfe_irq[i] != NULL) {
738 				bus_release_resource(dev, SYS_RES_IRQ, rid,
739 				    sc->nfe_irq[i]);
740 				sc->nfe_irq[i] = NULL;
741 			}
742 		}
743 		pci_release_msi(dev);
744 	}
745 	if (sc->nfe_msix_pba_res != NULL) {
746 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3),
747 		    sc->nfe_msix_pba_res);
748 		sc->nfe_msix_pba_res = NULL;
749 	}
750 	if (sc->nfe_msix_res != NULL) {
751 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2),
752 		    sc->nfe_msix_res);
753 		sc->nfe_msix_res = NULL;
754 	}
755 	if (sc->nfe_res[0] != NULL) {
756 		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
757 		    sc->nfe_res[0]);
758 		sc->nfe_res[0] = NULL;
759 	}
760 
761 	nfe_free_tx_ring(sc, &sc->txq);
762 	nfe_free_rx_ring(sc, &sc->rxq);
763 	nfe_free_jrx_ring(sc, &sc->jrxq);
764 
765 	if (sc->nfe_parent_tag) {
766 		bus_dma_tag_destroy(sc->nfe_parent_tag);
767 		sc->nfe_parent_tag = NULL;
768 	}
769 
770 	mtx_destroy(&sc->nfe_mtx);
771 
772 	return (0);
773 }
774 
775 static int
nfe_suspend(device_t dev)776 nfe_suspend(device_t dev)
777 {
778 	struct nfe_softc *sc;
779 
780 	sc = device_get_softc(dev);
781 
782 	NFE_LOCK(sc);
783 	nfe_stop(sc->nfe_ifp);
784 	nfe_set_wol(sc);
785 	sc->nfe_suspended = 1;
786 	NFE_UNLOCK(sc);
787 
788 	return (0);
789 }
790 
791 static int
nfe_resume(device_t dev)792 nfe_resume(device_t dev)
793 {
794 	struct nfe_softc *sc;
795 	if_t ifp;
796 
797 	sc = device_get_softc(dev);
798 
799 	NFE_LOCK(sc);
800 	nfe_power(sc);
801 	ifp = sc->nfe_ifp;
802 	if (if_getflags(ifp) & IFF_UP)
803 		nfe_init_locked(sc);
804 	sc->nfe_suspended = 0;
805 	NFE_UNLOCK(sc);
806 
807 	return (0);
808 }
809 
810 static int
nfe_can_use_msix(struct nfe_softc * sc)811 nfe_can_use_msix(struct nfe_softc *sc)
812 {
813 	static struct msix_blacklist {
814 		char	*maker;
815 		char	*product;
816 	} msix_blacklists[] = {
817 		{ "ASUSTeK Computer INC.", "P5N32-SLI PREMIUM" }
818 	};
819 
820 	struct msix_blacklist *mblp;
821 	char *maker, *product;
822 	int count, n, use_msix;
823 
824 	/*
825 	 * Search base board manufacturer and product name table
826 	 * to see this system has a known MSI/MSI-X issue.
827 	 */
828 	maker = kern_getenv("smbios.planar.maker");
829 	product = kern_getenv("smbios.planar.product");
830 	use_msix = 1;
831 	if (maker != NULL && product != NULL) {
832 		count = nitems(msix_blacklists);
833 		mblp = msix_blacklists;
834 		for (n = 0; n < count; n++) {
835 			if (strcmp(maker, mblp->maker) == 0 &&
836 			    strcmp(product, mblp->product) == 0) {
837 				use_msix = 0;
838 				break;
839 			}
840 			mblp++;
841 		}
842 	}
843 	if (maker != NULL)
844 		freeenv(maker);
845 	if (product != NULL)
846 		freeenv(product);
847 
848 	return (use_msix);
849 }
850 
851 /* Take PHY/NIC out of powerdown, from Linux */
852 static void
nfe_power(struct nfe_softc * sc)853 nfe_power(struct nfe_softc *sc)
854 {
855 	uint32_t pwr;
856 
857 	if ((sc->nfe_flags & NFE_PWR_MGMT) == 0)
858 		return;
859 	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
860 	NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
861 	DELAY(100);
862 	NFE_WRITE(sc, NFE_MAC_RESET, 0);
863 	DELAY(100);
864 	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
865 	pwr = NFE_READ(sc, NFE_PWR2_CTL);
866 	pwr &= ~NFE_PWR2_WAKEUP_MASK;
867 	if (sc->nfe_revid >= 0xa3 &&
868 	    (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 ||
869 	    sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2))
870 		pwr |= NFE_PWR2_REVA3;
871 	NFE_WRITE(sc, NFE_PWR2_CTL, pwr);
872 }
873 
874 static void
nfe_miibus_statchg(device_t dev)875 nfe_miibus_statchg(device_t dev)
876 {
877 	struct nfe_softc *sc;
878 	struct mii_data *mii;
879 	if_t ifp;
880 	uint32_t rxctl, txctl;
881 
882 	sc = device_get_softc(dev);
883 
884 	mii = device_get_softc(sc->nfe_miibus);
885 	ifp = sc->nfe_ifp;
886 
887 	sc->nfe_link = 0;
888 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
889 	    (IFM_ACTIVE | IFM_AVALID)) {
890 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
891 		case IFM_10_T:
892 		case IFM_100_TX:
893 		case IFM_1000_T:
894 			sc->nfe_link = 1;
895 			break;
896 		default:
897 			break;
898 		}
899 	}
900 
901 	nfe_mac_config(sc, mii);
902 	txctl = NFE_READ(sc, NFE_TX_CTL);
903 	rxctl = NFE_READ(sc, NFE_RX_CTL);
904 	if (sc->nfe_link != 0 && (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
905 		txctl |= NFE_TX_START;
906 		rxctl |= NFE_RX_START;
907 	} else {
908 		txctl &= ~NFE_TX_START;
909 		rxctl &= ~NFE_RX_START;
910 	}
911 	NFE_WRITE(sc, NFE_TX_CTL, txctl);
912 	NFE_WRITE(sc, NFE_RX_CTL, rxctl);
913 }
914 
915 static void
nfe_mac_config(struct nfe_softc * sc,struct mii_data * mii)916 nfe_mac_config(struct nfe_softc *sc, struct mii_data *mii)
917 {
918 	uint32_t link, misc, phy, seed;
919 	uint32_t val;
920 
921 	NFE_LOCK_ASSERT(sc);
922 
923 	phy = NFE_READ(sc, NFE_PHY_IFACE);
924 	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);
925 
926 	seed = NFE_READ(sc, NFE_RNDSEED);
927 	seed &= ~NFE_SEED_MASK;
928 
929 	misc = NFE_MISC1_MAGIC;
930 	link = NFE_MEDIA_SET;
931 
932 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0) {
933 		phy  |= NFE_PHY_HDX;	/* half-duplex */
934 		misc |= NFE_MISC1_HDX;
935 	}
936 
937 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
938 	case IFM_1000_T:	/* full-duplex only */
939 		link |= NFE_MEDIA_1000T;
940 		seed |= NFE_SEED_1000T;
941 		phy  |= NFE_PHY_1000T;
942 		break;
943 	case IFM_100_TX:
944 		link |= NFE_MEDIA_100TX;
945 		seed |= NFE_SEED_100TX;
946 		phy  |= NFE_PHY_100TX;
947 		break;
948 	case IFM_10_T:
949 		link |= NFE_MEDIA_10T;
950 		seed |= NFE_SEED_10T;
951 		break;
952 	}
953 
954 	if ((phy & 0x10000000) != 0) {
955 		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
956 			val = NFE_R1_MAGIC_1000;
957 		else
958 			val = NFE_R1_MAGIC_10_100;
959 	} else
960 		val = NFE_R1_MAGIC_DEFAULT;
961 	NFE_WRITE(sc, NFE_SETUP_R1, val);
962 
963 	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */
964 
965 	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
966 	NFE_WRITE(sc, NFE_MISC1, misc);
967 	NFE_WRITE(sc, NFE_LINKSPEED, link);
968 
969 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
970 		/* It seems all hardwares supports Rx pause frames. */
971 		val = NFE_READ(sc, NFE_RXFILTER);
972 		if ((IFM_OPTIONS(mii->mii_media_active) &
973 		    IFM_ETH_RXPAUSE) != 0)
974 			val |= NFE_PFF_RX_PAUSE;
975 		else
976 			val &= ~NFE_PFF_RX_PAUSE;
977 		NFE_WRITE(sc, NFE_RXFILTER, val);
978 		if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
979 			val = NFE_READ(sc, NFE_MISC1);
980 			if ((IFM_OPTIONS(mii->mii_media_active) &
981 			    IFM_ETH_TXPAUSE) != 0) {
982 				NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
983 				    NFE_TX_PAUSE_FRAME_ENABLE);
984 				val |= NFE_MISC1_TX_PAUSE;
985 			} else {
986 				val &= ~NFE_MISC1_TX_PAUSE;
987 				NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
988 				    NFE_TX_PAUSE_FRAME_DISABLE);
989 			}
990 			NFE_WRITE(sc, NFE_MISC1, val);
991 		}
992 	} else {
993 		/* disable rx/tx pause frames */
994 		val = NFE_READ(sc, NFE_RXFILTER);
995 		val &= ~NFE_PFF_RX_PAUSE;
996 		NFE_WRITE(sc, NFE_RXFILTER, val);
997 		if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
998 			NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
999 			    NFE_TX_PAUSE_FRAME_DISABLE);
1000 			val = NFE_READ(sc, NFE_MISC1);
1001 			val &= ~NFE_MISC1_TX_PAUSE;
1002 			NFE_WRITE(sc, NFE_MISC1, val);
1003 		}
1004 	}
1005 }
1006 
1007 static int
nfe_miibus_readreg(device_t dev,int phy,int reg)1008 nfe_miibus_readreg(device_t dev, int phy, int reg)
1009 {
1010 	struct nfe_softc *sc = device_get_softc(dev);
1011 	uint32_t val;
1012 	int ntries;
1013 
1014 	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1015 
1016 	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
1017 		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
1018 		DELAY(100);
1019 	}
1020 
1021 	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);
1022 
1023 	for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
1024 		DELAY(100);
1025 		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
1026 			break;
1027 	}
1028 	if (ntries == NFE_TIMEOUT) {
1029 		DPRINTFN(sc, 2, "timeout waiting for PHY\n");
1030 		return 0;
1031 	}
1032 
1033 	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
1034 		DPRINTFN(sc, 2, "could not read PHY\n");
1035 		return 0;
1036 	}
1037 
1038 	val = NFE_READ(sc, NFE_PHY_DATA);
1039 	if (val != 0xffffffff && val != 0)
1040 		sc->mii_phyaddr = phy;
1041 
1042 	DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);
1043 
1044 	return (val);
1045 }
1046 
1047 static int
nfe_miibus_writereg(device_t dev,int phy,int reg,int val)1048 nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
1049 {
1050 	struct nfe_softc *sc = device_get_softc(dev);
1051 	uint32_t ctl;
1052 	int ntries;
1053 
1054 	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1055 
1056 	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
1057 		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
1058 		DELAY(100);
1059 	}
1060 
1061 	NFE_WRITE(sc, NFE_PHY_DATA, val);
1062 	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
1063 	NFE_WRITE(sc, NFE_PHY_CTL, ctl);
1064 
1065 	for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
1066 		DELAY(100);
1067 		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
1068 			break;
1069 	}
1070 #ifdef NFE_DEBUG
1071 	if (nfedebug >= 2 && ntries == NFE_TIMEOUT)
1072 		device_printf(sc->nfe_dev, "could not write to PHY\n");
1073 #endif
1074 	return (0);
1075 }
1076 
1077 struct nfe_dmamap_arg {
1078 	bus_addr_t nfe_busaddr;
1079 };
1080 
1081 static int
nfe_alloc_rx_ring(struct nfe_softc * sc,struct nfe_rx_ring * ring)1082 nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1083 {
1084 	struct nfe_dmamap_arg ctx;
1085 	struct nfe_rx_data *data;
1086 	void *desc;
1087 	int i, error, descsize;
1088 
1089 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1090 		desc = ring->desc64;
1091 		descsize = sizeof (struct nfe_desc64);
1092 	} else {
1093 		desc = ring->desc32;
1094 		descsize = sizeof (struct nfe_desc32);
1095 	}
1096 
1097 	ring->cur = ring->next = 0;
1098 
1099 	error = bus_dma_tag_create(sc->nfe_parent_tag,
1100 	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
1101 	    BUS_SPACE_MAXADDR,			/* lowaddr */
1102 	    BUS_SPACE_MAXADDR,			/* highaddr */
1103 	    NULL, NULL,				/* filter, filterarg */
1104 	    NFE_RX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
1105 	    NFE_RX_RING_COUNT * descsize,	/* maxsegsize */
1106 	    0,					/* flags */
1107 	    NULL, NULL,				/* lockfunc, lockarg */
1108 	    &ring->rx_desc_tag);
1109 	if (error != 0) {
1110 		device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
1111 		goto fail;
1112 	}
1113 
1114 	/* allocate memory to desc */
1115 	error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK |
1116 	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map);
1117 	if (error != 0) {
1118 		device_printf(sc->nfe_dev, "could not create desc DMA map\n");
1119 		goto fail;
1120 	}
1121 	if (sc->nfe_flags & NFE_40BIT_ADDR)
1122 		ring->desc64 = desc;
1123 	else
1124 		ring->desc32 = desc;
1125 
1126 	/* map desc to device visible address space */
1127 	ctx.nfe_busaddr = 0;
1128 	error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc,
1129 	    NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1130 	if (error != 0) {
1131 		device_printf(sc->nfe_dev, "could not load desc DMA map\n");
1132 		goto fail;
1133 	}
1134 	ring->physaddr = ctx.nfe_busaddr;
1135 
1136 	error = bus_dma_tag_create(sc->nfe_parent_tag,
1137 	    1, 0,			/* alignment, boundary */
1138 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1139 	    BUS_SPACE_MAXADDR,		/* highaddr */
1140 	    NULL, NULL,			/* filter, filterarg */
1141 	    MCLBYTES, 1,		/* maxsize, nsegments */
1142 	    MCLBYTES,			/* maxsegsize */
1143 	    0,				/* flags */
1144 	    NULL, NULL,			/* lockfunc, lockarg */
1145 	    &ring->rx_data_tag);
1146 	if (error != 0) {
1147 		device_printf(sc->nfe_dev, "could not create Rx DMA tag\n");
1148 		goto fail;
1149 	}
1150 
1151 	error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map);
1152 	if (error != 0) {
1153 		device_printf(sc->nfe_dev,
1154 		    "could not create Rx DMA spare map\n");
1155 		goto fail;
1156 	}
1157 
1158 	/*
1159 	 * Pre-allocate Rx buffers and populate Rx ring.
1160 	 */
1161 	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1162 		data = &sc->rxq.data[i];
1163 		data->rx_data_map = NULL;
1164 		data->m = NULL;
1165 		error = bus_dmamap_create(ring->rx_data_tag, 0,
1166 		    &data->rx_data_map);
1167 		if (error != 0) {
1168 			device_printf(sc->nfe_dev,
1169 			    "could not create Rx DMA map\n");
1170 			goto fail;
1171 		}
1172 	}
1173 
1174 fail:
1175 	return (error);
1176 }
1177 
1178 static void
nfe_alloc_jrx_ring(struct nfe_softc * sc,struct nfe_jrx_ring * ring)1179 nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1180 {
1181 	struct nfe_dmamap_arg ctx;
1182 	struct nfe_rx_data *data;
1183 	void *desc;
1184 	int i, error, descsize;
1185 
1186 	if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
1187 		return;
1188 	if (jumbo_disable != 0) {
1189 		device_printf(sc->nfe_dev, "disabling jumbo frame support\n");
1190 		sc->nfe_jumbo_disable = 1;
1191 		return;
1192 	}
1193 
1194 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1195 		desc = ring->jdesc64;
1196 		descsize = sizeof (struct nfe_desc64);
1197 	} else {
1198 		desc = ring->jdesc32;
1199 		descsize = sizeof (struct nfe_desc32);
1200 	}
1201 
1202 	ring->jcur = ring->jnext = 0;
1203 
1204 	/* Create DMA tag for jumbo Rx ring. */
1205 	error = bus_dma_tag_create(sc->nfe_parent_tag,
1206 	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
1207 	    BUS_SPACE_MAXADDR,			/* lowaddr */
1208 	    BUS_SPACE_MAXADDR,			/* highaddr */
1209 	    NULL, NULL,				/* filter, filterarg */
1210 	    NFE_JUMBO_RX_RING_COUNT * descsize,	/* maxsize */
1211 	    1, 					/* nsegments */
1212 	    NFE_JUMBO_RX_RING_COUNT * descsize,	/* maxsegsize */
1213 	    0,					/* flags */
1214 	    NULL, NULL,				/* lockfunc, lockarg */
1215 	    &ring->jrx_desc_tag);
1216 	if (error != 0) {
1217 		device_printf(sc->nfe_dev,
1218 		    "could not create jumbo ring DMA tag\n");
1219 		goto fail;
1220 	}
1221 
1222 	/* Create DMA tag for jumbo Rx buffers. */
1223 	error = bus_dma_tag_create(sc->nfe_parent_tag,
1224 	    1, 0,				/* alignment, boundary */
1225 	    BUS_SPACE_MAXADDR,			/* lowaddr */
1226 	    BUS_SPACE_MAXADDR,			/* highaddr */
1227 	    NULL, NULL,				/* filter, filterarg */
1228 	    MJUM9BYTES,				/* maxsize */
1229 	    1,					/* nsegments */
1230 	    MJUM9BYTES,				/* maxsegsize */
1231 	    0,					/* flags */
1232 	    NULL, NULL,				/* lockfunc, lockarg */
1233 	    &ring->jrx_data_tag);
1234 	if (error != 0) {
1235 		device_printf(sc->nfe_dev,
1236 		    "could not create jumbo Rx buffer DMA tag\n");
1237 		goto fail;
1238 	}
1239 
1240 	/* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
1241 	error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK |
1242 	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map);
1243 	if (error != 0) {
1244 		device_printf(sc->nfe_dev,
1245 		    "could not allocate DMA'able memory for jumbo Rx ring\n");
1246 		goto fail;
1247 	}
1248 	if (sc->nfe_flags & NFE_40BIT_ADDR)
1249 		ring->jdesc64 = desc;
1250 	else
1251 		ring->jdesc32 = desc;
1252 
1253 	ctx.nfe_busaddr = 0;
1254 	error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc,
1255 	    NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1256 	if (error != 0) {
1257 		device_printf(sc->nfe_dev,
1258 		    "could not load DMA'able memory for jumbo Rx ring\n");
1259 		goto fail;
1260 	}
1261 	ring->jphysaddr = ctx.nfe_busaddr;
1262 
1263 	/* Create DMA maps for jumbo Rx buffers. */
1264 	error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map);
1265 	if (error != 0) {
1266 		device_printf(sc->nfe_dev,
1267 		    "could not create jumbo Rx DMA spare map\n");
1268 		goto fail;
1269 	}
1270 
1271 	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1272 		data = &sc->jrxq.jdata[i];
1273 		data->rx_data_map = NULL;
1274 		data->m = NULL;
1275 		error = bus_dmamap_create(ring->jrx_data_tag, 0,
1276 		    &data->rx_data_map);
1277 		if (error != 0) {
1278 			device_printf(sc->nfe_dev,
1279 			    "could not create jumbo Rx DMA map\n");
1280 			goto fail;
1281 		}
1282 	}
1283 
1284 	return;
1285 
1286 fail:
1287 	/*
1288 	 * Running without jumbo frame support is ok for most cases
1289 	 * so don't fail on creating dma tag/map for jumbo frame.
1290 	 */
1291 	nfe_free_jrx_ring(sc, ring);
1292 	device_printf(sc->nfe_dev, "disabling jumbo frame support due to "
1293 	    "resource shortage\n");
1294 	sc->nfe_jumbo_disable = 1;
1295 }
1296 
1297 static int
nfe_init_rx_ring(struct nfe_softc * sc,struct nfe_rx_ring * ring)1298 nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1299 {
1300 	void *desc;
1301 	size_t descsize;
1302 	int i;
1303 
1304 	ring->cur = ring->next = 0;
1305 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1306 		desc = ring->desc64;
1307 		descsize = sizeof (struct nfe_desc64);
1308 	} else {
1309 		desc = ring->desc32;
1310 		descsize = sizeof (struct nfe_desc32);
1311 	}
1312 	bzero(desc, descsize * NFE_RX_RING_COUNT);
1313 	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1314 		if (nfe_newbuf(sc, i) != 0)
1315 			return (ENOBUFS);
1316 	}
1317 
1318 	bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
1319 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1320 
1321 	return (0);
1322 }
1323 
1324 static int
nfe_init_jrx_ring(struct nfe_softc * sc,struct nfe_jrx_ring * ring)1325 nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1326 {
1327 	void *desc;
1328 	size_t descsize;
1329 	int i;
1330 
1331 	ring->jcur = ring->jnext = 0;
1332 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1333 		desc = ring->jdesc64;
1334 		descsize = sizeof (struct nfe_desc64);
1335 	} else {
1336 		desc = ring->jdesc32;
1337 		descsize = sizeof (struct nfe_desc32);
1338 	}
1339 	bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT);
1340 	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1341 		if (nfe_jnewbuf(sc, i) != 0)
1342 			return (ENOBUFS);
1343 	}
1344 
1345 	bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map,
1346 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1347 
1348 	return (0);
1349 }
1350 
1351 static void
nfe_free_rx_ring(struct nfe_softc * sc,struct nfe_rx_ring * ring)1352 nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1353 {
1354 	struct nfe_rx_data *data;
1355 	void *desc;
1356 	int i;
1357 
1358 	if (sc->nfe_flags & NFE_40BIT_ADDR)
1359 		desc = ring->desc64;
1360 	else
1361 		desc = ring->desc32;
1362 
1363 	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1364 		data = &ring->data[i];
1365 		if (data->rx_data_map != NULL) {
1366 			bus_dmamap_destroy(ring->rx_data_tag,
1367 			    data->rx_data_map);
1368 			data->rx_data_map = NULL;
1369 		}
1370 		if (data->m != NULL) {
1371 			m_freem(data->m);
1372 			data->m = NULL;
1373 		}
1374 	}
1375 	if (ring->rx_data_tag != NULL) {
1376 		if (ring->rx_spare_map != NULL) {
1377 			bus_dmamap_destroy(ring->rx_data_tag,
1378 			    ring->rx_spare_map);
1379 			ring->rx_spare_map = NULL;
1380 		}
1381 		bus_dma_tag_destroy(ring->rx_data_tag);
1382 		ring->rx_data_tag = NULL;
1383 	}
1384 
1385 	if (desc != NULL) {
1386 		bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
1387 		bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
1388 		ring->desc64 = NULL;
1389 		ring->desc32 = NULL;
1390 	}
1391 	if (ring->rx_desc_tag != NULL) {
1392 		bus_dma_tag_destroy(ring->rx_desc_tag);
1393 		ring->rx_desc_tag = NULL;
1394 	}
1395 }
1396 
1397 static void
nfe_free_jrx_ring(struct nfe_softc * sc,struct nfe_jrx_ring * ring)1398 nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1399 {
1400 	struct nfe_rx_data *data;
1401 	void *desc;
1402 	int i;
1403 
1404 	if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
1405 		return;
1406 
1407 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1408 		desc = ring->jdesc64;
1409 	} else {
1410 		desc = ring->jdesc32;
1411 	}
1412 
1413 	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1414 		data = &ring->jdata[i];
1415 		if (data->rx_data_map != NULL) {
1416 			bus_dmamap_destroy(ring->jrx_data_tag,
1417 			    data->rx_data_map);
1418 			data->rx_data_map = NULL;
1419 		}
1420 		if (data->m != NULL) {
1421 			m_freem(data->m);
1422 			data->m = NULL;
1423 		}
1424 	}
1425 	if (ring->jrx_data_tag != NULL) {
1426 		if (ring->jrx_spare_map != NULL) {
1427 			bus_dmamap_destroy(ring->jrx_data_tag,
1428 			    ring->jrx_spare_map);
1429 			ring->jrx_spare_map = NULL;
1430 		}
1431 		bus_dma_tag_destroy(ring->jrx_data_tag);
1432 		ring->jrx_data_tag = NULL;
1433 	}
1434 
1435 	if (desc != NULL) {
1436 		bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map);
1437 		bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map);
1438 		ring->jdesc64 = NULL;
1439 		ring->jdesc32 = NULL;
1440 	}
1441 
1442 	if (ring->jrx_desc_tag != NULL) {
1443 		bus_dma_tag_destroy(ring->jrx_desc_tag);
1444 		ring->jrx_desc_tag = NULL;
1445 	}
1446 }
1447 
1448 static int
nfe_alloc_tx_ring(struct nfe_softc * sc,struct nfe_tx_ring * ring)1449 nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1450 {
1451 	struct nfe_dmamap_arg ctx;
1452 	int i, error;
1453 	void *desc;
1454 	int descsize;
1455 
1456 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1457 		desc = ring->desc64;
1458 		descsize = sizeof (struct nfe_desc64);
1459 	} else {
1460 		desc = ring->desc32;
1461 		descsize = sizeof (struct nfe_desc32);
1462 	}
1463 
1464 	ring->queued = 0;
1465 	ring->cur = ring->next = 0;
1466 
1467 	error = bus_dma_tag_create(sc->nfe_parent_tag,
1468 	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
1469 	    BUS_SPACE_MAXADDR,			/* lowaddr */
1470 	    BUS_SPACE_MAXADDR,			/* highaddr */
1471 	    NULL, NULL,				/* filter, filterarg */
1472 	    NFE_TX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
1473 	    NFE_TX_RING_COUNT * descsize,	/* maxsegsize */
1474 	    0,					/* flags */
1475 	    NULL, NULL,				/* lockfunc, lockarg */
1476 	    &ring->tx_desc_tag);
1477 	if (error != 0) {
1478 		device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
1479 		goto fail;
1480 	}
1481 
1482 	error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK |
1483 	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map);
1484 	if (error != 0) {
1485 		device_printf(sc->nfe_dev, "could not create desc DMA map\n");
1486 		goto fail;
1487 	}
1488 	if (sc->nfe_flags & NFE_40BIT_ADDR)
1489 		ring->desc64 = desc;
1490 	else
1491 		ring->desc32 = desc;
1492 
1493 	ctx.nfe_busaddr = 0;
1494 	error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc,
1495 	    NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1496 	if (error != 0) {
1497 		device_printf(sc->nfe_dev, "could not load desc DMA map\n");
1498 		goto fail;
1499 	}
1500 	ring->physaddr = ctx.nfe_busaddr;
1501 
1502 	error = bus_dma_tag_create(sc->nfe_parent_tag,
1503 	    1, 0,
1504 	    BUS_SPACE_MAXADDR,
1505 	    BUS_SPACE_MAXADDR,
1506 	    NULL, NULL,
1507 	    NFE_TSO_MAXSIZE,
1508 	    NFE_MAX_SCATTER,
1509 	    NFE_TSO_MAXSGSIZE,
1510 	    0,
1511 	    NULL, NULL,
1512 	    &ring->tx_data_tag);
1513 	if (error != 0) {
1514 		device_printf(sc->nfe_dev, "could not create Tx DMA tag\n");
1515 		goto fail;
1516 	}
1517 
1518 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1519 		error = bus_dmamap_create(ring->tx_data_tag, 0,
1520 		    &ring->data[i].tx_data_map);
1521 		if (error != 0) {
1522 			device_printf(sc->nfe_dev,
1523 			    "could not create Tx DMA map\n");
1524 			goto fail;
1525 		}
1526 	}
1527 
1528 fail:
1529 	return (error);
1530 }
1531 
1532 static void
nfe_init_tx_ring(struct nfe_softc * sc,struct nfe_tx_ring * ring)1533 nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1534 {
1535 	void *desc;
1536 	size_t descsize;
1537 
1538 	sc->nfe_force_tx = 0;
1539 	ring->queued = 0;
1540 	ring->cur = ring->next = 0;
1541 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1542 		desc = ring->desc64;
1543 		descsize = sizeof (struct nfe_desc64);
1544 	} else {
1545 		desc = ring->desc32;
1546 		descsize = sizeof (struct nfe_desc32);
1547 	}
1548 	bzero(desc, descsize * NFE_TX_RING_COUNT);
1549 
1550 	bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1551 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1552 }
1553 
1554 static void
nfe_free_tx_ring(struct nfe_softc * sc,struct nfe_tx_ring * ring)1555 nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1556 {
1557 	struct nfe_tx_data *data;
1558 	void *desc;
1559 	int i;
1560 
1561 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1562 		desc = ring->desc64;
1563 	} else {
1564 		desc = ring->desc32;
1565 	}
1566 
1567 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1568 		data = &ring->data[i];
1569 
1570 		if (data->m != NULL) {
1571 			bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map,
1572 			    BUS_DMASYNC_POSTWRITE);
1573 			bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map);
1574 			m_freem(data->m);
1575 			data->m = NULL;
1576 		}
1577 		if (data->tx_data_map != NULL) {
1578 			bus_dmamap_destroy(ring->tx_data_tag,
1579 			    data->tx_data_map);
1580 			data->tx_data_map = NULL;
1581 		}
1582 	}
1583 
1584 	if (ring->tx_data_tag != NULL) {
1585 		bus_dma_tag_destroy(ring->tx_data_tag);
1586 		ring->tx_data_tag = NULL;
1587 	}
1588 
1589 	if (desc != NULL) {
1590 		bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1591 		    BUS_DMASYNC_POSTWRITE);
1592 		bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
1593 		bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
1594 		ring->desc64 = NULL;
1595 		ring->desc32 = NULL;
1596 		bus_dma_tag_destroy(ring->tx_desc_tag);
1597 		ring->tx_desc_tag = NULL;
1598 	}
1599 }
1600 
1601 #ifdef DEVICE_POLLING
1602 static poll_handler_t nfe_poll;
1603 
1604 static int
nfe_poll(if_t ifp,enum poll_cmd cmd,int count)1605 nfe_poll(if_t ifp, enum poll_cmd cmd, int count)
1606 {
1607 	struct nfe_softc *sc = if_getsoftc(ifp);
1608 	uint32_t r;
1609 	int rx_npkts = 0;
1610 
1611 	NFE_LOCK(sc);
1612 
1613 	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
1614 		NFE_UNLOCK(sc);
1615 		return (rx_npkts);
1616 	}
1617 
1618 	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
1619 		rx_npkts = nfe_jrxeof(sc, count, &rx_npkts);
1620 	else
1621 		rx_npkts = nfe_rxeof(sc, count, &rx_npkts);
1622 	nfe_txeof(sc);
1623 	if (!if_sendq_empty(ifp))
1624 		nfe_start_locked(ifp);
1625 
1626 	if (cmd == POLL_AND_CHECK_STATUS) {
1627 		if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
1628 			NFE_UNLOCK(sc);
1629 			return (rx_npkts);
1630 		}
1631 		NFE_WRITE(sc, sc->nfe_irq_status, r);
1632 
1633 		if (r & NFE_IRQ_LINK) {
1634 			NFE_READ(sc, NFE_PHY_STATUS);
1635 			NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1636 			DPRINTF(sc, "link state changed\n");
1637 		}
1638 	}
1639 	NFE_UNLOCK(sc);
1640 	return (rx_npkts);
1641 }
1642 #endif /* DEVICE_POLLING */
1643 
1644 static void
nfe_set_intr(struct nfe_softc * sc)1645 nfe_set_intr(struct nfe_softc *sc)
1646 {
1647 
1648 	if (sc->nfe_msi != 0)
1649 		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
1650 }
1651 
1652 /* In MSIX, a write to mask reegisters behaves as XOR. */
1653 static __inline void
nfe_enable_intr(struct nfe_softc * sc)1654 nfe_enable_intr(struct nfe_softc *sc)
1655 {
1656 
1657 	if (sc->nfe_msix != 0) {
1658 		/* XXX Should have a better way to enable interrupts! */
1659 		if (NFE_READ(sc, sc->nfe_irq_mask) == 0)
1660 			NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
1661 	} else
1662 		NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
1663 }
1664 
1665 static __inline void
nfe_disable_intr(struct nfe_softc * sc)1666 nfe_disable_intr(struct nfe_softc *sc)
1667 {
1668 
1669 	if (sc->nfe_msix != 0) {
1670 		/* XXX Should have a better way to disable interrupts! */
1671 		if (NFE_READ(sc, sc->nfe_irq_mask) != 0)
1672 			NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
1673 	} else
1674 		NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
1675 }
1676 
1677 static int
nfe_ioctl(if_t ifp,u_long cmd,caddr_t data)1678 nfe_ioctl(if_t ifp, u_long cmd, caddr_t data)
1679 {
1680 	struct nfe_softc *sc;
1681 	struct ifreq *ifr;
1682 	struct mii_data *mii;
1683 	int error, init, mask;
1684 
1685 	sc = if_getsoftc(ifp);
1686 	ifr = (struct ifreq *) data;
1687 	error = 0;
1688 	init = 0;
1689 	switch (cmd) {
1690 	case SIOCSIFMTU:
1691 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU)
1692 			error = EINVAL;
1693 		else if (if_getmtu(ifp) != ifr->ifr_mtu) {
1694 			if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) ||
1695 			    (sc->nfe_jumbo_disable != 0)) &&
1696 			    ifr->ifr_mtu > ETHERMTU)
1697 				error = EINVAL;
1698 			else {
1699 				NFE_LOCK(sc);
1700 				if_setmtu(ifp, ifr->ifr_mtu);
1701 				if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1702 					if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1703 					nfe_init_locked(sc);
1704 				}
1705 				NFE_UNLOCK(sc);
1706 			}
1707 		}
1708 		break;
1709 	case SIOCSIFFLAGS:
1710 		NFE_LOCK(sc);
1711 		if (if_getflags(ifp) & IFF_UP) {
1712 			/*
1713 			 * If only the PROMISC or ALLMULTI flag changes, then
1714 			 * don't do a full re-init of the chip, just update
1715 			 * the Rx filter.
1716 			 */
1717 			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) &&
1718 			    ((if_getflags(ifp) ^ sc->nfe_if_flags) &
1719 			     (IFF_ALLMULTI | IFF_PROMISC)) != 0)
1720 				nfe_setmulti(sc);
1721 			else
1722 				nfe_init_locked(sc);
1723 		} else {
1724 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
1725 				nfe_stop(ifp);
1726 		}
1727 		sc->nfe_if_flags = if_getflags(ifp);
1728 		NFE_UNLOCK(sc);
1729 		error = 0;
1730 		break;
1731 	case SIOCADDMULTI:
1732 	case SIOCDELMULTI:
1733 		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
1734 			NFE_LOCK(sc);
1735 			nfe_setmulti(sc);
1736 			NFE_UNLOCK(sc);
1737 			error = 0;
1738 		}
1739 		break;
1740 	case SIOCSIFMEDIA:
1741 	case SIOCGIFMEDIA:
1742 		mii = device_get_softc(sc->nfe_miibus);
1743 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1744 		break;
1745 	case SIOCSIFCAP:
1746 		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
1747 #ifdef DEVICE_POLLING
1748 		if ((mask & IFCAP_POLLING) != 0) {
1749 			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
1750 				error = ether_poll_register(nfe_poll, ifp);
1751 				if (error)
1752 					break;
1753 				NFE_LOCK(sc);
1754 				nfe_disable_intr(sc);
1755 				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
1756 				NFE_UNLOCK(sc);
1757 			} else {
1758 				error = ether_poll_deregister(ifp);
1759 				/* Enable interrupt even in error case */
1760 				NFE_LOCK(sc);
1761 				nfe_enable_intr(sc);
1762 				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
1763 				NFE_UNLOCK(sc);
1764 			}
1765 		}
1766 #endif /* DEVICE_POLLING */
1767 		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
1768 		    (if_getcapabilities(ifp) & IFCAP_WOL_MAGIC) != 0)
1769 			if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
1770 		if ((mask & IFCAP_TXCSUM) != 0 &&
1771 		    (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) {
1772 			if_togglecapenable(ifp, IFCAP_TXCSUM);
1773 			if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
1774 				if_sethwassistbits(ifp, NFE_CSUM_FEATURES, 0);
1775 			else
1776 				if_sethwassistbits(ifp, 0, NFE_CSUM_FEATURES);
1777 		}
1778 		if ((mask & IFCAP_RXCSUM) != 0 &&
1779 		    (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0) {
1780 			if_togglecapenable(ifp, IFCAP_RXCSUM);
1781 			init++;
1782 		}
1783 		if ((mask & IFCAP_TSO4) != 0 &&
1784 		    (if_getcapabilities(ifp) & IFCAP_TSO4) != 0) {
1785 			if_togglecapenable(ifp, IFCAP_TSO4);
1786 			if ((IFCAP_TSO4 & if_getcapenable(ifp)) != 0)
1787 				if_sethwassistbits(ifp, CSUM_TSO, 0);
1788 			else
1789 				if_sethwassistbits(ifp, 0, CSUM_TSO);
1790 		}
1791 		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
1792 		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0)
1793 			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
1794 		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
1795 		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
1796 			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
1797 			if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0)
1798 				if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWTSO);
1799 			init++;
1800 		}
1801 		/*
1802 		 * XXX
1803 		 * It seems that VLAN stripping requires Rx checksum offload.
1804 		 * Unfortunately FreeBSD has no way to disable only Rx side
1805 		 * VLAN stripping. So when we know Rx checksum offload is
1806 		 * disabled turn entire hardware VLAN assist off.
1807 		 */
1808 		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) == 0) {
1809 			if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
1810 				init++;
1811 			if_setcapenablebit(ifp, 0,
1812 			    (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO));
1813 		}
1814 		if (init > 0 && (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
1815 			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1816 			nfe_init(sc);
1817 		}
1818 		if_vlancap(ifp);
1819 		break;
1820 	default:
1821 		error = ether_ioctl(ifp, cmd, data);
1822 		break;
1823 	}
1824 
1825 	return (error);
1826 }
1827 
1828 static int
nfe_intr(void * arg)1829 nfe_intr(void *arg)
1830 {
1831 	struct nfe_softc *sc;
1832 	uint32_t status;
1833 
1834 	sc = (struct nfe_softc *)arg;
1835 
1836 	status = NFE_READ(sc, sc->nfe_irq_status);
1837 	if (status == 0 || status == 0xffffffff)
1838 		return (FILTER_STRAY);
1839 	nfe_disable_intr(sc);
1840 	taskqueue_enqueue(sc->nfe_tq, &sc->nfe_int_task);
1841 
1842 	return (FILTER_HANDLED);
1843 }
1844 
1845 static void
nfe_int_task(void * arg,int pending)1846 nfe_int_task(void *arg, int pending)
1847 {
1848 	struct nfe_softc *sc = arg;
1849 	if_t ifp = sc->nfe_ifp;
1850 	uint32_t r;
1851 	int domore;
1852 
1853 	NFE_LOCK(sc);
1854 
1855 	if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
1856 		nfe_enable_intr(sc);
1857 		NFE_UNLOCK(sc);
1858 		return;	/* not for us */
1859 	}
1860 	NFE_WRITE(sc, sc->nfe_irq_status, r);
1861 
1862 	DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r);
1863 
1864 #ifdef DEVICE_POLLING
1865 	if (if_getcapenable(ifp) & IFCAP_POLLING) {
1866 		NFE_UNLOCK(sc);
1867 		return;
1868 	}
1869 #endif
1870 
1871 	if (r & NFE_IRQ_LINK) {
1872 		NFE_READ(sc, NFE_PHY_STATUS);
1873 		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1874 		DPRINTF(sc, "link state changed\n");
1875 	}
1876 
1877 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
1878 		NFE_UNLOCK(sc);
1879 		nfe_disable_intr(sc);
1880 		return;
1881 	}
1882 
1883 	domore = 0;
1884 	/* check Rx ring */
1885 	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
1886 		domore = nfe_jrxeof(sc, sc->nfe_process_limit, NULL);
1887 	else
1888 		domore = nfe_rxeof(sc, sc->nfe_process_limit, NULL);
1889 	/* check Tx ring */
1890 	nfe_txeof(sc);
1891 
1892 	if (!if_sendq_empty(ifp))
1893 		nfe_start_locked(ifp);
1894 
1895 	NFE_UNLOCK(sc);
1896 
1897 	if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) {
1898 		taskqueue_enqueue(sc->nfe_tq, &sc->nfe_int_task);
1899 		return;
1900 	}
1901 
1902 	/* Reenable interrupts. */
1903 	nfe_enable_intr(sc);
1904 }
1905 
1906 static __inline void
nfe_discard_rxbuf(struct nfe_softc * sc,int idx)1907 nfe_discard_rxbuf(struct nfe_softc *sc, int idx)
1908 {
1909 	struct nfe_desc32 *desc32;
1910 	struct nfe_desc64 *desc64;
1911 	struct nfe_rx_data *data;
1912 	struct mbuf *m;
1913 
1914 	data = &sc->rxq.data[idx];
1915 	m = data->m;
1916 
1917 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1918 		desc64 = &sc->rxq.desc64[idx];
1919 		/* VLAN packet may have overwritten it. */
1920 		desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
1921 		desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
1922 		desc64->length = htole16(m->m_len);
1923 		desc64->flags = htole16(NFE_RX_READY);
1924 	} else {
1925 		desc32 = &sc->rxq.desc32[idx];
1926 		desc32->length = htole16(m->m_len);
1927 		desc32->flags = htole16(NFE_RX_READY);
1928 	}
1929 }
1930 
1931 static __inline void
nfe_discard_jrxbuf(struct nfe_softc * sc,int idx)1932 nfe_discard_jrxbuf(struct nfe_softc *sc, int idx)
1933 {
1934 	struct nfe_desc32 *desc32;
1935 	struct nfe_desc64 *desc64;
1936 	struct nfe_rx_data *data;
1937 	struct mbuf *m;
1938 
1939 	data = &sc->jrxq.jdata[idx];
1940 	m = data->m;
1941 
1942 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1943 		desc64 = &sc->jrxq.jdesc64[idx];
1944 		/* VLAN packet may have overwritten it. */
1945 		desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
1946 		desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
1947 		desc64->length = htole16(m->m_len);
1948 		desc64->flags = htole16(NFE_RX_READY);
1949 	} else {
1950 		desc32 = &sc->jrxq.jdesc32[idx];
1951 		desc32->length = htole16(m->m_len);
1952 		desc32->flags = htole16(NFE_RX_READY);
1953 	}
1954 }
1955 
1956 static int
nfe_newbuf(struct nfe_softc * sc,int idx)1957 nfe_newbuf(struct nfe_softc *sc, int idx)
1958 {
1959 	struct nfe_rx_data *data;
1960 	struct nfe_desc32 *desc32;
1961 	struct nfe_desc64 *desc64;
1962 	struct mbuf *m;
1963 	bus_dma_segment_t segs[1];
1964 	bus_dmamap_t map;
1965 	int nsegs;
1966 
1967 	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1968 	if (m == NULL)
1969 		return (ENOBUFS);
1970 
1971 	m->m_len = m->m_pkthdr.len = MCLBYTES;
1972 	m_adj(m, ETHER_ALIGN);
1973 
1974 	if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map,
1975 	    m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
1976 		m_freem(m);
1977 		return (ENOBUFS);
1978 	}
1979 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1980 
1981 	data = &sc->rxq.data[idx];
1982 	if (data->m != NULL) {
1983 		bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
1984 		    BUS_DMASYNC_POSTREAD);
1985 		bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map);
1986 	}
1987 	map = data->rx_data_map;
1988 	data->rx_data_map = sc->rxq.rx_spare_map;
1989 	sc->rxq.rx_spare_map = map;
1990 	bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
1991 	    BUS_DMASYNC_PREREAD);
1992 	data->paddr = segs[0].ds_addr;
1993 	data->m = m;
1994 	/* update mapping address in h/w descriptor */
1995 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1996 		desc64 = &sc->rxq.desc64[idx];
1997 		desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
1998 		desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
1999 		desc64->length = htole16(segs[0].ds_len);
2000 		desc64->flags = htole16(NFE_RX_READY);
2001 	} else {
2002 		desc32 = &sc->rxq.desc32[idx];
2003 		desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2004 		desc32->length = htole16(segs[0].ds_len);
2005 		desc32->flags = htole16(NFE_RX_READY);
2006 	}
2007 
2008 	return (0);
2009 }
2010 
2011 static int
nfe_jnewbuf(struct nfe_softc * sc,int idx)2012 nfe_jnewbuf(struct nfe_softc *sc, int idx)
2013 {
2014 	struct nfe_rx_data *data;
2015 	struct nfe_desc32 *desc32;
2016 	struct nfe_desc64 *desc64;
2017 	struct mbuf *m;
2018 	bus_dma_segment_t segs[1];
2019 	bus_dmamap_t map;
2020 	int nsegs;
2021 
2022 	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
2023 	if (m == NULL)
2024 		return (ENOBUFS);
2025 	m->m_pkthdr.len = m->m_len = MJUM9BYTES;
2026 	m_adj(m, ETHER_ALIGN);
2027 
2028 	if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag,
2029 	    sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
2030 		m_freem(m);
2031 		return (ENOBUFS);
2032 	}
2033 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2034 
2035 	data = &sc->jrxq.jdata[idx];
2036 	if (data->m != NULL) {
2037 		bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
2038 		    BUS_DMASYNC_POSTREAD);
2039 		bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map);
2040 	}
2041 	map = data->rx_data_map;
2042 	data->rx_data_map = sc->jrxq.jrx_spare_map;
2043 	sc->jrxq.jrx_spare_map = map;
2044 	bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
2045 	    BUS_DMASYNC_PREREAD);
2046 	data->paddr = segs[0].ds_addr;
2047 	data->m = m;
2048 	/* update mapping address in h/w descriptor */
2049 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
2050 		desc64 = &sc->jrxq.jdesc64[idx];
2051 		desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
2052 		desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2053 		desc64->length = htole16(segs[0].ds_len);
2054 		desc64->flags = htole16(NFE_RX_READY);
2055 	} else {
2056 		desc32 = &sc->jrxq.jdesc32[idx];
2057 		desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2058 		desc32->length = htole16(segs[0].ds_len);
2059 		desc32->flags = htole16(NFE_RX_READY);
2060 	}
2061 
2062 	return (0);
2063 }
2064 
2065 static int
nfe_rxeof(struct nfe_softc * sc,int count,int * rx_npktsp)2066 nfe_rxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
2067 {
2068 	if_t ifp = sc->nfe_ifp;
2069 	struct nfe_desc32 *desc32;
2070 	struct nfe_desc64 *desc64;
2071 	struct nfe_rx_data *data;
2072 	struct mbuf *m;
2073 	uint16_t flags;
2074 	int len, prog, rx_npkts;
2075 	uint32_t vtag = 0;
2076 
2077 	rx_npkts = 0;
2078 	NFE_LOCK_ASSERT(sc);
2079 
2080 	bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
2081 	    BUS_DMASYNC_POSTREAD);
2082 
2083 	for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) {
2084 		if (count <= 0)
2085 			break;
2086 		count--;
2087 
2088 		data = &sc->rxq.data[sc->rxq.cur];
2089 
2090 		if (sc->nfe_flags & NFE_40BIT_ADDR) {
2091 			desc64 = &sc->rxq.desc64[sc->rxq.cur];
2092 			vtag = le32toh(desc64->physaddr[1]);
2093 			flags = le16toh(desc64->flags);
2094 			len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
2095 		} else {
2096 			desc32 = &sc->rxq.desc32[sc->rxq.cur];
2097 			flags = le16toh(desc32->flags);
2098 			len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
2099 		}
2100 
2101 		if (flags & NFE_RX_READY)
2102 			break;
2103 		prog++;
2104 		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2105 			if (!(flags & NFE_RX_VALID_V1)) {
2106 				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2107 				nfe_discard_rxbuf(sc, sc->rxq.cur);
2108 				continue;
2109 			}
2110 			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
2111 				flags &= ~NFE_RX_ERROR;
2112 				len--;	/* fix buffer length */
2113 			}
2114 		} else {
2115 			if (!(flags & NFE_RX_VALID_V2)) {
2116 				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2117 				nfe_discard_rxbuf(sc, sc->rxq.cur);
2118 				continue;
2119 			}
2120 
2121 			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
2122 				flags &= ~NFE_RX_ERROR;
2123 				len--;	/* fix buffer length */
2124 			}
2125 		}
2126 
2127 		if (flags & NFE_RX_ERROR) {
2128 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2129 			nfe_discard_rxbuf(sc, sc->rxq.cur);
2130 			continue;
2131 		}
2132 
2133 		m = data->m;
2134 		if (nfe_newbuf(sc, sc->rxq.cur) != 0) {
2135 			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
2136 			nfe_discard_rxbuf(sc, sc->rxq.cur);
2137 			continue;
2138 		}
2139 
2140 		if ((vtag & NFE_RX_VTAG) != 0 &&
2141 		    (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
2142 			m->m_pkthdr.ether_vtag = vtag & 0xffff;
2143 			m->m_flags |= M_VLANTAG;
2144 		}
2145 
2146 		m->m_pkthdr.len = m->m_len = len;
2147 		m->m_pkthdr.rcvif = ifp;
2148 
2149 		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
2150 			if ((flags & NFE_RX_IP_CSUMOK) != 0) {
2151 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2152 				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2153 				if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
2154 				    (flags & NFE_RX_UDP_CSUMOK) != 0) {
2155 					m->m_pkthdr.csum_flags |=
2156 					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2157 					m->m_pkthdr.csum_data = 0xffff;
2158 				}
2159 			}
2160 		}
2161 
2162 		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2163 
2164 		NFE_UNLOCK(sc);
2165 		if_input(ifp, m);
2166 		NFE_LOCK(sc);
2167 		rx_npkts++;
2168 	}
2169 
2170 	if (prog > 0)
2171 		bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
2172 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2173 
2174 	if (rx_npktsp != NULL)
2175 		*rx_npktsp = rx_npkts;
2176 	return (count > 0 ? 0 : EAGAIN);
2177 }
2178 
2179 static int
nfe_jrxeof(struct nfe_softc * sc,int count,int * rx_npktsp)2180 nfe_jrxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
2181 {
2182 	if_t ifp = sc->nfe_ifp;
2183 	struct nfe_desc32 *desc32;
2184 	struct nfe_desc64 *desc64;
2185 	struct nfe_rx_data *data;
2186 	struct mbuf *m;
2187 	uint16_t flags;
2188 	int len, prog, rx_npkts;
2189 	uint32_t vtag = 0;
2190 
2191 	rx_npkts = 0;
2192 	NFE_LOCK_ASSERT(sc);
2193 
2194 	bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2195 	    BUS_DMASYNC_POSTREAD);
2196 
2197 	for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT),
2198 	    vtag = 0) {
2199 		if (count <= 0)
2200 			break;
2201 		count--;
2202 
2203 		data = &sc->jrxq.jdata[sc->jrxq.jcur];
2204 
2205 		if (sc->nfe_flags & NFE_40BIT_ADDR) {
2206 			desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur];
2207 			vtag = le32toh(desc64->physaddr[1]);
2208 			flags = le16toh(desc64->flags);
2209 			len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
2210 		} else {
2211 			desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur];
2212 			flags = le16toh(desc32->flags);
2213 			len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
2214 		}
2215 
2216 		if (flags & NFE_RX_READY)
2217 			break;
2218 		prog++;
2219 		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2220 			if (!(flags & NFE_RX_VALID_V1)) {
2221 				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2222 				nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2223 				continue;
2224 			}
2225 			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
2226 				flags &= ~NFE_RX_ERROR;
2227 				len--;	/* fix buffer length */
2228 			}
2229 		} else {
2230 			if (!(flags & NFE_RX_VALID_V2)) {
2231 				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2232 				nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2233 				continue;
2234 			}
2235 
2236 			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
2237 				flags &= ~NFE_RX_ERROR;
2238 				len--;	/* fix buffer length */
2239 			}
2240 		}
2241 
2242 		if (flags & NFE_RX_ERROR) {
2243 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2244 			nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2245 			continue;
2246 		}
2247 
2248 		m = data->m;
2249 		if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) {
2250 			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
2251 			nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2252 			continue;
2253 		}
2254 
2255 		if ((vtag & NFE_RX_VTAG) != 0 &&
2256 		    (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
2257 			m->m_pkthdr.ether_vtag = vtag & 0xffff;
2258 			m->m_flags |= M_VLANTAG;
2259 		}
2260 
2261 		m->m_pkthdr.len = m->m_len = len;
2262 		m->m_pkthdr.rcvif = ifp;
2263 
2264 		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
2265 			if ((flags & NFE_RX_IP_CSUMOK) != 0) {
2266 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2267 				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2268 				if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
2269 				    (flags & NFE_RX_UDP_CSUMOK) != 0) {
2270 					m->m_pkthdr.csum_flags |=
2271 					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2272 					m->m_pkthdr.csum_data = 0xffff;
2273 				}
2274 			}
2275 		}
2276 
2277 		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2278 
2279 		NFE_UNLOCK(sc);
2280 		if_input(ifp, m);
2281 		NFE_LOCK(sc);
2282 		rx_npkts++;
2283 	}
2284 
2285 	if (prog > 0)
2286 		bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2287 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2288 
2289 	if (rx_npktsp != NULL)
2290 		*rx_npktsp = rx_npkts;
2291 	return (count > 0 ? 0 : EAGAIN);
2292 }
2293 
2294 static void
nfe_txeof(struct nfe_softc * sc)2295 nfe_txeof(struct nfe_softc *sc)
2296 {
2297 	if_t ifp = sc->nfe_ifp;
2298 	struct nfe_desc32 *desc32;
2299 	struct nfe_desc64 *desc64;
2300 	struct nfe_tx_data *data = NULL;
2301 	uint16_t flags;
2302 	int cons, prog;
2303 
2304 	NFE_LOCK_ASSERT(sc);
2305 
2306 	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2307 	    BUS_DMASYNC_POSTREAD);
2308 
2309 	prog = 0;
2310 	for (cons = sc->txq.next; cons != sc->txq.cur;
2311 	    NFE_INC(cons, NFE_TX_RING_COUNT)) {
2312 		if (sc->nfe_flags & NFE_40BIT_ADDR) {
2313 			desc64 = &sc->txq.desc64[cons];
2314 			flags = le16toh(desc64->flags);
2315 		} else {
2316 			desc32 = &sc->txq.desc32[cons];
2317 			flags = le16toh(desc32->flags);
2318 		}
2319 
2320 		if (flags & NFE_TX_VALID)
2321 			break;
2322 
2323 		prog++;
2324 		sc->txq.queued--;
2325 		data = &sc->txq.data[cons];
2326 
2327 		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2328 			if ((flags & NFE_TX_LASTFRAG_V1) == 0)
2329 				continue;
2330 			if ((flags & NFE_TX_ERROR_V1) != 0) {
2331 				device_printf(sc->nfe_dev,
2332 				    "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR);
2333 
2334 				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2335 			} else
2336 				if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2337 		} else {
2338 			if ((flags & NFE_TX_LASTFRAG_V2) == 0)
2339 				continue;
2340 			if ((flags & NFE_TX_ERROR_V2) != 0) {
2341 				device_printf(sc->nfe_dev,
2342 				    "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR);
2343 				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2344 			} else
2345 				if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2346 		}
2347 
2348 		/* last fragment of the mbuf chain transmitted */
2349 		KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__));
2350 		bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map,
2351 		    BUS_DMASYNC_POSTWRITE);
2352 		bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map);
2353 		m_freem(data->m);
2354 		data->m = NULL;
2355 	}
2356 
2357 	if (prog > 0) {
2358 		sc->nfe_force_tx = 0;
2359 		sc->txq.next = cons;
2360 		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2361 		if (sc->txq.queued == 0)
2362 			sc->nfe_watchdog_timer = 0;
2363 	}
2364 }
2365 
2366 static int
nfe_encap(struct nfe_softc * sc,struct mbuf ** m_head)2367 nfe_encap(struct nfe_softc *sc, struct mbuf **m_head)
2368 {
2369 	struct nfe_desc32 *desc32 = NULL;
2370 	struct nfe_desc64 *desc64 = NULL;
2371 	bus_dmamap_t map;
2372 	bus_dma_segment_t segs[NFE_MAX_SCATTER];
2373 	int error, i, nsegs, prod, si;
2374 	uint32_t tsosegsz;
2375 	uint16_t cflags, flags;
2376 	struct mbuf *m;
2377 
2378 	prod = si = sc->txq.cur;
2379 	map = sc->txq.data[prod].tx_data_map;
2380 
2381 	error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs,
2382 	    &nsegs, BUS_DMA_NOWAIT);
2383 	if (error == EFBIG) {
2384 		m = m_collapse(*m_head, M_NOWAIT, NFE_MAX_SCATTER);
2385 		if (m == NULL) {
2386 			m_freem(*m_head);
2387 			*m_head = NULL;
2388 			return (ENOBUFS);
2389 		}
2390 		*m_head = m;
2391 		error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map,
2392 		    *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2393 		if (error != 0) {
2394 			m_freem(*m_head);
2395 			*m_head = NULL;
2396 			return (ENOBUFS);
2397 		}
2398 	} else if (error != 0)
2399 		return (error);
2400 	if (nsegs == 0) {
2401 		m_freem(*m_head);
2402 		*m_head = NULL;
2403 		return (EIO);
2404 	}
2405 
2406 	if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) {
2407 		bus_dmamap_unload(sc->txq.tx_data_tag, map);
2408 		return (ENOBUFS);
2409 	}
2410 
2411 	m = *m_head;
2412 	cflags = flags = 0;
2413 	tsosegsz = 0;
2414 	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2415 		tsosegsz = (uint32_t)m->m_pkthdr.tso_segsz <<
2416 		    NFE_TX_TSO_SHIFT;
2417 		cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
2418 		cflags |= NFE_TX_TSO;
2419 	} else if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) {
2420 		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2421 			cflags |= NFE_TX_IP_CSUM;
2422 		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2423 			cflags |= NFE_TX_TCP_UDP_CSUM;
2424 		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2425 			cflags |= NFE_TX_TCP_UDP_CSUM;
2426 	}
2427 
2428 	for (i = 0; i < nsegs; i++) {
2429 		if (sc->nfe_flags & NFE_40BIT_ADDR) {
2430 			desc64 = &sc->txq.desc64[prod];
2431 			desc64->physaddr[0] =
2432 			    htole32(NFE_ADDR_HI(segs[i].ds_addr));
2433 			desc64->physaddr[1] =
2434 			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
2435 			desc64->vtag = 0;
2436 			desc64->length = htole16(segs[i].ds_len - 1);
2437 			desc64->flags = htole16(flags);
2438 		} else {
2439 			desc32 = &sc->txq.desc32[prod];
2440 			desc32->physaddr =
2441 			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
2442 			desc32->length = htole16(segs[i].ds_len - 1);
2443 			desc32->flags = htole16(flags);
2444 		}
2445 
2446 		/*
2447 		 * Setting of the valid bit in the first descriptor is
2448 		 * deferred until the whole chain is fully setup.
2449 		 */
2450 		flags |= NFE_TX_VALID;
2451 
2452 		sc->txq.queued++;
2453 		NFE_INC(prod, NFE_TX_RING_COUNT);
2454 	}
2455 
2456 	/*
2457 	 * the whole mbuf chain has been DMA mapped, fix last/first descriptor.
2458 	 * csum flags, vtag and TSO belong to the first fragment only.
2459 	 */
2460 	if (sc->nfe_flags & NFE_40BIT_ADDR) {
2461 		desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
2462 		desc64 = &sc->txq.desc64[si];
2463 		if ((m->m_flags & M_VLANTAG) != 0)
2464 			desc64->vtag = htole32(NFE_TX_VTAG |
2465 			    m->m_pkthdr.ether_vtag);
2466 		if (tsosegsz != 0) {
2467 			/*
2468 			 * XXX
2469 			 * The following indicates the descriptor element
2470 			 * is a 32bit quantity.
2471 			 */
2472 			desc64->length |= htole16((uint16_t)tsosegsz);
2473 			desc64->flags |= htole16(tsosegsz >> 16);
2474 		}
2475 		/*
2476 		 * finally, set the valid/checksum/TSO bit in the first
2477 		 * descriptor.
2478 		 */
2479 		desc64->flags |= htole16(NFE_TX_VALID | cflags);
2480 	} else {
2481 		if (sc->nfe_flags & NFE_JUMBO_SUP)
2482 			desc32->flags |= htole16(NFE_TX_LASTFRAG_V2);
2483 		else
2484 			desc32->flags |= htole16(NFE_TX_LASTFRAG_V1);
2485 		desc32 = &sc->txq.desc32[si];
2486 		if (tsosegsz != 0) {
2487 			/*
2488 			 * XXX
2489 			 * The following indicates the descriptor element
2490 			 * is a 32bit quantity.
2491 			 */
2492 			desc32->length |= htole16((uint16_t)tsosegsz);
2493 			desc32->flags |= htole16(tsosegsz >> 16);
2494 		}
2495 		/*
2496 		 * finally, set the valid/checksum/TSO bit in the first
2497 		 * descriptor.
2498 		 */
2499 		desc32->flags |= htole16(NFE_TX_VALID | cflags);
2500 	}
2501 
2502 	sc->txq.cur = prod;
2503 	prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT;
2504 	sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map;
2505 	sc->txq.data[prod].tx_data_map = map;
2506 	sc->txq.data[prod].m = m;
2507 
2508 	bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);
2509 
2510 	return (0);
2511 }
2512 
2513 struct nfe_hash_maddr_ctx {
2514 	uint8_t addr[ETHER_ADDR_LEN];
2515 	uint8_t mask[ETHER_ADDR_LEN];
2516 };
2517 
2518 static u_int
nfe_hash_maddr(void * arg,struct sockaddr_dl * sdl,u_int cnt)2519 nfe_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2520 {
2521 	struct nfe_hash_maddr_ctx *ctx = arg;
2522 	uint8_t *addrp, mcaddr;
2523 	int j;
2524 
2525 	addrp = LLADDR(sdl);
2526 	for (j = 0; j < ETHER_ADDR_LEN; j++) {
2527 		mcaddr = addrp[j];
2528 		ctx->addr[j] &= mcaddr;
2529 		ctx->mask[j] &= ~mcaddr;
2530 	}
2531 
2532 	return (1);
2533 }
2534 
2535 static void
nfe_setmulti(struct nfe_softc * sc)2536 nfe_setmulti(struct nfe_softc *sc)
2537 {
2538 	if_t ifp = sc->nfe_ifp;
2539 	struct nfe_hash_maddr_ctx ctx;
2540 	uint32_t filter;
2541 	uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = {
2542 		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2543 	};
2544 	int i;
2545 
2546 	NFE_LOCK_ASSERT(sc);
2547 
2548 	if ((if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
2549 		bzero(ctx.addr, ETHER_ADDR_LEN);
2550 		bzero(ctx.mask, ETHER_ADDR_LEN);
2551 		goto done;
2552 	}
2553 
2554 	bcopy(etherbroadcastaddr, ctx.addr, ETHER_ADDR_LEN);
2555 	bcopy(etherbroadcastaddr, ctx.mask, ETHER_ADDR_LEN);
2556 
2557 	if_foreach_llmaddr(ifp, nfe_hash_maddr, &ctx);
2558 
2559 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
2560 		ctx.mask[i] |= ctx.addr[i];
2561 	}
2562 
2563 done:
2564 	ctx.addr[0] |= 0x01;	/* make sure multicast bit is set */
2565 
2566 	NFE_WRITE(sc, NFE_MULTIADDR_HI, ctx.addr[3] << 24 | ctx.addr[2] << 16 |
2567 	    ctx.addr[1] << 8 | ctx.addr[0]);
2568 	NFE_WRITE(sc, NFE_MULTIADDR_LO,
2569 	    ctx.addr[5] <<  8 | ctx.addr[4]);
2570 	NFE_WRITE(sc, NFE_MULTIMASK_HI, ctx.mask[3] << 24 | ctx.mask[2] << 16 |
2571 	    ctx.mask[1] << 8 | ctx.mask[0]);
2572 	NFE_WRITE(sc, NFE_MULTIMASK_LO,
2573 	    ctx.mask[5] <<  8 | ctx.mask[4]);
2574 
2575 	filter = NFE_READ(sc, NFE_RXFILTER);
2576 	filter &= NFE_PFF_RX_PAUSE;
2577 	filter |= NFE_RXFILTER_MAGIC;
2578 	filter |= (if_getflags(ifp) & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M;
2579 	NFE_WRITE(sc, NFE_RXFILTER, filter);
2580 }
2581 
2582 static void
nfe_start(if_t ifp)2583 nfe_start(if_t ifp)
2584 {
2585 	struct nfe_softc *sc = if_getsoftc(ifp);
2586 
2587 	NFE_LOCK(sc);
2588 	nfe_start_locked(ifp);
2589 	NFE_UNLOCK(sc);
2590 }
2591 
2592 static void
nfe_start_locked(if_t ifp)2593 nfe_start_locked(if_t ifp)
2594 {
2595 	struct nfe_softc *sc = if_getsoftc(ifp);
2596 	struct mbuf *m0;
2597 	int enq = 0;
2598 
2599 	NFE_LOCK_ASSERT(sc);
2600 
2601 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2602 	    IFF_DRV_RUNNING || sc->nfe_link == 0)
2603 		return;
2604 
2605 	while (!if_sendq_empty(ifp)) {
2606 		m0 = if_dequeue(ifp);
2607 
2608 		if (m0 == NULL)
2609 			break;
2610 
2611 		if (nfe_encap(sc, &m0) != 0) {
2612 			if (m0 == NULL)
2613 				break;
2614 			if_sendq_prepend(ifp, m0);
2615 			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
2616 			break;
2617 		}
2618 		enq++;
2619 		ether_bpf_mtap_if(ifp, m0);
2620 	}
2621 
2622 	if (enq > 0) {
2623 		bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2624 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2625 
2626 		/* kick Tx */
2627 		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2628 
2629 		/*
2630 		 * Set a timeout in case the chip goes out to lunch.
2631 		 */
2632 		sc->nfe_watchdog_timer = 5;
2633 	}
2634 }
2635 
2636 static void
nfe_watchdog(if_t ifp)2637 nfe_watchdog(if_t ifp)
2638 {
2639 	struct nfe_softc *sc = if_getsoftc(ifp);
2640 
2641 	if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer)
2642 		return;
2643 
2644 	/* Check if we've lost Tx completion interrupt. */
2645 	nfe_txeof(sc);
2646 	if (sc->txq.queued == 0) {
2647 		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
2648 		    "-- recovering\n");
2649 		if (!if_sendq_empty(ifp))
2650 			nfe_start_locked(ifp);
2651 		return;
2652 	}
2653 	/* Check if we've lost start Tx command. */
2654 	sc->nfe_force_tx++;
2655 	if (sc->nfe_force_tx <= 3) {
2656 		/*
2657 		 * If this is the case for watchdog timeout, the following
2658 		 * code should go to nfe_txeof().
2659 		 */
2660 		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2661 		return;
2662 	}
2663 	sc->nfe_force_tx = 0;
2664 
2665 	if_printf(ifp, "watchdog timeout\n");
2666 
2667 	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
2668 	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2669 	nfe_init_locked(sc);
2670 }
2671 
2672 static void
nfe_init(void * xsc)2673 nfe_init(void *xsc)
2674 {
2675 	struct nfe_softc *sc = xsc;
2676 
2677 	NFE_LOCK(sc);
2678 	nfe_init_locked(sc);
2679 	NFE_UNLOCK(sc);
2680 }
2681 
2682 static void
nfe_init_locked(void * xsc)2683 nfe_init_locked(void *xsc)
2684 {
2685 	struct nfe_softc *sc = xsc;
2686 	if_t ifp = sc->nfe_ifp;
2687 	struct mii_data *mii;
2688 	uint32_t val;
2689 	int error;
2690 
2691 	NFE_LOCK_ASSERT(sc);
2692 
2693 	mii = device_get_softc(sc->nfe_miibus);
2694 
2695 	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
2696 		return;
2697 
2698 	nfe_stop(ifp);
2699 
2700 	sc->nfe_framesize = if_getmtu(ifp) + NFE_RX_HEADERS;
2701 
2702 	nfe_init_tx_ring(sc, &sc->txq);
2703 	if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN))
2704 		error = nfe_init_jrx_ring(sc, &sc->jrxq);
2705 	else
2706 		error = nfe_init_rx_ring(sc, &sc->rxq);
2707 	if (error != 0) {
2708 		device_printf(sc->nfe_dev,
2709 		    "initialization failed: no memory for rx buffers\n");
2710 		nfe_stop(ifp);
2711 		return;
2712 	}
2713 
2714 	val = 0;
2715 	if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0)
2716 		val |= NFE_MAC_ADDR_INORDER;
2717 	NFE_WRITE(sc, NFE_TX_UNK, val);
2718 	NFE_WRITE(sc, NFE_STATUS, 0);
2719 
2720 	if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0)
2721 		NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE);
2722 
2723 	sc->rxtxctl = NFE_RXTX_BIT2;
2724 	if (sc->nfe_flags & NFE_40BIT_ADDR)
2725 		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
2726 	else if (sc->nfe_flags & NFE_JUMBO_SUP)
2727 		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
2728 
2729 	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
2730 		sc->rxtxctl |= NFE_RXTX_RXCSUM;
2731 	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
2732 		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;
2733 
2734 	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
2735 	DELAY(10);
2736 	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2737 
2738 	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
2739 		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
2740 	else
2741 		NFE_WRITE(sc, NFE_VTAG_CTL, 0);
2742 
2743 	NFE_WRITE(sc, NFE_SETUP_R6, 0);
2744 
2745 	/* set MAC address */
2746 	nfe_set_macaddr(sc, if_getlladdr(ifp));
2747 
2748 	/* tell MAC where rings are in memory */
2749 	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) {
2750 		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2751 		    NFE_ADDR_HI(sc->jrxq.jphysaddr));
2752 		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2753 		    NFE_ADDR_LO(sc->jrxq.jphysaddr));
2754 	} else {
2755 		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2756 		    NFE_ADDR_HI(sc->rxq.physaddr));
2757 		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2758 		    NFE_ADDR_LO(sc->rxq.physaddr));
2759 	}
2760 	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr));
2761 	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));
2762 
2763 	NFE_WRITE(sc, NFE_RING_SIZE,
2764 	    (NFE_RX_RING_COUNT - 1) << 16 |
2765 	    (NFE_TX_RING_COUNT - 1));
2766 
2767 	NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize);
2768 
2769 	/* force MAC to wakeup */
2770 	val = NFE_READ(sc, NFE_PWR_STATE);
2771 	if ((val & NFE_PWR_WAKEUP) == 0)
2772 		NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP);
2773 	DELAY(10);
2774 	val = NFE_READ(sc, NFE_PWR_STATE);
2775 	NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID);
2776 
2777 #if 1
2778 	/* configure interrupts coalescing/mitigation */
2779 	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
2780 #else
2781 	/* no interrupt mitigation: one interrupt per packet */
2782 	NFE_WRITE(sc, NFE_IMTIMER, 970);
2783 #endif
2784 
2785 	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100);
2786 	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
2787 	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
2788 
2789 	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
2790 	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
2791 
2792 	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
2793 	/* Disable WOL. */
2794 	NFE_WRITE(sc, NFE_WOL_CTL, 0);
2795 
2796 	sc->rxtxctl &= ~NFE_RXTX_BIT2;
2797 	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2798 	DELAY(10);
2799 	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
2800 
2801 	/* set Rx filter */
2802 	nfe_setmulti(sc);
2803 
2804 	/* enable Rx */
2805 	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
2806 
2807 	/* enable Tx */
2808 	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
2809 
2810 	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
2811 
2812 	/* Clear hardware stats. */
2813 	nfe_stats_clear(sc);
2814 
2815 #ifdef DEVICE_POLLING
2816 	if (if_getcapenable(ifp) & IFCAP_POLLING)
2817 		nfe_disable_intr(sc);
2818 	else
2819 #endif
2820 	nfe_set_intr(sc);
2821 	nfe_enable_intr(sc); /* enable interrupts */
2822 
2823 	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
2824 	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2825 
2826 	sc->nfe_link = 0;
2827 	mii_mediachg(mii);
2828 
2829 	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2830 }
2831 
2832 static void
nfe_stop(if_t ifp)2833 nfe_stop(if_t ifp)
2834 {
2835 	struct nfe_softc *sc = if_getsoftc(ifp);
2836 	struct nfe_rx_ring *rx_ring;
2837 	struct nfe_jrx_ring *jrx_ring;
2838 	struct nfe_tx_ring *tx_ring;
2839 	struct nfe_rx_data *rdata;
2840 	struct nfe_tx_data *tdata;
2841 	int i;
2842 
2843 	NFE_LOCK_ASSERT(sc);
2844 
2845 	sc->nfe_watchdog_timer = 0;
2846 	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
2847 
2848 	callout_stop(&sc->nfe_stat_ch);
2849 
2850 	/* abort Tx */
2851 	NFE_WRITE(sc, NFE_TX_CTL, 0);
2852 
2853 	/* disable Rx */
2854 	NFE_WRITE(sc, NFE_RX_CTL, 0);
2855 
2856 	/* disable interrupts */
2857 	nfe_disable_intr(sc);
2858 
2859 	sc->nfe_link = 0;
2860 
2861 	/* free Rx and Tx mbufs still in the queues. */
2862 	rx_ring = &sc->rxq;
2863 	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
2864 		rdata = &rx_ring->data[i];
2865 		if (rdata->m != NULL) {
2866 			bus_dmamap_sync(rx_ring->rx_data_tag,
2867 			    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2868 			bus_dmamap_unload(rx_ring->rx_data_tag,
2869 			    rdata->rx_data_map);
2870 			m_freem(rdata->m);
2871 			rdata->m = NULL;
2872 		}
2873 	}
2874 
2875 	if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) {
2876 		jrx_ring = &sc->jrxq;
2877 		for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
2878 			rdata = &jrx_ring->jdata[i];
2879 			if (rdata->m != NULL) {
2880 				bus_dmamap_sync(jrx_ring->jrx_data_tag,
2881 				    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2882 				bus_dmamap_unload(jrx_ring->jrx_data_tag,
2883 				    rdata->rx_data_map);
2884 				m_freem(rdata->m);
2885 				rdata->m = NULL;
2886 			}
2887 		}
2888 	}
2889 
2890 	tx_ring = &sc->txq;
2891 	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
2892 		tdata = &tx_ring->data[i];
2893 		if (tdata->m != NULL) {
2894 			bus_dmamap_sync(tx_ring->tx_data_tag,
2895 			    tdata->tx_data_map, BUS_DMASYNC_POSTWRITE);
2896 			bus_dmamap_unload(tx_ring->tx_data_tag,
2897 			    tdata->tx_data_map);
2898 			m_freem(tdata->m);
2899 			tdata->m = NULL;
2900 		}
2901 	}
2902 	/* Update hardware stats. */
2903 	nfe_stats_update(sc);
2904 }
2905 
2906 static int
nfe_ifmedia_upd(if_t ifp)2907 nfe_ifmedia_upd(if_t ifp)
2908 {
2909 	struct nfe_softc *sc = if_getsoftc(ifp);
2910 	struct mii_data *mii;
2911 
2912 	NFE_LOCK(sc);
2913 	mii = device_get_softc(sc->nfe_miibus);
2914 	mii_mediachg(mii);
2915 	NFE_UNLOCK(sc);
2916 
2917 	return (0);
2918 }
2919 
2920 static void
nfe_ifmedia_sts(if_t ifp,struct ifmediareq * ifmr)2921 nfe_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
2922 {
2923 	struct nfe_softc *sc;
2924 	struct mii_data *mii;
2925 
2926 	sc = if_getsoftc(ifp);
2927 
2928 	NFE_LOCK(sc);
2929 	mii = device_get_softc(sc->nfe_miibus);
2930 	mii_pollstat(mii);
2931 
2932 	ifmr->ifm_active = mii->mii_media_active;
2933 	ifmr->ifm_status = mii->mii_media_status;
2934 	NFE_UNLOCK(sc);
2935 }
2936 
2937 void
nfe_tick(void * xsc)2938 nfe_tick(void *xsc)
2939 {
2940 	struct nfe_softc *sc;
2941 	struct mii_data *mii;
2942 	if_t ifp;
2943 
2944 	sc = (struct nfe_softc *)xsc;
2945 
2946 	NFE_LOCK_ASSERT(sc);
2947 
2948 	ifp = sc->nfe_ifp;
2949 
2950 	mii = device_get_softc(sc->nfe_miibus);
2951 	mii_tick(mii);
2952 	nfe_stats_update(sc);
2953 	nfe_watchdog(ifp);
2954 	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2955 }
2956 
2957 static int
nfe_shutdown(device_t dev)2958 nfe_shutdown(device_t dev)
2959 {
2960 
2961 	return (nfe_suspend(dev));
2962 }
2963 
2964 static void
nfe_get_macaddr(struct nfe_softc * sc,uint8_t * addr)2965 nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
2966 {
2967 	uint32_t val;
2968 
2969 	if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
2970 		val = NFE_READ(sc, NFE_MACADDR_LO);
2971 		addr[0] = (val >> 8) & 0xff;
2972 		addr[1] = (val & 0xff);
2973 
2974 		val = NFE_READ(sc, NFE_MACADDR_HI);
2975 		addr[2] = (val >> 24) & 0xff;
2976 		addr[3] = (val >> 16) & 0xff;
2977 		addr[4] = (val >>  8) & 0xff;
2978 		addr[5] = (val & 0xff);
2979 	} else {
2980 		val = NFE_READ(sc, NFE_MACADDR_LO);
2981 		addr[5] = (val >> 8) & 0xff;
2982 		addr[4] = (val & 0xff);
2983 
2984 		val = NFE_READ(sc, NFE_MACADDR_HI);
2985 		addr[3] = (val >> 24) & 0xff;
2986 		addr[2] = (val >> 16) & 0xff;
2987 		addr[1] = (val >>  8) & 0xff;
2988 		addr[0] = (val & 0xff);
2989 	}
2990 }
2991 
2992 static void
nfe_set_macaddr(struct nfe_softc * sc,uint8_t * addr)2993 nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr)
2994 {
2995 
2996 	NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] <<  8 | addr[4]);
2997 	NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
2998 	    addr[1] << 8 | addr[0]);
2999 }
3000 
3001 /*
3002  * Map a single buffer address.
3003  */
3004 
3005 static void
nfe_dma_map_segs(void * arg,bus_dma_segment_t * segs,int nseg,int error)3006 nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3007 {
3008 	struct nfe_dmamap_arg *ctx;
3009 
3010 	if (error != 0)
3011 		return;
3012 
3013 	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
3014 
3015 	ctx = (struct nfe_dmamap_arg *)arg;
3016 	ctx->nfe_busaddr = segs[0].ds_addr;
3017 }
3018 
3019 static int
sysctl_int_range(SYSCTL_HANDLER_ARGS,int low,int high)3020 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3021 {
3022 	int error, value;
3023 
3024 	if (!arg1)
3025 		return (EINVAL);
3026 	value = *(int *)arg1;
3027 	error = sysctl_handle_int(oidp, &value, 0, req);
3028 	if (error || !req->newptr)
3029 		return (error);
3030 	if (value < low || value > high)
3031 		return (EINVAL);
3032 	*(int *)arg1 = value;
3033 
3034 	return (0);
3035 }
3036 
3037 static int
sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS)3038 sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS)
3039 {
3040 
3041 	return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN,
3042 	    NFE_PROC_MAX));
3043 }
3044 
3045 #define	NFE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
3046 	    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
3047 #define	NFE_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
3048 	    SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
3049 
3050 static void
nfe_sysctl_node(struct nfe_softc * sc)3051 nfe_sysctl_node(struct nfe_softc *sc)
3052 {
3053 	struct sysctl_ctx_list *ctx;
3054 	struct sysctl_oid_list *child, *parent;
3055 	struct sysctl_oid *tree;
3056 	struct nfe_hw_stats *stats;
3057 	int error;
3058 
3059 	stats = &sc->nfe_stats;
3060 	ctx = device_get_sysctl_ctx(sc->nfe_dev);
3061 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nfe_dev));
3062 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
3063 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
3064 	    &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I",
3065 	    "max number of Rx events to process");
3066 
3067 	sc->nfe_process_limit = NFE_PROC_DEFAULT;
3068 	error = resource_int_value(device_get_name(sc->nfe_dev),
3069 	    device_get_unit(sc->nfe_dev), "process_limit",
3070 	    &sc->nfe_process_limit);
3071 	if (error == 0) {
3072 		if (sc->nfe_process_limit < NFE_PROC_MIN ||
3073 		    sc->nfe_process_limit > NFE_PROC_MAX) {
3074 			device_printf(sc->nfe_dev,
3075 			    "process_limit value out of range; "
3076 			    "using default: %d\n", NFE_PROC_DEFAULT);
3077 			sc->nfe_process_limit = NFE_PROC_DEFAULT;
3078 		}
3079 	}
3080 
3081 	if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
3082 		return;
3083 
3084 	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
3085 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "NFE statistics");
3086 	parent = SYSCTL_CHILDREN(tree);
3087 
3088 	/* Rx statistics. */
3089 	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
3090 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Rx MAC statistics");
3091 	child = SYSCTL_CHILDREN(tree);
3092 
3093 	NFE_SYSCTL_STAT_ADD32(ctx, child, "frame_errors",
3094 	    &stats->rx_frame_errors, "Framing Errors");
3095 	NFE_SYSCTL_STAT_ADD32(ctx, child, "extra_bytes",
3096 	    &stats->rx_extra_bytes, "Extra Bytes");
3097 	NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
3098 	    &stats->rx_late_cols, "Late Collisions");
3099 	NFE_SYSCTL_STAT_ADD32(ctx, child, "runts",
3100 	    &stats->rx_runts, "Runts");
3101 	NFE_SYSCTL_STAT_ADD32(ctx, child, "jumbos",
3102 	    &stats->rx_jumbos, "Jumbos");
3103 	NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_overuns",
3104 	    &stats->rx_fifo_overuns, "FIFO Overruns");
3105 	NFE_SYSCTL_STAT_ADD32(ctx, child, "crc_errors",
3106 	    &stats->rx_crc_errors, "CRC Errors");
3107 	NFE_SYSCTL_STAT_ADD32(ctx, child, "fae",
3108 	    &stats->rx_fae, "Frame Alignment Errors");
3109 	NFE_SYSCTL_STAT_ADD32(ctx, child, "len_errors",
3110 	    &stats->rx_len_errors, "Length Errors");
3111 	NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
3112 	    &stats->rx_unicast, "Unicast Frames");
3113 	NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
3114 	    &stats->rx_multicast, "Multicast Frames");
3115 	NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
3116 	    &stats->rx_broadcast, "Broadcast Frames");
3117 	if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3118 		NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
3119 		    &stats->rx_octets, "Octets");
3120 		NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
3121 		    &stats->rx_pause, "Pause frames");
3122 		NFE_SYSCTL_STAT_ADD32(ctx, child, "drops",
3123 		    &stats->rx_drops, "Drop frames");
3124 	}
3125 
3126 	/* Tx statistics. */
3127 	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx",
3128 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx MAC statistics");
3129 	child = SYSCTL_CHILDREN(tree);
3130 	NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
3131 	    &stats->tx_octets, "Octets");
3132 	NFE_SYSCTL_STAT_ADD32(ctx, child, "zero_rexmits",
3133 	    &stats->tx_zero_rexmits, "Zero Retransmits");
3134 	NFE_SYSCTL_STAT_ADD32(ctx, child, "one_rexmits",
3135 	    &stats->tx_one_rexmits, "One Retransmits");
3136 	NFE_SYSCTL_STAT_ADD32(ctx, child, "multi_rexmits",
3137 	    &stats->tx_multi_rexmits, "Multiple Retransmits");
3138 	NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
3139 	    &stats->tx_late_cols, "Late Collisions");
3140 	NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_underuns",
3141 	    &stats->tx_fifo_underuns, "FIFO Underruns");
3142 	NFE_SYSCTL_STAT_ADD32(ctx, child, "carrier_losts",
3143 	    &stats->tx_carrier_losts, "Carrier Losts");
3144 	NFE_SYSCTL_STAT_ADD32(ctx, child, "excess_deferrals",
3145 	    &stats->tx_excess_deferals, "Excess Deferrals");
3146 	NFE_SYSCTL_STAT_ADD32(ctx, child, "retry_errors",
3147 	    &stats->tx_retry_errors, "Retry Errors");
3148 	if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3149 		NFE_SYSCTL_STAT_ADD32(ctx, child, "deferrals",
3150 		    &stats->tx_deferals, "Deferrals");
3151 		NFE_SYSCTL_STAT_ADD32(ctx, child, "frames",
3152 		    &stats->tx_frames, "Frames");
3153 		NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
3154 		    &stats->tx_pause, "Pause Frames");
3155 	}
3156 	if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3157 		NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
3158 		    &stats->tx_deferals, "Unicast Frames");
3159 		NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
3160 		    &stats->tx_frames, "Multicast Frames");
3161 		NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
3162 		    &stats->tx_pause, "Broadcast Frames");
3163 	}
3164 }
3165 
3166 #undef NFE_SYSCTL_STAT_ADD32
3167 #undef NFE_SYSCTL_STAT_ADD64
3168 
3169 static void
nfe_stats_clear(struct nfe_softc * sc)3170 nfe_stats_clear(struct nfe_softc *sc)
3171 {
3172 	int i, mib_cnt;
3173 
3174 	if ((sc->nfe_flags & NFE_MIB_V1) != 0)
3175 		mib_cnt = NFE_NUM_MIB_STATV1;
3176 	else if ((sc->nfe_flags & (NFE_MIB_V2 | NFE_MIB_V3)) != 0)
3177 		mib_cnt = NFE_NUM_MIB_STATV2;
3178 	else
3179 		return;
3180 
3181 	for (i = 0; i < mib_cnt; i++)
3182 		NFE_READ(sc, NFE_TX_OCTET + i * sizeof(uint32_t));
3183 
3184 	if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3185 		NFE_READ(sc, NFE_TX_UNICAST);
3186 		NFE_READ(sc, NFE_TX_MULTICAST);
3187 		NFE_READ(sc, NFE_TX_BROADCAST);
3188 	}
3189 }
3190 
3191 static void
nfe_stats_update(struct nfe_softc * sc)3192 nfe_stats_update(struct nfe_softc *sc)
3193 {
3194 	struct nfe_hw_stats *stats;
3195 
3196 	NFE_LOCK_ASSERT(sc);
3197 
3198 	if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
3199 		return;
3200 
3201 	stats = &sc->nfe_stats;
3202 	stats->tx_octets += NFE_READ(sc, NFE_TX_OCTET);
3203 	stats->tx_zero_rexmits += NFE_READ(sc, NFE_TX_ZERO_REXMIT);
3204 	stats->tx_one_rexmits += NFE_READ(sc, NFE_TX_ONE_REXMIT);
3205 	stats->tx_multi_rexmits += NFE_READ(sc, NFE_TX_MULTI_REXMIT);
3206 	stats->tx_late_cols += NFE_READ(sc, NFE_TX_LATE_COL);
3207 	stats->tx_fifo_underuns += NFE_READ(sc, NFE_TX_FIFO_UNDERUN);
3208 	stats->tx_carrier_losts += NFE_READ(sc, NFE_TX_CARRIER_LOST);
3209 	stats->tx_excess_deferals += NFE_READ(sc, NFE_TX_EXCESS_DEFERRAL);
3210 	stats->tx_retry_errors += NFE_READ(sc, NFE_TX_RETRY_ERROR);
3211 	stats->rx_frame_errors += NFE_READ(sc, NFE_RX_FRAME_ERROR);
3212 	stats->rx_extra_bytes += NFE_READ(sc, NFE_RX_EXTRA_BYTES);
3213 	stats->rx_late_cols += NFE_READ(sc, NFE_RX_LATE_COL);
3214 	stats->rx_runts += NFE_READ(sc, NFE_RX_RUNT);
3215 	stats->rx_jumbos += NFE_READ(sc, NFE_RX_JUMBO);
3216 	stats->rx_fifo_overuns += NFE_READ(sc, NFE_RX_FIFO_OVERUN);
3217 	stats->rx_crc_errors += NFE_READ(sc, NFE_RX_CRC_ERROR);
3218 	stats->rx_fae += NFE_READ(sc, NFE_RX_FAE);
3219 	stats->rx_len_errors += NFE_READ(sc, NFE_RX_LEN_ERROR);
3220 	stats->rx_unicast += NFE_READ(sc, NFE_RX_UNICAST);
3221 	stats->rx_multicast += NFE_READ(sc, NFE_RX_MULTICAST);
3222 	stats->rx_broadcast += NFE_READ(sc, NFE_RX_BROADCAST);
3223 
3224 	if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3225 		stats->tx_deferals += NFE_READ(sc, NFE_TX_DEFERAL);
3226 		stats->tx_frames += NFE_READ(sc, NFE_TX_FRAME);
3227 		stats->rx_octets += NFE_READ(sc, NFE_RX_OCTET);
3228 		stats->tx_pause += NFE_READ(sc, NFE_TX_PAUSE);
3229 		stats->rx_pause += NFE_READ(sc, NFE_RX_PAUSE);
3230 		stats->rx_drops += NFE_READ(sc, NFE_RX_DROP);
3231 	}
3232 
3233 	if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3234 		stats->tx_unicast += NFE_READ(sc, NFE_TX_UNICAST);
3235 		stats->tx_multicast += NFE_READ(sc, NFE_TX_MULTICAST);
3236 		stats->tx_broadcast += NFE_READ(sc, NFE_TX_BROADCAST);
3237 	}
3238 }
3239 
3240 static void
nfe_set_linkspeed(struct nfe_softc * sc)3241 nfe_set_linkspeed(struct nfe_softc *sc)
3242 {
3243 	struct mii_softc *miisc;
3244 	struct mii_data *mii;
3245 	int aneg, i, phyno;
3246 
3247 	NFE_LOCK_ASSERT(sc);
3248 
3249 	mii = device_get_softc(sc->nfe_miibus);
3250 	mii_pollstat(mii);
3251 	aneg = 0;
3252 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
3253 	    (IFM_ACTIVE | IFM_AVALID)) {
3254 		switch IFM_SUBTYPE(mii->mii_media_active) {
3255 		case IFM_10_T:
3256 		case IFM_100_TX:
3257 			return;
3258 		case IFM_1000_T:
3259 			aneg++;
3260 			break;
3261 		default:
3262 			break;
3263 		}
3264 	}
3265 	miisc = LIST_FIRST(&mii->mii_phys);
3266 	phyno = miisc->mii_phy;
3267 	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3268 		PHY_RESET(miisc);
3269 	nfe_miibus_writereg(sc->nfe_dev, phyno, MII_100T2CR, 0);
3270 	nfe_miibus_writereg(sc->nfe_dev, phyno,
3271 	    MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
3272 	nfe_miibus_writereg(sc->nfe_dev, phyno,
3273 	    MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
3274 	DELAY(1000);
3275 	if (aneg != 0) {
3276 		/*
3277 		 * Poll link state until nfe(4) get a 10/100Mbps link.
3278 		 */
3279 		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
3280 			mii_pollstat(mii);
3281 			if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
3282 			    == (IFM_ACTIVE | IFM_AVALID)) {
3283 				switch (IFM_SUBTYPE(mii->mii_media_active)) {
3284 				case IFM_10_T:
3285 				case IFM_100_TX:
3286 					nfe_mac_config(sc, mii);
3287 					return;
3288 				default:
3289 					break;
3290 				}
3291 			}
3292 			NFE_UNLOCK(sc);
3293 			pause("nfelnk", hz);
3294 			NFE_LOCK(sc);
3295 		}
3296 		if (i == MII_ANEGTICKS_GIGE)
3297 			device_printf(sc->nfe_dev,
3298 			    "establishing a link failed, WOL may not work!");
3299 	}
3300 	/*
3301 	 * No link, force MAC to have 100Mbps, full-duplex link.
3302 	 * This is the last resort and may/may not work.
3303 	 */
3304 	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
3305 	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
3306 	nfe_mac_config(sc, mii);
3307 }
3308 
3309 static void
nfe_set_wol(struct nfe_softc * sc)3310 nfe_set_wol(struct nfe_softc *sc)
3311 {
3312 	if_t ifp;
3313 	uint32_t wolctl;
3314 	int pmc;
3315 	uint16_t pmstat;
3316 
3317 	NFE_LOCK_ASSERT(sc);
3318 
3319 	if (pci_find_cap(sc->nfe_dev, PCIY_PMG, &pmc) != 0)
3320 		return;
3321 	ifp = sc->nfe_ifp;
3322 	if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
3323 		wolctl = NFE_WOL_MAGIC;
3324 	else
3325 		wolctl = 0;
3326 	NFE_WRITE(sc, NFE_WOL_CTL, wolctl);
3327 	if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0) {
3328 		nfe_set_linkspeed(sc);
3329 		if ((sc->nfe_flags & NFE_PWR_MGMT) != 0)
3330 			NFE_WRITE(sc, NFE_PWR2_CTL,
3331 			    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_GATE_CLOCKS);
3332 		/* Enable RX. */
3333 		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 0);
3334 		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, 0);
3335 		NFE_WRITE(sc, NFE_RX_CTL, NFE_READ(sc, NFE_RX_CTL) |
3336 		    NFE_RX_START);
3337 	}
3338 	/* Request PME if WOL is requested. */
3339 	pmstat = pci_read_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, 2);
3340 	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
3341 	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
3342 		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3343 	pci_write_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
3344 }
3345