1 /* $OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $ */
2
3 /*-
4 * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
5 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
6 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
22
23 #include <sys/cdefs.h>
24 #ifdef HAVE_KERNEL_OPTION_HEADERS
25 #include "opt_device_polling.h"
26 #endif
27
28 #include <sys/param.h>
29 #include <sys/endian.h>
30 #include <sys/systm.h>
31 #include <sys/sockio.h>
32 #include <sys/mbuf.h>
33 #include <sys/malloc.h>
34 #include <sys/module.h>
35 #include <sys/kernel.h>
36 #include <sys/queue.h>
37 #include <sys/socket.h>
38 #include <sys/sysctl.h>
39 #include <sys/taskqueue.h>
40
41 #include <net/if.h>
42 #include <net/if_var.h>
43 #include <net/if_arp.h>
44 #include <net/ethernet.h>
45 #include <net/if_dl.h>
46 #include <net/if_media.h>
47 #include <net/if_types.h>
48 #include <net/if_vlan_var.h>
49
50 #include <net/bpf.h>
51
52 #include <machine/bus.h>
53 #include <machine/resource.h>
54 #include <sys/bus.h>
55 #include <sys/rman.h>
56
57 #include <dev/mii/mii.h>
58 #include <dev/mii/miivar.h>
59
60 #include <dev/pci/pcireg.h>
61 #include <dev/pci/pcivar.h>
62
63 #include <dev/nfe/if_nfereg.h>
64 #include <dev/nfe/if_nfevar.h>
65
66 MODULE_DEPEND(nfe, pci, 1, 1, 1);
67 MODULE_DEPEND(nfe, ether, 1, 1, 1);
68 MODULE_DEPEND(nfe, miibus, 1, 1, 1);
69
70 /* "device miibus" required. See GENERIC if you get errors here. */
71 #include "miibus_if.h"
72
73 static int nfe_probe(device_t);
74 static int nfe_attach(device_t);
75 static int nfe_detach(device_t);
76 static int nfe_suspend(device_t);
77 static int nfe_resume(device_t);
78 static int nfe_shutdown(device_t);
79 static int nfe_can_use_msix(struct nfe_softc *);
80 static int nfe_detect_msik9(struct nfe_softc *);
81 static void nfe_power(struct nfe_softc *);
82 static int nfe_miibus_readreg(device_t, int, int);
83 static int nfe_miibus_writereg(device_t, int, int, int);
84 static void nfe_miibus_statchg(device_t);
85 static void nfe_mac_config(struct nfe_softc *, struct mii_data *);
86 static void nfe_set_intr(struct nfe_softc *);
87 static __inline void nfe_enable_intr(struct nfe_softc *);
88 static __inline void nfe_disable_intr(struct nfe_softc *);
89 static int nfe_ioctl(if_t, u_long, caddr_t);
90 static void nfe_alloc_msix(struct nfe_softc *, int);
91 static int nfe_intr(void *);
92 static void nfe_int_task(void *, int);
93 static __inline void nfe_discard_rxbuf(struct nfe_softc *, int);
94 static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int);
95 static int nfe_newbuf(struct nfe_softc *, int);
96 static int nfe_jnewbuf(struct nfe_softc *, int);
97 static int nfe_rxeof(struct nfe_softc *, int, int *);
98 static int nfe_jrxeof(struct nfe_softc *, int, int *);
99 static void nfe_txeof(struct nfe_softc *);
100 static int nfe_encap(struct nfe_softc *, struct mbuf **);
101 static void nfe_setmulti(struct nfe_softc *);
102 static void nfe_start(if_t);
103 static void nfe_start_locked(if_t);
104 static void nfe_watchdog(if_t);
105 static void nfe_init(void *);
106 static void nfe_init_locked(void *);
107 static void nfe_stop(if_t);
108 static int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
109 static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
110 static int nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
111 static int nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
112 static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
113 static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
114 static int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
115 static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
116 static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
117 static int nfe_ifmedia_upd(if_t);
118 static void nfe_ifmedia_sts(if_t, struct ifmediareq *);
119 static void nfe_tick(void *);
120 static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
121 static void nfe_set_macaddr(struct nfe_softc *, uint8_t *);
122 static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int);
123
124 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
125 static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS);
126 static void nfe_sysctl_node(struct nfe_softc *);
127 static void nfe_stats_clear(struct nfe_softc *);
128 static void nfe_stats_update(struct nfe_softc *);
129 static void nfe_set_linkspeed(struct nfe_softc *);
130 static void nfe_set_wol(struct nfe_softc *);
131
132 #ifdef NFE_DEBUG
133 static int nfedebug = 0;
134 #define DPRINTF(sc, ...) do { \
135 if (nfedebug) \
136 device_printf((sc)->nfe_dev, __VA_ARGS__); \
137 } while (0)
138 #define DPRINTFN(sc, n, ...) do { \
139 if (nfedebug >= (n)) \
140 device_printf((sc)->nfe_dev, __VA_ARGS__); \
141 } while (0)
142 #else
143 #define DPRINTF(sc, ...)
144 #define DPRINTFN(sc, n, ...)
145 #endif
146
147 #define NFE_LOCK(_sc) mtx_lock(&(_sc)->nfe_mtx)
148 #define NFE_UNLOCK(_sc) mtx_unlock(&(_sc)->nfe_mtx)
149 #define NFE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)
150
151 /* Tunables. */
152 static int msi_disable = 0;
153 static int msix_disable = 0;
154 static int jumbo_disable = 0;
155 TUNABLE_INT("hw.nfe.msi_disable", &msi_disable);
156 TUNABLE_INT("hw.nfe.msix_disable", &msix_disable);
157 TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable);
158
159 static device_method_t nfe_methods[] = {
160 /* Device interface */
161 DEVMETHOD(device_probe, nfe_probe),
162 DEVMETHOD(device_attach, nfe_attach),
163 DEVMETHOD(device_detach, nfe_detach),
164 DEVMETHOD(device_suspend, nfe_suspend),
165 DEVMETHOD(device_resume, nfe_resume),
166 DEVMETHOD(device_shutdown, nfe_shutdown),
167
168 /* MII interface */
169 DEVMETHOD(miibus_readreg, nfe_miibus_readreg),
170 DEVMETHOD(miibus_writereg, nfe_miibus_writereg),
171 DEVMETHOD(miibus_statchg, nfe_miibus_statchg),
172
173 DEVMETHOD_END
174 };
175
176 static driver_t nfe_driver = {
177 "nfe",
178 nfe_methods,
179 sizeof(struct nfe_softc)
180 };
181
182 DRIVER_MODULE(nfe, pci, nfe_driver, 0, 0);
183 DRIVER_MODULE(miibus, nfe, miibus_driver, 0, 0);
184
185 static struct nfe_type nfe_devs[] = {
186 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
187 "NVIDIA nForce MCP Networking Adapter"},
188 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
189 "NVIDIA nForce2 MCP2 Networking Adapter"},
190 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
191 "NVIDIA nForce2 400 MCP4 Networking Adapter"},
192 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
193 "NVIDIA nForce2 400 MCP5 Networking Adapter"},
194 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
195 "NVIDIA nForce3 MCP3 Networking Adapter"},
196 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
197 "NVIDIA nForce3 250 MCP6 Networking Adapter"},
198 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
199 "NVIDIA nForce3 MCP7 Networking Adapter"},
200 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
201 "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
202 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
203 "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
204 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
205 "NVIDIA nForce MCP04 Networking Adapter"}, /* MCP10 */
206 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
207 "NVIDIA nForce MCP04 Networking Adapter"}, /* MCP11 */
208 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
209 "NVIDIA nForce 430 MCP12 Networking Adapter"},
210 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
211 "NVIDIA nForce 430 MCP13 Networking Adapter"},
212 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
213 "NVIDIA nForce MCP55 Networking Adapter"},
214 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
215 "NVIDIA nForce MCP55 Networking Adapter"},
216 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
217 "NVIDIA nForce MCP61 Networking Adapter"},
218 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
219 "NVIDIA nForce MCP61 Networking Adapter"},
220 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
221 "NVIDIA nForce MCP61 Networking Adapter"},
222 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
223 "NVIDIA nForce MCP61 Networking Adapter"},
224 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
225 "NVIDIA nForce MCP65 Networking Adapter"},
226 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
227 "NVIDIA nForce MCP65 Networking Adapter"},
228 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
229 "NVIDIA nForce MCP65 Networking Adapter"},
230 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
231 "NVIDIA nForce MCP65 Networking Adapter"},
232 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
233 "NVIDIA nForce MCP67 Networking Adapter"},
234 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
235 "NVIDIA nForce MCP67 Networking Adapter"},
236 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
237 "NVIDIA nForce MCP67 Networking Adapter"},
238 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
239 "NVIDIA nForce MCP67 Networking Adapter"},
240 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
241 "NVIDIA nForce MCP73 Networking Adapter"},
242 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
243 "NVIDIA nForce MCP73 Networking Adapter"},
244 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
245 "NVIDIA nForce MCP73 Networking Adapter"},
246 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
247 "NVIDIA nForce MCP73 Networking Adapter"},
248 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
249 "NVIDIA nForce MCP77 Networking Adapter"},
250 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
251 "NVIDIA nForce MCP77 Networking Adapter"},
252 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
253 "NVIDIA nForce MCP77 Networking Adapter"},
254 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
255 "NVIDIA nForce MCP77 Networking Adapter"},
256 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
257 "NVIDIA nForce MCP79 Networking Adapter"},
258 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
259 "NVIDIA nForce MCP79 Networking Adapter"},
260 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
261 "NVIDIA nForce MCP79 Networking Adapter"},
262 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
263 "NVIDIA nForce MCP79 Networking Adapter"},
264 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP89_LAN,
265 "NVIDIA nForce MCP89 Networking Adapter"},
266 {0, 0, NULL}
267 };
268
269 /* Probe for supported hardware IDs */
270 static int
271 nfe_probe(device_t dev)
272 {
273 struct nfe_type *t;
274
275 t = nfe_devs;
276 /* Check for matching PCI device IDs */
277 while (t->name != NULL) {
278 if ((pci_get_vendor(dev) == t->vid_id) &&
279 (pci_get_device(dev) == t->dev_id)) {
280 device_set_desc(dev, t->name);
281 return (BUS_PROBE_DEFAULT);
282 }
283 t++;
284 }
285
286 return (ENXIO);
287 }
288
289 static void
290 nfe_alloc_msix(struct nfe_softc *sc, int count)
291 {
292 int rid;
293
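/* The MSI-X vector table is mapped through BAR(2) and the pending bit array (PBA) through BAR(3) on these controllers. */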
294 rid = PCIR_BAR(2);
295 sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY,
296 &rid, RF_ACTIVE);
297 if (sc->nfe_msix_res == NULL) {
298 device_printf(sc->nfe_dev,
299 "couldn't allocate MSIX table resource\n");
300 return;
301 }
302 rid = PCIR_BAR(3);
303 sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev,
304 SYS_RES_MEMORY, &rid, RF_ACTIVE);
305 if (sc->nfe_msix_pba_res == NULL) {
306 device_printf(sc->nfe_dev,
307 "couldn't allocate MSIX PBA resource\n");
308 bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2),
309 sc->nfe_msix_res);
310 sc->nfe_msix_res = NULL;
311 return;
312 }
313
314 if (pci_alloc_msix(sc->nfe_dev, &count) == 0) {
315 if (count == NFE_MSI_MESSAGES) {
316 if (bootverbose)
317 device_printf(sc->nfe_dev,
318 "Using %d MSIX messages\n", count);
319 sc->nfe_msix = 1;
320 } else {
321 if (bootverbose)
322 device_printf(sc->nfe_dev,
323 "couldn't allocate MSIX\n");
324 pci_release_msi(sc->nfe_dev);
325 bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
326 PCIR_BAR(3), sc->nfe_msix_pba_res);
327 bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
328 PCIR_BAR(2), sc->nfe_msix_res);
329 sc->nfe_msix_pba_res = NULL;
330 sc->nfe_msix_res = NULL;
331 }
332 }
333 }
334
335 static int
336 nfe_detect_msik9(struct nfe_softc *sc)
337 {
338 static const char *maker = "MSI";
339 static const char *product = "K9N6PGM2-V2 (MS-7309)";
340 char *m, *p;
341 int found;
342
343 found = 0;
344 m = kern_getenv("smbios.planar.maker");
345 p = kern_getenv("smbios.planar.product");
346 if (m != NULL && p != NULL) {
347 if (strcmp(m, maker) == 0 && strcmp(p, product) == 0)
348 found = 1;
349 }
350 if (m != NULL)
351 freeenv(m);
352 if (p != NULL)
353 freeenv(p);
354
355 return (found);
356 }
357
358 static int
359 nfe_attach(device_t dev)
360 {
361 struct nfe_softc *sc;
362 if_t ifp;
363 bus_addr_t dma_addr_max;
364 int error = 0, i, msic, phyloc, reg, rid;
365
366 sc = device_get_softc(dev);
367 sc->nfe_dev = dev;
368
369 mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
370 MTX_DEF);
371 callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);
372
373 pci_enable_busmaster(dev);
374
375 rid = PCIR_BAR(0);
376 sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
377 RF_ACTIVE);
378 if (sc->nfe_res[0] == NULL) {
379 device_printf(dev, "couldn't map memory resources\n");
380 mtx_destroy(&sc->nfe_mtx);
381 return (ENXIO);
382 }
383
384 if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
385 uint16_t v, width;
386
387 v = pci_read_config(dev, reg + 0x08, 2);
388 /* Change max. read request size to 4096. */
389 v &= ~(7 << 12);
390 v |= (5 << 12);
391 pci_write_config(dev, reg + 0x08, v, 2);
392
393 v = pci_read_config(dev, reg + 0x0c, 2);
394 /* link capability */
395 v = (v >> 4) & 0x0f;
396 width = pci_read_config(dev, reg + 0x12, 2);
397 /* negotiated link width */
398 width = (width >> 4) & 0x3f;
399 if (v != width)
400 device_printf(sc->nfe_dev,
401 "warning, negotiated width of link(x%d) != "
402 "max. width of link(x%d)\n", width, v);
403 }
404
405 if (nfe_can_use_msix(sc) == 0) {
406 device_printf(sc->nfe_dev,
407 "MSI/MSI-X capability black-listed, will use INTx\n");
408 msix_disable = 1;
409 msi_disable = 1;
410 }
411
412 /* Allocate interrupt */
413 if (msix_disable == 0 || msi_disable == 0) {
414 if (msix_disable == 0 &&
415 (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES)
416 nfe_alloc_msix(sc, msic);
417 if (msi_disable == 0 && sc->nfe_msix == 0 &&
418 (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES &&
419 pci_alloc_msi(dev, &msic) == 0) {
420 if (msic == NFE_MSI_MESSAGES) {
421 if (bootverbose)
422 device_printf(dev,
423 "Using %d MSI messages\n", msic);
424 sc->nfe_msi = 1;
425 } else
426 pci_release_msi(dev);
427 }
428 }
429
430 if (sc->nfe_msix == 0 && sc->nfe_msi == 0) {
431 rid = 0;
432 sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
433 RF_SHAREABLE | RF_ACTIVE);
434 if (sc->nfe_irq[0] == NULL) {
435 device_printf(dev, "couldn't allocate IRQ resources\n");
436 error = ENXIO;
437 goto fail;
438 }
439 } else {
440 for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
441 sc->nfe_irq[i] = bus_alloc_resource_any(dev,
442 SYS_RES_IRQ, &rid, RF_ACTIVE);
443 if (sc->nfe_irq[i] == NULL) {
444 device_printf(dev,
445 "couldn't allocate IRQ resources for "
446 "message %d\n", rid);
447 error = ENXIO;
448 goto fail;
449 }
450 }
451 /* Map interrupts to vector 0. */
452 if (sc->nfe_msix != 0) {
453 NFE_WRITE(sc, NFE_MSIX_MAP0, 0);
454 NFE_WRITE(sc, NFE_MSIX_MAP1, 0);
455 } else if (sc->nfe_msi != 0) {
456 NFE_WRITE(sc, NFE_MSI_MAP0, 0);
457 NFE_WRITE(sc, NFE_MSI_MAP1, 0);
458 }
459 }
460
461 /* Set IRQ status/mask register. */
462 sc->nfe_irq_status = NFE_IRQ_STATUS;
463 sc->nfe_irq_mask = NFE_IRQ_MASK;
464 sc->nfe_intrs = NFE_IRQ_WANTED;
465 sc->nfe_nointrs = 0;
466 if (sc->nfe_msix != 0) {
467 sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS;
468 sc->nfe_nointrs = NFE_IRQ_WANTED;
469 } else if (sc->nfe_msi != 0) {
470 sc->nfe_irq_mask = NFE_MSI_IRQ_MASK;
471 sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED;
472 }
473
474 sc->nfe_devid = pci_get_device(dev);
475 sc->nfe_revid = pci_get_revid(dev);
476 sc->nfe_flags = 0;
477
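/* Select chipset-specific feature flags based on the PCI device ID. */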
478 switch (sc->nfe_devid) {
479 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
480 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
481 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
482 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
483 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
484 break;
485 case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
486 case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
487 sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_MIB_V1;
488 break;
489 case PCI_PRODUCT_NVIDIA_CK804_LAN1:
490 case PCI_PRODUCT_NVIDIA_CK804_LAN2:
491 case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
492 case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
493 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
494 NFE_MIB_V1;
495 break;
496 case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
497 case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
498 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
499 NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
500 break;
501
502 case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
503 case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
504 case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
505 case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
506 case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
507 case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
508 case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
509 case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
510 case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
511 case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
512 case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
513 case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
514 sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT |
515 NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
516 break;
517 case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
518 case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
519 case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
520 case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
521 /* XXX flow control */
522 sc->nfe_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT |
523 NFE_CORRECT_MACADDR | NFE_MIB_V3;
524 break;
525 case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
526 case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
527 case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
528 case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
529 case PCI_PRODUCT_NVIDIA_MCP89_LAN:
530 /* XXX flow control */
531 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
532 NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_MIB_V3;
533 break;
534 case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
535 case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
536 case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
537 case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
538 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
539 NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL |
540 NFE_MIB_V2;
541 break;
542 }
543
544 nfe_power(sc);
545 /* Check for reversed ethernet address */
546 if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
547 sc->nfe_flags |= NFE_CORRECT_MACADDR;
548 nfe_get_macaddr(sc, sc->eaddr);
549 /*
550 * Allocate the parent bus DMA tag appropriate for PCI.
551 */
552 dma_addr_max = BUS_SPACE_MAXADDR_32BIT;
553 if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0)
554 dma_addr_max = NFE_DMA_MAXADDR;
555 error = bus_dma_tag_create(
556 bus_get_dma_tag(sc->nfe_dev), /* parent */
557 1, 0, /* alignment, boundary */
558 dma_addr_max, /* lowaddr */
559 BUS_SPACE_MAXADDR, /* highaddr */
560 NULL, NULL, /* filter, filterarg */
561 BUS_SPACE_MAXSIZE_32BIT, 0, /* maxsize, nsegments */
562 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
563 0, /* flags */
564 NULL, NULL, /* lockfunc, lockarg */
565 &sc->nfe_parent_tag);
566 if (error)
567 goto fail;
568
569 ifp = sc->nfe_ifp = if_gethandle(IFT_ETHER);
570
571 /*
572 * Allocate Tx and Rx rings.
573 */
574 if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0)
575 goto fail;
576
577 if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0)
578 goto fail;
579
580 nfe_alloc_jrx_ring(sc, &sc->jrxq);
581 /* Create sysctl node. */
582 nfe_sysctl_node(sc);
583
584 if_setsoftc(ifp, sc);
585 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
586 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
587 if_setioctlfn(ifp, nfe_ioctl);
588 if_setstartfn(ifp, nfe_start);
589 if_sethwassist(ifp, 0);
590 if_setcapabilities(ifp, 0);
591 if_setinitfn(ifp, nfe_init);
592 if_setsendqlen(ifp, NFE_TX_RING_COUNT - 1);
593 if_setsendqready(ifp);
594
595 if (sc->nfe_flags & NFE_HW_CSUM) {
596 if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_TSO4, 0);
597 if_sethwassistbits(ifp, NFE_CSUM_FEATURES | CSUM_TSO, 0);
598 }
599 if_setcapenable(ifp, if_getcapabilities(ifp));
600
601 sc->nfe_framesize = if_getmtu(ifp) + NFE_RX_HEADERS;
602 /* VLAN capability setup. */
603 if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
604 if ((sc->nfe_flags & NFE_HW_VLAN) != 0) {
605 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0);
606 if ((if_getcapabilities(ifp) & IFCAP_HWCSUM) != 0)
607 if_setcapabilitiesbit(ifp,
608 (IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO), 0);
609 }
610
611 if (pci_find_cap(dev, PCIY_PMG, &reg) == 0)
612 if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0);
613 if_setcapenable(ifp, if_getcapabilities(ifp));
614
615 /*
616 * Tell the upper layer(s) we support long frames.
617 * Must appear after the call to ether_ifattach() because
618 * ether_ifattach() sets ifi_hdrlen to the default value.
619 */
620 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
621
622 #ifdef DEVICE_POLLING
623 if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
624 #endif
625
626 /* Do MII setup */
627 phyloc = MII_PHY_ANY;
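/* The MSI K9N6PGM2-V2 board reportedly needs its PHY probed at a fixed address (0) rather than by scanning all addresses. */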
628 if (sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN1 ||
629 sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN2 ||
630 sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN3 ||
631 sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN4) {
632 if (nfe_detect_msik9(sc) != 0)
633 phyloc = 0;
634 }
635 error = mii_attach(dev, &sc->nfe_miibus, ifp,
636 (ifm_change_cb_t)nfe_ifmedia_upd, (ifm_stat_cb_t)nfe_ifmedia_sts,
637 BMSR_DEFCAPMASK, phyloc, MII_OFFSET_ANY, MIIF_DOPAUSE);
638 if (error != 0) {
639 device_printf(dev, "attaching PHYs failed\n");
640 goto fail;
641 }
642 ether_ifattach(ifp, sc->eaddr);
643
644 NET_TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
645 sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK,
646 taskqueue_thread_enqueue, &sc->nfe_tq);
647 taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq",
648 device_get_nameunit(sc->nfe_dev));
649 error = 0;
650 if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
651 error = bus_setup_intr(dev, sc->nfe_irq[0],
652 INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
653 &sc->nfe_intrhand[0]);
654 } else {
655 for (i = 0; i < NFE_MSI_MESSAGES; i++) {
656 error = bus_setup_intr(dev, sc->nfe_irq[i],
657 INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
658 &sc->nfe_intrhand[i]);
659 if (error != 0)
660 break;
661 }
662 }
663 if (error) {
664 device_printf(dev, "couldn't set up irq\n");
665 taskqueue_free(sc->nfe_tq);
666 sc->nfe_tq = NULL;
667 ether_ifdetach(ifp);
668 goto fail;
669 }
670
671 fail:
672 if (error)
673 nfe_detach(dev);
674
675 return (error);
676 }
677
678 static int
679 nfe_detach(device_t dev)
680 {
681 struct nfe_softc *sc;
682 if_t ifp;
683 uint8_t eaddr[ETHER_ADDR_LEN];
684 int i, rid;
685
686 sc = device_get_softc(dev);
687 KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
688 ifp = sc->nfe_ifp;
689
690 #ifdef DEVICE_POLLING
691 if (ifp != NULL && if_getcapenable(ifp) & IFCAP_POLLING)
692 ether_poll_deregister(ifp);
693 #endif
694 if (device_is_attached(dev)) {
695 NFE_LOCK(sc);
696 nfe_stop(ifp);
697 if_setflagbits(ifp, 0, IFF_UP);
698 NFE_UNLOCK(sc);
699 callout_drain(&sc->nfe_stat_ch);
700 ether_ifdetach(ifp);
701 }
702
703 if (ifp) {
704 /* restore ethernet address */
705 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
706 for (i = 0; i < ETHER_ADDR_LEN; i++) {
707 eaddr[i] = sc->eaddr[5 - i];
708 }
709 } else
710 bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN);
711 nfe_set_macaddr(sc, eaddr);
712 if_free(ifp);
713 }
714 bus_generic_detach(dev);
715 if (sc->nfe_tq != NULL) {
716 taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task);
717 taskqueue_free(sc->nfe_tq);
718 sc->nfe_tq = NULL;
719 }
720
721 for (i = 0; i < NFE_MSI_MESSAGES; i++) {
722 if (sc->nfe_intrhand[i] != NULL) {
723 bus_teardown_intr(dev, sc->nfe_irq[i],
724 sc->nfe_intrhand[i]);
725 sc->nfe_intrhand[i] = NULL;
726 }
727 }
728
729 if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
730 if (sc->nfe_irq[0] != NULL)
731 bus_release_resource(dev, SYS_RES_IRQ, 0,
732 sc->nfe_irq[0]);
733 } else {
734 for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
735 if (sc->nfe_irq[i] != NULL) {
736 bus_release_resource(dev, SYS_RES_IRQ, rid,
737 sc->nfe_irq[i]);
738 sc->nfe_irq[i] = NULL;
739 }
740 }
741 pci_release_msi(dev);
742 }
743 if (sc->nfe_msix_pba_res != NULL) {
744 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3),
745 sc->nfe_msix_pba_res);
746 sc->nfe_msix_pba_res = NULL;
747 }
748 if (sc->nfe_msix_res != NULL) {
749 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2),
750 sc->nfe_msix_res);
751 sc->nfe_msix_res = NULL;
752 }
753 if (sc->nfe_res[0] != NULL) {
754 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
755 sc->nfe_res[0]);
756 sc->nfe_res[0] = NULL;
757 }
758
759 nfe_free_tx_ring(sc, &sc->txq);
760 nfe_free_rx_ring(sc, &sc->rxq);
761 nfe_free_jrx_ring(sc, &sc->jrxq);
762
763 if (sc->nfe_parent_tag) {
764 bus_dma_tag_destroy(sc->nfe_parent_tag);
765 sc->nfe_parent_tag = NULL;
766 }
767
768 mtx_destroy(&sc->nfe_mtx);
769
770 return (0);
771 }
772
773 static int
774 nfe_suspend(device_t dev)
775 {
776 struct nfe_softc *sc;
777
778 sc = device_get_softc(dev);
779
780 NFE_LOCK(sc);
781 nfe_stop(sc->nfe_ifp);
782 nfe_set_wol(sc);
783 sc->nfe_suspended = 1;
784 NFE_UNLOCK(sc);
785
786 return (0);
787 }
788
789 static int
790 nfe_resume(device_t dev)
791 {
792 struct nfe_softc *sc;
793 if_t ifp;
794
795 sc = device_get_softc(dev);
796
797 NFE_LOCK(sc);
798 nfe_power(sc);
799 ifp = sc->nfe_ifp;
800 if (if_getflags(ifp) & IFF_UP)
801 nfe_init_locked(sc);
802 sc->nfe_suspended = 0;
803 NFE_UNLOCK(sc);
804
805 return (0);
806 }
807
808 static int
809 nfe_can_use_msix(struct nfe_softc *sc)
810 {
811 static struct msix_blacklist {
812 char *maker;
813 char *product;
814 } msix_blacklists[] = {
815 { "ASUSTeK Computer INC.", "P5N32-SLI PREMIUM" }
816 };
817
818 struct msix_blacklist *mblp;
819 char *maker, *product;
820 int count, n, use_msix;
821
822 /*
823 * Search base board manufacturer and product name table
824 * to see if this system has a known MSI/MSI-X issue.
825 */
826 maker = kern_getenv("smbios.planar.maker");
827 product = kern_getenv("smbios.planar.product");
828 use_msix = 1;
829 if (maker != NULL && product != NULL) {
830 count = nitems(msix_blacklists);
831 mblp = msix_blacklists;
832 for (n = 0; n < count; n++) {
833 if (strcmp(maker, mblp->maker) == 0 &&
834 strcmp(product, mblp->product) == 0) {
835 use_msix = 0;
836 break;
837 }
838 mblp++;
839 }
840 }
841 if (maker != NULL)
842 freeenv(maker);
843 if (product != NULL)
844 freeenv(product);
845
846 return (use_msix);
847 }
848
849 /* Take PHY/NIC out of powerdown, from Linux */
850 static void
851 nfe_power(struct nfe_softc *sc)
852 {
853 uint32_t pwr;
854
855 if ((sc->nfe_flags & NFE_PWR_MGMT) == 0)
856 return;
857 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
858 NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
859 DELAY(100);
860 NFE_WRITE(sc, NFE_MAC_RESET, 0);
861 DELAY(100);
862 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
863 pwr = NFE_READ(sc, NFE_PWR2_CTL);
864 pwr &= ~NFE_PWR2_WAKEUP_MASK;
865 if (sc->nfe_revid >= 0xa3 &&
866 (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 ||
867 sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2))
868 pwr |= NFE_PWR2_REVA3;
869 NFE_WRITE(sc, NFE_PWR2_CTL, pwr);
870 }
871
872 static void
873 nfe_miibus_statchg(device_t dev)
874 {
875 struct nfe_softc *sc;
876 struct mii_data *mii;
877 if_t ifp;
878 uint32_t rxctl, txctl;
879
880 sc = device_get_softc(dev);
881
882 mii = device_get_softc(sc->nfe_miibus);
883 ifp = sc->nfe_ifp;
884
885 sc->nfe_link = 0;
886 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
887 (IFM_ACTIVE | IFM_AVALID)) {
888 switch (IFM_SUBTYPE(mii->mii_media_active)) {
889 case IFM_10_T:
890 case IFM_100_TX:
891 case IFM_1000_T:
892 sc->nfe_link = 1;
893 break;
894 default:
895 break;
896 }
897 }
898
899 nfe_mac_config(sc, mii);
900 txctl = NFE_READ(sc, NFE_TX_CTL);
901 rxctl = NFE_READ(sc, NFE_RX_CTL);
902 if (sc->nfe_link != 0 && (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
903 txctl |= NFE_TX_START;
904 rxctl |= NFE_RX_START;
905 } else {
906 txctl &= ~NFE_TX_START;
907 rxctl &= ~NFE_RX_START;
908 }
909 NFE_WRITE(sc, NFE_TX_CTL, txctl);
910 NFE_WRITE(sc, NFE_RX_CTL, rxctl);
911 }
912
913 static void
914 nfe_mac_config(struct nfe_softc *sc, struct mii_data *mii)
915 {
916 uint32_t link, misc, phy, seed;
917 uint32_t val;
918
919 NFE_LOCK_ASSERT(sc);
920
921 phy = NFE_READ(sc, NFE_PHY_IFACE);
922 phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);
923
924 seed = NFE_READ(sc, NFE_RNDSEED);
925 seed &= ~NFE_SEED_MASK;
926
927 misc = NFE_MISC1_MAGIC;
928 link = NFE_MEDIA_SET;
929
930 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0) {
931 phy |= NFE_PHY_HDX; /* half-duplex */
932 misc |= NFE_MISC1_HDX;
933 }
934
935 switch (IFM_SUBTYPE(mii->mii_media_active)) {
936 case IFM_1000_T: /* full-duplex only */
937 link |= NFE_MEDIA_1000T;
938 seed |= NFE_SEED_1000T;
939 phy |= NFE_PHY_1000T;
940 break;
941 case IFM_100_TX:
942 link |= NFE_MEDIA_100TX;
943 seed |= NFE_SEED_100TX;
944 phy |= NFE_PHY_100TX;
945 break;
946 case IFM_10_T:
947 link |= NFE_MEDIA_10T;
948 seed |= NFE_SEED_10T;
949 break;
950 }
951
952 if ((phy & 0x10000000) != 0) {
953 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
954 val = NFE_R1_MAGIC_1000;
955 else
956 val = NFE_R1_MAGIC_10_100;
957 } else
958 val = NFE_R1_MAGIC_DEFAULT;
959 NFE_WRITE(sc, NFE_SETUP_R1, val);
960
961 NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? */
962
963 NFE_WRITE(sc, NFE_PHY_IFACE, phy);
964 NFE_WRITE(sc, NFE_MISC1, misc);
965 NFE_WRITE(sc, NFE_LINKSPEED, link);
966
967 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
968 /* It seems all hardware supports Rx pause frames. */
969 val = NFE_READ(sc, NFE_RXFILTER);
970 if ((IFM_OPTIONS(mii->mii_media_active) &
971 IFM_ETH_RXPAUSE) != 0)
972 val |= NFE_PFF_RX_PAUSE;
973 else
974 val &= ~NFE_PFF_RX_PAUSE;
975 NFE_WRITE(sc, NFE_RXFILTER, val);
976 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
977 val = NFE_READ(sc, NFE_MISC1);
978 if ((IFM_OPTIONS(mii->mii_media_active) &
979 IFM_ETH_TXPAUSE) != 0) {
980 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
981 NFE_TX_PAUSE_FRAME_ENABLE);
982 val |= NFE_MISC1_TX_PAUSE;
983 } else {
984 val &= ~NFE_MISC1_TX_PAUSE;
985 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
986 NFE_TX_PAUSE_FRAME_DISABLE);
987 }
988 NFE_WRITE(sc, NFE_MISC1, val);
989 }
990 } else {
991 /* disable rx/tx pause frames */
992 val = NFE_READ(sc, NFE_RXFILTER);
993 val &= ~NFE_PFF_RX_PAUSE;
994 NFE_WRITE(sc, NFE_RXFILTER, val);
995 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
996 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
997 NFE_TX_PAUSE_FRAME_DISABLE);
998 val = NFE_READ(sc, NFE_MISC1);
999 val &= ~NFE_MISC1_TX_PAUSE;
1000 NFE_WRITE(sc, NFE_MISC1, val);
1001 }
1002 }
1003 }
1004
1005 static int
1006 nfe_miibus_readreg(device_t dev, int phy, int reg)
1007 {
1008 struct nfe_softc *sc = device_get_softc(dev);
1009 uint32_t val;
1010 int ntries;
1011
1012 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1013
1014 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
1015 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
1016 DELAY(100);
1017 }
1018
1019 NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);
1020
1021 for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
1022 DELAY(100);
1023 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
1024 break;
1025 }
1026 if (ntries == NFE_TIMEOUT) {
1027 DPRINTFN(sc, 2, "timeout waiting for PHY\n");
1028 return 0;
1029 }
1030
1031 if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
1032 DPRINTFN(sc, 2, "could not read PHY\n");
1033 return 0;
1034 }
1035
1036 val = NFE_READ(sc, NFE_PHY_DATA);
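/* Remember the address of the last PHY that returned valid data. */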
1037 if (val != 0xffffffff && val != 0)
1038 sc->mii_phyaddr = phy;
1039
1040 DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);
1041
1042 return (val);
1043 }
1044
1045 static int
1046 nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
1047 {
1048 struct nfe_softc *sc = device_get_softc(dev);
1049 uint32_t ctl;
1050 int ntries;
1051
1052 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1053
1054 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
1055 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
1056 DELAY(100);
1057 }
1058
1059 NFE_WRITE(sc, NFE_PHY_DATA, val);
1060 ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
1061 NFE_WRITE(sc, NFE_PHY_CTL, ctl);
1062
1063 for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
1064 DELAY(100);
1065 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
1066 break;
1067 }
1068 #ifdef NFE_DEBUG
1069 if (nfedebug >= 2 && ntries == NFE_TIMEOUT)
1070 device_printf(sc->nfe_dev, "could not write to PHY\n");
1071 #endif
1072 return (0);
1073 }
1074
1075 struct nfe_dmamap_arg {
1076 bus_addr_t nfe_busaddr;
1077 };
1078
1079 static int
1080 nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1081 {
1082 struct nfe_dmamap_arg ctx;
1083 struct nfe_rx_data *data;
1084 void *desc;
1085 int i, error, descsize;
1086
1087 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1088 desc = ring->desc64;
1089 descsize = sizeof (struct nfe_desc64);
1090 } else {
1091 desc = ring->desc32;
1092 descsize = sizeof (struct nfe_desc32);
1093 }
1094
1095 ring->cur = ring->next = 0;
1096
1097 error = bus_dma_tag_create(sc->nfe_parent_tag,
1098 NFE_RING_ALIGN, 0, /* alignment, boundary */
1099 BUS_SPACE_MAXADDR, /* lowaddr */
1100 BUS_SPACE_MAXADDR, /* highaddr */
1101 NULL, NULL, /* filter, filterarg */
1102 NFE_RX_RING_COUNT * descsize, 1, /* maxsize, nsegments */
1103 NFE_RX_RING_COUNT * descsize, /* maxsegsize */
1104 0, /* flags */
1105 NULL, NULL, /* lockfunc, lockarg */
1106 &ring->rx_desc_tag);
1107 if (error != 0) {
1108 device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
1109 goto fail;
1110 }
1111
1112 /* allocate memory for the descriptors */
1113 error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK |
1114 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map);
1115 if (error != 0) {
1116 device_printf(sc->nfe_dev, "could not create desc DMA map\n");
1117 goto fail;
1118 }
1119 if (sc->nfe_flags & NFE_40BIT_ADDR)
1120 ring->desc64 = desc;
1121 else
1122 ring->desc32 = desc;
1123
1124 /* map desc to device visible address space */
1125 ctx.nfe_busaddr = 0;
1126 error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc,
1127 NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1128 if (error != 0) {
1129 device_printf(sc->nfe_dev, "could not load desc DMA map\n");
1130 goto fail;
1131 }
1132 ring->physaddr = ctx.nfe_busaddr;
1133
1134 error = bus_dma_tag_create(sc->nfe_parent_tag,
1135 1, 0, /* alignment, boundary */
1136 BUS_SPACE_MAXADDR, /* lowaddr */
1137 BUS_SPACE_MAXADDR, /* highaddr */
1138 NULL, NULL, /* filter, filterarg */
1139 MCLBYTES, 1, /* maxsize, nsegments */
1140 MCLBYTES, /* maxsegsize */
1141 0, /* flags */
1142 NULL, NULL, /* lockfunc, lockarg */
1143 &ring->rx_data_tag);
1144 if (error != 0) {
1145 device_printf(sc->nfe_dev, "could not create Rx DMA tag\n");
1146 goto fail;
1147 }
1148
1149 error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map);
1150 if (error != 0) {
1151 device_printf(sc->nfe_dev,
1152 "could not create Rx DMA spare map\n");
1153 goto fail;
1154 }
1155
1156 /*
1157 * Pre-allocate Rx buffers and populate Rx ring.
1158 */
1159 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1160 data = &sc->rxq.data[i];
1161 data->rx_data_map = NULL;
1162 data->m = NULL;
1163 error = bus_dmamap_create(ring->rx_data_tag, 0,
1164 &data->rx_data_map);
1165 if (error != 0) {
1166 device_printf(sc->nfe_dev,
1167 "could not create Rx DMA map\n");
1168 goto fail;
1169 }
1170 }
1171
1172 fail:
1173 return (error);
1174 }
1175
1176 static void
1177 nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1178 {
1179 struct nfe_dmamap_arg ctx;
1180 struct nfe_rx_data *data;
1181 void *desc;
1182 int i, error, descsize;
1183
1184 if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
1185 return;
1186 if (jumbo_disable != 0) {
1187 device_printf(sc->nfe_dev, "disabling jumbo frame support\n");
1188 sc->nfe_jumbo_disable = 1;
1189 return;
1190 }
1191
1192 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1193 desc = ring->jdesc64;
1194 descsize = sizeof (struct nfe_desc64);
1195 } else {
1196 desc = ring->jdesc32;
1197 descsize = sizeof (struct nfe_desc32);
1198 }
1199
1200 ring->jcur = ring->jnext = 0;
1201
1202 /* Create DMA tag for jumbo Rx ring. */
1203 error = bus_dma_tag_create(sc->nfe_parent_tag,
1204 NFE_RING_ALIGN, 0, /* alignment, boundary */
1205 BUS_SPACE_MAXADDR, /* lowaddr */
1206 BUS_SPACE_MAXADDR, /* highaddr */
1207 NULL, NULL, /* filter, filterarg */
1208 NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsize */
1209 1, /* nsegments */
1210 NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsegsize */
1211 0, /* flags */
1212 NULL, NULL, /* lockfunc, lockarg */
1213 &ring->jrx_desc_tag);
1214 if (error != 0) {
1215 device_printf(sc->nfe_dev,
1216 "could not create jumbo ring DMA tag\n");
1217 goto fail;
1218 }
1219
1220 /* Create DMA tag for jumbo Rx buffers. */
1221 error = bus_dma_tag_create(sc->nfe_parent_tag,
1222 1, 0, /* alignment, boundary */
1223 BUS_SPACE_MAXADDR, /* lowaddr */
1224 BUS_SPACE_MAXADDR, /* highaddr */
1225 NULL, NULL, /* filter, filterarg */
1226 MJUM9BYTES, /* maxsize */
1227 1, /* nsegments */
1228 MJUM9BYTES, /* maxsegsize */
1229 0, /* flags */
1230 NULL, NULL, /* lockfunc, lockarg */
1231 &ring->jrx_data_tag);
1232 if (error != 0) {
1233 device_printf(sc->nfe_dev,
1234 "could not create jumbo Rx buffer DMA tag\n");
1235 goto fail;
1236 }
1237
1238 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
1239 error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK |
1240 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map);
1241 if (error != 0) {
1242 device_printf(sc->nfe_dev,
1243 "could not allocate DMA'able memory for jumbo Rx ring\n");
1244 goto fail;
1245 }
1246 if (sc->nfe_flags & NFE_40BIT_ADDR)
1247 ring->jdesc64 = desc;
1248 else
1249 ring->jdesc32 = desc;
1250
1251 ctx.nfe_busaddr = 0;
1252 error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc,
1253 NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1254 if (error != 0) {
1255 device_printf(sc->nfe_dev,
1256 "could not load DMA'able memory for jumbo Rx ring\n");
1257 goto fail;
1258 }
1259 ring->jphysaddr = ctx.nfe_busaddr;
1260
1261 /* Create DMA maps for jumbo Rx buffers. */
1262 error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map);
1263 if (error != 0) {
1264 device_printf(sc->nfe_dev,
1265 "could not create jumbo Rx DMA spare map\n");
1266 goto fail;
1267 }
1268
1269 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1270 data = &sc->jrxq.jdata[i];
1271 data->rx_data_map = NULL;
1272 data->m = NULL;
1273 error = bus_dmamap_create(ring->jrx_data_tag, 0,
1274 &data->rx_data_map);
1275 if (error != 0) {
1276 device_printf(sc->nfe_dev,
1277 "could not create jumbo Rx DMA map\n");
1278 goto fail;
1279 }
1280 }
1281
1282 return;
1283
1284 fail:
1285 /*
1286 * Running without jumbo frame support is OK for most cases,
1287 * so don't fail if the DMA tag/map for jumbo frames cannot be created.
1288 */
1289 nfe_free_jrx_ring(sc, ring);
1290 device_printf(sc->nfe_dev, "disabling jumbo frame support due to "
1291 "resource shortage\n");
1292 sc->nfe_jumbo_disable = 1;
1293 }
1294
1295 static int
1296 nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1297 {
1298 void *desc;
1299 size_t descsize;
1300 int i;
1301
1302 ring->cur = ring->next = 0;
1303 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1304 desc = ring->desc64;
1305 descsize = sizeof (struct nfe_desc64);
1306 } else {
1307 desc = ring->desc32;
1308 descsize = sizeof (struct nfe_desc32);
1309 }
1310 bzero(desc, descsize * NFE_RX_RING_COUNT);
1311 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1312 if (nfe_newbuf(sc, i) != 0)
1313 return (ENOBUFS);
1314 }
1315
1316 bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
1317 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1318
1319 return (0);
1320 }
1321
1322 static int
1323 nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1324 {
1325 void *desc;
1326 size_t descsize;
1327 int i;
1328
1329 ring->jcur = ring->jnext = 0;
1330 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1331 desc = ring->jdesc64;
1332 descsize = sizeof (struct nfe_desc64);
1333 } else {
1334 desc = ring->jdesc32;
1335 descsize = sizeof (struct nfe_desc32);
1336 }
1337 bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT);
1338 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1339 if (nfe_jnewbuf(sc, i) != 0)
1340 return (ENOBUFS);
1341 }
1342
1343 bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map,
1344 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1345
1346 return (0);
1347 }
1348
1349 static void
1350 nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1351 {
1352 struct nfe_rx_data *data;
1353 void *desc;
1354 int i;
1355
1356 if (sc->nfe_flags & NFE_40BIT_ADDR)
1357 desc = ring->desc64;
1358 else
1359 desc = ring->desc32;
1360
1361 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1362 data = &ring->data[i];
1363 if (data->rx_data_map != NULL) {
1364 bus_dmamap_destroy(ring->rx_data_tag,
1365 data->rx_data_map);
1366 data->rx_data_map = NULL;
1367 }
1368 if (data->m != NULL) {
1369 m_freem(data->m);
1370 data->m = NULL;
1371 }
1372 }
1373 if (ring->rx_data_tag != NULL) {
1374 if (ring->rx_spare_map != NULL) {
1375 bus_dmamap_destroy(ring->rx_data_tag,
1376 ring->rx_spare_map);
1377 ring->rx_spare_map = NULL;
1378 }
1379 bus_dma_tag_destroy(ring->rx_data_tag);
1380 ring->rx_data_tag = NULL;
1381 }
1382
1383 if (desc != NULL) {
1384 bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
1385 bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
1386 ring->desc64 = NULL;
1387 ring->desc32 = NULL;
1388 }
1389 if (ring->rx_desc_tag != NULL) {
1390 bus_dma_tag_destroy(ring->rx_desc_tag);
1391 ring->rx_desc_tag = NULL;
1392 }
1393 }
1394
1395 static void
1396 nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1397 {
1398 struct nfe_rx_data *data;
1399 void *desc;
1400 int i;
1401
1402 if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
1403 return;
1404
1405 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1406 desc = ring->jdesc64;
1407 } else {
1408 desc = ring->jdesc32;
1409 }
1410
1411 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1412 data = &ring->jdata[i];
1413 if (data->rx_data_map != NULL) {
1414 bus_dmamap_destroy(ring->jrx_data_tag,
1415 data->rx_data_map);
1416 data->rx_data_map = NULL;
1417 }
1418 if (data->m != NULL) {
1419 m_freem(data->m);
1420 data->m = NULL;
1421 }
1422 }
1423 if (ring->jrx_data_tag != NULL) {
1424 if (ring->jrx_spare_map != NULL) {
1425 bus_dmamap_destroy(ring->jrx_data_tag,
1426 ring->jrx_spare_map);
1427 ring->jrx_spare_map = NULL;
1428 }
1429 bus_dma_tag_destroy(ring->jrx_data_tag);
1430 ring->jrx_data_tag = NULL;
1431 }
1432
1433 if (desc != NULL) {
1434 bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map);
1435 bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map);
1436 ring->jdesc64 = NULL;
1437 ring->jdesc32 = NULL;
1438 }
1439
1440 if (ring->jrx_desc_tag != NULL) {
1441 bus_dma_tag_destroy(ring->jrx_desc_tag);
1442 ring->jrx_desc_tag = NULL;
1443 }
1444 }
1445
1446 static int
1447 nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1448 {
1449 struct nfe_dmamap_arg ctx;
1450 int i, error;
1451 void *desc;
1452 int descsize;
1453
1454 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1455 desc = ring->desc64;
1456 descsize = sizeof (struct nfe_desc64);
1457 } else {
1458 desc = ring->desc32;
1459 descsize = sizeof (struct nfe_desc32);
1460 }
1461
1462 ring->queued = 0;
1463 ring->cur = ring->next = 0;
1464
1465 error = bus_dma_tag_create(sc->nfe_parent_tag,
1466 NFE_RING_ALIGN, 0, /* alignment, boundary */
1467 BUS_SPACE_MAXADDR, /* lowaddr */
1468 BUS_SPACE_MAXADDR, /* highaddr */
1469 NULL, NULL, /* filter, filterarg */
1470 NFE_TX_RING_COUNT * descsize, 1, /* maxsize, nsegments */
1471 NFE_TX_RING_COUNT * descsize, /* maxsegsize */
1472 0, /* flags */
1473 NULL, NULL, /* lockfunc, lockarg */
1474 &ring->tx_desc_tag);
1475 if (error != 0) {
1476 device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
1477 goto fail;
1478 }
1479
1480 error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK |
1481 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map);
1482 if (error != 0) {
1483 device_printf(sc->nfe_dev, "could not create desc DMA map\n");
1484 goto fail;
1485 }
1486 if (sc->nfe_flags & NFE_40BIT_ADDR)
1487 ring->desc64 = desc;
1488 else
1489 ring->desc32 = desc;
1490
1491 ctx.nfe_busaddr = 0;
1492 error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc,
1493 NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1494 if (error != 0) {
1495 device_printf(sc->nfe_dev, "could not load desc DMA map\n");
1496 goto fail;
1497 }
1498 ring->physaddr = ctx.nfe_busaddr;
1499
1500 error = bus_dma_tag_create(sc->nfe_parent_tag,
1501 1, 0,
1502 BUS_SPACE_MAXADDR,
1503 BUS_SPACE_MAXADDR,
1504 NULL, NULL,
1505 NFE_TSO_MAXSIZE,
1506 NFE_MAX_SCATTER,
1507 NFE_TSO_MAXSGSIZE,
1508 0,
1509 NULL, NULL,
1510 &ring->tx_data_tag);
1511 if (error != 0) {
1512 device_printf(sc->nfe_dev, "could not create Tx DMA tag\n");
1513 goto fail;
1514 }
1515
1516 for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1517 error = bus_dmamap_create(ring->tx_data_tag, 0,
1518 &ring->data[i].tx_data_map);
1519 if (error != 0) {
1520 device_printf(sc->nfe_dev,
1521 "could not create Tx DMA map\n");
1522 goto fail;
1523 }
1524 }
1525
1526 fail:
1527 return (error);
1528 }
1529
1530 static void
1531 nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1532 {
1533 void *desc;
1534 size_t descsize;
1535
1536 sc->nfe_force_tx = 0;
1537 ring->queued = 0;
1538 ring->cur = ring->next = 0;
1539 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1540 desc = ring->desc64;
1541 descsize = sizeof (struct nfe_desc64);
1542 } else {
1543 desc = ring->desc32;
1544 descsize = sizeof (struct nfe_desc32);
1545 }
1546 bzero(desc, descsize * NFE_TX_RING_COUNT);
1547
1548 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1549 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1550 }
1551
1552 static void
1553 nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1554 {
1555 struct nfe_tx_data *data;
1556 void *desc;
1557 int i;
1558
1559 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1560 desc = ring->desc64;
1561 } else {
1562 desc = ring->desc32;
1563 }
1564
1565 for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1566 data = &ring->data[i];
1567
1568 if (data->m != NULL) {
1569 bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map,
1570 BUS_DMASYNC_POSTWRITE);
1571 bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map);
1572 m_freem(data->m);
1573 data->m = NULL;
1574 }
1575 if (data->tx_data_map != NULL) {
1576 bus_dmamap_destroy(ring->tx_data_tag,
1577 data->tx_data_map);
1578 data->tx_data_map = NULL;
1579 }
1580 }
1581
1582 if (ring->tx_data_tag != NULL) {
1583 bus_dma_tag_destroy(ring->tx_data_tag);
1584 ring->tx_data_tag = NULL;
1585 }
1586
1587 if (desc != NULL) {
1588 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1589 BUS_DMASYNC_POSTWRITE);
1590 bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
1591 bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
1592 ring->desc64 = NULL;
1593 ring->desc32 = NULL;
1594 bus_dma_tag_destroy(ring->tx_desc_tag);
1595 ring->tx_desc_tag = NULL;
1596 }
1597 }
1598
1599 #ifdef DEVICE_POLLING
1600 static poll_handler_t nfe_poll;
1601
1602 static int
1603 nfe_poll(if_t ifp, enum poll_cmd cmd, int count)
1604 {
1605 struct nfe_softc *sc = if_getsoftc(ifp);
1606 uint32_t r;
1607 int rx_npkts = 0;
1608
1609 NFE_LOCK(sc);
1610
1611 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
1612 NFE_UNLOCK(sc);
1613 return (rx_npkts);
1614 }
1615
1616 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
1617 rx_npkts = nfe_jrxeof(sc, count, &rx_npkts);
1618 else
1619 rx_npkts = nfe_rxeof(sc, count, &rx_npkts);
1620 nfe_txeof(sc);
1621 if (!if_sendq_empty(ifp))
1622 nfe_start_locked(ifp);
1623
1624 if (cmd == POLL_AND_CHECK_STATUS) {
1625 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
1626 NFE_UNLOCK(sc);
1627 return (rx_npkts);
1628 }
1629 NFE_WRITE(sc, sc->nfe_irq_status, r);
1630
1631 if (r & NFE_IRQ_LINK) {
1632 NFE_READ(sc, NFE_PHY_STATUS);
1633 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1634 DPRINTF(sc, "link state changed\n");
1635 }
1636 }
1637 NFE_UNLOCK(sc);
1638 return (rx_npkts);
1639 }
1640 #endif /* DEVICE_POLLING */
1641
1642 static void
1643 nfe_set_intr(struct nfe_softc *sc)
1644 {
1645
1646 if (sc->nfe_msi != 0)
1647 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
1648 }
1649
1650 /* In MSI-X mode, a write to the mask registers behaves as XOR. */
1651 static __inline void
1652 nfe_enable_intr(struct nfe_softc *sc)
1653 {
1654
1655 if (sc->nfe_msix != 0) {
1656 /* XXX Should have a better way to enable interrupts! */
1657 if (NFE_READ(sc, sc->nfe_irq_mask) == 0)
1658 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
1659 } else
1660 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
1661 }
1662
1663 static __inline void
1664 nfe_disable_intr(struct nfe_softc *sc)
1665 {
1666
1667 if (sc->nfe_msix != 0) {
1668 /* XXX Should have a better way to disable interrupts! */
1669 if (NFE_READ(sc, sc->nfe_irq_mask) != 0)
1670 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
1671 } else
1672 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
1673 }
1674
1675 static int
1676 nfe_ioctl(if_t ifp, u_long cmd, caddr_t data)
1677 {
1678 struct nfe_softc *sc;
1679 struct ifreq *ifr;
1680 struct mii_data *mii;
1681 int error, init, mask;
1682
1683 sc = if_getsoftc(ifp);
1684 ifr = (struct ifreq *) data;
1685 error = 0;
1686 init = 0;
1687 switch (cmd) {
1688 case SIOCSIFMTU:
1689 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU)
1690 error = EINVAL;
1691 else if (if_getmtu(ifp) != ifr->ifr_mtu) {
1692 if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) ||
1693 (sc->nfe_jumbo_disable != 0)) &&
1694 ifr->ifr_mtu > ETHERMTU)
1695 error = EINVAL;
1696 else {
1697 NFE_LOCK(sc);
1698 if_setmtu(ifp, ifr->ifr_mtu);
1699 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1700 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1701 nfe_init_locked(sc);
1702 }
1703 NFE_UNLOCK(sc);
1704 }
1705 }
1706 break;
1707 case SIOCSIFFLAGS:
1708 NFE_LOCK(sc);
1709 if (if_getflags(ifp) & IFF_UP) {
1710 /*
1711 * If only the PROMISC or ALLMULTI flag changes, then
1712 * don't do a full re-init of the chip, just update
1713 * the Rx filter.
1714 */
1715 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) &&
1716 ((if_getflags(ifp) ^ sc->nfe_if_flags) &
1717 (IFF_ALLMULTI | IFF_PROMISC)) != 0)
1718 nfe_setmulti(sc);
1719 else
1720 nfe_init_locked(sc);
1721 } else {
1722 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
1723 nfe_stop(ifp);
1724 }
1725 sc->nfe_if_flags = if_getflags(ifp);
1726 NFE_UNLOCK(sc);
1727 error = 0;
1728 break;
1729 case SIOCADDMULTI:
1730 case SIOCDELMULTI:
1731 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
1732 NFE_LOCK(sc);
1733 nfe_setmulti(sc);
1734 NFE_UNLOCK(sc);
1735 error = 0;
1736 }
1737 break;
1738 case SIOCSIFMEDIA:
1739 case SIOCGIFMEDIA:
1740 mii = device_get_softc(sc->nfe_miibus);
1741 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1742 break;
1743 case SIOCSIFCAP:
1744 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
1745 #ifdef DEVICE_POLLING
1746 if ((mask & IFCAP_POLLING) != 0) {
1747 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
1748 error = ether_poll_register(nfe_poll, ifp);
1749 if (error)
1750 break;
1751 NFE_LOCK(sc);
1752 nfe_disable_intr(sc);
1753 if_setcapenablebit(ifp, IFCAP_POLLING, 0);
1754 NFE_UNLOCK(sc);
1755 } else {
1756 error = ether_poll_deregister(ifp);
1757 /* Enable interrupt even in error case */
1758 NFE_LOCK(sc);
1759 nfe_enable_intr(sc);
1760 if_setcapenablebit(ifp, 0, IFCAP_POLLING);
1761 NFE_UNLOCK(sc);
1762 }
1763 }
1764 #endif /* DEVICE_POLLING */
1765 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
1766 (if_getcapabilities(ifp) & IFCAP_WOL_MAGIC) != 0)
1767 if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
1768 if ((mask & IFCAP_TXCSUM) != 0 &&
1769 (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) {
1770 if_togglecapenable(ifp, IFCAP_TXCSUM);
1771 if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
1772 if_sethwassistbits(ifp, NFE_CSUM_FEATURES, 0);
1773 else
1774 if_sethwassistbits(ifp, 0, NFE_CSUM_FEATURES);
1775 }
1776 if ((mask & IFCAP_RXCSUM) != 0 &&
1777 (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0) {
1778 if_togglecapenable(ifp, IFCAP_RXCSUM);
1779 init++;
1780 }
1781 if ((mask & IFCAP_TSO4) != 0 &&
1782 (if_getcapabilities(ifp) & IFCAP_TSO4) != 0) {
1783 if_togglecapenable(ifp, IFCAP_TSO4);
1784 if ((IFCAP_TSO4 & if_getcapenable(ifp)) != 0)
1785 if_sethwassistbits(ifp, CSUM_TSO, 0);
1786 else
1787 if_sethwassistbits(ifp, 0, CSUM_TSO);
1788 }
1789 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
1790 (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0)
1791 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
1792 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
1793 (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
1794 if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
1795 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0)
1796 if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWTSO);
1797 init++;
1798 }
1799 /*
1800 * XXX
1801 * It seems that VLAN stripping requires Rx checksum offload.
1802 * Unfortunately FreeBSD has no way to disable only Rx side
1803 * VLAN stripping. So when we know Rx checksum offload is
1804 * disabled, turn the entire hardware VLAN assist off.
1805 */
1806 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) == 0) {
1807 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
1808 init++;
1809 if_setcapenablebit(ifp, 0,
1810 (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO));
1811 }
1812 if (init > 0 && (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
1813 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1814 nfe_init(sc);
1815 }
1816 if_vlancap(ifp);
1817 break;
1818 default:
1819 error = ether_ioctl(ifp, cmd, data);
1820 break;
1821 }
1822
1823 return (error);
1824 }
1825
1826 static int
1827 nfe_intr(void *arg)
1828 {
1829 struct nfe_softc *sc;
1830 uint32_t status;
1831
1832 sc = (struct nfe_softc *)arg;
1833
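/* Interrupt filter: acknowledge nothing here; if the interrupt is ours, mask further interrupts and defer all processing to the taskqueue. */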
1834 status = NFE_READ(sc, sc->nfe_irq_status);
1835 if (status == 0 || status == 0xffffffff)
1836 return (FILTER_STRAY);
1837 nfe_disable_intr(sc);
1838 taskqueue_enqueue(sc->nfe_tq, &sc->nfe_int_task);
1839
1840 return (FILTER_HANDLED);
1841 }
1842
1843 static void
1844 nfe_int_task(void *arg, int pending)
1845 {
1846 struct nfe_softc *sc = arg;
1847 if_t ifp = sc->nfe_ifp;
1848 uint32_t r;
1849 int domore;
1850
1851 NFE_LOCK(sc);
1852
1853 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
1854 nfe_enable_intr(sc);
1855 NFE_UNLOCK(sc);
1856 return; /* not for us */
1857 }
1858 NFE_WRITE(sc, sc->nfe_irq_status, r);
1859
1860 	DPRINTFN(sc, 5, "nfe_int_task: interrupt register %x\n", r);
1861
1862 #ifdef DEVICE_POLLING
1863 if (if_getcapenable(ifp) & IFCAP_POLLING) {
1864 NFE_UNLOCK(sc);
1865 return;
1866 }
1867 #endif
1868
1869 if (r & NFE_IRQ_LINK) {
1870 NFE_READ(sc, NFE_PHY_STATUS);
1871 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1872 DPRINTF(sc, "link state changed\n");
1873 }
1874
1875 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
1876 NFE_UNLOCK(sc);
1877 nfe_disable_intr(sc);
1878 return;
1879 }
1880
1881 domore = 0;
1882 /* check Rx ring */
1883 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
1884 domore = nfe_jrxeof(sc, sc->nfe_process_limit, NULL);
1885 else
1886 domore = nfe_rxeof(sc, sc->nfe_process_limit, NULL);
1887 /* check Tx ring */
1888 nfe_txeof(sc);
1889
1890 if (!if_sendq_empty(ifp))
1891 nfe_start_locked(ifp);
1892
1893 NFE_UNLOCK(sc);
1894
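	/*
	 * More work is pending (the Rx process limit was hit or new events
	 * arrived while we were running): reschedule the task instead of
	 * unmasking interrupts.
	 */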
1895 if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) {
1896 taskqueue_enqueue(sc->nfe_tq, &sc->nfe_int_task);
1897 return;
1898 }
1899
1900 /* Reenable interrupts. */
1901 nfe_enable_intr(sc);
1902 }
1903
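/*
 * Re-arm an Rx descriptor with the mbuf it already owns; used on error and
 * drop paths where no replacement buffer is allocated.
 */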
1904 static __inline void
1905 nfe_discard_rxbuf(struct nfe_softc *sc, int idx)
1906 {
1907 struct nfe_desc32 *desc32;
1908 struct nfe_desc64 *desc64;
1909 struct nfe_rx_data *data;
1910 struct mbuf *m;
1911
1912 data = &sc->rxq.data[idx];
1913 m = data->m;
1914
1915 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1916 desc64 = &sc->rxq.desc64[idx];
1917 /* VLAN packet may have overwritten it. */
1918 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
1919 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
1920 desc64->length = htole16(m->m_len);
1921 desc64->flags = htole16(NFE_RX_READY);
1922 } else {
1923 desc32 = &sc->rxq.desc32[idx];
1924 desc32->length = htole16(m->m_len);
1925 desc32->flags = htole16(NFE_RX_READY);
1926 }
1927 }
1928
1929 static __inline void
1930 nfe_discard_jrxbuf(struct nfe_softc *sc, int idx)
1931 {
1932 struct nfe_desc32 *desc32;
1933 struct nfe_desc64 *desc64;
1934 struct nfe_rx_data *data;
1935 struct mbuf *m;
1936
1937 data = &sc->jrxq.jdata[idx];
1938 m = data->m;
1939
1940 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1941 desc64 = &sc->jrxq.jdesc64[idx];
1942 /* VLAN packet may have overwritten it. */
1943 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
1944 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
1945 desc64->length = htole16(m->m_len);
1946 desc64->flags = htole16(NFE_RX_READY);
1947 } else {
1948 desc32 = &sc->jrxq.jdesc32[idx];
1949 desc32->length = htole16(m->m_len);
1950 desc32->flags = htole16(NFE_RX_READY);
1951 }
1952 }
1953
1954 static int
1955 nfe_newbuf(struct nfe_softc *sc, int idx)
1956 {
1957 struct nfe_rx_data *data;
1958 struct nfe_desc32 *desc32;
1959 struct nfe_desc64 *desc64;
1960 struct mbuf *m;
1961 bus_dma_segment_t segs[1];
1962 bus_dmamap_t map;
1963 int nsegs;
1964
1965 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1966 if (m == NULL)
1967 return (ENOBUFS);
1968
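	/*
	 * ETHER_ALIGN shifts the payload so that the IP header following
	 * the 14-byte Ethernet header ends up 32-bit aligned.
	 */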
1969 m->m_len = m->m_pkthdr.len = MCLBYTES;
1970 m_adj(m, ETHER_ALIGN);
1971
1972 if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map,
1973 m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
1974 m_freem(m);
1975 return (ENOBUFS);
1976 }
1977 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1978
1979 data = &sc->rxq.data[idx];
1980 if (data->m != NULL) {
1981 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
1982 BUS_DMASYNC_POSTREAD);
1983 bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map);
1984 }
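	/*
	 * The new mbuf was loaded on the spare map above; swap the maps so
	 * a failed allocation can never leave this slot without a mapped
	 * buffer.
	 */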
1985 map = data->rx_data_map;
1986 data->rx_data_map = sc->rxq.rx_spare_map;
1987 sc->rxq.rx_spare_map = map;
1988 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
1989 BUS_DMASYNC_PREREAD);
1990 data->paddr = segs[0].ds_addr;
1991 data->m = m;
1992 /* update mapping address in h/w descriptor */
1993 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1994 desc64 = &sc->rxq.desc64[idx];
1995 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
1996 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
1997 desc64->length = htole16(segs[0].ds_len);
1998 desc64->flags = htole16(NFE_RX_READY);
1999 } else {
2000 desc32 = &sc->rxq.desc32[idx];
2001 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2002 desc32->length = htole16(segs[0].ds_len);
2003 desc32->flags = htole16(NFE_RX_READY);
2004 }
2005
2006 return (0);
2007 }
2008
2009 static int
2010 nfe_jnewbuf(struct nfe_softc *sc, int idx)
2011 {
2012 struct nfe_rx_data *data;
2013 struct nfe_desc32 *desc32;
2014 struct nfe_desc64 *desc64;
2015 struct mbuf *m;
2016 bus_dma_segment_t segs[1];
2017 bus_dmamap_t map;
2018 int nsegs;
2019
2020 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
2021 if (m == NULL)
2022 return (ENOBUFS);
2023 m->m_pkthdr.len = m->m_len = MJUM9BYTES;
2024 m_adj(m, ETHER_ALIGN);
2025
2026 if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag,
2027 sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
2028 m_freem(m);
2029 return (ENOBUFS);
2030 }
2031 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2032
2033 data = &sc->jrxq.jdata[idx];
2034 if (data->m != NULL) {
2035 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
2036 BUS_DMASYNC_POSTREAD);
2037 bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map);
2038 }
2039 map = data->rx_data_map;
2040 data->rx_data_map = sc->jrxq.jrx_spare_map;
2041 sc->jrxq.jrx_spare_map = map;
2042 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
2043 BUS_DMASYNC_PREREAD);
2044 data->paddr = segs[0].ds_addr;
2045 data->m = m;
2046 /* update mapping address in h/w descriptor */
2047 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2048 desc64 = &sc->jrxq.jdesc64[idx];
2049 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
2050 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2051 desc64->length = htole16(segs[0].ds_len);
2052 desc64->flags = htole16(NFE_RX_READY);
2053 } else {
2054 desc32 = &sc->jrxq.jdesc32[idx];
2055 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2056 desc32->length = htole16(segs[0].ds_len);
2057 desc32->flags = htole16(NFE_RX_READY);
2058 }
2059
2060 return (0);
2061 }
2062
2063 static int
2064 nfe_rxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
2065 {
2066 if_t ifp = sc->nfe_ifp;
2067 struct nfe_desc32 *desc32;
2068 struct nfe_desc64 *desc64;
2069 struct nfe_rx_data *data;
2070 struct mbuf *m;
2071 uint16_t flags;
2072 int len, prog, rx_npkts;
2073 uint32_t vtag = 0;
2074
2075 rx_npkts = 0;
2076 NFE_LOCK_ASSERT(sc);
2077
2078 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
2079 BUS_DMASYNC_POSTREAD);
2080
2081 for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) {
2082 if (count <= 0)
2083 break;
2084 count--;
2085
2086 data = &sc->rxq.data[sc->rxq.cur];
2087
2088 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2089 desc64 = &sc->rxq.desc64[sc->rxq.cur];
2090 vtag = le32toh(desc64->physaddr[1]);
2091 flags = le16toh(desc64->flags);
2092 len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
2093 } else {
2094 desc32 = &sc->rxq.desc32[sc->rxq.cur];
2095 flags = le16toh(desc32->flags);
2096 len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
2097 }
2098
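		/* NFE_RX_READY still set: the chip owns this descriptor. */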
2099 if (flags & NFE_RX_READY)
2100 break;
2101 prog++;
2102 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2103 if (!(flags & NFE_RX_VALID_V1)) {
2104 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2105 nfe_discard_rxbuf(sc, sc->rxq.cur);
2106 continue;
2107 }
2108 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
2109 flags &= ~NFE_RX_ERROR;
2110 len--; /* fix buffer length */
2111 }
2112 } else {
2113 if (!(flags & NFE_RX_VALID_V2)) {
2114 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2115 nfe_discard_rxbuf(sc, sc->rxq.cur);
2116 continue;
2117 }
2118
2119 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
2120 flags &= ~NFE_RX_ERROR;
2121 len--; /* fix buffer length */
2122 }
2123 }
2124
2125 if (flags & NFE_RX_ERROR) {
2126 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2127 nfe_discard_rxbuf(sc, sc->rxq.cur);
2128 continue;
2129 }
2130
2131 m = data->m;
2132 if (nfe_newbuf(sc, sc->rxq.cur) != 0) {
2133 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
2134 nfe_discard_rxbuf(sc, sc->rxq.cur);
2135 continue;
2136 }
2137
2138 if ((vtag & NFE_RX_VTAG) != 0 &&
2139 (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
2140 m->m_pkthdr.ether_vtag = vtag & 0xffff;
2141 m->m_flags |= M_VLANTAG;
2142 }
2143
2144 m->m_pkthdr.len = m->m_len = len;
2145 m->m_pkthdr.rcvif = ifp;
2146
2147 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
2148 if ((flags & NFE_RX_IP_CSUMOK) != 0) {
2149 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2150 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2151 if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
2152 (flags & NFE_RX_UDP_CSUMOK) != 0) {
2153 m->m_pkthdr.csum_flags |=
2154 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2155 m->m_pkthdr.csum_data = 0xffff;
2156 }
2157 }
2158 }
2159
2160 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2161
2162 NFE_UNLOCK(sc);
2163 if_input(ifp, m);
2164 NFE_LOCK(sc);
2165 rx_npkts++;
2166 }
2167
2168 if (prog > 0)
2169 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
2170 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2171
2172 if (rx_npktsp != NULL)
2173 *rx_npktsp = rx_npkts;
2174 return (count > 0 ? 0 : EAGAIN);
2175 }
2176
2177 static int
2178 nfe_jrxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
2179 {
2180 if_t ifp = sc->nfe_ifp;
2181 struct nfe_desc32 *desc32;
2182 struct nfe_desc64 *desc64;
2183 struct nfe_rx_data *data;
2184 struct mbuf *m;
2185 uint16_t flags;
2186 int len, prog, rx_npkts;
2187 uint32_t vtag = 0;
2188
2189 rx_npkts = 0;
2190 NFE_LOCK_ASSERT(sc);
2191
2192 bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2193 BUS_DMASYNC_POSTREAD);
2194
2195 for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT),
2196 vtag = 0) {
2197 if (count <= 0)
2198 break;
2199 count--;
2200
2201 data = &sc->jrxq.jdata[sc->jrxq.jcur];
2202
2203 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2204 desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur];
2205 vtag = le32toh(desc64->physaddr[1]);
2206 flags = le16toh(desc64->flags);
2207 len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
2208 } else {
2209 desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur];
2210 flags = le16toh(desc32->flags);
2211 len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
2212 }
2213
2214 if (flags & NFE_RX_READY)
2215 break;
2216 prog++;
2217 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2218 if (!(flags & NFE_RX_VALID_V1)) {
2219 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2220 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2221 continue;
2222 }
2223 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
2224 flags &= ~NFE_RX_ERROR;
2225 len--; /* fix buffer length */
2226 }
2227 } else {
2228 if (!(flags & NFE_RX_VALID_V2)) {
2229 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2230 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2231 continue;
2232 }
2233
2234 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
2235 flags &= ~NFE_RX_ERROR;
2236 len--; /* fix buffer length */
2237 }
2238 }
2239
2240 if (flags & NFE_RX_ERROR) {
2241 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2242 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2243 continue;
2244 }
2245
2246 m = data->m;
2247 if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) {
2248 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
2249 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2250 continue;
2251 }
2252
2253 if ((vtag & NFE_RX_VTAG) != 0 &&
2254 (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
2255 m->m_pkthdr.ether_vtag = vtag & 0xffff;
2256 m->m_flags |= M_VLANTAG;
2257 }
2258
2259 m->m_pkthdr.len = m->m_len = len;
2260 m->m_pkthdr.rcvif = ifp;
2261
2262 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
2263 if ((flags & NFE_RX_IP_CSUMOK) != 0) {
2264 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2265 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2266 if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
2267 (flags & NFE_RX_UDP_CSUMOK) != 0) {
2268 m->m_pkthdr.csum_flags |=
2269 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2270 m->m_pkthdr.csum_data = 0xffff;
2271 }
2272 }
2273 }
2274
2275 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2276
2277 NFE_UNLOCK(sc);
2278 if_input(ifp, m);
2279 NFE_LOCK(sc);
2280 rx_npkts++;
2281 }
2282
2283 if (prog > 0)
2284 bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2285 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2286
2287 if (rx_npktsp != NULL)
2288 *rx_npktsp = rx_npkts;
2289 return (count > 0 ? 0 : EAGAIN);
2290 }
2291
2292 static void
2293 nfe_txeof(struct nfe_softc *sc)
2294 {
2295 if_t ifp = sc->nfe_ifp;
2296 struct nfe_desc32 *desc32;
2297 struct nfe_desc64 *desc64;
2298 struct nfe_tx_data *data = NULL;
2299 uint16_t flags;
2300 int cons, prog;
2301
2302 NFE_LOCK_ASSERT(sc);
2303
2304 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2305 BUS_DMASYNC_POSTREAD);
2306
2307 prog = 0;
2308 for (cons = sc->txq.next; cons != sc->txq.cur;
2309 NFE_INC(cons, NFE_TX_RING_COUNT)) {
2310 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2311 desc64 = &sc->txq.desc64[cons];
2312 flags = le16toh(desc64->flags);
2313 } else {
2314 desc32 = &sc->txq.desc32[cons];
2315 flags = le16toh(desc32->flags);
2316 }
2317
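		/* NFE_TX_VALID still set: the chip is not done with it yet. */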
2318 if (flags & NFE_TX_VALID)
2319 break;
2320
2321 prog++;
2322 sc->txq.queued--;
2323 data = &sc->txq.data[cons];
2324
2325 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2326 if ((flags & NFE_TX_LASTFRAG_V1) == 0)
2327 continue;
2328 if ((flags & NFE_TX_ERROR_V1) != 0) {
2329 device_printf(sc->nfe_dev,
2330 "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR);
2331
2332 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2333 } else
2334 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2335 } else {
2336 if ((flags & NFE_TX_LASTFRAG_V2) == 0)
2337 continue;
2338 if ((flags & NFE_TX_ERROR_V2) != 0) {
2339 device_printf(sc->nfe_dev,
2340 "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR);
2341 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2342 } else
2343 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2344 }
2345
2346 /* last fragment of the mbuf chain transmitted */
2347 KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__));
2348 bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map,
2349 BUS_DMASYNC_POSTWRITE);
2350 bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map);
2351 m_freem(data->m);
2352 data->m = NULL;
2353 }
2354
2355 if (prog > 0) {
2356 sc->nfe_force_tx = 0;
2357 sc->txq.next = cons;
2358 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2359 if (sc->txq.queued == 0)
2360 sc->nfe_watchdog_timer = 0;
2361 }
2362 }
2363
2364 static int
2365 nfe_encap(struct nfe_softc *sc, struct mbuf **m_head)
2366 {
2367 struct nfe_desc32 *desc32 = NULL;
2368 struct nfe_desc64 *desc64 = NULL;
2369 bus_dmamap_t map;
2370 bus_dma_segment_t segs[NFE_MAX_SCATTER];
2371 int error, i, nsegs, prod, si;
2372 uint32_t tsosegsz;
2373 uint16_t cflags, flags;
2374 struct mbuf *m;
2375
2376 prod = si = sc->txq.cur;
2377 map = sc->txq.data[prod].tx_data_map;
2378
2379 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs,
2380 &nsegs, BUS_DMA_NOWAIT);
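	/*
	 * EFBIG: the chain needs more than NFE_MAX_SCATTER segments.
	 * Collapse it into fewer mbufs and retry the load once.
	 */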
2381 if (error == EFBIG) {
2382 m = m_collapse(*m_head, M_NOWAIT, NFE_MAX_SCATTER);
2383 if (m == NULL) {
2384 m_freem(*m_head);
2385 *m_head = NULL;
2386 return (ENOBUFS);
2387 }
2388 *m_head = m;
2389 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map,
2390 *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2391 if (error != 0) {
2392 m_freem(*m_head);
2393 *m_head = NULL;
2394 return (ENOBUFS);
2395 }
2396 } else if (error != 0)
2397 return (error);
2398 if (nsegs == 0) {
2399 m_freem(*m_head);
2400 *m_head = NULL;
2401 return (EIO);
2402 }
2403
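	/*
	 * Keep a couple of descriptors in reserve; refuse the frame if the
	 * ring is about to fill up.
	 */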
2404 if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) {
2405 bus_dmamap_unload(sc->txq.tx_data_tag, map);
2406 return (ENOBUFS);
2407 }
2408
2409 m = *m_head;
2410 cflags = flags = 0;
2411 tsosegsz = 0;
2412 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2413 tsosegsz = (uint32_t)m->m_pkthdr.tso_segsz <<
2414 NFE_TX_TSO_SHIFT;
2415 cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
2416 cflags |= NFE_TX_TSO;
2417 } else if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) {
2418 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2419 cflags |= NFE_TX_IP_CSUM;
2420 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2421 cflags |= NFE_TX_TCP_UDP_CSUM;
2422 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2423 cflags |= NFE_TX_TCP_UDP_CSUM;
2424 }
2425
2426 for (i = 0; i < nsegs; i++) {
2427 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2428 desc64 = &sc->txq.desc64[prod];
2429 desc64->physaddr[0] =
2430 htole32(NFE_ADDR_HI(segs[i].ds_addr));
2431 desc64->physaddr[1] =
2432 htole32(NFE_ADDR_LO(segs[i].ds_addr));
2433 desc64->vtag = 0;
2434 desc64->length = htole16(segs[i].ds_len - 1);
2435 desc64->flags = htole16(flags);
2436 } else {
2437 desc32 = &sc->txq.desc32[prod];
2438 desc32->physaddr =
2439 htole32(NFE_ADDR_LO(segs[i].ds_addr));
2440 desc32->length = htole16(segs[i].ds_len - 1);
2441 desc32->flags = htole16(flags);
2442 }
2443
2444 /*
2445 * Setting of the valid bit in the first descriptor is
2446 * deferred until the whole chain is fully setup.
2447 */
2448 flags |= NFE_TX_VALID;
2449
2450 sc->txq.queued++;
2451 NFE_INC(prod, NFE_TX_RING_COUNT);
2452 }
2453
2454 /*
2455 * the whole mbuf chain has been DMA mapped, fix last/first descriptor.
2456 * csum flags, vtag and TSO belong to the first fragment only.
2457 */
2458 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2459 desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
2460 desc64 = &sc->txq.desc64[si];
2461 if ((m->m_flags & M_VLANTAG) != 0)
2462 desc64->vtag = htole32(NFE_TX_VTAG |
2463 m->m_pkthdr.ether_vtag);
2464 if (tsosegsz != 0) {
2465 /*
2466 * XXX
2467 * The following indicates the descriptor element
2468 * is a 32bit quantity.
2469 */
2470 desc64->length |= htole16((uint16_t)tsosegsz);
2471 desc64->flags |= htole16(tsosegsz >> 16);
2472 }
2473 /*
2474 * finally, set the valid/checksum/TSO bit in the first
2475 * descriptor.
2476 */
2477 desc64->flags |= htole16(NFE_TX_VALID | cflags);
2478 } else {
2479 if (sc->nfe_flags & NFE_JUMBO_SUP)
2480 desc32->flags |= htole16(NFE_TX_LASTFRAG_V2);
2481 else
2482 desc32->flags |= htole16(NFE_TX_LASTFRAG_V1);
2483 desc32 = &sc->txq.desc32[si];
2484 if (tsosegsz != 0) {
2485 /*
2486 * XXX
2487 * The following indicates the descriptor element
2488 * is a 32bit quantity.
2489 */
2490 desc32->length |= htole16((uint16_t)tsosegsz);
2491 desc32->flags |= htole16(tsosegsz >> 16);
2492 }
2493 /*
2494 * finally, set the valid/checksum/TSO bit in the first
2495 * descriptor.
2496 */
2497 desc32->flags |= htole16(NFE_TX_VALID | cflags);
2498 }
2499
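	/*
	 * The DMA map borrowed from the first slot goes to the last
	 * descriptor of the chain together with the mbuf; that is where
	 * nfe_txeof() looks for them once the final fragment completes.
	 */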
2500 sc->txq.cur = prod;
2501 prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT;
2502 sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map;
2503 sc->txq.data[prod].tx_data_map = map;
2504 sc->txq.data[prod].m = m;
2505
2506 bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);
2507
2508 return (0);
2509 }
2510
2511 struct nfe_hash_maddr_ctx {
2512 uint8_t addr[ETHER_ADDR_LEN];
2513 uint8_t mask[ETHER_ADDR_LEN];
2514 };
2515
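/*
 * if_foreach_llmaddr() callback: addr accumulates the bits set in every
 * multicast address, mask the bits clear in every address.  The chip
 * filters with a single address/mask register pair instead of a hash table.
 */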
2516 static u_int
2517 nfe_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2518 {
2519 struct nfe_hash_maddr_ctx *ctx = arg;
2520 uint8_t *addrp, mcaddr;
2521 int j;
2522
2523 addrp = LLADDR(sdl);
2524 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2525 mcaddr = addrp[j];
2526 ctx->addr[j] &= mcaddr;
2527 ctx->mask[j] &= ~mcaddr;
2528 }
2529
2530 return (1);
2531 }
2532
2533 static void
2534 nfe_setmulti(struct nfe_softc *sc)
2535 {
2536 if_t ifp = sc->nfe_ifp;
2537 struct nfe_hash_maddr_ctx ctx;
2538 uint32_t filter;
2539 uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = {
2540 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2541 };
2542 int i;
2543
2544 NFE_LOCK_ASSERT(sc);
2545
2546 if ((if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
2547 bzero(ctx.addr, ETHER_ADDR_LEN);
2548 bzero(ctx.mask, ETHER_ADDR_LEN);
2549 goto done;
2550 }
2551
2552 bcopy(etherbroadcastaddr, ctx.addr, ETHER_ADDR_LEN);
2553 bcopy(etherbroadcastaddr, ctx.mask, ETHER_ADDR_LEN);
2554
2555 if_foreach_llmaddr(ifp, nfe_hash_maddr, &ctx);
2556
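	/*
	 * Only bit positions that are identical in all addresses may be
	 * compared, so fold the common '1' bits into the mask as well.
	 */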
2557 for (i = 0; i < ETHER_ADDR_LEN; i++) {
2558 ctx.mask[i] |= ctx.addr[i];
2559 }
2560
2561 done:
2562 ctx.addr[0] |= 0x01; /* make sure multicast bit is set */
2563
2564 NFE_WRITE(sc, NFE_MULTIADDR_HI, ctx.addr[3] << 24 | ctx.addr[2] << 16 |
2565 ctx.addr[1] << 8 | ctx.addr[0]);
2566 NFE_WRITE(sc, NFE_MULTIADDR_LO,
2567 ctx.addr[5] << 8 | ctx.addr[4]);
2568 NFE_WRITE(sc, NFE_MULTIMASK_HI, ctx.mask[3] << 24 | ctx.mask[2] << 16 |
2569 ctx.mask[1] << 8 | ctx.mask[0]);
2570 NFE_WRITE(sc, NFE_MULTIMASK_LO,
2571 ctx.mask[5] << 8 | ctx.mask[4]);
2572
2573 filter = NFE_READ(sc, NFE_RXFILTER);
2574 filter &= NFE_PFF_RX_PAUSE;
2575 filter |= NFE_RXFILTER_MAGIC;
2576 filter |= (if_getflags(ifp) & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M;
2577 NFE_WRITE(sc, NFE_RXFILTER, filter);
2578 }
2579
2580 static void
2581 nfe_start(if_t ifp)
2582 {
2583 struct nfe_softc *sc = if_getsoftc(ifp);
2584
2585 NFE_LOCK(sc);
2586 nfe_start_locked(ifp);
2587 NFE_UNLOCK(sc);
2588 }
2589
2590 static void
2591 nfe_start_locked(if_t ifp)
2592 {
2593 struct nfe_softc *sc = if_getsoftc(ifp);
2594 struct mbuf *m0;
2595 int enq = 0;
2596
2597 NFE_LOCK_ASSERT(sc);
2598
2599 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2600 IFF_DRV_RUNNING || sc->nfe_link == 0)
2601 return;
2602
2603 while (!if_sendq_empty(ifp)) {
2604 m0 = if_dequeue(ifp);
2605
2606 if (m0 == NULL)
2607 break;
2608
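		/*
		 * nfe_encap() failed but kept the mbuf, typically because the
		 * Tx ring is full: requeue the packet and set IFF_DRV_OACTIVE
		 * until nfe_txeof() frees descriptors.
		 */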
2609 if (nfe_encap(sc, &m0) != 0) {
2610 if (m0 == NULL)
2611 break;
2612 if_sendq_prepend(ifp, m0);
2613 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
2614 break;
2615 }
2616 enq++;
2617 ether_bpf_mtap_if(ifp, m0);
2618 }
2619
2620 if (enq > 0) {
2621 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2622 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2623
2624 /* kick Tx */
2625 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2626
2627 /*
2628 * Set a timeout in case the chip goes out to lunch.
2629 */
2630 sc->nfe_watchdog_timer = 5;
2631 }
2632 }
2633
2634 static void
2635 nfe_watchdog(if_t ifp)
2636 {
2637 struct nfe_softc *sc = if_getsoftc(ifp);
2638
2639 if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer)
2640 return;
2641
2642 /* Check if we've lost Tx completion interrupt. */
2643 nfe_txeof(sc);
2644 if (sc->txq.queued == 0) {
2645 if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
2646 "-- recovering\n");
2647 if (!if_sendq_empty(ifp))
2648 nfe_start_locked(ifp);
2649 return;
2650 }
2651 /* Check if we've lost start Tx command. */
2652 sc->nfe_force_tx++;
2653 if (sc->nfe_force_tx <= 3) {
2654 /*
2655 		 * If a lost start command turns out to be the usual cause of
2656 		 * watchdog timeouts, this kick should move into nfe_txeof().
2657 */
2658 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2659 return;
2660 }
2661 sc->nfe_force_tx = 0;
2662
2663 if_printf(ifp, "watchdog timeout\n");
2664
2665 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
2666 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2667 nfe_init_locked(sc);
2668 }
2669
2670 static void
2671 nfe_init(void *xsc)
2672 {
2673 struct nfe_softc *sc = xsc;
2674
2675 NFE_LOCK(sc);
2676 nfe_init_locked(sc);
2677 NFE_UNLOCK(sc);
2678 }
2679
2680 static void
2681 nfe_init_locked(void *xsc)
2682 {
2683 struct nfe_softc *sc = xsc;
2684 if_t ifp = sc->nfe_ifp;
2685 struct mii_data *mii;
2686 uint32_t val;
2687 int error;
2688
2689 NFE_LOCK_ASSERT(sc);
2690
2691 mii = device_get_softc(sc->nfe_miibus);
2692
2693 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
2694 return;
2695
2696 nfe_stop(ifp);
2697
2698 sc->nfe_framesize = if_getmtu(ifp) + NFE_RX_HEADERS;
2699
2700 nfe_init_tx_ring(sc, &sc->txq);
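	/* Frames that no longer fit in a standard cluster use the jumbo ring. */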
2701 if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN))
2702 error = nfe_init_jrx_ring(sc, &sc->jrxq);
2703 else
2704 error = nfe_init_rx_ring(sc, &sc->rxq);
2705 if (error != 0) {
2706 device_printf(sc->nfe_dev,
2707 "initialization failed: no memory for rx buffers\n");
2708 nfe_stop(ifp);
2709 return;
2710 }
2711
2712 val = 0;
2713 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0)
2714 val |= NFE_MAC_ADDR_INORDER;
2715 NFE_WRITE(sc, NFE_TX_UNK, val);
2716 NFE_WRITE(sc, NFE_STATUS, 0);
2717
2718 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0)
2719 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE);
2720
2721 sc->rxtxctl = NFE_RXTX_BIT2;
2722 if (sc->nfe_flags & NFE_40BIT_ADDR)
2723 sc->rxtxctl |= NFE_RXTX_V3MAGIC;
2724 else if (sc->nfe_flags & NFE_JUMBO_SUP)
2725 sc->rxtxctl |= NFE_RXTX_V2MAGIC;
2726
2727 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
2728 sc->rxtxctl |= NFE_RXTX_RXCSUM;
2729 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
2730 sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;
2731
2732 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
2733 DELAY(10);
2734 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2735
2736 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
2737 NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
2738 else
2739 NFE_WRITE(sc, NFE_VTAG_CTL, 0);
2740
2741 NFE_WRITE(sc, NFE_SETUP_R6, 0);
2742
2743 /* set MAC address */
2744 nfe_set_macaddr(sc, if_getlladdr(ifp));
2745
2746 /* tell MAC where rings are in memory */
2747 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) {
2748 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2749 NFE_ADDR_HI(sc->jrxq.jphysaddr));
2750 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2751 NFE_ADDR_LO(sc->jrxq.jphysaddr));
2752 } else {
2753 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2754 NFE_ADDR_HI(sc->rxq.physaddr));
2755 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2756 NFE_ADDR_LO(sc->rxq.physaddr));
2757 }
2758 NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr));
2759 NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));
2760
2761 NFE_WRITE(sc, NFE_RING_SIZE,
2762 (NFE_RX_RING_COUNT - 1) << 16 |
2763 (NFE_TX_RING_COUNT - 1));
2764
2765 NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize);
2766
2767 /* force MAC to wakeup */
2768 val = NFE_READ(sc, NFE_PWR_STATE);
2769 if ((val & NFE_PWR_WAKEUP) == 0)
2770 NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP);
2771 DELAY(10);
2772 val = NFE_READ(sc, NFE_PWR_STATE);
2773 NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID);
2774
2775 #if 1
2776 /* configure interrupts coalescing/mitigation */
2777 NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
2778 #else
2779 /* no interrupt mitigation: one interrupt per packet */
2780 NFE_WRITE(sc, NFE_IMTIMER, 970);
2781 #endif
2782
2783 NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100);
2784 NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
2785 NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
2786
2787 /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
2788 NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
2789
2790 NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
2791 /* Disable WOL. */
2792 NFE_WRITE(sc, NFE_WOL_CTL, 0);
2793
2794 sc->rxtxctl &= ~NFE_RXTX_BIT2;
2795 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2796 DELAY(10);
2797 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
2798
2799 /* set Rx filter */
2800 nfe_setmulti(sc);
2801
2802 /* enable Rx */
2803 NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
2804
2805 /* enable Tx */
2806 NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
2807
2808 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
2809
2810 /* Clear hardware stats. */
2811 nfe_stats_clear(sc);
2812
2813 #ifdef DEVICE_POLLING
2814 if (if_getcapenable(ifp) & IFCAP_POLLING)
2815 nfe_disable_intr(sc);
2816 else
2817 #endif
2818 nfe_set_intr(sc);
2819 nfe_enable_intr(sc); /* enable interrupts */
2820
2821 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
2822 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2823
2824 sc->nfe_link = 0;
2825 mii_mediachg(mii);
2826
2827 callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2828 }
2829
2830 static void
2831 nfe_stop(if_t ifp)
2832 {
2833 struct nfe_softc *sc = if_getsoftc(ifp);
2834 struct nfe_rx_ring *rx_ring;
2835 struct nfe_jrx_ring *jrx_ring;
2836 struct nfe_tx_ring *tx_ring;
2837 struct nfe_rx_data *rdata;
2838 struct nfe_tx_data *tdata;
2839 int i;
2840
2841 NFE_LOCK_ASSERT(sc);
2842
2843 sc->nfe_watchdog_timer = 0;
2844 if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
2845
2846 callout_stop(&sc->nfe_stat_ch);
2847
2848 /* abort Tx */
2849 NFE_WRITE(sc, NFE_TX_CTL, 0);
2850
2851 /* disable Rx */
2852 NFE_WRITE(sc, NFE_RX_CTL, 0);
2853
2854 /* disable interrupts */
2855 nfe_disable_intr(sc);
2856
2857 sc->nfe_link = 0;
2858
2859 /* free Rx and Tx mbufs still in the queues. */
2860 rx_ring = &sc->rxq;
2861 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
2862 rdata = &rx_ring->data[i];
2863 if (rdata->m != NULL) {
2864 bus_dmamap_sync(rx_ring->rx_data_tag,
2865 rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2866 bus_dmamap_unload(rx_ring->rx_data_tag,
2867 rdata->rx_data_map);
2868 m_freem(rdata->m);
2869 rdata->m = NULL;
2870 }
2871 }
2872
2873 if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) {
2874 jrx_ring = &sc->jrxq;
2875 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
2876 rdata = &jrx_ring->jdata[i];
2877 if (rdata->m != NULL) {
2878 bus_dmamap_sync(jrx_ring->jrx_data_tag,
2879 rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2880 bus_dmamap_unload(jrx_ring->jrx_data_tag,
2881 rdata->rx_data_map);
2882 m_freem(rdata->m);
2883 rdata->m = NULL;
2884 }
2885 }
2886 }
2887
2888 tx_ring = &sc->txq;
2889 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
2890 tdata = &tx_ring->data[i];
2891 if (tdata->m != NULL) {
2892 bus_dmamap_sync(tx_ring->tx_data_tag,
2893 tdata->tx_data_map, BUS_DMASYNC_POSTWRITE);
2894 bus_dmamap_unload(tx_ring->tx_data_tag,
2895 tdata->tx_data_map);
2896 m_freem(tdata->m);
2897 tdata->m = NULL;
2898 }
2899 }
2900 /* Update hardware stats. */
2901 nfe_stats_update(sc);
2902 }
2903
2904 static int
2905 nfe_ifmedia_upd(if_t ifp)
2906 {
2907 struct nfe_softc *sc = if_getsoftc(ifp);
2908 struct mii_data *mii;
2909
2910 NFE_LOCK(sc);
2911 mii = device_get_softc(sc->nfe_miibus);
2912 mii_mediachg(mii);
2913 NFE_UNLOCK(sc);
2914
2915 return (0);
2916 }
2917
2918 static void
2919 nfe_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
2920 {
2921 struct nfe_softc *sc;
2922 struct mii_data *mii;
2923
2924 sc = if_getsoftc(ifp);
2925
2926 NFE_LOCK(sc);
2927 mii = device_get_softc(sc->nfe_miibus);
2928 mii_pollstat(mii);
2929
2930 ifmr->ifm_active = mii->mii_media_active;
2931 ifmr->ifm_status = mii->mii_media_status;
2932 NFE_UNLOCK(sc);
2933 }
2934
2935 void
2936 nfe_tick(void *xsc)
2937 {
2938 struct nfe_softc *sc;
2939 struct mii_data *mii;
2940 if_t ifp;
2941
2942 sc = (struct nfe_softc *)xsc;
2943
2944 NFE_LOCK_ASSERT(sc);
2945
2946 ifp = sc->nfe_ifp;
2947
2948 mii = device_get_softc(sc->nfe_miibus);
2949 mii_tick(mii);
2950 nfe_stats_update(sc);
2951 nfe_watchdog(ifp);
2952 callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2953 }
2954
2955 static int
2956 nfe_shutdown(device_t dev)
2957 {
2958
2959 return (nfe_suspend(dev));
2960 }
2961
2962 static void
2963 nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
2964 {
2965 uint32_t val;
2966
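	/*
	 * Chips without NFE_CORRECT_MACADDR hold the station address in the
	 * MACADDR registers byte-reversed relative to how nfe_set_macaddr()
	 * programs it, so read it back swapped.
	 */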
2967 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
2968 val = NFE_READ(sc, NFE_MACADDR_LO);
2969 addr[0] = (val >> 8) & 0xff;
2970 addr[1] = (val & 0xff);
2971
2972 val = NFE_READ(sc, NFE_MACADDR_HI);
2973 addr[2] = (val >> 24) & 0xff;
2974 addr[3] = (val >> 16) & 0xff;
2975 addr[4] = (val >> 8) & 0xff;
2976 addr[5] = (val & 0xff);
2977 } else {
2978 val = NFE_READ(sc, NFE_MACADDR_LO);
2979 addr[5] = (val >> 8) & 0xff;
2980 addr[4] = (val & 0xff);
2981
2982 val = NFE_READ(sc, NFE_MACADDR_HI);
2983 addr[3] = (val >> 24) & 0xff;
2984 addr[2] = (val >> 16) & 0xff;
2985 addr[1] = (val >> 8) & 0xff;
2986 addr[0] = (val & 0xff);
2987 }
2988 }
2989
2990 static void
2991 nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr)
2992 {
2993
2994 NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] << 8 | addr[4]);
2995 NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
2996 addr[1] << 8 | addr[0]);
2997 }
2998
2999 /*
3000 * Map a single buffer address.
3001 */
3002
3003 static void
3004 nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3005 {
3006 struct nfe_dmamap_arg *ctx;
3007
3008 if (error != 0)
3009 return;
3010
3011 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
3012
3013 ctx = (struct nfe_dmamap_arg *)arg;
3014 ctx->nfe_busaddr = segs[0].ds_addr;
3015 }
3016
3017 static int
3018 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3019 {
3020 int error, value;
3021
3022 if (!arg1)
3023 return (EINVAL);
3024 value = *(int *)arg1;
3025 error = sysctl_handle_int(oidp, &value, 0, req);
3026 if (error || !req->newptr)
3027 return (error);
3028 if (value < low || value > high)
3029 return (EINVAL);
3030 *(int *)arg1 = value;
3031
3032 return (0);
3033 }
3034
3035 static int
3036 sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS)
3037 {
3038
3039 return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN,
3040 NFE_PROC_MAX));
3041 }
3042
3043 #define NFE_SYSCTL_STAT_ADD32(c, h, n, p, d) \
3044 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
3045 #define NFE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
3046 SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
3047
3048 static void
3049 nfe_sysctl_node(struct nfe_softc *sc)
3050 {
3051 struct sysctl_ctx_list *ctx;
3052 struct sysctl_oid_list *child, *parent;
3053 struct sysctl_oid *tree;
3054 struct nfe_hw_stats *stats;
3055 int error;
3056
3057 stats = &sc->nfe_stats;
3058 ctx = device_get_sysctl_ctx(sc->nfe_dev);
3059 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nfe_dev));
3060 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
3061 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
3062 &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I",
3063 "max number of Rx events to process");
3064
3065 sc->nfe_process_limit = NFE_PROC_DEFAULT;
3066 error = resource_int_value(device_get_name(sc->nfe_dev),
3067 device_get_unit(sc->nfe_dev), "process_limit",
3068 &sc->nfe_process_limit);
3069 if (error == 0) {
3070 if (sc->nfe_process_limit < NFE_PROC_MIN ||
3071 sc->nfe_process_limit > NFE_PROC_MAX) {
3072 device_printf(sc->nfe_dev,
3073 "process_limit value out of range; "
3074 "using default: %d\n", NFE_PROC_DEFAULT);
3075 sc->nfe_process_limit = NFE_PROC_DEFAULT;
3076 }
3077 }
3078
3079 if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
3080 return;
3081
3082 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
3083 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "NFE statistics");
3084 parent = SYSCTL_CHILDREN(tree);
3085
3086 /* Rx statistics. */
3087 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
3088 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Rx MAC statistics");
3089 child = SYSCTL_CHILDREN(tree);
3090
3091 NFE_SYSCTL_STAT_ADD32(ctx, child, "frame_errors",
3092 &stats->rx_frame_errors, "Framing Errors");
3093 NFE_SYSCTL_STAT_ADD32(ctx, child, "extra_bytes",
3094 &stats->rx_extra_bytes, "Extra Bytes");
3095 NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
3096 &stats->rx_late_cols, "Late Collisions");
3097 NFE_SYSCTL_STAT_ADD32(ctx, child, "runts",
3098 &stats->rx_runts, "Runts");
3099 NFE_SYSCTL_STAT_ADD32(ctx, child, "jumbos",
3100 &stats->rx_jumbos, "Jumbos");
3101 NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_overuns",
3102 &stats->rx_fifo_overuns, "FIFO Overruns");
3103 NFE_SYSCTL_STAT_ADD32(ctx, child, "crc_errors",
3104 &stats->rx_crc_errors, "CRC Errors");
3105 NFE_SYSCTL_STAT_ADD32(ctx, child, "fae",
3106 &stats->rx_fae, "Frame Alignment Errors");
3107 NFE_SYSCTL_STAT_ADD32(ctx, child, "len_errors",
3108 &stats->rx_len_errors, "Length Errors");
3109 NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
3110 &stats->rx_unicast, "Unicast Frames");
3111 NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
3112 &stats->rx_multicast, "Multicast Frames");
3113 NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
3114 &stats->rx_broadcast, "Broadcast Frames");
3115 if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3116 NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
3117 &stats->rx_octets, "Octets");
3118 NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
3119 &stats->rx_pause, "Pause frames");
3120 NFE_SYSCTL_STAT_ADD32(ctx, child, "drops",
3121 &stats->rx_drops, "Drop frames");
3122 }
3123
3124 /* Tx statistics. */
3125 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx",
3126 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx MAC statistics");
3127 child = SYSCTL_CHILDREN(tree);
3128 NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
3129 &stats->tx_octets, "Octets");
3130 NFE_SYSCTL_STAT_ADD32(ctx, child, "zero_rexmits",
3131 &stats->tx_zero_rexmits, "Zero Retransmits");
3132 NFE_SYSCTL_STAT_ADD32(ctx, child, "one_rexmits",
3133 &stats->tx_one_rexmits, "One Retransmits");
3134 NFE_SYSCTL_STAT_ADD32(ctx, child, "multi_rexmits",
3135 &stats->tx_multi_rexmits, "Multiple Retransmits");
3136 NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
3137 &stats->tx_late_cols, "Late Collisions");
3138 NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_underuns",
3139 &stats->tx_fifo_underuns, "FIFO Underruns");
3140 NFE_SYSCTL_STAT_ADD32(ctx, child, "carrier_losts",
3141 &stats->tx_carrier_losts, "Carrier Losts");
3142 NFE_SYSCTL_STAT_ADD32(ctx, child, "excess_deferrals",
3143 &stats->tx_excess_deferals, "Excess Deferrals");
3144 NFE_SYSCTL_STAT_ADD32(ctx, child, "retry_errors",
3145 &stats->tx_retry_errors, "Retry Errors");
3146 if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3147 NFE_SYSCTL_STAT_ADD32(ctx, child, "deferrals",
3148 &stats->tx_deferals, "Deferrals");
3149 NFE_SYSCTL_STAT_ADD32(ctx, child, "frames",
3150 &stats->tx_frames, "Frames");
3151 NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
3152 &stats->tx_pause, "Pause Frames");
3153 }
3154 if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3155 		NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
3156 		    &stats->tx_unicast, "Unicast Frames");
3157 		NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
3158 		    &stats->tx_multicast, "Multicast Frames");
3159 		NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
3160 		    &stats->tx_broadcast, "Broadcast Frames");
3161 }
3162 }
3163
3164 #undef NFE_SYSCTL_STAT_ADD32
3165 #undef NFE_SYSCTL_STAT_ADD64
3166
3167 static void
3168 nfe_stats_clear(struct nfe_softc *sc)
3169 {
3170 int i, mib_cnt;
3171
3172 if ((sc->nfe_flags & NFE_MIB_V1) != 0)
3173 mib_cnt = NFE_NUM_MIB_STATV1;
3174 else if ((sc->nfe_flags & (NFE_MIB_V2 | NFE_MIB_V3)) != 0)
3175 mib_cnt = NFE_NUM_MIB_STATV2;
3176 else
3177 return;
3178
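	/* The MIB counters appear to be clear-on-read; reading resets them. */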
3179 for (i = 0; i < mib_cnt; i++)
3180 NFE_READ(sc, NFE_TX_OCTET + i * sizeof(uint32_t));
3181
3182 if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3183 NFE_READ(sc, NFE_TX_UNICAST);
3184 NFE_READ(sc, NFE_TX_MULTICAST);
3185 NFE_READ(sc, NFE_TX_BROADCAST);
3186 }
3187 }
3188
3189 static void
3190 nfe_stats_update(struct nfe_softc *sc)
3191 {
3192 struct nfe_hw_stats *stats;
3193
3194 NFE_LOCK_ASSERT(sc);
3195
3196 if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
3197 return;
3198
3199 stats = &sc->nfe_stats;
3200 stats->tx_octets += NFE_READ(sc, NFE_TX_OCTET);
3201 stats->tx_zero_rexmits += NFE_READ(sc, NFE_TX_ZERO_REXMIT);
3202 stats->tx_one_rexmits += NFE_READ(sc, NFE_TX_ONE_REXMIT);
3203 stats->tx_multi_rexmits += NFE_READ(sc, NFE_TX_MULTI_REXMIT);
3204 stats->tx_late_cols += NFE_READ(sc, NFE_TX_LATE_COL);
3205 stats->tx_fifo_underuns += NFE_READ(sc, NFE_TX_FIFO_UNDERUN);
3206 stats->tx_carrier_losts += NFE_READ(sc, NFE_TX_CARRIER_LOST);
3207 stats->tx_excess_deferals += NFE_READ(sc, NFE_TX_EXCESS_DEFERRAL);
3208 stats->tx_retry_errors += NFE_READ(sc, NFE_TX_RETRY_ERROR);
3209 stats->rx_frame_errors += NFE_READ(sc, NFE_RX_FRAME_ERROR);
3210 stats->rx_extra_bytes += NFE_READ(sc, NFE_RX_EXTRA_BYTES);
3211 stats->rx_late_cols += NFE_READ(sc, NFE_RX_LATE_COL);
3212 stats->rx_runts += NFE_READ(sc, NFE_RX_RUNT);
3213 stats->rx_jumbos += NFE_READ(sc, NFE_RX_JUMBO);
3214 stats->rx_fifo_overuns += NFE_READ(sc, NFE_RX_FIFO_OVERUN);
3215 stats->rx_crc_errors += NFE_READ(sc, NFE_RX_CRC_ERROR);
3216 stats->rx_fae += NFE_READ(sc, NFE_RX_FAE);
3217 stats->rx_len_errors += NFE_READ(sc, NFE_RX_LEN_ERROR);
3218 stats->rx_unicast += NFE_READ(sc, NFE_RX_UNICAST);
3219 stats->rx_multicast += NFE_READ(sc, NFE_RX_MULTICAST);
3220 stats->rx_broadcast += NFE_READ(sc, NFE_RX_BROADCAST);
3221
3222 if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3223 stats->tx_deferals += NFE_READ(sc, NFE_TX_DEFERAL);
3224 stats->tx_frames += NFE_READ(sc, NFE_TX_FRAME);
3225 stats->rx_octets += NFE_READ(sc, NFE_RX_OCTET);
3226 stats->tx_pause += NFE_READ(sc, NFE_TX_PAUSE);
3227 stats->rx_pause += NFE_READ(sc, NFE_RX_PAUSE);
3228 stats->rx_drops += NFE_READ(sc, NFE_RX_DROP);
3229 }
3230
3231 if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3232 stats->tx_unicast += NFE_READ(sc, NFE_TX_UNICAST);
3233 stats->tx_multicast += NFE_READ(sc, NFE_TX_MULTICAST);
3234 stats->tx_broadcast += NFE_READ(sc, NFE_TX_BROADCAST);
3235 }
3236 }
3237
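/*
 * Renegotiate the PHY down to a 10/100 link for wake-on-LAN; called from
 * nfe_set_wol() when magic-packet wakeup is enabled, presumably because the
 * MAC cannot keep a gigabit link in its low-power state.
 */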
3238 static void
3239 nfe_set_linkspeed(struct nfe_softc *sc)
3240 {
3241 struct mii_softc *miisc;
3242 struct mii_data *mii;
3243 int aneg, i, phyno;
3244
3245 NFE_LOCK_ASSERT(sc);
3246
3247 mii = device_get_softc(sc->nfe_miibus);
3248 mii_pollstat(mii);
3249 aneg = 0;
3250 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
3251 (IFM_ACTIVE | IFM_AVALID)) {
3252 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
3253 case IFM_10_T:
3254 case IFM_100_TX:
3255 return;
3256 case IFM_1000_T:
3257 aneg++;
3258 break;
3259 default:
3260 break;
3261 }
3262 }
3263 miisc = LIST_FIRST(&mii->mii_phys);
3264 phyno = miisc->mii_phy;
3265 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3266 PHY_RESET(miisc);
3267 nfe_miibus_writereg(sc->nfe_dev, phyno, MII_100T2CR, 0);
3268 nfe_miibus_writereg(sc->nfe_dev, phyno,
3269 MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
3270 nfe_miibus_writereg(sc->nfe_dev, phyno,
3271 MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
3272 DELAY(1000);
3273 if (aneg != 0) {
3274 /*
3275 		 * Poll link state until nfe(4) gets a 10/100Mbps link.
3276 */
3277 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
3278 mii_pollstat(mii);
3279 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
3280 == (IFM_ACTIVE | IFM_AVALID)) {
3281 switch (IFM_SUBTYPE(mii->mii_media_active)) {
3282 case IFM_10_T:
3283 case IFM_100_TX:
3284 nfe_mac_config(sc, mii);
3285 return;
3286 default:
3287 break;
3288 }
3289 }
3290 NFE_UNLOCK(sc);
3291 pause("nfelnk", hz);
3292 NFE_LOCK(sc);
3293 }
3294 if (i == MII_ANEGTICKS_GIGE)
3295 device_printf(sc->nfe_dev,
3296 "establishing a link failed, WOL may not work!");
3297 }
3298 /*
3299 * No link, force MAC to have 100Mbps, full-duplex link.
3300 	 * This is the last resort and may or may not work.
3301 */
3302 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
3303 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
3304 nfe_mac_config(sc, mii);
3305 }
3306
3307 static void
3308 nfe_set_wol(struct nfe_softc *sc)
3309 {
3310 if_t ifp;
3311 uint32_t wolctl;
3312 int pmc;
3313 uint16_t pmstat;
3314
3315 NFE_LOCK_ASSERT(sc);
3316
3317 if (pci_find_cap(sc->nfe_dev, PCIY_PMG, &pmc) != 0)
3318 return;
3319 ifp = sc->nfe_ifp;
3320 if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
3321 wolctl = NFE_WOL_MAGIC;
3322 else
3323 wolctl = 0;
3324 NFE_WRITE(sc, NFE_WOL_CTL, wolctl);
3325 if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0) {
3326 nfe_set_linkspeed(sc);
3327 if ((sc->nfe_flags & NFE_PWR_MGMT) != 0)
3328 NFE_WRITE(sc, NFE_PWR2_CTL,
3329 NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_GATE_CLOCKS);
3330 /* Enable RX. */
3331 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 0);
3332 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, 0);
3333 NFE_WRITE(sc, NFE_RX_CTL, NFE_READ(sc, NFE_RX_CTL) |
3334 NFE_RX_START);
3335 }
3336 /* Request PME if WOL is requested. */
3337 pmstat = pci_read_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, 2);
3338 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
3339 if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
3340 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3341 pci_write_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
3342 }
3343