1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2007 Sepherosa Ziehau. All rights reserved.
5 *
6 * This code is derived from software contributed to The DragonFly Project
7 * by Sepherosa Ziehau <sepherosa@gmail.com>
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 *
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in
17 * the documentation and/or other materials provided with the
18 * distribution.
19 * 3. Neither the name of The DragonFly Project nor the names of its
20 * contributors may be used to endorse or promote products derived
21 * from this software without specific, prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.10 2008/05/18 07:47:14 sephe Exp $
37 */
38
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/endian.h>
42 #include <sys/kernel.h>
43 #include <sys/bus.h>
44 #include <sys/malloc.h>
45 #include <sys/mbuf.h>
46 #include <sys/proc.h>
47 #include <sys/rman.h>
48 #include <sys/module.h>
49 #include <sys/socket.h>
50 #include <sys/sockio.h>
51 #include <sys/sysctl.h>
52
53 #include <net/ethernet.h>
54 #include <net/if.h>
55 #include <net/if_var.h>
56 #include <net/if_dl.h>
57 #include <net/if_types.h>
58 #include <net/bpf.h>
59 #include <net/if_arp.h>
60 #include <net/if_media.h>
61 #include <net/if_vlan_var.h>
62
63 #include <machine/bus.h>
64
65 #include <dev/mii/mii.h>
66 #include <dev/mii/miivar.h>
67
68 #include <dev/pci/pcireg.h>
69 #include <dev/pci/pcivar.h>
70
71 #include <dev/et/if_etreg.h>
72 #include <dev/et/if_etvar.h>
73
74 #include "miibus_if.h"
75
76 MODULE_DEPEND(et, pci, 1, 1, 1);
77 MODULE_DEPEND(et, ether, 1, 1, 1);
78 MODULE_DEPEND(et, miibus, 1, 1, 1);
79
80 /* Tunables. */
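/* Set hw.et.msi_disable to a non-zero value (e.g. from loader.conf) to force INTx interrupts even when MSI is available. */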
81 static int msi_disable = 0;
82 TUNABLE_INT("hw.et.msi_disable", &msi_disable);
83
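/* TX checksum offload features reflected into if_hwassist when IFCAP_TXCSUM is enabled (see et_ioctl()). */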
84 #define ET_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
85
86 static int et_probe(device_t);
87 static int et_attach(device_t);
88 static int et_detach(device_t);
89 static int et_shutdown(device_t);
90 static int et_suspend(device_t);
91 static int et_resume(device_t);
92
93 static int et_miibus_readreg(device_t, int, int);
94 static int et_miibus_writereg(device_t, int, int, int);
95 static void et_miibus_statchg(device_t);
96
97 static void et_init_locked(struct et_softc *);
98 static void et_init(void *);
99 static int et_ioctl(if_t, u_long, caddr_t);
100 static void et_start_locked(if_t);
101 static void et_start(if_t);
102 static int et_watchdog(struct et_softc *);
103 static int et_ifmedia_upd_locked(if_t);
104 static int et_ifmedia_upd(if_t);
105 static void et_ifmedia_sts(if_t, struct ifmediareq *);
106 static uint64_t et_get_counter(if_t, ift_counter);
107
108 static void et_add_sysctls(struct et_softc *);
109 static int et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS);
110 static int et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS);
111
112 static void et_intr(void *);
113 static void et_rxeof(struct et_softc *);
114 static void et_txeof(struct et_softc *);
115
116 static int et_dma_alloc(struct et_softc *);
117 static void et_dma_free(struct et_softc *);
118 static void et_dma_map_addr(void *, bus_dma_segment_t *, int, int);
119 static int et_dma_ring_alloc(struct et_softc *, bus_size_t, bus_size_t,
120 bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *,
121 const char *);
122 static void et_dma_ring_free(struct et_softc *, bus_dma_tag_t *, uint8_t **,
123 bus_dmamap_t, bus_addr_t *);
124 static void et_init_tx_ring(struct et_softc *);
125 static int et_init_rx_ring(struct et_softc *);
126 static void et_free_tx_ring(struct et_softc *);
127 static void et_free_rx_ring(struct et_softc *);
128 static int et_encap(struct et_softc *, struct mbuf **);
129 static int et_newbuf_cluster(struct et_rxbuf_data *, int);
130 static int et_newbuf_hdr(struct et_rxbuf_data *, int);
131 static void et_rxbuf_discard(struct et_rxbuf_data *, int);
132
133 static void et_stop(struct et_softc *);
134 static int et_chip_init(struct et_softc *);
135 static void et_chip_attach(struct et_softc *);
136 static void et_init_mac(struct et_softc *);
137 static void et_init_rxmac(struct et_softc *);
138 static void et_init_txmac(struct et_softc *);
139 static int et_init_rxdma(struct et_softc *);
140 static int et_init_txdma(struct et_softc *);
141 static int et_start_rxdma(struct et_softc *);
142 static int et_start_txdma(struct et_softc *);
143 static int et_stop_rxdma(struct et_softc *);
144 static int et_stop_txdma(struct et_softc *);
145 static void et_reset(struct et_softc *);
146 static int et_bus_config(struct et_softc *);
147 static void et_get_eaddr(device_t, uint8_t[]);
148 static void et_setmulti(struct et_softc *);
149 static void et_tick(void *);
150 static void et_stats_update(struct et_softc *);
151
152 static const struct et_dev {
153 uint16_t vid;
154 uint16_t did;
155 const char *desc;
156 } et_devices[] = {
157 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310,
158 "Agere ET1310 Gigabit Ethernet" },
159 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST,
160 "Agere ET1310 Fast Ethernet" },
161 { 0, 0, NULL }
162 };
163
164 static device_method_t et_methods[] = {
165 DEVMETHOD(device_probe, et_probe),
166 DEVMETHOD(device_attach, et_attach),
167 DEVMETHOD(device_detach, et_detach),
168 DEVMETHOD(device_shutdown, et_shutdown),
169 DEVMETHOD(device_suspend, et_suspend),
170 DEVMETHOD(device_resume, et_resume),
171
172 DEVMETHOD(miibus_readreg, et_miibus_readreg),
173 DEVMETHOD(miibus_writereg, et_miibus_writereg),
174 DEVMETHOD(miibus_statchg, et_miibus_statchg),
175
176 DEVMETHOD_END
177 };
178
179 static driver_t et_driver = {
180 "et",
181 et_methods,
182 sizeof(struct et_softc)
183 };
184
185 DRIVER_MODULE(et, pci, et_driver, 0, 0);
186 MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, et, et_devices,
187 nitems(et_devices) - 1);
188 DRIVER_MODULE(miibus, et, miibus_driver, 0, 0);
189
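/* Defaults for RX interrupt moderation (packet count and delay in 10us units) and for the periodic timer interrupt; these are programmed into the chip in et_init_rxdma() and et_init_locked(). */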
190 static int et_rx_intr_npkts = 32;
191 static int et_rx_intr_delay = 20; /* x10 usec */
192 static int et_tx_intr_nsegs = 126;
193 static uint32_t et_timer = 1000 * 1000 * 1000; /* nanosec */
194
195 TUNABLE_INT("hw.et.timer", &et_timer);
196 TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts);
197 TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay);
198 TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs);
199
200 static int
201 et_probe(device_t dev)
202 {
203 const struct et_dev *d;
204 uint16_t did, vid;
205
206 vid = pci_get_vendor(dev);
207 did = pci_get_device(dev);
208
209 for (d = et_devices; d->desc != NULL; ++d) {
210 if (vid == d->vid && did == d->did) {
211 device_set_desc(dev, d->desc);
212 return (BUS_PROBE_DEFAULT);
213 }
214 }
215 return (ENXIO);
216 }
217
218 static int
219 et_attach(device_t dev)
220 {
221 struct et_softc *sc;
222 if_t ifp;
223 uint8_t eaddr[ETHER_ADDR_LEN];
224 uint32_t pmcfg;
225 int cap, error, msic;
226
227 sc = device_get_softc(dev);
228 sc->dev = dev;
229 mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
230 MTX_DEF);
231 callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);
232
233 ifp = sc->ifp = if_alloc(IFT_ETHER);
234
235 /*
236 * Initialize tunables
237 */
238 sc->sc_rx_intr_npkts = et_rx_intr_npkts;
239 sc->sc_rx_intr_delay = et_rx_intr_delay;
240 sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
241 sc->sc_timer = et_timer;
242
243 /* Enable bus mastering */
244 pci_enable_busmaster(dev);
245
246 /*
247 * Allocate IO memory
248 */
249 sc->sc_mem_rid = PCIR_BAR(0);
250 sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
251 &sc->sc_mem_rid, RF_ACTIVE);
252 if (sc->sc_mem_res == NULL) {
253 device_printf(dev, "can't allocate IO memory\n");
254 return (ENXIO);
255 }
256
257 msic = 0;
258 if (pci_find_cap(dev, PCIY_EXPRESS, &cap) == 0) {
259 sc->sc_expcap = cap;
260 sc->sc_flags |= ET_FLAG_PCIE;
261 msic = pci_msi_count(dev);
262 if (bootverbose)
263 device_printf(dev, "MSI count: %d\n", msic);
264 }
265 if (msic > 0 && msi_disable == 0) {
266 msic = 1;
267 if (pci_alloc_msi(dev, &msic) == 0) {
268 if (msic == 1) {
269 device_printf(dev, "Using %d MSI message\n",
270 msic);
271 sc->sc_flags |= ET_FLAG_MSI;
272 } else
273 pci_release_msi(dev);
274 }
275 }
276
277 /*
278 * Allocate IRQ
279 */
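/* Legacy INTx interrupts use rid 0 and may be shared; the single MSI message allocated above uses rid 1. */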
280 if ((sc->sc_flags & ET_FLAG_MSI) == 0) {
281 sc->sc_irq_rid = 0;
282 sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
283 &sc->sc_irq_rid, RF_SHAREABLE | RF_ACTIVE);
284 } else {
285 sc->sc_irq_rid = 1;
286 sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
287 &sc->sc_irq_rid, RF_ACTIVE);
288 }
289 if (sc->sc_irq_res == NULL) {
290 device_printf(dev, "can't allocate irq\n");
291 error = ENXIO;
292 goto fail;
293 }
294
295 if (pci_get_device(dev) == PCI_PRODUCT_LUCENT_ET1310_FAST)
296 sc->sc_flags |= ET_FLAG_FASTETHER;
297
298 error = et_bus_config(sc);
299 if (error)
300 goto fail;
301
302 et_get_eaddr(dev, eaddr);
303
304 /* Take PHY out of COMA and enable clocks. */
305 pmcfg = ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE;
306 if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
307 pmcfg |= EM_PM_GIGEPHY_ENB;
308 CSR_WRITE_4(sc, ET_PM, pmcfg);
309
310 et_reset(sc);
311
312 error = et_dma_alloc(sc);
313 if (error)
314 goto fail;
315
316 if_setsoftc(ifp, sc);
317 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
318 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
319 if_setinitfn(ifp, et_init);
320 if_setioctlfn(ifp, et_ioctl);
321 if_setstartfn(ifp, et_start);
322 if_setgetcounterfn(ifp, et_get_counter);
323 if_setcapabilities(ifp, IFCAP_TXCSUM | IFCAP_VLAN_MTU);
324 if_setcapenable(ifp, if_getcapabilities(ifp));
325 if_setsendqlen(ifp, ET_TX_NDESC - 1);
326 if_setsendqready(ifp);
327
328 et_chip_attach(sc);
329
330 error = mii_attach(dev, &sc->sc_miibus, ifp, et_ifmedia_upd,
331 et_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
332 MIIF_DOPAUSE);
333 if (error) {
334 device_printf(dev, "attaching PHYs failed\n");
335 goto fail;
336 }
337
338 ether_ifattach(ifp, eaddr);
339
340 /* Tell the upper layer(s) we support long frames. */
341 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
342
343 error = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_NET | INTR_MPSAFE,
344 NULL, et_intr, sc, &sc->sc_irq_handle);
345 if (error) {
346 ether_ifdetach(ifp);
347 device_printf(dev, "can't setup intr\n");
348 goto fail;
349 }
350
351 et_add_sysctls(sc);
352
353 return (0);
354 fail:
355 et_detach(dev);
356 return (error);
357 }
358
359 static int
360 et_detach(device_t dev)
361 {
362 struct et_softc *sc;
363
364 sc = device_get_softc(dev);
365 if (device_is_attached(dev)) {
366 ether_ifdetach(sc->ifp);
367 ET_LOCK(sc);
368 et_stop(sc);
369 ET_UNLOCK(sc);
370 callout_drain(&sc->sc_tick);
371 }
372
373 bus_generic_detach(dev);
374
375 if (sc->sc_irq_handle != NULL)
376 bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle);
377 if (sc->sc_irq_res != NULL)
378 bus_release_resource(dev, SYS_RES_IRQ,
379 rman_get_rid(sc->sc_irq_res), sc->sc_irq_res);
380 if ((sc->sc_flags & ET_FLAG_MSI) != 0)
381 pci_release_msi(dev);
382 if (sc->sc_mem_res != NULL)
383 bus_release_resource(dev, SYS_RES_MEMORY,
384 rman_get_rid(sc->sc_mem_res), sc->sc_mem_res);
385
386 if (sc->ifp != NULL)
387 if_free(sc->ifp);
388
389 et_dma_free(sc);
390
391 mtx_destroy(&sc->sc_mtx);
392
393 return (0);
394 }
395
396 static int
397 et_shutdown(device_t dev)
398 {
399 struct et_softc *sc;
400
401 sc = device_get_softc(dev);
402 ET_LOCK(sc);
403 et_stop(sc);
404 ET_UNLOCK(sc);
405 return (0);
406 }
407
408 static int
409 et_miibus_readreg(device_t dev, int phy, int reg)
410 {
411 struct et_softc *sc;
412 uint32_t val;
413 int i, ret;
414
415 sc = device_get_softc(dev);
416 /* Stop any pending operations */
417 CSR_WRITE_4(sc, ET_MII_CMD, 0);
418
419 val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK;
420 val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK;
421 CSR_WRITE_4(sc, ET_MII_ADDR, val);
422
423 /* Start reading */
424 CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);
425
426 #define NRETRY 50
427
428 for (i = 0; i < NRETRY; ++i) {
429 val = CSR_READ_4(sc, ET_MII_IND);
430 if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
431 break;
432 DELAY(50);
433 }
434 if (i == NRETRY) {
435 if_printf(sc->ifp,
436 "read phy %d, reg %d timed out\n", phy, reg);
437 ret = 0;
438 goto back;
439 }
440
441 #undef NRETRY
442
443 val = CSR_READ_4(sc, ET_MII_STAT);
444 ret = val & ET_MII_STAT_VALUE_MASK;
445
446 back:
447 /* Make sure that the current operation is stopped */
448 CSR_WRITE_4(sc, ET_MII_CMD, 0);
449 return (ret);
450 }
451
452 static int
453 et_miibus_writereg(device_t dev, int phy, int reg, int val0)
454 {
455 struct et_softc *sc;
456 uint32_t val;
457 int i;
458
459 sc = device_get_softc(dev);
460 /* Stop any pending operations */
461 CSR_WRITE_4(sc, ET_MII_CMD, 0);
462
463 val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK;
464 val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK;
465 CSR_WRITE_4(sc, ET_MII_ADDR, val);
466
467 /* Start writing */
468 CSR_WRITE_4(sc, ET_MII_CTRL,
469 (val0 << ET_MII_CTRL_VALUE_SHIFT) & ET_MII_CTRL_VALUE_MASK);
470
471 #define NRETRY 100
472
473 for (i = 0; i < NRETRY; ++i) {
474 val = CSR_READ_4(sc, ET_MII_IND);
475 if ((val & ET_MII_IND_BUSY) == 0)
476 break;
477 DELAY(50);
478 }
479 if (i == NRETRY) {
480 if_printf(sc->ifp,
481 "write phy %d, reg %d timed out\n", phy, reg);
482 et_miibus_readreg(dev, phy, reg);
483 }
484
485 #undef NRETRY
486
487 /* Make sure that the current operation is stopped */
488 CSR_WRITE_4(sc, ET_MII_CMD, 0);
489 return (0);
490 }
491
492 static void
493 et_miibus_statchg(device_t dev)
494 {
495 struct et_softc *sc;
496 struct mii_data *mii;
497 if_t ifp;
498 uint32_t cfg1, cfg2, ctrl;
499 int i;
500
501 sc = device_get_softc(dev);
502
503 mii = device_get_softc(sc->sc_miibus);
504 ifp = sc->ifp;
505 if (mii == NULL || ifp == NULL ||
506 (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
507 return;
508
509 sc->sc_flags &= ~ET_FLAG_LINK;
510 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
511 (IFM_ACTIVE | IFM_AVALID)) {
512 switch (IFM_SUBTYPE(mii->mii_media_active)) {
513 case IFM_10_T:
514 case IFM_100_TX:
515 sc->sc_flags |= ET_FLAG_LINK;
516 break;
517 case IFM_1000_T:
518 if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
519 sc->sc_flags |= ET_FLAG_LINK;
520 break;
521 }
522 }
523
524 /* XXX Stop TX/RX MAC? */
525 if ((sc->sc_flags & ET_FLAG_LINK) == 0)
526 return;
527
528 /* Program MACs with resolved speed/duplex/flow-control. */
529 ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
530 ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);
531 cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
532 cfg1 &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
533 ET_MAC_CFG1_LOOPBACK);
534 cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
535 cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
536 ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
537 cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
538 ((7 << ET_MAC_CFG2_PREAMBLE_LEN_SHIFT) &
539 ET_MAC_CFG2_PREAMBLE_LEN_MASK);
540
541 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
542 cfg2 |= ET_MAC_CFG2_MODE_GMII;
543 else {
544 cfg2 |= ET_MAC_CFG2_MODE_MII;
545 ctrl |= ET_MAC_CTRL_MODE_MII;
546 }
547
548 if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
549 cfg2 |= ET_MAC_CFG2_FDX;
550 /*
551 * The controller lacks automatic TX pause frame
552 * generation, so pause frames would have to be
553 * generated by the driver. Although the driver can
554 * send a pause frame with an arbitrary pause time,
555 * the controller provides no way to tell how many
556 * free RX buffers remain, which makes it hard to
557 * generate an XON frame in time on the driver
558 * side. Therefore TX flow control is left
559 * disabled.
560 */
561 #ifdef notyet
562 if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE)
563 cfg1 |= ET_MAC_CFG1_TXFLOW;
564 #endif
565 if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE)
566 cfg1 |= ET_MAC_CFG1_RXFLOW;
567 } else
568 ctrl |= ET_MAC_CTRL_GHDX;
569
570 CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
571 CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
572 cfg1 |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
573 CSR_WRITE_4(sc, ET_MAC_CFG1, cfg1);
574
575 #define NRETRY 50
576
577 for (i = 0; i < NRETRY; ++i) {
578 cfg1 = CSR_READ_4(sc, ET_MAC_CFG1);
579 if ((cfg1 & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
580 (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
581 break;
582 DELAY(100);
583 }
584 if (i == NRETRY)
585 if_printf(ifp, "can't enable RX/TX\n");
586 sc->sc_flags |= ET_FLAG_TXRX_ENABLED;
587
588 #undef NRETRY
589 }
590
591 static int
592 et_ifmedia_upd_locked(if_t ifp)
593 {
594 struct et_softc *sc;
595 struct mii_data *mii;
596 struct mii_softc *miisc;
597
598 sc = if_getsoftc(ifp);
599 mii = device_get_softc(sc->sc_miibus);
600 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
601 PHY_RESET(miisc);
602 return (mii_mediachg(mii));
603 }
604
605 static int
606 et_ifmedia_upd(if_t ifp)
607 {
608 struct et_softc *sc;
609 int res;
610
611 sc = if_getsoftc(ifp);
612 ET_LOCK(sc);
613 res = et_ifmedia_upd_locked(ifp);
614 ET_UNLOCK(sc);
615
616 return (res);
617 }
618
619 static void
620 et_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
621 {
622 struct et_softc *sc;
623 struct mii_data *mii;
624
625 sc = if_getsoftc(ifp);
626 ET_LOCK(sc);
627 if ((if_getflags(ifp) & IFF_UP) == 0) {
628 ET_UNLOCK(sc);
629 return;
630 }
631
632 mii = device_get_softc(sc->sc_miibus);
633 mii_pollstat(mii);
634 ifmr->ifm_active = mii->mii_media_active;
635 ifmr->ifm_status = mii->mii_media_status;
636 ET_UNLOCK(sc);
637 }
638
639 static void
640 et_stop(struct et_softc *sc)
641 {
642 if_t ifp;
643
644 ET_LOCK_ASSERT(sc);
645
646 ifp = sc->ifp;
647 callout_stop(&sc->sc_tick);
648 /* Disable interrupts. */
649 CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
650
651 CSR_WRITE_4(sc, ET_MAC_CFG1, CSR_READ_4(sc, ET_MAC_CFG1) & ~(
652 ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN));
653 DELAY(100);
654
655 et_stop_rxdma(sc);
656 et_stop_txdma(sc);
657 et_stats_update(sc);
658
659 et_free_tx_ring(sc);
660 et_free_rx_ring(sc);
661
662 sc->sc_tx = 0;
663 sc->sc_tx_intr = 0;
664 sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;
665
666 sc->watchdog_timer = 0;
667 if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
668 }
669
670 static int
671 et_bus_config(struct et_softc *sc)
672 {
673 uint32_t val, max_plsz;
674 uint16_t ack_latency, replay_timer;
675
676 /*
677 * Test whether EEPROM is valid
678 * NOTE: Read twice to get the correct value
679 */
680 pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1);
681 val = pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1);
682 if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
683 device_printf(sc->dev, "EEPROM status error 0x%02x\n", val);
684 return (ENXIO);
685 }
686
687 /* TODO: LED */
688
689 if ((sc->sc_flags & ET_FLAG_PCIE) == 0)
690 return (0);
691
692 /*
693 * Configure ACK latency and replay timer according to
694 * max payload size
695 */
696 val = pci_read_config(sc->dev,
697 sc->sc_expcap + PCIER_DEVICE_CAP, 4);
698 max_plsz = val & PCIEM_CAP_MAX_PAYLOAD;
699
700 switch (max_plsz) {
701 case ET_PCIV_DEVICE_CAPS_PLSZ_128:
702 ack_latency = ET_PCIV_ACK_LATENCY_128;
703 replay_timer = ET_PCIV_REPLAY_TIMER_128;
704 break;
705
706 case ET_PCIV_DEVICE_CAPS_PLSZ_256:
707 ack_latency = ET_PCIV_ACK_LATENCY_256;
708 replay_timer = ET_PCIV_REPLAY_TIMER_256;
709 break;
710
711 default:
712 ack_latency = pci_read_config(sc->dev, ET_PCIR_ACK_LATENCY, 2);
713 replay_timer = pci_read_config(sc->dev,
714 ET_PCIR_REPLAY_TIMER, 2);
715 device_printf(sc->dev, "ack latency %u, replay timer %u\n",
716 ack_latency, replay_timer);
717 break;
718 }
719 if (ack_latency != 0) {
720 pci_write_config(sc->dev, ET_PCIR_ACK_LATENCY, ack_latency, 2);
721 pci_write_config(sc->dev, ET_PCIR_REPLAY_TIMER, replay_timer,
722 2);
723 }
724
725 /*
726 * Set L0s and L1 latency timer to 2us
727 */
728 val = pci_read_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, 4);
729 val &= ~(PCIEM_LINK_CAP_L0S_EXIT | PCIEM_LINK_CAP_L1_EXIT);
730 /* L0s exit latency : 2us */
731 val |= 0x00005000;
732 /* L1 exit latency : 2us */
733 val |= 0x00028000;
734 pci_write_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, val, 4);
735
736 /*
737 * Set max read request size to 2048 bytes
738 */
739 pci_set_max_read_req(sc->dev, 2048);
740
741 return (0);
742 }
743
744 static void
745 et_get_eaddr(device_t dev, uint8_t eaddr[])
746 {
747 uint32_t val;
748 int i;
749
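/* The factory MAC address is read from PCI configuration space: the low four bytes at ET_PCIR_MAC_ADDR0 and the remaining two at ET_PCIR_MAC_ADDR1. */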
750 val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4);
751 for (i = 0; i < 4; ++i)
752 eaddr[i] = (val >> (8 * i)) & 0xff;
753
754 val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2);
755 for (; i < ETHER_ADDR_LEN; ++i)
756 eaddr[i] = (val >> (8 * (i - 4))) & 0xff;
757 }
758
759 static void
760 et_reset(struct et_softc *sc)
761 {
762
763 CSR_WRITE_4(sc, ET_MAC_CFG1,
764 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
765 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
766 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
767
768 CSR_WRITE_4(sc, ET_SWRST,
769 ET_SWRST_TXDMA | ET_SWRST_RXDMA |
770 ET_SWRST_TXMAC | ET_SWRST_RXMAC |
771 ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);
772
773 CSR_WRITE_4(sc, ET_MAC_CFG1,
774 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
775 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
776 CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
777 /* Disable interrupts. */
778 CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
779 }
780
781 struct et_dmamap_arg {
782 bus_addr_t et_busaddr;
783 };
784
785 static void
786 et_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
787 {
788 struct et_dmamap_arg *ctx;
789
790 if (error)
791 return;
792
793 KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
794
795 ctx = arg;
796 ctx->et_busaddr = segs->ds_addr;
797 }
798
799 static int
800 et_dma_ring_alloc(struct et_softc *sc, bus_size_t alignment, bus_size_t maxsize,
801 bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map, bus_addr_t *paddr,
802 const char *msg)
803 {
804 struct et_dmamap_arg ctx;
805 int error;
806
807 error = bus_dma_tag_create(sc->sc_dtag, alignment, 0, BUS_SPACE_MAXADDR,
808 BUS_SPACE_MAXADDR, NULL, NULL, maxsize, 1, maxsize, 0, NULL, NULL,
809 tag);
810 if (error != 0) {
811 device_printf(sc->dev, "could not create %s dma tag\n", msg);
812 return (error);
813 }
814 /* Allocate DMA'able memory for ring. */
815 error = bus_dmamem_alloc(*tag, (void **)ring,
816 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
817 if (error != 0) {
818 device_printf(sc->dev,
819 "could not allocate DMA'able memory for %s\n", msg);
820 return (error);
821 }
822 /* Load the address of the ring. */
823 ctx.et_busaddr = 0;
824 error = bus_dmamap_load(*tag, *map, *ring, maxsize, et_dma_map_addr,
825 &ctx, BUS_DMA_NOWAIT);
826 if (error != 0) {
827 device_printf(sc->dev,
828 "could not load DMA'able memory for %s\n", msg);
829 return (error);
830 }
831 *paddr = ctx.et_busaddr;
832 return (0);
833 }
834
835 static void
836 et_dma_ring_free(struct et_softc *sc, bus_dma_tag_t *tag, uint8_t **ring,
837 bus_dmamap_t map, bus_addr_t *paddr)
838 {
839
840 if (*paddr != 0) {
841 bus_dmamap_unload(*tag, map);
842 *paddr = 0;
843 }
844 if (*ring != NULL) {
845 bus_dmamem_free(*tag, *ring, map);
846 *ring = NULL;
847 }
848 if (*tag) {
849 bus_dma_tag_destroy(*tag);
850 *tag = NULL;
851 }
852 }
853
854 static int
855 et_dma_alloc(struct et_softc *sc)
856 {
857 struct et_txdesc_ring *tx_ring;
858 struct et_rxdesc_ring *rx_ring;
859 struct et_rxstat_ring *rxst_ring;
860 struct et_rxstatus_data *rxsd;
861 struct et_rxbuf_data *rbd;
862 struct et_txbuf_data *tbd;
863 struct et_txstatus_data *txsd;
864 int i, error;
865
866 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
867 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
868 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
869 &sc->sc_dtag);
870 if (error != 0) {
871 device_printf(sc->dev, "could not allocate parent dma tag\n");
872 return (error);
873 }
874
875 /* TX ring. */
876 tx_ring = &sc->sc_tx_ring;
877 error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_TX_RING_SIZE,
878 &tx_ring->tr_dtag, (uint8_t **)&tx_ring->tr_desc, &tx_ring->tr_dmap,
879 &tx_ring->tr_paddr, "TX ring");
880 if (error)
881 return (error);
882
883 /* TX status block. */
884 txsd = &sc->sc_tx_status;
885 error = et_dma_ring_alloc(sc, ET_STATUS_ALIGN, sizeof(uint32_t),
886 &txsd->txsd_dtag, (uint8_t **)&txsd->txsd_status, &txsd->txsd_dmap,
887 &txsd->txsd_paddr, "TX status block");
888 if (error)
889 return (error);
890
891 /* RX ring 0, used to receive small-sized frames. */
892 rx_ring = &sc->sc_rx_ring[0];
893 error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RX_RING_SIZE,
894 &rx_ring->rr_dtag, (uint8_t **)&rx_ring->rr_desc, &rx_ring->rr_dmap,
895 &rx_ring->rr_paddr, "RX ring 0");
896 rx_ring->rr_posreg = ET_RX_RING0_POS;
897 if (error)
898 return (error);
899
900 /* RX ring 1, used to store normal-sized frames. */
901 rx_ring = &sc->sc_rx_ring[1];
902 error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RX_RING_SIZE,
903 &rx_ring->rr_dtag, (uint8_t **)&rx_ring->rr_desc, &rx_ring->rr_dmap,
904 &rx_ring->rr_paddr, "RX ring 1");
905 rx_ring->rr_posreg = ET_RX_RING1_POS;
906 if (error)
907 return (error);
908
909 /* RX stat ring. */
910 rxst_ring = &sc->sc_rxstat_ring;
911 error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RXSTAT_RING_SIZE,
912 &rxst_ring->rsr_dtag, (uint8_t **)&rxst_ring->rsr_stat,
913 &rxst_ring->rsr_dmap, &rxst_ring->rsr_paddr, "RX stat ring");
914 if (error)
915 return (error);
916
917 /* RX status block. */
918 rxsd = &sc->sc_rx_status;
919 error = et_dma_ring_alloc(sc, ET_STATUS_ALIGN,
920 sizeof(struct et_rxstatus), &rxsd->rxsd_dtag,
921 (uint8_t **)&rxsd->rxsd_status, &rxsd->rxsd_dmap,
922 &rxsd->rxsd_paddr, "RX status block");
923 if (error)
924 return (error);
925
926 /* Create parent DMA tag for mbufs. */
927 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
928 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
929 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
930 &sc->sc_mbuf_dtag);
931 if (error != 0) {
932 device_printf(sc->dev,
933 "could not allocate parent dma tag for mbuf\n");
934 return (error);
935 }
936
937 /* Create DMA tag for mini RX mbufs used by RX ring 0. */
938 error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
939 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MHLEN, 1,
940 MHLEN, 0, NULL, NULL, &sc->sc_rx_mini_tag);
941 if (error) {
942 device_printf(sc->dev, "could not create mini RX dma tag\n");
943 return (error);
944 }
945
946 /* Create DMA tag for standard RX mbufs used by RX ring 1. */
947 error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
948 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
949 MCLBYTES, 0, NULL, NULL, &sc->sc_rx_tag);
950 if (error) {
951 device_printf(sc->dev, "could not create RX dma tag\n");
952 return (error);
953 }
954
955 /* Create DMA tag for TX mbufs. */
956 error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
957 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
958 MCLBYTES * ET_NSEG_MAX, ET_NSEG_MAX, MCLBYTES, 0, NULL, NULL,
959 &sc->sc_tx_tag);
960 if (error) {
961 device_printf(sc->dev, "could not create TX dma tag\n");
962 return (error);
963 }
964
965 /* Initialize RX ring 0. */
966 rbd = &sc->sc_rx_data[0];
967 rbd->rbd_bufsize = ET_RXDMA_CTRL_RING0_128;
968 rbd->rbd_newbuf = et_newbuf_hdr;
969 rbd->rbd_discard = et_rxbuf_discard;
970 rbd->rbd_softc = sc;
971 rbd->rbd_ring = &sc->sc_rx_ring[0];
972 /* Create DMA maps for mini RX buffers, ring 0. */
973 for (i = 0; i < ET_RX_NDESC; i++) {
974 error = bus_dmamap_create(sc->sc_rx_mini_tag, 0,
975 &rbd->rbd_buf[i].rb_dmap);
976 if (error) {
977 device_printf(sc->dev,
978 "could not create DMA map for mini RX mbufs\n");
979 return (error);
980 }
981 }
982
983 /* Create a spare DMA map for mini RX buffers, ring 0. */
984 error = bus_dmamap_create(sc->sc_rx_mini_tag, 0,
985 &sc->sc_rx_mini_sparemap);
986 if (error) {
987 device_printf(sc->dev,
988 "could not create spare DMA map for mini RX mbuf\n");
989 return (error);
990 }
991
992 /* Initialize RX ring 1. */
993 rbd = &sc->sc_rx_data[1];
994 rbd->rbd_bufsize = ET_RXDMA_CTRL_RING1_2048;
995 rbd->rbd_newbuf = et_newbuf_cluster;
996 rbd->rbd_discard = et_rxbuf_discard;
997 rbd->rbd_softc = sc;
998 rbd->rbd_ring = &sc->sc_rx_ring[1];
999 /* Create DMA maps for standard RX buffers, ring 1. */
1000 for (i = 0; i < ET_RX_NDESC; i++) {
1001 error = bus_dmamap_create(sc->sc_rx_tag, 0,
1002 &rbd->rbd_buf[i].rb_dmap);
1003 if (error) {
1004 device_printf(sc->dev,
1005 "could not create DMA map for standard RX mbufs\n");
1006 return (error);
1007 }
1008 }
1009
1010 /* Create a spare DMA map for standard RX buffers, ring 1. */
1011 error = bus_dmamap_create(sc->sc_rx_tag, 0, &sc->sc_rx_sparemap);
1012 if (error) {
1013 device_printf(sc->dev,
1014 "could not create spare DMA map for RX mbuf\n");
1015 return (error);
1016 }
1017
1018 /* Create DMA maps for TX buffers. */
1019 tbd = &sc->sc_tx_data;
1020 for (i = 0; i < ET_TX_NDESC; i++) {
1021 error = bus_dmamap_create(sc->sc_tx_tag, 0,
1022 &tbd->tbd_buf[i].tb_dmap);
1023 if (error) {
1024 device_printf(sc->dev,
1025 "could not create DMA map for TX mbufs\n");
1026 return (error);
1027 }
1028 }
1029
1030 return (0);
1031 }
1032
1033 static void
1034 et_dma_free(struct et_softc *sc)
1035 {
1036 struct et_txdesc_ring *tx_ring;
1037 struct et_rxdesc_ring *rx_ring;
1038 struct et_txstatus_data *txsd;
1039 struct et_rxstat_ring *rxst_ring;
struct et_rxstatus_data *rxsd;
1040 struct et_rxbuf_data *rbd;
1041 struct et_txbuf_data *tbd;
1042 int i;
1043
1044 /* Destroy DMA maps for mini RX buffers, ring 0. */
1045 rbd = &sc->sc_rx_data[0];
1046 for (i = 0; i < ET_RX_NDESC; i++) {
1047 if (rbd->rbd_buf[i].rb_dmap) {
1048 bus_dmamap_destroy(sc->sc_rx_mini_tag,
1049 rbd->rbd_buf[i].rb_dmap);
1050 rbd->rbd_buf[i].rb_dmap = NULL;
1051 }
1052 }
1053 if (sc->sc_rx_mini_sparemap) {
1054 bus_dmamap_destroy(sc->sc_rx_mini_tag, sc->sc_rx_mini_sparemap);
1055 sc->sc_rx_mini_sparemap = NULL;
1056 }
1057 if (sc->sc_rx_mini_tag) {
1058 bus_dma_tag_destroy(sc->sc_rx_mini_tag);
1059 sc->sc_rx_mini_tag = NULL;
1060 }
1061
1062 /* Destroy DMA maps for standard RX buffers, ring 1. */
1063 rbd = &sc->sc_rx_data[1];
1064 for (i = 0; i < ET_RX_NDESC; i++) {
1065 if (rbd->rbd_buf[i].rb_dmap) {
1066 bus_dmamap_destroy(sc->sc_rx_tag,
1067 rbd->rbd_buf[i].rb_dmap);
1068 rbd->rbd_buf[i].rb_dmap = NULL;
1069 }
1070 }
1071 if (sc->sc_rx_sparemap) {
1072 bus_dmamap_destroy(sc->sc_rx_tag, sc->sc_rx_sparemap);
1073 sc->sc_rx_sparemap = NULL;
1074 }
1075 if (sc->sc_rx_tag) {
1076 bus_dma_tag_destroy(sc->sc_rx_tag);
1077 sc->sc_rx_tag = NULL;
1078 }
1079
1080 /* Destroy DMA maps for TX buffers. */
1081 tbd = &sc->sc_tx_data;
1082 for (i = 0; i < ET_TX_NDESC; i++) {
1083 if (tbd->tbd_buf[i].tb_dmap) {
1084 bus_dmamap_destroy(sc->sc_tx_tag,
1085 tbd->tbd_buf[i].tb_dmap);
1086 tbd->tbd_buf[i].tb_dmap = NULL;
1087 }
1088 }
1089 if (sc->sc_tx_tag) {
1090 bus_dma_tag_destroy(sc->sc_tx_tag);
1091 sc->sc_tx_tag = NULL;
1092 }
1093
1094 /* Destroy mini RX ring, ring 0. */
1095 rx_ring = &sc->sc_rx_ring[0];
1096 et_dma_ring_free(sc, &rx_ring->rr_dtag, (void *)&rx_ring->rr_desc,
1097 rx_ring->rr_dmap, &rx_ring->rr_paddr);
1098 /* Destroy standard RX ring, ring 1. */
1099 rx_ring = &sc->sc_rx_ring[1];
1100 et_dma_ring_free(sc, &rx_ring->rr_dtag, (void *)&rx_ring->rr_desc,
1101 rx_ring->rr_dmap, &rx_ring->rr_paddr);
1102 /* Destroy RX stat ring. */
1103 rxst_ring = &sc->sc_rxstat_ring;
1104 et_dma_ring_free(sc, &rxst_ring->rsr_dtag, (void *)&rxst_ring->rsr_stat,
1105 rxst_ring->rsr_dmap, &rxst_ring->rsr_paddr);
1106 /* Destroy RX status block. */
1107 rxsd = &sc->sc_rx_status;
1108 et_dma_ring_free(sc, &rxsd->rxsd_dtag, (void *)&rxsd->rxsd_status, rxsd->rxsd_dmap, &rxsd->rxsd_paddr);
1109 /* Destroy TX ring. */
1110 tx_ring = &sc->sc_tx_ring;
1111 et_dma_ring_free(sc, &tx_ring->tr_dtag, (void *)&tx_ring->tr_desc,
1112 tx_ring->tr_dmap, &tx_ring->tr_paddr);
1113 /* Destroy TX status block. */
1114 txsd = &sc->sc_tx_status;
1115 et_dma_ring_free(sc, &txsd->txsd_dtag, (void *)&txsd->txsd_status,
1116 txsd->txsd_dmap, &txsd->txsd_paddr);
1117
1118 /* Destroy the parent tag. */
1119 if (sc->sc_dtag) {
1120 bus_dma_tag_destroy(sc->sc_dtag);
1121 sc->sc_dtag = NULL;
1122 }
1123 }
1124
1125 static void
1126 et_chip_attach(struct et_softc *sc)
1127 {
1128 uint32_t val;
1129
1130 /*
1131 * Perform minimal initialization
1132 */
1133
1134 /* Disable loopback */
1135 CSR_WRITE_4(sc, ET_LOOPBACK, 0);
1136
1137 /* Reset MAC */
1138 CSR_WRITE_4(sc, ET_MAC_CFG1,
1139 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
1140 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
1141 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
1142
1143 /*
1144 * Setup half duplex mode
1145 */
1146 val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
1147 (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
1148 (55 << ET_MAC_HDX_COLLWIN_SHIFT) |
1149 ET_MAC_HDX_EXC_DEFER;
1150 CSR_WRITE_4(sc, ET_MAC_HDX, val);
1151
1152 /* Clear MAC control */
1153 CSR_WRITE_4(sc, ET_MAC_CTRL, 0);
1154
1155 /* Reset MII */
1156 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);
1157
1158 /* Bring MAC out of reset state */
1159 CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
1160
1161 /* Enable memory controllers */
1162 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
1163 }
1164
1165 static void
1166 et_intr(void *xsc)
1167 {
1168 struct et_softc *sc;
1169 if_t ifp;
1170 uint32_t status;
1171
1172 sc = xsc;
1173 ET_LOCK(sc);
1174 ifp = sc->ifp;
1175 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
1176 goto done;
1177
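/* Check whether any interrupt of interest is pending; with a shared INTx line the interrupt may not be ours. */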
1178 status = CSR_READ_4(sc, ET_INTR_STATUS);
1179 if ((status & ET_INTRS) == 0)
1180 goto done;
1181
1182 /* Disable further interrupts. */
1183 CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
1184
1185 if (status & (ET_INTR_RXDMA_ERROR | ET_INTR_TXDMA_ERROR)) {
1186 device_printf(sc->dev, "DMA error(0x%08x) -- resetting\n",
1187 status);
1188 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1189 et_init_locked(sc);
1190 ET_UNLOCK(sc);
1191 return;
1192 }
1193 if (status & ET_INTR_RXDMA)
1194 et_rxeof(sc);
1195 if (status & (ET_INTR_TXDMA | ET_INTR_TIMER))
1196 et_txeof(sc);
1197 if (status & ET_INTR_TIMER)
1198 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
1199 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1200 CSR_WRITE_4(sc, ET_INTR_MASK, ~ET_INTRS);
1201 if (!if_sendq_empty(ifp))
1202 et_start_locked(ifp);
1203 }
1204 done:
1205 ET_UNLOCK(sc);
1206 }
1207
1208 static void
1209 et_init_locked(struct et_softc *sc)
1210 {
1211 if_t ifp;
1212 int error;
1213
1214 ET_LOCK_ASSERT(sc);
1215
1216 ifp = sc->ifp;
1217 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
1218 return;
1219
1220 et_stop(sc);
1221 et_reset(sc);
1222
1223 et_init_tx_ring(sc);
1224 error = et_init_rx_ring(sc);
1225 if (error)
1226 return;
1227
1228 error = et_chip_init(sc);
1229 if (error)
1230 goto fail;
1231
1232 /*
1233 * Start TX/RX DMA engine
1234 */
1235 error = et_start_rxdma(sc);
1236 if (error)
1237 return;
1238
1239 error = et_start_txdma(sc);
1240 if (error)
1241 return;
1242
1243 /* Enable interrupts. */
1244 CSR_WRITE_4(sc, ET_INTR_MASK, ~ET_INTRS);
1245
1246 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
1247
1248 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
1249 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
1250
1251 sc->sc_flags &= ~ET_FLAG_LINK;
1252 et_ifmedia_upd_locked(ifp);
1253
1254 callout_reset(&sc->sc_tick, hz, et_tick, sc);
1255
1256 fail:
1257 if (error)
1258 et_stop(sc);
1259 }
1260
1261 static void
1262 et_init(void *xsc)
1263 {
1264 struct et_softc *sc = xsc;
1265
1266 ET_LOCK(sc);
1267 et_init_locked(sc);
1268 ET_UNLOCK(sc);
1269 }
1270
1271 static int
1272 et_ioctl(if_t ifp, u_long cmd, caddr_t data)
1273 {
1274 struct et_softc *sc;
1275 struct mii_data *mii;
1276 struct ifreq *ifr;
1277 int error, mask, max_framelen;
1278
1279 sc = if_getsoftc(ifp);
1280 ifr = (struct ifreq *)data;
1281 error = 0;
1282
1283 /* XXX LOCKSUSED */
1284 switch (cmd) {
1285 case SIOCSIFFLAGS:
1286 ET_LOCK(sc);
1287 if (if_getflags(ifp) & IFF_UP) {
1288 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1289 if ((if_getflags(ifp) ^ sc->sc_if_flags) &
1290 (IFF_ALLMULTI | IFF_PROMISC | IFF_BROADCAST))
1291 et_setmulti(sc);
1292 } else {
1293 et_init_locked(sc);
1294 }
1295 } else {
1296 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
1297 et_stop(sc);
1298 }
1299 sc->sc_if_flags = if_getflags(ifp);
1300 ET_UNLOCK(sc);
1301 break;
1302
1303 case SIOCSIFMEDIA:
1304 case SIOCGIFMEDIA:
1305 mii = device_get_softc(sc->sc_miibus);
1306 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1307 break;
1308
1309 case SIOCADDMULTI:
1310 case SIOCDELMULTI:
1311 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1312 ET_LOCK(sc);
1313 et_setmulti(sc);
1314 ET_UNLOCK(sc);
1315 }
1316 break;
1317
1318 case SIOCSIFMTU:
1319 ET_LOCK(sc);
1320 #if 0
1321 if (sc->sc_flags & ET_FLAG_JUMBO)
1322 max_framelen = ET_JUMBO_FRAMELEN;
1323 else
1324 #endif
1325 max_framelen = MCLBYTES - 1;
1326
1327 if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) {
1328 error = EOPNOTSUPP;
1329 ET_UNLOCK(sc);
1330 break;
1331 }
1332
1333 if (if_getmtu(ifp) != ifr->ifr_mtu) {
1334 if_setmtu(ifp, ifr->ifr_mtu);
1335 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1336 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1337 et_init_locked(sc);
1338 }
1339 }
1340 ET_UNLOCK(sc);
1341 break;
1342
1343 case SIOCSIFCAP:
1344 ET_LOCK(sc);
1345 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
1346 if ((mask & IFCAP_TXCSUM) != 0 &&
1347 (IFCAP_TXCSUM & if_getcapabilities(ifp)) != 0) {
1348 if_togglecapenable(ifp, IFCAP_TXCSUM);
1349 if ((IFCAP_TXCSUM & if_getcapenable(ifp)) != 0)
1350 if_sethwassistbits(ifp, ET_CSUM_FEATURES, 0);
1351 else
1352 if_sethwassistbits(ifp, 0, ET_CSUM_FEATURES);
1353 }
1354 ET_UNLOCK(sc);
1355 break;
1356
1357 default:
1358 error = ether_ioctl(ifp, cmd, data);
1359 break;
1360 }
1361 return (error);
1362 }
1363
1364 static void
1365 et_start_locked(if_t ifp)
1366 {
1367 struct et_softc *sc;
1368 struct mbuf *m_head = NULL;
1369 struct et_txdesc_ring *tx_ring;
1370 struct et_txbuf_data *tbd;
1371 uint32_t tx_ready_pos;
1372 int enq;
1373
1374 sc = if_getsoftc(ifp);
1375 ET_LOCK_ASSERT(sc);
1376
1377 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1378 IFF_DRV_RUNNING ||
1379 (sc->sc_flags & (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED)) !=
1380 (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED))
1381 return;
1382
1383 /*
1384 * The driver does not request a TX completion interrupt for
1385 * every queued frame, to avoid generating excessive interrupts.
1386 * This means the driver may still be waiting for a completion
1387 * interrupt even though some frames have already been
1388 * transmitted. Reclaiming transmitted frames here ensures the
1389 * driver sees all available descriptors.
1390 */
1391 tbd = &sc->sc_tx_data;
1392 if (tbd->tbd_used > (ET_TX_NDESC * 2) / 3)
1393 et_txeof(sc);
1394
1395 for (enq = 0; !if_sendq_empty(ifp); ) {
1396 if (tbd->tbd_used + ET_NSEG_SPARE >= ET_TX_NDESC) {
1397 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
1398 break;
1399 }
1400
1401 m_head = if_dequeue(ifp);
1402 if (m_head == NULL)
1403 break;
1404
1405 if (et_encap(sc, &m_head)) {
1406 if (m_head == NULL) {
1407 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1408 break;
1409 }
1410 if_sendq_prepend(ifp, m_head);
1411 if (tbd->tbd_used > 0)
1412 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
1413 break;
1414 }
1415 enq++;
1416 ETHER_BPF_MTAP(ifp, m_head);
1417 }
1418
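/* New frames were queued: sync the descriptor ring, tell the hardware about the new ready position, and arm the transmit watchdog. */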
1419 if (enq > 0) {
1420 tx_ring = &sc->sc_tx_ring;
1421 bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
1422 BUS_DMASYNC_PREWRITE);
1423 tx_ready_pos = tx_ring->tr_ready_index &
1424 ET_TX_READY_POS_INDEX_MASK;
1425 if (tx_ring->tr_ready_wrap)
1426 tx_ready_pos |= ET_TX_READY_POS_WRAP;
1427 CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);
1428 sc->watchdog_timer = 5;
1429 }
1430 }
1431
1432 static void
1433 et_start(if_t ifp)
1434 {
1435 struct et_softc *sc;
1436
1437 sc = if_getsoftc(ifp);
1438 ET_LOCK(sc);
1439 et_start_locked(ifp);
1440 ET_UNLOCK(sc);
1441 }
1442
1443 static int
1444 et_watchdog(struct et_softc *sc)
1445 {
1446 uint32_t status;
1447
1448 ET_LOCK_ASSERT(sc);
1449
1450 if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
1451 return (0);
1452
1453 bus_dmamap_sync(sc->sc_tx_status.txsd_dtag, sc->sc_tx_status.txsd_dmap,
1454 BUS_DMASYNC_POSTREAD);
1455 status = le32toh(*(sc->sc_tx_status.txsd_status));
1456 if_printf(sc->ifp, "watchdog timed out (0x%08x) -- resetting\n",
1457 status);
1458
1459 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
1460 if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
1461 et_init_locked(sc);
1462 return (EJUSTRETURN);
1463 }
1464
1465 static int
1466 et_stop_rxdma(struct et_softc *sc)
1467 {
1468
1469 CSR_WRITE_4(sc, ET_RXDMA_CTRL,
1470 ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);
1471
1472 DELAY(5);
1473 if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
1474 if_printf(sc->ifp, "can't stop RX DMA engine\n");
1475 return (ETIMEDOUT);
1476 }
1477 return (0);
1478 }
1479
1480 static int
1481 et_stop_txdma(struct et_softc *sc)
1482 {
1483
1484 CSR_WRITE_4(sc, ET_TXDMA_CTRL,
1485 ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
1486 return (0);
1487 }
1488
1489 static void
1490 et_free_tx_ring(struct et_softc *sc)
1491 {
1492 struct et_txbuf_data *tbd;
1493 struct et_txbuf *tb;
1494 int i;
1495
1496 tbd = &sc->sc_tx_data;
1497 for (i = 0; i < ET_TX_NDESC; ++i) {
1498 tb = &tbd->tbd_buf[i];
1499 if (tb->tb_mbuf != NULL) {
1500 bus_dmamap_sync(sc->sc_tx_tag, tb->tb_dmap,
1501 BUS_DMASYNC_POSTWRITE);
1502 bus_dmamap_unload(sc->sc_tx_tag, tb->tb_dmap);
1503 m_freem(tb->tb_mbuf);
1504 tb->tb_mbuf = NULL;
1505 }
1506 }
1507 }
1508
1509 static void
1510 et_free_rx_ring(struct et_softc *sc)
1511 {
1512 struct et_rxbuf_data *rbd;
1513 struct et_rxdesc_ring *rx_ring;
1514 struct et_rxbuf *rb;
1515 int i;
1516
1517 /* Ring 0 */
1518 rx_ring = &sc->sc_rx_ring[0];
1519 rbd = &sc->sc_rx_data[0];
1520 for (i = 0; i < ET_RX_NDESC; ++i) {
1521 rb = &rbd->rbd_buf[i];
1522 if (rb->rb_mbuf != NULL) {
1523 bus_dmamap_sync(sc->sc_rx_mini_tag, rx_ring->rr_dmap,
1524 BUS_DMASYNC_POSTREAD);
1525 bus_dmamap_unload(sc->sc_rx_mini_tag, rb->rb_dmap);
1526 m_freem(rb->rb_mbuf);
1527 rb->rb_mbuf = NULL;
1528 }
1529 }
1530
1531 /* Ring 1 */
1532 rx_ring = &sc->sc_rx_ring[1];
1533 rbd = &sc->sc_rx_data[1];
1534 for (i = 0; i < ET_RX_NDESC; ++i) {
1535 rb = &rbd->rbd_buf[i];
1536 if (rb->rb_mbuf != NULL) {
1537 bus_dmamap_sync(sc->sc_rx_tag, rx_ring->rr_dmap,
1538 BUS_DMASYNC_POSTREAD);
1539 bus_dmamap_unload(sc->sc_rx_tag, rb->rb_dmap);
1540 m_freem(rb->rb_mbuf);
1541 rb->rb_mbuf = NULL;
1542 }
1543 }
1544 }
1545
1546 static u_int
1547 et_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
1548 {
1549 uint32_t h, *hp, *hash = arg;
1550
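/* Use bits 23-29 of the big-endian CRC32 as an index (0-127) into the 128-bit multicast hash, which is kept as four 32-bit words. */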
1551 h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
1552 h = (h & 0x3f800000) >> 23;
1553
1554 hp = &hash[0];
1555 if (h >= 32 && h < 64) {
1556 h -= 32;
1557 hp = &hash[1];
1558 } else if (h >= 64 && h < 96) {
1559 h -= 64;
1560 hp = &hash[2];
1561 } else if (h >= 96) {
1562 h -= 96;
1563 hp = &hash[3];
1564 }
1565 *hp |= (1 << h);
1566
1567 return (1);
1568 }
1569
1570 static void
1571 et_setmulti(struct et_softc *sc)
1572 {
1573 if_t ifp;
1574 uint32_t hash[4] = { 0, 0, 0, 0 };
1575 uint32_t rxmac_ctrl, pktfilt;
1576 int i, count;
1577
1578 ET_LOCK_ASSERT(sc);
1579 ifp = sc->ifp;
1580
1581 pktfilt = CSR_READ_4(sc, ET_PKTFILT);
1582 rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);
1583
1584 pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
1585 if (if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) {
1586 rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
1587 goto back;
1588 }
1589
1590 count = if_foreach_llmaddr(ifp, et_hash_maddr, &hash);
1591
1592 for (i = 0; i < 4; ++i)
1593 CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);
1594
1595 if (count > 0)
1596 pktfilt |= ET_PKTFILT_MCAST;
1597 rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
1598 back:
1599 CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
1600 CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
1601 }
1602
1603 static int
1604 et_chip_init(struct et_softc *sc)
1605 {
1606 if_t ifp;
1607 uint32_t rxq_end;
1608 int error, frame_len, rxmem_size;
1609
1610 ifp = sc->ifp;
1611 /*
1612 * Split the 16KB of internal memory between TX and RX
1613 * according to frame length.
1614 */
1615 frame_len = ET_FRAMELEN(if_getmtu(ifp));
1616 if (frame_len < 2048) {
1617 rxmem_size = ET_MEM_RXSIZE_DEFAULT;
1618 } else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) {
1619 rxmem_size = ET_MEM_SIZE / 2;
1620 } else {
1621 rxmem_size = ET_MEM_SIZE -
1622 roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT);
1623 }
1624 rxq_end = ET_QUEUE_ADDR(rxmem_size);
1625
1626 CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START);
1627 CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end);
1628 CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1);
1629 CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END);
1630
1631 /* No loopback */
1632 CSR_WRITE_4(sc, ET_LOOPBACK, 0);
1633
1634 /* Clear MSI configure */
1635 if ((sc->sc_flags & ET_FLAG_MSI) == 0)
1636 CSR_WRITE_4(sc, ET_MSI_CFG, 0);
1637
1638 /* Disable timer */
1639 CSR_WRITE_4(sc, ET_TIMER, 0);
1640
1641 /* Initialize MAC */
1642 et_init_mac(sc);
1643
1644 /* Enable memory controllers */
1645 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
1646
1647 /* Initialize RX MAC */
1648 et_init_rxmac(sc);
1649
1650 /* Initialize TX MAC */
1651 et_init_txmac(sc);
1652
1653 /* Initialize RX DMA engine */
1654 error = et_init_rxdma(sc);
1655 if (error)
1656 return (error);
1657
1658 /* Initialize TX DMA engine */
1659 error = et_init_txdma(sc);
1660 if (error)
1661 return (error);
1662
1663 return (0);
1664 }
1665
1666 static void
1667 et_init_tx_ring(struct et_softc *sc)
1668 {
1669 struct et_txdesc_ring *tx_ring;
1670 struct et_txbuf_data *tbd;
1671 struct et_txstatus_data *txsd;
1672
1673 tx_ring = &sc->sc_tx_ring;
1674 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
1675 bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
1676 BUS_DMASYNC_PREWRITE);
1677
1678 tbd = &sc->sc_tx_data;
1679 tbd->tbd_start_index = 0;
1680 tbd->tbd_start_wrap = 0;
1681 tbd->tbd_used = 0;
1682
1683 txsd = &sc->sc_tx_status;
1684 bzero(txsd->txsd_status, sizeof(uint32_t));
1685 bus_dmamap_sync(txsd->txsd_dtag, txsd->txsd_dmap,
1686 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1687 }
1688
1689 static int
1690 et_init_rx_ring(struct et_softc *sc)
1691 {
1692 struct et_rxstatus_data *rxsd;
1693 struct et_rxstat_ring *rxst_ring;
1694 struct et_rxbuf_data *rbd;
1695 int i, error, n;
1696
1697 for (n = 0; n < ET_RX_NRING; ++n) {
1698 rbd = &sc->sc_rx_data[n];
1699 for (i = 0; i < ET_RX_NDESC; ++i) {
1700 error = rbd->rbd_newbuf(rbd, i);
1701 if (error) {
1702 if_printf(sc->ifp, "ring %d, buf %d: "
1703 "newbuf failed: %d\n", n, i, error);
1704 return (error);
1705 }
1706 }
1707 }
1708
1709 rxsd = &sc->sc_rx_status;
1710 bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
1711 bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
1712 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1713
1714 rxst_ring = &sc->sc_rxstat_ring;
1715 bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
1716 bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
1717 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1718
1719 return (0);
1720 }
1721
1722 static int
1723 et_init_rxdma(struct et_softc *sc)
1724 {
1725 struct et_rxstatus_data *rxsd;
1726 struct et_rxstat_ring *rxst_ring;
1727 struct et_rxdesc_ring *rx_ring;
1728 int error;
1729
1730 error = et_stop_rxdma(sc);
1731 if (error) {
1732 if_printf(sc->ifp, "can't init RX DMA engine\n");
1733 return (error);
1734 }
1735
1736 /*
1737 * Install RX status
1738 */
1739 rxsd = &sc->sc_rx_status;
1740 CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
1741 CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));
1742
1743 /*
1744 * Install RX stat ring
1745 */
1746 rxst_ring = &sc->sc_rxstat_ring;
1747 CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
1748 CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
1749 CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
1750 CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
1751 CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);
1752
1753 /* Match ET_RXSTAT_POS */
1754 rxst_ring->rsr_index = 0;
1755 rxst_ring->rsr_wrap = 0;
1756
1757 /*
1758 * Install the 2nd RX descriptor ring
1759 */
1760 rx_ring = &sc->sc_rx_ring[1];
1761 CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
1762 CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
1763 CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
1764 CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
1765 CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);
1766
1767 /* Match ET_RX_RING1_POS */
1768 rx_ring->rr_index = 0;
1769 rx_ring->rr_wrap = 1;
1770
1771 /*
1772 * Install the 1st RX descriptor ring
1773 */
1774 rx_ring = &sc->sc_rx_ring[0];
1775 CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
1776 CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
1777 CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
1778 CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
1779 CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);
1780
1781 /* Match ET_RX_RING0_POS */
1782 rx_ring->rr_index = 0;
1783 rx_ring->rr_wrap = 1;
1784
1785 /*
1786 * RX intr moderation
1787 */
1788 CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
1789 CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);
1790
1791 return (0);
1792 }
1793
1794 static int
1795 et_init_txdma(struct et_softc *sc)
1796 {
1797 struct et_txdesc_ring *tx_ring;
1798 struct et_txstatus_data *txsd;
1799 int error;
1800
1801 error = et_stop_txdma(sc);
1802 if (error) {
1803 if_printf(sc->ifp, "can't init TX DMA engine\n");
1804 return (error);
1805 }
1806
1807 /*
1808 * Install TX descriptor ring
1809 */
1810 tx_ring = &sc->sc_tx_ring;
1811 CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
1812 CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
1813 CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);
1814
1815 /*
1816 * Install TX status
1817 */
1818 txsd = &sc->sc_tx_status;
1819 CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
1820 CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));
1821
1822 CSR_WRITE_4(sc, ET_TX_READY_POS, 0);
1823
1824 /* Match ET_TX_READY_POS */
1825 tx_ring->tr_ready_index = 0;
1826 tx_ring->tr_ready_wrap = 0;
1827
1828 return (0);
1829 }
1830
1831 static void
1832 et_init_mac(struct et_softc *sc)
1833 {
1834 if_t ifp;
1835 const uint8_t *eaddr;
1836 uint32_t val;
1837
1838 /* Reset MAC */
1839 CSR_WRITE_4(sc, ET_MAC_CFG1,
1840 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
1841 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
1842 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
1843
1844 /*
1845 * Setup inter packet gap
1846 */
1847 val = (56 << ET_IPG_NONB2B_1_SHIFT) |
1848 (88 << ET_IPG_NONB2B_2_SHIFT) |
1849 (80 << ET_IPG_MINIFG_SHIFT) |
1850 (96 << ET_IPG_B2B_SHIFT);
1851 CSR_WRITE_4(sc, ET_IPG, val);
1852
1853 /*
1854 * Setup half duplex mode
1855 */
1856 val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
1857 (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
1858 (55 << ET_MAC_HDX_COLLWIN_SHIFT) |
1859 ET_MAC_HDX_EXC_DEFER;
1860 CSR_WRITE_4(sc, ET_MAC_HDX, val);
1861
1862 /* Clear MAC control */
1863 CSR_WRITE_4(sc, ET_MAC_CTRL, 0);
1864
1865 /* Reset MII */
1866 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);
1867
1868 /*
1869 * Set MAC address
1870 */
1871 ifp = sc->ifp;
1872 eaddr = if_getlladdr(ifp);
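/* ET_MAC_ADDR1 holds station address bytes 2-5; ET_MAC_ADDR2 holds bytes 0-1 in its upper 16 bits. */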
1873 val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
1874 CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
1875 val = (eaddr[0] << 16) | (eaddr[1] << 24);
1876 CSR_WRITE_4(sc, ET_MAC_ADDR2, val);
1877
1878 /* Set max frame length */
1879 CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(if_getmtu(ifp)));
1880
1881 /* Bring MAC out of reset state */
1882 CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
1883 }
1884
1885 static void
1886 et_init_rxmac(struct et_softc *sc)
1887 {
1888 if_t ifp;
1889 const uint8_t *eaddr;
1890 uint32_t val;
1891 int i;
1892
1893 /* Disable RX MAC and WOL */
1894 CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);
1895
1896 /*
1897 * Clear all WOL related registers
1898 */
1899 for (i = 0; i < 3; ++i)
1900 CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
1901 for (i = 0; i < 20; ++i)
1902 CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);
1903
1904 /*
1905 * Set WOL source address. XXX is this necessary?
1906 */
1907 ifp = sc->ifp;
1908 eaddr = if_getlladdr(ifp);
1909 val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
1910 CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
1911 val = (eaddr[0] << 8) | eaddr[1];
1912 CSR_WRITE_4(sc, ET_WOL_SA_HI, val);
1913
1914 /* Clear packet filters */
1915 CSR_WRITE_4(sc, ET_PKTFILT, 0);
1916
1917 /* No ucast filtering */
1918 CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
1919 CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
1920 CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);
1921
1922 if (ET_FRAMELEN(if_getmtu(ifp)) > ET_RXMAC_CUT_THRU_FRMLEN) {
1923 /*
1924 * In order to transmit jumbo packets greater than
1925 * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between
1926 * RX MAC and RX DMA needs to be reduced in size to
1927 * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen). In
1928 * order to implement this, we must use "cut through"
1929 * mode in the RX MAC, which chops packets down into
1930 * segments. In this case we selected 256 bytes,
1931 * since this is the size of the PCI-Express TLPs
1932 * that the ET1310 uses.
1933 */
1934 val = (ET_RXMAC_SEGSZ(256) & ET_RXMAC_MC_SEGSZ_MAX_MASK) |
1935 ET_RXMAC_MC_SEGSZ_ENABLE;
1936 } else {
1937 val = 0;
1938 }
1939 CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);
1940
1941 CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);
1942
1943 /* Initialize RX MAC management register */
1944 CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);
1945
1946 CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);
1947
1948 CSR_WRITE_4(sc, ET_RXMAC_MGT,
1949 ET_RXMAC_MGT_PASS_ECRC |
1950 ET_RXMAC_MGT_PASS_ELEN |
1951 ET_RXMAC_MGT_PASS_ETRUNC |
1952 ET_RXMAC_MGT_CHECK_PKT);
1953
1954 /*
1955 * Configure runt filtering (may not work on certain chip generations)
1956 */
1957 val = (ETHER_MIN_LEN << ET_PKTFILT_MINLEN_SHIFT) &
1958 ET_PKTFILT_MINLEN_MASK;
1959 val |= ET_PKTFILT_FRAG;
1960 CSR_WRITE_4(sc, ET_PKTFILT, val);
1961
1962 /* Enable RX MAC but leave WOL disabled */
1963 CSR_WRITE_4(sc, ET_RXMAC_CTRL,
1964 ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);
1965
1966 /*
1967 * Set up the multicast hash and allmulti/promisc mode
1968 */
1969 et_setmulti(sc);
1970 }
1971
1972 static void
1973 et_init_txmac(struct et_softc *sc)
1974 {
1975
1976 /* Disable TX MAC and FC(?) */
1977 CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);
1978
1979 /*
1980 * Initialize pause time.
1981 * This register should be set before any XON/XOFF frame is
1982 * sent by the driver.
1983 */
1984 CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0 << ET_TXMAC_FLOWCTRL_CFPT_SHIFT);
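	/*
	 * Note (assumption): CFPT appears to be the pause-time value carried
	 * in transmitted PAUSE frames, which 802.3x expresses in 512-bit-time
	 * quanta.  Zero is acceptable here only because flow control is kept
	 * disabled below; a driver that enables FC would program a non-zero
	 * pause time first, as the comment above requires.
	 */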
1985
1986 /* Enable TX MAC but leave FC(?) disabled */
1987 CSR_WRITE_4(sc, ET_TXMAC_CTRL,
1988 ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
1989 }
1990
1991 static int
1992 et_start_rxdma(struct et_softc *sc)
1993 {
1994 uint32_t val;
1995
1996 val = (sc->sc_rx_data[0].rbd_bufsize & ET_RXDMA_CTRL_RING0_SIZE_MASK) |
1997 ET_RXDMA_CTRL_RING0_ENABLE;
1998 val |= (sc->sc_rx_data[1].rbd_bufsize & ET_RXDMA_CTRL_RING1_SIZE_MASK) |
1999 ET_RXDMA_CTRL_RING1_ENABLE;
2000
2001 CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);
2002
2003 DELAY(5);
2004
2005 if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
2006 if_printf(sc->ifp, "can't start RX DMA engine\n");
2007 return (ETIMEDOUT);
2008 }
2009 return (0);
2010 }
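
/*
 * The DELAY(5) plus single register read above is how the driver decides
 * whether the RX DMA engine left the HALTED state.  A minimal sketch of a
 * more defensive poll-with-timeout variant is kept below for illustration
 * only; it is not compiled, and et_start_rxdma_polled() is a hypothetical
 * name, but it uses only the accessors already used in this file.
 */
#if 0
static int
et_start_rxdma_polled(struct et_softc *sc)
{
	int i;

	/* The same ring-size/enable programming as et_start_rxdma(). */
	for (i = 0; i < 100; i++) {
		DELAY(5);
		if ((CSR_READ_4(sc, ET_RXDMA_CTRL) &
		    ET_RXDMA_CTRL_HALTED) == 0)
			return (0);
	}
	if_printf(sc->ifp, "can't start RX DMA engine\n");
	return (ETIMEDOUT);
}
#endif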
2011
2012 static int
2013 et_start_txdma(struct et_softc *sc)
2014 {
2015
2016 CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
2017 return (0);
2018 }
2019
2020 static void
2021 et_rxeof(struct et_softc *sc)
2022 {
2023 struct et_rxstatus_data *rxsd;
2024 struct et_rxstat_ring *rxst_ring;
2025 struct et_rxbuf_data *rbd;
2026 struct et_rxdesc_ring *rx_ring;
2027 struct et_rxstat *st;
2028 if_t ifp;
2029 struct mbuf *m;
2030 uint32_t rxstat_pos, rxring_pos;
2031 uint32_t rxst_info1, rxst_info2, rxs_stat_ring;
2032 int buflen, buf_idx, npost[2], ring_idx;
2033 int rxst_index, rxst_wrap;
2034
2035 ET_LOCK_ASSERT(sc);
2036
2037 ifp = sc->ifp;
2038 rxsd = &sc->sc_rx_status;
2039 rxst_ring = &sc->sc_rxstat_ring;
2040
2041 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
2042 return;
2043
2044 bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
2045 BUS_DMASYNC_POSTREAD);
2046 bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
2047 BUS_DMASYNC_POSTREAD);
2048
2049 npost[0] = npost[1] = 0;
2050 rxs_stat_ring = le32toh(rxsd->rxsd_status->rxs_stat_ring);
2051 rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
2052 rxst_index = (rxs_stat_ring & ET_RXS_STATRING_INDEX_MASK) >>
2053 ET_RXS_STATRING_INDEX_SHIFT;
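	/*
	 * Both the hardware and the driver track the status ring with an
	 * index plus a wrap bit; the loop below consumes entries until both
	 * agree.  Worked example with a hypothetical ET_RX_NSTAT of 1024:
	 * if the driver stopped at index 1020 / wrap 0 and the hardware now
	 * reports index 2 / wrap 1, entries 1020..1023 and then 0..1 are
	 * pending.
	 */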
2054
2055 while (rxst_index != rxst_ring->rsr_index ||
2056 rxst_wrap != rxst_ring->rsr_wrap) {
2057 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
2058 break;
2059
2060 MPASS(rxst_ring->rsr_index < ET_RX_NSTAT);
2061 st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];
2062 rxst_info1 = le32toh(st->rxst_info1);
2063 rxst_info2 = le32toh(st->rxst_info2);
2064 buflen = (rxst_info2 & ET_RXST_INFO2_LEN_MASK) >>
2065 ET_RXST_INFO2_LEN_SHIFT;
2066 buf_idx = (rxst_info2 & ET_RXST_INFO2_BUFIDX_MASK) >>
2067 ET_RXST_INFO2_BUFIDX_SHIFT;
2068 ring_idx = (rxst_info2 & ET_RXST_INFO2_RINGIDX_MASK) >>
2069 ET_RXST_INFO2_RINGIDX_SHIFT;
2070
2071 if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
2072 rxst_ring->rsr_index = 0;
2073 rxst_ring->rsr_wrap ^= 1;
2074 }
2075 rxstat_pos = rxst_ring->rsr_index & ET_RXSTAT_POS_INDEX_MASK;
2076 if (rxst_ring->rsr_wrap)
2077 rxstat_pos |= ET_RXSTAT_POS_WRAP;
2078 CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);
2079
2080 if (ring_idx >= ET_RX_NRING) {
2081 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2082 if_printf(ifp, "invalid ring index %d\n", ring_idx);
2083 continue;
2084 }
2085 if (buf_idx >= ET_RX_NDESC) {
2086 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2087 if_printf(ifp, "invalid buf index %d\n", buf_idx);
2088 continue;
2089 }
2090
2091 rbd = &sc->sc_rx_data[ring_idx];
2092 m = rbd->rbd_buf[buf_idx].rb_mbuf;
2093 		if ((rxst_info1 & ET_RXST_INFO1_OK) == 0) {
2094 /* Discard errored frame. */
2095 rbd->rbd_discard(rbd, buf_idx);
2096 } else if (rbd->rbd_newbuf(rbd, buf_idx) != 0) {
2097 /* No available mbufs, discard it. */
2098 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
2099 rbd->rbd_discard(rbd, buf_idx);
2100 } else {
2101 buflen -= ETHER_CRC_LEN;
2102 if (buflen < ETHER_HDR_LEN) {
2103 m_freem(m);
2104 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2105 } else {
2106 m->m_pkthdr.len = m->m_len = buflen;
2107 m->m_pkthdr.rcvif = ifp;
2108 ET_UNLOCK(sc);
2109 if_input(ifp, m);
2110 ET_LOCK(sc);
2111 }
2112 }
2113
2114 rx_ring = &sc->sc_rx_ring[ring_idx];
2115 if (buf_idx != rx_ring->rr_index) {
2116 if_printf(ifp,
2117 "WARNING!! ring %d, buf_idx %d, rr_idx %d\n",
2118 ring_idx, buf_idx, rx_ring->rr_index);
2119 }
2120
2121 MPASS(rx_ring->rr_index < ET_RX_NDESC);
2122 if (++rx_ring->rr_index == ET_RX_NDESC) {
2123 rx_ring->rr_index = 0;
2124 rx_ring->rr_wrap ^= 1;
2125 }
2126 rxring_pos = rx_ring->rr_index & ET_RX_RING_POS_INDEX_MASK;
2127 if (rx_ring->rr_wrap)
2128 rxring_pos |= ET_RX_RING_POS_WRAP;
2129 CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
2130 }
2131
2132 bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
2133 BUS_DMASYNC_PREREAD);
2134 bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
2135 BUS_DMASYNC_PREREAD);
2136 }
2137
2138 static int
2139 et_encap(struct et_softc *sc, struct mbuf **m0)
2140 {
2141 struct et_txdesc_ring *tx_ring;
2142 struct et_txbuf_data *tbd;
2143 struct et_txdesc *td;
2144 struct mbuf *m;
2145 bus_dma_segment_t segs[ET_NSEG_MAX];
2146 bus_dmamap_t map;
2147 uint32_t csum_flags, last_td_ctrl2;
2148 int error, i, idx, first_idx, last_idx, nsegs;
2149
2150 tx_ring = &sc->sc_tx_ring;
2151 MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
2152 tbd = &sc->sc_tx_data;
2153 first_idx = tx_ring->tr_ready_index;
2154 map = tbd->tbd_buf[first_idx].tb_dmap;
2155
2156 error = bus_dmamap_load_mbuf_sg(sc->sc_tx_tag, map, *m0, segs, &nsegs,
2157 0);
2158 if (error == EFBIG) {
2159 m = m_collapse(*m0, M_NOWAIT, ET_NSEG_MAX);
2160 if (m == NULL) {
2161 m_freem(*m0);
2162 *m0 = NULL;
2163 return (ENOMEM);
2164 }
2165 *m0 = m;
2166 error = bus_dmamap_load_mbuf_sg(sc->sc_tx_tag, map, *m0, segs,
2167 &nsegs, 0);
2168 if (error != 0) {
2169 m_freem(*m0);
2170 *m0 = NULL;
2171 return (error);
2172 }
2173 } else if (error != 0)
2174 return (error);
2175
2176 /* Check for descriptor overruns. */
2177 if (tbd->tbd_used + nsegs > ET_TX_NDESC - 1) {
2178 bus_dmamap_unload(sc->sc_tx_tag, map);
2179 return (ENOBUFS);
2180 }
2181 bus_dmamap_sync(sc->sc_tx_tag, map, BUS_DMASYNC_PREWRITE);
2182
2183 last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
2184 sc->sc_tx += nsegs;
2185 if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
2186 sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
2187 last_td_ctrl2 |= ET_TDCTRL2_INTR;
2188 }
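	/*
	 * Worked example of the interrupt moderation above: with
	 * sc_tx_intr_nsegs set to, say, 128, ET_TDCTRL2_INTR is requested
	 * only on the frame whose segments carry sc_tx across the next
	 * multiple of 128, so TX completion interrupts arrive roughly once
	 * per 128 descriptors rather than once per packet.
	 */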
2189
2190 m = *m0;
2191 csum_flags = 0;
2192 if ((m->m_pkthdr.csum_flags & ET_CSUM_FEATURES) != 0) {
2193 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2194 csum_flags |= ET_TDCTRL2_CSUM_IP;
2195 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2196 csum_flags |= ET_TDCTRL2_CSUM_UDP;
2197 else if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2198 csum_flags |= ET_TDCTRL2_CSUM_TCP;
2199 }
2200 last_idx = -1;
2201 for (i = 0; i < nsegs; ++i) {
2202 idx = (first_idx + i) % ET_TX_NDESC;
2203 td = &tx_ring->tr_desc[idx];
2204 td->td_addr_hi = htole32(ET_ADDR_HI(segs[i].ds_addr));
2205 td->td_addr_lo = htole32(ET_ADDR_LO(segs[i].ds_addr));
2206 td->td_ctrl1 = htole32(segs[i].ds_len & ET_TDCTRL1_LEN_MASK);
2207 if (i == nsegs - 1) {
2208 /* Last frag */
2209 td->td_ctrl2 = htole32(last_td_ctrl2 | csum_flags);
2210 last_idx = idx;
2211 } else
2212 td->td_ctrl2 = htole32(csum_flags);
2213
2214 MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
2215 if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
2216 tx_ring->tr_ready_index = 0;
2217 tx_ring->tr_ready_wrap ^= 1;
2218 }
2219 }
2220 td = &tx_ring->tr_desc[first_idx];
2221 /* First frag */
2222 td->td_ctrl2 |= htole32(ET_TDCTRL2_FIRST_FRAG);
2223
2224 MPASS(last_idx >= 0);
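	/*
	 * The map loaded above came from the slot at first_idx; move it to
	 * last_idx, where the mbuf pointer is stored, so that et_txeof()
	 * unloads the correct map when it frees the mbuf, and park
	 * last_idx's old (idle) map at first_idx for a future frame.
	 */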
2225 tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
2226 tbd->tbd_buf[last_idx].tb_dmap = map;
2227 tbd->tbd_buf[last_idx].tb_mbuf = m;
2228
2229 tbd->tbd_used += nsegs;
2230 MPASS(tbd->tbd_used <= ET_TX_NDESC);
2231
2232 return (0);
2233 }
2234
2235 static void
2236 et_txeof(struct et_softc *sc)
2237 {
2238 struct et_txdesc_ring *tx_ring;
2239 struct et_txbuf_data *tbd;
2240 struct et_txbuf *tb;
2241 if_t ifp;
2242 uint32_t tx_done;
2243 int end, wrap;
2244
2245 ET_LOCK_ASSERT(sc);
2246
2247 ifp = sc->ifp;
2248 tx_ring = &sc->sc_tx_ring;
2249 tbd = &sc->sc_tx_data;
2250
2251 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
2252 return;
2253
2254 if (tbd->tbd_used == 0)
2255 return;
2256
2257 bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
2258 BUS_DMASYNC_POSTWRITE);
2259
2260 tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
2261 end = tx_done & ET_TX_DONE_POS_INDEX_MASK;
2262 wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;
2263
2264 while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
2265 MPASS(tbd->tbd_start_index < ET_TX_NDESC);
2266 tb = &tbd->tbd_buf[tbd->tbd_start_index];
2267 if (tb->tb_mbuf != NULL) {
2268 bus_dmamap_sync(sc->sc_tx_tag, tb->tb_dmap,
2269 BUS_DMASYNC_POSTWRITE);
2270 bus_dmamap_unload(sc->sc_tx_tag, tb->tb_dmap);
2271 m_freem(tb->tb_mbuf);
2272 tb->tb_mbuf = NULL;
2273 }
2274
2275 if (++tbd->tbd_start_index == ET_TX_NDESC) {
2276 tbd->tbd_start_index = 0;
2277 tbd->tbd_start_wrap ^= 1;
2278 }
2279
2280 MPASS(tbd->tbd_used > 0);
2281 tbd->tbd_used--;
2282 }
2283
2284 if (tbd->tbd_used == 0)
2285 sc->watchdog_timer = 0;
2286 if (tbd->tbd_used + ET_NSEG_SPARE < ET_TX_NDESC)
2287 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2288 }
2289
2290 static void
2291 et_tick(void *xsc)
2292 {
2293 struct et_softc *sc;
2294 struct mii_data *mii;
2295
2296 sc = xsc;
2297 ET_LOCK_ASSERT(sc);
2298 mii = device_get_softc(sc->sc_miibus);
2299
2300 mii_tick(mii);
2301 et_stats_update(sc);
2302 if (et_watchdog(sc) == EJUSTRETURN)
2303 return;
2304 callout_reset(&sc->sc_tick, hz, et_tick, sc);
2305 }
2306
2307 static int
2308 et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx)
2309 {
2310 struct et_softc *sc;
2311 struct et_rxdesc *desc;
2312 struct et_rxbuf *rb;
2313 struct mbuf *m;
2314 bus_dma_segment_t segs[1];
2315 bus_dmamap_t dmap;
2316 int nsegs;
2317
2318 MPASS(buf_idx < ET_RX_NDESC);
2319 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2320 if (m == NULL)
2321 return (ENOBUFS);
2322 m->m_len = m->m_pkthdr.len = MCLBYTES;
2323 m_adj(m, ETHER_ALIGN);
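	/*
	 * ETHER_ALIGN (2 bytes) is trimmed from the front of the cluster so
	 * that the IP header following the 14-byte Ethernet header lands on
	 * a 32-bit boundary once the frame is received.
	 */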
2324
2325 sc = rbd->rbd_softc;
2326 rb = &rbd->rbd_buf[buf_idx];
2327
2328 if (bus_dmamap_load_mbuf_sg(sc->sc_rx_tag, sc->sc_rx_sparemap, m,
2329 segs, &nsegs, 0) != 0) {
2330 m_freem(m);
2331 return (ENOBUFS);
2332 }
2333 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2334
2335 if (rb->rb_mbuf != NULL) {
2336 bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap,
2337 BUS_DMASYNC_POSTREAD);
2338 bus_dmamap_unload(sc->sc_rx_tag, rb->rb_dmap);
2339 }
2340 dmap = rb->rb_dmap;
2341 rb->rb_dmap = sc->sc_rx_sparemap;
2342 sc->sc_rx_sparemap = dmap;
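	/*
	 * Spare-map idiom: the new mbuf was loaded into sc_rx_sparemap
	 * first, so a load failure leaves the old mbuf/map pair intact;
	 * only on success are the descriptor's map and the spare swapped.
	 */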
2343 bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap, BUS_DMASYNC_PREREAD);
2344
2345 rb->rb_mbuf = m;
2346 desc = &rbd->rbd_ring->rr_desc[buf_idx];
2347 desc->rd_addr_hi = htole32(ET_ADDR_HI(segs[0].ds_addr));
2348 desc->rd_addr_lo = htole32(ET_ADDR_LO(segs[0].ds_addr));
2349 desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
2350 bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
2351 BUS_DMASYNC_PREWRITE);
2352 return (0);
2353 }
2354
2355 static void
2356 et_rxbuf_discard(struct et_rxbuf_data *rbd, int buf_idx)
2357 {
2358 struct et_rxdesc *desc;
2359
2360 desc = &rbd->rbd_ring->rr_desc[buf_idx];
2361 desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
2362 bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
2363 BUS_DMASYNC_PREWRITE);
2364 }
2365
2366 static int
2367 et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx)
2368 {
2369 struct et_softc *sc;
2370 struct et_rxdesc *desc;
2371 struct et_rxbuf *rb;
2372 struct mbuf *m;
2373 bus_dma_segment_t segs[1];
2374 bus_dmamap_t dmap;
2375 int nsegs;
2376
2377 MPASS(buf_idx < ET_RX_NDESC);
2378 MGETHDR(m, M_NOWAIT, MT_DATA);
2379 if (m == NULL)
2380 return (ENOBUFS);
2381 m->m_len = m->m_pkthdr.len = MHLEN;
2382 m_adj(m, ETHER_ALIGN);
2383
2384 sc = rbd->rbd_softc;
2385 rb = &rbd->rbd_buf[buf_idx];
2386
2387 if (bus_dmamap_load_mbuf_sg(sc->sc_rx_mini_tag, sc->sc_rx_mini_sparemap,
2388 m, segs, &nsegs, 0) != 0) {
2389 m_freem(m);
2390 return (ENOBUFS);
2391 }
2392 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2393
2394 if (rb->rb_mbuf != NULL) {
2395 bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap,
2396 BUS_DMASYNC_POSTREAD);
2397 bus_dmamap_unload(sc->sc_rx_mini_tag, rb->rb_dmap);
2398 }
2399 dmap = rb->rb_dmap;
2400 rb->rb_dmap = sc->sc_rx_mini_sparemap;
2401 sc->sc_rx_mini_sparemap = dmap;
2402 bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap, BUS_DMASYNC_PREREAD);
2403
2404 rb->rb_mbuf = m;
2405 desc = &rbd->rbd_ring->rr_desc[buf_idx];
2406 desc->rd_addr_hi = htole32(ET_ADDR_HI(segs[0].ds_addr));
2407 desc->rd_addr_lo = htole32(ET_ADDR_LO(segs[0].ds_addr));
2408 desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
2409 bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
2410 BUS_DMASYNC_PREWRITE);
2411 return (0);
2412 }
2413
2414 #define ET_SYSCTL_STAT_ADD32(c, h, n, p, d) \
2415 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
2416 #define ET_SYSCTL_STAT_ADD64(c, h, n, p, d) \
2417 SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
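
/*
 * These helpers are used by et_add_sysctls() below to hang read-only
 * statistics off the per-device sysctl tree.  Assuming the first instance
 * attaches as et0, the resulting nodes could be inspected and tuned from
 * userland roughly as follows (node names are illustrative):
 *
 *	# sysctl dev.et.0.stats.rx.frames dev.et.0.stats.tx.frames
 *	# sysctl dev.et.0.rx_intr_npkts=64
 */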
2418
2419 /*
2420 * Create sysctl tree
2421 */
2422 static void
2423 et_add_sysctls(struct et_softc *sc)
2424 {
2425 struct sysctl_ctx_list *ctx;
2426 struct sysctl_oid_list *children, *parent;
2427 struct sysctl_oid *tree;
2428 struct et_hw_stats *stats;
2429
2430 ctx = device_get_sysctl_ctx(sc->dev);
2431 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
2432
2433 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_npkts",
2434 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
2435 et_sysctl_rx_intr_npkts, "I", "RX IM, # packets per RX interrupt");
2436 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_delay",
2437 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
2438 et_sysctl_rx_intr_delay, "I",
2439 "RX IM, RX interrupt delay (x10 usec)");
2440 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_intr_nsegs",
2441 CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0,
2442 "TX IM, # segments per TX interrupt");
2443 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "timer",
2444 CTLFLAG_RW, &sc->sc_timer, 0, "TX timer");
2445
2446 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats",
2447 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "ET statistics");
2448 parent = SYSCTL_CHILDREN(tree);
2449
2450 /* TX/RX statistics. */
2451 stats = &sc->sc_stats;
2452 ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_64", &stats->pkts_64,
2453 "0 to 64 bytes frames");
2454 ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_65_127", &stats->pkts_65,
2455 "65 to 127 bytes frames");
2456 ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_128_255", &stats->pkts_128,
2457 "128 to 255 bytes frames");
2458 ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_256_511", &stats->pkts_256,
2459 "256 to 511 bytes frames");
2460 ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_512_1023", &stats->pkts_512,
2461 "512 to 1023 bytes frames");
2462 ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_1024_1518", &stats->pkts_1024,
2463 "1024 to 1518 bytes frames");
2464 ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_1519_1522", &stats->pkts_1519,
2465 "1519 to 1522 bytes frames");
2466
2467 /* RX statistics. */
2468 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
2469 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX MAC statistics");
2470 children = SYSCTL_CHILDREN(tree);
2471 ET_SYSCTL_STAT_ADD64(ctx, children, "bytes",
2472 &stats->rx_bytes, "Good bytes");
2473 ET_SYSCTL_STAT_ADD64(ctx, children, "frames",
2474 &stats->rx_frames, "Good frames");
2475 ET_SYSCTL_STAT_ADD32(ctx, children, "crc_errs",
2476 &stats->rx_crcerrs, "CRC errors");
2477 ET_SYSCTL_STAT_ADD64(ctx, children, "mcast_frames",
2478 &stats->rx_mcast, "Multicast frames");
2479 ET_SYSCTL_STAT_ADD64(ctx, children, "bcast_frames",
2480 &stats->rx_bcast, "Broadcast frames");
2481 ET_SYSCTL_STAT_ADD32(ctx, children, "control",
2482 &stats->rx_control, "Control frames");
2483 ET_SYSCTL_STAT_ADD32(ctx, children, "pause",
2484 &stats->rx_pause, "Pause frames");
2485 ET_SYSCTL_STAT_ADD32(ctx, children, "unknown_control",
2486 &stats->rx_unknown_control, "Unknown control frames");
2487 ET_SYSCTL_STAT_ADD32(ctx, children, "align_errs",
2488 &stats->rx_alignerrs, "Alignment errors");
2489 ET_SYSCTL_STAT_ADD32(ctx, children, "len_errs",
2490 &stats->rx_lenerrs, "Frames with mismatched length");
2491 ET_SYSCTL_STAT_ADD32(ctx, children, "code_errs",
2492 &stats->rx_codeerrs, "Frames with code error");
2493 ET_SYSCTL_STAT_ADD32(ctx, children, "cs_errs",
2494 &stats->rx_cserrs, "Frames with carrier sense error");
2495 ET_SYSCTL_STAT_ADD32(ctx, children, "runts",
2496 &stats->rx_runts, "Too short frames");
2497 ET_SYSCTL_STAT_ADD64(ctx, children, "oversize",
2498 &stats->rx_oversize, "Oversized frames");
2499 ET_SYSCTL_STAT_ADD32(ctx, children, "fragments",
2500 &stats->rx_fragments, "Fragmented frames");
2501 ET_SYSCTL_STAT_ADD32(ctx, children, "jabbers",
2502 &stats->rx_jabbers, "Frames with jabber error");
2503 ET_SYSCTL_STAT_ADD32(ctx, children, "drop",
2504 &stats->rx_drop, "Dropped frames");
2505
2506 /* TX statistics. */
2507 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx",
2508 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX MAC statistics");
2509 children = SYSCTL_CHILDREN(tree);
2510 ET_SYSCTL_STAT_ADD64(ctx, children, "bytes",
2511 &stats->tx_bytes, "Good bytes");
2512 ET_SYSCTL_STAT_ADD64(ctx, children, "frames",
2513 &stats->tx_frames, "Good frames");
2514 ET_SYSCTL_STAT_ADD64(ctx, children, "mcast_frames",
2515 &stats->tx_mcast, "Multicast frames");
2516 ET_SYSCTL_STAT_ADD64(ctx, children, "bcast_frames",
2517 &stats->tx_bcast, "Broadcast frames");
2518 ET_SYSCTL_STAT_ADD32(ctx, children, "pause",
2519 &stats->tx_pause, "Pause frames");
2520 ET_SYSCTL_STAT_ADD32(ctx, children, "deferred",
2521 &stats->tx_deferred, "Deferred frames");
2522 ET_SYSCTL_STAT_ADD32(ctx, children, "excess_deferred",
2523 &stats->tx_excess_deferred, "Excessively deferred frames");
2524 ET_SYSCTL_STAT_ADD32(ctx, children, "single_colls",
2525 &stats->tx_single_colls, "Single collisions");
2526 ET_SYSCTL_STAT_ADD32(ctx, children, "multi_colls",
2527 &stats->tx_multi_colls, "Multiple collisions");
2528 ET_SYSCTL_STAT_ADD32(ctx, children, "late_colls",
2529 &stats->tx_late_colls, "Late collisions");
2530 ET_SYSCTL_STAT_ADD32(ctx, children, "excess_colls",
2531 &stats->tx_excess_colls, "Excess collisions");
2532 ET_SYSCTL_STAT_ADD32(ctx, children, "total_colls",
2533 &stats->tx_total_colls, "Total collisions");
2534 ET_SYSCTL_STAT_ADD32(ctx, children, "pause_honored",
2535 &stats->tx_pause_honored, "Honored pause frames");
2536 ET_SYSCTL_STAT_ADD32(ctx, children, "drop",
2537 &stats->tx_drop, "Dropped frames");
2538 ET_SYSCTL_STAT_ADD32(ctx, children, "jabbers",
2539 &stats->tx_jabbers, "Frames with jabber errors");
2540 ET_SYSCTL_STAT_ADD32(ctx, children, "crc_errs",
2541 &stats->tx_crcerrs, "Frames with CRC errors");
2542 ET_SYSCTL_STAT_ADD32(ctx, children, "control",
2543 &stats->tx_control, "Control frames");
2544 ET_SYSCTL_STAT_ADD64(ctx, children, "oversize",
2545 &stats->tx_oversize, "Oversized frames");
2546 ET_SYSCTL_STAT_ADD32(ctx, children, "undersize",
2547 &stats->tx_undersize, "Undersized frames");
2548 ET_SYSCTL_STAT_ADD32(ctx, children, "fragments",
2549 &stats->tx_fragments, "Fragmented frames");
2550 }
2551
2552 #undef ET_SYSCTL_STAT_ADD32
2553 #undef ET_SYSCTL_STAT_ADD64
2554
2555 static int
2556 et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS)
2557 {
2558 struct et_softc *sc;
2559 if_t ifp;
2560 int error, v;
2561
2562 sc = arg1;
2563 ifp = sc->ifp;
2564 v = sc->sc_rx_intr_npkts;
2565 error = sysctl_handle_int(oidp, &v, 0, req);
2566 if (error || req->newptr == NULL)
2567 goto back;
2568 if (v <= 0) {
2569 error = EINVAL;
2570 goto back;
2571 }
2572
2573 if (sc->sc_rx_intr_npkts != v) {
2574 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
2575 CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v);
2576 sc->sc_rx_intr_npkts = v;
2577 }
2578 back:
2579 return (error);
2580 }
2581
2582 static int
2583 et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS)
2584 {
2585 struct et_softc *sc;
2586 if_t ifp;
2587 int error, v;
2588
2589 sc = arg1;
2590 ifp = sc->ifp;
2591 v = sc->sc_rx_intr_delay;
2592 error = sysctl_handle_int(oidp, &v, 0, req);
2593 if (error || req->newptr == NULL)
2594 goto back;
2595 if (v <= 0) {
2596 error = EINVAL;
2597 goto back;
2598 }
2599
2600 if (sc->sc_rx_intr_delay != v) {
2601 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
2602 CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v);
2603 sc->sc_rx_intr_delay = v;
2604 }
2605 back:
2606 return (error);
2607 }
2608
2609 static void
2610 et_stats_update(struct et_softc *sc)
2611 {
2612 struct et_hw_stats *stats;
2613
2614 stats = &sc->sc_stats;
2615 stats->pkts_64 += CSR_READ_4(sc, ET_STAT_PKTS_64);
2616 stats->pkts_65 += CSR_READ_4(sc, ET_STAT_PKTS_65_127);
2617 stats->pkts_128 += CSR_READ_4(sc, ET_STAT_PKTS_128_255);
2618 stats->pkts_256 += CSR_READ_4(sc, ET_STAT_PKTS_256_511);
2619 stats->pkts_512 += CSR_READ_4(sc, ET_STAT_PKTS_512_1023);
2620 stats->pkts_1024 += CSR_READ_4(sc, ET_STAT_PKTS_1024_1518);
2621 stats->pkts_1519 += CSR_READ_4(sc, ET_STAT_PKTS_1519_1522);
2622
2623 stats->rx_bytes += CSR_READ_4(sc, ET_STAT_RX_BYTES);
2624 stats->rx_frames += CSR_READ_4(sc, ET_STAT_RX_FRAMES);
2625 stats->rx_crcerrs += CSR_READ_4(sc, ET_STAT_RX_CRC_ERR);
2626 stats->rx_mcast += CSR_READ_4(sc, ET_STAT_RX_MCAST);
2627 stats->rx_bcast += CSR_READ_4(sc, ET_STAT_RX_BCAST);
2628 stats->rx_control += CSR_READ_4(sc, ET_STAT_RX_CTL);
2629 stats->rx_pause += CSR_READ_4(sc, ET_STAT_RX_PAUSE);
2630 stats->rx_unknown_control += CSR_READ_4(sc, ET_STAT_RX_UNKNOWN_CTL);
2631 stats->rx_alignerrs += CSR_READ_4(sc, ET_STAT_RX_ALIGN_ERR);
2632 stats->rx_lenerrs += CSR_READ_4(sc, ET_STAT_RX_LEN_ERR);
2633 stats->rx_codeerrs += CSR_READ_4(sc, ET_STAT_RX_CODE_ERR);
2634 stats->rx_cserrs += CSR_READ_4(sc, ET_STAT_RX_CS_ERR);
2635 stats->rx_runts += CSR_READ_4(sc, ET_STAT_RX_RUNT);
2636 stats->rx_oversize += CSR_READ_4(sc, ET_STAT_RX_OVERSIZE);
2637 stats->rx_fragments += CSR_READ_4(sc, ET_STAT_RX_FRAG);
2638 stats->rx_jabbers += CSR_READ_4(sc, ET_STAT_RX_JABBER);
2639 stats->rx_drop += CSR_READ_4(sc, ET_STAT_RX_DROP);
2640
2641 stats->tx_bytes += CSR_READ_4(sc, ET_STAT_TX_BYTES);
2642 stats->tx_frames += CSR_READ_4(sc, ET_STAT_TX_FRAMES);
2643 stats->tx_mcast += CSR_READ_4(sc, ET_STAT_TX_MCAST);
2644 stats->tx_bcast += CSR_READ_4(sc, ET_STAT_TX_BCAST);
2645 stats->tx_pause += CSR_READ_4(sc, ET_STAT_TX_PAUSE);
2646 stats->tx_deferred += CSR_READ_4(sc, ET_STAT_TX_DEFER);
2647 stats->tx_excess_deferred += CSR_READ_4(sc, ET_STAT_TX_EXCESS_DEFER);
2648 stats->tx_single_colls += CSR_READ_4(sc, ET_STAT_TX_SINGLE_COL);
2649 stats->tx_multi_colls += CSR_READ_4(sc, ET_STAT_TX_MULTI_COL);
2650 stats->tx_late_colls += CSR_READ_4(sc, ET_STAT_TX_LATE_COL);
2651 stats->tx_excess_colls += CSR_READ_4(sc, ET_STAT_TX_EXCESS_COL);
2652 stats->tx_total_colls += CSR_READ_4(sc, ET_STAT_TX_TOTAL_COL);
2653 stats->tx_pause_honored += CSR_READ_4(sc, ET_STAT_TX_PAUSE_HONOR);
2654 stats->tx_drop += CSR_READ_4(sc, ET_STAT_TX_DROP);
2655 stats->tx_jabbers += CSR_READ_4(sc, ET_STAT_TX_JABBER);
2656 stats->tx_crcerrs += CSR_READ_4(sc, ET_STAT_TX_CRC_ERR);
2657 stats->tx_control += CSR_READ_4(sc, ET_STAT_TX_CTL);
2658 stats->tx_oversize += CSR_READ_4(sc, ET_STAT_TX_OVERSIZE);
2659 stats->tx_undersize += CSR_READ_4(sc, ET_STAT_TX_UNDERSIZE);
2660 stats->tx_fragments += CSR_READ_4(sc, ET_STAT_TX_FRAG);
2661 }
2662
2663 static uint64_t
2664 et_get_counter(if_t ifp, ift_counter cnt)
2665 {
2666 struct et_softc *sc;
2667 struct et_hw_stats *stats;
2668
2669 sc = if_getsoftc(ifp);
2670 stats = &sc->sc_stats;
2671
2672 switch (cnt) {
2673 case IFCOUNTER_OPACKETS:
2674 return (stats->tx_frames);
2675 case IFCOUNTER_COLLISIONS:
2676 return (stats->tx_total_colls);
2677 case IFCOUNTER_OERRORS:
2678 return (stats->tx_drop + stats->tx_jabbers +
2679 stats->tx_crcerrs + stats->tx_excess_deferred +
2680 stats->tx_late_colls);
2681 case IFCOUNTER_IPACKETS:
2682 return (stats->rx_frames);
2683 case IFCOUNTER_IERRORS:
2684 return (stats->rx_crcerrs + stats->rx_alignerrs +
2685 stats->rx_lenerrs + stats->rx_codeerrs + stats->rx_cserrs +
2686 stats->rx_runts + stats->rx_jabbers + stats->rx_drop);
2687 default:
2688 return (if_get_counter_default(ifp, cnt));
2689 }
2690 }
2691
2692 static int
2693 et_suspend(device_t dev)
2694 {
2695 struct et_softc *sc;
2696 uint32_t pmcfg;
2697
2698 sc = device_get_softc(dev);
2699 ET_LOCK(sc);
2700 if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != 0)
2701 et_stop(sc);
2702 	/* Disable all clocks and put PHY into COMA. */
2703 pmcfg = CSR_READ_4(sc, ET_PM);
2704 pmcfg &= ~(EM_PM_GIGEPHY_ENB | ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE |
2705 ET_PM_RXCLK_GATE);
2706 pmcfg |= ET_PM_PHY_SW_COMA;
2707 CSR_WRITE_4(sc, ET_PM, pmcfg);
2708 ET_UNLOCK(sc);
2709 return (0);
2710 }
2711
2712 static int
2713 et_resume(device_t dev)
2714 {
2715 struct et_softc *sc;
2716 uint32_t pmcfg;
2717
2718 sc = device_get_softc(dev);
2719 ET_LOCK(sc);
2720 /* Take PHY out of COMA and enable clocks. */
2721 pmcfg = ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE;
2722 if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
2723 pmcfg |= EM_PM_GIGEPHY_ENB;
2724 CSR_WRITE_4(sc, ET_PM, pmcfg);
2725 if ((if_getflags(sc->ifp) & IFF_UP) != 0)
2726 et_init_locked(sc);
2727 ET_UNLOCK(sc);
2728 return (0);
2729 }
2730