xref: /freebsd/sys/dev/ae/if_ae.c (revision 357378bbdedf24ce2b90e9bd831af4a9db3ec70a)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2008 Stanislav Sedov <stas@FreeBSD.org>.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  *
27  * Driver for Attansic Technology Corp. L2 FastEthernet adapter.
28  *
29  * This driver is heavily based on age(4) Attansic L1 driver by Pyun YongHyeon.
30  */
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/bus.h>
35 #include <sys/endian.h>
36 #include <sys/kernel.h>
37 #include <sys/lock.h>
38 #include <sys/malloc.h>
39 #include <sys/mbuf.h>
40 #include <sys/mutex.h>
41 #include <sys/rman.h>
42 #include <sys/module.h>
43 #include <sys/queue.h>
44 #include <sys/socket.h>
45 #include <sys/sockio.h>
46 #include <sys/sysctl.h>
47 #include <sys/taskqueue.h>
48 
49 #include <net/bpf.h>
50 #include <net/if.h>
51 #include <net/if_var.h>
52 #include <net/if_arp.h>
53 #include <net/ethernet.h>
54 #include <net/if_dl.h>
55 #include <net/if_media.h>
56 #include <net/if_types.h>
57 #include <net/if_vlan_var.h>
58 
59 #include <netinet/in.h>
60 #include <netinet/in_systm.h>
61 #include <netinet/ip.h>
62 #include <netinet/tcp.h>
63 
64 #include <dev/mii/mii.h>
65 #include <dev/mii/miivar.h>
66 #include <dev/pci/pcireg.h>
67 #include <dev/pci/pcivar.h>
68 
69 #include <machine/bus.h>
70 
71 #include "miibus_if.h"
72 
73 #include "if_aereg.h"
74 #include "if_aevar.h"
75 
76 /*
77  * Devices supported by this driver.
78  */
79 static struct ae_dev {
80 	uint16_t	vendorid;
81 	uint16_t	deviceid;
82 	const char	*name;
83 } ae_devs[] = {
84 	{ VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L2,
85 		"Attansic Technology Corp, L2 FastEthernet" },
86 };
87 #define	AE_DEVS_COUNT nitems(ae_devs)
88 
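/*
 * Bus resource specifications: one memory BAR for the registers, plus
 * either a legacy INTx interrupt (rid 0, shareable) or a single MSI
 * message (rid 1, the usual rid for the first MSI vector).
 */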
89 static struct resource_spec ae_res_spec_mem[] = {
90 	{ SYS_RES_MEMORY,       PCIR_BAR(0),    RF_ACTIVE },
91 	{ -1,			0,		0 }
92 };
93 static struct resource_spec ae_res_spec_irq[] = {
94 	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
95 	{ -1,			0,		0 }
96 };
97 static struct resource_spec ae_res_spec_msi[] = {
98 	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
99 	{ -1,			0,		0 }
100 };
101 
102 static int	ae_probe(device_t dev);
103 static int	ae_attach(device_t dev);
104 static void	ae_pcie_init(ae_softc_t *sc);
105 static void	ae_phy_reset(ae_softc_t *sc);
106 static void	ae_phy_init(ae_softc_t *sc);
107 static int	ae_reset(ae_softc_t *sc);
108 static void	ae_init(void *arg);
109 static int	ae_init_locked(ae_softc_t *sc);
110 static int	ae_detach(device_t dev);
111 static int	ae_miibus_readreg(device_t dev, int phy, int reg);
112 static int	ae_miibus_writereg(device_t dev, int phy, int reg, int val);
113 static void	ae_miibus_statchg(device_t dev);
114 static void	ae_mediastatus(if_t ifp, struct ifmediareq *ifmr);
115 static int	ae_mediachange(if_t ifp);
116 static void	ae_retrieve_address(ae_softc_t *sc);
117 static void	ae_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs,
118     int error);
119 static int	ae_alloc_rings(ae_softc_t *sc);
120 static void	ae_dma_free(ae_softc_t *sc);
121 static int	ae_shutdown(device_t dev);
122 static int	ae_suspend(device_t dev);
123 static void	ae_powersave_disable(ae_softc_t *sc);
124 static void	ae_powersave_enable(ae_softc_t *sc);
125 static int	ae_resume(device_t dev);
126 static unsigned int	ae_tx_avail_size(ae_softc_t *sc);
127 static int	ae_encap(ae_softc_t *sc, struct mbuf **m_head);
128 static void	ae_start(if_t ifp);
129 static void	ae_start_locked(if_t ifp);
130 static void	ae_link_task(void *arg, int pending);
131 static void	ae_stop_rxmac(ae_softc_t *sc);
132 static void	ae_stop_txmac(ae_softc_t *sc);
133 static void	ae_mac_config(ae_softc_t *sc);
134 static int	ae_intr(void *arg);
135 static void	ae_int_task(void *arg, int pending);
136 static void	ae_tx_intr(ae_softc_t *sc);
137 static void	ae_rxeof(ae_softc_t *sc, ae_rxd_t *rxd);
138 static void	ae_rx_intr(ae_softc_t *sc);
139 static void	ae_watchdog(ae_softc_t *sc);
140 static void	ae_tick(void *arg);
141 static void	ae_rxfilter(ae_softc_t *sc);
142 static void	ae_rxvlan(ae_softc_t *sc);
143 static int	ae_ioctl(if_t ifp, u_long cmd, caddr_t data);
144 static void	ae_stop(ae_softc_t *sc);
145 static int	ae_check_eeprom_present(ae_softc_t *sc, int *vpdc);
146 static int	ae_vpd_read_word(ae_softc_t *sc, int reg, uint32_t *word);
147 static int	ae_get_vpd_eaddr(ae_softc_t *sc, uint32_t *eaddr);
148 static int	ae_get_reg_eaddr(ae_softc_t *sc, uint32_t *eaddr);
149 static void	ae_update_stats_rx(uint16_t flags, ae_stats_t *stats);
150 static void	ae_update_stats_tx(uint16_t flags, ae_stats_t *stats);
151 static void	ae_init_tunables(ae_softc_t *sc);
152 
153 static device_method_t ae_methods[] = {
154 	/* Device interface. */
155 	DEVMETHOD(device_probe,		ae_probe),
156 	DEVMETHOD(device_attach,	ae_attach),
157 	DEVMETHOD(device_detach,	ae_detach),
158 	DEVMETHOD(device_shutdown,	ae_shutdown),
159 	DEVMETHOD(device_suspend,	ae_suspend),
160 	DEVMETHOD(device_resume,	ae_resume),
161 
162 	/* MII interface. */
163 	DEVMETHOD(miibus_readreg,	ae_miibus_readreg),
164 	DEVMETHOD(miibus_writereg,	ae_miibus_writereg),
165 	DEVMETHOD(miibus_statchg,	ae_miibus_statchg),
166 	{ NULL, NULL }
167 };
168 static driver_t ae_driver = {
169 	"ae",
170 	ae_methods,
171 	sizeof(ae_softc_t)
172 };
173 
174 DRIVER_MODULE(ae, pci, ae_driver, 0, 0);
175 MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, ae, ae_devs,
176     nitems(ae_devs));
177 DRIVER_MODULE(miibus, ae, miibus_driver, 0, 0);
178 MODULE_DEPEND(ae, pci, 1, 1, 1);
179 MODULE_DEPEND(ae, ether, 1, 1, 1);
180 MODULE_DEPEND(ae, miibus, 1, 1, 1);
181 
182 /*
183  * Tunables.
184  */
185 static int msi_disable = 0;
186 TUNABLE_INT("hw.ae.msi_disable", &msi_disable);
187 
188 #define	AE_READ_4(sc, reg) \
189 	bus_read_4((sc)->mem[0], (reg))
190 #define	AE_READ_2(sc, reg) \
191 	bus_read_2((sc)->mem[0], (reg))
192 #define	AE_READ_1(sc, reg) \
193 	bus_read_1((sc)->mem[0], (reg))
194 #define	AE_WRITE_4(sc, reg, val) \
195 	bus_write_4((sc)->mem[0], (reg), (val))
196 #define	AE_WRITE_2(sc, reg, val) \
197 	bus_write_2((sc)->mem[0], (reg), (val))
198 #define	AE_WRITE_1(sc, reg, val) \
199 	bus_write_1((sc)->mem[0], (reg), (val))
200 #define	AE_PHY_READ(sc, reg) \
201 	ae_miibus_readreg(sc->dev, 0, reg)
202 #define	AE_PHY_WRITE(sc, reg, val) \
203 	ae_miibus_writereg(sc->dev, 0, reg, val)
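/*
 * An EEPROM/register address of all zeros or all ones is treated as
 * unprogrammed; the macro evaluates non-zero for such addresses.
 */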
204 #define	AE_CHECK_EADDR_VALID(eaddr) \
205 	((eaddr[0] == 0 && eaddr[1] == 0) || \
206 	(eaddr[0] == 0xffffffff && eaddr[1] == 0xffff))
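/*
 * The chip apparently keeps the 16-bit 802.1Q TCI rotated four bits
 * relative to the host layout; these two macros convert between the
 * descriptor and host representations and are exact inverses.
 */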
207 #define	AE_RXD_VLAN(vtag) \
208 	(((vtag) >> 4) | (((vtag) & 0x07) << 13) | (((vtag) & 0x08) << 9))
209 #define	AE_TXD_VLAN(vtag) \
210 	(((vtag) << 4) | (((vtag) >> 13) & 0x07) | (((vtag) >> 9) & 0x08))
211 
212 static int
213 ae_probe(device_t dev)
214 {
215 	uint16_t deviceid, vendorid;
216 	int i;
217 
218 	vendorid = pci_get_vendor(dev);
219 	deviceid = pci_get_device(dev);
220 
221 	/*
222 	 * Search through the list of supported devices for a matching one.
223 	 */
224 	for (i = 0; i < AE_DEVS_COUNT; i++) {
225 		if (vendorid == ae_devs[i].vendorid &&
226 		    deviceid == ae_devs[i].deviceid) {
227 			device_set_desc(dev, ae_devs[i].name);
228 			return (BUS_PROBE_DEFAULT);
229 		}
230 	}
231 	return (ENXIO);
232 }
233 
234 static int
235 ae_attach(device_t dev)
236 {
237 	ae_softc_t *sc;
238 	if_t ifp;
239 	uint8_t chiprev;
240 	uint32_t pcirev;
241 	int nmsi, pmc;
242 	int error;
243 
244 	sc = device_get_softc(dev); /* Automatically allocated and zeroed
245 				       on attach. */
246 	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
247 	sc->dev = dev;
248 
249 	/*
250 	 * Initialize mutexes and tasks.
251 	 */
252 	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
253 	callout_init_mtx(&sc->tick_ch, &sc->mtx, 0);
254 	TASK_INIT(&sc->int_task, 0, ae_int_task, sc);
255 	TASK_INIT(&sc->link_task, 0, ae_link_task, sc);
256 
257 	pci_enable_busmaster(dev);		/* Enable bus mastering. */
258 
259 	sc->spec_mem = ae_res_spec_mem;
260 
261 	/*
262 	 * Allocate memory-mapped registers.
263 	 */
264 	error = bus_alloc_resources(dev, sc->spec_mem, sc->mem);
265 	if (error != 0) {
266 		device_printf(dev, "could not allocate memory resources.\n");
267 		sc->spec_mem = NULL;
268 		goto fail;
269 	}
270 
271 	/*
272 	 * Retrieve PCI and chip revisions.
273 	 */
274 	pcirev = pci_get_revid(dev);
275 	chiprev = (AE_READ_4(sc, AE_MASTER_REG) >> AE_MASTER_REVNUM_SHIFT) &
276 	    AE_MASTER_REVNUM_MASK;
277 	if (bootverbose) {
278 		device_printf(dev, "pci device revision: %#04x\n", pcirev);
279 		device_printf(dev, "chip id: %#02x\n", chiprev);
280 	}
281 	nmsi = pci_msi_count(dev);
282 	if (bootverbose)
283 		device_printf(dev, "MSI count: %d.\n", nmsi);
284 
285 	/*
286 	 * Allocate interrupt resources.
287 	 */
288 	if (msi_disable == 0 && nmsi == 1) {
289 		error = pci_alloc_msi(dev, &nmsi);
290 		if (error == 0) {
291 			device_printf(dev, "Using MSI messages.\n");
292 			sc->spec_irq = ae_res_spec_msi;
293 			error = bus_alloc_resources(dev, sc->spec_irq, sc->irq);
294 			if (error != 0) {
295 				device_printf(dev, "MSI allocation failed.\n");
296 				sc->spec_irq = NULL;
297 				pci_release_msi(dev);
298 			} else {
299 				sc->flags |= AE_FLAG_MSI;
300 			}
301 		}
302 	}
303 	if (sc->spec_irq == NULL) {
304 		sc->spec_irq = ae_res_spec_irq;
305 		error = bus_alloc_resources(dev, sc->spec_irq, sc->irq);
306 		if (error != 0) {
307 			device_printf(dev, "could not allocate IRQ resources.\n");
308 			sc->spec_irq = NULL;
309 			goto fail;
310 		}
311 	}
312 
313 	ae_init_tunables(sc);
314 
315 	ae_phy_reset(sc);		/* Reset PHY. */
316 	error = ae_reset(sc);		/* Reset the controller itself. */
317 	if (error != 0)
318 		goto fail;
319 
320 	ae_pcie_init(sc);
321 
322 	ae_retrieve_address(sc);	/* Load MAC address. */
323 
324 	error = ae_alloc_rings(sc);	/* Allocate ring buffers. */
325 	if (error != 0)
326 		goto fail;
327 
328 	ifp = sc->ifp = if_alloc(IFT_ETHER);
329 	if_setsoftc(ifp, sc);
330 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
331 	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
332 	if_setioctlfn(ifp, ae_ioctl);
333 	if_setstartfn(ifp, ae_start);
334 	if_setinitfn(ifp, ae_init);
335 	if_setcapabilities(ifp, IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING);
336 	if_sethwassist(ifp, 0);
337 	if_setsendqlen(ifp, ifqmaxlen);
338 	if_setsendqready(ifp);
339 	if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) {
340 		if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0);
341 		sc->flags |= AE_FLAG_PMG;
342 	}
343 	if_setcapenable(ifp, if_getcapabilities(ifp));
344 
345 	/*
346 	 * Configure and attach MII bus.
347 	 */
348 	error = mii_attach(dev, &sc->miibus, ifp, ae_mediachange,
349 	    ae_mediastatus, BMSR_DEFCAPMASK, AE_PHYADDR_DEFAULT,
350 	    MII_OFFSET_ANY, 0);
351 	if (error != 0) {
352 		device_printf(dev, "attaching PHYs failed\n");
353 		goto fail;
354 	}
355 
356 	ether_ifattach(ifp, sc->eaddr);
357 	/* Tell the upper layer(s) we support long frames. */
358 	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
359 
360 	/*
361 	 * Create and run all helper tasks.
362 	 */
363 	sc->tq = taskqueue_create_fast("ae_taskq", M_WAITOK,
364 	    taskqueue_thread_enqueue, &sc->tq);
365 	if (sc->tq == NULL) {
366 		device_printf(dev, "could not create taskqueue.\n");
367 		ether_ifdetach(ifp);
368 		error = ENXIO;
369 		goto fail;
370 	}
371 	taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
372 	    device_get_nameunit(sc->dev));
373 
374 	/*
375 	 * Configure interrupt handlers.
376 	 */
377 	error = bus_setup_intr(dev, sc->irq[0], INTR_TYPE_NET | INTR_MPSAFE,
378 	    ae_intr, NULL, sc, &sc->intrhand);
379 	if (error != 0) {
380 		device_printf(dev, "could not set up interrupt handler.\n");
381 		taskqueue_free(sc->tq);
382 		sc->tq = NULL;
383 		ether_ifdetach(ifp);
384 		goto fail;
385 	}
386 
387 fail:
388 	if (error != 0)
389 		ae_detach(dev);
390 
391 	return (error);
392 }
393 
394 #define	AE_SYSCTL(stx, parent, name, desc, ptr)	\
395 	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, name, CTLFLAG_RD, ptr, 0, desc)
396 
397 static void
398 ae_init_tunables(ae_softc_t *sc)
399 {
400 	struct sysctl_ctx_list *ctx;
401 	struct sysctl_oid *root, *stats, *stats_rx, *stats_tx;
402 	struct ae_stats *ae_stats;
403 
404 	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
405 	ae_stats = &sc->stats;
406 
407 	ctx = device_get_sysctl_ctx(sc->dev);
408 	root = device_get_sysctl_tree(sc->dev);
409 	stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(root), OID_AUTO, "stats",
410 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "ae statistics");
411 
412 	/*
413 	 * Receiver statistics.
414 	 */
415 	stats_rx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx",
416 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Rx MAC statistics");
417 	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "bcast",
418 	    "broadcast frames", &ae_stats->rx_bcast);
419 	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "mcast",
420 	    "multicast frames", &ae_stats->rx_mcast);
421 	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "pause",
422 	    "PAUSE frames", &ae_stats->rx_pause);
423 	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "control",
424 	    "control frames", &ae_stats->rx_ctrl);
425 	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "crc_errors",
426 	    "frames with CRC errors", &ae_stats->rx_crcerr);
427 	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "code_errors",
428 	    "frames with invalid opcode", &ae_stats->rx_codeerr);
429 	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "runt",
430 	    "runt frames", &ae_stats->rx_runt);
431 	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "frag",
432 	    "fragmented frames", &ae_stats->rx_frag);
433 	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "align_errors",
434 	    "frames with alignment errors", &ae_stats->rx_align);
435 	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "truncated",
436 	    "frames truncated due to Rx FIFO underrun", &ae_stats->rx_trunc);
437 
438 	/*
439 	 * Transmitter statistics.
440 	 */
441 	stats_tx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx",
442 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx MAC statistics");
443 	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "bcast",
444 	    "broadcast frames", &ae_stats->tx_bcast);
445 	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "mcast",
446 	    "multicast frames", &ae_stats->tx_mcast);
447 	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "pause",
448 	    "PAUSE frames", &ae_stats->tx_pause);
449 	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "control",
450 	    "control frames", &ae_stats->tx_ctrl);
451 	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "defers",
452 	    "deferrals occurred", &ae_stats->tx_defer);
453 	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "exc_defers",
454 	    "excessive deferrals occurred", &ae_stats->tx_excdefer);
455 	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "singlecols",
456 	    "single collisions occurred", &ae_stats->tx_singlecol);
457 	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "multicols",
458 	    "multiple collisions occurred", &ae_stats->tx_multicol);
459 	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "latecols",
460 	    "late collisions occurred", &ae_stats->tx_latecol);
461 	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "aborts",
462 	    "transmit aborts due to collisions", &ae_stats->tx_abortcol);
463 	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "underruns",
464 	    "Tx FIFO underruns", &ae_stats->tx_underrun);
465 }
466 
467 static void
468 ae_pcie_init(ae_softc_t *sc)
469 {
470 
471 	AE_WRITE_4(sc, AE_PCIE_LTSSM_TESTMODE_REG, AE_PCIE_LTSSM_TESTMODE_DEFAULT);
472 	AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG, AE_PCIE_DLL_TX_CTRL_DEFAULT);
473 }
474 
475 static void
476 ae_phy_reset(ae_softc_t *sc)
477 {
478 
479 	AE_WRITE_4(sc, AE_PHY_ENABLE_REG, AE_PHY_ENABLE);
480 	DELAY(1000);	/* XXX: pause(9) ? */
481 }
482 
483 static int
484 ae_reset(ae_softc_t *sc)
485 {
486 	int i;
487 
488 	/*
489 	 * Issue a soft reset.
490 	 */
491 	AE_WRITE_4(sc, AE_MASTER_REG, AE_MASTER_SOFT_RESET);
492 	bus_barrier(sc->mem[0], AE_MASTER_REG, 4,
493 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
494 
495 	/*
496 	 * Wait for reset to complete.
497 	 */
498 	for (i = 0; i < AE_RESET_TIMEOUT; i++) {
499 		if ((AE_READ_4(sc, AE_MASTER_REG) & AE_MASTER_SOFT_RESET) == 0)
500 			break;
501 		DELAY(10);
502 	}
503 	if (i == AE_RESET_TIMEOUT) {
504 		device_printf(sc->dev, "reset timeout.\n");
505 		return (ENXIO);
506 	}
507 
508 	/*
509 	 * Wait for everything to enter idle state.
510 	 */
511 	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
512 		if (AE_READ_4(sc, AE_IDLE_REG) == 0)
513 			break;
514 		DELAY(100);
515 	}
516 	if (i == AE_IDLE_TIMEOUT) {
517 		device_printf(sc->dev, "could not enter idle state.\n");
518 		return (ENXIO);
519 	}
520 	return (0);
521 }
522 
523 static void
524 ae_init(void *arg)
525 {
526 	ae_softc_t *sc;
527 
528 	sc = (ae_softc_t *)arg;
529 	AE_LOCK(sc);
530 	ae_init_locked(sc);
531 	AE_UNLOCK(sc);
532 }
533 
534 static void
535 ae_phy_init(ae_softc_t *sc)
536 {
537 
538 	/*
539 	 * Enable link status change interrupt.
540 	 * XXX magic numbers.
541 	 */
542 #ifdef notyet
543 	AE_PHY_WRITE(sc, 18, 0xc00);
544 #endif
545 }
546 
547 static int
548 ae_init_locked(ae_softc_t *sc)
549 {
550 	if_t ifp;
551 	struct mii_data *mii;
552 	uint8_t eaddr[ETHER_ADDR_LEN];
553 	uint32_t val;
554 	bus_addr_t addr;
555 
556 	AE_LOCK_ASSERT(sc);
557 
558 	ifp = sc->ifp;
559 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
560 		return (0);
561 	mii = device_get_softc(sc->miibus);
562 
563 	ae_stop(sc);
564 	ae_reset(sc);
565 	ae_pcie_init(sc);		/* Initialize PCIE stuff. */
566 	ae_phy_init(sc);
567 	ae_powersave_disable(sc);
568 
569 	/*
570 	 * Clear and disable interrupts.
571 	 */
572 	AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);
573 
574 	/*
575 	 * Set the MAC address.
576 	 */
577 	bcopy(if_getlladdr(ifp), eaddr, ETHER_ADDR_LEN);
578 	val = eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5];
579 	AE_WRITE_4(sc, AE_EADDR0_REG, val);
580 	val = eaddr[0] << 8 | eaddr[1];
581 	AE_WRITE_4(sc, AE_EADDR1_REG, val);
582 
583 	bzero(sc->rxd_base_dma, AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING);
584 	bzero(sc->txd_base, AE_TXD_BUFSIZE_DEFAULT);
585 	bzero(sc->txs_base, AE_TXS_COUNT_DEFAULT * 4);
586 	/*
587 	 * Set ring buffers base addresses.
588 	 */
589 	addr = sc->dma_rxd_busaddr;
590 	AE_WRITE_4(sc, AE_DESC_ADDR_HI_REG, BUS_ADDR_HI(addr));
591 	AE_WRITE_4(sc, AE_RXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
592 	addr = sc->dma_txd_busaddr;
593 	AE_WRITE_4(sc, AE_TXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
594 	addr = sc->dma_txs_busaddr;
595 	AE_WRITE_4(sc, AE_TXS_ADDR_LO_REG, BUS_ADDR_LO(addr));
596 
597 	/*
598 	 * Configure ring buffers sizes.
599 	 */
600 	AE_WRITE_2(sc, AE_RXD_COUNT_REG, AE_RXD_COUNT_DEFAULT);
601 	AE_WRITE_2(sc, AE_TXD_BUFSIZE_REG, AE_TXD_BUFSIZE_DEFAULT / 4);
602 	AE_WRITE_2(sc, AE_TXS_COUNT_REG, AE_TXS_COUNT_DEFAULT);
603 
604 	/*
605 	 * Configure interframe gap parameters.
606 	 */
607 	val = ((AE_IFG_TXIPG_DEFAULT << AE_IFG_TXIPG_SHIFT) &
608 	    AE_IFG_TXIPG_MASK) |
609 	    ((AE_IFG_RXIPG_DEFAULT << AE_IFG_RXIPG_SHIFT) &
610 	    AE_IFG_RXIPG_MASK) |
611 	    ((AE_IFG_IPGR1_DEFAULT << AE_IFG_IPGR1_SHIFT) &
612 	    AE_IFG_IPGR1_MASK) |
613 	    ((AE_IFG_IPGR2_DEFAULT << AE_IFG_IPGR2_SHIFT) &
614 	    AE_IFG_IPGR2_MASK);
615 	AE_WRITE_4(sc, AE_IFG_REG, val);
616 
617 	/*
618 	 * Configure half-duplex operation.
619 	 */
620 	val = ((AE_HDPX_LCOL_DEFAULT << AE_HDPX_LCOL_SHIFT) &
621 	    AE_HDPX_LCOL_MASK) |
622 	    ((AE_HDPX_RETRY_DEFAULT << AE_HDPX_RETRY_SHIFT) &
623 	    AE_HDPX_RETRY_MASK) |
624 	    ((AE_HDPX_ABEBT_DEFAULT << AE_HDPX_ABEBT_SHIFT) &
625 	    AE_HDPX_ABEBT_MASK) |
626 	    ((AE_HDPX_JAMIPG_DEFAULT << AE_HDPX_JAMIPG_SHIFT) &
627 	    AE_HDPX_JAMIPG_MASK) | AE_HDPX_EXC_EN;
628 	AE_WRITE_4(sc, AE_HDPX_REG, val);
629 
630 	/*
631 	 * Configure interrupt moderate timer.
632 	 */
633 	AE_WRITE_2(sc, AE_IMT_REG, AE_IMT_DEFAULT);
634 	val = AE_READ_4(sc, AE_MASTER_REG);
635 	val |= AE_MASTER_IMT_EN;
636 	AE_WRITE_4(sc, AE_MASTER_REG, val);
637 
638 	/*
639 	 * Configure interrupt clearing timer.
640 	 */
641 	AE_WRITE_2(sc, AE_ICT_REG, AE_ICT_DEFAULT);
642 
643 	/*
644 	 * Configure MTU.
645 	 */
646 	val = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
647 	    ETHER_CRC_LEN;
648 	AE_WRITE_2(sc, AE_MTU_REG, val);
649 
650 	/*
651 	 * Configure cut-through threshold.
652 	 */
653 	AE_WRITE_4(sc, AE_CUT_THRESH_REG, AE_CUT_THRESH_DEFAULT);
654 
655 	/*
656 	 * Configure flow control.
657 	 */
658 	AE_WRITE_2(sc, AE_FLOW_THRESH_HI_REG, (AE_RXD_COUNT_DEFAULT / 8) * 7);
659 	AE_WRITE_2(sc, AE_FLOW_THRESH_LO_REG, (AE_RXD_COUNT_MIN / 8) >
660 	    (AE_RXD_COUNT_DEFAULT / 12) ? (AE_RXD_COUNT_MIN / 8) :
661 	    (AE_RXD_COUNT_DEFAULT / 12));
662 
663 	/*
664 	 * Init mailboxes.
665 	 */
666 	sc->txd_cur = sc->rxd_cur = 0;
667 	sc->txs_ack = sc->txd_ack = 0;
668 	sc->rxd_cur = 0;
669 	AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur);
670 	AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);
671 
672 	sc->tx_inproc = 0;	/* Number of packets the chip is processing now. */
673 	sc->flags |= AE_FLAG_TXAVAIL;	/* Free Tx's available. */
674 
675 	/*
676 	 * Enable DMA.
677 	 */
678 	AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
679 	AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);
680 
681 	/*
682 	 * Check if everything is OK.
683 	 */
684 	val = AE_READ_4(sc, AE_ISR_REG);
685 	if ((val & AE_ISR_PHY_LINKDOWN) != 0) {
686 		device_printf(sc->dev, "Initialization failed.\n");
687 		return (ENXIO);
688 	}
689 
690 	/*
691 	 * Clear interrupt status.
692 	 */
693 	AE_WRITE_4(sc, AE_ISR_REG, 0x3fffffff);
694 	AE_WRITE_4(sc, AE_ISR_REG, 0x0);
695 
696 	/*
697 	 * Enable interrupts.
698 	 */
699 	val = AE_READ_4(sc, AE_MASTER_REG);
700 	AE_WRITE_4(sc, AE_MASTER_REG, val | AE_MASTER_MANUAL_INT);
701 	AE_WRITE_4(sc, AE_IMR_REG, AE_IMR_DEFAULT);
702 
703 	/*
704 	 * Disable WOL.
705 	 */
706 	AE_WRITE_4(sc, AE_WOL_REG, 0);
707 
708 	/*
709 	 * Configure MAC.
710 	 */
711 	val = AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD |
712 	    AE_MAC_FULL_DUPLEX | AE_MAC_CLK_PHY |
713 	    AE_MAC_TX_FLOW_EN | AE_MAC_RX_FLOW_EN |
714 	    ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & AE_HALFBUF_MASK) |
715 	    ((AE_MAC_PREAMBLE_DEFAULT << AE_MAC_PREAMBLE_SHIFT) &
716 	    AE_MAC_PREAMBLE_MASK);
717 	AE_WRITE_4(sc, AE_MAC_REG, val);
718 
719 	/*
720 	 * Configure Rx MAC.
721 	 */
722 	ae_rxfilter(sc);
723 	ae_rxvlan(sc);
724 
725 	/*
726 	 * Enable Tx/Rx.
727 	 */
728 	val = AE_READ_4(sc, AE_MAC_REG);
729 	AE_WRITE_4(sc, AE_MAC_REG, val | AE_MAC_TX_EN | AE_MAC_RX_EN);
730 
731 	sc->flags &= ~AE_FLAG_LINK;
732 	mii_mediachg(mii);	/* Switch to the current media. */
733 
734 	callout_reset(&sc->tick_ch, hz, ae_tick, sc);
735 
736 	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
737 	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
738 
739 #ifdef AE_DEBUG
740 	device_printf(sc->dev, "Initialization complete.\n");
741 #endif
742 
743 	return (0);
744 }
745 
746 static int
747 ae_detach(device_t dev)
748 {
749 	struct ae_softc *sc;
750 	if_t ifp;
751 
752 	sc = device_get_softc(dev);
753 	KASSERT(sc != NULL, ("[ae: %d]: sc is NULL", __LINE__));
754 	ifp = sc->ifp;
755 	if (device_is_attached(dev)) {
756 		AE_LOCK(sc);
757 		sc->flags |= AE_FLAG_DETACH;
758 		ae_stop(sc);
759 		AE_UNLOCK(sc);
760 		callout_drain(&sc->tick_ch);
761 		taskqueue_drain(sc->tq, &sc->int_task);
762 		taskqueue_drain(taskqueue_swi, &sc->link_task);
763 		ether_ifdetach(ifp);
764 	}
765 	if (sc->tq != NULL) {
766 		taskqueue_drain(sc->tq, &sc->int_task);
767 		taskqueue_free(sc->tq);
768 		sc->tq = NULL;
769 	}
770 	if (sc->miibus != NULL) {
771 		device_delete_child(dev, sc->miibus);
772 		sc->miibus = NULL;
773 	}
774 	bus_generic_detach(sc->dev);
775 	ae_dma_free(sc);
776 	if (sc->intrhand != NULL) {
777 		bus_teardown_intr(dev, sc->irq[0], sc->intrhand);
778 		sc->intrhand = NULL;
779 	}
780 	if (ifp != NULL) {
781 		if_free(ifp);
782 		sc->ifp = NULL;
783 	}
784 	if (sc->spec_irq != NULL)
785 		bus_release_resources(dev, sc->spec_irq, sc->irq);
786 	if (sc->spec_mem != NULL)
787 		bus_release_resources(dev, sc->spec_mem, sc->mem);
788 	if ((sc->flags & AE_FLAG_MSI) != 0)
789 		pci_release_msi(dev);
790 	mtx_destroy(&sc->mtx);
791 
792 	return (0);
793 }
794 
795 static int
796 ae_miibus_readreg(device_t dev, int phy, int reg)
797 {
798 	ae_softc_t *sc;
799 	uint32_t val;
800 	int i;
801 
802 	sc = device_get_softc(dev);
803 	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
804 
805 	/*
806 	 * Locking is done in upper layers.
807 	 */
808 
809 	val = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
810 	    AE_MDIO_START | AE_MDIO_READ | AE_MDIO_SUP_PREAMBLE |
811 	    ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK);
812 	AE_WRITE_4(sc, AE_MDIO_REG, val);
813 
814 	/*
815 	 * Wait for operation to complete.
816 	 */
817 	for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
818 		DELAY(2);
819 		val = AE_READ_4(sc, AE_MDIO_REG);
820 		if ((val & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
821 			break;
822 	}
823 	if (i == AE_MDIO_TIMEOUT) {
824 		device_printf(sc->dev, "phy read timeout: %d.\n", reg);
825 		return (0);
826 	}
827 	return ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
828 }
829 
830 static int
831 ae_miibus_writereg(device_t dev, int phy, int reg, int val)
832 {
833 	ae_softc_t *sc;
834 	uint32_t aereg;
835 	int i;
836 
837 	sc = device_get_softc(dev);
838 	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
839 
840 	/*
841 	 * Locking is done in upper layers.
842 	 */
843 
844 	aereg = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
845 	    AE_MDIO_START | AE_MDIO_SUP_PREAMBLE |
846 	    ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK) |
847 	    ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
848 	AE_WRITE_4(sc, AE_MDIO_REG, aereg);
849 
850 	/*
851 	 * Wait for operation to complete.
852 	 */
853 	for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
854 		DELAY(2);
855 		aereg = AE_READ_4(sc, AE_MDIO_REG);
856 		if ((aereg & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
857 			break;
858 	}
859 	if (i == AE_MDIO_TIMEOUT) {
860 		device_printf(sc->dev, "phy write timeout: %d.\n", reg);
861 	}
862 	return (0);
863 }
864 
865 static void
866 ae_miibus_statchg(device_t dev)
867 {
868 	ae_softc_t *sc;
869 
870 	sc = device_get_softc(dev);
871 	taskqueue_enqueue(taskqueue_swi, &sc->link_task);
872 }
873 
874 static void
875 ae_mediastatus(if_t ifp, struct ifmediareq *ifmr)
876 {
877 	ae_softc_t *sc;
878 	struct mii_data *mii;
879 
880 	sc = if_getsoftc(ifp);
881 	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
882 
883 	AE_LOCK(sc);
884 	mii = device_get_softc(sc->miibus);
885 	mii_pollstat(mii);
886 	ifmr->ifm_status = mii->mii_media_status;
887 	ifmr->ifm_active = mii->mii_media_active;
888 	AE_UNLOCK(sc);
889 }
890 
891 static int
892 ae_mediachange(if_t ifp)
893 {
894 	ae_softc_t *sc;
895 	struct mii_data *mii;
896 	struct mii_softc *mii_sc;
897 	int error;
898 
899 	/* XXX: check IFF_UP ?? */
900 	sc = if_getsoftc(ifp);
901 	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
902 	AE_LOCK(sc);
903 	mii = device_get_softc(sc->miibus);
904 	LIST_FOREACH(mii_sc, &mii->mii_phys, mii_list)
905 		PHY_RESET(mii_sc);
906 	error = mii_mediachg(mii);
907 	AE_UNLOCK(sc);
908 
909 	return (error);
910 }
911 
912 static int
913 ae_check_eeprom_present(ae_softc_t *sc, int *vpdc)
914 {
915 	int error;
916 	uint32_t val;
917 
918 	KASSERT(vpdc != NULL, ("[ae, %d]: vpdc is NULL!\n", __LINE__));
919 
920 	/*
921 	 * Not sure why, but Linux does this.
922 	 */
923 	val = AE_READ_4(sc, AE_SPICTL_REG);
924 	if ((val & AE_SPICTL_VPD_EN) != 0) {
925 		val &= ~AE_SPICTL_VPD_EN;
926 		AE_WRITE_4(sc, AE_SPICTL_REG, val);
927 	}
928 	error = pci_find_cap(sc->dev, PCIY_VPD, vpdc);
929 	return (error);
930 }
931 
932 static int
933 ae_vpd_read_word(ae_softc_t *sc, int reg, uint32_t *word)
934 {
935 	uint32_t val;
936 	int i;
937 
938 	AE_WRITE_4(sc, AE_VPD_DATA_REG, 0);	/* Clear register value. */
939 
940 	/*
941 	 * VPD registers start at offset 0x100. Read them.
942 	 */
943 	val = 0x100 + reg * 4;
944 	AE_WRITE_4(sc, AE_VPD_CAP_REG, (val << AE_VPD_CAP_ADDR_SHIFT) &
945 	    AE_VPD_CAP_ADDR_MASK);
946 	for (i = 0; i < AE_VPD_TIMEOUT; i++) {
947 		DELAY(2000);
948 		val = AE_READ_4(sc, AE_VPD_CAP_REG);
949 		if ((val & AE_VPD_CAP_DONE) != 0)
950 			break;
951 	}
952 	if (i == AE_VPD_TIMEOUT) {
953 		device_printf(sc->dev, "timeout reading VPD register %d.\n",
954 		    reg);
955 		return (ETIMEDOUT);
956 	}
957 	*word = AE_READ_4(sc, AE_VPD_DATA_REG);
958 	return (0);
959 }
960 
961 static int
962 ae_get_vpd_eaddr(ae_softc_t *sc, uint32_t *eaddr)
963 {
964 	uint32_t word, reg, val;
965 	int error;
966 	int found;
967 	int vpdc;
968 	int i;
969 
970 	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
971 	KASSERT(eaddr != NULL, ("[ae, %d]: eaddr is NULL", __LINE__));
972 
973 	/*
974 	 * Check for EEPROM.
975 	 */
976 	error = ae_check_eeprom_present(sc, &vpdc);
977 	if (error != 0)
978 		return (error);
979 
980 	/*
981 	 * Read the VPD configuration space.
982 	 * Each register is prefixed with a signature,
983 	 * so we can check that it is valid.
984 	 */
985 	for (i = 0, found = 0; i < AE_VPD_NREGS; i++) {
986 		error = ae_vpd_read_word(sc, i, &word);
987 		if (error != 0)
988 			break;
989 
990 		/*
991 		 * Check signature.
992 		 */
993 		if ((word & AE_VPD_SIG_MASK) != AE_VPD_SIG)
994 			break;
995 		reg = word >> AE_VPD_REG_SHIFT;
996 		i++;	/* Move to the next word. */
997 
998 		if (reg != AE_EADDR0_REG && reg != AE_EADDR1_REG)
999 			continue;
1000 
1001 		error = ae_vpd_read_word(sc, i, &val);
1002 		if (error != 0)
1003 			break;
1004 		if (reg == AE_EADDR0_REG)
1005 			eaddr[0] = val;
1006 		else
1007 			eaddr[1] = val;
1008 		found++;
1009 	}
1010 
1011 	if (found < 2)
1012 		return (ENOENT);
1013 
1014 	eaddr[1] &= 0xffff;	/* Only the last 2 bytes are used. */
1015 	if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
1016 		if (bootverbose)
1017 			device_printf(sc->dev,
1018 			    "VPD ethernet address registers are invalid.\n");
1019 		return (EINVAL);
1020 	}
1021 	return (0);
1022 }
1023 
1024 static int
1025 ae_get_reg_eaddr(ae_softc_t *sc, uint32_t *eaddr)
1026 {
1027 
1028 	/*
1029 	 * BIOS is supposed to set this.
1030 	 */
1031 	eaddr[0] = AE_READ_4(sc, AE_EADDR0_REG);
1032 	eaddr[1] = AE_READ_4(sc, AE_EADDR1_REG);
1033 	eaddr[1] &= 0xffff;	/* Only the last 2 bytes are used. */
1034 
1035 	if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
1036 		if (bootverbose)
1037 			device_printf(sc->dev,
1038 			    "Ethernet address registers are invalid.\n");
1039 		return (EINVAL);
1040 	}
1041 	return (0);
1042 }
1043 
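/*
 * Determine the station address: try the VPD EEPROM first, then the
 * EADDR registers (set by the BIOS), and as a last resort generate a
 * random locally-administered address.
 */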
1044 static void
1045 ae_retrieve_address(ae_softc_t *sc)
1046 {
1047 	uint32_t eaddr[2] = {0, 0};
1048 	int error;
1049 
1050 	/*
1051 	 * Check for EEPROM.
1052 	 */
1053 	error = ae_get_vpd_eaddr(sc, eaddr);
1054 	if (error != 0)
1055 		error = ae_get_reg_eaddr(sc, eaddr);
1056 	if (error != 0) {
1057 		if (bootverbose)
1058 			device_printf(sc->dev,
1059 			    "Generating random ethernet address.\n");
1060 		eaddr[0] = arc4random();
1061 
1062 		/*
1063 		 * Set OUI to ASUSTek COMPUTER INC.
1064 		 */
1065 		sc->eaddr[0] = 0x02;	/* U/L bit set. */
1066 		sc->eaddr[1] = 0x1f;
1067 		sc->eaddr[2] = 0xc6;
1068 		sc->eaddr[3] = (eaddr[0] >> 16) & 0xff;
1069 		sc->eaddr[4] = (eaddr[0] >> 8) & 0xff;
1070 		sc->eaddr[5] = (eaddr[0] >> 0) & 0xff;
1071 	} else {
1072 		sc->eaddr[0] = (eaddr[1] >> 8) & 0xff;
1073 		sc->eaddr[1] = (eaddr[1] >> 0) & 0xff;
1074 		sc->eaddr[2] = (eaddr[0] >> 24) & 0xff;
1075 		sc->eaddr[3] = (eaddr[0] >> 16) & 0xff;
1076 		sc->eaddr[4] = (eaddr[0] >> 8) & 0xff;
1077 		sc->eaddr[5] = (eaddr[0] >> 0) & 0xff;
1078 	}
1079 }
1080 
1081 static void
1082 ae_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1083 {
1084 	bus_addr_t *addr = arg;
1085 
1086 	if (error != 0)
1087 		return;
1088 	KASSERT(nsegs == 1, ("[ae, %d]: %d segments instead of 1!", __LINE__,
1089 	    nsegs));
1090 	*addr = segs[0].ds_addr;
1091 }
1092 
1093 static int
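/*
 * Allocate the three DMA areas the chip uses: the TxD ring (one
 * contiguous byte buffer that outgoing frames are copied into), the
 * TxS ring of 4-byte transmit status words, and the RxD ring of
 * fixed-size (1536-byte) receive descriptors.
 */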
1094 ae_alloc_rings(ae_softc_t *sc)
1095 {
1096 	bus_addr_t busaddr;
1097 	int error;
1098 
1099 	/*
1100 	 * Create parent DMA tag.
1101 	 */
1102 	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
1103 	    1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1104 	    NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0,
1105 	    BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
1106 	    &sc->dma_parent_tag);
1107 	if (error != 0) {
1108 		device_printf(sc->dev, "could not create parent DMA tag.\n");
1109 		return (error);
1110 	}
1111 
1112 	/*
1113 	 * Create DMA tag for TxD.
1114 	 */
1115 	error = bus_dma_tag_create(sc->dma_parent_tag,
1116 	    8, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1117 	    NULL, NULL, AE_TXD_BUFSIZE_DEFAULT, 1,
1118 	    AE_TXD_BUFSIZE_DEFAULT, 0, NULL, NULL,
1119 	    &sc->dma_txd_tag);
1120 	if (error != 0) {
1121 		device_printf(sc->dev, "could not create TxD DMA tag.\n");
1122 		return (error);
1123 	}
1124 
1125 	/*
1126 	 * Create DMA tag for TxS.
1127 	 */
1128 	error = bus_dma_tag_create(sc->dma_parent_tag,
1129 	    8, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1130 	    NULL, NULL, AE_TXS_COUNT_DEFAULT * 4, 1,
1131 	    AE_TXS_COUNT_DEFAULT * 4, 0, NULL, NULL,
1132 	    &sc->dma_txs_tag);
1133 	if (error != 0) {
1134 		device_printf(sc->dev, "could not create TxS DMA tag.\n");
1135 		return (error);
1136 	}
1137 
1138 	/*
1139 	 * Create DMA tag for RxD.
1140 	 */
1141 	error = bus_dma_tag_create(sc->dma_parent_tag,
1142 	    128, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1143 	    NULL, NULL, AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING, 1,
1144 	    AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING, 0, NULL, NULL,
1145 	    &sc->dma_rxd_tag);
1146 	if (error != 0) {
1147 		device_printf(sc->dev, "could not create RxD DMA tag.\n");
1148 		return (error);
1149 	}
1150 
1151 	/*
1152 	 * Allocate TxD DMA memory.
1153 	 */
1154 	error = bus_dmamem_alloc(sc->dma_txd_tag, (void **)&sc->txd_base,
1155 	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1156 	    &sc->dma_txd_map);
1157 	if (error != 0) {
1158 		device_printf(sc->dev,
1159 		    "could not allocate DMA memory for TxD ring.\n");
1160 		return (error);
1161 	}
1162 	error = bus_dmamap_load(sc->dma_txd_tag, sc->dma_txd_map, sc->txd_base,
1163 	    AE_TXD_BUFSIZE_DEFAULT, ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
1164 	if (error != 0 || busaddr == 0) {
1165 		device_printf(sc->dev,
1166 		    "could not load DMA map for TxD ring.\n");
1167 		return (error);
1168 	}
1169 	sc->dma_txd_busaddr = busaddr;
1170 
1171 	/*
1172 	 * Allocate TxS DMA memory.
1173 	 */
1174 	error = bus_dmamem_alloc(sc->dma_txs_tag, (void **)&sc->txs_base,
1175 	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1176 	    &sc->dma_txs_map);
1177 	if (error != 0) {
1178 		device_printf(sc->dev,
1179 		    "could not allocate DMA memory for TxS ring.\n");
1180 		return (error);
1181 	}
1182 	error = bus_dmamap_load(sc->dma_txs_tag, sc->dma_txs_map, sc->txs_base,
1183 	    AE_TXS_COUNT_DEFAULT * 4, ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
1184 	if (error != 0 || busaddr == 0) {
1185 		device_printf(sc->dev,
1186 		    "could not load DMA map for TxS ring.\n");
1187 		return (error);
1188 	}
1189 	sc->dma_txs_busaddr = busaddr;
1190 
1191 	/*
1192 	 * Allocate RxD DMA memory.
1193 	 */
1194 	error = bus_dmamem_alloc(sc->dma_rxd_tag, (void **)&sc->rxd_base_dma,
1195 	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1196 	    &sc->dma_rxd_map);
1197 	if (error != 0) {
1198 		device_printf(sc->dev,
1199 		    "could not allocate DMA memory for RxD ring.\n");
1200 		return (error);
1201 	}
1202 	error = bus_dmamap_load(sc->dma_rxd_tag, sc->dma_rxd_map,
1203 	    sc->rxd_base_dma, AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING,
1204 	    ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
1205 	if (error != 0 || busaddr == 0) {
1206 		device_printf(sc->dev,
1207 		    "could not load DMA map for RxD ring.\n");
1208 		return (error);
1209 	}
1210 	sc->dma_rxd_busaddr = busaddr + AE_RXD_PADDING;
1211 	sc->rxd_base = (ae_rxd_t *)(sc->rxd_base_dma + AE_RXD_PADDING);
1212 
1213 	return (0);
1214 }
1215 
1216 static void
1217 ae_dma_free(ae_softc_t *sc)
1218 {
1219 
1220 	if (sc->dma_txd_tag != NULL) {
1221 		if (sc->dma_txd_busaddr != 0)
1222 			bus_dmamap_unload(sc->dma_txd_tag, sc->dma_txd_map);
1223 		if (sc->txd_base != NULL)
1224 			bus_dmamem_free(sc->dma_txd_tag, sc->txd_base,
1225 			    sc->dma_txd_map);
1226 		bus_dma_tag_destroy(sc->dma_txd_tag);
1227 		sc->dma_txd_tag = NULL;
1228 		sc->txd_base = NULL;
1229 		sc->dma_txd_busaddr = 0;
1230 	}
1231 	if (sc->dma_txs_tag != NULL) {
1232 		if (sc->dma_txs_busaddr != 0)
1233 			bus_dmamap_unload(sc->dma_txs_tag, sc->dma_txs_map);
1234 		if (sc->txs_base != NULL)
1235 			bus_dmamem_free(sc->dma_txs_tag, sc->txs_base,
1236 			    sc->dma_txs_map);
1237 		bus_dma_tag_destroy(sc->dma_txs_tag);
1238 		sc->dma_txs_tag = NULL;
1239 		sc->txs_base = NULL;
1240 		sc->dma_txs_busaddr = 0;
1241 	}
1242 	if (sc->dma_rxd_tag != NULL) {
1243 		if (sc->dma_rxd_busaddr != 0)
1244 			bus_dmamap_unload(sc->dma_rxd_tag, sc->dma_rxd_map);
1245 		if (sc->rxd_base_dma != NULL)
1246 			bus_dmamem_free(sc->dma_rxd_tag, sc->rxd_base_dma,
1247 			    sc->dma_rxd_map);
1248 		bus_dma_tag_destroy(sc->dma_rxd_tag);
1249 		sc->dma_rxd_tag = NULL;
1250 		sc->rxd_base_dma = NULL;
1251 		sc->dma_rxd_busaddr = 0;
1252 	}
1253 	if (sc->dma_parent_tag != NULL) {
1254 		bus_dma_tag_destroy(sc->dma_parent_tag);
1255 		sc->dma_parent_tag = NULL;
1256 	}
1257 }
1258 
1259 static int
1260 ae_shutdown(device_t dev)
1261 {
1262 	ae_softc_t *sc;
1263 	int error;
1264 
1265 	sc = device_get_softc(dev);
1266 	KASSERT(sc != NULL, ("[ae: %d]: sc is NULL", __LINE__));
1267 
1268 	error = ae_suspend(dev);
1269 	AE_LOCK(sc);
1270 	ae_powersave_enable(sc);
1271 	AE_UNLOCK(sc);
1272 	return (error);
1273 }
1274 
1275 static void
1276 ae_powersave_disable(ae_softc_t *sc)
1277 {
1278 	uint32_t val;
1279 
1280 	AE_LOCK_ASSERT(sc);
1281 
1282 	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
1283 	val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
1284 	if (val & AE_PHY_DBG_POWERSAVE) {
1285 		val &= ~AE_PHY_DBG_POWERSAVE;
1286 		AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, val);
1287 		DELAY(1000);
1288 	}
1289 }
1290 
1291 static void
1292 ae_powersave_enable(ae_softc_t *sc)
1293 {
1294 	uint32_t val;
1295 
1296 	AE_LOCK_ASSERT(sc);
1297 
1298 	/*
1299 	 * XXX magic numbers.
1300 	 */
1301 	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
1302 	val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
1303 	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, val | 0x1000);
1304 	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 2);
1305 	AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0x3000);
1306 	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 3);
1307 	AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0);
1308 }
1309 
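/*
 * Program the wake-on-LAN state before suspend: magic-packet wakeup
 * (with the Rx MAC left enabled) if the link is up, link-change
 * wakeup otherwise, and finally PME# in the PCI power management
 * registers.
 */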
1310 static void
1311 ae_pm_init(ae_softc_t *sc)
1312 {
1313 	if_t ifp;
1314 	uint32_t val;
1315 	uint16_t pmstat;
1316 	struct mii_data *mii;
1317 	int pmc;
1318 
1319 	AE_LOCK_ASSERT(sc);
1320 
1321 	ifp = sc->ifp;
1322 	if ((sc->flags & AE_FLAG_PMG) == 0) {
1323 		/* Disable WOL entirely. */
1324 		AE_WRITE_4(sc, AE_WOL_REG, 0);
1325 		return;
1326 	}
1327 
1328 	/*
1329 	 * Configure WOL if enabled.
1330 	 */
1331 	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0) {
1332 		mii = device_get_softc(sc->miibus);
1333 		mii_pollstat(mii);
1334 		if ((mii->mii_media_status & IFM_AVALID) != 0 &&
1335 		    (mii->mii_media_status & IFM_ACTIVE) != 0) {
1336 			AE_WRITE_4(sc, AE_WOL_REG, AE_WOL_MAGIC | \
1337 			    AE_WOL_MAGIC_PME);
1338 
1339 			/*
1340 			 * Configure MAC.
1341 			 */
1342 			val = AE_MAC_RX_EN | AE_MAC_CLK_PHY | \
1343 			    AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD | \
1344 			    ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & \
1345 			    AE_HALFBUF_MASK) | \
1346 			    ((AE_MAC_PREAMBLE_DEFAULT << \
1347 			    AE_MAC_PREAMBLE_SHIFT) & AE_MAC_PREAMBLE_MASK) | \
1348 			    AE_MAC_BCAST_EN | AE_MAC_MCAST_EN;
1349 			if ((IFM_OPTIONS(mii->mii_media_active) & \
1350 			    IFM_FDX) != 0)
1351 				val |= AE_MAC_FULL_DUPLEX;
1352 			AE_WRITE_4(sc, AE_MAC_REG, val);
1353 
1354 		} else {	/* No link. */
1355 			AE_WRITE_4(sc, AE_WOL_REG, AE_WOL_LNKCHG | \
1356 			    AE_WOL_LNKCHG_PME);
1357 			AE_WRITE_4(sc, AE_MAC_REG, 0);
1358 		}
1359 	} else {
1360 		ae_powersave_enable(sc);
1361 	}
1362 
1363 	/*
1364 	 * PCIE hacks. Magic numbers.
1365 	 */
1366 	val = AE_READ_4(sc, AE_PCIE_PHYMISC_REG);
1367 	val |= AE_PCIE_PHYMISC_FORCE_RCV_DET;
1368 	AE_WRITE_4(sc, AE_PCIE_PHYMISC_REG, val);
1369 	val = AE_READ_4(sc, AE_PCIE_DLL_TX_CTRL_REG);
1370 	val |= AE_PCIE_DLL_TX_CTRL_SEL_NOR_CLK;
1371 	AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG, val);
1372 
1373 	/*
1374 	 * Configure PME.
1375 	 */
1376 	if (pci_find_cap(sc->dev, PCIY_PMG, &pmc) == 0) {
1377 		pmstat = pci_read_config(sc->dev, pmc + PCIR_POWER_STATUS, 2);
1378 		pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1379 		if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
1380 			pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1381 		pci_write_config(sc->dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1382 	}
1383 }
1384 
1385 static int
1386 ae_suspend(device_t dev)
1387 {
1388 	ae_softc_t *sc;
1389 
1390 	sc = device_get_softc(dev);
1391 
1392 	AE_LOCK(sc);
1393 	ae_stop(sc);
1394 	ae_pm_init(sc);
1395 	AE_UNLOCK(sc);
1396 
1397 	return (0);
1398 }
1399 
1400 static int
1401 ae_resume(device_t dev)
1402 {
1403 	ae_softc_t *sc;
1404 
1405 	sc = device_get_softc(dev);
1406 	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
1407 
1408 	AE_LOCK(sc);
1409 	AE_READ_4(sc, AE_WOL_REG);	/* Clear WOL status. */
1410 	if ((if_getflags(sc->ifp) & IFF_UP) != 0)
1411 		ae_init_locked(sc);
1412 	AE_UNLOCK(sc);
1413 
1414 	return (0);
1415 }
1416 
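/*
 * Return the number of free bytes in the circular TxD buffer:
 * txd_cur is the producer offset and txd_ack the consumer offset,
 * both wrapping modulo AE_TXD_BUFSIZE_DEFAULT.
 */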
1417 static unsigned int
1418 ae_tx_avail_size(ae_softc_t *sc)
1419 {
1420 	unsigned int avail;
1421 
1422 	if (sc->txd_cur >= sc->txd_ack)
1423 		avail = AE_TXD_BUFSIZE_DEFAULT - (sc->txd_cur - sc->txd_ack);
1424 	else
1425 		avail = sc->txd_ack - sc->txd_cur;
1426 
1427 	return (avail);
1428 }
1429 
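/*
 * Enqueue one frame for transmission.  The L2 does not do
 * scatter/gather; each frame is copied into the contiguous TxD
 * buffer behind an ae_txd_t header and padded to a 4-byte boundary,
 * wrapping around the end of the buffer if necessary.
 */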
1430 static int
1431 ae_encap(ae_softc_t *sc, struct mbuf **m_head)
1432 {
1433 	struct mbuf *m0;
1434 	ae_txd_t *hdr;
1435 	unsigned int to_end;
1436 	uint16_t len;
1437 
1438 	AE_LOCK_ASSERT(sc);
1439 
1440 	m0 = *m_head;
1441 	len = m0->m_pkthdr.len;
1442 
1443 	if ((sc->flags & AE_FLAG_TXAVAIL) == 0 ||
1444 	    len + sizeof(ae_txd_t) + 3 > ae_tx_avail_size(sc)) {
1445 #ifdef AE_DEBUG
1446 		if_printf(sc->ifp, "No free Tx available.\n");
1447 #endif
1448 		return (ENOBUFS);
1449 	}
1450 
1451 	hdr = (ae_txd_t *)(sc->txd_base + sc->txd_cur);
1452 	bzero(hdr, sizeof(*hdr));
1453 	/* Skip header size. */
1454 	sc->txd_cur = (sc->txd_cur + sizeof(ae_txd_t)) % AE_TXD_BUFSIZE_DEFAULT;
1455 	/* Space available to the end of the ring */
1456 	to_end = AE_TXD_BUFSIZE_DEFAULT - sc->txd_cur;
1457 	if (to_end >= len) {
1458 		m_copydata(m0, 0, len, (caddr_t)(sc->txd_base + sc->txd_cur));
1459 	} else {
1460 		m_copydata(m0, 0, to_end, (caddr_t)(sc->txd_base +
1461 		    sc->txd_cur));
1462 		m_copydata(m0, to_end, len - to_end, (caddr_t)sc->txd_base);
1463 	}
1464 
1465 	/*
1466 	 * Set TxD flags and parameters.
1467 	 */
1468 	if ((m0->m_flags & M_VLANTAG) != 0) {
1469 		hdr->vlan = htole16(AE_TXD_VLAN(m0->m_pkthdr.ether_vtag));
1470 		hdr->len = htole16(len | AE_TXD_INSERT_VTAG);
1471 	} else {
1472 		hdr->len = htole16(len);
1473 	}
1474 
1475 	/*
1476 	 * Set current TxD position and round up to a 4-byte boundary.
1477 	 */
1478 	sc->txd_cur = ((sc->txd_cur + len + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;
1479 	if (sc->txd_cur == sc->txd_ack)
1480 		sc->flags &= ~AE_FLAG_TXAVAIL;
1481 #ifdef AE_DEBUG
1482 	if_printf(sc->ifp, "New txd_cur = %d.\n", sc->txd_cur);
1483 #endif
1484 
1485 	/*
1486 	 * Update TxS position and check if there are empty TxS available.
1487 	 */
1488 	sc->txs_base[sc->txs_cur].flags &= ~htole16(AE_TXS_UPDATE);
1489 	sc->txs_cur = (sc->txs_cur + 1) % AE_TXS_COUNT_DEFAULT;
1490 	if (sc->txs_cur == sc->txs_ack)
1491 		sc->flags &= ~AE_FLAG_TXAVAIL;
1492 
1493 	/*
1494 	 * Synchronize DMA memory.
1495 	 */
1496 	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_PREREAD |
1497 	    BUS_DMASYNC_PREWRITE);
1498 	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
1499 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1500 
1501 	return (0);
1502 }
1503 
1504 static void
1505 ae_start(if_t ifp)
1506 {
1507 	ae_softc_t *sc;
1508 
1509 	sc = if_getsoftc(ifp);
1510 	AE_LOCK(sc);
1511 	ae_start_locked(ifp);
1512 	AE_UNLOCK(sc);
1513 }
1514 
1515 static void
1516 ae_start_locked(if_t ifp)
1517 {
1518 	ae_softc_t *sc;
1519 	unsigned int count;
1520 	struct mbuf *m0;
1521 	int error;
1522 
1523 	sc = if_getsoftc(ifp);
1524 	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
1525 	AE_LOCK_ASSERT(sc);
1526 
1527 #ifdef AE_DEBUG
1528 	if_printf(ifp, "Start called.\n");
1529 #endif
1530 
1531 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1532 	    IFF_DRV_RUNNING || (sc->flags & AE_FLAG_LINK) == 0)
1533 		return;
1534 
1535 	count = 0;
1536 	while (!if_sendq_empty(ifp)) {
1537 		m0 = if_dequeue(ifp);
1538 		if (m0 == NULL)
1539 			break;	/* Nothing to do. */
1540 
1541 		error = ae_encap(sc, &m0);
1542 		if (error != 0) {
1543 			if (m0 != NULL) {
1544 				if_sendq_prepend(ifp, m0);
1545 				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
1546 #ifdef AE_DEBUG
1547 				if_printf(ifp, "Setting OACTIVE.\n");
1548 #endif
1549 			}
1550 			break;
1551 		}
1552 		count++;
1553 		sc->tx_inproc++;
1554 
1555 		/* Bounce a copy of the frame to BPF. */
1556 		ETHER_BPF_MTAP(ifp, m0);
1557 
1558 		m_freem(m0);
1559 	}
1560 
1561 	if (count > 0) {	/* Something was dequeued. */
1562 		AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur / 4);
1563 		sc->wd_timer = AE_TX_TIMEOUT;	/* Load watchdog. */
1564 #ifdef AE_DEBUG
1565 		if_printf(ifp, "%d packets dequeued.\n", count);
1566 		if_printf(ifp, "Tx pos now is %d.\n", sc->txd_cur);
1567 #endif
1568 	}
1569 }
1570 
1571 static void
1572 ae_link_task(void *arg, int pending)
1573 {
1574 	ae_softc_t *sc;
1575 	struct mii_data *mii;
1576 	if_t ifp;
1577 	uint32_t val;
1578 
1579 	sc = (ae_softc_t *)arg;
1580 	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
1581 	AE_LOCK(sc);
1582 
1583 	ifp = sc->ifp;
1584 	mii = device_get_softc(sc->miibus);
1585 	if (mii == NULL || ifp == NULL ||
1586 	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
1587 		AE_UNLOCK(sc);	/* XXX: could happen? */
1588 		return;
1589 	}
1590 
1591 	sc->flags &= ~AE_FLAG_LINK;
1592 	if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
1593 	    (IFM_AVALID | IFM_ACTIVE)) {
1594 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
1595 		case IFM_10_T:
1596 		case IFM_100_TX:
1597 			sc->flags |= AE_FLAG_LINK;
1598 			break;
1599 		default:
1600 			break;
1601 		}
1602 	}
1603 
1604 	/*
1605 	 * Stop Rx/Tx MACs.
1606 	 */
1607 	ae_stop_rxmac(sc);
1608 	ae_stop_txmac(sc);
1609 
1610 	if ((sc->flags & AE_FLAG_LINK) != 0) {
1611 		ae_mac_config(sc);
1612 
1613 		/*
1614 		 * Restart DMA engines.
1615 		 */
1616 		AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
1617 		AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);
1618 
1619 		/*
1620 		 * Enable Rx and Tx MACs.
1621 		 */
1622 		val = AE_READ_4(sc, AE_MAC_REG);
1623 		val |= AE_MAC_TX_EN | AE_MAC_RX_EN;
1624 		AE_WRITE_4(sc, AE_MAC_REG, val);
1625 	}
1626 	AE_UNLOCK(sc);
1627 }
1628 
1629 static void
1630 ae_stop_rxmac(ae_softc_t *sc)
1631 {
1632 	uint32_t val;
1633 	int i;
1634 
1635 	AE_LOCK_ASSERT(sc);
1636 
1637 	/*
1638 	 * Stop Rx MAC engine.
1639 	 */
1640 	val = AE_READ_4(sc, AE_MAC_REG);
1641 	if ((val & AE_MAC_RX_EN) != 0) {
1642 		val &= ~AE_MAC_RX_EN;
1643 		AE_WRITE_4(sc, AE_MAC_REG, val);
1644 	}
1645 
1646 	/*
1647 	 * Stop Rx DMA engine.
1648 	 */
1649 	if (AE_READ_1(sc, AE_DMAWRITE_REG) == AE_DMAWRITE_EN)
1650 		AE_WRITE_1(sc, AE_DMAWRITE_REG, 0);
1651 
1652 	/*
1653 	 * Wait for IDLE state.
1654 	 */
1655 	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
1656 		val = AE_READ_4(sc, AE_IDLE_REG);
1657 		if ((val & (AE_IDLE_RXMAC | AE_IDLE_DMAWRITE)) == 0)
1658 			break;
1659 		DELAY(100);
1660 	}
1661 	if (i == AE_IDLE_TIMEOUT)
1662 		device_printf(sc->dev, "timed out while stopping Rx MAC.\n");
1663 }
1664 
1665 static void
1666 ae_stop_txmac(ae_softc_t *sc)
1667 {
1668 	uint32_t val;
1669 	int i;
1670 
1671 	AE_LOCK_ASSERT(sc);
1672 
1673 	/*
1674 	 * Stop Tx MAC engine.
1675 	 */
1676 	val = AE_READ_4(sc, AE_MAC_REG);
1677 	if ((val & AE_MAC_TX_EN) != 0) {
1678 		val &= ~AE_MAC_TX_EN;
1679 		AE_WRITE_4(sc, AE_MAC_REG, val);
1680 	}
1681 
1682 	/*
1683 	 * Stop Tx DMA engine.
1684 	 */
1685 	if (AE_READ_1(sc, AE_DMAREAD_REG) == AE_DMAREAD_EN)
1686 		AE_WRITE_1(sc, AE_DMAREAD_REG, 0);
1687 
1688 	/*
1689 	 * Wait for IDLE state.
1690 	 */
1691 	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
1692 		val = AE_READ_4(sc, AE_IDLE_REG);
1693 		if ((val & (AE_IDLE_TXMAC | AE_IDLE_DMAREAD)) == 0)
1694 			break;
1695 		DELAY(100);
1696 	}
1697 	if (i == AE_IDLE_TIMEOUT)
1698 		device_printf(sc->dev, "timed out while stopping Tx MAC.\n");
1699 }
1700 
1701 static void
1702 ae_mac_config(ae_softc_t *sc)
1703 {
1704 	struct mii_data *mii;
1705 	uint32_t val;
1706 
1707 	AE_LOCK_ASSERT(sc);
1708 
1709 	mii = device_get_softc(sc->miibus);
1710 	val = AE_READ_4(sc, AE_MAC_REG);
1711 	val &= ~AE_MAC_FULL_DUPLEX;
1712 	/* XXX disable AE_MAC_TX_FLOW_EN? */
1713 
1714 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
1715 		val |= AE_MAC_FULL_DUPLEX;
1716 
1717 	AE_WRITE_4(sc, AE_MAC_REG, val);
1718 }
1719 
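/*
 * Interrupt filter: runs in primary interrupt context, so it only
 * masks further interrupts and hands the real work to ae_int_task
 * on the driver taskqueue.
 */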
1720 static int
1721 ae_intr(void *arg)
1722 {
1723 	ae_softc_t *sc;
1724 	uint32_t val;
1725 
1726 	sc = (ae_softc_t *)arg;
1727 	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
1728 
1729 	val = AE_READ_4(sc, AE_ISR_REG);
1730 	if (val == 0 || (val & AE_IMR_DEFAULT) == 0)
1731 		return (FILTER_STRAY);
1732 
1733 	/* Disable interrupts. */
1734 	AE_WRITE_4(sc, AE_ISR_REG, AE_ISR_DISABLE);
1735 
1736 	/* Schedule interrupt processing. */
1737 	taskqueue_enqueue(sc->tq, &sc->int_task);
1738 
1739 	return (FILTER_HANDLED);
1740 }
1741 
1742 static void
1743 ae_int_task(void *arg, int pending)
1744 {
1745 	ae_softc_t *sc;
1746 	if_t ifp;
1747 	uint32_t val;
1748 
1749 	sc = (ae_softc_t *)arg;
1750 
1751 	AE_LOCK(sc);
1752 
1753 	ifp = sc->ifp;
1754 
1755 	val = AE_READ_4(sc, AE_ISR_REG);	/* Read interrupt status. */
1756 	if (val == 0) {
1757 		AE_UNLOCK(sc);
1758 		return;
1759 	}
1760 
1761 	/*
1762 	 * Clear interrupts and disable them.
1763 	 */
1764 	AE_WRITE_4(sc, AE_ISR_REG, val | AE_ISR_DISABLE);
1765 
1766 #ifdef AE_DEBUG
1767 	if_printf(ifp, "Interrupt received: 0x%08x\n", val);
1768 #endif
1769 
1770 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
1771 		if ((val & (AE_ISR_DMAR_TIMEOUT | AE_ISR_DMAW_TIMEOUT |
1772 		    AE_ISR_PHY_LINKDOWN)) != 0) {
1773 			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1774 			ae_init_locked(sc);
1775 			AE_UNLOCK(sc);
1776 			return;
1777 		}
1778 		if ((val & AE_ISR_TX_EVENT) != 0)
1779 			ae_tx_intr(sc);
1780 		if ((val & AE_ISR_RX_EVENT) != 0)
1781 			ae_rx_intr(sc);
1782 		/*
1783 		 * Re-enable interrupts.
1784 		 */
1785 		AE_WRITE_4(sc, AE_ISR_REG, 0);
1786 
1787 		if ((sc->flags & AE_FLAG_TXAVAIL) != 0) {
1788 			if (!if_sendq_empty(ifp))
1789 				ae_start_locked(ifp);
1790 		}
1791 	}
1792 
1793 	AE_UNLOCK(sc);
1794 }
1795 
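/*
 * Tx completion: walk the TxS ring while the UPDATE bit is set,
 * reclaim the corresponding space in the TxD buffer and update the
 * interface counters.
 */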
1796 static void
1797 ae_tx_intr(ae_softc_t *sc)
1798 {
1799 	if_t ifp;
1800 	ae_txd_t *txd;
1801 	ae_txs_t *txs;
1802 	uint16_t flags;
1803 
1804 	AE_LOCK_ASSERT(sc);
1805 
1806 	ifp = sc->ifp;
1807 
1808 #ifdef AE_DEBUG
1809 	if_printf(ifp, "Tx interrupt occurred.\n");
1810 #endif
1811 
1812 	/*
1813 	 * Synchronize DMA buffers.
1814 	 */
1815 	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map,
1816 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1817 	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
1818 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1819 
1820 	for (;;) {
1821 		txs = sc->txs_base + sc->txs_ack;
1822 		flags = le16toh(txs->flags);
1823 		if ((flags & AE_TXS_UPDATE) == 0)
1824 			break;
1825 		txs->flags = htole16(flags & ~AE_TXS_UPDATE);
1826 		/* Update stats. */
1827 		ae_update_stats_tx(flags, &sc->stats);
1828 
1829 		/*
1830 		 * Update TxS position.
1831 		 */
1832 		sc->txs_ack = (sc->txs_ack + 1) % AE_TXS_COUNT_DEFAULT;
1833 		sc->flags |= AE_FLAG_TXAVAIL;
1834 
1835 		txd = (ae_txd_t *)(sc->txd_base + sc->txd_ack);
1836 		if (txs->len != txd->len)
1837 			device_printf(sc->dev, "Size mismatch: TxS:%d TxD:%d\n",
1838 			    le16toh(txs->len), le16toh(txd->len));
1839 
1840 		/*
1841 		 * Move txd ack and align on 4-byte boundary.
1842 		 */
1843 		sc->txd_ack = ((sc->txd_ack + le16toh(txd->len) +
1844 		    sizeof(ae_txs_t) + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;
1845 
1846 		if ((flags & AE_TXS_SUCCESS) != 0)
1847 			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1848 		else
1849 			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1850 
1851 		sc->tx_inproc--;
1852 	}
1853 
1854 	if ((sc->flags & AE_FLAG_TXAVAIL) != 0)
1855 		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
1856 	if (sc->tx_inproc < 0) {
1857 		if_printf(ifp, "Received stray Tx interrupt(s).\n");
1858 		sc->tx_inproc = 0;
1859 	}
1860 
1861 	if (sc->tx_inproc == 0)
1862 		sc->wd_timer = 0;	/* Unarm watchdog. */
1863 
1864 	/*
1865 	 * Synchronize DMA buffers.
1866 	 */
1867 	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map,
1868 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1869 	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
1870 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1871 }
1872 
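/*
 * Hand one received frame to the network stack: the payload is
 * copied out of the Rx descriptor into an mbuf chain, and the VLAN
 * tag is attached if hardware tagging is enabled.
 */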
1873 static void
1874 ae_rxeof(ae_softc_t *sc, ae_rxd_t *rxd)
1875 {
1876 	if_t ifp;
1877 	struct mbuf *m;
1878 	unsigned int size;
1879 	uint16_t flags;
1880 
1881 	AE_LOCK_ASSERT(sc);
1882 
1883 	ifp = sc->ifp;
1884 	flags = le16toh(rxd->flags);
1885 
1886 #ifdef AE_DEBUG
1887 	if_printf(ifp, "Rx interrupt occurred.\n");
1888 #endif
1889 	size = le16toh(rxd->len) - ETHER_CRC_LEN;
1890 	if (size < (ETHER_MIN_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)) {
1891 		if_printf(ifp, "Runt frame received.\n");
1892 		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1893 		return;
1894 	}
1895 
1896 	m = m_devget(&rxd->data[0], size, ETHER_ALIGN, ifp, NULL);
1897 	if (m == NULL) {
1898 		if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
1899 		return;
1900 	}
1901 
1902 	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0 &&
1903 	    (flags & AE_RXD_HAS_VLAN) != 0) {
1904 		m->m_pkthdr.ether_vtag = AE_RXD_VLAN(le16toh(rxd->vlan));
1905 		m->m_flags |= M_VLANTAG;
1906 	}
1907 
1908 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1909 	/*
1910 	 * Pass it through.
1911 	 */
1912 	AE_UNLOCK(sc);
1913 	if_input(ifp, m);
1914 	AE_LOCK(sc);
1915 }
1916 
1917 static void
1918 ae_rx_intr(ae_softc_t *sc)
1919 {
1920 	ae_rxd_t *rxd;
1921 	if_t ifp;
1922 	uint16_t flags;
1923 	int count;
1924 
1925 	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
1926 
1927 	AE_LOCK_ASSERT(sc);
1928 
1929 	ifp = sc->ifp;
1930 
1931 	/*
1932 	 * Synchronize DMA buffers.
1933 	 */
1934 	bus_dmamap_sync(sc->dma_rxd_tag, sc->dma_rxd_map,
1935 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1936 
1937 	for (count = 0;; count++) {
1938 		rxd = (ae_rxd_t *)(sc->rxd_base + sc->rxd_cur);
1939 		flags = le16toh(rxd->flags);
1940 		if ((flags & AE_RXD_UPDATE) == 0)
1941 			break;
1942 		rxd->flags = htole16(flags & ~AE_RXD_UPDATE);
1943 		/* Update stats. */
1944 		ae_update_stats_rx(flags, &sc->stats);
1945 
1946 		/*
1947 		 * Update position index.
1948 		 */
1949 		sc->rxd_cur = (sc->rxd_cur + 1) % AE_RXD_COUNT_DEFAULT;
1950 
1951 		if ((flags & AE_RXD_SUCCESS) != 0)
1952 			ae_rxeof(sc, rxd);
1953 		else
1954 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1955 	}
1956 
1957 	if (count > 0) {
1958 		bus_dmamap_sync(sc->dma_rxd_tag, sc->dma_rxd_map,
1959 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1960 		/*
1961 		 * Update Rx index: tell the controller how far we have
		 * consumed so the descriptors can be reused.
1962 		 */
1963 		AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);
1964 	}
1965 }
1966 
1967 static void
1968 ae_watchdog(ae_softc_t *sc)
1969 {
1970 	if_t ifp;
1971 
1972 	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
1973 	AE_LOCK_ASSERT(sc);
1974 	ifp = sc->ifp;
1975 
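	/*
	 * wd_timer counts down once per tick; it is presumably armed by the
	 * transmit path and is cleared by ae_tx_intr() once all in-flight
	 * frames have completed, so reaching zero here means a Tx hang.
	 */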
1976 	if (sc->wd_timer == 0 || --sc->wd_timer != 0)
1977 		return;		/* Nothing to do. */
1978 
1979 	if ((sc->flags & AE_FLAG_LINK) == 0)
1980 		if_printf(ifp, "watchdog timeout (missed link).\n");
1981 	else
1982 		if_printf(ifp, "watchdog timeout - resetting.\n");
1983 
1984 	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1985 	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1986 	ae_init_locked(sc);
1987 	if (!if_sendq_empty(ifp))
1988 		ae_start_locked(ifp);
1989 }
1990 
1991 static void
1992 ae_tick(void *arg)
1993 {
1994 	ae_softc_t *sc;
1995 	struct mii_data *mii;
1996 
1997 	sc = (ae_softc_t *)arg;
1998 	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
1999 	AE_LOCK_ASSERT(sc);
2000 
2001 	mii = device_get_softc(sc->miibus);
2002 	mii_tick(mii);
2003 	ae_watchdog(sc);	/* Watchdog check. */
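	/* Reschedule ourselves to run again in one second (hz ticks). */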
2004 	callout_reset(&sc->tick_ch, hz, ae_tick, sc);
2005 }
2006 
2007 static void
2008 ae_rxvlan(ae_softc_t *sc)
2009 {
2010 	if_t ifp;
2011 	uint32_t val;
2012 
2013 	AE_LOCK_ASSERT(sc);
2014 	ifp = sc->ifp;
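	/*
	 * Mirror IFCAP_VLAN_HWTAGGING into the MAC configuration register;
	 * with AE_MAC_RMVLAN_EN set, the hardware strips the VLAN tag from
	 * received frames (it is delivered separately, see ae_rxeof()).
	 */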
2015 	val = AE_READ_4(sc, AE_MAC_REG);
2016 	val &= ~AE_MAC_RMVLAN_EN;
2017 	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
2018 		val |= AE_MAC_RMVLAN_EN;
2019 	AE_WRITE_4(sc, AE_MAC_REG, val);
2020 }
2021 
2022 static u_int
2023 ae_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2024 {
2025 	uint32_t crc, *mchash = arg;
2026 
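	/*
	 * The controller keeps a 64-bit multicast hash split across two
	 * 32-bit registers: the most significant bit of the big-endian CRC
	 * selects the register, the next five bits select the bit within it.
	 */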
2027 	crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
2028 	mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
2029 
2030 	return (1);
2031 }
2032 
2033 static void
2034 ae_rxfilter(ae_softc_t *sc)
2035 {
2036 	if_t ifp;
2037 	uint32_t mchash[2];
2038 	uint32_t rxcfg;
2039 
2040 	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
2041 
2042 	AE_LOCK_ASSERT(sc);
2043 
2044 	ifp = sc->ifp;
2045 
2046 	rxcfg = AE_READ_4(sc, AE_MAC_REG);
2047 	rxcfg &= ~(AE_MAC_MCAST_EN | AE_MAC_BCAST_EN | AE_MAC_PROMISC_EN);
2048 
2049 	if ((if_getflags(ifp) & IFF_BROADCAST) != 0)
2050 		rxcfg |= AE_MAC_BCAST_EN;
2051 	if ((if_getflags(ifp) & IFF_PROMISC) != 0)
2052 		rxcfg |= AE_MAC_PROMISC_EN;
2053 	if ((if_getflags(ifp) & IFF_ALLMULTI) != 0)
2054 		rxcfg |= AE_MAC_MCAST_EN;
2055 
2056 	/*
2057 	 * Wipe old settings.
2058 	 */
2059 	AE_WRITE_4(sc, AE_REG_MHT0, 0);
2060 	AE_WRITE_4(sc, AE_REG_MHT1, 0);
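	/*
	 * In promiscuous or all-multicast mode, open the filter completely
	 * by setting every bit in both hash registers.
	 */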
2061 	if ((if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2062 		AE_WRITE_4(sc, AE_REG_MHT0, 0xffffffff);
2063 		AE_WRITE_4(sc, AE_REG_MHT1, 0xffffffff);
2064 		AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
2065 		return;
2066 	}
2067 
2068 	/*
2069 	 * Load multicast tables.
2070 	 */
2071 	bzero(mchash, sizeof(mchash));
2072 	if_foreach_llmaddr(ifp, ae_hash_maddr, &mchash);
2073 	AE_WRITE_4(sc, AE_REG_MHT0, mchash[0]);
2074 	AE_WRITE_4(sc, AE_REG_MHT1, mchash[1]);
2075 	AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
2076 }
2077 
2078 static int
2079 ae_ioctl(if_t ifp, u_long cmd, caddr_t data)
2080 {
2081 	struct ae_softc *sc;
2082 	struct ifreq *ifr;
2083 	struct mii_data *mii;
2084 	int error, mask;
2085 
2086 	sc = if_getsoftc(ifp);
2087 	ifr = (struct ifreq *)data;
2088 	error = 0;
2089 
2090 	switch (cmd) {
2091 	case SIOCSIFMTU:
2092 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
2093 			error = EINVAL;
2094 		else if (if_getmtu(ifp) != ifr->ifr_mtu) {
2095 			AE_LOCK(sc);
2096 			if_setmtu(ifp, ifr->ifr_mtu);
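			/*
			 * If the interface is running, reinitialize it so
			 * the new MTU is programmed into the controller.
			 */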
2097 			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2098 				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
2099 				ae_init_locked(sc);
2100 			}
2101 			AE_UNLOCK(sc);
2102 		}
2103 		break;
2104 	case SIOCSIFFLAGS:
2105 		AE_LOCK(sc);
2106 		if ((if_getflags(ifp) & IFF_UP) != 0) {
2107 			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2108 				if (((if_getflags(ifp) ^ sc->if_flags)
2109 				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2110 					ae_rxfilter(sc);
2111 			} else {
2112 				if ((sc->flags & AE_FLAG_DETACH) == 0)
2113 					ae_init_locked(sc);
2114 			}
2115 		} else {
2116 			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
2117 				ae_stop(sc);
2118 		}
2119 		sc->if_flags = if_getflags(ifp);
2120 		AE_UNLOCK(sc);
2121 		break;
2122 	case SIOCADDMULTI:
2123 	case SIOCDELMULTI:
2124 		AE_LOCK(sc);
2125 		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
2126 			ae_rxfilter(sc);
2127 		AE_UNLOCK(sc);
2128 		break;
2129 	case SIOCSIFMEDIA:
2130 	case SIOCGIFMEDIA:
2131 		mii = device_get_softc(sc->miibus);
2132 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
2133 		break;
2134 	case SIOCSIFCAP:
2135 		AE_LOCK(sc);
2136 		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
2137 		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
2138 		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
2139 			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
2140 			ae_rxvlan(sc);
2141 		}
2142 		VLAN_CAPABILITIES(ifp);
2143 		AE_UNLOCK(sc);
2144 		break;
2145 	default:
2146 		error = ether_ioctl(ifp, cmd, data);
2147 		break;
2148 	}
2149 	return (error);
2150 }
2151 
2152 static void
2153 ae_stop(ae_softc_t *sc)
2154 {
2155 	if_t ifp;
2156 	int i;
2157 
2158 	AE_LOCK_ASSERT(sc);
2159 
2160 	ifp = sc->ifp;
2161 	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
2162 	sc->flags &= ~AE_FLAG_LINK;
2163 	sc->wd_timer = 0;	/* Cancel watchdog. */
2164 	callout_stop(&sc->tick_ch);
2165 
2166 	/*
2167 	 * Clear and disable interrupts.
2168 	 */
2169 	AE_WRITE_4(sc, AE_IMR_REG, 0);
2170 	AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);
2171 
2172 	/*
2173 	 * Stop Rx/Tx MACs.
2174 	 */
2175 	ae_stop_txmac(sc);
2176 	ae_stop_rxmac(sc);
2177 
2178 	/*
2179 	 * Stop DMA engines.
2180 	 */
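	/*
	 * Writing the complement of the enable bit clears it; the remaining
	 * bits of these one-byte registers appear to be don't-care here.
	 */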
2181 	AE_WRITE_1(sc, AE_DMAREAD_REG, ~AE_DMAREAD_EN);
2182 	AE_WRITE_1(sc, AE_DMAWRITE_REG, ~AE_DMAWRITE_EN);
2183 
2184 	/*
2185 	 * Wait for everything to enter idle state.
2186 	 */
2187 	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
2188 		if (AE_READ_4(sc, AE_IDLE_REG) == 0)
2189 			break;
2190 		DELAY(100);
2191 	}
2192 	if (i == AE_IDLE_TIMEOUT)
2193 		device_printf(sc->dev, "could not enter idle state in stop.\n");
2194 }
2195 
2196 static void
2197 ae_update_stats_tx(uint16_t flags, ae_stats_t *stats)
2198 {
2199 
2200 	if ((flags & AE_TXS_BCAST) != 0)
2201 		stats->tx_bcast++;
2202 	if ((flags & AE_TXS_MCAST) != 0)
2203 		stats->tx_mcast++;
2204 	if ((flags & AE_TXS_PAUSE) != 0)
2205 		stats->tx_pause++;
2206 	if ((flags & AE_TXS_CTRL) != 0)
2207 		stats->tx_ctrl++;
2208 	if ((flags & AE_TXS_DEFER) != 0)
2209 		stats->tx_defer++;
2210 	if ((flags & AE_TXS_EXCDEFER) != 0)
2211 		stats->tx_excdefer++;
2212 	if ((flags & AE_TXS_SINGLECOL) != 0)
2213 		stats->tx_singlecol++;
2214 	if ((flags & AE_TXS_MULTICOL) != 0)
2215 		stats->tx_multicol++;
2216 	if ((flags & AE_TXS_LATECOL) != 0)
2217 		stats->tx_latecol++;
2218 	if ((flags & AE_TXS_ABORTCOL) != 0)
2219 		stats->tx_abortcol++;
2220 	if ((flags & AE_TXS_UNDERRUN) != 0)
2221 		stats->tx_underrun++;
2222 }
2223 
2224 static void
2225 ae_update_stats_rx(uint16_t flags, ae_stats_t *stats)
2226 {
2227 
2228 	if ((flags & AE_RXD_BCAST) != 0)
2229 		stats->rx_bcast++;
2230 	if ((flags & AE_RXD_MCAST) != 0)
2231 		stats->rx_mcast++;
2232 	if ((flags & AE_RXD_PAUSE) != 0)
2233 		stats->rx_pause++;
2234 	if ((flags & AE_RXD_CTRL) != 0)
2235 		stats->rx_ctrl++;
2236 	if ((flags & AE_RXD_CRCERR) != 0)
2237 		stats->rx_crcerr++;
2238 	if ((flags & AE_RXD_CODEERR) != 0)
2239 		stats->rx_codeerr++;
2240 	if ((flags & AE_RXD_RUNT) != 0)
2241 		stats->rx_runt++;
2242 	if ((flags & AE_RXD_FRAG) != 0)
2243 		stats->rx_frag++;
2244 	if ((flags & AE_RXD_TRUNC) != 0)
2245 		stats->rx_trunc++;
2246 	if ((flags & AE_RXD_ALIGN) != 0)
2247 		stats->rx_align++;
2248 }
2249