xref: /freebsd/sys/dev/age/if_age.c (revision dda5b39711dab90ae1c5624bdd6ff7453177df31)
/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>

#include <dev/age/if_agereg.h>
#include <dev/age/if_agevar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define	AGE_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)

MODULE_DEPEND(age, pci, 1, 1, 1);
MODULE_DEPEND(age, ether, 1, 1, 1);
MODULE_DEPEND(age, miibus, 1, 1, 1);

/* Tunables. */
static int msi_disable = 0;
static int msix_disable = 0;
TUNABLE_INT("hw.age.msi_disable", &msi_disable);
TUNABLE_INT("hw.age.msix_disable", &msix_disable);

/*
 * Devices supported by this driver.
 */
static struct age_dev {
	uint16_t	age_vendorid;
	uint16_t	age_deviceid;
	const char	*age_name;
} age_devs[] = {
	{ VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L1,
	    "Attansic Technology Corp, L1 Gigabit Ethernet" },
};

static int age_miibus_readreg(device_t, int, int);
static int age_miibus_writereg(device_t, int, int, int);
static void age_miibus_statchg(device_t);
static void age_mediastatus(struct ifnet *, struct ifmediareq *);
static int age_mediachange(struct ifnet *);
static int age_probe(device_t);
static void age_get_macaddr(struct age_softc *);
static void age_phy_reset(struct age_softc *);
static int age_attach(device_t);
static int age_detach(device_t);
static void age_sysctl_node(struct age_softc *);
static void age_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int age_check_boundary(struct age_softc *);
static int age_dma_alloc(struct age_softc *);
static void age_dma_free(struct age_softc *);
static int age_shutdown(device_t);
static void age_setwol(struct age_softc *);
static int age_suspend(device_t);
static int age_resume(device_t);
static int age_encap(struct age_softc *, struct mbuf **);
static void age_start(struct ifnet *);
static void age_start_locked(struct ifnet *);
static void age_watchdog(struct age_softc *);
static int age_ioctl(struct ifnet *, u_long, caddr_t);
static void age_mac_config(struct age_softc *);
static void age_link_task(void *, int);
static void age_stats_update(struct age_softc *);
static int age_intr(void *);
static void age_int_task(void *, int);
static void age_txintr(struct age_softc *, int);
static void age_rxeof(struct age_softc *sc, struct rx_rdesc *);
static int age_rxintr(struct age_softc *, int, int);
static void age_tick(void *);
static void age_reset(struct age_softc *);
static void age_init(void *);
static void age_init_locked(struct age_softc *);
static void age_stop(struct age_softc *);
static void age_stop_txmac(struct age_softc *);
static void age_stop_rxmac(struct age_softc *);
static void age_init_tx_ring(struct age_softc *);
static int age_init_rx_ring(struct age_softc *);
static void age_init_rr_ring(struct age_softc *);
static void age_init_cmb_block(struct age_softc *);
static void age_init_smb_block(struct age_softc *);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *age_fixup_rx(struct ifnet *, struct mbuf *);
#endif
static int age_newbuf(struct age_softc *, struct age_rxdesc *);
static void age_rxvlan(struct age_softc *);
static void age_rxfilter(struct age_softc *);
static int sysctl_age_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_age_proc_limit(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_age_int_mod(SYSCTL_HANDLER_ARGS);

static device_method_t age_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		age_probe),
	DEVMETHOD(device_attach,	age_attach),
	DEVMETHOD(device_detach,	age_detach),
	DEVMETHOD(device_shutdown,	age_shutdown),
	DEVMETHOD(device_suspend,	age_suspend),
	DEVMETHOD(device_resume,	age_resume),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	age_miibus_readreg),
	DEVMETHOD(miibus_writereg,	age_miibus_writereg),
	DEVMETHOD(miibus_statchg,	age_miibus_statchg),

	{ NULL, NULL }
};

static driver_t age_driver = {
	"age",
	age_methods,
	sizeof(struct age_softc)
};

static devclass_t age_devclass;

DRIVER_MODULE(age, pci, age_driver, age_devclass, 0, 0);
DRIVER_MODULE(miibus, age, miibus_driver, miibus_devclass, 0, 0);

static struct resource_spec age_res_spec_mem[] = {
	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
	{ -1,			0,		0 }
};

static struct resource_spec age_irq_spec_legacy[] = {
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};

static struct resource_spec age_irq_spec_msi[] = {
	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
	{ -1,			0,		0 }
};

static struct resource_spec age_irq_spec_msix[] = {
	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
	{ -1,			0,		0 }
};

/*
 *	Read a PHY register on the MII of the L1.
 */
static int
age_miibus_readreg(device_t dev, int phy, int reg)
{
	struct age_softc *sc;
	uint32_t v;
	int i;

	sc = device_get_softc(dev);

	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		v = CSR_READ_4(sc, AGE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->age_dev, "phy read timeout : %d\n", reg);
		return (0);
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

/*
 *	Write a PHY register on the MII of the L1.
 */
static int
age_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct age_softc *sc;
	uint32_t v;
	int i;

	sc = device_get_softc(dev);

	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		v = CSR_READ_4(sc, AGE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0)
		device_printf(sc->age_dev, "phy write timeout : %d\n", reg);

	return (0);
}

/*
 *	Callback from MII layer when media changes.
 */
static void
age_miibus_statchg(device_t dev)
{
	struct age_softc *sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc->age_link_task);
}

/*
 *	Get the current interface media status.
 */
static void
age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct age_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	AGE_LOCK(sc);
	mii = device_get_softc(sc->age_miibus);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
	AGE_UNLOCK(sc);
}

/*
 *	Set hardware to newly-selected media.
 */
static int
age_mediachange(struct ifnet *ifp)
{
	struct age_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	AGE_LOCK(sc);
	mii = device_get_softc(sc->age_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);
	AGE_UNLOCK(sc);

	return (error);
}

static int
age_probe(device_t dev)
{
	struct age_dev *sp;
	int i;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	sp = age_devs;
	for (i = 0; i < sizeof(age_devs) / sizeof(age_devs[0]);
	    i++, sp++) {
		if (vendor == sp->age_vendorid &&
		    devid == sp->age_deviceid) {
			device_set_desc(dev, sp->age_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

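/*
 *	Load the station address from the PAR0/PAR1 registers, after
 *	letting TWSI reload the EEPROM when a PCI VPD capability is
 *	present.
 */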
static void
age_get_macaddr(struct age_softc *sc)
{
	uint32_t ea[2], reg;
	int i, vpdc;

	reg = CSR_READ_4(sc, AGE_SPI_CTRL);
	if ((reg & SPI_VPD_ENB) != 0) {
		/* Get VPD stored in TWSI EEPROM. */
		reg &= ~SPI_VPD_ENB;
		CSR_WRITE_4(sc, AGE_SPI_CTRL, reg);
	}

	if (pci_find_cap(sc->age_dev, PCIY_VPD, &vpdc) == 0) {
		/*
		 * PCI VPD capability found, let TWSI reload EEPROM.
		 * This will set the ethernet address of the controller.
		 */
		CSR_WRITE_4(sc, AGE_TWSI_CTRL, CSR_READ_4(sc, AGE_TWSI_CTRL) |
		    TWSI_CTRL_SW_LD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			reg = CSR_READ_4(sc, AGE_TWSI_CTRL);
			if ((reg & TWSI_CTRL_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			device_printf(sc->age_dev,
			    "reloading EEPROM timeout!\n");
	} else {
		if (bootverbose)
			device_printf(sc->age_dev,
			    "PCI VPD capability not found!\n");
	}

	ea[0] = CSR_READ_4(sc, AGE_PAR0);
	ea[1] = CSR_READ_4(sc, AGE_PAR1);
	sc->age_eaddr[0] = (ea[1] >> 8) & 0xFF;
	sc->age_eaddr[1] = (ea[1] >> 0) & 0xFF;
	sc->age_eaddr[2] = (ea[0] >> 24) & 0xFF;
	sc->age_eaddr[3] = (ea[0] >> 16) & 0xFF;
	sc->age_eaddr[4] = (ea[0] >> 8) & 0xFF;
	sc->age_eaddr[5] = (ea[0] >> 0) & 0xFF;
}

static void
age_phy_reset(struct age_softc *sc)
{
	uint16_t reg, pn;
	int i, linkup;

	/* Reset PHY. */
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST);
	DELAY(2000);
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR);
	DELAY(2000);

#define	ATPHY_DBG_ADDR		0x1D
#define	ATPHY_DBG_DATA		0x1E
#define	ATPHY_CDTC		0x16
#define	PHY_CDTC_ENB		0x0001
#define	PHY_CDTC_POFF		8
#define	ATPHY_CDTS		0x1C
#define	PHY_CDTS_STAT_OK	0x0000
#define	PHY_CDTS_STAT_SHORT	0x0100
#define	PHY_CDTS_STAT_OPEN	0x0200
#define	PHY_CDTS_STAT_INVAL	0x0300
#define	PHY_CDTS_STAT_MASK	0x0300

	/* Check power saving mode. Magic from Linux. */
	age_miibus_writereg(sc->age_dev, sc->age_phyaddr, MII_BMCR, BMCR_RESET);
	for (linkup = 0, pn = 0; pn < 4; pn++) {
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr, ATPHY_CDTC,
		    (pn << PHY_CDTC_POFF) | PHY_CDTC_ENB);
		for (i = 200; i > 0; i--) {
			DELAY(1000);
			reg = age_miibus_readreg(sc->age_dev, sc->age_phyaddr,
			    ATPHY_CDTC);
			if ((reg & PHY_CDTC_ENB) == 0)
				break;
		}
		DELAY(1000);
		reg = age_miibus_readreg(sc->age_dev, sc->age_phyaddr,
		    ATPHY_CDTS);
		if ((reg & PHY_CDTS_STAT_MASK) != PHY_CDTS_STAT_OPEN) {
			linkup++;
			break;
		}
	}
	age_miibus_writereg(sc->age_dev, sc->age_phyaddr, MII_BMCR,
	    BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
	if (linkup == 0) {
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 0);
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, 0x124E);
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 1);
		reg = age_miibus_readreg(sc->age_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA);
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, reg | 0x03);
		/* XXX */
		DELAY(1500 * 1000);
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 0);
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, 0x024E);
	}

#undef	ATPHY_DBG_ADDR
#undef	ATPHY_DBG_DATA
#undef	ATPHY_CDTC
#undef	PHY_CDTC_ENB
#undef	PHY_CDTC_POFF
#undef	ATPHY_CDTS
#undef	PHY_CDTS_STAT_OK
#undef	PHY_CDTS_STAT_SHORT
#undef	PHY_CDTS_STAT_OPEN
#undef	PHY_CDTS_STAT_INVAL
#undef	PHY_CDTS_STAT_MASK
}

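/*
 *	Attach: map registers, reset the PHY and controller, allocate
 *	IRQ and DMA resources, then hook up the MII bus and ifnet.
 */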
static int
age_attach(device_t dev)
{
	struct age_softc *sc;
	struct ifnet *ifp;
	uint16_t burst;
	int error, i, msic, msixc, pmc;

	error = 0;
	sc = device_get_softc(dev);
	sc->age_dev = dev;

	mtx_init(&sc->age_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->age_tick_ch, &sc->age_mtx, 0);
	TASK_INIT(&sc->age_int_task, 0, age_int_task, sc);
	TASK_INIT(&sc->age_link_task, 0, age_link_task, sc);

	/* Map the device. */
	pci_enable_busmaster(dev);
	sc->age_res_spec = age_res_spec_mem;
	sc->age_irq_spec = age_irq_spec_legacy;
	error = bus_alloc_resources(dev, sc->age_res_spec, sc->age_res);
	if (error != 0) {
		device_printf(dev, "cannot allocate memory resources.\n");
		goto fail;
	}

	/* Set PHY address. */
	sc->age_phyaddr = AGE_PHY_ADDR;

	/* Reset PHY. */
	age_phy_reset(sc);

	/* Reset the ethernet controller. */
	age_reset(sc);

	/* Get PCI and chip id/revision. */
	sc->age_rev = pci_get_revid(dev);
	sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;
	if (bootverbose) {
		device_printf(dev, "PCI device revision : 0x%04x\n",
		    sc->age_rev);
		device_printf(dev, "Chip id/revision : 0x%04x\n",
		    sc->age_chip_rev);
	}

	/*
	 * XXX
	 * Uninitialized hardware returns an invalid chip id/revision
	 * as well as 0xFFFFFFFF for Tx/Rx fifo length. It seems that
	 * an unplugged cable puts the hardware into automatic power
	 * down mode, which in turn returns an invalid chip revision.
	 */
	if (sc->age_chip_rev == 0xFFFF) {
		device_printf(dev, "invalid chip revision : 0x%04x -- "
		    "not initialized?\n", sc->age_chip_rev);
		error = ENXIO;
		goto fail;
	}

	device_printf(dev, "%d Tx FIFO, %d Rx FIFO\n",
	    CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN),
	    CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN));

	/* Allocate IRQ resources. */
	msixc = pci_msix_count(dev);
	msic = pci_msi_count(dev);
	if (bootverbose) {
		device_printf(dev, "MSIX count : %d\n", msixc);
		device_printf(dev, "MSI count : %d\n", msic);
	}

	/* Prefer MSIX over MSI. */
	if (msix_disable == 0 || msi_disable == 0) {
		if (msix_disable == 0 && msixc == AGE_MSIX_MESSAGES &&
		    pci_alloc_msix(dev, &msixc) == 0) {
			if (msixc == AGE_MSIX_MESSAGES) {
				device_printf(dev, "Using %d MSIX messages.\n",
				    msixc);
				sc->age_flags |= AGE_FLAG_MSIX;
				sc->age_irq_spec = age_irq_spec_msix;
			} else
				pci_release_msi(dev);
		}
		if (msi_disable == 0 && (sc->age_flags & AGE_FLAG_MSIX) == 0 &&
		    msic == AGE_MSI_MESSAGES &&
		    pci_alloc_msi(dev, &msic) == 0) {
			if (msic == AGE_MSI_MESSAGES) {
				device_printf(dev, "Using %d MSI messages.\n",
				    msic);
				sc->age_flags |= AGE_FLAG_MSI;
				sc->age_irq_spec = age_irq_spec_msi;
			} else
				pci_release_msi(dev);
		}
	}

	error = bus_alloc_resources(dev, sc->age_irq_spec, sc->age_irq);
	if (error != 0) {
		device_printf(dev, "cannot allocate IRQ resources.\n");
		goto fail;
	}

	/* Get DMA parameters from PCIe device control register. */
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		sc->age_flags |= AGE_FLAG_PCIE;
		burst = pci_read_config(dev, i + 0x08, 2);
		/* Max read request size. */
		sc->age_dma_rd_burst = ((burst >> 12) & 0x07) <<
		    DMA_CFG_RD_BURST_SHIFT;
		/* Max payload size. */
		sc->age_dma_wr_burst = ((burst >> 5) & 0x07) <<
		    DMA_CFG_WR_BURST_SHIFT;
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((burst >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((burst >> 5) & 0x07));
		}
	} else {
		sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128;
		sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128;
	}

	/* Create device sysctl node. */
	age_sysctl_node(sc);

	if ((error = age_dma_alloc(sc)) != 0)
		goto fail;

	/* Load station address. */
	age_get_macaddr(sc);

	ifp = sc->age_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure.\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = age_ioctl;
	ifp->if_start = age_start;
	ifp->if_init = age_init;
	ifp->if_snd.ifq_drv_maxlen = AGE_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4;
	ifp->if_hwassist = AGE_CSUM_FEATURES | CSUM_TSO;
	if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) {
		sc->age_flags |= AGE_FLAG_PMCAP;
		ifp->if_capabilities |= IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST;
	}
	ifp->if_capenable = ifp->if_capabilities;

	/* Set up MII bus. */
	error = mii_attach(dev, &sc->age_miibus, ifp, age_mediachange,
	    age_mediastatus, BMSR_DEFCAPMASK, sc->age_phyaddr, MII_OFFSET_ANY,
	    0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->age_eaddr);

	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO;
	ifp->if_capenable = ifp->if_capabilities;

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/* Create local taskq. */
	sc->age_tq = taskqueue_create_fast("age_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->age_tq);
	if (sc->age_tq == NULL) {
		device_printf(dev, "could not create taskqueue.\n");
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}
	taskqueue_start_threads(&sc->age_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->age_dev));

	if ((sc->age_flags & AGE_FLAG_MSIX) != 0)
		msic = AGE_MSIX_MESSAGES;
	else if ((sc->age_flags & AGE_FLAG_MSI) != 0)
		msic = AGE_MSI_MESSAGES;
	else
		msic = 1;
	for (i = 0; i < msic; i++) {
		error = bus_setup_intr(dev, sc->age_irq[i],
		    INTR_TYPE_NET | INTR_MPSAFE, age_intr, NULL, sc,
		    &sc->age_intrhand[i]);
		if (error != 0)
			break;
	}
	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		taskqueue_free(sc->age_tq);
		sc->age_tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error != 0)
		age_detach(dev);

	return (error);
}

static int
age_detach(device_t dev)
{
	struct age_softc *sc;
	struct ifnet *ifp;
	int i, msic;

	sc = device_get_softc(dev);

	ifp = sc->age_ifp;
	if (device_is_attached(dev)) {
		AGE_LOCK(sc);
		sc->age_flags |= AGE_FLAG_DETACH;
		age_stop(sc);
		AGE_UNLOCK(sc);
		callout_drain(&sc->age_tick_ch);
		taskqueue_drain(sc->age_tq, &sc->age_int_task);
		taskqueue_drain(taskqueue_swi, &sc->age_link_task);
		ether_ifdetach(ifp);
	}

	if (sc->age_tq != NULL) {
		taskqueue_drain(sc->age_tq, &sc->age_int_task);
		taskqueue_free(sc->age_tq);
		sc->age_tq = NULL;
	}

	if (sc->age_miibus != NULL) {
		device_delete_child(dev, sc->age_miibus);
		sc->age_miibus = NULL;
	}
	bus_generic_detach(dev);
	age_dma_free(sc);

	if (ifp != NULL) {
		if_free(ifp);
		sc->age_ifp = NULL;
	}

	if ((sc->age_flags & AGE_FLAG_MSIX) != 0)
		msic = AGE_MSIX_MESSAGES;
	else if ((sc->age_flags & AGE_FLAG_MSI) != 0)
		msic = AGE_MSI_MESSAGES;
	else
		msic = 1;
	for (i = 0; i < msic; i++) {
		if (sc->age_intrhand[i] != NULL) {
			bus_teardown_intr(dev, sc->age_irq[i],
			    sc->age_intrhand[i]);
			sc->age_intrhand[i] = NULL;
		}
	}

	bus_release_resources(dev, sc->age_irq_spec, sc->age_irq);
	if ((sc->age_flags & (AGE_FLAG_MSI | AGE_FLAG_MSIX)) != 0)
		pci_release_msi(dev);
	bus_release_resources(dev, sc->age_res_spec, sc->age_res);
	mtx_destroy(&sc->age_mtx);

	return (0);
}

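/*
 *	Create the device sysctl nodes and pull in tunable values,
 *	falling back to the defaults when a value is out of range.
 */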
static void
age_sysctl_node(struct age_softc *sc)
{
	int error;

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)), OID_AUTO,
	    "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_age_stats,
	    "I", "Statistics");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)), OID_AUTO,
	    "int_mod", CTLTYPE_INT | CTLFLAG_RW, &sc->age_int_mod, 0,
	    sysctl_hw_age_int_mod, "I", "age interrupt moderation");

	/* Pull in device tunables. */
	sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
	error = resource_int_value(device_get_name(sc->age_dev),
	    device_get_unit(sc->age_dev), "int_mod", &sc->age_int_mod);
	if (error == 0) {
		if (sc->age_int_mod < AGE_IM_TIMER_MIN ||
		    sc->age_int_mod > AGE_IM_TIMER_MAX) {
			device_printf(sc->age_dev,
			    "int_mod value out of range; using default: %d\n",
			    AGE_IM_TIMER_DEFAULT);
			sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
		}
	}

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)), OID_AUTO,
	    "process_limit", CTLTYPE_INT | CTLFLAG_RW, &sc->age_process_limit,
	    0, sysctl_hw_age_proc_limit, "I",
	    "max number of Rx events to process");

	/* Pull in device tunables. */
	sc->age_process_limit = AGE_PROC_DEFAULT;
	error = resource_int_value(device_get_name(sc->age_dev),
	    device_get_unit(sc->age_dev), "process_limit",
	    &sc->age_process_limit);
	if (error == 0) {
		if (sc->age_process_limit < AGE_PROC_MIN ||
		    sc->age_process_limit > AGE_PROC_MAX) {
			device_printf(sc->age_dev,
			    "process_limit value out of range; "
			    "using default: %d\n", AGE_PROC_DEFAULT);
			sc->age_process_limit = AGE_PROC_DEFAULT;
		}
	}
}

struct age_dmamap_arg {
	bus_addr_t	age_busaddr;
};

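/*
 *	Callback for bus_dmamap_load(); saves the bus address of the
 *	single segment so callers can record the physical address of
 *	a ring or message block.
 */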
static void
age_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct age_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct age_dmamap_arg *)arg;
	ctx->age_busaddr = segs[0].ds_addr;
}

/*
 * The Attansic L1 controller has a single register to specify the high
 * part of DMA block addresses, so all descriptor structures and DMA
 * memory blocks must share the same upper 32 bits of the 64bit address
 * space (i.e. crossing a 4GB boundary is not allowed).
 */
static int
age_check_boundary(struct age_softc *sc)
{
	bus_addr_t rx_ring_end, rr_ring_end, tx_ring_end;
	bus_addr_t cmb_block_end, smb_block_end;

	/* Tx/Rx descriptor queue should reside within 4GB boundary. */
	tx_ring_end = sc->age_rdata.age_tx_ring_paddr + AGE_TX_RING_SZ;
	rx_ring_end = sc->age_rdata.age_rx_ring_paddr + AGE_RX_RING_SZ;
	rr_ring_end = sc->age_rdata.age_rr_ring_paddr + AGE_RR_RING_SZ;
	cmb_block_end = sc->age_rdata.age_cmb_block_paddr + AGE_CMB_BLOCK_SZ;
	smb_block_end = sc->age_rdata.age_smb_block_paddr + AGE_SMB_BLOCK_SZ;

	if ((AGE_ADDR_HI(tx_ring_end) !=
	    AGE_ADDR_HI(sc->age_rdata.age_tx_ring_paddr)) ||
	    (AGE_ADDR_HI(rx_ring_end) !=
	    AGE_ADDR_HI(sc->age_rdata.age_rx_ring_paddr)) ||
	    (AGE_ADDR_HI(rr_ring_end) !=
	    AGE_ADDR_HI(sc->age_rdata.age_rr_ring_paddr)) ||
	    (AGE_ADDR_HI(cmb_block_end) !=
	    AGE_ADDR_HI(sc->age_rdata.age_cmb_block_paddr)) ||
	    (AGE_ADDR_HI(smb_block_end) !=
	    AGE_ADDR_HI(sc->age_rdata.age_smb_block_paddr)))
		return (EFBIG);

	if ((AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(rx_ring_end)) ||
	    (AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(rr_ring_end)) ||
	    (AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(cmb_block_end)) ||
	    (AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(smb_block_end)))
		return (EFBIG);

	return (0);
}

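/*
 *	Allocate DMA tags, descriptor rings, message blocks and buffer
 *	maps. If any block crosses a 4GB boundary, everything is freed
 *	and reallocated within 32bit address space.
 */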
static int
age_dma_alloc(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	bus_addr_t lowaddr;
	struct age_dmamap_arg ctx;
	int error, i;

	lowaddr = BUS_SPACE_MAXADDR;

again:
	/* Create parent ring/DMA block tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->age_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->age_cdata.age_parent_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag, /* parent */
	    AGE_TX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    AGE_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->age_cdata.age_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create Tx ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag, /* parent */
	    AGE_RX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_RX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    AGE_RX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->age_cdata.age_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create Rx ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx return ring. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag, /* parent */
	    AGE_RR_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_RR_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    AGE_RR_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->age_cdata.age_rr_ring_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create Rx return ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for coalescing message block. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag, /* parent */
	    AGE_CMB_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_CMB_BLOCK_SZ,		/* maxsize */
	    1,				/* nsegments */
	    AGE_CMB_BLOCK_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->age_cdata.age_cmb_block_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create CMB DMA tag.\n");
		goto fail;
	}

	/* Create tag for statistics message block. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag, /* parent */
	    AGE_SMB_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_SMB_BLOCK_SZ,		/* maxsize */
	    1,				/* nsegments */
	    AGE_SMB_BLOCK_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->age_cdata.age_smb_block_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create SMB DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map. */
	error = bus_dmamem_alloc(sc->age_cdata.age_tx_ring_tag,
	    (void **)&sc->age_rdata.age_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->age_cdata.age_tx_ring_map);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		goto fail;
	}
	ctx.age_busaddr = 0;
	error = bus_dmamap_load(sc->age_cdata.age_tx_ring_tag,
	    sc->age_cdata.age_tx_ring_map, sc->age_rdata.age_tx_ring,
	    AGE_TX_RING_SZ, age_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.age_busaddr == 0) {
		device_printf(sc->age_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		goto fail;
	}
	sc->age_rdata.age_tx_ring_paddr = ctx.age_busaddr;
	/* Rx ring */
	error = bus_dmamem_alloc(sc->age_cdata.age_rx_ring_tag,
	    (void **)&sc->age_rdata.age_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->age_cdata.age_rx_ring_map);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not allocate DMA'able memory for Rx ring.\n");
		goto fail;
	}
	ctx.age_busaddr = 0;
	error = bus_dmamap_load(sc->age_cdata.age_rx_ring_tag,
	    sc->age_cdata.age_rx_ring_map, sc->age_rdata.age_rx_ring,
	    AGE_RX_RING_SZ, age_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.age_busaddr == 0) {
		device_printf(sc->age_dev,
		    "could not load DMA'able memory for Rx ring.\n");
		goto fail;
	}
	sc->age_rdata.age_rx_ring_paddr = ctx.age_busaddr;
	/* Rx return ring */
	error = bus_dmamem_alloc(sc->age_cdata.age_rr_ring_tag,
	    (void **)&sc->age_rdata.age_rr_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->age_cdata.age_rr_ring_map);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not allocate DMA'able memory for Rx return ring.\n");
		goto fail;
	}
	ctx.age_busaddr = 0;
	error = bus_dmamap_load(sc->age_cdata.age_rr_ring_tag,
	    sc->age_cdata.age_rr_ring_map, sc->age_rdata.age_rr_ring,
	    AGE_RR_RING_SZ, age_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.age_busaddr == 0) {
		device_printf(sc->age_dev,
		    "could not load DMA'able memory for Rx return ring.\n");
		goto fail;
	}
	sc->age_rdata.age_rr_ring_paddr = ctx.age_busaddr;
	/* CMB block */
	error = bus_dmamem_alloc(sc->age_cdata.age_cmb_block_tag,
	    (void **)&sc->age_rdata.age_cmb_block,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->age_cdata.age_cmb_block_map);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not allocate DMA'able memory for CMB block.\n");
		goto fail;
	}
	ctx.age_busaddr = 0;
	error = bus_dmamap_load(sc->age_cdata.age_cmb_block_tag,
	    sc->age_cdata.age_cmb_block_map, sc->age_rdata.age_cmb_block,
	    AGE_CMB_BLOCK_SZ, age_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.age_busaddr == 0) {
		device_printf(sc->age_dev,
		    "could not load DMA'able memory for CMB block.\n");
		goto fail;
	}
	sc->age_rdata.age_cmb_block_paddr = ctx.age_busaddr;
	/* SMB block */
	error = bus_dmamem_alloc(sc->age_cdata.age_smb_block_tag,
	    (void **)&sc->age_rdata.age_smb_block,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->age_cdata.age_smb_block_map);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not allocate DMA'able memory for SMB block.\n");
		goto fail;
	}
	ctx.age_busaddr = 0;
	error = bus_dmamap_load(sc->age_cdata.age_smb_block_tag,
	    sc->age_cdata.age_smb_block_map, sc->age_rdata.age_smb_block,
	    AGE_SMB_BLOCK_SZ, age_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.age_busaddr == 0) {
		device_printf(sc->age_dev,
		    "could not load DMA'able memory for SMB block.\n");
		goto fail;
	}
	sc->age_rdata.age_smb_block_paddr = ctx.age_busaddr;

	/*
	 * All ring buffers and DMA blocks should have the same
	 * high address part of the 64bit DMA address space.
	 */
	if (lowaddr != BUS_SPACE_MAXADDR_32BIT &&
	    (error = age_check_boundary(sc)) != 0) {
		device_printf(sc->age_dev, "4GB boundary crossed, "
		    "switching to 32bit DMA addressing mode.\n");
		age_dma_free(sc);
		/* Limit DMA address space to 32bit and try again. */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}

	/*
	 * Create Tx/Rx buffer parent tag.
	 * L1 supports full 64bit DMA addressing in Tx/Rx buffers
	 * so it needs a separate parent DMA tag.
	 * XXX
	 * It seems enabling 64bit DMA causes data corruption. Limit
	 * DMA address space to 32bit.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->age_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->age_cdata.age_buffer_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create parent buffer DMA tag.\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_buffer_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_TSO_MAXSIZE,		/* maxsize */
	    AGE_MAXTXSEGS,		/* nsegments */
	    AGE_TSO_MAXSEGSIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->age_cdata.age_tx_tag);
	if (error != 0) {
		device_printf(sc->age_dev, "could not create Tx DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_buffer_tag, /* parent */
	    AGE_RX_BUF_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->age_cdata.age_rx_tag);
	if (error != 0) {
		device_printf(sc->age_dev, "could not create Rx DMA tag.\n");
		goto fail;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->age_cdata.age_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->age_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->age_cdata.age_rx_tag, 0,
	    &sc->age_cdata.age_rx_sparemap)) != 0) {
		device_printf(sc->age_dev,
		    "could not create spare Rx dmamap.\n");
		goto fail;
	}
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->age_cdata.age_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->age_dev,
			    "could not create Rx dmamap.\n");
			goto fail;
		}
	}

fail:
	return (error);
}

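/*
 *	Release all DMA maps, memory and tags created by age_dma_alloc().
 */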
static void
age_dma_free(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	int i;

	/* Tx buffers */
	if (sc->age_cdata.age_tx_tag != NULL) {
		for (i = 0; i < AGE_TX_RING_CNT; i++) {
			txd = &sc->age_cdata.age_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->age_cdata.age_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->age_cdata.age_tx_tag);
		sc->age_cdata.age_tx_tag = NULL;
	}
	/* Rx buffers */
	if (sc->age_cdata.age_rx_tag != NULL) {
		for (i = 0; i < AGE_RX_RING_CNT; i++) {
			rxd = &sc->age_cdata.age_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->age_cdata.age_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->age_cdata.age_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->age_cdata.age_rx_tag,
			    sc->age_cdata.age_rx_sparemap);
			sc->age_cdata.age_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->age_cdata.age_rx_tag);
		sc->age_cdata.age_rx_tag = NULL;
	}
	/* Tx ring. */
	if (sc->age_cdata.age_tx_ring_tag != NULL) {
		if (sc->age_cdata.age_tx_ring_map != NULL)
			bus_dmamap_unload(sc->age_cdata.age_tx_ring_tag,
			    sc->age_cdata.age_tx_ring_map);
		if (sc->age_cdata.age_tx_ring_map != NULL &&
		    sc->age_rdata.age_tx_ring != NULL)
			bus_dmamem_free(sc->age_cdata.age_tx_ring_tag,
			    sc->age_rdata.age_tx_ring,
			    sc->age_cdata.age_tx_ring_map);
		sc->age_rdata.age_tx_ring = NULL;
		sc->age_cdata.age_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_tx_ring_tag);
		sc->age_cdata.age_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->age_cdata.age_rx_ring_tag != NULL) {
		if (sc->age_cdata.age_rx_ring_map != NULL)
			bus_dmamap_unload(sc->age_cdata.age_rx_ring_tag,
			    sc->age_cdata.age_rx_ring_map);
		if (sc->age_cdata.age_rx_ring_map != NULL &&
		    sc->age_rdata.age_rx_ring != NULL)
			bus_dmamem_free(sc->age_cdata.age_rx_ring_tag,
			    sc->age_rdata.age_rx_ring,
			    sc->age_cdata.age_rx_ring_map);
		sc->age_rdata.age_rx_ring = NULL;
		sc->age_cdata.age_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_rx_ring_tag);
		sc->age_cdata.age_rx_ring_tag = NULL;
	}
	/* Rx return ring. */
	if (sc->age_cdata.age_rr_ring_tag != NULL) {
		if (sc->age_cdata.age_rr_ring_map != NULL)
			bus_dmamap_unload(sc->age_cdata.age_rr_ring_tag,
			    sc->age_cdata.age_rr_ring_map);
		if (sc->age_cdata.age_rr_ring_map != NULL &&
		    sc->age_rdata.age_rr_ring != NULL)
			bus_dmamem_free(sc->age_cdata.age_rr_ring_tag,
			    sc->age_rdata.age_rr_ring,
			    sc->age_cdata.age_rr_ring_map);
		sc->age_rdata.age_rr_ring = NULL;
		sc->age_cdata.age_rr_ring_map = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_rr_ring_tag);
		sc->age_cdata.age_rr_ring_tag = NULL;
	}
	/* CMB block */
	if (sc->age_cdata.age_cmb_block_tag != NULL) {
		if (sc->age_cdata.age_cmb_block_map != NULL)
			bus_dmamap_unload(sc->age_cdata.age_cmb_block_tag,
			    sc->age_cdata.age_cmb_block_map);
		if (sc->age_cdata.age_cmb_block_map != NULL &&
		    sc->age_rdata.age_cmb_block != NULL)
			bus_dmamem_free(sc->age_cdata.age_cmb_block_tag,
			    sc->age_rdata.age_cmb_block,
			    sc->age_cdata.age_cmb_block_map);
		sc->age_rdata.age_cmb_block = NULL;
		sc->age_cdata.age_cmb_block_map = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_cmb_block_tag);
		sc->age_cdata.age_cmb_block_tag = NULL;
	}
	/* SMB block */
	if (sc->age_cdata.age_smb_block_tag != NULL) {
		if (sc->age_cdata.age_smb_block_map != NULL)
			bus_dmamap_unload(sc->age_cdata.age_smb_block_tag,
			    sc->age_cdata.age_smb_block_map);
		if (sc->age_cdata.age_smb_block_map != NULL &&
		    sc->age_rdata.age_smb_block != NULL)
			bus_dmamem_free(sc->age_cdata.age_smb_block_tag,
			    sc->age_rdata.age_smb_block,
			    sc->age_cdata.age_smb_block_map);
		sc->age_rdata.age_smb_block = NULL;
		sc->age_cdata.age_smb_block_map = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_smb_block_tag);
		sc->age_cdata.age_smb_block_tag = NULL;
	}

	if (sc->age_cdata.age_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->age_cdata.age_buffer_tag);
		sc->age_cdata.age_buffer_tag = NULL;
	}
	if (sc->age_cdata.age_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->age_cdata.age_parent_tag);
		sc->age_cdata.age_parent_tag = NULL;
	}
}

/*
 *	Make sure the interface is stopped at reboot time.
 */
static int
age_shutdown(device_t dev)
{

	return (age_suspend(dev));
}

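/*
 *	Configure Wake On LAN: make sure the link is at 10/100Mbps,
 *	program magic frame wakeup and, if WOL is enabled, request
 *	PME through the PCI power management registers.
 */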
static void
age_setwol(struct age_softc *sc)
{
	struct ifnet *ifp;
	struct mii_data *mii;
	uint32_t reg, pmcs;
	uint16_t pmstat;
	int aneg, i, pmc;

	AGE_LOCK_ASSERT(sc);

	if (pci_find_cap(sc->age_dev, PCIY_PMG, &pmc) != 0) {
		CSR_WRITE_4(sc, AGE_WOL_CFG, 0);
		/*
		 * No PME capability, PHY power down.
		 * XXX
		 * For an unknown reason, powering down the PHY resulted
		 * in unexpected results such as inaccessibility of the
		 * hardware on a freshly rebooted system. Leave the PHY
		 * powered up until more is known about Attansic/Atheros
		 * PHY hardware.
		 */
#ifdef notyet
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
#endif
		return;
	}

	ifp = sc->age_ifp;
	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		/*
		 * Note, this driver resets the link speed to 10/100Mbps with
		 * auto-negotiation but we don't know whether that operation
		 * will succeed or not as it has no control after powering
		 * off. If the renegotiation fails, WOL may not work. Running
		 * at 1Gbps will draw more power than the 375mA at 3.3V
		 * specified in the PCI specification, which would result in
		 * power to the ethernet controller being shut down
		 * completely.
		 *
		 * TODO
		 *  Save current negotiated media speed/duplex/flow-control
		 *  to softc and restore the same link again after resuming.
		 *  PHY handling such as power down/resetting to 100Mbps
		 *  may be better handled in suspend method in phy driver.
		 */
		mii = device_get_softc(sc->age_miibus);
		mii_pollstat(mii);
		aneg = 0;
		if ((mii->mii_media_status & IFM_AVALID) != 0) {
			switch (IFM_SUBTYPE(mii->mii_media_active)) {
			case IFM_10_T:
			case IFM_100_TX:
				goto got_link;
			case IFM_1000_T:
				aneg++;
				/* FALLTHROUGH */
			default:
				break;
			}
		}
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    MII_100T2CR, 0);
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD |
		    ANAR_10 | ANAR_CSMA);
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
		DELAY(1000);
		if (aneg != 0) {
			/* Poll link state until age(4) gets a 10/100 link. */
			for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
				mii_pollstat(mii);
				if ((mii->mii_media_status & IFM_AVALID) != 0) {
					switch (IFM_SUBTYPE(
					    mii->mii_media_active)) {
					case IFM_10_T:
					case IFM_100_TX:
						age_mac_config(sc);
						goto got_link;
					default:
						break;
					}
				}
				AGE_UNLOCK(sc);
				pause("agelnk", hz);
				AGE_LOCK(sc);
			}
			if (i == MII_ANEGTICKS_GIGE)
				device_printf(sc->age_dev,
				    "establishing link failed, "
				    "WOL may not work!\n");
		}
		/*
		 * No link, force MAC to have 100Mbps, full-duplex link.
		 * This is the last resort and may or may not work.
		 */
		mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
		mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
		age_mac_config(sc);
	}

got_link:
	pmcs = 0;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB;
	CSR_WRITE_4(sc, AGE_WOL_CFG, pmcs);
	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC);
	reg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST);
	if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
		reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST;
	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		reg |= MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}

	/* Request PME. */
	pmstat = pci_read_config(sc->age_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->age_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
#ifdef notyet
	/* See above for powering down PHY issues. */
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
#endif
}

static int
age_suspend(device_t dev)
{
	struct age_softc *sc;

	sc = device_get_softc(dev);

	AGE_LOCK(sc);
	age_stop(sc);
	age_setwol(sc);
	AGE_UNLOCK(sc);

	return (0);
}

static int
age_resume(device_t dev)
{
	struct age_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	AGE_LOCK(sc);
	age_phy_reset(sc);
	ifp = sc->age_ifp;
	if ((ifp->if_flags & IFF_UP) != 0)
		age_init_locked(sc);

	AGE_UNLOCK(sc);

	return (0);
}

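/*
 *	Encapsulate an mbuf chain into the Tx ring, setting up VLAN
 *	tag insertion, checksum offload and TSO descriptors as
 *	requested by the packet header.
 */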
1492 static int
1493 age_encap(struct age_softc *sc, struct mbuf **m_head)
1494 {
1495 	struct age_txdesc *txd, *txd_last;
1496 	struct tx_desc *desc;
1497 	struct mbuf *m;
1498 	struct ip *ip;
1499 	struct tcphdr *tcp;
1500 	bus_dma_segment_t txsegs[AGE_MAXTXSEGS];
1501 	bus_dmamap_t map;
1502 	uint32_t cflags, hdrlen, ip_off, poff, vtag;
1503 	int error, i, nsegs, prod, si;
1504 
1505 	AGE_LOCK_ASSERT(sc);
1506 
1507 	M_ASSERTPKTHDR((*m_head));
1508 
1509 	m = *m_head;
1510 	ip = NULL;
1511 	tcp = NULL;
1512 	cflags = vtag = 0;
1513 	ip_off = poff = 0;
1514 	if ((m->m_pkthdr.csum_flags & (AGE_CSUM_FEATURES | CSUM_TSO)) != 0) {
1515 		/*
1516 		 * L1 requires offset of TCP/UDP payload in its Tx
1517 		 * descriptor to perform hardware Tx checksum offload.
1518 		 * Additionally, TSO requires IP/TCP header size and
1519 		 * modification of IP/TCP header in order to make TSO
1520 		 * engine work. This kind of operation takes many CPU
1521 		 * cycles on FreeBSD so fast host CPU is needed to get
1522 		 * smooth TSO performance.
1523 		 */
1524 		struct ether_header *eh;
1525 
1526 		if (M_WRITABLE(m) == 0) {
1527 			/* Get a writable copy. */
1528 			m = m_dup(*m_head, M_NOWAIT);
1529 			/* Release original mbufs. */
1530 			m_freem(*m_head);
1531 			if (m == NULL) {
1532 				*m_head = NULL;
1533 				return (ENOBUFS);
1534 			}
1535 			*m_head = m;
1536 		}
1537 		ip_off = sizeof(struct ether_header);
1538 		m = m_pullup(m, ip_off);
1539 		if (m == NULL) {
1540 			*m_head = NULL;
1541 			return (ENOBUFS);
1542 		}
1543 		eh = mtod(m, struct ether_header *);
1544 		/*
1545 		 * Check if hardware VLAN insertion is off.
1546 		 * Additional check for LLC/SNAP frame?
1547 		 */
1548 		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1549 			ip_off = sizeof(struct ether_vlan_header);
1550 			m = m_pullup(m, ip_off);
1551 			if (m == NULL) {
1552 				*m_head = NULL;
1553 				return (ENOBUFS);
1554 			}
1555 		}
1556 		m = m_pullup(m, ip_off + sizeof(struct ip));
1557 		if (m == NULL) {
1558 			*m_head = NULL;
1559 			return (ENOBUFS);
1560 		}
1561 		ip = (struct ip *)(mtod(m, char *) + ip_off);
1562 		poff = ip_off + (ip->ip_hl << 2);
1563 		if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
1564 			m = m_pullup(m, poff + sizeof(struct tcphdr));
1565 			if (m == NULL) {
1566 				*m_head = NULL;
1567 				return (ENOBUFS);
1568 			}
1569 			tcp = (struct tcphdr *)(mtod(m, char *) + poff);
1570 			m = m_pullup(m, poff + (tcp->th_off << 2));
1571 			if (m == NULL) {
1572 				*m_head = NULL;
1573 				return (ENOBUFS);
1574 			}
1575 			/*
1576 			 * L1 requires IP/TCP header size and offset as
1577 			 * well as TCP pseudo checksum which complicates
1578 			 * TSO configuration. I guess this comes from the
1579 			 * adherence to Microsoft NDIS Large Send
1580 			 * specification which requires insertion of
1581 			 * pseudo checksum by upper stack. The pseudo
1582 			 * checksum that NDIS refers to doesn't include
1583 			 * TCP payload length so age(4) should recompute
1584 			 * the pseudo checksum here. Hopefully this wouldn't
1585 			 * be much burden on modern CPUs.
1586 			 * Reset IP checksum and recompute TCP pseudo
1587 			 * checksum as NDIS specification said.
1588 			 */
1589 			ip = (struct ip *)(mtod(m, char *) + ip_off);
1590 			tcp = (struct tcphdr *)(mtod(m, char *) + poff);
1591 			ip->ip_sum = 0;
1592 			tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
1593 			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1594 		}
1595 		*m_head = m;
1596 	}
1597 
1598 	si = prod = sc->age_cdata.age_tx_prod;
1599 	txd = &sc->age_cdata.age_txdesc[prod];
1600 	txd_last = txd;
1601 	map = txd->tx_dmamap;
1602 
1603 	error =  bus_dmamap_load_mbuf_sg(sc->age_cdata.age_tx_tag, map,
1604 	    *m_head, txsegs, &nsegs, 0);
1605 	if (error == EFBIG) {
1606 		m = m_collapse(*m_head, M_NOWAIT, AGE_MAXTXSEGS);
1607 		if (m == NULL) {
1608 			m_freem(*m_head);
1609 			*m_head = NULL;
1610 			return (ENOMEM);
1611 		}
1612 		*m_head = m;
1613 		error = bus_dmamap_load_mbuf_sg(sc->age_cdata.age_tx_tag, map,
1614 		    *m_head, txsegs, &nsegs, 0);
1615 		if (error != 0) {
1616 			m_freem(*m_head);
1617 			*m_head = NULL;
1618 			return (error);
1619 		}
1620 	} else if (error != 0)
1621 		return (error);
1622 	if (nsegs == 0) {
1623 		m_freem(*m_head);
1624 		*m_head = NULL;
1625 		return (EIO);
1626 	}
1627 
1628 	/* Check descriptor overrun. */
1629 	if (sc->age_cdata.age_tx_cnt + nsegs >= AGE_TX_RING_CNT - 2) {
1630 		bus_dmamap_unload(sc->age_cdata.age_tx_tag, map);
1631 		return (ENOBUFS);
1632 	}
1633 
1634 	m = *m_head;
1635 	/* Configure VLAN hardware tag insertion. */
1636 	if ((m->m_flags & M_VLANTAG) != 0) {
1637 		vtag = AGE_TX_VLAN_TAG(m->m_pkthdr.ether_vtag);
1638 		vtag = ((vtag << AGE_TD_VLAN_SHIFT) & AGE_TD_VLAN_MASK);
1639 		cflags |= AGE_TD_INSERT_VLAN_TAG;
1640 	}
1641 
1642 	desc = NULL;
1643 	i = 0;
1644 	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
1645 		/* Request TSO and set MSS. */
1646 		cflags |= AGE_TD_TSO_IPV4;
1647 		cflags |= AGE_TD_IPCSUM | AGE_TD_TCPCSUM;
1648 		cflags |= ((uint32_t)m->m_pkthdr.tso_segsz <<
1649 		    AGE_TD_TSO_MSS_SHIFT);
1650 		/* Set IP/TCP header size. */
1651 		cflags |= ip->ip_hl << AGE_TD_IPHDR_LEN_SHIFT;
1652 		cflags |= tcp->th_off << AGE_TD_TSO_TCPHDR_LEN_SHIFT;
1653 		/*
1654 		 * L1 requires the first buffer should only hold IP/TCP
1655 		 * header data. TCP payload should be handled in other
1656 		 * descriptors.
1657 		 */
1658 		hdrlen = poff + (tcp->th_off << 2);
1659 		desc = &sc->age_rdata.age_tx_ring[prod];
1660 		desc->addr = htole64(txsegs[0].ds_addr);
1661 		desc->len = htole32(AGE_TX_BYTES(hdrlen) | vtag);
1662 		desc->flags = htole32(cflags);
1663 		sc->age_cdata.age_tx_cnt++;
1664 		AGE_DESC_INC(prod, AGE_TX_RING_CNT);
1665 		if (m->m_len - hdrlen > 0) {
1666 			/* Handle remaining payload of the 1st fragment. */
1667 			desc = &sc->age_rdata.age_tx_ring[prod];
1668 			desc->addr = htole64(txsegs[0].ds_addr + hdrlen);
1669 			desc->len = htole32(AGE_TX_BYTES(m->m_len - hdrlen) |
1670 			    vtag);
1671 			desc->flags = htole32(cflags);
1672 			sc->age_cdata.age_tx_cnt++;
1673 			AGE_DESC_INC(prod, AGE_TX_RING_CNT);
1674 		}
1675 		/* Handle remaining fragments. */
1676 		i = 1;
1677 	} else if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) {
1678 		/* Configure Tx IP/TCP/UDP checksum offload. */
1679 		cflags |= AGE_TD_CSUM;
1680 		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
1681 			cflags |= AGE_TD_TCPCSUM;
1682 		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
1683 			cflags |= AGE_TD_UDPCSUM;
1684 		/* Set checksum start offset. */
1685 		cflags |= (poff << AGE_TD_CSUM_PLOADOFFSET_SHIFT);
1686 		/* Set checksum insertion position of TCP/UDP. */
1687 		cflags |= ((poff + m->m_pkthdr.csum_data) <<
1688 		    AGE_TD_CSUM_XSUMOFFSET_SHIFT);
1689 	}
1690 	for (; i < nsegs; i++) {
1691 		desc = &sc->age_rdata.age_tx_ring[prod];
1692 		desc->addr = htole64(txsegs[i].ds_addr);
1693 		desc->len = htole32(AGE_TX_BYTES(txsegs[i].ds_len) | vtag);
1694 		desc->flags = htole32(cflags);
1695 		sc->age_cdata.age_tx_cnt++;
1696 		AGE_DESC_INC(prod, AGE_TX_RING_CNT);
1697 	}
1698 	/* Update producer index. */
1699 	sc->age_cdata.age_tx_prod = prod;
1700 
1701 	/* Set EOP on the last descriptor. */
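	/*
	 * 'prod' now points one past the last used slot; step back to
	 * it modulo the ring size (0 wraps to AGE_TX_RING_CNT - 1).
	 */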
1702 	prod = (prod + AGE_TX_RING_CNT - 1) % AGE_TX_RING_CNT;
1703 	desc = &sc->age_rdata.age_tx_ring[prod];
1704 	desc->flags |= htole32(AGE_TD_EOP);
1705 
1706 	/* Lastly set TSO header and modify IP/TCP header for TSO operation. */
1707 	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
1708 		desc = &sc->age_rdata.age_tx_ring[si];
1709 		desc->flags |= htole32(AGE_TD_TSO_HDR);
1710 	}
1711 
1712 	/* Swap dmamap of the first and the last. */
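	/*
	 * The mbuf chain was loaded into the first descriptor's map,
	 * but tx_m is recorded in the last descriptor. Swapping the
	 * maps keeps the loaded map with the descriptor whose mbuf is
	 * reclaimed in age_txintr().
	 */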
1713 	txd = &sc->age_cdata.age_txdesc[prod];
1714 	map = txd_last->tx_dmamap;
1715 	txd_last->tx_dmamap = txd->tx_dmamap;
1716 	txd->tx_dmamap = map;
1717 	txd->tx_m = m;
1718 
1719 	/* Sync descriptors. */
1720 	bus_dmamap_sync(sc->age_cdata.age_tx_tag, map, BUS_DMASYNC_PREWRITE);
1721 	bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag,
1722 	    sc->age_cdata.age_tx_ring_map,
1723 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1724 
1725 	return (0);
1726 }
1727 
1728 static void
1729 age_start(struct ifnet *ifp)
1730 {
1731 	struct age_softc *sc;
1732 
1733 	sc = ifp->if_softc;
1734 	AGE_LOCK(sc);
1735 	age_start_locked(ifp);
1736 	AGE_UNLOCK(sc);
1737 }
1738 
1739 static void
1740 age_start_locked(struct ifnet *ifp)
1741 {
1742 	struct age_softc *sc;
1743 	struct mbuf *m_head;
1744 	int enq;
1745 
1746 	sc = ifp->if_softc;
1747 
1748 	AGE_LOCK_ASSERT(sc);
1749 
1750 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1751 	    IFF_DRV_RUNNING || (sc->age_flags & AGE_FLAG_LINK) == 0)
1752 		return;
1753 
1754 	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
1755 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1756 		if (m_head == NULL)
1757 			break;
1758 		/*
1759 		 * Pack the data into the transmit ring. If we
1760 		 * don't have room, set the OACTIVE flag and wait
1761 		 * for the NIC to drain the ring.
1762 		 */
1763 		if (age_encap(sc, &m_head)) {
1764 			if (m_head == NULL)
1765 				break;
1766 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1767 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1768 			break;
1769 		}
1770 
1771 		enq++;
1772 		/*
1773 		 * If there's a BPF listener, bounce a copy of this frame
1774 		 * to him.
1775 		 */
1776 		ETHER_BPF_MTAP(ifp, m_head);
1777 	}
1778 
1779 	if (enq > 0) {
1780 		/* Update mbox. */
1781 		AGE_COMMIT_MBOX(sc);
1782 		/* Set a timeout in case the chip goes out to lunch. */
1783 		sc->age_watchdog_timer = AGE_TX_TIMEOUT;
1784 	}
1785 }
1786 
1787 static void
1788 age_watchdog(struct age_softc *sc)
1789 {
1790 	struct ifnet *ifp;
1791 
1792 	AGE_LOCK_ASSERT(sc);
1793 
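	/*
	 * The timer is armed in age_start_locked() and disarmed in
	 * age_txintr(); only act when it counts all the way down.
	 */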
1794 	if (sc->age_watchdog_timer == 0 || --sc->age_watchdog_timer)
1795 		return;
1796 
1797 	ifp = sc->age_ifp;
1798 	if ((sc->age_flags & AGE_FLAG_LINK) == 0) {
1799 		if_printf(sc->age_ifp, "watchdog timeout (missed link)\n");
1800 		ifp->if_oerrors++;
1801 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1802 		age_init_locked(sc);
1803 		return;
1804 	}
1805 	if (sc->age_cdata.age_tx_cnt == 0) {
1806 		if_printf(sc->age_ifp,
1807 		    "watchdog timeout (missed Tx interrupts) -- recovering\n");
1808 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1809 			age_start_locked(ifp);
1810 		return;
1811 	}
1812 	if_printf(sc->age_ifp, "watchdog timeout\n");
1813 	ifp->if_oerrors++;
1814 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1815 	age_init_locked(sc);
1816 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1817 		age_start_locked(ifp);
1818 }
1819 
1820 static int
1821 age_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1822 {
1823 	struct age_softc *sc;
1824 	struct ifreq *ifr;
1825 	struct mii_data *mii;
1826 	uint32_t reg;
1827 	int error, mask;
1828 
1829 	sc = ifp->if_softc;
1830 	ifr = (struct ifreq *)data;
1831 	error = 0;
1832 	switch (cmd) {
1833 	case SIOCSIFMTU:
1834 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > AGE_JUMBO_MTU)
1835 			error = EINVAL;
1836 		else if (ifp->if_mtu != ifr->ifr_mtu) {
1837 			AGE_LOCK(sc);
1838 			ifp->if_mtu = ifr->ifr_mtu;
1839 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1840 				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1841 				age_init_locked(sc);
1842 			}
1843 			AGE_UNLOCK(sc);
1844 		}
1845 		break;
1846 	case SIOCSIFFLAGS:
1847 		AGE_LOCK(sc);
1848 		if ((ifp->if_flags & IFF_UP) != 0) {
1849 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1850 				if (((ifp->if_flags ^ sc->age_if_flags)
1851 				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
1852 					age_rxfilter(sc);
1853 			} else {
1854 				if ((sc->age_flags & AGE_FLAG_DETACH) == 0)
1855 					age_init_locked(sc);
1856 			}
1857 		} else {
1858 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1859 				age_stop(sc);
1860 		}
1861 		sc->age_if_flags = ifp->if_flags;
1862 		AGE_UNLOCK(sc);
1863 		break;
1864 	case SIOCADDMULTI:
1865 	case SIOCDELMULTI:
1866 		AGE_LOCK(sc);
1867 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1868 			age_rxfilter(sc);
1869 		AGE_UNLOCK(sc);
1870 		break;
1871 	case SIOCSIFMEDIA:
1872 	case SIOCGIFMEDIA:
1873 		mii = device_get_softc(sc->age_miibus);
1874 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1875 		break;
1876 	case SIOCSIFCAP:
1877 		AGE_LOCK(sc);
1878 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1879 		if ((mask & IFCAP_TXCSUM) != 0 &&
1880 		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
1881 			ifp->if_capenable ^= IFCAP_TXCSUM;
1882 			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1883 				ifp->if_hwassist |= AGE_CSUM_FEATURES;
1884 			else
1885 				ifp->if_hwassist &= ~AGE_CSUM_FEATURES;
1886 		}
1887 		if ((mask & IFCAP_RXCSUM) != 0 &&
1888 		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
1889 			ifp->if_capenable ^= IFCAP_RXCSUM;
1890 			reg = CSR_READ_4(sc, AGE_MAC_CFG);
1891 			reg &= ~MAC_CFG_RXCSUM_ENB;
1892 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
1893 				reg |= MAC_CFG_RXCSUM_ENB;
1894 			CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
1895 		}
1896 		if ((mask & IFCAP_TSO4) != 0 &&
1897 		    (ifp->if_capabilities & IFCAP_TSO4) != 0) {
1898 			ifp->if_capenable ^= IFCAP_TSO4;
1899 			if ((ifp->if_capenable & IFCAP_TSO4) != 0)
1900 				ifp->if_hwassist |= CSUM_TSO;
1901 			else
1902 				ifp->if_hwassist &= ~CSUM_TSO;
1903 		}
1904 
1905 		if ((mask & IFCAP_WOL_MCAST) != 0 &&
1906 		    (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0)
1907 			ifp->if_capenable ^= IFCAP_WOL_MCAST;
1908 		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
1909 		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
1910 			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
1911 		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
1912 		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
1913 			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1914 		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
1915 		    (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
1916 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1917 		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
1918 		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
1919 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1920 			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
1921 				ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
1922 			age_rxvlan(sc);
1923 		}
1924 		AGE_UNLOCK(sc);
1925 		VLAN_CAPABILITIES(ifp);
1926 		break;
1927 	default:
1928 		error = ether_ioctl(ifp, cmd, data);
1929 		break;
1930 	}
1931 
1932 	return (error);
1933 }
1934 
1935 static void
1936 age_mac_config(struct age_softc *sc)
1937 {
1938 	struct mii_data *mii;
1939 	uint32_t reg;
1940 
1941 	AGE_LOCK_ASSERT(sc);
1942 
1943 	mii = device_get_softc(sc->age_miibus);
1944 	reg = CSR_READ_4(sc, AGE_MAC_CFG);
1945 	reg &= ~MAC_CFG_FULL_DUPLEX;
1946 	reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC);
1947 	reg &= ~MAC_CFG_SPEED_MASK;
1948 	/* Reprogram MAC with resolved speed/duplex. */
1949 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1950 	case IFM_10_T:
1951 	case IFM_100_TX:
1952 		reg |= MAC_CFG_SPEED_10_100;
1953 		break;
1954 	case IFM_1000_T:
1955 		reg |= MAC_CFG_SPEED_1000;
1956 		break;
1957 	}
1958 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1959 		reg |= MAC_CFG_FULL_DUPLEX;
1960 #ifdef notyet
1961 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1962 			reg |= MAC_CFG_TX_FC;
1963 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1964 			reg |= MAC_CFG_RX_FC;
1965 #endif
1966 	}
1967 
1968 	CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
1969 }
1970 
1971 static void
1972 age_link_task(void *arg, int pending)
1973 {
1974 	struct age_softc *sc;
1975 	struct mii_data *mii;
1976 	struct ifnet *ifp;
1977 	uint32_t reg;
1978 
1979 	sc = (struct age_softc *)arg;
1980 
1981 	AGE_LOCK(sc);
1982 	mii = device_get_softc(sc->age_miibus);
1983 	ifp = sc->age_ifp;
1984 	if (mii == NULL || ifp == NULL ||
1985 	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1986 		AGE_UNLOCK(sc);
1987 		return;
1988 	}
1989 
1990 	sc->age_flags &= ~AGE_FLAG_LINK;
1991 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
1992 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
1993 		case IFM_10_T:
1994 		case IFM_100_TX:
1995 		case IFM_1000_T:
1996 			sc->age_flags |= AGE_FLAG_LINK;
1997 			break;
1998 		default:
1999 			break;
2000 		}
2001 	}
2002 
2003 	/* Stop Rx/Tx MACs. */
2004 	age_stop_rxmac(sc);
2005 	age_stop_txmac(sc);
2006 
2007 	/* Program MACs with resolved speed/duplex/flow-control. */
2008 	if ((sc->age_flags & AGE_FLAG_LINK) != 0) {
2009 		age_mac_config(sc);
2010 		reg = CSR_READ_4(sc, AGE_MAC_CFG);
2011 		/* Restart DMA engine and Tx/Rx MAC. */
2012 		CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) |
2013 		    DMA_CFG_RD_ENB | DMA_CFG_WR_ENB);
2014 		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
2015 		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2016 	}
2017 
2018 	AGE_UNLOCK(sc);
2019 }
2020 
2021 static void
2022 age_stats_update(struct age_softc *sc)
2023 {
2024 	struct age_stats *stat;
2025 	struct smb *smb;
2026 	struct ifnet *ifp;
2027 
2028 	AGE_LOCK_ASSERT(sc);
2029 
2030 	stat = &sc->age_stat;
2031 
2032 	bus_dmamap_sync(sc->age_cdata.age_smb_block_tag,
2033 	    sc->age_cdata.age_smb_block_map,
2034 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2035 
2036 	smb = sc->age_rdata.age_smb_block;
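	/* 'updated' is set by the hardware after it DMAs a fresh block. */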
2037 	if (smb->updated == 0)
2038 		return;
2039 
2040 	ifp = sc->age_ifp;
2041 	/* Rx stats. */
2042 	stat->rx_frames += smb->rx_frames;
2043 	stat->rx_bcast_frames += smb->rx_bcast_frames;
2044 	stat->rx_mcast_frames += smb->rx_mcast_frames;
2045 	stat->rx_pause_frames += smb->rx_pause_frames;
2046 	stat->rx_control_frames += smb->rx_control_frames;
2047 	stat->rx_crcerrs += smb->rx_crcerrs;
2048 	stat->rx_lenerrs += smb->rx_lenerrs;
2049 	stat->rx_bytes += smb->rx_bytes;
2050 	stat->rx_runts += smb->rx_runts;
2051 	stat->rx_fragments += smb->rx_fragments;
2052 	stat->rx_pkts_64 += smb->rx_pkts_64;
2053 	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
2054 	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
2055 	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
2056 	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
2057 	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
2058 	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
2059 	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
2060 	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
2061 	stat->rx_desc_oflows += smb->rx_desc_oflows;
2062 	stat->rx_alignerrs += smb->rx_alignerrs;
2063 	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
2064 	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
2065 	stat->rx_pkts_filtered += smb->rx_pkts_filtered;
2066 
2067 	/* Tx stats. */
2068 	stat->tx_frames += smb->tx_frames;
2069 	stat->tx_bcast_frames += smb->tx_bcast_frames;
2070 	stat->tx_mcast_frames += smb->tx_mcast_frames;
2071 	stat->tx_pause_frames += smb->tx_pause_frames;
2072 	stat->tx_excess_defer += smb->tx_excess_defer;
2073 	stat->tx_control_frames += smb->tx_control_frames;
2074 	stat->tx_deferred += smb->tx_deferred;
2075 	stat->tx_bytes += smb->tx_bytes;
2076 	stat->tx_pkts_64 += smb->tx_pkts_64;
2077 	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
2078 	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
2079 	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
2080 	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
2081 	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
2082 	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
2083 	stat->tx_single_colls += smb->tx_single_colls;
2084 	stat->tx_multi_colls += smb->tx_multi_colls;
2085 	stat->tx_late_colls += smb->tx_late_colls;
2086 	stat->tx_excess_colls += smb->tx_excess_colls;
2087 	stat->tx_underrun += smb->tx_underrun;
2088 	stat->tx_desc_underrun += smb->tx_desc_underrun;
2089 	stat->tx_lenerrs += smb->tx_lenerrs;
2090 	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
2091 	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
2092 	stat->tx_mcast_bytes += smb->tx_mcast_bytes;
2093 
2094 	/* Update counters in ifnet. */
2095 	ifp->if_opackets += smb->tx_frames;
2096 
2097 	ifp->if_collisions += smb->tx_single_colls +
2098 	    smb->tx_multi_colls + smb->tx_late_colls +
2099 	    smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT;
2100 
2101 	ifp->if_oerrors += smb->tx_excess_colls +
2102 	    smb->tx_late_colls + smb->tx_underrun +
2103 	    smb->tx_pkts_truncated;
2104 
2105 	ifp->if_ipackets += smb->rx_frames;
2106 
2107 	ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
2108 	    smb->rx_runts + smb->rx_pkts_truncated +
2109 	    smb->rx_fifo_oflows + smb->rx_desc_oflows +
2110 	    smb->rx_alignerrs;
2111 
2112 	/* Update done, clear. */
2113 	smb->updated = 0;
2114 
2115 	bus_dmamap_sync(sc->age_cdata.age_smb_block_tag,
2116 	    sc->age_cdata.age_smb_block_map,
2117 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2118 }
2119 
2120 static int
2121 age_intr(void *arg)
2122 {
2123 	struct age_softc *sc;
2124 	uint32_t status;
2125 
2126 	sc = (struct age_softc *)arg;
2127 
2128 	status = CSR_READ_4(sc, AGE_INTR_STATUS);
2129 	if (status == 0 || (status & AGE_INTRS) == 0)
2130 		return (FILTER_STRAY);
2131 	/* Disable interrupts. */
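	/*
	 * Writing the status back with INTR_DIS_INT acks the pending
	 * sources and masks further interrupts until age_int_task()
	 * re-enables them by writing 0 to AGE_INTR_STATUS.
	 */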
2132 	CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT);
2133 	taskqueue_enqueue(sc->age_tq, &sc->age_int_task);
2134 
2135 	return (FILTER_HANDLED);
2136 }
2137 
2138 static void
2139 age_int_task(void *arg, int pending)
2140 {
2141 	struct age_softc *sc;
2142 	struct ifnet *ifp;
2143 	struct cmb *cmb;
2144 	uint32_t status;
2145 
2146 	sc = (struct age_softc *)arg;
2147 
2148 	AGE_LOCK(sc);
2149 
2150 	bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag,
2151 	    sc->age_cdata.age_cmb_block_map,
2152 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2153 	cmb = sc->age_rdata.age_cmb_block;
2154 	status = le32toh(cmb->intr_status);
2155 	if (sc->age_morework != 0)
2156 		status |= INTR_CMB_RX;
2157 	if ((status & AGE_INTRS) == 0)
2158 		goto done;
2159 
2160 	sc->age_tpd_cons = (le32toh(cmb->tpd_cons) & TPD_CONS_MASK) >>
2161 	    TPD_CONS_SHIFT;
2162 	sc->age_rr_prod = (le32toh(cmb->rprod_cons) & RRD_PROD_MASK) >>
2163 	    RRD_PROD_SHIFT;
2164 	/* Let hardware know CMB was served. */
2165 	cmb->intr_status = 0;
2166 	bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag,
2167 	    sc->age_cdata.age_cmb_block_map,
2168 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2169 
2170 #if 0
2171 	printf("INTR: 0x%08x\n", status);
2172 	status &= ~INTR_DIS_DMA;
2173 	CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT);
2174 #endif
2175 	ifp = sc->age_ifp;
2176 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2177 		if ((status & INTR_CMB_RX) != 0)
2178 			sc->age_morework = age_rxintr(sc, sc->age_rr_prod,
2179 			    sc->age_process_limit);
2180 		if ((status & INTR_CMB_TX) != 0)
2181 			age_txintr(sc, sc->age_tpd_cons);
2182 		if ((status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) != 0) {
2183 			if ((status & INTR_DMA_RD_TO_RST) != 0)
2184 				device_printf(sc->age_dev,
2185 				    "DMA read error! -- resetting\n");
2186 			if ((status & INTR_DMA_WR_TO_RST) != 0)
2187 				device_printf(sc->age_dev,
2188 				    "DMA write error! -- resetting\n");
2189 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2190 			age_init_locked(sc);
2191 		}
2192 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2193 			age_start_locked(ifp);
2194 		if ((status & INTR_SMB) != 0)
2195 			age_stats_update(sc);
2196 	}
2197 
2198 	/* Check whether CMB was updated while serving Tx/Rx/SMB handler. */
2199 	bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag,
2200 	    sc->age_cdata.age_cmb_block_map,
2201 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2202 	status = le32toh(cmb->intr_status);
2203 	if (sc->age_morework != 0 || (status & AGE_INTRS) != 0) {
2204 		taskqueue_enqueue(sc->age_tq, &sc->age_int_task);
2205 		AGE_UNLOCK(sc);
2206 		return;
2207 	}
2208 
2209 done:
2210 	/* Re-enable interrupts. */
2211 	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
2212 	AGE_UNLOCK(sc);
2213 }
2214 
2215 static void
2216 age_txintr(struct age_softc *sc, int tpd_cons)
2217 {
2218 	struct ifnet *ifp;
2219 	struct age_txdesc *txd;
2220 	int cons, prog;
2221 
2222 	AGE_LOCK_ASSERT(sc);
2223 
2224 	ifp = sc->age_ifp;
2225 
2226 	bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag,
2227 	    sc->age_cdata.age_tx_ring_map,
2228 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2229 
2230 	/*
2231 	 * Go through our Tx list and free mbufs for those
2232 	 * frames which have been transmitted.
2233 	 */
2234 	cons = sc->age_cdata.age_tx_cons;
2235 	for (prog = 0; cons != tpd_cons; AGE_DESC_INC(cons, AGE_TX_RING_CNT)) {
2236 		if (sc->age_cdata.age_tx_cnt <= 0)
2237 			break;
2238 		prog++;
2239 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2240 		sc->age_cdata.age_tx_cnt--;
2241 		txd = &sc->age_cdata.age_txdesc[cons];
2242 		/*
2243 		 * Clearing Tx descriptors is not required, but it helps
2244 		 * debugging in case of Tx issues.
2245 		 */
2246 		txd->tx_desc->addr = 0;
2247 		txd->tx_desc->len = 0;
2248 		txd->tx_desc->flags = 0;
2249 
2250 		if (txd->tx_m == NULL)
2251 			continue;
2252 		/* Reclaim transmitted mbufs. */
2253 		bus_dmamap_sync(sc->age_cdata.age_tx_tag, txd->tx_dmamap,
2254 		    BUS_DMASYNC_POSTWRITE);
2255 		bus_dmamap_unload(sc->age_cdata.age_tx_tag, txd->tx_dmamap);
2256 		m_freem(txd->tx_m);
2257 		txd->tx_m = NULL;
2258 	}
2259 
2260 	if (prog > 0) {
2261 		sc->age_cdata.age_tx_cons = cons;
2262 
2263 		/*
2264 		 * Disarm the watchdog timer only when there are no pending
2265 		 * Tx descriptors in the queue.
2266 		 */
2267 		if (sc->age_cdata.age_tx_cnt == 0)
2268 			sc->age_watchdog_timer = 0;
2269 		bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag,
2270 		    sc->age_cdata.age_tx_ring_map,
2271 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2272 	}
2273 }
2274 
2275 #ifndef __NO_STRICT_ALIGNMENT
2276 static struct mbuf *
2277 age_fixup_rx(struct ifnet *ifp, struct mbuf *m)
2278 {
2279 	struct mbuf *n;
2280 	int i;
2281 	uint16_t *src, *dst;
2282 
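	/*
	 * Move the whole frame 6 bytes (three 16-bit words) toward
	 * the start of the buffer so the IP header following the
	 * 14-byte Ethernet header ends up 32-bit aligned on
	 * strict-alignment machines.
	 */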
2283 	src = mtod(m, uint16_t *);
2284 	dst = src - 3;
2285 
2286 	if (m->m_next == NULL) {
2287 		for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
2288 			*dst++ = *src++;
2289 		m->m_data -= 6;
2290 		return (m);
2291 	}
2292 	/*
2293 	 * Prepend a new mbuf to the received mbuf chain and copy the
2294 	 * Ethernet header into it from the original chain. This saves
2295 	 * lots of CPU cycles for jumbo frames.
2296 	 */
2297 	MGETHDR(n, M_NOWAIT, MT_DATA);
2298 	if (n == NULL) {
2299 		ifp->if_iqdrops++;
2300 		m_freem(m);
2301 		return (NULL);
2302 	}
2303 	bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
2304 	m->m_data += ETHER_HDR_LEN;
2305 	m->m_len -= ETHER_HDR_LEN;
2306 	n->m_len = ETHER_HDR_LEN;
2307 	M_MOVE_PKTHDR(n, m);
2308 	n->m_next = m;
2309 	return (n);
2310 }
2311 #endif
2312 
2313 /* Receive a frame. */
2314 static void
2315 age_rxeof(struct age_softc *sc, struct rx_rdesc *rxrd)
2316 {
2317 	struct age_rxdesc *rxd;
2318 	struct ifnet *ifp;
2319 	struct mbuf *mp, *m;
2320 	uint32_t status, index, vtag;
2321 	int count, nsegs;
2322 	int rx_cons;
2323 
2324 	AGE_LOCK_ASSERT(sc);
2325 
2326 	ifp = sc->age_ifp;
2327 	status = le32toh(rxrd->flags);
2328 	index = le32toh(rxrd->index);
2329 	rx_cons = AGE_RX_CONS(index);
2330 	nsegs = AGE_RX_NSEGS(index);
2331 
2332 	sc->age_cdata.age_rxlen = AGE_RX_BYTES(le32toh(rxrd->len));
2333 	if ((status & (AGE_RRD_ERROR | AGE_RRD_LENGTH_NOK)) != 0) {
2334 		/*
2335 		 * We want to pass the following frames to the upper
2336 		 * layer regardless of the error status of the Rx
2337 		 * return ring:
2338 		 *
2339 		 *  o frames whose IP/TCP/UDP checksum is bad.
2340 		 *  o frames whose frame length and protocol-specific
2341 		 *     length do not match.
2342 		 */
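		/*
		 * Setting the NOK bits prevents the checksums from
		 * being marked valid below, forcing software
		 * verification.
		 */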
2343 		status |= AGE_RRD_IPCSUM_NOK | AGE_RRD_TCP_UDPCSUM_NOK;
2344 		if ((status & (AGE_RRD_CRC | AGE_RRD_CODE | AGE_RRD_DRIBBLE |
2345 		    AGE_RRD_RUNT | AGE_RRD_OFLOW | AGE_RRD_TRUNC)) != 0)
2346 			return;
2347 	}
2348 
2349 	for (count = 0; count < nsegs; count++,
2350 	    AGE_DESC_INC(rx_cons, AGE_RX_RING_CNT)) {
2351 		rxd = &sc->age_cdata.age_rxdesc[rx_cons];
2352 		mp = rxd->rx_m;
2353 		/* Add a new receive buffer to the ring. */
2354 		if (age_newbuf(sc, rxd) != 0) {
2355 			ifp->if_iqdrops++;
2356 			/* Reuse Rx buffers. */
2357 			if (sc->age_cdata.age_rxhead != NULL)
2358 				m_freem(sc->age_cdata.age_rxhead);
2359 			break;
2360 		}
2361 
2362 		/*
2363 		 * Assume we've received a full-sized frame.
2364 		 * The actual size is fixed up when we encounter the
2365 		 * end of a multi-segment frame.
2366 		 */
2367 		mp->m_len = AGE_RX_BUF_SIZE;
2368 
2369 		/* Chain received mbufs. */
2370 		if (sc->age_cdata.age_rxhead == NULL) {
2371 			sc->age_cdata.age_rxhead = mp;
2372 			sc->age_cdata.age_rxtail = mp;
2373 		} else {
2374 			mp->m_flags &= ~M_PKTHDR;
2375 			sc->age_cdata.age_rxprev_tail =
2376 			    sc->age_cdata.age_rxtail;
2377 			sc->age_cdata.age_rxtail->m_next = mp;
2378 			sc->age_cdata.age_rxtail = mp;
2379 		}
2380 
2381 		if (count == nsegs - 1) {
2382 			/* Last desc. for this frame. */
2383 			m = sc->age_cdata.age_rxhead;
2384 			m->m_flags |= M_PKTHDR;
2385 			/*
2386 			 * It seems that the L1 controller has no way
2387 			 * to tell the hardware to strip the CRC bytes.
2388 			 */
2389 			m->m_pkthdr.len = sc->age_cdata.age_rxlen -
2390 			    ETHER_CRC_LEN;
2391 			if (nsegs > 1) {
2392 				/* Set last mbuf size. */
2393 				mp->m_len = sc->age_cdata.age_rxlen -
2394 				    ((nsegs - 1) * AGE_RX_BUF_SIZE);
2395 				/* Remove the CRC bytes in chained mbufs. */
2396 				if (mp->m_len <= ETHER_CRC_LEN) {
2397 					sc->age_cdata.age_rxtail =
2398 					    sc->age_cdata.age_rxprev_tail;
2399 					sc->age_cdata.age_rxtail->m_len -=
2400 					    (ETHER_CRC_LEN - mp->m_len);
2401 					sc->age_cdata.age_rxtail->m_next = NULL;
2402 					m_freem(mp);
2403 				} else {
2404 					mp->m_len -= ETHER_CRC_LEN;
2405 				}
2406 			} else
2407 				m->m_len = m->m_pkthdr.len;
2408 			m->m_pkthdr.rcvif = ifp;
2409 			/*
2410 			 * Set checksum information.
2411 			 * It seems that the L1 controller can compute a
2412 			 * partial checksum. The partial checksum value can
2413 			 * be used to accelerate checksum computation for
2414 			 * fragmented TCP/UDP packets, and the upper network
2415 			 * stack already takes advantage of it in the IP
2416 			 * reassembly stage. But I'm not sure of the
2417 			 * correctness of the partial hardware checksum
2418 			 * assistance due to the lack of a data sheet. If it
2419 			 * is proven to work on the L1 I'll enable it.
2420 			 */
2421 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
2422 			    (status & AGE_RRD_IPV4) != 0) {
2423 				if ((status & AGE_RRD_IPCSUM_NOK) == 0)
2424 					m->m_pkthdr.csum_flags |=
2425 					    CSUM_IP_CHECKED | CSUM_IP_VALID;
2426 				if ((status & (AGE_RRD_TCP | AGE_RRD_UDP)) &&
2427 				    (status & AGE_RRD_TCP_UDPCSUM_NOK) == 0) {
2428 					m->m_pkthdr.csum_flags |=
2429 					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2430 					m->m_pkthdr.csum_data = 0xffff;
2431 				}
2432 				/*
2433 				 * Don't mark a bad checksum for TCP/UDP frames,
2434 				 * as fragmented frames may always have the
2435 				 * bad-checksum bit set in the descriptor status.
2436 				 */
2437 			}
2438 
2439 			/* Check for VLAN tagged frames. */
2440 			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
2441 			    (status & AGE_RRD_VLAN) != 0) {
2442 				vtag = AGE_RX_VLAN(le32toh(rxrd->vtags));
2443 				m->m_pkthdr.ether_vtag = AGE_RX_VLAN_TAG(vtag);
2444 				m->m_flags |= M_VLANTAG;
2445 			}
2446 #ifndef __NO_STRICT_ALIGNMENT
2447 			m = age_fixup_rx(ifp, m);
2448 			if (m != NULL)
2449 #endif
2450 			{
2451 			/* Pass it on. */
2452 			AGE_UNLOCK(sc);
2453 			(*ifp->if_input)(ifp, m);
2454 			AGE_LOCK(sc);
2455 			}
2456 		}
2457 	}
2458 
2459 	/* Reset mbuf chains. */
2460 	AGE_RXCHAIN_RESET(sc);
2461 }
2462 
2463 static int
2464 age_rxintr(struct age_softc *sc, int rr_prod, int count)
2465 {
2466 	struct rx_rdesc *rxrd;
2467 	int rr_cons, nsegs, pktlen, prog;
2468 
2469 	AGE_LOCK_ASSERT(sc);
2470 
2471 	rr_cons = sc->age_cdata.age_rr_cons;
2472 	if (rr_cons == rr_prod)
2473 		return (0);
2474 
2475 	bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag,
2476 	    sc->age_cdata.age_rr_ring_map,
2477 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2478 	bus_dmamap_sync(sc->age_cdata.age_rx_ring_tag,
2479 	    sc->age_cdata.age_rx_ring_map, BUS_DMASYNC_POSTWRITE);
2480 
2481 	for (prog = 0; rr_cons != rr_prod; prog++) {
2482 		if (count-- <= 0)
2483 			break;
2484 		rxrd = &sc->age_rdata.age_rr_ring[rr_cons];
2485 		nsegs = AGE_RX_NSEGS(le32toh(rxrd->index));
2486 		if (nsegs == 0)
2487 			break;
2488 		/*
2489 		 * Check the number of segments against the received bytes.
2490 		 * A non-matching value would indicate that the hardware
2491 		 * is still trying to update the Rx return descriptors.
2492 		 * I'm not sure whether this check is really needed.
2493 		 */
2494 		pktlen = AGE_RX_BYTES(le32toh(rxrd->len));
2495 		if (nsegs != (pktlen + (AGE_RX_BUF_SIZE - 1)) / AGE_RX_BUF_SIZE)
2496 			break;
2497 
2498 		/* Received a frame. */
2499 		age_rxeof(sc, rxrd);
2500 		/* Clear return ring. */
2501 		rxrd->index = 0;
2502 		AGE_DESC_INC(rr_cons, AGE_RR_RING_CNT);
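		/* Advance the Rx ring consumer past the consumed buffers. */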
2503 		sc->age_cdata.age_rx_cons += nsegs;
2504 		sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
2505 	}
2506 
2507 	if (prog > 0) {
2508 		/* Update the consumer index. */
2509 		sc->age_cdata.age_rr_cons = rr_cons;
2510 
2511 		bus_dmamap_sync(sc->age_cdata.age_rx_ring_tag,
2512 		    sc->age_cdata.age_rx_ring_map, BUS_DMASYNC_PREWRITE);
2513 		/* Sync descriptors. */
2514 		bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag,
2515 		    sc->age_cdata.age_rr_ring_map,
2516 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2517 
2518 		/* Notify hardware of the availability of new Rx buffers. */
2519 		AGE_COMMIT_MBOX(sc);
2520 	}
2521 
2522 	return (count > 0 ? 0 : EAGAIN);
2523 }
2524 
2525 static void
2526 age_tick(void *arg)
2527 {
2528 	struct age_softc *sc;
2529 	struct mii_data *mii;
2530 
2531 	sc = (struct age_softc *)arg;
2532 
2533 	AGE_LOCK_ASSERT(sc);
2534 
2535 	mii = device_get_softc(sc->age_miibus);
2536 	mii_tick(mii);
2537 	age_watchdog(sc);
2538 	callout_reset(&sc->age_tick_ch, hz, age_tick, sc);
2539 }
2540 
2541 static void
2542 age_reset(struct age_softc *sc)
2543 {
2544 	uint32_t reg;
2545 	int i;
2546 
2547 	CSR_WRITE_4(sc, AGE_MASTER_CFG, MASTER_RESET);
2548 	CSR_READ_4(sc, AGE_MASTER_CFG);
2549 	DELAY(1000);
2550 	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2551 		if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
2552 			break;
2553 		DELAY(10);
2554 	}
2555 
2556 	if (i == 0)
2557 		device_printf(sc->age_dev, "reset timeout (0x%08x)!\n", reg);
2558 	/* Initialize PCIe module. From Linux. */
2559 	CSR_WRITE_4(sc, 0x12FC, 0x6500);
2560 	CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
2561 }
2562 
2563 static void
2564 age_init(void *xsc)
2565 {
2566 	struct age_softc *sc;
2567 
2568 	sc = (struct age_softc *)xsc;
2569 	AGE_LOCK(sc);
2570 	age_init_locked(sc);
2571 	AGE_UNLOCK(sc);
2572 }
2573 
2574 static void
2575 age_init_locked(struct age_softc *sc)
2576 {
2577 	struct ifnet *ifp;
2578 	struct mii_data *mii;
2579 	uint8_t eaddr[ETHER_ADDR_LEN];
2580 	bus_addr_t paddr;
2581 	uint32_t reg, fsize;
2582 	uint32_t rxf_hi, rxf_lo, rrd_hi, rrd_lo;
2583 	int error;
2584 
2585 	AGE_LOCK_ASSERT(sc);
2586 
2587 	ifp = sc->age_ifp;
2588 	mii = device_get_softc(sc->age_miibus);
2589 
2590 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2591 		return;
2592 
2593 	/*
2594 	 * Cancel any pending I/O.
2595 	 */
2596 	age_stop(sc);
2597 
2598 	/*
2599 	 * Reset the chip to a known state.
2600 	 */
2601 	age_reset(sc);
2602 
2603 	/* Initialize descriptors. */
2604 	error = age_init_rx_ring(sc);
2605 	if (error != 0) {
2606 		device_printf(sc->age_dev, "no memory for Rx buffers.\n");
2607 		age_stop(sc);
2608 		return;
2609 	}
2610 	age_init_rr_ring(sc);
2611 	age_init_tx_ring(sc);
2612 	age_init_cmb_block(sc);
2613 	age_init_smb_block(sc);
2614 
2615 	/* Reprogram the station address. */
2616 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2617 	CSR_WRITE_4(sc, AGE_PAR0,
2618 	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
2619 	CSR_WRITE_4(sc, AGE_PAR1, eaddr[0] << 8 | eaddr[1]);
2620 
2621 	/* Set descriptor base addresses. */
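	/*
	 * Note that only a single high-address register is programmed,
	 * so all rings and the CMB/SMB blocks are presumably expected
	 * to share the same 4GB segment; only their low 32 bits are
	 * written below.
	 */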
2622 	paddr = sc->age_rdata.age_tx_ring_paddr;
2623 	CSR_WRITE_4(sc, AGE_DESC_ADDR_HI, AGE_ADDR_HI(paddr));
2624 	paddr = sc->age_rdata.age_rx_ring_paddr;
2625 	CSR_WRITE_4(sc, AGE_DESC_RD_ADDR_LO, AGE_ADDR_LO(paddr));
2626 	paddr = sc->age_rdata.age_rr_ring_paddr;
2627 	CSR_WRITE_4(sc, AGE_DESC_RRD_ADDR_LO, AGE_ADDR_LO(paddr));
2628 	paddr = sc->age_rdata.age_tx_ring_paddr;
2629 	CSR_WRITE_4(sc, AGE_DESC_TPD_ADDR_LO, AGE_ADDR_LO(paddr));
2630 	paddr = sc->age_rdata.age_cmb_block_paddr;
2631 	CSR_WRITE_4(sc, AGE_DESC_CMB_ADDR_LO, AGE_ADDR_LO(paddr));
2632 	paddr = sc->age_rdata.age_smb_block_paddr;
2633 	CSR_WRITE_4(sc, AGE_DESC_SMB_ADDR_LO, AGE_ADDR_LO(paddr));
2634 	/* Set Rx/Rx return descriptor counter. */
2635 	CSR_WRITE_4(sc, AGE_DESC_RRD_RD_CNT,
2636 	    ((AGE_RR_RING_CNT << DESC_RRD_CNT_SHIFT) &
2637 	    DESC_RRD_CNT_MASK) |
2638 	    ((AGE_RX_RING_CNT << DESC_RD_CNT_SHIFT) & DESC_RD_CNT_MASK));
2639 	/* Set Tx descriptor counter. */
2640 	CSR_WRITE_4(sc, AGE_DESC_TPD_CNT,
2641 	    (AGE_TX_RING_CNT << DESC_TPD_CNT_SHIFT) & DESC_TPD_CNT_MASK);
2642 
2643 	/* Tell hardware that we're ready to load descriptors. */
2644 	CSR_WRITE_4(sc, AGE_DMA_BLOCK, DMA_BLOCK_LOAD);
2645 
2646 	/*
2647 	 * Initialize mailbox register.
2648 	 * Updated producer/consumer index information is exchanged
2649 	 * through this mailbox register. However, the Tx producer and
2650 	 * the Rx return consumer/Rx producer are all shared, so it's
2651 	 * hard to separate the Tx and Rx code paths without locking.
2652 	 * If the L1 hardware had separate mailbox registers for Tx
2653 	 * and Rx consumer/producer management, we could have
2654 	 * independent Tx/Rx handlers, and in turn the Rx handler
2655 	 * could run without any locking.
2656 	 */
2657 	AGE_COMMIT_MBOX(sc);
2658 
2659 	/* Configure IPG/IFG parameters. */
2660 	CSR_WRITE_4(sc, AGE_IPG_IFG_CFG,
2661 	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK) |
2662 	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
2663 	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
2664 	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK));
2665 
2666 	/* Set parameters for half-duplex media. */
2667 	CSR_WRITE_4(sc, AGE_HDPX_CFG,
2668 	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
2669 	    HDPX_CFG_LCOL_MASK) |
2670 	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
2671 	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
2672 	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
2673 	    HDPX_CFG_ABEBT_MASK) |
2674 	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
2675 	    HDPX_CFG_JAMIPG_MASK));
2676 
2677 	/* Configure interrupt moderation timer. */
2678 	CSR_WRITE_2(sc, AGE_IM_TIMER, AGE_USECS(sc->age_int_mod));
2679 	reg = CSR_READ_4(sc, AGE_MASTER_CFG);
2680 	reg &= ~MASTER_MTIMER_ENB;
2681 	if (AGE_USECS(sc->age_int_mod) == 0)
2682 		reg &= ~MASTER_ITIMER_ENB;
2683 	else
2684 		reg |= MASTER_ITIMER_ENB;
2685 	CSR_WRITE_4(sc, AGE_MASTER_CFG, reg);
2686 	if (bootverbose)
2687 		device_printf(sc->age_dev, "interrupt moderation is %d us.\n",
2688 		    sc->age_int_mod);
2689 	CSR_WRITE_2(sc, AGE_INTR_CLR_TIMER, AGE_USECS(1000));
2690 
2691 	/* Set the maximum frame size; don't let it be less than ETHERMTU. */
2692 	if (ifp->if_mtu < ETHERMTU)
2693 		sc->age_max_frame_size = ETHERMTU;
2694 	else
2695 		sc->age_max_frame_size = ifp->if_mtu;
2696 	sc->age_max_frame_size += ETHER_HDR_LEN +
2697 	    sizeof(struct ether_vlan_header) + ETHER_CRC_LEN;
2698 	CSR_WRITE_4(sc, AGE_FRAME_SIZE, sc->age_max_frame_size);
2699 	/* Configure jumbo frame. */
2700 	fsize = roundup(sc->age_max_frame_size, sizeof(uint64_t));
2701 	CSR_WRITE_4(sc, AGE_RXQ_JUMBO_CFG,
2702 	    (((fsize / sizeof(uint64_t)) <<
2703 	    RXQ_JUMBO_CFG_SZ_THRESH_SHIFT) & RXQ_JUMBO_CFG_SZ_THRESH_MASK) |
2704 	    ((RXQ_JUMBO_CFG_LKAH_DEFAULT <<
2705 	    RXQ_JUMBO_CFG_LKAH_SHIFT) & RXQ_JUMBO_CFG_LKAH_MASK) |
2706 	    ((AGE_USECS(8) << RXQ_JUMBO_CFG_RRD_TIMER_SHIFT) &
2707 	    RXQ_JUMBO_CFG_RRD_TIMER_MASK));
2708 
2709 	/* Configure flow-control parameters. From Linux. */
2710 	if ((sc->age_flags & AGE_FLAG_PCIE) != 0) {
2711 		/*
2712 		 * Magic workaround for old-L1.
2713 		 * Don't know which hw revision requires this magic.
2714 		 */
2715 		CSR_WRITE_4(sc, 0x12FC, 0x6500);
2716 		/*
2717 		 * Another magic workaround for flow-control mode
2718 		 * change. From Linux.
2719 		 */
2720 		CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
2721 	}
2722 	/*
2723 	 * TODO
2724 	 *  Understand the pause parameter relationships between the FIFO
2725 	 *  size and the number of Rx descriptors and Rx return descriptors.
2726 	 *
2727 	 *  Magic parameters came from Linux.
2728 	 */
2729 	switch (sc->age_chip_rev) {
2730 	case 0x8001:
2731 	case 0x9001:
2732 	case 0x9002:
2733 	case 0x9003:
2734 		rxf_hi = AGE_RX_RING_CNT / 16;
2735 		rxf_lo = (AGE_RX_RING_CNT * 7) / 8;
2736 		rrd_hi = (AGE_RR_RING_CNT * 7) / 8;
2737 		rrd_lo = AGE_RR_RING_CNT / 16;
2738 		break;
2739 	default:
2740 		reg = CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN);
2741 		rxf_lo = reg / 16;
2742 		if (rxf_lo < 192)
2743 			rxf_lo = 192;
2744 		rxf_hi = (reg * 7) / 8;
2745 		if (rxf_hi < rxf_lo)
2746 			rxf_hi = rxf_lo + 16;
2747 		reg = CSR_READ_4(sc, AGE_SRAM_RRD_LEN);
2748 		rrd_lo = reg / 8;
2749 		rrd_hi = (reg * 7) / 8;
2750 		if (rrd_lo < 2)
2751 			rrd_lo = 2;
2752 		if (rrd_hi < rrd_lo)
2753 			rrd_hi = rrd_lo + 3;
2754 		break;
2755 	}
2756 	CSR_WRITE_4(sc, AGE_RXQ_FIFO_PAUSE_THRESH,
2757 	    ((rxf_lo << RXQ_FIFO_PAUSE_THRESH_LO_SHIFT) &
2758 	    RXQ_FIFO_PAUSE_THRESH_LO_MASK) |
2759 	    ((rxf_hi << RXQ_FIFO_PAUSE_THRESH_HI_SHIFT) &
2760 	    RXQ_FIFO_PAUSE_THRESH_HI_MASK));
2761 	CSR_WRITE_4(sc, AGE_RXQ_RRD_PAUSE_THRESH,
2762 	    ((rrd_lo << RXQ_RRD_PAUSE_THRESH_LO_SHIFT) &
2763 	    RXQ_RRD_PAUSE_THRESH_LO_MASK) |
2764 	    ((rrd_hi << RXQ_RRD_PAUSE_THRESH_HI_SHIFT) &
2765 	    RXQ_RRD_PAUSE_THRESH_HI_MASK));
2766 
2767 	/* Configure RxQ. */
2768 	CSR_WRITE_4(sc, AGE_RXQ_CFG,
2769 	    ((RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
2770 	    RXQ_CFG_RD_BURST_MASK) |
2771 	    ((RXQ_CFG_RRD_BURST_THRESH_DEFAULT <<
2772 	    RXQ_CFG_RRD_BURST_THRESH_SHIFT) & RXQ_CFG_RRD_BURST_THRESH_MASK) |
2773 	    ((RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT <<
2774 	    RXQ_CFG_RD_PREF_MIN_IPG_SHIFT) & RXQ_CFG_RD_PREF_MIN_IPG_MASK) |
2775 	    RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);
2776 
2777 	/* Configure TxQ. */
2778 	CSR_WRITE_4(sc, AGE_TXQ_CFG,
2779 	    ((TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
2780 	    TXQ_CFG_TPD_BURST_MASK) |
2781 	    ((TXQ_CFG_TX_FIFO_BURST_DEFAULT << TXQ_CFG_TX_FIFO_BURST_SHIFT) &
2782 	    TXQ_CFG_TX_FIFO_BURST_MASK) |
2783 	    ((TXQ_CFG_TPD_FETCH_DEFAULT <<
2784 	    TXQ_CFG_TPD_FETCH_THRESH_SHIFT) & TXQ_CFG_TPD_FETCH_THRESH_MASK) |
2785 	    TXQ_CFG_ENB);
2786 
2787 	CSR_WRITE_4(sc, AGE_TX_JUMBO_TPD_TH_IPG,
2788 	    (((fsize / sizeof(uint64_t) << TX_JUMBO_TPD_TH_SHIFT)) &
2789 	    TX_JUMBO_TPD_TH_MASK) |
2790 	    ((TX_JUMBO_TPD_IPG_DEFAULT << TX_JUMBO_TPD_IPG_SHIFT) &
2791 	    TX_JUMBO_TPD_IPG_MASK));
2792 	/* Configure DMA parameters. */
2793 	CSR_WRITE_4(sc, AGE_DMA_CFG,
2794 	    DMA_CFG_ENH_ORDER | DMA_CFG_RCB_64 |
2795 	    sc->age_dma_rd_burst | DMA_CFG_RD_ENB |
2796 	    sc->age_dma_wr_burst | DMA_CFG_WR_ENB);
2797 
2798 	/* Configure CMB DMA write threshold. */
2799 	CSR_WRITE_4(sc, AGE_CMB_WR_THRESH,
2800 	    ((CMB_WR_THRESH_RRD_DEFAULT << CMB_WR_THRESH_RRD_SHIFT) &
2801 	    CMB_WR_THRESH_RRD_MASK) |
2802 	    ((CMB_WR_THRESH_TPD_DEFAULT << CMB_WR_THRESH_TPD_SHIFT) &
2803 	    CMB_WR_THRESH_TPD_MASK));
2804 
2805 	/* Set CMB/SMB timer and enable them. */
2806 	CSR_WRITE_4(sc, AGE_CMB_WR_TIMER,
2807 	    ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) |
2808 	    ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK));
2809 	/* Request SMB updates every second. */
2810 	CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000));
2811 	CSR_WRITE_4(sc, AGE_CSMB_CTRL, CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB);
2812 
2813 	/*
2814 	 * Disable all WOL bits, as WOL can interfere with normal Rx
2815 	 * operation.
2816 	 */
2817 	CSR_WRITE_4(sc, AGE_WOL_CFG, 0);
2818 
2819 	/*
2820 	 * Configure Tx/Rx MACs.
2821 	 *  - Auto-padding for short frames.
2822 	 *  - Enable CRC generation.
2823 	 *  Start with full-duplex/1000Mbps media. The MAC is
2824 	 *  reconfigured after link establishment.
2825 	 */
2826 	CSR_WRITE_4(sc, AGE_MAC_CFG,
2827 	    MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD |
2828 	    MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 |
2829 	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
2830 	    MAC_CFG_PREAMBLE_MASK));
2831 	/* Set up the receive filter. */
2832 	age_rxfilter(sc);
2833 	age_rxvlan(sc);
2834 
2835 	reg = CSR_READ_4(sc, AGE_MAC_CFG);
2836 	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2837 		reg |= MAC_CFG_RXCSUM_ENB;
2838 
2839 	/* Ack all pending interrupts and clear it. */
2840 	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
2841 	CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS);
2842 
2843 	/* Finally enable Tx/Rx MAC. */
2844 	CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
2845 
2846 	sc->age_flags &= ~AGE_FLAG_LINK;
2847 	/* Switch to the current media. */
2848 	mii_mediachg(mii);
2849 
2850 	callout_reset(&sc->age_tick_ch, hz, age_tick, sc);
2851 
2852 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2853 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2854 }
2855 
2856 static void
2857 age_stop(struct age_softc *sc)
2858 {
2859 	struct ifnet *ifp;
2860 	struct age_txdesc *txd;
2861 	struct age_rxdesc *rxd;
2862 	uint32_t reg;
2863 	int i;
2864 
2865 	AGE_LOCK_ASSERT(sc);
2866 	/*
2867 	 * Mark the interface down and cancel the watchdog timer.
2868 	 */
2869 	ifp = sc->age_ifp;
2870 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2871 	sc->age_flags &= ~AGE_FLAG_LINK;
2872 	callout_stop(&sc->age_tick_ch);
2873 	sc->age_watchdog_timer = 0;
2874 
2875 	/*
2876 	 * Disable interrupts.
2877 	 */
2878 	CSR_WRITE_4(sc, AGE_INTR_MASK, 0);
2879 	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF);
2880 	/* Stop CMB/SMB updates. */
2881 	CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0);
2882 	/* Stop Rx/Tx MAC. */
2883 	age_stop_rxmac(sc);
2884 	age_stop_txmac(sc);
2885 	/* Stop DMA. */
2886 	CSR_WRITE_4(sc, AGE_DMA_CFG,
2887 	    CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB));
2888 	/* Stop TxQ/RxQ. */
2889 	CSR_WRITE_4(sc, AGE_TXQ_CFG,
2890 	    CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB);
2891 	CSR_WRITE_4(sc, AGE_RXQ_CFG,
2892 	    CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB);
2893 	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2894 		if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
2895 			break;
2896 		DELAY(10);
2897 	}
2898 	if (i == 0)
2899 		device_printf(sc->age_dev,
2900 		    "stopping Rx/Tx MACs timed out (0x%08x)!\n", reg);
2901 
2902 	/* Reclaim Rx buffers that have been processed. */
2903 	if (sc->age_cdata.age_rxhead != NULL)
2904 		m_freem(sc->age_cdata.age_rxhead);
2905 	AGE_RXCHAIN_RESET(sc);
2906 	/*
2907 	 * Free RX and TX mbufs still in the queues.
2908 	 */
2909 	for (i = 0; i < AGE_RX_RING_CNT; i++) {
2910 		rxd = &sc->age_cdata.age_rxdesc[i];
2911 		if (rxd->rx_m != NULL) {
2912 			bus_dmamap_sync(sc->age_cdata.age_rx_tag,
2913 			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
2914 			bus_dmamap_unload(sc->age_cdata.age_rx_tag,
2915 			    rxd->rx_dmamap);
2916 			m_freem(rxd->rx_m);
2917 			rxd->rx_m = NULL;
2918 		}
2919 	}
2920 	for (i = 0; i < AGE_TX_RING_CNT; i++) {
2921 		txd = &sc->age_cdata.age_txdesc[i];
2922 		if (txd->tx_m != NULL) {
2923 			bus_dmamap_sync(sc->age_cdata.age_tx_tag,
2924 			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2925 			bus_dmamap_unload(sc->age_cdata.age_tx_tag,
2926 			    txd->tx_dmamap);
2927 			m_freem(txd->tx_m);
2928 			txd->tx_m = NULL;
2929 		}
2930 	}
2931 }
2932 
2933 static void
2934 age_stop_txmac(struct age_softc *sc)
2935 {
2936 	uint32_t reg;
2937 	int i;
2938 
2939 	AGE_LOCK_ASSERT(sc);
2940 
2941 	reg = CSR_READ_4(sc, AGE_MAC_CFG);
2942 	if ((reg & MAC_CFG_TX_ENB) != 0) {
2943 		reg &= ~MAC_CFG_TX_ENB;
2944 		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2945 	}
2946 	/* Stop Tx DMA engine. */
2947 	reg = CSR_READ_4(sc, AGE_DMA_CFG);
2948 	if ((reg & DMA_CFG_RD_ENB) != 0) {
2949 		reg &= ~DMA_CFG_RD_ENB;
2950 		CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
2951 	}
2952 	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2953 		if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
2954 		    (IDLE_STATUS_TXMAC | IDLE_STATUS_DMARD)) == 0)
2955 			break;
2956 		DELAY(10);
2957 	}
2958 	if (i == 0)
2959 		device_printf(sc->age_dev, "stopping Tx MAC timed out!\n");
2960 }
2961 
2962 static void
2963 age_stop_rxmac(struct age_softc *sc)
2964 {
2965 	uint32_t reg;
2966 	int i;
2967 
2968 	AGE_LOCK_ASSERT(sc);
2969 
2970 	reg = CSR_READ_4(sc, AGE_MAC_CFG);
2971 	if ((reg & MAC_CFG_RX_ENB) != 0) {
2972 		reg &= ~MAC_CFG_RX_ENB;
2973 		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2974 	}
2975 	/* Stop Rx DMA engine. */
2976 	reg = CSR_READ_4(sc, AGE_DMA_CFG);
2977 	if ((reg & DMA_CFG_WR_ENB) != 0) {
2978 		reg &= ~DMA_CFG_WR_ENB;
2979 		CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
2980 	}
2981 	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2982 		if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
2983 		    (IDLE_STATUS_RXMAC | IDLE_STATUS_DMAWR)) == 0)
2984 			break;
2985 		DELAY(10);
2986 	}
2987 	if (i == 0)
2988 		device_printf(sc->age_dev, "stopping Rx MAC timed out!\n");
2989 }
2990 
2991 static void
2992 age_init_tx_ring(struct age_softc *sc)
2993 {
2994 	struct age_ring_data *rd;
2995 	struct age_txdesc *txd;
2996 	int i;
2997 
2998 	AGE_LOCK_ASSERT(sc);
2999 
3000 	sc->age_cdata.age_tx_prod = 0;
3001 	sc->age_cdata.age_tx_cons = 0;
3002 	sc->age_cdata.age_tx_cnt = 0;
3003 
3004 	rd = &sc->age_rdata;
3005 	bzero(rd->age_tx_ring, AGE_TX_RING_SZ);
3006 	for (i = 0; i < AGE_TX_RING_CNT; i++) {
3007 		txd = &sc->age_cdata.age_txdesc[i];
3008 		txd->tx_desc = &rd->age_tx_ring[i];
3009 		txd->tx_m = NULL;
3010 	}
3011 
3012 	bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag,
3013 	    sc->age_cdata.age_tx_ring_map,
3014 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3015 }
3016 
3017 static int
3018 age_init_rx_ring(struct age_softc *sc)
3019 {
3020 	struct age_ring_data *rd;
3021 	struct age_rxdesc *rxd;
3022 	int i;
3023 
3024 	AGE_LOCK_ASSERT(sc);
3025 
3026 	sc->age_cdata.age_rx_cons = AGE_RX_RING_CNT - 1;
3027 	sc->age_morework = 0;
3028 	rd = &sc->age_rdata;
3029 	bzero(rd->age_rx_ring, AGE_RX_RING_SZ);
3030 	for (i = 0; i < AGE_RX_RING_CNT; i++) {
3031 		rxd = &sc->age_cdata.age_rxdesc[i];
3032 		rxd->rx_m = NULL;
3033 		rxd->rx_desc = &rd->age_rx_ring[i];
3034 		if (age_newbuf(sc, rxd) != 0)
3035 			return (ENOBUFS);
3036 	}
3037 
3038 	bus_dmamap_sync(sc->age_cdata.age_rx_ring_tag,
3039 	    sc->age_cdata.age_rx_ring_map, BUS_DMASYNC_PREWRITE);
3040 
3041 	return (0);
3042 }
3043 
3044 static void
3045 age_init_rr_ring(struct age_softc *sc)
3046 {
3047 	struct age_ring_data *rd;
3048 
3049 	AGE_LOCK_ASSERT(sc);
3050 
3051 	sc->age_cdata.age_rr_cons = 0;
3052 	AGE_RXCHAIN_RESET(sc);
3053 
3054 	rd = &sc->age_rdata;
3055 	bzero(rd->age_rr_ring, AGE_RR_RING_SZ);
3056 	bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag,
3057 	    sc->age_cdata.age_rr_ring_map,
3058 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3059 }
3060 
3061 static void
3062 age_init_cmb_block(struct age_softc *sc)
3063 {
3064 	struct age_ring_data *rd;
3065 
3066 	AGE_LOCK_ASSERT(sc);
3067 
3068 	rd = &sc->age_rdata;
3069 	bzero(rd->age_cmb_block, AGE_CMB_BLOCK_SZ);
3070 	bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag,
3071 	    sc->age_cdata.age_cmb_block_map,
3072 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3073 }
3074 
3075 static void
3076 age_init_smb_block(struct age_softc *sc)
3077 {
3078 	struct age_ring_data *rd;
3079 
3080 	AGE_LOCK_ASSERT(sc);
3081 
3082 	rd = &sc->age_rdata;
3083 	bzero(rd->age_smb_block, AGE_SMB_BLOCK_SZ);
3084 	bus_dmamap_sync(sc->age_cdata.age_smb_block_tag,
3085 	    sc->age_cdata.age_smb_block_map,
3086 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3087 }
3088 
3089 static int
3090 age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd)
3091 {
3092 	struct rx_desc *desc;
3093 	struct mbuf *m;
3094 	bus_dma_segment_t segs[1];
3095 	bus_dmamap_t map;
3096 	int nsegs;
3097 
3098 	AGE_LOCK_ASSERT(sc);
3099 
3100 	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3101 	if (m == NULL)
3102 		return (ENOBUFS);
3103 	m->m_len = m->m_pkthdr.len = MCLBYTES;
3104 #ifndef __NO_STRICT_ALIGNMENT
3105 	m_adj(m, AGE_RX_BUF_ALIGN);
3106 #endif
3107 
3108 	if (bus_dmamap_load_mbuf_sg(sc->age_cdata.age_rx_tag,
3109 	    sc->age_cdata.age_rx_sparemap, m, segs, &nsegs, 0) != 0) {
3110 		m_freem(m);
3111 		return (ENOBUFS);
3112 	}
3113 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
3114 
3115 	if (rxd->rx_m != NULL) {
3116 		bus_dmamap_sync(sc->age_cdata.age_rx_tag, rxd->rx_dmamap,
3117 		    BUS_DMASYNC_POSTREAD);
3118 		bus_dmamap_unload(sc->age_cdata.age_rx_tag, rxd->rx_dmamap);
3119 	}
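	/*
	 * The new mbuf was loaded into the spare map above; swap it
	 * with the descriptor's map so the old map becomes the spare
	 * for the next replenish.
	 */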
3120 	map = rxd->rx_dmamap;
3121 	rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap;
3122 	sc->age_cdata.age_rx_sparemap = map;
3123 	bus_dmamap_sync(sc->age_cdata.age_rx_tag, rxd->rx_dmamap,
3124 	    BUS_DMASYNC_PREREAD);
3125 	rxd->rx_m = m;
3126 
3127 	desc = rxd->rx_desc;
3128 	desc->addr = htole64(segs[0].ds_addr);
3129 	desc->len = htole32((segs[0].ds_len & AGE_RD_LEN_MASK) <<
3130 	    AGE_RD_LEN_SHIFT);
3131 	return (0);
3132 }
3133 
3134 static void
3135 age_rxvlan(struct age_softc *sc)
3136 {
3137 	struct ifnet *ifp;
3138 	uint32_t reg;
3139 
3140 	AGE_LOCK_ASSERT(sc);
3141 
3142 	ifp = sc->age_ifp;
3143 	reg = CSR_READ_4(sc, AGE_MAC_CFG);
3144 	reg &= ~MAC_CFG_VLAN_TAG_STRIP;
3145 	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3146 		reg |= MAC_CFG_VLAN_TAG_STRIP;
3147 	CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
3148 }
3149 
3150 static void
3151 age_rxfilter(struct age_softc *sc)
3152 {
3153 	struct ifnet *ifp;
3154 	struct ifmultiaddr *ifma;
3155 	uint32_t crc;
3156 	uint32_t mchash[2];
3157 	uint32_t rxcfg;
3158 
3159 	AGE_LOCK_ASSERT(sc);
3160 
3161 	ifp = sc->age_ifp;
3162 
3163 	rxcfg = CSR_READ_4(sc, AGE_MAC_CFG);
3164 	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
3165 	if ((ifp->if_flags & IFF_BROADCAST) != 0)
3166 		rxcfg |= MAC_CFG_BCAST;
3167 	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
3168 		if ((ifp->if_flags & IFF_PROMISC) != 0)
3169 			rxcfg |= MAC_CFG_PROMISC;
3170 		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
3171 			rxcfg |= MAC_CFG_ALLMULTI;
3172 		CSR_WRITE_4(sc, AGE_MAR0, 0xFFFFFFFF);
3173 		CSR_WRITE_4(sc, AGE_MAR1, 0xFFFFFFFF);
3174 		CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
3175 		return;
3176 	}
3177 
3178 	/* Program new filter. */
3179 	bzero(mchash, sizeof(mchash));
3180 
3181 	if_maddr_rlock(ifp);
3182 	TAILQ_FOREACH(ifma, &sc->age_ifp->if_multiaddrs, ifma_link) {
3183 		if (ifma->ifma_addr->sa_family != AF_LINK)
3184 			continue;
3185 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
3186 		    ifma->ifma_addr), ETHER_ADDR_LEN);
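		/*
		 * The top bit of the big-endian CRC selects MAR0/MAR1;
		 * the next five bits select the bit within the register.
		 */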
3187 		mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
3188 	}
3189 	if_maddr_runlock(ifp);
3190 
3191 	CSR_WRITE_4(sc, AGE_MAR0, mchash[0]);
3192 	CSR_WRITE_4(sc, AGE_MAR1, mchash[1]);
3193 	CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
3194 }
3195 
3196 static int
3197 sysctl_age_stats(SYSCTL_HANDLER_ARGS)
3198 {
3199 	struct age_softc *sc;
3200 	struct age_stats *stats;
3201 	int error, result;
3202 
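	/* Writing 1 to this sysctl node dumps the statistics below. */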
3203 	result = -1;
3204 	error = sysctl_handle_int(oidp, &result, 0, req);
3205 
3206 	if (error != 0 || req->newptr == NULL)
3207 		return (error);
3208 
3209 	if (result != 1)
3210 		return (error);
3211 
3212 	sc = (struct age_softc *)arg1;
3213 	stats = &sc->age_stat;
3214 	printf("%s statistics:\n", device_get_nameunit(sc->age_dev));
3215 	printf("Transmit good frames : %ju\n",
3216 	    (uintmax_t)stats->tx_frames);
3217 	printf("Transmit good broadcast frames : %ju\n",
3218 	    (uintmax_t)stats->tx_bcast_frames);
3219 	printf("Transmit good multicast frames : %ju\n",
3220 	    (uintmax_t)stats->tx_mcast_frames);
3221 	printf("Transmit pause control frames : %u\n",
3222 	    stats->tx_pause_frames);
3223 	printf("Transmit control frames : %u\n",
3224 	    stats->tx_control_frames);
3225 	printf("Transmit frames with excessive deferrals : %u\n",
3226 	    stats->tx_excess_defer);
3227 	printf("Transmit deferrals : %u\n",
3228 	    stats->tx_deferred);
3229 	printf("Transmit good octets : %ju\n",
3230 	    (uintmax_t)stats->tx_bytes);
3231 	printf("Transmit good broadcast octets : %ju\n",
3232 	    (uintmax_t)stats->tx_bcast_bytes);
3233 	printf("Transmit good multicast octets : %ju\n",
3234 	    (uintmax_t)stats->tx_mcast_bytes);
3235 	printf("Transmit frames 64 bytes : %ju\n",
3236 	    (uintmax_t)stats->tx_pkts_64);
3237 	printf("Transmit frames 65 to 127 bytes : %ju\n",
3238 	    (uintmax_t)stats->tx_pkts_65_127);
3239 	printf("Transmit frames 128 to 255 bytes : %ju\n",
3240 	    (uintmax_t)stats->tx_pkts_128_255);
3241 	printf("Transmit frames 256 to 511 bytes : %ju\n",
3242 	    (uintmax_t)stats->tx_pkts_256_511);
3243 	printf("Transmit frames 512 to 1024 bytes : %ju\n",
3244 	    (uintmax_t)stats->tx_pkts_512_1023);
3245 	printf("Transmit frames 1024 to 1518 bytes : %ju\n",
3246 	    (uintmax_t)stats->tx_pkts_1024_1518);
3247 	printf("Transmit frames 1519 to MTU bytes : %ju\n",
3248 	    (uintmax_t)stats->tx_pkts_1519_max);
3249 	printf("Transmit single collisions : %u\n",
3250 	    stats->tx_single_colls);
3251 	printf("Transmit multiple collisions : %u\n",
3252 	    stats->tx_multi_colls);
3253 	printf("Transmit late collisions : %u\n",
3254 	    stats->tx_late_colls);
3255 	printf("Transmit abort due to excessive collisions : %u\n",
3256 	    stats->tx_excess_colls);
3257 	printf("Transmit underruns due to FIFO underruns : %u\n",
3258 	    stats->tx_underrun);
3259 	printf("Transmit descriptor write-back errors : %u\n",
3260 	    stats->tx_desc_underrun);
3261 	printf("Transmit frames with length mismatched frame size : %u\n",
3262 	    stats->tx_lenerrs);
3263 	printf("Transmit frames truncated due to MTU size : %u\n",
3264 	    stats->tx_pkts_truncated);
3265 
3266 	printf("Receive good frames : %ju\n",
3267 	    (uintmax_t)stats->rx_frames);
3268 	printf("Receive good broadcast frames : %ju\n",
3269 	    (uintmax_t)stats->rx_bcast_frames);
3270 	printf("Receive good multicast frames : %ju\n",
3271 	    (uintmax_t)stats->rx_mcast_frames);
3272 	printf("Receive pause control frames : %u\n",
3273 	    stats->rx_pause_frames);
3274 	printf("Receive control frames : %u\n",
3275 	    stats->rx_control_frames);
3276 	printf("Receive CRC errors : %u\n",
3277 	    stats->rx_crcerrs);
3278 	printf("Receive frames with length errors : %u\n",
3279 	    stats->rx_lenerrs);
3280 	printf("Receive good octets : %ju\n",
3281 	    (uintmax_t)stats->rx_bytes);
3282 	printf("Receive good broadcast octets : %ju\n",
3283 	    (uintmax_t)stats->rx_bcast_bytes);
3284 	printf("Receive good multicast octets : %ju\n",
3285 	    (uintmax_t)stats->rx_mcast_bytes);
3286 	printf("Receive frames too short : %u\n",
3287 	    stats->rx_runts);
3288 	printf("Receive fragmented frames : %ju\n",
3289 	    (uintmax_t)stats->rx_fragments);
3290 	printf("Receive frames 64 bytes : %ju\n",
3291 	    (uintmax_t)stats->rx_pkts_64);
3292 	printf("Receive frames 65 to 127 bytes : %ju\n",
3293 	    (uintmax_t)stats->rx_pkts_65_127);
3294 	printf("Receive frames 128 to 255 bytes : %ju\n",
3295 	    (uintmax_t)stats->rx_pkts_128_255);
3296 	printf("Receive frames 256 to 511 bytes : %ju\n",
3297 	    (uintmax_t)stats->rx_pkts_256_511);
3298 	printf("Receive frames 512 to 1024 bytes : %ju\n",
3299 	    (uintmax_t)stats->rx_pkts_512_1023);
3300 	printf("Receive frames 1024 to 1518 bytes : %ju\n",
3301 	    (uintmax_t)stats->rx_pkts_1024_1518);
3302 	printf("Receive frames 1519 to MTU bytes : %ju\n",
3303 	    (uintmax_t)stats->rx_pkts_1519_max);
3304 	printf("Receive frames too long : %ju\n",
3305 	    (uintmax_t)stats->rx_pkts_truncated);
3306 	printf("Receive frames with FIFO overflow : %u\n",
3307 	    stats->rx_fifo_oflows);
3308 	printf("Receive frames with return descriptor overflow : %u\n",
3309 	    stats->rx_desc_oflows);
3310 	printf("Receive frames with alignment errors : %u\n",
3311 	    stats->rx_alignerrs);
3312 	printf("Receive frames dropped due to address filtering : %ju\n",
3313 	    (uintmax_t)stats->rx_pkts_filtered);
3314 
3315 	return (error);
3316 }
3317 
3318 static int
3319 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3320 {
3321 	int error, value;
3322 
3323 	if (arg1 == NULL)
3324 		return (EINVAL);
3325 	value = *(int *)arg1;
3326 	error = sysctl_handle_int(oidp, &value, 0, req);
3327 	if (error || req->newptr == NULL)
3328 		return (error);
3329 	if (value < low || value > high)
3330 		return (EINVAL);
3331 	*(int *)arg1 = value;
3332 
3333 	return (0);
3334 }
3335 
3336 static int
3337 sysctl_hw_age_proc_limit(SYSCTL_HANDLER_ARGS)
3338 {
3339 	return (sysctl_int_range(oidp, arg1, arg2, req,
3340 	    AGE_PROC_MIN, AGE_PROC_MAX));
3341 }
3342 
3343 static int
3344 sysctl_hw_age_int_mod(SYSCTL_HANDLER_ARGS)
3345 {
3346 
3347 	return (sysctl_int_range(oidp, arg1, arg2, req, AGE_IM_TIMER_MIN,
3348 	    AGE_IM_TIMER_MAX));
3349 }
3350