xref: /freebsd/sys/dev/alc/if_alc.c (revision 5861f9665471e98e544f6fa3ce73c4912229ff82)
1 /*-
2  * Copyright (c) 2009, Pyun YongHyeon <yongari@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 /* Driver for Atheros AR8131/AR8132 PCIe Ethernet. */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/bus.h>
36 #include <sys/endian.h>
37 #include <sys/kernel.h>
38 #include <sys/lock.h>
39 #include <sys/malloc.h>
40 #include <sys/mbuf.h>
41 #include <sys/module.h>
42 #include <sys/mutex.h>
43 #include <sys/rman.h>
44 #include <sys/queue.h>
45 #include <sys/socket.h>
46 #include <sys/sockio.h>
47 #include <sys/sysctl.h>
48 #include <sys/taskqueue.h>
49 
50 #include <net/bpf.h>
51 #include <net/if.h>
52 #include <net/if_arp.h>
53 #include <net/ethernet.h>
54 #include <net/if_dl.h>
55 #include <net/if_llc.h>
56 #include <net/if_media.h>
57 #include <net/if_types.h>
58 #include <net/if_vlan_var.h>
59 
60 #include <netinet/in.h>
61 #include <netinet/in_systm.h>
62 #include <netinet/ip.h>
63 #include <netinet/tcp.h>
64 
65 #include <dev/mii/mii.h>
66 #include <dev/mii/miivar.h>
67 
68 #include <dev/pci/pcireg.h>
69 #include <dev/pci/pcivar.h>
70 
71 #include <machine/atomic.h>
72 #include <machine/bus.h>
73 #include <machine/in_cksum.h>
74 
75 #include <dev/alc/if_alcreg.h>
76 #include <dev/alc/if_alcvar.h>
77 
78 /* "device miibus" required.  See GENERIC if you get errors here. */
79 #include "miibus_if.h"
80 #undef ALC_USE_CUSTOM_CSUM
81 
82 #ifdef ALC_USE_CUSTOM_CSUM
83 #define	ALC_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)
84 #else
85 #define	ALC_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
86 #endif
87 #ifndef	IFCAP_VLAN_HWTSO
88 #define	IFCAP_VLAN_HWTSO	0
89 #endif
90 
91 MODULE_DEPEND(alc, pci, 1, 1, 1);
92 MODULE_DEPEND(alc, ether, 1, 1, 1);
93 MODULE_DEPEND(alc, miibus, 1, 1, 1);
94 
95 /* Tunables. */
96 static int msi_disable = 0;
97 static int msix_disable = 0;
98 TUNABLE_INT("hw.alc.msi_disable", &msi_disable);
99 TUNABLE_INT("hw.alc.msix_disable", &msix_disable);
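/*
 * Both tunables are read from the kernel environment at module load
 * time, so they can be set from loader.conf(5); a usage sketch:
 *
 *	hw.alc.msi_disable="1"
 *	hw.alc.msix_disable="1"
 */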
100 
101 /*
102  * Devices supported by this driver.
103  */
104 static struct alc_dev {
105 	uint16_t	alc_vendorid;
106 	uint16_t	alc_deviceid;
107 	const char	*alc_name;
108 } alc_devs[] = {
109 	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8131,
110 		"Atheros AR8131 PCIe Gigabit Ethernet" },
111 	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8132,
112 		"Atheros AR8132 PCIe Fast Ethernet" }
113 };
114 
115 static void	alc_aspm(struct alc_softc *);
116 static int	alc_attach(device_t);
117 static int	alc_check_boundary(struct alc_softc *);
118 static int	alc_detach(device_t);
119 static void	alc_disable_l0s_l1(struct alc_softc *);
120 static int	alc_dma_alloc(struct alc_softc *);
121 static void	alc_dma_free(struct alc_softc *);
122 static void	alc_dmamap_cb(void *, bus_dma_segment_t *, int, int);
123 static int	alc_encap(struct alc_softc *, struct mbuf **);
124 #ifndef __NO_STRICT_ALIGNMENT
125 static struct mbuf *
126 		alc_fixup_rx(struct ifnet *, struct mbuf *);
127 #endif
128 static void	alc_get_macaddr(struct alc_softc *);
129 static void	alc_init(void *);
130 static void	alc_init_cmb(struct alc_softc *);
131 static void	alc_init_locked(struct alc_softc *);
132 static void	alc_init_rr_ring(struct alc_softc *);
133 static int	alc_init_rx_ring(struct alc_softc *);
134 static void	alc_init_smb(struct alc_softc *);
135 static void	alc_init_tx_ring(struct alc_softc *);
136 static void	alc_int_task(void *, int);
137 static int	alc_intr(void *);
138 static int	alc_ioctl(struct ifnet *, u_long, caddr_t);
139 static void	alc_mac_config(struct alc_softc *);
140 static int	alc_miibus_readreg(device_t, int, int);
141 static void	alc_miibus_statchg(device_t);
142 static int	alc_miibus_writereg(device_t, int, int, int);
143 static int	alc_mediachange(struct ifnet *);
144 static void	alc_mediastatus(struct ifnet *, struct ifmediareq *);
145 static int	alc_newbuf(struct alc_softc *, struct alc_rxdesc *);
146 static void	alc_phy_down(struct alc_softc *);
147 static void	alc_phy_reset(struct alc_softc *);
148 static int	alc_probe(device_t);
149 static void	alc_reset(struct alc_softc *);
150 static int	alc_resume(device_t);
151 static void	alc_rxeof(struct alc_softc *, struct rx_rdesc *);
152 static int	alc_rxintr(struct alc_softc *, int);
153 static void	alc_rxfilter(struct alc_softc *);
154 static void	alc_rxvlan(struct alc_softc *);
155 static void	alc_setlinkspeed(struct alc_softc *);
156 static void	alc_setwol(struct alc_softc *);
157 static int	alc_shutdown(device_t);
158 static void	alc_start(struct ifnet *);
159 static void	alc_start_queue(struct alc_softc *);
160 static void	alc_stats_clear(struct alc_softc *);
161 static void	alc_stats_update(struct alc_softc *);
162 static void	alc_stop(struct alc_softc *);
163 static void	alc_stop_mac(struct alc_softc *);
164 static void	alc_stop_queue(struct alc_softc *);
165 static int	alc_suspend(device_t);
166 static void	alc_sysctl_node(struct alc_softc *);
167 static void	alc_tick(void *);
168 static void	alc_tx_task(void *, int);
169 static void	alc_txeof(struct alc_softc *);
170 static void	alc_watchdog(struct alc_softc *);
171 static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
172 static int	sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS);
173 static int	sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS);
174 
175 static device_method_t alc_methods[] = {
176 	/* Device interface. */
177 	DEVMETHOD(device_probe,		alc_probe),
178 	DEVMETHOD(device_attach,	alc_attach),
179 	DEVMETHOD(device_detach,	alc_detach),
180 	DEVMETHOD(device_shutdown,	alc_shutdown),
181 	DEVMETHOD(device_suspend,	alc_suspend),
182 	DEVMETHOD(device_resume,	alc_resume),
183 
184 	/* MII interface. */
185 	DEVMETHOD(miibus_readreg,	alc_miibus_readreg),
186 	DEVMETHOD(miibus_writereg,	alc_miibus_writereg),
187 	DEVMETHOD(miibus_statchg,	alc_miibus_statchg),
188 
189 	{ NULL, NULL }
190 };
191 
192 static driver_t alc_driver = {
193 	"alc",
194 	alc_methods,
195 	sizeof(struct alc_softc)
196 };
197 
198 static devclass_t alc_devclass;
199 
200 DRIVER_MODULE(alc, pci, alc_driver, alc_devclass, 0, 0);
201 DRIVER_MODULE(miibus, alc, miibus_driver, miibus_devclass, 0, 0);
202 
203 static struct resource_spec alc_res_spec_mem[] = {
204 	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
205 	{ -1,			0,		0 }
206 };
207 
208 static struct resource_spec alc_irq_spec_legacy[] = {
209 	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
210 	{ -1,			0,		0 }
211 };
212 
213 static struct resource_spec alc_irq_spec_msi[] = {
214 	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
215 	{ -1,			0,		0 }
216 };
217 
218 static struct resource_spec alc_irq_spec_msix[] = {
219 	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
220 	{ -1,			0,		0 }
221 };
222 
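/*
 * Lookup table for the 3-bit PCIe maximum payload/read request size
 * encodings read out of the device control register below: entry n
 * corresponds to 128 << n bytes, with the trailing 0 covering a
 * reserved encoding.
 */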
223 static uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0 };
224 
225 static int
226 alc_miibus_readreg(device_t dev, int phy, int reg)
227 {
228 	struct alc_softc *sc;
229 	uint32_t v;
230 	int i;
231 
232 	sc = device_get_softc(dev);
233 
234 	if (phy != sc->alc_phyaddr)
235 		return (0);
236 
237 	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
238 	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
239 	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
240 		DELAY(5);
241 		v = CSR_READ_4(sc, ALC_MDIO);
242 		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
243 			break;
244 	}
245 
246 	if (i == 0) {
247 		device_printf(sc->alc_dev, "phy read timeout : %d\n", reg);
248 		return (0);
249 	}
250 
251 	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
252 }
253 
254 static int
255 alc_miibus_writereg(device_t dev, int phy, int reg, int val)
256 {
257 	struct alc_softc *sc;
258 	uint32_t v;
259 	int i;
260 
261 	sc = device_get_softc(dev);
262 
263 	if (phy != sc->alc_phyaddr)
264 		return (0);
265 
266 	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
267 	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
268 	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
269 	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
270 		DELAY(5);
271 		v = CSR_READ_4(sc, ALC_MDIO);
272 		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
273 			break;
274 	}
275 
276 	if (i == 0)
277 		device_printf(sc->alc_dev, "phy write timeout : %d\n", reg);
278 
279 	return (0);
280 }
281 
282 static void
283 alc_miibus_statchg(device_t dev)
284 {
285 	struct alc_softc *sc;
286 	struct mii_data *mii;
287 	struct ifnet *ifp;
288 	uint32_t reg;
289 
290 	sc = device_get_softc(dev);
291 
292 	mii = device_get_softc(sc->alc_miibus);
293 	ifp = sc->alc_ifp;
294 	if (mii == NULL || ifp == NULL ||
295 	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
296 		return;
297 
298 	sc->alc_flags &= ~ALC_FLAG_LINK;
299 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
300 	    (IFM_ACTIVE | IFM_AVALID)) {
301 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
302 		case IFM_10_T:
303 		case IFM_100_TX:
304 			sc->alc_flags |= ALC_FLAG_LINK;
305 			break;
306 		case IFM_1000_T:
307 			if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
308 				sc->alc_flags |= ALC_FLAG_LINK;
309 			break;
310 		default:
311 			break;
312 		}
313 	}
314 	alc_stop_queue(sc);
315 	/* Stop Rx/Tx MACs. */
316 	alc_stop_mac(sc);
317 
318 	/* Program MACs with resolved speed/duplex/flow-control. */
319 	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
320 		alc_start_queue(sc);
321 		alc_mac_config(sc);
322 		/* Re-enable Tx/Rx MACs. */
323 		reg = CSR_READ_4(sc, ALC_MAC_CFG);
324 		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
325 		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
326 	}
327 	alc_aspm(sc);
328 }
329 
330 static void
331 alc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
332 {
333 	struct alc_softc *sc;
334 	struct mii_data *mii;
335 
336 	sc = ifp->if_softc;
337 	ALC_LOCK(sc);
338 	if ((ifp->if_flags & IFF_UP) == 0) {
339 		ALC_UNLOCK(sc);
340 		return;
341 	}
342 	mii = device_get_softc(sc->alc_miibus);
343 
344 	mii_pollstat(mii);
345 	ALC_UNLOCK(sc);
346 	ifmr->ifm_status = mii->mii_media_status;
347 	ifmr->ifm_active = mii->mii_media_active;
348 }
349 
350 static int
351 alc_mediachange(struct ifnet *ifp)
352 {
353 	struct alc_softc *sc;
354 	struct mii_data *mii;
355 	struct mii_softc *miisc;
356 	int error;
357 
358 	sc = ifp->if_softc;
359 	ALC_LOCK(sc);
360 	mii = device_get_softc(sc->alc_miibus);
361 	if (mii->mii_instance != 0) {
362 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
363 			mii_phy_reset(miisc);
364 	}
365 	error = mii_mediachg(mii);
366 	ALC_UNLOCK(sc);
367 
368 	return (error);
369 }
370 
371 static int
372 alc_probe(device_t dev)
373 {
374 	struct alc_dev *sp;
375 	int i;
376 	uint16_t vendor, devid;
377 
378 	vendor = pci_get_vendor(dev);
379 	devid = pci_get_device(dev);
380 	sp = alc_devs;
381 	for (i = 0; i < sizeof(alc_devs) / sizeof(alc_devs[0]); i++) {
382 		if (vendor == sp->alc_vendorid &&
383 		    devid == sp->alc_deviceid) {
384 			device_set_desc(dev, sp->alc_name);
385 			return (BUS_PROBE_DEFAULT);
386 		}
387 		sp++;
388 	}
389 
390 	return (ENXIO);
391 }
392 
393 static void
394 alc_get_macaddr(struct alc_softc *sc)
395 {
396 	uint32_t ea[2], opt;
397 	int i;
398 
399 	opt = CSR_READ_4(sc, ALC_OPT_CFG);
400 	if ((CSR_READ_4(sc, ALC_TWSI_DEBUG) & TWSI_DEBUG_DEV_EXIST) != 0) {
401 		/*
402 		 * EEPROM found, let TWSI reload EEPROM configuration.
403 		 * This will set the ethernet address of the controller.
404 		 */
405 		if ((opt & OPT_CFG_CLK_ENB) == 0) {
406 			opt |= OPT_CFG_CLK_ENB;
407 			CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
408 			CSR_READ_4(sc, ALC_OPT_CFG);
409 			DELAY(1000);
410 		}
411 		CSR_WRITE_4(sc, ALC_TWSI_CFG, CSR_READ_4(sc, ALC_TWSI_CFG) |
412 		    TWSI_CFG_SW_LD_START);
413 		for (i = 100; i > 0; i--) {
414 			DELAY(1000);
415 			if ((CSR_READ_4(sc, ALC_TWSI_CFG) &
416 			    TWSI_CFG_SW_LD_START) == 0)
417 				break;
418 		}
419 		if (i == 0)
420 			device_printf(sc->alc_dev,
421 			    "reloading EEPROM timeout!\n");
422 	} else {
423 		if (bootverbose)
424 			device_printf(sc->alc_dev, "EEPROM not found!\n");
425 	}
426 	if ((opt & OPT_CFG_CLK_ENB) != 0) {
427 		opt &= ~OPT_CFG_CLK_ENB;
428 		CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
429 		CSR_READ_4(sc, ALC_OPT_CFG);
430 		DELAY(1000);
431 	}
432 
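	/*
	 * The station address parameter registers hold the address in
	 * big-endian order: PAR1 keeps the two most significant bytes
	 * and PAR0 the remaining four.  For an illustrative address
	 * aa:bb:cc:dd:ee:ff, the reads below would return
	 * PAR1 == 0x0000aabb and PAR0 == 0xccddeeff.
	 */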
433 	ea[0] = CSR_READ_4(sc, ALC_PAR0);
434 	ea[1] = CSR_READ_4(sc, ALC_PAR1);
435 	sc->alc_eaddr[0] = (ea[1] >> 8) & 0xFF;
436 	sc->alc_eaddr[1] = (ea[1] >> 0) & 0xFF;
437 	sc->alc_eaddr[2] = (ea[0] >> 24) & 0xFF;
438 	sc->alc_eaddr[3] = (ea[0] >> 16) & 0xFF;
439 	sc->alc_eaddr[4] = (ea[0] >> 8) & 0xFF;
440 	sc->alc_eaddr[5] = (ea[0] >> 0) & 0xFF;
441 }
442 
443 static void
444 alc_disable_l0s_l1(struct alc_softc *sc)
445 {
446 	uint32_t pmcfg;
447 
448 	/* More magic from the vendor. */
449 	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
450 	pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 |
451 	    PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK |
452 	    PM_CFG_SERDES_PD_EX_L1);
453 	pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB |
454 	    PM_CFG_SERDES_L1_ENB;
455 	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
456 }
457 
458 static void
459 alc_phy_reset(struct alc_softc *sc)
460 {
461 	uint16_t data;
462 
463 	/* Reset magic from Linux. */
464 	CSR_WRITE_2(sc, ALC_GPHY_CFG,
465 	    GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE | GPHY_CFG_SEL_ANA_RESET);
466 	CSR_READ_2(sc, ALC_GPHY_CFG);
467 	DELAY(10 * 1000);
468 
469 	CSR_WRITE_2(sc, ALC_GPHY_CFG,
470 	    GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
471 	    GPHY_CFG_SEL_ANA_RESET);
472 	CSR_READ_2(sc, ALC_GPHY_CFG);
473 	DELAY(10 * 1000);
474 
475 	/* Load DSP codes, vendor magic. */
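	/*
	 * Each DSP write below is an indirect access: the analog/debug
	 * register index is first written to ALC_MII_DBG_ADDR and the
	 * value then to ALC_MII_DBG_DATA, both through the regular MDIO
	 * write routine.
	 */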
476 	data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
477 	    ((1 << ANA_INTERVAL_SEL_TIMER_SHIFT) & ANA_INTERVAL_SEL_TIMER_MASK);
478 	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
479 	    ALC_MII_DBG_ADDR, MII_ANA_CFG18);
480 	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
481 	    ALC_MII_DBG_DATA, data);
482 
483 	data = ((2 << ANA_SERDES_CDR_BW_SHIFT) & ANA_SERDES_CDR_BW_MASK) |
484 	    ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
485 	    ANA_SERDES_EN_LCKDT;
486 	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
487 	    ALC_MII_DBG_ADDR, MII_ANA_CFG5);
488 	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
489 	    ALC_MII_DBG_DATA, data);
490 
491 	data = ((44 << ANA_LONG_CABLE_TH_100_SHIFT) &
492 	    ANA_LONG_CABLE_TH_100_MASK) |
493 	    ((33 << ANA_SHORT_CABLE_TH_100_SHIFT) &
494 	    ANA_SHORT_CABLE_TH_100_MASK) |
495 	    ANA_BP_BAD_LINK_ACCUM | ANA_BP_SMALL_BW;
496 	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
497 	    ALC_MII_DBG_ADDR, MII_ANA_CFG54);
498 	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
499 	    ALC_MII_DBG_DATA, data);
500 
501 	data = ((11 << ANA_IECHO_ADJ_3_SHIFT) & ANA_IECHO_ADJ_3_MASK) |
502 	    ((11 << ANA_IECHO_ADJ_2_SHIFT) & ANA_IECHO_ADJ_2_MASK) |
503 	    ((8 << ANA_IECHO_ADJ_1_SHIFT) & ANA_IECHO_ADJ_1_MASK) |
504 	    ((8 << ANA_IECHO_ADJ_0_SHIFT) & ANA_IECHO_ADJ_0_MASK);
505 	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
506 	    ALC_MII_DBG_ADDR, MII_ANA_CFG4);
507 	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
508 	    ALC_MII_DBG_DATA, data);
509 
510 	data = ((7 << ANA_MANUL_SWICH_ON_SHIFT) & ANA_MANUL_SWICH_ON_MASK) |
511 	    ANA_RESTART_CAL | ANA_MAN_ENABLE | ANA_SEL_HSP | ANA_EN_HB |
512 	    ANA_OEN_125M;
513 	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
514 	    ALC_MII_DBG_ADDR, MII_ANA_CFG0);
515 	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
516 	    ALC_MII_DBG_DATA, data);
517 	DELAY(1000);
518 }
519 
520 static void
521 alc_phy_down(struct alc_softc *sc)
522 {
523 
524 	/* Force PHY down. */
525 	CSR_WRITE_2(sc, ALC_GPHY_CFG,
526 	    GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
527 	    GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ | GPHY_CFG_PWDOWN_HW);
528 	DELAY(1000);
529 }
530 
531 static void
532 alc_aspm(struct alc_softc *sc)
533 {
534 	uint32_t pmcfg;
535 
536 	ALC_LOCK_ASSERT(sc);
537 
538 	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
539 	pmcfg &= ~PM_CFG_SERDES_PD_EX_L1;
540 	pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB;
541 	pmcfg |= PM_CFG_SERDES_L1_ENB;
542 	pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_MASK;
543 	pmcfg |= PM_CFG_MAC_ASPM_CHK;
544 	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
545 		pmcfg |= PM_CFG_SERDES_PLL_L1_ENB;
546 		pmcfg &= ~PM_CFG_CLK_SWH_L1;
547 		pmcfg &= ~PM_CFG_ASPM_L1_ENB;
548 		pmcfg &= ~PM_CFG_ASPM_L0S_ENB;
549 	} else {
550 		pmcfg &= ~PM_CFG_SERDES_PLL_L1_ENB;
551 		pmcfg |= PM_CFG_CLK_SWH_L1;
552 		pmcfg &= ~PM_CFG_ASPM_L1_ENB;
553 		pmcfg &= ~PM_CFG_ASPM_L0S_ENB;
554 	}
555 	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
556 }
557 
558 static int
559 alc_attach(device_t dev)
560 {
561 	struct alc_softc *sc;
562 	struct ifnet *ifp;
563 	char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/L1" };
564 	uint16_t burst;
565 	int base, error, i, msic, msixc, pmc, state;
566 	uint32_t cap, ctl, val;
567 
568 	error = 0;
569 	sc = device_get_softc(dev);
570 	sc->alc_dev = dev;
571 
572 	mtx_init(&sc->alc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
573 	    MTX_DEF);
574 	callout_init_mtx(&sc->alc_tick_ch, &sc->alc_mtx, 0);
575 	TASK_INIT(&sc->alc_int_task, 0, alc_int_task, sc);
576 
577 	/* Map the device. */
578 	pci_enable_busmaster(dev);
579 	sc->alc_res_spec = alc_res_spec_mem;
580 	sc->alc_irq_spec = alc_irq_spec_legacy;
581 	error = bus_alloc_resources(dev, sc->alc_res_spec, sc->alc_res);
582 	if (error != 0) {
583 		device_printf(dev, "cannot allocate memory resources.\n");
584 		goto fail;
585 	}
586 
587 	/* Set PHY address. */
588 	sc->alc_phyaddr = ALC_PHY_ADDR;
589 
590 	/* Initialize DMA parameters. */
591 	sc->alc_dma_rd_burst = 0;
592 	sc->alc_dma_wr_burst = 0;
593 	sc->alc_rcb = DMA_CFG_RCB_64;
594 	if (pci_find_extcap(dev, PCIY_EXPRESS, &base) == 0) {
595 		sc->alc_flags |= ALC_FLAG_PCIE;
596 		burst = CSR_READ_2(sc, base + PCIR_EXPRESS_DEVICE_CTL);
597 		sc->alc_dma_rd_burst =
598 		    (burst & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12;
599 		sc->alc_dma_wr_burst = (burst & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5;
600 		if (bootverbose) {
601 			device_printf(dev, "Read request size : %u bytes.\n",
602 			    alc_dma_burst[sc->alc_dma_rd_burst]);
603 			device_printf(dev, "TLP payload size : %u bytes.\n",
604 			    alc_dma_burst[sc->alc_dma_wr_burst]);
605 		}
606 		/* Clear data link and flow-control protocol error. */
607 		val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV);
608 		val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP);
609 		CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val);
610 		/* Disable ASPM L0S and L1. */
611 		cap = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CAP);
612 		if ((cap & PCIM_LINK_CAP_ASPM) != 0) {
613 			ctl = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CTL);
614 			if ((ctl & 0x08) != 0)
615 				sc->alc_rcb = DMA_CFG_RCB_128;
616 			if (bootverbose)
617 				device_printf(dev, "RCB %u bytes\n",
618 				    sc->alc_rcb == DMA_CFG_RCB_64 ? 64 : 128);
619 			state = ctl & 0x03;
620 			if (bootverbose)
621 				device_printf(sc->alc_dev, "ASPM %s %s\n",
622 				    aspm_state[state],
623 				    state == 0 ? "disabled" : "enabled");
624 			if (state != 0)
625 				alc_disable_l0s_l1(sc);
626 		}
627 	}
628 
629 	/* Reset PHY. */
630 	alc_phy_reset(sc);
631 
632 	/* Reset the ethernet controller. */
633 	alc_reset(sc);
634 
635 	/*
636 	 * One odd thing is that the AR8132 uses the same PHY hardware
637 	 * (F1 gigabit PHY) as the AR8131.  So atphy(4) on the AR8132
638 	 * reports that the PHY supports 1000Mbps, but that's not true:
639 	 * the PHY used in the AR8132 can't establish a gigabit link even
640 	 * though it shows the same PHY model/revision number as the AR8131.
641 	 */
642 	if (pci_get_device(dev) == DEVICEID_ATHEROS_AR8132)
643 		sc->alc_flags |= ALC_FLAG_FASTETHER | ALC_FLAG_JUMBO;
644 	else
645 		sc->alc_flags |= ALC_FLAG_JUMBO | ALC_FLAG_ASPM_MON;
646 	/*
647 	 * It seems that the AR8131/AR8132 has a silicon bug in its SMB
648 	 * logic.  In addition, Atheros said that enabling the SMB wouldn't
649 	 * improve performance.  However, I think it's bad to access lots
650 	 * of registers to extract the MAC statistics.
651 	 */
652 	sc->alc_flags |= ALC_FLAG_SMB_BUG;
653 	/*
654 	 * Don't use the Tx CMB.  It is known to have a silicon bug.
655 	 */
656 	sc->alc_flags |= ALC_FLAG_CMB_BUG;
657 	sc->alc_rev = pci_get_revid(dev);
658 	sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >>
659 	    MASTER_CHIP_REV_SHIFT;
660 	if (bootverbose) {
661 		device_printf(dev, "PCI device revision : 0x%04x\n",
662 		    sc->alc_rev);
663 		device_printf(dev, "Chip id/revision : 0x%04x\n",
664 		    sc->alc_chip_rev);
665 	}
666 	device_printf(dev, "%u Tx FIFO, %u Rx FIFO\n",
667 	    CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8,
668 	    CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8);
669 
670 	/* Allocate IRQ resources. */
671 	msixc = pci_msix_count(dev);
672 	msic = pci_msi_count(dev);
673 	if (bootverbose) {
674 		device_printf(dev, "MSIX count : %d\n", msixc);
675 		device_printf(dev, "MSI count : %d\n", msic);
676 	}
677 	/* Prefer MSIX over MSI. */
678 	if (msix_disable == 0 || msi_disable == 0) {
679 		if (msix_disable == 0 && msixc == ALC_MSIX_MESSAGES &&
680 		    pci_alloc_msix(dev, &msixc) == 0) {
681 			if (msixc == ALC_MSIX_MESSAGES) {
682 				device_printf(dev,
683 				    "Using %d MSIX message(s).\n", msixc);
684 				sc->alc_flags |= ALC_FLAG_MSIX;
685 				sc->alc_irq_spec = alc_irq_spec_msix;
686 			} else
687 				pci_release_msi(dev);
688 		}
689 		if (msi_disable == 0 && (sc->alc_flags & ALC_FLAG_MSIX) == 0 &&
690 		    msic == ALC_MSI_MESSAGES &&
691 		    pci_alloc_msi(dev, &msic) == 0) {
692 			if (msic == ALC_MSI_MESSAGES) {
693 				device_printf(dev,
694 				    "Using %d MSI message(s).\n", msic);
695 				sc->alc_flags |= ALC_FLAG_MSI;
696 				sc->alc_irq_spec = alc_irq_spec_msi;
697 			} else
698 				pci_release_msi(dev);
699 		}
700 	}
701 
702 	error = bus_alloc_resources(dev, sc->alc_irq_spec, sc->alc_irq);
703 	if (error != 0) {
704 		device_printf(dev, "cannot allocate IRQ resources.\n");
705 		goto fail;
706 	}
707 
708 	/* Create device sysctl node. */
709 	alc_sysctl_node(sc);
710 
711 	if ((error = alc_dma_alloc(sc)) != 0)
712 		goto fail;
713 
714 	/* Load station address. */
715 	alc_get_macaddr(sc);
716 
717 	ifp = sc->alc_ifp = if_alloc(IFT_ETHER);
718 	if (ifp == NULL) {
719 		device_printf(dev, "cannot allocate ifnet structure.\n");
720 		error = ENXIO;
721 		goto fail;
722 	}
723 
724 	ifp->if_softc = sc;
725 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
726 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
727 	ifp->if_ioctl = alc_ioctl;
728 	ifp->if_start = alc_start;
729 	ifp->if_init = alc_init;
730 	ifp->if_snd.ifq_drv_maxlen = ALC_TX_RING_CNT - 1;
731 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
732 	IFQ_SET_READY(&ifp->if_snd);
733 	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4;
734 	ifp->if_hwassist = ALC_CSUM_FEATURES | CSUM_TSO;
735 	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
736 		ifp->if_capabilities |= IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST;
737 	ifp->if_capenable = ifp->if_capabilities;
738 
739 	/* Set up MII bus. */
740 	if ((error = mii_phy_probe(dev, &sc->alc_miibus, alc_mediachange,
741 	    alc_mediastatus)) != 0) {
742 		device_printf(dev, "no PHY found!\n");
743 		goto fail;
744 	}
745 
746 	ether_ifattach(ifp, sc->alc_eaddr);
747 
748 	/* VLAN capability setup. */
749 	ifp->if_capabilities |= IFCAP_VLAN_MTU;
750 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
751 	ifp->if_capenable = ifp->if_capabilities;
752 	/*
753 	 * XXX
754 	 * It seems that enabling Tx checksum offloading causes more trouble.
755 	 * Sometimes the controller does not receive any frames when Tx
756 	 * checksum offloading is enabled.  I'm not sure whether this is a
757 	 * bug in the Tx checksum offloading logic or whether I got broken
758 	 * sample boards.  To be safe, don't enable Tx checksum offloading
759 	 * by default, but give users the chance to toggle it if they know
760 	 * their controllers work without problems.
761 	 */
762 	ifp->if_capenable &= ~IFCAP_TXCSUM;
763 	ifp->if_hwassist &= ~ALC_CSUM_FEATURES;
764 
765 	/* Tell the upper layer(s) we support long frames. */
766 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
767 
768 	/* Create local taskq. */
769 	TASK_INIT(&sc->alc_tx_task, 1, alc_tx_task, ifp);
770 	sc->alc_tq = taskqueue_create_fast("alc_taskq", M_WAITOK,
771 	    taskqueue_thread_enqueue, &sc->alc_tq);
772 	if (sc->alc_tq == NULL) {
773 		device_printf(dev, "could not create taskqueue.\n");
774 		ether_ifdetach(ifp);
775 		error = ENXIO;
776 		goto fail;
777 	}
778 	taskqueue_start_threads(&sc->alc_tq, 1, PI_NET, "%s taskq",
779 	    device_get_nameunit(sc->alc_dev));
780 
781 	if ((sc->alc_flags & ALC_FLAG_MSIX) != 0)
782 		msic = ALC_MSIX_MESSAGES;
783 	else if ((sc->alc_flags & ALC_FLAG_MSI) != 0)
784 		msic = ALC_MSI_MESSAGES;
785 	else
786 		msic = 1;
787 	for (i = 0; i < msic; i++) {
788 		error = bus_setup_intr(dev, sc->alc_irq[i],
789 		    INTR_TYPE_NET | INTR_MPSAFE, alc_intr, NULL, sc,
790 		    &sc->alc_intrhand[i]);
791 		if (error != 0)
792 			break;
793 	}
794 	if (error != 0) {
795 		device_printf(dev, "could not set up interrupt handler.\n");
796 		taskqueue_free(sc->alc_tq);
797 		sc->alc_tq = NULL;
798 		ether_ifdetach(ifp);
799 		goto fail;
800 	}
801 
802 fail:
803 	if (error != 0)
804 		alc_detach(dev);
805 
806 	return (error);
807 }
808 
809 static int
810 alc_detach(device_t dev)
811 {
812 	struct alc_softc *sc;
813 	struct ifnet *ifp;
814 	int i, msic;
815 
816 	sc = device_get_softc(dev);
817 
818 	ifp = sc->alc_ifp;
819 	if (device_is_attached(dev)) {
820 		ALC_LOCK(sc);
821 		sc->alc_flags |= ALC_FLAG_DETACH;
822 		alc_stop(sc);
823 		ALC_UNLOCK(sc);
824 		callout_drain(&sc->alc_tick_ch);
825 		taskqueue_drain(sc->alc_tq, &sc->alc_int_task);
826 		taskqueue_drain(sc->alc_tq, &sc->alc_tx_task);
827 		ether_ifdetach(ifp);
828 	}
829 
830 	if (sc->alc_tq != NULL) {
831 		taskqueue_drain(sc->alc_tq, &sc->alc_int_task);
832 		taskqueue_free(sc->alc_tq);
833 		sc->alc_tq = NULL;
834 	}
835 
836 	if (sc->alc_miibus != NULL) {
837 		device_delete_child(dev, sc->alc_miibus);
838 		sc->alc_miibus = NULL;
839 	}
840 	bus_generic_detach(dev);
841 	alc_dma_free(sc);
842 
843 	if (ifp != NULL) {
844 		if_free(ifp);
845 		sc->alc_ifp = NULL;
846 	}
847 
848 	if ((sc->alc_flags & ALC_FLAG_MSIX) != 0)
849 		msic = ALC_MSIX_MESSAGES;
850 	else if ((sc->alc_flags & ALC_FLAG_MSI) != 0)
851 		msic = ALC_MSI_MESSAGES;
852 	else
853 		msic = 1;
854 	for (i = 0; i < msic; i++) {
855 		if (sc->alc_intrhand[i] != NULL) {
856 			bus_teardown_intr(dev, sc->alc_irq[i],
857 			    sc->alc_intrhand[i]);
858 			sc->alc_intrhand[i] = NULL;
859 		}
860 	}
861 	alc_phy_down(sc);
862 	bus_release_resources(dev, sc->alc_irq_spec, sc->alc_irq);
863 	if ((sc->alc_flags & (ALC_FLAG_MSI | ALC_FLAG_MSIX)) != 0)
864 		pci_release_msi(dev);
865 	bus_release_resources(dev, sc->alc_res_spec, sc->alc_res);
866 	mtx_destroy(&sc->alc_mtx);
867 
868 	return (0);
869 }
870 
871 #define	ALC_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
872 	    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
873 #define	ALC_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
874 	    SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
875 
876 static void
877 alc_sysctl_node(struct alc_softc *sc)
878 {
879 	struct sysctl_ctx_list *ctx;
880 	struct sysctl_oid_list *child, *parent;
881 	struct sysctl_oid *tree;
882 	struct alc_hw_stats *stats;
883 	int error;
884 
885 	stats = &sc->alc_stats;
886 	ctx = device_get_sysctl_ctx(sc->alc_dev);
887 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->alc_dev));
888 
889 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod",
890 	    CTLTYPE_INT | CTLFLAG_RW, &sc->alc_int_rx_mod, 0,
891 	    sysctl_hw_alc_int_mod, "I", "alc Rx interrupt moderation");
892 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod",
893 	    CTLTYPE_INT | CTLFLAG_RW, &sc->alc_int_tx_mod, 0,
894 	    sysctl_hw_alc_int_mod, "I", "alc Tx interrupt moderation");
895 	/* Pull in device tunables. */
896 	sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
897 	error = resource_int_value(device_get_name(sc->alc_dev),
898 	    device_get_unit(sc->alc_dev), "int_rx_mod", &sc->alc_int_rx_mod);
899 	if (error == 0) {
900 		if (sc->alc_int_rx_mod < ALC_IM_TIMER_MIN ||
901 		    sc->alc_int_rx_mod > ALC_IM_TIMER_MAX) {
902 			device_printf(sc->alc_dev, "int_rx_mod value out of "
903 			    "range; using default: %d\n",
904 			    ALC_IM_RX_TIMER_DEFAULT);
905 			sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
906 		}
907 	}
908 	sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
909 	error = resource_int_value(device_get_name(sc->alc_dev),
910 	    device_get_unit(sc->alc_dev), "int_tx_mod", &sc->alc_int_tx_mod);
911 	if (error == 0) {
912 		if (sc->alc_int_tx_mod < ALC_IM_TIMER_MIN ||
913 		    sc->alc_int_tx_mod > ALC_IM_TIMER_MAX) {
914 			device_printf(sc->alc_dev, "int_tx_mod value out of "
915 			    "range; using default: %d\n",
916 			    ALC_IM_TX_TIMER_DEFAULT);
917 			sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
918 		}
919 	}
920 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
921 	    CTLTYPE_INT | CTLFLAG_RW, &sc->alc_process_limit, 0,
922 	    sysctl_hw_alc_proc_limit, "I",
923 	    "max number of Rx events to process");
924 	/* Pull in device tunables. */
925 	sc->alc_process_limit = ALC_PROC_DEFAULT;
926 	error = resource_int_value(device_get_name(sc->alc_dev),
927 	    device_get_unit(sc->alc_dev), "process_limit",
928 	    &sc->alc_process_limit);
929 	if (error == 0) {
930 		if (sc->alc_process_limit < ALC_PROC_MIN ||
931 		    sc->alc_process_limit > ALC_PROC_MAX) {
932 			device_printf(sc->alc_dev,
933 			    "process_limit value out of range; "
934 			    "using default: %d\n", ALC_PROC_DEFAULT);
935 			sc->alc_process_limit = ALC_PROC_DEFAULT;
936 		}
937 	}
938 
939 	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
940 	    NULL, "ALC statistics");
941 	parent = SYSCTL_CHILDREN(tree);
942 
943 	/* Rx statistics. */
944 	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
945 	    NULL, "Rx MAC statistics");
946 	child = SYSCTL_CHILDREN(tree);
947 	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
948 	    &stats->rx_frames, "Good frames");
949 	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
950 	    &stats->rx_bcast_frames, "Good broadcast frames");
951 	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
952 	    &stats->rx_mcast_frames, "Good multicast frames");
953 	ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
954 	    &stats->rx_pause_frames, "Pause control frames");
955 	ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames",
956 	    &stats->rx_control_frames, "Control frames");
957 	ALC_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
958 	    &stats->rx_crcerrs, "CRC errors");
959 	ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
960 	    &stats->rx_lenerrs, "Frames with length mismatched");
961 	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
962 	    &stats->rx_bytes, "Good octets");
963 	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets",
964 	    &stats->rx_bcast_bytes, "Good broadcast octets");
965 	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets",
966 	    &stats->rx_mcast_bytes, "Good multicast octets");
967 	ALC_SYSCTL_STAT_ADD32(ctx, child, "runts",
968 	    &stats->rx_runts, "Too short frames");
969 	ALC_SYSCTL_STAT_ADD32(ctx, child, "fragments",
970 	    &stats->rx_fragments, "Fragmented frames");
971 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
972 	    &stats->rx_pkts_64, "64 bytes frames");
973 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
974 	    &stats->rx_pkts_65_127, "65 to 127 bytes frames");
975 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
976 	    &stats->rx_pkts_128_255, "128 to 255 bytes frames");
977 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
978 	    &stats->rx_pkts_256_511, "256 to 511 bytes frames");
979 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
980 	    &stats->rx_pkts_512_1023, "512 to 1023 bytes frames");
981 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
982 	    &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames");
983 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
984 	    &stats->rx_pkts_1519_max, "1519 to max frames");
985 	ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs",
986 	    &stats->rx_pkts_truncated, "Truncated frames due to MTU size");
987 	ALC_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
988 	    &stats->rx_fifo_oflows, "FIFO overflows");
989 	ALC_SYSCTL_STAT_ADD32(ctx, child, "rrs_errs",
990 	    &stats->rx_rrs_errs, "Return status write-back errors");
991 	ALC_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
992 	    &stats->rx_alignerrs, "Alignment errors");
993 	ALC_SYSCTL_STAT_ADD32(ctx, child, "filtered",
994 	    &stats->rx_pkts_filtered,
995 	    "Frames dropped due to address filtering");
996 
997 	/* Tx statistics. */
998 	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
999 	    NULL, "Tx MAC statistics");
1000 	child = SYSCTL_CHILDREN(tree);
1001 	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
1002 	    &stats->tx_frames, "Good frames");
1003 	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
1004 	    &stats->tx_bcast_frames, "Good broadcast frames");
1005 	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
1006 	    &stats->tx_mcast_frames, "Good multicast frames");
1007 	ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
1008 	    &stats->tx_pause_frames, "Pause control frames");
1009 	ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames",
1010 	    &stats->tx_control_frames, "Control frames");
1011 	ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_defers",
1012 	    &stats->tx_excess_defer, "Frames with excessive deferrals");
1013 	ALC_SYSCTL_STAT_ADD32(ctx, child, "defers",
1014 	    &stats->tx_deferred, "Frames with deferrals");
1015 	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
1016 	    &stats->tx_bytes, "Good octets");
1017 	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets",
1018 	    &stats->tx_bcast_bytes, "Good broadcast octets");
1019 	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets",
1020 	    &stats->tx_mcast_bytes, "Good multicast octets");
1021 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
1022 	    &stats->tx_pkts_64, "64 bytes frames");
1023 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
1024 	    &stats->tx_pkts_65_127, "65 to 127 bytes frames");
1025 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
1026 	    &stats->tx_pkts_128_255, "128 to 255 bytes frames");
1027 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
1028 	    &stats->tx_pkts_256_511, "256 to 511 bytes frames");
1029 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
1030 	    &stats->tx_pkts_512_1023, "512 to 1023 bytes frames");
1031 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
1032 	    &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames");
1033 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
1034 	    &stats->tx_pkts_1519_max, "1519 to max frames");
1035 	ALC_SYSCTL_STAT_ADD32(ctx, child, "single_colls",
1036 	    &stats->tx_single_colls, "Single collisions");
1037 	ALC_SYSCTL_STAT_ADD32(ctx, child, "multi_colls",
1038 	    &stats->tx_multi_colls, "Multiple collisions");
1039 	ALC_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
1040 	    &stats->tx_late_colls, "Late collisions");
1041 	ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_colls",
1042 	    &stats->tx_excess_colls, "Excessive collisions");
1043 	ALC_SYSCTL_STAT_ADD32(ctx, child, "abort",
1044 	    &stats->tx_abort, "Aborted frames due to excessive collisions");
1045 	ALC_SYSCTL_STAT_ADD32(ctx, child, "underruns",
1046 	    &stats->tx_underrun, "FIFO underruns");
1047 	ALC_SYSCTL_STAT_ADD32(ctx, child, "desc_underruns",
1048 	    &stats->tx_desc_underrun, "Descriptor write-back errors");
1049 	ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
1050 	    &stats->tx_lenerrs, "Frames with length mismatched");
1051 	ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs",
1052 	    &stats->tx_pkts_truncated, "Truncated frames due to MTU size");
1053 }
1054 
1055 #undef ALC_SYSCTL_STAT_ADD32
1056 #undef ALC_SYSCTL_STAT_ADD64
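/*
 * The tree built above can be inspected from userland with sysctl(8);
 * e.g. (assuming unit 0) "sysctl dev.alc.0.stats" dumps all of the
 * MAC statistics counters.
 */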
1057 
1058 struct alc_dmamap_arg {
1059 	bus_addr_t	alc_busaddr;
1060 };
1061 
1062 static void
1063 alc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1064 {
1065 	struct alc_dmamap_arg *ctx;
1066 
1067 	if (error != 0)
1068 		return;
1069 
1070 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1071 
1072 	ctx = (struct alc_dmamap_arg *)arg;
1073 	ctx->alc_busaddr = segs[0].ds_addr;
1074 }
1075 
1076 /*
1077  * Normal and high Tx descriptors share a single Tx high address.
1078  * The four Rx descriptor/return rings and the CMB share the same Rx
1079  * high address.
1080  */
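/*
 * A worked example with illustrative numbers: a 4KB ring loaded at
 * physical address 0xfffff000 ends at 0x100000000, so ALC_ADDR_HI()
 * of its start (0) and end (1) differ and alc_check_boundary() below
 * returns EFBIG, forcing the retry with 32bit DMA addressing.
 */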
1081 static int
1082 alc_check_boundary(struct alc_softc *sc)
1083 {
1084 	bus_addr_t cmb_end, rx_ring_end, rr_ring_end, tx_ring_end;
1085 
1086 	rx_ring_end = sc->alc_rdata.alc_rx_ring_paddr + ALC_RX_RING_SZ;
1087 	rr_ring_end = sc->alc_rdata.alc_rr_ring_paddr + ALC_RR_RING_SZ;
1088 	cmb_end = sc->alc_rdata.alc_cmb_paddr + ALC_CMB_SZ;
1089 	tx_ring_end = sc->alc_rdata.alc_tx_ring_paddr + ALC_TX_RING_SZ;
1090 
1091 	/* 4GB boundary crossing is not allowed. */
1092 	if ((ALC_ADDR_HI(rx_ring_end) !=
1093 	    ALC_ADDR_HI(sc->alc_rdata.alc_rx_ring_paddr)) ||
1094 	    (ALC_ADDR_HI(rr_ring_end) !=
1095 	    ALC_ADDR_HI(sc->alc_rdata.alc_rr_ring_paddr)) ||
1096 	    (ALC_ADDR_HI(cmb_end) !=
1097 	    ALC_ADDR_HI(sc->alc_rdata.alc_cmb_paddr)) ||
1098 	    (ALC_ADDR_HI(tx_ring_end) !=
1099 	    ALC_ADDR_HI(sc->alc_rdata.alc_tx_ring_paddr)))
1100 		return (EFBIG);
1101 	/*
1102 	 * Make sure Rx return descriptor/Rx descriptor/CMB use
1103 	 * the same high address.
1104 	 */
1105 	if ((ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(rr_ring_end)) ||
1106 	    (ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(cmb_end)))
1107 		return (EFBIG);
1108 
1109 	return (0);
1110 }
1111 
1112 static int
1113 alc_dma_alloc(struct alc_softc *sc)
1114 {
1115 	struct alc_txdesc *txd;
1116 	struct alc_rxdesc *rxd;
1117 	bus_addr_t lowaddr;
1118 	struct alc_dmamap_arg ctx;
1119 	int error, i;
1120 
1121 	lowaddr = BUS_SPACE_MAXADDR;
1122 again:
1123 	/* Create parent DMA tag. */
1124 	error = bus_dma_tag_create(
1125 	    bus_get_dma_tag(sc->alc_dev), /* parent */
1126 	    1, 0,			/* alignment, boundary */
1127 	    lowaddr,			/* lowaddr */
1128 	    BUS_SPACE_MAXADDR,		/* highaddr */
1129 	    NULL, NULL,			/* filter, filterarg */
1130 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1131 	    0,				/* nsegments */
1132 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1133 	    0,				/* flags */
1134 	    NULL, NULL,			/* lockfunc, lockarg */
1135 	    &sc->alc_cdata.alc_parent_tag);
1136 	if (error != 0) {
1137 		device_printf(sc->alc_dev,
1138 		    "could not create parent DMA tag.\n");
1139 		goto fail;
1140 	}
1141 
1142 	/* Create DMA tag for Tx descriptor ring. */
1143 	error = bus_dma_tag_create(
1144 	    sc->alc_cdata.alc_parent_tag, /* parent */
1145 	    ALC_TX_RING_ALIGN, 0,	/* alignment, boundary */
1146 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1147 	    BUS_SPACE_MAXADDR,		/* highaddr */
1148 	    NULL, NULL,			/* filter, filterarg */
1149 	    ALC_TX_RING_SZ,		/* maxsize */
1150 	    1,				/* nsegments */
1151 	    ALC_TX_RING_SZ,		/* maxsegsize */
1152 	    0,				/* flags */
1153 	    NULL, NULL,			/* lockfunc, lockarg */
1154 	    &sc->alc_cdata.alc_tx_ring_tag);
1155 	if (error != 0) {
1156 		device_printf(sc->alc_dev,
1157 		    "could not create Tx ring DMA tag.\n");
1158 		goto fail;
1159 	}
1160 
1161 	/* Create DMA tag for Rx free descriptor ring. */
1162 	error = bus_dma_tag_create(
1163 	    sc->alc_cdata.alc_parent_tag, /* parent */
1164 	    ALC_RX_RING_ALIGN, 0,	/* alignment, boundary */
1165 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1166 	    BUS_SPACE_MAXADDR,		/* highaddr */
1167 	    NULL, NULL,			/* filter, filterarg */
1168 	    ALC_RX_RING_SZ,		/* maxsize */
1169 	    1,				/* nsegments */
1170 	    ALC_RX_RING_SZ,		/* maxsegsize */
1171 	    0,				/* flags */
1172 	    NULL, NULL,			/* lockfunc, lockarg */
1173 	    &sc->alc_cdata.alc_rx_ring_tag);
1174 	if (error != 0) {
1175 		device_printf(sc->alc_dev,
1176 		    "could not create Rx ring DMA tag.\n");
1177 		goto fail;
1178 	}
1179 	/* Create DMA tag for Rx return descriptor ring. */
1180 	error = bus_dma_tag_create(
1181 	    sc->alc_cdata.alc_parent_tag, /* parent */
1182 	    ALC_RR_RING_ALIGN, 0,	/* alignment, boundary */
1183 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1184 	    BUS_SPACE_MAXADDR,		/* highaddr */
1185 	    NULL, NULL,			/* filter, filterarg */
1186 	    ALC_RR_RING_SZ,		/* maxsize */
1187 	    1,				/* nsegments */
1188 	    ALC_RR_RING_SZ,		/* maxsegsize */
1189 	    0,				/* flags */
1190 	    NULL, NULL,			/* lockfunc, lockarg */
1191 	    &sc->alc_cdata.alc_rr_ring_tag);
1192 	if (error != 0) {
1193 		device_printf(sc->alc_dev,
1194 		    "could not create Rx return ring DMA tag.\n");
1195 		goto fail;
1196 	}
1197 
1198 	/* Create DMA tag for coalescing message block. */
1199 	error = bus_dma_tag_create(
1200 	    sc->alc_cdata.alc_parent_tag, /* parent */
1201 	    ALC_CMB_ALIGN, 0,		/* alignment, boundary */
1202 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1203 	    BUS_SPACE_MAXADDR,		/* highaddr */
1204 	    NULL, NULL,			/* filter, filterarg */
1205 	    ALC_CMB_SZ,			/* maxsize */
1206 	    1,				/* nsegments */
1207 	    ALC_CMB_SZ,			/* maxsegsize */
1208 	    0,				/* flags */
1209 	    NULL, NULL,			/* lockfunc, lockarg */
1210 	    &sc->alc_cdata.alc_cmb_tag);
1211 	if (error != 0) {
1212 		device_printf(sc->alc_dev,
1213 		    "could not create CMB DMA tag.\n");
1214 		goto fail;
1215 	}
1216 	/* Create DMA tag for status message block. */
1217 	error = bus_dma_tag_create(
1218 	    sc->alc_cdata.alc_parent_tag, /* parent */
1219 	    ALC_SMB_ALIGN, 0,		/* alignment, boundary */
1220 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1221 	    BUS_SPACE_MAXADDR,		/* highaddr */
1222 	    NULL, NULL,			/* filter, filterarg */
1223 	    ALC_SMB_SZ,			/* maxsize */
1224 	    1,				/* nsegments */
1225 	    ALC_SMB_SZ,			/* maxsegsize */
1226 	    0,				/* flags */
1227 	    NULL, NULL,			/* lockfunc, lockarg */
1228 	    &sc->alc_cdata.alc_smb_tag);
1229 	if (error != 0) {
1230 		device_printf(sc->alc_dev,
1231 		    "could not create SMB DMA tag.\n");
1232 		goto fail;
1233 	}
1234 
1235 	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
1236 	error = bus_dmamem_alloc(sc->alc_cdata.alc_tx_ring_tag,
1237 	    (void **)&sc->alc_rdata.alc_tx_ring,
1238 	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1239 	    &sc->alc_cdata.alc_tx_ring_map);
1240 	if (error != 0) {
1241 		device_printf(sc->alc_dev,
1242 		    "could not allocate DMA'able memory for Tx ring.\n");
1243 		goto fail;
1244 	}
1245 	ctx.alc_busaddr = 0;
1246 	error = bus_dmamap_load(sc->alc_cdata.alc_tx_ring_tag,
1247 	    sc->alc_cdata.alc_tx_ring_map, sc->alc_rdata.alc_tx_ring,
1248 	    ALC_TX_RING_SZ, alc_dmamap_cb, &ctx, 0);
1249 	if (error != 0 || ctx.alc_busaddr == 0) {
1250 		device_printf(sc->alc_dev,
1251 		    "could not load DMA'able memory for Tx ring.\n");
1252 		goto fail;
1253 	}
1254 	sc->alc_rdata.alc_tx_ring_paddr = ctx.alc_busaddr;
1255 
1256 	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
1257 	error = bus_dmamem_alloc(sc->alc_cdata.alc_rx_ring_tag,
1258 	    (void **)&sc->alc_rdata.alc_rx_ring,
1259 	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1260 	    &sc->alc_cdata.alc_rx_ring_map);
1261 	if (error != 0) {
1262 		device_printf(sc->alc_dev,
1263 		    "could not allocate DMA'able memory for Rx ring.\n");
1264 		goto fail;
1265 	}
1266 	ctx.alc_busaddr = 0;
1267 	error = bus_dmamap_load(sc->alc_cdata.alc_rx_ring_tag,
1268 	    sc->alc_cdata.alc_rx_ring_map, sc->alc_rdata.alc_rx_ring,
1269 	    ALC_RX_RING_SZ, alc_dmamap_cb, &ctx, 0);
1270 	if (error != 0 || ctx.alc_busaddr == 0) {
1271 		device_printf(sc->alc_dev,
1272 		    "could not load DMA'able memory for Rx ring.\n");
1273 		goto fail;
1274 	}
1275 	sc->alc_rdata.alc_rx_ring_paddr = ctx.alc_busaddr;
1276 
1277 	/* Allocate DMA'able memory and load the DMA map for Rx return ring. */
1278 	error = bus_dmamem_alloc(sc->alc_cdata.alc_rr_ring_tag,
1279 	    (void **)&sc->alc_rdata.alc_rr_ring,
1280 	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1281 	    &sc->alc_cdata.alc_rr_ring_map);
1282 	if (error != 0) {
1283 		device_printf(sc->alc_dev,
1284 		    "could not allocate DMA'able memory for Rx return ring.\n");
1285 		goto fail;
1286 	}
1287 	ctx.alc_busaddr = 0;
1288 	error = bus_dmamap_load(sc->alc_cdata.alc_rr_ring_tag,
1289 	    sc->alc_cdata.alc_rr_ring_map, sc->alc_rdata.alc_rr_ring,
1290 	    ALC_RR_RING_SZ, alc_dmamap_cb, &ctx, 0);
1291 	if (error != 0 || ctx.alc_busaddr == 0) {
1292 		device_printf(sc->alc_dev,
1293 		    "could not load DMA'able memory for Rx return ring.\n");
1294 		goto fail;
1295 	}
1296 	sc->alc_rdata.alc_rr_ring_paddr = ctx.alc_busaddr;
1297 
1298 	/* Allocate DMA'able memory and load the DMA map for CMB. */
1299 	error = bus_dmamem_alloc(sc->alc_cdata.alc_cmb_tag,
1300 	    (void **)&sc->alc_rdata.alc_cmb,
1301 	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1302 	    &sc->alc_cdata.alc_cmb_map);
1303 	if (error != 0) {
1304 		device_printf(sc->alc_dev,
1305 		    "could not allocate DMA'able memory for CMB.\n");
1306 		goto fail;
1307 	}
1308 	ctx.alc_busaddr = 0;
1309 	error = bus_dmamap_load(sc->alc_cdata.alc_cmb_tag,
1310 	    sc->alc_cdata.alc_cmb_map, sc->alc_rdata.alc_cmb,
1311 	    ALC_CMB_SZ, alc_dmamap_cb, &ctx, 0);
1312 	if (error != 0 || ctx.alc_busaddr == 0) {
1313 		device_printf(sc->alc_dev,
1314 		    "could not load DMA'able memory for CMB.\n");
1315 		goto fail;
1316 	}
1317 	sc->alc_rdata.alc_cmb_paddr = ctx.alc_busaddr;
1318 
1319 	/* Allocate DMA'able memory and load the DMA map for SMB. */
1320 	error = bus_dmamem_alloc(sc->alc_cdata.alc_smb_tag,
1321 	    (void **)&sc->alc_rdata.alc_smb,
1322 	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1323 	    &sc->alc_cdata.alc_smb_map);
1324 	if (error != 0) {
1325 		device_printf(sc->alc_dev,
1326 		    "could not allocate DMA'able memory for SMB.\n");
1327 		goto fail;
1328 	}
1329 	ctx.alc_busaddr = 0;
1330 	error = bus_dmamap_load(sc->alc_cdata.alc_smb_tag,
1331 	    sc->alc_cdata.alc_smb_map, sc->alc_rdata.alc_smb,
1332 	    ALC_SMB_SZ, alc_dmamap_cb, &ctx, 0);
1333 	if (error != 0 || ctx.alc_busaddr == 0) {
1334 		device_printf(sc->alc_dev,
1335 		    "could not load DMA'able memory for SMB.\n");
1336 		goto fail;
1337 	}
1338 	sc->alc_rdata.alc_smb_paddr = ctx.alc_busaddr;
1339 
1340 	/* Make sure we've not crossed 4GB boundary. */
1341 	if (lowaddr != BUS_SPACE_MAXADDR_32BIT &&
1342 	    (error = alc_check_boundary(sc)) != 0) {
1343 		device_printf(sc->alc_dev, "4GB boundary crossed, "
1344 		    "switching to 32bit DMA addressing mode.\n");
1345 		alc_dma_free(sc);
1346 		/*
1347 		 * Limit max allowable DMA address space to 32bit
1348 		 * and try again.
1349 		 */
1350 		lowaddr = BUS_SPACE_MAXADDR_32BIT;
1351 		goto again;
1352 	}
1353 
1354 	/*
1355 	 * Create Tx buffer parent tag.
1356 	 * AR8131/AR8132 allows 64bit DMA addressing of Tx/Rx buffers,
1357 	 * so it needs its own parent DMA tag: the ring parent DMA tag's
1358 	 * address space may have been restricted to 32bit addressing
1359 	 * above because of 4GB boundary crossing.
1360 	 */
1361 	error = bus_dma_tag_create(
1362 	    bus_get_dma_tag(sc->alc_dev), /* parent */
1363 	    1, 0,			/* alignment, boundary */
1364 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1365 	    BUS_SPACE_MAXADDR,		/* highaddr */
1366 	    NULL, NULL,			/* filter, filterarg */
1367 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1368 	    0,				/* nsegments */
1369 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1370 	    0,				/* flags */
1371 	    NULL, NULL,			/* lockfunc, lockarg */
1372 	    &sc->alc_cdata.alc_buffer_tag);
1373 	if (error != 0) {
1374 		device_printf(sc->alc_dev,
1375 		    "could not create parent buffer DMA tag.\n");
1376 		goto fail;
1377 	}
1378 
1379 	/* Create DMA tag for Tx buffers. */
1380 	error = bus_dma_tag_create(
1381 	    sc->alc_cdata.alc_buffer_tag, /* parent */
1382 	    1, 0,			/* alignment, boundary */
1383 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1384 	    BUS_SPACE_MAXADDR,		/* highaddr */
1385 	    NULL, NULL,			/* filter, filterarg */
1386 	    ALC_TSO_MAXSIZE,		/* maxsize */
1387 	    ALC_MAXTXSEGS,		/* nsegments */
1388 	    ALC_TSO_MAXSEGSIZE,		/* maxsegsize */
1389 	    0,				/* flags */
1390 	    NULL, NULL,			/* lockfunc, lockarg */
1391 	    &sc->alc_cdata.alc_tx_tag);
1392 	if (error != 0) {
1393 		device_printf(sc->alc_dev, "could not create Tx DMA tag.\n");
1394 		goto fail;
1395 	}
1396 
1397 	/* Create DMA tag for Rx buffers. */
1398 	error = bus_dma_tag_create(
1399 	    sc->alc_cdata.alc_buffer_tag, /* parent */
1400 	    ALC_RX_BUF_ALIGN, 0,	/* alignment, boundary */
1401 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1402 	    BUS_SPACE_MAXADDR,		/* highaddr */
1403 	    NULL, NULL,			/* filter, filterarg */
1404 	    MCLBYTES,			/* maxsize */
1405 	    1,				/* nsegments */
1406 	    MCLBYTES,			/* maxsegsize */
1407 	    0,				/* flags */
1408 	    NULL, NULL,			/* lockfunc, lockarg */
1409 	    &sc->alc_cdata.alc_rx_tag);
1410 	if (error != 0) {
1411 		device_printf(sc->alc_dev, "could not create Rx DMA tag.\n");
1412 		goto fail;
1413 	}
1414 	/* Create DMA maps for Tx buffers. */
1415 	for (i = 0; i < ALC_TX_RING_CNT; i++) {
1416 		txd = &sc->alc_cdata.alc_txdesc[i];
1417 		txd->tx_m = NULL;
1418 		txd->tx_dmamap = NULL;
1419 		error = bus_dmamap_create(sc->alc_cdata.alc_tx_tag, 0,
1420 		    &txd->tx_dmamap);
1421 		if (error != 0) {
1422 			device_printf(sc->alc_dev,
1423 			    "could not create Tx dmamap.\n");
1424 			goto fail;
1425 		}
1426 	}
1427 	/* Create DMA maps for Rx buffers. */
1428 	if ((error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag, 0,
1429 	    &sc->alc_cdata.alc_rx_sparemap)) != 0) {
1430 		device_printf(sc->alc_dev,
1431 		    "could not create spare Rx dmamap.\n");
1432 		goto fail;
1433 	}
1434 	for (i = 0; i < ALC_RX_RING_CNT; i++) {
1435 		rxd = &sc->alc_cdata.alc_rxdesc[i];
1436 		rxd->rx_m = NULL;
1437 		rxd->rx_dmamap = NULL;
1438 		error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag, 0,
1439 		    &rxd->rx_dmamap);
1440 		if (error != 0) {
1441 			device_printf(sc->alc_dev,
1442 			    "could not create Rx dmamap.\n");
1443 			goto fail;
1444 		}
1445 	}
1446 
1447 fail:
1448 	return (error);
1449 }
1450 
1451 static void
1452 alc_dma_free(struct alc_softc *sc)
1453 {
1454 	struct alc_txdesc *txd;
1455 	struct alc_rxdesc *rxd;
1456 	int i;
1457 
1458 	/* Tx buffers. */
1459 	if (sc->alc_cdata.alc_tx_tag != NULL) {
1460 		for (i = 0; i < ALC_TX_RING_CNT; i++) {
1461 			txd = &sc->alc_cdata.alc_txdesc[i];
1462 			if (txd->tx_dmamap != NULL) {
1463 				bus_dmamap_destroy(sc->alc_cdata.alc_tx_tag,
1464 				    txd->tx_dmamap);
1465 				txd->tx_dmamap = NULL;
1466 			}
1467 		}
1468 		bus_dma_tag_destroy(sc->alc_cdata.alc_tx_tag);
1469 		sc->alc_cdata.alc_tx_tag = NULL;
1470 	}
1471 	/* Rx buffers */
1472 	if (sc->alc_cdata.alc_rx_tag != NULL) {
1473 		for (i = 0; i < ALC_RX_RING_CNT; i++) {
1474 			rxd = &sc->alc_cdata.alc_rxdesc[i];
1475 			if (rxd->rx_dmamap != NULL) {
1476 				bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag,
1477 				    rxd->rx_dmamap);
1478 				rxd->rx_dmamap = NULL;
1479 			}
1480 		}
1481 		if (sc->alc_cdata.alc_rx_sparemap != NULL) {
1482 			bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag,
1483 			    sc->alc_cdata.alc_rx_sparemap);
1484 			sc->alc_cdata.alc_rx_sparemap = NULL;
1485 		}
1486 		bus_dma_tag_destroy(sc->alc_cdata.alc_rx_tag);
1487 		sc->alc_cdata.alc_rx_tag = NULL;
1488 	}
1489 	/* Tx descriptor ring. */
1490 	if (sc->alc_cdata.alc_tx_ring_tag != NULL) {
1491 		if (sc->alc_cdata.alc_tx_ring_map != NULL)
1492 			bus_dmamap_unload(sc->alc_cdata.alc_tx_ring_tag,
1493 			    sc->alc_cdata.alc_tx_ring_map);
1494 		if (sc->alc_cdata.alc_tx_ring_map != NULL &&
1495 		    sc->alc_rdata.alc_tx_ring != NULL)
1496 			bus_dmamem_free(sc->alc_cdata.alc_tx_ring_tag,
1497 			    sc->alc_rdata.alc_tx_ring,
1498 			    sc->alc_cdata.alc_tx_ring_map);
1499 		sc->alc_rdata.alc_tx_ring = NULL;
1500 		sc->alc_cdata.alc_tx_ring_map = NULL;
1501 		bus_dma_tag_destroy(sc->alc_cdata.alc_tx_ring_tag);
1502 		sc->alc_cdata.alc_tx_ring_tag = NULL;
1503 	}
1504 	/* Rx return ring. */
1505 	if (sc->alc_cdata.alc_rr_ring_tag != NULL) {
1506 		if (sc->alc_cdata.alc_rr_ring_map != NULL)
1507 			bus_dmamap_unload(sc->alc_cdata.alc_rr_ring_tag,
1508 			    sc->alc_cdata.alc_rr_ring_map);
1509 		if (sc->alc_cdata.alc_rr_ring_map != NULL &&
1510 		    sc->alc_rdata.alc_rr_ring != NULL)
1511 			bus_dmamem_free(sc->alc_cdata.alc_rr_ring_tag,
1512 			    sc->alc_rdata.alc_rr_ring,
1513 			    sc->alc_cdata.alc_rr_ring_map);
1514 		sc->alc_rdata.alc_rr_ring = NULL;
1515 		sc->alc_cdata.alc_rr_ring_map = NULL;
1516 		bus_dma_tag_destroy(sc->alc_cdata.alc_rr_ring_tag);
1517 		sc->alc_cdata.alc_rr_ring_tag = NULL;
1518 	}
1519 	/* CMB block */
1520 	if (sc->alc_cdata.alc_cmb_tag != NULL) {
1521 		if (sc->alc_cdata.alc_cmb_map != NULL)
1522 			bus_dmamap_unload(sc->alc_cdata.alc_cmb_tag,
1523 			    sc->alc_cdata.alc_cmb_map);
1524 		if (sc->alc_cdata.alc_cmb_map != NULL &&
1525 		    sc->alc_rdata.alc_cmb != NULL)
1526 			bus_dmamem_free(sc->alc_cdata.alc_cmb_tag,
1527 			    sc->alc_rdata.alc_cmb,
1528 			    sc->alc_cdata.alc_cmb_map);
1529 		sc->alc_rdata.alc_cmb = NULL;
1530 		sc->alc_cdata.alc_cmb_map = NULL;
1531 		bus_dma_tag_destroy(sc->alc_cdata.alc_cmb_tag);
1532 		sc->alc_cdata.alc_cmb_tag = NULL;
1533 	}
1534 	/* SMB block */
1535 	if (sc->alc_cdata.alc_smb_tag != NULL) {
1536 		if (sc->alc_cdata.alc_smb_map != NULL)
1537 			bus_dmamap_unload(sc->alc_cdata.alc_smb_tag,
1538 			    sc->alc_cdata.alc_smb_map);
1539 		if (sc->alc_cdata.alc_smb_map != NULL &&
1540 		    sc->alc_rdata.alc_smb != NULL)
1541 			bus_dmamem_free(sc->alc_cdata.alc_smb_tag,
1542 			    sc->alc_rdata.alc_smb,
1543 			    sc->alc_cdata.alc_smb_map);
1544 		sc->alc_rdata.alc_smb = NULL;
1545 		sc->alc_cdata.alc_smb_map = NULL;
1546 		bus_dma_tag_destroy(sc->alc_cdata.alc_smb_tag);
1547 		sc->alc_cdata.alc_smb_tag = NULL;
1548 	}
1549 	if (sc->alc_cdata.alc_buffer_tag != NULL) {
1550 		bus_dma_tag_destroy(sc->alc_cdata.alc_buffer_tag);
1551 		sc->alc_cdata.alc_buffer_tag = NULL;
1552 	}
1553 	if (sc->alc_cdata.alc_parent_tag != NULL) {
1554 		bus_dma_tag_destroy(sc->alc_cdata.alc_parent_tag);
1555 		sc->alc_cdata.alc_parent_tag = NULL;
1556 	}
1557 }
1558 
1559 static int
1560 alc_shutdown(device_t dev)
1561 {
1562 
1563 	return (alc_suspend(dev));
1564 }
1565 
1566 /*
1567  * Note: this driver resets the link speed to 10/100Mbps by
1568  * restarting auto-negotiation in the suspend/shutdown phase, but we
1569  * don't know whether that auto-negotiation will succeed or not, as
1570  * the driver has no control after the power off/suspend operation.
1571  * If the renegotiation fails, WOL may not work.  Running at 1Gbps
1572  * would draw more power than the 375mA at 3.3V specified in the PCI
1573  * specification, and that could result in power to the ethernet
1574  * controller being shut down completely.
1575  *
1576  * TODO
1577  * Save the currently negotiated media speed/duplex/flow-control to
1578  * the softc and restore the same link again after resuming.  PHY
1579  * handling such as powering down/resetting to 100Mbps may be better
1580  * handled in the suspend method of the PHY driver.
1581  */
1582 static void
1583 alc_setlinkspeed(struct alc_softc *sc)
1584 {
1585 	struct mii_data *mii;
1586 	int aneg, i;
1587 
1588 	mii = device_get_softc(sc->alc_miibus);
1589 	mii_pollstat(mii);
1590 	aneg = 0;
1591 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
1592 	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
1594 		case IFM_10_T:
1595 		case IFM_100_TX:
1596 			return;
1597 		case IFM_1000_T:
1598 			aneg++;
1599 			break;
1600 		default:
1601 			break;
1602 		}
1603 	}
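	/*
	 * Restart auto-negotiation advertising only 10/100Mbps:
	 * writing 0 to MII_100T2CR drops the 1000BASE-T advertisement
	 * before the reset/renegotiate below.
	 */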
1604 	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, MII_100T2CR, 0);
1605 	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
1606 	    MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1607 	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
1608 	    MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
1609 	DELAY(1000);
1610 	if (aneg != 0) {
1611 		/*
		 * Poll link state until alc(4) gets a 10/100Mbps link.
1613 		 */
1614 		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1615 			mii_pollstat(mii);
1616 			if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
1617 			    == (IFM_ACTIVE | IFM_AVALID)) {
1618 				switch (IFM_SUBTYPE(
1619 				    mii->mii_media_active)) {
1620 				case IFM_10_T:
1621 				case IFM_100_TX:
1622 					alc_mac_config(sc);
1623 					return;
1624 				default:
1625 					break;
1626 				}
1627 			}
1628 			ALC_UNLOCK(sc);
1629 			pause("alclnk", hz);
1630 			ALC_LOCK(sc);
1631 		}
1632 		if (i == MII_ANEGTICKS_GIGE)
1633 			device_printf(sc->alc_dev,
			    "establishing a link failed, WOL may not work!\n");
1635 	}
1636 	/*
	 * No link; force the MAC to a 100Mbps, full-duplex link.
	 * This is the last resort and may or may not work.
1639 	 */
1640 	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1641 	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1642 	alc_mac_config(sc);
1643 }
1644 
1645 static void
1646 alc_setwol(struct alc_softc *sc)
1647 {
1648 	struct ifnet *ifp;
1649 	uint32_t cap, reg, pmcs;
1650 	uint16_t pmstat;
1651 	int base, pmc;
1652 
1653 	ALC_LOCK_ASSERT(sc);
1654 
1655 	if (pci_find_extcap(sc->alc_dev, PCIY_EXPRESS, &base) == 0) {
1656 		cap = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CAP);
1657 		if ((cap & PCIM_LINK_CAP_ASPM) != 0) {
1658 			cap = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CTL);
1659 			alc_disable_l0s_l1(sc);
1660 		}
1661 	}
1662 	if (pci_find_extcap(sc->alc_dev, PCIY_PMG, &pmc) != 0) {
1663 		/* Disable WOL. */
1664 		CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
1665 		reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC);
1666 		reg |= PCIE_PHYMISC_FORCE_RCV_DET;
1667 		CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg);
1668 		/* Force PHY power down. */
1669 		alc_phy_down(sc);
1670 		return;
1671 	}
1672 
1673 	ifp = sc->alc_ifp;
1674 	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
1675 		if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
1676 			alc_setlinkspeed(sc);
1677 		reg = CSR_READ_4(sc, ALC_MASTER_CFG);
1678 		reg &= ~MASTER_CLK_SEL_DIS;
1679 		CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
1680 	}
1681 
1682 	pmcs = 0;
1683 	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
1684 		pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB;
1685 	CSR_WRITE_4(sc, ALC_WOL_CFG, pmcs);
1686 	reg = CSR_READ_4(sc, ALC_MAC_CFG);
1687 	reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC | MAC_CFG_ALLMULTI |
1688 	    MAC_CFG_BCAST);
1689 	if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
1690 		reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST;
1691 	if ((ifp->if_capenable & IFCAP_WOL) != 0)
1692 		reg |= MAC_CFG_RX_ENB;
1693 	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
1694 
1695 	reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC);
1696 	reg |= PCIE_PHYMISC_FORCE_RCV_DET;
1697 	CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg);
1698 	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
1699 		/* WOL disabled, PHY power down. */
1700 		alc_phy_down(sc);
1701 	}
1702 	/* Request PME. */
1703 	pmstat = pci_read_config(sc->alc_dev, pmc + PCIR_POWER_STATUS, 2);
1704 	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1705 	if ((ifp->if_capenable & IFCAP_WOL) != 0)
1706 		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1707 	pci_write_config(sc->alc_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1708 }
1709 
1710 static int
1711 alc_suspend(device_t dev)
1712 {
1713 	struct alc_softc *sc;
1714 
1715 	sc = device_get_softc(dev);
1716 
1717 	ALC_LOCK(sc);
1718 	alc_stop(sc);
1719 	alc_setwol(sc);
1720 	ALC_UNLOCK(sc);
1721 
1722 	return (0);
1723 }
1724 
1725 static int
1726 alc_resume(device_t dev)
1727 {
1728 	struct alc_softc *sc;
1729 	struct ifnet *ifp;
1730 	int pmc;
1731 	uint16_t pmstat;
1732 
1733 	sc = device_get_softc(dev);
1734 
1735 	ALC_LOCK(sc);
1736 	if (pci_find_extcap(sc->alc_dev, PCIY_PMG, &pmc) == 0) {
1737 		/* Disable PME and clear PME status. */
1738 		pmstat = pci_read_config(sc->alc_dev,
1739 		    pmc + PCIR_POWER_STATUS, 2);
1740 		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
1741 			pmstat &= ~PCIM_PSTAT_PMEENABLE;
1742 			pci_write_config(sc->alc_dev,
1743 			    pmc + PCIR_POWER_STATUS, pmstat, 2);
1744 		}
1745 	}
1746 	/* Reset PHY. */
1747 	alc_phy_reset(sc);
1748 	ifp = sc->alc_ifp;
1749 	if ((ifp->if_flags & IFF_UP) != 0) {
1750 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1751 		alc_init_locked(sc);
1752 	}
1753 	ALC_UNLOCK(sc);
1754 
1755 	return (0);
1756 }
1757 
1758 static int
1759 alc_encap(struct alc_softc *sc, struct mbuf **m_head)
1760 {
1761 	struct alc_txdesc *txd, *txd_last;
1762 	struct tx_desc *desc;
1763 	struct mbuf *m;
1764 	struct ip *ip;
1765 	struct tcphdr *tcp;
1766 	bus_dma_segment_t txsegs[ALC_MAXTXSEGS];
1767 	bus_dmamap_t map;
1768 	uint32_t cflags, hdrlen, ip_off, poff, vtag;
1769 	int error, idx, nsegs, prod;
1770 
1771 	ALC_LOCK_ASSERT(sc);
1772 
1773 	M_ASSERTPKTHDR((*m_head));
1774 
1775 	m = *m_head;
1776 	ip = NULL;
1777 	tcp = NULL;
1778 	ip_off = poff = 0;
1779 	if ((m->m_pkthdr.csum_flags & (ALC_CSUM_FEATURES | CSUM_TSO)) != 0) {
1780 		/*
		 * AR8131/AR8132 requires the offset of the TCP/UDP header
		 * in its Tx descriptor to perform Tx checksum offloading.
		 * TSO also requires the TCP header offset and modification
		 * of the IP/TCP header. These operations take many CPU
		 * cycles on FreeBSD, so a fast host CPU is required for
		 * smooth TSO performance.
1787 		 */
1788 		struct ether_header *eh;
1789 
1790 		if (M_WRITABLE(m) == 0) {
1791 			/* Get a writable copy. */
1792 			m = m_dup(*m_head, M_DONTWAIT);
1793 			/* Release original mbufs. */
1794 			m_freem(*m_head);
1795 			if (m == NULL) {
1796 				*m_head = NULL;
1797 				return (ENOBUFS);
1798 			}
1799 			*m_head = m;
1800 		}
1801 
1802 		ip_off = sizeof(struct ether_header);
1803 		m = m_pullup(m, ip_off);
1804 		if (m == NULL) {
1805 			*m_head = NULL;
1806 			return (ENOBUFS);
1807 		}
1808 		eh = mtod(m, struct ether_header *);
1809 		/*
1810 		 * Check if hardware VLAN insertion is off.
1811 		 * Additional check for LLC/SNAP frame?
1812 		 */
1813 		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1814 			ip_off = sizeof(struct ether_vlan_header);
1815 			m = m_pullup(m, ip_off);
1816 			if (m == NULL) {
1817 				*m_head = NULL;
1818 				return (ENOBUFS);
1819 			}
1820 		}
1821 		m = m_pullup(m, ip_off + sizeof(struct ip));
1822 		if (m == NULL) {
1823 			*m_head = NULL;
1824 			return (ENOBUFS);
1825 		}
1826 		ip = (struct ip *)(mtod(m, char *) + ip_off);
1827 		poff = ip_off + (ip->ip_hl << 2);
1828 		if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
1829 			m = m_pullup(m, poff + sizeof(struct tcphdr));
1830 			if (m == NULL) {
1831 				*m_head = NULL;
1832 				return (ENOBUFS);
1833 			}
1834 			tcp = (struct tcphdr *)(mtod(m, char *) + poff);
1835 			m = m_pullup(m, poff + (tcp->th_off << 2));
1836 			if (m == NULL) {
1837 				*m_head = NULL;
1838 				return (ENOBUFS);
1839 			}
1840 			/*
			 * Due to strict adherence to the Microsoft NDIS
			 * Large Send specification, the hardware expects
			 * a pseudo TCP checksum inserted by the upper
			 * stack. Unfortunately the pseudo TCP checksum
			 * that NDIS refers to does not include the TCP
			 * payload length, so the driver has to recompute
			 * the pseudo checksum here. Hopefully this isn't
			 * much of a burden on modern CPUs.
			 *
			 * Reset the IP checksum and recompute the TCP
			 * pseudo checksum as the NDIS specification says.
1852 			 */
1853 			ip->ip_sum = 0;
1854 			tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
1855 			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1856 		}
1857 		*m_head = m;
1858 	}
1859 
1860 	prod = sc->alc_cdata.alc_tx_prod;
1861 	txd = &sc->alc_cdata.alc_txdesc[prod];
1862 	txd_last = txd;
1863 	map = txd->tx_dmamap;
1864 
1865 	error = bus_dmamap_load_mbuf_sg(sc->alc_cdata.alc_tx_tag, map,
1866 	    *m_head, txsegs, &nsegs, 0);
1867 	if (error == EFBIG) {
1868 		m = m_collapse(*m_head, M_DONTWAIT, ALC_MAXTXSEGS);
1869 		if (m == NULL) {
1870 			m_freem(*m_head);
1871 			*m_head = NULL;
1872 			return (ENOMEM);
1873 		}
1874 		*m_head = m;
1875 		error = bus_dmamap_load_mbuf_sg(sc->alc_cdata.alc_tx_tag, map,
1876 		    *m_head, txsegs, &nsegs, 0);
1877 		if (error != 0) {
1878 			m_freem(*m_head);
1879 			*m_head = NULL;
1880 			return (error);
1881 		}
1882 	} else if (error != 0)
1883 		return (error);
1884 	if (nsegs == 0) {
1885 		m_freem(*m_head);
1886 		*m_head = NULL;
1887 		return (EIO);
1888 	}
1889 
1890 	/* Check descriptor overrun. */
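	/*
	 * (The -3 slack presumably leaves headroom for the extra
	 * descriptor the TSO header split below can consume.)
	 */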
1891 	if (sc->alc_cdata.alc_tx_cnt + nsegs >= ALC_TX_RING_CNT - 3) {
1892 		bus_dmamap_unload(sc->alc_cdata.alc_tx_tag, map);
1893 		return (ENOBUFS);
1894 	}
1895 	bus_dmamap_sync(sc->alc_cdata.alc_tx_tag, map, BUS_DMASYNC_PREWRITE);
1896 
1897 	m = *m_head;
1898 	cflags = TD_ETHERNET;
1899 	vtag = 0;
1900 	desc = NULL;
1901 	idx = 0;
1902 	/* Configure VLAN hardware tag insertion. */
1903 	if ((m->m_flags & M_VLANTAG) != 0) {
1904 		vtag = htons(m->m_pkthdr.ether_vtag);
1905 		vtag = (vtag << TD_VLAN_SHIFT) & TD_VLAN_MASK;
1906 		cflags |= TD_INS_VLAN_TAG;
1907 	}
1908 	/* Configure Tx checksum offload. */
1909 	if ((m->m_pkthdr.csum_flags & ALC_CSUM_FEATURES) != 0) {
1910 #ifdef ALC_USE_CUSTOM_CSUM
1911 		cflags |= TD_CUSTOM_CSUM;
1912 		/* Set checksum start offset. */
1913 		cflags |= ((poff >> 1) << TD_PLOAD_OFFSET_SHIFT) &
1914 		    TD_PLOAD_OFFSET_MASK;
1915 		/* Set checksum insertion position of TCP/UDP. */
1916 		cflags |= (((poff + m->m_pkthdr.csum_data) >> 1) <<
1917 		    TD_CUSTOM_CSUM_OFFSET_SHIFT) & TD_CUSTOM_CSUM_OFFSET_MASK;
1918 #else
1919 		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
1920 			cflags |= TD_IPCSUM;
1921 		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
1922 			cflags |= TD_TCPCSUM;
1923 		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
1924 			cflags |= TD_UDPCSUM;
1925 		/* Set TCP/UDP header offset. */
1926 		cflags |= (poff << TD_L4HDR_OFFSET_SHIFT) &
1927 		    TD_L4HDR_OFFSET_MASK;
1928 #endif
1929 	} else if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
1930 		/* Request TSO and set MSS. */
1931 		cflags |= TD_TSO | TD_TSO_DESCV1;
1932 		cflags |= ((uint32_t)m->m_pkthdr.tso_segsz << TD_MSS_SHIFT) &
1933 		    TD_MSS_MASK;
1934 		/* Set TCP header offset. */
1935 		cflags |= (poff << TD_TCPHDR_OFFSET_SHIFT) &
1936 		    TD_TCPHDR_OFFSET_MASK;
1937 		/*
		 * AR8131/AR8132 requires that the first buffer hold
		 * only the IP/TCP header data. The payload must be
		 * handled in the remaining descriptors.
1941 		 */
1942 		hdrlen = poff + (tcp->th_off << 2);
1943 		desc = &sc->alc_rdata.alc_tx_ring[prod];
1944 		desc->len = htole32(TX_BYTES(hdrlen | vtag));
1945 		desc->flags = htole32(cflags);
1946 		desc->addr = htole64(txsegs[0].ds_addr);
1947 		sc->alc_cdata.alc_tx_cnt++;
1948 		ALC_DESC_INC(prod, ALC_TX_RING_CNT);
1949 		if (m->m_len - hdrlen > 0) {
1950 			/* Handle remaining payload of the first fragment. */
1951 			desc = &sc->alc_rdata.alc_tx_ring[prod];
1952 			desc->len = htole32(TX_BYTES((m->m_len - hdrlen) |
1953 			    vtag));
1954 			desc->flags = htole32(cflags);
1955 			desc->addr = htole64(txsegs[0].ds_addr + hdrlen);
1956 			sc->alc_cdata.alc_tx_cnt++;
1957 			ALC_DESC_INC(prod, ALC_TX_RING_CNT);
1958 		}
1959 		/* Handle remaining fragments. */
1960 		idx = 1;
1961 	}
1962 	for (; idx < nsegs; idx++) {
1963 		desc = &sc->alc_rdata.alc_tx_ring[prod];
1964 		desc->len = htole32(TX_BYTES(txsegs[idx].ds_len) | vtag);
1965 		desc->flags = htole32(cflags);
1966 		desc->addr = htole64(txsegs[idx].ds_addr);
1967 		sc->alc_cdata.alc_tx_cnt++;
1968 		ALC_DESC_INC(prod, ALC_TX_RING_CNT);
1969 	}
1970 	/* Update producer index. */
1971 	sc->alc_cdata.alc_tx_prod = prod;
1972 
1973 	/* Finally set EOP on the last descriptor. */
1974 	prod = (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT;
1975 	desc = &sc->alc_rdata.alc_tx_ring[prod];
1976 	desc->flags |= htole32(TD_EOP);
1977 
1978 	/* Swap dmamap of the first and the last. */
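	/*
	 * The mbuf was loaded through the first slot's map; moving
	 * that map to the EOP slot lets alc_txeof() unload it when
	 * the frame's last descriptor is reclaimed.
	 */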
1979 	txd = &sc->alc_cdata.alc_txdesc[prod];
1980 	map = txd_last->tx_dmamap;
1981 	txd_last->tx_dmamap = txd->tx_dmamap;
1982 	txd->tx_dmamap = map;
1983 	txd->tx_m = m;
1984 
1985 	return (0);
1986 }
1987 
1988 static void
1989 alc_tx_task(void *arg, int pending)
1990 {
1991 	struct ifnet *ifp;
1992 
1993 	ifp = (struct ifnet *)arg;
1994 	alc_start(ifp);
1995 }
1996 
1997 static void
1998 alc_start(struct ifnet *ifp)
1999 {
2000 	struct alc_softc *sc;
2001 	struct mbuf *m_head;
2002 	int enq;
2003 
2004 	sc = ifp->if_softc;
2005 
2006 	ALC_LOCK(sc);
2007 
2008 	/* Reclaim transmitted frames. */
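	/*
	 * (Routine reclamation happens from alc_tick(); here we only
	 * reclaim early when the ring is above the high-water mark.)
	 */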
2009 	if (sc->alc_cdata.alc_tx_cnt >= ALC_TX_DESC_HIWAT)
2010 		alc_txeof(sc);
2011 
2012 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2013 	    IFF_DRV_RUNNING || (sc->alc_flags & ALC_FLAG_LINK) == 0) {
2014 		ALC_UNLOCK(sc);
2015 		return;
2016 	}
2017 
2018 	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
2019 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2020 		if (m_head == NULL)
2021 			break;
2022 		/*
2023 		 * Pack the data into the transmit ring. If we
2024 		 * don't have room, set the OACTIVE flag and wait
2025 		 * for the NIC to drain the ring.
2026 		 */
2027 		if (alc_encap(sc, &m_head)) {
2028 			if (m_head == NULL)
2029 				break;
2030 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2031 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2032 			break;
2033 		}
2034 
2035 		enq++;
2036 		/*
2037 		 * If there's a BPF listener, bounce a copy of this frame
2038 		 * to him.
2039 		 */
2040 		ETHER_BPF_MTAP(ifp, m_head);
2041 	}
2042 
2043 	if (enq > 0) {
2044 		/* Sync descriptors. */
2045 		bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
2046 		    sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE);
2047 		/* Kick. Assume we're using normal Tx priority queue. */
2048 		CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX,
2049 		    (sc->alc_cdata.alc_tx_prod <<
2050 		    MBOX_TD_PROD_LO_IDX_SHIFT) &
2051 		    MBOX_TD_PROD_LO_IDX_MASK);
2052 		/* Set a timeout in case the chip goes out to lunch. */
2053 		sc->alc_watchdog_timer = ALC_TX_TIMEOUT;
2054 	}
2055 
2056 	ALC_UNLOCK(sc);
2057 }
2058 
2059 static void
2060 alc_watchdog(struct alc_softc *sc)
2061 {
2062 	struct ifnet *ifp;
2063 
2064 	ALC_LOCK_ASSERT(sc);
2065 
2066 	if (sc->alc_watchdog_timer == 0 || --sc->alc_watchdog_timer)
2067 		return;
2068 
2069 	ifp = sc->alc_ifp;
2070 	if ((sc->alc_flags & ALC_FLAG_LINK) == 0) {
2071 		if_printf(sc->alc_ifp, "watchdog timeout (lost link)\n");
2072 		ifp->if_oerrors++;
2073 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2074 		alc_init_locked(sc);
2075 		return;
2076 	}
2077 	if_printf(sc->alc_ifp, "watchdog timeout -- resetting\n");
2078 	ifp->if_oerrors++;
2079 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2080 	alc_init_locked(sc);
2081 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2082 		taskqueue_enqueue(sc->alc_tq, &sc->alc_tx_task);
2083 }
2084 
2085 static int
2086 alc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2087 {
2088 	struct alc_softc *sc;
2089 	struct ifreq *ifr;
2090 	struct mii_data *mii;
2091 	int error, mask;
2092 
2093 	sc = ifp->if_softc;
2094 	ifr = (struct ifreq *)data;
2095 	error = 0;
2096 	switch (cmd) {
2097 	case SIOCSIFMTU:
2098 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ALC_JUMBO_MTU ||
2099 		    ((sc->alc_flags & ALC_FLAG_JUMBO) == 0 &&
2100 		    ifr->ifr_mtu > ETHERMTU))
2101 			error = EINVAL;
2102 		else if (ifp->if_mtu != ifr->ifr_mtu) {
2103 			ALC_LOCK(sc);
2104 			ifp->if_mtu = ifr->ifr_mtu;
			/* AR8131/AR8132 has a 13-bit MSS field. */
2106 			if (ifp->if_mtu > ALC_TSO_MTU &&
2107 			    (ifp->if_capenable & IFCAP_TSO4) != 0) {
2108 				ifp->if_capenable &= ~IFCAP_TSO4;
2109 				ifp->if_hwassist &= ~CSUM_TSO;
2110 			}
2111 			ALC_UNLOCK(sc);
2112 		}
2113 		break;
2114 	case SIOCSIFFLAGS:
2115 		ALC_LOCK(sc);
2116 		if ((ifp->if_flags & IFF_UP) != 0) {
2117 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
2118 			    ((ifp->if_flags ^ sc->alc_if_flags) &
2119 			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2120 				alc_rxfilter(sc);
2121 			else if ((sc->alc_flags & ALC_FLAG_DETACH) == 0)
2122 				alc_init_locked(sc);
2123 		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2124 			alc_stop(sc);
2125 		sc->alc_if_flags = ifp->if_flags;
2126 		ALC_UNLOCK(sc);
2127 		break;
2128 	case SIOCADDMULTI:
2129 	case SIOCDELMULTI:
2130 		ALC_LOCK(sc);
2131 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2132 			alc_rxfilter(sc);
2133 		ALC_UNLOCK(sc);
2134 		break;
2135 	case SIOCSIFMEDIA:
2136 	case SIOCGIFMEDIA:
2137 		mii = device_get_softc(sc->alc_miibus);
2138 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
2139 		break;
2140 	case SIOCSIFCAP:
2141 		ALC_LOCK(sc);
2142 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2143 		if ((mask & IFCAP_TXCSUM) != 0 &&
2144 		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
2145 			ifp->if_capenable ^= IFCAP_TXCSUM;
2146 			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2147 				ifp->if_hwassist |= ALC_CSUM_FEATURES;
2148 			else
2149 				ifp->if_hwassist &= ~ALC_CSUM_FEATURES;
2150 		}
2151 		if ((mask & IFCAP_TSO4) != 0 &&
2152 		    (ifp->if_capabilities & IFCAP_TSO4) != 0) {
2153 			ifp->if_capenable ^= IFCAP_TSO4;
2154 			if ((ifp->if_capenable & IFCAP_TSO4) != 0) {
				/* AR8131/AR8132 has a 13-bit MSS field. */
2156 				if (ifp->if_mtu > ALC_TSO_MTU) {
2157 					ifp->if_capenable &= ~IFCAP_TSO4;
2158 					ifp->if_hwassist &= ~CSUM_TSO;
2159 				} else
2160 					ifp->if_hwassist |= CSUM_TSO;
2161 			} else
2162 				ifp->if_hwassist &= ~CSUM_TSO;
2163 		}
2164 		if ((mask & IFCAP_WOL_MCAST) != 0 &&
2165 		    (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0)
2166 			ifp->if_capenable ^= IFCAP_WOL_MCAST;
2167 		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
2168 		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
2169 			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
2170 		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
2171 		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
2172 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2173 			alc_rxvlan(sc);
2174 		}
2175 		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
2176 		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
2177 			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
2178 		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
2179 		    (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
2180 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
2181 		/*
		 * VLAN hardware tagging is required to do checksum
		 * offload or TSO on a VLAN interface. Checksum offload
		 * on a VLAN interface also requires hardware checksum
		 * offload on the parent interface.
2186 		 */
2187 		if ((ifp->if_capenable & IFCAP_TXCSUM) == 0)
2188 			ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
2189 		if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
2190 			ifp->if_capenable &=
2191 			    ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM);
2192 		ALC_UNLOCK(sc);
2193 		VLAN_CAPABILITIES(ifp);
2194 		break;
2195 	default:
2196 		error = ether_ioctl(ifp, cmd, data);
2197 		break;
2198 	}
2199 
2200 	return (error);
2201 }
2202 
2203 static void
2204 alc_mac_config(struct alc_softc *sc)
2205 {
2206 	struct mii_data *mii;
2207 	uint32_t reg;
2208 
2209 	ALC_LOCK_ASSERT(sc);
2210 
2211 	mii = device_get_softc(sc->alc_miibus);
2212 	reg = CSR_READ_4(sc, ALC_MAC_CFG);
2213 	reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
2214 	    MAC_CFG_SPEED_MASK);
2215 	/* Reprogram MAC with resolved speed/duplex. */
2216 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
2217 	case IFM_10_T:
2218 	case IFM_100_TX:
2219 		reg |= MAC_CFG_SPEED_10_100;
2220 		break;
2221 	case IFM_1000_T:
2222 		reg |= MAC_CFG_SPEED_1000;
2223 		break;
2224 	}
2225 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
2226 		reg |= MAC_CFG_FULL_DUPLEX;
2227 #ifdef notyet
2228 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
2229 			reg |= MAC_CFG_TX_FC;
2230 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
2231 			reg |= MAC_CFG_RX_FC;
2232 #endif
2233 	}
2234 	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
2235 }
2236 
2237 static void
2238 alc_stats_clear(struct alc_softc *sc)
2239 {
2240 	struct smb sb, *smb;
2241 	uint32_t *reg;
2242 	int i;
2243 
2244 	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
2245 		bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
2246 		    sc->alc_cdata.alc_smb_map,
2247 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2248 		smb = sc->alc_rdata.alc_smb;
2249 		/* Update done, clear. */
2250 		smb->updated = 0;
2251 		bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
2252 		    sc->alc_cdata.alc_smb_map,
2253 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2254 	} else {
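		/*
		 * With the SMB bug, statistics must be read from the
		 * MIB registers directly; the counters appear to be
		 * clear-on-read, so reading each register resets it.
		 */
		/* Read Rx statistics. */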
2255 		for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
2256 		    reg++) {
2257 			CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
2258 			i += sizeof(uint32_t);
2259 		}
2260 		/* Read Tx statistics. */
2261 		for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
2262 		    reg++) {
2263 			CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
2264 			i += sizeof(uint32_t);
2265 		}
2266 	}
2267 }
2268 
2269 static void
2270 alc_stats_update(struct alc_softc *sc)
2271 {
2272 	struct alc_hw_stats *stat;
2273 	struct smb sb, *smb;
2274 	struct ifnet *ifp;
2275 	uint32_t *reg;
2276 	int i;
2277 
2278 	ALC_LOCK_ASSERT(sc);
2279 
2280 	ifp = sc->alc_ifp;
2281 	stat = &sc->alc_stats;
2282 	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
2283 		bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
2284 		    sc->alc_cdata.alc_smb_map,
2285 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2286 		smb = sc->alc_rdata.alc_smb;
2287 		if (smb->updated == 0)
2288 			return;
2289 	} else {
2290 		smb = &sb;
2291 		/* Read Rx statistics. */
2292 		for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
2293 		    reg++) {
2294 			*reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
2295 			i += sizeof(uint32_t);
2296 		}
2297 		/* Read Tx statistics. */
2298 		for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
2299 		    reg++) {
2300 			*reg = CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
2301 			i += sizeof(uint32_t);
2302 		}
2303 	}
2304 
2305 	/* Rx stats. */
2306 	stat->rx_frames += smb->rx_frames;
2307 	stat->rx_bcast_frames += smb->rx_bcast_frames;
2308 	stat->rx_mcast_frames += smb->rx_mcast_frames;
2309 	stat->rx_pause_frames += smb->rx_pause_frames;
2310 	stat->rx_control_frames += smb->rx_control_frames;
2311 	stat->rx_crcerrs += smb->rx_crcerrs;
2312 	stat->rx_lenerrs += smb->rx_lenerrs;
2313 	stat->rx_bytes += smb->rx_bytes;
2314 	stat->rx_runts += smb->rx_runts;
2315 	stat->rx_fragments += smb->rx_fragments;
2316 	stat->rx_pkts_64 += smb->rx_pkts_64;
2317 	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
2318 	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
2319 	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
2320 	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
2321 	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
2322 	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
2323 	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
2324 	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
2325 	stat->rx_rrs_errs += smb->rx_rrs_errs;
2326 	stat->rx_alignerrs += smb->rx_alignerrs;
2327 	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
2328 	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
2329 	stat->rx_pkts_filtered += smb->rx_pkts_filtered;
2330 
2331 	/* Tx stats. */
2332 	stat->tx_frames += smb->tx_frames;
2333 	stat->tx_bcast_frames += smb->tx_bcast_frames;
2334 	stat->tx_mcast_frames += smb->tx_mcast_frames;
2335 	stat->tx_pause_frames += smb->tx_pause_frames;
2336 	stat->tx_excess_defer += smb->tx_excess_defer;
2337 	stat->tx_control_frames += smb->tx_control_frames;
2338 	stat->tx_deferred += smb->tx_deferred;
2339 	stat->tx_bytes += smb->tx_bytes;
2340 	stat->tx_pkts_64 += smb->tx_pkts_64;
2341 	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
2342 	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
2343 	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
2344 	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
2345 	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
2346 	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
2347 	stat->tx_single_colls += smb->tx_single_colls;
2348 	stat->tx_multi_colls += smb->tx_multi_colls;
2349 	stat->tx_late_colls += smb->tx_late_colls;
2350 	stat->tx_excess_colls += smb->tx_excess_colls;
2351 	stat->tx_abort += smb->tx_abort;
2352 	stat->tx_underrun += smb->tx_underrun;
2353 	stat->tx_desc_underrun += smb->tx_desc_underrun;
2354 	stat->tx_lenerrs += smb->tx_lenerrs;
2355 	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
2356 	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
2357 	stat->tx_mcast_bytes += smb->tx_mcast_bytes;
2358 
2359 	/* Update counters in ifnet. */
2360 	ifp->if_opackets += smb->tx_frames;
2361 
2362 	ifp->if_collisions += smb->tx_single_colls +
2363 	    smb->tx_multi_colls * 2 + smb->tx_late_colls +
2364 	    smb->tx_abort * HDPX_CFG_RETRY_DEFAULT;
2365 
2366 	/*
2367 	 * XXX
	 * The tx_pkts_truncated counter looks suspicious. It constantly
	 * increments with no sign of Tx errors. This may indicate that
	 * the counter name is not the correct one, so it has been
	 * removed from the output error count.
2372 	 */
2373 	ifp->if_oerrors += smb->tx_abort + smb->tx_late_colls +
2374 	    smb->tx_underrun;
2375 
2376 	ifp->if_ipackets += smb->rx_frames;
2377 
2378 	ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
2379 	    smb->rx_runts + smb->rx_pkts_truncated +
2380 	    smb->rx_fifo_oflows + smb->rx_rrs_errs +
2381 	    smb->rx_alignerrs;
2382 
2383 	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
2384 		/* Update done, clear. */
2385 		smb->updated = 0;
2386 		bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
2387 		    sc->alc_cdata.alc_smb_map,
2388 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2389 	}
2390 }
2391 
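/*
 * Interrupt filter: runs in primary interrupt context, so it only
 * masks further interrupts and defers all real work to the taskqueue.
 */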
2392 static int
2393 alc_intr(void *arg)
2394 {
2395 	struct alc_softc *sc;
2396 	uint32_t status;
2397 
2398 	sc = (struct alc_softc *)arg;
2399 
2400 	status = CSR_READ_4(sc, ALC_INTR_STATUS);
2401 	if ((status & ALC_INTRS) == 0)
2402 		return (FILTER_STRAY);
2403 	/* Disable interrupts. */
2404 	CSR_WRITE_4(sc, ALC_INTR_STATUS, INTR_DIS_INT);
2405 	taskqueue_enqueue(sc->alc_tq, &sc->alc_int_task);
2406 
2407 	return (FILTER_HANDLED);
2408 }
2409 
2410 static void
2411 alc_int_task(void *arg, int pending)
2412 {
2413 	struct alc_softc *sc;
2414 	struct ifnet *ifp;
2415 	uint32_t status;
2416 	int more;
2417 
2418 	sc = (struct alc_softc *)arg;
2419 	ifp = sc->alc_ifp;
2420 
2421 	status = CSR_READ_4(sc, ALC_INTR_STATUS);
2422 	more = atomic_readandclear_int(&sc->alc_morework);
2423 	if (more != 0)
2424 		status |= INTR_RX_PKT;
2425 	if ((status & ALC_INTRS) == 0)
2426 		goto done;
2427 
	/* Acknowledge interrupts but keep them disabled. */
2429 	CSR_WRITE_4(sc, ALC_INTR_STATUS, status | INTR_DIS_INT);
2430 
2431 	more = 0;
2432 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2433 		if ((status & INTR_RX_PKT) != 0) {
2434 			more = alc_rxintr(sc, sc->alc_process_limit);
2435 			if (more == EAGAIN)
2436 				atomic_set_int(&sc->alc_morework, 1);
2437 			else if (more == EIO) {
2438 				ALC_LOCK(sc);
2439 				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2440 				alc_init_locked(sc);
2441 				ALC_UNLOCK(sc);
2442 				return;
2443 			}
2444 		}
2445 		if ((status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST |
2446 		    INTR_TXQ_TO_RST)) != 0) {
2447 			if ((status & INTR_DMA_RD_TO_RST) != 0)
2448 				device_printf(sc->alc_dev,
2449 				    "DMA read error! -- resetting\n");
2450 			if ((status & INTR_DMA_WR_TO_RST) != 0)
2451 				device_printf(sc->alc_dev,
2452 				    "DMA write error! -- resetting\n");
2453 			if ((status & INTR_TXQ_TO_RST) != 0)
2454 				device_printf(sc->alc_dev,
2455 				    "TxQ reset! -- resetting\n");
2456 			ALC_LOCK(sc);
2457 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2458 			alc_init_locked(sc);
2459 			ALC_UNLOCK(sc);
2460 			return;
2461 		}
2462 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
2463 		    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2464 			taskqueue_enqueue(sc->alc_tq, &sc->alc_tx_task);
2465 	}
2466 
2467 	if (more == EAGAIN ||
2468 	    (CSR_READ_4(sc, ALC_INTR_STATUS) & ALC_INTRS) != 0) {
2469 		taskqueue_enqueue(sc->alc_tq, &sc->alc_int_task);
2470 		return;
2471 	}
2472 
2473 done:
2474 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2475 		/* Re-enable interrupts if we're running. */
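		/*
		 * Writing 0x7FFFFFFF presumably acknowledges all
		 * sources while leaving the INTR_DIS_INT bit clear so
		 * that interrupts are delivered again.
		 */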
2476 		CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF);
2477 	}
2478 }
2479 
2480 static void
2481 alc_txeof(struct alc_softc *sc)
2482 {
2483 	struct ifnet *ifp;
2484 	struct alc_txdesc *txd;
2485 	uint32_t cons, prod;
2486 	int prog;
2487 
2488 	ALC_LOCK_ASSERT(sc);
2489 
2490 	ifp = sc->alc_ifp;
2491 
2492 	if (sc->alc_cdata.alc_tx_cnt == 0)
2493 		return;
2494 	bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
2495 	    sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_POSTWRITE);
2496 	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
2497 		bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag,
2498 		    sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_POSTREAD);
2499 		prod = sc->alc_rdata.alc_cmb->cons;
2500 	} else
2501 		prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX);
2502 	/* Assume we're using normal Tx priority queue. */
2503 	prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >>
2504 	    MBOX_TD_CONS_LO_IDX_SHIFT;
2505 	cons = sc->alc_cdata.alc_tx_cons;
2506 	/*
2507 	 * Go through our Tx list and free mbufs for those
2508 	 * frames which have been transmitted.
2509 	 */
2510 	for (prog = 0; cons != prod; prog++,
2511 	    ALC_DESC_INC(cons, ALC_TX_RING_CNT)) {
2512 		if (sc->alc_cdata.alc_tx_cnt <= 0)
2513 			break;
2515 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2516 		sc->alc_cdata.alc_tx_cnt--;
2517 		txd = &sc->alc_cdata.alc_txdesc[cons];
2518 		if (txd->tx_m != NULL) {
2519 			/* Reclaim transmitted mbufs. */
2520 			bus_dmamap_sync(sc->alc_cdata.alc_tx_tag,
2521 			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2522 			bus_dmamap_unload(sc->alc_cdata.alc_tx_tag,
2523 			    txd->tx_dmamap);
2524 			m_freem(txd->tx_m);
2525 			txd->tx_m = NULL;
2526 		}
2527 	}
2528 
2529 	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
2530 		bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag,
2531 		    sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_PREREAD);
2532 	sc->alc_cdata.alc_tx_cons = cons;
2533 	/*
	 * Unarm the watchdog timer only when there are no pending
	 * frames in the Tx queue.
2536 	 */
2537 	if (sc->alc_cdata.alc_tx_cnt == 0)
2538 		sc->alc_watchdog_timer = 0;
2539 }
2540 
2541 static int
2542 alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd)
2543 {
2544 	struct mbuf *m;
2545 	bus_dma_segment_t segs[1];
2546 	bus_dmamap_t map;
2547 	int nsegs;
2548 
2549 	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2550 	if (m == NULL)
2551 		return (ENOBUFS);
2552 	m->m_len = m->m_pkthdr.len = RX_BUF_SIZE_MAX;
2553 #ifndef __NO_STRICT_ALIGNMENT
2554 	m_adj(m, sizeof(uint64_t));
2555 #endif
2556 
2557 	if (bus_dmamap_load_mbuf_sg(sc->alc_cdata.alc_rx_tag,
2558 	    sc->alc_cdata.alc_rx_sparemap, m, segs, &nsegs, 0) != 0) {
2559 		m_freem(m);
2560 		return (ENOBUFS);
2561 	}
2562 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2563 
2564 	if (rxd->rx_m != NULL) {
2565 		bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap,
2566 		    BUS_DMASYNC_POSTREAD);
2567 		bus_dmamap_unload(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap);
2568 	}
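	/*
	 * Swap in the spare dmamap: the map just loaded becomes this
	 * descriptor's map and the old map becomes the new spare.
	 */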
2569 	map = rxd->rx_dmamap;
2570 	rxd->rx_dmamap = sc->alc_cdata.alc_rx_sparemap;
2571 	sc->alc_cdata.alc_rx_sparemap = map;
2572 	bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap,
2573 	    BUS_DMASYNC_PREREAD);
2574 	rxd->rx_m = m;
2575 	rxd->rx_desc->addr = htole64(segs[0].ds_addr);
2576 	return (0);
2577 }
2578 
2579 static int
2580 alc_rxintr(struct alc_softc *sc, int count)
2581 {
2582 	struct ifnet *ifp;
2583 	struct rx_rdesc *rrd;
2584 	uint32_t nsegs, status;
2585 	int rr_cons, prog;
2586 
2587 	bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
2588 	    sc->alc_cdata.alc_rr_ring_map,
2589 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2590 	bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
2591 	    sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_POSTWRITE);
2592 	rr_cons = sc->alc_cdata.alc_rr_cons;
2593 	ifp = sc->alc_ifp;
2594 	for (prog = 0; (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;) {
2595 		if (count-- <= 0)
2596 			break;
2597 		rrd = &sc->alc_rdata.alc_rr_ring[rr_cons];
2598 		status = le32toh(rrd->status);
2599 		if ((status & RRD_VALID) == 0)
2600 			break;
2601 		nsegs = RRD_RD_CNT(le32toh(rrd->rdinfo));
2602 		if (nsegs == 0) {
2603 			/* This should not happen! */
2604 			device_printf(sc->alc_dev,
2605 			    "unexpected segment count -- resetting\n");
2606 			return (EIO);
2607 		}
2608 		alc_rxeof(sc, rrd);
2609 		/* Clear Rx return status. */
2610 		rrd->status = 0;
2611 		ALC_DESC_INC(rr_cons, ALC_RR_RING_CNT);
2612 		sc->alc_cdata.alc_rx_cons += nsegs;
2613 		sc->alc_cdata.alc_rx_cons %= ALC_RR_RING_CNT;
2614 		prog += nsegs;
2615 	}
2616 
2617 	if (prog > 0) {
2618 		/* Update the consumer index. */
2619 		sc->alc_cdata.alc_rr_cons = rr_cons;
2620 		/* Sync Rx return descriptors. */
2621 		bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
2622 		    sc->alc_cdata.alc_rr_ring_map,
2623 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2624 		/*
		 * Sync the updated Rx descriptors so that the controller
		 * sees the modified buffer addresses.
2627 		 */
2628 		bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
2629 		    sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE);
2630 		/*
		 * Let the controller know about the new Rx buffers.
		 * Since alc(4) uses RXQ_CFG_RD_BURST_DEFAULT descriptors
		 * it may be possible to update ALC_MBOX_RD0_PROD_IDX
		 * only when Rx buffer pre-fetching is required. In
		 * addition we already set ALC_RX_RD_FREE_THRESH to
		 * RX_RD_FREE_THRESH_LO_DEFAULT descriptors. However,
		 * it still seems that pre-fetching needs more
		 * experimentation.
2639 		 */
2640 		CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX,
2641 		    sc->alc_cdata.alc_rx_cons);
2642 	}
2643 
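	/*
	 * EAGAIN tells alc_int_task() that the processing limit was
	 * hit and more received frames may still be pending.
	 */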
2644 	return (count > 0 ? 0 : EAGAIN);
2645 }
2646 
2647 #ifndef __NO_STRICT_ALIGNMENT
2648 static struct mbuf *
2649 alc_fixup_rx(struct ifnet *ifp, struct mbuf *m)
2650 {
2651 	struct mbuf *n;
	int i;
	uint16_t *src, *dst;
2654 
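	/*
	 * alc_newbuf() placed the payload at an 8-byte offset;
	 * copying the frame back by 6 bytes (3 words) leaves a 2-byte
	 * offset, so the IP header after the 14-byte Ethernet header
	 * lands on a 4-byte boundary.
	 */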
2655 	src = mtod(m, uint16_t *);
2656 	dst = src - 3;
2657 
2658 	if (m->m_next == NULL) {
2659 		for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
2660 			*dst++ = *src++;
2661 		m->m_data -= 6;
2662 		return (m);
2663 	}
2664 	/*
2665 	 * Append a new mbuf to received mbuf chain and copy ethernet
2666 	 * header from the mbuf chain. This can save lots of CPU
	 * cycles for jumbo frames.
2668 	 */
2669 	MGETHDR(n, M_DONTWAIT, MT_DATA);
2670 	if (n == NULL) {
2671 		ifp->if_iqdrops++;
2672 		m_freem(m);
2673 		return (NULL);
2674 	}
2675 	bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
2676 	m->m_data += ETHER_HDR_LEN;
2677 	m->m_len -= ETHER_HDR_LEN;
2678 	n->m_len = ETHER_HDR_LEN;
2679 	M_MOVE_PKTHDR(n, m);
2680 	n->m_next = m;
2681 	return (n);
2682 }
2683 #endif
2684 
2685 /* Receive a frame. */
2686 static void
2687 alc_rxeof(struct alc_softc *sc, struct rx_rdesc *rrd)
2688 {
2689 	struct alc_rxdesc *rxd;
2690 	struct ifnet *ifp;
2691 	struct mbuf *mp, *m;
2692 	uint32_t rdinfo, status, vtag;
2693 	int count, nsegs, rx_cons;
2694 
2695 	ifp = sc->alc_ifp;
2696 	status = le32toh(rrd->status);
2697 	rdinfo = le32toh(rrd->rdinfo);
2698 	rx_cons = RRD_RD_IDX(rdinfo);
2699 	nsegs = RRD_RD_CNT(rdinfo);
2700 
2701 	sc->alc_cdata.alc_rxlen = RRD_BYTES(status);
2702 	if ((status & (RRD_ERR_SUM | RRD_ERR_LENGTH)) != 0) {
2703 		/*
		 * We want to pass the following frames to the upper
		 * layer regardless of the error status of the Rx
		 * return ring.
		 *
		 *  o IP/TCP/UDP checksum is bad.
		 *  o frame length and protocol specific length
		 *    do not match.
		 *
		 * Force the network stack to compute checksums for
		 * errored frames.
2714 		 */
2715 		status |= RRD_TCP_UDPCSUM_NOK | RRD_IPCSUM_NOK;
		if ((status & (RRD_ERR_CRC | RRD_ERR_ALIGN |
		    RRD_ERR_TRUNC | RRD_ERR_RUNT)) != 0)
2718 			return;
2719 	}
2720 
2721 	for (count = 0; count < nsegs; count++,
2722 	    ALC_DESC_INC(rx_cons, ALC_RX_RING_CNT)) {
2723 		rxd = &sc->alc_cdata.alc_rxdesc[rx_cons];
2724 		mp = rxd->rx_m;
2725 		/* Add a new receive buffer to the ring. */
2726 		if (alc_newbuf(sc, rxd) != 0) {
2727 			ifp->if_iqdrops++;
2728 			/* Reuse Rx buffers. */
2729 			if (sc->alc_cdata.alc_rxhead != NULL)
2730 				m_freem(sc->alc_cdata.alc_rxhead);
2731 			break;
2732 		}
2733 
2734 		/*
		 * Assume we've received a full-sized frame. The
		 * actual size is fixed up when we encounter the end
		 * of a multi-segmented frame.
2738 		 */
2739 		mp->m_len = sc->alc_buf_size;
2740 
2741 		/* Chain received mbufs. */
2742 		if (sc->alc_cdata.alc_rxhead == NULL) {
2743 			sc->alc_cdata.alc_rxhead = mp;
2744 			sc->alc_cdata.alc_rxtail = mp;
2745 		} else {
2746 			mp->m_flags &= ~M_PKTHDR;
2747 			sc->alc_cdata.alc_rxprev_tail =
2748 			    sc->alc_cdata.alc_rxtail;
2749 			sc->alc_cdata.alc_rxtail->m_next = mp;
2750 			sc->alc_cdata.alc_rxtail = mp;
2751 		}
2752 
2753 		if (count == nsegs - 1) {
2754 			/* Last desc. for this frame. */
2755 			m = sc->alc_cdata.alc_rxhead;
2756 			m->m_flags |= M_PKTHDR;
2757 			/*
			 * It seems that the L1C/L2C controller has no
			 * way to tell the hardware to strip CRC bytes.
2760 			 */
2761 			m->m_pkthdr.len =
2762 			    sc->alc_cdata.alc_rxlen - ETHER_CRC_LEN;
2763 			if (nsegs > 1) {
2764 				/* Set last mbuf size. */
2765 				mp->m_len = sc->alc_cdata.alc_rxlen -
2766 				    (nsegs - 1) * sc->alc_buf_size;
2767 				/* Remove the CRC bytes in chained mbufs. */
2768 				if (mp->m_len <= ETHER_CRC_LEN) {
2769 					sc->alc_cdata.alc_rxtail =
2770 					    sc->alc_cdata.alc_rxprev_tail;
2771 					sc->alc_cdata.alc_rxtail->m_len -=
2772 					    (ETHER_CRC_LEN - mp->m_len);
2773 					sc->alc_cdata.alc_rxtail->m_next = NULL;
2774 					m_freem(mp);
2775 				} else {
2776 					mp->m_len -= ETHER_CRC_LEN;
2777 				}
2778 			} else
2779 				m->m_len = m->m_pkthdr.len;
2780 			m->m_pkthdr.rcvif = ifp;
2781 			/*
2782 			 * Due to hardware bugs, Rx checksum offloading
2783 			 * was intentionally disabled.
2784 			 */
2785 			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
2786 			    (status & RRD_VLAN_TAG) != 0) {
2787 				vtag = RRD_VLAN(le32toh(rrd->vtag));
2788 				m->m_pkthdr.ether_vtag = ntohs(vtag);
2789 				m->m_flags |= M_VLANTAG;
2790 			}
2791 #ifndef __NO_STRICT_ALIGNMENT
2792 			m = alc_fixup_rx(ifp, m);
2793 			if (m != NULL)
2794 #endif
2795 			{
2796 			/* Pass it on. */
2797 			(*ifp->if_input)(ifp, m);
2798 			}
2799 		}
2800 	}
2801 	/* Reset mbuf chains. */
2802 	ALC_RXCHAIN_RESET(sc);
2803 }
2804 
2805 static void
2806 alc_tick(void *arg)
2807 {
2808 	struct alc_softc *sc;
2809 	struct mii_data *mii;
2810 
2811 	sc = (struct alc_softc *)arg;
2812 
2813 	ALC_LOCK_ASSERT(sc);
2814 
2815 	mii = device_get_softc(sc->alc_miibus);
2816 	mii_tick(mii);
2817 	alc_stats_update(sc);
2818 	/*
	 * alc(4) does not rely on Tx completion interrupts to reclaim
	 * transmitted buffers. Instead, Tx completion interrupts are
	 * used as a hint for scheduling the Tx task. So it's necessary
	 * to release transmitted buffers by kicking the Tx completion
	 * handler here. This limits the maximum reclamation delay to
	 * one second.
2824 	 */
2825 	alc_txeof(sc);
2826 	alc_watchdog(sc);
2827 	callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc);
2828 }
2829 
2830 static void
2831 alc_reset(struct alc_softc *sc)
2832 {
2833 	uint32_t reg;
2834 	int i;
2835 
2836 	CSR_WRITE_4(sc, ALC_MASTER_CFG, MASTER_RESET);
2837 	for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
2838 		DELAY(10);
2839 		if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0)
2840 			break;
2841 	}
2842 	if (i == 0)
2843 		device_printf(sc->alc_dev, "master reset timeout!\n");
2844 
2845 	for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
2846 		if ((reg = CSR_READ_4(sc, ALC_IDLE_STATUS)) == 0)
2847 			break;
2848 		DELAY(10);
2849 	}
2850 
2851 	if (i == 0)
2852 		device_printf(sc->alc_dev, "reset timeout(0x%08x)!\n", reg);
2853 }
2854 
2855 static void
2856 alc_init(void *xsc)
2857 {
2858 	struct alc_softc *sc;
2859 
2860 	sc = (struct alc_softc *)xsc;
2861 	ALC_LOCK(sc);
2862 	alc_init_locked(sc);
2863 	ALC_UNLOCK(sc);
2864 }
2865 
2866 static void
2867 alc_init_locked(struct alc_softc *sc)
2868 {
2869 	struct ifnet *ifp;
2870 	struct mii_data *mii;
2871 	uint8_t eaddr[ETHER_ADDR_LEN];
2872 	bus_addr_t paddr;
2873 	uint32_t reg, rxf_hi, rxf_lo;
2874 
2875 	ALC_LOCK_ASSERT(sc);
2876 
2877 	ifp = sc->alc_ifp;
2878 	mii = device_get_softc(sc->alc_miibus);
2879 
2880 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2881 		return;
2882 	/*
2883 	 * Cancel any pending I/O.
2884 	 */
2885 	alc_stop(sc);
2886 	/*
2887 	 * Reset the chip to a known state.
2888 	 */
2889 	alc_reset(sc);
2890 
2891 	/* Initialize Rx descriptors. */
2892 	if (alc_init_rx_ring(sc) != 0) {
2893 		device_printf(sc->alc_dev, "no memory for Rx buffers.\n");
2894 		alc_stop(sc);
2895 		return;
2896 	}
2897 	alc_init_rr_ring(sc);
2898 	alc_init_tx_ring(sc);
2899 	alc_init_cmb(sc);
2900 	alc_init_smb(sc);
2901 
2902 	/* Reprogram the station address. */
2903 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2904 	CSR_WRITE_4(sc, ALC_PAR0,
2905 	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
2906 	CSR_WRITE_4(sc, ALC_PAR1, eaddr[0] << 8 | eaddr[1]);
2907 	/*
	 * Clear WOL status and disable all WOL features, as WOL
	 * would interfere with Rx operation in normal environments.
2910 	 */
2911 	CSR_READ_4(sc, ALC_WOL_CFG);
2912 	CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
2913 	/* Set Tx descriptor base addresses. */
2914 	paddr = sc->alc_rdata.alc_tx_ring_paddr;
2915 	CSR_WRITE_4(sc, ALC_TX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2916 	CSR_WRITE_4(sc, ALC_TDL_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2917 	/* We don't use high priority ring. */
2918 	CSR_WRITE_4(sc, ALC_TDH_HEAD_ADDR_LO, 0);
2919 	/* Set Tx descriptor counter. */
2920 	CSR_WRITE_4(sc, ALC_TD_RING_CNT,
2921 	    (ALC_TX_RING_CNT << TD_RING_CNT_SHIFT) & TD_RING_CNT_MASK);
2922 	/* Set Rx descriptor base addresses. */
2923 	paddr = sc->alc_rdata.alc_rx_ring_paddr;
2924 	CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2925 	CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2926 	/* We use one Rx ring. */
2927 	CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0);
2928 	CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0);
2929 	CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0);
2930 	/* Set Rx descriptor counter. */
2931 	CSR_WRITE_4(sc, ALC_RD_RING_CNT,
2932 	    (ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK);
2933 
2934 	/*
	 * Let the hardware split jumbo frames into alc_buf_size sized
	 * chunks if they do not fit the buffer size. The Rx return
	 * descriptor holds a counter that indicates how many fragments
	 * were made by the hardware. The buffer size should be a
	 * multiple of 8 bytes. Since the hardware limits the buffer
	 * size, always use the maximum value.
	 * For strict-alignment architectures make sure to reduce the
	 * buffer size by 8 bytes to make room for the alignment fixup.
2943 	 */
2944 #ifndef __NO_STRICT_ALIGNMENT
2945 	sc->alc_buf_size = RX_BUF_SIZE_MAX - sizeof(uint64_t);
2946 #else
2947 	sc->alc_buf_size = RX_BUF_SIZE_MAX;
2948 #endif
2949 	CSR_WRITE_4(sc, ALC_RX_BUF_SIZE, sc->alc_buf_size);
2950 
2951 	paddr = sc->alc_rdata.alc_rr_ring_paddr;
2952 	/* Set Rx return descriptor base addresses. */
2953 	CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2954 	/* We use one Rx return ring. */
2955 	CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0);
2956 	CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0);
2957 	CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0);
2958 	/* Set Rx return descriptor counter. */
2959 	CSR_WRITE_4(sc, ALC_RRD_RING_CNT,
2960 	    (ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK);
2961 	paddr = sc->alc_rdata.alc_cmb_paddr;
2962 	CSR_WRITE_4(sc, ALC_CMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
2963 	paddr = sc->alc_rdata.alc_smb_paddr;
2964 	CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2965 	CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
2966 
2967 	/* Tell hardware that we're ready to load DMA blocks. */
2968 	CSR_WRITE_4(sc, ALC_DMA_BLOCK, DMA_BLOCK_LOAD);
2969 
2970 	/* Configure interrupt moderation timer. */
2971 	reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT;
2972 	reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT;
2973 	CSR_WRITE_4(sc, ALC_IM_TIMER, reg);
2974 	reg = CSR_READ_4(sc, ALC_MASTER_CFG);
2975 	reg &= ~(MASTER_CHIP_REV_MASK | MASTER_CHIP_ID_MASK);
2976 	/*
	 * We don't want automatic interrupt clearing, as the task
	 * queue for the interrupt needs to know the interrupt status.
2979 	 */
2980 	reg &= ~MASTER_INTR_RD_CLR;
2981 	reg &= ~(MASTER_IM_RX_TIMER_ENB | MASTER_IM_TX_TIMER_ENB);
2982 	if (ALC_USECS(sc->alc_int_rx_mod) != 0)
2983 		reg |= MASTER_IM_RX_TIMER_ENB;
2984 	if (ALC_USECS(sc->alc_int_tx_mod) != 0)
2985 		reg |= MASTER_IM_TX_TIMER_ENB;
2986 	CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
2987 	/*
2988 	 * Disable interrupt re-trigger timer. We don't want automatic
2989 	 * re-triggering of un-ACKed interrupts.
2990 	 */
2991 	CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0));
2992 	/* Configure CMB. */
2993 	CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4);
2994 	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
2995 		CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000));
2996 	else
2997 		CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0));
2998 	/*
	 * Hardware can be configured to issue an SMB interrupt based
	 * on a programmed interval. Since there is a callout that is
	 * invoked every second in the driver we use that instead of
	 * relying on the periodic SMB interrupt.
3003 	 */
3004 	CSR_WRITE_4(sc, ALC_SMB_STAT_TIMER, ALC_USECS(0));
3005 	/* Clear MAC statistics. */
3006 	alc_stats_clear(sc);
3007 
3008 	/*
	 * Always use the maximum frame size the controller can support.
	 * Otherwise received frames with a larger frame length than
	 * the alc(4) MTU would be silently dropped in hardware, which
	 * would make path-MTU discovery hard as the sender wouldn't
	 * get any responses from the receiver. alc(4) supports
	 * multi-fragmented frames on the Rx path so it has no issue
	 * assembling fragmented frames. Using the maximum frame size
	 * also removes the need to reinitialize the hardware when the
	 * interface MTU configuration is changed.
3018 	 *
3019 	 * Be conservative in what you do, be liberal in what you
3020 	 * accept from others - RFC 793.
3021 	 */
3022 	CSR_WRITE_4(sc, ALC_FRAME_SIZE, ALC_JUMBO_FRAMELEN);
3023 
3024 	/* Disable header split(?) */
3025 	CSR_WRITE_4(sc, ALC_HDS_CFG, 0);
3026 
3027 	/* Configure IPG/IFG parameters. */
3028 	CSR_WRITE_4(sc, ALC_IPG_IFG_CFG,
3029 	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK) |
3030 	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
3031 	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
3032 	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK));
3033 	/* Set parameters for half-duplex media. */
3034 	CSR_WRITE_4(sc, ALC_HDPX_CFG,
3035 	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
3036 	    HDPX_CFG_LCOL_MASK) |
3037 	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
3038 	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
3039 	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
3040 	    HDPX_CFG_ABEBT_MASK) |
3041 	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
3042 	    HDPX_CFG_JAMIPG_MASK));
3043 	/*
	 * Set the TSO/checksum offload threshold. For frames larger
	 * than this threshold, the hardware won't do TSO/checksum
	 * offloading.
3047 	 */
3048 	CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH,
3049 	    (ALC_JUMBO_FRAMELEN >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) &
3050 	    TSO_OFFLOAD_THRESH_MASK);
3051 	/* Configure TxQ. */
3052 	reg = (alc_dma_burst[sc->alc_dma_rd_burst] <<
3053 	    TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK;
3054 	reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) &
3055 	    TXQ_CFG_TD_BURST_MASK;
3056 	CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE);
3057 
3058 	/* Configure Rx free descriptor pre-fetching. */
3059 	CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH,
3060 	    ((RX_RD_FREE_THRESH_HI_DEFAULT << RX_RD_FREE_THRESH_HI_SHIFT) &
3061 	    RX_RD_FREE_THRESH_HI_MASK) |
3062 	    ((RX_RD_FREE_THRESH_LO_DEFAULT << RX_RD_FREE_THRESH_LO_SHIFT) &
3063 	    RX_RD_FREE_THRESH_LO_MASK));
3064 
3065 	/*
3066 	 * Configure flow control parameters.
3067 	 * XON  : 80% of Rx FIFO
3068 	 * XOFF : 30% of Rx FIFO
3069 	 */
3070 	reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
3071 	rxf_hi = (reg * 8) / 10;
	rxf_lo = (reg * 3) / 10;
3073 	CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH,
3074 	    ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
3075 	    RX_FIFO_PAUSE_THRESH_LO_MASK) |
3076 	    ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
	    RX_FIFO_PAUSE_THRESH_HI_MASK));
3078 
3079 	/* Disable RSS until I understand L1C/L2C's RSS logic. */
3080 	CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0);
3081 	CSR_WRITE_4(sc, ALC_RSS_CPU, 0);
3082 
3083 	/* Configure RxQ. */
3084 	reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
3085 	    RXQ_CFG_RD_BURST_MASK;
3086 	reg |= RXQ_CFG_RSS_MODE_DIS;
3087 	if ((sc->alc_flags & ALC_FLAG_ASPM_MON) != 0)
3088 		reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M;
3089 	CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
3090 
	/* Configure Rx DMAW request threshold. */
3092 	CSR_WRITE_4(sc, ALC_RD_DMA_CFG,
3093 	    ((RD_DMA_CFG_THRESH_DEFAULT << RD_DMA_CFG_THRESH_SHIFT) &
3094 	    RD_DMA_CFG_THRESH_MASK) |
3095 	    ((ALC_RD_DMA_CFG_USECS(0) << RD_DMA_CFG_TIMER_SHIFT) &
3096 	    RD_DMA_CFG_TIMER_MASK));
3097 	/* Configure DMA parameters. */
3098 	reg = DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI;
3099 	reg |= sc->alc_rcb;
3100 	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
3101 		reg |= DMA_CFG_CMB_ENB;
3102 	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0)
3103 		reg |= DMA_CFG_SMB_ENB;
3104 	else
3105 		reg |= DMA_CFG_SMB_DIS;
3106 	reg |= (sc->alc_dma_rd_burst & DMA_CFG_RD_BURST_MASK) <<
3107 	    DMA_CFG_RD_BURST_SHIFT;
3108 	reg |= (sc->alc_dma_wr_burst & DMA_CFG_WR_BURST_MASK) <<
3109 	    DMA_CFG_WR_BURST_SHIFT;
3110 	reg |= (DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
3111 	    DMA_CFG_RD_DELAY_CNT_MASK;
3112 	reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
3113 	    DMA_CFG_WR_DELAY_CNT_MASK;
3114 	CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
3115 
3116 	/*
3117 	 * Configure Tx/Rx MACs.
3118 	 *  - Auto-padding for short frames.
3119 	 *  - Enable CRC generation.
	 *  Actual reconfiguration of the MAC for the resolved
	 *  speed/duplex follows once link establishment is detected.
	 *  AR8131/AR8132 always does checksum computation regardless
	 *  of the MAC_CFG_RXCSUM_ENB bit. The controller is also known
	 *  to have a bug in the protocol field of the Rx return
	 *  structure, so these controllers can't handle fragmented
	 *  frames. Disable Rx checksum offloading until there is a
	 *  newer controller with a sane implementation.
3128 	 */
3129 	reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
3130 	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
3131 	    MAC_CFG_PREAMBLE_MASK);
3132 	if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0)
3133 		reg |= MAC_CFG_SPEED_10_100;
3134 	else
3135 		reg |= MAC_CFG_SPEED_1000;
3136 	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
3137 
3138 	/* Set up the receive filter. */
3139 	alc_rxfilter(sc);
3140 	alc_rxvlan(sc);
3141 
	/* Acknowledge all pending interrupts and clear them. */
3143 	CSR_WRITE_4(sc, ALC_INTR_MASK, ALC_INTRS);
3144 	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
3145 	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0);
3146 
3147 	sc->alc_flags &= ~ALC_FLAG_LINK;
3148 	/* Switch to the current media. */
3149 	mii_mediachg(mii);
3150 
3151 	callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc);
3152 
3153 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3154 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3155 }
3156 
3157 static void
3158 alc_stop(struct alc_softc *sc)
3159 {
3160 	struct ifnet *ifp;
3161 	struct alc_txdesc *txd;
3162 	struct alc_rxdesc *rxd;
3163 	uint32_t reg;
3164 	int i;
3165 
3166 	ALC_LOCK_ASSERT(sc);
3167 	/*
3168 	 * Mark the interface down and cancel the watchdog timer.
3169 	 */
3170 	ifp = sc->alc_ifp;
3171 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3172 	sc->alc_flags &= ~ALC_FLAG_LINK;
3173 	callout_stop(&sc->alc_tick_ch);
3174 	sc->alc_watchdog_timer = 0;
3175 	alc_stats_update(sc);
3176 	/* Disable interrupts. */
3177 	CSR_WRITE_4(sc, ALC_INTR_MASK, 0);
3178 	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
3179 	alc_stop_queue(sc);
3180 	/* Disable DMA. */
3181 	reg = CSR_READ_4(sc, ALC_DMA_CFG);
3182 	reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB);
3183 	reg |= DMA_CFG_SMB_DIS;
3184 	CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
3185 	DELAY(1000);
3186 	/* Stop Rx/Tx MACs. */
3187 	alc_stop_mac(sc);
3188 	/* Disable interrupts which might be touched in taskq handler. */
3189 	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
3190 
3191 	/* Reclaim Rx buffers that have been processed. */
3192 	if (sc->alc_cdata.alc_rxhead != NULL)
3193 		m_freem(sc->alc_cdata.alc_rxhead);
3194 	ALC_RXCHAIN_RESET(sc);
3195 	/*
3196 	 * Free Tx/Rx mbufs still in the queues.
3197 	 */
3198 	for (i = 0; i < ALC_RX_RING_CNT; i++) {
3199 		rxd = &sc->alc_cdata.alc_rxdesc[i];
3200 		if (rxd->rx_m != NULL) {
3201 			bus_dmamap_sync(sc->alc_cdata.alc_rx_tag,
3202 			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3203 			bus_dmamap_unload(sc->alc_cdata.alc_rx_tag,
3204 			    rxd->rx_dmamap);
3205 			m_freem(rxd->rx_m);
3206 			rxd->rx_m = NULL;
3207 		}
3208 	}
3209 	for (i = 0; i < ALC_TX_RING_CNT; i++) {
3210 		txd = &sc->alc_cdata.alc_txdesc[i];
3211 		if (txd->tx_m != NULL) {
3212 			bus_dmamap_sync(sc->alc_cdata.alc_tx_tag,
3213 			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3214 			bus_dmamap_unload(sc->alc_cdata.alc_tx_tag,
3215 			    txd->tx_dmamap);
3216 			m_freem(txd->tx_m);
3217 			txd->tx_m = NULL;
3218 		}
3219 	}
3220 }
3221 
3222 static void
3223 alc_stop_mac(struct alc_softc *sc)
3224 {
3225 	uint32_t reg;
3226 	int i;
3227 
3228 	ALC_LOCK_ASSERT(sc);
3229 
3230 	/* Disable Rx/Tx MAC. */
3231 	reg = CSR_READ_4(sc, ALC_MAC_CFG);
3232 	if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
		reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
3234 		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
3235 	}
3236 	for (i = ALC_TIMEOUT; i > 0; i--) {
3237 		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
3238 		if (reg == 0)
3239 			break;
3240 		DELAY(10);
3241 	}
3242 	if (i == 0)
3243 		device_printf(sc->alc_dev,
3244 		    "could not disable Rx/Tx MAC(0x%08x)!\n", reg);
3245 }
3246 
3247 static void
3248 alc_start_queue(struct alc_softc *sc)
3249 {
3250 	uint32_t qcfg[] = {
3251 		0,
3252 		RXQ_CFG_QUEUE0_ENB,
3253 		RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB,
3254 		RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB,
3255 		RXQ_CFG_ENB
3256 	};
3257 	uint32_t cfg;
3258 
3259 	ALC_LOCK_ASSERT(sc);
3260 
3261 	/* Enable RxQ. */
3262 	cfg = CSR_READ_4(sc, ALC_RXQ_CFG);
3263 	cfg &= ~RXQ_CFG_ENB;
3264 	cfg |= qcfg[1];
3265 	CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg);
3266 	/* Enable TxQ. */
3267 	cfg = CSR_READ_4(sc, ALC_TXQ_CFG);
3268 	cfg |= TXQ_CFG_ENB;
3269 	CSR_WRITE_4(sc, ALC_TXQ_CFG, cfg);
3270 }
3271 
3272 static void
3273 alc_stop_queue(struct alc_softc *sc)
3274 {
3275 	uint32_t reg;
3276 	int i;
3277 
3278 	ALC_LOCK_ASSERT(sc);
3279 
3280 	/* Disable RxQ. */
3281 	reg = CSR_READ_4(sc, ALC_RXQ_CFG);
3282 	if ((reg & RXQ_CFG_ENB) != 0) {
3283 		reg &= ~RXQ_CFG_ENB;
3284 		CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
3285 	}
3286 	/* Disable TxQ. */
3287 	reg = CSR_READ_4(sc, ALC_TXQ_CFG);
3288 	if ((reg & TXQ_CFG_ENB) != 0) {
3289 		reg &= ~TXQ_CFG_ENB;
3290 		CSR_WRITE_4(sc, ALC_TXQ_CFG, reg);
3291 	}
3292 	for (i = ALC_TIMEOUT; i > 0; i--) {
3293 		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
3294 		if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
3295 			break;
3296 		DELAY(10);
3297 	}
3298 	if (i == 0)
3299 		device_printf(sc->alc_dev,
3300 		    "could not disable RxQ/TxQ (0x%08x)!\n", reg);
3301 }
3302 
3303 static void
3304 alc_init_tx_ring(struct alc_softc *sc)
3305 {
3306 	struct alc_ring_data *rd;
3307 	struct alc_txdesc *txd;
3308 	int i;
3309 
3310 	ALC_LOCK_ASSERT(sc);
3311 
3312 	sc->alc_cdata.alc_tx_prod = 0;
3313 	sc->alc_cdata.alc_tx_cons = 0;
3314 	sc->alc_cdata.alc_tx_cnt = 0;
3315 
3316 	rd = &sc->alc_rdata;
3317 	bzero(rd->alc_tx_ring, ALC_TX_RING_SZ);
3318 	for (i = 0; i < ALC_TX_RING_CNT; i++) {
3319 		txd = &sc->alc_cdata.alc_txdesc[i];
3320 		txd->tx_m = NULL;
3321 	}
3322 
3323 	bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
3324 	    sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE);
3325 }
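
/*
 * Illustrative sketch (not in the original driver): alc_tx_prod,
 * alc_tx_cons and alc_tx_cnt form a standard producer/consumer ring;
 * the producer index advances as frames are queued, the consumer index
 * chases it as completions are reaped, and alc_tx_cnt tracks in-flight
 * descriptors.  A hypothetical capacity check built on those fields:
 */
#ifdef notyet
static int
alc_tx_ring_avail(struct alc_softc *sc)
{

	/* Descriptors still free for new frames. */
	return (ALC_TX_RING_CNT - sc->alc_cdata.alc_tx_cnt);
}
#endif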
3326 
3327 static int
3328 alc_init_rx_ring(struct alc_softc *sc)
3329 {
3330 	struct alc_ring_data *rd;
3331 	struct alc_rxdesc *rxd;
3332 	int i;
3333 
3334 	ALC_LOCK_ASSERT(sc);
3335 
3336 	sc->alc_cdata.alc_rx_cons = ALC_RX_RING_CNT - 1;
3337 	sc->alc_morework = 0;
3338 	rd = &sc->alc_rdata;
3339 	bzero(rd->alc_rx_ring, ALC_RX_RING_SZ);
3340 	for (i = 0; i < ALC_RX_RING_CNT; i++) {
3341 		rxd = &sc->alc_cdata.alc_rxdesc[i];
3342 		rxd->rx_m = NULL;
3343 		rxd->rx_desc = &rd->alc_rx_ring[i];
3344 		if (alc_newbuf(sc, rxd) != 0)
3345 			return (ENOBUFS);
3346 	}
3347 
3348 	/*
3349 	 * Since the controller does not update Rx descriptors, the driver
3350 	 * does not have to read them back, so BUS_DMASYNC_PREWRITE is
3351 	 * enough to ensure coherence of the Rx ring.
3352 	 */
3353 	bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
3354 	    sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE);
3355 	/* Let controller know availability of new Rx buffers. */
3356 	CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons);
3357 
3358 	return (0);
3359 }
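
/*
 * Illustrative note (not in the original driver): the sync op used for
 * each ring mirrors its data flow.  The Rx descriptor ring is written
 * only by the host, so BUS_DMASYNC_PREWRITE alone suffices above, while
 * areas the hardware writes back (the return ring, CMB and SMB below)
 * are synced with PREREAD as well:
 *
 *	host writes, device reads -> BUS_DMASYNC_PREWRITE
 *	device writes, host reads -> BUS_DMASYNC_PREREAD
 *	both directions           -> BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE
 */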
3360 
3361 static void
3362 alc_init_rr_ring(struct alc_softc *sc)
3363 {
3364 	struct alc_ring_data *rd;
3365 
3366 	ALC_LOCK_ASSERT(sc);
3367 
3368 	sc->alc_cdata.alc_rr_cons = 0;
3369 	ALC_RXCHAIN_RESET(sc);
3370 
3371 	rd = &sc->alc_rdata;
3372 	bzero(rd->alc_rr_ring, ALC_RR_RING_SZ);
3373 	bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
3374 	    sc->alc_cdata.alc_rr_ring_map,
3375 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3376 }
3377 
3378 static void
3379 alc_init_cmb(struct alc_softc *sc)
3380 {
3381 	struct alc_ring_data *rd;
3382 
3383 	ALC_LOCK_ASSERT(sc);
3384 
3385 	rd = &sc->alc_rdata;
3386 	bzero(rd->alc_cmb, ALC_CMB_SZ);
3387 	bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag, sc->alc_cdata.alc_cmb_map,
3388 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3389 }
3390 
3391 static void
3392 alc_init_smb(struct alc_softc *sc)
3393 {
3394 	struct alc_ring_data *rd;
3395 
3396 	ALC_LOCK_ASSERT(sc);
3397 
3398 	rd = &sc->alc_rdata;
3399 	bzero(rd->alc_smb, ALC_SMB_SZ);
3400 	bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, sc->alc_cdata.alc_smb_map,
3401 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3402 }
3403 
3404 static void
3405 alc_rxvlan(struct alc_softc *sc)
3406 {
3407 	struct ifnet *ifp;
3408 	uint32_t reg;
3409 
3410 	ALC_LOCK_ASSERT(sc);
3411 
3412 	ifp = sc->alc_ifp;
3413 	reg = CSR_READ_4(sc, ALC_MAC_CFG);
3414 	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3415 		reg |= MAC_CFG_VLAN_TAG_STRIP;
3416 	else
3417 		reg &= ~MAC_CFG_VLAN_TAG_STRIP;
3418 	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
3419 }
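
/*
 * Illustrative sketch (not in the original driver): alc_rxvlan() is
 * re-run whenever VLAN hardware tagging is toggled, typically from the
 * SIOCSIFCAP ioctl path.  A minimal sketch of such a call site, with
 * 'mask' assumed to hold the capability bits being flipped:
 *
 *	if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
 *	    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
 *		ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
 *		alc_rxvlan(sc);
 *	}
 */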
3420 
3421 static void
3422 alc_rxfilter(struct alc_softc *sc)
3423 {
3424 	struct ifnet *ifp;
3425 	struct ifmultiaddr *ifma;
3426 	uint32_t crc;
3427 	uint32_t mchash[2];
3428 	uint32_t rxcfg;
3429 
3430 	ALC_LOCK_ASSERT(sc);
3431 
3432 	ifp = sc->alc_ifp;
3433 
3434 	bzero(mchash, sizeof(mchash));
3435 	rxcfg = CSR_READ_4(sc, ALC_MAC_CFG);
3436 	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
3437 	if ((ifp->if_flags & IFF_BROADCAST) != 0)
3438 		rxcfg |= MAC_CFG_BCAST;
3439 	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
3440 		if ((ifp->if_flags & IFF_PROMISC) != 0)
3441 			rxcfg |= MAC_CFG_PROMISC;
3442 		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
3443 			rxcfg |= MAC_CFG_ALLMULTI;
3444 		mchash[0] = 0xFFFFFFFF;
3445 		mchash[1] = 0xFFFFFFFF;
3446 		goto chipit;
3447 	}
3448 
3449 	if_maddr_rlock(ifp);
3450 	TAILQ_FOREACH(ifma, &sc->alc_ifp->if_multiaddrs, ifma_link) {
3451 		if (ifma->ifma_addr->sa_family != AF_LINK)
3452 			continue;
3453 		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
3454 		    ifma->ifma_addr), ETHER_ADDR_LEN);
3455 		mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
3456 	}
3457 	if_maddr_runlock(ifp);
3458 
3459 chipit:
3460 	CSR_WRITE_4(sc, ALC_MAR0, mchash[0]);
3461 	CSR_WRITE_4(sc, ALC_MAR1, mchash[1]);
3462 	CSR_WRITE_4(sc, ALC_MAC_CFG, rxcfg);
3463 }
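
/*
 * Illustrative example (not in the original driver): the filter above
 * hashes each multicast address into a 64-bit table split across the
 * MAR0/MAR1 registers.  Bit 31 of the little-endian CRC selects the
 * 32-bit word and bits 30:26 select the bit within it.  For instance,
 * for an address whose CRC is 0xD4C6AABB, crc >> 31 = 1 and
 * (crc >> 26) & 0x1f = 0x15, so bit 21 of mchash[1] (register ALC_MAR1)
 * would be set.
 */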
3464 
3465 static int
3466 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3467 {
3468 	int error, value;
3469 
3470 	if (arg1 == NULL)
3471 		return (EINVAL);
3472 	value = *(int *)arg1;
3473 	error = sysctl_handle_int(oidp, &value, 0, req);
3474 	if (error || req->newptr == NULL)
3475 		return (error);
3476 	if (value < low || value > high)
3477 		return (EINVAL);
3478 	*(int *)arg1 = value;
3479 
3480 	return (0);
3481 }
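
/*
 * Illustrative sketch (not in the original driver): handlers built on
 * sysctl_int_range() are attached with SYSCTL_ADD_PROC(), which passes
 * the backing integer through arg1.  A hypothetical registration for
 * the Rx process-limit knob (field name assumed, not verbatim):
 *
 *	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->alc_dev),
 *	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->alc_dev)), OID_AUTO,
 *	    "process_limit", CTLTYPE_INT | CTLFLAG_RW,
 *	    &sc->alc_process_limit, 0, sysctl_hw_alc_proc_limit, "I",
 *	    "max number of Rx events to process");
 */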
3482 
3483 static int
3484 sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS)
3485 {
3486 	return (sysctl_int_range(oidp, arg1, arg2, req,
3487 	    ALC_PROC_MIN, ALC_PROC_MAX));
3488 }
3489 
3490 static int
3491 sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS)
3492 {
3493 
3494 	return (sysctl_int_range(oidp, arg1, arg2, req,
3495 	    ALC_IM_TIMER_MIN, ALC_IM_TIMER_MAX));
3496 }
3497