xref: /freebsd/sys/dev/alc/if_alc.c (revision 830940567b49bb0c08dfaed40418999e76616909)
1 /*-
2  * Copyright (c) 2009, Pyun YongHyeon <yongari@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 /* Driver for Atheros AR8131/AR8132 PCIe Ethernet. */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/bus.h>
36 #include <sys/endian.h>
37 #include <sys/kernel.h>
38 #include <sys/lock.h>
39 #include <sys/malloc.h>
40 #include <sys/mbuf.h>
41 #include <sys/module.h>
42 #include <sys/mutex.h>
43 #include <sys/rman.h>
44 #include <sys/queue.h>
45 #include <sys/socket.h>
46 #include <sys/sockio.h>
47 #include <sys/sysctl.h>
48 #include <sys/taskqueue.h>
49 
50 #include <net/bpf.h>
51 #include <net/if.h>
52 #include <net/if_arp.h>
53 #include <net/ethernet.h>
54 #include <net/if_dl.h>
55 #include <net/if_llc.h>
56 #include <net/if_media.h>
57 #include <net/if_types.h>
58 #include <net/if_vlan_var.h>
59 
60 #include <netinet/in.h>
61 #include <netinet/in_systm.h>
62 #include <netinet/ip.h>
63 #include <netinet/tcp.h>
64 
65 #include <dev/mii/mii.h>
66 #include <dev/mii/miivar.h>
67 
68 #include <dev/pci/pcireg.h>
69 #include <dev/pci/pcivar.h>
70 
71 #include <machine/atomic.h>
72 #include <machine/bus.h>
73 #include <machine/in_cksum.h>
74 
75 #include <dev/alc/if_alcreg.h>
76 #include <dev/alc/if_alcvar.h>
77 
78 /* "device miibus" required.  See GENERIC if you get errors here. */
79 #include "miibus_if.h"
80 #undef ALC_USE_CUSTOM_CSUM
81 
82 #ifdef ALC_USE_CUSTOM_CSUM
83 #define	ALC_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)
84 #else
85 #define	ALC_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
86 #endif
87 #ifndef	IFCAP_VLAN_HWTSO
88 #define	IFCAP_VLAN_HWTSO	0
89 #endif
90 
91 MODULE_DEPEND(alc, pci, 1, 1, 1);
92 MODULE_DEPEND(alc, ether, 1, 1, 1);
93 MODULE_DEPEND(alc, miibus, 1, 1, 1);
94 
95 /* Tunables. */
96 static int msi_disable = 0;
97 static int msix_disable = 0;
98 TUNABLE_INT("hw.alc.msi_disable", &msi_disable);
99 TUNABLE_INT("hw.alc.msix_disable", &msix_disable);
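/*
 * Example loader.conf(5) settings to force legacy INTx interrupts
 * (both tunables are read at boot):
 *
 *   hw.alc.msi_disable="1"
 *   hw.alc.msix_disable="1"
 */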
100 
101 /*
102  * Devices supported by this driver.
103  */
104 static struct alc_dev {
105 	uint16_t	alc_vendorid;
106 	uint16_t	alc_deviceid;
107 	const char	*alc_name;
108 } alc_devs[] = {
109 	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8131,
110 		"Atheros AR8131 PCIe Gigabit Ethernet" },
111 	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8132,
112 		"Atheros AR8132 PCIe Fast Ethernet" }
113 };
114 
115 static void	alc_aspm(struct alc_softc *);
116 static int	alc_attach(device_t);
117 static int	alc_check_boundary(struct alc_softc *);
118 static int	alc_detach(device_t);
119 static void	alc_disable_l0s_l1(struct alc_softc *);
120 static int	alc_dma_alloc(struct alc_softc *);
121 static void	alc_dma_free(struct alc_softc *);
122 static void	alc_dmamap_cb(void *, bus_dma_segment_t *, int, int);
123 static int	alc_encap(struct alc_softc *, struct mbuf **);
124 #ifndef __NO_STRICT_ALIGNMENT
125 static struct mbuf *
126 		alc_fixup_rx(struct ifnet *, struct mbuf *);
127 #endif
128 static void	alc_get_macaddr(struct alc_softc *);
129 static void	alc_init(void *);
130 static void	alc_init_cmb(struct alc_softc *);
131 static void	alc_init_locked(struct alc_softc *);
132 static void	alc_init_rr_ring(struct alc_softc *);
133 static int	alc_init_rx_ring(struct alc_softc *);
134 static void	alc_init_smb(struct alc_softc *);
135 static void	alc_init_tx_ring(struct alc_softc *);
136 static void	alc_int_task(void *, int);
137 static int	alc_intr(void *);
138 static int	alc_ioctl(struct ifnet *, u_long, caddr_t);
139 static void	alc_mac_config(struct alc_softc *);
140 static int	alc_miibus_readreg(device_t, int, int);
141 static void	alc_miibus_statchg(device_t);
142 static int	alc_miibus_writereg(device_t, int, int, int);
143 static int	alc_mediachange(struct ifnet *);
144 static void	alc_mediastatus(struct ifnet *, struct ifmediareq *);
145 static int	alc_newbuf(struct alc_softc *, struct alc_rxdesc *);
146 static void	alc_phy_down(struct alc_softc *);
147 static void	alc_phy_reset(struct alc_softc *);
148 static int	alc_probe(device_t);
149 static void	alc_reset(struct alc_softc *);
150 static int	alc_resume(device_t);
151 static void	alc_rxeof(struct alc_softc *, struct rx_rdesc *);
152 static int	alc_rxintr(struct alc_softc *, int);
153 static void	alc_rxfilter(struct alc_softc *);
154 static void	alc_rxvlan(struct alc_softc *);
155 static void	alc_setlinkspeed(struct alc_softc *);
156 static void	alc_setwol(struct alc_softc *);
157 static int	alc_shutdown(device_t);
158 static void	alc_start(struct ifnet *);
159 static void	alc_start_queue(struct alc_softc *);
160 static void	alc_stats_clear(struct alc_softc *);
161 static void	alc_stats_update(struct alc_softc *);
162 static void	alc_stop(struct alc_softc *);
163 static void	alc_stop_mac(struct alc_softc *);
164 static void	alc_stop_queue(struct alc_softc *);
165 static int	alc_suspend(device_t);
166 static void	alc_sysctl_node(struct alc_softc *);
167 static void	alc_tick(void *);
168 static void	alc_tx_task(void *, int);
169 static void	alc_txeof(struct alc_softc *);
170 static void	alc_watchdog(struct alc_softc *);
171 static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
172 static int	sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS);
173 static int	sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS);
174 
175 static device_method_t alc_methods[] = {
176 	/* Device interface. */
177 	DEVMETHOD(device_probe,		alc_probe),
178 	DEVMETHOD(device_attach,	alc_attach),
179 	DEVMETHOD(device_detach,	alc_detach),
180 	DEVMETHOD(device_shutdown,	alc_shutdown),
181 	DEVMETHOD(device_suspend,	alc_suspend),
182 	DEVMETHOD(device_resume,	alc_resume),
183 
184 	/* MII interface. */
185 	DEVMETHOD(miibus_readreg,	alc_miibus_readreg),
186 	DEVMETHOD(miibus_writereg,	alc_miibus_writereg),
187 	DEVMETHOD(miibus_statchg,	alc_miibus_statchg),
188 
189 	{ NULL, NULL }
190 };
191 
192 static driver_t alc_driver = {
193 	"alc",
194 	alc_methods,
195 	sizeof(struct alc_softc)
196 };
197 
198 static devclass_t alc_devclass;
199 
200 DRIVER_MODULE(alc, pci, alc_driver, alc_devclass, 0, 0);
201 DRIVER_MODULE(miibus, alc, miibus_driver, miibus_devclass, 0, 0);
202 
203 static struct resource_spec alc_res_spec_mem[] = {
204 	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
205 	{ -1,			0,		0 }
206 };
207 
208 static struct resource_spec alc_irq_spec_legacy[] = {
209 	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
210 	{ -1,			0,		0 }
211 };
212 
213 static struct resource_spec alc_irq_spec_msi[] = {
214 	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
215 	{ -1,			0,		0 }
216 };
217 
218 static struct resource_spec alc_irq_spec_msix[] = {
219 	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
220 	{ -1,			0,		0 }
221 };
222 
223 static uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0 };
224 
225 static int
226 alc_miibus_readreg(device_t dev, int phy, int reg)
227 {
228 	struct alc_softc *sc;
229 	uint32_t v;
230 	int i;
231 
232 	sc = device_get_softc(dev);
233 
234 	if (phy != sc->alc_phyaddr)
235 		return (0);
236 
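	/*
	 * Kick off the read, then poll until the controller clears the
	 * EXECUTE/BUSY bits; each iteration waits 5us, bounded by
	 * ALC_PHY_TIMEOUT iterations.
	 */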
237 	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
238 	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
239 	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
240 		DELAY(5);
241 		v = CSR_READ_4(sc, ALC_MDIO);
242 		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
243 			break;
244 	}
245 
246 	if (i == 0) {
247 		device_printf(sc->alc_dev, "phy read timeout : %d\n", reg);
248 		return (0);
249 	}
250 
251 	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
252 }
253 
254 static int
255 alc_miibus_writereg(device_t dev, int phy, int reg, int val)
256 {
257 	struct alc_softc *sc;
258 	uint32_t v;
259 	int i;
260 
261 	sc = device_get_softc(dev);
262 
263 	if (phy != sc->alc_phyaddr)
264 		return (0);
265 
266 	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
267 	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
268 	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
269 	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
270 		DELAY(5);
271 		v = CSR_READ_4(sc, ALC_MDIO);
272 		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
273 			break;
274 	}
275 
276 	if (i == 0)
277 		device_printf(sc->alc_dev, "phy write timeout : %d\n", reg);
278 
279 	return (0);
280 }
281 
282 static void
283 alc_miibus_statchg(device_t dev)
284 {
285 	struct alc_softc *sc;
286 	struct mii_data *mii;
287 	struct ifnet *ifp;
288 	uint32_t reg;
289 
290 	sc = device_get_softc(dev);
291 
292 	mii = device_get_softc(sc->alc_miibus);
293 	ifp = sc->alc_ifp;
294 	if (mii == NULL || ifp == NULL ||
295 	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
296 		return;
297 
298 	sc->alc_flags &= ~ALC_FLAG_LINK;
299 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
300 	    (IFM_ACTIVE | IFM_AVALID)) {
301 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
302 		case IFM_10_T:
303 		case IFM_100_TX:
304 			sc->alc_flags |= ALC_FLAG_LINK;
305 			break;
306 		case IFM_1000_T:
307 			if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
308 				sc->alc_flags |= ALC_FLAG_LINK;
309 			break;
310 		default:
311 			break;
312 		}
313 	}
314 	alc_stop_queue(sc);
315 	/* Stop Rx/Tx MACs. */
316 	alc_stop_mac(sc);
317 
318 	/* Program MACs with resolved speed/duplex/flow-control. */
319 	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
320 		alc_start_queue(sc);
321 		alc_mac_config(sc);
322 		/* Re-enable Tx/Rx MACs. */
323 		reg = CSR_READ_4(sc, ALC_MAC_CFG);
324 		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
325 		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
326 	}
327 	alc_aspm(sc);
328 }
329 
330 static void
331 alc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
332 {
333 	struct alc_softc *sc;
334 	struct mii_data *mii;
335 
336 	sc = ifp->if_softc;
337 	ALC_LOCK(sc);
338 	if ((ifp->if_flags & IFF_UP) == 0) {
339 		ALC_UNLOCK(sc);
340 		return;
341 	}
342 	mii = device_get_softc(sc->alc_miibus);
343 
344 	mii_pollstat(mii);
345 	ALC_UNLOCK(sc);
346 	ifmr->ifm_status = mii->mii_media_status;
347 	ifmr->ifm_active = mii->mii_media_active;
348 }
349 
350 static int
351 alc_mediachange(struct ifnet *ifp)
352 {
353 	struct alc_softc *sc;
354 	struct mii_data *mii;
355 	struct mii_softc *miisc;
356 	int error;
357 
358 	sc = ifp->if_softc;
359 	ALC_LOCK(sc);
360 	mii = device_get_softc(sc->alc_miibus);
361 	if (mii->mii_instance != 0) {
362 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
363 			mii_phy_reset(miisc);
364 	}
365 	error = mii_mediachg(mii);
366 	ALC_UNLOCK(sc);
367 
368 	return (error);
369 }
370 
371 static int
372 alc_probe(device_t dev)
373 {
374 	struct alc_dev *sp;
375 	int i;
376 	uint16_t vendor, devid;
377 
378 	vendor = pci_get_vendor(dev);
379 	devid = pci_get_device(dev);
380 	sp = alc_devs;
381 	for (i = 0; i < sizeof(alc_devs) / sizeof(alc_devs[0]); i++) {
382 		if (vendor == sp->alc_vendorid &&
383 		    devid == sp->alc_deviceid) {
384 			device_set_desc(dev, sp->alc_name);
385 			return (BUS_PROBE_DEFAULT);
386 		}
387 		sp++;
388 	}
389 
390 	return (ENXIO);
391 }
392 
393 static void
394 alc_get_macaddr(struct alc_softc *sc)
395 {
396 	uint32_t ea[2], opt;
397 	int i;
398 
399 	opt = CSR_READ_4(sc, ALC_OPT_CFG);
400 	if ((CSR_READ_4(sc, ALC_TWSI_DEBUG) & TWSI_DEBUG_DEV_EXIST) != 0) {
401 		/*
402 		 * EEPROM found, let TWSI reload EEPROM configuration.
403 		 * This will set the controller's ethernet address.
404 		 */
405 		if ((opt & OPT_CFG_CLK_ENB) == 0) {
406 			opt |= OPT_CFG_CLK_ENB;
407 			CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
408 			CSR_READ_4(sc, ALC_OPT_CFG);
409 			DELAY(1000);
410 		}
411 		CSR_WRITE_4(sc, ALC_TWSI_CFG, CSR_READ_4(sc, ALC_TWSI_CFG) |
412 		    TWSI_CFG_SW_LD_START);
413 		for (i = 100; i > 0; i--) {
414 			DELAY(1000);
415 			if ((CSR_READ_4(sc, ALC_TWSI_CFG) &
416 			    TWSI_CFG_SW_LD_START) == 0)
417 				break;
418 		}
419 		if (i == 0)
420 			device_printf(sc->alc_dev,
421 			    "EEPROM reload timed out!\n");
422 	} else {
423 		if (bootverbose)
424 			device_printf(sc->alc_dev, "EEPROM not found!\n");
425 	}
426 	if ((opt & OPT_CFG_CLK_ENB) != 0) {
427 		opt &= ~OPT_CFG_CLK_ENB;
428 		CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
429 		CSR_READ_4(sc, ALC_OPT_CFG);
430 		DELAY(1000);
431 	}
432 
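	/*
	 * The station address is split across two registers: PAR1
	 * holds the two most significant octets and PAR0 the remaining
	 * four, unpacked byte by byte below.
	 */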
433 	ea[0] = CSR_READ_4(sc, ALC_PAR0);
434 	ea[1] = CSR_READ_4(sc, ALC_PAR1);
435 	sc->alc_eaddr[0] = (ea[1] >> 8) & 0xFF;
436 	sc->alc_eaddr[1] = (ea[1] >> 0) & 0xFF;
437 	sc->alc_eaddr[2] = (ea[0] >> 24) & 0xFF;
438 	sc->alc_eaddr[3] = (ea[0] >> 16) & 0xFF;
439 	sc->alc_eaddr[4] = (ea[0] >> 8) & 0xFF;
440 	sc->alc_eaddr[5] = (ea[0] >> 0) & 0xFF;
441 }
442 
443 static void
444 alc_disable_l0s_l1(struct alc_softc *sc)
445 {
446 	uint32_t pmcfg;
447 
448 	/* Another bit of magic from the vendor. */
449 	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
450 	pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 |
451 	    PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK |
452 	    PM_CFG_SERDES_PD_EX_L1);
453 	pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB |
454 	    PM_CFG_SERDES_L1_ENB;
455 	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
456 }
457 
458 static void
459 alc_phy_reset(struct alc_softc *sc)
460 {
461 	uint16_t data;
462 
463 	/* Reset magic from Linux. */
464 	CSR_WRITE_2(sc, ALC_GPHY_CFG,
465 	    GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE | GPHY_CFG_SEL_ANA_RESET);
466 	CSR_READ_2(sc, ALC_GPHY_CFG);
467 	DELAY(10 * 1000);
468 
469 	CSR_WRITE_2(sc, ALC_GPHY_CFG,
470 	    GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
471 	    GPHY_CFG_SEL_ANA_RESET);
472 	CSR_READ_2(sc, ALC_GPHY_CFG);
473 	DELAY(10 * 1000);
474 
475 	/* Load DSP codes, vendor magic. */
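	/*
	 * Each write pair below selects an analog configuration
	 * register via ALC_MII_DBG_ADDR and then stores its value via
	 * ALC_MII_DBG_DATA.
	 */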
476 	data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
477 	    ((1 << ANA_INTERVAL_SEL_TIMER_SHIFT) & ANA_INTERVAL_SEL_TIMER_MASK);
478 	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
479 	    ALC_MII_DBG_ADDR, MII_ANA_CFG18);
480 	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
481 	    ALC_MII_DBG_DATA, data);
482 
483 	data = ((2 << ANA_SERDES_CDR_BW_SHIFT) & ANA_SERDES_CDR_BW_MASK) |
484 	    ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
485 	    ANA_SERDES_EN_LCKDT;
486 	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
487 	    ALC_MII_DBG_ADDR, MII_ANA_CFG5);
488 	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
489 	    ALC_MII_DBG_DATA, data);
490 
491 	data = ((44 << ANA_LONG_CABLE_TH_100_SHIFT) &
492 	    ANA_LONG_CABLE_TH_100_MASK) |
493 	    ((33 << ANA_SHORT_CABLE_TH_100_SHIFT) &
494 	    ANA_SHORT_CABLE_TH_100_MASK) |
495 	    ANA_BP_BAD_LINK_ACCUM | ANA_BP_SMALL_BW;
496 	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
497 	    ALC_MII_DBG_ADDR, MII_ANA_CFG54);
498 	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
499 	    ALC_MII_DBG_DATA, data);
500 
501 	data = ((11 << ANA_IECHO_ADJ_3_SHIFT) & ANA_IECHO_ADJ_3_MASK) |
502 	    ((11 << ANA_IECHO_ADJ_2_SHIFT) & ANA_IECHO_ADJ_2_MASK) |
503 	    ((8 << ANA_IECHO_ADJ_1_SHIFT) & ANA_IECHO_ADJ_1_MASK) |
504 	    ((8 << ANA_IECHO_ADJ_0_SHIFT) & ANA_IECHO_ADJ_0_MASK);
505 	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
506 	    ALC_MII_DBG_ADDR, MII_ANA_CFG4);
507 	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
508 	    ALC_MII_DBG_DATA, data);
509 
510 	data = ((7 << ANA_MANUL_SWICH_ON_SHIFT) & ANA_MANUL_SWICH_ON_MASK) |
511 	    ANA_RESTART_CAL | ANA_MAN_ENABLE | ANA_SEL_HSP | ANA_EN_HB |
512 	    ANA_OEN_125M;
513 	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
514 	    ALC_MII_DBG_ADDR, MII_ANA_CFG0);
515 	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
516 	    ALC_MII_DBG_DATA, data);
517 	DELAY(1000);
518 }
519 
520 static void
521 alc_phy_down(struct alc_softc *sc)
522 {
523 
524 	/* Force PHY down. */
525 	CSR_WRITE_2(sc, ALC_GPHY_CFG,
526 	    GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
527 	    GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ | GPHY_CFG_PWDOWN_HW);
528 	DELAY(1000);
529 }
530 
531 static void
532 alc_aspm(struct alc_softc *sc)
533 {
534 	uint32_t pmcfg;
535 
536 	ALC_LOCK_ASSERT(sc);
537 
538 	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
539 	pmcfg &= ~PM_CFG_SERDES_PD_EX_L1;
540 	pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB;
541 	pmcfg |= PM_CFG_SERDES_L1_ENB;
542 	pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_MASK;
543 	pmcfg |= PM_CFG_MAC_ASPM_CHK;
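	/*
	 * With a link, keep the SerDes PLL running in L1; without one,
	 * let the controller switch clocks in L1 instead. ASPM L0s/L1
	 * entry itself remains disabled either way.
	 */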
544 	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
545 		pmcfg |= PM_CFG_SERDES_PLL_L1_ENB;
546 		pmcfg &= ~PM_CFG_CLK_SWH_L1;
547 		pmcfg &= ~PM_CFG_ASPM_L1_ENB;
548 		pmcfg &= ~PM_CFG_ASPM_L0S_ENB;
549 	} else {
550 		pmcfg &= ~PM_CFG_SERDES_PLL_L1_ENB;
551 		pmcfg |= PM_CFG_CLK_SWH_L1;
552 		pmcfg &= ~PM_CFG_ASPM_L1_ENB;
553 		pmcfg &= ~PM_CFG_ASPM_L0S_ENB;
554 	}
555 	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
556 }
557 
558 static int
559 alc_attach(device_t dev)
560 {
561 	struct alc_softc *sc;
562 	struct ifnet *ifp;
563 	char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/L1" };
564 	uint16_t burst;
565 	int base, error, i, msic, msixc, pmc, state;
566 	uint32_t cap, ctl, val;
567 
568 	error = 0;
569 	sc = device_get_softc(dev);
570 	sc->alc_dev = dev;
571 
572 	mtx_init(&sc->alc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
573 	    MTX_DEF);
574 	callout_init_mtx(&sc->alc_tick_ch, &sc->alc_mtx, 0);
575 	TASK_INIT(&sc->alc_int_task, 0, alc_int_task, sc);
576 
577 	/* Map the device. */
578 	pci_enable_busmaster(dev);
579 	sc->alc_res_spec = alc_res_spec_mem;
580 	sc->alc_irq_spec = alc_irq_spec_legacy;
581 	error = bus_alloc_resources(dev, sc->alc_res_spec, sc->alc_res);
582 	if (error != 0) {
583 		device_printf(dev, "cannot allocate memory resources.\n");
584 		goto fail;
585 	}
586 
587 	/* Set PHY address. */
588 	sc->alc_phyaddr = ALC_PHY_ADDR;
589 
590 	/* Initialize DMA parameters. */
591 	sc->alc_dma_rd_burst = 0;
592 	sc->alc_dma_wr_burst = 0;
593 	sc->alc_rcb = DMA_CFG_RCB_64;
594 	if (pci_find_extcap(dev, PCIY_EXPRESS, &base) == 0) {
595 		sc->alc_flags |= ALC_FLAG_PCIE;
596 		burst = CSR_READ_2(sc, base + PCIR_EXPRESS_DEVICE_CTL);
597 		sc->alc_dma_rd_burst =
598 		    (burst & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12;
599 		sc->alc_dma_wr_burst = (burst & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5;
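		/*
		 * Both fields are 3-bit power-of-two encodings
		 * (128 << n bytes); alc_dma_burst[] maps the encoded
		 * index back to a byte count for the messages below.
		 */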
600 		if (bootverbose) {
601 			device_printf(dev, "Read request size : %u bytes.\n",
602 			    alc_dma_burst[sc->alc_dma_rd_burst]);
603 			device_printf(dev, "TLP payload size : %u bytes.\n",
604 			    alc_dma_burst[sc->alc_dma_wr_burst]);
605 		}
606 		/* Clear data-link and flow-control protocol errors. */
607 		val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV);
608 		val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP);
609 		CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val);
610 		/* Disable ASPM L0S and L1. */
611 		cap = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CAP);
612 		if ((cap & PCIM_LINK_CAP_ASPM) != 0) {
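			/*
			 * Link Control bits 0-1 carry the ASPM control
			 * state (L0s/L1) and bit 3 selects a 128 byte
			 * read completion boundary (RCB).
			 */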
613 			ctl = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CTL);
614 			if ((ctl & 0x08) != 0)
615 				sc->alc_rcb = DMA_CFG_RCB_128;
616 			if (bootverbose)
617 				device_printf(dev, "RCB %u bytes\n",
618 				    sc->alc_rcb == DMA_CFG_RCB_64 ? 64 : 128);
619 			state = ctl & 0x03;
620 			if (bootverbose)
621 				device_printf(sc->alc_dev, "ASPM %s %s\n",
622 				    aspm_state[state],
623 				    state == 0 ? "disabled" : "enabled");
624 			if (state != 0)
625 				alc_disable_l0s_l1(sc);
626 		}
627 	}
628 
629 	/* Reset PHY. */
630 	alc_phy_reset(sc);
631 
632 	/* Reset the ethernet controller. */
633 	alc_reset(sc);
634 
635 	/*
636 	 * One odd thing is that the AR8132 uses the same PHY hardware
637 	 * (F1 gigabit PHY) as the AR8131, so atphy(4) reports that the
638 	 * AR8132's PHY supports 1000Mbps, but that is not true. The
639 	 * PHY used in the AR8132 cannot establish a gigabit link even
640 	 * though it reports the same PHY model/revision as the AR8131.
641 	 */
642 	if (pci_get_device(dev) == DEVICEID_ATHEROS_AR8132)
643 		sc->alc_flags |= ALC_FLAG_FASTETHER | ALC_FLAG_JUMBO;
644 	else
645 		sc->alc_flags |= ALC_FLAG_JUMBO | ALC_FLAG_ASPM_MON;
646 	/*
647 	 * The AR8131/AR8132 appears to have a silicon bug in the SMB.
648 	 * In addition, Atheros said that enabling the SMB wouldn't
649 	 * improve performance. Unfortunately, that leaves reading lots
650 	 * of registers as the only way to extract MAC statistics.
651 	 */
652 	sc->alc_flags |= ALC_FLAG_SMB_BUG;
653 	/*
654 	 * Don't use the Tx CMB. It is known to have a silicon bug.
655 	 */
656 	sc->alc_flags |= ALC_FLAG_CMB_BUG;
657 	sc->alc_rev = pci_get_revid(dev);
658 	sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >>
659 	    MASTER_CHIP_REV_SHIFT;
660 	if (bootverbose) {
661 		device_printf(dev, "PCI device revision : 0x%04x\n",
662 		    sc->alc_rev);
663 		device_printf(dev, "Chip id/revision : 0x%04x\n",
664 		    sc->alc_chip_rev);
665 	}
666 	device_printf(dev, "%u Tx FIFO, %u Rx FIFO\n",
667 	    CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8,
668 	    CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8);
669 
670 	/* Allocate IRQ resources. */
671 	msixc = pci_msix_count(dev);
672 	msic = pci_msi_count(dev);
673 	if (bootverbose) {
674 		device_printf(dev, "MSIX count : %d\n", msixc);
675 		device_printf(dev, "MSI count : %d\n", msic);
676 	}
677 	/* Prefer MSIX over MSI. */
678 	if (msix_disable == 0 || msi_disable == 0) {
679 		if (msix_disable == 0 && msixc == ALC_MSIX_MESSAGES &&
680 		    pci_alloc_msix(dev, &msixc) == 0) {
681 			if (msixc == ALC_MSIX_MESSAGES) {
682 				device_printf(dev,
683 				    "Using %d MSIX message(s).\n", msixc);
684 				sc->alc_flags |= ALC_FLAG_MSIX;
685 				sc->alc_irq_spec = alc_irq_spec_msix;
686 			} else
687 				pci_release_msi(dev);
688 		}
689 		if (msi_disable == 0 && (sc->alc_flags & ALC_FLAG_MSIX) == 0 &&
690 		    msic == ALC_MSI_MESSAGES &&
691 		    pci_alloc_msi(dev, &msic) == 0) {
692 			if (msic == ALC_MSI_MESSAGES) {
693 				device_printf(dev,
694 				    "Using %d MSI message(s).\n", msic);
695 				sc->alc_flags |= ALC_FLAG_MSI;
696 				sc->alc_irq_spec = alc_irq_spec_msi;
697 			} else
698 				pci_release_msi(dev);
699 		}
700 	}
701 
702 	error = bus_alloc_resources(dev, sc->alc_irq_spec, sc->alc_irq);
703 	if (error != 0) {
704 		device_printf(dev, "cannot allocate IRQ resources.\n");
705 		goto fail;
706 	}
707 
708 	/* Create device sysctl node. */
709 	alc_sysctl_node(sc);
710 
711 	if ((error = alc_dma_alloc(sc)) != 0)
712 		goto fail;
713 
714 	/* Load station address. */
715 	alc_get_macaddr(sc);
716 
717 	ifp = sc->alc_ifp = if_alloc(IFT_ETHER);
718 	if (ifp == NULL) {
719 		device_printf(dev, "cannot allocate ifnet structure.\n");
720 		error = ENXIO;
721 		goto fail;
722 	}
723 
724 	ifp->if_softc = sc;
725 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
726 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
727 	ifp->if_ioctl = alc_ioctl;
728 	ifp->if_start = alc_start;
729 	ifp->if_init = alc_init;
730 	ifp->if_snd.ifq_drv_maxlen = ALC_TX_RING_CNT - 1;
731 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
732 	IFQ_SET_READY(&ifp->if_snd);
733 	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4;
734 	ifp->if_hwassist = ALC_CSUM_FEATURES | CSUM_TSO;
735 	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
736 		ifp->if_capabilities |= IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST;
737 	ifp->if_capenable = ifp->if_capabilities;
738 
739 	/* Set up MII bus. */
740 	if ((error = mii_phy_probe(dev, &sc->alc_miibus, alc_mediachange,
741 	    alc_mediastatus)) != 0) {
742 		device_printf(dev, "no PHY found!\n");
743 		goto fail;
744 	}
745 
746 	ether_ifattach(ifp, sc->alc_eaddr);
747 
748 	/* VLAN capability setup. */
749 	ifp->if_capabilities |= IFCAP_VLAN_MTU;
750 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
751 	ifp->if_capenable = ifp->if_capabilities;
752 	/*
753 	 * XXX
754 	 * It seems that enabling Tx checksum offloading causes trouble.
755 	 * Sometimes the controller does not receive any frames when
756 	 * Tx checksum offloading is enabled. It's not clear whether
757 	 * this is a bug in the offloading logic or just broken sample
758 	 * boards. To be safe, don't enable Tx checksum offloading by
759 	 * default, but give users a chance to toggle it if they know
760 	 * their controllers work without problems.
761 	 */
762 	ifp->if_capenable &= ~IFCAP_TXCSUM;
763 	ifp->if_hwassist &= ~ALC_CSUM_FEATURES;
764 
765 	/* Tell the upper layer(s) we support long frames. */
766 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
767 
768 	/* Create local taskq. */
769 	TASK_INIT(&sc->alc_tx_task, 1, alc_tx_task, ifp);
770 	sc->alc_tq = taskqueue_create_fast("alc_taskq", M_WAITOK,
771 	    taskqueue_thread_enqueue, &sc->alc_tq);
772 	if (sc->alc_tq == NULL) {
773 		device_printf(dev, "could not create taskqueue.\n");
774 		ether_ifdetach(ifp);
775 		error = ENXIO;
776 		goto fail;
777 	}
778 	taskqueue_start_threads(&sc->alc_tq, 1, PI_NET, "%s taskq",
779 	    device_get_nameunit(sc->alc_dev));
780 
781 	if ((sc->alc_flags & ALC_FLAG_MSIX) != 0)
782 		msic = ALC_MSIX_MESSAGES;
783 	else if ((sc->alc_flags & ALC_FLAG_MSI) != 0)
784 		msic = ALC_MSI_MESSAGES;
785 	else
786 		msic = 1;
787 	for (i = 0; i < msic; i++) {
788 		error = bus_setup_intr(dev, sc->alc_irq[i],
789 		    INTR_TYPE_NET | INTR_MPSAFE, alc_intr, NULL, sc,
790 		    &sc->alc_intrhand[i]);
791 		if (error != 0)
792 			break;
793 	}
794 	if (error != 0) {
795 		device_printf(dev, "could not set up interrupt handler.\n");
796 		taskqueue_free(sc->alc_tq);
797 		sc->alc_tq = NULL;
798 		ether_ifdetach(ifp);
799 		goto fail;
800 	}
801 
802 fail:
803 	if (error != 0)
804 		alc_detach(dev);
805 
806 	return (error);
807 }
808 
809 static int
810 alc_detach(device_t dev)
811 {
812 	struct alc_softc *sc;
813 	struct ifnet *ifp;
814 	int i, msic;
815 
816 	sc = device_get_softc(dev);
817 
818 	ifp = sc->alc_ifp;
819 	if (device_is_attached(dev)) {
820 		ALC_LOCK(sc);
821 		sc->alc_flags |= ALC_FLAG_DETACH;
822 		alc_stop(sc);
823 		ALC_UNLOCK(sc);
824 		callout_drain(&sc->alc_tick_ch);
825 		taskqueue_drain(sc->alc_tq, &sc->alc_int_task);
826 		taskqueue_drain(sc->alc_tq, &sc->alc_tx_task);
827 		ether_ifdetach(ifp);
828 	}
829 
830 	if (sc->alc_tq != NULL) {
831 		taskqueue_drain(sc->alc_tq, &sc->alc_int_task);
832 		taskqueue_free(sc->alc_tq);
833 		sc->alc_tq = NULL;
834 	}
835 
836 	if (sc->alc_miibus != NULL) {
837 		device_delete_child(dev, sc->alc_miibus);
838 		sc->alc_miibus = NULL;
839 	}
840 	bus_generic_detach(dev);
841 	alc_dma_free(sc);
842 
843 	if (ifp != NULL) {
844 		if_free(ifp);
845 		sc->alc_ifp = NULL;
846 	}
847 
848 	if ((sc->alc_flags & ALC_FLAG_MSIX) != 0)
849 		msic = ALC_MSIX_MESSAGES;
850 	else if ((sc->alc_flags & ALC_FLAG_MSI) != 0)
851 		msic = ALC_MSI_MESSAGES;
852 	else
853 		msic = 1;
854 	for (i = 0; i < msic; i++) {
855 		if (sc->alc_intrhand[i] != NULL) {
856 			bus_teardown_intr(dev, sc->alc_irq[i],
857 			    sc->alc_intrhand[i]);
858 			sc->alc_intrhand[i] = NULL;
859 		}
860 	}
861 	alc_phy_down(sc);
862 	bus_release_resources(dev, sc->alc_irq_spec, sc->alc_irq);
863 	if ((sc->alc_flags & (ALC_FLAG_MSI | ALC_FLAG_MSIX)) != 0)
864 		pci_release_msi(dev);
865 	bus_release_resources(dev, sc->alc_res_spec, sc->alc_res);
866 	mtx_destroy(&sc->alc_mtx);
867 
868 	return (0);
869 }
870 
871 #define	ALC_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
872 	    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
873 #define	ALC_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
874 	    SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
875 
876 static void
877 alc_sysctl_node(struct alc_softc *sc)
878 {
879 	struct sysctl_ctx_list *ctx;
880 	struct sysctl_oid_list *child, *parent;
881 	struct sysctl_oid *tree;
882 	struct alc_hw_stats *stats;
883 	int error;
884 
885 	stats = &sc->alc_stats;
886 	ctx = device_get_sysctl_ctx(sc->alc_dev);
887 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->alc_dev));
888 
889 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod",
890 	    CTLTYPE_INT | CTLFLAG_RW, &sc->alc_int_rx_mod, 0,
891 	    sysctl_hw_alc_int_mod, "I", "alc Rx interrupt moderation");
892 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod",
893 	    CTLTYPE_INT | CTLFLAG_RW, &sc->alc_int_tx_mod, 0,
894 	    sysctl_hw_alc_int_mod, "I", "alc Tx interrupt moderation");
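	/*
	 * Both timers are also adjustable at runtime, e.g.
	 * `sysctl dev.alc.0.int_rx_mod=100'; the handler rejects
	 * values outside the supported range.
	 */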
895 	/* Pull in device tunables. */
896 	sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
897 	error = resource_int_value(device_get_name(sc->alc_dev),
898 	    device_get_unit(sc->alc_dev), "int_rx_mod", &sc->alc_int_rx_mod);
899 	if (error == 0) {
900 		if (sc->alc_int_rx_mod < ALC_IM_TIMER_MIN ||
901 		    sc->alc_int_rx_mod > ALC_IM_TIMER_MAX) {
902 			device_printf(sc->alc_dev, "int_rx_mod value out of "
903 			    "range; using default: %d\n",
904 			    ALC_IM_RX_TIMER_DEFAULT);
905 			sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
906 		}
907 	}
908 	sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
909 	error = resource_int_value(device_get_name(sc->alc_dev),
910 	    device_get_unit(sc->alc_dev), "int_tx_mod", &sc->alc_int_tx_mod);
911 	if (error == 0) {
912 		if (sc->alc_int_tx_mod < ALC_IM_TIMER_MIN ||
913 		    sc->alc_int_tx_mod > ALC_IM_TIMER_MAX) {
914 			device_printf(sc->alc_dev, "int_tx_mod value out of "
915 			    "range; using default: %d\n",
916 			    ALC_IM_TX_TIMER_DEFAULT);
917 			sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
918 		}
919 	}
920 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
921 	    CTLTYPE_INT | CTLFLAG_RW, &sc->alc_process_limit, 0,
922 	    sysctl_hw_alc_proc_limit, "I",
923 	    "max number of Rx events to process");
924 	/* Pull in device tunables. */
925 	sc->alc_process_limit = ALC_PROC_DEFAULT;
926 	error = resource_int_value(device_get_name(sc->alc_dev),
927 	    device_get_unit(sc->alc_dev), "process_limit",
928 	    &sc->alc_process_limit);
929 	if (error == 0) {
930 		if (sc->alc_process_limit < ALC_PROC_MIN ||
931 		    sc->alc_process_limit > ALC_PROC_MAX) {
932 			device_printf(sc->alc_dev,
933 			    "process_limit value out of range; "
934 			    "using default: %d\n", ALC_PROC_DEFAULT);
935 			sc->alc_process_limit = ALC_PROC_DEFAULT;
936 		}
937 	}
938 
939 	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
940 	    NULL, "ALC statistics");
941 	parent = SYSCTL_CHILDREN(tree);
942 
943 	/* Rx statistics. */
944 	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
945 	    NULL, "Rx MAC statistics");
946 	child = SYSCTL_CHILDREN(tree);
947 	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
948 	    &stats->rx_frames, "Good frames");
949 	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
950 	    &stats->rx_bcast_frames, "Good broadcast frames");
951 	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
952 	    &stats->rx_mcast_frames, "Good multicast frames");
953 	ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
954 	    &stats->rx_pause_frames, "Pause control frames");
955 	ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames",
956 	    &stats->rx_control_frames, "Control frames");
957 	ALC_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
958 	    &stats->rx_crcerrs, "CRC errors");
959 	ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
960 	    &stats->rx_lenerrs, "Frames with mismatched length");
961 	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
962 	    &stats->rx_bytes, "Good octets");
963 	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets",
964 	    &stats->rx_bcast_bytes, "Good broadcast octets");
965 	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets",
966 	    &stats->rx_mcast_bytes, "Good multicast octets");
967 	ALC_SYSCTL_STAT_ADD32(ctx, child, "runts",
968 	    &stats->rx_runts, "Too short frames");
969 	ALC_SYSCTL_STAT_ADD32(ctx, child, "fragments",
970 	    &stats->rx_fragments, "Fragmented frames");
971 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
972 	    &stats->rx_pkts_64, "64 bytes frames");
973 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
974 	    &stats->rx_pkts_65_127, "65 to 127 bytes frames");
975 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
976 	    &stats->rx_pkts_128_255, "128 to 255 bytes frames");
977 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
978 	    &stats->rx_pkts_256_511, "256 to 511 bytes frames");
979 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
980 	    &stats->rx_pkts_512_1023, "512 to 1023 bytes frames");
981 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
982 	    &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames");
983 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
984 	    &stats->rx_pkts_1519_max, "1519 to max frames");
985 	ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs",
986 	    &stats->rx_pkts_truncated, "Truncated frames due to MTU size");
987 	ALC_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
988 	    &stats->rx_fifo_oflows, "FIFO overflows");
989 	ALC_SYSCTL_STAT_ADD32(ctx, child, "rrs_errs",
990 	    &stats->rx_rrs_errs, "Return status write-back errors");
991 	ALC_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
992 	    &stats->rx_alignerrs, "Alignment errors");
993 	ALC_SYSCTL_STAT_ADD32(ctx, child, "filtered",
994 	    &stats->rx_pkts_filtered,
995 	    "Frames dropped due to address filtering");
996 
997 	/* Tx statistics. */
998 	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
999 	    NULL, "Tx MAC statistics");
1000 	child = SYSCTL_CHILDREN(tree);
1001 	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
1002 	    &stats->tx_frames, "Good frames");
1003 	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
1004 	    &stats->tx_bcast_frames, "Good broadcast frames");
1005 	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
1006 	    &stats->tx_mcast_frames, "Good multicast frames");
1007 	ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
1008 	    &stats->tx_pause_frames, "Pause control frames");
1009 	ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames",
1010 	    &stats->tx_control_frames, "Control frames");
1011 	ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_defers",
1012 	    &stats->tx_excess_defer, "Frames with excessive deferrals");
1013 	ALC_SYSCTL_STAT_ADD32(ctx, child, "defers",
1014 	    &stats->tx_excess_defer, "Frames with deferrals");
1015 	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
1016 	    &stats->tx_bytes, "Good octets");
1017 	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets",
1018 	    &stats->tx_bcast_bytes, "Good broadcast octets");
1019 	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets",
1020 	    &stats->tx_mcast_bytes, "Good multicast octets");
1021 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
1022 	    &stats->tx_pkts_64, "64 bytes frames");
1023 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
1024 	    &stats->tx_pkts_65_127, "65 to 127 bytes frames");
1025 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
1026 	    &stats->tx_pkts_128_255, "128 to 255 bytes frames");
1027 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
1028 	    &stats->tx_pkts_256_511, "256 to 511 bytes frames");
1029 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
1030 	    &stats->tx_pkts_512_1023, "512 to 1023 bytes frames");
1031 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
1032 	    &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames");
1033 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
1034 	    &stats->tx_pkts_1519_max, "1519 to max frames");
1035 	ALC_SYSCTL_STAT_ADD32(ctx, child, "single_colls",
1036 	    &stats->tx_single_colls, "Single collisions");
1037 	ALC_SYSCTL_STAT_ADD32(ctx, child, "multi_colls",
1038 	    &stats->tx_multi_colls, "Multiple collisions");
1039 	ALC_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
1040 	    &stats->tx_late_colls, "Late collisions");
1041 	ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_colls",
1042 	    &stats->tx_excess_colls, "Excessive collisions");
1043 	ALC_SYSCTL_STAT_ADD32(ctx, child, "abort",
1044 	    &stats->tx_abort, "Aborted frames due to excessive collisions");
1045 	ALC_SYSCTL_STAT_ADD32(ctx, child, "underruns",
1046 	    &stats->tx_underrun, "FIFO underruns");
1047 	ALC_SYSCTL_STAT_ADD32(ctx, child, "desc_underruns",
1048 	    &stats->tx_desc_underrun, "Descriptor write-back errors");
1049 	ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
1050 	    &stats->tx_lenerrs, "Frames with mismatched length");
1051 	ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs",
1052 	    &stats->tx_pkts_truncated, "Truncated frames due to MTU size");
1053 }
1054 
1055 #undef ALC_SYSCTL_STAT_ADD32
1056 #undef ALC_SYSCTL_STAT_ADD64
1057 
1058 struct alc_dmamap_arg {
1059 	bus_addr_t	alc_busaddr;
1060 };
1061 
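/*
 * bus_dmamap_load(9) callback.  The rings are loaded as a single
 * segment, so just hand that segment's bus address back to the
 * caller through struct alc_dmamap_arg.
 */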
1062 static void
1063 alc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1064 {
1065 	struct alc_dmamap_arg *ctx;
1066 
1067 	if (error != 0)
1068 		return;
1069 
1070 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1071 
1072 	ctx = (struct alc_dmamap_arg *)arg;
1073 	ctx->alc_busaddr = segs[0].ds_addr;
1074 }
1075 
1076 /*
1077  * The normal and high Tx descriptor rings share a single Tx high
1078  * address, while the four Rx descriptor/return rings and the CMB
1079  * share the same Rx high address.
1080  */
1081 static int
1082 alc_check_boundary(struct alc_softc *sc)
1083 {
1084 	bus_addr_t cmb_end, rx_ring_end, rr_ring_end, tx_ring_end;
1085 
1086 	rx_ring_end = sc->alc_rdata.alc_rx_ring_paddr + ALC_RX_RING_SZ;
1087 	rr_ring_end = sc->alc_rdata.alc_rr_ring_paddr + ALC_RR_RING_SZ;
1088 	cmb_end = sc->alc_rdata.alc_cmb_paddr + ALC_CMB_SZ;
1089 	tx_ring_end = sc->alc_rdata.alc_tx_ring_paddr + ALC_TX_RING_SZ;
1090 
1091 	/* 4GB boundary crossing is not allowed. */
1092 	if ((ALC_ADDR_HI(rx_ring_end) !=
1093 	    ALC_ADDR_HI(sc->alc_rdata.alc_rx_ring_paddr)) ||
1094 	    (ALC_ADDR_HI(rr_ring_end) !=
1095 	    ALC_ADDR_HI(sc->alc_rdata.alc_rr_ring_paddr)) ||
1096 	    (ALC_ADDR_HI(cmb_end) !=
1097 	    ALC_ADDR_HI(sc->alc_rdata.alc_cmb_paddr)) ||
1098 	    (ALC_ADDR_HI(tx_ring_end) !=
1099 	    ALC_ADDR_HI(sc->alc_rdata.alc_tx_ring_paddr)))
1100 		return (EFBIG);
1101 	/*
1102 	 * Make sure Rx return descriptor/Rx descriptor/CMB use
1103 	 * the same high address.
1104 	 */
1105 	if ((ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(rr_ring_end)) ||
1106 	    (ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(cmb_end)))
1107 		return (EFBIG);
1108 
1109 	return (0);
1110 }
1111 
1112 static int
1113 alc_dma_alloc(struct alc_softc *sc)
1114 {
1115 	struct alc_txdesc *txd;
1116 	struct alc_rxdesc *rxd;
1117 	bus_addr_t lowaddr;
1118 	struct alc_dmamap_arg ctx;
1119 	int error, i;
1120 
1121 	lowaddr = BUS_SPACE_MAXADDR;
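	/*
	 * Start with full 64bit DMA addressing.  If any ring turns out
	 * to cross a 4GB boundary (checked after loading, below), all
	 * DMA memory is freed and re-allocated with lowaddr limited to
	 * the 32bit address space.
	 */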
1122 again:
1123 	/* Create parent DMA tag. */
1124 	error = bus_dma_tag_create(
1125 	    bus_get_dma_tag(sc->alc_dev), /* parent */
1126 	    1, 0,			/* alignment, boundary */
1127 	    lowaddr,			/* lowaddr */
1128 	    BUS_SPACE_MAXADDR,		/* highaddr */
1129 	    NULL, NULL,			/* filter, filterarg */
1130 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1131 	    0,				/* nsegments */
1132 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1133 	    0,				/* flags */
1134 	    NULL, NULL,			/* lockfunc, lockarg */
1135 	    &sc->alc_cdata.alc_parent_tag);
1136 	if (error != 0) {
1137 		device_printf(sc->alc_dev,
1138 		    "could not create parent DMA tag.\n");
1139 		goto fail;
1140 	}
1141 
1142 	/* Create DMA tag for Tx descriptor ring. */
1143 	error = bus_dma_tag_create(
1144 	    sc->alc_cdata.alc_parent_tag, /* parent */
1145 	    ALC_TX_RING_ALIGN, 0,	/* alignment, boundary */
1146 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1147 	    BUS_SPACE_MAXADDR,		/* highaddr */
1148 	    NULL, NULL,			/* filter, filterarg */
1149 	    ALC_TX_RING_SZ,		/* maxsize */
1150 	    1,				/* nsegments */
1151 	    ALC_TX_RING_SZ,		/* maxsegsize */
1152 	    0,				/* flags */
1153 	    NULL, NULL,			/* lockfunc, lockarg */
1154 	    &sc->alc_cdata.alc_tx_ring_tag);
1155 	if (error != 0) {
1156 		device_printf(sc->alc_dev,
1157 		    "could not create Tx ring DMA tag.\n");
1158 		goto fail;
1159 	}
1160 
1161 	/* Create DMA tag for Rx free descriptor ring. */
1162 	error = bus_dma_tag_create(
1163 	    sc->alc_cdata.alc_parent_tag, /* parent */
1164 	    ALC_RX_RING_ALIGN, 0,	/* alignment, boundary */
1165 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1166 	    BUS_SPACE_MAXADDR,		/* highaddr */
1167 	    NULL, NULL,			/* filter, filterarg */
1168 	    ALC_RX_RING_SZ,		/* maxsize */
1169 	    1,				/* nsegments */
1170 	    ALC_RX_RING_SZ,		/* maxsegsize */
1171 	    0,				/* flags */
1172 	    NULL, NULL,			/* lockfunc, lockarg */
1173 	    &sc->alc_cdata.alc_rx_ring_tag);
1174 	if (error != 0) {
1175 		device_printf(sc->alc_dev,
1176 		    "could not create Rx ring DMA tag.\n");
1177 		goto fail;
1178 	}
1179 	/* Create DMA tag for Rx return descriptor ring. */
1180 	error = bus_dma_tag_create(
1181 	    sc->alc_cdata.alc_parent_tag, /* parent */
1182 	    ALC_RR_RING_ALIGN, 0,	/* alignment, boundary */
1183 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1184 	    BUS_SPACE_MAXADDR,		/* highaddr */
1185 	    NULL, NULL,			/* filter, filterarg */
1186 	    ALC_RR_RING_SZ,		/* maxsize */
1187 	    1,				/* nsegments */
1188 	    ALC_RR_RING_SZ,		/* maxsegsize */
1189 	    0,				/* flags */
1190 	    NULL, NULL,			/* lockfunc, lockarg */
1191 	    &sc->alc_cdata.alc_rr_ring_tag);
1192 	if (error != 0) {
1193 		device_printf(sc->alc_dev,
1194 		    "could not create Rx return ring DMA tag.\n");
1195 		goto fail;
1196 	}
1197 
1198 	/* Create DMA tag for coalescing message block. */
1199 	error = bus_dma_tag_create(
1200 	    sc->alc_cdata.alc_parent_tag, /* parent */
1201 	    ALC_CMB_ALIGN, 0,		/* alignment, boundary */
1202 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1203 	    BUS_SPACE_MAXADDR,		/* highaddr */
1204 	    NULL, NULL,			/* filter, filterarg */
1205 	    ALC_CMB_SZ,			/* maxsize */
1206 	    1,				/* nsegments */
1207 	    ALC_CMB_SZ,			/* maxsegsize */
1208 	    0,				/* flags */
1209 	    NULL, NULL,			/* lockfunc, lockarg */
1210 	    &sc->alc_cdata.alc_cmb_tag);
1211 	if (error != 0) {
1212 		device_printf(sc->alc_dev,
1213 		    "could not create CMB DMA tag.\n");
1214 		goto fail;
1215 	}
1216 	/* Create DMA tag for status message block. */
1217 	error = bus_dma_tag_create(
1218 	    sc->alc_cdata.alc_parent_tag, /* parent */
1219 	    ALC_SMB_ALIGN, 0,		/* alignment, boundary */
1220 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1221 	    BUS_SPACE_MAXADDR,		/* highaddr */
1222 	    NULL, NULL,			/* filter, filterarg */
1223 	    ALC_SMB_SZ,			/* maxsize */
1224 	    1,				/* nsegments */
1225 	    ALC_SMB_SZ,			/* maxsegsize */
1226 	    0,				/* flags */
1227 	    NULL, NULL,			/* lockfunc, lockarg */
1228 	    &sc->alc_cdata.alc_smb_tag);
1229 	if (error != 0) {
1230 		device_printf(sc->alc_dev,
1231 		    "could not create SMB DMA tag.\n");
1232 		goto fail;
1233 	}
1234 
1235 	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
1236 	error = bus_dmamem_alloc(sc->alc_cdata.alc_tx_ring_tag,
1237 	    (void **)&sc->alc_rdata.alc_tx_ring,
1238 	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1239 	    &sc->alc_cdata.alc_tx_ring_map);
1240 	if (error != 0) {
1241 		device_printf(sc->alc_dev,
1242 		    "could not allocate DMA'able memory for Tx ring.\n");
1243 		goto fail;
1244 	}
1245 	ctx.alc_busaddr = 0;
1246 	error = bus_dmamap_load(sc->alc_cdata.alc_tx_ring_tag,
1247 	    sc->alc_cdata.alc_tx_ring_map, sc->alc_rdata.alc_tx_ring,
1248 	    ALC_TX_RING_SZ, alc_dmamap_cb, &ctx, 0);
1249 	if (error != 0 || ctx.alc_busaddr == 0) {
1250 		device_printf(sc->alc_dev,
1251 		    "could not load DMA'able memory for Tx ring.\n");
1252 		goto fail;
1253 	}
1254 	sc->alc_rdata.alc_tx_ring_paddr = ctx.alc_busaddr;
1255 
1256 	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
1257 	error = bus_dmamem_alloc(sc->alc_cdata.alc_rx_ring_tag,
1258 	    (void **)&sc->alc_rdata.alc_rx_ring,
1259 	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1260 	    &sc->alc_cdata.alc_rx_ring_map);
1261 	if (error != 0) {
1262 		device_printf(sc->alc_dev,
1263 		    "could not allocate DMA'able memory for Rx ring.\n");
1264 		goto fail;
1265 	}
1266 	ctx.alc_busaddr = 0;
1267 	error = bus_dmamap_load(sc->alc_cdata.alc_rx_ring_tag,
1268 	    sc->alc_cdata.alc_rx_ring_map, sc->alc_rdata.alc_rx_ring,
1269 	    ALC_RX_RING_SZ, alc_dmamap_cb, &ctx, 0);
1270 	if (error != 0 || ctx.alc_busaddr == 0) {
1271 		device_printf(sc->alc_dev,
1272 		    "could not load DMA'able memory for Rx ring.\n");
1273 		goto fail;
1274 	}
1275 	sc->alc_rdata.alc_rx_ring_paddr = ctx.alc_busaddr;
1276 
1277 	/* Allocate DMA'able memory and load the DMA map for Rx return ring. */
1278 	error = bus_dmamem_alloc(sc->alc_cdata.alc_rr_ring_tag,
1279 	    (void **)&sc->alc_rdata.alc_rr_ring,
1280 	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1281 	    &sc->alc_cdata.alc_rr_ring_map);
1282 	if (error != 0) {
1283 		device_printf(sc->alc_dev,
1284 		    "could not allocate DMA'able memory for Rx return ring.\n");
1285 		goto fail;
1286 	}
1287 	ctx.alc_busaddr = 0;
1288 	error = bus_dmamap_load(sc->alc_cdata.alc_rr_ring_tag,
1289 	    sc->alc_cdata.alc_rr_ring_map, sc->alc_rdata.alc_rr_ring,
1290 	    ALC_RR_RING_SZ, alc_dmamap_cb, &ctx, 0);
1291 	if (error != 0 || ctx.alc_busaddr == 0) {
1292 		device_printf(sc->alc_dev,
1293 		    "could not load DMA'able memory for Rx return ring.\n");
1294 		goto fail;
1295 	}
1296 	sc->alc_rdata.alc_rr_ring_paddr = ctx.alc_busaddr;
1297 
1298 	/* Allocate DMA'able memory and load the DMA map for CMB. */
1299 	error = bus_dmamem_alloc(sc->alc_cdata.alc_cmb_tag,
1300 	    (void **)&sc->alc_rdata.alc_cmb,
1301 	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1302 	    &sc->alc_cdata.alc_cmb_map);
1303 	if (error != 0) {
1304 		device_printf(sc->alc_dev,
1305 		    "could not allocate DMA'able memory for CMB.\n");
1306 		goto fail;
1307 	}
1308 	ctx.alc_busaddr = 0;
1309 	error = bus_dmamap_load(sc->alc_cdata.alc_cmb_tag,
1310 	    sc->alc_cdata.alc_cmb_map, sc->alc_rdata.alc_cmb,
1311 	    ALC_CMB_SZ, alc_dmamap_cb, &ctx, 0);
1312 	if (error != 0 || ctx.alc_busaddr == 0) {
1313 		device_printf(sc->alc_dev,
1314 		    "could not load DMA'able memory for CMB.\n");
1315 		goto fail;
1316 	}
1317 	sc->alc_rdata.alc_cmb_paddr = ctx.alc_busaddr;
1318 
1319 	/* Allocate DMA'able memory and load the DMA map for SMB. */
1320 	error = bus_dmamem_alloc(sc->alc_cdata.alc_smb_tag,
1321 	    (void **)&sc->alc_rdata.alc_smb,
1322 	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1323 	    &sc->alc_cdata.alc_smb_map);
1324 	if (error != 0) {
1325 		device_printf(sc->alc_dev,
1326 		    "could not allocate DMA'able memory for SMB.\n");
1327 		goto fail;
1328 	}
1329 	ctx.alc_busaddr = 0;
1330 	error = bus_dmamap_load(sc->alc_cdata.alc_smb_tag,
1331 	    sc->alc_cdata.alc_smb_map, sc->alc_rdata.alc_smb,
1332 	    ALC_SMB_SZ, alc_dmamap_cb, &ctx, 0);
1333 	if (error != 0 || ctx.alc_busaddr == 0) {
1334 		device_printf(sc->alc_dev,
1335 		    "could not load DMA'able memory for SMB.\n");
1336 		goto fail;
1337 	}
1338 	sc->alc_rdata.alc_smb_paddr = ctx.alc_busaddr;
1339 
1340 	/* Make sure we've not crossed a 4GB boundary. */
1341 	if (lowaddr != BUS_SPACE_MAXADDR_32BIT &&
1342 	    (error = alc_check_boundary(sc)) != 0) {
1343 		device_printf(sc->alc_dev, "4GB boundary crossed, "
1344 		    "switching to 32bit DMA addressing mode.\n");
1345 		alc_dma_free(sc);
1346 		/*
1347 		 * Limit max allowable DMA address space to 32bit
1348 		 * and try again.
1349 		 */
1350 		lowaddr = BUS_SPACE_MAXADDR_32BIT;
1351 		goto again;
1352 	}
1353 
1354 	/*
1355 	 * Create Tx buffer parent tag.
1356 	 * The AR8131/AR8132 allows 64bit DMA addressing of Tx/Rx
1357 	 * buffers, so the buffers need their own parent DMA tag: the
1358 	 * ring parent tag above may have been restricted to the 32bit
1359 	 * address space by the 4GB boundary check.
1360 	 */
1361 	error = bus_dma_tag_create(
1362 	    bus_get_dma_tag(sc->alc_dev), /* parent */
1363 	    1, 0,			/* alignment, boundary */
1364 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1365 	    BUS_SPACE_MAXADDR,		/* highaddr */
1366 	    NULL, NULL,			/* filter, filterarg */
1367 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1368 	    0,				/* nsegments */
1369 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1370 	    0,				/* flags */
1371 	    NULL, NULL,			/* lockfunc, lockarg */
1372 	    &sc->alc_cdata.alc_buffer_tag);
1373 	if (error != 0) {
1374 		device_printf(sc->alc_dev,
1375 		    "could not create parent buffer DMA tag.\n");
1376 		goto fail;
1377 	}
1378 
1379 	/* Create DMA tag for Tx buffers. */
1380 	error = bus_dma_tag_create(
1381 	    sc->alc_cdata.alc_buffer_tag, /* parent */
1382 	    1, 0,			/* alignment, boundary */
1383 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1384 	    BUS_SPACE_MAXADDR,		/* highaddr */
1385 	    NULL, NULL,			/* filter, filterarg */
1386 	    ALC_TSO_MAXSIZE,		/* maxsize */
1387 	    ALC_MAXTXSEGS,		/* nsegments */
1388 	    ALC_TSO_MAXSEGSIZE,		/* maxsegsize */
1389 	    0,				/* flags */
1390 	    NULL, NULL,			/* lockfunc, lockarg */
1391 	    &sc->alc_cdata.alc_tx_tag);
1392 	if (error != 0) {
1393 		device_printf(sc->alc_dev, "could not create Tx DMA tag.\n");
1394 		goto fail;
1395 	}
1396 
1397 	/* Create DMA tag for Rx buffers. */
1398 	error = bus_dma_tag_create(
1399 	    sc->alc_cdata.alc_buffer_tag, /* parent */
1400 	    ALC_RX_BUF_ALIGN, 0,	/* alignment, boundary */
1401 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1402 	    BUS_SPACE_MAXADDR,		/* highaddr */
1403 	    NULL, NULL,			/* filter, filterarg */
1404 	    MCLBYTES,			/* maxsize */
1405 	    1,				/* nsegments */
1406 	    MCLBYTES,			/* maxsegsize */
1407 	    0,				/* flags */
1408 	    NULL, NULL,			/* lockfunc, lockarg */
1409 	    &sc->alc_cdata.alc_rx_tag);
1410 	if (error != 0) {
1411 		device_printf(sc->alc_dev, "could not create Rx DMA tag.\n");
1412 		goto fail;
1413 	}
1414 	/* Create DMA maps for Tx buffers. */
1415 	for (i = 0; i < ALC_TX_RING_CNT; i++) {
1416 		txd = &sc->alc_cdata.alc_txdesc[i];
1417 		txd->tx_m = NULL;
1418 		txd->tx_dmamap = NULL;
1419 		error = bus_dmamap_create(sc->alc_cdata.alc_tx_tag, 0,
1420 		    &txd->tx_dmamap);
1421 		if (error != 0) {
1422 			device_printf(sc->alc_dev,
1423 			    "could not create Tx dmamap.\n");
1424 			goto fail;
1425 		}
1426 	}
1427 	/* Create DMA maps for Rx buffers. */
1428 	if ((error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag, 0,
1429 	    &sc->alc_cdata.alc_rx_sparemap)) != 0) {
1430 		device_printf(sc->alc_dev,
1431 		    "could not create spare Rx dmamap.\n");
1432 		goto fail;
1433 	}
1434 	for (i = 0; i < ALC_RX_RING_CNT; i++) {
1435 		rxd = &sc->alc_cdata.alc_rxdesc[i];
1436 		rxd->rx_m = NULL;
1437 		rxd->rx_dmamap = NULL;
1438 		error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag, 0,
1439 		    &rxd->rx_dmamap);
1440 		if (error != 0) {
1441 			device_printf(sc->alc_dev,
1442 			    "could not create Rx dmamap.\n");
1443 			goto fail;
1444 		}
1445 	}
1446 
1447 fail:
1448 	return (error);
1449 }
1450 
1451 static void
1452 alc_dma_free(struct alc_softc *sc)
1453 {
1454 	struct alc_txdesc *txd;
1455 	struct alc_rxdesc *rxd;
1456 	int i;
1457 
1458 	/* Tx buffers. */
1459 	if (sc->alc_cdata.alc_tx_tag != NULL) {
1460 		for (i = 0; i < ALC_TX_RING_CNT; i++) {
1461 			txd = &sc->alc_cdata.alc_txdesc[i];
1462 			if (txd->tx_dmamap != NULL) {
1463 				bus_dmamap_destroy(sc->alc_cdata.alc_tx_tag,
1464 				    txd->tx_dmamap);
1465 				txd->tx_dmamap = NULL;
1466 			}
1467 		}
1468 		bus_dma_tag_destroy(sc->alc_cdata.alc_tx_tag);
1469 		sc->alc_cdata.alc_tx_tag = NULL;
1470 	}
1471 	/* Rx buffers */
1472 	if (sc->alc_cdata.alc_rx_tag != NULL) {
1473 		for (i = 0; i < ALC_RX_RING_CNT; i++) {
1474 			rxd = &sc->alc_cdata.alc_rxdesc[i];
1475 			if (rxd->rx_dmamap != NULL) {
1476 				bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag,
1477 				    rxd->rx_dmamap);
1478 				rxd->rx_dmamap = NULL;
1479 			}
1480 		}
1481 		if (sc->alc_cdata.alc_rx_sparemap != NULL) {
1482 			bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag,
1483 			    sc->alc_cdata.alc_rx_sparemap);
1484 			sc->alc_cdata.alc_rx_sparemap = NULL;
1485 		}
1486 		bus_dma_tag_destroy(sc->alc_cdata.alc_rx_tag);
1487 		sc->alc_cdata.alc_rx_tag = NULL;
1488 	}
1489 	/* Tx descriptor ring. */
1490 	if (sc->alc_cdata.alc_tx_ring_tag != NULL) {
1491 		if (sc->alc_cdata.alc_tx_ring_map != NULL)
1492 			bus_dmamap_unload(sc->alc_cdata.alc_tx_ring_tag,
1493 			    sc->alc_cdata.alc_tx_ring_map);
1494 		if (sc->alc_cdata.alc_tx_ring_map != NULL &&
1495 		    sc->alc_rdata.alc_tx_ring != NULL)
1496 			bus_dmamem_free(sc->alc_cdata.alc_tx_ring_tag,
1497 			    sc->alc_rdata.alc_tx_ring,
1498 			    sc->alc_cdata.alc_tx_ring_map);
1499 		sc->alc_rdata.alc_tx_ring = NULL;
1500 		sc->alc_cdata.alc_tx_ring_map = NULL;
1501 		bus_dma_tag_destroy(sc->alc_cdata.alc_tx_ring_tag);
1502 		sc->alc_cdata.alc_tx_ring_tag = NULL;
1503 	}
1504 	/* Rx ring. */
1505 	if (sc->alc_cdata.alc_rx_ring_tag != NULL) {
1506 		if (sc->alc_cdata.alc_rx_ring_map != NULL)
1507 			bus_dmamap_unload(sc->alc_cdata.alc_rx_ring_tag,
1508 			    sc->alc_cdata.alc_rx_ring_map);
1509 		if (sc->alc_cdata.alc_rx_ring_map != NULL &&
1510 		    sc->alc_rdata.alc_rx_ring != NULL)
1511 			bus_dmamem_free(sc->alc_cdata.alc_rx_ring_tag,
1512 			    sc->alc_rdata.alc_rx_ring,
1513 			    sc->alc_cdata.alc_rx_ring_map);
1514 		sc->alc_rdata.alc_rx_ring = NULL;
1515 		sc->alc_cdata.alc_rx_ring_map = NULL;
1516 		bus_dma_tag_destroy(sc->alc_cdata.alc_rx_ring_tag);
1517 		sc->alc_cdata.alc_rx_ring_tag = NULL;
1518 	}
1519 	/* Rx return ring. */
1520 	if (sc->alc_cdata.alc_rr_ring_tag != NULL) {
1521 		if (sc->alc_cdata.alc_rr_ring_map != NULL)
1522 			bus_dmamap_unload(sc->alc_cdata.alc_rr_ring_tag,
1523 			    sc->alc_cdata.alc_rr_ring_map);
1524 		if (sc->alc_cdata.alc_rr_ring_map != NULL &&
1525 		    sc->alc_rdata.alc_rr_ring != NULL)
1526 			bus_dmamem_free(sc->alc_cdata.alc_rr_ring_tag,
1527 			    sc->alc_rdata.alc_rr_ring,
1528 			    sc->alc_cdata.alc_rr_ring_map);
1529 		sc->alc_rdata.alc_rr_ring = NULL;
1530 		sc->alc_cdata.alc_rr_ring_map = NULL;
1531 		bus_dma_tag_destroy(sc->alc_cdata.alc_rr_ring_tag);
1532 		sc->alc_cdata.alc_rr_ring_tag = NULL;
1533 	}
1534 	/* CMB block */
1535 	if (sc->alc_cdata.alc_cmb_tag != NULL) {
1536 		if (sc->alc_cdata.alc_cmb_map != NULL)
1537 			bus_dmamap_unload(sc->alc_cdata.alc_cmb_tag,
1538 			    sc->alc_cdata.alc_cmb_map);
1539 		if (sc->alc_cdata.alc_cmb_map != NULL &&
1540 		    sc->alc_rdata.alc_cmb != NULL)
1541 			bus_dmamem_free(sc->alc_cdata.alc_cmb_tag,
1542 			    sc->alc_rdata.alc_cmb,
1543 			    sc->alc_cdata.alc_cmb_map);
1544 		sc->alc_rdata.alc_cmb = NULL;
1545 		sc->alc_cdata.alc_cmb_map = NULL;
1546 		bus_dma_tag_destroy(sc->alc_cdata.alc_cmb_tag);
1547 		sc->alc_cdata.alc_cmb_tag = NULL;
1548 	}
1549 	/* SMB block */
1550 	if (sc->alc_cdata.alc_smb_tag != NULL) {
1551 		if (sc->alc_cdata.alc_smb_map != NULL)
1552 			bus_dmamap_unload(sc->alc_cdata.alc_smb_tag,
1553 			    sc->alc_cdata.alc_smb_map);
1554 		if (sc->alc_cdata.alc_smb_map != NULL &&
1555 		    sc->alc_rdata.alc_smb != NULL)
1556 			bus_dmamem_free(sc->alc_cdata.alc_smb_tag,
1557 			    sc->alc_rdata.alc_smb,
1558 			    sc->alc_cdata.alc_smb_map);
1559 		sc->alc_rdata.alc_smb = NULL;
1560 		sc->alc_cdata.alc_smb_map = NULL;
1561 		bus_dma_tag_destroy(sc->alc_cdata.alc_smb_tag);
1562 		sc->alc_cdata.alc_smb_tag = NULL;
1563 	}
1564 	if (sc->alc_cdata.alc_buffer_tag != NULL) {
1565 		bus_dma_tag_destroy(sc->alc_cdata.alc_buffer_tag);
1566 		sc->alc_cdata.alc_buffer_tag = NULL;
1567 	}
1568 	if (sc->alc_cdata.alc_parent_tag != NULL) {
1569 		bus_dma_tag_destroy(sc->alc_cdata.alc_parent_tag);
1570 		sc->alc_cdata.alc_parent_tag = NULL;
1571 	}
1572 }
1573 
1574 static int
1575 alc_shutdown(device_t dev)
1576 {
1577 
1578 	return (alc_suspend(dev));
1579 }
1580 
1581 /*
1582  * Note, this driver resets the link speed to 10/100Mbps by
1583  * restarting auto-negotiation in the suspend/shutdown phase, but
1584  * we don't know whether that auto-negotiation will succeed, as
1585  * the driver has no control after the power-off/suspend operation.
1586  * If the renegotiation fails, WOL may not work. Running at 1Gbps
1587  * would draw more than the 375mA at 3.3V allowed by the PCI
1588  * specification, and that could result in power to the ethernet
1589  * controller being shut down completely.
1590  *
1591  * TODO
1592  * Save current negotiated media speed/duplex/flow-control to
1593  * softc and restore the same link again after resuming. PHY
1594  * handling such as power down/resetting to 100Mbps may be better
1595  * handled in suspend method in phy driver.
1596  */
1597 static void
1598 alc_setlinkspeed(struct alc_softc *sc)
1599 {
1600 	struct mii_data *mii;
1601 	int aneg, i;
1602 
1603 	mii = device_get_softc(sc->alc_miibus);
1604 	mii_pollstat(mii);
1605 	aneg = 0;
1606 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
1607 	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
1609 		case IFM_10_T:
1610 		case IFM_100_TX:
1611 			return;
1612 		case IFM_1000_T:
1613 			aneg++;
1614 			break;
1615 		default:
1616 			break;
1617 		}
1618 	}
1619 	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, MII_100T2CR, 0);
1620 	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
1621 	    MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1622 	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
1623 	    MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
1624 	DELAY(1000);
1625 	if (aneg != 0) {
1626 		/*
		 * Poll link state until alc(4) gets a 10/100Mbps link.
1628 		 */
1629 		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1630 			mii_pollstat(mii);
1631 			if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
1632 			    == (IFM_ACTIVE | IFM_AVALID)) {
1633 				switch (IFM_SUBTYPE(
1634 				    mii->mii_media_active)) {
1635 				case IFM_10_T:
1636 				case IFM_100_TX:
1637 					alc_mac_config(sc);
1638 					return;
1639 				default:
1640 					break;
1641 				}
1642 			}
1643 			ALC_UNLOCK(sc);
1644 			pause("alclnk", hz);
1645 			ALC_LOCK(sc);
1646 		}
1647 		if (i == MII_ANEGTICKS_GIGE)
1648 			device_printf(sc->alc_dev,
			    "establishing a link failed, WOL may not work!\n");
1650 	}
1651 	/*
	 * No link; force the MAC to a 100Mbps, full-duplex link.
	 * This is the last resort and may or may not work.
1654 	 */
1655 	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1656 	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1657 	alc_mac_config(sc);
1658 }
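
/*
 * A sketch of the TODO above (the alc_saved_media softc member and the
 * helper below are hypothetical, not part of this driver): record the
 * resolved media word while the link is up so that resume could
 * restore it via ifmedia_set()/mii_mediachg() instead of forcing
 * auto-negotiation again.
 */
#ifdef notyet
static void
alc_savelinkspeed(struct alc_softc *sc)
{
	struct mii_data *mii;

	mii = device_get_softc(sc->alc_miibus);
	mii_pollstat(mii);
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID))
		sc->alc_saved_media = mii->mii_media_active;
}
#endif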
1659 
1660 static void
1661 alc_setwol(struct alc_softc *sc)
1662 {
1663 	struct ifnet *ifp;
1664 	uint32_t cap, reg, pmcs;
1665 	uint16_t pmstat;
1666 	int base, pmc;
1667 
1668 	ALC_LOCK_ASSERT(sc);
1669 
1670 	if (pci_find_extcap(sc->alc_dev, PCIY_EXPRESS, &base) == 0) {
1671 		cap = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CAP);
1672 		if ((cap & PCIM_LINK_CAP_ASPM) != 0) {
1673 			cap = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CTL);
1674 			alc_disable_l0s_l1(sc);
1675 		}
1676 	}
1677 	if (pci_find_extcap(sc->alc_dev, PCIY_PMG, &pmc) != 0) {
1678 		/* Disable WOL. */
1679 		CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
1680 		reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC);
1681 		reg |= PCIE_PHYMISC_FORCE_RCV_DET;
1682 		CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg);
1683 		/* Force PHY power down. */
1684 		alc_phy_down(sc);
1685 		return;
1686 	}
1687 
1688 	ifp = sc->alc_ifp;
1689 	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
1690 		if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
1691 			alc_setlinkspeed(sc);
1692 		reg = CSR_READ_4(sc, ALC_MASTER_CFG);
1693 		reg &= ~MASTER_CLK_SEL_DIS;
1694 		CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
1695 	}
1696 
1697 	pmcs = 0;
1698 	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
1699 		pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB;
1700 	CSR_WRITE_4(sc, ALC_WOL_CFG, pmcs);
1701 	reg = CSR_READ_4(sc, ALC_MAC_CFG);
1702 	reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC | MAC_CFG_ALLMULTI |
1703 	    MAC_CFG_BCAST);
1704 	if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
1705 		reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST;
1706 	if ((ifp->if_capenable & IFCAP_WOL) != 0)
1707 		reg |= MAC_CFG_RX_ENB;
1708 	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
1709 
1710 	reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC);
1711 	reg |= PCIE_PHYMISC_FORCE_RCV_DET;
1712 	CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg);
1713 	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
1714 		/* WOL disabled, PHY power down. */
1715 		alc_phy_down(sc);
1716 	}
1717 	/* Request PME. */
1718 	pmstat = pci_read_config(sc->alc_dev, pmc + PCIR_POWER_STATUS, 2);
1719 	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1720 	if ((ifp->if_capenable & IFCAP_WOL) != 0)
1721 		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1722 	pci_write_config(sc->alc_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1723 }
1724 
1725 static int
1726 alc_suspend(device_t dev)
1727 {
1728 	struct alc_softc *sc;
1729 
1730 	sc = device_get_softc(dev);
1731 
1732 	ALC_LOCK(sc);
1733 	alc_stop(sc);
1734 	alc_setwol(sc);
1735 	ALC_UNLOCK(sc);
1736 
1737 	return (0);
1738 }
1739 
1740 static int
1741 alc_resume(device_t dev)
1742 {
1743 	struct alc_softc *sc;
1744 	struct ifnet *ifp;
1745 	int pmc;
1746 	uint16_t pmstat;
1747 
1748 	sc = device_get_softc(dev);
1749 
1750 	ALC_LOCK(sc);
1751 	if (pci_find_extcap(sc->alc_dev, PCIY_PMG, &pmc) == 0) {
1752 		/* Disable PME and clear PME status. */
1753 		pmstat = pci_read_config(sc->alc_dev,
1754 		    pmc + PCIR_POWER_STATUS, 2);
1755 		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
1756 			pmstat &= ~PCIM_PSTAT_PMEENABLE;
1757 			pci_write_config(sc->alc_dev,
1758 			    pmc + PCIR_POWER_STATUS, pmstat, 2);
1759 		}
1760 	}
1761 	/* Reset PHY. */
1762 	alc_phy_reset(sc);
1763 	ifp = sc->alc_ifp;
1764 	if ((ifp->if_flags & IFF_UP) != 0) {
1765 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1766 		alc_init_locked(sc);
1767 	}
1768 	ALC_UNLOCK(sc);
1769 
1770 	return (0);
1771 }
1772 
1773 static int
1774 alc_encap(struct alc_softc *sc, struct mbuf **m_head)
1775 {
1776 	struct alc_txdesc *txd, *txd_last;
1777 	struct tx_desc *desc;
1778 	struct mbuf *m;
1779 	struct ip *ip;
1780 	struct tcphdr *tcp;
1781 	bus_dma_segment_t txsegs[ALC_MAXTXSEGS];
1782 	bus_dmamap_t map;
1783 	uint32_t cflags, hdrlen, ip_off, poff, vtag;
1784 	int error, idx, nsegs, prod;
1785 
1786 	ALC_LOCK_ASSERT(sc);
1787 
1788 	M_ASSERTPKTHDR((*m_head));
1789 
1790 	m = *m_head;
1791 	ip = NULL;
1792 	tcp = NULL;
1793 	ip_off = poff = 0;
1794 	if ((m->m_pkthdr.csum_flags & (ALC_CSUM_FEATURES | CSUM_TSO)) != 0) {
1795 		/*
		 * AR8131/AR8132 requires the offset of the TCP/UDP
		 * header in its Tx descriptor to perform Tx checksum
		 * offloading. TSO also requires the TCP header offset
		 * and modification of the IP/TCP header. This kind of
		 * operation takes many CPU cycles on FreeBSD, so a fast
		 * host CPU is required for smooth TSO performance.
1802 		 */
1803 		struct ether_header *eh;
1804 
1805 		if (M_WRITABLE(m) == 0) {
1806 			/* Get a writable copy. */
1807 			m = m_dup(*m_head, M_DONTWAIT);
1808 			/* Release original mbufs. */
1809 			m_freem(*m_head);
1810 			if (m == NULL) {
1811 				*m_head = NULL;
1812 				return (ENOBUFS);
1813 			}
1814 			*m_head = m;
1815 		}
1816 
1817 		ip_off = sizeof(struct ether_header);
1818 		m = m_pullup(m, ip_off);
1819 		if (m == NULL) {
1820 			*m_head = NULL;
1821 			return (ENOBUFS);
1822 		}
1823 		eh = mtod(m, struct ether_header *);
1824 		/*
1825 		 * Check if hardware VLAN insertion is off.
1826 		 * Additional check for LLC/SNAP frame?
1827 		 */
1828 		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1829 			ip_off = sizeof(struct ether_vlan_header);
1830 			m = m_pullup(m, ip_off);
1831 			if (m == NULL) {
1832 				*m_head = NULL;
1833 				return (ENOBUFS);
1834 			}
1835 		}
1836 		m = m_pullup(m, ip_off + sizeof(struct ip));
1837 		if (m == NULL) {
1838 			*m_head = NULL;
1839 			return (ENOBUFS);
1840 		}
1841 		ip = (struct ip *)(mtod(m, char *) + ip_off);
1842 		poff = ip_off + (ip->ip_hl << 2);
1843 		if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
1844 			m = m_pullup(m, poff + sizeof(struct tcphdr));
1845 			if (m == NULL) {
1846 				*m_head = NULL;
1847 				return (ENOBUFS);
1848 			}
1849 			tcp = (struct tcphdr *)(mtod(m, char *) + poff);
1850 			m = m_pullup(m, poff + (tcp->th_off << 2));
1851 			if (m == NULL) {
1852 				*m_head = NULL;
1853 				return (ENOBUFS);
1854 			}
1855 			/*
			 * Due to strict adherence to the Microsoft NDIS
			 * Large Send specification, the hardware expects
			 * a pseudo TCP checksum inserted by the upper
			 * stack. Unfortunately the pseudo TCP checksum
			 * that NDIS refers to does not include the TCP
			 * payload length, so the driver has to recompute
			 * the pseudo checksum here. Hopefully this isn't
			 * much of a burden on modern CPUs.
			 *
			 * Reset the IP checksum and recompute the TCP
			 * pseudo checksum as the NDIS specification says.
1867 			 */
1868 			ip->ip_sum = 0;
1869 			tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
1870 			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1871 		}
1872 		*m_head = m;
1873 	}
1874 
1875 	prod = sc->alc_cdata.alc_tx_prod;
1876 	txd = &sc->alc_cdata.alc_txdesc[prod];
1877 	txd_last = txd;
1878 	map = txd->tx_dmamap;
1879 
1880 	error = bus_dmamap_load_mbuf_sg(sc->alc_cdata.alc_tx_tag, map,
1881 	    *m_head, txsegs, &nsegs, 0);
1882 	if (error == EFBIG) {
1883 		m = m_collapse(*m_head, M_DONTWAIT, ALC_MAXTXSEGS);
1884 		if (m == NULL) {
1885 			m_freem(*m_head);
1886 			*m_head = NULL;
1887 			return (ENOMEM);
1888 		}
1889 		*m_head = m;
1890 		error = bus_dmamap_load_mbuf_sg(sc->alc_cdata.alc_tx_tag, map,
1891 		    *m_head, txsegs, &nsegs, 0);
1892 		if (error != 0) {
1893 			m_freem(*m_head);
1894 			*m_head = NULL;
1895 			return (error);
1896 		}
1897 	} else if (error != 0)
1898 		return (error);
1899 	if (nsegs == 0) {
1900 		m_freem(*m_head);
1901 		*m_head = NULL;
1902 		return (EIO);
1903 	}
1904 
1905 	/* Check descriptor overrun. */
1906 	if (sc->alc_cdata.alc_tx_cnt + nsegs >= ALC_TX_RING_CNT - 3) {
1907 		bus_dmamap_unload(sc->alc_cdata.alc_tx_tag, map);
1908 		return (ENOBUFS);
1909 	}
1910 	bus_dmamap_sync(sc->alc_cdata.alc_tx_tag, map, BUS_DMASYNC_PREWRITE);
1911 
1912 	m = *m_head;
1913 	cflags = TD_ETHERNET;
1914 	vtag = 0;
1915 	desc = NULL;
1916 	idx = 0;
1917 	/* Configure VLAN hardware tag insertion. */
1918 	if ((m->m_flags & M_VLANTAG) != 0) {
1919 		vtag = htons(m->m_pkthdr.ether_vtag);
1920 		vtag = (vtag << TD_VLAN_SHIFT) & TD_VLAN_MASK;
1921 		cflags |= TD_INS_VLAN_TAG;
1922 	}
1923 	/* Configure Tx checksum offload. */
1924 	if ((m->m_pkthdr.csum_flags & ALC_CSUM_FEATURES) != 0) {
1925 #ifdef ALC_USE_CUSTOM_CSUM
1926 		cflags |= TD_CUSTOM_CSUM;
1927 		/* Set checksum start offset. */
1928 		cflags |= ((poff >> 1) << TD_PLOAD_OFFSET_SHIFT) &
1929 		    TD_PLOAD_OFFSET_MASK;
1930 		/* Set checksum insertion position of TCP/UDP. */
1931 		cflags |= (((poff + m->m_pkthdr.csum_data) >> 1) <<
1932 		    TD_CUSTOM_CSUM_OFFSET_SHIFT) & TD_CUSTOM_CSUM_OFFSET_MASK;
1933 #else
1934 		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
1935 			cflags |= TD_IPCSUM;
1936 		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
1937 			cflags |= TD_TCPCSUM;
1938 		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
1939 			cflags |= TD_UDPCSUM;
1940 		/* Set TCP/UDP header offset. */
1941 		cflags |= (poff << TD_L4HDR_OFFSET_SHIFT) &
1942 		    TD_L4HDR_OFFSET_MASK;
1943 #endif
1944 	} else if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
1945 		/* Request TSO and set MSS. */
1946 		cflags |= TD_TSO | TD_TSO_DESCV1;
1947 		cflags |= ((uint32_t)m->m_pkthdr.tso_segsz << TD_MSS_SHIFT) &
1948 		    TD_MSS_MASK;
1949 		/* Set TCP header offset. */
1950 		cflags |= (poff << TD_TCPHDR_OFFSET_SHIFT) &
1951 		    TD_TCPHDR_OFFSET_MASK;
1952 		/*
		 * AR8131/AR8132 requires that the first buffer hold
		 * only IP/TCP header data. The payload has to be
		 * handled by the remaining descriptors.
1956 		 */
1957 		hdrlen = poff + (tcp->th_off << 2);
1958 		desc = &sc->alc_rdata.alc_tx_ring[prod];
1959 		desc->len = htole32(TX_BYTES(hdrlen | vtag));
1960 		desc->flags = htole32(cflags);
1961 		desc->addr = htole64(txsegs[0].ds_addr);
1962 		sc->alc_cdata.alc_tx_cnt++;
1963 		ALC_DESC_INC(prod, ALC_TX_RING_CNT);
1964 		if (m->m_len - hdrlen > 0) {
1965 			/* Handle remaining payload of the first fragment. */
1966 			desc = &sc->alc_rdata.alc_tx_ring[prod];
1967 			desc->len = htole32(TX_BYTES((m->m_len - hdrlen) |
1968 			    vtag));
1969 			desc->flags = htole32(cflags);
1970 			desc->addr = htole64(txsegs[0].ds_addr + hdrlen);
1971 			sc->alc_cdata.alc_tx_cnt++;
1972 			ALC_DESC_INC(prod, ALC_TX_RING_CNT);
1973 		}
1974 		/* Handle remaining fragments. */
1975 		idx = 1;
1976 	}
1977 	for (; idx < nsegs; idx++) {
1978 		desc = &sc->alc_rdata.alc_tx_ring[prod];
1979 		desc->len = htole32(TX_BYTES(txsegs[idx].ds_len) | vtag);
1980 		desc->flags = htole32(cflags);
1981 		desc->addr = htole64(txsegs[idx].ds_addr);
1982 		sc->alc_cdata.alc_tx_cnt++;
1983 		ALC_DESC_INC(prod, ALC_TX_RING_CNT);
1984 	}
1985 	/* Update producer index. */
1986 	sc->alc_cdata.alc_tx_prod = prod;
1987 
1988 	/* Finally set EOP on the last descriptor. */
1989 	prod = (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT;
1990 	desc = &sc->alc_rdata.alc_tx_ring[prod];
1991 	desc->flags |= htole32(TD_EOP);
1992 
	/*
	 * Swap the dmamaps of the first and the last descriptor.
	 * The transmitted mbuf is tracked at the last descriptor, so
	 * the map that was actually loaded (the first slot's) must
	 * follow it for the later bus_dmamap_unload() in alc_txeof().
	 */
1994 	txd = &sc->alc_cdata.alc_txdesc[prod];
1995 	map = txd_last->tx_dmamap;
1996 	txd_last->tx_dmamap = txd->tx_dmamap;
1997 	txd->tx_dmamap = map;
1998 	txd->tx_m = m;
1999 
2000 	return (0);
2001 }
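
/*
 * Worked example of the TSO descriptor layout built above (an
 * illustration derived from the code, not from hardware documentation):
 * for a TSO frame whose first DMA segment starts with a 54-byte
 * Ethernet/IP/TCP header, alc_encap() emits
 *
 *   desc[0]: addr = seg0           len = 54 | vtag     (headers only)
 *   desc[1]: addr = seg0 + 54      len = m_len - 54    (seg 0 payload)
 *   desc[2..n]: the remaining DMA segments as-is
 *
 * and finally sets TD_EOP on the last descriptor written.
 */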
2002 
2003 static void
2004 alc_tx_task(void *arg, int pending)
2005 {
2006 	struct ifnet *ifp;
2007 
2008 	ifp = (struct ifnet *)arg;
2009 	alc_start(ifp);
2010 }
2011 
2012 static void
2013 alc_start(struct ifnet *ifp)
2014 {
2015 	struct alc_softc *sc;
2016 	struct mbuf *m_head;
2017 	int enq;
2018 
2019 	sc = ifp->if_softc;
2020 
2021 	ALC_LOCK(sc);
2022 
2023 	/* Reclaim transmitted frames. */
2024 	if (sc->alc_cdata.alc_tx_cnt >= ALC_TX_DESC_HIWAT)
2025 		alc_txeof(sc);
2026 
2027 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2028 	    IFF_DRV_RUNNING || (sc->alc_flags & ALC_FLAG_LINK) == 0) {
2029 		ALC_UNLOCK(sc);
2030 		return;
2031 	}
2032 
2033 	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
2034 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2035 		if (m_head == NULL)
2036 			break;
2037 		/*
2038 		 * Pack the data into the transmit ring. If we
2039 		 * don't have room, set the OACTIVE flag and wait
2040 		 * for the NIC to drain the ring.
2041 		 */
2042 		if (alc_encap(sc, &m_head)) {
2043 			if (m_head == NULL)
2044 				break;
2045 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2046 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2047 			break;
2048 		}
2049 
2050 		enq++;
2051 		/*
2052 		 * If there's a BPF listener, bounce a copy of this frame
2053 		 * to him.
2054 		 */
2055 		ETHER_BPF_MTAP(ifp, m_head);
2056 	}
2057 
2058 	if (enq > 0) {
2059 		/* Sync descriptors. */
2060 		bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
2061 		    sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE);
2062 		/* Kick. Assume we're using normal Tx priority queue. */
2063 		CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX,
2064 		    (sc->alc_cdata.alc_tx_prod <<
2065 		    MBOX_TD_PROD_LO_IDX_SHIFT) &
2066 		    MBOX_TD_PROD_LO_IDX_MASK);
2067 		/* Set a timeout in case the chip goes out to lunch. */
2068 		sc->alc_watchdog_timer = ALC_TX_TIMEOUT;
2069 	}
2070 
2071 	ALC_UNLOCK(sc);
2072 }
2073 
2074 static void
2075 alc_watchdog(struct alc_softc *sc)
2076 {
2077 	struct ifnet *ifp;
2078 
2079 	ALC_LOCK_ASSERT(sc);
2080 
2081 	if (sc->alc_watchdog_timer == 0 || --sc->alc_watchdog_timer)
2082 		return;
2083 
2084 	ifp = sc->alc_ifp;
2085 	if ((sc->alc_flags & ALC_FLAG_LINK) == 0) {
2086 		if_printf(sc->alc_ifp, "watchdog timeout (lost link)\n");
2087 		ifp->if_oerrors++;
2088 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2089 		alc_init_locked(sc);
2090 		return;
2091 	}
2092 	if_printf(sc->alc_ifp, "watchdog timeout -- resetting\n");
2093 	ifp->if_oerrors++;
2094 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2095 	alc_init_locked(sc);
2096 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2097 		taskqueue_enqueue(sc->alc_tq, &sc->alc_tx_task);
2098 }
2099 
2100 static int
2101 alc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2102 {
2103 	struct alc_softc *sc;
2104 	struct ifreq *ifr;
2105 	struct mii_data *mii;
2106 	int error, mask;
2107 
2108 	sc = ifp->if_softc;
2109 	ifr = (struct ifreq *)data;
2110 	error = 0;
2111 	switch (cmd) {
2112 	case SIOCSIFMTU:
2113 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ALC_JUMBO_MTU ||
2114 		    ((sc->alc_flags & ALC_FLAG_JUMBO) == 0 &&
2115 		    ifr->ifr_mtu > ETHERMTU))
2116 			error = EINVAL;
2117 		else if (ifp->if_mtu != ifr->ifr_mtu) {
2118 			ALC_LOCK(sc);
2119 			ifp->if_mtu = ifr->ifr_mtu;
			/* AR8131/AR8132 has a 13-bit MSS field. */
2121 			if (ifp->if_mtu > ALC_TSO_MTU &&
2122 			    (ifp->if_capenable & IFCAP_TSO4) != 0) {
2123 				ifp->if_capenable &= ~IFCAP_TSO4;
2124 				ifp->if_hwassist &= ~CSUM_TSO;
2125 			}
2126 			ALC_UNLOCK(sc);
2127 		}
2128 		break;
2129 	case SIOCSIFFLAGS:
2130 		ALC_LOCK(sc);
2131 		if ((ifp->if_flags & IFF_UP) != 0) {
2132 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
2133 			    ((ifp->if_flags ^ sc->alc_if_flags) &
2134 			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2135 				alc_rxfilter(sc);
2136 			else if ((sc->alc_flags & ALC_FLAG_DETACH) == 0)
2137 				alc_init_locked(sc);
2138 		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2139 			alc_stop(sc);
2140 		sc->alc_if_flags = ifp->if_flags;
2141 		ALC_UNLOCK(sc);
2142 		break;
2143 	case SIOCADDMULTI:
2144 	case SIOCDELMULTI:
2145 		ALC_LOCK(sc);
2146 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2147 			alc_rxfilter(sc);
2148 		ALC_UNLOCK(sc);
2149 		break;
2150 	case SIOCSIFMEDIA:
2151 	case SIOCGIFMEDIA:
2152 		mii = device_get_softc(sc->alc_miibus);
2153 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
2154 		break;
2155 	case SIOCSIFCAP:
2156 		ALC_LOCK(sc);
2157 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2158 		if ((mask & IFCAP_TXCSUM) != 0 &&
2159 		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
2160 			ifp->if_capenable ^= IFCAP_TXCSUM;
2161 			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2162 				ifp->if_hwassist |= ALC_CSUM_FEATURES;
2163 			else
2164 				ifp->if_hwassist &= ~ALC_CSUM_FEATURES;
2165 		}
2166 		if ((mask & IFCAP_TSO4) != 0 &&
2167 		    (ifp->if_capabilities & IFCAP_TSO4) != 0) {
2168 			ifp->if_capenable ^= IFCAP_TSO4;
2169 			if ((ifp->if_capenable & IFCAP_TSO4) != 0) {
				/* AR8131/AR8132 has a 13-bit MSS field. */
2171 				if (ifp->if_mtu > ALC_TSO_MTU) {
2172 					ifp->if_capenable &= ~IFCAP_TSO4;
2173 					ifp->if_hwassist &= ~CSUM_TSO;
2174 				} else
2175 					ifp->if_hwassist |= CSUM_TSO;
2176 			} else
2177 				ifp->if_hwassist &= ~CSUM_TSO;
2178 		}
2179 		if ((mask & IFCAP_WOL_MCAST) != 0 &&
2180 		    (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0)
2181 			ifp->if_capenable ^= IFCAP_WOL_MCAST;
2182 		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
2183 		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
2184 			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
2185 		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
2186 		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
2187 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2188 			alc_rxvlan(sc);
2189 		}
2190 		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
2191 		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
2192 			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
2193 		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
2194 		    (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
2195 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
2196 		/*
		 * VLAN hardware tagging is required to do checksum
		 * offload or TSO on a VLAN interface. Checksum offload
		 * on a VLAN interface also requires hardware checksum
		 * offload on the parent interface.
2201 		 */
2202 		if ((ifp->if_capenable & IFCAP_TXCSUM) == 0)
2203 			ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
2204 		if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
2205 			ifp->if_capenable &=
2206 			    ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM);
2207 		ALC_UNLOCK(sc);
2208 		VLAN_CAPABILITIES(ifp);
2209 		break;
2210 	default:
2211 		error = ether_ioctl(ifp, cmd, data);
2212 		break;
2213 	}
2214 
2215 	return (error);
2216 }
2217 
2218 static void
2219 alc_mac_config(struct alc_softc *sc)
2220 {
2221 	struct mii_data *mii;
2222 	uint32_t reg;
2223 
2224 	ALC_LOCK_ASSERT(sc);
2225 
2226 	mii = device_get_softc(sc->alc_miibus);
2227 	reg = CSR_READ_4(sc, ALC_MAC_CFG);
2228 	reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
2229 	    MAC_CFG_SPEED_MASK);
2230 	/* Reprogram MAC with resolved speed/duplex. */
2231 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
2232 	case IFM_10_T:
2233 	case IFM_100_TX:
2234 		reg |= MAC_CFG_SPEED_10_100;
2235 		break;
2236 	case IFM_1000_T:
2237 		reg |= MAC_CFG_SPEED_1000;
2238 		break;
2239 	}
2240 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
2241 		reg |= MAC_CFG_FULL_DUPLEX;
2242 #ifdef notyet
2243 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
2244 			reg |= MAC_CFG_TX_FC;
2245 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
2246 			reg |= MAC_CFG_RX_FC;
2247 #endif
2248 	}
2249 	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
2250 }
2251 
2252 static void
2253 alc_stats_clear(struct alc_softc *sc)
2254 {
2255 	struct smb sb, *smb;
2256 	uint32_t *reg;
2257 	int i;
2258 
2259 	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
2260 		bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
2261 		    sc->alc_cdata.alc_smb_map,
2262 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2263 		smb = sc->alc_rdata.alc_smb;
2264 		/* Update done, clear. */
2265 		smb->updated = 0;
2266 		bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
2267 		    sc->alc_cdata.alc_smb_map,
2268 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2269 	} else {
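		/* Read Rx statistics. */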
2270 		for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
2271 		    reg++) {
2272 			CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
2273 			i += sizeof(uint32_t);
2274 		}
2275 		/* Read Tx statistics. */
2276 		for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
2277 		    reg++) {
2278 			CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
2279 			i += sizeof(uint32_t);
2280 		}
2281 	}
2282 }
2283 
2284 static void
2285 alc_stats_update(struct alc_softc *sc)
2286 {
2287 	struct alc_hw_stats *stat;
2288 	struct smb sb, *smb;
2289 	struct ifnet *ifp;
2290 	uint32_t *reg;
2291 	int i;
2292 
2293 	ALC_LOCK_ASSERT(sc);
2294 
2295 	ifp = sc->alc_ifp;
2296 	stat = &sc->alc_stats;
2297 	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
2298 		bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
2299 		    sc->alc_cdata.alc_smb_map,
2300 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2301 		smb = sc->alc_rdata.alc_smb;
2302 		if (smb->updated == 0)
2303 			return;
2304 	} else {
2305 		smb = &sb;
2306 		/* Read Rx statistics. */
2307 		for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
2308 		    reg++) {
2309 			*reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
2310 			i += sizeof(uint32_t);
2311 		}
2312 		/* Read Tx statistics. */
2313 		for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
2314 		    reg++) {
2315 			*reg = CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
2316 			i += sizeof(uint32_t);
2317 		}
2318 	}
2319 
2320 	/* Rx stats. */
2321 	stat->rx_frames += smb->rx_frames;
2322 	stat->rx_bcast_frames += smb->rx_bcast_frames;
2323 	stat->rx_mcast_frames += smb->rx_mcast_frames;
2324 	stat->rx_pause_frames += smb->rx_pause_frames;
2325 	stat->rx_control_frames += smb->rx_control_frames;
2326 	stat->rx_crcerrs += smb->rx_crcerrs;
2327 	stat->rx_lenerrs += smb->rx_lenerrs;
2328 	stat->rx_bytes += smb->rx_bytes;
2329 	stat->rx_runts += smb->rx_runts;
2330 	stat->rx_fragments += smb->rx_fragments;
2331 	stat->rx_pkts_64 += smb->rx_pkts_64;
2332 	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
2333 	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
2334 	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
2335 	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
2336 	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
2337 	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
2338 	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
2339 	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
2340 	stat->rx_rrs_errs += smb->rx_rrs_errs;
2341 	stat->rx_alignerrs += smb->rx_alignerrs;
2342 	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
2343 	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
2344 	stat->rx_pkts_filtered += smb->rx_pkts_filtered;
2345 
2346 	/* Tx stats. */
2347 	stat->tx_frames += smb->tx_frames;
2348 	stat->tx_bcast_frames += smb->tx_bcast_frames;
2349 	stat->tx_mcast_frames += smb->tx_mcast_frames;
2350 	stat->tx_pause_frames += smb->tx_pause_frames;
2351 	stat->tx_excess_defer += smb->tx_excess_defer;
2352 	stat->tx_control_frames += smb->tx_control_frames;
2353 	stat->tx_deferred += smb->tx_deferred;
2354 	stat->tx_bytes += smb->tx_bytes;
2355 	stat->tx_pkts_64 += smb->tx_pkts_64;
2356 	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
2357 	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
2358 	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
2359 	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
2360 	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
2361 	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
2362 	stat->tx_single_colls += smb->tx_single_colls;
2363 	stat->tx_multi_colls += smb->tx_multi_colls;
2364 	stat->tx_late_colls += smb->tx_late_colls;
2365 	stat->tx_excess_colls += smb->tx_excess_colls;
2366 	stat->tx_abort += smb->tx_abort;
2367 	stat->tx_underrun += smb->tx_underrun;
2368 	stat->tx_desc_underrun += smb->tx_desc_underrun;
2369 	stat->tx_lenerrs += smb->tx_lenerrs;
2370 	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
2371 	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
2372 	stat->tx_mcast_bytes += smb->tx_mcast_bytes;
2373 
2374 	/* Update counters in ifnet. */
2375 	ifp->if_opackets += smb->tx_frames;
2376 
2377 	ifp->if_collisions += smb->tx_single_colls +
2378 	    smb->tx_multi_colls * 2 + smb->tx_late_colls +
2379 	    smb->tx_abort * HDPX_CFG_RETRY_DEFAULT;
2380 
2381 	/*
2382 	 * XXX
	 * The tx_pkts_truncated counter looks suspicious. It constantly
	 * increments with no sign of Tx errors. This may indicate that
	 * the counter name is not the correct one, so I've removed the
	 * counter from output errors.
2387 	 */
2388 	ifp->if_oerrors += smb->tx_abort + smb->tx_late_colls +
2389 	    smb->tx_underrun;
2390 
2391 	ifp->if_ipackets += smb->rx_frames;
2392 
2393 	ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
2394 	    smb->rx_runts + smb->rx_pkts_truncated +
2395 	    smb->rx_fifo_oflows + smb->rx_rrs_errs +
2396 	    smb->rx_alignerrs;
2397 
2398 	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
2399 		/* Update done, clear. */
2400 		smb->updated = 0;
2401 		bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
2402 		    sc->alc_cdata.alc_smb_map,
2403 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2404 	}
2405 }
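
/*
 * Both MIB loops above walk struct smb as a flat array of uint32_t
 * counters whose order mirrors the hardware MIB register layout. A
 * minimal compile-time check of that assumption could look like the
 * following sketch (CTASSERT is the in-tree static assert; the exact
 * fields checked here are illustrative):
 */
#ifdef notyet
CTASSERT(offsetof(struct smb, rx_pkts_filtered) >
    offsetof(struct smb, rx_frames));
CTASSERT((offsetof(struct smb, rx_pkts_filtered) -
    offsetof(struct smb, rx_frames)) % sizeof(uint32_t) == 0);
#endif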
2406 
2407 static int
2408 alc_intr(void *arg)
2409 {
2410 	struct alc_softc *sc;
2411 	uint32_t status;
2412 
2413 	sc = (struct alc_softc *)arg;
2414 
2415 	status = CSR_READ_4(sc, ALC_INTR_STATUS);
2416 	if ((status & ALC_INTRS) == 0)
2417 		return (FILTER_STRAY);
2418 	/* Disable interrupts. */
2419 	CSR_WRITE_4(sc, ALC_INTR_STATUS, INTR_DIS_INT);
2420 	taskqueue_enqueue(sc->alc_tq, &sc->alc_int_task);
2421 
2422 	return (FILTER_HANDLED);
2423 }
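
/*
 * alc(4) runs its interrupt as a filter handler: alc_intr() above only
 * masks further interrupts and defers all ring processing to
 * alc_int_task() on the driver taskqueue, which re-enables interrupts
 * at its tail once the hardware status has been drained.
 */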
2424 
2425 static void
2426 alc_int_task(void *arg, int pending)
2427 {
2428 	struct alc_softc *sc;
2429 	struct ifnet *ifp;
2430 	uint32_t status;
2431 	int more;
2432 
2433 	sc = (struct alc_softc *)arg;
2434 	ifp = sc->alc_ifp;
2435 
2436 	status = CSR_READ_4(sc, ALC_INTR_STATUS);
2437 	more = atomic_readandclear_int(&sc->alc_morework);
2438 	if (more != 0)
2439 		status |= INTR_RX_PKT;
2440 	if ((status & ALC_INTRS) == 0)
2441 		goto done;
2442 
2443 	/* Acknowledge interrupts but still disable interrupts. */
2444 	CSR_WRITE_4(sc, ALC_INTR_STATUS, status | INTR_DIS_INT);
2445 
2446 	more = 0;
2447 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2448 		if ((status & INTR_RX_PKT) != 0) {
2449 			more = alc_rxintr(sc, sc->alc_process_limit);
2450 			if (more == EAGAIN)
2451 				atomic_set_int(&sc->alc_morework, 1);
2452 			else if (more == EIO) {
2453 				ALC_LOCK(sc);
2454 				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2455 				alc_init_locked(sc);
2456 				ALC_UNLOCK(sc);
2457 				return;
2458 			}
2459 		}
2460 		if ((status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST |
2461 		    INTR_TXQ_TO_RST)) != 0) {
2462 			if ((status & INTR_DMA_RD_TO_RST) != 0)
2463 				device_printf(sc->alc_dev,
2464 				    "DMA read error! -- resetting\n");
2465 			if ((status & INTR_DMA_WR_TO_RST) != 0)
2466 				device_printf(sc->alc_dev,
2467 				    "DMA write error! -- resetting\n");
2468 			if ((status & INTR_TXQ_TO_RST) != 0)
2469 				device_printf(sc->alc_dev,
2470 				    "TxQ reset! -- resetting\n");
2471 			ALC_LOCK(sc);
2472 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2473 			alc_init_locked(sc);
2474 			ALC_UNLOCK(sc);
2475 			return;
2476 		}
2477 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
2478 		    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2479 			taskqueue_enqueue(sc->alc_tq, &sc->alc_tx_task);
2480 	}
2481 
2482 	if (more == EAGAIN ||
2483 	    (CSR_READ_4(sc, ALC_INTR_STATUS) & ALC_INTRS) != 0) {
2484 		taskqueue_enqueue(sc->alc_tq, &sc->alc_int_task);
2485 		return;
2486 	}
2487 
2488 done:
2489 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2490 		/* Re-enable interrupts if we're running. */
2491 		CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF);
2492 	}
2493 }
2494 
2495 static void
2496 alc_txeof(struct alc_softc *sc)
2497 {
2498 	struct ifnet *ifp;
2499 	struct alc_txdesc *txd;
2500 	uint32_t cons, prod;
2501 	int prog;
2502 
2503 	ALC_LOCK_ASSERT(sc);
2504 
2505 	ifp = sc->alc_ifp;
2506 
2507 	if (sc->alc_cdata.alc_tx_cnt == 0)
2508 		return;
2509 	bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
2510 	    sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_POSTWRITE);
2511 	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
2512 		bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag,
2513 		    sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_POSTREAD);
2514 		prod = sc->alc_rdata.alc_cmb->cons;
2515 	} else
2516 		prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX);
2517 	/* Assume we're using normal Tx priority queue. */
2518 	prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >>
2519 	    MBOX_TD_CONS_LO_IDX_SHIFT;
2520 	cons = sc->alc_cdata.alc_tx_cons;
2521 	/*
2522 	 * Go through our Tx list and free mbufs for those
2523 	 * frames which have been transmitted.
2524 	 */
2525 	for (prog = 0; cons != prod; prog++,
2526 	    ALC_DESC_INC(cons, ALC_TX_RING_CNT)) {
2527 		if (sc->alc_cdata.alc_tx_cnt <= 0)
2528 			break;
2529 		prog++;
2530 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2531 		sc->alc_cdata.alc_tx_cnt--;
2532 		txd = &sc->alc_cdata.alc_txdesc[cons];
2533 		if (txd->tx_m != NULL) {
2534 			/* Reclaim transmitted mbufs. */
2535 			bus_dmamap_sync(sc->alc_cdata.alc_tx_tag,
2536 			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2537 			bus_dmamap_unload(sc->alc_cdata.alc_tx_tag,
2538 			    txd->tx_dmamap);
2539 			m_freem(txd->tx_m);
2540 			txd->tx_m = NULL;
2541 		}
2542 	}
2543 
2544 	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
2545 		bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag,
2546 		    sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_PREREAD);
2547 	sc->alc_cdata.alc_tx_cons = cons;
2548 	/*
	 * Unarm the watchdog timer only when there are no pending
	 * frames in the Tx queue.
2551 	 */
2552 	if (sc->alc_cdata.alc_tx_cnt == 0)
2553 		sc->alc_watchdog_timer = 0;
2554 }
2555 
2556 static int
2557 alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd)
2558 {
2559 	struct mbuf *m;
2560 	bus_dma_segment_t segs[1];
2561 	bus_dmamap_t map;
2562 	int nsegs;
2563 
2564 	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2565 	if (m == NULL)
2566 		return (ENOBUFS);
2567 	m->m_len = m->m_pkthdr.len = RX_BUF_SIZE_MAX;
2568 #ifndef __NO_STRICT_ALIGNMENT
2569 	m_adj(m, sizeof(uint64_t));
2570 #endif
2571 
2572 	if (bus_dmamap_load_mbuf_sg(sc->alc_cdata.alc_rx_tag,
2573 	    sc->alc_cdata.alc_rx_sparemap, m, segs, &nsegs, 0) != 0) {
2574 		m_freem(m);
2575 		return (ENOBUFS);
2576 	}
2577 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2578 
2579 	if (rxd->rx_m != NULL) {
2580 		bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap,
2581 		    BUS_DMASYNC_POSTREAD);
2582 		bus_dmamap_unload(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap);
2583 	}
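	/*
	 * Swap the just-loaded spare map into the ring slot and keep
	 * the slot's previous map as the new spare. Because the load
	 * above targets the spare map, a load failure leaves the ring
	 * entry untouched with a valid, loaded buffer.
	 */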
2584 	map = rxd->rx_dmamap;
2585 	rxd->rx_dmamap = sc->alc_cdata.alc_rx_sparemap;
2586 	sc->alc_cdata.alc_rx_sparemap = map;
2587 	bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap,
2588 	    BUS_DMASYNC_PREREAD);
2589 	rxd->rx_m = m;
2590 	rxd->rx_desc->addr = htole64(segs[0].ds_addr);
2591 	return (0);
2592 }
2593 
2594 static int
2595 alc_rxintr(struct alc_softc *sc, int count)
2596 {
2597 	struct ifnet *ifp;
2598 	struct rx_rdesc *rrd;
2599 	uint32_t nsegs, status;
2600 	int rr_cons, prog;
2601 
2602 	bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
2603 	    sc->alc_cdata.alc_rr_ring_map,
2604 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2605 	bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
2606 	    sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_POSTWRITE);
2607 	rr_cons = sc->alc_cdata.alc_rr_cons;
2608 	ifp = sc->alc_ifp;
2609 	for (prog = 0; (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;) {
2610 		if (count-- <= 0)
2611 			break;
2612 		rrd = &sc->alc_rdata.alc_rr_ring[rr_cons];
2613 		status = le32toh(rrd->status);
2614 		if ((status & RRD_VALID) == 0)
2615 			break;
2616 		nsegs = RRD_RD_CNT(le32toh(rrd->rdinfo));
2617 		if (nsegs == 0) {
2618 			/* This should not happen! */
2619 			device_printf(sc->alc_dev,
2620 			    "unexpected segment count -- resetting\n");
2621 			return (EIO);
2622 		}
2623 		alc_rxeof(sc, rrd);
2624 		/* Clear Rx return status. */
2625 		rrd->status = 0;
2626 		ALC_DESC_INC(rr_cons, ALC_RR_RING_CNT);
2627 		sc->alc_cdata.alc_rx_cons += nsegs;
2628 		sc->alc_cdata.alc_rx_cons %= ALC_RR_RING_CNT;
2629 		prog += nsegs;
2630 	}
2631 
2632 	if (prog > 0) {
2633 		/* Update the consumer index. */
2634 		sc->alc_cdata.alc_rr_cons = rr_cons;
2635 		/* Sync Rx return descriptors. */
2636 		bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
2637 		    sc->alc_cdata.alc_rr_ring_map,
2638 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2639 		/*
		 * Sync updated Rx descriptors so that the controller
		 * sees the modified buffer addresses.
2642 		 */
2643 		bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
2644 		    sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE);
2645 		/*
		 * Let the controller know about the availability of new
		 * Rx buffers. Since alc(4) uses RXQ_CFG_RD_BURST_DEFAULT
		 * descriptors, it may be possible to update
		 * ALC_MBOX_RD0_PROD_IDX only when Rx buffer pre-fetching
		 * is required. In addition we already set
		 * ALC_RX_RD_FREE_THRESH to RX_RD_FREE_THRESH_LO_DEFAULT
		 * descriptors. However it still seems that pre-fetching
		 * needs more experimentation.
2654 		 */
2655 		CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX,
2656 		    sc->alc_cdata.alc_rx_cons);
2657 	}
2658 
2659 	return (count > 0 ? 0 : EAGAIN);
2660 }
2661 
2662 #ifndef __NO_STRICT_ALIGNMENT
2663 static struct mbuf *
2664 alc_fixup_rx(struct ifnet *ifp, struct mbuf *m)
2665 {
2666 	struct mbuf *n;
	int i;
	uint16_t *src, *dst;
2669 
2670 	src = mtod(m, uint16_t *);
2671 	dst = src - 3;
2672 
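	/*
	 * alc_newbuf() reserves 8 bytes (m_adj(m, sizeof(uint64_t))) at
	 * the head of each Rx buffer. Copying the frame back by three
	 * uint16_t words (6 bytes) leaves a net 2-byte offset, which
	 * puts the IP header that follows the 14-byte ethernet header
	 * on a 4-byte boundary for strict-alignment architectures.
	 */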
2673 	if (m->m_next == NULL) {
2674 		for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
2675 			*dst++ = *src++;
2676 		m->m_data -= 6;
2677 		return (m);
2678 	}
2679 	/*
	 * Append a new mbuf to the received mbuf chain and copy the
	 * ethernet header from the chain. This can save lots of CPU
	 * cycles for jumbo frames.
2683 	 */
2684 	MGETHDR(n, M_DONTWAIT, MT_DATA);
2685 	if (n == NULL) {
2686 		ifp->if_iqdrops++;
2687 		m_freem(m);
2688 		return (NULL);
2689 	}
2690 	bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
2691 	m->m_data += ETHER_HDR_LEN;
2692 	m->m_len -= ETHER_HDR_LEN;
2693 	n->m_len = ETHER_HDR_LEN;
2694 	M_MOVE_PKTHDR(n, m);
2695 	n->m_next = m;
2696 	return (n);
2697 }
2698 #endif
2699 
2700 /* Receive a frame. */
2701 static void
2702 alc_rxeof(struct alc_softc *sc, struct rx_rdesc *rrd)
2703 {
2704 	struct alc_rxdesc *rxd;
2705 	struct ifnet *ifp;
2706 	struct mbuf *mp, *m;
2707 	uint32_t rdinfo, status, vtag;
2708 	int count, nsegs, rx_cons;
2709 
2710 	ifp = sc->alc_ifp;
2711 	status = le32toh(rrd->status);
2712 	rdinfo = le32toh(rrd->rdinfo);
2713 	rx_cons = RRD_RD_IDX(rdinfo);
2714 	nsegs = RRD_RD_CNT(rdinfo);
2715 
2716 	sc->alc_cdata.alc_rxlen = RRD_BYTES(status);
2717 	if ((status & (RRD_ERR_SUM | RRD_ERR_LENGTH)) != 0) {
2718 		/*
		 * We want to pass the following frames to the upper
		 * layer regardless of the error status of the Rx
		 * return ring.
		 *
		 *  o IP/TCP/UDP checksum is bad.
		 *  o frame length and protocol specific length
		 *    do not match.
		 *
		 * Force the network stack to compute the checksum for
		 * errored frames.
2729 		 */
2730 		status |= RRD_TCP_UDPCSUM_NOK | RRD_IPCSUM_NOK;
		if ((status & (RRD_ERR_CRC | RRD_ERR_ALIGN |
		    RRD_ERR_TRUNC | RRD_ERR_RUNT)) != 0)
2733 			return;
2734 	}
2735 
2736 	for (count = 0; count < nsegs; count++,
2737 	    ALC_DESC_INC(rx_cons, ALC_RX_RING_CNT)) {
2738 		rxd = &sc->alc_cdata.alc_rxdesc[rx_cons];
2739 		mp = rxd->rx_m;
2740 		/* Add a new receive buffer to the ring. */
2741 		if (alc_newbuf(sc, rxd) != 0) {
2742 			ifp->if_iqdrops++;
2743 			/* Reuse Rx buffers. */
2744 			if (sc->alc_cdata.alc_rxhead != NULL)
2745 				m_freem(sc->alc_cdata.alc_rxhead);
2746 			break;
2747 		}
2748 
2749 		/*
2750 		 * Assume we've received a full sized frame.
		 * The actual size is fixed up when we encounter the
		 * end of a multi-segmented frame.
2753 		 */
2754 		mp->m_len = sc->alc_buf_size;
2755 
2756 		/* Chain received mbufs. */
2757 		if (sc->alc_cdata.alc_rxhead == NULL) {
2758 			sc->alc_cdata.alc_rxhead = mp;
2759 			sc->alc_cdata.alc_rxtail = mp;
2760 		} else {
2761 			mp->m_flags &= ~M_PKTHDR;
2762 			sc->alc_cdata.alc_rxprev_tail =
2763 			    sc->alc_cdata.alc_rxtail;
2764 			sc->alc_cdata.alc_rxtail->m_next = mp;
2765 			sc->alc_cdata.alc_rxtail = mp;
2766 		}
2767 
2768 		if (count == nsegs - 1) {
2769 			/* Last desc. for this frame. */
2770 			m = sc->alc_cdata.alc_rxhead;
2771 			m->m_flags |= M_PKTHDR;
2772 			/*
			 * It seems that there is no way to tell the
			 * L1C/L2C controller to strip CRC bytes.
2775 			 */
2776 			m->m_pkthdr.len =
2777 			    sc->alc_cdata.alc_rxlen - ETHER_CRC_LEN;
2778 			if (nsegs > 1) {
2779 				/* Set last mbuf size. */
2780 				mp->m_len = sc->alc_cdata.alc_rxlen -
2781 				    (nsegs - 1) * sc->alc_buf_size;
2782 				/* Remove the CRC bytes in chained mbufs. */
2783 				if (mp->m_len <= ETHER_CRC_LEN) {
2784 					sc->alc_cdata.alc_rxtail =
2785 					    sc->alc_cdata.alc_rxprev_tail;
2786 					sc->alc_cdata.alc_rxtail->m_len -=
2787 					    (ETHER_CRC_LEN - mp->m_len);
2788 					sc->alc_cdata.alc_rxtail->m_next = NULL;
2789 					m_freem(mp);
2790 				} else {
2791 					mp->m_len -= ETHER_CRC_LEN;
2792 				}
2793 			} else
2794 				m->m_len = m->m_pkthdr.len;
2795 			m->m_pkthdr.rcvif = ifp;
2796 			/*
2797 			 * Due to hardware bugs, Rx checksum offloading
2798 			 * was intentionally disabled.
2799 			 */
2800 			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
2801 			    (status & RRD_VLAN_TAG) != 0) {
2802 				vtag = RRD_VLAN(le32toh(rrd->vtag));
2803 				m->m_pkthdr.ether_vtag = ntohs(vtag);
2804 				m->m_flags |= M_VLANTAG;
2805 			}
2806 #ifndef __NO_STRICT_ALIGNMENT
2807 			m = alc_fixup_rx(ifp, m);
2808 			if (m != NULL)
2809 #endif
			{
				/* Pass it on. */
				(*ifp->if_input)(ifp, m);
			}
2814 		}
2815 	}
2816 	/* Reset mbuf chains. */
2817 	ALC_RXCHAIN_RESET(sc);
2818 }
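
/*
 * Worked example of the CRC trimming above (numbers are illustrative):
 * with alc_buf_size = 1536 and a 3-segment frame of alc_rxlen = 3076
 * bytes, the last mbuf gets 3076 - 2 * 1536 = 4 bytes. That is
 * <= ETHER_CRC_LEN, so the last mbuf (holding nothing but CRC) is
 * freed, the chain's tail is rewound to the previous mbuf and no
 * further bytes need to be trimmed from it.
 */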
2819 
2820 static void
2821 alc_tick(void *arg)
2822 {
2823 	struct alc_softc *sc;
2824 	struct mii_data *mii;
2825 
2826 	sc = (struct alc_softc *)arg;
2827 
2828 	ALC_LOCK_ASSERT(sc);
2829 
2830 	mii = device_get_softc(sc->alc_miibus);
2831 	mii_tick(mii);
2832 	alc_stats_update(sc);
2833 	/*
	 * alc(4) does not rely on Tx completion interrupts to reclaim
	 * transferred buffers. Instead Tx completion interrupts are
	 * used as a hint to schedule the Tx task. So it's necessary to
	 * release transmitted buffers by kicking the Tx completion
	 * handler here. This bounds the maximum reclamation delay at
	 * one hz tick.
2839 	 */
2840 	alc_txeof(sc);
2841 	alc_watchdog(sc);
2842 	callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc);
2843 }
2844 
2845 static void
2846 alc_reset(struct alc_softc *sc)
2847 {
2848 	uint32_t reg;
2849 	int i;
2850 
2851 	CSR_WRITE_4(sc, ALC_MASTER_CFG, MASTER_RESET);
2852 	for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
2853 		DELAY(10);
2854 		if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0)
2855 			break;
2856 	}
2857 	if (i == 0)
2858 		device_printf(sc->alc_dev, "master reset timeout!\n");
2859 
2860 	for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
2861 		if ((reg = CSR_READ_4(sc, ALC_IDLE_STATUS)) == 0)
2862 			break;
2863 		DELAY(10);
2864 	}
2865 
2866 	if (i == 0)
2867 		device_printf(sc->alc_dev, "reset timeout(0x%08x)!\n", reg);
2868 }
2869 
2870 static void
2871 alc_init(void *xsc)
2872 {
2873 	struct alc_softc *sc;
2874 
2875 	sc = (struct alc_softc *)xsc;
2876 	ALC_LOCK(sc);
2877 	alc_init_locked(sc);
2878 	ALC_UNLOCK(sc);
2879 }
2880 
2881 static void
2882 alc_init_locked(struct alc_softc *sc)
2883 {
2884 	struct ifnet *ifp;
2885 	struct mii_data *mii;
2886 	uint8_t eaddr[ETHER_ADDR_LEN];
2887 	bus_addr_t paddr;
2888 	uint32_t reg, rxf_hi, rxf_lo;
2889 
2890 	ALC_LOCK_ASSERT(sc);
2891 
2892 	ifp = sc->alc_ifp;
2893 	mii = device_get_softc(sc->alc_miibus);
2894 
2895 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2896 		return;
2897 	/*
2898 	 * Cancel any pending I/O.
2899 	 */
2900 	alc_stop(sc);
2901 	/*
2902 	 * Reset the chip to a known state.
2903 	 */
2904 	alc_reset(sc);
2905 
2906 	/* Initialize Rx descriptors. */
2907 	if (alc_init_rx_ring(sc) != 0) {
2908 		device_printf(sc->alc_dev, "no memory for Rx buffers.\n");
2909 		alc_stop(sc);
2910 		return;
2911 	}
2912 	alc_init_rr_ring(sc);
2913 	alc_init_tx_ring(sc);
2914 	alc_init_cmb(sc);
2915 	alc_init_smb(sc);
2916 
2917 	/* Reprogram the station address. */
2918 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2919 	CSR_WRITE_4(sc, ALC_PAR0,
2920 	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
2921 	CSR_WRITE_4(sc, ALC_PAR1, eaddr[0] << 8 | eaddr[1]);
2922 	/*
	 * Clear WOL status and disable all WOL features, as WOL
	 * would interfere with Rx operation under normal environments.
2925 	 */
2926 	CSR_READ_4(sc, ALC_WOL_CFG);
2927 	CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
2928 	/* Set Tx descriptor base addresses. */
2929 	paddr = sc->alc_rdata.alc_tx_ring_paddr;
2930 	CSR_WRITE_4(sc, ALC_TX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2931 	CSR_WRITE_4(sc, ALC_TDL_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2932 	/* We don't use high priority ring. */
2933 	CSR_WRITE_4(sc, ALC_TDH_HEAD_ADDR_LO, 0);
2934 	/* Set Tx descriptor counter. */
2935 	CSR_WRITE_4(sc, ALC_TD_RING_CNT,
2936 	    (ALC_TX_RING_CNT << TD_RING_CNT_SHIFT) & TD_RING_CNT_MASK);
2937 	/* Set Rx descriptor base addresses. */
2938 	paddr = sc->alc_rdata.alc_rx_ring_paddr;
2939 	CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2940 	CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2941 	/* We use one Rx ring. */
2942 	CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0);
2943 	CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0);
2944 	CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0);
2945 	/* Set Rx descriptor counter. */
2946 	CSR_WRITE_4(sc, ALC_RD_RING_CNT,
2947 	    (ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK);
2948 
2949 	/*
	 * Let the hardware split jumbo frames that do not fit the
	 * buffer size into alc_buf_size-sized chunks. The Rx return
	 * descriptor holds a counter that indicates how many fragments
	 * were made by the hardware. The buffer size should be a
	 * multiple of 8 bytes. Since the hardware limits the buffer
	 * size, always use the maximum value.
	 * For strict-alignment architectures make sure to reduce the
	 * buffer size by 8 bytes to make room for the alignment fixup.
2958 	 */
2959 #ifndef __NO_STRICT_ALIGNMENT
2960 	sc->alc_buf_size = RX_BUF_SIZE_MAX - sizeof(uint64_t);
2961 #else
2962 	sc->alc_buf_size = RX_BUF_SIZE_MAX;
2963 #endif
2964 	CSR_WRITE_4(sc, ALC_RX_BUF_SIZE, sc->alc_buf_size);
2965 
2966 	paddr = sc->alc_rdata.alc_rr_ring_paddr;
2967 	/* Set Rx return descriptor base addresses. */
2968 	CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2969 	/* We use one Rx return ring. */
2970 	CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0);
2971 	CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0);
2972 	CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0);
2973 	/* Set Rx return descriptor counter. */
2974 	CSR_WRITE_4(sc, ALC_RRD_RING_CNT,
2975 	    (ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK);
2976 	paddr = sc->alc_rdata.alc_cmb_paddr;
2977 	CSR_WRITE_4(sc, ALC_CMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
2978 	paddr = sc->alc_rdata.alc_smb_paddr;
2979 	CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2980 	CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
2981 
2982 	/* Tell hardware that we're ready to load DMA blocks. */
2983 	CSR_WRITE_4(sc, ALC_DMA_BLOCK, DMA_BLOCK_LOAD);
2984 
2985 	/* Configure interrupt moderation timer. */
2986 	reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT;
2987 	reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT;
2988 	CSR_WRITE_4(sc, ALC_IM_TIMER, reg);
2989 	reg = CSR_READ_4(sc, ALC_MASTER_CFG);
2990 	reg &= ~(MASTER_CHIP_REV_MASK | MASTER_CHIP_ID_MASK);
2991 	/*
	 * We don't want automatic interrupt clearing, as the task queue
	 * handler for the interrupt should know the interrupt status.
2994 	 */
2995 	reg &= ~MASTER_INTR_RD_CLR;
2996 	reg &= ~(MASTER_IM_RX_TIMER_ENB | MASTER_IM_TX_TIMER_ENB);
2997 	if (ALC_USECS(sc->alc_int_rx_mod) != 0)
2998 		reg |= MASTER_IM_RX_TIMER_ENB;
2999 	if (ALC_USECS(sc->alc_int_tx_mod) != 0)
3000 		reg |= MASTER_IM_TX_TIMER_ENB;
3001 	CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
3002 	/*
3003 	 * Disable interrupt re-trigger timer. We don't want automatic
3004 	 * re-triggering of un-ACKed interrupts.
3005 	 */
3006 	CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0));
3007 	/* Configure CMB. */
3008 	CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4);
3009 	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
3010 		CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000));
3011 	else
3012 		CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0));
3013 	/*
	 * The hardware can be configured to issue an SMB interrupt at
	 * a programmed interval. Since there is a callout that is
	 * invoked every hz in the driver we use that instead of
	 * relying on the periodic SMB interrupt.
3018 	 */
3019 	CSR_WRITE_4(sc, ALC_SMB_STAT_TIMER, ALC_USECS(0));
3020 	/* Clear MAC statistics. */
3021 	alc_stats_clear(sc);
3022 
3023 	/*
	 * Always use the maximum frame size the controller can support.
	 * Otherwise received frames with a larger frame length than
	 * the alc(4) MTU would be silently dropped in hardware, which
	 * would make path-MTU discovery hard as the sender wouldn't get
	 * any responses from the receiver. alc(4) supports
	 * multi-fragmented frames on the Rx path so it has no issue
	 * assembling fragmented frames. Using the maximum frame size
	 * also removes the need to reinitialize the hardware when the
	 * interface MTU configuration is changed.
3033 	 *
3034 	 * Be conservative in what you do, be liberal in what you
3035 	 * accept from others - RFC 793.
3036 	 */
3037 	CSR_WRITE_4(sc, ALC_FRAME_SIZE, ALC_JUMBO_FRAMELEN);
3038 
3039 	/* Disable header split(?) */
3040 	CSR_WRITE_4(sc, ALC_HDS_CFG, 0);
3041 
3042 	/* Configure IPG/IFG parameters. */
3043 	CSR_WRITE_4(sc, ALC_IPG_IFG_CFG,
3044 	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK) |
3045 	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
3046 	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
3047 	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK));
3048 	/* Set parameters for half-duplex media. */
3049 	CSR_WRITE_4(sc, ALC_HDPX_CFG,
3050 	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
3051 	    HDPX_CFG_LCOL_MASK) |
3052 	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
3053 	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
3054 	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
3055 	    HDPX_CFG_ABEBT_MASK) |
3056 	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
3057 	    HDPX_CFG_JAMIPG_MASK));
3058 	/*
	 * Set the TSO/checksum offload threshold. For frames that are
	 * larger than this threshold, the hardware won't do
	 * TSO/checksum offloading.
3062 	 */
3063 	CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH,
3064 	    (ALC_JUMBO_FRAMELEN >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) &
3065 	    TSO_OFFLOAD_THRESH_MASK);
3066 	/* Configure TxQ. */
3067 	reg = (alc_dma_burst[sc->alc_dma_rd_burst] <<
3068 	    TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK;
3069 	reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) &
3070 	    TXQ_CFG_TD_BURST_MASK;
3071 	CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE);
3072 
3073 	/* Configure Rx free descriptor pre-fetching. */
3074 	CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH,
3075 	    ((RX_RD_FREE_THRESH_HI_DEFAULT << RX_RD_FREE_THRESH_HI_SHIFT) &
3076 	    RX_RD_FREE_THRESH_HI_MASK) |
3077 	    ((RX_RD_FREE_THRESH_LO_DEFAULT << RX_RD_FREE_THRESH_LO_SHIFT) &
3078 	    RX_RD_FREE_THRESH_LO_MASK));
3079 
3080 	/*
3081 	 * Configure flow control parameters.
3082 	 * XON  : 80% of Rx FIFO
3083 	 * XOFF : 30% of Rx FIFO
3084 	 */
3085 	reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
3086 	rxf_hi = (reg * 8) / 10;
	rxf_lo = (reg * 3) / 10;
3088 	CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH,
3089 	    ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
3090 	    RX_FIFO_PAUSE_THRESH_LO_MASK) |
3091 	    ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
3092 	     RX_FIFO_PAUSE_THRESH_HI_MASK));
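	/*
	 * Example with illustrative numbers: if ALC_SRAM_RX_FIFO_LEN
	 * reads back 0x200 (512 units), the thresholds program to
	 * rxf_hi = 512 * 8 / 10 = 409 and rxf_lo = 512 * 3 / 10 = 153.
	 */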
3093 
3094 	/* Disable RSS until I understand L1C/L2C's RSS logic. */
3095 	CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0);
3096 	CSR_WRITE_4(sc, ALC_RSS_CPU, 0);
3097 
3098 	/* Configure RxQ. */
3099 	reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
3100 	    RXQ_CFG_RD_BURST_MASK;
3101 	reg |= RXQ_CFG_RSS_MODE_DIS;
3102 	if ((sc->alc_flags & ALC_FLAG_ASPM_MON) != 0)
3103 		reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M;
3104 	CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
3105 
	/* Configure Rx DMAW request threshold. */
3107 	CSR_WRITE_4(sc, ALC_RD_DMA_CFG,
3108 	    ((RD_DMA_CFG_THRESH_DEFAULT << RD_DMA_CFG_THRESH_SHIFT) &
3109 	    RD_DMA_CFG_THRESH_MASK) |
3110 	    ((ALC_RD_DMA_CFG_USECS(0) << RD_DMA_CFG_TIMER_SHIFT) &
3111 	    RD_DMA_CFG_TIMER_MASK));
3112 	/* Configure DMA parameters. */
3113 	reg = DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI;
3114 	reg |= sc->alc_rcb;
3115 	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
3116 		reg |= DMA_CFG_CMB_ENB;
3117 	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0)
3118 		reg |= DMA_CFG_SMB_ENB;
3119 	else
3120 		reg |= DMA_CFG_SMB_DIS;
3121 	reg |= (sc->alc_dma_rd_burst & DMA_CFG_RD_BURST_MASK) <<
3122 	    DMA_CFG_RD_BURST_SHIFT;
3123 	reg |= (sc->alc_dma_wr_burst & DMA_CFG_WR_BURST_MASK) <<
3124 	    DMA_CFG_WR_BURST_SHIFT;
3125 	reg |= (DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
3126 	    DMA_CFG_RD_DELAY_CNT_MASK;
3127 	reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
3128 	    DMA_CFG_WR_DELAY_CNT_MASK;
3129 	CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
3130 
3131 	/*
3132 	 * Configure Tx/Rx MACs.
3133 	 *  - Auto-padding for short frames.
3134 	 *  - Enable CRC generation.
	 *  Actual reconfiguration of the MAC for the resolved
	 *  speed/duplex follows after detection of link establishment.
	 *  AR8131/AR8132 always does checksum computation regardless
	 *  of the MAC_CFG_RXCSUM_ENB bit. The controller is also known
	 *  to have a bug in the protocol field of the Rx return
	 *  structure, so these controllers can't handle fragmented
	 *  frames. Disable Rx checksum offloading until there is a
	 *  newer controller with a sane implementation.
3143 	 */
3144 	reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
3145 	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
3146 	    MAC_CFG_PREAMBLE_MASK);
3147 	if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0)
3148 		reg |= MAC_CFG_SPEED_10_100;
3149 	else
3150 		reg |= MAC_CFG_SPEED_1000;
3151 	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
3152 
3153 	/* Set up the receive filter. */
3154 	alc_rxfilter(sc);
3155 	alc_rxvlan(sc);
3156 
	/* Acknowledge all pending interrupts and clear them. */
3158 	CSR_WRITE_4(sc, ALC_INTR_MASK, ALC_INTRS);
3159 	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
3160 	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0);
3161 
3162 	sc->alc_flags &= ~ALC_FLAG_LINK;
3163 	/* Switch to the current media. */
3164 	mii_mediachg(mii);
3165 
3166 	callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc);
3167 
3168 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3169 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3170 }
3171 
3172 static void
3173 alc_stop(struct alc_softc *sc)
3174 {
3175 	struct ifnet *ifp;
3176 	struct alc_txdesc *txd;
3177 	struct alc_rxdesc *rxd;
3178 	uint32_t reg;
3179 	int i;
3180 
3181 	ALC_LOCK_ASSERT(sc);
3182 	/*
3183 	 * Mark the interface down and cancel the watchdog timer.
3184 	 */
3185 	ifp = sc->alc_ifp;
3186 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3187 	sc->alc_flags &= ~ALC_FLAG_LINK;
3188 	callout_stop(&sc->alc_tick_ch);
3189 	sc->alc_watchdog_timer = 0;
3190 	alc_stats_update(sc);
3191 	/* Disable interrupts. */
3192 	CSR_WRITE_4(sc, ALC_INTR_MASK, 0);
3193 	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
3194 	alc_stop_queue(sc);
3195 	/* Disable DMA. */
3196 	reg = CSR_READ_4(sc, ALC_DMA_CFG);
3197 	reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB);
3198 	reg |= DMA_CFG_SMB_DIS;
3199 	CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
3200 	DELAY(1000);
3201 	/* Stop Rx/Tx MACs. */
3202 	alc_stop_mac(sc);
3203 	/* Disable interrupts which might be touched in taskq handler. */
3204 	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
3205 
3206 	/* Reclaim Rx buffers that have been processed. */
3207 	if (sc->alc_cdata.alc_rxhead != NULL)
3208 		m_freem(sc->alc_cdata.alc_rxhead);
3209 	ALC_RXCHAIN_RESET(sc);
3210 	/*
3211 	 * Free Tx/Rx mbufs still in the queues.
3212 	 */
3213 	for (i = 0; i < ALC_RX_RING_CNT; i++) {
3214 		rxd = &sc->alc_cdata.alc_rxdesc[i];
3215 		if (rxd->rx_m != NULL) {
3216 			bus_dmamap_sync(sc->alc_cdata.alc_rx_tag,
3217 			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3218 			bus_dmamap_unload(sc->alc_cdata.alc_rx_tag,
3219 			    rxd->rx_dmamap);
3220 			m_freem(rxd->rx_m);
3221 			rxd->rx_m = NULL;
3222 		}
3223 	}
3224 	for (i = 0; i < ALC_TX_RING_CNT; i++) {
3225 		txd = &sc->alc_cdata.alc_txdesc[i];
3226 		if (txd->tx_m != NULL) {
3227 			bus_dmamap_sync(sc->alc_cdata.alc_tx_tag,
3228 			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3229 			bus_dmamap_unload(sc->alc_cdata.alc_tx_tag,
3230 			    txd->tx_dmamap);
3231 			m_freem(txd->tx_m);
3232 			txd->tx_m = NULL;
3233 		}
3234 	}
3235 }
3236 
3237 static void
3238 alc_stop_mac(struct alc_softc *sc)
3239 {
3240 	uint32_t reg;
3241 	int i;
3242 
3243 	ALC_LOCK_ASSERT(sc);
3244 
3245 	/* Disable Rx/Tx MAC. */
3246 	reg = CSR_READ_4(sc, ALC_MAC_CFG);
3247 	if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
		reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
3249 		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
3250 	}
3251 	for (i = ALC_TIMEOUT; i > 0; i--) {
3252 		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
3253 		if (reg == 0)
3254 			break;
3255 		DELAY(10);
3256 	}
3257 	if (i == 0)
3258 		device_printf(sc->alc_dev,
3259 		    "could not disable Rx/Tx MAC(0x%08x)!\n", reg);
3260 }
3261 
3262 static void
3263 alc_start_queue(struct alc_softc *sc)
3264 {
3265 	uint32_t qcfg[] = {
3266 		0,
3267 		RXQ_CFG_QUEUE0_ENB,
3268 		RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB,
3269 		RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB,
3270 		RXQ_CFG_ENB
3271 	};
3272 	uint32_t cfg;
3273 
3274 	ALC_LOCK_ASSERT(sc);
3275 
3276 	/* Enable RxQ. */
3277 	cfg = CSR_READ_4(sc, ALC_RXQ_CFG);
3278 	cfg &= ~RXQ_CFG_ENB;
3279 	cfg |= qcfg[1];
3280 	CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg);
3281 	/* Enable TxQ. */
3282 	cfg = CSR_READ_4(sc, ALC_TXQ_CFG);
3283 	cfg |= TXQ_CFG_ENB;
3284 	CSR_WRITE_4(sc, ALC_TXQ_CFG, cfg);
3285 }
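/*
 * The qcfg[] table maps an Rx queue count to the matching enable mask;
 * index 1 is used above because this driver services a single Rx
 * queue.  A disabled sketch of how a multi-queue variant might select
 * the mask, assuming a hypothetical sc->alc_nrxq count (0-4) that this
 * driver does not define:
 */
#if 0
	cfg = CSR_READ_4(sc, ALC_RXQ_CFG);
	cfg &= ~RXQ_CFG_ENB;
	cfg |= qcfg[sc->alc_nrxq];	/* hypothetical queue count */
	CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg);
#endif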
3286 
3287 static void
3288 alc_stop_queue(struct alc_softc *sc)
3289 {
3290 	uint32_t reg;
3291 	int i;
3292 
3293 	ALC_LOCK_ASSERT(sc);
3294 
3295 	/* Disable RxQ. */
3296 	reg = CSR_READ_4(sc, ALC_RXQ_CFG);
3297 	if ((reg & RXQ_CFG_ENB) != 0) {
3298 		reg &= ~RXQ_CFG_ENB;
3299 		CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
3300 	}
3301 	/* Disable TxQ. */
3302 	reg = CSR_READ_4(sc, ALC_TXQ_CFG);
3303 	if ((reg & TXQ_CFG_ENB) != 0) {
3304 		reg &= ~TXQ_CFG_ENB;
3305 		CSR_WRITE_4(sc, ALC_TXQ_CFG, reg);
3306 	}
3307 	for (i = ALC_TIMEOUT; i > 0; i--) {
3308 		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
3309 		if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
3310 			break;
3311 		DELAY(10);
3312 	}
3313 	if (i == 0)
3314 		device_printf(sc->alc_dev,
3315 		    "could not disable RxQ/TxQ (0x%08x)!\n", reg);
3316 }
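/*
 * The wait above is bounded to ALC_TIMEOUT iterations of DELAY(10), so
 * the loop index reaching zero means the queues never reported idle;
 * the diagnostic then prints the last ALC_IDLE_STATUS value read.
 */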
3317 
3318 static void
3319 alc_init_tx_ring(struct alc_softc *sc)
3320 {
3321 	struct alc_ring_data *rd;
3322 	struct alc_txdesc *txd;
3323 	int i;
3324 
3325 	ALC_LOCK_ASSERT(sc);
3326 
3327 	sc->alc_cdata.alc_tx_prod = 0;
3328 	sc->alc_cdata.alc_tx_cons = 0;
3329 	sc->alc_cdata.alc_tx_cnt = 0;
3330 
3331 	rd = &sc->alc_rdata;
3332 	bzero(rd->alc_tx_ring, ALC_TX_RING_SZ);
3333 	for (i = 0; i < ALC_TX_RING_CNT; i++) {
3334 		txd = &sc->alc_cdata.alc_txdesc[i];
3335 		txd->tx_m = NULL;
3336 	}
3337 
3338 	bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
3339 	    sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE);
3340 }
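/*
 * Only BUS_DMASYNC_PREWRITE is needed for the Tx ring here: the host
 * produces the descriptors and the controller only reads them.
 */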
3341 
3342 static int
3343 alc_init_rx_ring(struct alc_softc *sc)
3344 {
3345 	struct alc_ring_data *rd;
3346 	struct alc_rxdesc *rxd;
3347 	int i;
3348 
3349 	ALC_LOCK_ASSERT(sc);
3350 
3351 	sc->alc_cdata.alc_rx_cons = ALC_RX_RING_CNT - 1;
3352 	sc->alc_morework = 0;
3353 	rd = &sc->alc_rdata;
3354 	bzero(rd->alc_rx_ring, ALC_RX_RING_SZ);
3355 	for (i = 0; i < ALC_RX_RING_CNT; i++) {
3356 		rxd = &sc->alc_cdata.alc_rxdesc[i];
3357 		rxd->rx_m = NULL;
3358 		rxd->rx_desc = &rd->alc_rx_ring[i];
3359 		if (alc_newbuf(sc, rxd) != 0)
3360 			return (ENOBUFS);
3361 	}
3362 
3363 	/*
3364 	 * Since the controller does not update Rx descriptors, the driver
3365 	 * does not have to read them back, so BUS_DMASYNC_PREWRITE is
3366 	 * enough to ensure coherence.
3367 	 */
3368 	bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
3369 	    sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE);
3370 	/* Let the controller know that new Rx buffers are available. */
3371 	CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons);
3372 
3373 	return (0);
3374 }
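/*
 * Starting alc_rx_cons at ALC_RX_RING_CNT - 1 and writing it as the
 * producer index above hands the controller every freshly filled slot:
 * the mailbox publishes the index of the last valid Rx descriptor.
 */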
3375 
3376 static void
3377 alc_init_rr_ring(struct alc_softc *sc)
3378 {
3379 	struct alc_ring_data *rd;
3380 
3381 	ALC_LOCK_ASSERT(sc);
3382 
3383 	sc->alc_cdata.alc_rr_cons = 0;
3384 	ALC_RXCHAIN_RESET(sc);
3385 
3386 	rd = &sc->alc_rdata;
3387 	bzero(rd->alc_rr_ring, ALC_RR_RING_SZ);
3388 	bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
3389 	    sc->alc_cdata.alc_rr_ring_map,
3390 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3391 }
3392 
3393 static void
3394 alc_init_cmb(struct alc_softc *sc)
3395 {
3396 	struct alc_ring_data *rd;
3397 
3398 	ALC_LOCK_ASSERT(sc);
3399 
3400 	rd = &sc->alc_rdata;
3401 	bzero(rd->alc_cmb, ALC_CMB_SZ);
3402 	bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag, sc->alc_cdata.alc_cmb_map,
3403 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3404 }
3405 
3406 static void
3407 alc_init_smb(struct alc_softc *sc)
3408 {
3409 	struct alc_ring_data *rd;
3410 
3411 	ALC_LOCK_ASSERT(sc);
3412 
3413 	rd = &sc->alc_rdata;
3414 	bzero(rd->alc_smb, ALC_SMB_SZ);
3415 	bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, sc->alc_cdata.alc_smb_map,
3416 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3417 }
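/*
 * The Rx return ring, CMB and SMB are all written by the controller
 * and read (and cleared) by the driver, which is why their maps are
 * synced with both BUS_DMASYNC_PREREAD and BUS_DMASYNC_PREWRITE above.
 */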
3418 
3419 static void
3420 alc_rxvlan(struct alc_softc *sc)
3421 {
3422 	struct ifnet *ifp;
3423 	uint32_t reg;
3424 
3425 	ALC_LOCK_ASSERT(sc);
3426 
3427 	ifp = sc->alc_ifp;
3428 	reg = CSR_READ_4(sc, ALC_MAC_CFG);
3429 	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3430 		reg |= MAC_CFG_VLAN_TAG_STRIP;
3431 	else
3432 		reg &= ~MAC_CFG_VLAN_TAG_STRIP;
3433 	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
3434 }
3435 
3436 static void
3437 alc_rxfilter(struct alc_softc *sc)
3438 {
3439 	struct ifnet *ifp;
3440 	struct ifmultiaddr *ifma;
3441 	uint32_t crc;
3442 	uint32_t mchash[2];
3443 	uint32_t rxcfg;
3444 
3445 	ALC_LOCK_ASSERT(sc);
3446 
3447 	ifp = sc->alc_ifp;
3448 
3449 	bzero(mchash, sizeof(mchash));
3450 	rxcfg = CSR_READ_4(sc, ALC_MAC_CFG);
3451 	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
3452 	if ((ifp->if_flags & IFF_BROADCAST) != 0)
3453 		rxcfg |= MAC_CFG_BCAST;
3454 	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
3455 		if ((ifp->if_flags & IFF_PROMISC) != 0)
3456 			rxcfg |= MAC_CFG_PROMISC;
3457 		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
3458 			rxcfg |= MAC_CFG_ALLMULTI;
3459 		mchash[0] = 0xFFFFFFFF;
3460 		mchash[1] = 0xFFFFFFFF;
3461 		goto chipit;
3462 	}
3463 
3464 	if_maddr_rlock(ifp);
3465 	TAILQ_FOREACH(ifma, &sc->alc_ifp->if_multiaddrs, ifma_link) {
3466 		if (ifma->ifma_addr->sa_family != AF_LINK)
3467 			continue;
3468 		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
3469 		    ifma->ifma_addr), ETHER_ADDR_LEN);
3470 		mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
3471 	}
3472 	if_maddr_runlock(ifp);
3473 
3474 chipit:
3475 	CSR_WRITE_4(sc, ALC_MAR0, mchash[0]);
3476 	CSR_WRITE_4(sc, ALC_MAR1, mchash[1]);
3477 	CSR_WRITE_4(sc, ALC_MAC_CFG, rxcfg);
3478 }
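/*
 * The multicast filter above is a 64-bit hash: bit 31 of the
 * little-endian CRC of the station address selects MAR0 or MAR1, and
 * bits 26-30 select the bit within that register.  A self-contained,
 * disabled sketch of the same bit-selection math with a local CRC32
 * (the reflected 0xEDB88320 polynomial with 0xFFFFFFFF seed and no
 * final inversion, matching ether_crc32_le()), runnable outside the
 * kernel:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint32_t
crc32_le(const uint8_t *buf, int len)
{
	uint32_t crc;
	int i, bit;

	crc = 0xFFFFFFFFU;
	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320U : 0);
	}
	return (crc);
}

int
main(void)
{
	/* Example group address; any multicast MAC works here. */
	uint8_t mac[6] = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0x01 };
	uint32_t mchash[2] = { 0, 0 };
	uint32_t crc;

	crc = crc32_le(mac, 6);
	mchash[crc >> 31] |= 1U << ((crc >> 26) & 0x1f);
	printf("MAR%u bit %u\n", (unsigned)(crc >> 31),
	    (unsigned)((crc >> 26) & 0x1f));
	return (0);
}
#endif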
3479 
3480 static int
3481 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3482 {
3483 	int error, value;
3484 
3485 	if (arg1 == NULL)
3486 		return (EINVAL);
3487 	value = *(int *)arg1;
3488 	error = sysctl_handle_int(oidp, &value, 0, req);
3489 	if (error || req->newptr == NULL)
3490 		return (error);
3491 	if (value < low || value > high)
3492 		return (EINVAL);
3493 	*(int *)arg1 = value;
3494 
3495 	return (0);
3496 }
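/*
 * sysctl_int_range() is a generic bounded-integer handler; the two
 * wrappers below bind it to driver-specific limits.  A disabled sketch
 * of how an attach path would typically register such a handler (the
 * node name, description and sc->alc_int_rx_mod backing field are
 * illustrative assumptions, not taken from this file):
 */
#if 0
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->alc_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->alc_dev)), OID_AUTO,
	    "int_rx_mod", CTLTYPE_INT | CTLFLAG_RW, &sc->alc_int_rx_mod, 0,
	    sysctl_hw_alc_int_mod, "I", "alc Rx interrupt moderation");
#endif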
3497 
3498 static int
3499 sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS)
3500 {
3501 	return (sysctl_int_range(oidp, arg1, arg2, req,
3502 	    ALC_PROC_MIN, ALC_PROC_MAX));
3503 }
3504 
3505 static int
3506 sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS)
3507 {
3508 
3509 	return (sysctl_int_range(oidp, arg1, arg2, req,
3510 	    ALC_IM_TIMER_MIN, ALC_IM_TIMER_MAX));
3511 }
3512