xref: /freebsd/sys/dev/re/if_re.c (revision 5a0bba9007c527b18db7f9b64f06b486cda4fe9d)
1 /*-
2  * Copyright (c) 1997, 1998-2003
3  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by Bill Paul.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 /*
37  * RealTek 8139C+/8169/8169S/8110S/8168/8111/8101E PCI NIC driver
38  *
39  * Written by Bill Paul <wpaul@windriver.com>
40  * Senior Networking Software Engineer
41  * Wind River Systems
42  */
43 
44 /*
45  * This driver is designed to support RealTek's next generation of
46  * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
47  * seven devices in this family: the RTL8139C+, the RTL8169, the
48  * RTL8169S, the RTL8110S, the RTL8168, the RTL8111 and the RTL8101E.
49  *
50  * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
51  * with the older 8139 family; however, it also supports a special
52  * C+ mode of operation that provides several new performance-enhancing
53  * features. These include:
54  *
55  *	o Descriptor based DMA mechanism. Each descriptor represents
56  *	  a single packet fragment. Data buffers may be aligned on
57  *	  any byte boundary.
58  *
59  *	o 64-bit DMA
60  *
61  *	o TCP/IP checksum offload for both RX and TX
62  *
63  *	o High and normal priority transmit DMA rings
64  *
65  *	o VLAN tag insertion and extraction
66  *
67  *	o TCP large send (segmentation offload)
68  *
69  * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
70  * programming API is fairly straightforward. The RX filtering, EEPROM
71  * access and PHY access are the same as on the older 8139 series
72  * chips.
73  *
74  * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the
75  * same programming API and feature set as the 8139C+ with the following
76  * differences and additions:
77  *
78  *	o 1000Mbps mode
79  *
80  *	o Jumbo frames
81  *
82  *	o GMII and TBI ports/registers for interfacing with copper
83  *	  or fiber PHYs
84  *
85  *	o RX and TX DMA rings can have up to 1024 descriptors
86  *	  (the 8139C+ allows a maximum of 64)
87  *
88  *	o Slight differences in register layout from the 8139C+
89  *
90  * The TX start and timer interrupt registers are at different locations
91  * on the 8169 than they are on the 8139C+. Also, the status word in the
92  * RX descriptor has a slightly different bit layout. The 8169 does not
93  * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
94  * copper gigE PHY.
95  *
96  * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
97  * (the 'S' stands for 'single-chip'). These devices have the same
98  * programming API as the older 8169, but also have some vendor-specific
99  * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
100  * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
101  *
102  * This driver takes advantage of the RX and TX checksum offload and
103  * VLAN tag insertion/extraction features. It also implements TX
104  * interrupt moderation using the timer interrupt registers, which
105  * significantly reduces TX interrupt load. There is also support
106  * for jumbo frames; however, the 8169/8169S/8110S cannot transmit
107  * jumbo frames larger than 7440 bytes, so the maximum MTU possible
108  * with this driver is 7422 bytes.
109  */
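
/*
 * For reference, the 7422-byte figure follows directly from the
 * 7440-byte frame limit: an MTU excludes link-level overhead, and
 * 7440 minus the 14-byte Ethernet header and 4-byte CRC leaves
 * 7422 bytes of payload.
 */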
110 
111 #ifdef HAVE_KERNEL_OPTION_HEADERS
112 #include "opt_device_polling.h"
113 #endif
114 
115 #include <sys/param.h>
116 #include <sys/endian.h>
117 #include <sys/systm.h>
118 #include <sys/sockio.h>
119 #include <sys/mbuf.h>
120 #include <sys/malloc.h>
121 #include <sys/module.h>
122 #include <sys/kernel.h>
123 #include <sys/socket.h>
124 #include <sys/lock.h>
125 #include <sys/mutex.h>
126 #include <sys/sysctl.h>
127 #include <sys/taskqueue.h>
128 
129 #include <net/if.h>
130 #include <net/if_arp.h>
131 #include <net/ethernet.h>
132 #include <net/if_dl.h>
133 #include <net/if_media.h>
134 #include <net/if_types.h>
135 #include <net/if_vlan_var.h>
136 
137 #include <net/bpf.h>
138 
139 #include <machine/bus.h>
140 #include <machine/resource.h>
141 #include <sys/bus.h>
142 #include <sys/rman.h>
143 
144 #include <dev/mii/mii.h>
145 #include <dev/mii/miivar.h>
146 
147 #include <dev/pci/pcireg.h>
148 #include <dev/pci/pcivar.h>
149 
150 #include <pci/if_rlreg.h>
151 
152 MODULE_DEPEND(re, pci, 1, 1, 1);
153 MODULE_DEPEND(re, ether, 1, 1, 1);
154 MODULE_DEPEND(re, miibus, 1, 1, 1);
155 
156 /* "device miibus" required.  See GENERIC if you get errors here. */
157 #include "miibus_if.h"
158 
159 /* Tunables. */
160 static int intr_filter = 0;
161 TUNABLE_INT("hw.re.intr_filter", &intr_filter);
162 static int msi_disable = 0;
163 TUNABLE_INT("hw.re.msi_disable", &msi_disable);
164 static int msix_disable = 0;
165 TUNABLE_INT("hw.re.msix_disable", &msix_disable);
166 static int prefer_iomap = 0;
167 TUNABLE_INT("hw.re.prefer_iomap", &prefer_iomap);
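
/*
 * These tunables are fetched from the kernel environment when the
 * driver initializes, so they are normally set in /boot/loader.conf,
 * for example:
 *
 *	hw.re.msi_disable="1"		# do not use MSI interrupts
 *	hw.re.prefer_iomap="1"		# map registers via I/O space
 */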
168 
169 #define RE_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)
170 
171 /*
172  * Various supported device vendors/types and their names.
173  */
174 static struct rl_type re_devs[] = {
175 	{ DLINK_VENDORID, DLINK_DEVICEID_528T, 0,
176 	    "D-Link DGE-528(T) Gigabit Ethernet Adapter" },
177 	{ RT_VENDORID, RT_DEVICEID_8139, 0,
178 	    "RealTek 8139C+ 10/100BaseTX" },
179 	{ RT_VENDORID, RT_DEVICEID_8101E, 0,
180 	    "RealTek 810xE PCIe 10/100baseTX" },
181 	{ RT_VENDORID, RT_DEVICEID_8168, 0,
182 	    "RealTek 8168/8111 B/C/CP/D/DP/E PCIe Gigabit Ethernet" },
183 	{ RT_VENDORID, RT_DEVICEID_8169, 0,
184 	    "RealTek 8169/8169S/8169SB(L)/8110S/8110SB(L) Gigabit Ethernet" },
185 	{ RT_VENDORID, RT_DEVICEID_8169SC, 0,
186 	    "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" },
187 	{ COREGA_VENDORID, COREGA_DEVICEID_CGLAPCIGT, 0,
188 	    "Corega CG-LAPCIGT (RTL8169S) Gigabit Ethernet" },
189 	{ LINKSYS_VENDORID, LINKSYS_DEVICEID_EG1032, 0,
190 	    "Linksys EG1032 (RTL8169S) Gigabit Ethernet" },
191 	{ USR_VENDORID, USR_DEVICEID_997902, 0,
192 	    "US Robotics 997902 (RTL8169S) Gigabit Ethernet" }
193 };
194 
195 static struct rl_hwrev re_hwrevs[] = {
196 	{ RL_HWREV_8139, RL_8139,  "", RL_MTU },
197 	{ RL_HWREV_8139A, RL_8139, "A", RL_MTU },
198 	{ RL_HWREV_8139AG, RL_8139, "A-G", RL_MTU },
199 	{ RL_HWREV_8139B, RL_8139, "B", RL_MTU },
200 	{ RL_HWREV_8130, RL_8139, "8130", RL_MTU },
201 	{ RL_HWREV_8139C, RL_8139, "C", RL_MTU },
202 	{ RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C", RL_MTU },
203 	{ RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+", RL_MTU },
204 	{ RL_HWREV_8168B_SPIN1, RL_8169, "8168", RL_JUMBO_MTU },
205 	{ RL_HWREV_8169, RL_8169, "8169", RL_JUMBO_MTU },
206 	{ RL_HWREV_8169S, RL_8169, "8169S", RL_JUMBO_MTU },
207 	{ RL_HWREV_8110S, RL_8169, "8110S", RL_JUMBO_MTU },
208 	{ RL_HWREV_8169_8110SB, RL_8169, "8169SB/8110SB", RL_JUMBO_MTU },
209 	{ RL_HWREV_8169_8110SC, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU },
210 	{ RL_HWREV_8169_8110SBL, RL_8169, "8169SBL/8110SBL", RL_JUMBO_MTU },
211 	{ RL_HWREV_8169_8110SCE, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU },
212 	{ RL_HWREV_8100, RL_8139, "8100", RL_MTU },
213 	{ RL_HWREV_8101, RL_8139, "8101", RL_MTU },
214 	{ RL_HWREV_8100E, RL_8169, "8100E", RL_MTU },
215 	{ RL_HWREV_8101E, RL_8169, "8101E", RL_MTU },
216 	{ RL_HWREV_8102E, RL_8169, "8102E", RL_MTU },
217 	{ RL_HWREV_8102EL, RL_8169, "8102EL", RL_MTU },
218 	{ RL_HWREV_8102EL_SPIN1, RL_8169, "8102EL", RL_MTU },
219 	{ RL_HWREV_8103E, RL_8169, "8103E", RL_MTU },
220 	{ RL_HWREV_8105E, RL_8169, "8105E", RL_MTU },
221 	{ RL_HWREV_8168B_SPIN2, RL_8169, "8168", RL_JUMBO_MTU },
222 	{ RL_HWREV_8168B_SPIN3, RL_8169, "8168", RL_JUMBO_MTU },
223 	{ RL_HWREV_8168C, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K },
224 	{ RL_HWREV_8168C_SPIN2, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K },
225 	{ RL_HWREV_8168CP, RL_8169, "8168CP/8111CP", RL_JUMBO_MTU_6K },
226 	{ RL_HWREV_8168D, RL_8169, "8168D/8111D", RL_JUMBO_MTU_9K },
227 	{ RL_HWREV_8168DP, RL_8169, "8168DP/8111DP", RL_JUMBO_MTU_9K },
228 	{ RL_HWREV_8168E, RL_8169, "8168E/8111E", RL_JUMBO_MTU_9K },
229 	{ RL_HWREV_8168E_VL, RL_8169, "8168E/8111E-VL", RL_JUMBO_MTU_6K },
230 	{ 0, 0, NULL, 0 }
231 };
232 
233 static int re_probe		(device_t);
234 static int re_attach		(device_t);
235 static int re_detach		(device_t);
236 
237 static int re_encap		(struct rl_softc *, struct mbuf **);
238 
239 static void re_dma_map_addr	(void *, bus_dma_segment_t *, int, int);
240 static int re_allocmem		(device_t, struct rl_softc *);
241 static __inline void re_discard_rxbuf
242 				(struct rl_softc *, int);
243 static int re_newbuf		(struct rl_softc *, int);
244 static int re_jumbo_newbuf	(struct rl_softc *, int);
245 static int re_rx_list_init	(struct rl_softc *);
246 static int re_jrx_list_init	(struct rl_softc *);
247 static int re_tx_list_init	(struct rl_softc *);
248 #ifdef RE_FIXUP_RX
249 static __inline void re_fixup_rx
250 				(struct mbuf *);
251 #endif
252 static int re_rxeof		(struct rl_softc *, int *);
253 static void re_txeof		(struct rl_softc *);
254 #ifdef DEVICE_POLLING
255 static int re_poll		(struct ifnet *, enum poll_cmd, int);
256 static int re_poll_locked	(struct ifnet *, enum poll_cmd, int);
257 #endif
258 static int re_intr		(void *);
259 static void re_intr_msi		(void *);
260 static void re_tick		(void *);
261 static void re_int_task		(void *, int);
262 static void re_start		(struct ifnet *);
263 static void re_start_locked	(struct ifnet *);
264 static int re_ioctl		(struct ifnet *, u_long, caddr_t);
265 static void re_init		(void *);
266 static void re_init_locked	(struct rl_softc *);
267 static void re_stop		(struct rl_softc *);
268 static void re_watchdog		(struct rl_softc *);
269 static int re_suspend		(device_t);
270 static int re_resume		(device_t);
271 static int re_shutdown		(device_t);
272 static int re_ifmedia_upd	(struct ifnet *);
273 static void re_ifmedia_sts	(struct ifnet *, struct ifmediareq *);
274 
275 static void re_eeprom_putbyte	(struct rl_softc *, int);
276 static void re_eeprom_getword	(struct rl_softc *, int, u_int16_t *);
277 static void re_read_eeprom	(struct rl_softc *, caddr_t, int, int);
278 static int re_gmii_readreg	(device_t, int, int);
279 static int re_gmii_writereg	(device_t, int, int, int);
280 
281 static int re_miibus_readreg	(device_t, int, int);
282 static int re_miibus_writereg	(device_t, int, int, int);
283 static void re_miibus_statchg	(device_t);
284 
285 static void re_set_jumbo	(struct rl_softc *, int);
286 static void re_set_rxmode		(struct rl_softc *);
287 static void re_reset		(struct rl_softc *);
288 static void re_setwol		(struct rl_softc *);
289 static void re_clrwol		(struct rl_softc *);
290 
291 #ifdef RE_DIAG
292 static int re_diag		(struct rl_softc *);
293 #endif
294 
295 static void re_add_sysctls	(struct rl_softc *);
296 static int re_sysctl_stats	(SYSCTL_HANDLER_ARGS);
297 static int sysctl_int_range	(SYSCTL_HANDLER_ARGS, int, int);
298 static int sysctl_hw_re_int_mod	(SYSCTL_HANDLER_ARGS);
299 
300 static device_method_t re_methods[] = {
301 	/* Device interface */
302 	DEVMETHOD(device_probe,		re_probe),
303 	DEVMETHOD(device_attach,	re_attach),
304 	DEVMETHOD(device_detach,	re_detach),
305 	DEVMETHOD(device_suspend,	re_suspend),
306 	DEVMETHOD(device_resume,	re_resume),
307 	DEVMETHOD(device_shutdown,	re_shutdown),
308 
309 	/* bus interface */
310 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
311 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
312 
313 	/* MII interface */
314 	DEVMETHOD(miibus_readreg,	re_miibus_readreg),
315 	DEVMETHOD(miibus_writereg,	re_miibus_writereg),
316 	DEVMETHOD(miibus_statchg,	re_miibus_statchg),
317 
318 	{ 0, 0 }
319 };
320 
321 static driver_t re_driver = {
322 	"re",
323 	re_methods,
324 	sizeof(struct rl_softc)
325 };
326 
327 static devclass_t re_devclass;
328 
329 DRIVER_MODULE(re, pci, re_driver, re_devclass, 0, 0);
330 DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0);
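
/*
 * The driver may be compiled into the kernel ("device re" along with
 * the required "device miibus") or loaded at runtime as a module:
 *
 *	# kldload if_re
 *
 * To load it automatically at boot, add if_re_load="YES" to
 * /boot/loader.conf.
 */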
331 
332 #define EE_SET(x)					\
333 	CSR_WRITE_1(sc, RL_EECMD,			\
334 		CSR_READ_1(sc, RL_EECMD) | x)
335 
336 #define EE_CLR(x)					\
337 	CSR_WRITE_1(sc, RL_EECMD,			\
338 		CSR_READ_1(sc, RL_EECMD) & ~x)
339 
340 /*
341  * Send a read command and address to the EEPROM, check for ACK.
342  */
343 static void
344 re_eeprom_putbyte(struct rl_softc *sc, int addr)
345 {
346 	int			d, i;
347 
348 	d = addr | (RL_9346_READ << sc->rl_eewidth);
349 
350 	/*
351 	 * Feed in each bit and strobe the clock.
352 	 */
353 
354 	for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) {
355 		if (d & i) {
356 			EE_SET(RL_EE_DATAIN);
357 		} else {
358 			EE_CLR(RL_EE_DATAIN);
359 		}
360 		DELAY(100);
361 		EE_SET(RL_EE_CLK);
362 		DELAY(150);
363 		EE_CLR(RL_EE_CLK);
364 		DELAY(100);
365 	}
366 }
367 
368 /*
369  * Read a word of data stored in the EEPROM at address 'addr.'
370  */
371 static void
372 re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest)
373 {
374 	int			i;
375 	u_int16_t		word = 0;
376 
377 	/*
378 	 * Send address of word we want to read.
379 	 */
380 	re_eeprom_putbyte(sc, addr);
381 
382 	/*
383 	 * Start reading bits from EEPROM.
384 	 */
385 	for (i = 0x8000; i; i >>= 1) {
386 		EE_SET(RL_EE_CLK);
387 		DELAY(100);
388 		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
389 			word |= i;
390 		EE_CLR(RL_EE_CLK);
391 		DELAY(100);
392 	}
393 
394 	*dest = word;
395 }
396 
397 /*
398  * Read a sequence of words from the EEPROM.
399  */
400 static void
401 re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt)
402 {
403 	int			i;
404 	u_int16_t		word = 0, *ptr;
405 
406 	CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
407 
408 	DELAY(100);
409 
410 	for (i = 0; i < cnt; i++) {
411 		CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL);
412 		re_eeprom_getword(sc, off + i, &word);
413 		CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL);
414 		ptr = (u_int16_t *)(dest + (i * 2));
415 		*ptr = word;
416 	}
417 
418 	CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
419 }
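
/*
 * Usage sketch for the EEPROM helpers above (illustrative only, not
 * compiled; re_attach() below does the real work).  Word 0 is read
 * first to probe the EEPROM addressing width, after which the station
 * address is read as three little-endian words at RL_EE_EADDR:
 */
#if 0
static void
re_example_read_eaddr(struct rl_softc *sc, u_char eaddr[ETHER_ADDR_LEN])
{
	u_int16_t	as[ETHER_ADDR_LEN / 2];
	int		i;

	re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3);
	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
		as[i] = le16toh(as[i]);
	bcopy(as, eaddr, ETHER_ADDR_LEN);
}
#endif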
420 
421 static int
422 re_gmii_readreg(device_t dev, int phy, int reg)
423 {
424 	struct rl_softc		*sc;
425 	u_int32_t		rval;
426 	int			i;
427 
428 	sc = device_get_softc(dev);
429 
430 	/* Let the rgephy driver read the GMEDIASTAT register */
431 
432 	if (reg == RL_GMEDIASTAT) {
433 		rval = CSR_READ_1(sc, RL_GMEDIASTAT);
434 		return (rval);
435 	}
436 
437 	CSR_WRITE_4(sc, RL_PHYAR, reg << 16);
438 
439 	for (i = 0; i < RL_PHY_TIMEOUT; i++) {
440 		rval = CSR_READ_4(sc, RL_PHYAR);
441 		if (rval & RL_PHYAR_BUSY)
442 			break;
443 		DELAY(25);
444 	}
445 
446 	if (i == RL_PHY_TIMEOUT) {
447 		device_printf(sc->rl_dev, "PHY read failed\n");
448 		return (0);
449 	}
450 
451 	/*
452 	 * The controller requires a 20us delay to process the next MDIO request.
453 	 */
454 	DELAY(20);
455 
456 	return (rval & RL_PHYAR_PHYDATA);
457 }
458 
459 static int
460 re_gmii_writereg(device_t dev, int phy, int reg, int data)
461 {
462 	struct rl_softc		*sc;
463 	u_int32_t		rval;
464 	int			i;
465 
466 	sc = device_get_softc(dev);
467 
468 	CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) |
469 	    (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY);
470 
471 	for (i = 0; i < RL_PHY_TIMEOUT; i++) {
472 		rval = CSR_READ_4(sc, RL_PHYAR);
473 		if (!(rval & RL_PHYAR_BUSY))
474 			break;
475 		DELAY(25);
476 	}
477 
478 	if (i == RL_PHY_TIMEOUT) {
479 		device_printf(sc->rl_dev, "PHY write failed\n");
480 		return (0);
481 	}
482 
483 	/*
484 	 * The controller requires a 20us delay to process the next MDIO request.
485 	 */
486 	DELAY(20);
487 
488 	return (0);
489 }
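
/*
 * Both helpers above funnel through the PHYAR register: the GMII
 * register number is shifted into bits 16 and up, the 16-bit data
 * travels in RL_PHYAR_PHYDATA, and RL_PHYAR_BUSY doubles as the
 * handshake flag.  Note the asymmetry: a read completes when BUSY
 * becomes set (data valid), while a write completes when BUSY clears.
 * A minimal read-modify-write sketch built on these helpers follows
 * (illustrative only, not compiled; PHY address 1 matches what this
 * driver uses for 8169-class parts):
 */
#if 0
static void
re_example_phy_setbit(device_t dev, int reg, int bit)
{
	int	val;

	val = re_gmii_readreg(dev, 1, reg);
	re_gmii_writereg(dev, 1, reg, val | bit);
}
#endif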
490 
491 static int
492 re_miibus_readreg(device_t dev, int phy, int reg)
493 {
494 	struct rl_softc		*sc;
495 	u_int16_t		rval = 0;
496 	u_int16_t		re8139_reg = 0;
497 
498 	sc = device_get_softc(dev);
499 
500 	if (sc->rl_type == RL_8169) {
501 		rval = re_gmii_readreg(dev, phy, reg);
502 		return (rval);
503 	}
504 
505 	switch (reg) {
506 	case MII_BMCR:
507 		re8139_reg = RL_BMCR;
508 		break;
509 	case MII_BMSR:
510 		re8139_reg = RL_BMSR;
511 		break;
512 	case MII_ANAR:
513 		re8139_reg = RL_ANAR;
514 		break;
515 	case MII_ANER:
516 		re8139_reg = RL_ANER;
517 		break;
518 	case MII_ANLPAR:
519 		re8139_reg = RL_LPAR;
520 		break;
521 	case MII_PHYIDR1:
522 	case MII_PHYIDR2:
523 		return (0);
524 	/*
525 	 * Allow the rlphy driver to read the media status
526 	 * register. If we have a link partner which does not
527 	 * support NWAY, this is the register which will tell
528 	 * us the results of parallel detection.
529 	 */
530 	case RL_MEDIASTAT:
531 		rval = CSR_READ_1(sc, RL_MEDIASTAT);
532 		return (rval);
533 	default:
534 		device_printf(sc->rl_dev, "bad phy register\n");
535 		return (0);
536 	}
537 	rval = CSR_READ_2(sc, re8139_reg);
538 	if (sc->rl_type == RL_8139CPLUS && re8139_reg == RL_BMCR) {
539 		/* 8139C+ has different bit layout. */
540 		rval &= ~(BMCR_LOOP | BMCR_ISO);
541 	}
542 	return (rval);
543 }
544 
545 static int
546 re_miibus_writereg(device_t dev, int phy, int reg, int data)
547 {
548 	struct rl_softc		*sc;
549 	u_int16_t		re8139_reg = 0;
550 	int			rval = 0;
551 
552 	sc = device_get_softc(dev);
553 
554 	if (sc->rl_type == RL_8169) {
555 		rval = re_gmii_writereg(dev, phy, reg, data);
556 		return (rval);
557 	}
558 
559 	switch (reg) {
560 	case MII_BMCR:
561 		re8139_reg = RL_BMCR;
562 		if (sc->rl_type == RL_8139CPLUS) {
563 			/* 8139C+ has different bit layout. */
564 			data &= ~(BMCR_LOOP | BMCR_ISO);
565 		}
566 		break;
567 	case MII_BMSR:
568 		re8139_reg = RL_BMSR;
569 		break;
570 	case MII_ANAR:
571 		re8139_reg = RL_ANAR;
572 		break;
573 	case MII_ANER:
574 		re8139_reg = RL_ANER;
575 		break;
576 	case MII_ANLPAR:
577 		re8139_reg = RL_LPAR;
578 		break;
579 	case MII_PHYIDR1:
580 	case MII_PHYIDR2:
581 		return (0);
582 		break;
583 	default:
584 		device_printf(sc->rl_dev, "bad phy register\n");
585 		return (0);
586 	}
587 	CSR_WRITE_2(sc, re8139_reg, data);
588 	return (0);
589 }
590 
591 static void
592 re_miibus_statchg(device_t dev)
593 {
594 	struct rl_softc		*sc;
595 	struct ifnet		*ifp;
596 	struct mii_data		*mii;
597 
598 	sc = device_get_softc(dev);
599 	mii = device_get_softc(sc->rl_miibus);
600 	ifp = sc->rl_ifp;
601 	if (mii == NULL || ifp == NULL ||
602 	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
603 		return;
604 
605 	sc->rl_flags &= ~RL_FLAG_LINK;
606 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
607 	    (IFM_ACTIVE | IFM_AVALID)) {
608 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
609 		case IFM_10_T:
610 		case IFM_100_TX:
611 			sc->rl_flags |= RL_FLAG_LINK;
612 			break;
613 		case IFM_1000_T:
614 			if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0)
615 				break;
616 			sc->rl_flags |= RL_FLAG_LINK;
617 			break;
618 		default:
619 			break;
620 		}
621 	}
622 	/*
623 	 * RealTek controllers do not provide any interface to the
624 	 * Tx/Rx MACs for the resolved speed, duplex and flow-control
625 	 * parameters.
626 	 */
627 }
628 
629 /*
630  * Set the RX configuration and 64-bit multicast hash filter.
631  */
632 static void
633 re_set_rxmode(struct rl_softc *sc)
634 {
635 	struct ifnet		*ifp;
636 	struct ifmultiaddr	*ifma;
637 	uint32_t		hashes[2] = { 0, 0 };
638 	uint32_t		h, rxfilt;
639 
640 	RL_LOCK_ASSERT(sc);
641 
642 	ifp = sc->rl_ifp;
643 
644 	rxfilt = RL_RXCFG_CONFIG | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD;
645 
646 	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
647 		if (ifp->if_flags & IFF_PROMISC)
648 			rxfilt |= RL_RXCFG_RX_ALLPHYS;
649 		/*
650 		 * Unlike other hardware, we have to explicitly set
651 		 * RL_RXCFG_RX_MULTI to receive multicast frames in
652 		 * promiscuous mode.
653 		 */
654 		rxfilt |= RL_RXCFG_RX_MULTI;
655 		hashes[0] = hashes[1] = 0xffffffff;
656 		goto done;
657 	}
658 
659 	if_maddr_rlock(ifp);
660 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
661 		if (ifma->ifma_addr->sa_family != AF_LINK)
662 			continue;
663 		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
664 		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
665 		if (h < 32)
666 			hashes[0] |= (1 << h);
667 		else
668 			hashes[1] |= (1 << (h - 32));
669 	}
670 	if_maddr_runlock(ifp);
671 
672 	if (hashes[0] != 0 || hashes[1] != 0) {
673 		/*
674 		 * For some unfathomable reason, RealTek decided to
675 		 * reverse the order of the multicast hash registers
676 		 * in the PCI Express parts.  This means we have to
677 		 * write the hash pattern in reverse order for those
678 		 * devices.
679 		 */
680 		if ((sc->rl_flags & RL_FLAG_PCIE) != 0) {
681 			h = bswap32(hashes[0]);
682 			hashes[0] = bswap32(hashes[1]);
683 			hashes[1] = h;
684 		}
685 		rxfilt |= RL_RXCFG_RX_MULTI;
686 	}
687 
688 done:
689 	CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
690 	CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
691 	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
692 }
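
/*
 * Worked example of the hash above: the top 6 bits of the big-endian
 * CRC-32 of each multicast address select one of 64 filter bits split
 * across RL_MAR0 and RL_MAR4.  For h == 37, bit 5 of hashes[1] is
 * set, i.e. bit 5 of RL_MAR4 (before the PCIe byte-swap quirk is
 * applied).
 */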
693 
694 static void
695 re_reset(struct rl_softc *sc)
696 {
697 	int			i;
698 
699 	RL_LOCK_ASSERT(sc);
700 
701 	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);
702 
703 	for (i = 0; i < RL_TIMEOUT; i++) {
704 		DELAY(10);
705 		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
706 			break;
707 	}
708 	if (i == RL_TIMEOUT)
709 		device_printf(sc->rl_dev, "reset never completed!\n");
710 
711 	if ((sc->rl_flags & RL_FLAG_MACRESET) != 0)
712 		CSR_WRITE_1(sc, 0x82, 1);
713 	if (sc->rl_hwrev->rl_rev == RL_HWREV_8169S)
714 		re_gmii_writereg(sc->rl_dev, 1, 0x0b, 0);
715 }
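
/*
 * The reset bit is self-clearing; the loop above polls it in 10us
 * steps for up to RL_TIMEOUT iterations before giving up with a
 * diagnostic.
 */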
716 
717 #ifdef RE_DIAG
718 
719 /*
720  * The following routine is designed to test for a defect on some
721  * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64#
722  * lines connected to the bus, however for a 32-bit only card, they
723  * should be pulled high. The result of this defect is that the
724  * NIC will not work right if you plug it into a 64-bit slot: DMA
725  * operations will be done with 64-bit transfers, which will fail
726  * because the 64-bit data lines aren't connected.
727  *
728  * There's no way to work around this (short of taking a soldering
729  * iron to the board); however, we can detect it. The method we use
730  * here is to put the NIC into digital loopback mode, set the receiver
731  * to promiscuous mode, and then try to send a frame. We then compare
732  * the frame data we sent to what was received. If the data matches,
733  * then the NIC is working correctly, otherwise we know the user has
734  * a defective NIC which has been mistakenly plugged into a 64-bit PCI
735  * slot. In the latter case, there's no way the NIC can work correctly,
736  * so we print out a message on the console and abort the device attach.
737  */
738 
739 static int
740 re_diag(struct rl_softc *sc)
741 {
742 	struct ifnet		*ifp = sc->rl_ifp;
743 	struct mbuf		*m0;
744 	struct ether_header	*eh;
745 	struct rl_desc		*cur_rx;
746 	u_int16_t		status;
747 	u_int32_t		rxstat;
748 	int			total_len, i, error = 0, phyaddr;
749 	u_int8_t		dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
750 	u_int8_t		src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };
751 
752 	/* Allocate a single mbuf */
753 	MGETHDR(m0, M_DONTWAIT, MT_DATA);
754 	if (m0 == NULL)
755 		return (ENOBUFS);
756 
757 	RL_LOCK(sc);
758 
759 	/*
760 	 * Initialize the NIC in test mode. This sets the chip up
761 	 * so that it can send and receive frames, but performs the
762 	 * following special functions:
763 	 * - Puts receiver in promiscuous mode
764 	 * - Enables digital loopback mode
765 	 * - Leaves interrupts turned off
766 	 */
767 
768 	ifp->if_flags |= IFF_PROMISC;
769 	sc->rl_testmode = 1;
770 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
771 	re_init_locked(sc);
772 	sc->rl_flags |= RL_FLAG_LINK;
773 	if (sc->rl_type == RL_8169)
774 		phyaddr = 1;
775 	else
776 		phyaddr = 0;
777 
778 	re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_RESET);
779 	for (i = 0; i < RL_TIMEOUT; i++) {
780 		status = re_miibus_readreg(sc->rl_dev, phyaddr, MII_BMCR);
781 		if (!(status & BMCR_RESET))
782 			break;
783 	}
784 
785 	re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_LOOP);
786 	CSR_WRITE_2(sc, RL_ISR, RL_INTRS);
787 
788 	DELAY(100000);
789 
790 	/* Put some data in the mbuf */
791 
792 	eh = mtod(m0, struct ether_header *);
793 	bcopy((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN);
794 	bcopy((char *)&src, eh->ether_shost, ETHER_ADDR_LEN);
795 	eh->ether_type = htons(ETHERTYPE_IP);
796 	m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;
797 
798 	/*
799 	 * Queue the packet, start transmission.
800 	 * Note: IF_HANDOFF() ultimately calls re_start() for us.
801 	 */
802 
803 	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
804 	RL_UNLOCK(sc);
805 	/* XXX: re_diag must not be called when in ALTQ mode */
806 	IF_HANDOFF(&ifp->if_snd, m0, ifp);
807 	RL_LOCK(sc);
808 	m0 = NULL;
809 
810 	/* Wait for it to propagate through the chip */
811 
812 	DELAY(100000);
813 	for (i = 0; i < RL_TIMEOUT; i++) {
814 		status = CSR_READ_2(sc, RL_ISR);
815 		CSR_WRITE_2(sc, RL_ISR, status);
816 		if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) ==
817 		    (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK))
818 			break;
819 		DELAY(10);
820 	}
821 
822 	if (i == RL_TIMEOUT) {
823 		device_printf(sc->rl_dev,
824 		    "diagnostic failed, no packet received in"
825 		    " loopback mode\n");
826 		error = EIO;
827 		goto done;
828 	}
829 
830 	/*
831 	 * The packet should have been dumped into the first
832 	 * entry in the RX DMA ring. Grab it from there.
833 	 */
834 
835 	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
836 	    sc->rl_ldata.rl_rx_list_map,
837 	    BUS_DMASYNC_POSTREAD);
838 	bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
839 	    sc->rl_ldata.rl_rx_desc[0].rx_dmamap,
840 	    BUS_DMASYNC_POSTREAD);
841 	bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag,
842 	    sc->rl_ldata.rl_rx_desc[0].rx_dmamap);
843 
844 	m0 = sc->rl_ldata.rl_rx_desc[0].rx_m;
845 	sc->rl_ldata.rl_rx_desc[0].rx_m = NULL;
846 	eh = mtod(m0, struct ether_header *);
847 
848 	cur_rx = &sc->rl_ldata.rl_rx_list[0];
849 	total_len = RL_RXBYTES(cur_rx);
850 	rxstat = le32toh(cur_rx->rl_cmdstat);
851 
852 	if (total_len != ETHER_MIN_LEN) {
853 		device_printf(sc->rl_dev,
854 		    "diagnostic failed, received short packet\n");
855 		error = EIO;
856 		goto done;
857 	}
858 
859 	/* Test that the received packet data matches what we sent. */
860 
861 	if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) ||
862 	    bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) ||
863 	    ntohs(eh->ether_type) != ETHERTYPE_IP) {
864 		device_printf(sc->rl_dev, "WARNING, DMA FAILURE!\n");
865 		device_printf(sc->rl_dev, "expected TX data: %6D/%6D/0x%x\n",
866 		    dst, ":", src, ":", ETHERTYPE_IP);
867 		device_printf(sc->rl_dev, "received RX data: %6D/%6D/0x%x\n",
868 		    eh->ether_dhost, ":",  eh->ether_shost, ":",
869 		    ntohs(eh->ether_type));
870 		device_printf(sc->rl_dev, "You may have a defective 32-bit "
871 		    "NIC plugged into a 64-bit PCI slot.\n");
872 		device_printf(sc->rl_dev, "Please re-install the NIC in a "
873 		    "32-bit slot for proper operation.\n");
874 		device_printf(sc->rl_dev, "Read the re(4) man page for more "
875 		    "details.\n");
876 		error = EIO;
877 	}
878 
879 done:
880 	/* Turn interface off, release resources */
881 
882 	sc->rl_testmode = 0;
883 	sc->rl_flags &= ~RL_FLAG_LINK;
884 	ifp->if_flags &= ~IFF_PROMISC;
885 	re_stop(sc);
886 	if (m0 != NULL)
887 		m_freem(m0);
888 
889 	RL_UNLOCK(sc);
890 
891 	return (error);
892 }
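
/*
 * A note on the length check above: the test frame is queued at
 * ETHER_MIN_LEN - ETHER_CRC_LEN (60) bytes and the chip appends the
 * 4-byte CRC on transmit, so a correctly looped-back frame shows up
 * as exactly ETHER_MIN_LEN (64) bytes on the receive side, CRC
 * included.
 */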
893 
894 #endif
895 
896 /*
897  * Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device
898  * IDs against our list and return a device name if we find a match.
899  */
900 static int
901 re_probe(device_t dev)
902 {
903 	struct rl_type		*t;
904 	uint16_t		devid, vendor;
905 	uint16_t		revid, sdevid;
906 	int			i;
907 
908 	vendor = pci_get_vendor(dev);
909 	devid = pci_get_device(dev);
910 	revid = pci_get_revid(dev);
911 	sdevid = pci_get_subdevice(dev);
912 
913 	if (vendor == LINKSYS_VENDORID && devid == LINKSYS_DEVICEID_EG1032) {
914 		if (sdevid != LINKSYS_SUBDEVICE_EG1032_REV3) {
915 			/*
916 			 * Only attach to rev. 3 of the Linksys EG1032 adapter.
917 			 * Rev. 2 is supported by sk(4).
918 			 */
919 			return (ENXIO);
920 		}
921 	}
922 
923 	if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) {
924 		if (revid != 0x20) {
925 			/* 8139, let rl(4) take care of this device. */
926 			return (ENXIO);
927 		}
928 	}
929 
930 	t = re_devs;
931 	for (i = 0; i < sizeof(re_devs) / sizeof(re_devs[0]); i++, t++) {
932 		if (vendor == t->rl_vid && devid == t->rl_did) {
933 			device_set_desc(dev, t->rl_name);
934 			return (BUS_PROBE_DEFAULT);
935 		}
936 	}
937 
938 	return (ENXIO);
939 }
940 
941 /*
942  * Map a single buffer address.
943  */
944 
945 static void
946 re_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
947 {
948 	bus_addr_t		*addr;
949 
950 	if (error)
951 		return;
952 
953 	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
954 	addr = arg;
955 	*addr = segs->ds_addr;
956 }
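
/*
 * re_dma_map_addr() is the usual single-segment busdma callback:
 * bus_dmamap_load() resolves the buffer into segments and hands them
 * to the callback, which copies the lone physical address out through
 * its void * argument.  A minimal sketch of the calling side follows
 * (illustrative only, not compiled; re_allocmem() below does this for
 * the descriptor rings and the statistics block):
 */
#if 0
static int
re_example_load(bus_dma_tag_t tag, bus_dmamap_t map, void *vaddr,
    bus_size_t size, bus_addr_t *paddr)
{
	int	error;

	*paddr = 0;
	error = bus_dmamap_load(tag, map, vaddr, size, re_dma_map_addr,
	    paddr, BUS_DMA_NOWAIT);
	if (error != 0 || *paddr == 0)
		return (ENOMEM);
	return (0);
}
#endif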
957 
958 static int
959 re_allocmem(device_t dev, struct rl_softc *sc)
960 {
961 	bus_addr_t		lowaddr;
962 	bus_size_t		rx_list_size, tx_list_size;
963 	int			error;
964 	int			i;
965 
966 	rx_list_size = sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc);
967 	tx_list_size = sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc);
968 
969 	/*
970 	 * Allocate the parent bus DMA tag appropriate for PCI.
971 	 * In order to use DAC, the RL_CPLUSCMD_PCI_DAC bit of the
972 	 * RL_CPLUS_CMD register should be set. However, some RealTek
973 	 * chips are known to be buggy in their DAC handling, so disable
974 	 * DAC by limiting the DMA address space to 32 bits. PCIe
975 	 * variants of RealTek chips may not have this limitation.
976 	 */
977 	lowaddr = BUS_SPACE_MAXADDR;
978 	if ((sc->rl_flags & RL_FLAG_PCIE) == 0)
979 		lowaddr = BUS_SPACE_MAXADDR_32BIT;
980 	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
981 	    lowaddr, BUS_SPACE_MAXADDR, NULL, NULL,
982 	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
983 	    NULL, NULL, &sc->rl_parent_tag);
984 	if (error) {
985 		device_printf(dev, "could not allocate parent DMA tag\n");
986 		return (error);
987 	}
988 
989 	/*
990 	 * Allocate map for TX mbufs.
991 	 */
992 	error = bus_dma_tag_create(sc->rl_parent_tag, 1, 0,
993 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
994 	    NULL, MCLBYTES * RL_NTXSEGS, RL_NTXSEGS, 4096, 0,
995 	    NULL, NULL, &sc->rl_ldata.rl_tx_mtag);
996 	if (error) {
997 		device_printf(dev, "could not allocate TX DMA tag\n");
998 		return (error);
999 	}
1000 
1001 	/*
1002 	 * Allocate map for RX mbufs.
1003 	 */
1004 
1005 	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
1006 		error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t),
1007 		    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1008 		    MJUM9BYTES, 1, MJUM9BYTES, 0, NULL, NULL,
1009 		    &sc->rl_ldata.rl_jrx_mtag);
1010 		if (error) {
1011 			device_printf(dev,
1012 			    "could not allocate jumbo RX DMA tag\n");
1013 			return (error);
1014 		}
1015 	}
1016 	error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 0,
1017 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1018 	    MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rl_ldata.rl_rx_mtag);
1019 	if (error) {
1020 		device_printf(dev, "could not allocate RX DMA tag\n");
1021 		return (error);
1022 	}
1023 
1024 	/*
1025 	 * Allocate map for TX descriptor list.
1026 	 */
1027 	error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
1028 	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
1029 	    NULL, tx_list_size, 1, tx_list_size, 0,
1030 	    NULL, NULL, &sc->rl_ldata.rl_tx_list_tag);
1031 	if (error) {
1032 		device_printf(dev, "could not allocate TX DMA ring tag\n");
1033 		return (error);
1034 	}
1035 
1036 	/* Allocate DMA'able memory for the TX ring */
1037 
1038 	error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag,
1039 	    (void **)&sc->rl_ldata.rl_tx_list,
1040 	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
1041 	    &sc->rl_ldata.rl_tx_list_map);
1042 	if (error) {
1043 		device_printf(dev, "could not allocate TX DMA ring\n");
1044 		return (error);
1045 	}
1046 
1047 	/* Load the map for the TX ring. */
1048 
1049 	sc->rl_ldata.rl_tx_list_addr = 0;
1050 	error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag,
1051 	     sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list,
1052 	     tx_list_size, re_dma_map_addr,
1053 	     &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT);
1054 	if (error != 0 || sc->rl_ldata.rl_tx_list_addr == 0) {
1055 		device_printf(dev, "could not load TX DMA ring\n");
1056 		return (ENOMEM);
1057 	}
1058 
1059 	/* Create DMA maps for TX buffers */
1060 
1061 	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
1062 		error = bus_dmamap_create(sc->rl_ldata.rl_tx_mtag, 0,
1063 		    &sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
1064 		if (error) {
1065 			device_printf(dev, "could not create DMA map for TX\n");
1066 			return (error);
1067 		}
1068 	}
1069 
1070 	/*
1071 	 * Allocate map for RX descriptor list.
1072 	 */
1073 	error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
1074 	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
1075 	    NULL, rx_list_size, 1, rx_list_size, 0,
1076 	    NULL, NULL, &sc->rl_ldata.rl_rx_list_tag);
1077 	if (error) {
1078 		device_printf(dev, "could not create RX DMA ring tag\n");
1079 		return (error);
1080 	}
1081 
1082 	/* Allocate DMA'able memory for the RX ring */
1083 
1084 	error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag,
1085 	    (void **)&sc->rl_ldata.rl_rx_list,
1086 	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
1087 	    &sc->rl_ldata.rl_rx_list_map);
1088 	if (error) {
1089 		device_printf(dev, "could not allocate RX DMA ring\n");
1090 		return (error);
1091 	}
1092 
1093 	/* Load the map for the RX ring. */
1094 
1095 	sc->rl_ldata.rl_rx_list_addr = 0;
1096 	error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag,
1097 	     sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list,
1098 	     rx_list_size, re_dma_map_addr,
1099 	     &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT);
1100 	if (error != 0 || sc->rl_ldata.rl_rx_list_addr == 0) {
1101 		device_printf(dev, "could not load RX DMA ring\n");
1102 		return (ENOMEM);
1103 	}
1104 
1105 	/* Create DMA maps for RX buffers */
1106 
1107 	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
1108 		error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0,
1109 		    &sc->rl_ldata.rl_jrx_sparemap);
1110 		if (error) {
1111 			device_printf(dev,
1112 			    "could not create spare DMA map for jumbo RX\n");
1113 			return (error);
1114 		}
1115 		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1116 			error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0,
1117 			    &sc->rl_ldata.rl_jrx_desc[i].rx_dmamap);
1118 			if (error) {
1119 				device_printf(dev,
1120 				    "could not create DMA map for jumbo RX\n");
1121 				return (error);
1122 			}
1123 		}
1124 	}
1125 	error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
1126 	    &sc->rl_ldata.rl_rx_sparemap);
1127 	if (error) {
1128 		device_printf(dev, "could not create spare DMA map for RX\n");
1129 		return (error);
1130 	}
1131 	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1132 		error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
1133 		    &sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
1134 		if (error) {
1135 			device_printf(dev, "could not create DMA map for RX\n");
1136 			return (error);
1137 		}
1138 	}
1139 
1140 	/* Create DMA map for statistics. */
1141 	error = bus_dma_tag_create(sc->rl_parent_tag, RL_DUMP_ALIGN, 0,
1142 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1143 	    sizeof(struct rl_stats), 1, sizeof(struct rl_stats), 0, NULL, NULL,
1144 	    &sc->rl_ldata.rl_stag);
1145 	if (error) {
1146 		device_printf(dev, "could not create statistics DMA tag\n");
1147 		return (error);
1148 	}
1149 	/* Allocate DMA'able memory for statistics. */
1150 	error = bus_dmamem_alloc(sc->rl_ldata.rl_stag,
1151 	    (void **)&sc->rl_ldata.rl_stats,
1152 	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
1153 	    &sc->rl_ldata.rl_smap);
1154 	if (error) {
1155 		device_printf(dev,
1156 		    "could not allocate statistics DMA memory\n");
1157 		return (error);
1158 	}
1159 	/* Load the map for statistics. */
1160 	sc->rl_ldata.rl_stats_addr = 0;
1161 	error = bus_dmamap_load(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap,
1162 	    sc->rl_ldata.rl_stats, sizeof(struct rl_stats), re_dma_map_addr,
1163 	    &sc->rl_ldata.rl_stats_addr, BUS_DMA_NOWAIT);
1164 	if (error != 0 || sc->rl_ldata.rl_stats_addr == 0) {
1165 		device_printf(dev, "could not load statistics DMA memory\n");
1166 		return (ENOMEM);
1167 	}
1168 
1169 	return (0);
1170 }
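
/*
 * Summary of the DMA tag hierarchy built above:
 *
 *	rl_parent_tag (DMA addresses limited to 32 bits unless PCIe)
 *	    rl_tx_mtag      per-packet TX mbuf maps, up to RL_NTXSEGS segments
 *	    rl_rx_mtag      per-packet RX mbuf maps, one MCLBYTES segment
 *	    rl_jrx_mtag     jumbo RX mbuf maps, one MJUM9BYTES segment (JUMBOV2)
 *	    rl_tx_list_tag  TX descriptor ring, RL_RING_ALIGN aligned
 *	    rl_rx_list_tag  RX descriptor ring, RL_RING_ALIGN aligned
 *	    rl_stag         statistics block, RL_DUMP_ALIGN aligned
 */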
1171 
1172 /*
1173  * Attach the interface. Allocate softc structures, do ifmedia
1174  * setup and ethernet/BPF attach.
1175  */
1176 static int
1177 re_attach(device_t dev)
1178 {
1179 	u_char			eaddr[ETHER_ADDR_LEN];
1180 	u_int16_t		as[ETHER_ADDR_LEN / 2];
1181 	struct rl_softc		*sc;
1182 	struct ifnet		*ifp;
1183 	struct rl_hwrev		*hw_rev;
1184 	int			hwrev;
1185 	u_int16_t		devid, re_did = 0;
1186 	int			error = 0, i, phy, rid;
1187 	int			msic, msixc, reg;
1188 	uint8_t			cfg;
1189 
1190 	sc = device_get_softc(dev);
1191 	sc->rl_dev = dev;
1192 
1193 	mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1194 	    MTX_DEF);
1195 	callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0);
1196 
1197 	/*
1198 	 * Map control/status registers.
1199 	 */
1200 	pci_enable_busmaster(dev);
1201 
1202 	devid = pci_get_device(dev);
1203 	/*
1204 	 * Prefer memory space register mapping over I/O space.
1205 	 * Because the RTL8169SC does not seem to work when memory
1206 	 * mapping is used, always activate I/O mapping for it.
1207 	 */
1208 	if (devid == RT_DEVICEID_8169SC)
1209 		prefer_iomap = 1;
1210 	if (prefer_iomap == 0) {
1211 		sc->rl_res_id = PCIR_BAR(1);
1212 		sc->rl_res_type = SYS_RES_MEMORY;
1213 		/* RTL8168/8101E seems to use different BARs. */
1214 		if (devid == RT_DEVICEID_8168 || devid == RT_DEVICEID_8101E)
1215 			sc->rl_res_id = PCIR_BAR(2);
1216 	} else {
1217 		sc->rl_res_id = PCIR_BAR(0);
1218 		sc->rl_res_type = SYS_RES_IOPORT;
1219 	}
1220 	sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
1221 	    &sc->rl_res_id, RF_ACTIVE);
1222 	if (sc->rl_res == NULL && prefer_iomap == 0) {
1223 		sc->rl_res_id = PCIR_BAR(0);
1224 		sc->rl_res_type = SYS_RES_IOPORT;
1225 		sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
1226 		    &sc->rl_res_id, RF_ACTIVE);
1227 	}
1228 	if (sc->rl_res == NULL) {
1229 		device_printf(dev, "couldn't map ports/memory\n");
1230 		error = ENXIO;
1231 		goto fail;
1232 	}
1233 
1234 	sc->rl_btag = rman_get_bustag(sc->rl_res);
1235 	sc->rl_bhandle = rman_get_bushandle(sc->rl_res);
1236 
1237 	msic = pci_msi_count(dev);
1238 	msixc = pci_msix_count(dev);
1239 	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0)
1240 		sc->rl_flags |= RL_FLAG_PCIE;
1241 	if (bootverbose) {
1242 		device_printf(dev, "MSI count : %d\n", msic);
1243 		device_printf(dev, "MSI-X count : %d\n", msixc);
1244 	}
1245 	if (msix_disable > 0)
1246 		msixc = 0;
1247 	if (msi_disable > 0)
1248 		msic = 0;
1249 	/* Prefer MSI-X to MSI. */
1250 	if (msixc > 0) {
1251 		msixc = 1;
1252 		rid = PCIR_BAR(4);
1253 		sc->rl_res_pba = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1254 		    &rid, RF_ACTIVE);
1255 		if (sc->rl_res_pba == NULL) {
1256 			device_printf(sc->rl_dev,
1257 			    "could not allocate MSI-X PBA resource\n");
1258 		}
1259 		if (sc->rl_res_pba != NULL &&
1260 		    pci_alloc_msix(dev, &msixc) == 0) {
1261 			if (msixc == 1) {
1262 				device_printf(dev, "Using %d MSI-X message\n",
1263 				    msixc);
1264 				sc->rl_flags |= RL_FLAG_MSIX;
1265 			} else
1266 				pci_release_msi(dev);
1267 		}
1268 		if ((sc->rl_flags & RL_FLAG_MSIX) == 0) {
1269 			if (sc->rl_res_pba != NULL)
1270 				bus_release_resource(dev, SYS_RES_MEMORY, rid,
1271 				    sc->rl_res_pba);
1272 			sc->rl_res_pba = NULL;
1273 			msixc = 0;
1274 		}
1275 	}
1276 	/* Prefer MSI to INTx. */
1277 	if (msixc == 0 && msic > 0) {
1278 		msic = 1;
1279 		if (pci_alloc_msi(dev, &msic) == 0) {
1280 			if (msic == RL_MSI_MESSAGES) {
1281 				device_printf(dev, "Using %d MSI message\n",
1282 				    msic);
1283 				sc->rl_flags |= RL_FLAG_MSI;
1284 				/* Explicitly set MSI enable bit. */
1285 				CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
1286 				cfg = CSR_READ_1(sc, RL_CFG2);
1287 				cfg |= RL_CFG2_MSI;
1288 				CSR_WRITE_1(sc, RL_CFG2, cfg);
1289 				CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
1290 			} else
1291 				pci_release_msi(dev);
1292 		}
1293 		if ((sc->rl_flags & RL_FLAG_MSI) == 0)
1294 			msic = 0;
1295 	}
1296 
1297 	/* Allocate interrupt */
1298 	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) {
1299 		rid = 0;
1300 		sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1301 		    RF_SHAREABLE | RF_ACTIVE);
1302 		if (sc->rl_irq[0] == NULL) {
1303 			device_printf(dev, "couldn't allocate IRQ resources\n");
1304 			error = ENXIO;
1305 			goto fail;
1306 		}
1307 	} else {
1308 		for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) {
1309 			sc->rl_irq[i] = bus_alloc_resource_any(dev,
1310 			    SYS_RES_IRQ, &rid, RF_ACTIVE);
1311 			if (sc->rl_irq[i] == NULL) {
1312 				device_printf(dev,
1313 				    "couldn't allocate IRQ resources for "
1314 				    "message %d\n", rid);
1315 				error = ENXIO;
1316 				goto fail;
1317 			}
1318 		}
1319 	}
1320 
1321 	if ((sc->rl_flags & RL_FLAG_MSI) == 0) {
1322 		CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
1323 		cfg = CSR_READ_1(sc, RL_CFG2);
1324 		if ((cfg & RL_CFG2_MSI) != 0) {
1325 			device_printf(dev, "turning off MSI enable bit.\n");
1326 			cfg &= ~RL_CFG2_MSI;
1327 			CSR_WRITE_1(sc, RL_CFG2, cfg);
1328 		}
1329 		CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
1330 	}
1331 
1332 	hw_rev = re_hwrevs;
1333 	hwrev = CSR_READ_4(sc, RL_TXCFG);
1334 	switch (hwrev & 0x70000000) {
1335 	case 0x00000000:
1336 	case 0x10000000:
1337 		device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0xfc800000);
1338 		hwrev &= (RL_TXCFG_HWREV | 0x80000000);
1339 		break;
1340 	default:
1341 		device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0x7c800000);
1342 		hwrev &= RL_TXCFG_HWREV;
1343 		break;
1344 	}
1345 	device_printf(dev, "MAC rev. 0x%08x\n", hwrev & 0x00700000);
1346 	while (hw_rev->rl_desc != NULL) {
1347 		if (hw_rev->rl_rev == hwrev) {
1348 			sc->rl_type = hw_rev->rl_type;
1349 			sc->rl_hwrev = hw_rev;
1350 			break;
1351 		}
1352 		hw_rev++;
1353 	}
1354 	if (hw_rev->rl_desc == NULL) {
1355 		device_printf(dev, "Unknown H/W revision: 0x%08x\n", hwrev);
1356 		error = ENXIO;
1357 		goto fail;
1358 	}
1359 
1360 	switch (hw_rev->rl_rev) {
1361 	case RL_HWREV_8139CPLUS:
1362 		sc->rl_flags |= RL_FLAG_FASTETHER | RL_FLAG_AUTOPAD;
1363 		break;
1364 	case RL_HWREV_8100E:
1365 	case RL_HWREV_8101E:
1366 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_FASTETHER;
1367 		break;
1368 	case RL_HWREV_8102E:
1369 	case RL_HWREV_8102EL:
1370 	case RL_HWREV_8102EL_SPIN1:
1371 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 |
1372 		    RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP |
1373 		    RL_FLAG_AUTOPAD;
1374 		break;
1375 	case RL_HWREV_8103E:
1376 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 |
1377 		    RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP |
1378 		    RL_FLAG_AUTOPAD | RL_FLAG_MACSLEEP;
1379 		break;
1380 	case RL_HWREV_8105E:
1381 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
1382 		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
1383 		    RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD;
1384 		break;
1385 	case RL_HWREV_8168B_SPIN1:
1386 	case RL_HWREV_8168B_SPIN2:
1387 		sc->rl_flags |= RL_FLAG_WOLRXENB;
1388 		/* FALLTHROUGH */
1389 	case RL_HWREV_8168B_SPIN3:
1390 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT;
1391 		break;
1392 	case RL_HWREV_8168C_SPIN2:
1393 		sc->rl_flags |= RL_FLAG_MACSLEEP;
1394 		/* FALLTHROUGH */
1395 	case RL_HWREV_8168C:
1396 		if ((hwrev & 0x00700000) == 0x00200000)
1397 			sc->rl_flags |= RL_FLAG_MACSLEEP;
1398 		/* FALLTHROUGH */
1399 	case RL_HWREV_8168CP:
1400 	case RL_HWREV_8168D:
1401 	case RL_HWREV_8168DP:
1402 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
1403 		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
1404 		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2;
1405 		break;
1406 	case RL_HWREV_8168E:
1407 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
1408 		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
1409 		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2;
1410 		break;
1411 	case RL_HWREV_8168E_VL:
1412 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
1413 		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
1414 		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2;
1415 		break;
1416 	case RL_HWREV_8169_8110SB:
1417 	case RL_HWREV_8169_8110SBL:
1418 	case RL_HWREV_8169_8110SC:
1419 	case RL_HWREV_8169_8110SCE:
1420 		sc->rl_flags |= RL_FLAG_PHYWAKE;
1421 		/* FALLTHROUGH */
1422 	case RL_HWREV_8169:
1423 	case RL_HWREV_8169S:
1424 	case RL_HWREV_8110S:
1425 		sc->rl_flags |= RL_FLAG_MACRESET;
1426 		break;
1427 	default:
1428 		break;
1429 	}
1430 
1431 	/* Reset the adapter. */
1432 	RL_LOCK(sc);
1433 	re_reset(sc);
1434 	RL_UNLOCK(sc);
1435 
1436 	/* Enable PME. */
1437 	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
1438 	cfg = CSR_READ_1(sc, RL_CFG1);
1439 	cfg |= RL_CFG1_PME;
1440 	CSR_WRITE_1(sc, RL_CFG1, cfg);
1441 	cfg = CSR_READ_1(sc, RL_CFG5);
1442 	cfg &= RL_CFG5_PME_STS;
1443 	CSR_WRITE_1(sc, RL_CFG5, cfg);
1444 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
1445 
1446 	if ((sc->rl_flags & RL_FLAG_PAR) != 0) {
1447 		/*
1448 		 * XXX Should have a better way to extract station
1449 		 * address from EEPROM.
1450 		 */
1451 		for (i = 0; i < ETHER_ADDR_LEN; i++)
1452 			eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
1453 	} else {
1454 		sc->rl_eewidth = RL_9356_ADDR_LEN;
1455 		re_read_eeprom(sc, (caddr_t)&re_did, 0, 1);
1456 		if (re_did != 0x8129)
1457 			sc->rl_eewidth = RL_9346_ADDR_LEN;
1458 
1459 		/*
1460 		 * Get station address from the EEPROM.
1461 		 */
1462 		re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3);
1463 		for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
1464 			as[i] = le16toh(as[i]);
1465 		bcopy(as, eaddr, sizeof(eaddr));
1466 	}
1467 
1468 	if (sc->rl_type == RL_8169) {
1469 		/* Set RX length mask and number of descriptors. */
1470 		sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN;
1471 		sc->rl_txstart = RL_GTXSTART;
1472 		sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT;
1473 		sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT;
1474 	} else {
1475 		/* Set RX length mask and number of descriptors. */
1476 		sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN;
1477 		sc->rl_txstart = RL_TXSTART;
1478 		sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT;
1479 		sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT;
1480 	}
1481 
1482 	error = re_allocmem(dev, sc);
1483 	if (error)
1484 		goto fail;
1485 	re_add_sysctls(sc);
1486 
1487 	ifp = sc->rl_ifp = if_alloc(IFT_ETHER);
1488 	if (ifp == NULL) {
1489 		device_printf(dev, "cannot if_alloc()\n");
1490 		error = ENOSPC;
1491 		goto fail;
1492 	}
1493 
1494 	/* Take controller out of deep sleep mode. */
1495 	if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
1496 		if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
1497 			CSR_WRITE_1(sc, RL_GPIO,
1498 			    CSR_READ_1(sc, RL_GPIO) | 0x01);
1499 		else
1500 			CSR_WRITE_1(sc, RL_GPIO,
1501 			    CSR_READ_1(sc, RL_GPIO) & ~0x01);
1502 	}
1503 
1504 	/* Take PHY out of power down mode. */
1505 	if ((sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0)
1506 		CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80);
1507 	if ((sc->rl_flags & RL_FLAG_PHYWAKE) != 0) {
1508 		re_gmii_writereg(dev, 1, 0x1f, 0);
1509 		re_gmii_writereg(dev, 1, 0x0e, 0);
1510 	}
1511 
1512 #define	RE_PHYAD_INTERNAL	 0
1513 
1514 	/* Do MII setup. */
1515 	phy = RE_PHYAD_INTERNAL;
1516 	if (sc->rl_type == RL_8169)
1517 		phy = 1;
1518 	error = mii_attach(dev, &sc->rl_miibus, ifp, re_ifmedia_upd,
1519 	    re_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, MIIF_DOPAUSE);
1520 	if (error != 0) {
1521 		device_printf(dev, "attaching PHYs failed\n");
1522 		goto fail;
1523 	}
1524 
1525 	ifp->if_softc = sc;
1526 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1527 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1528 	ifp->if_ioctl = re_ioctl;
1529 	ifp->if_start = re_start;
1530 	ifp->if_hwassist = RE_CSUM_FEATURES | CSUM_TSO;
1531 	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4;
1532 	ifp->if_capenable = ifp->if_capabilities;
1533 	ifp->if_init = re_init;
1534 	IFQ_SET_MAXLEN(&ifp->if_snd, RL_IFQ_MAXLEN);
1535 	ifp->if_snd.ifq_drv_maxlen = RL_IFQ_MAXLEN;
1536 	IFQ_SET_READY(&ifp->if_snd);
1537 
1538 	TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc);
1539 
1540 	/*
1541 	 * Call MI attach routine.
1542 	 */
1543 	ether_ifattach(ifp, eaddr);
1544 
1545 	/* VLAN capability setup */
1546 	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
1547 	if (ifp->if_capabilities & IFCAP_HWCSUM)
1548 		ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
1549 	/* Enable WOL if PM is supported. */
1550 	if (pci_find_extcap(sc->rl_dev, PCIY_PMG, &reg) == 0)
1551 		ifp->if_capabilities |= IFCAP_WOL;
1552 	ifp->if_capenable = ifp->if_capabilities;
1553 	/*
1554 	 * Don't enable TSO by default.  It is known to generate
1555 	 * corrupted TCP segments (bad TCP options) under certain
1556 	 * circumstances.
1557 	 */
1558 	ifp->if_hwassist &= ~CSUM_TSO;
1559 	ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_VLAN_HWTSO);
1560 #ifdef DEVICE_POLLING
1561 	ifp->if_capabilities |= IFCAP_POLLING;
1562 #endif
1563 	/*
1564 	 * Tell the upper layer(s) we support long frames.
1565 	 * Must appear after the call to ether_ifattach() because
1566 	 * ether_ifattach() sets ifi_hdrlen to the default value.
1567 	 */
1568 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1569 
1570 #ifdef RE_DIAG
1571 	/*
1572 	 * Perform hardware diagnostic on the original RTL8169.
1573 	 * Some 32-bit cards were incorrectly wired and would
1574 	 * malfunction if plugged into a 64-bit slot.
1575 	 */
1576 
1577 	if (hwrev == RL_HWREV_8169) {
1578 		error = re_diag(sc);
1579 		if (error) {
1580 			device_printf(dev,
1581 			    "attach aborted due to hardware diag failure\n");
1582 			ether_ifdetach(ifp);
1583 			goto fail;
1584 		}
1585 	}
1586 #endif
1587 
1588 #ifdef RE_TX_MODERATION
1589 	intr_filter = 1;
1590 #endif
1591 	/* Hook interrupt last to avoid having to lock softc */
1592 	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 &&
1593 	    intr_filter == 0) {
1594 		error = bus_setup_intr(dev, sc->rl_irq[0],
1595 		    INTR_TYPE_NET | INTR_MPSAFE, NULL, re_intr_msi, sc,
1596 		    &sc->rl_intrhand[0]);
1597 	} else {
1598 		error = bus_setup_intr(dev, sc->rl_irq[0],
1599 		    INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc,
1600 		    &sc->rl_intrhand[0]);
1601 	}
1602 	if (error) {
1603 		device_printf(dev, "couldn't set up irq\n");
1604 		ether_ifdetach(ifp);
1605 	}
1606 
1607 fail:
1608 
1609 	if (error)
1610 		re_detach(dev);
1611 
1612 	return (error);
1613 }
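
/*
 * A note on ordering: the interrupt handler is hooked up last, after
 * ether_ifattach(), so it can never run against a half-initialized
 * softc and no locking is needed during attach.  Conversely, any
 * failure after ether_ifattach() must call ether_ifdetach() before
 * bailing out, as the RE_DIAG and bus_setup_intr() error paths above
 * do.
 */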
1614 
1615 /*
1616  * Shutdown hardware and free up resources. This can be called any
1617  * time after the mutex has been initialized. It is called in both
1618  * the error case in attach and the normal detach case so it needs
1619  * to be careful about only freeing resources that have actually been
1620  * allocated.
1621  */
1622 static int
1623 re_detach(device_t dev)
1624 {
1625 	struct rl_softc		*sc;
1626 	struct ifnet		*ifp;
1627 	int			i, rid;
1628 
1629 	sc = device_get_softc(dev);
1630 	ifp = sc->rl_ifp;
1631 	KASSERT(mtx_initialized(&sc->rl_mtx), ("re mutex not initialized"));
1632 
1633 	/* These should only be active if attach succeeded */
1634 	if (device_is_attached(dev)) {
1635 #ifdef DEVICE_POLLING
1636 		if (ifp->if_capenable & IFCAP_POLLING)
1637 			ether_poll_deregister(ifp);
1638 #endif
1639 		RL_LOCK(sc);
1640 #if 0
1641 		sc->suspended = 1;
1642 #endif
1643 		re_stop(sc);
1644 		RL_UNLOCK(sc);
1645 		callout_drain(&sc->rl_stat_callout);
1646 		taskqueue_drain(taskqueue_fast, &sc->rl_inttask);
1647 		/*
1648 		 * Force off the IFF_UP flag here, in case someone
1649 		 * still had a BPF descriptor attached to this
1650 		 * interface. If they do, ether_ifdetach() will cause
1651 		 * the BPF code to try to clear the promiscuous mode
1652 		 * flag, which will bubble down to re_ioctl(),
1653 		 * which will try to call re_init() again. This will
1654 		 * turn the NIC back on and restart the MII ticker,
1655 		 * which will panic the system when the kernel tries
1656 		 * to invoke the re_tick() function that isn't there
1657 		 * anymore.
1658 		 */
1659 		ifp->if_flags &= ~IFF_UP;
1660 		ether_ifdetach(ifp);
1661 	}
1662 	if (sc->rl_miibus)
1663 		device_delete_child(dev, sc->rl_miibus);
1664 	bus_generic_detach(dev);
1665 
1666 	/*
1667 	 * The rest is resource deallocation, so we should already be
1668 	 * stopped here.
1669 	 */
1670 
1671 	if (sc->rl_intrhand[0] != NULL) {
1672 		bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]);
1673 		sc->rl_intrhand[0] = NULL;
1674 	}
1675 	if (ifp != NULL)
1676 		if_free(ifp);
1677 	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0)
1678 		rid = 0;
1679 	else
1680 		rid = 1;
1681 	if (sc->rl_irq[0] != NULL) {
1682 		bus_release_resource(dev, SYS_RES_IRQ, rid, sc->rl_irq[0]);
1683 		sc->rl_irq[0] = NULL;
1684 	}
1685 	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0)
1686 		pci_release_msi(dev);
1687 	if (sc->rl_res_pba) {
1688 		rid = PCIR_BAR(4);
1689 		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->rl_res_pba);
1690 	}
1691 	if (sc->rl_res)
1692 		bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id,
1693 		    sc->rl_res);
1694 
1695 	/* Unload and free the RX DMA ring memory and map */
1696 
1697 	if (sc->rl_ldata.rl_rx_list_tag) {
1698 		if (sc->rl_ldata.rl_rx_list_map)
1699 			bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag,
1700 			    sc->rl_ldata.rl_rx_list_map);
1701 		if (sc->rl_ldata.rl_rx_list_map && sc->rl_ldata.rl_rx_list)
1702 			bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag,
1703 			    sc->rl_ldata.rl_rx_list,
1704 			    sc->rl_ldata.rl_rx_list_map);
1705 		bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag);
1706 	}
1707 
1708 	/* Unload and free the TX DMA ring memory and map */
1709 
1710 	if (sc->rl_ldata.rl_tx_list_tag) {
1711 		if (sc->rl_ldata.rl_tx_list_map)
1712 			bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag,
1713 			    sc->rl_ldata.rl_tx_list_map);
1714 		if (sc->rl_ldata.rl_tx_list_map && sc->rl_ldata.rl_tx_list)
1715 			bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag,
1716 			    sc->rl_ldata.rl_tx_list,
1717 			    sc->rl_ldata.rl_tx_list_map);
1718 		bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag);
1719 	}
1720 
1721 	/* Destroy all the RX and TX buffer maps */
1722 
1723 	if (sc->rl_ldata.rl_tx_mtag) {
1724 		for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
1725 			if (sc->rl_ldata.rl_tx_desc[i].tx_dmamap)
1726 				bus_dmamap_destroy(sc->rl_ldata.rl_tx_mtag,
1727 				    sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
1728 		}
1729 		bus_dma_tag_destroy(sc->rl_ldata.rl_tx_mtag);
1730 	}
1731 	if (sc->rl_ldata.rl_rx_mtag) {
1732 		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1733 			if (sc->rl_ldata.rl_rx_desc[i].rx_dmamap)
1734 				bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
1735 				    sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
1736 		}
1737 		if (sc->rl_ldata.rl_rx_sparemap)
1738 			bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
1739 			    sc->rl_ldata.rl_rx_sparemap);
1740 		bus_dma_tag_destroy(sc->rl_ldata.rl_rx_mtag);
1741 	}
1742 	if (sc->rl_ldata.rl_jrx_mtag) {
1743 		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1744 			if (sc->rl_ldata.rl_jrx_desc[i].rx_dmamap)
1745 				bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag,
1746 				    sc->rl_ldata.rl_jrx_desc[i].rx_dmamap);
1747 		}
1748 		if (sc->rl_ldata.rl_jrx_sparemap)
1749 			bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag,
1750 			    sc->rl_ldata.rl_jrx_sparemap);
1751 		bus_dma_tag_destroy(sc->rl_ldata.rl_jrx_mtag);
1752 	}
1753 	/* Unload and free the stats buffer and map */
1754 
1755 	if (sc->rl_ldata.rl_stag) {
1756 		if (sc->rl_ldata.rl_smap)
1757 			bus_dmamap_unload(sc->rl_ldata.rl_stag,
1758 			    sc->rl_ldata.rl_smap);
1759 		if (sc->rl_ldata.rl_smap && sc->rl_ldata.rl_stats)
1760 			bus_dmamem_free(sc->rl_ldata.rl_stag,
1761 			    sc->rl_ldata.rl_stats, sc->rl_ldata.rl_smap);
1762 		bus_dma_tag_destroy(sc->rl_ldata.rl_stag);
1763 	}
1764 
1765 	if (sc->rl_parent_tag)
1766 		bus_dma_tag_destroy(sc->rl_parent_tag);
1767 
1768 	mtx_destroy(&sc->rl_mtx);
1769 
1770 	return (0);
1771 }
1772 
1773 static __inline void
1774 re_discard_rxbuf(struct rl_softc *sc, int idx)
1775 {
1776 	struct rl_desc		*desc;
1777 	struct rl_rxdesc	*rxd;
1778 	uint32_t		cmdstat;
1779 
1780 	if (sc->rl_ifp->if_mtu > RL_MTU &&
1781 	    (sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
1782 		rxd = &sc->rl_ldata.rl_jrx_desc[idx];
1783 	else
1784 		rxd = &sc->rl_ldata.rl_rx_desc[idx];
1785 	desc = &sc->rl_ldata.rl_rx_list[idx];
1786 	desc->rl_vlanctl = 0;
1787 	cmdstat = rxd->rx_size;
1788 	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
1789 		cmdstat |= RL_RDESC_CMD_EOR;
1790 	desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
1791 }
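
/*
 * The routine above shows the descriptor-ring ownership handshake in
 * its smallest form: the driver may touch a slot only while the OWN
 * bit is clear, and hands it back to the chip by setting OWN (plus
 * EOR on the ring's final slot so the chip wraps).  A minimal sketch
 * of the same handoff using this file's macros; the function name is
 * illustrative, not part of the driver:
 */
#ifdef notdef
static __inline void
ring_return_desc(struct rl_desc *ring, int idx, int nslots, uint32_t len)
{
	uint32_t cmdstat;

	cmdstat = len;				/* buffer length in low bits */
	if (idx == nslots - 1)
		cmdstat |= RL_RDESC_CMD_EOR;	/* wrap marker on last slot */
	ring[idx].rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
}
#endif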
1792 
1793 static int
1794 re_newbuf(struct rl_softc *sc, int idx)
1795 {
1796 	struct mbuf		*m;
1797 	struct rl_rxdesc	*rxd;
1798 	bus_dma_segment_t	segs[1];
1799 	bus_dmamap_t		map;
1800 	struct rl_desc		*desc;
1801 	uint32_t		cmdstat;
1802 	int			error, nsegs;
1803 
1804 	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1805 	if (m == NULL)
1806 		return (ENOBUFS);
1807 
1808 	m->m_len = m->m_pkthdr.len = MCLBYTES;
1809 #ifdef RE_FIXUP_RX
1810 	/*
1811 	 * This is part of an evil trick to deal with non-x86 platforms.
1812 	 * The RealTek chip requires RX buffers to be aligned on 64-bit
1813 	 * boundaries, but that will hose non-x86 machines. To get around
1814 	 * this, we leave some empty space at the start of each buffer
1815 	 * and for non-x86 hosts, we copy the buffer back six bytes
1816 	 * to achieve word alignment. This is slightly more efficient
1817 	 * than allocating a new buffer, copying the contents, and
1818 	 * discarding the old buffer.
1819 	 */
1820 	m_adj(m, RE_ETHER_ALIGN);
1821 #endif
1822 	error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_rx_mtag,
1823 	    sc->rl_ldata.rl_rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
1824 	if (error != 0) {
1825 		m_freem(m);
1826 		return (ENOBUFS);
1827 	}
1828 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1829 
1830 	rxd = &sc->rl_ldata.rl_rx_desc[idx];
1831 	if (rxd->rx_m != NULL) {
1832 		bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap,
1833 		    BUS_DMASYNC_POSTREAD);
1834 		bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap);
1835 	}
1836 
1837 	rxd->rx_m = m;
1838 	map = rxd->rx_dmamap;
1839 	rxd->rx_dmamap = sc->rl_ldata.rl_rx_sparemap;
1840 	rxd->rx_size = segs[0].ds_len;
1841 	sc->rl_ldata.rl_rx_sparemap = map;
1842 	bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap,
1843 	    BUS_DMASYNC_PREREAD);
1844 
1845 	desc = &sc->rl_ldata.rl_rx_list[idx];
1846 	desc->rl_vlanctl = 0;
1847 	desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr));
1848 	desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr));
1849 	cmdstat = segs[0].ds_len;
1850 	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
1851 		cmdstat |= RL_RDESC_CMD_EOR;
1852 	desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
1853 
1854 	return (0);
1855 }
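
/*
 * A note on the dmamap juggling above: the replacement mbuf is always
 * loaded into rl_rx_sparemap first, so a load failure leaves the
 * previously posted buffer (and its map) untouched; only after a
 * successful load are the maps swapped.  A self-contained sketch of
 * the idiom; rx_swap_spare is an illustrative name, not part of the
 * driver:
 */
#ifdef notdef
static int
rx_swap_spare(bus_dma_tag_t tag, struct rl_rxdesc *rxd, bus_dmamap_t *sparep,
    struct mbuf *m)
{
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	/* Load into the spare first; the old buffer stays valid on error. */
	if (bus_dmamap_load_mbuf_sg(tag, *sparep, m, segs, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	/* Success: descriptor adopts the spare, its old map becomes spare. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = *sparep;
	*sparep = map;
	rxd->rx_m = m;
	rxd->rx_size = segs[0].ds_len;
	return (0);
}
#endif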
1856 
1857 static int
1858 re_jumbo_newbuf(struct rl_softc *sc, int idx)
1859 {
1860 	struct mbuf		*m;
1861 	struct rl_rxdesc	*rxd;
1862 	bus_dma_segment_t	segs[1];
1863 	bus_dmamap_t		map;
1864 	struct rl_desc		*desc;
1865 	uint32_t		cmdstat;
1866 	int			error, nsegs;
1867 
1868 	m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
1869 	if (m == NULL)
1870 		return (ENOBUFS);
1871 	m->m_len = m->m_pkthdr.len = MJUM9BYTES;
1872 #ifdef RE_FIXUP_RX
1873 	m_adj(m, RE_ETHER_ALIGN);
1874 #endif
1875 	error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_jrx_mtag,
1876 	    sc->rl_ldata.rl_jrx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
1877 	if (error != 0) {
1878 		m_freem(m);
1879 		return (ENOBUFS);
1880 	}
1881 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1882 
1883 	rxd = &sc->rl_ldata.rl_jrx_desc[idx];
1884 	if (rxd->rx_m != NULL) {
1885 		bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap,
1886 		    BUS_DMASYNC_POSTREAD);
1887 		bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap);
1888 	}
1889 
1890 	rxd->rx_m = m;
1891 	map = rxd->rx_dmamap;
1892 	rxd->rx_dmamap = sc->rl_ldata.rl_jrx_sparemap;
1893 	rxd->rx_size = segs[0].ds_len;
1894 	sc->rl_ldata.rl_jrx_sparemap = map;
1895 	bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap,
1896 	    BUS_DMASYNC_PREREAD);
1897 
1898 	desc = &sc->rl_ldata.rl_rx_list[idx];
1899 	desc->rl_vlanctl = 0;
1900 	desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr));
1901 	desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr));
1902 	cmdstat = segs[0].ds_len;
1903 	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
1904 		cmdstat |= RL_RDESC_CMD_EOR;
1905 	desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
1906 
1907 	return (0);
1908 }
1909 
1910 #ifdef RE_FIXUP_RX
1911 static __inline void
1912 re_fixup_rx(struct mbuf *m)
1913 {
1914 	int                     i;
1915 	uint16_t                *src, *dst;
1916 
1917 	src = mtod(m, uint16_t *);
1918 	dst = src - (RE_ETHER_ALIGN - ETHER_ALIGN) / sizeof *src;
1919 
1920 	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1921 		*dst++ = *src++;
1922 
1923 	m->m_data -= RE_ETHER_ALIGN - ETHER_ALIGN;
1924 }
1925 #endif
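
/*
 * Worked example of the fixup above, assuming RE_ETHER_ALIGN is 8 and
 * ETHER_ALIGN is 2 (consistent with the six-byte copy described in
 * re_newbuf()): m_adj() leaves 8 bytes of headroom so the DMA write
 * lands on a 64-bit boundary; re_fixup_rx() then slides the frame
 * back to offset 2, which puts the IP header after the 14-byte
 * ethernet header at offset 16, a 32-bit boundary that
 * strict-alignment machines can load from safely.
 */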
1926 
1927 static int
1928 re_tx_list_init(struct rl_softc *sc)
1929 {
1930 	struct rl_desc		*desc;
1931 	int			i;
1932 
1933 	RL_LOCK_ASSERT(sc);
1934 
1935 	bzero(sc->rl_ldata.rl_tx_list,
1936 	    sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc));
1937 	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++)
1938 		sc->rl_ldata.rl_tx_desc[i].tx_m = NULL;
1939 	/* Set EOR. */
1940 	desc = &sc->rl_ldata.rl_tx_list[sc->rl_ldata.rl_tx_desc_cnt - 1];
1941 	desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOR);
1942 
1943 	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
1944 	    sc->rl_ldata.rl_tx_list_map,
1945 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1946 
1947 	sc->rl_ldata.rl_tx_prodidx = 0;
1948 	sc->rl_ldata.rl_tx_considx = 0;
1949 	sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt;
1950 
1951 	return (0);
1952 }
1953 
1954 static int
1955 re_rx_list_init(struct rl_softc *sc)
1956 {
1957 	int			error, i;
1958 
1959 	bzero(sc->rl_ldata.rl_rx_list,
1960 	    sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc));
1961 	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1962 		sc->rl_ldata.rl_rx_desc[i].rx_m = NULL;
1963 		if ((error = re_newbuf(sc, i)) != 0)
1964 			return (error);
1965 	}
1966 
1967 	/* Flush the RX descriptors */
1968 
1969 	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
1970 	    sc->rl_ldata.rl_rx_list_map,
1971 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1972 
1973 	sc->rl_ldata.rl_rx_prodidx = 0;
1974 	sc->rl_head = sc->rl_tail = NULL;
1975 	sc->rl_int_rx_act = 0;
1976 
1977 	return (0);
1978 }
1979 
1980 static int
1981 re_jrx_list_init(struct rl_softc *sc)
1982 {
1983 	int			error, i;
1984 
1985 	bzero(sc->rl_ldata.rl_rx_list,
1986 	    sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc));
1987 	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1988 		sc->rl_ldata.rl_jrx_desc[i].rx_m = NULL;
1989 		if ((error = re_jumbo_newbuf(sc, i)) != 0)
1990 			return (error);
1991 	}
1992 
1993 	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
1994 	    sc->rl_ldata.rl_rx_list_map,
1995 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1996 
1997 	sc->rl_ldata.rl_rx_prodidx = 0;
1998 	sc->rl_head = sc->rl_tail = NULL;
1999 	sc->rl_int_rx_act = 0;
2000 
2001 	return (0);
2002 }
2003 
2004 /*
2005  * RX handler for C+ and 8169. For the gigE chips, we support
2006  * the reception of jumbo frames that have been fragmented
2007  * across multiple 2K mbuf cluster buffers.
2008  */
2009 static int
2010 re_rxeof(struct rl_softc *sc, int *rx_npktsp)
2011 {
2012 	struct mbuf		*m;
2013 	struct ifnet		*ifp;
2014 	int			i, rxerr, total_len;
2015 	struct rl_desc		*cur_rx;
2016 	u_int32_t		rxstat, rxvlan;
2017 	int			jumbo, maxpkt = 16, rx_npkts = 0;
2018 
2019 	RL_LOCK_ASSERT(sc);
2020 
2021 	ifp = sc->rl_ifp;
2022 	if (ifp->if_mtu > RL_MTU && (sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
2023 		jumbo = 1;
2024 	else
2025 		jumbo = 0;
2026 
2027 	/* Invalidate the descriptor memory */
2028 
2029 	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2030 	    sc->rl_ldata.rl_rx_list_map,
2031 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2032 
2033 	for (i = sc->rl_ldata.rl_rx_prodidx; maxpkt > 0;
2034 	    i = RL_RX_DESC_NXT(sc, i)) {
2035 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2036 			break;
2037 		cur_rx = &sc->rl_ldata.rl_rx_list[i];
2038 		rxstat = le32toh(cur_rx->rl_cmdstat);
2039 		if ((rxstat & RL_RDESC_STAT_OWN) != 0)
2040 			break;
2041 		total_len = rxstat & sc->rl_rxlenmask;
2042 		rxvlan = le32toh(cur_rx->rl_vlanctl);
2043 		if (jumbo != 0)
2044 			m = sc->rl_ldata.rl_jrx_desc[i].rx_m;
2045 		else
2046 			m = sc->rl_ldata.rl_rx_desc[i].rx_m;
2047 
2048 		if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
2049 		    (rxstat & (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) !=
2050 		    (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) {
2051 			/*
2052 			 * RTL8168C or later controllers do not
2053 			 * support multi-fragment packets.
2054 			 */
2055 			re_discard_rxbuf(sc, i);
2056 			continue;
2057 		} else if ((rxstat & RL_RDESC_STAT_EOF) == 0) {
2058 			if (re_newbuf(sc, i) != 0) {
2059 				/*
2060 				 * If this is part of a multi-fragment packet,
2061 				 * discard all the pieces.
2062 				 */
2063 				if (sc->rl_head != NULL) {
2064 					m_freem(sc->rl_head);
2065 					sc->rl_head = sc->rl_tail = NULL;
2066 				}
2067 				re_discard_rxbuf(sc, i);
2068 				continue;
2069 			}
2070 			m->m_len = RE_RX_DESC_BUFLEN;
2071 			if (sc->rl_head == NULL)
2072 				sc->rl_head = sc->rl_tail = m;
2073 			else {
2074 				m->m_flags &= ~M_PKTHDR;
2075 				sc->rl_tail->m_next = m;
2076 				sc->rl_tail = m;
2077 			}
2078 			continue;
2079 		}
2080 
2081 		/*
2082 		 * NOTE: for the 8139C+, the frame length field
2083 		 * is always 12 bits in size, but for the gigE chips,
2084 		 * it is 13 bits (since the max RX frame length is 16K).
2085 		 * Unfortunately, all 32 bits in the status word
2086 		 * were already used, so to make room for the extra
2087 		 * length bit, RealTek took out the 'frame alignment
2088 		 * error' bit and shifted the other status bits
2089 		 * over one slot. The OWN, EOR, FS and LS bits are
2090 		 * still in the same places. We have already extracted
2091 		 * the frame length and checked the OWN bit, so rather
2092 		 * than using an alternate bit mapping, we shift the
2093 		 * status bits one space to the right so we can evaluate
2094 		 * them using the 8169 status as though it was in the
2095 		 * same format as that of the 8139C+.
2096 		 */
2097 		if (sc->rl_type == RL_8169)
2098 			rxstat >>= 1;
2099 
2100 		/*
2101 		 * If total_len > 2^13-1, both _RXERRSUM and _GIANT will be
2102 		 * set; but if the CRC error bit is clear, the frame is valid.
2103 		 */
2104 		if ((rxstat & RL_RDESC_STAT_RXERRSUM) != 0) {
2105 			rxerr = 1;
2106 			if ((sc->rl_flags & RL_FLAG_JUMBOV2) == 0 &&
2107 			    total_len > 8191 &&
2108 			    (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT)
2109 				rxerr = 0;
2110 			if (rxerr != 0) {
2111 				ifp->if_ierrors++;
2112 				/*
2113 				 * If this is part of a multi-fragment packet,
2114 				 * discard all the pieces.
2115 				 */
2116 				if (sc->rl_head != NULL) {
2117 					m_freem(sc->rl_head);
2118 					sc->rl_head = sc->rl_tail = NULL;
2119 				}
2120 				re_discard_rxbuf(sc, i);
2121 				continue;
2122 			}
2123 		}
2124 
2125 		/*
2126 		 * If allocating a replacement mbuf fails,
2127 		 * reload the current one.
2128 		 */
2129 		if (jumbo != 0)
2130 			rxerr = re_jumbo_newbuf(sc, i);
2131 		else
2132 			rxerr = re_newbuf(sc, i);
2133 		if (rxerr != 0) {
2134 			ifp->if_iqdrops++;
2135 			if (sc->rl_head != NULL) {
2136 				m_freem(sc->rl_head);
2137 				sc->rl_head = sc->rl_tail = NULL;
2138 			}
2139 			re_discard_rxbuf(sc, i);
2140 			continue;
2141 		}
2142 
2143 		if (sc->rl_head != NULL) {
2144 			if (jumbo != 0)
2145 				m->m_len = total_len;
2146 			else {
2147 				m->m_len = total_len % RE_RX_DESC_BUFLEN;
2148 				if (m->m_len == 0)
2149 					m->m_len = RE_RX_DESC_BUFLEN;
2150 			}
2151 			/*
2152 			 * Special case: if there are 4 bytes or fewer
2153 			 * in this buffer, the mbuf can be discarded:
2154 			 * the last 4 bytes are the CRC, which we don't
2155 			 * care about anyway.
2156 			 */
2157 			if (m->m_len <= ETHER_CRC_LEN) {
2158 				sc->rl_tail->m_len -=
2159 				    (ETHER_CRC_LEN - m->m_len);
2160 				m_freem(m);
2161 			} else {
2162 				m->m_len -= ETHER_CRC_LEN;
2163 				m->m_flags &= ~M_PKTHDR;
2164 				sc->rl_tail->m_next = m;
2165 			}
2166 			m = sc->rl_head;
2167 			sc->rl_head = sc->rl_tail = NULL;
2168 			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
2169 		} else
2170 			m->m_pkthdr.len = m->m_len =
2171 			    (total_len - ETHER_CRC_LEN);
2172 
2173 #ifdef RE_FIXUP_RX
2174 		re_fixup_rx(m);
2175 #endif
2176 		ifp->if_ipackets++;
2177 		m->m_pkthdr.rcvif = ifp;
2178 
2179 		/* Do RX checksumming if enabled */
2180 
2181 		if (ifp->if_capenable & IFCAP_RXCSUM) {
2182 			if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
2183 				/* Check IP header checksum */
2184 				if (rxstat & RL_RDESC_STAT_PROTOID)
2185 					m->m_pkthdr.csum_flags |=
2186 					    CSUM_IP_CHECKED;
2187 				if (!(rxstat & RL_RDESC_STAT_IPSUMBAD))
2188 					m->m_pkthdr.csum_flags |=
2189 					    CSUM_IP_VALID;
2190 
2191 				/* Check TCP/UDP checksum */
2192 				if ((RL_TCPPKT(rxstat) &&
2193 				    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
2194 				    (RL_UDPPKT(rxstat) &&
2195 				     !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
2196 					m->m_pkthdr.csum_flags |=
2197 						CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
2198 					m->m_pkthdr.csum_data = 0xffff;
2199 				}
2200 			} else {
2201 				/*
2202 			 * RTL8168C/RTL8168CP/RTL8111C/RTL8111CP
2203 				 */
2204 				if ((rxstat & RL_RDESC_STAT_PROTOID) &&
2205 				    (rxvlan & RL_RDESC_IPV4))
2206 					m->m_pkthdr.csum_flags |=
2207 					    CSUM_IP_CHECKED;
2208 				if (!(rxstat & RL_RDESC_STAT_IPSUMBAD) &&
2209 				    (rxvlan & RL_RDESC_IPV4))
2210 					m->m_pkthdr.csum_flags |=
2211 					    CSUM_IP_VALID;
2212 				if (((rxstat & RL_RDESC_STAT_TCP) &&
2213 				    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
2214 				    ((rxstat & RL_RDESC_STAT_UDP) &&
2215 				    !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
2216 					m->m_pkthdr.csum_flags |=
2217 						CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
2218 					m->m_pkthdr.csum_data = 0xffff;
2219 				}
2220 			}
2221 		}
2222 		maxpkt--;
2223 		if (rxvlan & RL_RDESC_VLANCTL_TAG) {
2224 			m->m_pkthdr.ether_vtag =
2225 			    bswap16((rxvlan & RL_RDESC_VLANCTL_DATA));
2226 			m->m_flags |= M_VLANTAG;
2227 		}
2228 		RL_UNLOCK(sc);
2229 		(*ifp->if_input)(ifp, m);
2230 		RL_LOCK(sc);
2231 		rx_npkts++;
2232 	}
2233 
2234 	/* Flush the RX DMA ring */
2235 
2236 	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2237 	    sc->rl_ldata.rl_rx_list_map,
2238 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2239 
2240 	sc->rl_ldata.rl_rx_prodidx = i;
2241 
2242 	if (rx_npktsp != NULL)
2243 		*rx_npktsp = rx_npkts;
2244 	if (maxpkt)
2245 		return (EAGAIN);
2246 
2247 	return (0);
2248 }
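
/*
 * The rl_head/rl_tail bookkeeping in re_rxeof() implements fragment
 * reassembly for the pre-8168C gigE parts, where one frame may span
 * many 2K buffers.  A sketch of the append step; frag_append is an
 * illustrative name, not part of the driver:
 */
#ifdef notdef
static void
frag_append(struct mbuf **headp, struct mbuf **tailp, struct mbuf *m)
{

	if (*headp == NULL)
		*headp = *tailp = m;		/* first fragment keeps M_PKTHDR */
	else {
		m->m_flags &= ~M_PKTHDR;	/* demote to plain data mbuf */
		(*tailp)->m_next = m;
		*tailp = m;
	}
}
#endif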
2249 
2250 static void
2251 re_txeof(struct rl_softc *sc)
2252 {
2253 	struct ifnet		*ifp;
2254 	struct rl_txdesc	*txd;
2255 	u_int32_t		txstat;
2256 	int			cons;
2257 
2258 	cons = sc->rl_ldata.rl_tx_considx;
2259 	if (cons == sc->rl_ldata.rl_tx_prodidx)
2260 		return;
2261 
2262 	ifp = sc->rl_ifp;
2263 	/* Invalidate the TX descriptor list */
2264 	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
2265 	    sc->rl_ldata.rl_tx_list_map,
2266 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2267 
2268 	for (; cons != sc->rl_ldata.rl_tx_prodidx;
2269 	    cons = RL_TX_DESC_NXT(sc, cons)) {
2270 		txstat = le32toh(sc->rl_ldata.rl_tx_list[cons].rl_cmdstat);
2271 		if (txstat & RL_TDESC_STAT_OWN)
2272 			break;
2273 		/*
2274 		 * We only stash mbufs in the last descriptor
2275 		 * in a fragment chain, which also happens to
2276 		 * be the only place where the TX status bits
2277 		 * are valid.
2278 		 */
2279 		if (txstat & RL_TDESC_CMD_EOF) {
2280 			txd = &sc->rl_ldata.rl_tx_desc[cons];
2281 			bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
2282 			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2283 			bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag,
2284 			    txd->tx_dmamap);
2285 			KASSERT(txd->tx_m != NULL,
2286 			    ("%s: freeing NULL mbufs!", __func__));
2287 			m_freem(txd->tx_m);
2288 			txd->tx_m = NULL;
2289 			if (txstat & (RL_TDESC_STAT_EXCESSCOL|
2290 			    RL_TDESC_STAT_COLCNT))
2291 				ifp->if_collisions++;
2292 			if (txstat & RL_TDESC_STAT_TXERRSUM)
2293 				ifp->if_oerrors++;
2294 			else
2295 				ifp->if_opackets++;
2296 		}
2297 		sc->rl_ldata.rl_tx_free++;
2298 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2299 	}
2300 	sc->rl_ldata.rl_tx_considx = cons;
2301 
2302 	/* No changes made to the TX ring, so no flush needed */
2303 
2304 	if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) {
2305 #ifdef RE_TX_MODERATION
2306 		/*
2307 		 * If not all descriptors have been reaped yet, reload
2308 		 * the timer so that we will eventually get another
2309 		 * interrupt that will cause us to re-enter this routine.
2310 		 * This is done in case the transmitter has gone idle.
2311 		 */
2312 		CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2313 #endif
2314 	} else
2315 		sc->rl_watchdog_timer = 0;
2316 }
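
/*
 * re_txeof() maintains the invariant that rl_tx_free equals
 * rl_tx_desc_cnt minus the number of descriptors still owned by the
 * chip.  A sketch that merely counts reclaimable descriptors using
 * the driver's own structures; tx_reclaimable is illustrative, not
 * part of the driver:
 */
#ifdef notdef
static int
tx_reclaimable(struct rl_softc *sc)
{
	int cons, n;

	n = 0;
	for (cons = sc->rl_ldata.rl_tx_considx;
	    cons != sc->rl_ldata.rl_tx_prodidx;
	    cons = RL_TX_DESC_NXT(sc, cons)) {
		if (le32toh(sc->rl_ldata.rl_tx_list[cons].rl_cmdstat) &
		    RL_TDESC_STAT_OWN)
			break;		/* chip still owns the rest */
		n++;
	}
	return (n);
}
#endif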
2317 
2318 static void
2319 re_tick(void *xsc)
2320 {
2321 	struct rl_softc		*sc;
2322 	struct mii_data		*mii;
2323 
2324 	sc = xsc;
2325 
2326 	RL_LOCK_ASSERT(sc);
2327 
2328 	mii = device_get_softc(sc->rl_miibus);
2329 	mii_tick(mii);
2330 	if ((sc->rl_flags & RL_FLAG_LINK) == 0)
2331 		re_miibus_statchg(sc->rl_dev);
2332 	/*
2333 	 * Reclaim transmitted frames here. Technically it is not
2334 	 * necessary to do this here, but it ensures periodic reclamation
2335 	 * regardless of Tx completion interrupts, which seem to be
2336 	 * lost on PCIe-based controllers under certain situations.
2337 	 */
2338 	re_txeof(sc);
2339 	re_watchdog(sc);
2340 	callout_reset(&sc->rl_stat_callout, hz, re_tick, sc);
2341 }
2342 
2343 #ifdef DEVICE_POLLING
2344 static int
2345 re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2346 {
2347 	struct rl_softc *sc = ifp->if_softc;
2348 	int rx_npkts = 0;
2349 
2350 	RL_LOCK(sc);
2351 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2352 		rx_npkts = re_poll_locked(ifp, cmd, count);
2353 	RL_UNLOCK(sc);
2354 	return (rx_npkts);
2355 }
2356 
2357 static int
2358 re_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
2359 {
2360 	struct rl_softc *sc = ifp->if_softc;
2361 	int rx_npkts;
2362 
2363 	RL_LOCK_ASSERT(sc);
2364 
2365 	sc->rxcycles = count;
2366 	re_rxeof(sc, &rx_npkts);
2367 	re_txeof(sc);
2368 
2369 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2370 		re_start_locked(ifp);
2371 
2372 	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
2373 		u_int16_t       status;
2374 
2375 		status = CSR_READ_2(sc, RL_ISR);
2376 		if (status == 0xffff)
2377 			return (rx_npkts);
2378 		if (status)
2379 			CSR_WRITE_2(sc, RL_ISR, status);
2380 		if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
2381 		    (sc->rl_flags & RL_FLAG_PCIE))
2382 			CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2383 
2384 		/*
2385 		 * XXX check behaviour on receiver stalls.
2386 		 */
2387 
2388 		if (status & RL_ISR_SYSTEM_ERR) {
2389 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2390 			re_init_locked(sc);
2391 		}
2392 	}
2393 	return (rx_npkts);
2394 }
2395 #endif /* DEVICE_POLLING */
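
/*
 * Sketch of the DEVICE_POLLING contract honored above: the kernel's
 * polling loop invokes re_poll() with a 'count' budget and expects
 * the number of RX packets handled in return so it can balance load;
 * the less frequent POLL_AND_CHECK_STATUS command additionally asks
 * the driver to consult its status register for error conditions,
 * which is why RL_ISR is read only on that path.
 */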
2396 
2397 static int
2398 re_intr(void *arg)
2399 {
2400 	struct rl_softc		*sc;
2401 	uint16_t		status;
2402 
2403 	sc = arg;
2404 
2405 	status = CSR_READ_2(sc, RL_ISR);
2406 	if (status == 0xFFFF || (status & RL_INTRS_CPLUS) == 0)
2407 		return (FILTER_STRAY);
2408 	CSR_WRITE_2(sc, RL_IMR, 0);
2409 
2410 	taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask);
2411 
2412 	return (FILTER_HANDLED);
2413 }
2414 
2415 static void
2416 re_int_task(void *arg, int npending)
2417 {
2418 	struct rl_softc		*sc;
2419 	struct ifnet		*ifp;
2420 	u_int16_t		status;
2421 	int			rval = 0;
2422 
2423 	sc = arg;
2424 	ifp = sc->rl_ifp;
2425 
2426 	RL_LOCK(sc);
2427 
2428 	status = CSR_READ_2(sc, RL_ISR);
2429 	CSR_WRITE_2(sc, RL_ISR, status);
2430 
2431 	if (sc->suspended ||
2432 	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2433 		RL_UNLOCK(sc);
2434 		return;
2435 	}
2436 
2437 #ifdef DEVICE_POLLING
2438 	if  (ifp->if_capenable & IFCAP_POLLING) {
2439 		RL_UNLOCK(sc);
2440 		return;
2441 	}
2442 #endif
2443 
2444 	if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW))
2445 		rval = re_rxeof(sc, NULL);
2446 
2447 	/*
2448 	 * Some chips will ignore a second TX request issued
2449 	 * while an existing transmission is in progress. If
2450 	 * the transmitter goes idle but there are still
2451 	 * packets waiting to be sent, we need to restart the
2452 	 * channel here to flush them out. This only seems to
2453 	 * be required with the PCIe devices.
2454 	 */
2455 	if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
2456 	    (sc->rl_flags & RL_FLAG_PCIE))
2457 		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2458 	if (status & (
2459 #ifdef RE_TX_MODERATION
2460 	    RL_ISR_TIMEOUT_EXPIRED|
2461 #else
2462 	    RL_ISR_TX_OK|
2463 #endif
2464 	    RL_ISR_TX_ERR|RL_ISR_TX_DESC_UNAVAIL))
2465 		re_txeof(sc);
2466 
2467 	if (status & RL_ISR_SYSTEM_ERR) {
2468 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2469 		re_init_locked(sc);
2470 	}
2471 
2472 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2473 		re_start_locked(ifp);
2474 
2475 	RL_UNLOCK(sc);
2476 
2477 	if ((CSR_READ_2(sc, RL_ISR) & RL_INTRS_CPLUS) || rval) {
2478 		taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask);
2479 		return;
2480 	}
2481 
2482 	CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
2483 }
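
/*
 * The tail of re_int_task() closes the classic unmask race: an event
 * arriving between the final status check and the IMR write would be
 * absorbed while the chip is masked and would never raise a new
 * interrupt.  In outline:
 *
 *	if (ISR still shows work, or re_rxeof() left packets behind)
 *		requeue the task and stay masked;
 *	else
 *		unmask (safe: nothing is pending).
 */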
2484 
2485 static void
2486 re_intr_msi(void *xsc)
2487 {
2488 	struct rl_softc		*sc;
2489 	struct ifnet		*ifp;
2490 	uint16_t		intrs, status;
2491 
2492 	sc = xsc;
2493 	RL_LOCK(sc);
2494 
2495 	ifp = sc->rl_ifp;
2496 #ifdef DEVICE_POLLING
2497 	if (ifp->if_capenable & IFCAP_POLLING) {
2498 		RL_UNLOCK(sc);
2499 		return;
2500 	}
2501 #endif
2502 	/* Disable interrupts. */
2503 	CSR_WRITE_2(sc, RL_IMR, 0);
2504 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2505 		RL_UNLOCK(sc);
2506 		return;
2507 	}
2508 
2509 	intrs = RL_INTRS_CPLUS;
2510 	status = CSR_READ_2(sc, RL_ISR);
2511 	CSR_WRITE_2(sc, RL_ISR, status);
2512 	if (sc->rl_int_rx_act > 0) {
2513 		intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW |
2514 		    RL_ISR_RX_OVERRUN);
2515 		status &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW |
2516 		    RL_ISR_RX_OVERRUN);
2517 	}
2518 
2519 	if (status & (RL_ISR_TIMEOUT_EXPIRED | RL_ISR_RX_OK | RL_ISR_RX_ERR |
2520 	    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) {
2521 		re_rxeof(sc, NULL);
2522 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2523 			if (sc->rl_int_rx_mod != 0 &&
2524 			    (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR |
2525 			    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) != 0) {
2526 				/* Rearm one-shot timer. */
2527 				CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2528 				intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR |
2529 				    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN);
2530 				sc->rl_int_rx_act = 1;
2531 			} else {
2532 				intrs |= RL_ISR_RX_OK | RL_ISR_RX_ERR |
2533 				    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN;
2534 				sc->rl_int_rx_act = 0;
2535 			}
2536 		}
2537 	}
2538 
2539 	/*
2540 	 * Some chips will ignore a second TX request issued
2541 	 * while an existing transmission is in progress. If
2542 	 * the transmitter goes idle but there are still
2543 	 * packets waiting to be sent, we need to restart the
2544 	 * channel here to flush them out. This only seems to
2545 	 * be required with the PCIe devices.
2546 	 */
2547 	if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
2548 	    (sc->rl_flags & RL_FLAG_PCIE))
2549 		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2550 	if (status & (RL_ISR_TX_OK | RL_ISR_TX_ERR | RL_ISR_TX_DESC_UNAVAIL))
2551 		re_txeof(sc);
2552 
2553 	if (status & RL_ISR_SYSTEM_ERR) {
2554 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2555 		re_init_locked(sc);
2556 	}
2557 
2558 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2559 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2560 			re_start_locked(ifp);
2561 		CSR_WRITE_2(sc, RL_IMR, intrs);
2562 	}
2563 	RL_UNLOCK(sc);
2564 }
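
/*
 * RX interrupt moderation state machine driven by rl_int_rx_act in
 * re_intr_msi() above:
 *
 *	act == 0: an RX event fires; the ring is drained, the RX
 *		  interrupt sources are masked, and the one-shot
 *		  countdown is armed (CSR_WRITE_4(sc, RL_TIMERCNT, 1));
 *		  act becomes 1.
 *	act == 1: the timer interrupt fires; the ring is drained
 *		  again, and if more RX traffic arrived the timer is
 *		  re-armed (stay in 1), otherwise the RX sources are
 *		  unmasked and act returns to 0.
 *
 * This batches RX completions under load while bounding the added
 * latency by the rl_int_rx_mod setting.
 */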
2565 
2566 static int
2567 re_encap(struct rl_softc *sc, struct mbuf **m_head)
2568 {
2569 	struct rl_txdesc	*txd, *txd_last;
2570 	bus_dma_segment_t	segs[RL_NTXSEGS];
2571 	bus_dmamap_t		map;
2572 	struct mbuf		*m_new;
2573 	struct rl_desc		*desc;
2574 	int			nsegs, prod;
2575 	int			i, error, ei, si;
2576 	int			padlen;
2577 	uint32_t		cmdstat, csum_flags, vlanctl;
2578 
2579 	RL_LOCK_ASSERT(sc);
2580 	M_ASSERTPKTHDR((*m_head));
2581 
2582 	/*
2583 	 * With some of the RealTek chips, using the checksum offload
2584 	 * support in conjunction with the autopadding feature results
2585 	 * in the transmission of corrupt frames. For example, if we
2586 	 * need to send a really small IP fragment that's less than 60
2587 	 * bytes in size, and IP header checksumming is enabled, the
2588 	 * resulting ethernet frame that appears on the wire will
2589 		 * have a garbled payload. To work around this, if TX IP checksum
2590 	 * offload is enabled, we always manually pad short frames out
2591 	 * to the minimum ethernet frame size.
2592 	 */
2593 	if ((sc->rl_flags & RL_FLAG_AUTOPAD) == 0 &&
2594 	    (*m_head)->m_pkthdr.len < RL_IP4CSUMTX_PADLEN &&
2595 	    ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) != 0) {
2596 		padlen = RL_MIN_FRAMELEN - (*m_head)->m_pkthdr.len;
2597 		if (M_WRITABLE(*m_head) == 0) {
2598 			/* Get a writable copy. */
2599 			m_new = m_dup(*m_head, M_DONTWAIT);
2600 			m_freem(*m_head);
2601 			if (m_new == NULL) {
2602 				*m_head = NULL;
2603 				return (ENOBUFS);
2604 			}
2605 			*m_head = m_new;
2606 		}
2607 		if ((*m_head)->m_next != NULL ||
2608 		    M_TRAILINGSPACE(*m_head) < padlen) {
2609 			m_new = m_defrag(*m_head, M_DONTWAIT);
2610 			if (m_new == NULL) {
2611 				m_freem(*m_head);
2612 				*m_head = NULL;
2613 				return (ENOBUFS);
2614 			}
2615 		} else
2616 			m_new = *m_head;
2617 
2618 		/*
2619 		 * Manually pad short frames, and zero the pad space
2620 		 * to avoid leaking data.
2621 		 */
2622 		bzero(mtod(m_new, char *) + m_new->m_pkthdr.len, padlen);
2623 		m_new->m_pkthdr.len += padlen;
2624 		m_new->m_len = m_new->m_pkthdr.len;
2625 		*m_head = m_new;
2626 	}
2627 
2628 	prod = sc->rl_ldata.rl_tx_prodidx;
2629 	txd = &sc->rl_ldata.rl_tx_desc[prod];
2630 	error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
2631 	    *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2632 	if (error == EFBIG) {
2633 		m_new = m_collapse(*m_head, M_DONTWAIT, RL_NTXSEGS);
2634 		if (m_new == NULL) {
2635 			m_freem(*m_head);
2636 			*m_head = NULL;
2637 			return (ENOBUFS);
2638 		}
2639 		*m_head = m_new;
2640 		error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag,
2641 		    txd->tx_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2642 		if (error != 0) {
2643 			m_freem(*m_head);
2644 			*m_head = NULL;
2645 			return (error);
2646 		}
2647 	} else if (error != 0)
2648 		return (error);
2649 	if (nsegs == 0) {
2650 		m_freem(*m_head);
2651 		*m_head = NULL;
2652 		return (EIO);
2653 	}
2654 
2655 	/* Check for number of available descriptors. */
2656 	if (sc->rl_ldata.rl_tx_free - nsegs <= 1) {
2657 		bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap);
2658 		return (ENOBUFS);
2659 	}
2660 
2661 	bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
2662 	    BUS_DMASYNC_PREWRITE);
2663 
2664 	/*
2665 	 * Set up checksum offload. Note: checksum offload bits must
2666 	 * appear in all descriptors of a multi-descriptor transmit
2667 	 * attempt. Testing done with an 8169 chip shows that this is
2668 	 * a hard requirement.
2669 	 */
2670 	vlanctl = 0;
2671 	csum_flags = 0;
2672 	if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2673 		if ((sc->rl_flags & RL_FLAG_DESCV2) != 0) {
2674 			csum_flags |= RL_TDESC_CMD_LGSEND;
2675 			vlanctl |= ((uint32_t)(*m_head)->m_pkthdr.tso_segsz <<
2676 			    RL_TDESC_CMD_MSSVALV2_SHIFT);
2677 		} else {
2678 			csum_flags |= RL_TDESC_CMD_LGSEND |
2679 			    ((uint32_t)(*m_head)->m_pkthdr.tso_segsz <<
2680 			    RL_TDESC_CMD_MSSVAL_SHIFT);
2681 		}
2682 	} else {
2683 		/*
2684 		 * Unconditionally enable IP checksum if TCP or UDP
2685 		 * checksum is required. Otherwise, the TCP/UDP checksum
2686 		 * request has no effect.
2687 		 */
2688 		if (((*m_head)->m_pkthdr.csum_flags & RE_CSUM_FEATURES) != 0) {
2689 			if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
2690 				csum_flags |= RL_TDESC_CMD_IPCSUM;
2691 				if (((*m_head)->m_pkthdr.csum_flags &
2692 				    CSUM_TCP) != 0)
2693 					csum_flags |= RL_TDESC_CMD_TCPCSUM;
2694 				if (((*m_head)->m_pkthdr.csum_flags &
2695 				    CSUM_UDP) != 0)
2696 					csum_flags |= RL_TDESC_CMD_UDPCSUM;
2697 			} else {
2698 				vlanctl |= RL_TDESC_CMD_IPCSUMV2;
2699 				if (((*m_head)->m_pkthdr.csum_flags &
2700 				    CSUM_TCP) != 0)
2701 					vlanctl |= RL_TDESC_CMD_TCPCSUMV2;
2702 				if (((*m_head)->m_pkthdr.csum_flags &
2703 				    CSUM_UDP) != 0)
2704 					vlanctl |= RL_TDESC_CMD_UDPCSUMV2;
2705 			}
2706 		}
2707 	}
2708 
2709 	/*
2710 	 * Set up hardware VLAN tagging. Note: vlan tag info must
2711 	 * appear in all descriptors of a multi-descriptor
2712 	 * transmission attempt.
2713 	 */
2714 	if ((*m_head)->m_flags & M_VLANTAG)
2715 		vlanctl |= bswap16((*m_head)->m_pkthdr.ether_vtag) |
2716 		    RL_TDESC_VLANCTL_TAG;
2717 
2718 	si = prod;
2719 	for (i = 0; i < nsegs; i++, prod = RL_TX_DESC_NXT(sc, prod)) {
2720 		desc = &sc->rl_ldata.rl_tx_list[prod];
2721 		desc->rl_vlanctl = htole32(vlanctl);
2722 		desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr));
2723 		desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr));
2724 		cmdstat = segs[i].ds_len;
2725 		if (i != 0)
2726 			cmdstat |= RL_TDESC_CMD_OWN;
2727 		if (prod == sc->rl_ldata.rl_tx_desc_cnt - 1)
2728 			cmdstat |= RL_TDESC_CMD_EOR;
2729 		desc->rl_cmdstat = htole32(cmdstat | csum_flags);
2730 		sc->rl_ldata.rl_tx_free--;
2731 	}
2732 	/* Update producer index. */
2733 	sc->rl_ldata.rl_tx_prodidx = prod;
2734 
2735 	/* Set EOF on the last descriptor. */
2736 	ei = RL_TX_DESC_PRV(sc, prod);
2737 	desc = &sc->rl_ldata.rl_tx_list[ei];
2738 	desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF);
2739 
2740 	desc = &sc->rl_ldata.rl_tx_list[si];
2741 	/* Set SOF and transfer ownership of packet to the chip. */
2742 	desc->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF);
2743 
2744 	/*
2745 	 * Ensure that the map for this transmission
2746 	 * is placed at the array index of the last descriptor
2747 	 * in this chain.  (Swap last and first dmamaps.)
2748 	 */
2749 	txd_last = &sc->rl_ldata.rl_tx_desc[ei];
2750 	map = txd->tx_dmamap;
2751 	txd->tx_dmamap = txd_last->tx_dmamap;
2752 	txd_last->tx_dmamap = map;
2753 	txd_last->tx_m = *m_head;
2754 
2755 	return (0);
2756 }
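
/*
 * Worked example of the short-frame pad in re_encap(), assuming
 * RL_MIN_FRAMELEN is 60, i.e. the 64-byte minimum ethernet frame
 * less its 4-byte CRC: a 46-byte IP fragment with CSUM_IP set is
 * grown in place by padlen = 60 - 46 = 14 zero bytes before DMA
 * mapping, so the chip's autopad logic, which corrupts checksummed
 * runts, is never exercised.
 */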
2757 
2758 static void
2759 re_start(struct ifnet *ifp)
2760 {
2761 	struct rl_softc		*sc;
2762 
2763 	sc = ifp->if_softc;
2764 	RL_LOCK(sc);
2765 	re_start_locked(ifp);
2766 	RL_UNLOCK(sc);
2767 }
2768 
2769 /*
2770  * Main transmit routine for C+ and gigE NICs.
2771  */
2772 static void
2773 re_start_locked(struct ifnet *ifp)
2774 {
2775 	struct rl_softc		*sc;
2776 	struct mbuf		*m_head;
2777 	int			queued;
2778 
2779 	sc = ifp->if_softc;
2780 
2781 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2782 	    IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0)
2783 		return;
2784 
2785 	for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2786 	    sc->rl_ldata.rl_tx_free > 1;) {
2787 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2788 		if (m_head == NULL)
2789 			break;
2790 
2791 		if (re_encap(sc, &m_head) != 0) {
2792 			if (m_head == NULL)
2793 				break;
2794 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2795 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2796 			break;
2797 		}
2798 
2799 		/*
2800 		 * If there's a BPF listener, bounce a copy of this frame
2801 		 * to him.
2802 		 */
2803 		ETHER_BPF_MTAP(ifp, m_head);
2804 
2805 		queued++;
2806 	}
2807 
2808 	if (queued == 0) {
2809 #ifdef RE_TX_MODERATION
2810 		if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt)
2811 			CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2812 #endif
2813 		return;
2814 	}
2815 
2816 	/* Flush the TX descriptors */
2817 
2818 	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
2819 	    sc->rl_ldata.rl_tx_list_map,
2820 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2821 
2822 	CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2823 
2824 #ifdef RE_TX_MODERATION
2825 	/*
2826 	 * Use the countdown timer for interrupt moderation.
2827 	 * 'TX done' interrupts are disabled. Instead, we reset the
2828 	 * countdown timer, which will begin counting until it hits
2829 	 * the value in the TIMERINT register, and then trigger an
2830 	 * interrupt. Each time we write to the TIMERCNT register,
2831 	 * the timer count is reset to 0.
2832 	 */
2833 	CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2834 #endif
2835 
2836 	/*
2837 	 * Set a timeout in case the chip goes out to lunch.
2838 	 */
2839 	sc->rl_watchdog_timer = 5;
2840 }
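
/*
 * Ordering note for the transmit path above: the PREWRITE sync on
 * the descriptor ring must complete before the doorbell write to
 * rl_txstart, because the chip may begin fetching descriptors the
 * moment the doorbell is hit.  The required sequence, in outline:
 *
 *	build descriptors (re_encap);
 *	bus_dmamap_sync(..., BUS_DMASYNC_PREWRITE);
 *	CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
 *	arm the watchdog.
 */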
2841 
2842 static void
2843 re_set_jumbo(struct rl_softc *sc, int jumbo)
2844 {
2845 
2846 	if (sc->rl_hwrev->rl_rev == RL_HWREV_8168E_VL) {
2847 		pci_set_max_read_req(sc->rl_dev, 4096);
2848 		return;
2849 	}
2850 
2851 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
2852 	if (jumbo != 0) {
2853 		CSR_WRITE_1(sc, RL_CFG3, CSR_READ_1(sc, RL_CFG3) |
2854 		    RL_CFG3_JUMBO_EN0);
2855 		switch (sc->rl_hwrev->rl_rev) {
2856 		case RL_HWREV_8168DP:
2857 			break;
2858 		case RL_HWREV_8168E:
2859 			CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) |
2860 			    0x01);
2861 			break;
2862 		default:
2863 			CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) |
2864 			    RL_CFG4_JUMBO_EN1);
2865 		}
2866 	} else {
2867 		CSR_WRITE_1(sc, RL_CFG3, CSR_READ_1(sc, RL_CFG3) &
2868 		    ~RL_CFG3_JUMBO_EN0);
2869 		switch (sc->rl_hwrev->rl_rev) {
2870 		case RL_HWREV_8168DP:
2871 			break;
2872 		case RL_HWREV_8168E:
2873 			CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) &
2874 			    ~0x01);
2875 			break;
2876 		default:
2877 			CSR_WRITE_1(sc, RL_CFG4, CSR_READ_1(sc, RL_CFG4) &
2878 			    ~RL_CFG4_JUMBO_EN1);
2879 		}
2880 	}
2881 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
2882 
2883 	switch (sc->rl_hwrev->rl_rev) {
2884 	case RL_HWREV_8168DP:
2885 		pci_set_max_read_req(sc->rl_dev, 4096);
2886 		break;
2887 	default:
2888 		if (jumbo != 0)
2889 			pci_set_max_read_req(sc->rl_dev, 512);
2890 		else
2891 			pci_set_max_read_req(sc->rl_dev, 4096);
2892 	}
2893 }
2894 
2895 static void
2896 re_init(void *xsc)
2897 {
2898 	struct rl_softc		*sc = xsc;
2899 
2900 	RL_LOCK(sc);
2901 	re_init_locked(sc);
2902 	RL_UNLOCK(sc);
2903 }
2904 
2905 static void
2906 re_init_locked(struct rl_softc *sc)
2907 {
2908 	struct ifnet		*ifp = sc->rl_ifp;
2909 	struct mii_data		*mii;
2910 	uint32_t		reg;
2911 	uint16_t		cfg;
2912 	union {
2913 		uint32_t align_dummy;
2914 		u_char eaddr[ETHER_ADDR_LEN];
2915 	} eaddr;
2916 
2917 	RL_LOCK_ASSERT(sc);
2918 
2919 	mii = device_get_softc(sc->rl_miibus);
2920 
2921 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2922 		return;
2923 
2924 	/*
2925 	 * Cancel pending I/O and free all RX/TX buffers.
2926 	 */
2927 	re_stop(sc);
2928 
2929 	/* Put controller into known state. */
2930 	re_reset(sc);
2931 
2932 	/*
2933 	 * For C+ mode, initialize the RX descriptors and mbufs.
2934 	 */
2935 	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
2936 		if (ifp->if_mtu > RL_MTU) {
2937 			if (re_jrx_list_init(sc) != 0) {
2938 				device_printf(sc->rl_dev,
2939 				    "no memory for jumbo RX buffers\n");
2940 				re_stop(sc);
2941 				return;
2942 			}
2943 			/* Disable checksum offloading for jumbo frames. */
2944 			ifp->if_capenable &= ~(IFCAP_HWCSUM | IFCAP_TSO4);
2945 			ifp->if_hwassist &= ~(RE_CSUM_FEATURES | CSUM_TSO);
2946 		} else {
2947 			if (re_rx_list_init(sc) != 0) {
2948 				device_printf(sc->rl_dev,
2949 				    "no memory for RX buffers\n");
2950 				re_stop(sc);
2951 				return;
2952 			}
2953 		}
2954 		re_set_jumbo(sc, ifp->if_mtu > RL_MTU);
2955 	} else {
2956 		if (re_rx_list_init(sc) != 0) {
2957 			device_printf(sc->rl_dev, "no memory for RX buffers\n");
2958 			re_stop(sc);
2959 			return;
2960 		}
2961 		if ((sc->rl_flags & RL_FLAG_PCIE) != 0 &&
2962 		    pci_get_device(sc->rl_dev) != RT_DEVICEID_8101E) {
2963 			if (ifp->if_mtu > RL_MTU)
2964 				pci_set_max_read_req(sc->rl_dev, 512);
2965 			else
2966 				pci_set_max_read_req(sc->rl_dev, 4096);
2967 		}
2968 	}
2969 	re_tx_list_init(sc);
2970 
2971 	/*
2972 	 * Enable C+ RX and TX mode, as well as VLAN stripping and
2973 	 * RX checksum offload. We must configure the C+ register
2974 	 * before all others.
2975 	 */
2976 	cfg = RL_CPLUSCMD_PCI_MRW;
2977 	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2978 		cfg |= RL_CPLUSCMD_RXCSUM_ENB;
2979 	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2980 		cfg |= RL_CPLUSCMD_VLANSTRIP;
2981 	if ((sc->rl_flags & RL_FLAG_MACSTAT) != 0) {
2982 		cfg |= RL_CPLUSCMD_MACSTAT_DIS;
2983 		/* XXX magic. */
2984 		cfg |= 0x0001;
2985 	} else
2986 		cfg |= RL_CPLUSCMD_RXENB | RL_CPLUSCMD_TXENB;
2987 	CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg);
2988 	if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SC ||
2989 	    sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE) {
2990 		reg = 0x000fff00;
2991 		if ((CSR_READ_1(sc, RL_CFG2) & RL_CFG2_PCI66MHZ) != 0)
2992 			reg |= 0x000000ff;
2993 		if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE)
2994 			reg |= 0x00f00000;
2995 		CSR_WRITE_4(sc, 0x7c, reg);
2996 		/* Disable interrupt mitigation. */
2997 		CSR_WRITE_2(sc, 0xe2, 0);
2998 	}
2999 	/*
3000 	 * Disable TSO if interface MTU size is greater than MSS
3001 	 * allowed in controller.
3002 	 */
3003 	if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) {
3004 		ifp->if_capenable &= ~IFCAP_TSO4;
3005 		ifp->if_hwassist &= ~CSUM_TSO;
3006 	}
3007 
3008 	/*
3009 	 * Init our MAC address.  Even though the chipset
3010 	 * documentation doesn't mention it, we need to enter "Config
3011 	 * register write enable" mode to modify the ID registers.
3012 	 */
3013 	/* Copy MAC address on stack to align. */
3014 	bcopy(IF_LLADDR(ifp), eaddr.eaddr, ETHER_ADDR_LEN);
3015 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
3016 	CSR_WRITE_4(sc, RL_IDR0,
3017 	    htole32(*(u_int32_t *)(&eaddr.eaddr[0])));
3018 	CSR_WRITE_4(sc, RL_IDR4,
3019 	    htole32(*(u_int32_t *)(&eaddr.eaddr[4])));
3020 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3021 
3022 	/*
3023 	 * Load the addresses of the RX and TX lists into the chip.
3024 	 */
3025 
3026 	CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI,
3027 	    RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr));
3028 	CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO,
3029 	    RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr));
3030 
3031 	CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI,
3032 	    RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr));
3033 	CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO,
3034 	    RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr));
3035 
3036 	/*
3037 	 * Enable transmit and receive.
3038 	 */
3039 	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
3040 
3041 	/*
3042 	 * Set the initial TX configuration.
3043 	 */
3044 	if (sc->rl_testmode) {
3045 		if (sc->rl_type == RL_8169)
3046 			CSR_WRITE_4(sc, RL_TXCFG,
3047 			    RL_TXCFG_CONFIG|RL_LOOPTEST_ON);
3048 		else
3049 			CSR_WRITE_4(sc, RL_TXCFG,
3050 			    RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS);
3051 	} else
3052 		CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
3053 
3054 	CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16);
3055 
3056 	/*
3057 	 * Set the initial RX configuration.
3058 	 */
3059 	re_set_rxmode(sc);
3060 
3061 	/* Configure interrupt moderation. */
3062 	if (sc->rl_type == RL_8169) {
3063 		/* Magic from vendor. */
3064 		CSR_WRITE_2(sc, RL_INTRMOD, 0x5100);
3065 	}
3066 
3067 #ifdef DEVICE_POLLING
3068 	/*
3069 	 * Disable interrupts if we are polling.
3070 	 */
3071 	if (ifp->if_capenable & IFCAP_POLLING)
3072 		CSR_WRITE_2(sc, RL_IMR, 0);
3073 	else	/* otherwise ... */
3074 #endif
3075 
3076 	/*
3077 	 * Enable interrupts.
3078 	 */
3079 	if (sc->rl_testmode)
3080 		CSR_WRITE_2(sc, RL_IMR, 0);
3081 	else
3082 		CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
3083 	CSR_WRITE_2(sc, RL_ISR, RL_INTRS_CPLUS);
3084 
3085 	/* Set initial TX threshold */
3086 	sc->rl_txthresh = RL_TX_THRESH_INIT;
3087 
3088 	/* Start RX/TX process. */
3089 	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);
3090 #ifdef notdef
3091 	/* Enable receiver and transmitter. */
3092 	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
3093 #endif
3094 
3095 	/*
3096 	 * Initialize the timer interrupt register so that
3097 	 * a timer interrupt will be generated once the timer
3098 	 * reaches a certain number of ticks. The timer is
3099 	 * reloaded on each transmit.
3100 	 */
3101 #ifdef RE_TX_MODERATION
3102 	/*
3103 	 * Use the timer interrupt register to moderate TX completion
3104 	 * interrupts, which dramatically improves the TX frame rate.
3105 	 */
3106 	if (sc->rl_type == RL_8169)
3107 		CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800);
3108 	else
3109 		CSR_WRITE_4(sc, RL_TIMERINT, 0x400);
3110 #else
3111 	/*
3112 	 * Use the timer interrupt register to moderate RX
3113 	 * interrupts.
3114 	 */
3115 	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 &&
3116 	    intr_filter == 0) {
3117 		if (sc->rl_type == RL_8169)
3118 			CSR_WRITE_4(sc, RL_TIMERINT_8169,
3119 			    RL_USECS(sc->rl_int_rx_mod));
3120 	} else {
3121 		if (sc->rl_type == RL_8169)
3122 			CSR_WRITE_4(sc, RL_TIMERINT_8169, RL_USECS(0));
3123 	}
3124 #endif
3125 
3126 	/*
3127 	 * For 8169 gigE NICs, set the max allowed RX packet
3128 	 * size so we can receive jumbo frames.
3129 	 */
3130 	if (sc->rl_type == RL_8169) {
3131 		if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
3132 			/*
3133 			 * For controllers that use the new jumbo frame
3134 			 * scheme, set the maximum jumbo frame size
3135 			 * depending on the controller revision.
3136 			 */
3137 			if (ifp->if_mtu > RL_MTU)
3138 				CSR_WRITE_2(sc, RL_MAXRXPKTLEN,
3139 				    sc->rl_hwrev->rl_max_mtu +
3140 				    ETHER_VLAN_ENCAP_LEN + ETHER_HDR_LEN +
3141 				    ETHER_CRC_LEN);
3142 			else
3143 				CSR_WRITE_2(sc, RL_MAXRXPKTLEN,
3144 				    RE_RX_DESC_BUFLEN);
3145 		} else if ((sc->rl_flags & RL_FLAG_PCIE) != 0 &&
3146 		    sc->rl_hwrev->rl_max_mtu == RL_MTU) {
3147 			/* RTL810x has no jumbo frame support. */
3148 			CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN);
3149 		} else
3150 			CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383);
3151 	}
3152 
3153 	if (sc->rl_testmode)
3154 		return;
3155 
3156 	mii_mediachg(mii);
3157 
3158 	CSR_WRITE_1(sc, RL_CFG1, CSR_READ_1(sc, RL_CFG1) | RL_CFG1_DRVLOAD);
3159 
3160 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3161 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3162 
3163 	sc->rl_flags &= ~RL_FLAG_LINK;
3164 	sc->rl_watchdog_timer = 0;
3165 	callout_reset(&sc->rl_stat_callout, hz, re_tick, sc);
3166 }
3167 
3168 /*
3169  * Set media options.
3170  */
3171 static int
3172 re_ifmedia_upd(struct ifnet *ifp)
3173 {
3174 	struct rl_softc		*sc;
3175 	struct mii_data		*mii;
3176 	int			error;
3177 
3178 	sc = ifp->if_softc;
3179 	mii = device_get_softc(sc->rl_miibus);
3180 	RL_LOCK(sc);
3181 	error = mii_mediachg(mii);
3182 	RL_UNLOCK(sc);
3183 
3184 	return (error);
3185 }
3186 
3187 /*
3188  * Report current media status.
3189  */
3190 static void
3191 re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3192 {
3193 	struct rl_softc		*sc;
3194 	struct mii_data		*mii;
3195 
3196 	sc = ifp->if_softc;
3197 	mii = device_get_softc(sc->rl_miibus);
3198 
3199 	RL_LOCK(sc);
3200 	mii_pollstat(mii);
3201 	RL_UNLOCK(sc);
3202 	ifmr->ifm_active = mii->mii_media_active;
3203 	ifmr->ifm_status = mii->mii_media_status;
3204 }
3205 
3206 static int
3207 re_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3208 {
3209 	struct rl_softc		*sc = ifp->if_softc;
3210 	struct ifreq		*ifr = (struct ifreq *) data;
3211 	struct mii_data		*mii;
3212 	int			error = 0;
3213 
3214 	switch (command) {
3215 	case SIOCSIFMTU:
3216 		if (ifr->ifr_mtu < ETHERMIN ||
3217 		    ifr->ifr_mtu > sc->rl_hwrev->rl_max_mtu) {
3218 			error = EINVAL;
3219 			break;
3220 		}
3221 		RL_LOCK(sc);
3222 		if (ifp->if_mtu != ifr->ifr_mtu) {
3223 			ifp->if_mtu = ifr->ifr_mtu;
3224 			if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
3225 			    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
3226 				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3227 				re_init_locked(sc);
3228 			}
3229 			if (ifp->if_mtu > RL_TSO_MTU &&
3230 			    (ifp->if_capenable & IFCAP_TSO4) != 0) {
3231 				ifp->if_capenable &= ~(IFCAP_TSO4 |
3232 				    IFCAP_VLAN_HWTSO);
3233 				ifp->if_hwassist &= ~CSUM_TSO;
3234 			}
3235 			VLAN_CAPABILITIES(ifp);
3236 		}
3237 		RL_UNLOCK(sc);
3238 		break;
3239 	case SIOCSIFFLAGS:
3240 		RL_LOCK(sc);
3241 		if ((ifp->if_flags & IFF_UP) != 0) {
3242 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
3243 				if (((ifp->if_flags ^ sc->rl_if_flags)
3244 				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3245 					re_set_rxmode(sc);
3246 			} else
3247 				re_init_locked(sc);
3248 		} else {
3249 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3250 				re_stop(sc);
3251 		}
3252 		sc->rl_if_flags = ifp->if_flags;
3253 		RL_UNLOCK(sc);
3254 		break;
3255 	case SIOCADDMULTI:
3256 	case SIOCDELMULTI:
3257 		RL_LOCK(sc);
3258 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3259 			re_set_rxmode(sc);
3260 		RL_UNLOCK(sc);
3261 		break;
3262 	case SIOCGIFMEDIA:
3263 	case SIOCSIFMEDIA:
3264 		mii = device_get_softc(sc->rl_miibus);
3265 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
3266 		break;
3267 	case SIOCSIFCAP:
3268 	    {
3269 		int mask, reinit;
3270 
3271 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3272 		reinit = 0;
3273 #ifdef DEVICE_POLLING
3274 		if (mask & IFCAP_POLLING) {
3275 			if (ifr->ifr_reqcap & IFCAP_POLLING) {
3276 				error = ether_poll_register(re_poll, ifp);
3277 				if (error)
3278 					return (error);
3279 				RL_LOCK(sc);
3280 				/* Disable interrupts */
3281 				CSR_WRITE_2(sc, RL_IMR, 0x0000);
3282 				ifp->if_capenable |= IFCAP_POLLING;
3283 				RL_UNLOCK(sc);
3284 			} else {
3285 				error = ether_poll_deregister(ifp);
3286 				/* Enable interrupts. */
3287 				RL_LOCK(sc);
3288 				CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
3289 				ifp->if_capenable &= ~IFCAP_POLLING;
3290 				RL_UNLOCK(sc);
3291 			}
3292 		}
3293 #endif /* DEVICE_POLLING */
3294 		if ((mask & IFCAP_TXCSUM) != 0 &&
3295 		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
3296 			ifp->if_capenable ^= IFCAP_TXCSUM;
3297 			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
3298 				ifp->if_hwassist |= RE_CSUM_FEATURES;
3299 			else
3300 				ifp->if_hwassist &= ~RE_CSUM_FEATURES;
3301 			reinit = 1;
3302 		}
3303 		if ((mask & IFCAP_RXCSUM) != 0 &&
3304 		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
3305 			ifp->if_capenable ^= IFCAP_RXCSUM;
3306 			reinit = 1;
3307 		}
3308 		if ((mask & IFCAP_TSO4) != 0 &&
3309 		    (ifp->if_capabilities & IFCAP_TSO) != 0) {
3310 			ifp->if_capenable ^= IFCAP_TSO4;
3311 			if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
3312 				ifp->if_hwassist |= CSUM_TSO;
3313 			else
3314 				ifp->if_hwassist &= ~CSUM_TSO;
3315 			if (ifp->if_mtu > RL_TSO_MTU &&
3316 			    (ifp->if_capenable & IFCAP_TSO4) != 0) {
3317 				ifp->if_capenable &= ~IFCAP_TSO4;
3318 				ifp->if_hwassist &= ~CSUM_TSO;
3319 			}
3320 		}
3321 		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
3322 		    (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
3323 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
3324 		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
3325 		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
3326 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
3327 			/* TSO over VLAN requires VLAN hardware tagging. */
3328 			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
3329 				ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
3330 			reinit = 1;
3331 		}
3332 		if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
3333 		    (mask & (IFCAP_HWCSUM | IFCAP_TSO4 |
3334 		    IFCAP_VLAN_HWTSO)) != 0)
3335 			reinit = 1;
3336 		if ((mask & IFCAP_WOL) != 0 &&
3337 		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
3338 			if ((mask & IFCAP_WOL_UCAST) != 0)
3339 				ifp->if_capenable ^= IFCAP_WOL_UCAST;
3340 			if ((mask & IFCAP_WOL_MCAST) != 0)
3341 				ifp->if_capenable ^= IFCAP_WOL_MCAST;
3342 			if ((mask & IFCAP_WOL_MAGIC) != 0)
3343 				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
3344 		}
3345 		if (reinit && ifp->if_drv_flags & IFF_DRV_RUNNING) {
3346 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3347 			re_init(sc);
3348 		}
3349 		VLAN_CAPABILITIES(ifp);
3350 	    }
3351 		break;
3352 	default:
3353 		error = ether_ioctl(ifp, command, data);
3354 		break;
3355 	}
3356 
3357 	return (error);
3358 }
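
/*
 * The SIOCSIFCAP cases above all rely on the standard mask/toggle
 * idiom: mask = ifr->ifr_reqcap ^ ifp->if_capenable leaves a bit set
 * only for capabilities the caller wants changed, so each feature is
 * flipped with if_capenable ^= FLAG exactly when its mask bit is set
 * and the hardware advertises the capability.  For example, a request
 * that differs only in IFCAP_RXCSUM yields mask == IFCAP_RXCSUM and
 * leaves every other capability untouched.
 */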
3359 
3360 static void
3361 re_watchdog(struct rl_softc *sc)
3362 {
3363 	struct ifnet		*ifp;
3364 
3365 	RL_LOCK_ASSERT(sc);
3366 
3367 	if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer != 0)
3368 		return;
3369 
3370 	ifp = sc->rl_ifp;
3371 	re_txeof(sc);
3372 	if (sc->rl_ldata.rl_tx_free == sc->rl_ldata.rl_tx_desc_cnt) {
3373 		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
3374 		    "-- recovering\n");
3375 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3376 			re_start_locked(ifp);
3377 		return;
3378 	}
3379 
3380 	if_printf(ifp, "watchdog timeout\n");
3381 	ifp->if_oerrors++;
3382 
3383 	re_rxeof(sc, NULL);
3384 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3385 	re_init_locked(sc);
3386 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3387 		re_start_locked(ifp);
3388 }
3389 
3390 /*
3391  * Stop the adapter and free any mbufs allocated to the
3392  * RX and TX lists.
3393  */
3394 static void
3395 re_stop(struct rl_softc *sc)
3396 {
3397 	int			i;
3398 	struct ifnet		*ifp;
3399 	struct rl_txdesc	*txd;
3400 	struct rl_rxdesc	*rxd;
3401 
3402 	RL_LOCK_ASSERT(sc);
3403 
3404 	ifp = sc->rl_ifp;
3405 
3406 	sc->rl_watchdog_timer = 0;
3407 	callout_stop(&sc->rl_stat_callout);
3408 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3409 
3410 	if ((sc->rl_flags & RL_FLAG_CMDSTOP) != 0)
3411 		CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB |
3412 		    RL_CMD_RX_ENB);
3413 	else
3414 		CSR_WRITE_1(sc, RL_COMMAND, 0x00);
3415 	DELAY(1000);
3416 	CSR_WRITE_2(sc, RL_IMR, 0x0000);
3417 	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
3418 
3419 	if (sc->rl_head != NULL) {
3420 		m_freem(sc->rl_head);
3421 		sc->rl_head = sc->rl_tail = NULL;
3422 	}
3423 
3424 	/* Free the TX list buffers. */
3425 
3426 	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
3427 		txd = &sc->rl_ldata.rl_tx_desc[i];
3428 		if (txd->tx_m != NULL) {
3429 			bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
3430 			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3431 			bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag,
3432 			    txd->tx_dmamap);
3433 			m_freem(txd->tx_m);
3434 			txd->tx_m = NULL;
3435 		}
3436 	}
3437 
3438 	/* Free the RX list buffers. */
3439 
3440 	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
3441 		rxd = &sc->rl_ldata.rl_rx_desc[i];
3442 		if (rxd->rx_m != NULL) {
3443 			bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
3444 			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3445 			bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag,
3446 			    rxd->rx_dmamap);
3447 			m_freem(rxd->rx_m);
3448 			rxd->rx_m = NULL;
3449 		}
3450 	}
3451 }
3452 
3453 /*
3454  * Device suspend routine.  Stop the interface and save some PCI
3455  * settings in case the BIOS doesn't restore them properly on
3456  * resume.
3457  */
3458 static int
3459 re_suspend(device_t dev)
3460 {
3461 	struct rl_softc		*sc;
3462 
3463 	sc = device_get_softc(dev);
3464 
3465 	RL_LOCK(sc);
3466 	re_stop(sc);
3467 	re_setwol(sc);
3468 	sc->suspended = 1;
3469 	RL_UNLOCK(sc);
3470 
3471 	return (0);
3472 }
3473 
3474 /*
3475  * Device resume routine.  Restore some PCI settings in case the BIOS
3476  * doesn't, re-enable busmastering, and restart the interface if
3477  * appropriate.
3478  */
3479 static int
3480 re_resume(device_t dev)
3481 {
3482 	struct rl_softc		*sc;
3483 	struct ifnet		*ifp;
3484 
3485 	sc = device_get_softc(dev);
3486 
3487 	RL_LOCK(sc);
3488 
3489 	ifp = sc->rl_ifp;
3490 	/* Take controller out of sleep mode. */
3491 	if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
3492 		if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
3493 			CSR_WRITE_1(sc, RL_GPIO,
3494 			    CSR_READ_1(sc, RL_GPIO) | 0x01);
3495 	}
3496 
3497 	/*
3498 	 * Clear WOL matching such that normal Rx filtering
3499 	 * wouldn't interfere with WOL patterns.
3500 	 */
3501 	re_clrwol(sc);
3502 
3503 	/* reinitialize interface if necessary */
3504 	if (ifp->if_flags & IFF_UP)
3505 		re_init_locked(sc);
3506 
3507 	sc->suspended = 0;
3508 	RL_UNLOCK(sc);
3509 
3510 	return (0);
3511 }
3512 
3513 /*
3514  * Stop all chip I/O so that the kernel's probe routines don't
3515  * get confused by errant DMAs when rebooting.
3516  */
3517 static int
3518 re_shutdown(device_t dev)
3519 {
3520 	struct rl_softc		*sc;
3521 
3522 	sc = device_get_softc(dev);
3523 
3524 	RL_LOCK(sc);
3525 	re_stop(sc);
3526 	/*
3527 	 * Mark the interface as down since otherwise we will panic if
3528 	 * an interrupt comes in later on, which can happen in some
3529 	 * cases.
3530 	 */
3531 	sc->rl_ifp->if_flags &= ~IFF_UP;
3532 	re_setwol(sc);
3533 	RL_UNLOCK(sc);
3534 
3535 	return (0);
3536 }
3537 
/*
 * Program the controller's Wake On LAN configuration from the
 * interface's enabled WOL capabilities.
 */
static void
re_setwol(struct rl_softc *sc)
{
	struct ifnet		*ifp;
	int			pmc;
	uint16_t		pmstat;
	uint8_t			v;

	RL_LOCK_ASSERT(sc);

	if (pci_find_extcap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
		return;

	ifp = sc->rl_ifp;
	/* Put the controller into sleep mode. */
	if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
		if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
			CSR_WRITE_1(sc, RL_GPIO,
			    CSR_READ_1(sc, RL_GPIO) & ~0x01);
	}
	if ((ifp->if_capenable & IFCAP_WOL) != 0 &&
	    (sc->rl_flags & RL_FLAG_WOLRXENB) != 0)
		CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RX_ENB);
	/* Enable config register write. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);

	/* Enable PME. */
	v = CSR_READ_1(sc, RL_CFG1);
	v &= ~RL_CFG1_PME;
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		v |= RL_CFG1_PME;
	CSR_WRITE_1(sc, RL_CFG1, v);

	v = CSR_READ_1(sc, RL_CFG3);
	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		v |= RL_CFG3_WOL_MAGIC;
	CSR_WRITE_1(sc, RL_CFG3, v);

	/* Config register write done. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	v = CSR_READ_1(sc, RL_CFG5);
	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
	v &= ~RL_CFG5_WOL_LANWAKE;
	if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
		v |= RL_CFG5_WOL_UCAST;
	if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
		v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST;
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		v |= RL_CFG5_WOL_LANWAKE;
	CSR_WRITE_1(sc, RL_CFG5, v);

	if ((ifp->if_capenable & IFCAP_WOL) != 0 &&
	    (sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0)
		CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) & ~0x80);
	/*
	 * It seems that the hardware resets its link speed to 100Mbps
	 * in power-down mode, so there is no need to switch to 100Mbps
	 * in the driver.
	 */

	/* Request PME if WOL is requested. */
	pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}

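/*
 * Illustrative sketch (hypothetical code, not this driver's actual
 * SIOCSIFCAP handler): re_setwol() consumes the IFCAP_WOL* bits in
 * ifp->if_capenable, which the ioctl path toggles roughly like:
 *
 *	mask = ifr->ifr_reqcap ^ ifp->if_capenable;
 *	if ((mask & IFCAP_WOL_MAGIC) != 0 &&
 *	    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
 *		ifp->if_capenable ^= IFCAP_WOL_MAGIC;
 *
 * From userland this is driven with, e.g., `ifconfig re0 wol_magic'.
 */
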
/*
 * Clear the WOL configuration programmed by re_setwol() so that
 * normal receive operation is restored.
 */
static void
re_clrwol(struct rl_softc *sc)
{
	int			pmc;
	uint8_t			v;

	RL_LOCK_ASSERT(sc);

	if (pci_find_extcap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
		return;

	/* Enable config register write. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);

	v = CSR_READ_1(sc, RL_CFG3);
	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
	CSR_WRITE_1(sc, RL_CFG3, v);

	/* Config register write done. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	v = CSR_READ_1(sc, RL_CFG5);
	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
	v &= ~RL_CFG5_WOL_LANWAKE;
	CSR_WRITE_1(sc, RL_CFG5, v);
}

/*
 * Attach the driver's sysctl nodes and pull in any device hint
 * overrides for them.
 */
static void
re_add_sysctls(struct rl_softc *sc)
{
	struct sysctl_ctx_list	*ctx;
	struct sysctl_oid_list	*children;
	int			error;

	ctx = device_get_sysctl_ctx(sc->rl_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev));

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "stats",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, re_sysctl_stats, "I",
	    "Statistics Information");
	/* The interrupt moderation timer is only used with MSI/MSI-X. */
	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0)
		return;

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "int_rx_mod",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->rl_int_rx_mod, 0,
	    sysctl_hw_re_int_mod, "I", "re RX interrupt moderation");
	/* Pull in device tunables. */
	sc->rl_int_rx_mod = RL_TIMER_DEFAULT;
	error = resource_int_value(device_get_name(sc->rl_dev),
	    device_get_unit(sc->rl_dev), "int_rx_mod", &sc->rl_int_rx_mod);
	if (error == 0) {
		if (sc->rl_int_rx_mod < RL_TIMER_MIN ||
		    sc->rl_int_rx_mod > RL_TIMER_MAX) {
			device_printf(sc->rl_dev, "int_rx_mod value out of "
			    "range; using default: %d\n",
			    RL_TIMER_DEFAULT);
			sc->rl_int_rx_mod = RL_TIMER_DEFAULT;
		}
	}
}

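/*
 * Illustrative usage (standard FreeBSD tooling, not code in this
 * driver): the moderation timer picked up above can be seeded at boot
 * through a device hint, e.g. in /boot/device.hints:
 *
 *	hint.re.0.int_rx_mod="1000"
 *
 * and inspected or changed at runtime through the sysctl node:
 *
 *	# sysctl dev.re.0.int_rx_mod=1000
 *
 * Values outside [RL_TIMER_MIN, RL_TIMER_MAX] are rejected by
 * sysctl_hw_re_int_mod() below.
 */
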
/*
 * Dump the hardware statistics counters to the console when 1 is
 * written to the "stats" sysctl node.
 */
static int
re_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	struct rl_softc		*sc;
	struct rl_stats		*stats;
	int			error, i, result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || req->newptr == NULL)
		return (error);

	if (result == 1) {
		sc = (struct rl_softc *)arg1;
		RL_LOCK(sc);
		if ((sc->rl_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
			RL_UNLOCK(sc);
			goto done;
		}
		bus_dmamap_sync(sc->rl_ldata.rl_stag,
		    sc->rl_ldata.rl_smap, BUS_DMASYNC_PREREAD);
		/*
		 * Point the chip at the statistics buffer, set the
		 * START bit in the low address register to begin the
		 * dump, then poll for completion.
		 */
		CSR_WRITE_4(sc, RL_DUMPSTATS_HI,
		    RL_ADDR_HI(sc->rl_ldata.rl_stats_addr));
		CSR_WRITE_4(sc, RL_DUMPSTATS_LO,
		    RL_ADDR_LO(sc->rl_ldata.rl_stats_addr));
		CSR_WRITE_4(sc, RL_DUMPSTATS_LO,
		    RL_ADDR_LO(sc->rl_ldata.rl_stats_addr |
		    RL_DUMPSTATS_START));
		for (i = RL_TIMEOUT; i > 0; i--) {
			if ((CSR_READ_4(sc, RL_DUMPSTATS_LO) &
			    RL_DUMPSTATS_START) == 0)
				break;
			DELAY(1000);
		}
		bus_dmamap_sync(sc->rl_ldata.rl_stag,
		    sc->rl_ldata.rl_smap, BUS_DMASYNC_POSTREAD);
		RL_UNLOCK(sc);
		if (i == 0) {
			device_printf(sc->rl_dev,
			    "DUMP statistics request timed out\n");
			return (ETIMEDOUT);
		}
done:
		stats = sc->rl_ldata.rl_stats;
		printf("%s statistics:\n", device_get_nameunit(sc->rl_dev));
		printf("Tx frames : %ju\n",
		    (uintmax_t)le64toh(stats->rl_tx_pkts));
		printf("Rx frames : %ju\n",
		    (uintmax_t)le64toh(stats->rl_rx_pkts));
		printf("Tx errors : %ju\n",
		    (uintmax_t)le64toh(stats->rl_tx_errs));
		printf("Rx errors : %u\n",
		    le32toh(stats->rl_rx_errs));
		printf("Rx missed frames : %u\n",
		    (uint32_t)le16toh(stats->rl_missed_pkts));
		printf("Rx frame alignment errs : %u\n",
		    (uint32_t)le16toh(stats->rl_rx_framealign_errs));
		printf("Tx single collisions : %u\n",
		    le32toh(stats->rl_tx_onecoll));
		printf("Tx multiple collisions : %u\n",
		    le32toh(stats->rl_tx_multicolls));
		printf("Rx unicast frames : %ju\n",
		    (uintmax_t)le64toh(stats->rl_rx_ucasts));
		printf("Rx broadcast frames : %ju\n",
		    (uintmax_t)le64toh(stats->rl_rx_bcasts));
		printf("Rx multicast frames : %u\n",
		    le32toh(stats->rl_rx_mcasts));
		printf("Tx aborts : %u\n",
		    (uint32_t)le16toh(stats->rl_tx_aborts));
		printf("Tx underruns : %u\n",
		    (uint32_t)le16toh(stats->rl_rx_underruns));
	}

	return (error);
}

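/*
 * Illustrative usage (standard FreeBSD tooling, not code in this
 * driver): the dump above is triggered from userland with, e.g.:
 *
 *	# sysctl dev.re.0.stats=1
 *
 * which prints the counters to the system console/message buffer.
 */
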
/*
 * Generic handler for integer sysctls whose value must fall within
 * the [low, high] range; out-of-range updates are rejected with
 * EINVAL.
 */
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}

/* Bounds-check updates to the Rx interrupt moderation timer. */
static int
sysctl_hw_re_int_mod(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req, RL_TIMER_MIN,
	    RL_TIMER_MAX));
}
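/*
 * Illustrative sketch (hypothetical, not in this driver): any other
 * bounded integer knob can reuse sysctl_int_range() the same way,
 * e.g.:
 *
 *	static int
 *	sysctl_hw_re_foo(SYSCTL_HANDLER_ARGS)
 *	{
 *
 *		return (sysctl_int_range(oidp, arg1, arg2, req,
 *		    FOO_MIN, FOO_MAX));
 *	}
 *
 * where sysctl_hw_re_foo() and FOO_MIN/FOO_MAX are hypothetical names.
 */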