xref: /freebsd/sys/dev/re/if_re.c (revision a0b9e2e854027e6ff61fb075a1309dbc71c42b54)
1 /*-
2  * SPDX-License-Identifier: BSD-4-Clause
3  *
4  * Copyright (c) 1997, 1998-2003
5  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 /*
39  * RealTek 8139C+/8169/8169S/8110S/8168/8111/8101E PCI NIC driver
40  *
41  * Written by Bill Paul <wpaul@windriver.com>
42  * Senior Networking Software Engineer
43  * Wind River Systems
44  */
45 
46 /*
47  * This driver is designed to support RealTek's next generation of
48  * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
49  * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S,
50  * RTL8110S, the RTL8168, the RTL8111 and the RTL8101E.
51  *
52  * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
53  * with the older 8139 family; however, it also supports a special
54  * C+ mode of operation that provides several new performance enhancing
55  * features. These include:
56  *
57  *	o Descriptor based DMA mechanism. Each descriptor represents
58  *	  a single packet fragment. Data buffers may be aligned on
59  *	  any byte boundary.
60  *
61  *	o 64-bit DMA
62  *
63  *	o TCP/IP checksum offload for both RX and TX
64  *
65  *	o High and normal priority transmit DMA rings
66  *
67  *	o VLAN tag insertion and extraction
68  *
69  *	o TCP large send (segmentation offload)
70  *
71  * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
72  * programming API is fairly straightforward. The RX filtering, EEPROM
73  * access, and PHY access are the same as they are on the older 8139 series
74  * chips.
75  *
76  * The 8169 is a 64-bit 10/100/1000 ethernet MAC. It has almost the
77  * same programming API and feature set as the 8139C+ with the following
78  * differences and additions:
79  *
80  *	o 1000Mbps mode
81  *
82  *	o Jumbo frames
83  *
84  *	o GMII and TBI ports/registers for interfacing with copper
85  *	  or fiber PHYs
86  *
87  *	o RX and TX DMA rings can have up to 1024 descriptors
88  *	  (the 8139C+ allows a maximum of 64)
89  *
90  *	o Slight differences in register layout from the 8139C+
91  *
92  * The TX start and timer interrupt registers are at different locations
93  * on the 8169 than they are on the 8139C+. Also, the status word in the
94  * RX descriptor has a slightly different bit layout. The 8169 does not
95  * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
96  * copper gigE PHY.
97  *
98  * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
99  * (the 'S' stands for 'single-chip'). These devices have the same
100  * programming API as the older 8169, but also have some vendor-specific
101  * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
102  * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
103  *
104  * This driver takes advantage of the RX and TX checksum offload and
105  * VLAN tag insertion/extraction features. It also implements TX
106  * interrupt moderation using the timer interrupt registers, which
107  * significantly reduces TX interrupt load. There is also support
108  * for jumbo frames, however the 8169/8169S/8110S can not transmit
109  * jumbo frames larger than 7440, so the max MTU possible with this
110  * driver is 7422 bytes.
111  */
112 
113 #ifdef HAVE_KERNEL_OPTION_HEADERS
114 #include "opt_device_polling.h"
115 #endif
116 
117 #include <sys/param.h>
118 #include <sys/endian.h>
119 #include <sys/systm.h>
120 #include <sys/sockio.h>
121 #include <sys/mbuf.h>
122 #include <sys/malloc.h>
123 #include <sys/module.h>
124 #include <sys/kernel.h>
125 #include <sys/socket.h>
126 #include <sys/lock.h>
127 #include <sys/mutex.h>
128 #include <sys/sysctl.h>
129 #include <sys/taskqueue.h>
130 
131 #include <net/debugnet.h>
132 #include <net/if.h>
133 #include <net/if_var.h>
134 #include <net/if_arp.h>
135 #include <net/ethernet.h>
136 #include <net/if_dl.h>
137 #include <net/if_media.h>
138 #include <net/if_types.h>
139 #include <net/if_vlan_var.h>
140 
141 #include <net/bpf.h>
142 
143 #include <machine/bus.h>
144 #include <machine/resource.h>
145 #include <sys/bus.h>
146 #include <sys/rman.h>
147 
148 #include <dev/mii/mii.h>
149 #include <dev/mii/miivar.h>
150 
151 #include <dev/pci/pcireg.h>
152 #include <dev/pci/pcivar.h>
153 
154 #include <dev/rl/if_rlreg.h>
155 
156 MODULE_DEPEND(re, pci, 1, 1, 1);
157 MODULE_DEPEND(re, ether, 1, 1, 1);
158 MODULE_DEPEND(re, miibus, 1, 1, 1);
159 
160 /* "device miibus" required.  See GENERIC if you get errors here. */
161 #include "miibus_if.h"
162 
163 /* Tunables. */
164 static int intr_filter = 0;
165 TUNABLE_INT("hw.re.intr_filter", &intr_filter);
166 static int msi_disable = 0;
167 TUNABLE_INT("hw.re.msi_disable", &msi_disable);
168 static int msix_disable = 0;
169 TUNABLE_INT("hw.re.msix_disable", &msix_disable);
170 static int prefer_iomap = 0;
171 TUNABLE_INT("hw.re.prefer_iomap", &prefer_iomap);
172 
173 #define RE_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)
174 
175 /*
176  * Various supported device vendors/types and their names.
177  */
178 static const struct rl_type re_devs[] = {
179 	{ DLINK_VENDORID, DLINK_DEVICEID_528T, 0,
180 	    "D-Link DGE-528(T) Gigabit Ethernet Adapter" },
181 	{ DLINK_VENDORID, DLINK_DEVICEID_530T_REVC, 0,
182 	    "D-Link DGE-530(T) Gigabit Ethernet Adapter" },
183 	{ RT_VENDORID, RT_DEVICEID_8139, 0,
184 	    "RealTek 8139C+ 10/100BaseTX" },
185 	{ RT_VENDORID, RT_DEVICEID_8101E, 0,
186 	    "RealTek 810xE PCIe 10/100baseTX" },
187 	{ RT_VENDORID, RT_DEVICEID_8168, 0,
188 	    "RealTek 8168/8111 B/C/CP/D/DP/E/F/G PCIe Gigabit Ethernet" },
189 	{ RT_VENDORID, RT_DEVICEID_8161, 0,
190 	    "RealTek 8168 Gigabit Ethernet" },
191 	{ NCUBE_VENDORID, RT_DEVICEID_8168, 0,
192 	    "TP-Link TG-3468 v2 (RTL8168) Gigabit Ethernet" },
193 	{ RT_VENDORID, RT_DEVICEID_8169, 0,
194 	    "RealTek 8169/8169S/8169SB(L)/8110S/8110SB(L) Gigabit Ethernet" },
195 	{ RT_VENDORID, RT_DEVICEID_8169SC, 0,
196 	    "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" },
197 	{ COREGA_VENDORID, COREGA_DEVICEID_CGLAPCIGT, 0,
198 	    "Corega CG-LAPCIGT (RTL8169S) Gigabit Ethernet" },
199 	{ LINKSYS_VENDORID, LINKSYS_DEVICEID_EG1032, 0,
200 	    "Linksys EG1032 (RTL8169S) Gigabit Ethernet" },
201 	{ USR_VENDORID, USR_DEVICEID_997902, 0,
202 	    "US Robotics 997902 (RTL8169S) Gigabit Ethernet" }
203 };
204 
205 static const struct rl_hwrev re_hwrevs[] = {
206 	{ RL_HWREV_8139, RL_8139, "", RL_MTU },
207 	{ RL_HWREV_8139A, RL_8139, "A", RL_MTU },
208 	{ RL_HWREV_8139AG, RL_8139, "A-G", RL_MTU },
209 	{ RL_HWREV_8139B, RL_8139, "B", RL_MTU },
210 	{ RL_HWREV_8130, RL_8139, "8130", RL_MTU },
211 	{ RL_HWREV_8139C, RL_8139, "C", RL_MTU },
212 	{ RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C", RL_MTU },
213 	{ RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+", RL_MTU },
214 	{ RL_HWREV_8168B_SPIN1, RL_8169, "8168", RL_JUMBO_MTU },
215 	{ RL_HWREV_8169, RL_8169, "8169", RL_JUMBO_MTU },
216 	{ RL_HWREV_8169S, RL_8169, "8169S", RL_JUMBO_MTU },
217 	{ RL_HWREV_8110S, RL_8169, "8110S", RL_JUMBO_MTU },
218 	{ RL_HWREV_8169_8110SB, RL_8169, "8169SB/8110SB", RL_JUMBO_MTU },
219 	{ RL_HWREV_8169_8110SC, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU },
220 	{ RL_HWREV_8169_8110SBL, RL_8169, "8169SBL/8110SBL", RL_JUMBO_MTU },
221 	{ RL_HWREV_8169_8110SCE, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU },
222 	{ RL_HWREV_8100, RL_8139, "8100", RL_MTU },
223 	{ RL_HWREV_8101, RL_8139, "8101", RL_MTU },
224 	{ RL_HWREV_8100E, RL_8169, "8100E", RL_MTU },
225 	{ RL_HWREV_8101E, RL_8169, "8101E", RL_MTU },
226 	{ RL_HWREV_8102E, RL_8169, "8102E", RL_MTU },
227 	{ RL_HWREV_8102EL, RL_8169, "8102EL", RL_MTU },
228 	{ RL_HWREV_8102EL_SPIN1, RL_8169, "8102EL", RL_MTU },
229 	{ RL_HWREV_8103E, RL_8169, "8103E", RL_MTU },
230 	{ RL_HWREV_8401E, RL_8169, "8401E", RL_MTU },
231 	{ RL_HWREV_8402, RL_8169, "8402", RL_MTU },
232 	{ RL_HWREV_8105E, RL_8169, "8105E", RL_MTU },
233 	{ RL_HWREV_8105E_SPIN1, RL_8169, "8105E", RL_MTU },
234 	{ RL_HWREV_8106E, RL_8169, "8106E", RL_MTU },
235 	{ RL_HWREV_8168B_SPIN2, RL_8169, "8168", RL_JUMBO_MTU },
236 	{ RL_HWREV_8168B_SPIN3, RL_8169, "8168", RL_JUMBO_MTU },
237 	{ RL_HWREV_8168C, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K },
238 	{ RL_HWREV_8168C_SPIN2, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K },
239 	{ RL_HWREV_8168CP, RL_8169, "8168CP/8111CP", RL_JUMBO_MTU_6K },
240 	{ RL_HWREV_8168D, RL_8169, "8168D/8111D", RL_JUMBO_MTU_9K },
241 	{ RL_HWREV_8168DP, RL_8169, "8168DP/8111DP", RL_JUMBO_MTU_9K },
242 	{ RL_HWREV_8168E, RL_8169, "8168E/8111E", RL_JUMBO_MTU_9K},
243 	{ RL_HWREV_8168E_VL, RL_8169, "8168E/8111E-VL", RL_JUMBO_MTU_6K},
244 	{ RL_HWREV_8168EP, RL_8169, "8168EP/8111EP", RL_JUMBO_MTU_9K},
245 	{ RL_HWREV_8168F, RL_8169, "8168F/8111F", RL_JUMBO_MTU_9K},
246 	{ RL_HWREV_8168G, RL_8169, "8168G/8111G", RL_JUMBO_MTU_9K},
247 	{ RL_HWREV_8168GU, RL_8169, "8168GU/8111GU", RL_JUMBO_MTU_9K},
248 	{ RL_HWREV_8168H, RL_8169, "8168H/8111H", RL_JUMBO_MTU_9K},
249 	{ RL_HWREV_8411, RL_8169, "8411", RL_JUMBO_MTU_9K},
250 	{ RL_HWREV_8411B, RL_8169, "8411B", RL_JUMBO_MTU_9K},
251 	{ 0, 0, NULL, 0 }
252 };
253 
254 static int re_probe		(device_t);
255 static int re_attach		(device_t);
256 static int re_detach		(device_t);
257 
258 static int re_encap		(struct rl_softc *, struct mbuf **);
259 
260 static void re_dma_map_addr	(void *, bus_dma_segment_t *, int, int);
261 static int re_allocmem		(device_t, struct rl_softc *);
262 static __inline void re_discard_rxbuf
263 				(struct rl_softc *, int);
264 static int re_newbuf		(struct rl_softc *, int);
265 static int re_jumbo_newbuf	(struct rl_softc *, int);
266 static int re_rx_list_init	(struct rl_softc *);
267 static int re_jrx_list_init	(struct rl_softc *);
268 static int re_tx_list_init	(struct rl_softc *);
269 #ifdef RE_FIXUP_RX
270 static __inline void re_fixup_rx
271 				(struct mbuf *);
272 #endif
273 static int re_rxeof		(struct rl_softc *, int *);
274 static void re_txeof		(struct rl_softc *);
275 #ifdef DEVICE_POLLING
276 static int re_poll		(struct ifnet *, enum poll_cmd, int);
277 static int re_poll_locked	(struct ifnet *, enum poll_cmd, int);
278 #endif
279 static int re_intr		(void *);
280 static void re_intr_msi		(void *);
281 static void re_tick		(void *);
282 static void re_int_task		(void *, int);
283 static void re_start		(struct ifnet *);
284 static void re_start_locked	(struct ifnet *);
285 static void re_start_tx		(struct rl_softc *);
286 static int re_ioctl		(struct ifnet *, u_long, caddr_t);
287 static void re_init		(void *);
288 static void re_init_locked	(struct rl_softc *);
289 static void re_stop		(struct rl_softc *);
290 static void re_watchdog		(struct rl_softc *);
291 static int re_suspend		(device_t);
292 static int re_resume		(device_t);
293 static int re_shutdown		(device_t);
294 static int re_ifmedia_upd	(struct ifnet *);
295 static void re_ifmedia_sts	(struct ifnet *, struct ifmediareq *);
296 
297 static void re_eeprom_putbyte	(struct rl_softc *, int);
298 static void re_eeprom_getword	(struct rl_softc *, int, u_int16_t *);
299 static void re_read_eeprom	(struct rl_softc *, caddr_t, int, int);
300 static int re_gmii_readreg	(device_t, int, int);
301 static int re_gmii_writereg	(device_t, int, int, int);
302 
303 static int re_miibus_readreg	(device_t, int, int);
304 static int re_miibus_writereg	(device_t, int, int, int);
305 static void re_miibus_statchg	(device_t);
306 
307 static void re_set_jumbo	(struct rl_softc *, int);
308 static void re_set_rxmode		(struct rl_softc *);
309 static void re_reset		(struct rl_softc *);
310 static void re_setwol		(struct rl_softc *);
311 static void re_clrwol		(struct rl_softc *);
312 static void re_set_linkspeed	(struct rl_softc *);
313 
314 DEBUGNET_DEFINE(re);
315 
316 #ifdef DEV_NETMAP	/* see ixgbe.c for details */
317 #include <dev/netmap/if_re_netmap.h>
318 MODULE_DEPEND(re, netmap, 1, 1, 1);
319 #endif /* DEV_NETMAP */
320 
321 #ifdef RE_DIAG
322 static int re_diag		(struct rl_softc *);
323 #endif
324 
325 static void re_add_sysctls	(struct rl_softc *);
326 static int re_sysctl_stats	(SYSCTL_HANDLER_ARGS);
327 static int sysctl_int_range	(SYSCTL_HANDLER_ARGS, int, int);
328 static int sysctl_hw_re_int_mod	(SYSCTL_HANDLER_ARGS);
329 
330 static device_method_t re_methods[] = {
331 	/* Device interface */
332 	DEVMETHOD(device_probe,		re_probe),
333 	DEVMETHOD(device_attach,	re_attach),
334 	DEVMETHOD(device_detach,	re_detach),
335 	DEVMETHOD(device_suspend,	re_suspend),
336 	DEVMETHOD(device_resume,	re_resume),
337 	DEVMETHOD(device_shutdown,	re_shutdown),
338 
339 	/* MII interface */
340 	DEVMETHOD(miibus_readreg,	re_miibus_readreg),
341 	DEVMETHOD(miibus_writereg,	re_miibus_writereg),
342 	DEVMETHOD(miibus_statchg,	re_miibus_statchg),
343 
344 	DEVMETHOD_END
345 };
346 
347 static driver_t re_driver = {
348 	"re",
349 	re_methods,
350 	sizeof(struct rl_softc)
351 };
352 
353 static devclass_t re_devclass;
354 
355 DRIVER_MODULE(re, pci, re_driver, re_devclass, 0, 0);
356 DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0);
357 
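/*
 * EEPROM bit-bang helpers: read-modify-write the EECMD register so a
 * single control line (data-in, clock) can be raised or lowered
 * without disturbing the other bits.
 */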
358 #define EE_SET(x)					\
359 	CSR_WRITE_1(sc, RL_EECMD,			\
360 		CSR_READ_1(sc, RL_EECMD) | x)
361 
362 #define EE_CLR(x)					\
363 	CSR_WRITE_1(sc, RL_EECMD,			\
364 		CSR_READ_1(sc, RL_EECMD) & ~x)
365 
366 /*
367  * Send a read command and address to the EEPROM, check for ACK.
368  */
369 static void
370 re_eeprom_putbyte(struct rl_softc *sc, int addr)
371 {
372 	int			d, i;
373 
374 	d = addr | (RL_9346_READ << sc->rl_eewidth);
375 
376 	/*
377 	 * Feed in each bit and strobe the clock.
378 	 */
379 
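	/*
	 * The mask walks from the top of the command frame down to
	 * bit 0, so the read opcode and address are shifted out MSB
	 * first, one bit per clock strobe.
	 */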
380 	for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) {
381 		if (d & i) {
382 			EE_SET(RL_EE_DATAIN);
383 		} else {
384 			EE_CLR(RL_EE_DATAIN);
385 		}
386 		DELAY(100);
387 		EE_SET(RL_EE_CLK);
388 		DELAY(150);
389 		EE_CLR(RL_EE_CLK);
390 		DELAY(100);
391 	}
392 }
393 
394 /*
395  * Read a word of data stored in the EEPROM at address 'addr.'
396  */
397 static void
398 re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest)
399 {
400 	int			i;
401 	u_int16_t		word = 0;
402 
403 	/*
404 	 * Send address of word we want to read.
405 	 */
406 	re_eeprom_putbyte(sc, addr);
407 
408 	/*
409 	 * Start reading bits from EEPROM.
410 	 */
411 	for (i = 0x8000; i; i >>= 1) {
412 		EE_SET(RL_EE_CLK);
413 		DELAY(100);
414 		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
415 			word |= i;
416 		EE_CLR(RL_EE_CLK);
417 		DELAY(100);
418 	}
419 
420 	*dest = word;
421 }
422 
423 /*
424  * Read a sequence of words from the EEPROM.
425  */
426 static void
427 re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt)
428 {
429 	int			i;
430 	u_int16_t		word = 0, *ptr;
431 
432 	CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
433 
434 	DELAY(100);
435 
436 	for (i = 0; i < cnt; i++) {
437 		CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL);
438 		re_eeprom_getword(sc, off + i, &word);
439 		CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL);
440 		ptr = (u_int16_t *)(dest + (i * 2));
441 		*ptr = word;
442 	}
443 
444 	CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
445 }
446 
447 static int
448 re_gmii_readreg(device_t dev, int phy, int reg)
449 {
450 	struct rl_softc		*sc;
451 	u_int32_t		rval;
452 	int			i;
453 
454 	sc = device_get_softc(dev);
455 
456 	/* Let the rgephy driver read the GMEDIASTAT register */
457 
458 	if (reg == RL_GMEDIASTAT) {
459 		rval = CSR_READ_1(sc, RL_GMEDIASTAT);
460 		return (rval);
461 	}
462 
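	/*
	 * GMII access goes through the PHYAR register: the register
	 * number lives in bits 20:16 and the data in the low 16 bits.
	 * For a read, the FLAG bit is left clear and the controller
	 * sets it once the data is valid; writes (below) set the bit
	 * and poll for it to clear instead.
	 */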
463 	CSR_WRITE_4(sc, RL_PHYAR, reg << 16);
464 
465 	for (i = 0; i < RL_PHY_TIMEOUT; i++) {
466 		rval = CSR_READ_4(sc, RL_PHYAR);
467 		if (rval & RL_PHYAR_BUSY)
468 			break;
469 		DELAY(25);
470 	}
471 
472 	if (i == RL_PHY_TIMEOUT) {
473 		device_printf(sc->rl_dev, "PHY read failed\n");
474 		return (0);
475 	}
476 
477 	/*
478 	 * The controller requires a 20us delay to process the next MDIO request.
479 	 */
480 	DELAY(20);
481 
482 	return (rval & RL_PHYAR_PHYDATA);
483 }
484 
485 static int
486 re_gmii_writereg(device_t dev, int phy, int reg, int data)
487 {
488 	struct rl_softc		*sc;
489 	u_int32_t		rval;
490 	int			i;
491 
492 	sc = device_get_softc(dev);
493 
494 	CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) |
495 	    (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY);
496 
497 	for (i = 0; i < RL_PHY_TIMEOUT; i++) {
498 		rval = CSR_READ_4(sc, RL_PHYAR);
499 		if (!(rval & RL_PHYAR_BUSY))
500 			break;
501 		DELAY(25);
502 	}
503 
504 	if (i == RL_PHY_TIMEOUT) {
505 		device_printf(sc->rl_dev, "PHY write failed\n");
506 		return (0);
507 	}
508 
509 	/*
510 	 * The controller requires a 20us delay to process the next MDIO request.
511 	 */
512 	DELAY(20);
513 
514 	return (0);
515 }
516 
517 static int
518 re_miibus_readreg(device_t dev, int phy, int reg)
519 {
520 	struct rl_softc		*sc;
521 	u_int16_t		rval = 0;
522 	u_int16_t		re8139_reg = 0;
523 
524 	sc = device_get_softc(dev);
525 
526 	if (sc->rl_type == RL_8169) {
527 		rval = re_gmii_readreg(dev, phy, reg);
528 		return (rval);
529 	}
530 
531 	switch (reg) {
532 	case MII_BMCR:
533 		re8139_reg = RL_BMCR;
534 		break;
535 	case MII_BMSR:
536 		re8139_reg = RL_BMSR;
537 		break;
538 	case MII_ANAR:
539 		re8139_reg = RL_ANAR;
540 		break;
541 	case MII_ANER:
542 		re8139_reg = RL_ANER;
543 		break;
544 	case MII_ANLPAR:
545 		re8139_reg = RL_LPAR;
546 		break;
547 	case MII_PHYIDR1:
548 	case MII_PHYIDR2:
549 		return (0);
550 	/*
551 	 * Allow the rlphy driver to read the media status
552 	 * register. If we have a link partner which does not
553 	 * support NWAY, this is the register which will tell
554 	 * us the results of parallel detection.
555 	 */
556 	case RL_MEDIASTAT:
557 		rval = CSR_READ_1(sc, RL_MEDIASTAT);
558 		return (rval);
559 	default:
560 		device_printf(sc->rl_dev, "bad phy register\n");
561 		return (0);
562 	}
563 	rval = CSR_READ_2(sc, re8139_reg);
564 	if (sc->rl_type == RL_8139CPLUS && re8139_reg == RL_BMCR) {
565 		/* 8139C+ has different bit layout. */
566 		rval &= ~(BMCR_LOOP | BMCR_ISO);
567 	}
568 	return (rval);
569 }
570 
571 static int
572 re_miibus_writereg(device_t dev, int phy, int reg, int data)
573 {
574 	struct rl_softc		*sc;
575 	u_int16_t		re8139_reg = 0;
576 	int			rval = 0;
577 
578 	sc = device_get_softc(dev);
579 
580 	if (sc->rl_type == RL_8169) {
581 		rval = re_gmii_writereg(dev, phy, reg, data);
582 		return (rval);
583 	}
584 
585 	switch (reg) {
586 	case MII_BMCR:
587 		re8139_reg = RL_BMCR;
588 		if (sc->rl_type == RL_8139CPLUS) {
589 			/* 8139C+ has different bit layout. */
590 			data &= ~(BMCR_LOOP | BMCR_ISO);
591 		}
592 		break;
593 	case MII_BMSR:
594 		re8139_reg = RL_BMSR;
595 		break;
596 	case MII_ANAR:
597 		re8139_reg = RL_ANAR;
598 		break;
599 	case MII_ANER:
600 		re8139_reg = RL_ANER;
601 		break;
602 	case MII_ANLPAR:
603 		re8139_reg = RL_LPAR;
604 		break;
605 	case MII_PHYIDR1:
606 	case MII_PHYIDR2:
607 		return (0);
609 	default:
610 		device_printf(sc->rl_dev, "bad phy register\n");
611 		return (0);
612 	}
613 	CSR_WRITE_2(sc, re8139_reg, data);
614 	return (0);
615 }
616 
617 static void
618 re_miibus_statchg(device_t dev)
619 {
620 	struct rl_softc		*sc;
621 	struct ifnet		*ifp;
622 	struct mii_data		*mii;
623 
624 	sc = device_get_softc(dev);
625 	mii = device_get_softc(sc->rl_miibus);
626 	ifp = sc->rl_ifp;
627 	if (mii == NULL || ifp == NULL ||
628 	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
629 		return;
630 
631 	sc->rl_flags &= ~RL_FLAG_LINK;
632 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
633 	    (IFM_ACTIVE | IFM_AVALID)) {
634 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
635 		case IFM_10_T:
636 		case IFM_100_TX:
637 			sc->rl_flags |= RL_FLAG_LINK;
638 			break;
639 		case IFM_1000_T:
640 			if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0)
641 				break;
642 			sc->rl_flags |= RL_FLAG_LINK;
643 			break;
644 		default:
645 			break;
646 		}
647 	}
648 	/*
649 	 * RealTek controllers do not provide any interface to the RX/TX
650 	 * MACs for resolved speed, duplex and flow-control parameters.
651 	 */
652 }
653 
654 static u_int
655 re_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
656 {
657 	uint32_t h, *hashes = arg;
658 
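	/*
	 * Standard 64-bit multicast hash: the top 6 bits of the
	 * big-endian CRC32 of the station address select one bit in
	 * the MAR0/MAR4 register pair.
	 */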
659 	h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
660 	if (h < 32)
661 		hashes[0] |= (1 << h);
662 	else
663 		hashes[1] |= (1 << (h - 32));
664 
665 	return (1);
666 }
667 
668 /*
669  * Set the RX configuration and 64-bit multicast hash filter.
670  */
671 static void
672 re_set_rxmode(struct rl_softc *sc)
673 {
674 	struct ifnet		*ifp;
675 	uint32_t		h, hashes[2] = { 0, 0 };
676 	uint32_t		rxfilt;
677 
678 	RL_LOCK_ASSERT(sc);
679 
680 	ifp = sc->rl_ifp;
681 
682 	rxfilt = RL_RXCFG_CONFIG | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD;
683 	if ((sc->rl_flags & RL_FLAG_EARLYOFF) != 0)
684 		rxfilt |= RL_RXCFG_EARLYOFF;
685 	else if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0)
686 		rxfilt |= RL_RXCFG_EARLYOFFV2;
687 
688 	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
689 		if (ifp->if_flags & IFF_PROMISC)
690 			rxfilt |= RL_RXCFG_RX_ALLPHYS;
691 		/*
692 		 * Unlike other hardware, we have to explicitly set
693 		 * RL_RXCFG_RX_MULTI to receive multicast frames in
694 		 * promiscuous mode.
695 		 */
696 		rxfilt |= RL_RXCFG_RX_MULTI;
697 		hashes[0] = hashes[1] = 0xffffffff;
698 		goto done;
699 	}
700 
701 	if_foreach_llmaddr(ifp, re_hash_maddr, hashes);
702 
703 	if (hashes[0] != 0 || hashes[1] != 0) {
704 		/*
705 		 * For some unfathomable reason, RealTek decided to
706 		 * reverse the order of the multicast hash registers
707 		 * in the PCI Express parts.  This means we have to
708 		 * write the hash pattern in reverse order for those
709 		 * devices.
710 		 */
711 		if ((sc->rl_flags & RL_FLAG_PCIE) != 0) {
712 			h = bswap32(hashes[0]);
713 			hashes[0] = bswap32(hashes[1]);
714 			hashes[1] = h;
715 		}
716 		rxfilt |= RL_RXCFG_RX_MULTI;
717 	}
718 
719 	if (sc->rl_hwrev->rl_rev == RL_HWREV_8168F) {
720 		/* Disable multicast filtering due to silicon bug. */
721 		hashes[0] = 0xffffffff;
722 		hashes[1] = 0xffffffff;
723 	}
724 
725 done:
726 	CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
727 	CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
728 	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
729 }
730 
731 static void
732 re_reset(struct rl_softc *sc)
733 {
734 	int			i;
735 
736 	RL_LOCK_ASSERT(sc);
737 
738 	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);
739 
740 	for (i = 0; i < RL_TIMEOUT; i++) {
741 		DELAY(10);
742 		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
743 			break;
744 	}
745 	if (i == RL_TIMEOUT)
746 		device_printf(sc->rl_dev, "reset never completed!\n");
747 
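	/*
	 * Post-reset fixups: poke the (apparently undocumented) 0x82
	 * register on chips flagged as needing a MAC reset helper, and
	 * clear PHY register 0x0b on the original 8169S.
	 */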
748 	if ((sc->rl_flags & RL_FLAG_MACRESET) != 0)
749 		CSR_WRITE_1(sc, 0x82, 1);
750 	if (sc->rl_hwrev->rl_rev == RL_HWREV_8169S)
751 		re_gmii_writereg(sc->rl_dev, 1, 0x0b, 0);
752 }
753 
754 #ifdef RE_DIAG
755 
756 /*
757  * The following routine is designed to test for a defect on some
758  * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64#
759  * lines connected to the bus, however for a 32-bit only card, they
760  * should be pulled high. The result of this defect is that the
761  * NIC will not work right if you plug it into a 64-bit slot: DMA
762  * operations will be done with 64-bit transfers, which will fail
763  * because the 64-bit data lines aren't connected.
764  *
765  * There's no way to work around this (short of taking a soldering
766  * iron to the board), however we can detect it. The method we use
767  * here is to put the NIC into digital loopback mode, set the receiver
768  * to promiscuous mode, and then try to send a frame. We then compare
769  * the frame data we sent to what was received. If the data matches,
770  * then the NIC is working correctly, otherwise we know the user has
771  * a defective NIC which has been mistakenly plugged into a 64-bit PCI
772  * slot. In the latter case, there's no way the NIC can work correctly,
773  * so we print out a message on the console and abort the device attach.
774  */
775 
776 static int
777 re_diag(struct rl_softc *sc)
778 {
779 	struct ifnet		*ifp = sc->rl_ifp;
780 	struct mbuf		*m0;
781 	struct ether_header	*eh;
782 	struct rl_desc		*cur_rx;
783 	u_int16_t		status;
784 	u_int32_t		rxstat;
785 	int			total_len, i, error = 0, phyaddr;
786 	u_int8_t		dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
787 	u_int8_t		src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };
788 
789 	/* Allocate a single mbuf */
790 	MGETHDR(m0, M_NOWAIT, MT_DATA);
791 	if (m0 == NULL)
792 		return (ENOBUFS);
793 
794 	RL_LOCK(sc);
795 
796 	/*
797 	 * Initialize the NIC in test mode. This sets the chip up
798 	 * so that it can send and receive frames, but performs the
799 	 * following special functions:
800 	 * - Puts receiver in promiscuous mode
801 	 * - Enables digital loopback mode
802 	 * - Leaves interrupts turned off
803 	 */
804 
805 	ifp->if_flags |= IFF_PROMISC;
806 	sc->rl_testmode = 1;
807 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
808 	re_init_locked(sc);
809 	sc->rl_flags |= RL_FLAG_LINK;
810 	if (sc->rl_type == RL_8169)
811 		phyaddr = 1;
812 	else
813 		phyaddr = 0;
814 
815 	re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_RESET);
816 	for (i = 0; i < RL_TIMEOUT; i++) {
817 		status = re_miibus_readreg(sc->rl_dev, phyaddr, MII_BMCR);
818 		if (!(status & BMCR_RESET))
819 			break;
820 	}
821 
822 	re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_LOOP);
823 	CSR_WRITE_2(sc, RL_ISR, RL_INTRS);
824 
825 	DELAY(100000);
826 
827 	/* Put some data in the mbuf */
828 
829 	eh = mtod(m0, struct ether_header *);
830 	bcopy((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN);
831 	bcopy((char *)&src, eh->ether_shost, ETHER_ADDR_LEN);
832 	eh->ether_type = htons(ETHERTYPE_IP);
833 	m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;
834 
835 	/*
836 	 * Queue the packet, start transmission.
837 	 * Note: IF_HANDOFF() ultimately calls re_start() for us.
838 	 */
839 
840 	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
841 	RL_UNLOCK(sc);
842 	/* XXX: re_diag must not be called when in ALTQ mode */
843 	IF_HANDOFF(&ifp->if_snd, m0, ifp);
844 	RL_LOCK(sc);
845 	m0 = NULL;
846 
847 	/* Wait for it to propagate through the chip */
848 
849 	DELAY(100000);
850 	for (i = 0; i < RL_TIMEOUT; i++) {
851 		status = CSR_READ_2(sc, RL_ISR);
852 		CSR_WRITE_2(sc, RL_ISR, status);
853 		if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) ==
854 		    (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK))
855 			break;
856 		DELAY(10);
857 	}
858 
859 	if (i == RL_TIMEOUT) {
860 		device_printf(sc->rl_dev,
861 		    "diagnostic failed, no packet received in"
862 		    " loopback mode\n");
863 		error = EIO;
864 		goto done;
865 	}
866 
867 	/*
868 	 * The packet should have been dumped into the first
869 	 * entry in the RX DMA ring. Grab it from there.
870 	 */
871 
872 	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
873 	    sc->rl_ldata.rl_rx_list_map,
874 	    BUS_DMASYNC_POSTREAD);
875 	bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
876 	    sc->rl_ldata.rl_rx_desc[0].rx_dmamap,
877 	    BUS_DMASYNC_POSTREAD);
878 	bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag,
879 	    sc->rl_ldata.rl_rx_desc[0].rx_dmamap);
880 
881 	m0 = sc->rl_ldata.rl_rx_desc[0].rx_m;
882 	sc->rl_ldata.rl_rx_desc[0].rx_m = NULL;
883 	eh = mtod(m0, struct ether_header *);
884 
885 	cur_rx = &sc->rl_ldata.rl_rx_list[0];
886 	total_len = RL_RXBYTES(cur_rx);
887 	rxstat = le32toh(cur_rx->rl_cmdstat);
888 
889 	if (total_len != ETHER_MIN_LEN) {
890 		device_printf(sc->rl_dev,
891 		    "diagnostic failed, received short packet\n");
892 		error = EIO;
893 		goto done;
894 	}
895 
896 	/* Test that the received packet data matches what we sent. */
897 
898 	if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) ||
899 	    bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) ||
900 	    ntohs(eh->ether_type) != ETHERTYPE_IP) {
901 		device_printf(sc->rl_dev, "WARNING, DMA FAILURE!\n");
902 		device_printf(sc->rl_dev, "expected TX data: %6D/%6D/0x%x\n",
903 		    dst, ":", src, ":", ETHERTYPE_IP);
904 		device_printf(sc->rl_dev, "received RX data: %6D/%6D/0x%x\n",
905 		    eh->ether_dhost, ":", eh->ether_shost, ":",
906 		    ntohs(eh->ether_type));
907 		device_printf(sc->rl_dev, "You may have a defective 32-bit "
908 		    "NIC plugged into a 64-bit PCI slot.\n");
909 		device_printf(sc->rl_dev, "Please re-install the NIC in a "
910 		    "32-bit slot for proper operation.\n");
911 		device_printf(sc->rl_dev, "Read the re(4) man page for more "
912 		    "details.\n");
913 		error = EIO;
914 	}
915 
916 done:
917 	/* Turn interface off, release resources */
918 
919 	sc->rl_testmode = 0;
920 	sc->rl_flags &= ~RL_FLAG_LINK;
921 	ifp->if_flags &= ~IFF_PROMISC;
922 	re_stop(sc);
923 	if (m0 != NULL)
924 		m_freem(m0);
925 
926 	RL_UNLOCK(sc);
927 
928 	return (error);
929 }
930 
931 #endif
932 
933 /*
934  * Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device
935  * IDs against our list and return a device name if we find a match.
936  */
937 static int
938 re_probe(device_t dev)
939 {
940 	const struct rl_type	*t;
941 	uint16_t		devid, vendor;
942 	uint16_t		revid, sdevid;
943 	int			i;
944 
945 	vendor = pci_get_vendor(dev);
946 	devid = pci_get_device(dev);
947 	revid = pci_get_revid(dev);
948 	sdevid = pci_get_subdevice(dev);
949 
950 	if (vendor == LINKSYS_VENDORID && devid == LINKSYS_DEVICEID_EG1032) {
951 		if (sdevid != LINKSYS_SUBDEVICE_EG1032_REV3) {
952 			/*
953 			 * Only attach to rev. 3 of the Linksys EG1032 adapter.
954 			 * Rev. 2 is supported by sk(4).
955 			 */
956 			return (ENXIO);
957 		}
958 	}
959 
960 	if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) {
961 		if (revid != 0x20) {
962 			/* 8139, let rl(4) take care of this device. */
963 			return (ENXIO);
964 		}
965 	}
966 
967 	t = re_devs;
968 	for (i = 0; i < nitems(re_devs); i++, t++) {
969 		if (vendor == t->rl_vid && devid == t->rl_did) {
970 			device_set_desc(dev, t->rl_name);
971 			return (BUS_PROBE_DEFAULT);
972 		}
973 	}
974 
975 	return (ENXIO);
976 }
977 
978 /*
979  * Map a single buffer address.
980  */
981 
982 static void
983 re_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
984 {
985 	bus_addr_t		*addr;
986 
987 	if (error)
988 		return;
989 
990 	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
991 	addr = arg;
992 	*addr = segs->ds_addr;
993 }
994 
995 static int
996 re_allocmem(device_t dev, struct rl_softc *sc)
997 {
998 	bus_addr_t		lowaddr;
999 	bus_size_t		rx_list_size, tx_list_size;
1000 	int			error;
1001 	int			i;
1002 
1003 	rx_list_size = sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc);
1004 	tx_list_size = sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc);
1005 
1006 	/*
1007 	 * Allocate the parent bus DMA tag appropriate for PCI.
1008 	 * In order to use DAC, the RL_CPLUSCMD_PCI_DAC bit of the
1009 	 * RL_CPLUS_CMD register should be set. However, some RealTek
1010 	 * chips are known to have buggy DAC handling, so disable DAC
1011 	 * by limiting the DMA address space to 32 bits. PCIe variants
1012 	 * of RealTek chips may not have this limitation.
1013 	 */
1014 	lowaddr = BUS_SPACE_MAXADDR;
1015 	if ((sc->rl_flags & RL_FLAG_PCIE) == 0)
1016 		lowaddr = BUS_SPACE_MAXADDR_32BIT;
1017 	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
1018 	    lowaddr, BUS_SPACE_MAXADDR, NULL, NULL,
1019 	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
1020 	    NULL, NULL, &sc->rl_parent_tag);
1021 	if (error) {
1022 		device_printf(dev, "could not allocate parent DMA tag\n");
1023 		return (error);
1024 	}
1025 
1026 	/*
1027 	 * Allocate map for TX mbufs.
1028 	 */
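	/*
	 * Each mapped mbuf chain may use up to RL_NTXSEGS segments,
	 * with individual segments capped at 4096 bytes.
	 */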
1029 	error = bus_dma_tag_create(sc->rl_parent_tag, 1, 0,
1030 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1031 	    NULL, MCLBYTES * RL_NTXSEGS, RL_NTXSEGS, 4096, 0,
1032 	    NULL, NULL, &sc->rl_ldata.rl_tx_mtag);
1033 	if (error) {
1034 		device_printf(dev, "could not allocate TX DMA tag\n");
1035 		return (error);
1036 	}
1037 
1038 	/*
1039 	 * Allocate map for RX mbufs.
1040 	 */
1041 
1042 	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
1043 		error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t),
1044 		    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1045 		    MJUM9BYTES, 1, MJUM9BYTES, 0, NULL, NULL,
1046 		    &sc->rl_ldata.rl_jrx_mtag);
1047 		if (error) {
1048 			device_printf(dev,
1049 			    "could not allocate jumbo RX DMA tag\n");
1050 			return (error);
1051 		}
1052 	}
1053 	error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 0,
1054 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1055 	    MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rl_ldata.rl_rx_mtag);
1056 	if (error) {
1057 		device_printf(dev, "could not allocate RX DMA tag\n");
1058 		return (error);
1059 	}
1060 
1061 	/*
1062 	 * Allocate map for TX descriptor list.
1063 	 */
1064 	error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
1065 	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
1066 	    NULL, tx_list_size, 1, tx_list_size, 0,
1067 	    NULL, NULL, &sc->rl_ldata.rl_tx_list_tag);
1068 	if (error) {
1069 		device_printf(dev, "could not allocate TX DMA ring tag\n");
1070 		return (error);
1071 	}
1072 
1073 	/* Allocate DMA'able memory for the TX ring */
1074 
1075 	error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag,
1076 	    (void **)&sc->rl_ldata.rl_tx_list,
1077 	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
1078 	    &sc->rl_ldata.rl_tx_list_map);
1079 	if (error) {
1080 		device_printf(dev, "could not allocate TX DMA ring\n");
1081 		return (error);
1082 	}
1083 
1084 	/* Load the map for the TX ring. */
1085 
1086 	sc->rl_ldata.rl_tx_list_addr = 0;
1087 	error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag,
1088 	     sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list,
1089 	     tx_list_size, re_dma_map_addr,
1090 	     &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT);
1091 	if (error != 0 || sc->rl_ldata.rl_tx_list_addr == 0) {
1092 		device_printf(dev, "could not load TX DMA ring\n");
1093 		return (ENOMEM);
1094 	}
1095 
1096 	/* Create DMA maps for TX buffers */
1097 
1098 	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
1099 		error = bus_dmamap_create(sc->rl_ldata.rl_tx_mtag, 0,
1100 		    &sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
1101 		if (error) {
1102 			device_printf(dev, "could not create DMA map for TX\n");
1103 			return (error);
1104 		}
1105 	}
1106 
1107 	/*
1108 	 * Allocate map for RX descriptor list.
1109 	 */
1110 	error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
1111 	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
1112 	    NULL, rx_list_size, 1, rx_list_size, 0,
1113 	    NULL, NULL, &sc->rl_ldata.rl_rx_list_tag);
1114 	if (error) {
1115 		device_printf(dev, "could not create RX DMA ring tag\n");
1116 		return (error);
1117 	}
1118 
1119 	/* Allocate DMA'able memory for the RX ring */
1120 
1121 	error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag,
1122 	    (void **)&sc->rl_ldata.rl_rx_list,
1123 	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
1124 	    &sc->rl_ldata.rl_rx_list_map);
1125 	if (error) {
1126 		device_printf(dev, "could not allocate RX DMA ring\n");
1127 		return (error);
1128 	}
1129 
1130 	/* Load the map for the RX ring. */
1131 
1132 	sc->rl_ldata.rl_rx_list_addr = 0;
1133 	error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag,
1134 	     sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list,
1135 	     rx_list_size, re_dma_map_addr,
1136 	     &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT);
1137 	if (error != 0 || sc->rl_ldata.rl_rx_list_addr == 0) {
1138 		device_printf(dev, "could not load RX DMA ring\n");
1139 		return (ENOMEM);
1140 	}
1141 
1142 	/* Create DMA maps for RX buffers */
1143 
1144 	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
1145 		error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0,
1146 		    &sc->rl_ldata.rl_jrx_sparemap);
1147 		if (error) {
1148 			device_printf(dev,
1149 			    "could not create spare DMA map for jumbo RX\n");
1150 			return (error);
1151 		}
1152 		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1153 			error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0,
1154 			    &sc->rl_ldata.rl_jrx_desc[i].rx_dmamap);
1155 			if (error) {
1156 				device_printf(dev,
1157 				    "could not create DMA map for jumbo RX\n");
1158 				return (error);
1159 			}
1160 		}
1161 	}
1162 	error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
1163 	    &sc->rl_ldata.rl_rx_sparemap);
1164 	if (error) {
1165 		device_printf(dev, "could not create spare DMA map for RX\n");
1166 		return (error);
1167 	}
1168 	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1169 		error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
1170 		    &sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
1171 		if (error) {
1172 			device_printf(dev, "could not create DMA map for RX\n");
1173 			return (error);
1174 		}
1175 	}
1176 
1177 	/* Create DMA map for statistics. */
1178 	error = bus_dma_tag_create(sc->rl_parent_tag, RL_DUMP_ALIGN, 0,
1179 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1180 	    sizeof(struct rl_stats), 1, sizeof(struct rl_stats), 0, NULL, NULL,
1181 	    &sc->rl_ldata.rl_stag);
1182 	if (error) {
1183 		device_printf(dev, "could not create statistics DMA tag\n");
1184 		return (error);
1185 	}
1186 	/* Allocate DMA'able memory for statistics. */
1187 	error = bus_dmamem_alloc(sc->rl_ldata.rl_stag,
1188 	    (void **)&sc->rl_ldata.rl_stats,
1189 	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
1190 	    &sc->rl_ldata.rl_smap);
1191 	if (error) {
1192 		device_printf(dev,
1193 		    "could not allocate statistics DMA memory\n");
1194 		return (error);
1195 	}
1196 	/* Load the map for statistics. */
1197 	sc->rl_ldata.rl_stats_addr = 0;
1198 	error = bus_dmamap_load(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap,
1199 	    sc->rl_ldata.rl_stats, sizeof(struct rl_stats), re_dma_map_addr,
1200 	     &sc->rl_ldata.rl_stats_addr, BUS_DMA_NOWAIT);
1201 	if (error != 0 || sc->rl_ldata.rl_stats_addr == 0) {
1202 		device_printf(dev, "could not load statistics DMA memory\n");
1203 		return (ENOMEM);
1204 	}
1205 
1206 	return (0);
1207 }
1208 
1209 /*
1210  * Attach the interface. Allocate softc structures, do ifmedia
1211  * setup and ethernet/BPF attach.
1212  */
1213 static int
1214 re_attach(device_t dev)
1215 {
1216 	u_char			eaddr[ETHER_ADDR_LEN];
1217 	u_int16_t		as[ETHER_ADDR_LEN / 2];
1218 	struct rl_softc		*sc;
1219 	struct ifnet		*ifp;
1220 	const struct rl_hwrev	*hw_rev;
1221 	int			capmask, error = 0, hwrev, i, msic, msixc,
1222 				phy, reg, rid;
1223 	u_int32_t		cap, ctl;
1224 	u_int16_t		devid, re_did = 0;
1225 	uint8_t			cfg;
1226 
1227 	sc = device_get_softc(dev);
1228 	sc->rl_dev = dev;
1229 
1230 	mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1231 	    MTX_DEF);
1232 	callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0);
1233 
1234 	/*
1235 	 * Map control/status registers.
1236 	 */
1237 	pci_enable_busmaster(dev);
1238 
1239 	devid = pci_get_device(dev);
1240 	/*
1241 	 * Prefer memory space register mapping over I/O space. However,
1242 	 * because the RTL8169SC does not seem to work when memory mapping
1243 	 * is used, always activate I/O mapping for that chip.
1244 	 */
1245 	if (devid == RT_DEVICEID_8169SC)
1246 		prefer_iomap = 1;
1247 	if (prefer_iomap == 0) {
1248 		sc->rl_res_id = PCIR_BAR(1);
1249 		sc->rl_res_type = SYS_RES_MEMORY;
1250 		/* RTL8168/8101E seems to use different BARs. */
1251 		if (devid == RT_DEVICEID_8168 || devid == RT_DEVICEID_8101E)
1252 			sc->rl_res_id = PCIR_BAR(2);
1253 	} else {
1254 		sc->rl_res_id = PCIR_BAR(0);
1255 		sc->rl_res_type = SYS_RES_IOPORT;
1256 	}
1257 	sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
1258 	    &sc->rl_res_id, RF_ACTIVE);
1259 	if (sc->rl_res == NULL && prefer_iomap == 0) {
1260 		sc->rl_res_id = PCIR_BAR(0);
1261 		sc->rl_res_type = SYS_RES_IOPORT;
1262 		sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
1263 		    &sc->rl_res_id, RF_ACTIVE);
1264 	}
1265 	if (sc->rl_res == NULL) {
1266 		device_printf(dev, "couldn't map ports/memory\n");
1267 		error = ENXIO;
1268 		goto fail;
1269 	}
1270 
1271 	sc->rl_btag = rman_get_bustag(sc->rl_res);
1272 	sc->rl_bhandle = rman_get_bushandle(sc->rl_res);
1273 
1274 	msic = pci_msi_count(dev);
1275 	msixc = pci_msix_count(dev);
1276 	if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
1277 		sc->rl_flags |= RL_FLAG_PCIE;
1278 		sc->rl_expcap = reg;
1279 	}
1280 	if (bootverbose) {
1281 		device_printf(dev, "MSI count: %d\n", msic);
1282 		device_printf(dev, "MSI-X count: %d\n", msixc);
1283 	}
1284 	if (msix_disable > 0)
1285 		msixc = 0;
1286 	if (msi_disable > 0)
1287 		msic = 0;
1288 	/* Prefer MSI-X to MSI. */
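	/*
	 * The MSI-X table/PBA lives in BAR(4); it must be mapped
	 * before pci_alloc_msix() so the vectors can be programmed.
	 */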
1289 	if (msixc > 0) {
1290 		msixc = RL_MSI_MESSAGES;
1291 		rid = PCIR_BAR(4);
1292 		sc->rl_res_pba = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1293 		    &rid, RF_ACTIVE);
1294 		if (sc->rl_res_pba == NULL) {
1295 			device_printf(sc->rl_dev,
1296 			    "could not allocate MSI-X PBA resource\n");
1297 		}
1298 		if (sc->rl_res_pba != NULL &&
1299 		    pci_alloc_msix(dev, &msixc) == 0) {
1300 			if (msixc == RL_MSI_MESSAGES) {
1301 				device_printf(dev, "Using %d MSI-X message\n",
1302 				    msixc);
1303 				sc->rl_flags |= RL_FLAG_MSIX;
1304 			} else
1305 				pci_release_msi(dev);
1306 		}
1307 		if ((sc->rl_flags & RL_FLAG_MSIX) == 0) {
1308 			if (sc->rl_res_pba != NULL)
1309 				bus_release_resource(dev, SYS_RES_MEMORY, rid,
1310 				    sc->rl_res_pba);
1311 			sc->rl_res_pba = NULL;
1312 			msixc = 0;
1313 		}
1314 	}
1315 	/* Prefer MSI to INTx. */
1316 	if (msixc == 0 && msic > 0) {
1317 		msic = RL_MSI_MESSAGES;
1318 		if (pci_alloc_msi(dev, &msic) == 0) {
1319 			if (msic == RL_MSI_MESSAGES) {
1320 				device_printf(dev, "Using %d MSI message\n",
1321 				    msic);
1322 				sc->rl_flags |= RL_FLAG_MSI;
1323 				/* Explicitly set MSI enable bit. */
1324 				CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
1325 				cfg = CSR_READ_1(sc, RL_CFG2);
1326 				cfg |= RL_CFG2_MSI;
1327 				CSR_WRITE_1(sc, RL_CFG2, cfg);
1328 				CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
1329 			} else
1330 				pci_release_msi(dev);
1331 		}
1332 		if ((sc->rl_flags & RL_FLAG_MSI) == 0)
1333 			msic = 0;
1334 	}
1335 
1336 	/* Allocate interrupt */
1337 	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) {
1338 		rid = 0;
1339 		sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1340 		    RF_SHAREABLE | RF_ACTIVE);
1341 		if (sc->rl_irq[0] == NULL) {
1342 			device_printf(dev, "couldn't allocate IRQ resources\n");
1343 			error = ENXIO;
1344 			goto fail;
1345 		}
1346 	} else {
1347 		for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) {
1348 			sc->rl_irq[i] = bus_alloc_resource_any(dev,
1349 			    SYS_RES_IRQ, &rid, RF_ACTIVE);
1350 			if (sc->rl_irq[i] == NULL) {
1351 				device_printf(dev,
1352 				    "couldn't allocate IRQ resources for "
1353 				    "message %d\n", rid);
1354 				error = ENXIO;
1355 				goto fail;
1356 			}
1357 		}
1358 	}
1359 
1360 	if ((sc->rl_flags & RL_FLAG_MSI) == 0) {
1361 		CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
1362 		cfg = CSR_READ_1(sc, RL_CFG2);
1363 		if ((cfg & RL_CFG2_MSI) != 0) {
1364 			device_printf(dev, "turning off MSI enable bit.\n");
1365 			cfg &= ~RL_CFG2_MSI;
1366 			CSR_WRITE_1(sc, RL_CFG2, cfg);
1367 		}
1368 		CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
1369 	}
1370 
1371 	/* Disable ASPM L0S/L1 and CLKREQ. */
1372 	if (sc->rl_expcap != 0) {
1373 		cap = pci_read_config(dev, sc->rl_expcap +
1374 		    PCIER_LINK_CAP, 2);
1375 		if ((cap & PCIEM_LINK_CAP_ASPM) != 0) {
1376 			ctl = pci_read_config(dev, sc->rl_expcap +
1377 			    PCIER_LINK_CTL, 2);
1378 			if ((ctl & (PCIEM_LINK_CTL_ECPM |
1379 			    PCIEM_LINK_CTL_ASPMC)) != 0) {
1380 				ctl &= ~(PCIEM_LINK_CTL_ECPM |
1381 				    PCIEM_LINK_CTL_ASPMC);
1382 				pci_write_config(dev, sc->rl_expcap +
1383 				    PCIER_LINK_CTL, ctl, 2);
1384 				device_printf(dev, "ASPM disabled\n");
1385 			}
1386 		} else
1387 			device_printf(dev, "no ASPM capability\n");
1388 	}
1389 
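	/*
	 * The hardware revision is encoded in the upper bits of the TX
	 * config register; mask it out and look it up in re_hwrevs to
	 * select the chip type and per-revision quirks.
	 */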
1390 	hw_rev = re_hwrevs;
1391 	hwrev = CSR_READ_4(sc, RL_TXCFG);
1392 	switch (hwrev & 0x70000000) {
1393 	case 0x00000000:
1394 	case 0x10000000:
1395 		device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0xfc800000);
1396 		hwrev &= (RL_TXCFG_HWREV | 0x80000000);
1397 		break;
1398 	default:
1399 		device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0x7c800000);
1400 		sc->rl_macrev = hwrev & 0x00700000;
1401 		hwrev &= RL_TXCFG_HWREV;
1402 		break;
1403 	}
1404 	device_printf(dev, "MAC rev. 0x%08x\n", sc->rl_macrev);
1405 	while (hw_rev->rl_desc != NULL) {
1406 		if (hw_rev->rl_rev == hwrev) {
1407 			sc->rl_type = hw_rev->rl_type;
1408 			sc->rl_hwrev = hw_rev;
1409 			break;
1410 		}
1411 		hw_rev++;
1412 	}
1413 	if (hw_rev->rl_desc == NULL) {
1414 		device_printf(dev, "Unknown H/W revision: 0x%08x\n", hwrev);
1415 		error = ENXIO;
1416 		goto fail;
1417 	}
1418 
1419 	switch (hw_rev->rl_rev) {
1420 	case RL_HWREV_8139CPLUS:
1421 		sc->rl_flags |= RL_FLAG_FASTETHER | RL_FLAG_AUTOPAD;
1422 		break;
1423 	case RL_HWREV_8100E:
1424 	case RL_HWREV_8101E:
1425 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_FASTETHER;
1426 		break;
1427 	case RL_HWREV_8102E:
1428 	case RL_HWREV_8102EL:
1429 	case RL_HWREV_8102EL_SPIN1:
1430 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 |
1431 		    RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP |
1432 		    RL_FLAG_AUTOPAD;
1433 		break;
1434 	case RL_HWREV_8103E:
1435 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 |
1436 		    RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP |
1437 		    RL_FLAG_AUTOPAD | RL_FLAG_MACSLEEP;
1438 		break;
1439 	case RL_HWREV_8401E:
1440 	case RL_HWREV_8105E:
1441 	case RL_HWREV_8105E_SPIN1:
1442 	case RL_HWREV_8106E:
1443 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
1444 		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
1445 		    RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD;
1446 		break;
1447 	case RL_HWREV_8402:
1448 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
1449 		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
1450 		    RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD |
1451 		    RL_FLAG_CMDSTOP_WAIT_TXQ;
1452 		break;
1453 	case RL_HWREV_8168B_SPIN1:
1454 	case RL_HWREV_8168B_SPIN2:
1455 		sc->rl_flags |= RL_FLAG_WOLRXENB;
1456 		/* FALLTHROUGH */
1457 	case RL_HWREV_8168B_SPIN3:
1458 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT;
1459 		break;
1460 	case RL_HWREV_8168C_SPIN2:
1461 		sc->rl_flags |= RL_FLAG_MACSLEEP;
1462 		/* FALLTHROUGH */
1463 	case RL_HWREV_8168C:
1464 		if (sc->rl_macrev == 0x00200000)
1465 			sc->rl_flags |= RL_FLAG_MACSLEEP;
1466 		/* FALLTHROUGH */
1467 	case RL_HWREV_8168CP:
1468 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
1469 		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
1470 		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK;
1471 		break;
1472 	case RL_HWREV_8168D:
1473 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
1474 		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
1475 		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
1476 		    RL_FLAG_WOL_MANLINK;
1477 		break;
1478 	case RL_HWREV_8168DP:
1479 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
1480 		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_AUTOPAD |
1481 		    RL_FLAG_JUMBOV2 | RL_FLAG_WAIT_TXPOLL | RL_FLAG_WOL_MANLINK;
1482 		break;
1483 	case RL_HWREV_8168E:
1484 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
1485 		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
1486 		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
1487 		    RL_FLAG_WOL_MANLINK;
1488 		break;
1489 	case RL_HWREV_8168E_VL:
1490 	case RL_HWREV_8168F:
1491 		sc->rl_flags |= RL_FLAG_EARLYOFF;
1492 		/* FALLTHROUGH */
1493 	case RL_HWREV_8411:
1494 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
1495 		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
1496 		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
1497 		    RL_FLAG_CMDSTOP_WAIT_TXQ | RL_FLAG_WOL_MANLINK;
1498 		break;
1499 	case RL_HWREV_8168EP:
1500 	case RL_HWREV_8168G:
1501 	case RL_HWREV_8411B:
1502 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
1503 		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
1504 		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
1505 		    RL_FLAG_CMDSTOP_WAIT_TXQ | RL_FLAG_WOL_MANLINK |
1506 		    RL_FLAG_8168G_PLUS;
1507 		break;
1508 	case RL_HWREV_8168GU:
1509 	case RL_HWREV_8168H:
1510 		if (pci_get_device(dev) == RT_DEVICEID_8101E) {
1511 			/* RTL8106E(US), RTL8107E */
1512 			sc->rl_flags |= RL_FLAG_FASTETHER;
1513 		} else
1514 			sc->rl_flags |= RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK;
1515 
1516 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
1517 		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
1518 		    RL_FLAG_AUTOPAD | RL_FLAG_CMDSTOP_WAIT_TXQ |
1519 		    RL_FLAG_8168G_PLUS;
1520 		break;
1521 	case RL_HWREV_8169_8110SB:
1522 	case RL_HWREV_8169_8110SBL:
1523 	case RL_HWREV_8169_8110SC:
1524 	case RL_HWREV_8169_8110SCE:
1525 		sc->rl_flags |= RL_FLAG_PHYWAKE;
1526 		/* FALLTHROUGH */
1527 	case RL_HWREV_8169:
1528 	case RL_HWREV_8169S:
1529 	case RL_HWREV_8110S:
1530 		sc->rl_flags |= RL_FLAG_MACRESET;
1531 		break;
1532 	default:
1533 		break;
1534 	}
1535 
1536 	if (sc->rl_hwrev->rl_rev == RL_HWREV_8139CPLUS) {
1537 		sc->rl_cfg0 = RL_8139_CFG0;
1538 		sc->rl_cfg1 = RL_8139_CFG1;
1539 		sc->rl_cfg2 = 0;
1540 		sc->rl_cfg3 = RL_8139_CFG3;
1541 		sc->rl_cfg4 = RL_8139_CFG4;
1542 		sc->rl_cfg5 = RL_8139_CFG5;
1543 	} else {
1544 		sc->rl_cfg0 = RL_CFG0;
1545 		sc->rl_cfg1 = RL_CFG1;
1546 		sc->rl_cfg2 = RL_CFG2;
1547 		sc->rl_cfg3 = RL_CFG3;
1548 		sc->rl_cfg4 = RL_CFG4;
1549 		sc->rl_cfg5 = RL_CFG5;
1550 	}
1551 
1552 	/* Reset the adapter. */
1553 	RL_LOCK(sc);
1554 	re_reset(sc);
1555 	RL_UNLOCK(sc);
1556 
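	/*
	 * The CFG registers are only writable while the EECMD register
	 * is in config-write mode, hence the RL_EE_MODE/RL_EEMODE_OFF
	 * bracketing around the PME update below.
	 */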
1557 	/* Enable PME. */
1558 	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
1559 	cfg = CSR_READ_1(sc, sc->rl_cfg1);
1560 	cfg |= RL_CFG1_PME;
1561 	CSR_WRITE_1(sc, sc->rl_cfg1, cfg);
1562 	cfg = CSR_READ_1(sc, sc->rl_cfg5);
1563 	cfg &= RL_CFG5_PME_STS;
1564 	CSR_WRITE_1(sc, sc->rl_cfg5, cfg);
1565 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
1566 
1567 	if ((sc->rl_flags & RL_FLAG_PAR) != 0) {
1568 		/*
1569 		 * XXX Should have a better way to extract station
1570 		 * address from EEPROM.
1571 		 */
1572 		for (i = 0; i < ETHER_ADDR_LEN; i++)
1573 			eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
1574 	} else {
1575 		sc->rl_eewidth = RL_9356_ADDR_LEN;
1576 		re_read_eeprom(sc, (caddr_t)&re_did, 0, 1);
1577 		if (re_did != 0x8129)
1578 			sc->rl_eewidth = RL_9346_ADDR_LEN;
1579 
1580 		/*
1581 		 * Get station address from the EEPROM.
1582 		 */
1583 		re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3);
1584 		for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
1585 			as[i] = le16toh(as[i]);
1586 		bcopy(as, eaddr, ETHER_ADDR_LEN);
1587 	}
1588 
1589 	if (sc->rl_type == RL_8169) {
1590 		/* Set RX length mask and number of descriptors. */
1591 		sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN;
1592 		sc->rl_txstart = RL_GTXSTART;
1593 		sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT;
1594 		sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT;
1595 	} else {
1596 		/* Set RX length mask and number of descriptors. */
1597 		sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN;
1598 		sc->rl_txstart = RL_TXSTART;
1599 		sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT;
1600 		sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT;
1601 	}
1602 
1603 	error = re_allocmem(dev, sc);
1604 	if (error)
1605 		goto fail;
1606 	re_add_sysctls(sc);
1607 
1608 	ifp = sc->rl_ifp = if_alloc(IFT_ETHER);
1609 	if (ifp == NULL) {
1610 		device_printf(dev, "cannot if_alloc()\n");
1611 		error = ENOSPC;
1612 		goto fail;
1613 	}
1614 
1615 	/* Take controller out of deep sleep mode. */
1616 	if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
1617 		if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
1618 			CSR_WRITE_1(sc, RL_GPIO,
1619 			    CSR_READ_1(sc, RL_GPIO) | 0x01);
1620 		else
1621 			CSR_WRITE_1(sc, RL_GPIO,
1622 			    CSR_READ_1(sc, RL_GPIO) & ~0x01);
1623 	}
1624 
1625 	/* Take PHY out of power down mode. */
1626 	if ((sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0) {
1627 		CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80);
1628 		if (hw_rev->rl_rev == RL_HWREV_8401E)
1629 			CSR_WRITE_1(sc, 0xD1, CSR_READ_1(sc, 0xD1) & ~0x08);
1630 	}
1631 	if ((sc->rl_flags & RL_FLAG_PHYWAKE) != 0) {
1632 		re_gmii_writereg(dev, 1, 0x1f, 0);
1633 		re_gmii_writereg(dev, 1, 0x0e, 0);
1634 	}
1635 
1636 	ifp->if_softc = sc;
1637 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1638 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1639 	ifp->if_ioctl = re_ioctl;
1640 	ifp->if_start = re_start;
1641 	/*
1642 	 * The RTL8168/8111C generates frames with bad IP checksums if
1643 	 * the packet has IP options, so disable TX checksum offloading.
1644 	 */
1645 	if (sc->rl_hwrev->rl_rev == RL_HWREV_8168C ||
1646 	    sc->rl_hwrev->rl_rev == RL_HWREV_8168C_SPIN2 ||
1647 	    sc->rl_hwrev->rl_rev == RL_HWREV_8168CP) {
1648 		ifp->if_hwassist = 0;
1649 		ifp->if_capabilities = IFCAP_RXCSUM | IFCAP_TSO4;
1650 	} else {
1651 		ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP;
1652 		ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4;
1653 	}
1654 	ifp->if_hwassist |= CSUM_TSO;
1655 	ifp->if_capenable = ifp->if_capabilities;
1656 	ifp->if_init = re_init;
1657 	IFQ_SET_MAXLEN(&ifp->if_snd, RL_IFQ_MAXLEN);
1658 	ifp->if_snd.ifq_drv_maxlen = RL_IFQ_MAXLEN;
1659 	IFQ_SET_READY(&ifp->if_snd);
1660 
1661 	NET_TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc);
1662 
1663 #define	RE_PHYAD_INTERNAL	 0
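/*
 * The driver looks for the internal PHY at address 0 on 8139-class
 * chips and at address 1 on 8169-class chips.
 */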
1664 
1665 	/* Do MII setup. */
1666 	phy = RE_PHYAD_INTERNAL;
1667 	if (sc->rl_type == RL_8169)
1668 		phy = 1;
1669 	capmask = BMSR_DEFCAPMASK;
1670 	if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0)
1671 		capmask &= ~BMSR_EXTSTAT;
1672 	error = mii_attach(dev, &sc->rl_miibus, ifp, re_ifmedia_upd,
1673 	    re_ifmedia_sts, capmask, phy, MII_OFFSET_ANY, MIIF_DOPAUSE);
1674 	if (error != 0) {
1675 		device_printf(dev, "attaching PHYs failed\n");
1676 		goto fail;
1677 	}
1678 
1679 	/*
1680 	 * Call MI attach routine.
1681 	 */
1682 	ether_ifattach(ifp, eaddr);
1683 
1684 	/* VLAN capability setup */
1685 	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
1686 	if (ifp->if_capabilities & IFCAP_HWCSUM)
1687 		ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
1688 	/* Enable WOL if PM is supported. */
1689 	if (pci_find_cap(sc->rl_dev, PCIY_PMG, &reg) == 0)
1690 		ifp->if_capabilities |= IFCAP_WOL;
1691 	ifp->if_capenable = ifp->if_capabilities;
1692 	ifp->if_capenable &= ~(IFCAP_WOL_UCAST | IFCAP_WOL_MCAST);
1693 	/*
1694 	 * Don't enable TSO by default.  It is known to generate
1695 	 * corrupted TCP segments (bad TCP options) under certain
1696 	 * circumstances.
1697 	 */
1698 	ifp->if_hwassist &= ~CSUM_TSO;
1699 	ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_VLAN_HWTSO);
1700 #ifdef DEVICE_POLLING
1701 	ifp->if_capabilities |= IFCAP_POLLING;
1702 #endif
1703 	/*
1704 	 * Tell the upper layer(s) we support long frames.
1705 	 * Must appear after the call to ether_ifattach() because
1706 	 * ether_ifattach() sets ifi_hdrlen to the default value.
1707 	 */
1708 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1709 
1710 #ifdef DEV_NETMAP
1711 	re_netmap_attach(sc);
1712 #endif /* DEV_NETMAP */
1713 
1714 #ifdef RE_DIAG
1715 	/*
1716 	 * Perform hardware diagnostic on the original RTL8169.
1717 	 * Some 32-bit cards were incorrectly wired and would
1718 	 * malfunction if plugged into a 64-bit slot.
1719 	 */
1720 	if (hwrev == RL_HWREV_8169) {
1721 		error = re_diag(sc);
1722 		if (error) {
1723 			device_printf(dev,
1724 		    	"attach aborted due to hardware diag failure\n");
1725 			ether_ifdetach(ifp);
1726 			goto fail;
1727 		}
1728 	}
1729 #endif
1730 
1731 #ifdef RE_TX_MODERATION
1732 	intr_filter = 1;
1733 #endif
1734 	/* Hook interrupt last to avoid having to lock softc */
1735 	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 &&
1736 	    intr_filter == 0) {
1737 		error = bus_setup_intr(dev, sc->rl_irq[0],
1738 		    INTR_TYPE_NET | INTR_MPSAFE, NULL, re_intr_msi, sc,
1739 		    &sc->rl_intrhand[0]);
1740 	} else {
1741 		error = bus_setup_intr(dev, sc->rl_irq[0],
1742 		    INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc,
1743 		    &sc->rl_intrhand[0]);
1744 	}
1745 	if (error) {
1746 		device_printf(dev, "couldn't set up irq\n");
1747 		ether_ifdetach(ifp);
1748 		goto fail;
1749 	}
1750 
1751 	DEBUGNET_SET(ifp, re);
1752 
1753 fail:
1754 	if (error)
1755 		re_detach(dev);
1756 
1757 	return (error);
1758 }
1759 
1760 /*
1761  * Shutdown hardware and free up resources. This can be called any
1762  * time after the mutex has been initialized. It is called in both
1763  * the error case in attach and the normal detach case so it needs
1764  * to be careful about only freeing resources that have actually been
1765  * allocated.
1766  */
1767 static int
1768 re_detach(device_t dev)
1769 {
1770 	struct rl_softc		*sc;
1771 	struct ifnet		*ifp;
1772 	int			i, rid;
1773 
1774 	sc = device_get_softc(dev);
1775 	ifp = sc->rl_ifp;
1776 	KASSERT(mtx_initialized(&sc->rl_mtx), ("re mutex not initialized"));
1777 
1778 	/* These should only be active if attach succeeded */
1779 	if (device_is_attached(dev)) {
1780 #ifdef DEVICE_POLLING
1781 		if (ifp->if_capenable & IFCAP_POLLING)
1782 			ether_poll_deregister(ifp);
1783 #endif
1784 		RL_LOCK(sc);
1785 #if 0
1786 		sc->suspended = 1;
1787 #endif
1788 		re_stop(sc);
1789 		RL_UNLOCK(sc);
1790 		callout_drain(&sc->rl_stat_callout);
1791 		taskqueue_drain(taskqueue_fast, &sc->rl_inttask);
1792 		/*
1793 		 * Force off the IFF_UP flag here, in case someone
1794 		 * still had a BPF descriptor attached to this
1795 		 * interface. If they do, ether_ifdetach() will cause
1796 		 * the BPF code to try and clear the promisc mode
1797 		 * flag, which will bubble down to re_ioctl(),
1798 		 * which will try to call re_init() again. This will
1799 		 * turn the NIC back on and restart the MII ticker,
1800 		 * which will panic the system when the kernel tries
1801 		 * to invoke the re_tick() function that isn't there
1802 		 * anymore.
1803 		 */
1804 		ifp->if_flags &= ~IFF_UP;
1805 		ether_ifdetach(ifp);
1806 	}
1807 	if (sc->rl_miibus)
1808 		device_delete_child(dev, sc->rl_miibus);
1809 	bus_generic_detach(dev);
1810 
1811 	/*
1812 	 * The rest is resource deallocation, so we should already be
1813 	 * stopped here.
1814 	 */
1815 
1816 	if (sc->rl_intrhand[0] != NULL) {
1817 		bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]);
1818 		sc->rl_intrhand[0] = NULL;
1819 	}
1820 	if (ifp != NULL) {
1821 #ifdef DEV_NETMAP
1822 		netmap_detach(ifp);
1823 #endif /* DEV_NETMAP */
1824 		if_free(ifp);
1825 	}
1826 	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0)
1827 		rid = 0;
1828 	else
1829 		rid = 1;
1830 	if (sc->rl_irq[0] != NULL) {
1831 		bus_release_resource(dev, SYS_RES_IRQ, rid, sc->rl_irq[0]);
1832 		sc->rl_irq[0] = NULL;
1833 	}
1834 	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0)
1835 		pci_release_msi(dev);
1836 	if (sc->rl_res_pba) {
1837 		rid = PCIR_BAR(4);
1838 		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->rl_res_pba);
1839 	}
1840 	if (sc->rl_res)
1841 		bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id,
1842 		    sc->rl_res);
1843 
1844 	/* Unload and free the RX DMA ring memory and map */
1845 
1846 	if (sc->rl_ldata.rl_rx_list_tag) {
1847 		if (sc->rl_ldata.rl_rx_list_addr)
1848 			bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag,
1849 			    sc->rl_ldata.rl_rx_list_map);
1850 		if (sc->rl_ldata.rl_rx_list)
1851 			bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag,
1852 			    sc->rl_ldata.rl_rx_list,
1853 			    sc->rl_ldata.rl_rx_list_map);
1854 		bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag);
1855 	}
1856 
1857 	/* Unload and free the TX DMA ring memory and map */
1858 
1859 	if (sc->rl_ldata.rl_tx_list_tag) {
1860 		if (sc->rl_ldata.rl_tx_list_addr)
1861 			bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag,
1862 			    sc->rl_ldata.rl_tx_list_map);
1863 		if (sc->rl_ldata.rl_tx_list)
1864 			bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag,
1865 			    sc->rl_ldata.rl_tx_list,
1866 			    sc->rl_ldata.rl_tx_list_map);
1867 		bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag);
1868 	}
1869 
1870 	/* Destroy all the RX and TX buffer maps */
1871 
1872 	if (sc->rl_ldata.rl_tx_mtag) {
1873 		for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
1874 			if (sc->rl_ldata.rl_tx_desc[i].tx_dmamap)
1875 				bus_dmamap_destroy(sc->rl_ldata.rl_tx_mtag,
1876 				    sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
1877 		}
1878 		bus_dma_tag_destroy(sc->rl_ldata.rl_tx_mtag);
1879 	}
1880 	if (sc->rl_ldata.rl_rx_mtag) {
1881 		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1882 			if (sc->rl_ldata.rl_rx_desc[i].rx_dmamap)
1883 				bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
1884 				    sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
1885 		}
1886 		if (sc->rl_ldata.rl_rx_sparemap)
1887 			bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
1888 			    sc->rl_ldata.rl_rx_sparemap);
1889 		bus_dma_tag_destroy(sc->rl_ldata.rl_rx_mtag);
1890 	}
1891 	if (sc->rl_ldata.rl_jrx_mtag) {
1892 		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1893 			if (sc->rl_ldata.rl_jrx_desc[i].rx_dmamap)
1894 				bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag,
1895 				    sc->rl_ldata.rl_jrx_desc[i].rx_dmamap);
1896 		}
1897 		if (sc->rl_ldata.rl_jrx_sparemap)
1898 			bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag,
1899 			    sc->rl_ldata.rl_jrx_sparemap);
1900 		bus_dma_tag_destroy(sc->rl_ldata.rl_jrx_mtag);
1901 	}
1902 	/* Unload and free the stats buffer and map */
1903 
1904 	if (sc->rl_ldata.rl_stag) {
1905 		if (sc->rl_ldata.rl_stats_addr)
1906 			bus_dmamap_unload(sc->rl_ldata.rl_stag,
1907 			    sc->rl_ldata.rl_smap);
1908 		if (sc->rl_ldata.rl_stats)
1909 			bus_dmamem_free(sc->rl_ldata.rl_stag,
1910 			    sc->rl_ldata.rl_stats, sc->rl_ldata.rl_smap);
1911 		bus_dma_tag_destroy(sc->rl_ldata.rl_stag);
1912 	}
1913 
1914 	if (sc->rl_parent_tag)
1915 		bus_dma_tag_destroy(sc->rl_parent_tag);
1916 
1917 	mtx_destroy(&sc->rl_mtx);
1918 
1919 	return (0);
1920 }
1921 
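/*
 * Recycle an RX descriptor in place: hand the existing mbuf back to
 * the chip by re-arming the OWN bit (and EOR on the last descriptor)
 * with the original buffer size, effectively dropping the frame the
 * descriptor currently holds.
 */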
1922 static __inline void
1923 re_discard_rxbuf(struct rl_softc *sc, int idx)
1924 {
1925 	struct rl_desc		*desc;
1926 	struct rl_rxdesc	*rxd;
1927 	uint32_t		cmdstat;
1928 
1929 	if (sc->rl_ifp->if_mtu > RL_MTU &&
1930 	    (sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
1931 		rxd = &sc->rl_ldata.rl_jrx_desc[idx];
1932 	else
1933 		rxd = &sc->rl_ldata.rl_rx_desc[idx];
1934 	desc = &sc->rl_ldata.rl_rx_list[idx];
1935 	desc->rl_vlanctl = 0;
1936 	cmdstat = rxd->rx_size;
1937 	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
1938 		cmdstat |= RL_RDESC_CMD_EOR;
1939 	desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
1940 }
1941 
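/*
 * Attach a fresh 2KB cluster mbuf to RX descriptor 'idx'.  The new
 * mbuf is loaded into the spare DMA map first, so the old mbuf is
 * only torn down once the replacement is known to map; on failure
 * the ring is left untouched and ENOBUFS is returned.
 */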
1942 static int
1943 re_newbuf(struct rl_softc *sc, int idx)
1944 {
1945 	struct mbuf		*m;
1946 	struct rl_rxdesc	*rxd;
1947 	bus_dma_segment_t	segs[1];
1948 	bus_dmamap_t		map;
1949 	struct rl_desc		*desc;
1950 	uint32_t		cmdstat;
1951 	int			error, nsegs;
1952 
1953 	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1954 	if (m == NULL)
1955 		return (ENOBUFS);
1956 
1957 	m->m_len = m->m_pkthdr.len = MCLBYTES;
1958 #ifdef RE_FIXUP_RX
1959 	/*
1960 	 * This is part of an evil trick to deal with non-x86 platforms.
1961 	 * The RealTek chip requires RX buffers to be aligned on 64-bit
1962 	 * boundaries, but that will hose non-x86 machines. To get around
1963 	 * this, we leave some empty space at the start of each buffer
1964 	 * and for non-x86 hosts, we copy the buffer back six bytes
1965 	 * to achieve word alignment. This is slightly more efficient
1966 	 * than allocating a new buffer, copying the contents, and
1967 	 * discarding the old buffer.
1968 	 */
1969 	m_adj(m, RE_ETHER_ALIGN);
1970 #endif
1971 	error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_rx_mtag,
1972 	    sc->rl_ldata.rl_rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
1973 	if (error != 0) {
1974 		m_freem(m);
1975 		return (ENOBUFS);
1976 	}
1977 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1978 
1979 	rxd = &sc->rl_ldata.rl_rx_desc[idx];
1980 	if (rxd->rx_m != NULL) {
1981 		bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap,
1982 		    BUS_DMASYNC_POSTREAD);
1983 		bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap);
1984 	}
1985 
1986 	rxd->rx_m = m;
1987 	map = rxd->rx_dmamap;
1988 	rxd->rx_dmamap = sc->rl_ldata.rl_rx_sparemap;
1989 	rxd->rx_size = segs[0].ds_len;
1990 	sc->rl_ldata.rl_rx_sparemap = map;
1991 	bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap,
1992 	    BUS_DMASYNC_PREREAD);
1993 
1994 	desc = &sc->rl_ldata.rl_rx_list[idx];
1995 	desc->rl_vlanctl = 0;
1996 	desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr));
1997 	desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr));
1998 	cmdstat = segs[0].ds_len;
1999 	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
2000 		cmdstat |= RL_RDESC_CMD_EOR;
2001 	desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
2002 
2003 	return (0);
2004 }
2005 
2006 static int
2007 re_jumbo_newbuf(struct rl_softc *sc, int idx)
2008 {
2009 	struct mbuf		*m;
2010 	struct rl_rxdesc	*rxd;
2011 	bus_dma_segment_t	segs[1];
2012 	bus_dmamap_t		map;
2013 	struct rl_desc		*desc;
2014 	uint32_t		cmdstat;
2015 	int			error, nsegs;
2016 
2017 	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
2018 	if (m == NULL)
2019 		return (ENOBUFS);
2020 	m->m_len = m->m_pkthdr.len = MJUM9BYTES;
2021 #ifdef RE_FIXUP_RX
2022 	m_adj(m, RE_ETHER_ALIGN);
2023 #endif
2024 	error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_jrx_mtag,
2025 	    sc->rl_ldata.rl_jrx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
2026 	if (error != 0) {
2027 		m_freem(m);
2028 		return (ENOBUFS);
2029 	}
2030 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2031 
2032 	rxd = &sc->rl_ldata.rl_jrx_desc[idx];
2033 	if (rxd->rx_m != NULL) {
2034 		bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap,
2035 		    BUS_DMASYNC_POSTREAD);
2036 		bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap);
2037 	}
2038 
2039 	rxd->rx_m = m;
2040 	map = rxd->rx_dmamap;
2041 	rxd->rx_dmamap = sc->rl_ldata.rl_jrx_sparemap;
2042 	rxd->rx_size = segs[0].ds_len;
2043 	sc->rl_ldata.rl_jrx_sparemap = map;
2044 	bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap,
2045 	    BUS_DMASYNC_PREREAD);
2046 
2047 	desc = &sc->rl_ldata.rl_rx_list[idx];
2048 	desc->rl_vlanctl = 0;
2049 	desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr));
2050 	desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr));
2051 	cmdstat = segs[0].ds_len;
2052 	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
2053 		cmdstat |= RL_RDESC_CMD_EOR;
2054 	desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
2055 
2056 	return (0);
2057 }
2058 
2059 #ifdef RE_FIXUP_RX
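/*
 * Copy a received frame back by (RE_ETHER_ALIGN - ETHER_ALIGN) bytes
 * (six, assuming the usual ETHER_ALIGN of 2 and RE_ETHER_ALIGN of 8,
 * per the comment in re_newbuf()).  This leaves the IP header that
 * follows the 14-byte ethernet header 32-bit aligned, which matters
 * on strict-alignment machines.  The copy runs 16 bits at a time and
 * rounds up, which is safe given the headroom left by m_adj().
 */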
2060 static __inline void
2061 re_fixup_rx(struct mbuf *m)
2062 {
2063 	int                     i;
2064 	uint16_t                *src, *dst;
2065 
2066 	src = mtod(m, uint16_t *);
2067 	dst = src - (RE_ETHER_ALIGN - ETHER_ALIGN) / sizeof *src;
2068 
2069 	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
2070 		*dst++ = *src++;
2071 
2072 	m->m_data -= RE_ETHER_ALIGN - ETHER_ALIGN;
2073 }
2074 #endif
2075 
2076 static int
2077 re_tx_list_init(struct rl_softc *sc)
2078 {
2079 	struct rl_desc		*desc;
2080 	int			i;
2081 
2082 	RL_LOCK_ASSERT(sc);
2083 
2084 	bzero(sc->rl_ldata.rl_tx_list,
2085 	    sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc));
2086 	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++)
2087 		sc->rl_ldata.rl_tx_desc[i].tx_m = NULL;
2088 #ifdef DEV_NETMAP
2089 	re_netmap_tx_init(sc);
2090 #endif /* DEV_NETMAP */
2091 	/* Set EOR. */
2092 	desc = &sc->rl_ldata.rl_tx_list[sc->rl_ldata.rl_tx_desc_cnt - 1];
2093 	desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOR);
2094 
2095 	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
2096 	    sc->rl_ldata.rl_tx_list_map,
2097 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2098 
2099 	sc->rl_ldata.rl_tx_prodidx = 0;
2100 	sc->rl_ldata.rl_tx_considx = 0;
2101 	sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt;
2102 
2103 	return (0);
2104 }
2105 
2106 static int
2107 re_rx_list_init(struct rl_softc *sc)
2108 {
2109 	int			error, i;
2110 
2111 	bzero(sc->rl_ldata.rl_rx_list,
2112 	    sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc));
2113 	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
2114 		sc->rl_ldata.rl_rx_desc[i].rx_m = NULL;
2115 		if ((error = re_newbuf(sc, i)) != 0)
2116 			return (error);
2117 	}
2118 #ifdef DEV_NETMAP
2119 	re_netmap_rx_init(sc);
2120 #endif /* DEV_NETMAP */
2121 
2122 	/* Flush the RX descriptors */
2123 
2124 	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2125 	    sc->rl_ldata.rl_rx_list_map,
2126 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2127 
2128 	sc->rl_ldata.rl_rx_prodidx = 0;
2129 	sc->rl_head = sc->rl_tail = NULL;
2130 	sc->rl_int_rx_act = 0;
2131 
2132 	return (0);
2133 }
2134 
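/*
 * Same as re_rx_list_init(), but the ring is populated with 9KB jumbo
 * clusters (see re_jumbo_newbuf()) for RL_FLAG_JUMBOV2 controllers
 * running with an MTU above RL_MTU.
 */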
2135 static int
2136 re_jrx_list_init(struct rl_softc *sc)
2137 {
2138 	int			error, i;
2139 
2140 	bzero(sc->rl_ldata.rl_rx_list,
2141 	    sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc));
2142 	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
2143 		sc->rl_ldata.rl_jrx_desc[i].rx_m = NULL;
2144 		if ((error = re_jumbo_newbuf(sc, i)) != 0)
2145 			return (error);
2146 	}
2147 
2148 	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2149 	    sc->rl_ldata.rl_rx_list_map,
2150 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2151 
2152 	sc->rl_ldata.rl_rx_prodidx = 0;
2153 	sc->rl_head = sc->rl_tail = NULL;
2154 	sc->rl_int_rx_act = 0;
2155 
2156 	return (0);
2157 }
2158 
2159 /*
2160  * RX handler for C+ and 8169. For the gigE chips, we support
2161  * the reception of jumbo frames that have been fragmented
2162  * across multiple 2K mbuf cluster buffers.
2163  */
2164 static int
2165 re_rxeof(struct rl_softc *sc, int *rx_npktsp)
2166 {
2167 	struct mbuf		*m;
2168 	struct ifnet		*ifp;
2169 	int			i, rxerr, total_len;
2170 	struct rl_desc		*cur_rx;
2171 	u_int32_t		rxstat, rxvlan;
2172 	int			jumbo, maxpkt = 16, rx_npkts = 0;
2173 
2174 	RL_LOCK_ASSERT(sc);
2175 
2176 	ifp = sc->rl_ifp;
2177 #ifdef DEV_NETMAP
2178 	if (netmap_rx_irq(ifp, 0, &rx_npkts))
2179 		return (0);
2180 #endif /* DEV_NETMAP */
2181 	if (ifp->if_mtu > RL_MTU && (sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
2182 		jumbo = 1;
2183 	else
2184 		jumbo = 0;
2185 
2186 	/* Invalidate the descriptor memory */
2187 
2188 	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2189 	    sc->rl_ldata.rl_rx_list_map,
2190 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2191 
2192 	for (i = sc->rl_ldata.rl_rx_prodidx; maxpkt > 0;
2193 	    i = RL_RX_DESC_NXT(sc, i)) {
2194 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2195 			break;
2196 		cur_rx = &sc->rl_ldata.rl_rx_list[i];
2197 		rxstat = le32toh(cur_rx->rl_cmdstat);
2198 		if ((rxstat & RL_RDESC_STAT_OWN) != 0)
2199 			break;
2200 		total_len = rxstat & sc->rl_rxlenmask;
2201 		rxvlan = le32toh(cur_rx->rl_vlanctl);
2202 		if (jumbo != 0)
2203 			m = sc->rl_ldata.rl_jrx_desc[i].rx_m;
2204 		else
2205 			m = sc->rl_ldata.rl_rx_desc[i].rx_m;
2206 
2207 		if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
2208 		    (rxstat & (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) !=
2209 		    (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) {
2210 			/*
2211 			 * RTL8168C or later controllers do not
2212 			 * support multi-fragment packets.
2213 			 */
2214 			re_discard_rxbuf(sc, i);
2215 			continue;
2216 		} else if ((rxstat & RL_RDESC_STAT_EOF) == 0) {
2217 			if (re_newbuf(sc, i) != 0) {
2218 				/*
2219 				 * If this is part of a multi-fragment packet,
2220 				 * discard all the pieces.
2221 				 */
2222 				if (sc->rl_head != NULL) {
2223 					m_freem(sc->rl_head);
2224 					sc->rl_head = sc->rl_tail = NULL;
2225 				}
2226 				re_discard_rxbuf(sc, i);
2227 				continue;
2228 			}
2229 			m->m_len = RE_RX_DESC_BUFLEN;
2230 			if (sc->rl_head == NULL)
2231 				sc->rl_head = sc->rl_tail = m;
2232 			else {
2233 				m->m_flags &= ~M_PKTHDR;
2234 				sc->rl_tail->m_next = m;
2235 				sc->rl_tail = m;
2236 			}
2237 			continue;
2238 		}
2239 
2240 		/*
2241 		 * NOTE: for the 8139C+, the frame length field
2242 		 * is always 12 bits in size, but for the gigE chips,
2243 		 * it is 13 bits (since the max RX frame length is 16K).
2244 		 * Unfortunately, all 32 bits in the status word
2245 		 * were already used, so to make room for the extra
2246 		 * length bit, RealTek took out the 'frame alignment
2247 		 * error' bit and shifted the other status bits
2248 		 * over one slot. The OWN, EOR, FS and LS bits are
2249 		 * still in the same places. We have already extracted
2250 		 * the frame length and checked the OWN bit, so rather
2251 		 * than using an alternate bit mapping, we shift the
2252 		 * status bits one space to the right so we can evaluate
2253 		 * them using the 8169 status as though it was in the
2254 		 * same format as that of the 8139C+.
2255 		 */
2256 		if (sc->rl_type == RL_8169)
2257 			rxstat >>= 1;
2258 
2259 		/*
2260 		 * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be
2261 		 * set, but if CRC is clear, it will still be a valid frame.
2262 		 */
2263 		if ((rxstat & RL_RDESC_STAT_RXERRSUM) != 0) {
2264 			rxerr = 1;
2265 			if ((sc->rl_flags & RL_FLAG_JUMBOV2) == 0 &&
2266 			    total_len > 8191 &&
2267 			    (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT)
2268 				rxerr = 0;
2269 			if (rxerr != 0) {
2270 				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2271 				/*
2272 				 * If this is part of a multi-fragment packet,
2273 				 * discard all the pieces.
2274 				 */
2275 				if (sc->rl_head != NULL) {
2276 					m_freem(sc->rl_head);
2277 					sc->rl_head = sc->rl_tail = NULL;
2278 				}
2279 				re_discard_rxbuf(sc, i);
2280 				continue;
2281 			}
2282 		}
2283 
2284 		/*
2285 		 * If allocating a replacement mbuf fails,
2286 		 * reload the current one.
2287 		 */
2288 		if (jumbo != 0)
2289 			rxerr = re_jumbo_newbuf(sc, i);
2290 		else
2291 			rxerr = re_newbuf(sc, i);
2292 		if (rxerr != 0) {
2293 			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
2294 			if (sc->rl_head != NULL) {
2295 				m_freem(sc->rl_head);
2296 				sc->rl_head = sc->rl_tail = NULL;
2297 			}
2298 			re_discard_rxbuf(sc, i);
2299 			continue;
2300 		}
2301 
2302 		if (sc->rl_head != NULL) {
2303 			if (jumbo != 0)
2304 				m->m_len = total_len;
2305 			else {
2306 				m->m_len = total_len % RE_RX_DESC_BUFLEN;
2307 				if (m->m_len == 0)
2308 					m->m_len = RE_RX_DESC_BUFLEN;
2309 			}
2310 			/*
2311 			 * Special case: if there are 4 bytes or less
2312 			 * in this buffer, the mbuf can be discarded:
2313 			 * the last 4 bytes are the CRC, which we don't
2314 			 * care about anyway.
2315 			 */
2316 			if (m->m_len <= ETHER_CRC_LEN) {
2317 				sc->rl_tail->m_len -=
2318 				    (ETHER_CRC_LEN - m->m_len);
2319 				m_freem(m);
2320 			} else {
2321 				m->m_len -= ETHER_CRC_LEN;
2322 				m->m_flags &= ~M_PKTHDR;
2323 				sc->rl_tail->m_next = m;
2324 			}
2325 			m = sc->rl_head;
2326 			sc->rl_head = sc->rl_tail = NULL;
2327 			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
2328 		} else
2329 			m->m_pkthdr.len = m->m_len =
2330 			    (total_len - ETHER_CRC_LEN);
2331 
2332 #ifdef RE_FIXUP_RX
2333 		re_fixup_rx(m);
2334 #endif
2335 		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2336 		m->m_pkthdr.rcvif = ifp;
2337 
2338 		/* Do RX checksumming if enabled */
2339 
2340 		if (ifp->if_capenable & IFCAP_RXCSUM) {
2341 			if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
2342 				/* Check IP header checksum */
2343 				if (rxstat & RL_RDESC_STAT_PROTOID)
2344 					m->m_pkthdr.csum_flags |=
2345 					    CSUM_IP_CHECKED;
2346 				if (!(rxstat & RL_RDESC_STAT_IPSUMBAD))
2347 					m->m_pkthdr.csum_flags |=
2348 					    CSUM_IP_VALID;
2349 
2350 				/* Check TCP/UDP checksum */
2351 				if ((RL_TCPPKT(rxstat) &&
2352 				    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
2353 				    (RL_UDPPKT(rxstat) &&
2354 				     !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
2355 					m->m_pkthdr.csum_flags |=
2356 						CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
2357 					m->m_pkthdr.csum_data = 0xffff;
2358 				}
2359 			} else {
2360 				/*
2361 				 * RTL8168C/RTL8168CP/RTL8111C/RTL8111CP
2362 				 */
2363 				if ((rxstat & RL_RDESC_STAT_PROTOID) &&
2364 				    (rxvlan & RL_RDESC_IPV4))
2365 					m->m_pkthdr.csum_flags |=
2366 					    CSUM_IP_CHECKED;
2367 				if (!(rxstat & RL_RDESC_STAT_IPSUMBAD) &&
2368 				    (rxvlan & RL_RDESC_IPV4))
2369 					m->m_pkthdr.csum_flags |=
2370 					    CSUM_IP_VALID;
2371 				if (((rxstat & RL_RDESC_STAT_TCP) &&
2372 				    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
2373 				    ((rxstat & RL_RDESC_STAT_UDP) &&
2374 				    !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
2375 					m->m_pkthdr.csum_flags |=
2376 						CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
2377 					m->m_pkthdr.csum_data = 0xffff;
2378 				}
2379 			}
2380 		}
2381 		maxpkt--;
2382 		if (rxvlan & RL_RDESC_VLANCTL_TAG) {
2383 			m->m_pkthdr.ether_vtag =
2384 			    bswap16((rxvlan & RL_RDESC_VLANCTL_DATA));
2385 			m->m_flags |= M_VLANTAG;
2386 		}
2387 		RL_UNLOCK(sc);
2388 		(*ifp->if_input)(ifp, m);
2389 		RL_LOCK(sc);
2390 		rx_npkts++;
2391 	}
2392 
2393 	/* Flush the RX DMA ring */
2394 
2395 	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2396 	    sc->rl_ldata.rl_rx_list_map,
2397 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2398 
2399 	sc->rl_ldata.rl_rx_prodidx = i;
2400 
2401 	if (rx_npktsp != NULL)
2402 		*rx_npktsp = rx_npkts;
2403 	if (maxpkt)
2404 		return (EAGAIN);
2405 
2406 	return (0);
2407 }
2408 
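/*
 * Reclaim transmitted frames: walk the ring from the consumer index
 * until a descriptor still owned by the chip is found, freeing mbufs
 * (stashed only in the final descriptor of each chain) and updating
 * the output counters from the TX status bits.
 */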
2409 static void
2410 re_txeof(struct rl_softc *sc)
2411 {
2412 	struct ifnet		*ifp;
2413 	struct rl_txdesc	*txd;
2414 	u_int32_t		txstat;
2415 	int			cons;
2416 
2417 	cons = sc->rl_ldata.rl_tx_considx;
2418 	if (cons == sc->rl_ldata.rl_tx_prodidx)
2419 		return;
2420 
2421 	ifp = sc->rl_ifp;
2422 #ifdef DEV_NETMAP
2423 	if (netmap_tx_irq(ifp, 0))
2424 		return;
2425 #endif /* DEV_NETMAP */
2426 	/* Invalidate the TX descriptor list */
2427 	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
2428 	    sc->rl_ldata.rl_tx_list_map,
2429 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2430 
2431 	for (; cons != sc->rl_ldata.rl_tx_prodidx;
2432 	    cons = RL_TX_DESC_NXT(sc, cons)) {
2433 		txstat = le32toh(sc->rl_ldata.rl_tx_list[cons].rl_cmdstat);
2434 		if (txstat & RL_TDESC_STAT_OWN)
2435 			break;
2436 		/*
2437 		 * We only stash mbufs in the last descriptor
2438 		 * in a fragment chain, which also happens to
2439 		 * be the only place where the TX status bits
2440 		 * are valid.
2441 		 */
2442 		if (txstat & RL_TDESC_CMD_EOF) {
2443 			txd = &sc->rl_ldata.rl_tx_desc[cons];
2444 			bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
2445 			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2446 			bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag,
2447 			    txd->tx_dmamap);
2448 			KASSERT(txd->tx_m != NULL,
2449 			    ("%s: freeing NULL mbufs!", __func__));
2450 			m_freem(txd->tx_m);
2451 			txd->tx_m = NULL;
2452 			if (txstat & (RL_TDESC_STAT_EXCESSCOL|
2453 			    RL_TDESC_STAT_COLCNT))
2454 				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
2455 			if (txstat & RL_TDESC_STAT_TXERRSUM)
2456 				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2457 			else
2458 				if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2459 		}
2460 		sc->rl_ldata.rl_tx_free++;
2461 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2462 	}
2463 	sc->rl_ldata.rl_tx_considx = cons;
2464 
2465 	/* No changes made to the TX ring, so no flush needed */
2466 
2467 	if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) {
2468 #ifdef RE_TX_MODERATION
2469 		/*
2470 		 * If not all descriptors have been reaped yet, reload
2471 		 * the timer so that we will eventually get another
2472 		 * interrupt that will cause us to re-enter this routine.
2473 		 * This is done in case the transmitter has gone idle.
2474 		 */
2475 		CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2476 #endif
2477 	} else
2478 		sc->rl_watchdog_timer = 0;
2479 }
2480 
2481 static void
2482 re_tick(void *xsc)
2483 {
2484 	struct rl_softc		*sc;
2485 	struct mii_data		*mii;
2486 
2487 	sc = xsc;
2488 
2489 	RL_LOCK_ASSERT(sc);
2490 
2491 	mii = device_get_softc(sc->rl_miibus);
2492 	mii_tick(mii);
2493 	if ((sc->rl_flags & RL_FLAG_LINK) == 0)
2494 		re_miibus_statchg(sc->rl_dev);
2495 	/*
2496 	 * Reclaim transmitted frames here. Technically it is not
2497 	 * necessary to do it here, but doing so ensures periodic
2498 	 * reclamation regardless of Tx completion interrupts, which
2499 	 * seem to be lost on PCIe based controllers in some situations.
2500 	 */
2501 	re_txeof(sc);
2502 	re_watchdog(sc);
2503 	callout_reset(&sc->rl_stat_callout, hz, re_tick, sc);
2504 }
2505 
2506 #ifdef DEVICE_POLLING
2507 static int
2508 re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2509 {
2510 	struct rl_softc *sc = ifp->if_softc;
2511 	int rx_npkts = 0;
2512 
2513 	RL_LOCK(sc);
2514 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2515 		rx_npkts = re_poll_locked(ifp, cmd, count);
2516 	RL_UNLOCK(sc);
2517 	return (rx_npkts);
2518 }
2519 
2520 static int
2521 re_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
2522 {
2523 	struct rl_softc *sc = ifp->if_softc;
2524 	int rx_npkts;
2525 
2526 	RL_LOCK_ASSERT(sc);
2527 
2528 	sc->rxcycles = count;
2529 	re_rxeof(sc, &rx_npkts);
2530 	re_txeof(sc);
2531 
2532 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2533 		re_start_locked(ifp);
2534 
2535 	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
2536 		u_int16_t       status;
2537 
2538 		status = CSR_READ_2(sc, RL_ISR);
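		/* An all-ones read likely means the hardware is gone. */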
2539 		if (status == 0xffff)
2540 			return (rx_npkts);
2541 		if (status)
2542 			CSR_WRITE_2(sc, RL_ISR, status);
2543 		if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
2544 		    (sc->rl_flags & RL_FLAG_PCIE))
2545 			CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2546 
2547 		/*
2548 		 * XXX check behaviour on receiver stalls.
2549 		 */
2550 
2551 		if (status & RL_ISR_SYSTEM_ERR) {
2552 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2553 			re_init_locked(sc);
2554 		}
2555 	}
2556 	return (rx_npkts);
2557 }
2558 #endif /* DEVICE_POLLING */
2559 
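/*
 * Interrupt filter for legacy (shared, non-MSI) interrupts.  Runs in
 * primary interrupt context: it only checks that the interrupt is
 * really ours, masks the chip's interrupts, and defers the real work
 * to re_int_task() on the fast taskqueue.
 */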
2560 static int
2561 re_intr(void *arg)
2562 {
2563 	struct rl_softc		*sc;
2564 	uint16_t		status;
2565 
2566 	sc = arg;
2567 
2568 	status = CSR_READ_2(sc, RL_ISR);
2569 	if (status == 0xFFFF || (status & RL_INTRS_CPLUS) == 0)
2570 		return (FILTER_STRAY);
2571 	CSR_WRITE_2(sc, RL_IMR, 0);
2572 
2573 	taskqueue_enqueue(taskqueue_fast, &sc->rl_inttask);
2574 
2575 	return (FILTER_HANDLED);
2576 }
2577 
2578 static void
2579 re_int_task(void *arg, int npending)
2580 {
2581 	struct rl_softc		*sc;
2582 	struct ifnet		*ifp;
2583 	u_int16_t		status;
2584 	int			rval = 0;
2585 
2586 	sc = arg;
2587 	ifp = sc->rl_ifp;
2588 
2589 	RL_LOCK(sc);
2590 
2591 	status = CSR_READ_2(sc, RL_ISR);
2592 	CSR_WRITE_2(sc, RL_ISR, status);
2593 
2594 	if (sc->suspended ||
2595 	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2596 		RL_UNLOCK(sc);
2597 		return;
2598 	}
2599 
2600 #ifdef DEVICE_POLLING
2601 	if (ifp->if_capenable & IFCAP_POLLING) {
2602 		RL_UNLOCK(sc);
2603 		return;
2604 	}
2605 #endif
2606 
2607 	if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW))
2608 		rval = re_rxeof(sc, NULL);
2609 
2610 	/*
2611 	 * Some chips will ignore a second TX request issued
2612 	 * while an existing transmission is in progress. If
2613 	 * the transmitter goes idle but there are still
2614 	 * packets waiting to be sent, we need to restart the
2615 	 * channel here to flush them out. This only seems to
2616 	 * be required with the PCIe devices.
2617 	 */
2618 	if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
2619 	    (sc->rl_flags & RL_FLAG_PCIE))
2620 		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2621 	if (status & (
2622 #ifdef RE_TX_MODERATION
2623 	    RL_ISR_TIMEOUT_EXPIRED|
2624 #else
2625 	    RL_ISR_TX_OK|
2626 #endif
2627 	    RL_ISR_TX_ERR|RL_ISR_TX_DESC_UNAVAIL))
2628 		re_txeof(sc);
2629 
2630 	if (status & RL_ISR_SYSTEM_ERR) {
2631 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2632 		re_init_locked(sc);
2633 	}
2634 
2635 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2636 		re_start_locked(ifp);
2637 
2638 	RL_UNLOCK(sc);
2639 
2640 	if ((CSR_READ_2(sc, RL_ISR) & RL_INTRS_CPLUS) || rval) {
2641 		taskqueue_enqueue(taskqueue_fast, &sc->rl_inttask);
2642 		return;
2643 	}
2644 
2645 	CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
2646 }
2647 
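/*
 * Interrupt handler used with MSI/MSI-X.  Unlike re_intr(), the vector
 * is not shared, so the work is done directly here with the softc lock
 * held.  RX interrupt moderation works by masking the RX interrupt
 * sources and re-arming the one-shot timer while RX traffic is active
 * (sc->rl_int_rx_act), so further receive processing is timer-driven.
 */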
2648 static void
2649 re_intr_msi(void *xsc)
2650 {
2651 	struct rl_softc		*sc;
2652 	struct ifnet		*ifp;
2653 	uint16_t		intrs, status;
2654 
2655 	sc = xsc;
2656 	RL_LOCK(sc);
2657 
2658 	ifp = sc->rl_ifp;
2659 #ifdef DEVICE_POLLING
2660 	if (ifp->if_capenable & IFCAP_POLLING) {
2661 		RL_UNLOCK(sc);
2662 		return;
2663 	}
2664 #endif
2665 	/* Disable interrupts. */
2666 	CSR_WRITE_2(sc, RL_IMR, 0);
2667 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2668 		RL_UNLOCK(sc);
2669 		return;
2670 	}
2671 
2672 	intrs = RL_INTRS_CPLUS;
2673 	status = CSR_READ_2(sc, RL_ISR);
2674 	CSR_WRITE_2(sc, RL_ISR, status);
2675 	if (sc->rl_int_rx_act > 0) {
2676 		intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW |
2677 		    RL_ISR_RX_OVERRUN);
2678 		status &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW |
2679 		    RL_ISR_RX_OVERRUN);
2680 	}
2681 
2682 	if (status & (RL_ISR_TIMEOUT_EXPIRED | RL_ISR_RX_OK | RL_ISR_RX_ERR |
2683 	    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) {
2684 		re_rxeof(sc, NULL);
2685 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2686 			if (sc->rl_int_rx_mod != 0 &&
2687 			    (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR |
2688 			    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) != 0) {
2689 				/* Rearm one-shot timer. */
2690 				CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2691 				intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR |
2692 				    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN);
2693 				sc->rl_int_rx_act = 1;
2694 			} else {
2695 				intrs |= RL_ISR_RX_OK | RL_ISR_RX_ERR |
2696 				    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN;
2697 				sc->rl_int_rx_act = 0;
2698 			}
2699 		}
2700 	}
2701 
2702 	/*
2703 	 * Some chips will ignore a second TX request issued
2704 	 * while an existing transmission is in progress. If
2705 	 * the transmitter goes idle but there are still
2706 	 * packets waiting to be sent, we need to restart the
2707 	 * channel here to flush them out. This only seems to
2708 	 * be required with the PCIe devices.
2709 	 */
2710 	if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
2711 	    (sc->rl_flags & RL_FLAG_PCIE))
2712 		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2713 	if (status & (RL_ISR_TX_OK | RL_ISR_TX_ERR | RL_ISR_TX_DESC_UNAVAIL))
2714 		re_txeof(sc);
2715 
2716 	if (status & RL_ISR_SYSTEM_ERR) {
2717 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2718 		re_init_locked(sc);
2719 	}
2720 
2721 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2722 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2723 			re_start_locked(ifp);
2724 		CSR_WRITE_2(sc, RL_IMR, intrs);
2725 	}
2726 	RL_UNLOCK(sc);
2727 }
2728 
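/*
 * Encapsulate an mbuf chain in the TX ring.  Short frames may first be
 * manually padded (see the workaround below), then the chain is
 * DMA-mapped, collapsing it if it needs more than RL_NTXSEGS segments,
 * and one descriptor is filled per segment.  If the ring is out of
 * free descriptors, ENOBUFS is returned with the mbuf left intact so
 * the caller can requeue it.
 */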
2729 static int
2730 re_encap(struct rl_softc *sc, struct mbuf **m_head)
2731 {
2732 	struct rl_txdesc	*txd, *txd_last;
2733 	bus_dma_segment_t	segs[RL_NTXSEGS];
2734 	bus_dmamap_t		map;
2735 	struct mbuf		*m_new;
2736 	struct rl_desc		*desc;
2737 	int			nsegs, prod;
2738 	int			i, error, ei, si;
2739 	int			padlen;
2740 	uint32_t		cmdstat, csum_flags, vlanctl;
2741 
2742 	RL_LOCK_ASSERT(sc);
2743 	M_ASSERTPKTHDR((*m_head));
2744 
2745 	/*
2746 	 * With some of the RealTek chips, using the checksum offload
2747 	 * support in conjunction with the autopadding feature results
2748 	 * in the transmission of corrupt frames. For example, if we
2749 	 * need to send a really small IP fragment that's less than 60
2750 	 * bytes in size, and IP header checksumming is enabled, the
2751 	 * resulting ethernet frame that appears on the wire will
2752 	 * have a garbled payload. To work around this, if TX IP checksum
2753 	 * offload is enabled, we always manually pad short frames out
2754 	 * to the minimum ethernet frame size.
2755 	 */
2756 	if ((sc->rl_flags & RL_FLAG_AUTOPAD) == 0 &&
2757 	    (*m_head)->m_pkthdr.len < RL_IP4CSUMTX_PADLEN &&
2758 	    ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) != 0) {
2759 		padlen = RL_MIN_FRAMELEN - (*m_head)->m_pkthdr.len;
2760 		if (M_WRITABLE(*m_head) == 0) {
2761 			/* Get a writable copy. */
2762 			m_new = m_dup(*m_head, M_NOWAIT);
2763 			m_freem(*m_head);
2764 			if (m_new == NULL) {
2765 				*m_head = NULL;
2766 				return (ENOBUFS);
2767 			}
2768 			*m_head = m_new;
2769 		}
2770 		if ((*m_head)->m_next != NULL ||
2771 		    M_TRAILINGSPACE(*m_head) < padlen) {
2772 			m_new = m_defrag(*m_head, M_NOWAIT);
2773 			if (m_new == NULL) {
2774 				m_freem(*m_head);
2775 				*m_head = NULL;
2776 				return (ENOBUFS);
2777 			}
2778 		} else
2779 			m_new = *m_head;
2780 
2781 		/*
2782 		 * Manually pad short frames, and zero the pad space
2783 		 * to avoid leaking data.
2784 		 */
2785 		bzero(mtod(m_new, char *) + m_new->m_pkthdr.len, padlen);
2786 		m_new->m_pkthdr.len += padlen;
2787 		m_new->m_len = m_new->m_pkthdr.len;
2788 		*m_head = m_new;
2789 	}
2790 
2791 	prod = sc->rl_ldata.rl_tx_prodidx;
2792 	txd = &sc->rl_ldata.rl_tx_desc[prod];
2793 	error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
2794 	    *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2795 	if (error == EFBIG) {
2796 		m_new = m_collapse(*m_head, M_NOWAIT, RL_NTXSEGS);
2797 		if (m_new == NULL) {
2798 			m_freem(*m_head);
2799 			*m_head = NULL;
2800 			return (ENOBUFS);
2801 		}
2802 		*m_head = m_new;
2803 		error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag,
2804 		    txd->tx_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2805 		if (error != 0) {
2806 			m_freem(*m_head);
2807 			*m_head = NULL;
2808 			return (error);
2809 		}
2810 	} else if (error != 0)
2811 		return (error);
2812 	if (nsegs == 0) {
2813 		m_freem(*m_head);
2814 		*m_head = NULL;
2815 		return (EIO);
2816 	}
2817 
2818 	/* Check for number of available descriptors. */
2819 	if (sc->rl_ldata.rl_tx_free - nsegs <= 1) {
2820 		bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap);
2821 		return (ENOBUFS);
2822 	}
2823 
2824 	bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
2825 	    BUS_DMASYNC_PREWRITE);
2826 
2827 	/*
2828 	 * Set up checksum offload. Note: the checksum offload bits must
2829 	 * appear in all descriptors of a multi-descriptor transmit
2830 	 * attempt; this is a hard requirement, verified by testing
2831 	 * with an 8169 chip.
2832 	 */
2833 	vlanctl = 0;
2834 	csum_flags = 0;
2835 	if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2836 		if ((sc->rl_flags & RL_FLAG_DESCV2) != 0) {
2837 			csum_flags |= RL_TDESC_CMD_LGSEND;
2838 			vlanctl |= ((uint32_t)(*m_head)->m_pkthdr.tso_segsz <<
2839 			    RL_TDESC_CMD_MSSVALV2_SHIFT);
2840 		} else {
2841 			csum_flags |= RL_TDESC_CMD_LGSEND |
2842 			    ((uint32_t)(*m_head)->m_pkthdr.tso_segsz <<
2843 			    RL_TDESC_CMD_MSSVAL_SHIFT);
2844 		}
2845 	} else {
2846 		/*
2847 		 * Unconditionally enable IP checksum if TCP or UDP
2848 		 * checksum is required; otherwise, requesting TCP/UDP
2849 		 * checksum offload has no effect.
2850 		 */
2851 		if (((*m_head)->m_pkthdr.csum_flags & RE_CSUM_FEATURES) != 0) {
2852 			if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
2853 				csum_flags |= RL_TDESC_CMD_IPCSUM;
2854 				if (((*m_head)->m_pkthdr.csum_flags &
2855 				    CSUM_TCP) != 0)
2856 					csum_flags |= RL_TDESC_CMD_TCPCSUM;
2857 				if (((*m_head)->m_pkthdr.csum_flags &
2858 				    CSUM_UDP) != 0)
2859 					csum_flags |= RL_TDESC_CMD_UDPCSUM;
2860 			} else {
2861 				vlanctl |= RL_TDESC_CMD_IPCSUMV2;
2862 				if (((*m_head)->m_pkthdr.csum_flags &
2863 				    CSUM_TCP) != 0)
2864 					vlanctl |= RL_TDESC_CMD_TCPCSUMV2;
2865 				if (((*m_head)->m_pkthdr.csum_flags &
2866 				    CSUM_UDP) != 0)
2867 					vlanctl |= RL_TDESC_CMD_UDPCSUMV2;
2868 			}
2869 		}
2870 	}
2871 
2872 	/*
2873 	 * Set up hardware VLAN tagging. Note: vlan tag info must
2874 	 * appear in all descriptors of a multi-descriptor
2875 	 * transmission attempt.
2876 	 */
2877 	if ((*m_head)->m_flags & M_VLANTAG)
2878 		vlanctl |= bswap16((*m_head)->m_pkthdr.ether_vtag) |
2879 		    RL_TDESC_VLANCTL_TAG;
2880 
2881 	si = prod;
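	/*
	 * Note: the OWN bit is deliberately left clear on the first
	 * descriptor here and set last (below, together with SOF), so
	 * the chip cannot start DMA on a partially built chain.
	 */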
2882 	for (i = 0; i < nsegs; i++, prod = RL_TX_DESC_NXT(sc, prod)) {
2883 		desc = &sc->rl_ldata.rl_tx_list[prod];
2884 		desc->rl_vlanctl = htole32(vlanctl);
2885 		desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr));
2886 		desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr));
2887 		cmdstat = segs[i].ds_len;
2888 		if (i != 0)
2889 			cmdstat |= RL_TDESC_CMD_OWN;
2890 		if (prod == sc->rl_ldata.rl_tx_desc_cnt - 1)
2891 			cmdstat |= RL_TDESC_CMD_EOR;
2892 		desc->rl_cmdstat = htole32(cmdstat | csum_flags);
2893 		sc->rl_ldata.rl_tx_free--;
2894 	}
2895 	/* Update producer index. */
2896 	sc->rl_ldata.rl_tx_prodidx = prod;
2897 
2898 	/* Set EOF on the last descriptor. */
2899 	ei = RL_TX_DESC_PRV(sc, prod);
2900 	desc = &sc->rl_ldata.rl_tx_list[ei];
2901 	desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF);
2902 
2903 	desc = &sc->rl_ldata.rl_tx_list[si];
2904 	/* Set SOF and transfer ownership of packet to the chip. */
2905 	desc->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF);
2906 
2907 	/*
2908 	 * Ensure that the map for this transmission
2909 	 * is placed at the array index of the last descriptor
2910 	 * in this chain.  (Swap last and first dmamaps.)
2911 	 */
2912 	txd_last = &sc->rl_ldata.rl_tx_desc[ei];
2913 	map = txd->tx_dmamap;
2914 	txd->tx_dmamap = txd_last->tx_dmamap;
2915 	txd_last->tx_dmamap = map;
2916 	txd_last->tx_m = *m_head;
2917 
2918 	return (0);
2919 }
2920 
2921 static void
2922 re_start(struct ifnet *ifp)
2923 {
2924 	struct rl_softc		*sc;
2925 
2926 	sc = ifp->if_softc;
2927 	RL_LOCK(sc);
2928 	re_start_locked(ifp);
2929 	RL_UNLOCK(sc);
2930 }
2931 
2932 /*
2933  * Main transmit routine for C+ and gigE NICs.
2934  */
2935 static void
2936 re_start_locked(struct ifnet *ifp)
2937 {
2938 	struct rl_softc		*sc;
2939 	struct mbuf		*m_head;
2940 	int			queued;
2941 
2942 	sc = ifp->if_softc;
2943 
2944 #ifdef DEV_NETMAP
2945 	/* XXX is this necessary? */
2946 	if (ifp->if_capenable & IFCAP_NETMAP) {
2947 		struct netmap_kring *kring = NA(ifp)->tx_rings[0];
2948 		if (sc->rl_ldata.rl_tx_prodidx != kring->nr_hwcur) {
2949 			/* kick the tx unit */
2950 			CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2951 #ifdef RE_TX_MODERATION
2952 			CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2953 #endif
2954 			sc->rl_watchdog_timer = 5;
2955 		}
2956 		return;
2957 	}
2958 #endif /* DEV_NETMAP */
2959 
2960 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2961 	    IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0)
2962 		return;
2963 
2964 	for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2965 	    sc->rl_ldata.rl_tx_free > 1;) {
2966 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2967 		if (m_head == NULL)
2968 			break;
2969 
2970 		if (re_encap(sc, &m_head) != 0) {
2971 			if (m_head == NULL)
2972 				break;
2973 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2974 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2975 			break;
2976 		}
2977 
2978 		/*
2979 		 * If there's a BPF listener, bounce a copy of this frame
2980 		 * to him.
2981 		 */
2982 		ETHER_BPF_MTAP(ifp, m_head);
2983 
2984 		queued++;
2985 	}
2986 
2987 	if (queued == 0) {
2988 #ifdef RE_TX_MODERATION
2989 		if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt)
2990 			CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2991 #endif
2992 		return;
2993 	}
2994 
2995 	re_start_tx(sc);
2996 }
2997 
2998 static void
2999 re_start_tx(struct rl_softc *sc)
3000 {
3001 
3002 	/* Flush the TX descriptors */
3003 	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
3004 	    sc->rl_ldata.rl_tx_list_map,
3005 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
3006 
3007 	CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
3008 
3009 #ifdef RE_TX_MODERATION
3010 	/*
3011 	 * Use the countdown timer for interrupt moderation.
3012 	 * 'TX done' interrupts are disabled. Instead, we reset the
3013 	 * countdown timer, which will begin counting until it hits
3014 	 * the value in the TIMERINT register, and then trigger an
3015 	 * interrupt. Each time we write to the TIMERCNT register,
3016 	 * the timer count is reset to 0.
3017 	 */
3018 	CSR_WRITE_4(sc, RL_TIMERCNT, 1);
3019 #endif
3020 
3021 	/*
3022 	 * Set a timeout in case the chip goes out to lunch.
3023 	 */
3024 	sc->rl_watchdog_timer = 5;
3025 }
3026 
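/*
 * Enable or disable jumbo frame support.  The jumbo enable bits live
 * in the EEPROM-protected config registers, so the writes are
 * bracketed by RL_EEMODE_WRITECFG/RL_EEMODE_OFF.  Most revisions also
 * need the PCIe maximum read request size dropped to 512 bytes while
 * jumbo frames are enabled.
 */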
3027 static void
3028 re_set_jumbo(struct rl_softc *sc, int jumbo)
3029 {
3030 
3031 	if (sc->rl_hwrev->rl_rev == RL_HWREV_8168E_VL) {
3032 		pci_set_max_read_req(sc->rl_dev, 4096);
3033 		return;
3034 	}
3035 
3036 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
3037 	if (jumbo != 0) {
3038 		CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) |
3039 		    RL_CFG3_JUMBO_EN0);
3040 		switch (sc->rl_hwrev->rl_rev) {
3041 		case RL_HWREV_8168DP:
3042 			break;
3043 		case RL_HWREV_8168E:
3044 			CSR_WRITE_1(sc, sc->rl_cfg4,
3045 			    CSR_READ_1(sc, sc->rl_cfg4) | 0x01);
3046 			break;
3047 		default:
3048 			CSR_WRITE_1(sc, sc->rl_cfg4,
3049 			    CSR_READ_1(sc, sc->rl_cfg4) | RL_CFG4_JUMBO_EN1);
3050 		}
3051 	} else {
3052 		CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) &
3053 		    ~RL_CFG3_JUMBO_EN0);
3054 		switch (sc->rl_hwrev->rl_rev) {
3055 		case RL_HWREV_8168DP:
3056 			break;
3057 		case RL_HWREV_8168E:
3058 			CSR_WRITE_1(sc, sc->rl_cfg4,
3059 			    CSR_READ_1(sc, sc->rl_cfg4) & ~0x01);
3060 			break;
3061 		default:
3062 			CSR_WRITE_1(sc, sc->rl_cfg4,
3063 			    CSR_READ_1(sc, sc->rl_cfg4) & ~RL_CFG4_JUMBO_EN1);
3064 		}
3065 	}
3066 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3067 
3068 	switch (sc->rl_hwrev->rl_rev) {
3069 	case RL_HWREV_8168DP:
3070 		pci_set_max_read_req(sc->rl_dev, 4096);
3071 		break;
3072 	default:
3073 		if (jumbo != 0)
3074 			pci_set_max_read_req(sc->rl_dev, 512);
3075 		else
3076 			pci_set_max_read_req(sc->rl_dev, 4096);
3077 	}
3078 }
3079 
3080 static void
3081 re_init(void *xsc)
3082 {
3083 	struct rl_softc		*sc = xsc;
3084 
3085 	RL_LOCK(sc);
3086 	re_init_locked(sc);
3087 	RL_UNLOCK(sc);
3088 }
3089 
3090 static void
3091 re_init_locked(struct rl_softc *sc)
3092 {
3093 	struct ifnet		*ifp = sc->rl_ifp;
3094 	struct mii_data		*mii;
3095 	uint32_t		reg;
3096 	uint16_t		cfg;
3097 	union {
3098 		uint32_t align_dummy;
3099 		u_char eaddr[ETHER_ADDR_LEN];
3100 	} eaddr;
3101 
3102 	RL_LOCK_ASSERT(sc);
3103 
3104 	mii = device_get_softc(sc->rl_miibus);
3105 
3106 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3107 		return;
3108 
3109 	/*
3110 	 * Cancel pending I/O and free all RX/TX buffers.
3111 	 */
3112 	re_stop(sc);
3113 
3114 	/* Put controller into known state. */
3115 	re_reset(sc);
3116 
3117 	/*
3118 	 * For C+ mode, initialize the RX descriptors and mbufs.
3119 	 */
3120 	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
3121 		if (ifp->if_mtu > RL_MTU) {
3122 			if (re_jrx_list_init(sc) != 0) {
3123 				device_printf(sc->rl_dev,
3124 				    "no memory for jumbo RX buffers\n");
3125 				re_stop(sc);
3126 				return;
3127 			}
3128 			/* Disable checksum offloading for jumbo frames. */
3129 			ifp->if_capenable &= ~(IFCAP_HWCSUM | IFCAP_TSO4);
3130 			ifp->if_hwassist &= ~(RE_CSUM_FEATURES | CSUM_TSO);
3131 		} else {
3132 			if (re_rx_list_init(sc) != 0) {
3133 				device_printf(sc->rl_dev,
3134 				    "no memory for RX buffers\n");
3135 				re_stop(sc);
3136 				return;
3137 			}
3138 		}
3139 		re_set_jumbo(sc, ifp->if_mtu > RL_MTU);
3140 	} else {
3141 		if (re_rx_list_init(sc) != 0) {
3142 			device_printf(sc->rl_dev, "no memory for RX buffers\n");
3143 			re_stop(sc);
3144 			return;
3145 		}
3146 		if ((sc->rl_flags & RL_FLAG_PCIE) != 0 &&
3147 		    pci_get_device(sc->rl_dev) != RT_DEVICEID_8101E) {
3148 			if (ifp->if_mtu > RL_MTU)
3149 				pci_set_max_read_req(sc->rl_dev, 512);
3150 			else
3151 				pci_set_max_read_req(sc->rl_dev, 4096);
3152 		}
3153 	}
3154 	re_tx_list_init(sc);
3155 
3156 	/*
3157 	 * Enable C+ RX and TX mode, as well as VLAN stripping and
3158 	 * RX checksum offload. We must configure the C+ register
3159 	 * before all others.
3160 	 */
3161 	cfg = RL_CPLUSCMD_PCI_MRW;
3162 	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
3163 		cfg |= RL_CPLUSCMD_RXCSUM_ENB;
3164 	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3165 		cfg |= RL_CPLUSCMD_VLANSTRIP;
3166 	if ((sc->rl_flags & RL_FLAG_MACSTAT) != 0) {
3167 		cfg |= RL_CPLUSCMD_MACSTAT_DIS;
3168 		/* XXX magic. */
3169 		cfg |= 0x0001;
3170 	} else
3171 		cfg |= RL_CPLUSCMD_RXENB | RL_CPLUSCMD_TXENB;
3172 	CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg);
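	/*
	 * Undocumented tuning for the 8169/8110SC(E) parts: the value
	 * written to register 0x7c presumably comes from the vendor
	 * driver, and the meaning of the individual bits is not known.
	 */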
3173 	if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SC ||
3174 	    sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE) {
3175 		reg = 0x000fff00;
3176 		if ((CSR_READ_1(sc, sc->rl_cfg2) & RL_CFG2_PCI66MHZ) != 0)
3177 			reg |= 0x000000ff;
3178 		if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE)
3179 			reg |= 0x00f00000;
3180 		CSR_WRITE_4(sc, 0x7c, reg);
3181 		/* Disable interrupt mitigation. */
3182 		CSR_WRITE_2(sc, 0xe2, 0);
3183 	}
3184 	/*
3185 	 * Disable TSO if the interface MTU is greater than the MSS
3186 	 * the controller allows.
3187 	 */
3188 	if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) {
3189 		ifp->if_capenable &= ~IFCAP_TSO4;
3190 		ifp->if_hwassist &= ~CSUM_TSO;
3191 	}
3192 
3193 	/*
3194 	 * Init our MAC address.  Even though the chipset
3195 	 * documentation doesn't mention it, we need to enter "Config
3196 	 * register write enable" mode to modify the ID registers.
3197 	 */
3198 	/* Copy MAC address on stack to align. */
3199 	bcopy(IF_LLADDR(ifp), eaddr.eaddr, ETHER_ADDR_LEN);
3200 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
3201 	CSR_WRITE_4(sc, RL_IDR0,
3202 	    htole32(*(u_int32_t *)(&eaddr.eaddr[0])));
3203 	CSR_WRITE_4(sc, RL_IDR4,
3204 	    htole32(*(u_int32_t *)(&eaddr.eaddr[4])));
3205 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3206 
3207 	/*
3208 	 * Load the addresses of the RX and TX lists into the chip.
3209 	 */
3210 
3211 	CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI,
3212 	    RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr));
3213 	CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO,
3214 	    RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr));
3215 
3216 	CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI,
3217 	    RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr));
3218 	CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO,
3219 	    RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr));
3220 
3221 	if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) {
3222 		/* Disable RXDV gate. */
3223 		CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) &
3224 		    ~0x00080000);
3225 	}
3226 
3227 	/*
3228 	 * Enable transmit and receive for pre-RTL8168G controllers.
3229 	 * RX/TX MACs should be enabled before RX/TX configuration.
3230 	 */
3231 	if ((sc->rl_flags & RL_FLAG_8168G_PLUS) == 0)
3232 		CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB | RL_CMD_RX_ENB);
3233 
3234 	/*
3235 	 * Set the initial TX configuration.
3236 	 */
3237 	if (sc->rl_testmode) {
3238 		if (sc->rl_type == RL_8169)
3239 			CSR_WRITE_4(sc, RL_TXCFG,
3240 			    RL_TXCFG_CONFIG|RL_LOOPTEST_ON);
3241 		else
3242 			CSR_WRITE_4(sc, RL_TXCFG,
3243 			    RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS);
3244 	} else
3245 		CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
3246 
3247 	CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16);
3248 
3249 	/*
3250 	 * Set the initial RX configuration.
3251 	 */
3252 	re_set_rxmode(sc);
3253 
3254 	/* Configure interrupt moderation. */
3255 	if (sc->rl_type == RL_8169) {
3256 		/* Magic from vendor. */
3257 		CSR_WRITE_2(sc, RL_INTRMOD, 0x5100);
3258 	}
3259 
3260 	/*
3261 	 * Enable transmit and receive for RTL8168G and later controllers.
3262 	 * RX/TX MACs should be enabled after RX/TX configuration.
3263 	 */
3264 	if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0)
3265 		CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB | RL_CMD_RX_ENB);
3266 
3267 #ifdef DEVICE_POLLING
3268 	/*
3269 	 * Disable interrupts if we are polling.
3270 	 */
3271 	if (ifp->if_capenable & IFCAP_POLLING)
3272 		CSR_WRITE_2(sc, RL_IMR, 0);
3273 	else	/* otherwise ... */
3274 #endif
3275 
3276 	/*
3277 	 * Enable interrupts.
3278 	 */
3279 	if (sc->rl_testmode)
3280 		CSR_WRITE_2(sc, RL_IMR, 0);
3281 	else
3282 		CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
3283 	CSR_WRITE_2(sc, RL_ISR, RL_INTRS_CPLUS);
3284 
3285 	/* Set initial TX threshold */
3286 	sc->rl_txthresh = RL_TX_THRESH_INIT;
3287 
3288 	/* Start RX/TX process. */
3289 	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);
3290 
3291 	/*
3292 	 * Initialize the timer interrupt register so that
3293 	 * a timer interrupt will be generated once the timer
3294 	 * reaches a certain number of ticks. The timer is
3295 	 * reloaded on each transmit.
3296 	 */
3297 #ifdef RE_TX_MODERATION
3298 	/*
3299 	 * Use the timer interrupt register to implement TX interrupt
3300 	 * moderation, which dramatically improves the TX frame rate.
3301 	 */
3302 	if (sc->rl_type == RL_8169)
3303 		CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800);
3304 	else
3305 		CSR_WRITE_4(sc, RL_TIMERINT, 0x400);
3306 #else
3307 	/*
3308 	 * Use the timer interrupt register to implement RX interrupt
3309 	 * moderation.
3310 	 */
3311 	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 &&
3312 	    intr_filter == 0) {
3313 		if (sc->rl_type == RL_8169)
3314 			CSR_WRITE_4(sc, RL_TIMERINT_8169,
3315 			    RL_USECS(sc->rl_int_rx_mod));
3316 	} else {
3317 		if (sc->rl_type == RL_8169)
3318 			CSR_WRITE_4(sc, RL_TIMERINT_8169, RL_USECS(0));
3319 	}
3320 #endif
3321 
3322 	/*
3323 	 * For 8169 gigE NICs, set the max allowed RX packet
3324 	 * size so we can receive jumbo frames.
3325 	 */
3326 	if (sc->rl_type == RL_8169) {
3327 		if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
3328 			/*
3329 			 * For controllers that use new jumbo frame scheme,
3330 			 * set maximum size of jumbo frame depending on
3331 			 * controller revisions.
3332 			 */
3333 			if (ifp->if_mtu > RL_MTU)
3334 				CSR_WRITE_2(sc, RL_MAXRXPKTLEN,
3335 				    sc->rl_hwrev->rl_max_mtu +
3336 				    ETHER_VLAN_ENCAP_LEN + ETHER_HDR_LEN +
3337 				    ETHER_CRC_LEN);
3338 			else
3339 				CSR_WRITE_2(sc, RL_MAXRXPKTLEN,
3340 				    RE_RX_DESC_BUFLEN);
3341 		} else if ((sc->rl_flags & RL_FLAG_PCIE) != 0 &&
3342 		    sc->rl_hwrev->rl_max_mtu == RL_MTU) {
3343 			/* RTL810x has no jumbo frame support. */
3344 			CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN);
3345 		} else
3346 			CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383);
3347 	}
3348 
3349 	if (sc->rl_testmode)
3350 		return;
3351 
3352 	CSR_WRITE_1(sc, sc->rl_cfg1, CSR_READ_1(sc, sc->rl_cfg1) |
3353 	    RL_CFG1_DRVLOAD);
3354 
3355 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3356 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3357 
3358 	sc->rl_flags &= ~RL_FLAG_LINK;
3359 	mii_mediachg(mii);
3360 
3361 	sc->rl_watchdog_timer = 0;
3362 	callout_reset(&sc->rl_stat_callout, hz, re_tick, sc);
3363 }
3364 
3365 /*
3366  * Set media options.
3367  */
3368 static int
3369 re_ifmedia_upd(struct ifnet *ifp)
3370 {
3371 	struct rl_softc		*sc;
3372 	struct mii_data		*mii;
3373 	int			error;
3374 
3375 	sc = ifp->if_softc;
3376 	mii = device_get_softc(sc->rl_miibus);
3377 	RL_LOCK(sc);
3378 	error = mii_mediachg(mii);
3379 	RL_UNLOCK(sc);
3380 
3381 	return (error);
3382 }
3383 
3384 /*
3385  * Report current media status.
3386  */
3387 static void
3388 re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3389 {
3390 	struct rl_softc		*sc;
3391 	struct mii_data		*mii;
3392 
3393 	sc = ifp->if_softc;
3394 	mii = device_get_softc(sc->rl_miibus);
3395 
3396 	RL_LOCK(sc);
3397 	mii_pollstat(mii);
3398 	ifmr->ifm_active = mii->mii_media_active;
3399 	ifmr->ifm_status = mii->mii_media_status;
3400 	RL_UNLOCK(sc);
3401 }
3402 
3403 static int
3404 re_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3405 {
3406 	struct rl_softc		*sc = ifp->if_softc;
3407 	struct ifreq		*ifr = (struct ifreq *) data;
3408 	struct mii_data		*mii;
3409 	int			error = 0;
3410 
3411 	switch (command) {
3412 	case SIOCSIFMTU:
3413 		if (ifr->ifr_mtu < ETHERMIN ||
3414 		    ifr->ifr_mtu > sc->rl_hwrev->rl_max_mtu ||
3415 		    ((sc->rl_flags & RL_FLAG_FASTETHER) != 0 &&
3416 		    ifr->ifr_mtu > RL_MTU)) {
3417 			error = EINVAL;
3418 			break;
3419 		}
3420 		RL_LOCK(sc);
3421 		if (ifp->if_mtu != ifr->ifr_mtu) {
3422 			ifp->if_mtu = ifr->ifr_mtu;
3423 			if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
3424 			    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
3425 				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3426 				re_init_locked(sc);
3427 			}
3428 			if (ifp->if_mtu > RL_TSO_MTU &&
3429 			    (ifp->if_capenable & IFCAP_TSO4) != 0) {
3430 				ifp->if_capenable &= ~(IFCAP_TSO4 |
3431 				    IFCAP_VLAN_HWTSO);
3432 				ifp->if_hwassist &= ~CSUM_TSO;
3433 			}
3434 			VLAN_CAPABILITIES(ifp);
3435 		}
3436 		RL_UNLOCK(sc);
3437 		break;
3438 	case SIOCSIFFLAGS:
3439 		RL_LOCK(sc);
3440 		if ((ifp->if_flags & IFF_UP) != 0) {
3441 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
3442 				if (((ifp->if_flags ^ sc->rl_if_flags)
3443 				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3444 					re_set_rxmode(sc);
3445 			} else
3446 				re_init_locked(sc);
3447 		} else {
3448 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3449 				re_stop(sc);
3450 		}
3451 		sc->rl_if_flags = ifp->if_flags;
3452 		RL_UNLOCK(sc);
3453 		break;
3454 	case SIOCADDMULTI:
3455 	case SIOCDELMULTI:
3456 		RL_LOCK(sc);
3457 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3458 			re_set_rxmode(sc);
3459 		RL_UNLOCK(sc);
3460 		break;
3461 	case SIOCGIFMEDIA:
3462 	case SIOCSIFMEDIA:
3463 		mii = device_get_softc(sc->rl_miibus);
3464 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
3465 		break;
3466 	case SIOCSIFCAP:
3467 	    {
3468 		int mask, reinit;
3469 
3470 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3471 		reinit = 0;
3472 #ifdef DEVICE_POLLING
3473 		if (mask & IFCAP_POLLING) {
3474 			if (ifr->ifr_reqcap & IFCAP_POLLING) {
3475 				error = ether_poll_register(re_poll, ifp);
3476 				if (error)
3477 					return (error);
3478 				RL_LOCK(sc);
3479 				/* Disable interrupts */
3480 				CSR_WRITE_2(sc, RL_IMR, 0x0000);
3481 				ifp->if_capenable |= IFCAP_POLLING;
3482 				RL_UNLOCK(sc);
3483 			} else {
3484 				error = ether_poll_deregister(ifp);
3485 				/* Enable interrupts. */
3486 				RL_LOCK(sc);
3487 				CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
3488 				ifp->if_capenable &= ~IFCAP_POLLING;
3489 				RL_UNLOCK(sc);
3490 			}
3491 		}
3492 #endif /* DEVICE_POLLING */
3493 		RL_LOCK(sc);
3494 		if ((mask & IFCAP_TXCSUM) != 0 &&
3495 		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
3496 			ifp->if_capenable ^= IFCAP_TXCSUM;
3497 			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
3498 				ifp->if_hwassist |= RE_CSUM_FEATURES;
3499 			else
3500 				ifp->if_hwassist &= ~RE_CSUM_FEATURES;
3501 			reinit = 1;
3502 		}
3503 		if ((mask & IFCAP_RXCSUM) != 0 &&
3504 		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
3505 			ifp->if_capenable ^= IFCAP_RXCSUM;
3506 			reinit = 1;
3507 		}
3508 		if ((mask & IFCAP_TSO4) != 0 &&
3509 		    (ifp->if_capabilities & IFCAP_TSO4) != 0) {
3510 			ifp->if_capenable ^= IFCAP_TSO4;
3511 			if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
3512 				ifp->if_hwassist |= CSUM_TSO;
3513 			else
3514 				ifp->if_hwassist &= ~CSUM_TSO;
3515 			if (ifp->if_mtu > RL_TSO_MTU &&
3516 			    (ifp->if_capenable & IFCAP_TSO4) != 0) {
3517 				ifp->if_capenable &= ~IFCAP_TSO4;
3518 				ifp->if_hwassist &= ~CSUM_TSO;
3519 			}
3520 		}
3521 		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
3522 		    (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
3523 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
3524 		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
3525 		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
3526 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
3527 			/* TSO over VLAN requires VLAN hardware tagging. */
3528 			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
3529 				ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
3530 			reinit = 1;
3531 		}
3532 		if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
3533 		    (mask & (IFCAP_HWCSUM | IFCAP_TSO4 |
3534 		    IFCAP_VLAN_HWTSO)) != 0)
3535 			reinit = 1;
3536 		if ((mask & IFCAP_WOL) != 0 &&
3537 		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
3538 			if ((mask & IFCAP_WOL_UCAST) != 0)
3539 				ifp->if_capenable ^= IFCAP_WOL_UCAST;
3540 			if ((mask & IFCAP_WOL_MCAST) != 0)
3541 				ifp->if_capenable ^= IFCAP_WOL_MCAST;
3542 			if ((mask & IFCAP_WOL_MAGIC) != 0)
3543 				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
3544 		}
3545 		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
3546 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3547 			re_init_locked(sc);
3548 		}
3549 		RL_UNLOCK(sc);
3550 		VLAN_CAPABILITIES(ifp);
3551 	    }
3552 		break;
3553 	default:
3554 		error = ether_ioctl(ifp, command, data);
3555 		break;
3556 	}
3557 
3558 	return (error);
3559 }
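
/*
 * Editor's note: the SIOCSIFCAP case above is what services capability
 * changes requested from userland (e.g. "ifconfig re0 -txcsum").  A
 * minimal sketch of that request path using the standard FreeBSD
 * capability ioctls; the helper function and the interface name are
 * illustrative, not part of this driver:
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <sys/sockio.h>
 *	#include <net/if.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static int
 *	disable_txcsum(const char *ifname)
 *	{
 *		struct ifreq ifr;
 *		int error, s;
 *
 *		if ((s = socket(AF_LOCAL, SOCK_DGRAM, 0)) == -1)
 *			return (-1);
 *		memset(&ifr, 0, sizeof(ifr));
 *		strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
 *		error = ioctl(s, SIOCGIFCAP, &ifr);
 *		if (error == 0) {
 *			ifr.ifr_reqcap = ifr.ifr_curcap & ~IFCAP_TXCSUM;
 *			error = ioctl(s, SIOCSIFCAP, &ifr);
 *		}
 *		close(s);
 *		return (error);
 *	}
 */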
3560 
3561 static void
3562 re_watchdog(struct rl_softc *sc)
3563 {
3564 	struct ifnet		*ifp;
3565 
3566 	RL_LOCK_ASSERT(sc);
3567 
3568 	if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer != 0)
3569 		return;
3570 
3571 	ifp = sc->rl_ifp;
3572 	re_txeof(sc);
3573 	if (sc->rl_ldata.rl_tx_free == sc->rl_ldata.rl_tx_desc_cnt) {
3574 		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
3575 		    "-- recovering\n");
3576 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3577 			re_start_locked(ifp);
3578 		return;
3579 	}
3580 
3581 	if_printf(ifp, "watchdog timeout\n");
3582 	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
3583 
3584 	re_rxeof(sc, NULL);
3585 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3586 	re_init_locked(sc);
3587 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3588 		re_start_locked(ifp);
3589 }
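
/*
 * Editor's note: rl_watchdog_timer follows the usual countdown idiom:
 * the transmit path arms it when descriptors are queued, the per-second
 * stat callout calls re_watchdog(), and the recovery logic above runs
 * only on the tick where the counter reaches zero.  The arming side
 * amounts to a single assignment in the TX start path, e.g. (the
 * five-second value here is an assumption for illustration):
 *
 *	sc->rl_watchdog_timer = 5;
 */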
3590 
3591 /*
3592  * Stop the adapter and free any mbufs allocated to the
3593  * RX and TX lists.
3594  */
3595 static void
3596 re_stop(struct rl_softc *sc)
3597 {
3598 	int			i;
3599 	struct ifnet		*ifp;
3600 	struct rl_txdesc	*txd;
3601 	struct rl_rxdesc	*rxd;
3602 
3603 	RL_LOCK_ASSERT(sc);
3604 
3605 	ifp = sc->rl_ifp;
3606 
3607 	sc->rl_watchdog_timer = 0;
3608 	callout_stop(&sc->rl_stat_callout);
3609 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3610 
3611 	/*
3612 	 * Stop accepting frames to put the RX MAC into an idle state.
3613 	 * Otherwise frames can still arrive while the stop command is
3614 	 * executing, and the controller could DMA them into RX buffers
3615 	 * that have already been freed.
3616 	 */
3617 	CSR_WRITE_4(sc, RL_RXCFG, CSR_READ_4(sc, RL_RXCFG) &
3618 	    ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI |
3619 	    RL_RXCFG_RX_BROAD));
3620 
3621 	if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) {
3622 		/* Enable RXDV gate. */
3623 		CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) |
3624 		    0x00080000);
3625 	}
3626 
3627 	if ((sc->rl_flags & RL_FLAG_WAIT_TXPOLL) != 0) {
3628 		for (i = RL_TIMEOUT; i > 0; i--) {
3629 			if ((CSR_READ_1(sc, sc->rl_txstart) &
3630 			    RL_TXSTART_START) == 0)
3631 				break;
3632 			DELAY(20);
3633 		}
3634 		if (i == 0)
3635 			device_printf(sc->rl_dev,
3636 			    "stopping TX poll timed out!\n");
3637 		CSR_WRITE_1(sc, RL_COMMAND, 0x00);
3638 	} else if ((sc->rl_flags & RL_FLAG_CMDSTOP) != 0) {
3639 		CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB |
3640 		    RL_CMD_RX_ENB);
3641 		if ((sc->rl_flags & RL_FLAG_CMDSTOP_WAIT_TXQ) != 0) {
3642 			for (i = RL_TIMEOUT; i > 0; i--) {
3643 				if ((CSR_READ_4(sc, RL_TXCFG) &
3644 				    RL_TXCFG_QUEUE_EMPTY) != 0)
3645 					break;
3646 				DELAY(100);
3647 			}
3648 			if (i == 0)
3649 				device_printf(sc->rl_dev,
3650 				    "stopping TXQ timed out!\n");
3651 		}
3652 	} else
3653 		CSR_WRITE_1(sc, RL_COMMAND, 0x00);
3654 	DELAY(1000);
3655 	CSR_WRITE_2(sc, RL_IMR, 0x0000);
3656 	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
3657 
3658 	if (sc->rl_head != NULL) {
3659 		m_freem(sc->rl_head);
3660 		sc->rl_head = sc->rl_tail = NULL;
3661 	}
3662 
3663 	/* Free the TX list buffers. */
3664 	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
3665 		txd = &sc->rl_ldata.rl_tx_desc[i];
3666 		if (txd->tx_m != NULL) {
3667 			bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
3668 			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3669 			bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag,
3670 			    txd->tx_dmamap);
3671 			m_freem(txd->tx_m);
3672 			txd->tx_m = NULL;
3673 		}
3674 	}
3675 
3676 	/* Free the RX list buffers. */
3677 	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
3678 		rxd = &sc->rl_ldata.rl_rx_desc[i];
3679 		if (rxd->rx_m != NULL) {
3680 			bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
3681 			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3682 			bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag,
3683 			    rxd->rx_dmamap);
3684 			m_freem(rxd->rx_m);
3685 			rxd->rx_m = NULL;
3686 		}
3687 	}
3688 
3689 	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
3690 		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
3691 			rxd = &sc->rl_ldata.rl_jrx_desc[i];
3692 			if (rxd->rx_m != NULL) {
3693 				bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag,
3694 				    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3695 				bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag,
3696 				    rxd->rx_dmamap);
3697 				m_freem(rxd->rx_m);
3698 				rxd->rx_m = NULL;
3699 			}
3700 		}
3701 	}
3702 }
3703 
3704 /*
3705  * Device suspend routine.  Stop the interface and program the
3706  * wake-on-LAN registers so the chip can wake the system while it
3707  * is powered down.
3708  */
3709 static int
3710 re_suspend(device_t dev)
3711 {
3712 	struct rl_softc		*sc;
3713 
3714 	sc = device_get_softc(dev);
3715 
3716 	RL_LOCK(sc);
3717 	re_stop(sc);
3718 	re_setwol(sc);
3719 	sc->suspended = 1;
3720 	RL_UNLOCK(sc);
3721 
3722 	return (0);
3723 }
3724 
3725 /*
3726  * Device resume routine.  Take the controller out of sleep mode,
3727  * clear the WOL configuration so it cannot interfere with normal
3728  * operation, and restart the interface if appropriate.
3729  */
3730 static int
3731 re_resume(device_t dev)
3732 {
3733 	struct rl_softc		*sc;
3734 	struct ifnet		*ifp;
3735 
3736 	sc = device_get_softc(dev);
3737 
3738 	RL_LOCK(sc);
3739 
3740 	ifp = sc->rl_ifp;
3741 	/* Take controller out of sleep mode. */
3742 	if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
3743 		if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
3744 			CSR_WRITE_1(sc, RL_GPIO,
3745 			    CSR_READ_1(sc, RL_GPIO) | 0x01);
3746 	}
3747 
3748 	/*
3749 	 * Clear WOL matching so that normal RX filtering
3750 	 * does not interfere with WOL patterns.
3751 	 */
3752 	re_clrwol(sc);
3753 
3754 	/* Reinitialize the interface if necessary. */
3755 	if (ifp->if_flags & IFF_UP)
3756 		re_init_locked(sc);
3757 
3758 	sc->suspended = 0;
3759 	RL_UNLOCK(sc);
3760 
3761 	return (0);
3762 }
3763 
3764 /*
3765  * Stop all chip I/O so that the kernel's probe routines don't
3766  * get confused by errant DMAs when rebooting.
3767  */
3768 static int
3769 re_shutdown(device_t dev)
3770 {
3771 	struct rl_softc		*sc;
3772 
3773 	sc = device_get_softc(dev);
3774 
3775 	RL_LOCK(sc);
3776 	re_stop(sc);
3777 	/*
3778 	 * Mark the interface as down; otherwise we would panic if an
3779 	 * interrupt came in later on, which can happen in some
3780 	 * cases.
3781 	 */
3782 	sc->rl_ifp->if_flags &= ~IFF_UP;
3783 	re_setwol(sc);
3784 	RL_UNLOCK(sc);
3785 
3786 	return (0);
3787 }
3788 
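/*
 * Editor's note (added comment): force the PHY down to a 10/100 link
 * for wake-on-LAN.  On chips flagged RL_FLAG_WOL_MANLINK the controller
 * apparently cannot wake the system from a gigabit link, so restart
 * autonegotiation with the 1000baseT ability masked off (MII_100T2CR
 * cleared) and wait for a slower link before suspending.
 */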
3789 static void
3790 re_set_linkspeed(struct rl_softc *sc)
3791 {
3792 	struct mii_softc *miisc;
3793 	struct mii_data *mii;
3794 	int aneg, i, phyno;
3795 
3796 	RL_LOCK_ASSERT(sc);
3797 
3798 	mii = device_get_softc(sc->rl_miibus);
3799 	mii_pollstat(mii);
3800 	aneg = 0;
3801 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
3802 	    (IFM_ACTIVE | IFM_AVALID)) {
3803 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
3804 		case IFM_10_T:
3805 		case IFM_100_TX:
3806 			return;
3807 		case IFM_1000_T:
3808 			aneg++;
3809 			break;
3810 		default:
3811 			break;
3812 		}
3813 	}
3814 	miisc = LIST_FIRST(&mii->mii_phys);
3815 	phyno = miisc->mii_phy;
3816 	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3817 		PHY_RESET(miisc);
3818 	re_miibus_writereg(sc->rl_dev, phyno, MII_100T2CR, 0);
3819 	re_miibus_writereg(sc->rl_dev, phyno,
3820 	    MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
3821 	re_miibus_writereg(sc->rl_dev, phyno,
3822 	    MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG);
3823 	DELAY(1000);
3824 	if (aneg != 0) {
3825 		/*
3826 		 * Poll the link state until re(4) gets a 10/100Mbps link.
3827 		 */
3828 		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
3829 			mii_pollstat(mii);
3830 			if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
3831 			    == (IFM_ACTIVE | IFM_AVALID)) {
3832 				switch (IFM_SUBTYPE(mii->mii_media_active)) {
3833 				case IFM_10_T:
3834 				case IFM_100_TX:
3835 					return;
3836 				default:
3837 					break;
3838 				}
3839 			}
3840 			RL_UNLOCK(sc);
3841 			pause("relnk", hz);
3842 			RL_LOCK(sc);
3843 		}
3844 		if (i == MII_ANEGTICKS_GIGE)
3845 			device_printf(sc->rl_dev,
3846 			    "establishing a link failed, WOL may not work!\n");
3847 	}
3848 	/*
3849 	 * No link; force the MAC to a 100Mbps, full-duplex link.
3850 	 * The MAC does not require reprogramming on resolved
3851 	 * speed/duplex, so this is just for completeness.
3852 	 */
3853 	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
3854 	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
3855 }
3856 
3857 static void
3858 re_setwol(struct rl_softc *sc)
3859 {
3860 	struct ifnet		*ifp;
3861 	int			pmc;
3862 	uint16_t		pmstat;
3863 	uint8_t			v;
3864 
3865 	RL_LOCK_ASSERT(sc);
3866 
3867 	if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
3868 		return;
3869 
3870 	ifp = sc->rl_ifp;
3871 	/* Put controller into sleep mode. */
3872 	if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
3873 		if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
3874 			CSR_WRITE_1(sc, RL_GPIO,
3875 			    CSR_READ_1(sc, RL_GPIO) & ~0x01);
3876 	}
3877 	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
3878 		if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) {
3879 			/* Disable RXDV gate. */
3880 			CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) &
3881 			    ~0x00080000);
3882 		}
3883 		re_set_rxmode(sc);
3884 		if ((sc->rl_flags & RL_FLAG_WOL_MANLINK) != 0)
3885 			re_set_linkspeed(sc);
3886 		if ((sc->rl_flags & RL_FLAG_WOLRXENB) != 0)
3887 			CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RX_ENB);
3888 	}
3889 	/* Enable config register write. */
3890 	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
3891 
3892 	/* Enable PME. */
3893 	v = CSR_READ_1(sc, sc->rl_cfg1);
3894 	v &= ~RL_CFG1_PME;
3895 	if ((ifp->if_capenable & IFCAP_WOL) != 0)
3896 		v |= RL_CFG1_PME;
3897 	CSR_WRITE_1(sc, sc->rl_cfg1, v);
3898 
3899 	v = CSR_READ_1(sc, sc->rl_cfg3);
3900 	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
3901 	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
3902 		v |= RL_CFG3_WOL_MAGIC;
3903 	CSR_WRITE_1(sc, sc->rl_cfg3, v);
3904 
3905 	v = CSR_READ_1(sc, sc->rl_cfg5);
3906 	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST |
3907 	    RL_CFG5_WOL_LANWAKE);
3908 	if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
3909 		v |= RL_CFG5_WOL_UCAST;
3910 	if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
3911 		v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST;
3912 	if ((ifp->if_capenable & IFCAP_WOL) != 0)
3913 		v |= RL_CFG5_WOL_LANWAKE;
3914 	CSR_WRITE_1(sc, sc->rl_cfg5, v);
3915 
3916 	/* Config register write done. */
3917 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3918 
3919 	if ((ifp->if_capenable & IFCAP_WOL) == 0 &&
3920 	    (sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0)
3921 		CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) & ~0x80);
3922 	/*
3923 	 * The hardware seems to reset its link speed to 100Mbps in
3924 	 * power-down mode, so switching to 100Mbps in the driver is
3925 	 * not needed.
3926 	 */
3927 
3928 	/* Request PME if WOL is requested. */
3929 	pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2);
3930 	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
3931 	if ((ifp->if_capenable & IFCAP_WOL) != 0)
3932 		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3933 	pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
3934 }
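
/*
 * Editor's note: the IFCAP_WOL_* bits consumed above are toggled from
 * userland through the SIOCSIFCAP case in re_ioctl() (e.g.
 * "ifconfig re0 wol_magic"), but they only reach the chip here, when
 * the device is being suspended or shut down.
 */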
3935 
3936 static void
3937 re_clrwol(struct rl_softc *sc)
3938 {
3939 	int			pmc;
3940 	uint8_t			v;
3941 
3942 	RL_LOCK_ASSERT(sc);
3943 
3944 	if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
3945 		return;
3946 
3947 	/* Enable config register write. */
3948 	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
3949 
3950 	v = CSR_READ_1(sc, sc->rl_cfg3);
3951 	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
3952 	CSR_WRITE_1(sc, sc->rl_cfg3, v);
3953 
3954 	/* Config register write done. */
3955 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3956 
3957 	v = CSR_READ_1(sc, sc->rl_cfg5);
3958 	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
3959 	v &= ~RL_CFG5_WOL_LANWAKE;
3960 	CSR_WRITE_1(sc, sc->rl_cfg5, v);
3961 }
3962 
3963 static void
3964 re_add_sysctls(struct rl_softc *sc)
3965 {
3966 	struct sysctl_ctx_list	*ctx;
3967 	struct sysctl_oid_list	*children;
3968 	int			error;
3969 
3970 	ctx = device_get_sysctl_ctx(sc->rl_dev);
3971 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev));
3972 
3973 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "stats",
3974 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
3975 	    re_sysctl_stats, "I", "Statistics Information");
3976 	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0)
3977 		return;
3978 
3979 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "int_rx_mod",
3980 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
3981 	    &sc->rl_int_rx_mod, 0, sysctl_hw_re_int_mod, "I",
3982 	    "re RX interrupt moderation");
3983 	/* Pull in device tunables. */
3984 	sc->rl_int_rx_mod = RL_TIMER_DEFAULT;
3985 	error = resource_int_value(device_get_name(sc->rl_dev),
3986 	    device_get_unit(sc->rl_dev), "int_rx_mod", &sc->rl_int_rx_mod);
3987 	if (error == 0) {
3988 		if (sc->rl_int_rx_mod < RL_TIMER_MIN ||
3989 		    sc->rl_int_rx_mod > RL_TIMER_MAX) {
3990 			device_printf(sc->rl_dev, "int_rx_mod value out of "
3991 			    "range; using default: %d\n",
3992 			    RL_TIMER_DEFAULT);
3993 			sc->rl_int_rx_mod = RL_TIMER_DEFAULT;
3994 		}
3995 	}
3996 }
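
/*
 * Editor's note: resource_int_value() reads the tunable from the
 * device-hint namespace, so the RX interrupt moderation timer can be
 * preset at boot from /boot/device.hints, e.g. (an arbitrary example
 * value for unit 0, assumed to be in range):
 *
 *	hint.re.0.int_rx_mod="64"
 *
 * Out-of-range values fall back to RL_TIMER_DEFAULT as above; the same
 * limits are enforced at runtime by sysctl_hw_re_int_mod() below.
 */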
3997 
3998 static int
3999 re_sysctl_stats(SYSCTL_HANDLER_ARGS)
4000 {
4001 	struct rl_softc		*sc;
4002 	struct rl_stats		*stats;
4003 	int			error, i, result;
4004 
4005 	result = -1;
4006 	error = sysctl_handle_int(oidp, &result, 0, req);
4007 	if (error || req->newptr == NULL)
4008 		return (error);
4009 
4010 	if (result == 1) {
4011 		sc = (struct rl_softc *)arg1;
4012 		RL_LOCK(sc);
4013 		if ((sc->rl_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
4014 			RL_UNLOCK(sc);
4015 			goto done;
4016 		}
4017 		bus_dmamap_sync(sc->rl_ldata.rl_stag,
4018 		    sc->rl_ldata.rl_smap, BUS_DMASYNC_PREREAD);
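		/*
		 * Load the 64-bit DMA address of the statistics block,
		 * then rewrite the low word with the START bit set to
		 * kick off the dump.
		 */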
4019 		CSR_WRITE_4(sc, RL_DUMPSTATS_HI,
4020 		    RL_ADDR_HI(sc->rl_ldata.rl_stats_addr));
4021 		CSR_WRITE_4(sc, RL_DUMPSTATS_LO,
4022 		    RL_ADDR_LO(sc->rl_ldata.rl_stats_addr));
4023 		CSR_WRITE_4(sc, RL_DUMPSTATS_LO,
4024 		    RL_ADDR_LO(sc->rl_ldata.rl_stats_addr |
4025 		    RL_DUMPSTATS_START));
4026 		for (i = RL_TIMEOUT; i > 0; i--) {
4027 			if ((CSR_READ_4(sc, RL_DUMPSTATS_LO) &
4028 			    RL_DUMPSTATS_START) == 0)
4029 				break;
4030 			DELAY(1000);
4031 		}
4032 		bus_dmamap_sync(sc->rl_ldata.rl_stag,
4033 		    sc->rl_ldata.rl_smap, BUS_DMASYNC_POSTREAD);
4034 		RL_UNLOCK(sc);
4035 		if (i == 0) {
4036 			device_printf(sc->rl_dev,
4037 			    "DUMP statistics request timed out\n");
4038 			return (ETIMEDOUT);
4039 		}
4040 done:
4041 		stats = sc->rl_ldata.rl_stats;
4042 		printf("%s statistics:\n", device_get_nameunit(sc->rl_dev));
4043 		printf("Tx frames : %ju\n",
4044 		    (uintmax_t)le64toh(stats->rl_tx_pkts));
4045 		printf("Rx frames : %ju\n",
4046 		    (uintmax_t)le64toh(stats->rl_rx_pkts));
4047 		printf("Tx errors : %ju\n",
4048 		    (uintmax_t)le64toh(stats->rl_tx_errs));
4049 		printf("Rx errors : %u\n",
4050 		    le32toh(stats->rl_rx_errs));
4051 		printf("Rx missed frames : %u\n",
4052 		    (uint32_t)le16toh(stats->rl_missed_pkts));
4053 		printf("Rx frame alignment errs : %u\n",
4054 		    (uint32_t)le16toh(stats->rl_rx_framealign_errs));
4055 		printf("Tx single collisions : %u\n",
4056 		    le32toh(stats->rl_tx_onecoll));
4057 		printf("Tx multiple collisions : %u\n",
4058 		    le32toh(stats->rl_tx_multicolls));
4059 		printf("Rx unicast frames : %ju\n",
4060 		    (uintmax_t)le64toh(stats->rl_rx_ucasts));
4061 		printf("Rx broadcast frames : %ju\n",
4062 		    (uintmax_t)le64toh(stats->rl_rx_bcasts));
4063 		printf("Rx multicast frames : %u\n",
4064 		    le32toh(stats->rl_rx_mcasts));
4065 		printf("Tx aborts : %u\n",
4066 		    (uint32_t)le16toh(stats->rl_tx_aborts));
4067 		printf("Tx underruns : %u\n",
4068 		    (uint32_t)le16toh(stats->rl_rx_underruns));
4069 	}
4070 
4071 	return (error);
4072 }
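
/*
 * Editor's note: the handler above is write-triggered: reading
 * dev.re.<unit>.stats just returns -1, while writing 1, e.g.
 *
 *	sysctl dev.re.0.stats=1
 *
 * asks the chip to DMA its hardware statistics block and prints the
 * decoded counters to the console.
 */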
4073 
4074 static int
4075 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
4076 {
4077 	int error, value;
4078 
4079 	if (arg1 == NULL)
4080 		return (EINVAL);
4081 	value = *(int *)arg1;
4082 	error = sysctl_handle_int(oidp, &value, 0, req);
4083 	if (error || req->newptr == NULL)
4084 		return (error);
4085 	if (value < low || value > high)
4086 		return (EINVAL);
4087 	*(int *)arg1 = value;
4088 
4089 	return (0);
4090 }
4091 
4092 static int
4093 sysctl_hw_re_int_mod(SYSCTL_HANDLER_ARGS)
4094 {
4095 
4096 	return (sysctl_int_range(oidp, arg1, arg2, req, RL_TIMER_MIN,
4097 	    RL_TIMER_MAX));
4098 }
4099 
4100 #ifdef DEBUGNET
4101 static void
4102 re_debugnet_init(struct ifnet *ifp, int *nrxr, int *ncl, int *clsize)
4103 {
4104 	struct rl_softc *sc;
4105 
4106 	sc = if_getsoftc(ifp);
4107 	RL_LOCK(sc);
4108 	*nrxr = sc->rl_ldata.rl_rx_desc_cnt;
4109 	*ncl = DEBUGNET_MAX_IN_FLIGHT;
4110 	*clsize = (ifp->if_mtu > RL_MTU &&
4111 	    (sc->rl_flags & RL_FLAG_JUMBOV2) != 0) ? MJUM9BYTES : MCLBYTES;
4112 	RL_UNLOCK(sc);
4113 }
4114 
4115 static void
4116 re_debugnet_event(struct ifnet *ifp __unused, enum debugnet_ev event __unused)
4117 {
4118 }
4119 
4120 static int
4121 re_debugnet_transmit(struct ifnet *ifp, struct mbuf *m)
4122 {
4123 	struct rl_softc *sc;
4124 	int error;
4125 
4126 	sc = if_getsoftc(ifp);
4127 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4128 	    IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0)
4129 		return (EBUSY);
4130 
4131 	error = re_encap(sc, &m);
4132 	if (error == 0)
4133 		re_start_tx(sc);
4134 	return (error);
4135 }
4136 
4137 static int
4138 re_debugnet_poll(struct ifnet *ifp, int count)
4139 {
4140 	struct rl_softc *sc;
4141 	int error;
4142 
4143 	sc = if_getsoftc(ifp);
4144 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 ||
4145 	    (sc->rl_flags & RL_FLAG_LINK) == 0)
4146 		return (EBUSY);
4147 
4148 	re_txeof(sc);
4149 	error = re_rxeof(sc, NULL);
4150 	if (error != 0 && error != EAGAIN)
4151 		return (error);
4152 	return (0);
4153 }
4154 #endif /* DEBUGNET */
4155