xref: /freebsd/sys/dev/re/if_re.c (revision 5ffd83dbcc34f10e07f6d3e968ae6365869615f4)
1 /*-
2  * SPDX-License-Identifier: BSD-4-Clause
3  *
4  * Copyright (c) 1997, 1998-2003
5  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 /*
39  * RealTek 8139C+/8169/8169S/8110S/8168/8111/8101E PCI NIC driver
40  *
41  * Written by Bill Paul <wpaul@windriver.com>
42  * Senior Networking Software Engineer
43  * Wind River Systems
44  */
45 
46 /*
47  * This driver is designed to support RealTek's next generation of
48  * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
49  * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S,
50  * RTL8110S, the RTL8168, the RTL8111 and the RTL8101E.
51  *
52  * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
53  * with the older 8139 family; however, it also supports a special
54  * C+ mode of operation that provides several new performance-enhancing
55  * features. These include:
56  *
57  *	o Descriptor based DMA mechanism. Each descriptor represents
58  *	  a single packet fragment. Data buffers may be aligned on
59  *	  any byte boundary.
60  *
61  *	o 64-bit DMA
62  *
63  *	o TCP/IP checksum offload for both RX and TX
64  *
65  *	o High and normal priority transmit DMA rings
66  *
67  *	o VLAN tag insertion and extraction
68  *
69  *	o TCP large send (segmentation offload)
70  *
71  * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
72  * programming API is fairly straightforward. The RX filtering, EEPROM
73  * access and PHY access are the same as they are on the older 8139 series
74  * chips.
75  *
76  * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the
77  * same programming API and feature set as the 8139C+ with the following
78  * differences and additions:
79  *
80  *	o 1000Mbps mode
81  *
82  *	o Jumbo frames
83  *
84  *	o GMII and TBI ports/registers for interfacing with copper
85  *	  or fiber PHYs
86  *
87  *	o RX and TX DMA rings can have up to 1024 descriptors
88  *	  (the 8139C+ allows a maximum of 64)
89  *
90  *	o Slight differences in register layout from the 8139C+
91  *
92  * The TX start and timer interrupt registers are at different locations
93  * on the 8169 than they are on the 8139C+. Also, the status word in the
94  * RX descriptor has a slightly different bit layout. The 8169 does not
95  * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
96  * copper gigE PHY.
97  *
98  * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
99  * (the 'S' stands for 'single-chip'). These devices have the same
100  * programming API as the older 8169, but also have some vendor-specific
101  * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
102  * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
103  *
104  * This driver takes advantage of the RX and TX checksum offload and
105  * VLAN tag insertion/extraction features. It also implements TX
106  * interrupt moderation using the timer interrupt registers, which
107  * significantly reduces TX interrupt load. There is also support
108  * for jumbo frames; however, the 8169/8169S/8110S cannot transmit
109  * jumbo frames larger than 7440 bytes, so the maximum MTU possible with
110  * this driver is 7422 bytes.
111  */
112 
113 #ifdef HAVE_KERNEL_OPTION_HEADERS
114 #include "opt_device_polling.h"
115 #endif
116 
117 #include <sys/param.h>
118 #include <sys/endian.h>
119 #include <sys/systm.h>
120 #include <sys/sockio.h>
121 #include <sys/mbuf.h>
122 #include <sys/malloc.h>
123 #include <sys/module.h>
124 #include <sys/kernel.h>
125 #include <sys/socket.h>
126 #include <sys/lock.h>
127 #include <sys/mutex.h>
128 #include <sys/sysctl.h>
129 #include <sys/taskqueue.h>
130 
131 #include <net/debugnet.h>
132 #include <net/if.h>
133 #include <net/if_var.h>
134 #include <net/if_arp.h>
135 #include <net/ethernet.h>
136 #include <net/if_dl.h>
137 #include <net/if_media.h>
138 #include <net/if_types.h>
139 #include <net/if_vlan_var.h>
140 
141 #include <net/bpf.h>
142 
143 #include <machine/bus.h>
144 #include <machine/resource.h>
145 #include <sys/bus.h>
146 #include <sys/rman.h>
147 
148 #include <dev/mii/mii.h>
149 #include <dev/mii/miivar.h>
150 
151 #include <dev/pci/pcireg.h>
152 #include <dev/pci/pcivar.h>
153 
154 #include <dev/rl/if_rlreg.h>
155 
156 MODULE_DEPEND(re, pci, 1, 1, 1);
157 MODULE_DEPEND(re, ether, 1, 1, 1);
158 MODULE_DEPEND(re, miibus, 1, 1, 1);
159 
160 /* "device miibus" required.  See GENERIC if you get errors here. */
161 #include "miibus_if.h"
162 
163 /* Tunables. */
164 static int intr_filter = 0;
165 TUNABLE_INT("hw.re.intr_filter", &intr_filter);
166 static int msi_disable = 0;
167 TUNABLE_INT("hw.re.msi_disable", &msi_disable);
168 static int msix_disable = 0;
169 TUNABLE_INT("hw.re.msix_disable", &msix_disable);
170 static int prefer_iomap = 0;
171 TUNABLE_INT("hw.re.prefer_iomap", &prefer_iomap);
172 
173 #define RE_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)
174 
175 /*
176  * Various supported device vendors/types and their names.
177  */
178 static const struct rl_type re_devs[] = {
179 	{ DLINK_VENDORID, DLINK_DEVICEID_528T, 0,
180 	    "D-Link DGE-528(T) Gigabit Ethernet Adapter" },
181 	{ DLINK_VENDORID, DLINK_DEVICEID_530T_REVC, 0,
182 	    "D-Link DGE-530(T) Gigabit Ethernet Adapter" },
183 	{ RT_VENDORID, RT_DEVICEID_8139, 0,
184 	    "RealTek 8139C+ 10/100BaseTX" },
185 	{ RT_VENDORID, RT_DEVICEID_8101E, 0,
186 	    "RealTek 810xE PCIe 10/100baseTX" },
187 	{ RT_VENDORID, RT_DEVICEID_8168, 0,
188 	    "RealTek 8168/8111 B/C/CP/D/DP/E/F/G PCIe Gigabit Ethernet" },
189 	{ NCUBE_VENDORID, RT_DEVICEID_8168, 0,
190 	    "TP-Link TG-3468 v2 (RTL8168) Gigabit Ethernet" },
191 	{ RT_VENDORID, RT_DEVICEID_8169, 0,
192 	    "RealTek 8169/8169S/8169SB(L)/8110S/8110SB(L) Gigabit Ethernet" },
193 	{ RT_VENDORID, RT_DEVICEID_8169SC, 0,
194 	    "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" },
195 	{ COREGA_VENDORID, COREGA_DEVICEID_CGLAPCIGT, 0,
196 	    "Corega CG-LAPCIGT (RTL8169S) Gigabit Ethernet" },
197 	{ LINKSYS_VENDORID, LINKSYS_DEVICEID_EG1032, 0,
198 	    "Linksys EG1032 (RTL8169S) Gigabit Ethernet" },
199 	{ USR_VENDORID, USR_DEVICEID_997902, 0,
200 	    "US Robotics 997902 (RTL8169S) Gigabit Ethernet" }
201 };
202 
203 static const struct rl_hwrev re_hwrevs[] = {
204 	{ RL_HWREV_8139, RL_8139, "", RL_MTU },
205 	{ RL_HWREV_8139A, RL_8139, "A", RL_MTU },
206 	{ RL_HWREV_8139AG, RL_8139, "A-G", RL_MTU },
207 	{ RL_HWREV_8139B, RL_8139, "B", RL_MTU },
208 	{ RL_HWREV_8130, RL_8139, "8130", RL_MTU },
209 	{ RL_HWREV_8139C, RL_8139, "C", RL_MTU },
210 	{ RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C", RL_MTU },
211 	{ RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+", RL_MTU },
212 	{ RL_HWREV_8168B_SPIN1, RL_8169, "8168", RL_JUMBO_MTU },
213 	{ RL_HWREV_8169, RL_8169, "8169", RL_JUMBO_MTU },
214 	{ RL_HWREV_8169S, RL_8169, "8169S", RL_JUMBO_MTU },
215 	{ RL_HWREV_8110S, RL_8169, "8110S", RL_JUMBO_MTU },
216 	{ RL_HWREV_8169_8110SB, RL_8169, "8169SB/8110SB", RL_JUMBO_MTU },
217 	{ RL_HWREV_8169_8110SC, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU },
218 	{ RL_HWREV_8169_8110SBL, RL_8169, "8169SBL/8110SBL", RL_JUMBO_MTU },
219 	{ RL_HWREV_8169_8110SCE, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU },
220 	{ RL_HWREV_8100, RL_8139, "8100", RL_MTU },
221 	{ RL_HWREV_8101, RL_8139, "8101", RL_MTU },
222 	{ RL_HWREV_8100E, RL_8169, "8100E", RL_MTU },
223 	{ RL_HWREV_8101E, RL_8169, "8101E", RL_MTU },
224 	{ RL_HWREV_8102E, RL_8169, "8102E", RL_MTU },
225 	{ RL_HWREV_8102EL, RL_8169, "8102EL", RL_MTU },
226 	{ RL_HWREV_8102EL_SPIN1, RL_8169, "8102EL", RL_MTU },
227 	{ RL_HWREV_8103E, RL_8169, "8103E", RL_MTU },
228 	{ RL_HWREV_8401E, RL_8169, "8401E", RL_MTU },
229 	{ RL_HWREV_8402, RL_8169, "8402", RL_MTU },
230 	{ RL_HWREV_8105E, RL_8169, "8105E", RL_MTU },
231 	{ RL_HWREV_8105E_SPIN1, RL_8169, "8105E", RL_MTU },
232 	{ RL_HWREV_8106E, RL_8169, "8106E", RL_MTU },
233 	{ RL_HWREV_8168B_SPIN2, RL_8169, "8168", RL_JUMBO_MTU },
234 	{ RL_HWREV_8168B_SPIN3, RL_8169, "8168", RL_JUMBO_MTU },
235 	{ RL_HWREV_8168C, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K },
236 	{ RL_HWREV_8168C_SPIN2, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K },
237 	{ RL_HWREV_8168CP, RL_8169, "8168CP/8111CP", RL_JUMBO_MTU_6K },
238 	{ RL_HWREV_8168D, RL_8169, "8168D/8111D", RL_JUMBO_MTU_9K },
239 	{ RL_HWREV_8168DP, RL_8169, "8168DP/8111DP", RL_JUMBO_MTU_9K },
240 	{ RL_HWREV_8168E, RL_8169, "8168E/8111E", RL_JUMBO_MTU_9K},
241 	{ RL_HWREV_8168E_VL, RL_8169, "8168E/8111E-VL", RL_JUMBO_MTU_6K},
242 	{ RL_HWREV_8168EP, RL_8169, "8168EP/8111EP", RL_JUMBO_MTU_9K},
243 	{ RL_HWREV_8168F, RL_8169, "8168F/8111F", RL_JUMBO_MTU_9K},
244 	{ RL_HWREV_8168G, RL_8169, "8168G/8111G", RL_JUMBO_MTU_9K},
245 	{ RL_HWREV_8168GU, RL_8169, "8168GU/8111GU", RL_JUMBO_MTU_9K},
246 	{ RL_HWREV_8168H, RL_8169, "8168H/8111H", RL_JUMBO_MTU_9K},
247 	{ RL_HWREV_8411, RL_8169, "8411", RL_JUMBO_MTU_9K},
248 	{ RL_HWREV_8411B, RL_8169, "8411B", RL_JUMBO_MTU_9K},
249 	{ 0, 0, NULL, 0 }
250 };
251 
252 static int re_probe		(device_t);
253 static int re_attach		(device_t);
254 static int re_detach		(device_t);
255 
256 static int re_encap		(struct rl_softc *, struct mbuf **);
257 
258 static void re_dma_map_addr	(void *, bus_dma_segment_t *, int, int);
259 static int re_allocmem		(device_t, struct rl_softc *);
260 static __inline void re_discard_rxbuf
261 				(struct rl_softc *, int);
262 static int re_newbuf		(struct rl_softc *, int);
263 static int re_jumbo_newbuf	(struct rl_softc *, int);
264 static int re_rx_list_init	(struct rl_softc *);
265 static int re_jrx_list_init	(struct rl_softc *);
266 static int re_tx_list_init	(struct rl_softc *);
267 #ifdef RE_FIXUP_RX
268 static __inline void re_fixup_rx
269 				(struct mbuf *);
270 #endif
271 static int re_rxeof		(struct rl_softc *, int *);
272 static void re_txeof		(struct rl_softc *);
273 #ifdef DEVICE_POLLING
274 static int re_poll		(struct ifnet *, enum poll_cmd, int);
275 static int re_poll_locked	(struct ifnet *, enum poll_cmd, int);
276 #endif
277 static int re_intr		(void *);
278 static void re_intr_msi		(void *);
279 static void re_tick		(void *);
280 static void re_int_task		(void *, int);
281 static void re_start		(struct ifnet *);
282 static void re_start_locked	(struct ifnet *);
283 static void re_start_tx		(struct rl_softc *);
284 static int re_ioctl		(struct ifnet *, u_long, caddr_t);
285 static void re_init		(void *);
286 static void re_init_locked	(struct rl_softc *);
287 static void re_stop		(struct rl_softc *);
288 static void re_watchdog		(struct rl_softc *);
289 static int re_suspend		(device_t);
290 static int re_resume		(device_t);
291 static int re_shutdown		(device_t);
292 static int re_ifmedia_upd	(struct ifnet *);
293 static void re_ifmedia_sts	(struct ifnet *, struct ifmediareq *);
294 
295 static void re_eeprom_putbyte	(struct rl_softc *, int);
296 static void re_eeprom_getword	(struct rl_softc *, int, u_int16_t *);
297 static void re_read_eeprom	(struct rl_softc *, caddr_t, int, int);
298 static int re_gmii_readreg	(device_t, int, int);
299 static int re_gmii_writereg	(device_t, int, int, int);
300 
301 static int re_miibus_readreg	(device_t, int, int);
302 static int re_miibus_writereg	(device_t, int, int, int);
303 static void re_miibus_statchg	(device_t);
304 
305 static void re_set_jumbo	(struct rl_softc *, int);
306 static void re_set_rxmode		(struct rl_softc *);
307 static void re_reset		(struct rl_softc *);
308 static void re_setwol		(struct rl_softc *);
309 static void re_clrwol		(struct rl_softc *);
310 static void re_set_linkspeed	(struct rl_softc *);
311 
312 DEBUGNET_DEFINE(re);
313 
314 #ifdef DEV_NETMAP	/* see ixgbe.c for details */
315 #include <dev/netmap/if_re_netmap.h>
316 MODULE_DEPEND(re, netmap, 1, 1, 1);
317 #endif /* DEV_NETMAP */
318 
319 #ifdef RE_DIAG
320 static int re_diag		(struct rl_softc *);
321 #endif
322 
323 static void re_add_sysctls	(struct rl_softc *);
324 static int re_sysctl_stats	(SYSCTL_HANDLER_ARGS);
325 static int sysctl_int_range	(SYSCTL_HANDLER_ARGS, int, int);
326 static int sysctl_hw_re_int_mod	(SYSCTL_HANDLER_ARGS);
327 
328 static device_method_t re_methods[] = {
329 	/* Device interface */
330 	DEVMETHOD(device_probe,		re_probe),
331 	DEVMETHOD(device_attach,	re_attach),
332 	DEVMETHOD(device_detach,	re_detach),
333 	DEVMETHOD(device_suspend,	re_suspend),
334 	DEVMETHOD(device_resume,	re_resume),
335 	DEVMETHOD(device_shutdown,	re_shutdown),
336 
337 	/* MII interface */
338 	DEVMETHOD(miibus_readreg,	re_miibus_readreg),
339 	DEVMETHOD(miibus_writereg,	re_miibus_writereg),
340 	DEVMETHOD(miibus_statchg,	re_miibus_statchg),
341 
342 	DEVMETHOD_END
343 };
344 
345 static driver_t re_driver = {
346 	"re",
347 	re_methods,
348 	sizeof(struct rl_softc)
349 };
350 
351 static devclass_t re_devclass;
352 
353 DRIVER_MODULE(re, pci, re_driver, re_devclass, 0, 0);
354 DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0);
355 
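/*
 * Convenience macros for setting and clearing bits in the EEPROM command
 * register while bit-banging the serial EEPROM interface.
 */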
356 #define EE_SET(x)					\
357 	CSR_WRITE_1(sc, RL_EECMD,			\
358 		CSR_READ_1(sc, RL_EECMD) | x)
359 
360 #define EE_CLR(x)					\
361 	CSR_WRITE_1(sc, RL_EECMD,			\
362 		CSR_READ_1(sc, RL_EECMD) & ~x)
363 
364 /*
365  * Send a read command and address to the EEPROM, check for ACK.
366  */
367 static void
368 re_eeprom_putbyte(struct rl_softc *sc, int addr)
369 {
370 	int			d, i;
371 
372 	d = addr | (RL_9346_READ << sc->rl_eewidth);
373 
374 	/*
375 	 * Feed in each bit and strobe the clock.
376 	 */
377 
378 	for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) {
379 		if (d & i) {
380 			EE_SET(RL_EE_DATAIN);
381 		} else {
382 			EE_CLR(RL_EE_DATAIN);
383 		}
384 		DELAY(100);
385 		EE_SET(RL_EE_CLK);
386 		DELAY(150);
387 		EE_CLR(RL_EE_CLK);
388 		DELAY(100);
389 	}
390 }
391 
392 /*
393  * Read a word of data stored in the EEPROM at address 'addr.'
394  */
395 static void
396 re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest)
397 {
398 	int			i;
399 	u_int16_t		word = 0;
400 
401 	/*
402 	 * Send address of word we want to read.
403 	 */
404 	re_eeprom_putbyte(sc, addr);
405 
406 	/*
407 	 * Start reading bits from EEPROM.
408 	 */
409 	for (i = 0x8000; i; i >>= 1) {
410 		EE_SET(RL_EE_CLK);
411 		DELAY(100);
412 		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
413 			word |= i;
414 		EE_CLR(RL_EE_CLK);
415 		DELAY(100);
416 	}
417 
418 	*dest = word;
419 }
420 
421 /*
422  * Read a sequence of words from the EEPROM.
423  */
424 static void
425 re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt)
426 {
427 	int			i;
428 	u_int16_t		word = 0, *ptr;
429 
430 	CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
431 
432 	DELAY(100);
433 
434 	for (i = 0; i < cnt; i++) {
435 		CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL);
436 		re_eeprom_getword(sc, off + i, &word);
437 		CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL);
438 		ptr = (u_int16_t *)(dest + (i * 2));
439 		*ptr = word;
440 	}
441 
442 	CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
443 }
444 
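/*
 * Read a PHY register on 8169-class chips through the GMII management
 * register (RL_PHYAR), polling until the access completes.
 */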
445 static int
446 re_gmii_readreg(device_t dev, int phy, int reg)
447 {
448 	struct rl_softc		*sc;
449 	u_int32_t		rval;
450 	int			i;
451 
452 	sc = device_get_softc(dev);
453 
454 	/* Let the rgephy driver read the GMEDIASTAT register */
455 
456 	if (reg == RL_GMEDIASTAT) {
457 		rval = CSR_READ_1(sc, RL_GMEDIASTAT);
458 		return (rval);
459 	}
460 
461 	CSR_WRITE_4(sc, RL_PHYAR, reg << 16);
462 
463 	for (i = 0; i < RL_PHY_TIMEOUT; i++) {
464 		rval = CSR_READ_4(sc, RL_PHYAR);
465 		if (rval & RL_PHYAR_BUSY)
466 			break;
467 		DELAY(25);
468 	}
469 
470 	if (i == RL_PHY_TIMEOUT) {
471 		device_printf(sc->rl_dev, "PHY read failed\n");
472 		return (0);
473 	}
474 
475 	/*
476 	 * Controller requires a 20us delay to process next MDIO request.
477 	 */
478 	DELAY(20);
479 
480 	return (rval & RL_PHYAR_PHYDATA);
481 }
482 
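/*
 * Write a PHY register on 8169-class chips through the GMII management
 * register (RL_PHYAR), polling until the access completes.
 */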
483 static int
484 re_gmii_writereg(device_t dev, int phy, int reg, int data)
485 {
486 	struct rl_softc		*sc;
487 	u_int32_t		rval;
488 	int			i;
489 
490 	sc = device_get_softc(dev);
491 
492 	CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) |
493 	    (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY);
494 
495 	for (i = 0; i < RL_PHY_TIMEOUT; i++) {
496 		rval = CSR_READ_4(sc, RL_PHYAR);
497 		if (!(rval & RL_PHYAR_BUSY))
498 			break;
499 		DELAY(25);
500 	}
501 
502 	if (i == RL_PHY_TIMEOUT) {
503 		device_printf(sc->rl_dev, "PHY write failed\n");
504 		return (0);
505 	}
506 
507 	/*
508 	 * Controller requires a 20us delay to process next MDIO request.
509 	 */
510 	DELAY(20);
511 
512 	return (0);
513 }
514 
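/*
 * miibus read hook.  8169-class chips are serviced through the GMII
 * interface; on the 8139C+ the standard MII registers are mapped to
 * their dedicated on-chip register locations.
 */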
515 static int
516 re_miibus_readreg(device_t dev, int phy, int reg)
517 {
518 	struct rl_softc		*sc;
519 	u_int16_t		rval = 0;
520 	u_int16_t		re8139_reg = 0;
521 
522 	sc = device_get_softc(dev);
523 
524 	if (sc->rl_type == RL_8169) {
525 		rval = re_gmii_readreg(dev, phy, reg);
526 		return (rval);
527 	}
528 
529 	switch (reg) {
530 	case MII_BMCR:
531 		re8139_reg = RL_BMCR;
532 		break;
533 	case MII_BMSR:
534 		re8139_reg = RL_BMSR;
535 		break;
536 	case MII_ANAR:
537 		re8139_reg = RL_ANAR;
538 		break;
539 	case MII_ANER:
540 		re8139_reg = RL_ANER;
541 		break;
542 	case MII_ANLPAR:
543 		re8139_reg = RL_LPAR;
544 		break;
545 	case MII_PHYIDR1:
546 	case MII_PHYIDR2:
547 		return (0);
548 	/*
549 	 * Allow the rlphy driver to read the media status
550 	 * register. If we have a link partner which does not
551 	 * support NWAY, this is the register which will tell
552 	 * us the results of parallel detection.
553 	 */
554 	case RL_MEDIASTAT:
555 		rval = CSR_READ_1(sc, RL_MEDIASTAT);
556 		return (rval);
557 	default:
558 		device_printf(sc->rl_dev, "bad phy register\n");
559 		return (0);
560 	}
561 	rval = CSR_READ_2(sc, re8139_reg);
562 	if (sc->rl_type == RL_8139CPLUS && re8139_reg == RL_BMCR) {
563 		/* 8139C+ has different bit layout. */
564 		rval &= ~(BMCR_LOOP | BMCR_ISO);
565 	}
566 	return (rval);
567 }
568 
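/*
 * miibus write hook.  As with reads, 8169-class chips go through the
 * GMII interface while 8139C+ registers are written directly.
 */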
569 static int
570 re_miibus_writereg(device_t dev, int phy, int reg, int data)
571 {
572 	struct rl_softc		*sc;
573 	u_int16_t		re8139_reg = 0;
574 	int			rval = 0;
575 
576 	sc = device_get_softc(dev);
577 
578 	if (sc->rl_type == RL_8169) {
579 		rval = re_gmii_writereg(dev, phy, reg, data);
580 		return (rval);
581 	}
582 
583 	switch (reg) {
584 	case MII_BMCR:
585 		re8139_reg = RL_BMCR;
586 		if (sc->rl_type == RL_8139CPLUS) {
587 			/* 8139C+ has different bit layout. */
588 			data &= ~(BMCR_LOOP | BMCR_ISO);
589 		}
590 		break;
591 	case MII_BMSR:
592 		re8139_reg = RL_BMSR;
593 		break;
594 	case MII_ANAR:
595 		re8139_reg = RL_ANAR;
596 		break;
597 	case MII_ANER:
598 		re8139_reg = RL_ANER;
599 		break;
600 	case MII_ANLPAR:
601 		re8139_reg = RL_LPAR;
602 		break;
603 	case MII_PHYIDR1:
604 	case MII_PHYIDR2:
605 		return (0);
607 	default:
608 		device_printf(sc->rl_dev, "bad phy register\n");
609 		return (0);
610 	}
611 	CSR_WRITE_2(sc, re8139_reg, data);
612 	return (0);
613 }
614 
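/*
 * miibus link state change hook: track the resolved link state in the
 * softc so the transmit path knows whether the link is up.
 */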
615 static void
616 re_miibus_statchg(device_t dev)
617 {
618 	struct rl_softc		*sc;
619 	struct ifnet		*ifp;
620 	struct mii_data		*mii;
621 
622 	sc = device_get_softc(dev);
623 	mii = device_get_softc(sc->rl_miibus);
624 	ifp = sc->rl_ifp;
625 	if (mii == NULL || ifp == NULL ||
626 	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
627 		return;
628 
629 	sc->rl_flags &= ~RL_FLAG_LINK;
630 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
631 	    (IFM_ACTIVE | IFM_AVALID)) {
632 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
633 		case IFM_10_T:
634 		case IFM_100_TX:
635 			sc->rl_flags |= RL_FLAG_LINK;
636 			break;
637 		case IFM_1000_T:
638 			if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0)
639 				break;
640 			sc->rl_flags |= RL_FLAG_LINK;
641 			break;
642 		default:
643 			break;
644 		}
645 	}
646 	/*
647 	 * RealTek controllers do not provide any interface to the RX/TX
648 	 * MACs for resolved speed, duplex and flow-control parameters.
649 	 */
650 }
651 
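/*
 * Program one multicast address into the 64-bit hash filter: the upper
 * six bits of the big-endian CRC32 of the address select which bit to
 * set in the two 32-bit hash words.
 */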
652 static u_int
653 re_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
654 {
655 	uint32_t h, *hashes = arg;
656 
657 	h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
658 	if (h < 32)
659 		hashes[0] |= (1 << h);
660 	else
661 		hashes[1] |= (1 << (h - 32));
662 
663 	return (1);
664 }
665 
666 /*
667  * Set the RX configuration and 64-bit multicast hash filter.
668  */
669 static void
670 re_set_rxmode(struct rl_softc *sc)
671 {
672 	struct ifnet		*ifp;
673 	uint32_t		h, hashes[2] = { 0, 0 };
674 	uint32_t		rxfilt;
675 
676 	RL_LOCK_ASSERT(sc);
677 
678 	ifp = sc->rl_ifp;
679 
680 	rxfilt = RL_RXCFG_CONFIG | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD;
681 	if ((sc->rl_flags & RL_FLAG_EARLYOFF) != 0)
682 		rxfilt |= RL_RXCFG_EARLYOFF;
683 	else if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0)
684 		rxfilt |= RL_RXCFG_EARLYOFFV2;
685 
686 	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
687 		if (ifp->if_flags & IFF_PROMISC)
688 			rxfilt |= RL_RXCFG_RX_ALLPHYS;
689 		/*
690 		 * Unlike other hardware, we have to explicitly set
691 		 * RL_RXCFG_RX_MULTI to receive multicast frames in
692 		 * promiscuous mode.
693 		 */
694 		rxfilt |= RL_RXCFG_RX_MULTI;
695 		hashes[0] = hashes[1] = 0xffffffff;
696 		goto done;
697 	}
698 
699 	if_foreach_llmaddr(ifp, re_hash_maddr, hashes);
700 
701 	if (hashes[0] != 0 || hashes[1] != 0) {
702 		/*
703 		 * For some unfathomable reason, RealTek decided to
704 		 * reverse the order of the multicast hash registers
705 		 * in the PCI Express parts.  This means we have to
706 		 * write the hash pattern in reverse order for those
707 		 * devices.
708 		 */
709 		if ((sc->rl_flags & RL_FLAG_PCIE) != 0) {
710 			h = bswap32(hashes[0]);
711 			hashes[0] = bswap32(hashes[1]);
712 			hashes[1] = h;
713 		}
714 		rxfilt |= RL_RXCFG_RX_MULTI;
715 	}
716 
717 	if (sc->rl_hwrev->rl_rev == RL_HWREV_8168F) {
718 		/* Disable multicast filtering due to silicon bug. */
719 		hashes[0] = 0xffffffff;
720 		hashes[1] = 0xffffffff;
721 	}
722 
723 done:
724 	CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
725 	CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
726 	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
727 }
728 
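/*
 * Issue a software reset and wait for the chip to clear the reset bit.
 */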
729 static void
730 re_reset(struct rl_softc *sc)
731 {
732 	int			i;
733 
734 	RL_LOCK_ASSERT(sc);
735 
736 	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);
737 
738 	for (i = 0; i < RL_TIMEOUT; i++) {
739 		DELAY(10);
740 		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
741 			break;
742 	}
743 	if (i == RL_TIMEOUT)
744 		device_printf(sc->rl_dev, "reset never completed!\n");
745 
746 	if ((sc->rl_flags & RL_FLAG_MACRESET) != 0)
747 		CSR_WRITE_1(sc, 0x82, 1);
748 	if (sc->rl_hwrev->rl_rev == RL_HWREV_8169S)
749 		re_gmii_writereg(sc->rl_dev, 1, 0x0b, 0);
750 }
751 
752 #ifdef RE_DIAG
753 
754 /*
755  * The following routine is designed to test for a defect on some
756  * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64#
757  * lines connected to the bus, however for a 32-bit only card, they
758  * should be pulled high. The result of this defect is that the
759  * NIC will not work right if you plug it into a 64-bit slot: DMA
760  * operations will be done with 64-bit transfers, which will fail
761  * because the 64-bit data lines aren't connected.
762  *
763  * There's no way to work around this (short of taking a soldering
764  * iron to the board); however, we can detect it. The method we use
765  * here is to put the NIC into digital loopback mode, set the receiver
766  * to promiscuous mode, and then try to send a frame. We then compare
767  * the frame data we sent to what was received. If the data matches,
768  * then the NIC is working correctly, otherwise we know the user has
769  * a defective NIC which has been mistakenly plugged into a 64-bit PCI
770  * slot. In the latter case, there's no way the NIC can work correctly,
771  * so we print out a message on the console and abort the device attach.
772  */
773 
774 static int
775 re_diag(struct rl_softc *sc)
776 {
777 	struct ifnet		*ifp = sc->rl_ifp;
778 	struct mbuf		*m0;
779 	struct ether_header	*eh;
780 	struct rl_desc		*cur_rx;
781 	u_int16_t		status;
782 	u_int32_t		rxstat;
783 	int			total_len, i, error = 0, phyaddr;
784 	u_int8_t		dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
785 	u_int8_t		src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };
786 
787 	/* Allocate a single mbuf */
788 	MGETHDR(m0, M_NOWAIT, MT_DATA);
789 	if (m0 == NULL)
790 		return (ENOBUFS);
791 
792 	RL_LOCK(sc);
793 
794 	/*
795 	 * Initialize the NIC in test mode. This sets the chip up
796 	 * so that it can send and receive frames, but performs the
797 	 * following special functions:
798 	 * - Puts receiver in promiscuous mode
799 	 * - Enables digital loopback mode
800 	 * - Leaves interrupts turned off
801 	 */
802 
803 	ifp->if_flags |= IFF_PROMISC;
804 	sc->rl_testmode = 1;
805 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
806 	re_init_locked(sc);
807 	sc->rl_flags |= RL_FLAG_LINK;
808 	if (sc->rl_type == RL_8169)
809 		phyaddr = 1;
810 	else
811 		phyaddr = 0;
812 
813 	re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_RESET);
814 	for (i = 0; i < RL_TIMEOUT; i++) {
815 		status = re_miibus_readreg(sc->rl_dev, phyaddr, MII_BMCR);
816 		if (!(status & BMCR_RESET))
817 			break;
818 	}
819 
820 	re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_LOOP);
821 	CSR_WRITE_2(sc, RL_ISR, RL_INTRS);
822 
823 	DELAY(100000);
824 
825 	/* Put some data in the mbuf */
826 
827 	eh = mtod(m0, struct ether_header *);
828 	bcopy ((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN);
829 	bcopy ((char *)&src, eh->ether_shost, ETHER_ADDR_LEN);
830 	eh->ether_type = htons(ETHERTYPE_IP);
831 	m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;
832 
833 	/*
834 	 * Queue the packet, start transmission.
835 	 * Note: IF_HANDOFF() ultimately calls re_start() for us.
836 	 */
837 
838 	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
839 	RL_UNLOCK(sc);
840 	/* XXX: re_diag must not be called when in ALTQ mode */
841 	IF_HANDOFF(&ifp->if_snd, m0, ifp);
842 	RL_LOCK(sc);
843 	m0 = NULL;
844 
845 	/* Wait for it to propagate through the chip */
846 
847 	DELAY(100000);
848 	for (i = 0; i < RL_TIMEOUT; i++) {
849 		status = CSR_READ_2(sc, RL_ISR);
850 		CSR_WRITE_2(sc, RL_ISR, status);
851 		if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) ==
852 		    (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK))
853 			break;
854 		DELAY(10);
855 	}
856 
857 	if (i == RL_TIMEOUT) {
858 		device_printf(sc->rl_dev,
859 		    "diagnostic failed, failed to receive packet in"
860 		    " loopback mode\n");
861 		error = EIO;
862 		goto done;
863 	}
864 
865 	/*
866 	 * The packet should have been dumped into the first
867 	 * entry in the RX DMA ring. Grab it from there.
868 	 */
869 
870 	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
871 	    sc->rl_ldata.rl_rx_list_map,
872 	    BUS_DMASYNC_POSTREAD);
873 	bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
874 	    sc->rl_ldata.rl_rx_desc[0].rx_dmamap,
875 	    BUS_DMASYNC_POSTREAD);
876 	bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag,
877 	    sc->rl_ldata.rl_rx_desc[0].rx_dmamap);
878 
879 	m0 = sc->rl_ldata.rl_rx_desc[0].rx_m;
880 	sc->rl_ldata.rl_rx_desc[0].rx_m = NULL;
881 	eh = mtod(m0, struct ether_header *);
882 
883 	cur_rx = &sc->rl_ldata.rl_rx_list[0];
884 	total_len = RL_RXBYTES(cur_rx);
885 	rxstat = le32toh(cur_rx->rl_cmdstat);
886 
887 	if (total_len != ETHER_MIN_LEN) {
888 		device_printf(sc->rl_dev,
889 		    "diagnostic failed, received short packet\n");
890 		error = EIO;
891 		goto done;
892 	}
893 
894 	/* Test that the received packet data matches what we sent. */
895 
896 	if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) ||
897 	    bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) ||
898 	    ntohs(eh->ether_type) != ETHERTYPE_IP) {
899 		device_printf(sc->rl_dev, "WARNING, DMA FAILURE!\n");
900 		device_printf(sc->rl_dev, "expected TX data: %6D/%6D/0x%x\n",
901 		    dst, ":", src, ":", ETHERTYPE_IP);
902 		device_printf(sc->rl_dev, "received RX data: %6D/%6D/0x%x\n",
903 		    eh->ether_dhost, ":", eh->ether_shost, ":",
904 		    ntohs(eh->ether_type));
905 		device_printf(sc->rl_dev, "You may have a defective 32-bit "
906 		    "NIC plugged into a 64-bit PCI slot.\n");
907 		device_printf(sc->rl_dev, "Please re-install the NIC in a "
908 		    "32-bit slot for proper operation.\n");
909 		device_printf(sc->rl_dev, "Read the re(4) man page for more "
910 		    "details.\n");
911 		error = EIO;
912 	}
913 
914 done:
915 	/* Turn interface off, release resources */
916 
917 	sc->rl_testmode = 0;
918 	sc->rl_flags &= ~RL_FLAG_LINK;
919 	ifp->if_flags &= ~IFF_PROMISC;
920 	re_stop(sc);
921 	if (m0 != NULL)
922 		m_freem(m0);
923 
924 	RL_UNLOCK(sc);
925 
926 	return (error);
927 }
928 
929 #endif
930 
931 /*
932  * Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device
933  * IDs against our list and return a device name if we find a match.
934  */
935 static int
936 re_probe(device_t dev)
937 {
938 	const struct rl_type	*t;
939 	uint16_t		devid, vendor;
940 	uint16_t		revid, sdevid;
941 	int			i;
942 
943 	vendor = pci_get_vendor(dev);
944 	devid = pci_get_device(dev);
945 	revid = pci_get_revid(dev);
946 	sdevid = pci_get_subdevice(dev);
947 
948 	if (vendor == LINKSYS_VENDORID && devid == LINKSYS_DEVICEID_EG1032) {
949 		if (sdevid != LINKSYS_SUBDEVICE_EG1032_REV3) {
950 			/*
951 			 * Only attach to rev. 3 of the Linksys EG1032 adapter.
952 			 * Rev. 2 is supported by sk(4).
953 			 */
954 			return (ENXIO);
955 		}
956 	}
957 
958 	if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) {
959 		if (revid != 0x20) {
960 			/* 8139, let rl(4) take care of this device. */
961 			return (ENXIO);
962 		}
963 	}
964 
965 	t = re_devs;
966 	for (i = 0; i < nitems(re_devs); i++, t++) {
967 		if (vendor == t->rl_vid && devid == t->rl_did) {
968 			device_set_desc(dev, t->rl_name);
969 			return (BUS_PROBE_DEFAULT);
970 		}
971 	}
972 
973 	return (ENXIO);
974 }
975 
976 /*
977  * Map a single buffer address.
978  */
979 
980 static void
981 re_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
982 {
983 	bus_addr_t		*addr;
984 
985 	if (error)
986 		return;
987 
988 	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
989 	addr = arg;
990 	*addr = segs->ds_addr;
991 }
992 
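/*
 * Allocate DMA tags, descriptor rings and per-buffer DMA maps for the
 * TX/RX rings and the hardware statistics block.
 */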
993 static int
994 re_allocmem(device_t dev, struct rl_softc *sc)
995 {
996 	bus_addr_t		lowaddr;
997 	bus_size_t		rx_list_size, tx_list_size;
998 	int			error;
999 	int			i;
1000 
1001 	rx_list_size = sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc);
1002 	tx_list_size = sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc);
1003 
1004 	/*
1005 	 * Allocate the parent bus DMA tag appropriate for PCI.
1006 	 * In order to use DAC, the RL_CPLUSCMD_PCI_DAC bit of the
1007 	 * RL_CPLUS_CMD register must be set.  However, some RealTek chips
1008 	 * are known to have buggy DAC handling, so disable DAC by limiting
1009 	 * the DMA address space to 32 bits.  PCIe variants of RealTek
1010 	 * chips may not have this limitation.
1011 	 */
1012 	lowaddr = BUS_SPACE_MAXADDR;
1013 	if ((sc->rl_flags & RL_FLAG_PCIE) == 0)
1014 		lowaddr = BUS_SPACE_MAXADDR_32BIT;
1015 	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
1016 	    lowaddr, BUS_SPACE_MAXADDR, NULL, NULL,
1017 	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
1018 	    NULL, NULL, &sc->rl_parent_tag);
1019 	if (error) {
1020 		device_printf(dev, "could not allocate parent DMA tag\n");
1021 		return (error);
1022 	}
1023 
1024 	/*
1025 	 * Allocate map for TX mbufs.
1026 	 */
1027 	error = bus_dma_tag_create(sc->rl_parent_tag, 1, 0,
1028 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1029 	    NULL, MCLBYTES * RL_NTXSEGS, RL_NTXSEGS, 4096, 0,
1030 	    NULL, NULL, &sc->rl_ldata.rl_tx_mtag);
1031 	if (error) {
1032 		device_printf(dev, "could not allocate TX DMA tag\n");
1033 		return (error);
1034 	}
1035 
1036 	/*
1037 	 * Allocate map for RX mbufs.
1038 	 */
1039 
1040 	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
1041 		error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t),
1042 		    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1043 		    MJUM9BYTES, 1, MJUM9BYTES, 0, NULL, NULL,
1044 		    &sc->rl_ldata.rl_jrx_mtag);
1045 		if (error) {
1046 			device_printf(dev,
1047 			    "could not allocate jumbo RX DMA tag\n");
1048 			return (error);
1049 		}
1050 	}
1051 	error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 0,
1052 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1053 	    MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rl_ldata.rl_rx_mtag);
1054 	if (error) {
1055 		device_printf(dev, "could not allocate RX DMA tag\n");
1056 		return (error);
1057 	}
1058 
1059 	/*
1060 	 * Allocate map for TX descriptor list.
1061 	 */
1062 	error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
1063 	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
1064 	    NULL, tx_list_size, 1, tx_list_size, 0,
1065 	    NULL, NULL, &sc->rl_ldata.rl_tx_list_tag);
1066 	if (error) {
1067 		device_printf(dev, "could not allocate TX DMA ring tag\n");
1068 		return (error);
1069 	}
1070 
1071 	/* Allocate DMA'able memory for the TX ring */
1072 
1073 	error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag,
1074 	    (void **)&sc->rl_ldata.rl_tx_list,
1075 	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
1076 	    &sc->rl_ldata.rl_tx_list_map);
1077 	if (error) {
1078 		device_printf(dev, "could not allocate TX DMA ring\n");
1079 		return (error);
1080 	}
1081 
1082 	/* Load the map for the TX ring. */
1083 
1084 	sc->rl_ldata.rl_tx_list_addr = 0;
1085 	error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag,
1086 	     sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list,
1087 	     tx_list_size, re_dma_map_addr,
1088 	     &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT);
1089 	if (error != 0 || sc->rl_ldata.rl_tx_list_addr == 0) {
1090 		device_printf(dev, "could not load TX DMA ring\n");
1091 		return (ENOMEM);
1092 	}
1093 
1094 	/* Create DMA maps for TX buffers */
1095 
1096 	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
1097 		error = bus_dmamap_create(sc->rl_ldata.rl_tx_mtag, 0,
1098 		    &sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
1099 		if (error) {
1100 			device_printf(dev, "could not create DMA map for TX\n");
1101 			return (error);
1102 		}
1103 	}
1104 
1105 	/*
1106 	 * Allocate map for RX descriptor list.
1107 	 */
1108 	error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
1109 	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
1110 	    NULL, rx_list_size, 1, rx_list_size, 0,
1111 	    NULL, NULL, &sc->rl_ldata.rl_rx_list_tag);
1112 	if (error) {
1113 		device_printf(dev, "could not create RX DMA ring tag\n");
1114 		return (error);
1115 	}
1116 
1117 	/* Allocate DMA'able memory for the RX ring */
1118 
1119 	error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag,
1120 	    (void **)&sc->rl_ldata.rl_rx_list,
1121 	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
1122 	    &sc->rl_ldata.rl_rx_list_map);
1123 	if (error) {
1124 		device_printf(dev, "could not allocate RX DMA ring\n");
1125 		return (error);
1126 	}
1127 
1128 	/* Load the map for the RX ring. */
1129 
1130 	sc->rl_ldata.rl_rx_list_addr = 0;
1131 	error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag,
1132 	     sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list,
1133 	     rx_list_size, re_dma_map_addr,
1134 	     &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT);
1135 	if (error != 0 || sc->rl_ldata.rl_rx_list_addr == 0) {
1136 		device_printf(dev, "could not load RX DMA ring\n");
1137 		return (ENOMEM);
1138 	}
1139 
1140 	/* Create DMA maps for RX buffers */
1141 
1142 	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
1143 		error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0,
1144 		    &sc->rl_ldata.rl_jrx_sparemap);
1145 		if (error) {
1146 			device_printf(dev,
1147 			    "could not create spare DMA map for jumbo RX\n");
1148 			return (error);
1149 		}
1150 		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1151 			error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0,
1152 			    &sc->rl_ldata.rl_jrx_desc[i].rx_dmamap);
1153 			if (error) {
1154 				device_printf(dev,
1155 				    "could not create DMA map for jumbo RX\n");
1156 				return (error);
1157 			}
1158 		}
1159 	}
1160 	error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
1161 	    &sc->rl_ldata.rl_rx_sparemap);
1162 	if (error) {
1163 		device_printf(dev, "could not create spare DMA map for RX\n");
1164 		return (error);
1165 	}
1166 	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1167 		error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
1168 		    &sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
1169 		if (error) {
1170 			device_printf(dev, "could not create DMA map for RX\n");
1171 			return (error);
1172 		}
1173 	}
1174 
1175 	/* Create DMA map for statistics. */
1176 	error = bus_dma_tag_create(sc->rl_parent_tag, RL_DUMP_ALIGN, 0,
1177 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1178 	    sizeof(struct rl_stats), 1, sizeof(struct rl_stats), 0, NULL, NULL,
1179 	    &sc->rl_ldata.rl_stag);
1180 	if (error) {
1181 		device_printf(dev, "could not create statistics DMA tag\n");
1182 		return (error);
1183 	}
1184 	/* Allocate DMA'able memory for statistics. */
1185 	error = bus_dmamem_alloc(sc->rl_ldata.rl_stag,
1186 	    (void **)&sc->rl_ldata.rl_stats,
1187 	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
1188 	    &sc->rl_ldata.rl_smap);
1189 	if (error) {
1190 		device_printf(dev,
1191 		    "could not allocate statistics DMA memory\n");
1192 		return (error);
1193 	}
1194 	/* Load the map for statistics. */
1195 	sc->rl_ldata.rl_stats_addr = 0;
1196 	error = bus_dmamap_load(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap,
1197 	    sc->rl_ldata.rl_stats, sizeof(struct rl_stats), re_dma_map_addr,
1198 	     &sc->rl_ldata.rl_stats_addr, BUS_DMA_NOWAIT);
1199 	if (error != 0 || sc->rl_ldata.rl_stats_addr == 0) {
1200 		device_printf(dev, "could not load statistics DMA memory\n");
1201 		return (ENOMEM);
1202 	}
1203 
1204 	return (0);
1205 }
1206 
1207 /*
1208  * Attach the interface. Allocate softc structures, do ifmedia
1209  * setup and ethernet/BPF attach.
1210  */
1211 static int
1212 re_attach(device_t dev)
1213 {
1214 	u_char			eaddr[ETHER_ADDR_LEN];
1215 	u_int16_t		as[ETHER_ADDR_LEN / 2];
1216 	struct rl_softc		*sc;
1217 	struct ifnet		*ifp;
1218 	const struct rl_hwrev	*hw_rev;
1219 	int			capmask, error = 0, hwrev, i, msic, msixc,
1220 				phy, reg, rid;
1221 	u_int32_t		cap, ctl;
1222 	u_int16_t		devid, re_did = 0;
1223 	uint8_t			cfg;
1224 
1225 	sc = device_get_softc(dev);
1226 	sc->rl_dev = dev;
1227 
1228 	mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1229 	    MTX_DEF);
1230 	callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0);
1231 
1232 	/*
1233 	 * Map control/status registers.
1234 	 */
1235 	pci_enable_busmaster(dev);
1236 
1237 	devid = pci_get_device(dev);
1238 	/*
1239 	 * Prefer memory space register mapping over I/O space.
1240 	 * Because the RTL8169SC does not seem to work when memory mapping
1241 	 * is used, always activate I/O mapping for that device.
1242 	 */
1243 	if (devid == RT_DEVICEID_8169SC)
1244 		prefer_iomap = 1;
1245 	if (prefer_iomap == 0) {
1246 		sc->rl_res_id = PCIR_BAR(1);
1247 		sc->rl_res_type = SYS_RES_MEMORY;
1248 		/* RTL8168/8101E seems to use different BARs. */
1249 		if (devid == RT_DEVICEID_8168 || devid == RT_DEVICEID_8101E)
1250 			sc->rl_res_id = PCIR_BAR(2);
1251 	} else {
1252 		sc->rl_res_id = PCIR_BAR(0);
1253 		sc->rl_res_type = SYS_RES_IOPORT;
1254 	}
1255 	sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
1256 	    &sc->rl_res_id, RF_ACTIVE);
1257 	if (sc->rl_res == NULL && prefer_iomap == 0) {
1258 		sc->rl_res_id = PCIR_BAR(0);
1259 		sc->rl_res_type = SYS_RES_IOPORT;
1260 		sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
1261 		    &sc->rl_res_id, RF_ACTIVE);
1262 	}
1263 	if (sc->rl_res == NULL) {
1264 		device_printf(dev, "couldn't map ports/memory\n");
1265 		error = ENXIO;
1266 		goto fail;
1267 	}
1268 
1269 	sc->rl_btag = rman_get_bustag(sc->rl_res);
1270 	sc->rl_bhandle = rman_get_bushandle(sc->rl_res);
1271 
1272 	msic = pci_msi_count(dev);
1273 	msixc = pci_msix_count(dev);
1274 	if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
1275 		sc->rl_flags |= RL_FLAG_PCIE;
1276 		sc->rl_expcap = reg;
1277 	}
1278 	if (bootverbose) {
1279 		device_printf(dev, "MSI count : %d\n", msic);
1280 		device_printf(dev, "MSI-X count : %d\n", msixc);
1281 	}
1282 	if (msix_disable > 0)
1283 		msixc = 0;
1284 	if (msi_disable > 0)
1285 		msic = 0;
1286 	/* Prefer MSI-X to MSI. */
1287 	if (msixc > 0) {
1288 		msixc = RL_MSI_MESSAGES;
1289 		rid = PCIR_BAR(4);
1290 		sc->rl_res_pba = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1291 		    &rid, RF_ACTIVE);
1292 		if (sc->rl_res_pba == NULL) {
1293 			device_printf(sc->rl_dev,
1294 			    "could not allocate MSI-X PBA resource\n");
1295 		}
1296 		if (sc->rl_res_pba != NULL &&
1297 		    pci_alloc_msix(dev, &msixc) == 0) {
1298 			if (msixc == RL_MSI_MESSAGES) {
1299 				device_printf(dev, "Using %d MSI-X message\n",
1300 				    msixc);
1301 				sc->rl_flags |= RL_FLAG_MSIX;
1302 			} else
1303 				pci_release_msi(dev);
1304 		}
1305 		if ((sc->rl_flags & RL_FLAG_MSIX) == 0) {
1306 			if (sc->rl_res_pba != NULL)
1307 				bus_release_resource(dev, SYS_RES_MEMORY, rid,
1308 				    sc->rl_res_pba);
1309 			sc->rl_res_pba = NULL;
1310 			msixc = 0;
1311 		}
1312 	}
1313 	/* Prefer MSI to INTx. */
1314 	if (msixc == 0 && msic > 0) {
1315 		msic = RL_MSI_MESSAGES;
1316 		if (pci_alloc_msi(dev, &msic) == 0) {
1317 			if (msic == RL_MSI_MESSAGES) {
1318 				device_printf(dev, "Using %d MSI message\n",
1319 				    msic);
1320 				sc->rl_flags |= RL_FLAG_MSI;
1321 				/* Explicitly set MSI enable bit. */
1322 				CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
1323 				cfg = CSR_READ_1(sc, RL_CFG2);
1324 				cfg |= RL_CFG2_MSI;
1325 				CSR_WRITE_1(sc, RL_CFG2, cfg);
1326 				CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
1327 			} else
1328 				pci_release_msi(dev);
1329 		}
1330 		if ((sc->rl_flags & RL_FLAG_MSI) == 0)
1331 			msic = 0;
1332 	}
1333 
1334 	/* Allocate interrupt */
1335 	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) {
1336 		rid = 0;
1337 		sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1338 		    RF_SHAREABLE | RF_ACTIVE);
1339 		if (sc->rl_irq[0] == NULL) {
1340 			device_printf(dev, "couldn't allocate IRQ resources\n");
1341 			error = ENXIO;
1342 			goto fail;
1343 		}
1344 	} else {
1345 		for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) {
1346 			sc->rl_irq[i] = bus_alloc_resource_any(dev,
1347 			    SYS_RES_IRQ, &rid, RF_ACTIVE);
1348 			if (sc->rl_irq[i] == NULL) {
1349 				device_printf(dev,
1350 				    "couldn't allocate IRQ resources for "
1351 				    "message %d\n", rid);
1352 				error = ENXIO;
1353 				goto fail;
1354 			}
1355 		}
1356 	}
1357 
1358 	if ((sc->rl_flags & RL_FLAG_MSI) == 0) {
1359 		CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
1360 		cfg = CSR_READ_1(sc, RL_CFG2);
1361 		if ((cfg & RL_CFG2_MSI) != 0) {
1362 			device_printf(dev, "turning off MSI enable bit.\n");
1363 			cfg &= ~RL_CFG2_MSI;
1364 			CSR_WRITE_1(sc, RL_CFG2, cfg);
1365 		}
1366 		CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
1367 	}
1368 
1369 	/* Disable ASPM L0S/L1 and CLKREQ. */
1370 	if (sc->rl_expcap != 0) {
1371 		cap = pci_read_config(dev, sc->rl_expcap +
1372 		    PCIER_LINK_CAP, 2);
1373 		if ((cap & PCIEM_LINK_CAP_ASPM) != 0) {
1374 			ctl = pci_read_config(dev, sc->rl_expcap +
1375 			    PCIER_LINK_CTL, 2);
1376 			if ((ctl & (PCIEM_LINK_CTL_ECPM |
1377 			    PCIEM_LINK_CTL_ASPMC))!= 0) {
1378 				ctl &= ~(PCIEM_LINK_CTL_ECPM |
1379 				    PCIEM_LINK_CTL_ASPMC);
1380 				pci_write_config(dev, sc->rl_expcap +
1381 				    PCIER_LINK_CTL, ctl, 2);
1382 				device_printf(dev, "ASPM disabled\n");
1383 			}
1384 		} else
1385 			device_printf(dev, "no ASPM capability\n");
1386 	}
1387 
1388 	hw_rev = re_hwrevs;
1389 	hwrev = CSR_READ_4(sc, RL_TXCFG);
1390 	switch (hwrev & 0x70000000) {
1391 	case 0x00000000:
1392 	case 0x10000000:
1393 		device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0xfc800000);
1394 		hwrev &= (RL_TXCFG_HWREV | 0x80000000);
1395 		break;
1396 	default:
1397 		device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0x7c800000);
1398 		sc->rl_macrev = hwrev & 0x00700000;
1399 		hwrev &= RL_TXCFG_HWREV;
1400 		break;
1401 	}
1402 	device_printf(dev, "MAC rev. 0x%08x\n", sc->rl_macrev);
1403 	while (hw_rev->rl_desc != NULL) {
1404 		if (hw_rev->rl_rev == hwrev) {
1405 			sc->rl_type = hw_rev->rl_type;
1406 			sc->rl_hwrev = hw_rev;
1407 			break;
1408 		}
1409 		hw_rev++;
1410 	}
1411 	if (hw_rev->rl_desc == NULL) {
1412 		device_printf(dev, "Unknown H/W revision: 0x%08x\n", hwrev);
1413 		error = ENXIO;
1414 		goto fail;
1415 	}
1416 
1417 	switch (hw_rev->rl_rev) {
1418 	case RL_HWREV_8139CPLUS:
1419 		sc->rl_flags |= RL_FLAG_FASTETHER | RL_FLAG_AUTOPAD;
1420 		break;
1421 	case RL_HWREV_8100E:
1422 	case RL_HWREV_8101E:
1423 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_FASTETHER;
1424 		break;
1425 	case RL_HWREV_8102E:
1426 	case RL_HWREV_8102EL:
1427 	case RL_HWREV_8102EL_SPIN1:
1428 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 |
1429 		    RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP |
1430 		    RL_FLAG_AUTOPAD;
1431 		break;
1432 	case RL_HWREV_8103E:
1433 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 |
1434 		    RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP |
1435 		    RL_FLAG_AUTOPAD | RL_FLAG_MACSLEEP;
1436 		break;
1437 	case RL_HWREV_8401E:
1438 	case RL_HWREV_8105E:
1439 	case RL_HWREV_8105E_SPIN1:
1440 	case RL_HWREV_8106E:
1441 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
1442 		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
1443 		    RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD;
1444 		break;
1445 	case RL_HWREV_8402:
1446 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
1447 		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
1448 		    RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD |
1449 		    RL_FLAG_CMDSTOP_WAIT_TXQ;
1450 		break;
1451 	case RL_HWREV_8168B_SPIN1:
1452 	case RL_HWREV_8168B_SPIN2:
1453 		sc->rl_flags |= RL_FLAG_WOLRXENB;
1454 		/* FALLTHROUGH */
1455 	case RL_HWREV_8168B_SPIN3:
1456 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT;
1457 		break;
1458 	case RL_HWREV_8168C_SPIN2:
1459 		sc->rl_flags |= RL_FLAG_MACSLEEP;
1460 		/* FALLTHROUGH */
1461 	case RL_HWREV_8168C:
1462 		if (sc->rl_macrev == 0x00200000)
1463 			sc->rl_flags |= RL_FLAG_MACSLEEP;
1464 		/* FALLTHROUGH */
1465 	case RL_HWREV_8168CP:
1466 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
1467 		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
1468 		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK;
1469 		break;
1470 	case RL_HWREV_8168D:
1471 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
1472 		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
1473 		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
1474 		    RL_FLAG_WOL_MANLINK;
1475 		break;
1476 	case RL_HWREV_8168DP:
1477 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
1478 		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_AUTOPAD |
1479 		    RL_FLAG_JUMBOV2 | RL_FLAG_WAIT_TXPOLL | RL_FLAG_WOL_MANLINK;
1480 		break;
1481 	case RL_HWREV_8168E:
1482 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
1483 		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
1484 		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
1485 		    RL_FLAG_WOL_MANLINK;
1486 		break;
1487 	case RL_HWREV_8168E_VL:
1488 	case RL_HWREV_8168F:
1489 		sc->rl_flags |= RL_FLAG_EARLYOFF;
1490 		/* FALLTHROUGH */
1491 	case RL_HWREV_8411:
1492 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
1493 		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
1494 		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
1495 		    RL_FLAG_CMDSTOP_WAIT_TXQ | RL_FLAG_WOL_MANLINK;
1496 		break;
1497 	case RL_HWREV_8168EP:
1498 	case RL_HWREV_8168G:
1499 	case RL_HWREV_8411B:
1500 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
1501 		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
1502 		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
1503 		    RL_FLAG_CMDSTOP_WAIT_TXQ | RL_FLAG_WOL_MANLINK |
1504 		    RL_FLAG_8168G_PLUS;
1505 		break;
1506 	case RL_HWREV_8168GU:
1507 	case RL_HWREV_8168H:
1508 		if (pci_get_device(dev) == RT_DEVICEID_8101E) {
1509 			/* RTL8106E(US), RTL8107E */
1510 			sc->rl_flags |= RL_FLAG_FASTETHER;
1511 		} else
1512 			sc->rl_flags |= RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK;
1513 
1514 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
1515 		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
1516 		    RL_FLAG_AUTOPAD | RL_FLAG_CMDSTOP_WAIT_TXQ |
1517 		    RL_FLAG_8168G_PLUS;
1518 		break;
1519 	case RL_HWREV_8169_8110SB:
1520 	case RL_HWREV_8169_8110SBL:
1521 	case RL_HWREV_8169_8110SC:
1522 	case RL_HWREV_8169_8110SCE:
1523 		sc->rl_flags |= RL_FLAG_PHYWAKE;
1524 		/* FALLTHROUGH */
1525 	case RL_HWREV_8169:
1526 	case RL_HWREV_8169S:
1527 	case RL_HWREV_8110S:
1528 		sc->rl_flags |= RL_FLAG_MACRESET;
1529 		break;
1530 	default:
1531 		break;
1532 	}
1533 
1534 	if (sc->rl_hwrev->rl_rev == RL_HWREV_8139CPLUS) {
1535 		sc->rl_cfg0 = RL_8139_CFG0;
1536 		sc->rl_cfg1 = RL_8139_CFG1;
1537 		sc->rl_cfg2 = 0;
1538 		sc->rl_cfg3 = RL_8139_CFG3;
1539 		sc->rl_cfg4 = RL_8139_CFG4;
1540 		sc->rl_cfg5 = RL_8139_CFG5;
1541 	} else {
1542 		sc->rl_cfg0 = RL_CFG0;
1543 		sc->rl_cfg1 = RL_CFG1;
1544 		sc->rl_cfg2 = RL_CFG2;
1545 		sc->rl_cfg3 = RL_CFG3;
1546 		sc->rl_cfg4 = RL_CFG4;
1547 		sc->rl_cfg5 = RL_CFG5;
1548 	}
1549 
1550 	/* Reset the adapter. */
1551 	RL_LOCK(sc);
1552 	re_reset(sc);
1553 	RL_UNLOCK(sc);
1554 
1555 	/* Enable PME. */
1556 	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
1557 	cfg = CSR_READ_1(sc, sc->rl_cfg1);
1558 	cfg |= RL_CFG1_PME;
1559 	CSR_WRITE_1(sc, sc->rl_cfg1, cfg);
1560 	cfg = CSR_READ_1(sc, sc->rl_cfg5);
1561 	cfg &= RL_CFG5_PME_STS;
1562 	CSR_WRITE_1(sc, sc->rl_cfg5, cfg);
1563 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
1564 
1565 	if ((sc->rl_flags & RL_FLAG_PAR) != 0) {
1566 		/*
1567 		 * XXX Should have a better way to extract station
1568 		 * address from EEPROM.
1569 		 */
1570 		for (i = 0; i < ETHER_ADDR_LEN; i++)
1571 			eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
1572 	} else {
1573 		sc->rl_eewidth = RL_9356_ADDR_LEN;
1574 		re_read_eeprom(sc, (caddr_t)&re_did, 0, 1);
1575 		if (re_did != 0x8129)
1576 			sc->rl_eewidth = RL_9346_ADDR_LEN;
1577 
1578 		/*
1579 		 * Get station address from the EEPROM.
1580 		 */
1581 		re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3);
1582 		for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
1583 			as[i] = le16toh(as[i]);
1584 		bcopy(as, eaddr, ETHER_ADDR_LEN);
1585 	}
1586 
1587 	if (sc->rl_type == RL_8169) {
1588 		/* Set RX length mask and number of descriptors. */
1589 		sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN;
1590 		sc->rl_txstart = RL_GTXSTART;
1591 		sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT;
1592 		sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT;
1593 	} else {
1594 		/* Set RX length mask and number of descriptors. */
1595 		sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN;
1596 		sc->rl_txstart = RL_TXSTART;
1597 		sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT;
1598 		sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT;
1599 	}
1600 
1601 	error = re_allocmem(dev, sc);
1602 	if (error)
1603 		goto fail;
1604 	re_add_sysctls(sc);
1605 
1606 	ifp = sc->rl_ifp = if_alloc(IFT_ETHER);
1607 	if (ifp == NULL) {
1608 		device_printf(dev, "can not if_alloc()\n");
1609 		error = ENOSPC;
1610 		goto fail;
1611 	}
1612 
1613 	/* Take controller out of deep sleep mode. */
1614 	if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
1615 		if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
1616 			CSR_WRITE_1(sc, RL_GPIO,
1617 			    CSR_READ_1(sc, RL_GPIO) | 0x01);
1618 		else
1619 			CSR_WRITE_1(sc, RL_GPIO,
1620 			    CSR_READ_1(sc, RL_GPIO) & ~0x01);
1621 	}
1622 
1623 	/* Take PHY out of power down mode. */
1624 	if ((sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0) {
1625 		CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80);
1626 		if (hw_rev->rl_rev == RL_HWREV_8401E)
1627 			CSR_WRITE_1(sc, 0xD1, CSR_READ_1(sc, 0xD1) & ~0x08);
1628 	}
1629 	if ((sc->rl_flags & RL_FLAG_PHYWAKE) != 0) {
1630 		re_gmii_writereg(dev, 1, 0x1f, 0);
1631 		re_gmii_writereg(dev, 1, 0x0e, 0);
1632 	}
1633 
1634 	ifp->if_softc = sc;
1635 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1636 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1637 	ifp->if_ioctl = re_ioctl;
1638 	ifp->if_start = re_start;
1639 	/*
1640 	 * The RTL8168/8111C generates frames with a bad IP checksum if the
1641 	 * packet has IP options, so disable TX checksum offloading.
1642 	 */
1643 	if (sc->rl_hwrev->rl_rev == RL_HWREV_8168C ||
1644 	    sc->rl_hwrev->rl_rev == RL_HWREV_8168C_SPIN2 ||
1645 	    sc->rl_hwrev->rl_rev == RL_HWREV_8168CP) {
1646 		ifp->if_hwassist = 0;
1647 		ifp->if_capabilities = IFCAP_RXCSUM | IFCAP_TSO4;
1648 	} else {
1649 		ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP;
1650 		ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4;
1651 	}
1652 	ifp->if_hwassist |= CSUM_TSO;
1653 	ifp->if_capenable = ifp->if_capabilities;
1654 	ifp->if_init = re_init;
1655 	IFQ_SET_MAXLEN(&ifp->if_snd, RL_IFQ_MAXLEN);
1656 	ifp->if_snd.ifq_drv_maxlen = RL_IFQ_MAXLEN;
1657 	IFQ_SET_READY(&ifp->if_snd);
1658 
1659 	NET_TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc);
1660 
1661 #define	RE_PHYAD_INTERNAL	 0
1662 
1663 	/* Do MII setup. */
1664 	phy = RE_PHYAD_INTERNAL;
1665 	if (sc->rl_type == RL_8169)
1666 		phy = 1;
1667 	capmask = BMSR_DEFCAPMASK;
1668 	if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0)
1669 		 capmask &= ~BMSR_EXTSTAT;
1670 	error = mii_attach(dev, &sc->rl_miibus, ifp, re_ifmedia_upd,
1671 	    re_ifmedia_sts, capmask, phy, MII_OFFSET_ANY, MIIF_DOPAUSE);
1672 	if (error != 0) {
1673 		device_printf(dev, "attaching PHYs failed\n");
1674 		goto fail;
1675 	}
1676 
1677 	/*
1678 	 * Call MI attach routine.
1679 	 */
1680 	ether_ifattach(ifp, eaddr);
1681 
1682 	/* VLAN capability setup */
1683 	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
1684 	if (ifp->if_capabilities & IFCAP_HWCSUM)
1685 		ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
1686 	/* Enable WOL if PM is supported. */
1687 	if (pci_find_cap(sc->rl_dev, PCIY_PMG, &reg) == 0)
1688 		ifp->if_capabilities |= IFCAP_WOL;
1689 	ifp->if_capenable = ifp->if_capabilities;
1690 	ifp->if_capenable &= ~(IFCAP_WOL_UCAST | IFCAP_WOL_MCAST);
1691 	/*
1692 	 * Don't enable TSO by default.  It is known to generate
1693 	 * corrupted TCP segments (bad TCP options) under certain
1694 	 * circumstances.
1695 	 */
1696 	ifp->if_hwassist &= ~CSUM_TSO;
1697 	ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_VLAN_HWTSO);
1698 #ifdef DEVICE_POLLING
1699 	ifp->if_capabilities |= IFCAP_POLLING;
1700 #endif
1701 	/*
1702 	 * Tell the upper layer(s) we support long frames.
1703 	 * Must appear after the call to ether_ifattach() because
1704 	 * ether_ifattach() sets ifi_hdrlen to the default value.
1705 	 */
1706 	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
1707 
1708 #ifdef DEV_NETMAP
1709 	re_netmap_attach(sc);
1710 #endif /* DEV_NETMAP */
1711 
1712 #ifdef RE_DIAG
1713 	/*
1714 	 * Perform hardware diagnostic on the original RTL8169.
1715 	 * Some 32-bit cards were incorrectly wired and would
1716 	 * malfunction if plugged into a 64-bit slot.
1717 	 */
1718 	if (hwrev == RL_HWREV_8169) {
1719 		error = re_diag(sc);
1720 		if (error) {
1721 			device_printf(dev,
1722 			    "attach aborted due to hardware diag failure\n");
1723 			ether_ifdetach(ifp);
1724 			goto fail;
1725 		}
1726 	}
1727 #endif
1728 
1729 #ifdef RE_TX_MODERATION
1730 	intr_filter = 1;
1731 #endif
1732 	/* Hook interrupt last to avoid having to lock softc */
1733 	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 &&
1734 	    intr_filter == 0) {
1735 		error = bus_setup_intr(dev, sc->rl_irq[0],
1736 		    INTR_TYPE_NET | INTR_MPSAFE, NULL, re_intr_msi, sc,
1737 		    &sc->rl_intrhand[0]);
1738 	} else {
1739 		error = bus_setup_intr(dev, sc->rl_irq[0],
1740 		    INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc,
1741 		    &sc->rl_intrhand[0]);
1742 	}
1743 	if (error) {
1744 		device_printf(dev, "couldn't set up irq\n");
1745 		ether_ifdetach(ifp);
1746 		goto fail;
1747 	}
1748 
1749 	DEBUGNET_SET(ifp, re);
1750 
1751 fail:
1752 	if (error)
1753 		re_detach(dev);
1754 
1755 	return (error);
1756 }
1757 
1758 /*
1759  * Shutdown hardware and free up resources. This can be called any
1760  * time after the mutex has been initialized. It is called in both
1761  * the error case in attach and the normal detach case so it needs
1762  * to be careful about only freeing resources that have actually been
1763  * allocated.
1764  */
1765 static int
1766 re_detach(device_t dev)
1767 {
1768 	struct rl_softc		*sc;
1769 	struct ifnet		*ifp;
1770 	int			i, rid;
1771 
1772 	sc = device_get_softc(dev);
1773 	ifp = sc->rl_ifp;
1774 	KASSERT(mtx_initialized(&sc->rl_mtx), ("re mutex not initialized"));
1775 
1776 	/* These should only be active if attach succeeded */
1777 	if (device_is_attached(dev)) {
1778 #ifdef DEVICE_POLLING
1779 		if (ifp->if_capenable & IFCAP_POLLING)
1780 			ether_poll_deregister(ifp);
1781 #endif
1782 		RL_LOCK(sc);
1783 #if 0
1784 		sc->suspended = 1;
1785 #endif
1786 		re_stop(sc);
1787 		RL_UNLOCK(sc);
1788 		callout_drain(&sc->rl_stat_callout);
1789 		taskqueue_drain(taskqueue_fast, &sc->rl_inttask);
1790 		/*
1791 		 * Force off the IFF_UP flag here, in case someone
1792 		 * still had a BPF descriptor attached to this
1793 		 * interface. If they do, ether_ifdetach() will cause
1794 		 * the BPF code to try to clear the promiscuous mode
1795 		 * flag, which will bubble down to re_ioctl(),
1796 		 * which will try to call re_init() again. This will
1797 		 * turn the NIC back on and restart the MII ticker,
1798 		 * which will panic the system when the kernel tries
1799 		 * to invoke the re_tick() function that isn't there
1800 		 * anymore.
1801 		 */
1802 		ifp->if_flags &= ~IFF_UP;
1803 		ether_ifdetach(ifp);
1804 	}
1805 	if (sc->rl_miibus)
1806 		device_delete_child(dev, sc->rl_miibus);
1807 	bus_generic_detach(dev);
1808 
1809 	/*
1810 	 * The rest is resource deallocation, so we should already be
1811 	 * stopped here.
1812 	 */
1813 
1814 	if (sc->rl_intrhand[0] != NULL) {
1815 		bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]);
1816 		sc->rl_intrhand[0] = NULL;
1817 	}
1818 	if (ifp != NULL) {
1819 #ifdef DEV_NETMAP
1820 		netmap_detach(ifp);
1821 #endif /* DEV_NETMAP */
1822 		if_free(ifp);
1823 	}
1824 	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0)
1825 		rid = 0;
1826 	else
1827 		rid = 1;
1828 	if (sc->rl_irq[0] != NULL) {
1829 		bus_release_resource(dev, SYS_RES_IRQ, rid, sc->rl_irq[0]);
1830 		sc->rl_irq[0] = NULL;
1831 	}
1832 	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0)
1833 		pci_release_msi(dev);
1834 	if (sc->rl_res_pba) {
1835 		rid = PCIR_BAR(4);
1836 		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->rl_res_pba);
1837 	}
1838 	if (sc->rl_res)
1839 		bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id,
1840 		    sc->rl_res);
1841 
1842 	/* Unload and free the RX DMA ring memory and map */
1843 
1844 	if (sc->rl_ldata.rl_rx_list_tag) {
1845 		if (sc->rl_ldata.rl_rx_list_addr)
1846 			bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag,
1847 			    sc->rl_ldata.rl_rx_list_map);
1848 		if (sc->rl_ldata.rl_rx_list)
1849 			bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag,
1850 			    sc->rl_ldata.rl_rx_list,
1851 			    sc->rl_ldata.rl_rx_list_map);
1852 		bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag);
1853 	}
1854 
1855 	/* Unload and free the TX DMA ring memory and map */
1856 
1857 	if (sc->rl_ldata.rl_tx_list_tag) {
1858 		if (sc->rl_ldata.rl_tx_list_addr)
1859 			bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag,
1860 			    sc->rl_ldata.rl_tx_list_map);
1861 		if (sc->rl_ldata.rl_tx_list)
1862 			bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag,
1863 			    sc->rl_ldata.rl_tx_list,
1864 			    sc->rl_ldata.rl_tx_list_map);
1865 		bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag);
1866 	}
1867 
1868 	/* Destroy all the RX and TX buffer maps */
1869 
1870 	if (sc->rl_ldata.rl_tx_mtag) {
1871 		for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
1872 			if (sc->rl_ldata.rl_tx_desc[i].tx_dmamap)
1873 				bus_dmamap_destroy(sc->rl_ldata.rl_tx_mtag,
1874 				    sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
1875 		}
1876 		bus_dma_tag_destroy(sc->rl_ldata.rl_tx_mtag);
1877 	}
1878 	if (sc->rl_ldata.rl_rx_mtag) {
1879 		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1880 			if (sc->rl_ldata.rl_rx_desc[i].rx_dmamap)
1881 				bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
1882 				    sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
1883 		}
1884 		if (sc->rl_ldata.rl_rx_sparemap)
1885 			bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
1886 			    sc->rl_ldata.rl_rx_sparemap);
1887 		bus_dma_tag_destroy(sc->rl_ldata.rl_rx_mtag);
1888 	}
1889 	if (sc->rl_ldata.rl_jrx_mtag) {
1890 		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1891 			if (sc->rl_ldata.rl_jrx_desc[i].rx_dmamap)
1892 				bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag,
1893 				    sc->rl_ldata.rl_jrx_desc[i].rx_dmamap);
1894 		}
1895 		if (sc->rl_ldata.rl_jrx_sparemap)
1896 			bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag,
1897 			    sc->rl_ldata.rl_jrx_sparemap);
1898 		bus_dma_tag_destroy(sc->rl_ldata.rl_jrx_mtag);
1899 	}
1900 	/* Unload and free the stats buffer and map */
1901 
1902 	if (sc->rl_ldata.rl_stag) {
1903 		if (sc->rl_ldata.rl_stats_addr)
1904 			bus_dmamap_unload(sc->rl_ldata.rl_stag,
1905 			    sc->rl_ldata.rl_smap);
1906 		if (sc->rl_ldata.rl_stats)
1907 			bus_dmamem_free(sc->rl_ldata.rl_stag,
1908 			    sc->rl_ldata.rl_stats, sc->rl_ldata.rl_smap);
1909 		bus_dma_tag_destroy(sc->rl_ldata.rl_stag);
1910 	}
1911 
1912 	if (sc->rl_parent_tag)
1913 		bus_dma_tag_destroy(sc->rl_parent_tag);
1914 
1915 	mtx_destroy(&sc->rl_mtx);
1916 
1917 	return (0);
1918 }
1919 
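/*
 * Return an RX descriptor to the chip without replacing its mbuf:
 * restore the buffer length and hand ownership back so the existing
 * buffer is simply reused.
 */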
1920 static __inline void
1921 re_discard_rxbuf(struct rl_softc *sc, int idx)
1922 {
1923 	struct rl_desc		*desc;
1924 	struct rl_rxdesc	*rxd;
1925 	uint32_t		cmdstat;
1926 
1927 	if (sc->rl_ifp->if_mtu > RL_MTU &&
1928 	    (sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
1929 		rxd = &sc->rl_ldata.rl_jrx_desc[idx];
1930 	else
1931 		rxd = &sc->rl_ldata.rl_rx_desc[idx];
1932 	desc = &sc->rl_ldata.rl_rx_list[idx];
1933 	desc->rl_vlanctl = 0;
1934 	cmdstat = rxd->rx_size;
1935 	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
1936 		cmdstat |= RL_RDESC_CMD_EOR;
1937 	desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
1938 }
1939 
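/*
 * Allocate and DMA-map a fresh mbuf cluster for RX descriptor 'idx'
 * and program the descriptor with its bus address and length.  On
 * failure the descriptor and its current mbuf are left untouched and
 * ENOBUFS is returned.
 */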
1940 static int
1941 re_newbuf(struct rl_softc *sc, int idx)
1942 {
1943 	struct mbuf		*m;
1944 	struct rl_rxdesc	*rxd;
1945 	bus_dma_segment_t	segs[1];
1946 	bus_dmamap_t		map;
1947 	struct rl_desc		*desc;
1948 	uint32_t		cmdstat;
1949 	int			error, nsegs;
1950 
1951 	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1952 	if (m == NULL)
1953 		return (ENOBUFS);
1954 
1955 	m->m_len = m->m_pkthdr.len = MCLBYTES;
1956 #ifdef RE_FIXUP_RX
1957 	/*
1958 	 * This is part of an evil trick to deal with non-x86 platforms.
1959 	 * The RealTek chip requires RX buffers to be aligned on 64-bit
1960 	 * boundaries, but that will hose non-x86 machines. To get around
1961 	 * this, we leave some empty space at the start of each buffer
1962 	 * and for non-x86 hosts, we copy the buffer back six bytes
1963 	 * to achieve word alignment. This is slightly more efficient
1964 	 * than allocating a new buffer, copying the contents, and
1965 	 * discarding the old buffer.
1966 	 */
1967 	m_adj(m, RE_ETHER_ALIGN);
1968 #endif
1969 	error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_rx_mtag,
1970 	    sc->rl_ldata.rl_rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
1971 	if (error != 0) {
1972 		m_freem(m);
1973 		return (ENOBUFS);
1974 	}
1975 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1976 
1977 	rxd = &sc->rl_ldata.rl_rx_desc[idx];
1978 	if (rxd->rx_m != NULL) {
1979 		bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap,
1980 		    BUS_DMASYNC_POSTREAD);
1981 		bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap);
1982 	}
1983 
1984 	rxd->rx_m = m;
1985 	map = rxd->rx_dmamap;
1986 	rxd->rx_dmamap = sc->rl_ldata.rl_rx_sparemap;
1987 	rxd->rx_size = segs[0].ds_len;
1988 	sc->rl_ldata.rl_rx_sparemap = map;
1989 	bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap,
1990 	    BUS_DMASYNC_PREREAD);
1991 
1992 	desc = &sc->rl_ldata.rl_rx_list[idx];
1993 	desc->rl_vlanctl = 0;
1994 	desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr));
1995 	desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr));
1996 	cmdstat = segs[0].ds_len;
1997 	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
1998 		cmdstat |= RL_RDESC_CMD_EOR;
1999 	desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
2000 
2001 	return (0);
2002 }
2003 
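/*
 * Jumbo frame version of re_newbuf(): uses a 9KB cluster and the
 * jumbo RX DMA tag and spare map.
 */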
2004 static int
2005 re_jumbo_newbuf(struct rl_softc *sc, int idx)
2006 {
2007 	struct mbuf		*m;
2008 	struct rl_rxdesc	*rxd;
2009 	bus_dma_segment_t	segs[1];
2010 	bus_dmamap_t		map;
2011 	struct rl_desc		*desc;
2012 	uint32_t		cmdstat;
2013 	int			error, nsegs;
2014 
2015 	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
2016 	if (m == NULL)
2017 		return (ENOBUFS);
2018 	m->m_len = m->m_pkthdr.len = MJUM9BYTES;
2019 #ifdef RE_FIXUP_RX
2020 	m_adj(m, RE_ETHER_ALIGN);
2021 #endif
2022 	error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_jrx_mtag,
2023 	    sc->rl_ldata.rl_jrx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
2024 	if (error != 0) {
2025 		m_freem(m);
2026 		return (ENOBUFS);
2027 	}
2028 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2029 
2030 	rxd = &sc->rl_ldata.rl_jrx_desc[idx];
2031 	if (rxd->rx_m != NULL) {
2032 		bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap,
2033 		    BUS_DMASYNC_POSTREAD);
2034 		bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap);
2035 	}
2036 
2037 	rxd->rx_m = m;
2038 	map = rxd->rx_dmamap;
2039 	rxd->rx_dmamap = sc->rl_ldata.rl_jrx_sparemap;
2040 	rxd->rx_size = segs[0].ds_len;
2041 	sc->rl_ldata.rl_jrx_sparemap = map;
2042 	bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap,
2043 	    BUS_DMASYNC_PREREAD);
2044 
2045 	desc = &sc->rl_ldata.rl_rx_list[idx];
2046 	desc->rl_vlanctl = 0;
2047 	desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr));
2048 	desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr));
2049 	cmdstat = segs[0].ds_len;
2050 	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
2051 		cmdstat |= RL_RDESC_CMD_EOR;
2052 	desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
2053 
2054 	return (0);
2055 }
2056 
2057 #ifdef RE_FIXUP_RX
2058 static __inline void
2059 re_fixup_rx(struct mbuf *m)
2060 {
2061 	int                     i;
2062 	uint16_t                *src, *dst;
2063 
2064 	src = mtod(m, uint16_t *);
2065 	dst = src - (RE_ETHER_ALIGN - ETHER_ALIGN) / sizeof *src;
2066 
2067 	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
2068 		*dst++ = *src++;
2069 
2070 	m->m_data -= RE_ETHER_ALIGN - ETHER_ALIGN;
2071 }
2072 #endif
2073 
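/*
 * Initialize the TX descriptor ring: clear all descriptors, mark the
 * last one with EOR and reset the producer/consumer indices and the
 * free descriptor count.
 */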
2074 static int
2075 re_tx_list_init(struct rl_softc *sc)
2076 {
2077 	struct rl_desc		*desc;
2078 	int			i;
2079 
2080 	RL_LOCK_ASSERT(sc);
2081 
2082 	bzero(sc->rl_ldata.rl_tx_list,
2083 	    sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc));
2084 	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++)
2085 		sc->rl_ldata.rl_tx_desc[i].tx_m = NULL;
2086 #ifdef DEV_NETMAP
2087 	re_netmap_tx_init(sc);
2088 #endif /* DEV_NETMAP */
2089 	/* Set EOR. */
2090 	desc = &sc->rl_ldata.rl_tx_list[sc->rl_ldata.rl_tx_desc_cnt - 1];
2091 	desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOR);
2092 
2093 	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
2094 	    sc->rl_ldata.rl_tx_list_map,
2095 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2096 
2097 	sc->rl_ldata.rl_tx_prodidx = 0;
2098 	sc->rl_ldata.rl_tx_considx = 0;
2099 	sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt;
2100 
2101 	return (0);
2102 }
2103 
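/*
 * Initialize the RX descriptor ring with standard (2KB cluster)
 * buffers and reset the receive-side state.
 */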
2104 static int
2105 re_rx_list_init(struct rl_softc *sc)
2106 {
2107 	int			error, i;
2108 
2109 	bzero(sc->rl_ldata.rl_rx_list,
2110 	    sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc));
2111 	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
2112 		sc->rl_ldata.rl_rx_desc[i].rx_m = NULL;
2113 		if ((error = re_newbuf(sc, i)) != 0)
2114 			return (error);
2115 	}
2116 #ifdef DEV_NETMAP
2117 	re_netmap_rx_init(sc);
2118 #endif /* DEV_NETMAP */
2119 
2120 	/* Flush the RX descriptors */
2121 
2122 	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2123 	    sc->rl_ldata.rl_rx_list_map,
2124 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2125 
2126 	sc->rl_ldata.rl_rx_prodidx = 0;
2127 	sc->rl_head = sc->rl_tail = NULL;
2128 	sc->rl_int_rx_act = 0;
2129 
2130 	return (0);
2131 }
2132 
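/*
 * Initialize the RX descriptor ring with jumbo (9KB cluster) buffers
 * and reset the receive-side state.
 */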
2133 static int
2134 re_jrx_list_init(struct rl_softc *sc)
2135 {
2136 	int			error, i;
2137 
2138 	bzero(sc->rl_ldata.rl_rx_list,
2139 	    sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc));
2140 	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
2141 		sc->rl_ldata.rl_jrx_desc[i].rx_m = NULL;
2142 		if ((error = re_jumbo_newbuf(sc, i)) != 0)
2143 			return (error);
2144 	}
2145 
2146 	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2147 	    sc->rl_ldata.rl_rx_list_map,
2148 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2149 
2150 	sc->rl_ldata.rl_rx_prodidx = 0;
2151 	sc->rl_head = sc->rl_tail = NULL;
2152 	sc->rl_int_rx_act = 0;
2153 
2154 	return (0);
2155 }
2156 
2157 /*
2158  * RX handler for C+ and 8169. For the gigE chips, we support
2159  * the reception of jumbo frames that have been fragmented
2160  * across multiple 2K mbuf cluster buffers.
2161  */
2162 static int
2163 re_rxeof(struct rl_softc *sc, int *rx_npktsp)
2164 {
2165 	struct mbuf		*m;
2166 	struct ifnet		*ifp;
2167 	int			i, rxerr, total_len;
2168 	struct rl_desc		*cur_rx;
2169 	u_int32_t		rxstat, rxvlan;
2170 	int			jumbo, maxpkt = 16, rx_npkts = 0;
2171 
2172 	RL_LOCK_ASSERT(sc);
2173 
2174 	ifp = sc->rl_ifp;
2175 #ifdef DEV_NETMAP
2176 	if (netmap_rx_irq(ifp, 0, &rx_npkts))
2177 		return 0;
2178 #endif /* DEV_NETMAP */
2179 	if (ifp->if_mtu > RL_MTU && (sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
2180 		jumbo = 1;
2181 	else
2182 		jumbo = 0;
2183 
2184 	/* Invalidate the descriptor memory */
2185 
2186 	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2187 	    sc->rl_ldata.rl_rx_list_map,
2188 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2189 
2190 	for (i = sc->rl_ldata.rl_rx_prodidx; maxpkt > 0;
2191 	    i = RL_RX_DESC_NXT(sc, i)) {
2192 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2193 			break;
2194 		cur_rx = &sc->rl_ldata.rl_rx_list[i];
2195 		rxstat = le32toh(cur_rx->rl_cmdstat);
2196 		if ((rxstat & RL_RDESC_STAT_OWN) != 0)
2197 			break;
2198 		total_len = rxstat & sc->rl_rxlenmask;
2199 		rxvlan = le32toh(cur_rx->rl_vlanctl);
2200 		if (jumbo != 0)
2201 			m = sc->rl_ldata.rl_jrx_desc[i].rx_m;
2202 		else
2203 			m = sc->rl_ldata.rl_rx_desc[i].rx_m;
2204 
2205 		if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
2206 		    (rxstat & (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) !=
2207 		    (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) {
2208 			/*
2209 			 * RTL8168C or later controllers do not
2210 			 * support multi-fragment packets.
2211 			 */
2212 			re_discard_rxbuf(sc, i);
2213 			continue;
2214 		} else if ((rxstat & RL_RDESC_STAT_EOF) == 0) {
2215 			if (re_newbuf(sc, i) != 0) {
2216 				/*
2217 				 * If this is part of a multi-fragment packet,
2218 				 * discard all the pieces.
2219 				 */
2220 				if (sc->rl_head != NULL) {
2221 					m_freem(sc->rl_head);
2222 					sc->rl_head = sc->rl_tail = NULL;
2223 				}
2224 				re_discard_rxbuf(sc, i);
2225 				continue;
2226 			}
2227 			m->m_len = RE_RX_DESC_BUFLEN;
2228 			if (sc->rl_head == NULL)
2229 				sc->rl_head = sc->rl_tail = m;
2230 			else {
2231 				m->m_flags &= ~M_PKTHDR;
2232 				sc->rl_tail->m_next = m;
2233 				sc->rl_tail = m;
2234 			}
2235 			continue;
2236 		}
2237 
2238 		/*
2239 		 * NOTE: for the 8139C+, the frame length field
2240 		 * is always 12 bits in size, but for the gigE chips,
2241 		 * it is 13 bits (since the max RX frame length is 16K).
2242 		 * Unfortunately, all 32 bits in the status word
2243 		 * were already used, so to make room for the extra
2244 		 * length bit, RealTek took out the 'frame alignment
2245 		 * error' bit and shifted the other status bits
2246 		 * over one slot. The OWN, EOR, FS and LS bits are
2247 		 * still in the same places. We have already extracted
2248 		 * the frame length and checked the OWN bit, so rather
2249 		 * than using an alternate bit mapping, we shift the
2250 		 * status bits one space to the right so we can evaluate
2251 		 * them using the 8169 status as though it was in the
2252 		 * same format as that of the 8139C+.
2253 		 */
2254 		if (sc->rl_type == RL_8169)
2255 			rxstat >>= 1;
2256 
2257 		/*
2258 		 * If total_len > 2^13-1, both _RXERRSUM and _GIANT will be
2259 		 * set, but if the CRC error bit is clear, it is still a valid frame.
2260 		 */
2261 		if ((rxstat & RL_RDESC_STAT_RXERRSUM) != 0) {
2262 			rxerr = 1;
2263 			if ((sc->rl_flags & RL_FLAG_JUMBOV2) == 0 &&
2264 			    total_len > 8191 &&
2265 			    (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT)
2266 				rxerr = 0;
2267 			if (rxerr != 0) {
2268 				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2269 				/*
2270 				 * If this is part of a multi-fragment packet,
2271 				 * discard all the pieces.
2272 				 */
2273 				if (sc->rl_head != NULL) {
2274 					m_freem(sc->rl_head);
2275 					sc->rl_head = sc->rl_tail = NULL;
2276 				}
2277 				re_discard_rxbuf(sc, i);
2278 				continue;
2279 			}
2280 		}
2281 
2282 		/*
2283 		 * If allocating a replacement mbuf fails,
2284 		 * reload the current one.
2285 		 */
2286 		if (jumbo != 0)
2287 			rxerr = re_jumbo_newbuf(sc, i);
2288 		else
2289 			rxerr = re_newbuf(sc, i);
2290 		if (rxerr != 0) {
2291 			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
2292 			if (sc->rl_head != NULL) {
2293 				m_freem(sc->rl_head);
2294 				sc->rl_head = sc->rl_tail = NULL;
2295 			}
2296 			re_discard_rxbuf(sc, i);
2297 			continue;
2298 		}
2299 
2300 		if (sc->rl_head != NULL) {
2301 			if (jumbo != 0)
2302 				m->m_len = total_len;
2303 			else {
2304 				m->m_len = total_len % RE_RX_DESC_BUFLEN;
2305 				if (m->m_len == 0)
2306 					m->m_len = RE_RX_DESC_BUFLEN;
2307 			}
2308 			/*
2309 			 * Special case: if there are 4 bytes or fewer
2310 			 * in this buffer, the mbuf can be discarded:
2311 			 * the last 4 bytes are the CRC, which we don't
2312 			 * care about anyway.
2313 			 */
2314 			if (m->m_len <= ETHER_CRC_LEN) {
2315 				sc->rl_tail->m_len -=
2316 				    (ETHER_CRC_LEN - m->m_len);
2317 				m_freem(m);
2318 			} else {
2319 				m->m_len -= ETHER_CRC_LEN;
2320 				m->m_flags &= ~M_PKTHDR;
2321 				sc->rl_tail->m_next = m;
2322 			}
2323 			m = sc->rl_head;
2324 			sc->rl_head = sc->rl_tail = NULL;
2325 			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
2326 		} else
2327 			m->m_pkthdr.len = m->m_len =
2328 			    (total_len - ETHER_CRC_LEN);
2329 
2330 #ifdef RE_FIXUP_RX
2331 		re_fixup_rx(m);
2332 #endif
2333 		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2334 		m->m_pkthdr.rcvif = ifp;
2335 
2336 		/* Do RX checksumming if enabled */
2337 
2338 		if (ifp->if_capenable & IFCAP_RXCSUM) {
2339 			if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
2340 				/* Check IP header checksum */
2341 				if (rxstat & RL_RDESC_STAT_PROTOID)
2342 					m->m_pkthdr.csum_flags |=
2343 					    CSUM_IP_CHECKED;
2344 				if (!(rxstat & RL_RDESC_STAT_IPSUMBAD))
2345 					m->m_pkthdr.csum_flags |=
2346 					    CSUM_IP_VALID;
2347 
2348 				/* Check TCP/UDP checksum */
2349 				if ((RL_TCPPKT(rxstat) &&
2350 				    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
2351 				    (RL_UDPPKT(rxstat) &&
2352 				     !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
2353 					m->m_pkthdr.csum_flags |=
2354 						CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
2355 					m->m_pkthdr.csum_data = 0xffff;
2356 				}
2357 			} else {
2358 				/*
2359 				 * RTL8168C/RTL8168CP/RTL8111C/RTL8111CP
2360 				 */
2361 				if ((rxstat & RL_RDESC_STAT_PROTOID) &&
2362 				    (rxvlan & RL_RDESC_IPV4))
2363 					m->m_pkthdr.csum_flags |=
2364 					    CSUM_IP_CHECKED;
2365 				if (!(rxstat & RL_RDESC_STAT_IPSUMBAD) &&
2366 				    (rxvlan & RL_RDESC_IPV4))
2367 					m->m_pkthdr.csum_flags |=
2368 					    CSUM_IP_VALID;
2369 				if (((rxstat & RL_RDESC_STAT_TCP) &&
2370 				    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
2371 				    ((rxstat & RL_RDESC_STAT_UDP) &&
2372 				    !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
2373 					m->m_pkthdr.csum_flags |=
2374 						CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
2375 					m->m_pkthdr.csum_data = 0xffff;
2376 				}
2377 			}
2378 		}
2379 		maxpkt--;
2380 		if (rxvlan & RL_RDESC_VLANCTL_TAG) {
2381 			m->m_pkthdr.ether_vtag =
2382 			    bswap16((rxvlan & RL_RDESC_VLANCTL_DATA));
2383 			m->m_flags |= M_VLANTAG;
2384 		}
2385 		RL_UNLOCK(sc);
2386 		(*ifp->if_input)(ifp, m);
2387 		RL_LOCK(sc);
2388 		rx_npkts++;
2389 	}
2390 
2391 	/* Flush the RX DMA ring */
2392 
2393 	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2394 	    sc->rl_ldata.rl_rx_list_map,
2395 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2396 
2397 	sc->rl_ldata.rl_rx_prodidx = i;
2398 
2399 	if (rx_npktsp != NULL)
2400 		*rx_npktsp = rx_npkts;
2401 	if (maxpkt)
2402 		return (EAGAIN);
2403 
2404 	return (0);
2405 }
2406 
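/*
 * TX completion handler: walk the ring from the consumer index,
 * reclaim descriptors the chip has finished with, free the attached
 * mbufs and update the interface statistics.
 */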
2407 static void
2408 re_txeof(struct rl_softc *sc)
2409 {
2410 	struct ifnet		*ifp;
2411 	struct rl_txdesc	*txd;
2412 	u_int32_t		txstat;
2413 	int			cons;
2414 
2415 	cons = sc->rl_ldata.rl_tx_considx;
2416 	if (cons == sc->rl_ldata.rl_tx_prodidx)
2417 		return;
2418 
2419 	ifp = sc->rl_ifp;
2420 #ifdef DEV_NETMAP
2421 	if (netmap_tx_irq(ifp, 0))
2422 		return;
2423 #endif /* DEV_NETMAP */
2424 	/* Invalidate the TX descriptor list */
2425 	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
2426 	    sc->rl_ldata.rl_tx_list_map,
2427 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2428 
2429 	for (; cons != sc->rl_ldata.rl_tx_prodidx;
2430 	    cons = RL_TX_DESC_NXT(sc, cons)) {
2431 		txstat = le32toh(sc->rl_ldata.rl_tx_list[cons].rl_cmdstat);
2432 		if (txstat & RL_TDESC_STAT_OWN)
2433 			break;
2434 		/*
2435 		 * We only stash mbufs in the last descriptor
2436 		 * in a fragment chain, which also happens to
2437 		 * be the only place where the TX status bits
2438 		 * are valid.
2439 		 */
2440 		if (txstat & RL_TDESC_CMD_EOF) {
2441 			txd = &sc->rl_ldata.rl_tx_desc[cons];
2442 			bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
2443 			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2444 			bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag,
2445 			    txd->tx_dmamap);
2446 			KASSERT(txd->tx_m != NULL,
2447 			    ("%s: freeing NULL mbufs!", __func__));
2448 			m_freem(txd->tx_m);
2449 			txd->tx_m = NULL;
2450 			if (txstat & (RL_TDESC_STAT_EXCESSCOL|
2451 			    RL_TDESC_STAT_COLCNT))
2452 				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
2453 			if (txstat & RL_TDESC_STAT_TXERRSUM)
2454 				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2455 			else
2456 				if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2457 		}
2458 		sc->rl_ldata.rl_tx_free++;
2459 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2460 	}
2461 	sc->rl_ldata.rl_tx_considx = cons;
2462 
2463 	/* No changes made to the TX ring, so no flush needed */
2464 
2465 	if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) {
2466 #ifdef RE_TX_MODERATION
2467 		/*
2468 		 * If not all descriptors have been reaped yet, reload
2469 		 * the timer so that we will eventually get another
2470 		 * interrupt that will cause us to re-enter this routine.
2471 		 * This is done in case the transmitter has gone idle.
2472 		 */
2473 		CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2474 #endif
2475 	} else
2476 		sc->rl_watchdog_timer = 0;
2477 }
2478 
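/*
 * Once-a-second callout: drive the MII tick, check for link changes,
 * reclaim completed TX descriptors and run the watchdog.
 */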
2479 static void
2480 re_tick(void *xsc)
2481 {
2482 	struct rl_softc		*sc;
2483 	struct mii_data		*mii;
2484 
2485 	sc = xsc;
2486 
2487 	RL_LOCK_ASSERT(sc);
2488 
2489 	mii = device_get_softc(sc->rl_miibus);
2490 	mii_tick(mii);
2491 	if ((sc->rl_flags & RL_FLAG_LINK) == 0)
2492 		re_miibus_statchg(sc->rl_dev);
2493 	/*
2494 	 * Reclaim transmitted frames here. Technically this is not
2495 	 * necessary, but it ensures periodic reclamation regardless
2496 	 * of Tx completion interrupts, which seem to get lost on
2497 	 * PCIe-based controllers under certain conditions.
2498 	 */
2499 	re_txeof(sc);
2500 	re_watchdog(sc);
2501 	callout_reset(&sc->rl_stat_callout, hz, re_tick, sc);
2502 }
2503 
2504 #ifdef DEVICE_POLLING
2505 static int
2506 re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2507 {
2508 	struct rl_softc *sc = ifp->if_softc;
2509 	int rx_npkts = 0;
2510 
2511 	RL_LOCK(sc);
2512 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2513 		rx_npkts = re_poll_locked(ifp, cmd, count);
2514 	RL_UNLOCK(sc);
2515 	return (rx_npkts);
2516 }
2517 
2518 static int
2519 re_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
2520 {
2521 	struct rl_softc *sc = ifp->if_softc;
2522 	int rx_npkts;
2523 
2524 	RL_LOCK_ASSERT(sc);
2525 
2526 	sc->rxcycles = count;
2527 	re_rxeof(sc, &rx_npkts);
2528 	re_txeof(sc);
2529 
2530 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2531 		re_start_locked(ifp);
2532 
2533 	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
2534 		u_int16_t       status;
2535 
2536 		status = CSR_READ_2(sc, RL_ISR);
2537 		if (status == 0xffff)
2538 			return (rx_npkts);
2539 		if (status)
2540 			CSR_WRITE_2(sc, RL_ISR, status);
2541 		if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
2542 		    (sc->rl_flags & RL_FLAG_PCIE))
2543 			CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2544 
2545 		/*
2546 		 * XXX check behaviour on receiver stalls.
2547 		 */
2548 
2549 		if (status & RL_ISR_SYSTEM_ERR) {
2550 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2551 			re_init_locked(sc);
2552 		}
2553 	}
2554 	return (rx_npkts);
2555 }
2556 #endif /* DEVICE_POLLING */
2557 
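/*
 * Interrupt filter routine: verify that the interrupt is ours, mask
 * further interrupts and defer the actual work to the fast taskqueue.
 */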
2558 static int
2559 re_intr(void *arg)
2560 {
2561 	struct rl_softc		*sc;
2562 	uint16_t		status;
2563 
2564 	sc = arg;
2565 
2566 	status = CSR_READ_2(sc, RL_ISR);
2567 	if (status == 0xFFFF || (status & RL_INTRS_CPLUS) == 0)
2568 		return (FILTER_STRAY);
2569 	CSR_WRITE_2(sc, RL_IMR, 0);
2570 
2571 	taskqueue_enqueue(taskqueue_fast, &sc->rl_inttask);
2572 
2573 	return (FILTER_HANDLED);
2574 }
2575 
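/*
 * Deferred interrupt handler, run from the fast taskqueue: service
 * RX/TX events, restart the transmitter if necessary and re-enable
 * interrupts once no more work is pending.
 */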
2576 static void
2577 re_int_task(void *arg, int npending)
2578 {
2579 	struct rl_softc		*sc;
2580 	struct ifnet		*ifp;
2581 	u_int16_t		status;
2582 	int			rval = 0;
2583 
2584 	sc = arg;
2585 	ifp = sc->rl_ifp;
2586 
2587 	RL_LOCK(sc);
2588 
2589 	status = CSR_READ_2(sc, RL_ISR);
2590 	CSR_WRITE_2(sc, RL_ISR, status);
2591 
2592 	if (sc->suspended ||
2593 	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2594 		RL_UNLOCK(sc);
2595 		return;
2596 	}
2597 
2598 #ifdef DEVICE_POLLING
2599 	if (ifp->if_capenable & IFCAP_POLLING) {
2600 		RL_UNLOCK(sc);
2601 		return;
2602 	}
2603 #endif
2604 
2605 	if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW))
2606 		rval = re_rxeof(sc, NULL);
2607 
2608 	/*
2609 	 * Some chips will ignore a second TX request issued
2610 	 * while an existing transmission is in progress. If
2611 	 * the transmitter goes idle but there are still
2612 	 * packets waiting to be sent, we need to restart the
2613 	 * channel here to flush them out. This only seems to
2614 	 * be required with the PCIe devices.
2615 	 */
2616 	if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
2617 	    (sc->rl_flags & RL_FLAG_PCIE))
2618 		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2619 	if (status & (
2620 #ifdef RE_TX_MODERATION
2621 	    RL_ISR_TIMEOUT_EXPIRED|
2622 #else
2623 	    RL_ISR_TX_OK|
2624 #endif
2625 	    RL_ISR_TX_ERR|RL_ISR_TX_DESC_UNAVAIL))
2626 		re_txeof(sc);
2627 
2628 	if (status & RL_ISR_SYSTEM_ERR) {
2629 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2630 		re_init_locked(sc);
2631 	}
2632 
2633 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2634 		re_start_locked(ifp);
2635 
2636 	RL_UNLOCK(sc);
2637 
2638 	if ((CSR_READ_2(sc, RL_ISR) & RL_INTRS_CPLUS) || rval) {
2639 		taskqueue_enqueue(taskqueue_fast, &sc->rl_inttask);
2640 		return;
2641 	}
2642 
2643 	CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
2644 }
2645 
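/*
 * MSI/MSI-X interrupt handler.  RX interrupts are moderated with the
 * chip's one-shot timer: while RX is busy the RX interrupt sources
 * are masked and the timer interrupt is used to schedule further RX
 * processing.
 */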
2646 static void
2647 re_intr_msi(void *xsc)
2648 {
2649 	struct rl_softc		*sc;
2650 	struct ifnet		*ifp;
2651 	uint16_t		intrs, status;
2652 
2653 	sc = xsc;
2654 	RL_LOCK(sc);
2655 
2656 	ifp = sc->rl_ifp;
2657 #ifdef DEVICE_POLLING
2658 	if (ifp->if_capenable & IFCAP_POLLING) {
2659 		RL_UNLOCK(sc);
2660 		return;
2661 	}
2662 #endif
2663 	/* Disable interrupts. */
2664 	CSR_WRITE_2(sc, RL_IMR, 0);
2665 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2666 		RL_UNLOCK(sc);
2667 		return;
2668 	}
2669 
2670 	intrs = RL_INTRS_CPLUS;
2671 	status = CSR_READ_2(sc, RL_ISR);
2672 	CSR_WRITE_2(sc, RL_ISR, status);
2673 	if (sc->rl_int_rx_act > 0) {
2674 		intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW |
2675 		    RL_ISR_RX_OVERRUN);
2676 		status &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW |
2677 		    RL_ISR_RX_OVERRUN);
2678 	}
2679 
2680 	if (status & (RL_ISR_TIMEOUT_EXPIRED | RL_ISR_RX_OK | RL_ISR_RX_ERR |
2681 	    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) {
2682 		re_rxeof(sc, NULL);
2683 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2684 			if (sc->rl_int_rx_mod != 0 &&
2685 			    (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR |
2686 			    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) != 0) {
2687 				/* Rearm one-shot timer. */
2688 				CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2689 				intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR |
2690 				    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN);
2691 				sc->rl_int_rx_act = 1;
2692 			} else {
2693 				intrs |= RL_ISR_RX_OK | RL_ISR_RX_ERR |
2694 				    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN;
2695 				sc->rl_int_rx_act = 0;
2696 			}
2697 		}
2698 	}
2699 
2700 	/*
2701 	 * Some chips will ignore a second TX request issued
2702 	 * while an existing transmission is in progress. If
2703 	 * the transmitter goes idle but there are still
2704 	 * packets waiting to be sent, we need to restart the
2705 	 * channel here to flush them out. This only seems to
2706 	 * be required with the PCIe devices.
2707 	 */
2708 	if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
2709 	    (sc->rl_flags & RL_FLAG_PCIE))
2710 		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2711 	if (status & (RL_ISR_TX_OK | RL_ISR_TX_ERR | RL_ISR_TX_DESC_UNAVAIL))
2712 		re_txeof(sc);
2713 
2714 	if (status & RL_ISR_SYSTEM_ERR) {
2715 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2716 		re_init_locked(sc);
2717 	}
2718 
2719 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2720 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2721 			re_start_locked(ifp);
2722 		CSR_WRITE_2(sc, RL_IMR, intrs);
2723 	}
2724 	RL_UNLOCK(sc);
2725 }
2726 
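/*
 * Encapsulate an mbuf chain in the TX ring: pad short frames when
 * necessary, set up the checksum/TSO and VLAN bits, map the chain and
 * fill in one descriptor per DMA segment before handing ownership of
 * the packet to the chip.
 */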
2727 static int
2728 re_encap(struct rl_softc *sc, struct mbuf **m_head)
2729 {
2730 	struct rl_txdesc	*txd, *txd_last;
2731 	bus_dma_segment_t	segs[RL_NTXSEGS];
2732 	bus_dmamap_t		map;
2733 	struct mbuf		*m_new;
2734 	struct rl_desc		*desc;
2735 	int			nsegs, prod;
2736 	int			i, error, ei, si;
2737 	int			padlen;
2738 	uint32_t		cmdstat, csum_flags, vlanctl;
2739 
2740 	RL_LOCK_ASSERT(sc);
2741 	M_ASSERTPKTHDR((*m_head));
2742 
2743 	/*
2744 	 * With some of the RealTek chips, using the checksum offload
2745 	 * support in conjunction with the autopadding feature results
2746 	 * in the transmission of corrupt frames. For example, if we
2747 	 * need to send a really small IP fragment that's less than 60
2748 	 * bytes in size, and IP header checksumming is enabled, the
2749 	 * resulting ethernet frame that appears on the wire will
2750 	 * have a garbled payload. To work around this, if TX IP checksum
2751 	 * offload is enabled, we always manually pad short frames out
2752 	 * to the minimum ethernet frame size.
2753 	 */
2754 	if ((sc->rl_flags & RL_FLAG_AUTOPAD) == 0 &&
2755 	    (*m_head)->m_pkthdr.len < RL_IP4CSUMTX_PADLEN &&
2756 	    ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) != 0) {
2757 		padlen = RL_MIN_FRAMELEN - (*m_head)->m_pkthdr.len;
2758 		if (M_WRITABLE(*m_head) == 0) {
2759 			/* Get a writable copy. */
2760 			m_new = m_dup(*m_head, M_NOWAIT);
2761 			m_freem(*m_head);
2762 			if (m_new == NULL) {
2763 				*m_head = NULL;
2764 				return (ENOBUFS);
2765 			}
2766 			*m_head = m_new;
2767 		}
2768 		if ((*m_head)->m_next != NULL ||
2769 		    M_TRAILINGSPACE(*m_head) < padlen) {
2770 			m_new = m_defrag(*m_head, M_NOWAIT);
2771 			if (m_new == NULL) {
2772 				m_freem(*m_head);
2773 				*m_head = NULL;
2774 				return (ENOBUFS);
2775 			}
2776 		} else
2777 			m_new = *m_head;
2778 
2779 		/*
2780 		 * Manually pad short frames, and zero the pad space
2781 		 * to avoid leaking data.
2782 		 */
2783 		bzero(mtod(m_new, char *) + m_new->m_pkthdr.len, padlen);
2784 		m_new->m_pkthdr.len += padlen;
2785 		m_new->m_len = m_new->m_pkthdr.len;
2786 		*m_head = m_new;
2787 	}
2788 
2789 	prod = sc->rl_ldata.rl_tx_prodidx;
2790 	txd = &sc->rl_ldata.rl_tx_desc[prod];
2791 	error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
2792 	    *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2793 	if (error == EFBIG) {
2794 		m_new = m_collapse(*m_head, M_NOWAIT, RL_NTXSEGS);
2795 		if (m_new == NULL) {
2796 			m_freem(*m_head);
2797 			*m_head = NULL;
2798 			return (ENOBUFS);
2799 		}
2800 		*m_head = m_new;
2801 		error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag,
2802 		    txd->tx_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2803 		if (error != 0) {
2804 			m_freem(*m_head);
2805 			*m_head = NULL;
2806 			return (error);
2807 		}
2808 	} else if (error != 0)
2809 		return (error);
2810 	if (nsegs == 0) {
2811 		m_freem(*m_head);
2812 		*m_head = NULL;
2813 		return (EIO);
2814 	}
2815 
2816 	/* Check for number of available descriptors. */
2817 	if (sc->rl_ldata.rl_tx_free - nsegs <= 1) {
2818 		bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap);
2819 		return (ENOBUFS);
2820 	}
2821 
2822 	bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
2823 	    BUS_DMASYNC_PREWRITE);
2824 
2825 	/*
2826 	 * Set up checksum offload. Note: checksum offload bits must
2827 	 * appear in all descriptors of a multi-descriptor transmit
2828 	 * attempt; testing done with an 8169 chip shows this is a
2829 	 * hard requirement.
2830 	 */
2831 	vlanctl = 0;
2832 	csum_flags = 0;
2833 	if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2834 		if ((sc->rl_flags & RL_FLAG_DESCV2) != 0) {
2835 			csum_flags |= RL_TDESC_CMD_LGSEND;
2836 			vlanctl |= ((uint32_t)(*m_head)->m_pkthdr.tso_segsz <<
2837 			    RL_TDESC_CMD_MSSVALV2_SHIFT);
2838 		} else {
2839 			csum_flags |= RL_TDESC_CMD_LGSEND |
2840 			    ((uint32_t)(*m_head)->m_pkthdr.tso_segsz <<
2841 			    RL_TDESC_CMD_MSSVAL_SHIFT);
2842 		}
2843 	} else {
2844 		/*
2845 		 * Unconditionally enable IP checksum offload if TCP or UDP
2846 		 * checksum offload is requested; otherwise the TCP/UDP
2847 		 * checksum bits have no effect.
2848 		 */
2849 		if (((*m_head)->m_pkthdr.csum_flags & RE_CSUM_FEATURES) != 0) {
2850 			if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
2851 				csum_flags |= RL_TDESC_CMD_IPCSUM;
2852 				if (((*m_head)->m_pkthdr.csum_flags &
2853 				    CSUM_TCP) != 0)
2854 					csum_flags |= RL_TDESC_CMD_TCPCSUM;
2855 				if (((*m_head)->m_pkthdr.csum_flags &
2856 				    CSUM_UDP) != 0)
2857 					csum_flags |= RL_TDESC_CMD_UDPCSUM;
2858 			} else {
2859 				vlanctl |= RL_TDESC_CMD_IPCSUMV2;
2860 				if (((*m_head)->m_pkthdr.csum_flags &
2861 				    CSUM_TCP) != 0)
2862 					vlanctl |= RL_TDESC_CMD_TCPCSUMV2;
2863 				if (((*m_head)->m_pkthdr.csum_flags &
2864 				    CSUM_UDP) != 0)
2865 					vlanctl |= RL_TDESC_CMD_UDPCSUMV2;
2866 			}
2867 		}
2868 	}
2869 
2870 	/*
2871 	 * Set up hardware VLAN tagging. Note: vlan tag info must
2872 	 * appear in all descriptors of a multi-descriptor
2873 	 * transmission attempt.
2874 	 */
2875 	if ((*m_head)->m_flags & M_VLANTAG)
2876 		vlanctl |= bswap16((*m_head)->m_pkthdr.ether_vtag) |
2877 		    RL_TDESC_VLANCTL_TAG;
2878 
2879 	si = prod;
2880 	for (i = 0; i < nsegs; i++, prod = RL_TX_DESC_NXT(sc, prod)) {
2881 		desc = &sc->rl_ldata.rl_tx_list[prod];
2882 		desc->rl_vlanctl = htole32(vlanctl);
2883 		desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr));
2884 		desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr));
2885 		cmdstat = segs[i].ds_len;
2886 		if (i != 0)
2887 			cmdstat |= RL_TDESC_CMD_OWN;
2888 		if (prod == sc->rl_ldata.rl_tx_desc_cnt - 1)
2889 			cmdstat |= RL_TDESC_CMD_EOR;
2890 		desc->rl_cmdstat = htole32(cmdstat | csum_flags);
2891 		sc->rl_ldata.rl_tx_free--;
2892 	}
2893 	/* Update producer index. */
2894 	sc->rl_ldata.rl_tx_prodidx = prod;
2895 
2896 	/* Set EOF on the last descriptor. */
2897 	ei = RL_TX_DESC_PRV(sc, prod);
2898 	desc = &sc->rl_ldata.rl_tx_list[ei];
2899 	desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF);
2900 
2901 	desc = &sc->rl_ldata.rl_tx_list[si];
2902 	/* Set SOF and transfer ownership of packet to the chip. */
2903 	desc->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF);
2904 
2905 	/*
2906 	 * Ensure that the map for this transmission
2907 	 * is placed at the array index of the last descriptor
2908 	 * in this chain.  (Swap last and first dmamaps.)
2909 	 */
2910 	txd_last = &sc->rl_ldata.rl_tx_desc[ei];
2911 	map = txd->tx_dmamap;
2912 	txd->tx_dmamap = txd_last->tx_dmamap;
2913 	txd_last->tx_dmamap = map;
2914 	txd_last->tx_m = *m_head;
2915 
2916 	return (0);
2917 }
2918 
2919 static void
2920 re_start(struct ifnet *ifp)
2921 {
2922 	struct rl_softc		*sc;
2923 
2924 	sc = ifp->if_softc;
2925 	RL_LOCK(sc);
2926 	re_start_locked(ifp);
2927 	RL_UNLOCK(sc);
2928 }
2929 
2930 /*
2931  * Main transmit routine for C+ and gigE NICs.
2932  */
2933 static void
2934 re_start_locked(struct ifnet *ifp)
2935 {
2936 	struct rl_softc		*sc;
2937 	struct mbuf		*m_head;
2938 	int			queued;
2939 
2940 	sc = ifp->if_softc;
2941 
2942 #ifdef DEV_NETMAP
2943 	/* XXX is this necessary ? */
2944 	if (ifp->if_capenable & IFCAP_NETMAP) {
2945 		struct netmap_kring *kring = NA(ifp)->tx_rings[0];
2946 		if (sc->rl_ldata.rl_tx_prodidx != kring->nr_hwcur) {
2947 			/* kick the tx unit */
2948 			CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2949 #ifdef RE_TX_MODERATION
2950 			CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2951 #endif
2952 			sc->rl_watchdog_timer = 5;
2953 		}
2954 		return;
2955 	}
2956 #endif /* DEV_NETMAP */
2957 
2958 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2959 	    IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0)
2960 		return;
2961 
2962 	for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2963 	    sc->rl_ldata.rl_tx_free > 1;) {
2964 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2965 		if (m_head == NULL)
2966 			break;
2967 
2968 		if (re_encap(sc, &m_head) != 0) {
2969 			if (m_head == NULL)
2970 				break;
2971 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2972 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2973 			break;
2974 		}
2975 
2976 		/*
2977 		 * If there's a BPF listener, bounce a copy of this frame
2978 		 * to him.
2979 		 */
2980 		ETHER_BPF_MTAP(ifp, m_head);
2981 
2982 		queued++;
2983 	}
2984 
2985 	if (queued == 0) {
2986 #ifdef RE_TX_MODERATION
2987 		if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt)
2988 			CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2989 #endif
2990 		return;
2991 	}
2992 
2993 	re_start_tx(sc);
2994 }
2995 
2996 static void
2997 re_start_tx(struct rl_softc *sc)
2998 {
2999 
3000 	/* Flush the TX descriptors */
3001 	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
3002 	    sc->rl_ldata.rl_tx_list_map,
3003 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
3004 
3005 	CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
3006 
3007 #ifdef RE_TX_MODERATION
3008 	/*
3009 	 * Use the countdown timer for interrupt moderation.
3010 	 * 'TX done' interrupts are disabled. Instead, we reset the
3011 	 * countdown timer, which will begin counting until it hits
3012 	 * the value in the TIMERINT register, and then trigger an
3013 	 * interrupt. Each time we write to the TIMERCNT register,
3014 	 * the timer count is reset to 0.
3015 	 */
3016 	CSR_WRITE_4(sc, RL_TIMERCNT, 1);
3017 #endif
3018 
3019 	/*
3020 	 * Set a timeout in case the chip goes out to lunch.
3021 	 */
3022 	sc->rl_watchdog_timer = 5;
3023 }
3024 
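/*
 * Enable or disable jumbo frame support in the chip configuration
 * registers and adjust the PCIe maximum read request size to match.
 */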
3025 static void
3026 re_set_jumbo(struct rl_softc *sc, int jumbo)
3027 {
3028 
3029 	if (sc->rl_hwrev->rl_rev == RL_HWREV_8168E_VL) {
3030 		pci_set_max_read_req(sc->rl_dev, 4096);
3031 		return;
3032 	}
3033 
3034 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
3035 	if (jumbo != 0) {
3036 		CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) |
3037 		    RL_CFG3_JUMBO_EN0);
3038 		switch (sc->rl_hwrev->rl_rev) {
3039 		case RL_HWREV_8168DP:
3040 			break;
3041 		case RL_HWREV_8168E:
3042 			CSR_WRITE_1(sc, sc->rl_cfg4,
3043 			    CSR_READ_1(sc, sc->rl_cfg4) | 0x01);
3044 			break;
3045 		default:
3046 			CSR_WRITE_1(sc, sc->rl_cfg4,
3047 			    CSR_READ_1(sc, sc->rl_cfg4) | RL_CFG4_JUMBO_EN1);
3048 		}
3049 	} else {
3050 		CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) &
3051 		    ~RL_CFG3_JUMBO_EN0);
3052 		switch (sc->rl_hwrev->rl_rev) {
3053 		case RL_HWREV_8168DP:
3054 			break;
3055 		case RL_HWREV_8168E:
3056 			CSR_WRITE_1(sc, sc->rl_cfg4,
3057 			    CSR_READ_1(sc, sc->rl_cfg4) & ~0x01);
3058 			break;
3059 		default:
3060 			CSR_WRITE_1(sc, sc->rl_cfg4,
3061 			    CSR_READ_1(sc, sc->rl_cfg4) & ~RL_CFG4_JUMBO_EN1);
3062 		}
3063 	}
3064 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3065 
3066 	switch (sc->rl_hwrev->rl_rev) {
3067 	case RL_HWREV_8168DP:
3068 		pci_set_max_read_req(sc->rl_dev, 4096);
3069 		break;
3070 	default:
3071 		if (jumbo != 0)
3072 			pci_set_max_read_req(sc->rl_dev, 512);
3073 		else
3074 			pci_set_max_read_req(sc->rl_dev, 4096);
3075 	}
3076 }
3077 
3078 static void
3079 re_init(void *xsc)
3080 {
3081 	struct rl_softc		*sc = xsc;
3082 
3083 	RL_LOCK(sc);
3084 	re_init_locked(sc);
3085 	RL_UNLOCK(sc);
3086 }
3087 
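/*
 * Bring the interface up: reset the chip, initialize the descriptor
 * rings, program the MAC address, RX/TX configuration and interrupt
 * moderation, then kick off MII autonegotiation and the stat callout.
 */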
3088 static void
3089 re_init_locked(struct rl_softc *sc)
3090 {
3091 	struct ifnet		*ifp = sc->rl_ifp;
3092 	struct mii_data		*mii;
3093 	uint32_t		reg;
3094 	uint16_t		cfg;
3095 	union {
3096 		uint32_t align_dummy;
3097 		u_char eaddr[ETHER_ADDR_LEN];
3098 	} eaddr;
3099 
3100 	RL_LOCK_ASSERT(sc);
3101 
3102 	mii = device_get_softc(sc->rl_miibus);
3103 
3104 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3105 		return;
3106 
3107 	/*
3108 	 * Cancel pending I/O and free all RX/TX buffers.
3109 	 */
3110 	re_stop(sc);
3111 
3112 	/* Put controller into known state. */
3113 	re_reset(sc);
3114 
3115 	/*
3116 	 * For C+ mode, initialize the RX descriptors and mbufs.
3117 	 */
3118 	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
3119 		if (ifp->if_mtu > RL_MTU) {
3120 			if (re_jrx_list_init(sc) != 0) {
3121 				device_printf(sc->rl_dev,
3122 				    "no memory for jumbo RX buffers\n");
3123 				re_stop(sc);
3124 				return;
3125 			}
3126 			/* Disable checksum offloading for jumbo frames. */
3127 			ifp->if_capenable &= ~(IFCAP_HWCSUM | IFCAP_TSO4);
3128 			ifp->if_hwassist &= ~(RE_CSUM_FEATURES | CSUM_TSO);
3129 		} else {
3130 			if (re_rx_list_init(sc) != 0) {
3131 				device_printf(sc->rl_dev,
3132 				    "no memory for RX buffers\n");
3133 				re_stop(sc);
3134 				return;
3135 			}
3136 		}
3137 		re_set_jumbo(sc, ifp->if_mtu > RL_MTU);
3138 	} else {
3139 		if (re_rx_list_init(sc) != 0) {
3140 			device_printf(sc->rl_dev, "no memory for RX buffers\n");
3141 			re_stop(sc);
3142 			return;
3143 		}
3144 		if ((sc->rl_flags & RL_FLAG_PCIE) != 0 &&
3145 		    pci_get_device(sc->rl_dev) != RT_DEVICEID_8101E) {
3146 			if (ifp->if_mtu > RL_MTU)
3147 				pci_set_max_read_req(sc->rl_dev, 512);
3148 			else
3149 				pci_set_max_read_req(sc->rl_dev, 4096);
3150 		}
3151 	}
3152 	re_tx_list_init(sc);
3153 
3154 	/*
3155 	 * Enable C+ RX and TX mode, as well as VLAN stripping and
3156 	 * RX checksum offload. We must configure the C+ register
3157 	 * before all others.
3158 	 */
3159 	cfg = RL_CPLUSCMD_PCI_MRW;
3160 	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
3161 		cfg |= RL_CPLUSCMD_RXCSUM_ENB;
3162 	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3163 		cfg |= RL_CPLUSCMD_VLANSTRIP;
3164 	if ((sc->rl_flags & RL_FLAG_MACSTAT) != 0) {
3165 		cfg |= RL_CPLUSCMD_MACSTAT_DIS;
3166 		/* XXX magic. */
3167 		cfg |= 0x0001;
3168 	} else
3169 		cfg |= RL_CPLUSCMD_RXENB | RL_CPLUSCMD_TXENB;
3170 	CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg);
3171 	if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SC ||
3172 	    sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE) {
3173 		reg = 0x000fff00;
3174 		if ((CSR_READ_1(sc, sc->rl_cfg2) & RL_CFG2_PCI66MHZ) != 0)
3175 			reg |= 0x000000ff;
3176 		if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE)
3177 			reg |= 0x00f00000;
3178 		CSR_WRITE_4(sc, 0x7c, reg);
3179 		/* Disable interrupt mitigation. */
3180 		CSR_WRITE_2(sc, 0xe2, 0);
3181 	}
3182 	/*
3183 	 * Disable TSO if the interface MTU is greater than the maximum
3184 	 * MSS the controller supports.
3185 	 */
3186 	if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) {
3187 		ifp->if_capenable &= ~IFCAP_TSO4;
3188 		ifp->if_hwassist &= ~CSUM_TSO;
3189 	}
3190 
3191 	/*
3192 	 * Init our MAC address.  Even though the chipset
3193 	 * documentation doesn't mention it, we need to enter "Config
3194 	 * register write enable" mode to modify the ID registers.
3195 	 */
3196 	/* Copy MAC address on stack to align. */
3197 	bcopy(IF_LLADDR(ifp), eaddr.eaddr, ETHER_ADDR_LEN);
3198 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
3199 	CSR_WRITE_4(sc, RL_IDR0,
3200 	    htole32(*(u_int32_t *)(&eaddr.eaddr[0])));
3201 	CSR_WRITE_4(sc, RL_IDR4,
3202 	    htole32(*(u_int32_t *)(&eaddr.eaddr[4])));
3203 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3204 
3205 	/*
3206 	 * Load the addresses of the RX and TX lists into the chip.
3207 	 */
3208 
3209 	CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI,
3210 	    RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr));
3211 	CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO,
3212 	    RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr));
3213 
3214 	CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI,
3215 	    RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr));
3216 	CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO,
3217 	    RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr));
3218 
3219 	if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) {
3220 		/* Disable RXDV gate. */
3221 		CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) &
3222 		    ~0x00080000);
3223 	}
3224 
3225 	/*
3226 	 * Enable transmit and receive for pre-RTL8168G controllers.
3227 	 * RX/TX MACs should be enabled before RX/TX configuration.
3228 	 */
3229 	if ((sc->rl_flags & RL_FLAG_8168G_PLUS) == 0)
3230 		CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB | RL_CMD_RX_ENB);
3231 
3232 	/*
3233 	 * Set the initial TX configuration.
3234 	 */
3235 	if (sc->rl_testmode) {
3236 		if (sc->rl_type == RL_8169)
3237 			CSR_WRITE_4(sc, RL_TXCFG,
3238 			    RL_TXCFG_CONFIG|RL_LOOPTEST_ON);
3239 		else
3240 			CSR_WRITE_4(sc, RL_TXCFG,
3241 			    RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS);
3242 	} else
3243 		CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
3244 
3245 	CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16);
3246 
3247 	/*
3248 	 * Set the initial RX configuration.
3249 	 */
3250 	re_set_rxmode(sc);
3251 
3252 	/* Configure interrupt moderation. */
3253 	if (sc->rl_type == RL_8169) {
3254 		/* Magic from vendor. */
3255 		CSR_WRITE_2(sc, RL_INTRMOD, 0x5100);
3256 	}
3257 
3258 	/*
3259 	 * Enable transmit and receive for RTL8168G and later controllers.
3260 	 * RX/TX MACs should be enabled after RX/TX configuration.
3261 	 */
3262 	if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0)
3263 		CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB | RL_CMD_RX_ENB);
3264 
3265 #ifdef DEVICE_POLLING
3266 	/*
3267 	 * Disable interrupts if we are polling.
3268 	 */
3269 	if (ifp->if_capenable & IFCAP_POLLING)
3270 		CSR_WRITE_2(sc, RL_IMR, 0);
3271 	else	/* otherwise ... */
3272 #endif
3273 
3274 	/*
3275 	 * Enable interrupts.
3276 	 */
3277 	if (sc->rl_testmode)
3278 		CSR_WRITE_2(sc, RL_IMR, 0);
3279 	else
3280 		CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
3281 	CSR_WRITE_2(sc, RL_ISR, RL_INTRS_CPLUS);
3282 
3283 	/* Set initial TX threshold */
3284 	sc->rl_txthresh = RL_TX_THRESH_INIT;
3285 
3286 	/* Start RX/TX process. */
3287 	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);
3288 
3289 	/*
3290 	 * Initialize the timer interrupt register so that
3291 	 * a timer interrupt will be generated once the timer
3292 	 * reaches a certain number of ticks. The timer is
3293 	 * reloaded on each transmit.
3294 	 */
3295 #ifdef RE_TX_MODERATION
3296 	/*
3297 	 * Use the timer interrupt register for TX interrupt moderation,
3298 	 * which dramatically improves the TX frame rate.
3299 	 */
3300 	if (sc->rl_type == RL_8169)
3301 		CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800);
3302 	else
3303 		CSR_WRITE_4(sc, RL_TIMERINT, 0x400);
3304 #else
3305 	/*
3306 	 * Use the timer interrupt register for RX interrupt
3307 	 * moderation.
3308 	 */
3309 	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 &&
3310 	    intr_filter == 0) {
3311 		if (sc->rl_type == RL_8169)
3312 			CSR_WRITE_4(sc, RL_TIMERINT_8169,
3313 			    RL_USECS(sc->rl_int_rx_mod));
3314 	} else {
3315 		if (sc->rl_type == RL_8169)
3316 			CSR_WRITE_4(sc, RL_TIMERINT_8169, RL_USECS(0));
3317 	}
3318 #endif
3319 
3320 	/*
3321 	 * For 8169 gigE NICs, set the max allowed RX packet
3322 	 * size so we can receive jumbo frames.
3323 	 */
3324 	if (sc->rl_type == RL_8169) {
3325 		if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
3326 			/*
3327 			 * For controllers that use new jumbo frame scheme,
3328 			 * set maximum size of jumbo frame depending on
3329 			 * controller revisions.
3330 			 */
3331 			if (ifp->if_mtu > RL_MTU)
3332 				CSR_WRITE_2(sc, RL_MAXRXPKTLEN,
3333 				    sc->rl_hwrev->rl_max_mtu +
3334 				    ETHER_VLAN_ENCAP_LEN + ETHER_HDR_LEN +
3335 				    ETHER_CRC_LEN);
3336 			else
3337 				CSR_WRITE_2(sc, RL_MAXRXPKTLEN,
3338 				    RE_RX_DESC_BUFLEN);
3339 		} else if ((sc->rl_flags & RL_FLAG_PCIE) != 0 &&
3340 		    sc->rl_hwrev->rl_max_mtu == RL_MTU) {
3341 			/* RTL810x has no jumbo frame support. */
3342 			CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN);
3343 		} else
3344 			CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383);
3345 	}
3346 
3347 	if (sc->rl_testmode)
3348 		return;
3349 
3350 	CSR_WRITE_1(sc, sc->rl_cfg1, CSR_READ_1(sc, sc->rl_cfg1) |
3351 	    RL_CFG1_DRVLOAD);
3352 
3353 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3354 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3355 
3356 	sc->rl_flags &= ~RL_FLAG_LINK;
3357 	mii_mediachg(mii);
3358 
3359 	sc->rl_watchdog_timer = 0;
3360 	callout_reset(&sc->rl_stat_callout, hz, re_tick, sc);
3361 }
3362 
3363 /*
3364  * Set media options.
3365  */
3366 static int
3367 re_ifmedia_upd(struct ifnet *ifp)
3368 {
3369 	struct rl_softc		*sc;
3370 	struct mii_data		*mii;
3371 	int			error;
3372 
3373 	sc = ifp->if_softc;
3374 	mii = device_get_softc(sc->rl_miibus);
3375 	RL_LOCK(sc);
3376 	error = mii_mediachg(mii);
3377 	RL_UNLOCK(sc);
3378 
3379 	return (error);
3380 }
3381 
3382 /*
3383  * Report current media status.
3384  */
3385 static void
3386 re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3387 {
3388 	struct rl_softc		*sc;
3389 	struct mii_data		*mii;
3390 
3391 	sc = ifp->if_softc;
3392 	mii = device_get_softc(sc->rl_miibus);
3393 
3394 	RL_LOCK(sc);
3395 	mii_pollstat(mii);
3396 	ifmr->ifm_active = mii->mii_media_active;
3397 	ifmr->ifm_status = mii->mii_media_status;
3398 	RL_UNLOCK(sc);
3399 }
3400 
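/*
 * Handle interface ioctls: MTU changes, interface flag changes,
 * multicast filter updates, media requests and capability toggles.
 */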
3401 static int
3402 re_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3403 {
3404 	struct rl_softc		*sc = ifp->if_softc;
3405 	struct ifreq		*ifr = (struct ifreq *) data;
3406 	struct mii_data		*mii;
3407 	int			error = 0;
3408 
3409 	switch (command) {
3410 	case SIOCSIFMTU:
3411 		if (ifr->ifr_mtu < ETHERMIN ||
3412 		    ifr->ifr_mtu > sc->rl_hwrev->rl_max_mtu ||
3413 		    ((sc->rl_flags & RL_FLAG_FASTETHER) != 0 &&
3414 		    ifr->ifr_mtu > RL_MTU)) {
3415 			error = EINVAL;
3416 			break;
3417 		}
3418 		RL_LOCK(sc);
3419 		if (ifp->if_mtu != ifr->ifr_mtu) {
3420 			ifp->if_mtu = ifr->ifr_mtu;
3421 			if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
3422 			    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
3423 				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3424 				re_init_locked(sc);
3425 			}
3426 			if (ifp->if_mtu > RL_TSO_MTU &&
3427 			    (ifp->if_capenable & IFCAP_TSO4) != 0) {
3428 				ifp->if_capenable &= ~(IFCAP_TSO4 |
3429 				    IFCAP_VLAN_HWTSO);
3430 				ifp->if_hwassist &= ~CSUM_TSO;
3431 			}
3432 			VLAN_CAPABILITIES(ifp);
3433 		}
3434 		RL_UNLOCK(sc);
3435 		break;
3436 	case SIOCSIFFLAGS:
3437 		RL_LOCK(sc);
3438 		if ((ifp->if_flags & IFF_UP) != 0) {
3439 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
3440 				if (((ifp->if_flags ^ sc->rl_if_flags)
3441 				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3442 					re_set_rxmode(sc);
3443 			} else
3444 				re_init_locked(sc);
3445 		} else {
3446 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3447 				re_stop(sc);
3448 		}
3449 		sc->rl_if_flags = ifp->if_flags;
3450 		RL_UNLOCK(sc);
3451 		break;
3452 	case SIOCADDMULTI:
3453 	case SIOCDELMULTI:
3454 		RL_LOCK(sc);
3455 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3456 			re_set_rxmode(sc);
3457 		RL_UNLOCK(sc);
3458 		break;
3459 	case SIOCGIFMEDIA:
3460 	case SIOCSIFMEDIA:
3461 		mii = device_get_softc(sc->rl_miibus);
3462 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
3463 		break;
3464 	case SIOCSIFCAP:
3465 	    {
3466 		int mask, reinit;
3467 
3468 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3469 		reinit = 0;
3470 #ifdef DEVICE_POLLING
3471 		if (mask & IFCAP_POLLING) {
3472 			if (ifr->ifr_reqcap & IFCAP_POLLING) {
3473 				error = ether_poll_register(re_poll, ifp);
3474 				if (error)
3475 					return (error);
3476 				RL_LOCK(sc);
3477 				/* Disable interrupts */
3478 				CSR_WRITE_2(sc, RL_IMR, 0x0000);
3479 				ifp->if_capenable |= IFCAP_POLLING;
3480 				RL_UNLOCK(sc);
3481 			} else {
3482 				error = ether_poll_deregister(ifp);
3483 				/* Enable interrupts. */
3484 				RL_LOCK(sc);
3485 				CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
3486 				ifp->if_capenable &= ~IFCAP_POLLING;
3487 				RL_UNLOCK(sc);
3488 			}
3489 		}
3490 #endif /* DEVICE_POLLING */
3491 		RL_LOCK(sc);
3492 		if ((mask & IFCAP_TXCSUM) != 0 &&
3493 		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
3494 			ifp->if_capenable ^= IFCAP_TXCSUM;
3495 			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
3496 				ifp->if_hwassist |= RE_CSUM_FEATURES;
3497 			else
3498 				ifp->if_hwassist &= ~RE_CSUM_FEATURES;
3499 			reinit = 1;
3500 		}
3501 		if ((mask & IFCAP_RXCSUM) != 0 &&
3502 		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
3503 			ifp->if_capenable ^= IFCAP_RXCSUM;
3504 			reinit = 1;
3505 		}
3506 		if ((mask & IFCAP_TSO4) != 0 &&
3507 		    (ifp->if_capabilities & IFCAP_TSO4) != 0) {
3508 			ifp->if_capenable ^= IFCAP_TSO4;
3509 			if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
3510 				ifp->if_hwassist |= CSUM_TSO;
3511 			else
3512 				ifp->if_hwassist &= ~CSUM_TSO;
3513 			if (ifp->if_mtu > RL_TSO_MTU &&
3514 			    (ifp->if_capenable & IFCAP_TSO4) != 0) {
3515 				ifp->if_capenable &= ~IFCAP_TSO4;
3516 				ifp->if_hwassist &= ~CSUM_TSO;
3517 			}
3518 		}
3519 		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
3520 		    (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
3521 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
3522 		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
3523 		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
3524 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
3525 			/* TSO over VLAN requires VLAN hardware tagging. */
3526 			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
3527 				ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
3528 			reinit = 1;
3529 		}
3530 		if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
3531 		    (mask & (IFCAP_HWCSUM | IFCAP_TSO4 |
3532 		    IFCAP_VLAN_HWTSO)) != 0)
3533 			reinit = 1;
3534 		if ((mask & IFCAP_WOL) != 0 &&
3535 		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
3536 			if ((mask & IFCAP_WOL_UCAST) != 0)
3537 				ifp->if_capenable ^= IFCAP_WOL_UCAST;
3538 			if ((mask & IFCAP_WOL_MCAST) != 0)
3539 				ifp->if_capenable ^= IFCAP_WOL_MCAST;
3540 			if ((mask & IFCAP_WOL_MAGIC) != 0)
3541 				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
3542 		}
3543 		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
3544 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3545 			re_init_locked(sc);
3546 		}
3547 		RL_UNLOCK(sc);
3548 		VLAN_CAPABILITIES(ifp);
3549 	    }
3550 		break;
3551 	default:
3552 		error = ether_ioctl(ifp, command, data);
3553 		break;
3554 	}
3555 
3556 	return (error);
3557 }
3558 
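/*
 * Watchdog timer handler.  When the timer expires, reclaim completed TX
 * descriptors; if the TX ring turns out to be empty, assume we merely
 * missed TX completion interrupts and restart transmission, otherwise
 * record an output error and reinitialize the interface.
 */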
3559 static void
3560 re_watchdog(struct rl_softc *sc)
3561 {
3562 	struct ifnet		*ifp;
3563 
3564 	RL_LOCK_ASSERT(sc);
3565 
3566 	if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer != 0)
3567 		return;
3568 
3569 	ifp = sc->rl_ifp;
3570 	re_txeof(sc);
3571 	if (sc->rl_ldata.rl_tx_free == sc->rl_ldata.rl_tx_desc_cnt) {
3572 		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
3573 		    "-- recovering\n");
3574 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3575 			re_start_locked(ifp);
3576 		return;
3577 	}
3578 
3579 	if_printf(ifp, "watchdog timeout\n");
3580 	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
3581 
3582 	re_rxeof(sc, NULL);
3583 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3584 	re_init_locked(sc);
3585 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3586 		re_start_locked(ifp);
3587 }
3588 
3589 /*
3590  * Stop the adapter and free any mbufs allocated to the
3591  * RX and TX lists.
3592  */
3593 static void
3594 re_stop(struct rl_softc *sc)
3595 {
3596 	int			i;
3597 	struct ifnet		*ifp;
3598 	struct rl_txdesc	*txd;
3599 	struct rl_rxdesc	*rxd;
3600 
3601 	RL_LOCK_ASSERT(sc);
3602 
3603 	ifp = sc->rl_ifp;
3604 
3605 	sc->rl_watchdog_timer = 0;
3606 	callout_stop(&sc->rl_stat_callout);
3607 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3608 
3609 	/*
3610 	 * Disable frame reception to put the RX MAC into an idle state.
3611 	 * Otherwise frames may still arrive while the stop command is
3612 	 * executing, and the controller could DMA them into an RX buffer
3613 	 * that has already been freed.
3614 	 */
3615 	CSR_WRITE_4(sc, RL_RXCFG, CSR_READ_4(sc, RL_RXCFG) &
3616 	    ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI |
3617 	    RL_RXCFG_RX_BROAD));
3618 
3619 	if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) {
3620 		/* Enable RXDV gate. */
3621 		CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) |
3622 		    0x00080000);
3623 	}
3624 
3625 	if ((sc->rl_flags & RL_FLAG_WAIT_TXPOLL) != 0) {
3626 		for (i = RL_TIMEOUT; i > 0; i--) {
3627 			if ((CSR_READ_1(sc, sc->rl_txstart) &
3628 			    RL_TXSTART_START) == 0)
3629 				break;
3630 			DELAY(20);
3631 		}
3632 		if (i == 0)
3633 			device_printf(sc->rl_dev,
3634 			    "stopping TX poll timed out!\n");
3635 		CSR_WRITE_1(sc, RL_COMMAND, 0x00);
3636 	} else if ((sc->rl_flags & RL_FLAG_CMDSTOP) != 0) {
3637 		CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB |
3638 		    RL_CMD_RX_ENB);
3639 		if ((sc->rl_flags & RL_FLAG_CMDSTOP_WAIT_TXQ) != 0) {
3640 			for (i = RL_TIMEOUT; i > 0; i--) {
3641 				if ((CSR_READ_4(sc, RL_TXCFG) &
3642 				    RL_TXCFG_QUEUE_EMPTY) != 0)
3643 					break;
3644 				DELAY(100);
3645 			}
3646 			if (i == 0)
3647 				device_printf(sc->rl_dev,
3648 				    "stopping TXQ timed out!\n");
3649 		}
3650 	} else
3651 		CSR_WRITE_1(sc, RL_COMMAND, 0x00);
3652 	DELAY(1000);
3653 	CSR_WRITE_2(sc, RL_IMR, 0x0000);
3654 	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
3655 
3656 	if (sc->rl_head != NULL) {
3657 		m_freem(sc->rl_head);
3658 		sc->rl_head = sc->rl_tail = NULL;
3659 	}
3660 
3661 	/* Free the TX list buffers. */
3662 	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
3663 		txd = &sc->rl_ldata.rl_tx_desc[i];
3664 		if (txd->tx_m != NULL) {
3665 			bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
3666 			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3667 			bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag,
3668 			    txd->tx_dmamap);
3669 			m_freem(txd->tx_m);
3670 			txd->tx_m = NULL;
3671 		}
3672 	}
3673 
3674 	/* Free the RX list buffers. */
3675 	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
3676 		rxd = &sc->rl_ldata.rl_rx_desc[i];
3677 		if (rxd->rx_m != NULL) {
3678 			bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
3679 			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3680 			bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag,
3681 			    rxd->rx_dmamap);
3682 			m_freem(rxd->rx_m);
3683 			rxd->rx_m = NULL;
3684 		}
3685 	}
3686 
3687 	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
3688 		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
3689 			rxd = &sc->rl_ldata.rl_jrx_desc[i];
3690 			if (rxd->rx_m != NULL) {
3691 				bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag,
3692 				    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3693 				bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag,
3694 				    rxd->rx_dmamap);
3695 				m_freem(rxd->rx_m);
3696 				rxd->rx_m = NULL;
3697 			}
3698 		}
3699 	}
3700 }
3701 
3702 /*
3703  * Device suspend routine.  Stop the interface and save some PCI
3704  * settings in case the BIOS doesn't restore them properly on
3705  * resume.
3706  */
3707 static int
3708 re_suspend(device_t dev)
3709 {
3710 	struct rl_softc		*sc;
3711 
3712 	sc = device_get_softc(dev);
3713 
3714 	RL_LOCK(sc);
3715 	re_stop(sc);
3716 	re_setwol(sc);
3717 	sc->suspended = 1;
3718 	RL_UNLOCK(sc);
3719 
3720 	return (0);
3721 }
3722 
3723 /*
3724  * Device resume routine.  Restore some PCI settings in case the BIOS
3725  * doesn't, re-enable busmastering, and restart the interface if
3726  * appropriate.
3727  */
3728 static int
3729 re_resume(device_t dev)
3730 {
3731 	struct rl_softc		*sc;
3732 	struct ifnet		*ifp;
3733 
3734 	sc = device_get_softc(dev);
3735 
3736 	RL_LOCK(sc);
3737 
3738 	ifp = sc->rl_ifp;
3739 	/* Take controller out of sleep mode. */
3740 	if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
3741 		if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
3742 			CSR_WRITE_1(sc, RL_GPIO,
3743 			    CSR_READ_1(sc, RL_GPIO) | 0x01);
3744 	}
3745 
3746 	/*
3747 	 * Clear WOL matching so that normal Rx filtering does not
3748 	 * interfere with the WOL patterns.
3749 	 */
3750 	re_clrwol(sc);
3751 
3752 	/* reinitialize interface if necessary */
3753 	/* Reinitialize the interface if necessary. */
3754 		re_init_locked(sc);
3755 
3756 	sc->suspended = 0;
3757 	RL_UNLOCK(sc);
3758 
3759 	return (0);
3760 }
3761 
3762 /*
3763  * Stop all chip I/O so that the kernel's probe routines don't
3764  * get confused by errant DMAs when rebooting.
3765  */
3766 static int
3767 re_shutdown(device_t dev)
3768 {
3769 	struct rl_softc		*sc;
3770 
3771 	sc = device_get_softc(dev);
3772 
3773 	RL_LOCK(sc);
3774 	re_stop(sc);
3775 	/*
3776 	 * Mark the interface as down; otherwise we will panic if an
3777 	 * interrupt comes in later on, which can happen in some
3778 	 * cases.
3779 	 */
3780 	sc->rl_ifp->if_flags &= ~IFF_UP;
3781 	re_setwol(sc);
3782 	RL_UNLOCK(sc);
3783 
3784 	return (0);
3785 }
3786 
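/*
 * Force the PHY to a 10/100 link for Wake On LAN.  If a gigabit link is
 * active, restart autonegotiation advertising 10/100 only and poll until
 * a 10/100 link is established; if that fails, fall back to reporting a
 * 100Mbps full-duplex link to the MAC.
 */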
3787 static void
3788 re_set_linkspeed(struct rl_softc *sc)
3789 {
3790 	struct mii_softc *miisc;
3791 	struct mii_data *mii;
3792 	int aneg, i, phyno;
3793 
3794 	RL_LOCK_ASSERT(sc);
3795 
3796 	mii = device_get_softc(sc->rl_miibus);
3797 	mii_pollstat(mii);
3798 	aneg = 0;
3799 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
3800 	    (IFM_ACTIVE | IFM_AVALID)) {
3801 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
3802 		case IFM_10_T:
3803 		case IFM_100_TX:
3804 			return;
3805 		case IFM_1000_T:
3806 			aneg++;
3807 			break;
3808 		default:
3809 			break;
3810 		}
3811 	}
3812 	miisc = LIST_FIRST(&mii->mii_phys);
3813 	phyno = miisc->mii_phy;
3814 	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3815 		PHY_RESET(miisc);
3816 	re_miibus_writereg(sc->rl_dev, phyno, MII_100T2CR, 0);
3817 	re_miibus_writereg(sc->rl_dev, phyno,
3818 	    MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
3819 	re_miibus_writereg(sc->rl_dev, phyno,
3820 	    MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG);
3821 	DELAY(1000);
3822 	if (aneg != 0) {
3823 		/*
3824 		 * Poll the link state until re(4) gets a 10/100Mbps link.
3825 		 */
3826 		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
3827 			mii_pollstat(mii);
3828 			if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
3829 			    == (IFM_ACTIVE | IFM_AVALID)) {
3830 				switch (IFM_SUBTYPE(mii->mii_media_active)) {
3831 				case IFM_10_T:
3832 				case IFM_100_TX:
3833 					return;
3834 				default:
3835 					break;
3836 				}
3837 			}
3838 			RL_UNLOCK(sc);
3839 			pause("relnk", hz);
3840 			RL_LOCK(sc);
3841 		}
3842 		if (i == MII_ANEGTICKS_GIGE)
3843 			device_printf(sc->rl_dev,
3844 			    "establishing a link failed, WOL may not work!\n");
3845 	}
3846 	/*
3847 	 * No link; force the MAC to a 100Mbps, full-duplex link.
3848 	 * The MAC does not require reprogramming for the resolved
3849 	 * speed/duplex, so this is just for completeness.
3850 	 */
3851 	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
3852 	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
3853 }
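/*
 * Configure Wake On LAN: optionally put the MAC to sleep, select which
 * frame types may wake the system via the chip configuration registers,
 * and arm PME in the PCI power-management registers.
 */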
3854 
3855 static void
3856 re_setwol(struct rl_softc *sc)
3857 {
3858 	struct ifnet		*ifp;
3859 	int			pmc;
3860 	uint16_t		pmstat;
3861 	uint8_t			v;
3862 
3863 	RL_LOCK_ASSERT(sc);
3864 
3865 	if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
3866 		return;
3867 
3868 	ifp = sc->rl_ifp;
3869 	/* Put controller into sleep mode. */
3870 	if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
3871 		if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
3872 			CSR_WRITE_1(sc, RL_GPIO,
3873 			    CSR_READ_1(sc, RL_GPIO) & ~0x01);
3874 	}
3875 	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
3876 		if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) {
3877 			/* Disable RXDV gate. */
3878 			CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) &
3879 			    ~0x00080000);
3880 		}
3881 		re_set_rxmode(sc);
3882 		if ((sc->rl_flags & RL_FLAG_WOL_MANLINK) != 0)
3883 			re_set_linkspeed(sc);
3884 		if ((sc->rl_flags & RL_FLAG_WOLRXENB) != 0)
3885 			CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RX_ENB);
3886 	}
3887 	/* Enable config register write. */
3888 	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
3889 
3890 	/* Enable PME. */
3891 	v = CSR_READ_1(sc, sc->rl_cfg1);
3892 	v &= ~RL_CFG1_PME;
3893 	if ((ifp->if_capenable & IFCAP_WOL) != 0)
3894 		v |= RL_CFG1_PME;
3895 	CSR_WRITE_1(sc, sc->rl_cfg1, v);
3896 
3897 	v = CSR_READ_1(sc, sc->rl_cfg3);
3898 	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
3899 	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
3900 		v |= RL_CFG3_WOL_MAGIC;
3901 	CSR_WRITE_1(sc, sc->rl_cfg3, v);
3902 
3903 	v = CSR_READ_1(sc, sc->rl_cfg5);
3904 	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST |
3905 	    RL_CFG5_WOL_LANWAKE);
3906 	if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
3907 		v |= RL_CFG5_WOL_UCAST;
3908 	if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
3909 		v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST;
3910 	if ((ifp->if_capenable & IFCAP_WOL) != 0)
3911 		v |= RL_CFG5_WOL_LANWAKE;
3912 	CSR_WRITE_1(sc, sc->rl_cfg5, v);
3913 
3914 	/* Config register write done. */
3915 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3916 
3917 	if ((ifp->if_capenable & IFCAP_WOL) == 0 &&
3918 	    (sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0)
3919 		CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) & ~0x80);
3920 	/*
3921 	 * The hardware appears to reset its link speed to 100Mbps in
3922 	 * power-down mode, so switching to 100Mbps in the driver is
3923 	 * not needed.
3924 	 */
3925 
3926 	/* Request PME if WOL is requested. */
3927 	pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2);
3928 	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
3929 	if ((ifp->if_capenable & IFCAP_WOL) != 0)
3930 		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3931 	pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
3932 }
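/*
 * Clear the Wake On LAN wakeup-frame configuration in the chip's
 * configuration registers.
 */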
3933 
3934 static void
3935 re_clrwol(struct rl_softc *sc)
3936 {
3937 	int			pmc;
3938 	uint8_t			v;
3939 
3940 	RL_LOCK_ASSERT(sc);
3941 
3942 	if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
3943 		return;
3944 
3945 	/* Enable config register write. */
3946 	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
3947 
3948 	v = CSR_READ_1(sc, sc->rl_cfg3);
3949 	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
3950 	CSR_WRITE_1(sc, sc->rl_cfg3, v);
3951 
3952 	/* Config register write done. */
3953 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3954 
3955 	v = CSR_READ_1(sc, sc->rl_cfg5);
3956 	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
3957 	v &= ~RL_CFG5_WOL_LANWAKE;
3958 	CSR_WRITE_1(sc, sc->rl_cfg5, v);
3959 }
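/*
 * Create the driver's sysctl nodes: a statistics dump trigger and, for
 * MSI/MSI-X capable devices, an RX interrupt moderation knob whose
 * initial value may be overridden by a device hint.
 */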
3960 
3961 static void
3962 re_add_sysctls(struct rl_softc *sc)
3963 {
3964 	struct sysctl_ctx_list	*ctx;
3965 	struct sysctl_oid_list	*children;
3966 	int			error;
3967 
3968 	ctx = device_get_sysctl_ctx(sc->rl_dev);
3969 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev));
3970 
3971 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "stats",
3972 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
3973 	    re_sysctl_stats, "I", "Statistics Information");
3974 	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0)
3975 		return;
3976 
3977 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "int_rx_mod",
3978 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
3979 	    &sc->rl_int_rx_mod, 0, sysctl_hw_re_int_mod, "I",
3980 	    "re RX interrupt moderation");
3981 	/* Pull in device tunables. */
3982 	sc->rl_int_rx_mod = RL_TIMER_DEFAULT;
3983 	error = resource_int_value(device_get_name(sc->rl_dev),
3984 	    device_get_unit(sc->rl_dev), "int_rx_mod", &sc->rl_int_rx_mod);
3985 	if (error == 0) {
3986 		if (sc->rl_int_rx_mod < RL_TIMER_MIN ||
3987 		    sc->rl_int_rx_mod > RL_TIMER_MAX) {
3988 			device_printf(sc->rl_dev, "int_rx_mod value out of "
3989 			    "range; using default: %d\n",
3990 			    RL_TIMER_DEFAULT);
3991 			sc->rl_int_rx_mod = RL_TIMER_DEFAULT;
3992 		}
3993 	}
3994 }
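/*
 * Sysctl handler for the "stats" node.  Writing 1 asks the chip to DMA
 * its hardware statistics counters into host memory and prints them to
 * the console.
 */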
3995 
3996 static int
3997 re_sysctl_stats(SYSCTL_HANDLER_ARGS)
3998 {
3999 	struct rl_softc		*sc;
4000 	struct rl_stats		*stats;
4001 	int			error, i, result;
4002 
4003 	result = -1;
4004 	error = sysctl_handle_int(oidp, &result, 0, req);
4005 	if (error || req->newptr == NULL)
4006 		return (error);
4007 
4008 	if (result == 1) {
4009 		sc = (struct rl_softc *)arg1;
4010 		RL_LOCK(sc);
4011 		if ((sc->rl_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
4012 			RL_UNLOCK(sc);
4013 			goto done;
4014 		}
4015 		bus_dmamap_sync(sc->rl_ldata.rl_stag,
4016 		    sc->rl_ldata.rl_smap, BUS_DMASYNC_PREREAD);
4017 		CSR_WRITE_4(sc, RL_DUMPSTATS_HI,
4018 		    RL_ADDR_HI(sc->rl_ldata.rl_stats_addr));
4019 		CSR_WRITE_4(sc, RL_DUMPSTATS_LO,
4020 		    RL_ADDR_LO(sc->rl_ldata.rl_stats_addr));
4021 		CSR_WRITE_4(sc, RL_DUMPSTATS_LO,
4022 		    RL_ADDR_LO(sc->rl_ldata.rl_stats_addr |
4023 		    RL_DUMPSTATS_START));
4024 		for (i = RL_TIMEOUT; i > 0; i--) {
4025 			if ((CSR_READ_4(sc, RL_DUMPSTATS_LO) &
4026 			    RL_DUMPSTATS_START) == 0)
4027 				break;
4028 			DELAY(1000);
4029 		}
4030 		bus_dmamap_sync(sc->rl_ldata.rl_stag,
4031 		    sc->rl_ldata.rl_smap, BUS_DMASYNC_POSTREAD);
4032 		RL_UNLOCK(sc);
4033 		if (i == 0) {
4034 			device_printf(sc->rl_dev,
4035 			    "DUMP statistics request timed out\n");
4036 			return (ETIMEDOUT);
4037 		}
4038 done:
4039 		stats = sc->rl_ldata.rl_stats;
4040 		printf("%s statistics:\n", device_get_nameunit(sc->rl_dev));
4041 		printf("Tx frames : %ju\n",
4042 		    (uintmax_t)le64toh(stats->rl_tx_pkts));
4043 		printf("Rx frames : %ju\n",
4044 		    (uintmax_t)le64toh(stats->rl_rx_pkts));
4045 		printf("Tx errors : %ju\n",
4046 		    (uintmax_t)le64toh(stats->rl_tx_errs));
4047 		printf("Rx errors : %u\n",
4048 		    le32toh(stats->rl_rx_errs));
4049 		printf("Rx missed frames : %u\n",
4050 		    (uint32_t)le16toh(stats->rl_missed_pkts));
4051 		printf("Rx frame alignment errs : %u\n",
4052 		    (uint32_t)le16toh(stats->rl_rx_framealign_errs));
4053 		printf("Tx single collisions : %u\n",
4054 		    le32toh(stats->rl_tx_onecoll));
4055 		printf("Tx multiple collisions : %u\n",
4056 		    le32toh(stats->rl_tx_multicolls));
4057 		printf("Rx unicast frames : %ju\n",
4058 		    (uintmax_t)le64toh(stats->rl_rx_ucasts));
4059 		printf("Rx broadcast frames : %ju\n",
4060 		    (uintmax_t)le64toh(stats->rl_rx_bcasts));
4061 		printf("Rx multicast frames : %u\n",
4062 		    le32toh(stats->rl_rx_mcasts));
4063 		printf("Tx aborts : %u\n",
4064 		    (uint32_t)le16toh(stats->rl_tx_aborts));
4065 		printf("Tx underruns : %u\n",
4066 		    (uint32_t)le16toh(stats->rl_rx_underruns));
4067 	}
4068 
4069 	return (error);
4070 }
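/*
 * Generic sysctl helper that accepts a new integer value only if it
 * falls within the given [low, high] range.
 */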
4071 
4072 static int
4073 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
4074 {
4075 	int error, value;
4076 
4077 	if (arg1 == NULL)
4078 		return (EINVAL);
4079 	value = *(int *)arg1;
4080 	error = sysctl_handle_int(oidp, &value, 0, req);
4081 	if (error || req->newptr == NULL)
4082 		return (error);
4083 	if (value < low || value > high)
4084 		return (EINVAL);
4085 	*(int *)arg1 = value;
4086 
4087 	return (0);
4088 }
4089 
4090 static int
4091 sysctl_hw_re_int_mod(SYSCTL_HANDLER_ARGS)
4092 {
4093 
4094 	return (sysctl_int_range(oidp, arg1, arg2, req, RL_TIMER_MIN,
4095 	    RL_TIMER_MAX));
4096 }
4097 
4098 #ifdef DEBUGNET
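/*
 * debugnet(4) methods: minimal polled transmit/receive support used for
 * kernel crash dumps and similar facilities over the network.
 */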
4099 static void
4100 re_debugnet_init(struct ifnet *ifp, int *nrxr, int *ncl, int *clsize)
4101 {
4102 	struct rl_softc *sc;
4103 
4104 	sc = if_getsoftc(ifp);
4105 	RL_LOCK(sc);
4106 	*nrxr = sc->rl_ldata.rl_rx_desc_cnt;
4107 	*ncl = DEBUGNET_MAX_IN_FLIGHT;
4108 	*clsize = (ifp->if_mtu > RL_MTU &&
4109 	    (sc->rl_flags & RL_FLAG_JUMBOV2) != 0) ? MJUM9BYTES : MCLBYTES;
4110 	RL_UNLOCK(sc);
4111 }
4112 
4113 static void
4114 re_debugnet_event(struct ifnet *ifp __unused, enum debugnet_ev event __unused)
4115 {
4116 }
4117 
4118 static int
4119 re_debugnet_transmit(struct ifnet *ifp, struct mbuf *m)
4120 {
4121 	struct rl_softc *sc;
4122 	int error;
4123 
4124 	sc = if_getsoftc(ifp);
4125 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4126 	    IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0)
4127 		return (EBUSY);
4128 
4129 	error = re_encap(sc, &m);
4130 	if (error == 0)
4131 		re_start_tx(sc);
4132 	return (error);
4133 }
4134 
4135 static int
4136 re_debugnet_poll(struct ifnet *ifp, int count)
4137 {
4138 	struct rl_softc *sc;
4139 	int error;
4140 
4141 	sc = if_getsoftc(ifp);
4142 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 ||
4143 	    (sc->rl_flags & RL_FLAG_LINK) == 0)
4144 		return (EBUSY);
4145 
4146 	re_txeof(sc);
4147 	error = re_rxeof(sc, NULL);
4148 	if (error != 0 && error != EAGAIN)
4149 		return (error);
4150 	return (0);
4151 }
4152 #endif /* DEBUGNET */
4153