xref: /freebsd/sys/dev/re/if_re.c (revision 4fd0d10e0fe684211328bc148edf89a792425b39)
1 /*-
2  * Copyright (c) 1997, 1998-2003
3  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by Bill Paul.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 /*
37  * RealTek 8139C+/8169/8169S/8110S/8168/8111/8101E PCI NIC driver
38  *
39  * Written by Bill Paul <wpaul@windriver.com>
40  * Senior Networking Software Engineer
41  * Wind River Systems
42  */
43 
44 /*
45  * This driver is designed to support RealTek's next generation of
46  * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
47  * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S,
48  * the RTL8110S, the RTL8168, the RTL8111 and the RTL8101E.
49  *
50  * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
51  * with the older 8139 family; however, it also supports a special
52  * C+ mode of operation that provides several new performance-enhancing
53  * features. These include:
54  *
55  *	o Descriptor based DMA mechanism. Each descriptor represents
56  *	  a single packet fragment. Data buffers may be aligned on
57  *	  any byte boundary.
58  *
59  *	o 64-bit DMA
60  *
61  *	o TCP/IP checksum offload for both RX and TX
62  *
63  *	o High and normal priority transmit DMA rings
64  *
65  *	o VLAN tag insertion and extraction
66  *
67  *	o TCP large send (segmentation offload)
68  *
69  * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
70  * programming API is fairly straightforward. The RX filtering, EEPROM
71  * access and PHY access are the same as they are on the older 8139
72  * series chips.
73  *
74  * The 8169 is a 64-bit 10/100/1000 ethernet MAC. It has almost the
75  * same programming API and feature set as the 8139C+ with the following
76  * differences and additions:
77  *
78  *	o 1000Mbps mode
79  *
80  *	o Jumbo frames
81  *
82  *	o GMII and TBI ports/registers for interfacing with copper
83  *	  or fiber PHYs
84  *
85  *	o RX and TX DMA rings can have up to 1024 descriptors
86  *	  (the 8139C+ allows a maximum of 64)
87  *
88  *	o Slight differences in register layout from the 8139C+
89  *
90  * The TX start and timer interrupt registers are at different locations
91  * on the 8169 than they are on the 8139C+. Also, the status word in the
92  * RX descriptor has a slightly different bit layout. The 8169 does not
93  * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
94  * copper gigE PHY.
95  *
96  * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
97  * (the 'S' stands for 'single-chip'). These devices have the same
98  * programming API as the older 8169, but also have some vendor-specific
99  * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
100  * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
101  *
102  * This driver takes advantage of the RX and TX checksum offload and
103  * VLAN tag insertion/extraction features. It also implements TX
104  * interrupt moderation using the timer interrupt registers, which
105  * significantly reduces TX interrupt load. There is also support
106  * for jumbo frames, however the 8169/8169S/8110S can not transmit
107  * jumbo frames larger than 7440, so the max MTU possible with this
108  * driver is 7422 bytes.
109  */
110 
111 #ifdef HAVE_KERNEL_OPTION_HEADERS
112 #include "opt_device_polling.h"
113 #endif
114 
115 #include <sys/param.h>
116 #include <sys/endian.h>
117 #include <sys/systm.h>
118 #include <sys/sockio.h>
119 #include <sys/mbuf.h>
120 #include <sys/malloc.h>
121 #include <sys/module.h>
122 #include <sys/kernel.h>
123 #include <sys/socket.h>
124 #include <sys/lock.h>
125 #include <sys/mutex.h>
126 #include <sys/sysctl.h>
127 #include <sys/taskqueue.h>
128 
129 #include <net/if.h>
130 #include <net/if_arp.h>
131 #include <net/ethernet.h>
132 #include <net/if_dl.h>
133 #include <net/if_media.h>
134 #include <net/if_types.h>
135 #include <net/if_vlan_var.h>
136 
137 #include <net/bpf.h>
138 
139 #include <machine/bus.h>
140 #include <machine/resource.h>
141 #include <sys/bus.h>
142 #include <sys/rman.h>
143 
144 #include <dev/mii/mii.h>
145 #include <dev/mii/miivar.h>
146 
147 #include <dev/pci/pcireg.h>
148 #include <dev/pci/pcivar.h>
149 
150 #include <pci/if_rlreg.h>
151 
152 MODULE_DEPEND(re, pci, 1, 1, 1);
153 MODULE_DEPEND(re, ether, 1, 1, 1);
154 MODULE_DEPEND(re, miibus, 1, 1, 1);
155 
156 /* "device miibus" required.  See GENERIC if you get errors here. */
157 #include "miibus_if.h"
158 
159 /* Tunables. */
160 static int intr_filter = 0;
161 TUNABLE_INT("hw.re.intr_filter", &intr_filter);
162 static int msi_disable = 0;
163 TUNABLE_INT("hw.re.msi_disable", &msi_disable);
164 static int msix_disable = 0;
165 TUNABLE_INT("hw.re.msix_disable", &msix_disable);
166 static int prefer_iomap = 0;
167 TUNABLE_INT("hw.re.prefer_iomap", &prefer_iomap);
168 
169 #define RE_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)
170 
171 /*
172  * Various supported device vendors/types and their names.
173  */
174 static const struct rl_type re_devs[] = {
175 	{ DLINK_VENDORID, DLINK_DEVICEID_528T, 0,
176 	    "D-Link DGE-528(T) Gigabit Ethernet Adapter" },
177 	{ DLINK_VENDORID, DLINK_DEVICEID_530T_REVC, 0,
178 	    "D-Link DGE-530(T) Gigabit Ethernet Adapter" },
179 	{ RT_VENDORID, RT_DEVICEID_8139, 0,
180 	    "RealTek 8139C+ 10/100BaseTX" },
181 	{ RT_VENDORID, RT_DEVICEID_8101E, 0,
182 	    "RealTek 810xE PCIe 10/100baseTX" },
183 	{ RT_VENDORID, RT_DEVICEID_8168, 0,
184 	    "RealTek 8168/8111 B/C/CP/D/DP/E/F PCIe Gigabit Ethernet" },
185 	{ RT_VENDORID, RT_DEVICEID_8169, 0,
186 	    "RealTek 8169/8169S/8169SB(L)/8110S/8110SB(L) Gigabit Ethernet" },
187 	{ RT_VENDORID, RT_DEVICEID_8169SC, 0,
188 	    "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" },
189 	{ COREGA_VENDORID, COREGA_DEVICEID_CGLAPCIGT, 0,
190 	    "Corega CG-LAPCIGT (RTL8169S) Gigabit Ethernet" },
191 	{ LINKSYS_VENDORID, LINKSYS_DEVICEID_EG1032, 0,
192 	    "Linksys EG1032 (RTL8169S) Gigabit Ethernet" },
193 	{ USR_VENDORID, USR_DEVICEID_997902, 0,
194 	    "US Robotics 997902 (RTL8169S) Gigabit Ethernet" }
195 };
196 
197 static const struct rl_hwrev re_hwrevs[] = {
198 	{ RL_HWREV_8139, RL_8139, "", RL_MTU },
199 	{ RL_HWREV_8139A, RL_8139, "A", RL_MTU },
200 	{ RL_HWREV_8139AG, RL_8139, "A-G", RL_MTU },
201 	{ RL_HWREV_8139B, RL_8139, "B", RL_MTU },
202 	{ RL_HWREV_8130, RL_8139, "8130", RL_MTU },
203 	{ RL_HWREV_8139C, RL_8139, "C", RL_MTU },
204 	{ RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C", RL_MTU },
205 	{ RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+", RL_MTU },
206 	{ RL_HWREV_8168B_SPIN1, RL_8169, "8168", RL_JUMBO_MTU },
207 	{ RL_HWREV_8169, RL_8169, "8169", RL_JUMBO_MTU },
208 	{ RL_HWREV_8169S, RL_8169, "8169S", RL_JUMBO_MTU },
209 	{ RL_HWREV_8110S, RL_8169, "8110S", RL_JUMBO_MTU },
210 	{ RL_HWREV_8169_8110SB, RL_8169, "8169SB/8110SB", RL_JUMBO_MTU },
211 	{ RL_HWREV_8169_8110SC, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU },
212 	{ RL_HWREV_8169_8110SBL, RL_8169, "8169SBL/8110SBL", RL_JUMBO_MTU },
213 	{ RL_HWREV_8169_8110SCE, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU },
214 	{ RL_HWREV_8100, RL_8139, "8100", RL_MTU },
215 	{ RL_HWREV_8101, RL_8139, "8101", RL_MTU },
216 	{ RL_HWREV_8100E, RL_8169, "8100E", RL_MTU },
217 	{ RL_HWREV_8101E, RL_8169, "8101E", RL_MTU },
218 	{ RL_HWREV_8102E, RL_8169, "8102E", RL_MTU },
219 	{ RL_HWREV_8102EL, RL_8169, "8102EL", RL_MTU },
220 	{ RL_HWREV_8102EL_SPIN1, RL_8169, "8102EL", RL_MTU },
221 	{ RL_HWREV_8103E, RL_8169, "8103E", RL_MTU },
222 	{ RL_HWREV_8401E, RL_8169, "8401E", RL_MTU },
223 	{ RL_HWREV_8402, RL_8169, "8402", RL_MTU },
224 	{ RL_HWREV_8105E, RL_8169, "8105E", RL_MTU },
225 	{ RL_HWREV_8105E_SPIN1, RL_8169, "8105E", RL_MTU },
226 	{ RL_HWREV_8168B_SPIN2, RL_8169, "8168", RL_JUMBO_MTU },
227 	{ RL_HWREV_8168B_SPIN3, RL_8169, "8168", RL_JUMBO_MTU },
228 	{ RL_HWREV_8168C, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K },
229 	{ RL_HWREV_8168C_SPIN2, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K },
230 	{ RL_HWREV_8168CP, RL_8169, "8168CP/8111CP", RL_JUMBO_MTU_6K },
231 	{ RL_HWREV_8168D, RL_8169, "8168D/8111D", RL_JUMBO_MTU_9K },
232 	{ RL_HWREV_8168DP, RL_8169, "8168DP/8111DP", RL_JUMBO_MTU_9K },
233 	{ RL_HWREV_8168E, RL_8169, "8168E/8111E", RL_JUMBO_MTU_9K},
234 	{ RL_HWREV_8168E_VL, RL_8169, "8168E/8111E-VL", RL_JUMBO_MTU_6K},
235 	{ RL_HWREV_8168F, RL_8169, "8168F/8111F", RL_JUMBO_MTU_9K},
236 	{ RL_HWREV_8411, RL_8169, "8411", RL_JUMBO_MTU_9K},
237 	{ 0, 0, NULL, 0 }
238 };
239 
240 static int re_probe		(device_t);
241 static int re_attach		(device_t);
242 static int re_detach		(device_t);
243 
244 static int re_encap		(struct rl_softc *, struct mbuf **);
245 
246 static void re_dma_map_addr	(void *, bus_dma_segment_t *, int, int);
247 static int re_allocmem		(device_t, struct rl_softc *);
248 static __inline void re_discard_rxbuf
249 				(struct rl_softc *, int);
250 static int re_newbuf		(struct rl_softc *, int);
251 static int re_jumbo_newbuf	(struct rl_softc *, int);
252 static int re_rx_list_init	(struct rl_softc *);
253 static int re_jrx_list_init	(struct rl_softc *);
254 static int re_tx_list_init	(struct rl_softc *);
255 #ifdef RE_FIXUP_RX
256 static __inline void re_fixup_rx
257 				(struct mbuf *);
258 #endif
259 static int re_rxeof		(struct rl_softc *, int *);
260 static void re_txeof		(struct rl_softc *);
261 #ifdef DEVICE_POLLING
262 static int re_poll		(struct ifnet *, enum poll_cmd, int);
263 static int re_poll_locked	(struct ifnet *, enum poll_cmd, int);
264 #endif
265 static int re_intr		(void *);
266 static void re_intr_msi		(void *);
267 static void re_tick		(void *);
268 static void re_int_task		(void *, int);
269 static void re_start		(struct ifnet *);
270 static void re_start_locked	(struct ifnet *);
271 static int re_ioctl		(struct ifnet *, u_long, caddr_t);
272 static void re_init		(void *);
273 static void re_init_locked	(struct rl_softc *);
274 static void re_stop		(struct rl_softc *);
275 static void re_watchdog		(struct rl_softc *);
276 static int re_suspend		(device_t);
277 static int re_resume		(device_t);
278 static int re_shutdown		(device_t);
279 static int re_ifmedia_upd	(struct ifnet *);
280 static void re_ifmedia_sts	(struct ifnet *, struct ifmediareq *);
281 
282 static void re_eeprom_putbyte	(struct rl_softc *, int);
283 static void re_eeprom_getword	(struct rl_softc *, int, u_int16_t *);
284 static void re_read_eeprom	(struct rl_softc *, caddr_t, int, int);
285 static int re_gmii_readreg	(device_t, int, int);
286 static int re_gmii_writereg	(device_t, int, int, int);
287 
288 static int re_miibus_readreg	(device_t, int, int);
289 static int re_miibus_writereg	(device_t, int, int, int);
290 static void re_miibus_statchg	(device_t);
291 
292 static void re_set_jumbo	(struct rl_softc *, int);
293 static void re_set_rxmode		(struct rl_softc *);
294 static void re_reset		(struct rl_softc *);
295 static void re_setwol		(struct rl_softc *);
296 static void re_clrwol		(struct rl_softc *);
297 static void re_set_linkspeed	(struct rl_softc *);
298 
299 #ifdef DEV_NETMAP	/* see ixgbe.c for details */
300 #include <dev/netmap/if_re_netmap.h>
301 #endif /* DEV_NETMAP */
302 
303 #ifdef RE_DIAG
304 static int re_diag		(struct rl_softc *);
305 #endif
306 
307 static void re_add_sysctls	(struct rl_softc *);
308 static int re_sysctl_stats	(SYSCTL_HANDLER_ARGS);
309 static int sysctl_int_range	(SYSCTL_HANDLER_ARGS, int, int);
310 static int sysctl_hw_re_int_mod	(SYSCTL_HANDLER_ARGS);
311 
312 static device_method_t re_methods[] = {
313 	/* Device interface */
314 	DEVMETHOD(device_probe,		re_probe),
315 	DEVMETHOD(device_attach,	re_attach),
316 	DEVMETHOD(device_detach,	re_detach),
317 	DEVMETHOD(device_suspend,	re_suspend),
318 	DEVMETHOD(device_resume,	re_resume),
319 	DEVMETHOD(device_shutdown,	re_shutdown),
320 
321 	/* MII interface */
322 	DEVMETHOD(miibus_readreg,	re_miibus_readreg),
323 	DEVMETHOD(miibus_writereg,	re_miibus_writereg),
324 	DEVMETHOD(miibus_statchg,	re_miibus_statchg),
325 
326 	DEVMETHOD_END
327 };
328 
329 static driver_t re_driver = {
330 	"re",
331 	re_methods,
332 	sizeof(struct rl_softc)
333 };
334 
335 static devclass_t re_devclass;
336 
337 DRIVER_MODULE(re, pci, re_driver, re_devclass, 0, 0);
338 DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0);
339 
340 #define EE_SET(x)					\
341 	CSR_WRITE_1(sc, RL_EECMD,			\
342 		CSR_READ_1(sc, RL_EECMD) | (x))
343 
344 #define EE_CLR(x)					\
345 	CSR_WRITE_1(sc, RL_EECMD,			\
346 		CSR_READ_1(sc, RL_EECMD) & ~(x))
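
/*
 * EE_SET() and EE_CLR() are read-modify-write helpers that toggle
 * individual control bits in the EEPROM command register; the EEPROM
 * routines below use them to bit-bang the serial protocol (drive
 * DATAIN, then strobe CLK once per bit).
 */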
347 
348 /*
349  * Send a read command and address to the EEPROM.
350  */
351 static void
352 re_eeprom_putbyte(struct rl_softc *sc, int addr)
353 {
354 	int			d, i;
355 
356 	d = addr | (RL_9346_READ << sc->rl_eewidth);
357 
358 	/*
359 	 * Feed in each bit and strobe the clock.
360 	 */
361 
362 	for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) {
363 		if (d & i) {
364 			EE_SET(RL_EE_DATAIN);
365 		} else {
366 			EE_CLR(RL_EE_DATAIN);
367 		}
368 		DELAY(100);
369 		EE_SET(RL_EE_CLK);
370 		DELAY(150);
371 		EE_CLR(RL_EE_CLK);
372 		DELAY(100);
373 	}
374 }
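
/*
 * A worked example, assuming RL_9346_READ is the standard 93Cxx READ
 * opcode (0x6): on a 93C46 part rl_eewidth is 6, so reading address
 * 0x20 builds d = (0x6 << 6) | 0x20 = 0x1a0, and the loop above then
 * clocks out ten bits, MSB first.
 */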
375 
376 /*
377  * Read a word of data stored in the EEPROM at address 'addr'.
378  */
379 static void
380 re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest)
381 {
382 	int			i;
383 	u_int16_t		word = 0;
384 
385 	/*
386 	 * Send address of word we want to read.
387 	 */
388 	re_eeprom_putbyte(sc, addr);
389 
390 	/*
391 	 * Start reading bits from EEPROM.
392 	 */
393 	for (i = 0x8000; i; i >>= 1) {
394 		EE_SET(RL_EE_CLK);
395 		DELAY(100);
396 		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
397 			word |= i;
398 		EE_CLR(RL_EE_CLK);
399 		DELAY(100);
400 	}
401 
402 	*dest = word;
403 }
404 
405 /*
406  * Read a sequence of words from the EEPROM.
407  */
408 static void
409 re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt)
410 {
411 	int			i;
412 	u_int16_t		word = 0, *ptr;
413 
414 	CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
415 
416 	DELAY(100);
417 
418 	for (i = 0; i < cnt; i++) {
419 		CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL);
420 		re_eeprom_getword(sc, off + i, &word);
421 		CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL);
422 		ptr = (u_int16_t *)(dest + (i * 2));
423 		*ptr = word;
424 	}
425 
426 	CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
427 }
428 
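/*
 * MII access on 8169-class chips goes through the indirect PHYAR
 * register: the register number is written to the upper bits and the
 * data travels in the low 16 bits.  RL_PHYAR_BUSY signals completion;
 * on a read the chip sets it once the data is valid, while on a write
 * the driver sets it and the chip clears it when the cycle finishes.
 */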
429 static int
430 re_gmii_readreg(device_t dev, int phy, int reg)
431 {
432 	struct rl_softc		*sc;
433 	u_int32_t		rval;
434 	int			i;
435 
436 	sc = device_get_softc(dev);
437 
438 	/* Let the rgephy driver read the GMEDIASTAT register */
439 
440 	if (reg == RL_GMEDIASTAT) {
441 		rval = CSR_READ_1(sc, RL_GMEDIASTAT);
442 		return (rval);
443 	}
444 
445 	CSR_WRITE_4(sc, RL_PHYAR, reg << 16);
446 
447 	for (i = 0; i < RL_PHY_TIMEOUT; i++) {
448 		rval = CSR_READ_4(sc, RL_PHYAR);
449 		if (rval & RL_PHYAR_BUSY)
450 			break;
451 		DELAY(25);
452 	}
453 
454 	if (i == RL_PHY_TIMEOUT) {
455 		device_printf(sc->rl_dev, "PHY read failed\n");
456 		return (0);
457 	}
458 
459 	/*
460 	 * Controller requires a 20us delay to process next MDIO request.
461 	 */
462 	DELAY(20);
463 
464 	return (rval & RL_PHYAR_PHYDATA);
465 }
466 
467 static int
468 re_gmii_writereg(device_t dev, int phy, int reg, int data)
469 {
470 	struct rl_softc		*sc;
471 	u_int32_t		rval;
472 	int			i;
473 
474 	sc = device_get_softc(dev);
475 
476 	CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) |
477 	    (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY);
478 
479 	for (i = 0; i < RL_PHY_TIMEOUT; i++) {
480 		rval = CSR_READ_4(sc, RL_PHYAR);
481 		if (!(rval & RL_PHYAR_BUSY))
482 			break;
483 		DELAY(25);
484 	}
485 
486 	if (i == RL_PHY_TIMEOUT) {
487 		device_printf(sc->rl_dev, "PHY write failed\n");
488 		return (0);
489 	}
490 
491 	/*
492 	 * Controller requires a 20us delay to process next MDIO request.
493 	 */
494 	DELAY(20);
495 
496 	return (0);
497 }
498 
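/*
 * Read a PHY register.  8169-class chips use the GMII path above;
 * the 8139C+ instead maps the standard MII registers onto
 * chip-specific CSR offsets, so translate the register number first.
 */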
499 static int
500 re_miibus_readreg(device_t dev, int phy, int reg)
501 {
502 	struct rl_softc		*sc;
503 	u_int16_t		rval = 0;
504 	u_int16_t		re8139_reg = 0;
505 
506 	sc = device_get_softc(dev);
507 
508 	if (sc->rl_type == RL_8169) {
509 		rval = re_gmii_readreg(dev, phy, reg);
510 		return (rval);
511 	}
512 
513 	switch (reg) {
514 	case MII_BMCR:
515 		re8139_reg = RL_BMCR;
516 		break;
517 	case MII_BMSR:
518 		re8139_reg = RL_BMSR;
519 		break;
520 	case MII_ANAR:
521 		re8139_reg = RL_ANAR;
522 		break;
523 	case MII_ANER:
524 		re8139_reg = RL_ANER;
525 		break;
526 	case MII_ANLPAR:
527 		re8139_reg = RL_LPAR;
528 		break;
529 	case MII_PHYIDR1:
530 	case MII_PHYIDR2:
531 		return (0);
532 	/*
533 	 * Allow the rlphy driver to read the media status
534 	 * register. If we have a link partner which does not
535 	 * support NWAY, this is the register which will tell
536 	 * us the results of parallel detection.
537 	 */
538 	case RL_MEDIASTAT:
539 		rval = CSR_READ_1(sc, RL_MEDIASTAT);
540 		return (rval);
541 	default:
542 		device_printf(sc->rl_dev, "bad phy register\n");
543 		return (0);
544 	}
545 	rval = CSR_READ_2(sc, re8139_reg);
546 	if (sc->rl_type == RL_8139CPLUS && re8139_reg == RL_BMCR) {
547 		/* 8139C+ has different bit layout. */
548 		rval &= ~(BMCR_LOOP | BMCR_ISO);
549 	}
550 	return (rval);
551 }
552 
553 static int
554 re_miibus_writereg(device_t dev, int phy, int reg, int data)
555 {
556 	struct rl_softc		*sc;
557 	u_int16_t		re8139_reg = 0;
558 	int			rval = 0;
559 
560 	sc = device_get_softc(dev);
561 
562 	if (sc->rl_type == RL_8169) {
563 		rval = re_gmii_writereg(dev, phy, reg, data);
564 		return (rval);
565 	}
566 
567 	switch (reg) {
568 	case MII_BMCR:
569 		re8139_reg = RL_BMCR;
570 		if (sc->rl_type == RL_8139CPLUS) {
571 			/* 8139C+ has different bit layout. */
572 			data &= ~(BMCR_LOOP | BMCR_ISO);
573 		}
574 		break;
575 	case MII_BMSR:
576 		re8139_reg = RL_BMSR;
577 		break;
578 	case MII_ANAR:
579 		re8139_reg = RL_ANAR;
580 		break;
581 	case MII_ANER:
582 		re8139_reg = RL_ANER;
583 		break;
584 	case MII_ANLPAR:
585 		re8139_reg = RL_LPAR;
586 		break;
587 	case MII_PHYIDR1:
588 	case MII_PHYIDR2:
589 		return (0);
591 	default:
592 		device_printf(sc->rl_dev, "bad phy register\n");
593 		return (0);
594 	}
595 	CSR_WRITE_2(sc, re8139_reg, data);
596 	return (0);
597 }
598 
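/*
 * Link state change callback.  Track whether a usable link is present
 * in RL_FLAG_LINK so the rest of the driver knows when transmission
 * is possible.
 */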
599 static void
600 re_miibus_statchg(device_t dev)
601 {
602 	struct rl_softc		*sc;
603 	struct ifnet		*ifp;
604 	struct mii_data		*mii;
605 
606 	sc = device_get_softc(dev);
607 	mii = device_get_softc(sc->rl_miibus);
608 	ifp = sc->rl_ifp;
609 	if (mii == NULL || ifp == NULL ||
610 	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
611 		return;
612 
613 	sc->rl_flags &= ~RL_FLAG_LINK;
614 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
615 	    (IFM_ACTIVE | IFM_AVALID)) {
616 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
617 		case IFM_10_T:
618 		case IFM_100_TX:
619 			sc->rl_flags |= RL_FLAG_LINK;
620 			break;
621 		case IFM_1000_T:
622 			if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0)
623 				break;
624 			sc->rl_flags |= RL_FLAG_LINK;
625 			break;
626 		default:
627 			break;
628 		}
629 	}
630 	/*
631 	 * RealTek controllers do not provide any interface for telling
632 	 * the Tx/Rx MACs about the resolved speed, duplex and
633 	 * flow-control parameters.
634 	 */
635 }
636 
637 /*
638  * Set the RX configuration and 64-bit multicast hash filter.
639  */
640 static void
641 re_set_rxmode(struct rl_softc *sc)
642 {
643 	struct ifnet		*ifp;
644 	struct ifmultiaddr	*ifma;
645 	uint32_t		hashes[2] = { 0, 0 };
646 	uint32_t		h, rxfilt;
647 
648 	RL_LOCK_ASSERT(sc);
649 
650 	ifp = sc->rl_ifp;
651 
652 	rxfilt = RL_RXCFG_CONFIG | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD;
653 
654 	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
655 		if (ifp->if_flags & IFF_PROMISC)
656 			rxfilt |= RL_RXCFG_RX_ALLPHYS;
657 		/*
658 		 * Unlike other hardware, we have to explicitly set
659 		 * RL_RXCFG_RX_MULTI to receive multicast frames in
660 		 * promiscuous mode.
661 		 */
662 		rxfilt |= RL_RXCFG_RX_MULTI;
663 		hashes[0] = hashes[1] = 0xffffffff;
664 		goto done;
665 	}
666 
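	/*
	 * Hash each link-level address into the 64-bit filter: the top
	 * six bits of the big-endian CRC-32 of the MAC address pick one
	 * of 64 bits, with bits 0-31 kept in RL_MAR0 and bits 32-63 in
	 * RL_MAR4.  As an illustrative example, a CRC whose upper six
	 * bits equal 33 sets bit 1 of RL_MAR4.
	 */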
667 	if_maddr_rlock(ifp);
668 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
669 		if (ifma->ifma_addr->sa_family != AF_LINK)
670 			continue;
671 		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
672 		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
673 		if (h < 32)
674 			hashes[0] |= (1 << h);
675 		else
676 			hashes[1] |= (1 << (h - 32));
677 	}
678 	if_maddr_runlock(ifp);
679 
680 	if (hashes[0] != 0 || hashes[1] != 0) {
681 		/*
682 		 * For some unfathomable reason, RealTek decided to
683 		 * reverse the order of the multicast hash registers
684 		 * in the PCI Express parts.  This means we have to
685 		 * write the hash pattern in reverse order for those
686 		 * devices.
687 		 */
688 		if ((sc->rl_flags & RL_FLAG_PCIE) != 0) {
689 			h = bswap32(hashes[0]);
690 			hashes[0] = bswap32(hashes[1]);
691 			hashes[1] = h;
692 		}
693 		rxfilt |= RL_RXCFG_RX_MULTI;
694 	}
695 
696 done:
697 	CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
698 	CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
699 	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
700 }
701 
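/*
 * Issue a software reset: set the RESET bit in the command register
 * and poll until the chip clears it again.  A few older MACs need an
 * extra undocumented poke afterwards (see the MACRESET handling below).
 */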
702 static void
703 re_reset(struct rl_softc *sc)
704 {
705 	int			i;
706 
707 	RL_LOCK_ASSERT(sc);
708 
709 	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);
710 
711 	for (i = 0; i < RL_TIMEOUT; i++) {
712 		DELAY(10);
713 		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
714 			break;
715 	}
716 	if (i == RL_TIMEOUT)
717 		device_printf(sc->rl_dev, "reset never completed!\n");
718 
719 	if ((sc->rl_flags & RL_FLAG_MACRESET) != 0)
720 		CSR_WRITE_1(sc, 0x82, 1);
721 	if (sc->rl_hwrev->rl_rev == RL_HWREV_8169S)
722 		re_gmii_writereg(sc->rl_dev, 1, 0x0b, 0);
723 }
724 
725 #ifdef RE_DIAG
726 
727 /*
728  * The following routine is designed to test for a defect on some
729  * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64#
730  * lines connected to the bus, however for a 32-bit only card, they
731  * should be pulled high. The result of this defect is that the
732  * NIC will not work right if you plug it into a 64-bit slot: DMA
733  * operations will be done with 64-bit transfers, which will fail
734  * because the 64-bit data lines aren't connected.
735  *
736  * There's no way to work around this (short of taking a soldering
737  * iron to the board); however, we can detect it. The method we use
738  * here is to put the NIC into digital loopback mode, set the receiver
739  * to promiscuous mode, and then try to send a frame. We then compare
740  * the frame data we sent to what was received. If the data matches,
741  * then the NIC is working correctly, otherwise we know the user has
742  * a defective NIC which has been mistakenly plugged into a 64-bit PCI
743  * slot. In the latter case, there's no way the NIC can work correctly,
744  * so we print out a message on the console and abort the device attach.
745  */
746 
747 static int
748 re_diag(struct rl_softc *sc)
749 {
750 	struct ifnet		*ifp = sc->rl_ifp;
751 	struct mbuf		*m0;
752 	struct ether_header	*eh;
753 	struct rl_desc		*cur_rx;
754 	u_int16_t		status;
755 	u_int32_t		rxstat;
756 	int			total_len, i, error = 0, phyaddr;
757 	u_int8_t		dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
758 	u_int8_t		src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };
759 
760 	/* Allocate a single mbuf */
761 	MGETHDR(m0, M_NOWAIT, MT_DATA);
762 	if (m0 == NULL)
763 		return (ENOBUFS);
764 
765 	RL_LOCK(sc);
766 
767 	/*
768 	 * Initialize the NIC in test mode. This sets the chip up
769 	 * so that it can send and receive frames, but performs the
770 	 * following special functions:
771 	 * - Puts receiver in promiscuous mode
772 	 * - Enables digital loopback mode
773 	 * - Leaves interrupts turned off
774 	 */
775 
776 	ifp->if_flags |= IFF_PROMISC;
777 	sc->rl_testmode = 1;
778 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
779 	re_init_locked(sc);
780 	sc->rl_flags |= RL_FLAG_LINK;
781 	if (sc->rl_type == RL_8169)
782 		phyaddr = 1;
783 	else
784 		phyaddr = 0;
785 
786 	re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_RESET);
787 	for (i = 0; i < RL_TIMEOUT; i++) {
788 		status = re_miibus_readreg(sc->rl_dev, phyaddr, MII_BMCR);
789 		if (!(status & BMCR_RESET))
790 			break;
791 	}
792 
793 	re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_LOOP);
794 	CSR_WRITE_2(sc, RL_ISR, RL_INTRS);
795 
796 	DELAY(100000);
797 
798 	/* Put some data in the mbuf */
799 
800 	eh = mtod(m0, struct ether_header *);
801 	bcopy ((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN);
802 	bcopy ((char *)&src, eh->ether_shost, ETHER_ADDR_LEN);
803 	eh->ether_type = htons(ETHERTYPE_IP);
804 	m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;
805 
806 	/*
807 	 * Queue the packet, start transmission.
808 	 * Note: IF_HANDOFF() ultimately calls re_start() for us.
809 	 */
810 
811 	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
812 	RL_UNLOCK(sc);
813 	/* XXX: re_diag must not be called when in ALTQ mode */
814 	IF_HANDOFF(&ifp->if_snd, m0, ifp);
815 	RL_LOCK(sc);
816 	m0 = NULL;
817 
818 	/* Wait for it to propagate through the chip */
819 
820 	DELAY(100000);
821 	for (i = 0; i < RL_TIMEOUT; i++) {
822 		status = CSR_READ_2(sc, RL_ISR);
823 		CSR_WRITE_2(sc, RL_ISR, status);
824 		if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) ==
825 		    (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK))
826 			break;
827 		DELAY(10);
828 	}
829 
830 	if (i == RL_TIMEOUT) {
831 		device_printf(sc->rl_dev,
832 		    "diagnostic failed, failed to receive packet in"
833 		    " loopback mode\n");
834 		error = EIO;
835 		goto done;
836 	}
837 
838 	/*
839 	 * The packet should have been dumped into the first
840 	 * entry in the RX DMA ring. Grab it from there.
841 	 */
842 
843 	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
844 	    sc->rl_ldata.rl_rx_list_map,
845 	    BUS_DMASYNC_POSTREAD);
846 	bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
847 	    sc->rl_ldata.rl_rx_desc[0].rx_dmamap,
848 	    BUS_DMASYNC_POSTREAD);
849 	bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag,
850 	    sc->rl_ldata.rl_rx_desc[0].rx_dmamap);
851 
852 	m0 = sc->rl_ldata.rl_rx_desc[0].rx_m;
853 	sc->rl_ldata.rl_rx_desc[0].rx_m = NULL;
854 	eh = mtod(m0, struct ether_header *);
855 
856 	cur_rx = &sc->rl_ldata.rl_rx_list[0];
857 	total_len = RL_RXBYTES(cur_rx);
858 	rxstat = le32toh(cur_rx->rl_cmdstat);
859 
860 	if (total_len != ETHER_MIN_LEN) {
861 		device_printf(sc->rl_dev,
862 		    "diagnostic failed, received short packet\n");
863 		error = EIO;
864 		goto done;
865 	}
866 
867 	/* Test that the received packet data matches what we sent. */
868 
869 	if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) ||
870 	    bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) ||
871 	    ntohs(eh->ether_type) != ETHERTYPE_IP) {
872 		device_printf(sc->rl_dev, "WARNING, DMA FAILURE!\n");
873 		device_printf(sc->rl_dev, "expected TX data: %6D/%6D/0x%x\n",
874 		    dst, ":", src, ":", ETHERTYPE_IP);
875 		device_printf(sc->rl_dev, "received RX data: %6D/%6D/0x%x\n",
876 		    eh->ether_dhost, ":", eh->ether_shost, ":",
877 		    ntohs(eh->ether_type));
878 		device_printf(sc->rl_dev, "You may have a defective 32-bit "
879 		    "NIC plugged into a 64-bit PCI slot.\n");
880 		device_printf(sc->rl_dev, "Please re-install the NIC in a "
881 		    "32-bit slot for proper operation.\n");
882 		device_printf(sc->rl_dev, "Read the re(4) man page for more "
883 		    "details.\n");
884 		error = EIO;
885 	}
886 
887 done:
888 	/* Turn interface off, release resources */
889 
890 	sc->rl_testmode = 0;
891 	sc->rl_flags &= ~RL_FLAG_LINK;
892 	ifp->if_flags &= ~IFF_PROMISC;
893 	re_stop(sc);
894 	if (m0 != NULL)
895 		m_freem(m0);
896 
897 	RL_UNLOCK(sc);
898 
899 	return (error);
900 }
901 
902 #endif
903 
904 /*
905  * Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device
906  * IDs against our list and return a device name if we find a match.
907  */
908 static int
909 re_probe(device_t dev)
910 {
911 	const struct rl_type	*t;
912 	uint16_t		devid, vendor;
913 	uint16_t		revid, sdevid;
914 	int			i;
915 
916 	vendor = pci_get_vendor(dev);
917 	devid = pci_get_device(dev);
918 	revid = pci_get_revid(dev);
919 	sdevid = pci_get_subdevice(dev);
920 
921 	if (vendor == LINKSYS_VENDORID && devid == LINKSYS_DEVICEID_EG1032) {
922 		if (sdevid != LINKSYS_SUBDEVICE_EG1032_REV3) {
923 			/*
924 			 * Only attach to rev. 3 of the Linksys EG1032 adapter.
925 			 * Rev. 2 is supported by sk(4).
926 			 */
927 			return (ENXIO);
928 		}
929 	}
930 
931 	if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) {
932 		if (revid != 0x20) {
933 			/* 8139, let rl(4) take care of this device. */
934 			return (ENXIO);
935 		}
936 	}
937 
938 	t = re_devs;
939 	for (i = 0; i < sizeof(re_devs) / sizeof(re_devs[0]); i++, t++) {
940 		if (vendor == t->rl_vid && devid == t->rl_did) {
941 			device_set_desc(dev, t->rl_name);
942 			return (BUS_PROBE_DEFAULT);
943 		}
944 	}
945 
946 	return (ENXIO);
947 }
948 
949 /*
950  * Map a single buffer address.
951  */
952 
953 static void
954 re_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
955 {
956 	bus_addr_t		*addr;
957 
958 	if (error)
959 		return;
960 
961 	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
962 	addr = arg;
963 	*addr = segs->ds_addr;
964 }
965 
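/*
 * Allocate all DMA resources: a parent tag, mbuf tags for TX/RX, the
 * descriptor ring memory and its maps, the per-buffer maps and the
 * statistics dump area.  Any partial allocation left behind by a
 * failure here is torn down by re_detach().
 */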
966 static int
967 re_allocmem(device_t dev, struct rl_softc *sc)
968 {
969 	bus_addr_t		lowaddr;
970 	bus_size_t		rx_list_size, tx_list_size;
971 	int			error;
972 	int			i;
973 
974 	rx_list_size = sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc);
975 	tx_list_size = sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc);
976 
977 	/*
978 	 * Allocate the parent bus DMA tag appropriate for PCI.
979 	 * In order to use DAC, the RL_CPLUSCMD_PCI_DAC bit of the
980 	 * RL_CPLUS_CMD register should be set. However, some RealTek
981 	 * chips are known to be buggy with DAC handling, so disable DAC
982 	 * by limiting the DMA address space to 32 bits. PCIe variants
983 	 * of RealTek chips may not have this limitation.
984 	 */
985 	lowaddr = BUS_SPACE_MAXADDR;
986 	if ((sc->rl_flags & RL_FLAG_PCIE) == 0)
987 		lowaddr = BUS_SPACE_MAXADDR_32BIT;
988 	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
989 	    lowaddr, BUS_SPACE_MAXADDR, NULL, NULL,
990 	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
991 	    NULL, NULL, &sc->rl_parent_tag);
992 	if (error) {
993 		device_printf(dev, "could not allocate parent DMA tag\n");
994 		return (error);
995 	}
996 
997 	/*
998 	 * Allocate map for TX mbufs.
999 	 */
1000 	error = bus_dma_tag_create(sc->rl_parent_tag, 1, 0,
1001 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1002 	    NULL, MCLBYTES * RL_NTXSEGS, RL_NTXSEGS, 4096, 0,
1003 	    NULL, NULL, &sc->rl_ldata.rl_tx_mtag);
1004 	if (error) {
1005 		device_printf(dev, "could not allocate TX DMA tag\n");
1006 		return (error);
1007 	}
1008 
1009 	/*
1010 	 * Allocate map for RX mbufs.
1011 	 */
1012 
1013 	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
1014 		error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t),
1015 		    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1016 		    MJUM9BYTES, 1, MJUM9BYTES, 0, NULL, NULL,
1017 		    &sc->rl_ldata.rl_jrx_mtag);
1018 		if (error) {
1019 			device_printf(dev,
1020 			    "could not allocate jumbo RX DMA tag\n");
1021 			return (error);
1022 		}
1023 	}
1024 	error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 0,
1025 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1026 	    MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rl_ldata.rl_rx_mtag);
1027 	if (error) {
1028 		device_printf(dev, "could not allocate RX DMA tag\n");
1029 		return (error);
1030 	}
1031 
1032 	/*
1033 	 * Allocate map for TX descriptor list.
1034 	 */
1035 	error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
1036 	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
1037 	    NULL, tx_list_size, 1, tx_list_size, 0,
1038 	    NULL, NULL, &sc->rl_ldata.rl_tx_list_tag);
1039 	if (error) {
1040 		device_printf(dev, "could not allocate TX DMA ring tag\n");
1041 		return (error);
1042 	}
1043 
1044 	/* Allocate DMA'able memory for the TX ring */
1045 
1046 	error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag,
1047 	    (void **)&sc->rl_ldata.rl_tx_list,
1048 	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
1049 	    &sc->rl_ldata.rl_tx_list_map);
1050 	if (error) {
1051 		device_printf(dev, "could not allocate TX DMA ring\n");
1052 		return (error);
1053 	}
1054 
1055 	/* Load the map for the TX ring. */
1056 
1057 	sc->rl_ldata.rl_tx_list_addr = 0;
1058 	error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag,
1059 	     sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list,
1060 	     tx_list_size, re_dma_map_addr,
1061 	     &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT);
1062 	if (error != 0 || sc->rl_ldata.rl_tx_list_addr == 0) {
1063 		device_printf(dev, "could not load TX DMA ring\n");
1064 		return (ENOMEM);
1065 	}
1066 
1067 	/* Create DMA maps for TX buffers */
1068 
1069 	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
1070 		error = bus_dmamap_create(sc->rl_ldata.rl_tx_mtag, 0,
1071 		    &sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
1072 		if (error) {
1073 			device_printf(dev, "could not create DMA map for TX\n");
1074 			return (error);
1075 		}
1076 	}
1077 
1078 	/*
1079 	 * Allocate map for RX descriptor list.
1080 	 */
1081 	error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
1082 	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
1083 	    NULL, rx_list_size, 1, rx_list_size, 0,
1084 	    NULL, NULL, &sc->rl_ldata.rl_rx_list_tag);
1085 	if (error) {
1086 		device_printf(dev, "could not create RX DMA ring tag\n");
1087 		return (error);
1088 	}
1089 
1090 	/* Allocate DMA'able memory for the RX ring */
1091 
1092 	error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag,
1093 	    (void **)&sc->rl_ldata.rl_rx_list,
1094 	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
1095 	    &sc->rl_ldata.rl_rx_list_map);
1096 	if (error) {
1097 		device_printf(dev, "could not allocate RX DMA ring\n");
1098 		return (error);
1099 	}
1100 
1101 	/* Load the map for the RX ring. */
1102 
1103 	sc->rl_ldata.rl_rx_list_addr = 0;
1104 	error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag,
1105 	     sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list,
1106 	     rx_list_size, re_dma_map_addr,
1107 	     &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT);
1108 	if (error != 0 || sc->rl_ldata.rl_rx_list_addr == 0) {
1109 		device_printf(dev, "could not load RX DMA ring\n");
1110 		return (ENOMEM);
1111 	}
1112 
1113 	/* Create DMA maps for RX buffers */
1114 
1115 	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
1116 		error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0,
1117 		    &sc->rl_ldata.rl_jrx_sparemap);
1118 		if (error) {
1119 			device_printf(dev,
1120 			    "could not create spare DMA map for jumbo RX\n");
1121 			return (error);
1122 		}
1123 		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1124 			error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0,
1125 			    &sc->rl_ldata.rl_jrx_desc[i].rx_dmamap);
1126 			if (error) {
1127 				device_printf(dev,
1128 				    "could not create DMA map for jumbo RX\n");
1129 				return (error);
1130 			}
1131 		}
1132 	}
1133 	error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
1134 	    &sc->rl_ldata.rl_rx_sparemap);
1135 	if (error) {
1136 		device_printf(dev, "could not create spare DMA map for RX\n");
1137 		return (error);
1138 	}
1139 	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1140 		error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
1141 		    &sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
1142 		if (error) {
1143 			device_printf(dev, "could not create DMA map for RX\n");
1144 			return (error);
1145 		}
1146 	}
1147 
1148 	/* Create DMA map for statistics. */
1149 	error = bus_dma_tag_create(sc->rl_parent_tag, RL_DUMP_ALIGN, 0,
1150 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1151 	    sizeof(struct rl_stats), 1, sizeof(struct rl_stats), 0, NULL, NULL,
1152 	    &sc->rl_ldata.rl_stag);
1153 	if (error) {
1154 		device_printf(dev, "could not create statistics DMA tag\n");
1155 		return (error);
1156 	}
1157 	/* Allocate DMA'able memory for statistics. */
1158 	error = bus_dmamem_alloc(sc->rl_ldata.rl_stag,
1159 	    (void **)&sc->rl_ldata.rl_stats,
1160 	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
1161 	    &sc->rl_ldata.rl_smap);
1162 	if (error) {
1163 		device_printf(dev,
1164 		    "could not allocate statistics DMA memory\n");
1165 		return (error);
1166 	}
1167 	/* Load the map for statistics. */
1168 	sc->rl_ldata.rl_stats_addr = 0;
1169 	error = bus_dmamap_load(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap,
1170 	    sc->rl_ldata.rl_stats, sizeof(struct rl_stats), re_dma_map_addr,
1171 	     &sc->rl_ldata.rl_stats_addr, BUS_DMA_NOWAIT);
1172 	if (error != 0 || sc->rl_ldata.rl_stats_addr == 0) {
1173 		device_printf(dev, "could not load statistics DMA memory\n");
1174 		return (ENOMEM);
1175 	}
1176 
1177 	return (0);
1178 }
1179 
1180 /*
1181  * Attach the interface. Allocate softc structures, do ifmedia
1182  * setup and ethernet/BPF attach.
1183  */
1184 static int
1185 re_attach(device_t dev)
1186 {
1187 	u_char			eaddr[ETHER_ADDR_LEN];
1188 	u_int16_t		as[ETHER_ADDR_LEN / 2];
1189 	struct rl_softc		*sc;
1190 	struct ifnet		*ifp;
1191 	const struct rl_hwrev	*hw_rev;
1192 	u_int32_t		cap, ctl;
1193 	int			hwrev;
1194 	u_int16_t		devid, re_did = 0;
1195 	int			error = 0, i, phy, rid;
1196 	int			msic, msixc, reg;
1197 	uint8_t			cfg;
1198 
1199 	sc = device_get_softc(dev);
1200 	sc->rl_dev = dev;
1201 
1202 	mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1203 	    MTX_DEF);
1204 	callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0);
1205 
1206 	/*
1207 	 * Map control/status registers.
1208 	 */
1209 	pci_enable_busmaster(dev);
1210 
1211 	devid = pci_get_device(dev);
1212 	/*
1213 	 * Prefer memory space register mapping over I/O space.  Because
1214 	 * the RTL8169SC does not seem to work when memory mapping is
1215 	 * used, always activate I/O mapping for that chip.
1216 	 */
1217 	if (devid == RT_DEVICEID_8169SC)
1218 		prefer_iomap = 1;
1219 	if (prefer_iomap == 0) {
1220 		sc->rl_res_id = PCIR_BAR(1);
1221 		sc->rl_res_type = SYS_RES_MEMORY;
1222 		/* RTL8168/8101E seems to use different BARs. */
1223 		if (devid == RT_DEVICEID_8168 || devid == RT_DEVICEID_8101E)
1224 			sc->rl_res_id = PCIR_BAR(2);
1225 	} else {
1226 		sc->rl_res_id = PCIR_BAR(0);
1227 		sc->rl_res_type = SYS_RES_IOPORT;
1228 	}
1229 	sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
1230 	    &sc->rl_res_id, RF_ACTIVE);
1231 	if (sc->rl_res == NULL && prefer_iomap == 0) {
1232 		sc->rl_res_id = PCIR_BAR(0);
1233 		sc->rl_res_type = SYS_RES_IOPORT;
1234 		sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
1235 		    &sc->rl_res_id, RF_ACTIVE);
1236 	}
1237 	if (sc->rl_res == NULL) {
1238 		device_printf(dev, "couldn't map ports/memory\n");
1239 		error = ENXIO;
1240 		goto fail;
1241 	}
1242 
1243 	sc->rl_btag = rman_get_bustag(sc->rl_res);
1244 	sc->rl_bhandle = rman_get_bushandle(sc->rl_res);
1245 
1246 	msic = pci_msi_count(dev);
1247 	msixc = pci_msix_count(dev);
1248 	if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
1249 		sc->rl_flags |= RL_FLAG_PCIE;
1250 		sc->rl_expcap = reg;
1251 	}
1252 	if (bootverbose) {
1253 		device_printf(dev, "MSI count : %d\n", msic);
1254 		device_printf(dev, "MSI-X count : %d\n", msixc);
1255 	}
1256 	if (msix_disable > 0)
1257 		msixc = 0;
1258 	if (msi_disable > 0)
1259 		msic = 0;
1260 	/* Prefer MSI-X to MSI. */
1261 	if (msixc > 0) {
1262 		msixc = 1;
1263 		rid = PCIR_BAR(4);
1264 		sc->rl_res_pba = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1265 		    &rid, RF_ACTIVE);
1266 		if (sc->rl_res_pba == NULL) {
1267 			device_printf(sc->rl_dev,
1268 			    "could not allocate MSI-X PBA resource\n");
1269 		}
1270 		if (sc->rl_res_pba != NULL &&
1271 		    pci_alloc_msix(dev, &msixc) == 0) {
1272 			if (msixc == 1) {
1273 				device_printf(dev, "Using %d MSI-X message\n",
1274 				    msixc);
1275 				sc->rl_flags |= RL_FLAG_MSIX;
1276 			} else
1277 				pci_release_msi(dev);
1278 		}
1279 		if ((sc->rl_flags & RL_FLAG_MSIX) == 0) {
1280 			if (sc->rl_res_pba != NULL)
1281 				bus_release_resource(dev, SYS_RES_MEMORY, rid,
1282 				    sc->rl_res_pba);
1283 			sc->rl_res_pba = NULL;
1284 			msixc = 0;
1285 		}
1286 	}
1287 	/* Prefer MSI to INTx. */
1288 	if (msixc == 0 && msic > 0) {
1289 		msic = 1;
1290 		if (pci_alloc_msi(dev, &msic) == 0) {
1291 			if (msic == RL_MSI_MESSAGES) {
1292 				device_printf(dev, "Using %d MSI message\n",
1293 				    msic);
1294 				sc->rl_flags |= RL_FLAG_MSI;
1295 				/* Explicitly set MSI enable bit. */
1296 				CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
1297 				cfg = CSR_READ_1(sc, RL_CFG2);
1298 				cfg |= RL_CFG2_MSI;
1299 				CSR_WRITE_1(sc, RL_CFG2, cfg);
1300 				CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
1301 			} else
1302 				pci_release_msi(dev);
1303 		}
1304 		if ((sc->rl_flags & RL_FLAG_MSI) == 0)
1305 			msic = 0;
1306 	}
1307 
1308 	/* Allocate interrupt */
1309 	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) {
1310 		rid = 0;
1311 		sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1312 		    RF_SHAREABLE | RF_ACTIVE);
1313 		if (sc->rl_irq[0] == NULL) {
1314 			device_printf(dev, "couldn't allocate IRQ resources\n");
1315 			error = ENXIO;
1316 			goto fail;
1317 		}
1318 	} else {
1319 		for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) {
1320 			sc->rl_irq[i] = bus_alloc_resource_any(dev,
1321 			    SYS_RES_IRQ, &rid, RF_ACTIVE);
1322 			if (sc->rl_irq[i] == NULL) {
1323 				device_printf(dev,
1324 				    "couldn't allocate IRQ resources for "
1325 				    "message %d\n", rid);
1326 				error = ENXIO;
1327 				goto fail;
1328 			}
1329 		}
1330 	}
1331 
1332 	if ((sc->rl_flags & RL_FLAG_MSI) == 0) {
1333 		CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
1334 		cfg = CSR_READ_1(sc, RL_CFG2);
1335 		if ((cfg & RL_CFG2_MSI) != 0) {
1336 			device_printf(dev, "turning off MSI enable bit.\n");
1337 			cfg &= ~RL_CFG2_MSI;
1338 			CSR_WRITE_1(sc, RL_CFG2, cfg);
1339 		}
1340 		CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
1341 	}
1342 
1343 	/* Disable ASPM L0S/L1. */
1344 	if (sc->rl_expcap != 0) {
1345 		cap = pci_read_config(dev, sc->rl_expcap +
1346 		    PCIER_LINK_CAP, 2);
1347 		if ((cap & PCIEM_LINK_CAP_ASPM) != 0) {
1348 			ctl = pci_read_config(dev, sc->rl_expcap +
1349 			    PCIER_LINK_CTL, 2);
1350 			if ((ctl & PCIEM_LINK_CTL_ASPMC) != 0) {
1351 				ctl &= ~PCIEM_LINK_CTL_ASPMC;
1352 				pci_write_config(dev, sc->rl_expcap +
1353 				    PCIER_LINK_CTL, ctl, 2);
1354 				device_printf(dev, "ASPM disabled\n");
1355 			}
1356 		} else
1357 			device_printf(dev, "no ASPM capability\n");
1358 	}
1359 
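	/*
	 * The hardware revision is encoded in the upper bits of the TX
	 * configuration register.  Chips whose bits 30-28 read 0x0 or
	 * 0x1 also use bit 31 as part of the revision code, so bit 31
	 * is kept in the mask for those; everything else is matched on
	 * RL_TXCFG_HWREV alone.
	 */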
1360 	hw_rev = re_hwrevs;
1361 	hwrev = CSR_READ_4(sc, RL_TXCFG);
1362 	switch (hwrev & 0x70000000) {
1363 	case 0x00000000:
1364 	case 0x10000000:
1365 		device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0xfc800000);
1366 		hwrev &= (RL_TXCFG_HWREV | 0x80000000);
1367 		break;
1368 	default:
1369 		device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0x7c800000);
1370 		hwrev &= RL_TXCFG_HWREV;
1371 		break;
1372 	}
1373 	device_printf(dev, "MAC rev. 0x%08x\n", hwrev & 0x00700000);
1374 	while (hw_rev->rl_desc != NULL) {
1375 		if (hw_rev->rl_rev == hwrev) {
1376 			sc->rl_type = hw_rev->rl_type;
1377 			sc->rl_hwrev = hw_rev;
1378 			break;
1379 		}
1380 		hw_rev++;
1381 	}
1382 	if (hw_rev->rl_desc == NULL) {
1383 		device_printf(dev, "Unknown H/W revision: 0x%08x\n", hwrev);
1384 		error = ENXIO;
1385 		goto fail;
1386 	}
1387 
1388 	switch (hw_rev->rl_rev) {
1389 	case RL_HWREV_8139CPLUS:
1390 		sc->rl_flags |= RL_FLAG_FASTETHER | RL_FLAG_AUTOPAD;
1391 		break;
1392 	case RL_HWREV_8100E:
1393 	case RL_HWREV_8101E:
1394 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_FASTETHER;
1395 		break;
1396 	case RL_HWREV_8102E:
1397 	case RL_HWREV_8102EL:
1398 	case RL_HWREV_8102EL_SPIN1:
1399 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 |
1400 		    RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP |
1401 		    RL_FLAG_AUTOPAD;
1402 		break;
1403 	case RL_HWREV_8103E:
1404 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 |
1405 		    RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP |
1406 		    RL_FLAG_AUTOPAD | RL_FLAG_MACSLEEP;
1407 		break;
1408 	case RL_HWREV_8401E:
1409 	case RL_HWREV_8105E:
1410 	case RL_HWREV_8105E_SPIN1:
1411 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
1412 		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
1413 		    RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD;
1414 		break;
1415 	case RL_HWREV_8402:
1416 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
1417 		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
1418 		    RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD |
1419 		    RL_FLAG_CMDSTOP_WAIT_TXQ;
1420 		break;
1421 	case RL_HWREV_8168B_SPIN1:
1422 	case RL_HWREV_8168B_SPIN2:
1423 		sc->rl_flags |= RL_FLAG_WOLRXENB;
1424 		/* FALLTHROUGH */
1425 	case RL_HWREV_8168B_SPIN3:
1426 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT;
1427 		break;
1428 	case RL_HWREV_8168C_SPIN2:
1429 		sc->rl_flags |= RL_FLAG_MACSLEEP;
1430 		/* FALLTHROUGH */
1431 	case RL_HWREV_8168C:
1432 		if ((hwrev & 0x00700000) == 0x00200000)
1433 			sc->rl_flags |= RL_FLAG_MACSLEEP;
1434 		/* FALLTHROUGH */
1435 	case RL_HWREV_8168CP:
1436 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
1437 		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
1438 		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK;
1439 		break;
1440 	case RL_HWREV_8168D:
1441 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
1442 		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
1443 		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
1444 		    RL_FLAG_WOL_MANLINK;
1445 		break;
1446 	case RL_HWREV_8168DP:
1447 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
1448 		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_AUTOPAD |
1449 		    RL_FLAG_JUMBOV2 | RL_FLAG_WAIT_TXPOLL | RL_FLAG_WOL_MANLINK;
1450 		break;
1451 	case RL_HWREV_8168E:
1452 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
1453 		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
1454 		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
1455 		    RL_FLAG_WOL_MANLINK;
1456 		break;
1457 	case RL_HWREV_8168E_VL:
1458 	case RL_HWREV_8168F:
1459 	case RL_HWREV_8411:
1460 		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
1461 		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
1462 		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
1463 		    RL_FLAG_CMDSTOP_WAIT_TXQ | RL_FLAG_WOL_MANLINK;
1464 		break;
1465 	case RL_HWREV_8169_8110SB:
1466 	case RL_HWREV_8169_8110SBL:
1467 	case RL_HWREV_8169_8110SC:
1468 	case RL_HWREV_8169_8110SCE:
1469 		sc->rl_flags |= RL_FLAG_PHYWAKE;
1470 		/* FALLTHROUGH */
1471 	case RL_HWREV_8169:
1472 	case RL_HWREV_8169S:
1473 	case RL_HWREV_8110S:
1474 		sc->rl_flags |= RL_FLAG_MACRESET;
1475 		break;
1476 	default:
1477 		break;
1478 	}
1479 
1480 	if (sc->rl_hwrev->rl_rev == RL_HWREV_8139CPLUS) {
1481 		sc->rl_cfg0 = RL_8139_CFG0;
1482 		sc->rl_cfg1 = RL_8139_CFG1;
1483 		sc->rl_cfg2 = 0;
1484 		sc->rl_cfg3 = RL_8139_CFG3;
1485 		sc->rl_cfg4 = RL_8139_CFG4;
1486 		sc->rl_cfg5 = RL_8139_CFG5;
1487 	} else {
1488 		sc->rl_cfg0 = RL_CFG0;
1489 		sc->rl_cfg1 = RL_CFG1;
1490 		sc->rl_cfg2 = RL_CFG2;
1491 		sc->rl_cfg3 = RL_CFG3;
1492 		sc->rl_cfg4 = RL_CFG4;
1493 		sc->rl_cfg5 = RL_CFG5;
1494 	}
1495 
1496 	/* Reset the adapter. */
1497 	RL_LOCK(sc);
1498 	re_reset(sc);
1499 	RL_UNLOCK(sc);
1500 
1501 	/* Enable PME. */
1502 	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
1503 	cfg = CSR_READ_1(sc, sc->rl_cfg1);
1504 	cfg |= RL_CFG1_PME;
1505 	CSR_WRITE_1(sc, sc->rl_cfg1, cfg);
1506 	cfg = CSR_READ_1(sc, sc->rl_cfg5);
1507 	cfg &= RL_CFG5_PME_STS;
1508 	CSR_WRITE_1(sc, sc->rl_cfg5, cfg);
1509 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
1510 
1511 	if ((sc->rl_flags & RL_FLAG_PAR) != 0) {
1512 		/*
1513 		 * XXX Should have a better way to extract station
1514 		 * address from EEPROM.
1515 		 */
1516 		for (i = 0; i < ETHER_ADDR_LEN; i++)
1517 			eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
1518 	} else {
1519 		sc->rl_eewidth = RL_9356_ADDR_LEN;
1520 		re_read_eeprom(sc, (caddr_t)&re_did, 0, 1);
1521 		if (re_did != 0x8129)
1522 			sc->rl_eewidth = RL_9346_ADDR_LEN;
1523 
1524 		/*
1525 		 * Get station address from the EEPROM.
1526 		 */
1527 		re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3);
1528 		for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
1529 			as[i] = le16toh(as[i]);
1530 		bcopy(as, eaddr, ETHER_ADDR_LEN);
1531 	}
1532 
1533 	if (sc->rl_type == RL_8169) {
1534 		/* Set RX length mask and number of descriptors. */
1535 		sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN;
1536 		sc->rl_txstart = RL_GTXSTART;
1537 		sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT;
1538 		sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT;
1539 	} else {
1540 		/* Set RX length mask and number of descriptors. */
1541 		sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN;
1542 		sc->rl_txstart = RL_TXSTART;
1543 		sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT;
1544 		sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT;
1545 	}
1546 
1547 	error = re_allocmem(dev, sc);
1548 	if (error)
1549 		goto fail;
1550 	re_add_sysctls(sc);
1551 
1552 	ifp = sc->rl_ifp = if_alloc(IFT_ETHER);
1553 	if (ifp == NULL) {
1554 		device_printf(dev, "can not if_alloc()\n");
1555 		error = ENOSPC;
1556 		goto fail;
1557 	}
1558 
1559 	/* Take controller out of deep sleep mode. */
1560 	if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
1561 		if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
1562 			CSR_WRITE_1(sc, RL_GPIO,
1563 			    CSR_READ_1(sc, RL_GPIO) | 0x01);
1564 		else
1565 			CSR_WRITE_1(sc, RL_GPIO,
1566 			    CSR_READ_1(sc, RL_GPIO) & ~0x01);
1567 	}
1568 
1569 	/* Take PHY out of power down mode. */
1570 	if ((sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0) {
1571 		CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80);
1572 		if (hw_rev->rl_rev == RL_HWREV_8401E)
1573 			CSR_WRITE_1(sc, 0xD1, CSR_READ_1(sc, 0xD1) & ~0x08);
1574 	}
1575 	if ((sc->rl_flags & RL_FLAG_PHYWAKE) != 0) {
1576 		re_gmii_writereg(dev, 1, 0x1f, 0);
1577 		re_gmii_writereg(dev, 1, 0x0e, 0);
1578 	}
1579 
1580 	ifp->if_softc = sc;
1581 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1582 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1583 	ifp->if_ioctl = re_ioctl;
1584 	ifp->if_start = re_start;
1585 	/*
1586 	 * The RTL8168/8111C generates frames with bad IP checksums if the
1587 	 * packet has IP options, so disable TX IP checksum offloading.
1588 	 */
1589 	if (sc->rl_hwrev->rl_rev == RL_HWREV_8168C ||
1590 	    sc->rl_hwrev->rl_rev == RL_HWREV_8168C_SPIN2 ||
1591 	    sc->rl_hwrev->rl_rev == RL_HWREV_8168CP)
1592 		ifp->if_hwassist = CSUM_TCP | CSUM_UDP;
1593 	else
1594 		ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP;
1595 	ifp->if_hwassist |= CSUM_TSO;
1596 	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4;
1597 	ifp->if_capenable = ifp->if_capabilities;
1598 	ifp->if_init = re_init;
1599 	IFQ_SET_MAXLEN(&ifp->if_snd, RL_IFQ_MAXLEN);
1600 	ifp->if_snd.ifq_drv_maxlen = RL_IFQ_MAXLEN;
1601 	IFQ_SET_READY(&ifp->if_snd);
1602 
1603 	TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc);
1604 
1605 #define	RE_PHYAD_INTERNAL	 0
1606 
1607 	/* Do MII setup. */
1608 	phy = RE_PHYAD_INTERNAL;
1609 	if (sc->rl_type == RL_8169)
1610 		phy = 1;
1611 	error = mii_attach(dev, &sc->rl_miibus, ifp, re_ifmedia_upd,
1612 	    re_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, MIIF_DOPAUSE);
1613 	if (error != 0) {
1614 		device_printf(dev, "attaching PHYs failed\n");
1615 		goto fail;
1616 	}
1617 
1618 	/*
1619 	 * Call MI attach routine.
1620 	 */
1621 	ether_ifattach(ifp, eaddr);
1622 
1623 	/* VLAN capability setup */
1624 	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
1625 	if (ifp->if_capabilities & IFCAP_HWCSUM)
1626 		ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
1627 	/* Enable WOL if PM is supported. */
1628 	if (pci_find_cap(sc->rl_dev, PCIY_PMG, &reg) == 0)
1629 		ifp->if_capabilities |= IFCAP_WOL;
1630 	ifp->if_capenable = ifp->if_capabilities;
1631 	ifp->if_capenable &= ~(IFCAP_WOL_UCAST | IFCAP_WOL_MCAST);
1632 	/*
1633 	 * Don't enable TSO by default.  It is known to generate
1634 	 * corrupted TCP segments (bad TCP options) under certain
1635 	 * circumstances.
1636 	 */
1637 	ifp->if_hwassist &= ~CSUM_TSO;
1638 	ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_VLAN_HWTSO);
1639 #ifdef DEVICE_POLLING
1640 	ifp->if_capabilities |= IFCAP_POLLING;
1641 #endif
1642 	/*
1643 	 * Tell the upper layer(s) we support long frames.
1644 	 * Must appear after the call to ether_ifattach() because
1645 	 * ether_ifattach() sets ifi_hdrlen to the default value.
1646 	 */
1647 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1648 
1649 #ifdef DEV_NETMAP
1650 	re_netmap_attach(sc);
1651 #endif /* DEV_NETMAP */
1652 #ifdef RE_DIAG
1653 	/*
1654 	 * Perform hardware diagnostic on the original RTL8169.
1655 	 * Some 32-bit cards were incorrectly wired and would
1656 	 * malfunction if plugged into a 64-bit slot.
1657 	 */
1658 
1659 	if (hwrev == RL_HWREV_8169) {
1660 		error = re_diag(sc);
1661 		if (error) {
1662 			device_printf(dev,
1663 			    "attach aborted due to hardware diag failure\n");
1664 			ether_ifdetach(ifp);
1665 			goto fail;
1666 		}
1667 	}
1668 #endif
1669 
1670 #ifdef RE_TX_MODERATION
1671 	intr_filter = 1;
1672 #endif
1673 	/* Hook interrupt last to avoid having to lock softc */
1674 	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 &&
1675 	    intr_filter == 0) {
1676 		error = bus_setup_intr(dev, sc->rl_irq[0],
1677 		    INTR_TYPE_NET | INTR_MPSAFE, NULL, re_intr_msi, sc,
1678 		    &sc->rl_intrhand[0]);
1679 	} else {
1680 		error = bus_setup_intr(dev, sc->rl_irq[0],
1681 		    INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc,
1682 		    &sc->rl_intrhand[0]);
1683 	}
1684 	if (error) {
1685 		device_printf(dev, "couldn't set up irq\n");
1686 		ether_ifdetach(ifp);
1687 	}
1688 
1689 fail:
1690 
1691 	if (error)
1692 		re_detach(dev);
1693 
1694 	return (error);
1695 }
1696 
1697 /*
1698  * Shutdown hardware and free up resources. This can be called any
1699  * time after the mutex has been initialized. It is called in both
1700  * the error case in attach and the normal detach case so it needs
1701  * to be careful about only freeing resources that have actually been
1702  * allocated.
1703  */
1704 static int
1705 re_detach(device_t dev)
1706 {
1707 	struct rl_softc		*sc;
1708 	struct ifnet		*ifp;
1709 	int			i, rid;
1710 
1711 	sc = device_get_softc(dev);
1712 	ifp = sc->rl_ifp;
1713 	KASSERT(mtx_initialized(&sc->rl_mtx), ("re mutex not initialized"));
1714 
1715 	/* These should only be active if attach succeeded */
1716 	if (device_is_attached(dev)) {
1717 #ifdef DEVICE_POLLING
1718 		if (ifp->if_capenable & IFCAP_POLLING)
1719 			ether_poll_deregister(ifp);
1720 #endif
1721 		RL_LOCK(sc);
1722 #if 0
1723 		sc->suspended = 1;
1724 #endif
1725 		re_stop(sc);
1726 		RL_UNLOCK(sc);
1727 		callout_drain(&sc->rl_stat_callout);
1728 		taskqueue_drain(taskqueue_fast, &sc->rl_inttask);
1729 		/*
1730 		 * Force off the IFF_UP flag here, in case someone
1731 		 * still had a BPF descriptor attached to this
1732 		 * interface. If they do, ether_ifdetach() will cause
1733 		 * the BPF code to try to clear the promisc mode
1734 		 * flag, which will bubble down to re_ioctl(),
1735 		 * which will try to call re_init() again. This will
1736 		 * turn the NIC back on and restart the MII ticker,
1737 		 * which will panic the system when the kernel tries
1738 		 * to invoke the re_tick() function that isn't there
1739 		 * anymore.
1740 		 */
1741 		ifp->if_flags &= ~IFF_UP;
1742 		ether_ifdetach(ifp);
1743 	}
1744 	if (sc->rl_miibus)
1745 		device_delete_child(dev, sc->rl_miibus);
1746 	bus_generic_detach(dev);
1747 
1748 	/*
1749 	 * The rest is resource deallocation, so we should already be
1750 	 * stopped here.
1751 	 */
1752 
1753 	if (sc->rl_intrhand[0] != NULL) {
1754 		bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]);
1755 		sc->rl_intrhand[0] = NULL;
1756 	}
1757 	if (ifp != NULL) {
1758 #ifdef DEV_NETMAP
1759 		netmap_detach(ifp);
1760 #endif /* DEV_NETMAP */
1761 		if_free(ifp);
1762 	}
1763 	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0)
1764 		rid = 0;
1765 	else
1766 		rid = 1;
1767 	if (sc->rl_irq[0] != NULL) {
1768 		bus_release_resource(dev, SYS_RES_IRQ, rid, sc->rl_irq[0]);
1769 		sc->rl_irq[0] = NULL;
1770 	}
1771 	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0)
1772 		pci_release_msi(dev);
1773 	if (sc->rl_res_pba) {
1774 		rid = PCIR_BAR(4);
1775 		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->rl_res_pba);
1776 	}
1777 	if (sc->rl_res)
1778 		bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id,
1779 		    sc->rl_res);
1780 
1781 	/* Unload and free the RX DMA ring memory and map */
1782 
1783 	if (sc->rl_ldata.rl_rx_list_tag) {
1784 		if (sc->rl_ldata.rl_rx_list_map)
1785 			bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag,
1786 			    sc->rl_ldata.rl_rx_list_map);
1787 		if (sc->rl_ldata.rl_rx_list_map && sc->rl_ldata.rl_rx_list)
1788 			bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag,
1789 			    sc->rl_ldata.rl_rx_list,
1790 			    sc->rl_ldata.rl_rx_list_map);
1791 		bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag);
1792 	}
1793 
1794 	/* Unload and free the TX DMA ring memory and map */
1795 
1796 	if (sc->rl_ldata.rl_tx_list_tag) {
1797 		if (sc->rl_ldata.rl_tx_list_map)
1798 			bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag,
1799 			    sc->rl_ldata.rl_tx_list_map);
1800 		if (sc->rl_ldata.rl_tx_list_map && sc->rl_ldata.rl_tx_list)
1801 			bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag,
1802 			    sc->rl_ldata.rl_tx_list,
1803 			    sc->rl_ldata.rl_tx_list_map);
1804 		bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag);
1805 	}
1806 
1807 	/* Destroy all the RX and TX buffer maps */
1808 
1809 	if (sc->rl_ldata.rl_tx_mtag) {
1810 		for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
1811 			if (sc->rl_ldata.rl_tx_desc[i].tx_dmamap)
1812 				bus_dmamap_destroy(sc->rl_ldata.rl_tx_mtag,
1813 				    sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
1814 		}
1815 		bus_dma_tag_destroy(sc->rl_ldata.rl_tx_mtag);
1816 	}
1817 	if (sc->rl_ldata.rl_rx_mtag) {
1818 		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1819 			if (sc->rl_ldata.rl_rx_desc[i].rx_dmamap)
1820 				bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
1821 				    sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
1822 		}
1823 		if (sc->rl_ldata.rl_rx_sparemap)
1824 			bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
1825 			    sc->rl_ldata.rl_rx_sparemap);
1826 		bus_dma_tag_destroy(sc->rl_ldata.rl_rx_mtag);
1827 	}
1828 	if (sc->rl_ldata.rl_jrx_mtag) {
1829 		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1830 			if (sc->rl_ldata.rl_jrx_desc[i].rx_dmamap)
1831 				bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag,
1832 				    sc->rl_ldata.rl_jrx_desc[i].rx_dmamap);
1833 		}
1834 		if (sc->rl_ldata.rl_jrx_sparemap)
1835 			bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag,
1836 			    sc->rl_ldata.rl_jrx_sparemap);
1837 		bus_dma_tag_destroy(sc->rl_ldata.rl_jrx_mtag);
1838 	}
1839 	/* Unload and free the stats buffer and map */
1840 
1841 	if (sc->rl_ldata.rl_stag) {
1842 		if (sc->rl_ldata.rl_smap)
1843 			bus_dmamap_unload(sc->rl_ldata.rl_stag,
1844 			    sc->rl_ldata.rl_smap);
1845 		if (sc->rl_ldata.rl_smap && sc->rl_ldata.rl_stats)
1846 			bus_dmamem_free(sc->rl_ldata.rl_stag,
1847 			    sc->rl_ldata.rl_stats, sc->rl_ldata.rl_smap);
1848 		bus_dma_tag_destroy(sc->rl_ldata.rl_stag);
1849 	}
1850 
1851 	if (sc->rl_parent_tag)
1852 		bus_dma_tag_destroy(sc->rl_parent_tag);
1853 
1854 	mtx_destroy(&sc->rl_mtx);
1855 
1856 	return (0);
1857 }
1858 
1859 static __inline void
1860 re_discard_rxbuf(struct rl_softc *sc, int idx)
1861 {
1862 	struct rl_desc		*desc;
1863 	struct rl_rxdesc	*rxd;
1864 	uint32_t		cmdstat;
1865 
1866 	if (sc->rl_ifp->if_mtu > RL_MTU &&
1867 	    (sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
1868 		rxd = &sc->rl_ldata.rl_jrx_desc[idx];
1869 	else
1870 		rxd = &sc->rl_ldata.rl_rx_desc[idx];
1871 	desc = &sc->rl_ldata.rl_rx_list[idx];
1872 	desc->rl_vlanctl = 0;
1873 	cmdstat = rxd->rx_size;
1874 	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
1875 		cmdstat |= RL_RDESC_CMD_EOR;
1876 	desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
1877 }
1878 
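/*
 * A note on the hand-off above (illustrative, not part of the
 * driver): the host owns a descriptor while OWN is clear; writing
 * cmdstat with OWN set returns it to the NIC, and the EOR bit on the
 * last slot makes the chip wrap back to slot 0 on its own.  The host
 * mirrors that wrap with masked index arithmetic, which is what a
 * macro such as RL_RX_DESC_NXT() amounts to for a power-of-two ring:
 */
#ifdef notdef
	idx = (idx + 1) & (sc->rl_ldata.rl_rx_desc_cnt - 1);
#endif
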
1879 static int
1880 re_newbuf(struct rl_softc *sc, int idx)
1881 {
1882 	struct mbuf		*m;
1883 	struct rl_rxdesc	*rxd;
1884 	bus_dma_segment_t	segs[1];
1885 	bus_dmamap_t		map;
1886 	struct rl_desc		*desc;
1887 	uint32_t		cmdstat;
1888 	int			error, nsegs;
1889 
1890 	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1891 	if (m == NULL)
1892 		return (ENOBUFS);
1893 
1894 	m->m_len = m->m_pkthdr.len = MCLBYTES;
1895 #ifdef RE_FIXUP_RX
1896 	/*
1897 	 * This is part of an evil trick to deal with non-x86 platforms.
1898 	 * The RealTek chip requires RX buffers to be aligned on 64-bit
1899 	 * boundaries, but that will hose non-x86 machines. To get around
1900 	 * this, we leave some empty space at the start of each buffer
1901 	 * and for non-x86 hosts, we copy the buffer back six bytes
1902 	 * to achieve word alignment. This is slightly more efficient
1903 	 * than allocating a new buffer, copying the contents, and
1904 	 * discarding the old buffer.
1905 	 */
1906 	m_adj(m, RE_ETHER_ALIGN);
1907 #endif
1908 	error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_rx_mtag,
1909 	    sc->rl_ldata.rl_rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
1910 	if (error != 0) {
1911 		m_freem(m);
1912 		return (ENOBUFS);
1913 	}
1914 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1915 
1916 	rxd = &sc->rl_ldata.rl_rx_desc[idx];
1917 	if (rxd->rx_m != NULL) {
1918 		bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap,
1919 		    BUS_DMASYNC_POSTREAD);
1920 		bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap);
1921 	}
1922 
1923 	rxd->rx_m = m;
1924 	map = rxd->rx_dmamap;
1925 	rxd->rx_dmamap = sc->rl_ldata.rl_rx_sparemap;
1926 	rxd->rx_size = segs[0].ds_len;
1927 	sc->rl_ldata.rl_rx_sparemap = map;
1928 	bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap,
1929 	    BUS_DMASYNC_PREREAD);
1930 
1931 	desc = &sc->rl_ldata.rl_rx_list[idx];
1932 	desc->rl_vlanctl = 0;
1933 	desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr));
1934 	desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr));
1935 	cmdstat = segs[0].ds_len;
1936 	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
1937 		cmdstat |= RL_RDESC_CMD_EOR;
1938 	desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
1939 
1940 	return (0);
1941 }
1942 
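/*
 * The spare-map idiom above, in outline (illustrative; "tag",
 * "sparemap" and "tmp" are placeholder names): the replacement mbuf
 * is always loaded into a dedicated spare map first, so a DMA load
 * failure leaves the ring slot's existing mbuf and mapping intact and
 * the receiver never loses a buffer it already had.  Only on success
 * are the two maps swapped.
 */
#ifdef notdef
	if (bus_dmamap_load_mbuf_sg(tag, sparemap, m, segs, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);		/* old buffer is still loaded */
		return (ENOBUFS);
	}
	tmp = rxd->rx_dmamap;		/* commit: swap the two maps */
	rxd->rx_dmamap = sparemap;
	sparemap = tmp;
#endif
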
1943 static int
1944 re_jumbo_newbuf(struct rl_softc *sc, int idx)
1945 {
1946 	struct mbuf		*m;
1947 	struct rl_rxdesc	*rxd;
1948 	bus_dma_segment_t	segs[1];
1949 	bus_dmamap_t		map;
1950 	struct rl_desc		*desc;
1951 	uint32_t		cmdstat;
1952 	int			error, nsegs;
1953 
1954 	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
1955 	if (m == NULL)
1956 		return (ENOBUFS);
1957 	m->m_len = m->m_pkthdr.len = MJUM9BYTES;
1958 #ifdef RE_FIXUP_RX
1959 	m_adj(m, RE_ETHER_ALIGN);
1960 #endif
1961 	error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_jrx_mtag,
1962 	    sc->rl_ldata.rl_jrx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
1963 	if (error != 0) {
1964 		m_freem(m);
1965 		return (ENOBUFS);
1966 	}
1967 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1968 
1969 	rxd = &sc->rl_ldata.rl_jrx_desc[idx];
1970 	if (rxd->rx_m != NULL) {
1971 		bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap,
1972 		    BUS_DMASYNC_POSTREAD);
1973 		bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap);
1974 	}
1975 
1976 	rxd->rx_m = m;
1977 	map = rxd->rx_dmamap;
1978 	rxd->rx_dmamap = sc->rl_ldata.rl_jrx_sparemap;
1979 	rxd->rx_size = segs[0].ds_len;
1980 	sc->rl_ldata.rl_jrx_sparemap = map;
1981 	bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap,
1982 	    BUS_DMASYNC_PREREAD);
1983 
1984 	desc = &sc->rl_ldata.rl_rx_list[idx];
1985 	desc->rl_vlanctl = 0;
1986 	desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr));
1987 	desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr));
1988 	cmdstat = segs[0].ds_len;
1989 	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
1990 		cmdstat |= RL_RDESC_CMD_EOR;
1991 	desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
1992 
1993 	return (0);
1994 }
1995 
1996 #ifdef RE_FIXUP_RX
1997 static __inline void
1998 re_fixup_rx(struct mbuf *m)
1999 {
2000 	int                     i;
2001 	uint16_t                *src, *dst;
2002 
2003 	src = mtod(m, uint16_t *);
2004 	dst = src - (RE_ETHER_ALIGN - ETHER_ALIGN) / sizeof *src;
2005 
2006 	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
2007 		*dst++ = *src++;
2008 
2009 	m->m_data -= RE_ETHER_ALIGN - ETHER_ALIGN;
2010 }
2011 #endif
2012 
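/*
 * Worked example of the fixup above, assuming RE_ETHER_ALIGN is 8 and
 * ETHER_ALIGN is 2 (consistent with the six-byte copy described in
 * re_newbuf(); "buf" and "len" below are placeholders): the chip DMAs
 * into an 8-byte-aligned buffer, and the copy slides the frame back
 * 6 bytes so it starts at offset 2; the 14-byte Ethernet header then
 * ends at offset 16, leaving the IP header 4-byte aligned for
 * strict-alignment CPUs.
 */
#ifdef notdef
	m_adj(m, 8);			/* offset 8: 64-bit aligned DMA */
	/* ... after the chip fills the buffer ... */
	bcopy(buf + 8, buf + 2, len);	/* slide frame back 6 bytes */
	/* offset 2 + ETHER_HDR_LEN (14) == 16: IP header aligned */
#endif
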
2013 static int
2014 re_tx_list_init(struct rl_softc *sc)
2015 {
2016 	struct rl_desc		*desc;
2017 	int			i;
2018 
2019 	RL_LOCK_ASSERT(sc);
2020 
2021 	bzero(sc->rl_ldata.rl_tx_list,
2022 	    sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc));
2023 	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++)
2024 		sc->rl_ldata.rl_tx_desc[i].tx_m = NULL;
2025 #ifdef DEV_NETMAP
2026 	re_netmap_tx_init(sc);
2027 #endif /* DEV_NETMAP */
2028 	/* Set EOR. */
2029 	desc = &sc->rl_ldata.rl_tx_list[sc->rl_ldata.rl_tx_desc_cnt - 1];
2030 	desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOR);
2031 
2032 	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
2033 	    sc->rl_ldata.rl_tx_list_map,
2034 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2035 
2036 	sc->rl_ldata.rl_tx_prodidx = 0;
2037 	sc->rl_ldata.rl_tx_considx = 0;
2038 	sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt;
2039 
2040 	return (0);
2041 }
2042 
2043 static int
2044 re_rx_list_init(struct rl_softc *sc)
2045 {
2046 	int			error, i;
2047 
2048 	bzero(sc->rl_ldata.rl_rx_list,
2049 	    sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc));
2050 	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
2051 		sc->rl_ldata.rl_rx_desc[i].rx_m = NULL;
2052 		if ((error = re_newbuf(sc, i)) != 0)
2053 			return (error);
2054 	}
2055 #ifdef DEV_NETMAP
2056 	re_netmap_rx_init(sc);
2057 #endif /* DEV_NETMAP */
2058 
2059 	/* Flush the RX descriptors */
2060 
2061 	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2062 	    sc->rl_ldata.rl_rx_list_map,
2063 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2064 
2065 	sc->rl_ldata.rl_rx_prodidx = 0;
2066 	sc->rl_head = sc->rl_tail = NULL;
2067 	sc->rl_int_rx_act = 0;
2068 
2069 	return (0);
2070 }
2071 
2072 static int
2073 re_jrx_list_init(struct rl_softc *sc)
2074 {
2075 	int			error, i;
2076 
2077 	bzero(sc->rl_ldata.rl_rx_list,
2078 	    sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc));
2079 	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
2080 		sc->rl_ldata.rl_jrx_desc[i].rx_m = NULL;
2081 		if ((error = re_jumbo_newbuf(sc, i)) != 0)
2082 			return (error);
2083 	}
2084 
2085 	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2086 	    sc->rl_ldata.rl_rx_list_map,
2087 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2088 
2089 	sc->rl_ldata.rl_rx_prodidx = 0;
2090 	sc->rl_head = sc->rl_tail = NULL;
2091 	sc->rl_int_rx_act = 0;
2092 
2093 	return (0);
2094 }
2095 
2096 /*
2097  * RX handler for C+ and 8169. For the gigE chips, we support
2098  * the reception of jumbo frames that have been fragmented
2099  * across multiple 2K mbuf cluster buffers.
2100  */
2101 static int
2102 re_rxeof(struct rl_softc *sc, int *rx_npktsp)
2103 {
2104 	struct mbuf		*m;
2105 	struct ifnet		*ifp;
2106 	int			i, rxerr, total_len;
2107 	struct rl_desc		*cur_rx;
2108 	u_int32_t		rxstat, rxvlan;
2109 	int			jumbo, maxpkt = 16, rx_npkts = 0;
2110 
2111 	RL_LOCK_ASSERT(sc);
2112 
2113 	ifp = sc->rl_ifp;
2114 #ifdef DEV_NETMAP
2115 	if (netmap_rx_irq(ifp, 0 | (NETMAP_LOCKED_ENTER|NETMAP_LOCKED_EXIT),
2116 	    &rx_npkts))
2117 		return (0);
2118 #endif /* DEV_NETMAP */
2119 	if (ifp->if_mtu > RL_MTU && (sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
2120 		jumbo = 1;
2121 	else
2122 		jumbo = 0;
2123 
2124 	/* Invalidate the descriptor memory */
2125 
2126 	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2127 	    sc->rl_ldata.rl_rx_list_map,
2128 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2129 
2130 	for (i = sc->rl_ldata.rl_rx_prodidx; maxpkt > 0;
2131 	    i = RL_RX_DESC_NXT(sc, i)) {
2132 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2133 			break;
2134 		cur_rx = &sc->rl_ldata.rl_rx_list[i];
2135 		rxstat = le32toh(cur_rx->rl_cmdstat);
2136 		if ((rxstat & RL_RDESC_STAT_OWN) != 0)
2137 			break;
2138 		total_len = rxstat & sc->rl_rxlenmask;
2139 		rxvlan = le32toh(cur_rx->rl_vlanctl);
2140 		if (jumbo != 0)
2141 			m = sc->rl_ldata.rl_jrx_desc[i].rx_m;
2142 		else
2143 			m = sc->rl_ldata.rl_rx_desc[i].rx_m;
2144 
2145 		if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
2146 		    (rxstat & (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) !=
2147 		    (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) {
2148 			/*
2149 			 * RTL8168C or later controllers do not
2150 			 * support multi-fragment packets.
2151 			 */
2152 			re_discard_rxbuf(sc, i);
2153 			continue;
2154 		} else if ((rxstat & RL_RDESC_STAT_EOF) == 0) {
2155 			if (re_newbuf(sc, i) != 0) {
2156 				/*
2157 				 * If this is part of a multi-fragment packet,
2158 				 * discard all the pieces.
2159 				 */
2160 				if (sc->rl_head != NULL) {
2161 					m_freem(sc->rl_head);
2162 					sc->rl_head = sc->rl_tail = NULL;
2163 				}
2164 				re_discard_rxbuf(sc, i);
2165 				continue;
2166 			}
2167 			m->m_len = RE_RX_DESC_BUFLEN;
2168 			if (sc->rl_head == NULL)
2169 				sc->rl_head = sc->rl_tail = m;
2170 			else {
2171 				m->m_flags &= ~M_PKTHDR;
2172 				sc->rl_tail->m_next = m;
2173 				sc->rl_tail = m;
2174 			}
2175 			continue;
2176 		}
2177 
2178 		/*
2179 		 * NOTE: for the 8139C+, the frame length field
2180 		 * is always 12 bits in size, but for the gigE chips,
2181 		 * it is 13 bits (since the max RX frame length is 16K).
2182 		 * Unfortunately, all 32 bits in the status word
2183 		 * were already used, so to make room for the extra
2184 		 * length bit, RealTek took out the 'frame alignment
2185 		 * error' bit and shifted the other status bits
2186 		 * over one slot. The OWN, EOR, FS and LS bits are
2187 		 * still in the same places. We have already extracted
2188 		 * the frame length and checked the OWN bit, so rather
2189 		 * than using an alternate bit mapping, we shift the
2190 		 * status bits one space to the right so we can evaluate
2191 		 * them using the 8169 status as though it was in the
2192 		 * them using the 8169 status as though it were in the
2193 		 */
2194 		if (sc->rl_type == RL_8169)
2195 			rxstat >>= 1;
2196 
2197 		/*
2198 	 * If total_len > 2^13-1, both _RXERRSUM and _GIANT will be
2199 		 * set, but if CRC is clear, it will still be a valid frame.
2200 		 */
2201 		if ((rxstat & RL_RDESC_STAT_RXERRSUM) != 0) {
2202 			rxerr = 1;
2203 			if ((sc->rl_flags & RL_FLAG_JUMBOV2) == 0 &&
2204 			    total_len > 8191 &&
2205 			    (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT)
2206 				rxerr = 0;
2207 			if (rxerr != 0) {
2208 				ifp->if_ierrors++;
2209 				/*
2210 				 * If this is part of a multi-fragment packet,
2211 				 * discard all the pieces.
2212 				 */
2213 				if (sc->rl_head != NULL) {
2214 					m_freem(sc->rl_head);
2215 					sc->rl_head = sc->rl_tail = NULL;
2216 				}
2217 				re_discard_rxbuf(sc, i);
2218 				continue;
2219 			}
2220 		}
2221 
2222 		/*
2223 		 * If allocating a replacement mbuf fails,
2224 		 * reload the current one.
2225 		 */
2226 		if (jumbo != 0)
2227 			rxerr = re_jumbo_newbuf(sc, i);
2228 		else
2229 			rxerr = re_newbuf(sc, i);
2230 		if (rxerr != 0) {
2231 			ifp->if_iqdrops++;
2232 			if (sc->rl_head != NULL) {
2233 				m_freem(sc->rl_head);
2234 				sc->rl_head = sc->rl_tail = NULL;
2235 			}
2236 			re_discard_rxbuf(sc, i);
2237 			continue;
2238 		}
2239 
2240 		if (sc->rl_head != NULL) {
2241 			if (jumbo != 0)
2242 				m->m_len = total_len;
2243 			else {
2244 				m->m_len = total_len % RE_RX_DESC_BUFLEN;
2245 				if (m->m_len == 0)
2246 					m->m_len = RE_RX_DESC_BUFLEN;
2247 			}
2248 			/*
2249 			 * Special case: if there are 4 bytes or fewer
2250 			 * in this buffer, the mbuf can be discarded:
2251 			 * the last 4 bytes are the CRC, which we don't
2252 			 * care about anyway.
2253 			 */
2254 			if (m->m_len <= ETHER_CRC_LEN) {
2255 				sc->rl_tail->m_len -=
2256 				    (ETHER_CRC_LEN - m->m_len);
2257 				m_freem(m);
2258 			} else {
2259 				m->m_len -= ETHER_CRC_LEN;
2260 				m->m_flags &= ~M_PKTHDR;
2261 				sc->rl_tail->m_next = m;
2262 			}
2263 			m = sc->rl_head;
2264 			sc->rl_head = sc->rl_tail = NULL;
2265 			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
2266 		} else
2267 			m->m_pkthdr.len = m->m_len =
2268 			    (total_len - ETHER_CRC_LEN);
2269 
2270 #ifdef RE_FIXUP_RX
2271 		re_fixup_rx(m);
2272 #endif
2273 		ifp->if_ipackets++;
2274 		m->m_pkthdr.rcvif = ifp;
2275 
2276 		/* Do RX checksumming if enabled */
2277 
2278 		if (ifp->if_capenable & IFCAP_RXCSUM) {
2279 			if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
2280 				/* Check IP header checksum */
2281 				if (rxstat & RL_RDESC_STAT_PROTOID)
2282 					m->m_pkthdr.csum_flags |=
2283 					    CSUM_IP_CHECKED;
2284 				if (!(rxstat & RL_RDESC_STAT_IPSUMBAD))
2285 					m->m_pkthdr.csum_flags |=
2286 					    CSUM_IP_VALID;
2287 
2288 				/* Check TCP/UDP checksum */
2289 				if ((RL_TCPPKT(rxstat) &&
2290 				    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
2291 				    (RL_UDPPKT(rxstat) &&
2292 				     !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
2293 					m->m_pkthdr.csum_flags |=
2294 						CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
2295 					m->m_pkthdr.csum_data = 0xffff;
2296 				}
2297 			} else {
2298 				/*
2299 				 * RTL8168C/RTL8168CP/RTL8111C/RTL8111CP
2300 				 */
2301 				if ((rxstat & RL_RDESC_STAT_PROTOID) &&
2302 				    (rxvlan & RL_RDESC_IPV4))
2303 					m->m_pkthdr.csum_flags |=
2304 					    CSUM_IP_CHECKED;
2305 				if (!(rxstat & RL_RDESC_STAT_IPSUMBAD) &&
2306 				    (rxvlan & RL_RDESC_IPV4))
2307 					m->m_pkthdr.csum_flags |=
2308 					    CSUM_IP_VALID;
2309 				if (((rxstat & RL_RDESC_STAT_TCP) &&
2310 				    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
2311 				    ((rxstat & RL_RDESC_STAT_UDP) &&
2312 				    !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
2313 					m->m_pkthdr.csum_flags |=
2314 						CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
2315 					m->m_pkthdr.csum_data = 0xffff;
2316 				}
2317 			}
2318 		}
2319 		maxpkt--;
2320 		if (rxvlan & RL_RDESC_VLANCTL_TAG) {
2321 			m->m_pkthdr.ether_vtag =
2322 			    bswap16((rxvlan & RL_RDESC_VLANCTL_DATA));
2323 			m->m_flags |= M_VLANTAG;
2324 		}
2325 		RL_UNLOCK(sc);
2326 		(*ifp->if_input)(ifp, m);
2327 		RL_LOCK(sc);
2328 		rx_npkts++;
2329 	}
2330 
2331 	/* Flush the RX DMA ring */
2332 
2333 	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2334 	    sc->rl_ldata.rl_rx_list_map,
2335 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2336 
2337 	sc->rl_ldata.rl_rx_prodidx = i;
2338 
2339 	if (rx_npktsp != NULL)
2340 		*rx_npktsp = rx_npkts;
2341 	if (maxpkt)
2342 		return (EAGAIN);
2343 
2344 	return (0);
2345 }
2346 
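/*
 * Worked example for the fragment-tail math above (numbers
 * illustrative; RE_RX_DESC_BUFLEN is taken as 2048 for round
 * figures): a 4000-byte frame plus the 4-byte CRC arrives as
 * 4004 bytes, split 2048 + 1956, so the tail fragment holds
 * 4004 % 2048 = 1956 bytes and trimming ETHER_CRC_LEN yields
 * m_pkthdr.len = 4000.  Had the frame been 4092 bytes, the 4096
 * received bytes would split into two full 2048-byte buffers and
 * the modulo result of 0 means "a full buffer", hence the special
 * case above; and a tail of 4 bytes or fewer is pure CRC, so it is
 * freed and the remainder trimmed from the previous fragment.
 */
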
2347 static void
2348 re_txeof(struct rl_softc *sc)
2349 {
2350 	struct ifnet		*ifp;
2351 	struct rl_txdesc	*txd;
2352 	u_int32_t		txstat;
2353 	int			cons;
2354 
2355 	cons = sc->rl_ldata.rl_tx_considx;
2356 	if (cons == sc->rl_ldata.rl_tx_prodidx)
2357 		return;
2358 
2359 	ifp = sc->rl_ifp;
2360 #ifdef DEV_NETMAP
2361 	if (netmap_tx_irq(ifp, 0 | (NETMAP_LOCKED_ENTER|NETMAP_LOCKED_EXIT)))
2362 		return;
2363 #endif /* DEV_NETMAP */
2364 	/* Invalidate the TX descriptor list */
2365 	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
2366 	    sc->rl_ldata.rl_tx_list_map,
2367 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2368 
2369 	for (; cons != sc->rl_ldata.rl_tx_prodidx;
2370 	    cons = RL_TX_DESC_NXT(sc, cons)) {
2371 		txstat = le32toh(sc->rl_ldata.rl_tx_list[cons].rl_cmdstat);
2372 		if (txstat & RL_TDESC_STAT_OWN)
2373 			break;
2374 		/*
2375 		 * We only stash mbufs in the last descriptor
2376 		 * in a fragment chain, which also happens to
2377 		 * be the only place where the TX status bits
2378 		 * are valid.
2379 		 */
2380 		if (txstat & RL_TDESC_CMD_EOF) {
2381 			txd = &sc->rl_ldata.rl_tx_desc[cons];
2382 			bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
2383 			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2384 			bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag,
2385 			    txd->tx_dmamap);
2386 			KASSERT(txd->tx_m != NULL,
2387 			    ("%s: freeing NULL mbufs!", __func__));
2388 			m_freem(txd->tx_m);
2389 			txd->tx_m = NULL;
2390 			if (txstat & (RL_TDESC_STAT_EXCESSCOL|
2391 			    RL_TDESC_STAT_COLCNT))
2392 				ifp->if_collisions++;
2393 			if (txstat & RL_TDESC_STAT_TXERRSUM)
2394 				ifp->if_oerrors++;
2395 			else
2396 				ifp->if_opackets++;
2397 		}
2398 		sc->rl_ldata.rl_tx_free++;
2399 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2400 	}
2401 	sc->rl_ldata.rl_tx_considx = cons;
2402 
2403 	/* No changes made to the TX ring, so no flush needed */
2404 
2405 	if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) {
2406 #ifdef RE_TX_MODERATION
2407 		/*
2408 		 * If not all descriptors have been reaped yet, reload
2409 		 * the timer so that we will eventually get another
2410 		 * interrupt that will cause us to re-enter this routine.
2411 		 * This is done in case the transmitter has gone idle.
2412 		 */
2413 		CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2414 #endif
2415 	} else
2416 		sc->rl_watchdog_timer = 0;
2417 }
2418 
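/*
 * Ring accounting in re_txeof(), reduced to its core loop
 * (illustrative; "ring", "prod", "ring_size" and "free_cnt" are
 * placeholders): the consumer index chases the producer index and
 * stops at the first descriptor the chip still owns; everything
 * behind it has completed and can be reclaimed.
 */
#ifdef notdef
	while (cons != prod) {
		if (le32toh(ring[cons].rl_cmdstat) & RL_TDESC_STAT_OWN)
			break;			/* chip still sending */
		free_cnt++;			/* descriptor reclaimed */
		cons = (cons + 1) & (ring_size - 1);
	}
#endif
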
2419 static void
2420 re_tick(void *xsc)
2421 {
2422 	struct rl_softc		*sc;
2423 	struct mii_data		*mii;
2424 
2425 	sc = xsc;
2426 
2427 	RL_LOCK_ASSERT(sc);
2428 
2429 	mii = device_get_softc(sc->rl_miibus);
2430 	mii_tick(mii);
2431 	if ((sc->rl_flags & RL_FLAG_LINK) == 0)
2432 		re_miibus_statchg(sc->rl_dev);
2433 	/*
2434 	 * Reclaim transmitted frames here. Technically it is not
2435 	 * necessary to do this here, but it ensures periodic reclamation
2436 	 * regardless of the Tx completion interrupt, which seems to be
2437 	 * lost on PCIe based controllers under certain situations.
2438 	 */
2439 	re_txeof(sc);
2440 	re_watchdog(sc);
2441 	callout_reset(&sc->rl_stat_callout, hz, re_tick, sc);
2442 }
2443 
2444 #ifdef DEVICE_POLLING
2445 static int
2446 re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2447 {
2448 	struct rl_softc *sc = ifp->if_softc;
2449 	int rx_npkts = 0;
2450 
2451 	RL_LOCK(sc);
2452 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2453 		rx_npkts = re_poll_locked(ifp, cmd, count);
2454 	RL_UNLOCK(sc);
2455 	return (rx_npkts);
2456 }
2457 
2458 static int
2459 re_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
2460 {
2461 	struct rl_softc *sc = ifp->if_softc;
2462 	int rx_npkts;
2463 
2464 	RL_LOCK_ASSERT(sc);
2465 
2466 	sc->rxcycles = count;
2467 	re_rxeof(sc, &rx_npkts);
2468 	re_txeof(sc);
2469 
2470 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2471 		re_start_locked(ifp);
2472 
2473 	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
2474 		u_int16_t       status;
2475 
2476 		status = CSR_READ_2(sc, RL_ISR);
2477 		if (status == 0xffff)
2478 			return (rx_npkts);
2479 		if (status)
2480 			CSR_WRITE_2(sc, RL_ISR, status);
2481 		if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
2482 		    (sc->rl_flags & RL_FLAG_PCIE))
2483 			CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2484 
2485 		/*
2486 		 * XXX check behaviour on receiver stalls.
2487 		 */
2488 
2489 		if (status & RL_ISR_SYSTEM_ERR) {
2490 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2491 			re_init_locked(sc);
2492 		}
2493 	}
2494 	return (rx_npkts);
2495 }
2496 #endif /* DEVICE_POLLING */
2497 
2498 static int
2499 re_intr(void *arg)
2500 {
2501 	struct rl_softc		*sc;
2502 	uint16_t		status;
2503 
2504 	sc = arg;
2505 
2506 	status = CSR_READ_2(sc, RL_ISR);
2507 	if (status == 0xFFFF || (status & RL_INTRS_CPLUS) == 0)
2508 		return (FILTER_STRAY);
2509 	CSR_WRITE_2(sc, RL_IMR, 0);
2510 
2511 	taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask);
2512 
2513 	return (FILTER_HANDLED);
2514 }
2515 
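/*
 * The filter/taskqueue split used above, in outline (illustrative;
 * the foo_* names are hypothetical): the filter runs in primary
 * interrupt context, so it only checks that the interrupt is ours,
 * masks further interrupts with one register write, and defers the
 * heavy RX/TX work to a taskqueue; re_int_task() re-enables
 * interrupts when it is done.
 */
#ifdef notdef
static int
foo_filter(void *arg)
{
	struct foo_softc *sc = arg;

	if (!foo_intr_is_ours(sc))
		return (FILTER_STRAY);
	foo_mask_intrs(sc);		/* single cheap register write */
	taskqueue_enqueue(taskqueue_fast, &sc->foo_task);
	return (FILTER_HANDLED);
}
#endif
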
2516 static void
2517 re_int_task(void *arg, int npending)
2518 {
2519 	struct rl_softc		*sc;
2520 	struct ifnet		*ifp;
2521 	u_int16_t		status;
2522 	int			rval = 0;
2523 
2524 	sc = arg;
2525 	ifp = sc->rl_ifp;
2526 
2527 	RL_LOCK(sc);
2528 
2529 	status = CSR_READ_2(sc, RL_ISR);
2530 	CSR_WRITE_2(sc, RL_ISR, status);
2531 
2532 	if (sc->suspended ||
2533 	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2534 		RL_UNLOCK(sc);
2535 		return;
2536 	}
2537 
2538 #ifdef DEVICE_POLLING
2539 	if (ifp->if_capenable & IFCAP_POLLING) {
2540 		RL_UNLOCK(sc);
2541 		return;
2542 	}
2543 #endif
2544 
2545 	if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW))
2546 		rval = re_rxeof(sc, NULL);
2547 
2548 	/*
2549 	 * Some chips will ignore a second TX request issued
2550 	 * while an existing transmission is in progress. If
2551 	 * the transmitter goes idle but there are still
2552 	 * packets waiting to be sent, we need to restart the
2553 	 * channel here to flush them out. This only seems to
2554 	 * be required with the PCIe devices.
2555 	 */
2556 	if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
2557 	    (sc->rl_flags & RL_FLAG_PCIE))
2558 		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2559 	if (status & (
2560 #ifdef RE_TX_MODERATION
2561 	    RL_ISR_TIMEOUT_EXPIRED|
2562 #else
2563 	    RL_ISR_TX_OK|
2564 #endif
2565 	    RL_ISR_TX_ERR|RL_ISR_TX_DESC_UNAVAIL))
2566 		re_txeof(sc);
2567 
2568 	if (status & RL_ISR_SYSTEM_ERR) {
2569 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2570 		re_init_locked(sc);
2571 	}
2572 
2573 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2574 		re_start_locked(ifp);
2575 
2576 	RL_UNLOCK(sc);
2577 
2578 	if ((CSR_READ_2(sc, RL_ISR) & RL_INTRS_CPLUS) || rval) {
2579 		taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask);
2580 		return;
2581 	}
2582 
2583 	CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
2584 }
2585 
2586 static void
2587 re_intr_msi(void *xsc)
2588 {
2589 	struct rl_softc		*sc;
2590 	struct ifnet		*ifp;
2591 	uint16_t		intrs, status;
2592 
2593 	sc = xsc;
2594 	RL_LOCK(sc);
2595 
2596 	ifp = sc->rl_ifp;
2597 #ifdef DEVICE_POLLING
2598 	if (ifp->if_capenable & IFCAP_POLLING) {
2599 		RL_UNLOCK(sc);
2600 		return;
2601 	}
2602 #endif
2603 	/* Disable interrupts. */
2604 	CSR_WRITE_2(sc, RL_IMR, 0);
2605 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2606 		RL_UNLOCK(sc);
2607 		return;
2608 	}
2609 
2610 	intrs = RL_INTRS_CPLUS;
2611 	status = CSR_READ_2(sc, RL_ISR);
2612 	CSR_WRITE_2(sc, RL_ISR, status);
2613 	if (sc->rl_int_rx_act > 0) {
2614 		intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW |
2615 		    RL_ISR_RX_OVERRUN);
2616 		status &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW |
2617 		    RL_ISR_RX_OVERRUN);
2618 	}
2619 
2620 	if (status & (RL_ISR_TIMEOUT_EXPIRED | RL_ISR_RX_OK | RL_ISR_RX_ERR |
2621 	    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) {
2622 		re_rxeof(sc, NULL);
2623 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2624 			if (sc->rl_int_rx_mod != 0 &&
2625 			    (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR |
2626 			    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) != 0) {
2627 				/* Rearm one-shot timer. */
2628 				CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2629 				intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR |
2630 				    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN);
2631 				sc->rl_int_rx_act = 1;
2632 			} else {
2633 				intrs |= RL_ISR_RX_OK | RL_ISR_RX_ERR |
2634 				    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN;
2635 				sc->rl_int_rx_act = 0;
2636 			}
2637 		}
2638 	}
2639 
2640 	/*
2641 	 * Some chips will ignore a second TX request issued
2642 	 * while an existing transmission is in progress. If
2643 	 * the transmitter goes idle but there are still
2644 	 * packets waiting to be sent, we need to restart the
2645 	 * channel here to flush them out. This only seems to
2646 	 * be required with the PCIe devices.
2647 	 */
2648 	if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
2649 	    (sc->rl_flags & RL_FLAG_PCIE))
2650 		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2651 	if (status & (RL_ISR_TX_OK | RL_ISR_TX_ERR | RL_ISR_TX_DESC_UNAVAIL))
2652 		re_txeof(sc);
2653 
2654 	if (status & RL_ISR_SYSTEM_ERR) {
2655 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2656 		re_init_locked(sc);
2657 	}
2658 
2659 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2660 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2661 			re_start_locked(ifp);
2662 		CSR_WRITE_2(sc, RL_IMR, intrs);
2663 	}
2664 	RL_UNLOCK(sc);
2665 }
2666 
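/*
 * The two-state RX moderation scheme above, restated (illustrative;
 * "rx_work_seen" is a placeholder and "RX_SOURCES" stands for the
 * four RX interrupt bits masked together): in the idle state,
 * per-packet RX interrupts are enabled; once one fires, the RX
 * sources are masked and the one-shot timer armed, so a burst costs
 * one RL_ISR_TIMEOUT_EXPIRED interrupt per rl_int_rx_mod interval
 * instead of one interrupt per packet.  A timer tick that finds no
 * further RX work drops back to the idle state.
 */
#ifdef notdef
	if (rx_work_seen) {
		intrs &= ~RX_SOURCES;		/* coalesce via timer */
		CSR_WRITE_4(sc, RL_TIMERCNT, 1);
		sc->rl_int_rx_act = 1;
	} else {
		intrs |= RX_SOURCES;		/* back to per-packet */
		sc->rl_int_rx_act = 0;
	}
#endif
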
2667 static int
2668 re_encap(struct rl_softc *sc, struct mbuf **m_head)
2669 {
2670 	struct rl_txdesc	*txd, *txd_last;
2671 	bus_dma_segment_t	segs[RL_NTXSEGS];
2672 	bus_dmamap_t		map;
2673 	struct mbuf		*m_new;
2674 	struct rl_desc		*desc;
2675 	int			nsegs, prod;
2676 	int			i, error, ei, si;
2677 	int			padlen;
2678 	uint32_t		cmdstat, csum_flags, vlanctl;
2679 
2680 	RL_LOCK_ASSERT(sc);
2681 	M_ASSERTPKTHDR((*m_head));
2682 
2683 	/*
2684 	 * With some of the RealTek chips, using the checksum offload
2685 	 * support in conjunction with the autopadding feature results
2686 	 * in the transmission of corrupt frames. For example, if we
2687 	 * need to send a really small IP fragment that's less than 60
2688 	 * bytes in size, and IP header checksumming is enabled, the
2689 	 * resulting ethernet frame that appears on the wire will
2690 	 * have garbled payload. To work around this, if TX IP checksum
2691 	 * have a garbled payload. To work around this, if TX IP checksum
2692 	 * to the minimum ethernet frame size.
2693 	 */
2694 	if ((sc->rl_flags & RL_FLAG_AUTOPAD) == 0 &&
2695 	    (*m_head)->m_pkthdr.len < RL_IP4CSUMTX_PADLEN &&
2696 	    ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) != 0) {
2697 		padlen = RL_MIN_FRAMELEN - (*m_head)->m_pkthdr.len;
2698 		if (M_WRITABLE(*m_head) == 0) {
2699 			/* Get a writable copy. */
2700 			m_new = m_dup(*m_head, M_NOWAIT);
2701 			m_freem(*m_head);
2702 			if (m_new == NULL) {
2703 				*m_head = NULL;
2704 				return (ENOBUFS);
2705 			}
2706 			*m_head = m_new;
2707 		}
2708 		if ((*m_head)->m_next != NULL ||
2709 		    M_TRAILINGSPACE(*m_head) < padlen) {
2710 			m_new = m_defrag(*m_head, M_NOWAIT);
2711 			if (m_new == NULL) {
2712 				m_freem(*m_head);
2713 				*m_head = NULL;
2714 				return (ENOBUFS);
2715 			}
2716 		} else
2717 			m_new = *m_head;
2718 
2719 		/*
2720 		 * Manually pad short frames, and zero the pad space
2721 		 * to avoid leaking data.
2722 		 */
2723 		bzero(mtod(m_new, char *) + m_new->m_pkthdr.len, padlen);
2724 		m_new->m_pkthdr.len += padlen;
2725 		m_new->m_len = m_new->m_pkthdr.len;
2726 		*m_head = m_new;
2727 	}
2728 
2729 	prod = sc->rl_ldata.rl_tx_prodidx;
2730 	txd = &sc->rl_ldata.rl_tx_desc[prod];
2731 	error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
2732 	    *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2733 	if (error == EFBIG) {
2734 		m_new = m_collapse(*m_head, M_NOWAIT, RL_NTXSEGS);
2735 		if (m_new == NULL) {
2736 			m_freem(*m_head);
2737 			*m_head = NULL;
2738 			return (ENOBUFS);
2739 		}
2740 		*m_head = m_new;
2741 		error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag,
2742 		    txd->tx_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2743 		if (error != 0) {
2744 			m_freem(*m_head);
2745 			*m_head = NULL;
2746 			return (error);
2747 		}
2748 	} else if (error != 0)
2749 		return (error);
2750 	if (nsegs == 0) {
2751 		m_freem(*m_head);
2752 		*m_head = NULL;
2753 		return (EIO);
2754 	}
2755 
2756 	/* Check for number of available descriptors. */
2757 	if (sc->rl_ldata.rl_tx_free - nsegs <= 1) {
2758 		bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap);
2759 		return (ENOBUFS);
2760 	}
2761 
2762 	bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
2763 	    BUS_DMASYNC_PREWRITE);
2764 
2765 	/*
2766 	 * Set up checksum offload. Note: checksum offload bits must
2767 	 * appear in all descriptors of a multi-descriptor transmit
2768 	 * attempt; testing done with an 8169 chip shows that this
2769 	 * is a hard requirement.
2770 	 */
2771 	vlanctl = 0;
2772 	csum_flags = 0;
2773 	if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2774 		if ((sc->rl_flags & RL_FLAG_DESCV2) != 0) {
2775 			csum_flags |= RL_TDESC_CMD_LGSEND;
2776 			vlanctl |= ((uint32_t)(*m_head)->m_pkthdr.tso_segsz <<
2777 			    RL_TDESC_CMD_MSSVALV2_SHIFT);
2778 		} else {
2779 			csum_flags |= RL_TDESC_CMD_LGSEND |
2780 			    ((uint32_t)(*m_head)->m_pkthdr.tso_segsz <<
2781 			    RL_TDESC_CMD_MSSVAL_SHIFT);
2782 		}
2783 	} else {
2784 		/*
2785 		 * Unconditionally enable IP checksum if TCP or UDP
2786 		 * checksum is required. Otherwise, TCP/UDP checksum
2787 		 * offload has no effect.
2788 		 */
2789 		if (((*m_head)->m_pkthdr.csum_flags & RE_CSUM_FEATURES) != 0) {
2790 			if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
2791 				csum_flags |= RL_TDESC_CMD_IPCSUM;
2792 				if (((*m_head)->m_pkthdr.csum_flags &
2793 				    CSUM_TCP) != 0)
2794 					csum_flags |= RL_TDESC_CMD_TCPCSUM;
2795 				if (((*m_head)->m_pkthdr.csum_flags &
2796 				    CSUM_UDP) != 0)
2797 					csum_flags |= RL_TDESC_CMD_UDPCSUM;
2798 			} else {
2799 				vlanctl |= RL_TDESC_CMD_IPCSUMV2;
2800 				if (((*m_head)->m_pkthdr.csum_flags &
2801 				    CSUM_TCP) != 0)
2802 					vlanctl |= RL_TDESC_CMD_TCPCSUMV2;
2803 				if (((*m_head)->m_pkthdr.csum_flags &
2804 				    CSUM_UDP) != 0)
2805 					vlanctl |= RL_TDESC_CMD_UDPCSUMV2;
2806 			}
2807 		}
2808 	}
2809 
2810 	/*
2811 	 * Set up hardware VLAN tagging. Note: vlan tag info must
2812 	 * appear in all descriptors of a multi-descriptor
2813 	 * transmission attempt.
2814 	 */
2815 	if ((*m_head)->m_flags & M_VLANTAG)
2816 		vlanctl |= bswap16((*m_head)->m_pkthdr.ether_vtag) |
2817 		    RL_TDESC_VLANCTL_TAG;
2818 
2819 	si = prod;
2820 	for (i = 0; i < nsegs; i++, prod = RL_TX_DESC_NXT(sc, prod)) {
2821 		desc = &sc->rl_ldata.rl_tx_list[prod];
2822 		desc->rl_vlanctl = htole32(vlanctl);
2823 		desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr));
2824 		desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr));
2825 		cmdstat = segs[i].ds_len;
2826 		if (i != 0)
2827 			cmdstat |= RL_TDESC_CMD_OWN;
2828 		if (prod == sc->rl_ldata.rl_tx_desc_cnt - 1)
2829 			cmdstat |= RL_TDESC_CMD_EOR;
2830 		desc->rl_cmdstat = htole32(cmdstat | csum_flags);
2831 		sc->rl_ldata.rl_tx_free--;
2832 	}
2833 	/* Update producer index. */
2834 	sc->rl_ldata.rl_tx_prodidx = prod;
2835 
2836 	/* Set EOF on the last descriptor. */
2837 	ei = RL_TX_DESC_PRV(sc, prod);
2838 	desc = &sc->rl_ldata.rl_tx_list[ei];
2839 	desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF);
2840 
2841 	desc = &sc->rl_ldata.rl_tx_list[si];
2842 	/* Set SOF and transfer ownership of packet to the chip. */
2843 	desc->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF);
2844 
2845 	/*
2846 	 * Ensure that the map for this transmission
2847 	 * is placed at the array index of the last descriptor
2848 	 * in this chain.  (Swap last and first dmamaps.)
2849 	 */
2850 	txd_last = &sc->rl_ldata.rl_tx_desc[ei];
2851 	map = txd->tx_dmamap;
2852 	txd->tx_dmamap = txd_last->tx_dmamap;
2853 	txd_last->tx_dmamap = map;
2854 	txd_last->tx_m = *m_head;
2855 
2856 	return (0);
2857 }
2858 
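/*
 * Worked example of the padding above (assuming RL_MIN_FRAMELEN is
 * the usual 60-byte Ethernet minimum): a 46-byte IP fragment with
 * CSUM_IP set gets padlen = 60 - 46 = 14; those 14 bytes are zeroed
 * rather than left as stale mbuf contents, and the frame leaves the
 * host as exactly 60 bytes (64 on the wire once the chip appends the
 * CRC), sidestepping the autopad/checksum corruption.
 */
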
2859 static void
2860 re_start(struct ifnet *ifp)
2861 {
2862 	struct rl_softc		*sc;
2863 
2864 	sc = ifp->if_softc;
2865 	RL_LOCK(sc);
2866 	re_start_locked(ifp);
2867 	RL_UNLOCK(sc);
2868 }
2869 
2870 /*
2871  * Main transmit routine for C+ and gigE NICs.
2872  */
2873 static void
2874 re_start_locked(struct ifnet *ifp)
2875 {
2876 	struct rl_softc		*sc;
2877 	struct mbuf		*m_head;
2878 	int			queued;
2879 
2880 	sc = ifp->if_softc;
2881 
2882 #ifdef DEV_NETMAP
2883 	/* XXX is this necessary? */
2884 	if (ifp->if_capenable & IFCAP_NETMAP) {
2885 		struct netmap_kring *kring = &NA(ifp)->tx_rings[0];
2886 		if (sc->rl_ldata.rl_tx_prodidx != kring->nr_hwcur) {
2887 			/* kick the tx unit */
2888 			CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2889 #ifdef RE_TX_MODERATION
2890 			CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2891 #endif
2892 			sc->rl_watchdog_timer = 5;
2893 		}
2894 		return;
2895 	}
2896 #endif /* DEV_NETMAP */
2897 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2898 	    IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0)
2899 		return;
2900 
2901 	for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
2902 	    sc->rl_ldata.rl_tx_free > 1;) {
2903 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2904 		if (m_head == NULL)
2905 			break;
2906 
2907 		if (re_encap(sc, &m_head) != 0) {
2908 			if (m_head == NULL)
2909 				break;
2910 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2911 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2912 			break;
2913 		}
2914 
2915 		/*
2916 		 * If there's a BPF listener, bounce a copy of this frame
2917 		 * to him.
2918 		 */
2919 		ETHER_BPF_MTAP(ifp, m_head);
2920 
2921 		queued++;
2922 	}
2923 
2924 	if (queued == 0) {
2925 #ifdef RE_TX_MODERATION
2926 		if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt)
2927 			CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2928 #endif
2929 		return;
2930 	}
2931 
2932 	/* Flush the TX descriptors */
2933 
2934 	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
2935 	    sc->rl_ldata.rl_tx_list_map,
2936 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2937 
2938 	CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2939 
2940 #ifdef RE_TX_MODERATION
2941 	/*
2942 	 * Use the countdown timer for interrupt moderation.
2943 	 * 'TX done' interrupts are disabled. Instead, we reset the
2944 	 * countdown timer, which counts up until it reaches the
2945 	 * value in the TIMERINT register and then triggers an
2946 	 * interrupt. Each time we write to the TIMERCNT register,
2947 	 * the timer count is reset to 0.
2948 	 */
2949 	CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2950 #endif
2951 
2952 	/*
2953 	 * Set a timeout in case the chip goes out to lunch.
2954 	 */
2955 	sc->rl_watchdog_timer = 5;
2956 }
2957 
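/*
 * Timing sketch for RE_TX_MODERATION (illustrative): with TX-done
 * interrupts masked, every transmit start rewinds the countdown
 * timer, so a steady stream of packets yields one
 * RL_ISR_TIMEOUT_EXPIRED interrupt per TIMERINT period rather than
 * one per packet, and re_txeof() reaps all completed descriptors in
 * a single pass when it finally runs.
 */
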
2958 static void
2959 re_set_jumbo(struct rl_softc *sc, int jumbo)
2960 {
2961 
2962 	if (sc->rl_hwrev->rl_rev == RL_HWREV_8168E_VL) {
2963 		pci_set_max_read_req(sc->rl_dev, 4096);
2964 		return;
2965 	}
2966 
2967 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
2968 	if (jumbo != 0) {
2969 		CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) |
2970 		    RL_CFG3_JUMBO_EN0);
2971 		switch (sc->rl_hwrev->rl_rev) {
2972 		case RL_HWREV_8168DP:
2973 			break;
2974 		case RL_HWREV_8168E:
2975 			CSR_WRITE_1(sc, sc->rl_cfg4,
2976 			    CSR_READ_1(sc, sc->rl_cfg4) | 0x01);
2977 			break;
2978 		default:
2979 			CSR_WRITE_1(sc, sc->rl_cfg4,
2980 			    CSR_READ_1(sc, sc->rl_cfg4) | RL_CFG4_JUMBO_EN1);
2981 		}
2982 	} else {
2983 		CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) &
2984 		    ~RL_CFG3_JUMBO_EN0);
2985 		switch (sc->rl_hwrev->rl_rev) {
2986 		case RL_HWREV_8168DP:
2987 			break;
2988 		case RL_HWREV_8168E:
2989 			CSR_WRITE_1(sc, sc->rl_cfg4,
2990 			    CSR_READ_1(sc, sc->rl_cfg4) & ~0x01);
2991 			break;
2992 		default:
2993 			CSR_WRITE_1(sc, sc->rl_cfg4,
2994 			    CSR_READ_1(sc, sc->rl_cfg4) & ~RL_CFG4_JUMBO_EN1);
2995 		}
2996 	}
2997 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
2998 
2999 	switch (sc->rl_hwrev->rl_rev) {
3000 	case RL_HWREV_8168DP:
3001 		pci_set_max_read_req(sc->rl_dev, 4096);
3002 		break;
3003 	default:
3004 		if (jumbo != 0)
3005 			pci_set_max_read_req(sc->rl_dev, 512);
3006 		else
3007 			pci_set_max_read_req(sc->rl_dev, 4096);
3008 	}
3009 }
3010 
3011 static void
3012 re_init(void *xsc)
3013 {
3014 	struct rl_softc		*sc = xsc;
3015 
3016 	RL_LOCK(sc);
3017 	re_init_locked(sc);
3018 	RL_UNLOCK(sc);
3019 }
3020 
3021 static void
3022 re_init_locked(struct rl_softc *sc)
3023 {
3024 	struct ifnet		*ifp = sc->rl_ifp;
3025 	struct mii_data		*mii;
3026 	uint32_t		reg;
3027 	uint16_t		cfg;
3028 	union {
3029 		uint32_t align_dummy;
3030 		u_char eaddr[ETHER_ADDR_LEN];
3031 	} eaddr;
3032 
3033 	RL_LOCK_ASSERT(sc);
3034 
3035 	mii = device_get_softc(sc->rl_miibus);
3036 
3037 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3038 		return;
3039 
3040 	/*
3041 	 * Cancel pending I/O and free all RX/TX buffers.
3042 	 */
3043 	re_stop(sc);
3044 
3045 	/* Put controller into known state. */
3046 	re_reset(sc);
3047 
3048 	/*
3049 	 * For C+ mode, initialize the RX descriptors and mbufs.
3050 	 */
3051 	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
3052 		if (ifp->if_mtu > RL_MTU) {
3053 			if (re_jrx_list_init(sc) != 0) {
3054 				device_printf(sc->rl_dev,
3055 				    "no memory for jumbo RX buffers\n");
3056 				re_stop(sc);
3057 				return;
3058 			}
3059 			/* Disable checksum offloading for jumbo frames. */
3060 			ifp->if_capenable &= ~(IFCAP_HWCSUM | IFCAP_TSO4);
3061 			ifp->if_hwassist &= ~(RE_CSUM_FEATURES | CSUM_TSO);
3062 		} else {
3063 			if (re_rx_list_init(sc) != 0) {
3064 				device_printf(sc->rl_dev,
3065 				    "no memory for RX buffers\n");
3066 				re_stop(sc);
3067 				return;
3068 			}
3069 		}
3070 		re_set_jumbo(sc, ifp->if_mtu > RL_MTU);
3071 	} else {
3072 		if (re_rx_list_init(sc) != 0) {
3073 			device_printf(sc->rl_dev, "no memory for RX buffers\n");
3074 			re_stop(sc);
3075 			return;
3076 		}
3077 		if ((sc->rl_flags & RL_FLAG_PCIE) != 0 &&
3078 		    pci_get_device(sc->rl_dev) != RT_DEVICEID_8101E) {
3079 			if (ifp->if_mtu > RL_MTU)
3080 				pci_set_max_read_req(sc->rl_dev, 512);
3081 			else
3082 				pci_set_max_read_req(sc->rl_dev, 4096);
3083 		}
3084 	}
3085 	re_tx_list_init(sc);
3086 
3087 	/*
3088 	 * Enable C+ RX and TX mode, as well as VLAN stripping and
3089 	 * RX checksum offload. We must configure the C+ register
3090 	 * before all others.
3091 	 */
3092 	cfg = RL_CPLUSCMD_PCI_MRW;
3093 	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
3094 		cfg |= RL_CPLUSCMD_RXCSUM_ENB;
3095 	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3096 		cfg |= RL_CPLUSCMD_VLANSTRIP;
3097 	if ((sc->rl_flags & RL_FLAG_MACSTAT) != 0) {
3098 		cfg |= RL_CPLUSCMD_MACSTAT_DIS;
3099 		/* XXX magic. */
3100 		cfg |= 0x0001;
3101 	} else
3102 		cfg |= RL_CPLUSCMD_RXENB | RL_CPLUSCMD_TXENB;
3103 	CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg);
3104 	if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SC ||
3105 	    sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE) {
3106 		reg = 0x000fff00;
3107 		if ((CSR_READ_1(sc, sc->rl_cfg2) & RL_CFG2_PCI66MHZ) != 0)
3108 			reg |= 0x000000ff;
3109 		if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE)
3110 			reg |= 0x00f00000;
3111 		CSR_WRITE_4(sc, 0x7c, reg);
3112 		/* Disable interrupt mitigation. */
3113 		CSR_WRITE_2(sc, 0xe2, 0);
3114 	}
3115 	/*
3116 	 * Disable TSO if the interface MTU is greater than the MSS
3117 	 * allowed by the controller.
3118 	 */
3119 	if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) {
3120 		ifp->if_capenable &= ~IFCAP_TSO4;
3121 		ifp->if_hwassist &= ~CSUM_TSO;
3122 	}
3123 
3124 	/*
3125 	 * Init our MAC address.  Even though the chipset
3126 	 * documentation doesn't mention it, we need to enter "Config
3127 	 * register write enable" mode to modify the ID registers.
3128 	 */
3129 	/* Copy MAC address on stack to align. */
3130 	bcopy(IF_LLADDR(ifp), eaddr.eaddr, ETHER_ADDR_LEN);
3131 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
3132 	CSR_WRITE_4(sc, RL_IDR0,
3133 	    htole32(*(u_int32_t *)(&eaddr.eaddr[0])));
3134 	CSR_WRITE_4(sc, RL_IDR4,
3135 	    htole32(*(u_int32_t *)(&eaddr.eaddr[4])));
3136 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3137 
3138 	/*
3139 	 * Load the addresses of the RX and TX lists into the chip.
3140 	 */
3141 
3142 	CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI,
3143 	    RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr));
3144 	CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO,
3145 	    RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr));
3146 
3147 	CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI,
3148 	    RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr));
3149 	CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO,
3150 	    RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr));
3151 
3152 	/*
3153 	 * Enable transmit and receive.
3154 	 */
3155 	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
3156 
3157 	/*
3158 	 * Set the initial TX configuration.
3159 	 */
3160 	if (sc->rl_testmode) {
3161 		if (sc->rl_type == RL_8169)
3162 			CSR_WRITE_4(sc, RL_TXCFG,
3163 			    RL_TXCFG_CONFIG|RL_LOOPTEST_ON);
3164 		else
3165 			CSR_WRITE_4(sc, RL_TXCFG,
3166 			    RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS);
3167 	} else
3168 		CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
3169 
3170 	CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16);
3171 
3172 	/*
3173 	 * Set the initial RX configuration.
3174 	 */
3175 	re_set_rxmode(sc);
3176 
3177 	/* Configure interrupt moderation. */
3178 	if (sc->rl_type == RL_8169) {
3179 		/* Magic from vendor. */
3180 		CSR_WRITE_2(sc, RL_INTRMOD, 0x5100);
3181 	}
3182 
3183 #ifdef DEVICE_POLLING
3184 	/*
3185 	 * Disable interrupts if we are polling.
3186 	 */
3187 	if (ifp->if_capenable & IFCAP_POLLING)
3188 		CSR_WRITE_2(sc, RL_IMR, 0);
3189 	else	/* otherwise ... */
3190 #endif
3191 
3192 	/*
3193 	 * Enable interrupts.
3194 	 */
3195 	if (sc->rl_testmode)
3196 		CSR_WRITE_2(sc, RL_IMR, 0);
3197 	else
3198 		CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
3199 	CSR_WRITE_2(sc, RL_ISR, RL_INTRS_CPLUS);
3200 
3201 	/* Set initial TX threshold */
3202 	sc->rl_txthresh = RL_TX_THRESH_INIT;
3203 
3204 	/* Start RX/TX process. */
3205 	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);
3206 #ifdef notdef
3207 	/* Enable receiver and transmitter. */
3208 	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
3209 #endif
3210 
3211 	/*
3212 	 * Initialize the timer interrupt register so that
3213 	 * a timer interrupt will be generated once the timer
3214 	 * reaches a certain number of ticks. The timer is
3215 	 * reloaded on each transmit.
3216 	 */
3217 #ifdef RE_TX_MODERATION
3218 	/*
3219 	 * Use the timer interrupt register to implement TX interrupt
3220 	 * moderation, which dramatically improves the TX frame rate.
3221 	 */
3222 	if (sc->rl_type == RL_8169)
3223 		CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800);
3224 	else
3225 		CSR_WRITE_4(sc, RL_TIMERINT, 0x400);
3226 #else
3227 	/*
3228 	 * Use the timer interrupt register to implement RX interrupt
3229 	 * moderation.
3230 	 */
3231 	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 &&
3232 	    intr_filter == 0) {
3233 		if (sc->rl_type == RL_8169)
3234 			CSR_WRITE_4(sc, RL_TIMERINT_8169,
3235 			    RL_USECS(sc->rl_int_rx_mod));
3236 	} else {
3237 		if (sc->rl_type == RL_8169)
3238 			CSR_WRITE_4(sc, RL_TIMERINT_8169, RL_USECS(0));
3239 	}
3240 #endif
3241 
3242 	/*
3243 	 * For 8169 gigE NICs, set the max allowed RX packet
3244 	 * size so we can receive jumbo frames.
3245 	 */
3246 	if (sc->rl_type == RL_8169) {
3247 		if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
3248 			/*
3249 			 * For controllers that use new jumbo frame scheme,
3250 			 * set maximum size of jumbo frame depending on
3251 			 * controller revisions.
3252 			 */
3253 			if (ifp->if_mtu > RL_MTU)
3254 				CSR_WRITE_2(sc, RL_MAXRXPKTLEN,
3255 				    sc->rl_hwrev->rl_max_mtu +
3256 				    ETHER_VLAN_ENCAP_LEN + ETHER_HDR_LEN +
3257 				    ETHER_CRC_LEN);
3258 			else
3259 				CSR_WRITE_2(sc, RL_MAXRXPKTLEN,
3260 				    RE_RX_DESC_BUFLEN);
3261 		} else if ((sc->rl_flags & RL_FLAG_PCIE) != 0 &&
3262 		    sc->rl_hwrev->rl_max_mtu == RL_MTU) {
3263 			/* RTL810x has no jumbo frame support. */
3264 			CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN);
3265 		} else
3266 			CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383);
3267 	}
3268 
3269 	if (sc->rl_testmode)
3270 		return;
3271 
3272 	CSR_WRITE_1(sc, sc->rl_cfg1, CSR_READ_1(sc, sc->rl_cfg1) |
3273 	    RL_CFG1_DRVLOAD);
3274 
3275 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
3276 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3277 
3278 	sc->rl_flags &= ~RL_FLAG_LINK;
3279 	mii_mediachg(mii);
3280 
3281 	sc->rl_watchdog_timer = 0;
3282 	callout_reset(&sc->rl_stat_callout, hz, re_tick, sc);
3283 }
3284 
3285 /*
3286  * Set media options.
3287  */
3288 static int
3289 re_ifmedia_upd(struct ifnet *ifp)
3290 {
3291 	struct rl_softc		*sc;
3292 	struct mii_data		*mii;
3293 	int			error;
3294 
3295 	sc = ifp->if_softc;
3296 	mii = device_get_softc(sc->rl_miibus);
3297 	RL_LOCK(sc);
3298 	error = mii_mediachg(mii);
3299 	RL_UNLOCK(sc);
3300 
3301 	return (error);
3302 }
3303 
3304 /*
3305  * Report current media status.
3306  */
3307 static void
3308 re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3309 {
3310 	struct rl_softc		*sc;
3311 	struct mii_data		*mii;
3312 
3313 	sc = ifp->if_softc;
3314 	mii = device_get_softc(sc->rl_miibus);
3315 
3316 	RL_LOCK(sc);
3317 	mii_pollstat(mii);
3318 	ifmr->ifm_active = mii->mii_media_active;
3319 	ifmr->ifm_status = mii->mii_media_status;
3320 	RL_UNLOCK(sc);
3321 }
3322 
3323 static int
3324 re_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3325 {
3326 	struct rl_softc		*sc = ifp->if_softc;
3327 	struct ifreq		*ifr = (struct ifreq *) data;
3328 	struct mii_data		*mii;
3329 	uint32_t		rev;
3330 	int			error = 0;
3331 
3332 	switch (command) {
3333 	case SIOCSIFMTU:
3334 		if (ifr->ifr_mtu < ETHERMIN ||
3335 		    ifr->ifr_mtu > sc->rl_hwrev->rl_max_mtu) {
3336 			error = EINVAL;
3337 			break;
3338 		}
3339 		RL_LOCK(sc);
3340 		if (ifp->if_mtu != ifr->ifr_mtu) {
3341 			ifp->if_mtu = ifr->ifr_mtu;
3342 			if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
3343 			    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
3344 				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3345 				re_init_locked(sc);
3346 			}
3347 			if (ifp->if_mtu > RL_TSO_MTU &&
3348 			    (ifp->if_capenable & IFCAP_TSO4) != 0) {
3349 				ifp->if_capenable &= ~(IFCAP_TSO4 |
3350 				    IFCAP_VLAN_HWTSO);
3351 				ifp->if_hwassist &= ~CSUM_TSO;
3352 			}
3353 			VLAN_CAPABILITIES(ifp);
3354 		}
3355 		RL_UNLOCK(sc);
3356 		break;
3357 	case SIOCSIFFLAGS:
3358 		RL_LOCK(sc);
3359 		if ((ifp->if_flags & IFF_UP) != 0) {
3360 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
3361 				if (((ifp->if_flags ^ sc->rl_if_flags)
3362 				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3363 					re_set_rxmode(sc);
3364 			} else
3365 				re_init_locked(sc);
3366 		} else {
3367 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3368 				re_stop(sc);
3369 		}
3370 		sc->rl_if_flags = ifp->if_flags;
3371 		RL_UNLOCK(sc);
3372 		break;
3373 	case SIOCADDMULTI:
3374 	case SIOCDELMULTI:
3375 		RL_LOCK(sc);
3376 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3377 			re_set_rxmode(sc);
3378 		RL_UNLOCK(sc);
3379 		break;
3380 	case SIOCGIFMEDIA:
3381 	case SIOCSIFMEDIA:
3382 		mii = device_get_softc(sc->rl_miibus);
3383 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
3384 		break;
3385 	case SIOCSIFCAP:
3386 	    {
3387 		int mask, reinit;
3388 
3389 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3390 		reinit = 0;
3391 #ifdef DEVICE_POLLING
3392 		if (mask & IFCAP_POLLING) {
3393 			if (ifr->ifr_reqcap & IFCAP_POLLING) {
3394 				error = ether_poll_register(re_poll, ifp);
3395 				if (error)
3396 					return (error);
3397 				RL_LOCK(sc);
3398 				/* Disable interrupts */
3399 				CSR_WRITE_2(sc, RL_IMR, 0x0000);
3400 				ifp->if_capenable |= IFCAP_POLLING;
3401 				RL_UNLOCK(sc);
3402 			} else {
3403 				error = ether_poll_deregister(ifp);
3404 				/* Enable interrupts. */
3405 				RL_LOCK(sc);
3406 				CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
3407 				ifp->if_capenable &= ~IFCAP_POLLING;
3408 				RL_UNLOCK(sc);
3409 			}
3410 		}
3411 #endif /* DEVICE_POLLING */
3412 		RL_LOCK(sc);
3413 		if ((mask & IFCAP_TXCSUM) != 0 &&
3414 		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
3415 			ifp->if_capenable ^= IFCAP_TXCSUM;
3416 			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) {
3417 				rev = sc->rl_hwrev->rl_rev;
3418 				if (rev == RL_HWREV_8168C ||
3419 				    rev == RL_HWREV_8168C_SPIN2 ||
3420 				    rev == RL_HWREV_8168CP)
3421 					ifp->if_hwassist |= CSUM_TCP | CSUM_UDP;
3422 				else
3423 					ifp->if_hwassist |= RE_CSUM_FEATURES;
3424 			} else
3425 				ifp->if_hwassist &= ~RE_CSUM_FEATURES;
3426 			reinit = 1;
3427 		}
3428 		if ((mask & IFCAP_RXCSUM) != 0 &&
3429 		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
3430 			ifp->if_capenable ^= IFCAP_RXCSUM;
3431 			reinit = 1;
3432 		}
3433 		if ((mask & IFCAP_TSO4) != 0 &&
3434 		    (ifp->if_capabilities & IFCAP_TSO4) != 0) {
3435 			ifp->if_capenable ^= IFCAP_TSO4;
3436 			if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
3437 				ifp->if_hwassist |= CSUM_TSO;
3438 			else
3439 				ifp->if_hwassist &= ~CSUM_TSO;
3440 			if (ifp->if_mtu > RL_TSO_MTU &&
3441 			    (ifp->if_capenable & IFCAP_TSO4) != 0) {
3442 				ifp->if_capenable &= ~IFCAP_TSO4;
3443 				ifp->if_hwassist &= ~CSUM_TSO;
3444 			}
3445 		}
3446 		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
3447 		    (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
3448 			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
3449 		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
3450 		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
3451 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
3452 			/* TSO over VLAN requires VLAN hardware tagging. */
3453 			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
3454 				ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
3455 			reinit = 1;
3456 		}
3457 		if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
3458 		    (mask & (IFCAP_HWCSUM | IFCAP_TSO4 |
3459 		    IFCAP_VLAN_HWTSO)) != 0)
3460 			reinit = 1;
3461 		if ((mask & IFCAP_WOL) != 0 &&
3462 		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
3463 			if ((mask & IFCAP_WOL_UCAST) != 0)
3464 				ifp->if_capenable ^= IFCAP_WOL_UCAST;
3465 			if ((mask & IFCAP_WOL_MCAST) != 0)
3466 				ifp->if_capenable ^= IFCAP_WOL_MCAST;
3467 			if ((mask & IFCAP_WOL_MAGIC) != 0)
3468 				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
3469 		}
3470 		if (reinit && ifp->if_drv_flags & IFF_DRV_RUNNING) {
3471 			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3472 			re_init_locked(sc);
3473 		}
3474 		RL_UNLOCK(sc);
3475 		VLAN_CAPABILITIES(ifp);
3476 	    }
3477 		break;
3478 	default:
3479 		error = ether_ioctl(ifp, command, data);
3480 		break;
3481 	}
3482 
3483 	return (error);
3484 }
3485 
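/*
 * The XOR idiom in the SIOCSIFCAP case above, as a worked example
 * (values illustrative): with capenable = TXCSUM|RXCSUM and
 * reqcap = RXCSUM|TSO4, mask = TXCSUM|TSO4, i.e. exactly the bits
 * the caller wants toggled; "capenable ^= bit" then flips each one
 * that the hardware actually supports.
 */
#ifdef notdef
	mask = ifr->ifr_reqcap ^ ifp->if_capenable;
	if ((mask & IFCAP_TXCSUM) != 0 &&
	    (ifp->if_capabilities & IFCAP_TXCSUM) != 0)
		ifp->if_capenable ^= IFCAP_TXCSUM;	/* toggle */
#endif
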
3486 static void
3487 re_watchdog(struct rl_softc *sc)
3488 {
3489 	struct ifnet		*ifp;
3490 
3491 	RL_LOCK_ASSERT(sc);
3492 
3493 	if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer != 0)
3494 		return;
3495 
3496 	ifp = sc->rl_ifp;
3497 	re_txeof(sc);
3498 	if (sc->rl_ldata.rl_tx_free == sc->rl_ldata.rl_tx_desc_cnt) {
3499 		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
3500 		    "-- recovering\n");
3501 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3502 			re_start_locked(ifp);
3503 		return;
3504 	}
3505 
3506 	if_printf(ifp, "watchdog timeout\n");
3507 	ifp->if_oerrors++;
3508 
3509 	re_rxeof(sc, NULL);
3510 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3511 	re_init_locked(sc);
3512 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3513 		re_start_locked(ifp);
3514 }
3515 
3516 /*
3517  * Stop the adapter and free any mbufs allocated to the
3518  * RX and TX lists.
3519  */
3520 static void
3521 re_stop(struct rl_softc *sc)
3522 {
3523 	int			i;
3524 	struct ifnet		*ifp;
3525 	struct rl_txdesc	*txd;
3526 	struct rl_rxdesc	*rxd;
3527 
3528 	RL_LOCK_ASSERT(sc);
3529 
3530 	ifp = sc->rl_ifp;
3531 
3532 	sc->rl_watchdog_timer = 0;
3533 	callout_stop(&sc->rl_stat_callout);
3534 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3535 
3536 	/*
3537 	 * Disable accepting frames to put RX MAC into idle state.
3538 	 * Otherwise it's possible to receive frames while the stop
3539 	 * command is executing, and the controller could DMA a frame
3540 	 * into an already freed RX buffer during that period.
3541 	 */
3542 	CSR_WRITE_4(sc, RL_RXCFG, CSR_READ_4(sc, RL_RXCFG) &
3543 	    ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI |
3544 	    RL_RXCFG_RX_BROAD));
3545 
3546 	if ((sc->rl_flags & RL_FLAG_WAIT_TXPOLL) != 0) {
3547 		for (i = RL_TIMEOUT; i > 0; i--) {
3548 			if ((CSR_READ_1(sc, sc->rl_txstart) &
3549 			    RL_TXSTART_START) == 0)
3550 				break;
3551 			DELAY(20);
3552 		}
3553 		if (i == 0)
3554 			device_printf(sc->rl_dev,
3555 			    "stopping TX poll timed out!\n");
3556 		CSR_WRITE_1(sc, RL_COMMAND, 0x00);
3557 	} else if ((sc->rl_flags & RL_FLAG_CMDSTOP) != 0) {
3558 		CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB |
3559 		    RL_CMD_RX_ENB);
3560 		if ((sc->rl_flags & RL_FLAG_CMDSTOP_WAIT_TXQ) != 0) {
3561 			for (i = RL_TIMEOUT; i > 0; i--) {
3562 				if ((CSR_READ_4(sc, RL_TXCFG) &
3563 				    RL_TXCFG_QUEUE_EMPTY) != 0)
3564 					break;
3565 				DELAY(100);
3566 			}
3567 			if (i == 0)
3568 				device_printf(sc->rl_dev,
3569 				    "stopping TXQ timed out!\n");
3570 		}
3571 	} else
3572 		CSR_WRITE_1(sc, RL_COMMAND, 0x00);
3573 	DELAY(1000);
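	/* Mask all interrupts and ack any that are still pending. */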
3574 	CSR_WRITE_2(sc, RL_IMR, 0x0000);
3575 	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
3576 
3577 	if (sc->rl_head != NULL) {
3578 		m_freem(sc->rl_head);
3579 		sc->rl_head = sc->rl_tail = NULL;
3580 	}
3581 
3582 	/* Free the TX list buffers. */
3583 	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
3584 		txd = &sc->rl_ldata.rl_tx_desc[i];
3585 		if (txd->tx_m != NULL) {
3586 			bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
3587 			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3588 			bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag,
3589 			    txd->tx_dmamap);
3590 			m_freem(txd->tx_m);
3591 			txd->tx_m = NULL;
3592 		}
3593 	}
3594 
3595 	/* Free the RX list buffers. */
3596 	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
3597 		rxd = &sc->rl_ldata.rl_rx_desc[i];
3598 		if (rxd->rx_m != NULL) {
3599 			bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
3600 			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3601 			bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag,
3602 			    rxd->rx_dmamap);
3603 			m_freem(rxd->rx_m);
3604 			rxd->rx_m = NULL;
3605 		}
3606 	}
3607 
3608 	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
3609 		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
3610 			rxd = &sc->rl_ldata.rl_jrx_desc[i];
3611 			if (rxd->rx_m != NULL) {
3612 				bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag,
3613 				    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3614 				bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag,
3615 				    rxd->rx_dmamap);
3616 				m_freem(rxd->rx_m);
3617 				rxd->rx_m = NULL;
3618 			}
3619 		}
3620 	}
3621 }
3622 
3623 /*
3624  * Device suspend routine.  Stop the interface and save some PCI
3625  * settings in case the BIOS doesn't restore them properly on
3626  * resume.
3627  */
3628 static int
3629 re_suspend(device_t dev)
3630 {
3631 	struct rl_softc		*sc;
3632 
3633 	sc = device_get_softc(dev);
3634 
3635 	RL_LOCK(sc);
3636 	re_stop(sc);
3637 	re_setwol(sc);
3638 	sc->suspended = 1;
3639 	RL_UNLOCK(sc);
3640 
3641 	return (0);
3642 }
3643 
3644 /*
3645  * Device resume routine.  Restore some PCI settings in case the BIOS
3646  * doesn't, re-enable busmastering, and restart the interface if
3647  * appropriate.
3648  */
3649 static int
3650 re_resume(device_t dev)
3651 {
3652 	struct rl_softc		*sc;
3653 	struct ifnet		*ifp;
3654 
3655 	sc = device_get_softc(dev);
3656 
3657 	RL_LOCK(sc);
3658 
3659 	ifp = sc->rl_ifp;
3660 	/* Take controller out of sleep mode. */
3661 	if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
3662 		if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
3663 			CSR_WRITE_1(sc, RL_GPIO,
3664 			    CSR_READ_1(sc, RL_GPIO) | 0x01);
3665 	}
3666 
3667 	/*
3668 	 * Clear WOL matching so that the wakeup filters programmed
3669 	 * for WOL do not interfere with normal Rx filtering.
3670 	 */
3671 	re_clrwol(sc);
3672 
3673 	/* Reinitialize the interface if necessary. */
3674 	if (ifp->if_flags & IFF_UP)
3675 		re_init_locked(sc);
3676 
3677 	sc->suspended = 0;
3678 	RL_UNLOCK(sc);
3679 
3680 	return (0);
3681 }
3682 
3683 /*
3684  * Stop all chip I/O so that the kernel's probe routines don't
3685  * get confused by errant DMAs when rebooting.
3686  */
3687 static int
3688 re_shutdown(device_t dev)
3689 {
3690 	struct rl_softc		*sc;
3691 
3692 	sc = device_get_softc(dev);
3693 
3694 	RL_LOCK(sc);
3695 	re_stop(sc);
3696 	/*
3697 	 * Mark the interface as down since otherwise we will panic
3698 	 * if an interrupt comes in later on, which can happen in
3699 	 * some cases.
3700 	 */
3701 	sc->rl_ifp->if_flags &= ~IFF_UP;
3702 	re_setwol(sc);
3703 	RL_UNLOCK(sc);
3704 
3705 	return (0);
3706 }
3707 
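/*
 * Renegotiate the link down to 10/100Mbps in preparation for suspend:
 * advertise only 10/100 modes, restart autonegotiation and poll until
 * a usable link is established.  Chips flagged RL_FLAG_WOL_MANLINK
 * apparently cannot wake reliably over a gigabit link.
 */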
3708 static void
3709 re_set_linkspeed(struct rl_softc *sc)
3710 {
3711 	struct mii_softc *miisc;
3712 	struct mii_data *mii;
3713 	int aneg, i, phyno;
3714 
3715 	RL_LOCK_ASSERT(sc);
3716 
3717 	mii = device_get_softc(sc->rl_miibus);
3718 	mii_pollstat(mii);
3719 	aneg = 0;
3720 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
3721 	    (IFM_ACTIVE | IFM_AVALID)) {
3722 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
3723 		case IFM_10_T:
3724 		case IFM_100_TX:
3725 			return;
3726 		case IFM_1000_T:
3727 			aneg++;
3728 			break;
3729 		default:
3730 			break;
3731 		}
3732 	}
3733 	miisc = LIST_FIRST(&mii->mii_phys);
3734 	phyno = miisc->mii_phy;
3735 	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3736 		PHY_RESET(miisc);
3737 	re_miibus_writereg(sc->rl_dev, phyno, MII_100T2CR, 0);
3738 	re_miibus_writereg(sc->rl_dev, phyno,
3739 	    MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
3740 	re_miibus_writereg(sc->rl_dev, phyno,
3741 	    MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG);
3742 	DELAY(1000);
3743 	if (aneg != 0) {
3744 		/*
3745 		 * Poll the link state until re(4) gets a 10/100Mbps link.
3746 		 */
3747 		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
3748 			mii_pollstat(mii);
3749 			if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
3750 			    == (IFM_ACTIVE | IFM_AVALID)) {
3751 				switch (IFM_SUBTYPE(mii->mii_media_active)) {
3752 				case IFM_10_T:
3753 				case IFM_100_TX:
3754 					return;
3755 				default:
3756 					break;
3757 				}
3758 			}
3759 			RL_UNLOCK(sc);
3760 			pause("relnk", hz);
3761 			RL_LOCK(sc);
3762 		}
3763 		if (i == MII_ANEGTICKS_GIGE)
3764 			device_printf(sc->rl_dev,
3765 			    "establishing a link failed, WOL may not work!\n");
3766 	}
3767 	/*
3768 	 * No link; force the MAC to a 100Mbps, full-duplex link.
3769 	 * The MAC does not require reprogramming when the resolved
3770 	 * speed/duplex changes, so this is mostly for completeness.
3771 	 */
3772 	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
3773 	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
3774 }
3775 
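/*
 * Program the chip's wake-on-LAN configuration from the interface WOL
 * capabilities and arm PME in the PCI power management registers.
 */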
3776 static void
3777 re_setwol(struct rl_softc *sc)
3778 {
3779 	struct ifnet		*ifp;
3780 	int			pmc;
3781 	uint16_t		pmstat;
3782 	uint8_t			v;
3783 
3784 	RL_LOCK_ASSERT(sc);
3785 
3786 	if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
3787 		return;
3788 
3789 	ifp = sc->rl_ifp;
3790 	/* Put controller into sleep mode. */
3791 	if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
3792 		if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
3793 			CSR_WRITE_1(sc, RL_GPIO,
3794 			    CSR_READ_1(sc, RL_GPIO) & ~0x01);
3795 	}
3796 	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
3797 		re_set_rxmode(sc);
3798 		if ((sc->rl_flags & RL_FLAG_WOL_MANLINK) != 0)
3799 			re_set_linkspeed(sc);
3800 		if ((sc->rl_flags & RL_FLAG_WOLRXENB) != 0)
3801 			CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RX_ENB);
3802 	}
3803 	/* Enable config register write. */
3804 	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
3805 
3806 	/* Enable PME. */
3807 	v = CSR_READ_1(sc, sc->rl_cfg1);
3808 	v &= ~RL_CFG1_PME;
3809 	if ((ifp->if_capenable & IFCAP_WOL) != 0)
3810 		v |= RL_CFG1_PME;
3811 	CSR_WRITE_1(sc, sc->rl_cfg1, v);
3812 
3813 	v = CSR_READ_1(sc, sc->rl_cfg3);
3814 	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
3815 	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
3816 		v |= RL_CFG3_WOL_MAGIC;
3817 	CSR_WRITE_1(sc, sc->rl_cfg3, v);
3818 
3819 	v = CSR_READ_1(sc, sc->rl_cfg5);
3820 	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST |
3821 	    RL_CFG5_WOL_LANWAKE);
3822 	if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
3823 		v |= RL_CFG5_WOL_UCAST;
3824 	if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
3825 		v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST;
3826 	if ((ifp->if_capenable & IFCAP_WOL) != 0)
3827 		v |= RL_CFG5_WOL_LANWAKE;
3828 	CSR_WRITE_1(sc, sc->rl_cfg5, v);
3829 
3830 	/* Config register write done. */
3831 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3832 
3833 	if ((ifp->if_capenable & IFCAP_WOL) == 0 &&
3834 	    (sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0)
3835 		CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) & ~0x80);
3836 	/*
3837 	 * The hardware appears to reset its link speed to 100Mbps in
3838 	 * power-down mode, so there is no need for the driver to
3839 	 * switch to 100Mbps itself.
3840 	 */
3841 
3842 	/* Request PME if WOL is requested. */
3843 	pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2);
3844 	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
3845 	if ((ifp->if_capenable & IFCAP_WOL) != 0)
3846 		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3847 	pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
3848 }
3849 
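/*
 * Disarm the wakeup sources programmed by re_setwol().  Called on
 * resume so stale WOL filters do not remain active during normal
 * operation.
 */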
3850 static void
3851 re_clrwol(struct rl_softc *sc)
3852 {
3853 	int			pmc;
3854 	uint8_t			v;
3855 
3856 	RL_LOCK_ASSERT(sc);
3857 
3858 	if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
3859 		return;
3860 
3861 	/* Enable config register write. */
3862 	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
3863 
3864 	v = CSR_READ_1(sc, sc->rl_cfg3);
3865 	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
3866 	CSR_WRITE_1(sc, sc->rl_cfg3, v);
3867 
3868 	/* Config register write done. */
3869 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3870 
3871 	v = CSR_READ_1(sc, sc->rl_cfg5);
3872 	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
3873 	v &= ~RL_CFG5_WOL_LANWAKE;
3874 	CSR_WRITE_1(sc, sc->rl_cfg5, v);
3875 }
3876 
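/*
 * Attach driver sysctl nodes: a statistics dump trigger for all chips
 * and, on MSI/MSI-X capable variants, an Rx interrupt moderation knob.
 * The knob defaults to RL_TIMER_DEFAULT and may be overridden by a
 * device hint (e.g. hint.re.0.int_rx_mod in /boot/device.hints,
 * assuming unit 0).
 */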
3877 static void
3878 re_add_sysctls(struct rl_softc *sc)
3879 {
3880 	struct sysctl_ctx_list	*ctx;
3881 	struct sysctl_oid_list	*children;
3882 	int			error;
3883 
3884 	ctx = device_get_sysctl_ctx(sc->rl_dev);
3885 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev));
3886 
3887 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "stats",
3888 	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, re_sysctl_stats, "I",
3889 	    "Statistics Information");
3890 	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0)
3891 		return;
3892 
3893 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "int_rx_mod",
3894 	    CTLTYPE_INT | CTLFLAG_RW, &sc->rl_int_rx_mod, 0,
3895 	    sysctl_hw_re_int_mod, "I", "re RX interrupt moderation");
3896 	/* Pull in device tunables. */
3897 	sc->rl_int_rx_mod = RL_TIMER_DEFAULT;
3898 	error = resource_int_value(device_get_name(sc->rl_dev),
3899 	    device_get_unit(sc->rl_dev), "int_rx_mod", &sc->rl_int_rx_mod);
3900 	if (error == 0) {
3901 		if (sc->rl_int_rx_mod < RL_TIMER_MIN ||
3902 		    sc->rl_int_rx_mod > RL_TIMER_MAX) {
3903 			device_printf(sc->rl_dev, "int_rx_mod value out of "
3904 			    "range; using default: %d\n",
3905 			    RL_TIMER_DEFAULT);
3906 			sc->rl_int_rx_mod = RL_TIMER_DEFAULT;
3907 		}
3908 	}
3909 
3911 
3912 static int
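/*
 * Handler for the "stats" sysctl.  Writing 1 to the node (e.g.
 * "sysctl dev.re.0.stats=1" from userland, assuming unit 0) asks the
 * chip to DMA its hardware statistics block into the preallocated
 * rl_stats area, then prints the counters to the console.
 */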
3913 re_sysctl_stats(SYSCTL_HANDLER_ARGS)
3914 {
3915 	struct rl_softc		*sc;
3916 	struct rl_stats		*stats;
3917 	int			error, i, result;
3918 
3919 	result = -1;
3920 	error = sysctl_handle_int(oidp, &result, 0, req);
3921 	if (error || req->newptr == NULL)
3922 		return (error);
3923 
3924 	if (result == 1) {
3925 		sc = (struct rl_softc *)arg1;
3926 		RL_LOCK(sc);
3927 		if ((sc->rl_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3928 			RL_UNLOCK(sc);
3929 			goto done;
3930 		}
3931 		bus_dmamap_sync(sc->rl_ldata.rl_stag,
3932 		    sc->rl_ldata.rl_smap, BUS_DMASYNC_PREREAD);
3933 		CSR_WRITE_4(sc, RL_DUMPSTATS_HI,
3934 		    RL_ADDR_HI(sc->rl_ldata.rl_stats_addr));
3935 		CSR_WRITE_4(sc, RL_DUMPSTATS_LO,
3936 		    RL_ADDR_LO(sc->rl_ldata.rl_stats_addr));
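		/*
		 * The low address is written a second time with the
		 * START bit set to kick off the statistics dump.
		 */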
3937 		CSR_WRITE_4(sc, RL_DUMPSTATS_LO,
3938 		    RL_ADDR_LO(sc->rl_ldata.rl_stats_addr |
3939 		    RL_DUMPSTATS_START));
3940 		for (i = RL_TIMEOUT; i > 0; i--) {
3941 			if ((CSR_READ_4(sc, RL_DUMPSTATS_LO) &
3942 			    RL_DUMPSTATS_START) == 0)
3943 				break;
3944 			DELAY(1000);
3945 		}
3946 		bus_dmamap_sync(sc->rl_ldata.rl_stag,
3947 		    sc->rl_ldata.rl_smap, BUS_DMASYNC_POSTREAD);
3948 		RL_UNLOCK(sc);
3949 		if (i == 0) {
3950 			device_printf(sc->rl_dev,
3951 			    "DUMP statistics request timed out\n");
3952 			return (ETIMEDOUT);
3953 		}
3954 done:
3955 		stats = sc->rl_ldata.rl_stats;
3956 		printf("%s statistics:\n", device_get_nameunit(sc->rl_dev));
3957 		printf("Tx frames : %ju\n",
3958 		    (uintmax_t)le64toh(stats->rl_tx_pkts));
3959 		printf("Rx frames : %ju\n",
3960 		    (uintmax_t)le64toh(stats->rl_rx_pkts));
3961 		printf("Tx errors : %ju\n",
3962 		    (uintmax_t)le64toh(stats->rl_tx_errs));
3963 		printf("Rx errors : %u\n",
3964 		    le32toh(stats->rl_rx_errs));
3965 		printf("Rx missed frames : %u\n",
3966 		    (uint32_t)le16toh(stats->rl_missed_pkts));
3967 		printf("Rx frame alignment errs : %u\n",
3968 		    (uint32_t)le16toh(stats->rl_rx_framealign_errs));
3969 		printf("Tx single collisions : %u\n",
3970 		    le32toh(stats->rl_tx_onecoll));
3971 		printf("Tx multiple collisions : %u\n",
3972 		    le32toh(stats->rl_tx_multicolls));
3973 		printf("Rx unicast frames : %ju\n",
3974 		    (uintmax_t)le64toh(stats->rl_rx_ucasts));
3975 		printf("Rx broadcast frames : %ju\n",
3976 		    (uintmax_t)le64toh(stats->rl_rx_bcasts));
3977 		printf("Rx multicast frames : %u\n",
3978 		    le32toh(stats->rl_rx_mcasts));
3979 		printf("Tx aborts : %u\n",
3980 		    (uint32_t)le16toh(stats->rl_tx_aborts));
3981 		printf("Tx underruns : %u\n",
3982 		    (uint32_t)le16toh(stats->rl_rx_underruns));
3983 	}
3984 
3985 	return (error);
3986 }
3987 
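/*
 * Generic bounded-integer sysctl handler: accept the new value only
 * if it lies within [low, high].
 */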
3988 static int
3989 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3990 {
3991 	int error, value;
3992 
3993 	if (arg1 == NULL)
3994 		return (EINVAL);
3995 	value = *(int *)arg1;
3996 	error = sysctl_handle_int(oidp, &value, 0, req);
3997 	if (error || req->newptr == NULL)
3998 		return (error);
3999 	if (value < low || value > high)
4000 		return (EINVAL);
4001 	*(int *)arg1 = value;
4002 
4003 	return (0);
4004 }
4005 
4006 static int
4007 sysctl_hw_re_int_mod(SYSCTL_HANDLER_ARGS)
4008 {
4009 
4010 	return (sysctl_int_range(oidp, arg1, arg2, req, RL_TIMER_MIN,
4011 	    RL_TIMER_MAX));
4012 }
4013