xref: /freebsd/sys/dev/re/if_re.c (revision aa3860851b9f6a6002d135b1cac7736e0995eedc)
/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1997, 1998-2003
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 * RealTek 8139C+/8169/8169S/8110S/8168/8111/8101E PCI NIC driver
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * This driver is designed to support RealTek's next generation of
 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
 * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S,
 * the RTL8110S, the RTL8168, the RTL8111 and the RTL8101E.
 *
 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
 * with the older 8139 family; however, it also supports a special
 * C+ mode of operation that provides several new performance enhancing
 * features. These include:
 *
 *	o Descriptor based DMA mechanism. Each descriptor represents
 *	  a single packet fragment. Data buffers may be aligned on
 *	  any byte boundary.
 *
 *	o 64-bit DMA
 *
 *	o TCP/IP checksum offload for both RX and TX
 *
 *	o High and normal priority transmit DMA rings
 *
 *	o VLAN tag insertion and extraction
 *
 *	o TCP large send (segmentation offload)
 *
 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
 * programming API is fairly straightforward. The RX filtering, EEPROM
 * access and PHY access are the same as they are on the older 8139 series
 * chips.
 *
 * The 8169 is a 64-bit 10/100/1000 ethernet MAC. It has almost the
 * same programming API and feature set as the 8139C+ with the following
 * differences and additions:
 *
 *	o 1000Mbps mode
 *
 *	o Jumbo frames
 *
 *	o GMII and TBI ports/registers for interfacing with copper
 *	  or fiber PHYs
 *
 *	o RX and TX DMA rings can have up to 1024 descriptors
 *	  (the 8139C+ allows a maximum of 64)
 *
 *	o Slight differences in register layout from the 8139C+
 *
 * The TX start and timer interrupt registers are at different locations
 * on the 8169 than they are on the 8139C+. Also, the status word in the
 * RX descriptor has a slightly different bit layout. The 8169 does not
 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
 * copper gigE PHY.
 *
 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
 * (the 'S' stands for 'single-chip'). These devices have the same
 * programming API as the older 8169, but also have some vendor-specific
 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
 * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
 *
 * This driver takes advantage of the RX and TX checksum offload and
 * VLAN tag insertion/extraction features. It also implements TX
 * interrupt moderation using the timer interrupt registers, which
 * significantly reduces TX interrupt load. There is also support
 * for jumbo frames; however, the 8169/8169S/8110S cannot transmit
 * jumbo frames larger than 7440 bytes, so the maximum MTU possible
 * with this driver is 7422 bytes (7440 less the 14-byte Ethernet
 * header and 4-byte CRC).
 */
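
/*
 * On FreeBSD this driver is either compiled into the kernel with
 * "device re" (which also requires "device miibus") or loaded as a
 * module, e.g. with "kldload if_re".
 */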

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/debugnet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/rl/if_rlreg.h>

MODULE_DEPEND(re, pci, 1, 1, 1);
MODULE_DEPEND(re, ether, 1, 1, 1);
MODULE_DEPEND(re, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/* Tunables. */
static int intr_filter = 0;
TUNABLE_INT("hw.re.intr_filter", &intr_filter);
static int msi_disable = 0;
TUNABLE_INT("hw.re.msi_disable", &msi_disable);
static int msix_disable = 0;
TUNABLE_INT("hw.re.msix_disable", &msix_disable);
static int prefer_iomap = 0;
TUNABLE_INT("hw.re.prefer_iomap", &prefer_iomap);
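
/*
 * These tunables are fetched from the kernel environment when the
 * module is loaded, so they can be set from loader.conf(5), e.g.:
 *
 *	hw.re.msi_disable="1"
 *	hw.re.prefer_iomap="1"
 */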

#define RE_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)

/*
 * Various supported device vendors/types and their names.
 */
static const struct rl_type re_devs[] = {
	{ DLINK_VENDORID, DLINK_DEVICEID_528T, 0,
	    "D-Link DGE-528(T) Gigabit Ethernet Adapter" },
	{ DLINK_VENDORID, DLINK_DEVICEID_530T_REVC, 0,
	    "D-Link DGE-530(T) Gigabit Ethernet Adapter" },
	{ RT_VENDORID, RT_DEVICEID_2600, 0,
	    "RealTek Killer E2600 Gigabit Ethernet Controller" },
	{ RT_VENDORID, RT_DEVICEID_8139, 0,
	    "RealTek 8139C+ 10/100BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8101E, 0,
	    "RealTek 810xE PCIe 10/100baseTX" },
	{ RT_VENDORID, RT_DEVICEID_8168, 0,
	    "RealTek 8168/8111 B/C/CP/D/DP/E/F/G PCIe Gigabit Ethernet" },
	{ RT_VENDORID, RT_DEVICEID_8161, 0,
	    "RealTek 8168 Gigabit Ethernet" },
	{ NCUBE_VENDORID, RT_DEVICEID_8168, 0,
	    "TP-Link TG-3468 v2 (RTL8168) Gigabit Ethernet" },
	{ RT_VENDORID, RT_DEVICEID_8169, 0,
	    "RealTek 8169/8169S/8169SB(L)/8110S/8110SB(L) Gigabit Ethernet" },
	{ RT_VENDORID, RT_DEVICEID_8169SC, 0,
	    "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" },
	{ COREGA_VENDORID, COREGA_DEVICEID_CGLAPCIGT, 0,
	    "Corega CG-LAPCIGT (RTL8169S) Gigabit Ethernet" },
	{ LINKSYS_VENDORID, LINKSYS_DEVICEID_EG1032, 0,
	    "Linksys EG1032 (RTL8169S) Gigabit Ethernet" },
	{ USR_VENDORID, USR_DEVICEID_997902, 0,
	    "US Robotics 997902 (RTL8169S) Gigabit Ethernet" }
};

static const struct rl_hwrev re_hwrevs[] = {
	{ RL_HWREV_8139, RL_8139, "", RL_MTU },
	{ RL_HWREV_8139A, RL_8139, "A", RL_MTU },
	{ RL_HWREV_8139AG, RL_8139, "A-G", RL_MTU },
	{ RL_HWREV_8139B, RL_8139, "B", RL_MTU },
	{ RL_HWREV_8130, RL_8139, "8130", RL_MTU },
	{ RL_HWREV_8139C, RL_8139, "C", RL_MTU },
	{ RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C", RL_MTU },
	{ RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+", RL_MTU },
	{ RL_HWREV_8168B_SPIN1, RL_8169, "8168", RL_JUMBO_MTU },
	{ RL_HWREV_8169, RL_8169, "8169", RL_JUMBO_MTU },
	{ RL_HWREV_8169S, RL_8169, "8169S", RL_JUMBO_MTU },
	{ RL_HWREV_8110S, RL_8169, "8110S", RL_JUMBO_MTU },
	{ RL_HWREV_8169_8110SB, RL_8169, "8169SB/8110SB", RL_JUMBO_MTU },
	{ RL_HWREV_8169_8110SC, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU },
	{ RL_HWREV_8169_8110SBL, RL_8169, "8169SBL/8110SBL", RL_JUMBO_MTU },
	{ RL_HWREV_8169_8110SCE, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU },
	{ RL_HWREV_8100, RL_8139, "8100", RL_MTU },
	{ RL_HWREV_8101, RL_8139, "8101", RL_MTU },
	{ RL_HWREV_8100E, RL_8169, "8100E", RL_MTU },
	{ RL_HWREV_8101E, RL_8169, "8101E", RL_MTU },
	{ RL_HWREV_8102E, RL_8169, "8102E", RL_MTU },
	{ RL_HWREV_8102EL, RL_8169, "8102EL", RL_MTU },
	{ RL_HWREV_8102EL_SPIN1, RL_8169, "8102EL", RL_MTU },
	{ RL_HWREV_8103E, RL_8169, "8103E", RL_MTU },
	{ RL_HWREV_8401E, RL_8169, "8401E", RL_MTU },
	{ RL_HWREV_8402, RL_8169, "8402", RL_MTU },
	{ RL_HWREV_8105E, RL_8169, "8105E", RL_MTU },
	{ RL_HWREV_8105E_SPIN1, RL_8169, "8105E", RL_MTU },
	{ RL_HWREV_8106E, RL_8169, "8106E", RL_MTU },
	{ RL_HWREV_8168B_SPIN2, RL_8169, "8168", RL_JUMBO_MTU },
	{ RL_HWREV_8168B_SPIN3, RL_8169, "8168", RL_JUMBO_MTU },
	{ RL_HWREV_8168C, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K },
	{ RL_HWREV_8168C_SPIN2, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K },
	{ RL_HWREV_8168CP, RL_8169, "8168CP/8111CP", RL_JUMBO_MTU_6K },
	{ RL_HWREV_8168D, RL_8169, "8168D/8111D", RL_JUMBO_MTU_9K },
	{ RL_HWREV_8168DP, RL_8169, "8168DP/8111DP", RL_JUMBO_MTU_9K },
	{ RL_HWREV_8168E, RL_8169, "8168E/8111E", RL_JUMBO_MTU_9K },
	{ RL_HWREV_8168E_VL, RL_8169, "8168E/8111E-VL", RL_JUMBO_MTU_6K },
	{ RL_HWREV_8168EP, RL_8169, "8168EP/8111EP", RL_JUMBO_MTU_9K },
	{ RL_HWREV_8168F, RL_8169, "8168F/8111F", RL_JUMBO_MTU_9K },
	{ RL_HWREV_8168FP, RL_8169, "8168FP/8111FP", RL_JUMBO_MTU_9K },
	{ RL_HWREV_8168G, RL_8169, "8168G/8111G", RL_JUMBO_MTU_9K },
	{ RL_HWREV_8168GU, RL_8169, "8168GU/8111GU", RL_JUMBO_MTU_9K },
	{ RL_HWREV_8168H, RL_8169, "8168H/8111H", RL_JUMBO_MTU_9K },
	{ RL_HWREV_8411, RL_8169, "8411", RL_JUMBO_MTU_9K },
	{ RL_HWREV_8411B, RL_8169, "8411B", RL_JUMBO_MTU_9K },
	{ 0, 0, NULL, 0 }
};

static int re_probe		(device_t);
static int re_attach		(device_t);
static int re_detach		(device_t);

static int re_encap		(struct rl_softc *, struct mbuf **);

static void re_dma_map_addr	(void *, bus_dma_segment_t *, int, int);
static int re_allocmem		(device_t, struct rl_softc *);
static __inline void re_discard_rxbuf
				(struct rl_softc *, int);
static int re_newbuf		(struct rl_softc *, int);
static int re_jumbo_newbuf	(struct rl_softc *, int);
static int re_rx_list_init	(struct rl_softc *);
static int re_jrx_list_init	(struct rl_softc *);
static int re_tx_list_init	(struct rl_softc *);
#ifdef RE_FIXUP_RX
static __inline void re_fixup_rx
				(struct mbuf *);
#endif
static int re_rxeof		(struct rl_softc *, int *);
static void re_txeof		(struct rl_softc *);
#ifdef DEVICE_POLLING
static int re_poll		(if_t, enum poll_cmd, int);
static int re_poll_locked	(if_t, enum poll_cmd, int);
#endif
static int re_intr		(void *);
static void re_intr_msi		(void *);
static void re_tick		(void *);
static void re_int_task		(void *, int);
static void re_start		(if_t);
static void re_start_locked	(if_t);
static void re_start_tx		(struct rl_softc *);
static int re_ioctl		(if_t, u_long, caddr_t);
static void re_init		(void *);
static void re_init_locked	(struct rl_softc *);
static void re_stop		(struct rl_softc *);
static void re_watchdog		(struct rl_softc *);
static int re_suspend		(device_t);
static int re_resume		(device_t);
static int re_shutdown		(device_t);
static int re_ifmedia_upd	(if_t);
static void re_ifmedia_sts	(if_t, struct ifmediareq *);

static void re_eeprom_putbyte	(struct rl_softc *, int);
static void re_eeprom_getword	(struct rl_softc *, int, u_int16_t *);
static void re_read_eeprom	(struct rl_softc *, caddr_t, int, int);
static int re_gmii_readreg	(device_t, int, int);
static int re_gmii_writereg	(device_t, int, int, int);

static int re_miibus_readreg	(device_t, int, int);
static int re_miibus_writereg	(device_t, int, int, int);
static void re_miibus_statchg	(device_t);

static void re_set_jumbo	(struct rl_softc *, int);
static void re_set_rxmode	(struct rl_softc *);
static void re_reset		(struct rl_softc *);
static void re_setwol		(struct rl_softc *);
static void re_clrwol		(struct rl_softc *);
static void re_set_linkspeed	(struct rl_softc *);

DEBUGNET_DEFINE(re);

#ifdef DEV_NETMAP	/* see ixgbe.c for details */
#include <dev/netmap/if_re_netmap.h>
MODULE_DEPEND(re, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

#ifdef RE_DIAG
static int re_diag		(struct rl_softc *);
#endif

static void re_add_sysctls	(struct rl_softc *);
static int re_sysctl_stats	(SYSCTL_HANDLER_ARGS);
static int sysctl_int_range	(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_re_int_mod	(SYSCTL_HANDLER_ARGS);

static device_method_t re_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		re_probe),
	DEVMETHOD(device_attach,	re_attach),
	DEVMETHOD(device_detach,	re_detach),
	DEVMETHOD(device_suspend,	re_suspend),
	DEVMETHOD(device_resume,	re_resume),
	DEVMETHOD(device_shutdown,	re_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	re_miibus_readreg),
	DEVMETHOD(miibus_writereg,	re_miibus_writereg),
	DEVMETHOD(miibus_statchg,	re_miibus_statchg),

	DEVMETHOD_END
};

static driver_t re_driver = {
	"re",
	re_methods,
	sizeof(struct rl_softc)
};

DRIVER_MODULE(re, pci, re_driver, 0, 0);
DRIVER_MODULE(miibus, re, miibus_driver, 0, 0);

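/*
 * EE_SET() and EE_CLR() set and clear individual bits in the EEPROM
 * command register; they are used below to bit-bang the serial EEPROM
 * protocol.
 */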
#define EE_SET(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) | x)

#define EE_CLR(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) & ~x)

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
static void
re_eeprom_putbyte(struct rl_softc *sc, int addr)
{
	int			d, i;

	d = addr | (RL_9346_READ << sc->rl_eewidth);

	/*
	 * Feed in each bit and strobe the clock.
	 */

	for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) {
		if (d & i) {
			EE_SET(RL_EE_DATAIN);
		} else {
			EE_CLR(RL_EE_DATAIN);
		}
		DELAY(100);
		EE_SET(RL_EE_CLK);
		DELAY(150);
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest)
{
	int			i;
	u_int16_t		word = 0;

	/*
	 * Send address of word we want to read.
	 */
	re_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i; i >>= 1) {
		EE_SET(RL_EE_CLK);
		DELAY(100);
		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
			word |= i;
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}

	*dest = word;
}

/*
 * Read a sequence of words from the EEPROM.
 */
static void
re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt)
{
	int			i;
	u_int16_t		word = 0, *ptr;

	CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);

	DELAY(100);

	for (i = 0; i < cnt; i++) {
		CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL);
		re_eeprom_getword(sc, off + i, &word);
		CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL);
		ptr = (u_int16_t *)(dest + (i * 2));
		*ptr = word;
	}

	CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
}

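/*
 * Read a PHY register through the GMII/MDIO interface found on the
 * gigabit (8169-class) chips.  The access goes through the RL_PHYAR
 * register: post the register number, then poll until the chip flags
 * the read data as valid.
 */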
static int
re_gmii_readreg(device_t dev, int phy, int reg)
{
	struct rl_softc		*sc;
	u_int32_t		rval;
	int			i;

	sc = device_get_softc(dev);

	/* Let the rgephy driver read the GMEDIASTAT register */

	if (reg == RL_GMEDIASTAT) {
		rval = CSR_READ_1(sc, RL_GMEDIASTAT);
		return (rval);
	}

	CSR_WRITE_4(sc, RL_PHYAR, reg << 16);

	for (i = 0; i < RL_PHY_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RL_PHYAR);
		if (rval & RL_PHYAR_BUSY)
			break;
		DELAY(25);
	}

	if (i == RL_PHY_TIMEOUT) {
		device_printf(sc->rl_dev, "PHY read failed\n");
		return (0);
	}

	/*
	 * Controller requires a 20us delay to process next MDIO request.
	 */
	DELAY(20);

	return (rval & RL_PHYAR_PHYDATA);
}

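/*
 * Write a PHY register through the GMII/MDIO interface: post the
 * register number and data to RL_PHYAR with the BUSY bit set, then
 * poll until the chip clears BUSY to signal completion.
 */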
static int
re_gmii_writereg(device_t dev, int phy, int reg, int data)
{
	struct rl_softc		*sc;
	u_int32_t		rval;
	int			i;

	sc = device_get_softc(dev);

	CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) |
	    (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY);

	for (i = 0; i < RL_PHY_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RL_PHYAR);
		if (!(rval & RL_PHYAR_BUSY))
			break;
		DELAY(25);
	}

	if (i == RL_PHY_TIMEOUT) {
		device_printf(sc->rl_dev, "PHY write failed\n");
		return (0);
	}

	/*
	 * Controller requires a 20us delay to process next MDIO request.
	 */
	DELAY(20);

	return (0);
}

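/*
 * miibus read method.  8169-class chips go through the GMII interface
 * above; on the 8139C+ the MII registers are mapped directly into the
 * chip's register space, so translate the MII register number to the
 * matching chip register and read that instead.
 */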
static int
re_miibus_readreg(device_t dev, int phy, int reg)
{
	struct rl_softc		*sc;
	u_int16_t		rval = 0;
	u_int16_t		re8139_reg = 0;

	sc = device_get_softc(dev);

	if (sc->rl_type == RL_8169) {
		rval = re_gmii_readreg(dev, phy, reg);
		return (rval);
	}

	switch (reg) {
	case MII_BMCR:
		re8139_reg = RL_BMCR;
		break;
	case MII_BMSR:
		re8139_reg = RL_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RL_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RL_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RL_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		return (0);
	/*
	 * Allow the rlphy driver to read the media status
	 * register. If we have a link partner which does not
	 * support NWAY, this is the register which will tell
	 * us the results of parallel detection.
	 */
	case RL_MEDIASTAT:
		rval = CSR_READ_1(sc, RL_MEDIASTAT);
		return (rval);
	default:
		device_printf(sc->rl_dev, "bad phy register\n");
		return (0);
	}
	rval = CSR_READ_2(sc, re8139_reg);
	if (sc->rl_type == RL_8139CPLUS && re8139_reg == RL_BMCR) {
		/* 8139C+ has different bit layout. */
		rval &= ~(BMCR_LOOP | BMCR_ISO);
	}
	return (rval);
}

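/*
 * miibus write method.  As with reads, 8169-class chips use the GMII
 * interface while the 8139C+ writes the translated register directly.
 */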
static int
re_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct rl_softc		*sc;
	u_int16_t		re8139_reg = 0;
	int			rval = 0;

	sc = device_get_softc(dev);

	if (sc->rl_type == RL_8169) {
		rval = re_gmii_writereg(dev, phy, reg, data);
		return (rval);
	}

	switch (reg) {
	case MII_BMCR:
		re8139_reg = RL_BMCR;
		if (sc->rl_type == RL_8139CPLUS) {
			/* 8139C+ has different bit layout. */
			data &= ~(BMCR_LOOP | BMCR_ISO);
		}
		break;
	case MII_BMSR:
		re8139_reg = RL_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RL_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RL_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RL_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		return (0);
	default:
		device_printf(sc->rl_dev, "bad phy register\n");
		return (0);
	}
	CSR_WRITE_2(sc, re8139_reg, data);
	return (0);
}

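/*
 * miibus link status change callback: track whether we have a usable
 * link in RL_FLAG_LINK so the transmit path can be gated on it.
 */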
static void
re_miibus_statchg(device_t dev)
{
	struct rl_softc		*sc;
	if_t			ifp;
	struct mii_data		*mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->rl_miibus);
	ifp = sc->rl_ifp;
	if (mii == NULL || ifp == NULL ||
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	sc->rl_flags &= ~RL_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->rl_flags |= RL_FLAG_LINK;
			break;
		case IFM_1000_T:
			if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0)
				break;
			sc->rl_flags |= RL_FLAG_LINK;
			break;
		default:
			break;
		}
	}
	/*
	 * RealTek controllers do not provide any interface to the RX/TX
	 * MACs for resolved speed, duplex and flow-control parameters.
	 */
}

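/*
 * if_foreach_llmaddr() callback: hash one link-level multicast address
 * into the 64-bit multicast filter.  The upper 6 bits of the big-endian
 * CRC32 of the address select one of the 64 filter bits.
 */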
static u_int
re_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t h, *hashes = arg;

	h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
	if (h < 32)
		hashes[0] |= (1 << h);
	else
		hashes[1] |= (1 << (h - 32));

	return (1);
}

/*
 * Set the RX configuration and 64-bit multicast hash filter.
 */
static void
re_set_rxmode(struct rl_softc *sc)
{
	if_t			ifp;
	uint32_t		h, hashes[2] = { 0, 0 };
	uint32_t		rxfilt;

	RL_LOCK_ASSERT(sc);

	ifp = sc->rl_ifp;

	rxfilt = RL_RXCFG_CONFIG | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD;
	if ((sc->rl_flags & RL_FLAG_EARLYOFF) != 0)
		rxfilt |= RL_RXCFG_EARLYOFF;
	else if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0)
		rxfilt |= RL_RXCFG_EARLYOFFV2;

	if (if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) {
		if (if_getflags(ifp) & IFF_PROMISC)
			rxfilt |= RL_RXCFG_RX_ALLPHYS;
		/*
		 * Unlike other hardware, we have to explicitly set
		 * RL_RXCFG_RX_MULTI to receive multicast frames in
		 * promiscuous mode.
		 */
		rxfilt |= RL_RXCFG_RX_MULTI;
		hashes[0] = hashes[1] = 0xffffffff;
		goto done;
	}

	if_foreach_llmaddr(ifp, re_hash_maddr, hashes);

	if (hashes[0] != 0 || hashes[1] != 0) {
		/*
		 * For some unfathomable reason, RealTek decided to
		 * reverse the order of the multicast hash registers
		 * in the PCI Express parts.  This means we have to
		 * write the hash pattern in reverse order for those
		 * devices.
		 */
		if ((sc->rl_flags & RL_FLAG_PCIE) != 0) {
			h = bswap32(hashes[0]);
			hashes[0] = bswap32(hashes[1]);
			hashes[1] = h;
		}
		rxfilt |= RL_RXCFG_RX_MULTI;
	}

	if (sc->rl_hwrev->rl_rev == RL_HWREV_8168F) {
		/* Disable multicast filtering due to silicon bug. */
		hashes[0] = 0xffffffff;
		hashes[1] = 0xffffffff;
	}

done:
	CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
}

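/*
 * Issue a software reset and wait for the RESET bit to self-clear.
 * Some revisions need additional magic afterwards to bring the MAC
 * back to a sane state.
 */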
static void
re_reset(struct rl_softc *sc)
{
	int			i;

	RL_LOCK_ASSERT(sc);

	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);

	for (i = 0; i < RL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
			break;
	}
	if (i == RL_TIMEOUT)
		device_printf(sc->rl_dev, "reset never completed!\n");

	if ((sc->rl_flags & RL_FLAG_MACRESET) != 0)
		CSR_WRITE_1(sc, 0x82, 1);
	if (sc->rl_hwrev->rl_rev == RL_HWREV_8169S)
		re_gmii_writereg(sc->rl_dev, 1, 0x0b, 0);
}

#ifdef RE_DIAG

/*
 * The following routine is designed to test for a defect on some
 * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64#
 * lines connected to the bus, however for a 32-bit only card, they
 * should be pulled high. The result of this defect is that the
 * NIC will not work right if you plug it into a 64-bit slot: DMA
 * operations will be done with 64-bit transfers, which will fail
 * because the 64-bit data lines aren't connected.
 *
 * There's no way to work around this (short of taking a soldering
 * iron to the board), however we can detect it. The method we use
 * here is to put the NIC into digital loopback mode, set the receiver
 * to promiscuous mode, and then try to send a frame. We then compare
 * the frame data we sent to what was received. If the data matches,
 * then the NIC is working correctly, otherwise we know the user has
 * a defective NIC which has been mistakenly plugged into a 64-bit PCI
 * slot. In the latter case, there's no way the NIC can work correctly,
 * so we print out a message on the console and abort the device attach.
 */

static int
re_diag(struct rl_softc *sc)
{
	if_t			ifp = sc->rl_ifp;
	struct mbuf		*m0;
	struct ether_header	*eh;
	struct rl_desc		*cur_rx;
	u_int16_t		status;
	u_int32_t		rxstat;
	int			total_len, i, error = 0, phyaddr;
	u_int8_t		dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
	u_int8_t		src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };

	/* Allocate a single mbuf */
	MGETHDR(m0, M_NOWAIT, MT_DATA);
	if (m0 == NULL)
		return (ENOBUFS);

	RL_LOCK(sc);

	/*
	 * Initialize the NIC in test mode. This sets the chip up
	 * so that it can send and receive frames, but performs the
	 * following special functions:
	 * - Puts receiver in promiscuous mode
	 * - Enables digital loopback mode
	 * - Leaves interrupts turned off
	 */

	if_setflagbit(ifp, IFF_PROMISC, 0);
	sc->rl_testmode = 1;
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	re_init_locked(sc);
	sc->rl_flags |= RL_FLAG_LINK;
	if (sc->rl_type == RL_8169)
		phyaddr = 1;
	else
		phyaddr = 0;

	re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_RESET);
	for (i = 0; i < RL_TIMEOUT; i++) {
		status = re_miibus_readreg(sc->rl_dev, phyaddr, MII_BMCR);
		if (!(status & BMCR_RESET))
			break;
	}

	re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_LOOP);
	CSR_WRITE_2(sc, RL_ISR, RL_INTRS);

	DELAY(100000);

	/* Put some data in the mbuf */

	eh = mtod(m0, struct ether_header *);
	bcopy((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN);
	bcopy((char *)&src, eh->ether_shost, ETHER_ADDR_LEN);
	eh->ether_type = htons(ETHERTYPE_IP);
	m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;

	/*
	 * Queue the packet, start transmission.
	 * Note: IF_HANDOFF() ultimately calls re_start() for us.
	 */

	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
	RL_UNLOCK(sc);
	/* XXX: re_diag must not be called when in ALTQ mode */
	if_handoff(ifp, m0, ifp);
	RL_LOCK(sc);
	m0 = NULL;

	/* Wait for it to propagate through the chip */

	DELAY(100000);
	for (i = 0; i < RL_TIMEOUT; i++) {
		status = CSR_READ_2(sc, RL_ISR);
		CSR_WRITE_2(sc, RL_ISR, status);
		if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) ==
		    (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK))
			break;
		DELAY(10);
	}

	if (i == RL_TIMEOUT) {
		device_printf(sc->rl_dev,
		    "diagnostic failed, failed to receive packet in"
		    " loopback mode\n");
		error = EIO;
		goto done;
	}

	/*
	 * The packet should have been dumped into the first
	 * entry in the RX DMA ring. Grab it from there.
	 */

	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
	    sc->rl_ldata.rl_rx_desc[0].rx_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag,
	    sc->rl_ldata.rl_rx_desc[0].rx_dmamap);

	m0 = sc->rl_ldata.rl_rx_desc[0].rx_m;
	sc->rl_ldata.rl_rx_desc[0].rx_m = NULL;
	eh = mtod(m0, struct ether_header *);

	cur_rx = &sc->rl_ldata.rl_rx_list[0];
	total_len = RL_RXBYTES(cur_rx);
	rxstat = le32toh(cur_rx->rl_cmdstat);

	if (total_len != ETHER_MIN_LEN) {
		device_printf(sc->rl_dev,
		    "diagnostic failed, received short packet\n");
		error = EIO;
		goto done;
	}

	/* Test that the received packet data matches what we sent. */

	if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) ||
	    bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) ||
	    ntohs(eh->ether_type) != ETHERTYPE_IP) {
		device_printf(sc->rl_dev, "WARNING, DMA FAILURE!\n");
		device_printf(sc->rl_dev, "expected TX data: %6D/%6D/0x%x\n",
		    dst, ":", src, ":", ETHERTYPE_IP);
		device_printf(sc->rl_dev, "received RX data: %6D/%6D/0x%x\n",
		    eh->ether_dhost, ":", eh->ether_shost, ":",
		    ntohs(eh->ether_type));
		device_printf(sc->rl_dev, "You may have a defective 32-bit "
		    "NIC plugged into a 64-bit PCI slot.\n");
		device_printf(sc->rl_dev, "Please re-install the NIC in a "
		    "32-bit slot for proper operation.\n");
		device_printf(sc->rl_dev, "Read the re(4) man page for more "
		    "details.\n");
		error = EIO;
	}

done:
	/* Turn interface off, release resources */

	sc->rl_testmode = 0;
	sc->rl_flags &= ~RL_FLAG_LINK;
	if_setflagbit(ifp, 0, IFF_PROMISC);
	re_stop(sc);
	if (m0 != NULL)
		m_freem(m0);

	RL_UNLOCK(sc);

	return (error);
}

#endif

/*
 * Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
re_probe(device_t dev)
{
	const struct rl_type	*t;
	uint16_t		devid, vendor;
	uint16_t		revid, sdevid;
	int			i;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	revid = pci_get_revid(dev);
	sdevid = pci_get_subdevice(dev);

	if (vendor == LINKSYS_VENDORID && devid == LINKSYS_DEVICEID_EG1032) {
		if (sdevid != LINKSYS_SUBDEVICE_EG1032_REV3) {
			/*
			 * Only attach to rev. 3 of the Linksys EG1032 adapter.
			 * Rev. 2 is supported by sk(4).
			 */
			return (ENXIO);
		}
	}

	if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) {
		if (revid != 0x20) {
			/* 8139, let rl(4) take care of this device. */
			return (ENXIO);
		}
	}

	t = re_devs;
	for (i = 0; i < nitems(re_devs); i++, t++) {
		if (vendor == t->rl_vid && devid == t->rl_did) {
			device_set_desc(dev, t->rl_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

/*
 * Map a single buffer address.
 */

static void
re_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t		*addr;

	if (error)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
	addr = arg;
	*addr = segs->ds_addr;
}

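/*
 * Allocate the DMA tags, descriptor rings, statistics buffer and
 * per-buffer DMA maps used by the driver.  Everything allocated here
 * is released in re_detach(), which attach also calls on failure, so
 * error paths can simply return and let detach clean up.
 */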
static int
re_allocmem(device_t dev, struct rl_softc *sc)
{
	bus_addr_t		lowaddr;
	bus_size_t		rx_list_size, tx_list_size;
	int			error;
	int			i;

	rx_list_size = sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc);
	tx_list_size = sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc);

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 * In order to use DAC, RL_CPLUSCMD_PCI_DAC bit of RL_CPLUS_CMD
	 * register should be set. However some RealTek chips are known
	 * to be buggy on DAC handling, therefore disable DAC by limiting
	 * DMA address space to 32bit. PCIe variants of RealTek chips
	 * may not have the limitation.
	 */
	lowaddr = BUS_SPACE_MAXADDR;
	if ((sc->rl_flags & RL_FLAG_PCIE) == 0)
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    lowaddr, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
	    NULL, NULL, &sc->rl_parent_tag);
	if (error) {
		device_printf(dev, "could not allocate parent DMA tag\n");
		return (error);
	}

	/*
	 * Allocate map for TX mbufs.
	 */
	error = bus_dma_tag_create(sc->rl_parent_tag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
	    NULL, MCLBYTES * RL_NTXSEGS, RL_NTXSEGS, 4096, 0,
	    NULL, NULL, &sc->rl_ldata.rl_tx_mtag);
	if (error) {
		device_printf(dev, "could not allocate TX DMA tag\n");
		return (error);
	}

	/*
	 * Allocate map for RX mbufs.
	 */

	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
		error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t),
		    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
		    MJUM9BYTES, 1, MJUM9BYTES, 0, NULL, NULL,
		    &sc->rl_ldata.rl_jrx_mtag);
		if (error) {
			device_printf(dev,
			    "could not allocate jumbo RX DMA tag\n");
			return (error);
		}
	}
	error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rl_ldata.rl_rx_mtag);
	if (error) {
		device_printf(dev, "could not allocate RX DMA tag\n");
		return (error);
	}

	/*
	 * Allocate map for TX descriptor list.
	 */
	error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, tx_list_size, 1, tx_list_size, 0,
	    NULL, NULL, &sc->rl_ldata.rl_tx_list_tag);
	if (error) {
		device_printf(dev, "could not allocate TX DMA ring tag\n");
		return (error);
	}

	/* Allocate DMA'able memory for the TX ring */

	error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag,
	    (void **)&sc->rl_ldata.rl_tx_list,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->rl_ldata.rl_tx_list_map);
	if (error) {
		device_printf(dev, "could not allocate TX DMA ring\n");
		return (error);
	}

	/* Load the map for the TX ring. */

	sc->rl_ldata.rl_tx_list_addr = 0;
	error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag,
	     sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list,
	     tx_list_size, re_dma_map_addr,
	     &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT);
	if (error != 0 || sc->rl_ldata.rl_tx_list_addr == 0) {
		device_printf(dev, "could not load TX DMA ring\n");
		return (ENOMEM);
	}

	/* Create DMA maps for TX buffers */

	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
		error = bus_dmamap_create(sc->rl_ldata.rl_tx_mtag, 0,
		    &sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
		if (error) {
			device_printf(dev, "could not create DMA map for TX\n");
			return (error);
		}
	}

	/*
	 * Allocate map for RX descriptor list.
	 */
	error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, rx_list_size, 1, rx_list_size, 0,
	    NULL, NULL, &sc->rl_ldata.rl_rx_list_tag);
	if (error) {
		device_printf(dev, "could not create RX DMA ring tag\n");
		return (error);
	}

	/* Allocate DMA'able memory for the RX ring */

	error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag,
	    (void **)&sc->rl_ldata.rl_rx_list,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->rl_ldata.rl_rx_list_map);
	if (error) {
		device_printf(dev, "could not allocate RX DMA ring\n");
		return (error);
	}

	/* Load the map for the RX ring. */

	sc->rl_ldata.rl_rx_list_addr = 0;
	error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag,
	     sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list,
	     rx_list_size, re_dma_map_addr,
	     &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT);
	if (error != 0 || sc->rl_ldata.rl_rx_list_addr == 0) {
		device_printf(dev, "could not load RX DMA ring\n");
		return (ENOMEM);
	}

	/* Create DMA maps for RX buffers */

	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
		error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0,
		    &sc->rl_ldata.rl_jrx_sparemap);
		if (error) {
			device_printf(dev,
			    "could not create spare DMA map for jumbo RX\n");
			return (error);
		}
		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
			error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0,
			    &sc->rl_ldata.rl_jrx_desc[i].rx_dmamap);
			if (error) {
				device_printf(dev,
				    "could not create DMA map for jumbo RX\n");
				return (error);
			}
		}
	}
	error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
	    &sc->rl_ldata.rl_rx_sparemap);
	if (error) {
		device_printf(dev, "could not create spare DMA map for RX\n");
		return (error);
	}
	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
		error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
		    &sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
		if (error) {
			device_printf(dev, "could not create DMA map for RX\n");
			return (error);
		}
	}

	/* Create DMA map for statistics. */
	error = bus_dma_tag_create(sc->rl_parent_tag, RL_DUMP_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct rl_stats), 1, sizeof(struct rl_stats), 0, NULL, NULL,
	    &sc->rl_ldata.rl_stag);
	if (error) {
		device_printf(dev, "could not create statistics DMA tag\n");
		return (error);
	}
	/* Allocate DMA'able memory for statistics. */
	error = bus_dmamem_alloc(sc->rl_ldata.rl_stag,
	    (void **)&sc->rl_ldata.rl_stats,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->rl_ldata.rl_smap);
	if (error) {
		device_printf(dev,
		    "could not allocate statistics DMA memory\n");
		return (error);
	}
	/* Load the map for statistics. */
	sc->rl_ldata.rl_stats_addr = 0;
	error = bus_dmamap_load(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap,
	    sc->rl_ldata.rl_stats, sizeof(struct rl_stats), re_dma_map_addr,
	     &sc->rl_ldata.rl_stats_addr, BUS_DMA_NOWAIT);
	if (error != 0 || sc->rl_ldata.rl_stats_addr == 0) {
		device_printf(dev, "could not load statistics DMA memory\n");
		return (ENOMEM);
	}

	return (0);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
re_attach(device_t dev)
{
	u_char			eaddr[ETHER_ADDR_LEN];
	u_int16_t		as[ETHER_ADDR_LEN / 2];
	struct rl_softc		*sc;
	if_t			ifp;
	const struct rl_hwrev	*hw_rev;
	int			capmask, error = 0, hwrev, i, msic, msixc,
				phy, reg, rid;
	u_int32_t		cap, ctl;
	u_int16_t		devid, re_did = 0;
	uint8_t			cfg;

	sc = device_get_softc(dev);
	sc->rl_dev = dev;

	mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	devid = pci_get_device(dev);
	/*
	 * Prefer memory space register mapping over I/O space.  However,
	 * the RTL8169SC does not seem to work when memory mapping is
	 * used, so always activate I/O mapping for that chip.
	 */
	if (devid == RT_DEVICEID_8169SC)
		prefer_iomap = 1;
	if (prefer_iomap == 0) {
		sc->rl_res_id = PCIR_BAR(1);
		sc->rl_res_type = SYS_RES_MEMORY;
		/* RTL8168/8101E seems to use different BARs. */
		if (devid == RT_DEVICEID_8168 || devid == RT_DEVICEID_8101E)
			sc->rl_res_id = PCIR_BAR(2);
	} else {
		sc->rl_res_id = PCIR_BAR(0);
		sc->rl_res_type = SYS_RES_IOPORT;
	}
	sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
	    &sc->rl_res_id, RF_ACTIVE);
	if (sc->rl_res == NULL && prefer_iomap == 0) {
		sc->rl_res_id = PCIR_BAR(0);
		sc->rl_res_type = SYS_RES_IOPORT;
		sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
		    &sc->rl_res_id, RF_ACTIVE);
	}
	if (sc->rl_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->rl_btag = rman_get_bustag(sc->rl_res);
	sc->rl_bhandle = rman_get_bushandle(sc->rl_res);

	msic = pci_msi_count(dev);
	msixc = pci_msix_count(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
		sc->rl_flags |= RL_FLAG_PCIE;
		sc->rl_expcap = reg;
	}
	if (bootverbose) {
		device_printf(dev, "MSI count : %d\n", msic);
		device_printf(dev, "MSI-X count : %d\n", msixc);
	}
	if (msix_disable > 0)
		msixc = 0;
	if (msi_disable > 0)
		msic = 0;
	/* Prefer MSI-X to MSI. */
	if (msixc > 0) {
		msixc = RL_MSI_MESSAGES;
		rid = PCIR_BAR(4);
		sc->rl_res_pba = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &rid, RF_ACTIVE);
		if (sc->rl_res_pba == NULL) {
			device_printf(sc->rl_dev,
			    "could not allocate MSI-X PBA resource\n");
		}
		if (sc->rl_res_pba != NULL &&
		    pci_alloc_msix(dev, &msixc) == 0) {
			if (msixc == RL_MSI_MESSAGES) {
				device_printf(dev, "Using %d MSI-X message\n",
				    msixc);
				sc->rl_flags |= RL_FLAG_MSIX;
			} else
				pci_release_msi(dev);
		}
		if ((sc->rl_flags & RL_FLAG_MSIX) == 0) {
			if (sc->rl_res_pba != NULL)
				bus_release_resource(dev, SYS_RES_MEMORY, rid,
				    sc->rl_res_pba);
			sc->rl_res_pba = NULL;
			msixc = 0;
		}
	}
	/* Prefer MSI to INTx. */
	if (msixc == 0 && msic > 0) {
		msic = RL_MSI_MESSAGES;
		if (pci_alloc_msi(dev, &msic) == 0) {
			if (msic == RL_MSI_MESSAGES) {
				device_printf(dev, "Using %d MSI message\n",
				    msic);
				sc->rl_flags |= RL_FLAG_MSI;
				/* Explicitly set MSI enable bit. */
				CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
				cfg = CSR_READ_1(sc, RL_CFG2);
				cfg |= RL_CFG2_MSI;
				CSR_WRITE_1(sc, RL_CFG2, cfg);
				CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
			} else
				pci_release_msi(dev);
		}
		if ((sc->rl_flags & RL_FLAG_MSI) == 0)
			msic = 0;
	}

	/* Allocate interrupt */
	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) {
		rid = 0;
		sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (sc->rl_irq[0] == NULL) {
			device_printf(dev, "couldn't allocate IRQ resources\n");
			error = ENXIO;
			goto fail;
		}
	} else {
		for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) {
			sc->rl_irq[i] = bus_alloc_resource_any(dev,
			    SYS_RES_IRQ, &rid, RF_ACTIVE);
			if (sc->rl_irq[i] == NULL) {
				device_printf(dev,
				    "couldn't allocate IRQ resources for "
				    "message %d\n", rid);
				error = ENXIO;
				goto fail;
			}
		}
	}

	if ((sc->rl_flags & RL_FLAG_MSI) == 0) {
		CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
		cfg = CSR_READ_1(sc, RL_CFG2);
		if ((cfg & RL_CFG2_MSI) != 0) {
			device_printf(dev, "turning off MSI enable bit.\n");
			cfg &= ~RL_CFG2_MSI;
			CSR_WRITE_1(sc, RL_CFG2, cfg);
		}
		CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
	}

	/* Disable ASPM L0S/L1 and CLKREQ. */
	if (sc->rl_expcap != 0) {
		cap = pci_read_config(dev, sc->rl_expcap +
		    PCIER_LINK_CAP, 2);
		if ((cap & PCIEM_LINK_CAP_ASPM) != 0) {
			ctl = pci_read_config(dev, sc->rl_expcap +
			    PCIER_LINK_CTL, 2);
			if ((ctl & (PCIEM_LINK_CTL_ECPM |
			    PCIEM_LINK_CTL_ASPMC)) != 0) {
				ctl &= ~(PCIEM_LINK_CTL_ECPM |
				    PCIEM_LINK_CTL_ASPMC);
				pci_write_config(dev, sc->rl_expcap +
				    PCIER_LINK_CTL, ctl, 2);
				device_printf(dev, "ASPM disabled\n");
			}
		} else
			device_printf(dev, "no ASPM capability\n");
	}

	hw_rev = re_hwrevs;
	hwrev = CSR_READ_4(sc, RL_TXCFG);
	switch (hwrev & 0x70000000) {
	case 0x00000000:
	case 0x10000000:
		device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0xfc800000);
		hwrev &= (RL_TXCFG_HWREV | 0x80000000);
		break;
	default:
		device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0x7c800000);
		sc->rl_macrev = hwrev & 0x00700000;
		hwrev &= RL_TXCFG_HWREV;
		break;
	}
	device_printf(dev, "MAC rev. 0x%08x\n", sc->rl_macrev);
	while (hw_rev->rl_desc != NULL) {
		if (hw_rev->rl_rev == hwrev) {
			sc->rl_type = hw_rev->rl_type;
			sc->rl_hwrev = hw_rev;
			break;
		}
		hw_rev++;
	}
	if (hw_rev->rl_desc == NULL) {
		device_printf(dev, "Unknown H/W revision: 0x%08x\n", hwrev);
		error = ENXIO;
		goto fail;
	}

	switch (hw_rev->rl_rev) {
	case RL_HWREV_8139CPLUS:
		sc->rl_flags |= RL_FLAG_FASTETHER | RL_FLAG_AUTOPAD;
		break;
	case RL_HWREV_8100E:
	case RL_HWREV_8101E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_FASTETHER;
		break;
	case RL_HWREV_8102E:
	case RL_HWREV_8102EL:
	case RL_HWREV_8102EL_SPIN1:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 |
		    RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD;
		break;
	case RL_HWREV_8103E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 |
		    RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_MACSLEEP;
		break;
	case RL_HWREV_8401E:
	case RL_HWREV_8105E:
	case RL_HWREV_8105E_SPIN1:
	case RL_HWREV_8106E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD;
		break;
	case RL_HWREV_8402:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD |
		    RL_FLAG_CMDSTOP_WAIT_TXQ;
		break;
	case RL_HWREV_8168B_SPIN1:
	case RL_HWREV_8168B_SPIN2:
		sc->rl_flags |= RL_FLAG_WOLRXENB;
		/* FALLTHROUGH */
	case RL_HWREV_8168B_SPIN3:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT;
		break;
	case RL_HWREV_8168C_SPIN2:
		sc->rl_flags |= RL_FLAG_MACSLEEP;
		/* FALLTHROUGH */
	case RL_HWREV_8168C:
		if (sc->rl_macrev == 0x00200000)
			sc->rl_flags |= RL_FLAG_MACSLEEP;
		/* FALLTHROUGH */
	case RL_HWREV_8168CP:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK;
		break;
	case RL_HWREV_8168D:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
		    RL_FLAG_WOL_MANLINK;
		break;
	case RL_HWREV_8168DP:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_AUTOPAD |
		    RL_FLAG_JUMBOV2 | RL_FLAG_WAIT_TXPOLL | RL_FLAG_WOL_MANLINK;
		break;
	case RL_HWREV_8168E:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
		    RL_FLAG_WOL_MANLINK;
		break;
	case RL_HWREV_8168E_VL:
	case RL_HWREV_8168F:
		sc->rl_flags |= RL_FLAG_EARLYOFF;
		/* FALLTHROUGH */
	case RL_HWREV_8411:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
		    RL_FLAG_CMDSTOP_WAIT_TXQ | RL_FLAG_WOL_MANLINK;
		break;
	case RL_HWREV_8168EP:
	case RL_HWREV_8168FP:
	case RL_HWREV_8168G:
	case RL_HWREV_8411B:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 |
		    RL_FLAG_CMDSTOP_WAIT_TXQ | RL_FLAG_WOL_MANLINK |
		    RL_FLAG_8168G_PLUS;
		break;
	case RL_HWREV_8168GU:
	case RL_HWREV_8168H:
		if (pci_get_device(dev) == RT_DEVICEID_8101E) {
			/* RTL8106E(US), RTL8107E */
			sc->rl_flags |= RL_FLAG_FASTETHER;
		} else
			sc->rl_flags |= RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK;

		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD | RL_FLAG_CMDSTOP_WAIT_TXQ |
		    RL_FLAG_8168G_PLUS;
		break;
	case RL_HWREV_8169_8110SB:
	case RL_HWREV_8169_8110SBL:
	case RL_HWREV_8169_8110SC:
	case RL_HWREV_8169_8110SCE:
		sc->rl_flags |= RL_FLAG_PHYWAKE;
		/* FALLTHROUGH */
	case RL_HWREV_8169:
	case RL_HWREV_8169S:
	case RL_HWREV_8110S:
		sc->rl_flags |= RL_FLAG_MACRESET;
		break;
	default:
		break;
	}

	if (sc->rl_hwrev->rl_rev == RL_HWREV_8139CPLUS) {
		sc->rl_cfg0 = RL_8139_CFG0;
		sc->rl_cfg1 = RL_8139_CFG1;
		sc->rl_cfg2 = 0;
		sc->rl_cfg3 = RL_8139_CFG3;
		sc->rl_cfg4 = RL_8139_CFG4;
		sc->rl_cfg5 = RL_8139_CFG5;
	} else {
		sc->rl_cfg0 = RL_CFG0;
		sc->rl_cfg1 = RL_CFG1;
		sc->rl_cfg2 = RL_CFG2;
		sc->rl_cfg3 = RL_CFG3;
		sc->rl_cfg4 = RL_CFG4;
		sc->rl_cfg5 = RL_CFG5;
	}

	/* Reset the adapter. */
	RL_LOCK(sc);
	re_reset(sc);
	RL_UNLOCK(sc);

	/* Enable PME. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
	cfg = CSR_READ_1(sc, sc->rl_cfg1);
	cfg |= RL_CFG1_PME;
	CSR_WRITE_1(sc, sc->rl_cfg1, cfg);
	cfg = CSR_READ_1(sc, sc->rl_cfg5);
	cfg &= RL_CFG5_PME_STS;
	CSR_WRITE_1(sc, sc->rl_cfg5, cfg);
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	if ((sc->rl_flags & RL_FLAG_PAR) != 0) {
		/*
		 * XXX Should have a better way to extract station
		 * address from EEPROM.
		 */
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
	} else {
		sc->rl_eewidth = RL_9356_ADDR_LEN;
		re_read_eeprom(sc, (caddr_t)&re_did, 0, 1);
		if (re_did != 0x8129)
			sc->rl_eewidth = RL_9346_ADDR_LEN;

		/*
		 * Get station address from the EEPROM.
		 */
		re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3);
		for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
			as[i] = le16toh(as[i]);
		bcopy(as, eaddr, ETHER_ADDR_LEN);
	}

	if (sc->rl_type == RL_8169) {
		/* Set RX length mask and number of descriptors. */
		sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN;
		sc->rl_txstart = RL_GTXSTART;
		sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT;
		sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT;
	} else {
		/* Set RX length mask and number of descriptors. */
		sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN;
		sc->rl_txstart = RL_TXSTART;
		sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT;
		sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT;
	}

	error = re_allocmem(dev, sc);
	if (error)
		goto fail;
	re_add_sysctls(sc);

	ifp = sc->rl_ifp = if_alloc(IFT_ETHER);

	/* Take controller out of deep sleep mode. */
	if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
		if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
			CSR_WRITE_1(sc, RL_GPIO,
			    CSR_READ_1(sc, RL_GPIO) | 0x01);
		else
			CSR_WRITE_1(sc, RL_GPIO,
			    CSR_READ_1(sc, RL_GPIO) & ~0x01);
	}

	/* Take PHY out of power down mode. */
	if ((sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0) {
		CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80);
		if (hw_rev->rl_rev == RL_HWREV_8401E)
			CSR_WRITE_1(sc, 0xD1, CSR_READ_1(sc, 0xD1) & ~0x08);
	}
	if ((sc->rl_flags & RL_FLAG_PHYWAKE) != 0) {
		re_gmii_writereg(dev, 1, 0x1f, 0);
		re_gmii_writereg(dev, 1, 0x0e, 0);
	}

	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, re_ioctl);
	if_setstartfn(ifp, re_start);
	/*
	 * The RTL8168/8111C generates frames with incorrect IP checksums
	 * if the packet has IP options, so disable TX checksum offloading
	 * on those revisions.
	 */
	if (sc->rl_hwrev->rl_rev == RL_HWREV_8168C ||
	    sc->rl_hwrev->rl_rev == RL_HWREV_8168C_SPIN2 ||
	    sc->rl_hwrev->rl_rev == RL_HWREV_8168CP) {
		if_sethwassist(ifp, 0);
		if_setcapabilities(ifp, IFCAP_RXCSUM | IFCAP_TSO4);
	} else {
		if_sethwassist(ifp, CSUM_IP | CSUM_TCP | CSUM_UDP);
		if_setcapabilities(ifp, IFCAP_HWCSUM | IFCAP_TSO4);
	}
	if_sethwassistbits(ifp, CSUM_TSO, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));
	if_setinitfn(ifp, re_init);
	if_setsendqlen(ifp, RL_IFQ_MAXLEN);
	if_setsendqready(ifp);

	NET_TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc);

#define	RE_PHYAD_INTERNAL	 0

	/* Do MII setup. */
	phy = RE_PHYAD_INTERNAL;
	if (sc->rl_type == RL_8169)
		phy = 1;
	capmask = BMSR_DEFCAPMASK;
	if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0)
		capmask &= ~BMSR_EXTSTAT;
	error = mii_attach(dev, &sc->rl_miibus, ifp, re_ifmedia_upd,
	    re_ifmedia_sts, capmask, phy, MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	/* If address was not found, create one based on the hostid and name. */
	if (ETHER_IS_ZERO(eaddr)) {
		ether_gen_addr(ifp, (struct ether_addr *)eaddr);
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* VLAN capability setup */
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING, 0);
	if (if_getcapabilities(ifp) & IFCAP_HWCSUM)
		if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM, 0);
	/* Enable WOL if PM is supported. */
	if (pci_find_cap(sc->rl_dev, PCIY_PMG, &reg) == 0)
		if_setcapabilitiesbit(ifp, IFCAP_WOL, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));
	if_setcapenablebit(ifp, 0, (IFCAP_WOL_UCAST | IFCAP_WOL_MCAST));
1692 	/*
1693 	 * Don't enable TSO by default.  It is known to generate
1694 	 * corrupted TCP segments (bad TCP options) under certain
1695 	 * circumstances.
1696 	 */
1697 	if_sethwassistbits(ifp, 0, CSUM_TSO);
1698 	if_setcapenablebit(ifp, 0, (IFCAP_TSO4 | IFCAP_VLAN_HWTSO));
1699 #ifdef DEVICE_POLLING
1700 	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
1701 #endif
1702 	/*
1703 	 * Tell the upper layer(s) we support long frames.
1704 	 * Must appear after the call to ether_ifattach() because
1705 	 * ether_ifattach() sets ifi_hdrlen to the default value.
1706 	 */
1707 	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
1708 
1709 #ifdef DEV_NETMAP
1710 	re_netmap_attach(sc);
1711 #endif /* DEV_NETMAP */
1712 
1713 #ifdef RE_DIAG
1714 	/*
1715 	 * Perform hardware diagnostic on the original RTL8169.
1716 	 * Some 32-bit cards were incorrectly wired and would
1717 	 * malfunction if plugged into a 64-bit slot.
1718 	 */
1719 	if (hwrev == RL_HWREV_8169) {
1720 		error = re_diag(sc);
1721 		if (error) {
1722 			device_printf(dev,
1723 			    "attach aborted due to hardware diag failure\n");
1724 			ether_ifdetach(ifp);
1725 			goto fail;
1726 		}
1727 	}
1728 #endif
1729 
1730 #ifdef RE_TX_MODERATION
1731 	intr_filter = 1;
1732 #endif
1733 	/* Hook interrupt last to avoid having to lock softc */
1734 	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 &&
1735 	    intr_filter == 0) {
1736 		error = bus_setup_intr(dev, sc->rl_irq[0],
1737 		    INTR_TYPE_NET | INTR_MPSAFE, NULL, re_intr_msi, sc,
1738 		    &sc->rl_intrhand[0]);
1739 	} else {
1740 		error = bus_setup_intr(dev, sc->rl_irq[0],
1741 		    INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc,
1742 		    &sc->rl_intrhand[0]);
1743 	}
1744 	if (error) {
1745 		device_printf(dev, "couldn't set up irq\n");
1746 		ether_ifdetach(ifp);
1747 		goto fail;
1748 	}
1749 
1750 	DEBUGNET_SET(ifp, re);
1751 
1752 fail:
1753 	if (error)
1754 		re_detach(dev);
1755 
1756 	return (error);
1757 }
1758 
1759 /*
1760  * Shutdown hardware and free up resources. This can be called any
1761  * time after the mutex has been initialized. It is called in both
1762  * the error case in attach and the normal detach case so it needs
1763  * to be careful about only freeing resources that have actually been
1764  * allocated.
1765  */
1766 static int
1767 re_detach(device_t dev)
1768 {
1769 	struct rl_softc		*sc;
1770 	if_t ifp;
1771 	int			i, rid;
1772 
1773 	sc = device_get_softc(dev);
1774 	ifp = sc->rl_ifp;
1775 	KASSERT(mtx_initialized(&sc->rl_mtx), ("re mutex not initialized"));
1776 
1777 	/* These should only be active if attach succeeded */
1778 	if (device_is_attached(dev)) {
1779 #ifdef DEVICE_POLLING
1780 		if (if_getcapenable(ifp) & IFCAP_POLLING)
1781 			ether_poll_deregister(ifp);
1782 #endif
1783 		RL_LOCK(sc);
1784 #if 0
1785 		sc->suspended = 1;
1786 #endif
1787 		re_stop(sc);
1788 		RL_UNLOCK(sc);
1789 		callout_drain(&sc->rl_stat_callout);
1790 		taskqueue_drain(taskqueue_fast, &sc->rl_inttask);
1791 		/*
1792 		 * Force off the IFF_UP flag here, in case someone
1793 		 * still had a BPF descriptor attached to this
1794 		 * interface. If they do, ether_ifdetach() will cause
1795 		 * the BPF code to try to clear the promisc mode
1796 		 * flag, which will bubble down to re_ioctl(),
1797 		 * which will try to call re_init() again. This will
1798 		 * turn the NIC back on and restart the MII ticker,
1799 		 * which will panic the system when the kernel tries
1800 		 * to invoke the re_tick() function that isn't there
1801 		 * anymore.
1802 		 */
1803 		if_setflagbits(ifp, 0, IFF_UP);
1804 		ether_ifdetach(ifp);
1805 	}
1806 	if (sc->rl_miibus)
1807 		device_delete_child(dev, sc->rl_miibus);
1808 	bus_generic_detach(dev);
1809 
1810 	/*
1811 	 * The rest is resource deallocation, so we should already be
1812 	 * stopped here.
1813 	 */
1814 
1815 	if (sc->rl_intrhand[0] != NULL) {
1816 		bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]);
1817 		sc->rl_intrhand[0] = NULL;
1818 	}
1819 	if (ifp != NULL) {
1820 #ifdef DEV_NETMAP
1821 		netmap_detach(ifp);
1822 #endif /* DEV_NETMAP */
1823 		if_free(ifp);
1824 	}
1825 	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0)
1826 		rid = 0;
1827 	else
1828 		rid = 1;
1829 	if (sc->rl_irq[0] != NULL) {
1830 		bus_release_resource(dev, SYS_RES_IRQ, rid, sc->rl_irq[0]);
1831 		sc->rl_irq[0] = NULL;
1832 	}
1833 	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0)
1834 		pci_release_msi(dev);
1835 	if (sc->rl_res_pba) {
1836 		rid = PCIR_BAR(4);
1837 		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->rl_res_pba);
1838 	}
1839 	if (sc->rl_res)
1840 		bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id,
1841 		    sc->rl_res);
1842 
1843 	/* Unload and free the RX DMA ring memory and map */
1844 
1845 	if (sc->rl_ldata.rl_rx_list_tag) {
1846 		if (sc->rl_ldata.rl_rx_list_addr)
1847 			bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag,
1848 			    sc->rl_ldata.rl_rx_list_map);
1849 		if (sc->rl_ldata.rl_rx_list)
1850 			bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag,
1851 			    sc->rl_ldata.rl_rx_list,
1852 			    sc->rl_ldata.rl_rx_list_map);
1853 		bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag);
1854 	}
1855 
1856 	/* Unload and free the TX DMA ring memory and map */
1857 
1858 	if (sc->rl_ldata.rl_tx_list_tag) {
1859 		if (sc->rl_ldata.rl_tx_list_addr)
1860 			bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag,
1861 			    sc->rl_ldata.rl_tx_list_map);
1862 		if (sc->rl_ldata.rl_tx_list)
1863 			bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag,
1864 			    sc->rl_ldata.rl_tx_list,
1865 			    sc->rl_ldata.rl_tx_list_map);
1866 		bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag);
1867 	}
1868 
1869 	/* Destroy all the RX and TX buffer maps */
1870 
1871 	if (sc->rl_ldata.rl_tx_mtag) {
1872 		for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
1873 			if (sc->rl_ldata.rl_tx_desc[i].tx_dmamap)
1874 				bus_dmamap_destroy(sc->rl_ldata.rl_tx_mtag,
1875 				    sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
1876 		}
1877 		bus_dma_tag_destroy(sc->rl_ldata.rl_tx_mtag);
1878 	}
1879 	if (sc->rl_ldata.rl_rx_mtag) {
1880 		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1881 			if (sc->rl_ldata.rl_rx_desc[i].rx_dmamap)
1882 				bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
1883 				    sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
1884 		}
1885 		if (sc->rl_ldata.rl_rx_sparemap)
1886 			bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
1887 			    sc->rl_ldata.rl_rx_sparemap);
1888 		bus_dma_tag_destroy(sc->rl_ldata.rl_rx_mtag);
1889 	}
1890 	if (sc->rl_ldata.rl_jrx_mtag) {
1891 		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
1892 			if (sc->rl_ldata.rl_jrx_desc[i].rx_dmamap)
1893 				bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag,
1894 				    sc->rl_ldata.rl_jrx_desc[i].rx_dmamap);
1895 		}
1896 		if (sc->rl_ldata.rl_jrx_sparemap)
1897 			bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag,
1898 			    sc->rl_ldata.rl_jrx_sparemap);
1899 		bus_dma_tag_destroy(sc->rl_ldata.rl_jrx_mtag);
1900 	}
1901 	/* Unload and free the stats buffer and map */
1902 
1903 	if (sc->rl_ldata.rl_stag) {
1904 		if (sc->rl_ldata.rl_stats_addr)
1905 			bus_dmamap_unload(sc->rl_ldata.rl_stag,
1906 			    sc->rl_ldata.rl_smap);
1907 		if (sc->rl_ldata.rl_stats)
1908 			bus_dmamem_free(sc->rl_ldata.rl_stag,
1909 			    sc->rl_ldata.rl_stats, sc->rl_ldata.rl_smap);
1910 		bus_dma_tag_destroy(sc->rl_ldata.rl_stag);
1911 	}
1912 
1913 	if (sc->rl_parent_tag)
1914 		bus_dma_tag_destroy(sc->rl_parent_tag);
1915 
1916 	mtx_destroy(&sc->rl_mtx);
1917 
1918 	return (0);
1919 }
1920 
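/*
 * Return an RX descriptor to the chip with its current mbuf still
 * attached. Used when a received frame is dropped (error or
 * allocation failure) so the buffer can simply be reused, while
 * preserving the EOR bit on the last ring entry.
 */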
1921 static __inline void
1922 re_discard_rxbuf(struct rl_softc *sc, int idx)
1923 {
1924 	struct rl_desc		*desc;
1925 	struct rl_rxdesc	*rxd;
1926 	uint32_t		cmdstat;
1927 
1928 	if (if_getmtu(sc->rl_ifp) > RL_MTU &&
1929 	    (sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
1930 		rxd = &sc->rl_ldata.rl_jrx_desc[idx];
1931 	else
1932 		rxd = &sc->rl_ldata.rl_rx_desc[idx];
1933 	desc = &sc->rl_ldata.rl_rx_list[idx];
1934 	desc->rl_vlanctl = 0;
1935 	cmdstat = rxd->rx_size;
1936 	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
1937 		cmdstat |= RL_RDESC_CMD_EOR;
1938 	desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
1939 }
1940 
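/*
 * Attach a fresh 2K mbuf cluster to RX descriptor 'idx'. The new
 * mbuf is loaded into the spare DMA map first so that a failed load
 * leaves the descriptor's existing mbuf and mapping untouched; on
 * success the spare map and the descriptor's map are swapped.
 */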
1941 static int
1942 re_newbuf(struct rl_softc *sc, int idx)
1943 {
1944 	struct mbuf		*m;
1945 	struct rl_rxdesc	*rxd;
1946 	bus_dma_segment_t	segs[1];
1947 	bus_dmamap_t		map;
1948 	struct rl_desc		*desc;
1949 	uint32_t		cmdstat;
1950 	int			error, nsegs;
1951 
1952 	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1953 	if (m == NULL)
1954 		return (ENOBUFS);
1955 
1956 	m->m_len = m->m_pkthdr.len = MCLBYTES;
1957 #ifdef RE_FIXUP_RX
1958 	/*
1959 	 * This is part of an evil trick to deal with non-x86 platforms.
1960 	 * The RealTek chip requires RX buffers to be aligned on 64-bit
1961 	 * boundaries, but that will hose non-x86 machines. To get around
1962 	 * this, we leave some empty space at the start of each buffer
1963 	 * and for non-x86 hosts, we copy the buffer back six bytes
1964 	 * to achieve word alignment. This is slightly more efficient
1965 	 * than allocating a new buffer, copying the contents, and
1966 	 * discarding the old buffer.
1967 	 */
1968 	m_adj(m, RE_ETHER_ALIGN);
1969 #endif
1970 	error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_rx_mtag,
1971 	    sc->rl_ldata.rl_rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
1972 	if (error != 0) {
1973 		m_freem(m);
1974 		return (ENOBUFS);
1975 	}
1976 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1977 
1978 	rxd = &sc->rl_ldata.rl_rx_desc[idx];
1979 	if (rxd->rx_m != NULL) {
1980 		bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap,
1981 		    BUS_DMASYNC_POSTREAD);
1982 		bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap);
1983 	}
1984 
1985 	rxd->rx_m = m;
1986 	map = rxd->rx_dmamap;
1987 	rxd->rx_dmamap = sc->rl_ldata.rl_rx_sparemap;
1988 	rxd->rx_size = segs[0].ds_len;
1989 	sc->rl_ldata.rl_rx_sparemap = map;
1990 	bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap,
1991 	    BUS_DMASYNC_PREREAD);
1992 
1993 	desc = &sc->rl_ldata.rl_rx_list[idx];
1994 	desc->rl_vlanctl = 0;
1995 	desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr));
1996 	desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr));
1997 	cmdstat = segs[0].ds_len;
1998 	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
1999 		cmdstat |= RL_RDESC_CMD_EOR;
2000 	desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
2001 
2002 	return (0);
2003 }
2004 
2005 static int
2006 re_jumbo_newbuf(struct rl_softc *sc, int idx)
2007 {
2008 	struct mbuf		*m;
2009 	struct rl_rxdesc	*rxd;
2010 	bus_dma_segment_t	segs[1];
2011 	bus_dmamap_t		map;
2012 	struct rl_desc		*desc;
2013 	uint32_t		cmdstat;
2014 	int			error, nsegs;
2015 
2016 	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
2017 	if (m == NULL)
2018 		return (ENOBUFS);
2019 	m->m_len = m->m_pkthdr.len = MJUM9BYTES;
2020 #ifdef RE_FIXUP_RX
2021 	m_adj(m, RE_ETHER_ALIGN);
2022 #endif
2023 	error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_jrx_mtag,
2024 	    sc->rl_ldata.rl_jrx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
2025 	if (error != 0) {
2026 		m_freem(m);
2027 		return (ENOBUFS);
2028 	}
2029 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2030 
2031 	rxd = &sc->rl_ldata.rl_jrx_desc[idx];
2032 	if (rxd->rx_m != NULL) {
2033 		bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap,
2034 		    BUS_DMASYNC_POSTREAD);
2035 		bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap);
2036 	}
2037 
2038 	rxd->rx_m = m;
2039 	map = rxd->rx_dmamap;
2040 	rxd->rx_dmamap = sc->rl_ldata.rl_jrx_sparemap;
2041 	rxd->rx_size = segs[0].ds_len;
2042 	sc->rl_ldata.rl_jrx_sparemap = map;
2043 	bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap,
2044 	    BUS_DMASYNC_PREREAD);
2045 
2046 	desc = &sc->rl_ldata.rl_rx_list[idx];
2047 	desc->rl_vlanctl = 0;
2048 	desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr));
2049 	desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr));
2050 	cmdstat = segs[0].ds_len;
2051 	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
2052 		cmdstat |= RL_RDESC_CMD_EOR;
2053 	desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
2054 
2055 	return (0);
2056 }
2057 
2058 #ifdef RE_FIXUP_RX
2059 static __inline void
2060 re_fixup_rx(struct mbuf *m)
2061 {
2062 	int                     i;
2063 	uint16_t                *src, *dst;
2064 
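	/*
	 * Copy the frame back by RE_ETHER_ALIGN - ETHER_ALIGN bytes
	 * (the six-byte shift described in re_newbuf()), one 16-bit
	 * word at a time, so the payload ends up word aligned.
	 */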
2065 	src = mtod(m, uint16_t *);
2066 	dst = src - (RE_ETHER_ALIGN - ETHER_ALIGN) / sizeof *src;
2067 
2068 	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
2069 		*dst++ = *src++;
2070 
2071 	m->m_data -= RE_ETHER_ALIGN - ETHER_ALIGN;
2072 }
2073 #endif
2074 
2075 static int
2076 re_tx_list_init(struct rl_softc *sc)
2077 {
2078 	struct rl_desc		*desc;
2079 	int			i;
2080 
2081 	RL_LOCK_ASSERT(sc);
2082 
2083 	bzero(sc->rl_ldata.rl_tx_list,
2084 	    sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc));
2085 	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++)
2086 		sc->rl_ldata.rl_tx_desc[i].tx_m = NULL;
2087 #ifdef DEV_NETMAP
2088 	re_netmap_tx_init(sc);
2089 #endif /* DEV_NETMAP */
2090 	/* Set EOR. */
2091 	desc = &sc->rl_ldata.rl_tx_list[sc->rl_ldata.rl_tx_desc_cnt - 1];
2092 	desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOR);
2093 
2094 	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
2095 	    sc->rl_ldata.rl_tx_list_map,
2096 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2097 
2098 	sc->rl_ldata.rl_tx_prodidx = 0;
2099 	sc->rl_ldata.rl_tx_considx = 0;
2100 	sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt;
2101 
2102 	return (0);
2103 }
2104 
2105 static int
2106 re_rx_list_init(struct rl_softc *sc)
2107 {
2108 	int			error, i;
2109 
2110 	bzero(sc->rl_ldata.rl_rx_list,
2111 	    sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc));
2112 	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
2113 		sc->rl_ldata.rl_rx_desc[i].rx_m = NULL;
2114 		if ((error = re_newbuf(sc, i)) != 0)
2115 			return (error);
2116 	}
2117 #ifdef DEV_NETMAP
2118 	re_netmap_rx_init(sc);
2119 #endif /* DEV_NETMAP */
2120 
2121 	/* Flush the RX descriptors */
2122 
2123 	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2124 	    sc->rl_ldata.rl_rx_list_map,
2125 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2126 
2127 	sc->rl_ldata.rl_rx_prodidx = 0;
2128 	sc->rl_head = sc->rl_tail = NULL;
2129 	sc->rl_int_rx_act = 0;
2130 
2131 	return (0);
2132 }
2133 
2134 static int
2135 re_jrx_list_init(struct rl_softc *sc)
2136 {
2137 	int			error, i;
2138 
2139 	bzero(sc->rl_ldata.rl_rx_list,
2140 	    sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc));
2141 	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
2142 		sc->rl_ldata.rl_jrx_desc[i].rx_m = NULL;
2143 		if ((error = re_jumbo_newbuf(sc, i)) != 0)
2144 			return (error);
2145 	}
2146 
2147 	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2148 	    sc->rl_ldata.rl_rx_list_map,
2149 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2150 
2151 	sc->rl_ldata.rl_rx_prodidx = 0;
2152 	sc->rl_head = sc->rl_tail = NULL;
2153 	sc->rl_int_rx_act = 0;
2154 
2155 	return (0);
2156 }
2157 
2158 /*
2159  * RX handler for C+ and 8169. For the gigE chips, we support
2160  * the reception of jumbo frames that have been fragmented
2161  * across multiple 2K mbuf cluster buffers.
2162  */
2163 static int
2164 re_rxeof(struct rl_softc *sc, int *rx_npktsp)
2165 {
2166 	struct mbuf		*m;
2167 	if_t ifp;
2168 	int			i, rxerr, total_len;
2169 	struct rl_desc		*cur_rx;
2170 	u_int32_t		rxstat, rxvlan;
2171 	int			jumbo, maxpkt = 16, rx_npkts = 0;
2172 
2173 	RL_LOCK_ASSERT(sc);
2174 
2175 	ifp = sc->rl_ifp;
2176 #ifdef DEV_NETMAP
2177 	if (netmap_rx_irq(ifp, 0, &rx_npkts))
2178 		return (0);
2179 #endif /* DEV_NETMAP */
2180 	if (if_getmtu(ifp) > RL_MTU && (sc->rl_flags & RL_FLAG_JUMBOV2) != 0)
2181 		jumbo = 1;
2182 	else
2183 		jumbo = 0;
2184 
2185 	/* Invalidate the descriptor memory */
2186 
2187 	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2188 	    sc->rl_ldata.rl_rx_list_map,
2189 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2190 
2191 	for (i = sc->rl_ldata.rl_rx_prodidx; maxpkt > 0;
2192 	    i = RL_RX_DESC_NXT(sc, i)) {
2193 		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
2194 			break;
2195 		cur_rx = &sc->rl_ldata.rl_rx_list[i];
2196 		rxstat = le32toh(cur_rx->rl_cmdstat);
2197 		if ((rxstat & RL_RDESC_STAT_OWN) != 0)
2198 			break;
2199 		total_len = rxstat & sc->rl_rxlenmask;
2200 		rxvlan = le32toh(cur_rx->rl_vlanctl);
2201 		if (jumbo != 0)
2202 			m = sc->rl_ldata.rl_jrx_desc[i].rx_m;
2203 		else
2204 			m = sc->rl_ldata.rl_rx_desc[i].rx_m;
2205 
2206 		if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
2207 		    (rxstat & (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) !=
2208 		    (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) {
2209 			/*
2210 			 * RTL8168C or later controllers do not
2211 			 * support multi-fragment packets.
2212 			 */
2213 			re_discard_rxbuf(sc, i);
2214 			continue;
2215 		} else if ((rxstat & RL_RDESC_STAT_EOF) == 0) {
2216 			if (re_newbuf(sc, i) != 0) {
2217 				/*
2218 				 * If this is part of a multi-fragment packet,
2219 				 * discard all the pieces.
2220 				 */
2221 				if (sc->rl_head != NULL) {
2222 					m_freem(sc->rl_head);
2223 					sc->rl_head = sc->rl_tail = NULL;
2224 				}
2225 				re_discard_rxbuf(sc, i);
2226 				continue;
2227 			}
2228 			m->m_len = RE_RX_DESC_BUFLEN;
2229 			if (sc->rl_head == NULL)
2230 				sc->rl_head = sc->rl_tail = m;
2231 			else {
2232 				m->m_flags &= ~M_PKTHDR;
2233 				sc->rl_tail->m_next = m;
2234 				sc->rl_tail = m;
2235 			}
2236 			continue;
2237 		}
2238 
2239 		/*
2240 		 * NOTE: for the 8139C+, the frame length field
2241 		 * is always 12 bits in size, but for the gigE chips,
2242 		 * it is 13 bits (since the max RX frame length is 16K).
2243 		 * Unfortunately, all 32 bits in the status word
2244 		 * were already used, so to make room for the extra
2245 		 * length bit, RealTek took out the 'frame alignment
2246 		 * error' bit and shifted the other status bits
2247 		 * over one slot. The OWN, EOR, FS and LS bits are
2248 		 * still in the same places. We have already extracted
2249 		 * the frame length and checked the OWN bit, so rather
2250 		 * than using an alternate bit mapping, we shift the
2251 		 * status bits one space to the right so we can evaluate
2252 		 * them using the 8169 status as though it was in the
2253 		 * same format as that of the 8139C+.
2254 		 */
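		/*
		 * Sketch: an 8169 error bit sitting one position above
		 * its 8139C+ counterpart lands back on the 8139C+
		 * position after the shift below, so the same
		 * RL_RDESC_STAT_* tests work for both families.
		 */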
2255 		if (sc->rl_type == RL_8169)
2256 			rxstat >>= 1;
2257 
2258 		/*
2259 		 * If total_len > 2^13-1, both _RXERRSUM and _GIANT will be
2260 		 * set, but if the CRC error bit is clear, the frame is still valid.
2261 		 */
2262 		if ((rxstat & RL_RDESC_STAT_RXERRSUM) != 0) {
2263 			rxerr = 1;
2264 			if ((sc->rl_flags & RL_FLAG_JUMBOV2) == 0 &&
2265 			    total_len > 8191 &&
2266 			    (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT)
2267 				rxerr = 0;
2268 			if (rxerr != 0) {
2269 				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2270 				/*
2271 				 * If this is part of a multi-fragment packet,
2272 				 * discard all the pieces.
2273 				 */
2274 				if (sc->rl_head != NULL) {
2275 					m_freem(sc->rl_head);
2276 					sc->rl_head = sc->rl_tail = NULL;
2277 				}
2278 				re_discard_rxbuf(sc, i);
2279 				continue;
2280 			}
2281 		}
2282 
2283 		/*
2284 		 * If allocating a replacement mbuf fails,
2285 		 * reload the current one.
2286 		 */
2287 		if (jumbo != 0)
2288 			rxerr = re_jumbo_newbuf(sc, i);
2289 		else
2290 			rxerr = re_newbuf(sc, i);
2291 		if (rxerr != 0) {
2292 			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
2293 			if (sc->rl_head != NULL) {
2294 				m_freem(sc->rl_head);
2295 				sc->rl_head = sc->rl_tail = NULL;
2296 			}
2297 			re_discard_rxbuf(sc, i);
2298 			continue;
2299 		}
2300 
2301 		if (sc->rl_head != NULL) {
2302 			if (jumbo != 0)
2303 				m->m_len = total_len;
2304 			else {
2305 				m->m_len = total_len % RE_RX_DESC_BUFLEN;
2306 				if (m->m_len == 0)
2307 					m->m_len = RE_RX_DESC_BUFLEN;
2308 			}
2309 			/*
2310 			 * Special case: if there are 4 bytes or fewer
2311 			 * in this buffer, the mbuf can be discarded:
2312 			 * the last 4 bytes are the CRC, which we don't
2313 			 * care about anyway.
2314 			 */
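			/*
			 * Worked example (assuming 2048-byte descriptor
			 * buffers): a 2050-byte frame leaves 2 bytes in
			 * this mbuf, both of them CRC, so this mbuf is
			 * freed and the other 2 CRC bytes are trimmed
			 * from the tail of the previous mbuf.
			 */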
2315 			if (m->m_len <= ETHER_CRC_LEN) {
2316 				sc->rl_tail->m_len -=
2317 				    (ETHER_CRC_LEN - m->m_len);
2318 				m_freem(m);
2319 			} else {
2320 				m->m_len -= ETHER_CRC_LEN;
2321 				m->m_flags &= ~M_PKTHDR;
2322 				sc->rl_tail->m_next = m;
2323 			}
2324 			m = sc->rl_head;
2325 			sc->rl_head = sc->rl_tail = NULL;
2326 			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
2327 		} else
2328 			m->m_pkthdr.len = m->m_len =
2329 			    (total_len - ETHER_CRC_LEN);
2330 
2331 #ifdef RE_FIXUP_RX
2332 		re_fixup_rx(m);
2333 #endif
2334 		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2335 		m->m_pkthdr.rcvif = ifp;
2336 
2337 		/* Do RX checksumming if enabled */
2338 
2339 		if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
2340 			if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
2341 				/* Check IP header checksum */
2342 				if (rxstat & RL_RDESC_STAT_PROTOID)
2343 					m->m_pkthdr.csum_flags |=
2344 					    CSUM_IP_CHECKED;
2345 				if (!(rxstat & RL_RDESC_STAT_IPSUMBAD))
2346 					m->m_pkthdr.csum_flags |=
2347 					    CSUM_IP_VALID;
2348 
2349 				/* Check TCP/UDP checksum */
2350 				if ((RL_TCPPKT(rxstat) &&
2351 				    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
2352 				    (RL_UDPPKT(rxstat) &&
2353 				     !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
2354 					m->m_pkthdr.csum_flags |=
2355 						CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
2356 					m->m_pkthdr.csum_data = 0xffff;
2357 				}
2358 			} else {
2359 				/*
2360 				 * RTL8168C/RTL8168CP/RTL8111C/RTL8111CP
2361 				 */
2362 				if ((rxstat & RL_RDESC_STAT_PROTOID) &&
2363 				    (rxvlan & RL_RDESC_IPV4))
2364 					m->m_pkthdr.csum_flags |=
2365 					    CSUM_IP_CHECKED;
2366 				if (!(rxstat & RL_RDESC_STAT_IPSUMBAD) &&
2367 				    (rxvlan & RL_RDESC_IPV4))
2368 					m->m_pkthdr.csum_flags |=
2369 					    CSUM_IP_VALID;
2370 				if (((rxstat & RL_RDESC_STAT_TCP) &&
2371 				    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
2372 				    ((rxstat & RL_RDESC_STAT_UDP) &&
2373 				    !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
2374 					m->m_pkthdr.csum_flags |=
2375 						CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
2376 					m->m_pkthdr.csum_data = 0xffff;
2377 				}
2378 			}
2379 		}
2380 		maxpkt--;
2381 		if (rxvlan & RL_RDESC_VLANCTL_TAG) {
2382 			m->m_pkthdr.ether_vtag =
2383 			    bswap16((rxvlan & RL_RDESC_VLANCTL_DATA));
2384 			m->m_flags |= M_VLANTAG;
2385 		}
2386 		RL_UNLOCK(sc);
2387 		if_input(ifp, m);
2388 		RL_LOCK(sc);
2389 		rx_npkts++;
2390 	}
2391 
2392 	/* Flush the RX DMA ring */
2393 
2394 	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
2395 	    sc->rl_ldata.rl_rx_list_map,
2396 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2397 
2398 	sc->rl_ldata.rl_rx_prodidx = i;
2399 
2400 	if (rx_npktsp != NULL)
2401 		*rx_npktsp = rx_npkts;
2402 	if (maxpkt)
2403 		return (EAGAIN);
2404 
2405 	return (0);
2406 }
2407 
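/*
 * Reap completed TX descriptors: walk the ring from the consumer
 * index until a descriptor still owned by the chip is found,
 * unloading the DMA map and freeing the mbuf at each chain's final
 * (EOF) descriptor, which is the only one carrying valid status bits.
 */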
2408 static void
2409 re_txeof(struct rl_softc *sc)
2410 {
2411 	if_t ifp;
2412 	struct rl_txdesc	*txd;
2413 	u_int32_t		txstat;
2414 	int			cons;
2415 
2416 	cons = sc->rl_ldata.rl_tx_considx;
2417 	if (cons == sc->rl_ldata.rl_tx_prodidx)
2418 		return;
2419 
2420 	ifp = sc->rl_ifp;
2421 #ifdef DEV_NETMAP
2422 	if (netmap_tx_irq(ifp, 0))
2423 		return;
2424 #endif /* DEV_NETMAP */
2425 	/* Invalidate the TX descriptor list */
2426 	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
2427 	    sc->rl_ldata.rl_tx_list_map,
2428 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2429 
2430 	for (; cons != sc->rl_ldata.rl_tx_prodidx;
2431 	    cons = RL_TX_DESC_NXT(sc, cons)) {
2432 		txstat = le32toh(sc->rl_ldata.rl_tx_list[cons].rl_cmdstat);
2433 		if (txstat & RL_TDESC_STAT_OWN)
2434 			break;
2435 		/*
2436 		 * We only stash mbufs in the last descriptor
2437 		 * in a fragment chain, which also happens to
2438 		 * be the only place where the TX status bits
2439 		 * are valid.
2440 		 */
2441 		if (txstat & RL_TDESC_CMD_EOF) {
2442 			txd = &sc->rl_ldata.rl_tx_desc[cons];
2443 			bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
2444 			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2445 			bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag,
2446 			    txd->tx_dmamap);
2447 			KASSERT(txd->tx_m != NULL,
2448 			    ("%s: freeing NULL mbufs!", __func__));
2449 			m_freem(txd->tx_m);
2450 			txd->tx_m = NULL;
2451 			if (txstat & (RL_TDESC_STAT_EXCESSCOL|
2452 			    RL_TDESC_STAT_COLCNT))
2453 				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
2454 			if (txstat & RL_TDESC_STAT_TXERRSUM)
2455 				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2456 			else
2457 				if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2458 		}
2459 		sc->rl_ldata.rl_tx_free++;
2460 		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2461 	}
2462 	sc->rl_ldata.rl_tx_considx = cons;
2463 
2464 	/* No changes made to the TX ring, so no flush needed */
2465 
2466 	if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) {
2467 #ifdef RE_TX_MODERATION
2468 		/*
2469 		 * If not all descriptors have been reaped yet, reload
2470 		 * the timer so that we will eventually get another
2471 		 * interrupt that will cause us to re-enter this routine.
2472 		 * This is done in case the transmitter has gone idle.
2473 		 */
2474 		CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2475 #endif
2476 	} else
2477 		sc->rl_watchdog_timer = 0;
2478 }
2479 
2480 static void
2481 re_tick(void *xsc)
2482 {
2483 	struct rl_softc		*sc;
2484 	struct mii_data		*mii;
2485 
2486 	sc = xsc;
2487 
2488 	RL_LOCK_ASSERT(sc);
2489 
2490 	mii = device_get_softc(sc->rl_miibus);
2491 	mii_tick(mii);
2492 	if ((sc->rl_flags & RL_FLAG_LINK) == 0)
2493 		re_miibus_statchg(sc->rl_dev);
2494 	/*
2495 	 * Reclaim transmitted frames here. Technically it is not
2496 	 * necessary to do this here, but it ensures periodic reclamation
2497 	 * even when a Tx completion interrupt is lost, which seems to
2498 	 * happen on PCIe based controllers under certain situations.
2499 	 */
2500 	re_txeof(sc);
2501 	re_watchdog(sc);
2502 	callout_reset(&sc->rl_stat_callout, hz, re_tick, sc);
2503 }
2504 
2505 #ifdef DEVICE_POLLING
2506 static int
2507 re_poll(if_t ifp, enum poll_cmd cmd, int count)
2508 {
2509 	struct rl_softc *sc = if_getsoftc(ifp);
2510 	int rx_npkts = 0;
2511 
2512 	RL_LOCK(sc);
2513 	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
2514 		rx_npkts = re_poll_locked(ifp, cmd, count);
2515 	RL_UNLOCK(sc);
2516 	return (rx_npkts);
2517 }
2518 
2519 static int
2520 re_poll_locked(if_t ifp, enum poll_cmd cmd, int count)
2521 {
2522 	struct rl_softc *sc = if_getsoftc(ifp);
2523 	int rx_npkts;
2524 
2525 	RL_LOCK_ASSERT(sc);
2526 
2527 	sc->rxcycles = count;
2528 	re_rxeof(sc, &rx_npkts);
2529 	re_txeof(sc);
2530 
2531 	if (!if_sendq_empty(ifp))
2532 		re_start_locked(ifp);
2533 
2534 	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
2535 		u_int16_t       status;
2536 
2537 		status = CSR_READ_2(sc, RL_ISR);
2538 		if (status == 0xffff)
2539 			return (rx_npkts);
2540 		if (status)
2541 			CSR_WRITE_2(sc, RL_ISR, status);
2542 		if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
2543 		    (sc->rl_flags & RL_FLAG_PCIE))
2544 			CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2545 
2546 		/*
2547 		 * XXX check behaviour on receiver stalls.
2548 		 */
2549 
2550 		if (status & RL_ISR_SYSTEM_ERR) {
2551 			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
2552 			re_init_locked(sc);
2553 		}
2554 	}
2555 	return (rx_npkts);
2556 }
2557 #endif /* DEVICE_POLLING */
2558 
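/*
 * Interrupt filter used for INTx (and whenever the moderation setup
 * forces the filter path): mask further interrupts and defer all
 * real work to re_int_task() on the fast taskqueue; the task
 * re-enables RL_IMR once it has drained the rings.
 */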
2559 static int
2560 re_intr(void *arg)
2561 {
2562 	struct rl_softc		*sc;
2563 	uint16_t		status;
2564 
2565 	sc = arg;
2566 
2567 	status = CSR_READ_2(sc, RL_ISR);
2568 	if (status == 0xFFFF || (status & RL_INTRS_CPLUS) == 0)
2569 		return (FILTER_STRAY);
2570 	CSR_WRITE_2(sc, RL_IMR, 0);
2571 
2572 	taskqueue_enqueue(taskqueue_fast, &sc->rl_inttask);
2573 
2574 	return (FILTER_HANDLED);
2575 }
2576 
2577 static void
2578 re_int_task(void *arg, int npending)
2579 {
2580 	struct rl_softc		*sc;
2581 	if_t ifp;
2582 	u_int16_t		status;
2583 	int			rval = 0;
2584 
2585 	sc = arg;
2586 	ifp = sc->rl_ifp;
2587 
2588 	RL_LOCK(sc);
2589 
2590 	status = CSR_READ_2(sc, RL_ISR);
2591 	CSR_WRITE_2(sc, RL_ISR, status);
2592 
2593 	if (sc->suspended ||
2594 	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
2595 		RL_UNLOCK(sc);
2596 		return;
2597 	}
2598 
2599 #ifdef DEVICE_POLLING
2600 	if (if_getcapenable(ifp) & IFCAP_POLLING) {
2601 		RL_UNLOCK(sc);
2602 		return;
2603 	}
2604 #endif
2605 
2606 	if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW))
2607 		rval = re_rxeof(sc, NULL);
2608 
2609 	/*
2610 	 * Some chips will ignore a second TX request issued
2611 	 * while an existing transmission is in progress. If
2612 	 * the transmitter goes idle but there are still
2613 	 * packets waiting to be sent, we need to restart the
2614 	 * channel here to flush them out. This only seems to
2615 	 * be required with the PCIe devices.
2616 	 */
2617 	if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
2618 	    (sc->rl_flags & RL_FLAG_PCIE))
2619 		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2620 	if (status & (
2621 #ifdef RE_TX_MODERATION
2622 	    RL_ISR_TIMEOUT_EXPIRED|
2623 #else
2624 	    RL_ISR_TX_OK|
2625 #endif
2626 	    RL_ISR_TX_ERR|RL_ISR_TX_DESC_UNAVAIL))
2627 		re_txeof(sc);
2628 
2629 	if (status & RL_ISR_SYSTEM_ERR) {
2630 		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
2631 		re_init_locked(sc);
2632 	}
2633 
2634 	if (!if_sendq_empty(ifp))
2635 		re_start_locked(ifp);
2636 
2637 	RL_UNLOCK(sc);
2638 
2639 	if ((CSR_READ_2(sc, RL_ISR) & RL_INTRS_CPLUS) || rval) {
2640 		taskqueue_enqueue(taskqueue_fast, &sc->rl_inttask);
2641 		return;
2642 	}
2643 
2644 	CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
2645 }
2646 
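/*
 * MSI/MSI-X interrupt handler. While the one-shot RL_TIMERCNT timer
 * is armed (sc->rl_int_rx_act != 0), RX interrupt sources stay
 * masked so that back-to-back RX interrupts are coalesced into a
 * single timer-driven pass, implementing RX interrupt moderation.
 */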
2647 static void
2648 re_intr_msi(void *xsc)
2649 {
2650 	struct rl_softc		*sc;
2651 	if_t ifp;
2652 	uint16_t		intrs, status;
2653 
2654 	sc = xsc;
2655 	RL_LOCK(sc);
2656 
2657 	ifp = sc->rl_ifp;
2658 #ifdef DEVICE_POLLING
2659 	if (if_getcapenable(ifp) & IFCAP_POLLING) {
2660 		RL_UNLOCK(sc);
2661 		return;
2662 	}
2663 #endif
2664 	/* Disable interrupts. */
2665 	CSR_WRITE_2(sc, RL_IMR, 0);
2666 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
2667 		RL_UNLOCK(sc);
2668 		return;
2669 	}
2670 
2671 	intrs = RL_INTRS_CPLUS;
2672 	status = CSR_READ_2(sc, RL_ISR);
2673 	CSR_WRITE_2(sc, RL_ISR, status);
2674 	if (sc->rl_int_rx_act > 0) {
2675 		intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW |
2676 		    RL_ISR_RX_OVERRUN);
2677 		status &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW |
2678 		    RL_ISR_RX_OVERRUN);
2679 	}
2680 
2681 	if (status & (RL_ISR_TIMEOUT_EXPIRED | RL_ISR_RX_OK | RL_ISR_RX_ERR |
2682 	    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) {
2683 		re_rxeof(sc, NULL);
2684 		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2685 			if (sc->rl_int_rx_mod != 0 &&
2686 			    (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR |
2687 			    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) != 0) {
2688 				/* Rearm one-shot timer. */
2689 				CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2690 				intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR |
2691 				    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN);
2692 				sc->rl_int_rx_act = 1;
2693 			} else {
2694 				intrs |= RL_ISR_RX_OK | RL_ISR_RX_ERR |
2695 				    RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN;
2696 				sc->rl_int_rx_act = 0;
2697 			}
2698 		}
2699 	}
2700 
2701 	/*
2702 	 * Some chips will ignore a second TX request issued
2703 	 * while an existing transmission is in progress. If
2704 	 * the transmitter goes idle but there are still
2705 	 * packets waiting to be sent, we need to restart the
2706 	 * channel here to flush them out. This only seems to
2707 	 * be required with the PCIe devices.
2708 	 */
2709 	if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) &&
2710 	    (sc->rl_flags & RL_FLAG_PCIE))
2711 		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2712 	if (status & (RL_ISR_TX_OK | RL_ISR_TX_ERR | RL_ISR_TX_DESC_UNAVAIL))
2713 		re_txeof(sc);
2714 
2715 	if (status & RL_ISR_SYSTEM_ERR) {
2716 		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
2717 		re_init_locked(sc);
2718 	}
2719 
2720 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2721 		if (!if_sendq_empty(ifp))
2722 			re_start_locked(ifp);
2723 		CSR_WRITE_2(sc, RL_IMR, intrs);
2724 	}
2725 	RL_UNLOCK(sc);
2726 }
2727 
2728 static int
2729 re_encap(struct rl_softc *sc, struct mbuf **m_head)
2730 {
2731 	struct rl_txdesc	*txd, *txd_last;
2732 	bus_dma_segment_t	segs[RL_NTXSEGS];
2733 	bus_dmamap_t		map;
2734 	struct mbuf		*m_new;
2735 	struct rl_desc		*desc;
2736 	int			nsegs, prod;
2737 	int			i, error, ei, si;
2738 	int			padlen;
2739 	uint32_t		cmdstat, csum_flags, vlanctl;
2740 
2741 	RL_LOCK_ASSERT(sc);
2742 	M_ASSERTPKTHDR((*m_head));
2743 
2744 	/*
2745 	 * With some of the RealTek chips, using the checksum offload
2746 	 * support in conjunction with the autopadding feature results
2747 	 * in the transmission of corrupt frames. For example, if we
2748 	 * need to send a really small IP fragment that's less than 60
2749 	 * bytes in size, and IP header checksumming is enabled, the
2750 	 * resulting ethernet frame that appears on the wire will
2751 	 * have garbled payload. To work around this, if TX IP checksum
2752 	 * offload is enabled, we always manually pad short frames out
2753 	 * to the minimum ethernet frame size.
2754 	 */
2755 	if ((sc->rl_flags & RL_FLAG_AUTOPAD) == 0 &&
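	/*
	 * Illustrative sketch: a runt IP fragment shorter than
	 * RL_IP4CSUMTX_PADLEN with CSUM_IP set is grown by
	 * RL_MIN_FRAMELEN - m_pkthdr.len zeroed pad bytes below
	 * before being mapped for DMA.
	 */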
2756 	    (*m_head)->m_pkthdr.len < RL_IP4CSUMTX_PADLEN &&
2757 	    ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) != 0) {
2758 		padlen = RL_MIN_FRAMELEN - (*m_head)->m_pkthdr.len;
2759 		if (M_WRITABLE(*m_head) == 0) {
2760 			/* Get a writable copy. */
2761 			m_new = m_dup(*m_head, M_NOWAIT);
2762 			m_freem(*m_head);
2763 			if (m_new == NULL) {
2764 				*m_head = NULL;
2765 				return (ENOBUFS);
2766 			}
2767 			*m_head = m_new;
2768 		}
2769 		if ((*m_head)->m_next != NULL ||
2770 		    M_TRAILINGSPACE(*m_head) < padlen) {
2771 			m_new = m_defrag(*m_head, M_NOWAIT);
2772 			if (m_new == NULL) {
2773 				m_freem(*m_head);
2774 				*m_head = NULL;
2775 				return (ENOBUFS);
2776 			}
2777 		} else
2778 			m_new = *m_head;
2779 
2780 		/*
2781 		 * Manually pad short frames, and zero the pad space
2782 		 * to avoid leaking data.
2783 		 */
2784 		bzero(mtod(m_new, char *) + m_new->m_pkthdr.len, padlen);
2785 		m_new->m_pkthdr.len += padlen;
2786 		m_new->m_len = m_new->m_pkthdr.len;
2787 		*m_head = m_new;
2788 	}
2789 
2790 	prod = sc->rl_ldata.rl_tx_prodidx;
2791 	txd = &sc->rl_ldata.rl_tx_desc[prod];
2792 	error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
2793 	    *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2794 	if (error == EFBIG) {
2795 		m_new = m_collapse(*m_head, M_NOWAIT, RL_NTXSEGS);
2796 		if (m_new == NULL) {
2797 			m_freem(*m_head);
2798 			*m_head = NULL;
2799 			return (ENOBUFS);
2800 		}
2801 		*m_head = m_new;
2802 		error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag,
2803 		    txd->tx_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2804 		if (error != 0) {
2805 			m_freem(*m_head);
2806 			*m_head = NULL;
2807 			return (error);
2808 		}
2809 	} else if (error != 0)
2810 		return (error);
2811 	if (nsegs == 0) {
2812 		m_freem(*m_head);
2813 		*m_head = NULL;
2814 		return (EIO);
2815 	}
2816 
2817 	/* Check for number of available descriptors. */
2818 	if (sc->rl_ldata.rl_tx_free - nsegs <= 1) {
2819 		bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap);
2820 		return (ENOBUFS);
2821 	}
2822 
2823 	bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
2824 	    BUS_DMASYNC_PREWRITE);
2825 
2826 	/*
2827 	 * Set up checksum offload. Note: checksum offload bits must
2828 	 * appear in all descriptors of a multi-descriptor transmit
2829 	 * attempt. This is according to testing done with an 8169
2830 	 * chip. This is a requirement.
2831 	 */
2832 	vlanctl = 0;
2833 	csum_flags = 0;
2834 	if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2835 		if ((sc->rl_flags & RL_FLAG_DESCV2) != 0) {
2836 			csum_flags |= RL_TDESC_CMD_LGSEND;
2837 			vlanctl |= ((uint32_t)(*m_head)->m_pkthdr.tso_segsz <<
2838 			    RL_TDESC_CMD_MSSVALV2_SHIFT);
2839 		} else {
2840 			csum_flags |= RL_TDESC_CMD_LGSEND |
2841 			    ((uint32_t)(*m_head)->m_pkthdr.tso_segsz <<
2842 			    RL_TDESC_CMD_MSSVAL_SHIFT);
2843 		}
2844 	} else {
2845 		/*
2846 		 * Unconditionally enable IP checksumming if TCP or UDP
2847 		 * checksumming is required; the TCP/UDP checksum bits
2848 		 * have no effect unless the IP checksum bit is set as well.
2849 		 */
2850 		if (((*m_head)->m_pkthdr.csum_flags & RE_CSUM_FEATURES) != 0) {
2851 			if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
2852 				csum_flags |= RL_TDESC_CMD_IPCSUM;
2853 				if (((*m_head)->m_pkthdr.csum_flags &
2854 				    CSUM_TCP) != 0)
2855 					csum_flags |= RL_TDESC_CMD_TCPCSUM;
2856 				if (((*m_head)->m_pkthdr.csum_flags &
2857 				    CSUM_UDP) != 0)
2858 					csum_flags |= RL_TDESC_CMD_UDPCSUM;
2859 			} else {
2860 				vlanctl |= RL_TDESC_CMD_IPCSUMV2;
2861 				if (((*m_head)->m_pkthdr.csum_flags &
2862 				    CSUM_TCP) != 0)
2863 					vlanctl |= RL_TDESC_CMD_TCPCSUMV2;
2864 				if (((*m_head)->m_pkthdr.csum_flags &
2865 				    CSUM_UDP) != 0)
2866 					vlanctl |= RL_TDESC_CMD_UDPCSUMV2;
2867 			}
2868 		}
2869 	}
2870 
2871 	/*
2872 	 * Set up hardware VLAN tagging. Note: vlan tag info must
2873 	 * appear in all descriptors of a multi-descriptor
2874 	 * transmission attempt.
2875 	 */
2876 	if ((*m_head)->m_flags & M_VLANTAG)
2877 		vlanctl |= bswap16((*m_head)->m_pkthdr.ether_vtag) |
2878 		    RL_TDESC_VLANCTL_TAG;
2879 
2880 	si = prod;
2881 	for (i = 0; i < nsegs; i++, prod = RL_TX_DESC_NXT(sc, prod)) {
2882 		desc = &sc->rl_ldata.rl_tx_list[prod];
2883 		desc->rl_vlanctl = htole32(vlanctl);
2884 		desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr));
2885 		desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr));
2886 		cmdstat = segs[i].ds_len;
2887 		if (i != 0)
2888 			cmdstat |= RL_TDESC_CMD_OWN;
2889 		if (prod == sc->rl_ldata.rl_tx_desc_cnt - 1)
2890 			cmdstat |= RL_TDESC_CMD_EOR;
2891 		desc->rl_cmdstat = htole32(cmdstat | csum_flags);
2892 		sc->rl_ldata.rl_tx_free--;
2893 	}
2894 	/* Update producer index. */
2895 	sc->rl_ldata.rl_tx_prodidx = prod;
2896 
2897 	/* Set EOF on the last descriptor. */
2898 	ei = RL_TX_DESC_PRV(sc, prod);
2899 	desc = &sc->rl_ldata.rl_tx_list[ei];
2900 	desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF);
2901 
2902 	desc = &sc->rl_ldata.rl_tx_list[si];
2903 	/* Set SOF and transfer ownership of packet to the chip. */
2904 	desc->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF);
2905 
2906 	/*
2907 	 * Ensure that the map for this transmission
2908 	 * is placed at the array index of the last descriptor
2909 	 * in this chain.  (Swap last and first dmamaps.)
2910 	 */
2911 	txd_last = &sc->rl_ldata.rl_tx_desc[ei];
2912 	map = txd->tx_dmamap;
2913 	txd->tx_dmamap = txd_last->tx_dmamap;
2914 	txd_last->tx_dmamap = map;
2915 	txd_last->tx_m = *m_head;
2916 
2917 	return (0);
2918 }
2919 
2920 static void
2921 re_start(if_t ifp)
2922 {
2923 	struct rl_softc		*sc;
2924 
2925 	sc = if_getsoftc(ifp);
2926 	RL_LOCK(sc);
2927 	re_start_locked(ifp);
2928 	RL_UNLOCK(sc);
2929 }
2930 
2931 /*
2932  * Main transmit routine for C+ and gigE NICs.
2933  */
2934 static void
2935 re_start_locked(if_t ifp)
2936 {
2937 	struct rl_softc		*sc;
2938 	struct mbuf		*m_head;
2939 	int			queued;
2940 
2941 	sc = if_getsoftc(ifp);
2942 
2943 #ifdef DEV_NETMAP
2944 	/* XXX is this necessary? */
2945 	if (if_getcapenable(ifp) & IFCAP_NETMAP) {
2946 		struct netmap_kring *kring = NA(ifp)->tx_rings[0];
2947 		if (sc->rl_ldata.rl_tx_prodidx != kring->nr_hwcur) {
2948 			/* kick the tx unit */
2949 			CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
2950 #ifdef RE_TX_MODERATION
2951 			CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2952 #endif
2953 			sc->rl_watchdog_timer = 5;
2954 		}
2955 		return;
2956 	}
2957 #endif /* DEV_NETMAP */
2958 
2959 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2960 	    IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0)
2961 		return;
2962 
2963 	for (queued = 0; !if_sendq_empty(ifp) &&
2964 	    sc->rl_ldata.rl_tx_free > 1;) {
2965 		m_head = if_dequeue(ifp);
2966 		if (m_head == NULL)
2967 			break;
2968 
2969 		if (re_encap(sc, &m_head) != 0) {
2970 			if (m_head == NULL)
2971 				break;
2972 			if_sendq_prepend(ifp, m_head);
2973 			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
2974 			break;
2975 		}
2976 
2977 		/*
2978 		 * If there's a BPF listener, bounce a copy of this frame
2979 		 * to it.
2980 		 */
2981 		ETHER_BPF_MTAP(ifp, m_head);
2982 
2983 		queued++;
2984 	}
2985 
2986 	if (queued == 0) {
2987 #ifdef RE_TX_MODERATION
2988 		if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt)
2989 			CSR_WRITE_4(sc, RL_TIMERCNT, 1);
2990 #endif
2991 		return;
2992 	}
2993 
2994 	re_start_tx(sc);
2995 }
2996 
2997 static void
2998 re_start_tx(struct rl_softc *sc)
2999 {
3000 
3001 	/* Flush the TX descriptors */
3002 	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
3003 	    sc->rl_ldata.rl_tx_list_map,
3004 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
3005 
3006 	CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);
3007 
3008 #ifdef RE_TX_MODERATION
3009 	/*
3010 	 * Use the countdown timer for interrupt moderation.
3011 	 * 'TX done' interrupts are disabled. Instead, we reset the
3012 	 * countdown timer, which will begin counting until it hits
3013 	 * the value in the TIMERINT register, and then trigger an
3014 	 * interrupt. Each time we write to the TIMERCNT register,
3015 	 * the timer count is reset to 0.
3016 	 */
3017 	CSR_WRITE_4(sc, RL_TIMERCNT, 1);
3018 #endif
3019 
3020 	/*
3021 	 * Set a timeout in case the chip goes out to lunch.
3022 	 */
3023 	sc->rl_watchdog_timer = 5;
3024 }
3025 
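/*
 * Toggle the jumbo frame enable bits in CFG3/CFG4 (guarded by the
 * EEPROM write-config mode) and adjust the PCIe maximum read request
 * size; most revisions drop to a 512-byte MRRS while jumbo frames
 * are enabled.
 */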
3026 static void
3027 re_set_jumbo(struct rl_softc *sc, int jumbo)
3028 {
3029 
3030 	if (sc->rl_hwrev->rl_rev == RL_HWREV_8168E_VL) {
3031 		pci_set_max_read_req(sc->rl_dev, 4096);
3032 		return;
3033 	}
3034 
3035 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
3036 	if (jumbo != 0) {
3037 		CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) |
3038 		    RL_CFG3_JUMBO_EN0);
3039 		switch (sc->rl_hwrev->rl_rev) {
3040 		case RL_HWREV_8168DP:
3041 			break;
3042 		case RL_HWREV_8168E:
3043 			CSR_WRITE_1(sc, sc->rl_cfg4,
3044 			    CSR_READ_1(sc, sc->rl_cfg4) | 0x01);
3045 			break;
3046 		default:
3047 			CSR_WRITE_1(sc, sc->rl_cfg4,
3048 			    CSR_READ_1(sc, sc->rl_cfg4) | RL_CFG4_JUMBO_EN1);
3049 		}
3050 	} else {
3051 		CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) &
3052 		    ~RL_CFG3_JUMBO_EN0);
3053 		switch (sc->rl_hwrev->rl_rev) {
3054 		case RL_HWREV_8168DP:
3055 			break;
3056 		case RL_HWREV_8168E:
3057 			CSR_WRITE_1(sc, sc->rl_cfg4,
3058 			    CSR_READ_1(sc, sc->rl_cfg4) & ~0x01);
3059 			break;
3060 		default:
3061 			CSR_WRITE_1(sc, sc->rl_cfg4,
3062 			    CSR_READ_1(sc, sc->rl_cfg4) & ~RL_CFG4_JUMBO_EN1);
3063 		}
3064 	}
3065 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3066 
3067 	switch (sc->rl_hwrev->rl_rev) {
3068 	case RL_HWREV_8168DP:
3069 		pci_set_max_read_req(sc->rl_dev, 4096);
3070 		break;
3071 	default:
3072 		if (jumbo != 0)
3073 			pci_set_max_read_req(sc->rl_dev, 512);
3074 		else
3075 			pci_set_max_read_req(sc->rl_dev, 4096);
3076 	}
3077 }
3078 
3079 static void
3080 re_init(void *xsc)
3081 {
3082 	struct rl_softc		*sc = xsc;
3083 
3084 	RL_LOCK(sc);
3085 	re_init_locked(sc);
3086 	RL_UNLOCK(sc);
3087 }
3088 
3089 static void
3090 re_init_locked(struct rl_softc *sc)
3091 {
3092 	if_t ifp = sc->rl_ifp;
3093 	struct mii_data		*mii;
3094 	uint32_t		reg;
3095 	uint16_t		cfg;
3096 	uint32_t		idr[2];
3097 
3098 	RL_LOCK_ASSERT(sc);
3099 
3100 	mii = device_get_softc(sc->rl_miibus);
3101 
3102 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
3103 		return;
3104 
3105 	/*
3106 	 * Cancel pending I/O and free all RX/TX buffers.
3107 	 */
3108 	re_stop(sc);
3109 
3110 	/* Put controller into known state. */
3111 	re_reset(sc);
3112 
3113 	/*
3114 	 * For C+ mode, initialize the RX descriptors and mbufs.
3115 	 */
3116 	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
3117 		if (if_getmtu(ifp) > RL_MTU) {
3118 			if (re_jrx_list_init(sc) != 0) {
3119 				device_printf(sc->rl_dev,
3120 				    "no memory for jumbo RX buffers\n");
3121 				re_stop(sc);
3122 				return;
3123 			}
3124 			/* Disable checksum offloading for jumbo frames. */
3125 			if_setcapenablebit(ifp, 0, (IFCAP_HWCSUM | IFCAP_TSO4));
3126 			if_sethwassistbits(ifp, 0, (RE_CSUM_FEATURES | CSUM_TSO));
3127 		} else {
3128 			if (re_rx_list_init(sc) != 0) {
3129 				device_printf(sc->rl_dev,
3130 				    "no memory for RX buffers\n");
3131 				re_stop(sc);
3132 				return;
3133 			}
3134 		}
3135 		re_set_jumbo(sc, if_getmtu(ifp) > RL_MTU);
3136 	} else {
3137 		if (re_rx_list_init(sc) != 0) {
3138 			device_printf(sc->rl_dev, "no memory for RX buffers\n");
3139 			re_stop(sc);
3140 			return;
3141 		}
3142 		if ((sc->rl_flags & RL_FLAG_PCIE) != 0 &&
3143 		    pci_get_device(sc->rl_dev) != RT_DEVICEID_8101E) {
3144 			if (if_getmtu(ifp) > RL_MTU)
3145 				pci_set_max_read_req(sc->rl_dev, 512);
3146 			else
3147 				pci_set_max_read_req(sc->rl_dev, 4096);
3148 		}
3149 	}
3150 	re_tx_list_init(sc);
3151 
3152 	/*
3153 	 * Enable C+ RX and TX mode, as well as VLAN stripping and
3154 	 * RX checksum offload. We must configure the C+ register
3155 	 * before all others.
3156 	 */
3157 	cfg = RL_CPLUSCMD_PCI_MRW;
3158 	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
3159 		cfg |= RL_CPLUSCMD_RXCSUM_ENB;
3160 	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
3161 		cfg |= RL_CPLUSCMD_VLANSTRIP;
3162 	if ((sc->rl_flags & RL_FLAG_MACSTAT) != 0) {
3163 		cfg |= RL_CPLUSCMD_MACSTAT_DIS;
3164 		/* XXX magic. */
3165 		cfg |= 0x0001;
3166 	} else
3167 		cfg |= RL_CPLUSCMD_RXENB | RL_CPLUSCMD_TXENB;
3168 	CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg);
3169 	if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SC ||
3170 	    sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE) {
3171 		reg = 0x000fff00;
3172 		if ((CSR_READ_1(sc, sc->rl_cfg2) & RL_CFG2_PCI66MHZ) != 0)
3173 			reg |= 0x000000ff;
3174 		if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE)
3175 			reg |= 0x00f00000;
3176 		CSR_WRITE_4(sc, 0x7c, reg);
3177 		/* Disable interrupt mitigation. */
3178 		CSR_WRITE_2(sc, 0xe2, 0);
3179 	}
3180 	/*
3181 	 * Disable TSO if the interface MTU is greater than the maximum
3182 	 * MSS the controller allows.
3183 	 */
3184 	if (if_getmtu(ifp) > RL_TSO_MTU && (if_getcapenable(ifp) & IFCAP_TSO4) != 0) {
3185 		if_setcapenablebit(ifp, 0, IFCAP_TSO4);
3186 		if_sethwassistbits(ifp, 0, CSUM_TSO);
3187 	}
3188 
3189 	/*
3190 	 * Init our MAC address.  Even though the chipset
3191 	 * documentation doesn't mention it, we need to enter "Config
3192 	 * register write enable" mode to modify the ID registers.
3193 	 */
3194 	/* Copy MAC address on stack to align. */
3195 	bzero(idr, sizeof(idr));
3196 	bcopy(if_getlladdr(ifp), idr, ETHER_ADDR_LEN);
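	/* idr[0] now holds bytes 0-3 of the MAC address, idr[1] bytes 4-5. */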
3197 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
3198 	CSR_WRITE_4(sc, RL_IDR0, htole32(idr[0]));
3199 	CSR_WRITE_4(sc, RL_IDR4, htole32(idr[1]));
3200 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3201 
3202 	/*
3203 	 * Load the addresses of the RX and TX lists into the chip.
3204 	 */
3205 
3206 	CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI,
3207 	    RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr));
3208 	CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO,
3209 	    RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr));
3210 
3211 	CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI,
3212 	    RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr));
3213 	CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO,
3214 	    RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr));
3215 
3216 	if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) {
3217 		/* Disable RXDV gate. */
3218 		CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) &
3219 		    ~0x00080000);
3220 	}
3221 
3222 	/*
3223 	 * Enable transmit and receive for pre-RTL8168G controllers.
3224 	 * RX/TX MACs should be enabled before RX/TX configuration.
3225 	 */
3226 	if ((sc->rl_flags & RL_FLAG_8168G_PLUS) == 0)
3227 		CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB | RL_CMD_RX_ENB);
3228 
3229 	/*
3230 	 * Set the initial TX configuration.
3231 	 */
3232 	if (sc->rl_testmode) {
3233 		if (sc->rl_type == RL_8169)
3234 			CSR_WRITE_4(sc, RL_TXCFG,
3235 			    RL_TXCFG_CONFIG|RL_LOOPTEST_ON);
3236 		else
3237 			CSR_WRITE_4(sc, RL_TXCFG,
3238 			    RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS);
3239 	} else
3240 		CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
3241 
3242 	CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16);
3243 
3244 	/*
3245 	 * Set the initial RX configuration.
3246 	 */
3247 	re_set_rxmode(sc);
3248 
3249 	/* Configure interrupt moderation. */
3250 	if (sc->rl_type == RL_8169) {
3251 		/* Magic from vendor. */
3252 		CSR_WRITE_2(sc, RL_INTRMOD, 0x5100);
3253 	}
3254 
3255 	/*
3256 	 * Enable transmit and receive for RTL8168G and later controllers.
3257 	 * RX/TX MACs should be enabled after RX/TX configuration.
3258 	 */
3259 	if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0)
3260 		CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB | RL_CMD_RX_ENB);
3261 
3262 #ifdef DEVICE_POLLING
3263 	/*
3264 	 * Disable interrupts if we are polling.
3265 	 */
3266 	if (if_getcapenable(ifp) & IFCAP_POLLING)
3267 		CSR_WRITE_2(sc, RL_IMR, 0);
3268 	else	/* otherwise ... */
3269 #endif
3270 
3271 	/*
3272 	 * Enable interrupts.
3273 	 */
3274 	if (sc->rl_testmode)
3275 		CSR_WRITE_2(sc, RL_IMR, 0);
3276 	else
3277 		CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
3278 	CSR_WRITE_2(sc, RL_ISR, RL_INTRS_CPLUS);
3279 
3280 	/* Set initial TX threshold */
3281 	sc->rl_txthresh = RL_TX_THRESH_INIT;
3282 
3283 	/* Start RX/TX process. */
3284 	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);
3285 
3286 	/*
3287 	 * Initialize the timer interrupt register so that
3288 	 * a timer interrupt will be generated once the timer
3289 	 * reaches a certain number of ticks. The timer is
3290 	 * reloaded on each transmit.
3291 	 */
3292 #ifdef RE_TX_MODERATION
3293 	/*
3294 	 * Use the timer interrupt register to implement TX interrupt
3295 	 * moderation, which dramatically improves the TX frame rate.
3296 	 */
3297 	if (sc->rl_type == RL_8169)
3298 		CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800);
3299 	else
3300 		CSR_WRITE_4(sc, RL_TIMERINT, 0x400);
3301 #else
3302 	/*
3303 	 * Use the timer interrupt register to implement RX interrupt
3304 	 * moderation.
3305 	 */
3306 	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 &&
3307 	    intr_filter == 0) {
3308 		if (sc->rl_type == RL_8169)
3309 			CSR_WRITE_4(sc, RL_TIMERINT_8169,
3310 			    RL_USECS(sc->rl_int_rx_mod));
3311 	} else {
3312 		if (sc->rl_type == RL_8169)
3313 			CSR_WRITE_4(sc, RL_TIMERINT_8169, RL_USECS(0));
3314 	}
3315 #endif
3316 
3317 	/*
3318 	 * For 8169 gigE NICs, set the max allowed RX packet
3319 	 * size so we can receive jumbo frames.
3320 	 */
3321 	if (sc->rl_type == RL_8169) {
3322 		if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
3323 			/*
3324 			 * For controllers that use new jumbo frame scheme,
3325 			 * set maximum size of jumbo frame depending on
3326 			 * controller revisions.
3327 			 */
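			/*
			 * E.g. (illustrative): a revision with an
			 * rl_max_mtu of 9000 would be programmed with
			 * 9000 + 4 + 14 + 4 = 9022 bytes here.
			 */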
3328 			if (if_getmtu(ifp) > RL_MTU)
3329 				CSR_WRITE_2(sc, RL_MAXRXPKTLEN,
3330 				    sc->rl_hwrev->rl_max_mtu +
3331 				    ETHER_VLAN_ENCAP_LEN + ETHER_HDR_LEN +
3332 				    ETHER_CRC_LEN);
3333 			else
3334 				CSR_WRITE_2(sc, RL_MAXRXPKTLEN,
3335 				    RE_RX_DESC_BUFLEN);
3336 		} else if ((sc->rl_flags & RL_FLAG_PCIE) != 0 &&
3337 		    sc->rl_hwrev->rl_max_mtu == RL_MTU) {
3338 			/* RTL810x has no jumbo frame support. */
3339 			CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN);
3340 		} else
3341 			CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383);
3342 	}
3343 
3344 	if (sc->rl_testmode)
3345 		return;
3346 
3347 	CSR_WRITE_1(sc, sc->rl_cfg1, CSR_READ_1(sc, sc->rl_cfg1) |
3348 	    RL_CFG1_DRVLOAD);
3349 
3350 	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
3351 	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
3352 
3353 	sc->rl_flags &= ~RL_FLAG_LINK;
3354 	mii_mediachg(mii);
3355 
3356 	sc->rl_watchdog_timer = 0;
3357 	callout_reset(&sc->rl_stat_callout, hz, re_tick, sc);
3358 
3359 #ifdef DEV_NETMAP
3360 	netmap_enable_all_rings(ifp);
3361 #endif /* DEV_NETMAP */
3362 }
3363 
3364 /*
3365  * Set media options.
3366  */
3367 static int
3368 re_ifmedia_upd(if_t ifp)
3369 {
3370 	struct rl_softc		*sc;
3371 	struct mii_data		*mii;
3372 	int			error;
3373 
3374 	sc = if_getsoftc(ifp);
3375 	mii = device_get_softc(sc->rl_miibus);
3376 	RL_LOCK(sc);
3377 	error = mii_mediachg(mii);
3378 	RL_UNLOCK(sc);
3379 
3380 	return (error);
3381 }
3382 
3383 /*
3384  * Report current media status.
3385  */
3386 static void
3387 re_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
3388 {
3389 	struct rl_softc		*sc;
3390 	struct mii_data		*mii;
3391 
3392 	sc = if_getsoftc(ifp);
3393 	mii = device_get_softc(sc->rl_miibus);
3394 
3395 	RL_LOCK(sc);
3396 	mii_pollstat(mii);
3397 	ifmr->ifm_active = mii->mii_media_active;
3398 	ifmr->ifm_status = mii->mii_media_status;
3399 	RL_UNLOCK(sc);
3400 }
3401 
3402 static int
3403 re_ioctl(if_t ifp, u_long command, caddr_t data)
3404 {
3405 	struct rl_softc		*sc = if_getsoftc(ifp);
3406 	struct ifreq		*ifr = (struct ifreq *) data;
3407 	struct mii_data		*mii;
3408 	int			error = 0;
3409 
3410 	switch (command) {
3411 	case SIOCSIFMTU:
3412 		if (ifr->ifr_mtu < ETHERMIN ||
3413 		    ifr->ifr_mtu > sc->rl_hwrev->rl_max_mtu ||
3414 		    ((sc->rl_flags & RL_FLAG_FASTETHER) != 0 &&
3415 		    ifr->ifr_mtu > RL_MTU)) {
3416 			error = EINVAL;
3417 			break;
3418 		}
3419 		RL_LOCK(sc);
3420 		if (if_getmtu(ifp) != ifr->ifr_mtu) {
3421 			if_setmtu(ifp, ifr->ifr_mtu);
3422 			if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
3423 			    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
3424 				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
3425 				re_init_locked(sc);
3426 			}
3427 			if (if_getmtu(ifp) > RL_TSO_MTU &&
3428 			    (if_getcapenable(ifp) & IFCAP_TSO4) != 0) {
3429 				if_setcapenablebit(ifp, 0,
3430 				    IFCAP_TSO4 | IFCAP_VLAN_HWTSO);
3431 				if_sethwassistbits(ifp, 0, CSUM_TSO);
3432 			}
3433 			VLAN_CAPABILITIES(ifp);
3434 		}
3435 		RL_UNLOCK(sc);
3436 		break;
3437 	case SIOCSIFFLAGS:
3438 		RL_LOCK(sc);
3439 		if ((if_getflags(ifp) & IFF_UP) != 0) {
3440 			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
3441 				if (((if_getflags(ifp) ^ sc->rl_if_flags)
3442 				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3443 					re_set_rxmode(sc);
3444 			} else
3445 				re_init_locked(sc);
3446 		} else {
3447 			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
3448 				re_stop(sc);
3449 		}
3450 		sc->rl_if_flags = if_getflags(ifp);
3451 		RL_UNLOCK(sc);
3452 		break;
3453 	case SIOCADDMULTI:
3454 	case SIOCDELMULTI:
3455 		RL_LOCK(sc);
3456 		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
3457 			re_set_rxmode(sc);
3458 		RL_UNLOCK(sc);
3459 		break;
3460 	case SIOCGIFMEDIA:
3461 	case SIOCSIFMEDIA:
3462 		mii = device_get_softc(sc->rl_miibus);
3463 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
3464 		break;
3465 	case SIOCSIFCAP:
3466 	    {
3467 		int mask, reinit;
3468 
3469 		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
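		/* Each set bit in mask marks a capability being toggled. */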
3470 		reinit = 0;
3471 #ifdef DEVICE_POLLING
3472 		if (mask & IFCAP_POLLING) {
3473 			if (ifr->ifr_reqcap & IFCAP_POLLING) {
3474 				error = ether_poll_register(re_poll, ifp);
3475 				if (error)
3476 					return (error);
3477 				RL_LOCK(sc);
3478 				/* Disable interrupts */
3479 				CSR_WRITE_2(sc, RL_IMR, 0x0000);
3480 				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
3481 				RL_UNLOCK(sc);
3482 			} else {
3483 				error = ether_poll_deregister(ifp);
3484 				/* Enable interrupts. */
3485 				RL_LOCK(sc);
3486 				CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
3487 				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
3488 				RL_UNLOCK(sc);
3489 			}
3490 		}
3491 #endif /* DEVICE_POLLING */
3492 		RL_LOCK(sc);
3493 		if ((mask & IFCAP_TXCSUM) != 0 &&
3494 		    (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) {
3495 			if_togglecapenable(ifp, IFCAP_TXCSUM);
3496 			if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
3497 				if_sethwassistbits(ifp, RE_CSUM_FEATURES, 0);
3498 			else
3499 				if_sethwassistbits(ifp, 0, RE_CSUM_FEATURES);
3500 			reinit = 1;
3501 		}
3502 		if ((mask & IFCAP_RXCSUM) != 0 &&
3503 		    (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0) {
3504 			if_togglecapenable(ifp, IFCAP_RXCSUM);
3505 			reinit = 1;
3506 		}
3507 		if ((mask & IFCAP_TSO4) != 0 &&
3508 		    (if_getcapabilities(ifp) & IFCAP_TSO4) != 0) {
3509 			if_togglecapenable(ifp, IFCAP_TSO4);
3510 			if ((IFCAP_TSO4 & if_getcapenable(ifp)) != 0)
3511 				if_sethwassistbits(ifp, CSUM_TSO, 0);
3512 			else
3513 				if_sethwassistbits(ifp, 0, CSUM_TSO);
3514 			if (if_getmtu(ifp) > RL_TSO_MTU &&
3515 			    (if_getcapenable(ifp) & IFCAP_TSO4) != 0) {
3516 				if_setcapenablebit(ifp, 0, IFCAP_TSO4);
3517 				if_sethwassistbits(ifp, 0, CSUM_TSO);
3518 			}
3519 		}
3520 		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
3521 		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0)
3522 			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
3523 		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
3524 		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
3525 			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
3526 			/* TSO over VLAN requires VLAN hardware tagging. */
3527 			if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0)
3528 				if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWTSO);
3529 			reinit = 1;
3530 		}
3531 		if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 &&
3532 		    (mask & (IFCAP_HWCSUM | IFCAP_TSO4 |
3533 		    IFCAP_VLAN_HWTSO)) != 0)
3534 			reinit = 1;
3535 		if ((mask & IFCAP_WOL) != 0 &&
3536 		    (if_getcapabilities(ifp) & IFCAP_WOL) != 0) {
3537 			if ((mask & IFCAP_WOL_UCAST) != 0)
3538 				if_togglecapenable(ifp, IFCAP_WOL_UCAST);
3539 			if ((mask & IFCAP_WOL_MCAST) != 0)
3540 				if_togglecapenable(ifp, IFCAP_WOL_MCAST);
3541 			if ((mask & IFCAP_WOL_MAGIC) != 0)
3542 				if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
3543 		}
3544 		if (reinit && if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
3545 			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
3546 			re_init_locked(sc);
3547 		}
3548 		RL_UNLOCK(sc);
3549 		VLAN_CAPABILITIES(ifp);
3550 	    }
3551 		break;
3552 	default:
3553 		error = ether_ioctl(ifp, command, data);
3554 		break;
3555 	}
3556 
3557 	return (error);
3558 }
3559 
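/*
 * Sketch of the caller's side of the SIOCSIFCAP case above (an
 * illustration under assumptions, not driver code): ifconfig(8)-style
 * code reads the current capability bits, flips one, and writes the
 * request back; "s" is an already-open datagram socket and "re0" a
 * hypothetical unit name.
 */
#if 0
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, "re0", sizeof(ifr.ifr_name));
	if (ioctl(s, SIOCGIFCAP, (caddr_t)&ifr) == 0) {
		/* Toggle RX checksum offload; re_ioctl() sees the XOR mask. */
		ifr.ifr_reqcap = ifr.ifr_curcap ^ IFCAP_RXCSUM;
		(void)ioctl(s, SIOCSIFCAP, (caddr_t)&ifr);
	}
#endif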
3560 static void
3561 re_watchdog(struct rl_softc *sc)
3562 {
3563 	if_t ifp;
3564 
3565 	RL_LOCK_ASSERT(sc);
3566 
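	/*
	 * The watchdog counter is armed by the transmit path and counted
	 * down once per second from re_tick(); zero means no transmission
	 * is pending, so only a 1 -> 0 transition here signals a timeout.
	 */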
3567 	if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer != 0)
3568 		return;
3569 
3570 	ifp = sc->rl_ifp;
3571 	re_txeof(sc);
3572 	if (sc->rl_ldata.rl_tx_free == sc->rl_ldata.rl_tx_desc_cnt) {
3573 		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
3574 		    "-- recovering\n");
3575 		if (!if_sendq_empty(ifp))
3576 			re_start_locked(ifp);
3577 		return;
3578 	}
3579 
3580 	if_printf(ifp, "watchdog timeout\n");
3581 	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
3582 
3583 	re_rxeof(sc, NULL);
3584 	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
3585 	re_init_locked(sc);
3586 	if (!if_sendq_empty(ifp))
3587 		re_start_locked(ifp);
3588 }
3589 
3590 /*
3591  * Stop the adapter and free any mbufs allocated to the
3592  * RX and TX lists.
3593  */
3594 static void
3595 re_stop(struct rl_softc *sc)
3596 {
3597 	int			i;
3598 	if_t ifp;
3599 	struct rl_txdesc	*txd;
3600 	struct rl_rxdesc	*rxd;
3601 
3602 	RL_LOCK_ASSERT(sc);
3603 
3604 	ifp = sc->rl_ifp;
3605 
3606 	sc->rl_watchdog_timer = 0;
3607 	callout_stop(&sc->rl_stat_callout);
3608 	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
3609 
3610 #ifdef DEV_NETMAP
3611 	netmap_disable_all_rings(ifp);
3612 #endif /* DEV_NETMAP */
3613 
3614 	/*
3615 	 * Disable accepting frames to put RX MAC into idle state.
3616 	 * Otherwise it is possible to receive frames while the stop
3617 	 * command is still executing, and the controller could DMA a
3618 	 * frame into an RX buffer that has already been freed.
3619 	 */
3620 	CSR_WRITE_4(sc, RL_RXCFG, CSR_READ_4(sc, RL_RXCFG) &
3621 	    ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI |
3622 	    RL_RXCFG_RX_BROAD));
3623 
3624 	if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) {
3625 		/* Enable RXDV gate. */
3626 		CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) |
3627 		    0x00080000);
3628 	}
3629 
3630 	if ((sc->rl_flags & RL_FLAG_WAIT_TXPOLL) != 0) {
3631 		for (i = RL_TIMEOUT; i > 0; i--) {
3632 			if ((CSR_READ_1(sc, sc->rl_txstart) &
3633 			    RL_TXSTART_START) == 0)
3634 				break;
3635 			DELAY(20);
3636 		}
3637 		if (i == 0)
3638 			device_printf(sc->rl_dev,
3639 			    "stopping TX poll timed out!\n");
3640 		CSR_WRITE_1(sc, RL_COMMAND, 0x00);
3641 	} else if ((sc->rl_flags & RL_FLAG_CMDSTOP) != 0) {
3642 		CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB |
3643 		    RL_CMD_RX_ENB);
3644 		if ((sc->rl_flags & RL_FLAG_CMDSTOP_WAIT_TXQ) != 0) {
3645 			for (i = RL_TIMEOUT; i > 0; i--) {
3646 				if ((CSR_READ_4(sc, RL_TXCFG) &
3647 				    RL_TXCFG_QUEUE_EMPTY) != 0)
3648 					break;
3649 				DELAY(100);
3650 			}
3651 			if (i == 0)
3652 				device_printf(sc->rl_dev,
3653 				   "stopping TXQ timed out!\n");
3654 		}
3655 	} else
3656 		CSR_WRITE_1(sc, RL_COMMAND, 0x00);
3657 	DELAY(1000);
3658 	CSR_WRITE_2(sc, RL_IMR, 0x0000);
3659 	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
3660 
3661 	if (sc->rl_head != NULL) {
3662 		m_freem(sc->rl_head);
3663 		sc->rl_head = sc->rl_tail = NULL;
3664 	}
3665 
3666 	/* Free the TX list buffers. */
3667 	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
3668 		txd = &sc->rl_ldata.rl_tx_desc[i];
3669 		if (txd->tx_m != NULL) {
3670 			bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
3671 			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3672 			bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag,
3673 			    txd->tx_dmamap);
3674 			m_freem(txd->tx_m);
3675 			txd->tx_m = NULL;
3676 		}
3677 	}
3678 
3679 	/* Free the RX list buffers. */
3680 	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
3681 		rxd = &sc->rl_ldata.rl_rx_desc[i];
3682 		if (rxd->rx_m != NULL) {
3683 			bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
3684 			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3685 			bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag,
3686 			    rxd->rx_dmamap);
3687 			m_freem(rxd->rx_m);
3688 			rxd->rx_m = NULL;
3689 		}
3690 	}
3691 
3692 	if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) {
3693 		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
3694 			rxd = &sc->rl_ldata.rl_jrx_desc[i];
3695 			if (rxd->rx_m != NULL) {
3696 				bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag,
3697 				    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3698 				bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag,
3699 				    rxd->rx_dmamap);
3700 				m_freem(rxd->rx_m);
3701 				rxd->rx_m = NULL;
3702 			}
3703 		}
3704 	}
3705 }
3706 
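/*
 * The stop paths above all use the same bounded-poll idiom: spin on a
 * status bit with DELAY() and a countdown so that a wedged chip cannot
 * hang the kernel.  A generic sketch of the pattern (an illustration,
 * not driver code; the helper name is hypothetical):
 */
#if 0
static int
re_wait_bit_clear(struct rl_softc *sc, int reg, uint32_t bit, int delay_us)
{
	int i;

	for (i = RL_TIMEOUT; i > 0; i--) {
		if ((CSR_READ_4(sc, reg) & bit) == 0)
			return (0);
		DELAY(delay_us);
	}
	return (ETIMEDOUT);	/* caller decides whether to just warn */
}
#endif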
3707 /*
3708  * Device suspend routine.  Stop the interface and save some PCI
3709  * settings in case the BIOS doesn't restore them properly on
3710  * resume.
3711  */
3712 static int
3713 re_suspend(device_t dev)
3714 {
3715 	struct rl_softc		*sc;
3716 
3717 	sc = device_get_softc(dev);
3718 
3719 	RL_LOCK(sc);
3720 	re_stop(sc);
3721 	re_setwol(sc);
3722 	sc->suspended = 1;
3723 	RL_UNLOCK(sc);
3724 
3725 	return (0);
3726 }
3727 
3728 /*
3729  * Device resume routine.  Restore some PCI settings in case the BIOS
3730  * doesn't, re-enable busmastering, and restart the interface if
3731  * appropriate.
3732  */
3733 static int
3734 re_resume(device_t dev)
3735 {
3736 	struct rl_softc		*sc;
3737 	if_t ifp;
3738 
3739 	sc = device_get_softc(dev);
3740 
3741 	RL_LOCK(sc);
3742 
3743 	ifp = sc->rl_ifp;
3744 	/* Take controller out of sleep mode. */
3745 	if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
3746 		if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
3747 			CSR_WRITE_1(sc, RL_GPIO,
3748 			    CSR_READ_1(sc, RL_GPIO) | 0x01);
3749 	}
3750 
3751 	/*
3752 	 * Clear WOL matching so that normal Rx filtering
3753 	 * does not interfere with WOL patterns.
3754 	 */
3755 	re_clrwol(sc);
3756 
3757 	/* Reinitialize the interface if necessary. */
3758 	if (if_getflags(ifp) & IFF_UP)
3759 		re_init_locked(sc);
3760 
3761 	sc->suspended = 0;
3762 	RL_UNLOCK(sc);
3763 
3764 	return (0);
3765 }
3766 
3767 /*
3768  * Stop all chip I/O so that the kernel's probe routines don't
3769  * get confused by errant DMAs when rebooting.
3770  */
3771 static int
3772 re_shutdown(device_t dev)
3773 {
3774 	struct rl_softc		*sc;
3775 
3776 	sc = device_get_softc(dev);
3777 
3778 	RL_LOCK(sc);
3779 	re_stop(sc);
3780 	/*
3781 	 * Mark the interface as down, since otherwise we will panic
3782 	 * if an interrupt comes in later on, which can happen in
3783 	 * some cases.
3784 	 */
3785 	if_setflagbits(sc->rl_ifp, 0, IFF_UP);
3786 	re_setwol(sc);
3787 	RL_UNLOCK(sc);
3788 
3789 	return (0);
3790 }
3791 
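/*
 * Renegotiate a 10/100Mbps link before suspend.  Chips flagged with
 * RL_FLAG_WOL_MANLINK apparently cannot wake reliably from a gigabit
 * link (and a gigabit link costs more standby power), so the PHY is
 * reset and restricted to 10/100 advertisements below.
 */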
3792 static void
3793 re_set_linkspeed(struct rl_softc *sc)
3794 {
3795 	struct mii_softc *miisc;
3796 	struct mii_data *mii;
3797 	int aneg, i, phyno;
3798 
3799 	RL_LOCK_ASSERT(sc);
3800 
3801 	mii = device_get_softc(sc->rl_miibus);
3802 	mii_pollstat(mii);
3803 	aneg = 0;
3804 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
3805 	    (IFM_ACTIVE | IFM_AVALID)) {
3806 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
3807 		case IFM_10_T:
3808 		case IFM_100_TX:
3809 			return;
3810 		case IFM_1000_T:
3811 			aneg++;
3812 			break;
3813 		default:
3814 			break;
3815 		}
3816 	}
3817 	miisc = LIST_FIRST(&mii->mii_phys);
3818 	phyno = miisc->mii_phy;
3819 	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3820 		PHY_RESET(miisc);
3821 	re_miibus_writereg(sc->rl_dev, phyno, MII_100T2CR, 0);
3822 	re_miibus_writereg(sc->rl_dev, phyno,
3823 	    MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
3824 	re_miibus_writereg(sc->rl_dev, phyno,
3825 	    MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG);
3826 	DELAY(1000);
3827 	if (aneg != 0) {
3828 		/*
3829 		 * Poll link state until re(4) gets a 10/100Mbps link.
3830 		 */
3831 		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
3832 			mii_pollstat(mii);
3833 			if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
3834 			    == (IFM_ACTIVE | IFM_AVALID)) {
3835 				switch (IFM_SUBTYPE(mii->mii_media_active)) {
3836 				case IFM_10_T:
3837 				case IFM_100_TX:
3838 					return;
3839 				default:
3840 					break;
3841 				}
3842 			}
3843 			RL_UNLOCK(sc);
3844 			pause("relnk", hz);
3845 			RL_LOCK(sc);
3846 		}
3847 		if (i == MII_ANEGTICKS_GIGE)
3848 			device_printf(sc->rl_dev,
3849 			    "establishing a link failed, WOL may not work!");
3850 	}
3851 	/*
3852 	 * No link; force the MAC to a 100Mbps, full-duplex link.
3853 	 * The MAC does not require reprogramming for the resolved
3854 	 * speed/duplex, so this is just for completeness.
3855 	 */
3856 	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
3857 	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
3858 }
3859 
3860 static void
3861 re_setwol(struct rl_softc *sc)
3862 {
3863 	if_t ifp;
3864 	int			pmc;
3865 	uint16_t		pmstat;
3866 	uint8_t			v;
3867 
3868 	RL_LOCK_ASSERT(sc);
3869 
3870 	if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
3871 		return;
3872 
3873 	ifp = sc->rl_ifp;
3874 	/* Put controller into sleep mode. */
3875 	if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
3876 		if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
3877 			CSR_WRITE_1(sc, RL_GPIO,
3878 			    CSR_READ_1(sc, RL_GPIO) & ~0x01);
3879 	}
3880 	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0) {
3881 		if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) {
3882 			/* Disable RXDV gate. */
3883 			CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) &
3884 			    ~0x00080000);
3885 		}
3886 		re_set_rxmode(sc);
3887 		if ((sc->rl_flags & RL_FLAG_WOL_MANLINK) != 0)
3888 			re_set_linkspeed(sc);
3889 		if ((sc->rl_flags & RL_FLAG_WOLRXENB) != 0)
3890 			CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RX_ENB);
3891 	}
3892 	/* Enable config register write. */
3893 	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
3894 
3895 	/* Enable PME. */
3896 	v = CSR_READ_1(sc, sc->rl_cfg1);
3897 	v &= ~RL_CFG1_PME;
3898 	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
3899 		v |= RL_CFG1_PME;
3900 	CSR_WRITE_1(sc, sc->rl_cfg1, v);
3901 
3902 	v = CSR_READ_1(sc, sc->rl_cfg3);
3903 	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
3904 	if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
3905 		v |= RL_CFG3_WOL_MAGIC;
3906 	CSR_WRITE_1(sc, sc->rl_cfg3, v);
3907 
3908 	v = CSR_READ_1(sc, sc->rl_cfg5);
3909 	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST |
3910 	    RL_CFG5_WOL_LANWAKE);
3911 	if ((if_getcapenable(ifp) & IFCAP_WOL_UCAST) != 0)
3912 		v |= RL_CFG5_WOL_UCAST;
3913 	if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) != 0)
3914 		v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST;
3915 	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
3916 		v |= RL_CFG5_WOL_LANWAKE;
3917 	CSR_WRITE_1(sc, sc->rl_cfg5, v);
3918 
3919 	/* Config register write done. */
3920 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3921 
3922 	if ((if_getcapenable(ifp) & IFCAP_WOL) == 0 &&
3923 	    (sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0)
3924 		CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) & ~0x80);
3925 	/*
3926 	 * The hardware seems to reset its link speed to 100Mbps in
3927 	 * power-down mode, so switching to 100Mbps in the driver is
3928 	 * not needed.
3929 	 */
3930 
3931 	/* Request PME if WOL is requested. */
3932 	pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2);
3933 	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
3934 	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
3935 		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3936 	pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
3937 }
3938 
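/*
 * Usage sketch (assumptions: unit "re0", WOL supported by the chip):
 * "ifconfig re0 wol_magic" sets IFCAP_WOL_MAGIC via the SIOCSIFCAP case
 * in re_ioctl(); re_setwol() then latches it into RL_CFG3 above at
 * suspend/shutdown time and requests PME so a magic packet can power
 * the machine back up.
 */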
3939 static void
3940 re_clrwol(struct rl_softc *sc)
3941 {
3942 	int			pmc;
3943 	uint8_t			v;
3944 
3945 	RL_LOCK_ASSERT(sc);
3946 
3947 	if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
3948 		return;
3949 
3950 	/* Enable config register write. */
3951 	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
3952 
3953 	v = CSR_READ_1(sc, sc->rl_cfg3);
3954 	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
3955 	CSR_WRITE_1(sc, sc->rl_cfg3, v);
3956 
3957 	/* Config register write done. */
3958 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
3959 
3960 	v = CSR_READ_1(sc, sc->rl_cfg5);
3961 	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
3962 	v &= ~RL_CFG5_WOL_LANWAKE;
3963 	CSR_WRITE_1(sc, sc->rl_cfg5, v);
3964 }
3965 
3966 static void
3967 re_add_sysctls(struct rl_softc *sc)
3968 {
3969 	struct sysctl_ctx_list	*ctx;
3970 	struct sysctl_oid_list	*children;
3971 	int			error;
3972 
3973 	ctx = device_get_sysctl_ctx(sc->rl_dev);
3974 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev));
3975 
3976 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "stats",
3977 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
3978 	    re_sysctl_stats, "I", "Statistics Information");
3979 	if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0)
3980 		return;
3981 
3982 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "int_rx_mod",
3983 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
3984 	    &sc->rl_int_rx_mod, 0, sysctl_hw_re_int_mod, "I",
3985 	    "re RX interrupt moderation");
3986 	/* Pull in device tunables. */
3987 	sc->rl_int_rx_mod = RL_TIMER_DEFAULT;
3988 	error = resource_int_value(device_get_name(sc->rl_dev),
3989 	    device_get_unit(sc->rl_dev), "int_rx_mod", &sc->rl_int_rx_mod);
3990 	if (error == 0) {
3991 		if (sc->rl_int_rx_mod < RL_TIMER_MIN ||
3992 		    sc->rl_int_rx_mod > RL_TIMER_MAX) {
3993 			device_printf(sc->rl_dev, "int_rx_mod value out of "
3994 			    "range; using default: %d\n",
3995 			    RL_TIMER_DEFAULT);
3996 			sc->rl_int_rx_mod = RL_TIMER_DEFAULT;
3997 		}
3998 	}
3999 }
4000 
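/*
 * Usage sketch (assuming unit 0): the moderation timer registered above
 * can be set at runtime or seeded from device hints at boot:
 *
 *   # sysctl dev.re.0.int_rx_mod=1024        (runtime, via the handler)
 *   hint.re.0.int_rx_mod="1024"              (/boot/device.hints)
 *
 * Out-of-range hint values are reset to RL_TIMER_DEFAULT; out-of-range
 * runtime values are rejected with EINVAL by sysctl_int_range() below.
 */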
4001 static int
4002 re_sysctl_stats(SYSCTL_HANDLER_ARGS)
4003 {
4004 	struct rl_softc		*sc;
4005 	struct rl_stats		*stats;
4006 	int			error, i, result;
4007 
4008 	result = -1;
4009 	error = sysctl_handle_int(oidp, &result, 0, req);
4010 	if (error || req->newptr == NULL)
4011 		return (error);
4012 
4013 	if (result == 1) {
4014 		sc = (struct rl_softc *)arg1;
4015 		RL_LOCK(sc);
4016 		if ((if_getdrvflags(sc->rl_ifp) & IFF_DRV_RUNNING) == 0) {
4017 			RL_UNLOCK(sc);
4018 			goto done;
4019 		}
4020 		bus_dmamap_sync(sc->rl_ldata.rl_stag,
4021 		    sc->rl_ldata.rl_smap, BUS_DMASYNC_PREREAD);
4022 		CSR_WRITE_4(sc, RL_DUMPSTATS_HI,
4023 		    RL_ADDR_HI(sc->rl_ldata.rl_stats_addr));
4024 		CSR_WRITE_4(sc, RL_DUMPSTATS_LO,
4025 		    RL_ADDR_LO(sc->rl_ldata.rl_stats_addr));
4026 		CSR_WRITE_4(sc, RL_DUMPSTATS_LO,
4027 		    RL_ADDR_LO(sc->rl_ldata.rl_stats_addr |
4028 		    RL_DUMPSTATS_START));
4029 		for (i = RL_TIMEOUT; i > 0; i--) {
4030 			if ((CSR_READ_4(sc, RL_DUMPSTATS_LO) &
4031 			    RL_DUMPSTATS_START) == 0)
4032 				break;
4033 			DELAY(1000);
4034 		}
4035 		bus_dmamap_sync(sc->rl_ldata.rl_stag,
4036 		    sc->rl_ldata.rl_smap, BUS_DMASYNC_POSTREAD);
4037 		RL_UNLOCK(sc);
4038 		if (i == 0) {
4039 			device_printf(sc->rl_dev,
4040 			    "DUMP statistics request timed out\n");
4041 			return (ETIMEDOUT);
4042 		}
4043 done:
4044 		stats = sc->rl_ldata.rl_stats;
4045 		printf("%s statistics:\n", device_get_nameunit(sc->rl_dev));
4046 		printf("Tx frames : %ju\n",
4047 		    (uintmax_t)le64toh(stats->rl_tx_pkts));
4048 		printf("Rx frames : %ju\n",
4049 		    (uintmax_t)le64toh(stats->rl_rx_pkts));
4050 		printf("Tx errors : %ju\n",
4051 		    (uintmax_t)le64toh(stats->rl_tx_errs));
4052 		printf("Rx errors : %u\n",
4053 		    le32toh(stats->rl_rx_errs));
4054 		printf("Rx missed frames : %u\n",
4055 		    (uint32_t)le16toh(stats->rl_missed_pkts));
4056 		printf("Rx frame alignment errs : %u\n",
4057 		    (uint32_t)le16toh(stats->rl_rx_framealign_errs));
4058 		printf("Tx single collisions : %u\n",
4059 		    le32toh(stats->rl_tx_onecoll));
4060 		printf("Tx multiple collisions : %u\n",
4061 		    le32toh(stats->rl_tx_multicolls));
4062 		printf("Rx unicast frames : %ju\n",
4063 		    (uintmax_t)le64toh(stats->rl_rx_ucasts));
4064 		printf("Rx broadcast frames : %ju\n",
4065 		    (uintmax_t)le64toh(stats->rl_rx_bcasts));
4066 		printf("Rx multicast frames : %u\n",
4067 		    le32toh(stats->rl_rx_mcasts));
4068 		printf("Tx aborts : %u\n",
4069 		    (uint32_t)le16toh(stats->rl_tx_aborts));
4070 		printf("Tx underruns : %u\n",
4071 		    (uint32_t)le16toh(stats->rl_rx_underruns));
4072 	}
4073 
4074 	return (error);
4075 }
4076 
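/*
 * Usage sketch (assuming unit 0): writing 1 asks the chip to DMA its
 * statistics block and prints the table above on the console:
 *
 *   # sysctl dev.re.0.stats=1
 */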
4077 static int
4078 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
4079 {
4080 	int error, value;
4081 
4082 	if (arg1 == NULL)
4083 		return (EINVAL);
4084 	value = *(int *)arg1;
4085 	error = sysctl_handle_int(oidp, &value, 0, req);
4086 	if (error || req->newptr == NULL)
4087 		return (error);
4088 	if (value < low || value > high)
4089 		return (EINVAL);
4090 	*(int *)arg1 = value;
4091 
4092 	return (0);
4093 }
4094 
4095 static int
4096 sysctl_hw_re_int_mod(SYSCTL_HANDLER_ARGS)
4097 {
4098 
4099 	return (sysctl_int_range(oidp, arg1, arg2, req, RL_TIMER_MIN,
4100 	    RL_TIMER_MAX));
4101 }
4102 
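/*
 * debugnet(4) glue.  These entry points let the kernel move crash dumps
 * (netdump) or a remote debugger session (netgdb) over this NIC after a
 * panic, when interrupts are dead and the driver must be polled.
 */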
4103 #ifdef DEBUGNET
4104 static void
4105 re_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize)
4106 {
4107 	struct rl_softc *sc;
4108 
4109 	sc = if_getsoftc(ifp);
4110 	RL_LOCK(sc);
4111 	*nrxr = sc->rl_ldata.rl_rx_desc_cnt;
4112 	*ncl = DEBUGNET_MAX_IN_FLIGHT;
4113 	*clsize = (if_getmtu(ifp) > RL_MTU &&
4114 	    (sc->rl_flags & RL_FLAG_JUMBOV2) != 0) ? MJUM9BYTES : MCLBYTES;
4115 	RL_UNLOCK(sc);
4116 }
4117 
4118 static void
4119 re_debugnet_event(if_t ifp __unused, enum debugnet_ev event __unused)
4120 {
4121 }
4122 
4123 static int
4124 re_debugnet_transmit(if_t ifp, struct mbuf *m)
4125 {
4126 	struct rl_softc *sc;
4127 	int error;
4128 
4129 	sc = if_getsoftc(ifp);
4130 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
4131 	    IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0)
4132 		return (EBUSY);
4133 
4134 	error = re_encap(sc, &m);
4135 	if (error == 0)
4136 		re_start_tx(sc);
4137 	return (error);
4138 }
4139 
4140 static int
4141 re_debugnet_poll(if_t ifp, int count)
4142 {
4143 	struct rl_softc *sc;
4144 	int error;
4145 
4146 	sc = if_getsoftc(ifp);
4147 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 ||
4148 	    (sc->rl_flags & RL_FLAG_LINK) == 0)
4149 		return (EBUSY);
4150 
4151 	re_txeof(sc);
4152 	error = re_rxeof(sc, NULL);
4153 	if (error != 0 && error != EAGAIN)
4154 		return (error);
4155 	return (0);
4156 }
4157 #endif /* DEBUGNET */
4158