xref: /freebsd/sys/dev/rl/if_rl.c (revision dd21556857e8d40f66bf5ad54754d9d52669ebf7)
1 /*-
2  * Copyright (c) 1997, 1998
3  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by Bill Paul.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 /*
35  * RealTek 8129/8139 PCI NIC driver
36  *
37  * Supports several extremely cheap PCI 10/100 adapters based on
38  * the RealTek chipset. Datasheets can be obtained from
39  * www.realtek.com.tw.
40  *
41  * Written by Bill Paul <wpaul@ctr.columbia.edu>
42  * Electrical Engineering Department
43  * Columbia University, New York City
44  */
45 /*
46  * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is
47  * probably the worst PCI ethernet controller ever made, with the possible
48  * exception of the FEAST chip made by SMC. The 8139 supports bus-master
49  * DMA, but it has a terrible interface that nullifies any performance
50  * gains that bus-master DMA usually offers.
51  *
52  * For transmission, the chip offers a series of four TX descriptor
53  * registers. Each transmit frame must be in a contiguous buffer, aligned
54  * on a longword (32-bit) boundary. This means we almost always have to
55  * do mbuf copies in order to transmit a frame, except in the unlikely
56  * case where a) the packet fits into a single mbuf, and b) the packet
57  * is 32-bit aligned within the mbuf's data area. The presence of only
58  * four descriptor registers means that we can never have more than four
59  * packets queued for transmission at any one time.
60  *
61  * Reception is not much better. The driver has to allocate a single large
62  * buffer area (up to 64K in size) into which the chip will DMA received
63  * frames. Because we don't know where within this region received packets
64  * will begin or end, we have no choice but to copy data from the buffer
65  * area into mbufs in order to pass the packets up to the higher protocol
66  * levels.
67  *
68  * It's impossible given this rotten design to really achieve decent
69  * performance at 100Mbps, unless you happen to have a 400MHz PII or
70  * some equally overmuscled CPU to drive it.
71  *
72  * On the bright side, the 8139 does have a built-in PHY, although
73  * rather than using an MDIO serial interface like most other NICs, the
74  * PHY registers are directly accessible through the 8139's register
75  * space. The 8139 supports autonegotiation, as well as a 64-bit multicast
76  * filter.
77  *
78  * The 8129 chip is an older version of the 8139 that uses an external PHY
79  * chip. The 8129 has a serial MDIO interface for accessing the MII where
80  * the 8139 lets you directly access the on-board PHY registers. We need
81  * to select which interface to use depending on the chip type.
82  */
83 
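/*
 * A minimal sketch (not driver code) of what the transmit constraints
 * above mean in practice: rl_encap() below hands a frame to the chip
 * in place only when it occupies a single mbuf and starts on a 32-bit
 * boundary; anything else is first coalesced into one aligned buffer:
 *
 *	if (m->m_next != NULL || (mtod(m, uintptr_t) & 3) != 0)
 *		m = m_defrag(m, M_NOWAIT);
 */
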
84 #ifdef HAVE_KERNEL_OPTION_HEADERS
85 #include "opt_device_polling.h"
86 #endif
87 
88 #include <sys/param.h>
89 #include <sys/endian.h>
90 #include <sys/systm.h>
91 #include <sys/sockio.h>
92 #include <sys/mbuf.h>
93 #include <sys/malloc.h>
94 #include <sys/kernel.h>
95 #include <sys/module.h>
96 #include <sys/socket.h>
97 #include <sys/sysctl.h>
98 
99 #include <net/if.h>
100 #include <net/if_var.h>
101 #include <net/if_arp.h>
102 #include <net/ethernet.h>
103 #include <net/if_dl.h>
104 #include <net/if_media.h>
105 #include <net/if_types.h>
106 
107 #include <net/bpf.h>
108 
109 #include <machine/bus.h>
110 #include <machine/resource.h>
111 #include <sys/bus.h>
112 #include <sys/rman.h>
113 
114 #include <dev/mii/mii.h>
115 #include <dev/mii/mii_bitbang.h>
116 #include <dev/mii/miivar.h>
117 
118 #include <dev/pci/pcireg.h>
119 #include <dev/pci/pcivar.h>
120 
121 MODULE_DEPEND(rl, pci, 1, 1, 1);
122 MODULE_DEPEND(rl, ether, 1, 1, 1);
123 MODULE_DEPEND(rl, miibus, 1, 1, 1);
124 
125 /* "device miibus" required.  See GENERIC if you get errors here. */
126 #include "miibus_if.h"
127 
128 #include <dev/rl/if_rlreg.h>
129 
130 /*
131  * Various supported device vendors/types and their names.
132  */
133 static const struct rl_type rl_devs[] = {
134 	{ RT_VENDORID, RT_DEVICEID_8129, RL_8129,
135 		"RealTek 8129 10/100BaseTX" },
136 	{ RT_VENDORID, RT_DEVICEID_8139, RL_8139,
137 		"RealTek 8139 10/100BaseTX" },
138 	{ RT_VENDORID, RT_DEVICEID_8139D, RL_8139,
139 		"RealTek 8139 10/100BaseTX" },
140 	{ RT_VENDORID, RT_DEVICEID_8138, RL_8139,
141 		"RealTek 8139 10/100BaseTX CardBus" },
142 	{ RT_VENDORID, RT_DEVICEID_8100, RL_8139,
143 		"RealTek 8100 10/100BaseTX" },
144 	{ ACCTON_VENDORID, ACCTON_DEVICEID_5030, RL_8139,
145 		"Accton MPX 5030/5038 10/100BaseTX" },
146 	{ DELTA_VENDORID, DELTA_DEVICEID_8139, RL_8139,
147 		"Delta Electronics 8139 10/100BaseTX" },
148 	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_8139, RL_8139,
149 		"Addtron Technology 8139 10/100BaseTX" },
150 	{ DLINK_VENDORID, DLINK_DEVICEID_520TX_REVC1, RL_8139,
151 		"D-Link DFE-520TX (rev. C1) 10/100BaseTX" },
152 	{ DLINK_VENDORID, DLINK_DEVICEID_530TXPLUS, RL_8139,
153 		"D-Link DFE-530TX+ 10/100BaseTX" },
154 	{ DLINK_VENDORID, DLINK_DEVICEID_690TXD, RL_8139,
155 		"D-Link DFE-690TXD 10/100BaseTX" },
156 	{ NORTEL_VENDORID, ACCTON_DEVICEID_5030, RL_8139,
157 		"Nortel Networks 10/100BaseTX" },
158 	{ COREGA_VENDORID, COREGA_DEVICEID_FETHERCBTXD, RL_8139,
159 		"Corega FEther CB-TXD" },
160 	{ COREGA_VENDORID, COREGA_DEVICEID_FETHERIICBTXD, RL_8139,
161 		"Corega FEtherII CB-TXD" },
162 	{ PEPPERCON_VENDORID, PEPPERCON_DEVICEID_ROLF, RL_8139,
163 		"Peppercon AG ROL-F" },
164 	{ PLANEX_VENDORID, PLANEX_DEVICEID_FNW3603TX, RL_8139,
165 		"Planex FNW-3603-TX" },
166 	{ PLANEX_VENDORID, PLANEX_DEVICEID_FNW3800TX, RL_8139,
167 		"Planex FNW-3800-TX" },
168 	{ CP_VENDORID, RT_DEVICEID_8139, RL_8139,
169 		"Compaq HNE-300" },
170 	{ LEVEL1_VENDORID, LEVEL1_DEVICEID_FPC0106TX, RL_8139,
171 		"LevelOne FPC-0106TX" },
172 	{ EDIMAX_VENDORID, EDIMAX_DEVICEID_EP4103DL, RL_8139,
173 		"Edimax EP-4103DL CardBus" }
174 };
175 
176 static int rl_attach(device_t);
177 static int rl_detach(device_t);
178 static void rl_dmamap_cb(void *, bus_dma_segment_t *, int, int);
179 static int rl_dma_alloc(struct rl_softc *);
180 static void rl_dma_free(struct rl_softc *);
181 static void rl_eeprom_putbyte(struct rl_softc *, int);
182 static void rl_eeprom_getword(struct rl_softc *, int, uint16_t *);
183 static int rl_encap(struct rl_softc *, struct mbuf **);
184 static int rl_list_tx_init(struct rl_softc *);
185 static int rl_list_rx_init(struct rl_softc *);
186 static int rl_ifmedia_upd(if_t);
187 static void rl_ifmedia_sts(if_t, struct ifmediareq *);
188 static int rl_ioctl(if_t, u_long, caddr_t);
189 static void rl_intr(void *);
190 static void rl_init(void *);
191 static void rl_init_locked(struct rl_softc *sc);
192 static int rl_miibus_readreg(device_t, int, int);
193 static void rl_miibus_statchg(device_t);
194 static int rl_miibus_writereg(device_t, int, int, int);
195 #ifdef DEVICE_POLLING
196 static int rl_poll(if_t ifp, enum poll_cmd cmd, int count);
197 static int rl_poll_locked(if_t ifp, enum poll_cmd cmd, int count);
198 #endif
199 static int rl_probe(device_t);
200 static void rl_read_eeprom(struct rl_softc *, uint8_t *, int, int, int);
201 static void rl_reset(struct rl_softc *);
202 static int rl_resume(device_t);
203 static int rl_rxeof(struct rl_softc *);
204 static void rl_rxfilter(struct rl_softc *);
205 static int rl_shutdown(device_t);
206 static void rl_start(if_t);
207 static void rl_start_locked(if_t);
208 static void rl_stop(struct rl_softc *);
209 static int rl_suspend(device_t);
210 static void rl_tick(void *);
211 static void rl_txeof(struct rl_softc *);
212 static void rl_watchdog(struct rl_softc *);
213 static void rl_setwol(struct rl_softc *);
214 static void rl_clrwol(struct rl_softc *);
215 
216 /*
217  * MII bit-bang glue
218  */
219 static uint32_t rl_mii_bitbang_read(device_t);
220 static void rl_mii_bitbang_write(device_t, uint32_t);
221 
222 static const struct mii_bitbang_ops rl_mii_bitbang_ops = {
223 	rl_mii_bitbang_read,
224 	rl_mii_bitbang_write,
225 	{
226 		RL_MII_DATAOUT,	/* MII_BIT_MDO */
227 		RL_MII_DATAIN,	/* MII_BIT_MDI */
228 		RL_MII_CLK,	/* MII_BIT_MDC */
229 		RL_MII_DIR,	/* MII_BIT_DIR_HOST_PHY */
230 		0,		/* MII_BIT_DIR_PHY_HOST */
231 	}
232 };
233 
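/*
 * Usage sketch (illustrative only): the generic mii_bitbang(4) code does
 * all of the serial MDIO framing itself; it just wiggles the bits named
 * in the table above through the two accessors below.  The driver only
 * has to pass the table along, as rl_miibus_readreg()/rl_miibus_writereg()
 * do further down for the 8129:
 *
 *	val = mii_bitbang_readreg(dev, &rl_mii_bitbang_ops, phy, reg);
 *	mii_bitbang_writereg(dev, &rl_mii_bitbang_ops, phy, reg, data);
 */
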
234 static device_method_t rl_methods[] = {
235 	/* Device interface */
236 	DEVMETHOD(device_probe,		rl_probe),
237 	DEVMETHOD(device_attach,	rl_attach),
238 	DEVMETHOD(device_detach,	rl_detach),
239 	DEVMETHOD(device_suspend,	rl_suspend),
240 	DEVMETHOD(device_resume,	rl_resume),
241 	DEVMETHOD(device_shutdown,	rl_shutdown),
242 
243 	/* MII interface */
244 	DEVMETHOD(miibus_readreg,	rl_miibus_readreg),
245 	DEVMETHOD(miibus_writereg,	rl_miibus_writereg),
246 	DEVMETHOD(miibus_statchg,	rl_miibus_statchg),
247 
248 	DEVMETHOD_END
249 };
250 
251 static driver_t rl_driver = {
252 	"rl",
253 	rl_methods,
254 	sizeof(struct rl_softc)
255 };
256 
257 DRIVER_MODULE(rl, pci, rl_driver, 0, 0);
258 MODULE_PNP_INFO("U16:vendor;U16:device", pci, rl, rl_devs,
259     nitems(rl_devs) - 1);
260 DRIVER_MODULE(rl, cardbus, rl_driver, 0, 0);
261 DRIVER_MODULE(miibus, rl, miibus_driver, 0, 0);
262 
263 #define EE_SET(x)					\
264 	CSR_WRITE_1(sc, RL_EECMD,			\
265 		CSR_READ_1(sc, RL_EECMD) | x)
266 
267 #define EE_CLR(x)					\
268 	CSR_WRITE_1(sc, RL_EECMD,			\
269 		CSR_READ_1(sc, RL_EECMD) & ~x)
270 
271 /*
272  * Send a read command and address to the EEPROM, check for ACK.
273  */
274 static void
275 rl_eeprom_putbyte(struct rl_softc *sc, int addr)
276 {
277 	int			d, i;
278 
279 	d = addr | sc->rl_eecmd_read;
280 
281 	/*
282 	 * Feed in each bit and strobe the clock.
283 	 */
284 	for (i = 0x400; i; i >>= 1) {
285 		if (d & i) {
286 			EE_SET(RL_EE_DATAIN);
287 		} else {
288 			EE_CLR(RL_EE_DATAIN);
289 		}
290 		DELAY(100);
291 		EE_SET(RL_EE_CLK);
292 		DELAY(150);
293 		EE_CLR(RL_EE_CLK);
294 		DELAY(100);
295 	}
296 }
297 
298 /*
299  * Read a word of data stored in the EEPROM at address 'addr.'
300  */
301 static void
302 rl_eeprom_getword(struct rl_softc *sc, int addr, uint16_t *dest)
303 {
304 	int			i;
305 	uint16_t		word = 0;
306 
307 	/* Enter EEPROM access mode. */
308 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);
309 
310 	/*
311 	 * Send address of word we want to read.
312 	 */
313 	rl_eeprom_putbyte(sc, addr);
314 
315 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);
316 
317 	/*
318 	 * Start reading bits from EEPROM.
319 	 */
320 	for (i = 0x8000; i; i >>= 1) {
321 		EE_SET(RL_EE_CLK);
322 		DELAY(100);
323 		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
324 			word |= i;
325 		EE_CLR(RL_EE_CLK);
326 		DELAY(100);
327 	}
328 
329 	/* Turn off EEPROM access mode. */
330 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
331 
332 	*dest = word;
333 }
334 
335 /*
336  * Read a sequence of words from the EEPROM.
337  */
338 static void
339 rl_read_eeprom(struct rl_softc *sc, uint8_t *dest, int off, int cnt, int swap)
340 {
341 	int			i;
342 	uint16_t		word = 0, *ptr;
343 
344 	for (i = 0; i < cnt; i++) {
345 		rl_eeprom_getword(sc, off + i, &word);
346 		ptr = (uint16_t *)(dest + (i * 2));
347 		if (swap)
348 			*ptr = ntohs(word);
349 		else
350 			*ptr = word;
351 	}
352 }
353 
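/*
 * Usage sketch, mirroring what rl_attach() does below: the station
 * address is stored as three 16-bit words starting at RL_EE_EADDR, so it
 * is read unswapped and unpacked a byte at a time:
 *
 *	uint16_t as[3];
 *
 *	rl_read_eeprom(sc, (uint8_t *)as, RL_EE_EADDR, 3, 0);
 *	for (i = 0; i < 3; i++) {
 *		eaddr[(i * 2) + 0] = as[i] & 0xff;
 *		eaddr[(i * 2) + 1] = as[i] >> 8;
 *	}
 */
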
354 /*
355  * Read the MII serial port for the MII bit-bang module.
356  */
357 static uint32_t
358 rl_mii_bitbang_read(device_t dev)
359 {
360 	struct rl_softc *sc;
361 	uint32_t val;
362 
363 	sc = device_get_softc(dev);
364 
365 	val = CSR_READ_1(sc, RL_MII);
366 	CSR_BARRIER(sc, RL_MII, 1,
367 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
368 
369 	return (val);
370 }
371 
372 /*
373  * Write the MII serial port for the MII bit-bang module.
374  */
375 static void
376 rl_mii_bitbang_write(device_t dev, uint32_t val)
377 {
378 	struct rl_softc *sc;
379 
380 	sc = device_get_softc(dev);
381 
382 	CSR_WRITE_1(sc, RL_MII, val);
383 	CSR_BARRIER(sc, RL_MII, 1,
384 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
385 }
386 
387 static int
388 rl_miibus_readreg(device_t dev, int phy, int reg)
389 {
390 	struct rl_softc		*sc;
391 	uint16_t		rl8139_reg;
392 
393 	sc = device_get_softc(dev);
394 
395 	if (sc->rl_type == RL_8139) {
396 		switch (reg) {
397 		case MII_BMCR:
398 			rl8139_reg = RL_BMCR;
399 			break;
400 		case MII_BMSR:
401 			rl8139_reg = RL_BMSR;
402 			break;
403 		case MII_ANAR:
404 			rl8139_reg = RL_ANAR;
405 			break;
406 		case MII_ANER:
407 			rl8139_reg = RL_ANER;
408 			break;
409 		case MII_ANLPAR:
410 			rl8139_reg = RL_LPAR;
411 			break;
412 		case MII_PHYIDR1:
413 		case MII_PHYIDR2:
414 			return (0);
415 		/*
416 		 * Allow the rlphy driver to read the media status
417 		 * register. If we have a link partner which does not
418 		 * support NWAY, this is the register which will tell
419 		 * us the results of parallel detection.
420 		 */
421 		case RL_MEDIASTAT:
422 			return (CSR_READ_1(sc, RL_MEDIASTAT));
423 		default:
424 			device_printf(sc->rl_dev, "bad phy register\n");
425 			return (0);
426 		}
427 		return (CSR_READ_2(sc, rl8139_reg));
428 	}
429 
430 	return (mii_bitbang_readreg(dev, &rl_mii_bitbang_ops, phy, reg));
431 }
432 
433 static int
434 rl_miibus_writereg(device_t dev, int phy, int reg, int data)
435 {
436 	struct rl_softc		*sc;
437 	uint16_t		rl8139_reg;
438 
439 	sc = device_get_softc(dev);
440 
441 	if (sc->rl_type == RL_8139) {
442 		switch (reg) {
443 		case MII_BMCR:
444 			rl8139_reg = RL_BMCR;
445 			break;
446 		case MII_BMSR:
447 			rl8139_reg = RL_BMSR;
448 			break;
449 		case MII_ANAR:
450 			rl8139_reg = RL_ANAR;
451 			break;
452 		case MII_ANER:
453 			rl8139_reg = RL_ANER;
454 			break;
455 		case MII_ANLPAR:
456 			rl8139_reg = RL_LPAR;
457 			break;
458 		case MII_PHYIDR1:
459 		case MII_PHYIDR2:
460 			return (0);
462 		default:
463 			device_printf(sc->rl_dev, "bad phy register\n");
464 			return (0);
465 		}
466 		CSR_WRITE_2(sc, rl8139_reg, data);
467 		return (0);
468 	}
469 
470 	mii_bitbang_writereg(dev, &rl_mii_bitbang_ops, phy, reg, data);
471 
472 	return (0);
473 }
474 
475 static void
476 rl_miibus_statchg(device_t dev)
477 {
478 	struct rl_softc		*sc;
479 	if_t			ifp;
480 	struct mii_data		*mii;
481 
482 	sc = device_get_softc(dev);
483 	mii = device_get_softc(sc->rl_miibus);
484 	ifp = sc->rl_ifp;
485 	if (mii == NULL || ifp == NULL ||
486 	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
487 		return;
488 
489 	sc->rl_flags &= ~RL_FLAG_LINK;
490 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
491 	    (IFM_ACTIVE | IFM_AVALID)) {
492 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
493 		case IFM_10_T:
494 		case IFM_100_TX:
495 			sc->rl_flags |= RL_FLAG_LINK;
496 			break;
497 		default:
498 			break;
499 		}
500 	}
501 	/*
502 	 * RealTek controllers do not provide any interface to
503 	 * Tx/Rx MACs for resolved speed, duplex and flow-control
504 	 * parameters.
505 	 */
506 }
507 
508 static u_int
509 rl_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
510 {
511 	uint32_t *hashes = arg;
512 	int h;
513 
514 	h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
515 	if (h < 32)
516 		hashes[0] |= (1 << h);
517 	else
518 		hashes[1] |= (1 << (h - 32));
519 
520 	return (1);
521 }
522 
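/*
 * Worked example of the hash above (illustrative only): the big-endian
 * CRC32 of the 6-byte multicast address is computed and only its top 6
 * bits are kept (the ">> 26"), giving an index h in 0..63.  Indices 0-31
 * set bit h of RL_MAR0 and indices 32-63 set bit (h - 32) of RL_MAR4,
 * which together form the 64-bit filter programmed by rl_rxfilter():
 *
 *	h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
 *	hashes[h / 32] |= 1 << (h % 32);
 */
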
523 /*
524  * Program the 64-bit multicast hash filter.
525  */
526 static void
527 rl_rxfilter(struct rl_softc *sc)
528 {
529 	if_t			ifp = sc->rl_ifp;
530 	uint32_t		hashes[2] = { 0, 0 };
531 	uint32_t		rxfilt;
532 
533 	RL_LOCK_ASSERT(sc);
534 
535 	rxfilt = CSR_READ_4(sc, RL_RXCFG);
536 	rxfilt &= ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD |
537 	    RL_RXCFG_RX_MULTI);
538 	/* Always accept frames destined for this host. */
539 	rxfilt |= RL_RXCFG_RX_INDIV;
540 	/* Set capture broadcast bit to capture broadcast frames. */
541 	if (if_getflags(ifp) & IFF_BROADCAST)
542 		rxfilt |= RL_RXCFG_RX_BROAD;
543 	if (if_getflags(ifp) & IFF_ALLMULTI || if_getflags(ifp) & IFF_PROMISC) {
544 		rxfilt |= RL_RXCFG_RX_MULTI;
545 		if (if_getflags(ifp) & IFF_PROMISC)
546 			rxfilt |= RL_RXCFG_RX_ALLPHYS;
547 		hashes[0] = 0xFFFFFFFF;
548 		hashes[1] = 0xFFFFFFFF;
549 	} else {
550 		/* Now program new ones. */
551 		if_foreach_llmaddr(ifp, rl_hash_maddr, hashes);
552 		if (hashes[0] != 0 || hashes[1] != 0)
553 			rxfilt |= RL_RXCFG_RX_MULTI;
554 	}
555 
556 	CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
557 	CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
558 	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
559 }
560 
561 static void
562 rl_reset(struct rl_softc *sc)
563 {
564 	int			i;
565 
566 	RL_LOCK_ASSERT(sc);
567 
568 	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);
569 
570 	for (i = 0; i < RL_TIMEOUT; i++) {
571 		DELAY(10);
572 		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
573 			break;
574 	}
575 	if (i == RL_TIMEOUT)
576 		device_printf(sc->rl_dev, "reset never completed!\n");
577 }
578 
579 /*
580  * Probe for a RealTek 8129/8139 chip. Check the PCI vendor and device
581  * IDs against our list and return a device name if we find a match.
582  */
583 static int
584 rl_probe(device_t dev)
585 {
586 	const struct rl_type	*t;
587 	uint16_t		devid, revid, vendor;
588 	int			i;
589 
590 	vendor = pci_get_vendor(dev);
591 	devid = pci_get_device(dev);
592 	revid = pci_get_revid(dev);
593 
594 	if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) {
595 		if (revid == 0x20) {
596 			/* 8139C+, let re(4) take care of this device. */
597 			return (ENXIO);
598 		}
599 	}
600 	t = rl_devs;
601 	for (i = 0; i < nitems(rl_devs); i++, t++) {
602 		if (vendor == t->rl_vid && devid == t->rl_did) {
603 			device_set_desc(dev, t->rl_name);
604 			return (BUS_PROBE_DEFAULT);
605 		}
606 	}
607 
608 	return (ENXIO);
609 }
610 
611 struct rl_dmamap_arg {
612 	bus_addr_t	rl_busaddr;
613 };
614 
615 static void
616 rl_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
617 {
618 	struct rl_dmamap_arg	*ctx;
619 
620 	if (error != 0)
621 		return;
622 
623 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
624 
625 	ctx = (struct rl_dmamap_arg *)arg;
626 	ctx->rl_busaddr = segs[0].ds_addr;
627 }
628 
629 /*
630  * Attach the interface. Allocate softc structures, do ifmedia
631  * setup and ethernet/BPF attach.
632  */
633 static int
634 rl_attach(device_t dev)
635 {
636 	uint8_t			eaddr[ETHER_ADDR_LEN];
637 	uint16_t		as[3];
638 	if_t			ifp;
639 	struct rl_softc		*sc;
640 	const struct rl_type	*t;
641 	struct sysctl_ctx_list	*ctx;
642 	struct sysctl_oid_list	*children;
643 	int			error = 0, hwrev, i, phy, pmc, rid;
644 	int			prefer_iomap, unit;
645 	uint16_t		rl_did = 0;
646 	char			tn[32];
647 
648 	sc = device_get_softc(dev);
649 	unit = device_get_unit(dev);
650 	sc->rl_dev = dev;
651 
652 	sc->rl_twister_enable = 0;
653 	snprintf(tn, sizeof(tn), "dev.rl.%d.twister_enable", unit);
654 	TUNABLE_INT_FETCH(tn, &sc->rl_twister_enable);
655 	ctx = device_get_sysctl_ctx(sc->rl_dev);
656 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev));
657 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "twister_enable", CTLFLAG_RD,
658 	   &sc->rl_twister_enable, 0, "");
659 
660 	mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
661 	    MTX_DEF);
662 	callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0);
663 
664 	pci_enable_busmaster(dev);
665 
666 	/*
667 	 * Map control/status registers.
668 	 * Default to using PIO access for this driver. On SMP systems,
669 	 * there appear to be problems with memory mapped mode: it looks
670 	 * like doing too many memory mapped access back to back in rapid
671 	 * succession can hang the bus. I'm inclined to blame this on
672 	 * crummy design/construction on the part of RealTek. Memory
673 	 * mapped mode does appear to work on uniprocessor systems though.
674 	 */
675 	prefer_iomap = 1;
676 	snprintf(tn, sizeof(tn), "dev.rl.%d.prefer_iomap", unit);
677 	TUNABLE_INT_FETCH(tn, &prefer_iomap);
678 	if (prefer_iomap) {
679 		sc->rl_res_id = PCIR_BAR(0);
680 		sc->rl_res_type = SYS_RES_IOPORT;
681 		sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
682 		    &sc->rl_res_id, RF_ACTIVE);
683 	}
684 	if (prefer_iomap == 0 || sc->rl_res == NULL) {
685 		sc->rl_res_id = PCIR_BAR(1);
686 		sc->rl_res_type = SYS_RES_MEMORY;
687 		sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
688 		    &sc->rl_res_id, RF_ACTIVE);
689 	}
690 	if (sc->rl_res == NULL) {
691 		device_printf(dev, "couldn't map ports/memory\n");
692 		error = ENXIO;
693 		goto fail;
694 	}
695 
696 #ifdef notdef
697 	/*
698 	 * Detect the Realtek 8139B. For some reason, this chip is very
699 	 * unstable when left to autoselect the media.
700 	 * The best workaround is to set the device to the required
701 	 * media type or to set it to the 10 Meg speed.
702 	 */
703 	if ((rman_get_end(sc->rl_res) - rman_get_start(sc->rl_res)) == 0xFF)
704 		device_printf(dev,
705 "Realtek 8139B detected. Warning, this may be unstable in autoselect mode\n");
706 #endif
707 
708 	sc->rl_btag = rman_get_bustag(sc->rl_res);
709 	sc->rl_bhandle = rman_get_bushandle(sc->rl_res);
710 
711 	/* Allocate interrupt */
712 	rid = 0;
713 	sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
714 	    RF_SHAREABLE | RF_ACTIVE);
715 
716 	if (sc->rl_irq[0] == NULL) {
717 		device_printf(dev, "couldn't map interrupt\n");
718 		error = ENXIO;
719 		goto fail;
720 	}
721 
722 	sc->rl_cfg0 = RL_8139_CFG0;
723 	sc->rl_cfg1 = RL_8139_CFG1;
724 	sc->rl_cfg2 = 0;
725 	sc->rl_cfg3 = RL_8139_CFG3;
726 	sc->rl_cfg4 = RL_8139_CFG4;
727 	sc->rl_cfg5 = RL_8139_CFG5;
728 
729 	/*
730 	 * Reset the adapter. Only take the lock here as it's needed in
731 	 * order to call rl_reset().
732 	 */
733 	RL_LOCK(sc);
734 	rl_reset(sc);
735 	RL_UNLOCK(sc);
736 
737 	sc->rl_eecmd_read = RL_EECMD_READ_6BIT;
738 	rl_read_eeprom(sc, (uint8_t *)&rl_did, 0, 1, 0);
739 	if (rl_did != 0x8129)
740 		sc->rl_eecmd_read = RL_EECMD_READ_8BIT;
741 
742 	/*
743 	 * Get station address from the EEPROM.
744 	 */
745 	rl_read_eeprom(sc, (uint8_t *)as, RL_EE_EADDR, 3, 0);
746 	for (i = 0; i < 3; i++) {
747 		eaddr[(i * 2) + 0] = as[i] & 0xff;
748 		eaddr[(i * 2) + 1] = as[i] >> 8;
749 	}
750 
751 	/*
752 	 * Now read the exact device type from the EEPROM to find
753 	 * out if it's an 8129 or 8139.
754 	 */
755 	rl_read_eeprom(sc, (uint8_t *)&rl_did, RL_EE_PCI_DID, 1, 0);
756 
757 	t = rl_devs;
758 	sc->rl_type = 0;
759 	while (t->rl_name != NULL) {
760 		if (rl_did == t->rl_did) {
761 			sc->rl_type = t->rl_basetype;
762 			break;
763 		}
764 		t++;
765 	}
766 
767 	if (sc->rl_type == 0) {
768 		device_printf(dev, "unknown device ID: %x, assuming 8139\n",
769 		    rl_did);
770 		sc->rl_type = RL_8139;
771 		/*
772 		 * Read RL_IDR register to get ethernet address as accessing
773 		 * EEPROM may not extract correct address.
774 		 */
775 		for (i = 0; i < ETHER_ADDR_LEN; i++)
776 			eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
777 	}
778 
779 	if ((error = rl_dma_alloc(sc)) != 0)
780 		goto fail;
781 
782 	ifp = sc->rl_ifp = if_alloc(IFT_ETHER);
783 
784 #define	RL_PHYAD_INTERNAL	0
785 
786 	/* Do MII setup */
787 	phy = MII_PHY_ANY;
788 	if (sc->rl_type == RL_8139)
789 		phy = RL_PHYAD_INTERNAL;
790 	error = mii_attach(dev, &sc->rl_miibus, ifp, rl_ifmedia_upd,
791 	    rl_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
792 	if (error != 0) {
793 		device_printf(dev, "attaching PHYs failed\n");
794 		goto fail;
795 	}
796 
797 	if_setsoftc(ifp, sc);
798 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
799 	if_setmtu(ifp, ETHERMTU);
800 	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
801 	if_setioctlfn(ifp, rl_ioctl);
802 	if_setstartfn(ifp, rl_start);
803 	if_setinitfn(ifp, rl_init);
804 	if_setcapabilities(ifp, IFCAP_VLAN_MTU);
805 	/* Check WOL for RTL8139B or newer controllers. */
806 	if (sc->rl_type == RL_8139 &&
807 	    pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) == 0) {
808 		hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;
809 		switch (hwrev) {
810 		case RL_HWREV_8139B:
811 		case RL_HWREV_8130:
812 		case RL_HWREV_8139C:
813 		case RL_HWREV_8139D:
814 		case RL_HWREV_8101:
815 		case RL_HWREV_8100:
816 			if_setcapabilitiesbit(ifp, IFCAP_WOL, 0);
817 			/* Disable WOL. */
818 			rl_clrwol(sc);
819 			break;
820 		default:
821 			break;
822 		}
823 	}
824 	if_setcapenable(ifp, if_getcapabilities(ifp));
825 	if_setcapenablebit(ifp, 0, (IFCAP_WOL_UCAST | IFCAP_WOL_MCAST));
826 #ifdef DEVICE_POLLING
827 	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
828 #endif
829 	if_setsendqlen(ifp, ifqmaxlen);
830 	if_setsendqready(ifp);
831 
832 	/*
833 	 * Call MI attach routine.
834 	 */
835 	ether_ifattach(ifp, eaddr);
836 
837 	/* Hook interrupt last to avoid having to lock softc */
838 	error = bus_setup_intr(dev, sc->rl_irq[0], INTR_TYPE_NET | INTR_MPSAFE,
839 	    NULL, rl_intr, sc, &sc->rl_intrhand[0]);
840 	if (error) {
841 		device_printf(sc->rl_dev, "couldn't set up irq\n");
842 		ether_ifdetach(ifp);
843 	}
844 
845 fail:
846 	if (error)
847 		rl_detach(dev);
848 
849 	return (error);
850 }
851 
852 /*
853  * Shutdown hardware and free up resources. This can be called any
854  * time after the mutex has been initialized. It is called in both
855  * the error case in attach and the normal detach case so it needs
856  * to be careful about only freeing resources that have actually been
857  * allocated.
858  */
859 static int
860 rl_detach(device_t dev)
861 {
862 	struct rl_softc		*sc;
863 	if_t			ifp;
864 
865 	sc = device_get_softc(dev);
866 	ifp = sc->rl_ifp;
867 
868 	KASSERT(mtx_initialized(&sc->rl_mtx), ("rl mutex not initialized"));
869 
870 #ifdef DEVICE_POLLING
871 	if (if_getcapenable(ifp) & IFCAP_POLLING)
872 		ether_poll_deregister(ifp);
873 #endif
874 	/* These should only be active if attach succeeded */
875 	if (device_is_attached(dev)) {
876 		RL_LOCK(sc);
877 		rl_stop(sc);
878 		RL_UNLOCK(sc);
879 		callout_drain(&sc->rl_stat_callout);
880 		ether_ifdetach(ifp);
881 	}
882 #if 0
883 	sc->suspended = 1;
884 #endif
885 	bus_generic_detach(dev);
886 
887 	if (sc->rl_intrhand[0])
888 		bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]);
889 	if (sc->rl_irq[0])
890 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq[0]);
891 	if (sc->rl_res)
892 		bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id,
893 		    sc->rl_res);
894 
895 	if (ifp)
896 		if_free(ifp);
897 
898 	rl_dma_free(sc);
899 
900 	mtx_destroy(&sc->rl_mtx);
901 
902 	return (0);
903 }
904 
905 static int
906 rl_dma_alloc(struct rl_softc *sc)
907 {
908 	struct rl_dmamap_arg	ctx;
909 	int			error, i;
910 
911 	/*
912 	 * Allocate the parent bus DMA tag appropriate for PCI.
913 	 */
914 	error = bus_dma_tag_create(bus_get_dma_tag(sc->rl_dev),	/* parent */
915 	    1, 0,			/* alignment, boundary */
916 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
917 	    BUS_SPACE_MAXADDR,		/* highaddr */
918 	    NULL, NULL,			/* filter, filterarg */
919 	    BUS_SPACE_MAXSIZE_32BIT, 0,	/* maxsize, nsegments */
920 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
921 	    0,				/* flags */
922 	    NULL, NULL,			/* lockfunc, lockarg */
923 	    &sc->rl_parent_tag);
924 	if (error) {
925 		device_printf(sc->rl_dev,
926 		    "failed to create parent DMA tag.\n");
927 		goto fail;
928 	}
929 	/* Create DMA tag for Rx memory block. */
930 	error = bus_dma_tag_create(sc->rl_parent_tag,	/* parent */
931 	    RL_RX_8139_BUF_ALIGN, 0,	/* alignment, boundary */
932 	    BUS_SPACE_MAXADDR,		/* lowaddr */
933 	    BUS_SPACE_MAXADDR,		/* highaddr */
934 	    NULL, NULL,			/* filter, filterarg */
935 	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, 1,	/* maxsize,nsegments */
936 	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ,	/* maxsegsize */
937 	    0,				/* flags */
938 	    NULL, NULL,			/* lockfunc, lockarg */
939 	    &sc->rl_cdata.rl_rx_tag);
940 	if (error) {
941 		device_printf(sc->rl_dev,
942 		    "failed to create Rx memory block DMA tag.\n");
943 		goto fail;
944 	}
945 	/* Create DMA tag for Tx buffer. */
946 	error = bus_dma_tag_create(sc->rl_parent_tag,	/* parent */
947 	    RL_TX_8139_BUF_ALIGN, 0,	/* alignment, boundary */
948 	    BUS_SPACE_MAXADDR,		/* lowaddr */
949 	    BUS_SPACE_MAXADDR,		/* highaddr */
950 	    NULL, NULL,			/* filter, filterarg */
951 	    MCLBYTES, 1,		/* maxsize, nsegments */
952 	    MCLBYTES,			/* maxsegsize */
953 	    0,				/* flags */
954 	    NULL, NULL,			/* lockfunc, lockarg */
955 	    &sc->rl_cdata.rl_tx_tag);
956 	if (error) {
957 		device_printf(sc->rl_dev, "failed to create Tx DMA tag.\n");
958 		goto fail;
959 	}
960 
961 	/*
962 	 * Allocate DMA'able memory and load DMA map for Rx memory block.
963 	 */
964 	error = bus_dmamem_alloc(sc->rl_cdata.rl_rx_tag,
965 	    (void **)&sc->rl_cdata.rl_rx_buf, BUS_DMA_WAITOK |
966 	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->rl_cdata.rl_rx_dmamap);
967 	if (error != 0) {
968 		device_printf(sc->rl_dev,
969 		    "failed to allocate Rx DMA memory block.\n");
970 		goto fail;
971 	}
972 	ctx.rl_busaddr = 0;
973 	error = bus_dmamap_load(sc->rl_cdata.rl_rx_tag,
974 	    sc->rl_cdata.rl_rx_dmamap, sc->rl_cdata.rl_rx_buf,
975 	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, rl_dmamap_cb, &ctx,
976 	    BUS_DMA_NOWAIT);
977 	if (error != 0 || ctx.rl_busaddr == 0) {
978 		device_printf(sc->rl_dev,
979 		    "could not load Rx DMA memory block.\n");
980 		goto fail;
981 	}
982 	sc->rl_cdata.rl_rx_buf_paddr = ctx.rl_busaddr;
983 
984 	/* Create DMA maps for Tx buffers. */
985 	for (i = 0; i < RL_TX_LIST_CNT; i++) {
986 		sc->rl_cdata.rl_tx_chain[i] = NULL;
987 		sc->rl_cdata.rl_tx_dmamap[i] = NULL;
988 		error = bus_dmamap_create(sc->rl_cdata.rl_tx_tag, 0,
989 		    &sc->rl_cdata.rl_tx_dmamap[i]);
990 		if (error != 0) {
991 			device_printf(sc->rl_dev,
992 			    "could not create Tx dmamap.\n");
993 			goto fail;
994 		}
995 	}
996 
997 	/* Leave a few bytes before the start of the RX ring buffer. */
998 	sc->rl_cdata.rl_rx_buf_ptr = sc->rl_cdata.rl_rx_buf;
999 	sc->rl_cdata.rl_rx_buf += RL_RX_8139_BUF_RESERVE;
1000 
1001 fail:
1002 	return (error);
1003 }
1004 
1005 static void
1006 rl_dma_free(struct rl_softc *sc)
1007 {
1008 	int			i;
1009 
1010 	/* Rx memory block. */
1011 	if (sc->rl_cdata.rl_rx_tag != NULL) {
1012 		if (sc->rl_cdata.rl_rx_buf_paddr != 0)
1013 			bus_dmamap_unload(sc->rl_cdata.rl_rx_tag,
1014 			    sc->rl_cdata.rl_rx_dmamap);
1015 		if (sc->rl_cdata.rl_rx_buf_ptr != NULL)
1016 			bus_dmamem_free(sc->rl_cdata.rl_rx_tag,
1017 			    sc->rl_cdata.rl_rx_buf_ptr,
1018 			    sc->rl_cdata.rl_rx_dmamap);
1019 		sc->rl_cdata.rl_rx_buf_ptr = NULL;
1020 		sc->rl_cdata.rl_rx_buf = NULL;
1021 		sc->rl_cdata.rl_rx_buf_paddr = 0;
1022 		bus_dma_tag_destroy(sc->rl_cdata.rl_rx_tag);
1023 		sc->rl_cdata.rl_rx_tag = NULL;
1024 	}
1025 
1026 	/* Tx buffers. */
1027 	if (sc->rl_cdata.rl_tx_tag != NULL) {
1028 		for (i = 0; i < RL_TX_LIST_CNT; i++) {
1029 			if (sc->rl_cdata.rl_tx_dmamap[i] != NULL) {
1030 				bus_dmamap_destroy(
1031 				    sc->rl_cdata.rl_tx_tag,
1032 				    sc->rl_cdata.rl_tx_dmamap[i]);
1033 				sc->rl_cdata.rl_tx_dmamap[i] = NULL;
1034 			}
1035 		}
1036 		bus_dma_tag_destroy(sc->rl_cdata.rl_tx_tag);
1037 		sc->rl_cdata.rl_tx_tag = NULL;
1038 	}
1039 
1040 	if (sc->rl_parent_tag != NULL) {
1041 		bus_dma_tag_destroy(sc->rl_parent_tag);
1042 		sc->rl_parent_tag = NULL;
1043 	}
1044 }
1045 
1046 /*
1047  * Initialize the transmit descriptors.
1048  */
1049 static int
1050 rl_list_tx_init(struct rl_softc *sc)
1051 {
1052 	struct rl_chain_data	*cd;
1053 	int			i;
1054 
1055 	RL_LOCK_ASSERT(sc);
1056 
1057 	cd = &sc->rl_cdata;
1058 	for (i = 0; i < RL_TX_LIST_CNT; i++) {
1059 		cd->rl_tx_chain[i] = NULL;
1060 		CSR_WRITE_4(sc,
1061 		    RL_TXADDR0 + (i * sizeof(uint32_t)), 0x0000000);
1062 	}
1063 
1064 	sc->rl_cdata.cur_tx = 0;
1065 	sc->rl_cdata.last_tx = 0;
1066 
1067 	return (0);
1068 }
1069 
1070 static int
1071 rl_list_rx_init(struct rl_softc *sc)
1072 {
1073 
1074 	RL_LOCK_ASSERT(sc);
1075 
1076 	bzero(sc->rl_cdata.rl_rx_buf_ptr,
1077 	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ);
1078 	bus_dmamap_sync(sc->rl_cdata.rl_rx_tag, sc->rl_cdata.rl_rx_dmamap,
1079 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1080 
1081 	return (0);
1082 }
1083 
1084 /*
1085  * A frame has been uploaded: pass the resulting mbuf chain up to
1086  * the higher level protocols.
1087  *
1088  * You know there's something wrong with a PCI bus-master chip design
1089  * when you have to use m_devget().
1090  *
1091  * The receive operation is badly documented in the datasheet, so I'll
1092  * attempt to document it here. The driver provides a buffer area and
1093  * places its base address in the RX buffer start address register.
1094  * The chip then begins copying frames into the RX buffer. Each frame
1095  * is preceded by a 32-bit RX status word which specifies the length
1096  * of the frame and certain other status bits. Each frame (starting with
1097  * the status word) is also 32-bit aligned. The frame length is in the
1098  * first 16 bits of the status word; the lower 15 bits correspond with
1099  * the 'rx status register' mentioned in the datasheet.
1100  *
1101  * Note: to make the Alpha happy, the frame payload needs to be aligned
1102  * on a 32-bit boundary. To achieve this, we pass RL_ETHER_ALIGN (2 bytes)
1103  * as the offset argument to m_devget().
1104  */
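/*
 * A short sketch (illustrative only) of how rl_rxeof() below decodes the
 * status word described above for each frame in the ring:
 *
 *	rxstat = le32toh(*(uint32_t *)rxbufpos);
 *	total_len = rxstat >> 16;	frame length, including the CRC
 *	if (total_len == RL_RXSTAT_UNFINISHED)
 *		stop -- the chip is still copying this frame in
 *	if (rxstat & RL_RXSTAT_RXOK)
 *		copy total_len - ETHER_CRC_LEN bytes into an mbuf
 */
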
1105 static int
1106 rl_rxeof(struct rl_softc *sc)
1107 {
1108 	struct mbuf		*m;
1109 	if_t			ifp = sc->rl_ifp;
1110 	uint8_t			*rxbufpos;
1111 	int			total_len = 0;
1112 	int			wrap = 0;
1113 	int			rx_npkts = 0;
1114 	uint32_t		rxstat;
1115 	uint16_t		cur_rx;
1116 	uint16_t		limit;
1117 	uint16_t		max_bytes, rx_bytes = 0;
1118 
1119 	RL_LOCK_ASSERT(sc);
1120 
1121 	bus_dmamap_sync(sc->rl_cdata.rl_rx_tag, sc->rl_cdata.rl_rx_dmamap,
1122 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1123 
1124 	cur_rx = (CSR_READ_2(sc, RL_CURRXADDR) + 16) % RL_RXBUFLEN;
1125 
1126 	/* Do not try to read past this point. */
1127 	limit = CSR_READ_2(sc, RL_CURRXBUF) % RL_RXBUFLEN;
1128 
1129 	if (limit < cur_rx)
1130 		max_bytes = (RL_RXBUFLEN - cur_rx) + limit;
1131 	else
1132 		max_bytes = limit - cur_rx;
1133 
1134 	while ((CSR_READ_1(sc, RL_COMMAND) & RL_CMD_EMPTY_RXBUF) == 0) {
1135 #ifdef DEVICE_POLLING
1136 		if (if_getcapenable(ifp) & IFCAP_POLLING) {
1137 			if (sc->rxcycles <= 0)
1138 				break;
1139 			sc->rxcycles--;
1140 		}
1141 #endif
1142 		rxbufpos = sc->rl_cdata.rl_rx_buf + cur_rx;
1143 		rxstat = le32toh(*(uint32_t *)rxbufpos);
1144 
1145 		/*
1146 		 * Here's a totally undocumented fact for you. When the
1147 		 * RealTek chip is in the process of copying a packet into
1148 		 * RAM for you, the length will be 0xfff0. If you spot a
1149 		 * packet header with this value, you need to stop. The
1150 		 * datasheet makes absolutely no mention of this and
1151 		 * RealTek should be shot for this.
1152 		 */
1153 		total_len = rxstat >> 16;
1154 		if (total_len == RL_RXSTAT_UNFINISHED)
1155 			break;
1156 
1157 		if (!(rxstat & RL_RXSTAT_RXOK) ||
1158 		    total_len < ETHER_MIN_LEN ||
1159 		    total_len > ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) {
1160 			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1161 			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1162 			rl_init_locked(sc);
1163 			return (rx_npkts);
1164 		}
1165 
1166 		/* No errors; receive the packet. */
1167 		rx_bytes += total_len + 4;
1168 
1169 		/*
1170 		 * XXX The RealTek chip includes the CRC with every
1171 		 * received frame, and there's no way to turn this
1172 		 * behavior off (at least, I can't find anything in
1173 		 * the manual that explains how to do it) so we have
1174 		 * to trim off the CRC manually.
1175 		 */
1176 		total_len -= ETHER_CRC_LEN;
1177 
1178 		/*
1179 		 * Avoid trying to read more bytes than we know
1180 		 * the chip has prepared for us.
1181 		 */
1182 		if (rx_bytes > max_bytes)
1183 			break;
1184 
1185 		rxbufpos = sc->rl_cdata.rl_rx_buf +
1186 			((cur_rx + sizeof(uint32_t)) % RL_RXBUFLEN);
1187 		if (rxbufpos == (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN))
1188 			rxbufpos = sc->rl_cdata.rl_rx_buf;
1189 
1190 		wrap = (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN) - rxbufpos;
1191 		if (total_len > wrap) {
1192 			m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp,
1193 			    NULL);
1194 			if (m != NULL)
1195 				m_copyback(m, wrap, total_len - wrap,
1196 					sc->rl_cdata.rl_rx_buf);
1197 			cur_rx = (total_len - wrap + ETHER_CRC_LEN);
1198 		} else {
1199 			m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp,
1200 			    NULL);
1201 			cur_rx += total_len + 4 + ETHER_CRC_LEN;
1202 		}
1203 
1204 		/* Round up to 32-bit boundary. */
1205 		cur_rx = (cur_rx + 3) & ~3;
1206 		CSR_WRITE_2(sc, RL_CURRXADDR, cur_rx - 16);
1207 
1208 		if (m == NULL) {
1209 			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
1210 			continue;
1211 		}
1212 
1213 		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1214 		RL_UNLOCK(sc);
1215 		if_input(ifp, m);
1216 		RL_LOCK(sc);
1217 		rx_npkts++;
1218 	}
1219 
1220 	/* No need to sync Rx memory block as we didn't modify it. */
1221 	return (rx_npkts);
1222 }
1223 
1224 /*
1225  * A frame was downloaded to the chip. It's safe for us to clean up
1226  * the list buffers.
1227  */
1228 static void
1229 rl_txeof(struct rl_softc *sc)
1230 {
1231 	if_t			ifp = sc->rl_ifp;
1232 	uint32_t		txstat;
1233 
1234 	RL_LOCK_ASSERT(sc);
1235 
1236 	/*
1237 	 * Go through our tx list and free mbufs for those
1238 	 * frames that have been uploaded.
1239 	 */
1240 	do {
1241 		if (RL_LAST_TXMBUF(sc) == NULL)
1242 			break;
1243 		txstat = CSR_READ_4(sc, RL_LAST_TXSTAT(sc));
1244 		if (!(txstat & (RL_TXSTAT_TX_OK|
1245 		    RL_TXSTAT_TX_UNDERRUN|RL_TXSTAT_TXABRT)))
1246 			break;
1247 
1248 		if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (txstat & RL_TXSTAT_COLLCNT) >> 24);
1249 
1250 		bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, RL_LAST_DMAMAP(sc),
1251 		    BUS_DMASYNC_POSTWRITE);
1252 		bus_dmamap_unload(sc->rl_cdata.rl_tx_tag, RL_LAST_DMAMAP(sc));
1253 		m_freem(RL_LAST_TXMBUF(sc));
1254 		RL_LAST_TXMBUF(sc) = NULL;
1255 		/*
1256 		 * If there was a transmit underrun, bump the TX threshold.
1257 		 * Make sure not to exceed the 63 * 32 = 2016 bytes we can
1258 		 * address with the 6 available threshold bits.
1259 		 */
1260 		if ((txstat & RL_TXSTAT_TX_UNDERRUN) &&
1261 		    (sc->rl_txthresh < 2016))
1262 			sc->rl_txthresh += 32;
1263 		if (txstat & RL_TXSTAT_TX_OK)
1264 			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1265 		else {
1266 			int			oldthresh;
1267 			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1268 			if ((txstat & RL_TXSTAT_TXABRT) ||
1269 			    (txstat & RL_TXSTAT_OUTOFWIN))
1270 				CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
1271 			oldthresh = sc->rl_txthresh;
1272 			/* error recovery */
1273 			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1274 			rl_init_locked(sc);
1275 			/* restore original threshold */
1276 			sc->rl_txthresh = oldthresh;
1277 			return;
1278 		}
1279 		RL_INC(sc->rl_cdata.last_tx);
1280 		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
1281 	} while (sc->rl_cdata.last_tx != sc->rl_cdata.cur_tx);
1282 
1283 	if (RL_LAST_TXMBUF(sc) == NULL)
1284 		sc->rl_watchdog_timer = 0;
1285 }
1286 
1287 static void
1288 rl_twister_update(struct rl_softc *sc)
1289 {
1290 	uint16_t linktest;
1291 	/*
1292 	 * Table provided by RealTek (Kinston <shangh@realtek.com.tw>) for
1293 	 * the Linux driver.  The values are otherwise undocumented.
1294 	 */
1295 	static const uint32_t param[4][4] = {
1296 		{0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43},
1297 		{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
1298 		{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
1299 		{0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83}
1300 	};
1301 
1302 	/*
1303 	 * Tune the so-called twister registers of the RTL8139.  These
1304 	 * are used to compensate for impedance mismatches.  The
1305 	 * method for tuning these registers is undocumented and the
1306 	 * following procedure is collected from public sources.
1307 	 */
1308 	switch (sc->rl_twister)
1309 	{
1310 	case CHK_LINK:
1311 		/*
1312 		 * If we have a sufficient link, then we can proceed in
1313 		 * the state machine to the next stage.  If not, then
1314 		 * disable further tuning after writing sane defaults.
1315 		 */
1316 		if (CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_LINK_OK) {
1317 			CSR_WRITE_2(sc, RL_CSCFG, RL_CSCFG_LINK_DOWN_OFF_CMD);
1318 			sc->rl_twister = FIND_ROW;
1319 		} else {
1320 			CSR_WRITE_2(sc, RL_CSCFG, RL_CSCFG_LINK_DOWN_CMD);
1321 			CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_CBL_TEST);
1322 			CSR_WRITE_4(sc, RL_PARA78, RL_PARA78_DEF);
1323 			CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_DEF);
1324 			sc->rl_twister = DONE;
1325 		}
1326 		break;
1327 	case FIND_ROW:
1328 		/*
1329 		 * Read how long it took to see the echo to find the tuning
1330 		 * row to use.
1331 		 */
1332 		linktest = CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_STATUS;
1333 		if (linktest == RL_CSCFG_ROW3)
1334 			sc->rl_twist_row = 3;
1335 		else if (linktest == RL_CSCFG_ROW2)
1336 			sc->rl_twist_row = 2;
1337 		else if (linktest == RL_CSCFG_ROW1)
1338 			sc->rl_twist_row = 1;
1339 		else
1340 			sc->rl_twist_row = 0;
1341 		sc->rl_twist_col = 0;
1342 		sc->rl_twister = SET_PARAM;
1343 		break;
1344 	case SET_PARAM:
1345 		if (sc->rl_twist_col == 0)
1346 			CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_RESET);
1347 		CSR_WRITE_4(sc, RL_PARA7C,
1348 		    param[sc->rl_twist_row][sc->rl_twist_col]);
1349 		if (++sc->rl_twist_col == 4) {
1350 			if (sc->rl_twist_row == 3)
1351 				sc->rl_twister = RECHK_LONG;
1352 			else
1353 				sc->rl_twister = DONE;
1354 		}
1355 		break;
1356 	case RECHK_LONG:
1357 		/*
1358 		 * For long cables, we have to double check to make sure we
1359 		 * don't mistune.
1360 		 */
1361 		linktest = CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_STATUS;
1362 		if (linktest == RL_CSCFG_ROW3)
1363 			sc->rl_twister = DONE;
1364 		else {
1365 			CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_RETUNE);
1366 			sc->rl_twister = RETUNE;
1367 		}
1368 		break;
1369 	case RETUNE:
1370 		/* Retune for a shorter cable (try column 2) */
1371 		CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_CBL_TEST);
1372 		CSR_WRITE_4(sc, RL_PARA78, RL_PARA78_DEF);
1373 		CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_DEF);
1374 		CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_RESET);
1375 		sc->rl_twist_row--;
1376 		sc->rl_twist_col = 0;
1377 		sc->rl_twister = SET_PARAM;
1378 		break;
1379 
1380 	case DONE:
1381 		break;
1382 	}
1383 
1384 }
1385 
1386 static void
1387 rl_tick(void *xsc)
1388 {
1389 	struct rl_softc		*sc = xsc;
1390 	struct mii_data		*mii;
1391 	int ticks;
1392 
1393 	RL_LOCK_ASSERT(sc);
1394 	/*
1395 	 * If we're doing the twister cable calibration, then we need to defer
1396 	 * watchdog timeouts.  This is a no-op in normal operations, but
1397 	 * can falsely trigger when the cable calibration takes a while and
1398 	 * there was traffic ready to go when rl was started.
1399 	 *
1400 	 * We don't defer mii_tick since that updates the mii status, which
1401 	 * helps the twister process, at least according to similar patches
1402 	 * for the Linux driver I found online while doing the fixes.  Worst
1403 	 * case is a few extra mii reads during calibration.
1404 	 */
1405 	mii = device_get_softc(sc->rl_miibus);
1406 	mii_tick(mii);
1407 	if ((sc->rl_flags & RL_FLAG_LINK) == 0)
1408 		rl_miibus_statchg(sc->rl_dev);
1409 	if (sc->rl_twister_enable) {
1410 		if (sc->rl_twister == DONE)
1411 			rl_watchdog(sc);
1412 		else
1413 			rl_twister_update(sc);
1414 		if (sc->rl_twister == DONE)
1415 			ticks = hz;
1416 		else
1417 			ticks = hz / 10;
1418 	} else {
1419 		rl_watchdog(sc);
1420 		ticks = hz;
1421 	}
1422 
1423 	callout_reset(&sc->rl_stat_callout, ticks, rl_tick, sc);
1424 }
1425 
1426 #ifdef DEVICE_POLLING
1427 static int
1428 rl_poll(if_t ifp, enum poll_cmd cmd, int count)
1429 {
1430 	struct rl_softc *sc = if_getsoftc(ifp);
1431 	int rx_npkts = 0;
1432 
1433 	RL_LOCK(sc);
1434 	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
1435 		rx_npkts = rl_poll_locked(ifp, cmd, count);
1436 	RL_UNLOCK(sc);
1437 	return (rx_npkts);
1438 }
1439 
1440 static int
1441 rl_poll_locked(if_t ifp, enum poll_cmd cmd, int count)
1442 {
1443 	struct rl_softc *sc = if_getsoftc(ifp);
1444 	int rx_npkts;
1445 
1446 	RL_LOCK_ASSERT(sc);
1447 
1448 	sc->rxcycles = count;
1449 	rx_npkts = rl_rxeof(sc);
1450 	rl_txeof(sc);
1451 
1452 	if (!if_sendq_empty(ifp))
1453 		rl_start_locked(ifp);
1454 
1455 	if (cmd == POLL_AND_CHECK_STATUS) {
1456 		uint16_t	status;
1457 
1458 		/* We should also check the status register. */
1459 		status = CSR_READ_2(sc, RL_ISR);
1460 		if (status == 0xffff)
1461 			return (rx_npkts);
1462 		if (status != 0)
1463 			CSR_WRITE_2(sc, RL_ISR, status);
1464 
1465 		/* XXX We should check behaviour on receiver stalls. */
1466 
1467 		if (status & RL_ISR_SYSTEM_ERR) {
1468 			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1469 			rl_init_locked(sc);
1470 		}
1471 	}
1472 	return (rx_npkts);
1473 }
1474 #endif /* DEVICE_POLLING */
1475 
1476 static void
1477 rl_intr(void *arg)
1478 {
1479 	struct rl_softc		*sc = arg;
1480 	if_t			ifp = sc->rl_ifp;
1481 	uint16_t		status;
1482 	int			count;
1483 
1484 	RL_LOCK(sc);
1485 
1486 	if (sc->suspended)
1487 		goto done_locked;
1488 
1489 #ifdef DEVICE_POLLING
1490 	if  (if_getcapenable(ifp) & IFCAP_POLLING)
1491 		goto done_locked;
1492 #endif
1493 
1494 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
1495 		goto done_locked2;
1496 	status = CSR_READ_2(sc, RL_ISR);
1497 	if (status == 0xffff || (status & RL_INTRS) == 0)
1498 		goto done_locked;
1499 	/*
1500 	 * Ours, disable further interrupts.
1501 	 */
1502 	CSR_WRITE_2(sc, RL_IMR, 0);
1503 	for (count = 16; count > 0; count--) {
1504 		CSR_WRITE_2(sc, RL_ISR, status);
1505 		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1506 			if (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR))
1507 				rl_rxeof(sc);
1508 			if (status & (RL_ISR_TX_OK | RL_ISR_TX_ERR))
1509 				rl_txeof(sc);
1510 			if (status & RL_ISR_SYSTEM_ERR) {
1511 				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1512 				rl_init_locked(sc);
1513 				RL_UNLOCK(sc);
1514 				return;
1515 			}
1516 		}
1517 		status = CSR_READ_2(sc, RL_ISR);
1518 		/* If the card has gone away, the read returns 0xffff. */
1519 		if (status == 0xffff || (status & RL_INTRS) == 0)
1520 			break;
1521 	}
1522 
1523 	if (!if_sendq_empty(ifp))
1524 		rl_start_locked(ifp);
1525 
1526 done_locked2:
1527 	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
1528 		CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
1529 done_locked:
1530 	RL_UNLOCK(sc);
1531 }
1532 
1533 /*
1534  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1535  * pointers to the fragment pointers.
1536  */
1537 static int
1538 rl_encap(struct rl_softc *sc, struct mbuf **m_head)
1539 {
1540 	struct mbuf		*m;
1541 	bus_dma_segment_t	txsegs[1];
1542 	int			error, nsegs, padlen;
1543 
1544 	RL_LOCK_ASSERT(sc);
1545 
1546 	m = *m_head;
1547 	padlen = 0;
1548 	/*
1549 	 * Hardware doesn't auto-pad, so we have to make sure to
1550 	 * pad short frames out to the minimum frame length.
1551 	 */
1552 	if (m->m_pkthdr.len < RL_MIN_FRAMELEN)
1553 		padlen = RL_MIN_FRAMELEN - m->m_pkthdr.len;
1554 	/*
1555 	 * The RealTek is brain damaged and wants longword-aligned
1556 	 * TX buffers, plus we can only have one fragment buffer
1557 	 * per packet. We have to copy pretty much all the time.
1558 	 */
1559 	if (m->m_next != NULL || (mtod(m, uintptr_t) & 3) != 0 ||
1560 	    (padlen > 0 && M_TRAILINGSPACE(m) < padlen)) {
1561 		m = m_defrag(*m_head, M_NOWAIT);
1562 		if (m == NULL) {
1563 			m_freem(*m_head);
1564 			*m_head = NULL;
1565 			return (ENOMEM);
1566 		}
1567 	}
1568 	*m_head = m;
1569 
1570 	if (padlen > 0) {
1571 		/*
1572 		 * Make security-conscious people happy: zero out the
1573 		 * bytes in the pad area, since we don't know what
1574 		 * this mbuf cluster buffer's previous user might
1575 		 * have left in it.
1576 		 */
1577 		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
1578 		m->m_pkthdr.len += padlen;
1579 		m->m_len = m->m_pkthdr.len;
1580 	}
1581 
1582 	error = bus_dmamap_load_mbuf_sg(sc->rl_cdata.rl_tx_tag,
1583 	    RL_CUR_DMAMAP(sc), m, txsegs, &nsegs, 0);
1584 	if (error != 0)
1585 		return (error);
1586 	if (nsegs == 0) {
1587 		m_freem(*m_head);
1588 		*m_head = NULL;
1589 		return (EIO);
1590 	}
1591 
1592 	RL_CUR_TXMBUF(sc) = m;
1593 	bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, RL_CUR_DMAMAP(sc),
1594 	    BUS_DMASYNC_PREWRITE);
1595 	CSR_WRITE_4(sc, RL_CUR_TXADDR(sc), RL_ADDR_LO(txsegs[0].ds_addr));
1596 
1597 	return (0);
1598 }
1599 
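/*
 * Worked example of the padding rule in rl_encap() above (a sketch,
 * assuming the usual RL_MIN_FRAMELEN of 60 bytes, i.e. ETHER_MIN_LEN less
 * the 4-byte CRC): a 42-byte ARP request gets padlen = 60 - 42 = 18 zero
 * bytes appended before the DMA map is loaded, because the hardware will
 * not pad runt frames on its own:
 *
 *	padlen = RL_MIN_FRAMELEN - m->m_pkthdr.len;
 *	bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
 *	m->m_pkthdr.len += padlen;
 */
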
1600 /*
1601  * Main transmit routine.
1602  */
1603 static void
1604 rl_start(if_t ifp)
1605 {
1606 	struct rl_softc		*sc = if_getsoftc(ifp);
1607 
1608 	RL_LOCK(sc);
1609 	rl_start_locked(ifp);
1610 	RL_UNLOCK(sc);
1611 }
1612 
1613 static void
1614 rl_start_locked(if_t ifp)
1615 {
1616 	struct rl_softc		*sc = if_getsoftc(ifp);
1617 	struct mbuf		*m_head = NULL;
1618 
1619 	RL_LOCK_ASSERT(sc);
1620 
1621 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1622 	    IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0)
1623 		return;
1624 
1625 	while (RL_CUR_TXMBUF(sc) == NULL) {
1626 		m_head = if_dequeue(ifp);
1627 
1628 		if (m_head == NULL)
1629 			break;
1630 
1631 		if (rl_encap(sc, &m_head)) {
1632 			if (m_head == NULL)
1633 				break;
1634 			if_sendq_prepend(ifp, m_head);
1635 			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
1636 			break;
1637 		}
1638 
1639 		/* Pass a copy of this mbuf chain to the bpf subsystem. */
1640 		BPF_MTAP(ifp, RL_CUR_TXMBUF(sc));
1641 
1642 		/* Transmit the frame. */
1643 		CSR_WRITE_4(sc, RL_CUR_TXSTAT(sc),
1644 		    RL_TXTHRESH(sc->rl_txthresh) |
1645 		    RL_CUR_TXMBUF(sc)->m_pkthdr.len);
1646 
1647 		RL_INC(sc->rl_cdata.cur_tx);
1648 
1649 		/* Set a timeout in case the chip goes out to lunch. */
1650 		sc->rl_watchdog_timer = 5;
1651 	}
1652 
1653 	/*
1654 	 * We broke out of the loop because all our TX slots are
1655 	 * full. Mark the NIC as busy until it drains some of the
1656 	 * packets from the queue.
1657 	 */
1658 	if (RL_CUR_TXMBUF(sc) != NULL)
1659 		if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
1660 }
1661 
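/*
 * A brief sketch of the queueing model used above (illustrative only):
 * the chip's four TX slots are used round-robin.  cur_tx names the slot
 * being filled and last_tx the oldest slot still owned by the chip; the
 * RL_CUR_TXADDR()/RL_CUR_TXSTAT() macros pick the matching TXADDR/TXSTAT
 * register pair and RL_INC() advances an index to the next of the
 * RL_TX_LIST_CNT slots, so at most four frames are ever outstanding:
 *
 *	while (RL_CUR_TXMBUF(sc) == NULL) {
 *		rl_encap(sc, &m_head);
 *		CSR_WRITE_4(sc, RL_CUR_TXSTAT(sc), ...);
 *		RL_INC(sc->rl_cdata.cur_tx);
 *	}
 */
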
1662 static void
1663 rl_init(void *xsc)
1664 {
1665 	struct rl_softc		*sc = xsc;
1666 
1667 	RL_LOCK(sc);
1668 	rl_init_locked(sc);
1669 	RL_UNLOCK(sc);
1670 }
1671 
1672 static void
1673 rl_init_locked(struct rl_softc *sc)
1674 {
1675 	if_t			ifp = sc->rl_ifp;
1676 	struct mii_data		*mii;
1677 	uint32_t		eaddr[2];
1678 
1679 	RL_LOCK_ASSERT(sc);
1680 
1681 	mii = device_get_softc(sc->rl_miibus);
1682 
1683 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
1684 		return;
1685 
1686 	/*
1687 	 * Cancel pending I/O and free all RX/TX buffers.
1688 	 */
1689 	rl_stop(sc);
1690 
1691 	rl_reset(sc);
1692 	if (sc->rl_twister_enable) {
1693 		/*
1694 		 * Reset twister register tuning state.  The twister
1695 		 * registers and their tuning are undocumented, but
1696 		 * are necessary to cope with bad links.  rl_twister =
1697 		 * DONE here will disable this entirely.
1698 		 */
1699 		sc->rl_twister = CHK_LINK;
1700 	}
1701 
1702 	/*
1703 	 * Init our MAC address.  Even though the chipset
1704 	 * documentation doesn't mention it, we need to enter "Config
1705 	 * register write enable" mode to modify the ID registers.
1706 	 */
1707 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
1708 	bzero(eaddr, sizeof(eaddr));
1709 	bcopy(if_getlladdr(sc->rl_ifp), eaddr, ETHER_ADDR_LEN);
1710 	CSR_WRITE_STREAM_4(sc, RL_IDR0, eaddr[0]);
1711 	CSR_WRITE_STREAM_4(sc, RL_IDR4, eaddr[1]);
1712 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
1713 
1714 	/* Init the RX memory block pointer register. */
1715 	CSR_WRITE_4(sc, RL_RXADDR, sc->rl_cdata.rl_rx_buf_paddr +
1716 	    RL_RX_8139_BUF_RESERVE);
1717 	/* Init TX descriptors. */
1718 	rl_list_tx_init(sc);
1719 	/* Init Rx memory block. */
1720 	rl_list_rx_init(sc);
1721 
1722 	/*
1723 	 * Enable transmit and receive.
1724 	 */
1725 	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
1726 
1727 	/*
1728 	 * Set the initial TX and RX configuration.
1729 	 */
1730 	CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
1731 	CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG);
1732 
1733 	/* Set RX filter. */
1734 	rl_rxfilter(sc);
1735 
1736 #ifdef DEVICE_POLLING
1737 	/* Disable interrupts if we are polling. */
1738 	if (if_getcapenable(ifp) & IFCAP_POLLING)
1739 		CSR_WRITE_2(sc, RL_IMR, 0);
1740 	else
1741 #endif
1742 	/* Enable interrupts. */
1743 	CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
1744 
1745 	/* Set initial TX threshold */
1746 	sc->rl_txthresh = RL_TX_THRESH_INIT;
1747 
1748 	/* Start RX/TX process. */
1749 	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);
1750 
1751 	/* Enable receiver and transmitter. */
1752 	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
1753 
1754 	sc->rl_flags &= ~RL_FLAG_LINK;
1755 	mii_mediachg(mii);
1756 
1757 	CSR_WRITE_1(sc, sc->rl_cfg1, RL_CFG1_DRVLOAD|RL_CFG1_FULLDUPLEX);
1758 
1759 	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
1760 	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
1761 
1762 	callout_reset(&sc->rl_stat_callout, hz, rl_tick, sc);
1763 }
1764 
1765 /*
1766  * Set media options.
1767  */
1768 static int
1769 rl_ifmedia_upd(if_t ifp)
1770 {
1771 	struct rl_softc		*sc = if_getsoftc(ifp);
1772 	struct mii_data		*mii;
1773 
1774 	mii = device_get_softc(sc->rl_miibus);
1775 
1776 	RL_LOCK(sc);
1777 	mii_mediachg(mii);
1778 	RL_UNLOCK(sc);
1779 
1780 	return (0);
1781 }
1782 
1783 /*
1784  * Report current media status.
1785  */
1786 static void
1787 rl_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
1788 {
1789 	struct rl_softc		*sc = if_getsoftc(ifp);
1790 	struct mii_data		*mii;
1791 
1792 	mii = device_get_softc(sc->rl_miibus);
1793 
1794 	RL_LOCK(sc);
1795 	mii_pollstat(mii);
1796 	ifmr->ifm_active = mii->mii_media_active;
1797 	ifmr->ifm_status = mii->mii_media_status;
1798 	RL_UNLOCK(sc);
1799 }
1800 
1801 static int
1802 rl_ioctl(if_t ifp, u_long command, caddr_t data)
1803 {
1804 	struct ifreq		*ifr = (struct ifreq *)data;
1805 	struct mii_data		*mii;
1806 	struct rl_softc		*sc = if_getsoftc(ifp);
1807 	int			error = 0, mask;
1808 
1809 	switch (command) {
1810 	case SIOCSIFFLAGS:
1811 		RL_LOCK(sc);
1812 		if (if_getflags(ifp) & IFF_UP) {
1813 			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING &&
1814 			    ((if_getflags(ifp) ^ sc->rl_if_flags) &
1815 			    (IFF_PROMISC | IFF_ALLMULTI)))
1816 				rl_rxfilter(sc);
1817 			else
1818 				rl_init_locked(sc);
1819 		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
1820 			rl_stop(sc);
1821 		sc->rl_if_flags = if_getflags(ifp);
1822 		RL_UNLOCK(sc);
1823 		break;
1824 	case SIOCADDMULTI:
1825 	case SIOCDELMULTI:
1826 		RL_LOCK(sc);
1827 		rl_rxfilter(sc);
1828 		RL_UNLOCK(sc);
1829 		break;
1830 	case SIOCGIFMEDIA:
1831 	case SIOCSIFMEDIA:
1832 		mii = device_get_softc(sc->rl_miibus);
1833 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1834 		break;
1835 	case SIOCSIFCAP:
1836 		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
1837 #ifdef DEVICE_POLLING
1838 		if (ifr->ifr_reqcap & IFCAP_POLLING &&
1839 		    !(if_getcapenable(ifp) & IFCAP_POLLING)) {
1840 			error = ether_poll_register(rl_poll, ifp);
1841 			if (error)
1842 				return (error);
1843 			RL_LOCK(sc);
1844 			/* Disable interrupts. */
1845 			CSR_WRITE_2(sc, RL_IMR, 0x0000);
1846 			if_setcapenablebit(ifp, IFCAP_POLLING, 0);
1847 			RL_UNLOCK(sc);
1848 			return (error);
1849 
1850 		}
1851 		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
1852 		    if_getcapenable(ifp) & IFCAP_POLLING) {
1853 			error = ether_poll_deregister(ifp);
1854 			/* Enable interrupts. */
1855 			RL_LOCK(sc);
1856 			CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
1857 			if_setcapenablebit(ifp, 0, IFCAP_POLLING);
1858 			RL_UNLOCK(sc);
1859 			return (error);
1860 		}
1861 #endif /* DEVICE_POLLING */
1862 		if ((mask & IFCAP_WOL) != 0 &&
1863 		    (if_getcapabilities(ifp) & IFCAP_WOL) != 0) {
1864 			if ((mask & IFCAP_WOL_UCAST) != 0)
1865 				if_togglecapenable(ifp, IFCAP_WOL_UCAST);
1866 			if ((mask & IFCAP_WOL_MCAST) != 0)
1867 				if_togglecapenable(ifp, IFCAP_WOL_MCAST);
1868 			if ((mask & IFCAP_WOL_MAGIC) != 0)
1869 				if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
1870 		}
1871 		break;
1872 	default:
1873 		error = ether_ioctl(ifp, command, data);
1874 		break;
1875 	}
1876 
1877 	return (error);
1878 }
1879 
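/*
 * Transmit watchdog.  If the watchdog timer is armed and counts down
 * to zero, assume the chip has stopped processing transmissions,
 * reclaim whatever has completed and reinitialize the interface.
 */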
1880 static void
1881 rl_watchdog(struct rl_softc *sc)
1882 {
1883 
1884 	RL_LOCK_ASSERT(sc);
1885 
1886 	if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer > 0)
1887 		return;
1888 
1889 	device_printf(sc->rl_dev, "watchdog timeout\n");
1890 	if_inc_counter(sc->rl_ifp, IFCOUNTER_OERRORS, 1);
1891 
1892 	rl_txeof(sc);
1893 	rl_rxeof(sc);
1894 	if_setdrvflagbits(sc->rl_ifp, 0, IFF_DRV_RUNNING);
1895 	rl_init_locked(sc);
1896 }
1897 
1898 /*
1899  * Stop the adapter and free any mbufs still queued on the
1900  * TX list.
1901  */
1902 static void
1903 rl_stop(struct rl_softc *sc)
1904 {
1905 	int			i;
1906 	if_t			ifp = sc->rl_ifp;
1907 
1908 	RL_LOCK_ASSERT(sc);
1909 
1910 	sc->rl_watchdog_timer = 0;
1911 	callout_stop(&sc->rl_stat_callout);
1912 	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
1913 	sc->rl_flags &= ~RL_FLAG_LINK;
1914 
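	/*
	 * Disable the receiver and transmitter, mask all interrupts and
	 * wait for the RX/TX enable bits to clear before unloading the
	 * DMA maps below.
	 */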
1915 	CSR_WRITE_1(sc, RL_COMMAND, 0x00);
1916 	CSR_WRITE_2(sc, RL_IMR, 0x0000);
1917 	for (i = 0; i < RL_TIMEOUT; i++) {
1918 		DELAY(10);
1919 		if ((CSR_READ_1(sc, RL_COMMAND) &
1920 		    (RL_CMD_RX_ENB | RL_CMD_TX_ENB)) == 0)
1921 			break;
1922 	}
1923 	if (i == RL_TIMEOUT)
1924 		device_printf(sc->rl_dev, "Unable to stop Tx/Rx MAC\n");
1925 
1926 	/*
1927 	 * Free the TX list buffers.
1928 	 */
1929 	for (i = 0; i < RL_TX_LIST_CNT; i++) {
1930 		if (sc->rl_cdata.rl_tx_chain[i] != NULL) {
1931 			bus_dmamap_sync(sc->rl_cdata.rl_tx_tag,
1932 			    sc->rl_cdata.rl_tx_dmamap[i],
1933 			    BUS_DMASYNC_POSTWRITE);
1934 			bus_dmamap_unload(sc->rl_cdata.rl_tx_tag,
1935 			    sc->rl_cdata.rl_tx_dmamap[i]);
1936 			m_freem(sc->rl_cdata.rl_tx_chain[i]);
1937 			sc->rl_cdata.rl_tx_chain[i] = NULL;
1938 			CSR_WRITE_4(sc, RL_TXADDR0 + (i * sizeof(uint32_t)),
1939 			    0x00000000);
1940 		}
1941 	}
1942 }
1943 
1944 /*
1945  * Device suspend routine.  Stop the interface and program the
1946  * Wake On LAN configuration so that the chip can wake the system
1947  * while it is suspended.
1948  */
1949 static int
1950 rl_suspend(device_t dev)
1951 {
1952 	struct rl_softc		*sc;
1953 
1954 	sc = device_get_softc(dev);
1955 
1956 	RL_LOCK(sc);
1957 	rl_stop(sc);
1958 	rl_setwol(sc);
1959 	sc->suspended = 1;
1960 	RL_UNLOCK(sc);
1961 
1962 	return (0);
1963 }
1964 
1965 /*
1966  * Device resume routine.  Disable PME, clear any Wake On LAN state
1967  * left over from suspend, and restart the interface if it was up
1968  * before the suspend.
1969  */
1970 static int
1971 rl_resume(device_t dev)
1972 {
1973 	struct rl_softc		*sc;
1974 	if_t			ifp;
1975 	int			pmc;
1976 	uint16_t		pmstat;
1977 
1978 	sc = device_get_softc(dev);
1979 	ifp = sc->rl_ifp;
1980 
1981 	RL_LOCK(sc);
1982 
1983 	if ((if_getcapabilities(ifp) & IFCAP_WOL) != 0 &&
1984 	    pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) == 0) {
1985 		/* Disable PME and clear PME status. */
1986 		pmstat = pci_read_config(sc->rl_dev,
1987 		    pmc + PCIR_POWER_STATUS, 2);
1988 		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
1989 			pmstat &= ~PCIM_PSTAT_PMEENABLE;
1990 			pci_write_config(sc->rl_dev,
1991 			    pmc + PCIR_POWER_STATUS, pmstat, 2);
1992 		}
1993 		/*
1994 		 * Clear the WOL frame matching state so that it does
1995 		 * not interfere with normal Rx filtering.
1996 		 */
1997 		rl_clrwol(sc);
1998 	}
1999 
2000 	/* Reinitialize the interface if it was up before suspend. */
2001 	if (if_getflags(ifp) & IFF_UP)
2002 		rl_init_locked(sc);
2003 
2004 	sc->suspended = 0;
2005 
2006 	RL_UNLOCK(sc);
2007 
2008 	return (0);
2009 }
2010 
2011 /*
2012  * Stop all chip I/O so that the kernel's probe routines don't
2013  * get confused by errant DMAs when rebooting.
2014  */
2015 static int
2016 rl_shutdown(device_t dev)
2017 {
2018 	struct rl_softc		*sc;
2019 
2020 	sc = device_get_softc(dev);
2021 
2022 	RL_LOCK(sc);
2023 	rl_stop(sc);
2024 	/*
2025 	 * Mark the interface as down, since otherwise a stray interrupt
2026 	 * arriving later on (which can happen in some cases) would
2027 	 * cause a panic.
2028 	 */
2029 	if_setflagbits(sc->rl_ifp, 0, IFF_UP);
2030 	rl_setwol(sc);
2031 	RL_UNLOCK(sc);
2032 
2033 	return (0);
2034 }
2035 
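/*
 * Program the Wake On LAN configuration.  Config register writes are
 * unlocked through RL_EECMD, the CONFIG1/3/5 wake-up bits are set to
 * match the enabled WOL capabilities, and PME is requested in the PCI
 * power management registers when any form of WOL is enabled.
 */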
2036 static void
2037 rl_setwol(struct rl_softc *sc)
2038 {
2039 	if_t			ifp;
2040 	int			pmc;
2041 	uint16_t		pmstat;
2042 	uint8_t			v;
2043 
2044 	RL_LOCK_ASSERT(sc);
2045 
2046 	ifp = sc->rl_ifp;
2047 	if ((if_getcapabilities(ifp) & IFCAP_WOL) == 0)
2048 		return;
2049 	if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
2050 		return;
2051 
2052 	/* Enable config register write. */
2053 	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
2054 
2055 	/* Enable PME. */
2056 	v = CSR_READ_1(sc, sc->rl_cfg1);
2057 	v &= ~RL_CFG1_PME;
2058 	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
2059 		v |= RL_CFG1_PME;
2060 	CSR_WRITE_1(sc, sc->rl_cfg1, v);
2061 
2062 	v = CSR_READ_1(sc, sc->rl_cfg3);
2063 	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
2064 	if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
2065 		v |= RL_CFG3_WOL_MAGIC;
2066 	CSR_WRITE_1(sc, sc->rl_cfg3, v);
2067 
2068 	v = CSR_READ_1(sc, sc->rl_cfg5);
2069 	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
2070 	v &= ~RL_CFG5_WOL_LANWAKE;
2071 	if ((if_getcapenable(ifp) & IFCAP_WOL_UCAST) != 0)
2072 		v |= RL_CFG5_WOL_UCAST;
2073 	if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) != 0)
2074 		v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST;
2075 	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
2076 		v |= RL_CFG5_WOL_LANWAKE;
2077 	CSR_WRITE_1(sc, sc->rl_cfg5, v);
2078 
2079 	/* Config register write done. */
2080 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
2081 
2082 	/* Request PME if WOL is requested. */
2083 	pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2);
2084 	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
2085 	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
2086 		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
2087 	pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
2088 }
2089 
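/*
 * Clear the wake-up frame matching bits programmed by rl_setwol() so
 * that leftover WOL state does not affect normal receive filtering
 * after resume.
 */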
2090 static void
2091 rl_clrwol(struct rl_softc *sc)
2092 {
2093 	if_t			ifp;
2094 	uint8_t			v;
2095 
2096 	ifp = sc->rl_ifp;
2097 	if ((if_getcapabilities(ifp) & IFCAP_WOL) == 0)
2098 		return;
2099 
2100 	/* Enable config register write. */
2101 	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
2102 
2103 	v = CSR_READ_1(sc, sc->rl_cfg3);
2104 	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
2105 	CSR_WRITE_1(sc, sc->rl_cfg3, v);
2106 
2107 	/* Config register write done. */
2108 	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
2109 
2110 	v = CSR_READ_1(sc, sc->rl_cfg5);
2111 	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
2112 	v &= ~RL_CFG5_WOL_LANWAKE;
2113 	CSR_WRITE_1(sc, sc->rl_cfg5, v);
2114 }
2115