/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/in_cksum.h>

#include <dev/jme/if_jmereg.h>
#include <dev/jme/if_jmevar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/* Define the following to enable printing of Rx errors. */
#undef	JME_SHOW_ERRORS

#define	JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

MODULE_DEPEND(jme, pci, 1, 1, 1);
MODULE_DEPEND(jme, ether, 1, 1, 1);
MODULE_DEPEND(jme, miibus, 1, 1, 1);

/* Tunables. */
static int msi_disable = 0;
static int msix_disable = 0;
TUNABLE_INT("hw.jme.msi_disable", &msi_disable);
TUNABLE_INT("hw.jme.msix_disable", &msix_disable);
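/*
 * Both knobs are boot-time loader tunables; for example, setting
 * "hw.jme.msix_disable=1" in /boot/loader.conf disables MSI-X, and
 * setting "hw.jme.msi_disable=1" as well forces legacy INTx interrupts.
 */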

/*
 * Devices supported by this driver.
 */
static struct jme_dev {
	uint16_t	jme_vendorid;
	uint16_t	jme_deviceid;
	const char	*jme_name;
} jme_devs[] = {
	{ VENDORID_JMICRON, DEVICEID_JMC250,
	    "JMicron Inc, JMC250 Gigabit Ethernet" },
	{ VENDORID_JMICRON, DEVICEID_JMC260,
	    "JMicron Inc, JMC260 Fast Ethernet" },
};

static int jme_miibus_readreg(device_t, int, int);
static int jme_miibus_writereg(device_t, int, int, int);
static void jme_miibus_statchg(device_t);
static void jme_mediastatus(struct ifnet *, struct ifmediareq *);
static int jme_mediachange(struct ifnet *);
static int jme_probe(device_t);
static int jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
static int jme_eeprom_macaddr(struct jme_softc *);
static void jme_reg_macaddr(struct jme_softc *);
static void jme_map_intr_vector(struct jme_softc *);
static int jme_attach(device_t);
static int jme_detach(device_t);
static void jme_sysctl_node(struct jme_softc *);
static void jme_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int jme_dma_alloc(struct jme_softc *);
static void jme_dma_free(struct jme_softc *);
static int jme_shutdown(device_t);
static void jme_setlinkspeed(struct jme_softc *);
static void jme_setwol(struct jme_softc *);
static int jme_suspend(device_t);
static int jme_resume(device_t);
static int jme_encap(struct jme_softc *, struct mbuf **);
static void jme_tx_task(void *, int);
static void jme_start(struct ifnet *);
static void jme_watchdog(struct jme_softc *);
static int jme_ioctl(struct ifnet *, u_long, caddr_t);
static void jme_mac_config(struct jme_softc *);
static void jme_link_task(void *, int);
static int jme_intr(void *);
static void jme_int_task(void *, int);
static void jme_txeof(struct jme_softc *);
static __inline void jme_discard_rxbuf(struct jme_softc *, int);
static void jme_rxeof(struct jme_softc *);
static int jme_rxintr(struct jme_softc *, int);
static void jme_tick(void *);
static void jme_reset(struct jme_softc *);
static void jme_init(void *);
static void jme_init_locked(struct jme_softc *);
static void jme_stop(struct jme_softc *);
static void jme_stop_tx(struct jme_softc *);
static void jme_stop_rx(struct jme_softc *);
static int jme_init_rx_ring(struct jme_softc *);
static void jme_init_tx_ring(struct jme_softc *);
static void jme_init_ssb(struct jme_softc *);
static int jme_newbuf(struct jme_softc *, struct jme_rxdesc *);
static void jme_set_vlan(struct jme_softc *);
static void jme_set_filter(struct jme_softc *);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS);

static device_method_t jme_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		jme_probe),
	DEVMETHOD(device_attach,	jme_attach),
	DEVMETHOD(device_detach,	jme_detach),
	DEVMETHOD(device_shutdown,	jme_shutdown),
	DEVMETHOD(device_suspend,	jme_suspend),
	DEVMETHOD(device_resume,	jme_resume),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),

	{ NULL, NULL }
};

static driver_t jme_driver = {
	"jme",
	jme_methods,
	sizeof(struct jme_softc)
};

static devclass_t jme_devclass;

DRIVER_MODULE(jme, pci, jme_driver, jme_devclass, 0, 0);
DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, 0, 0);

static struct resource_spec jme_res_spec_mem[] = {
	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
	{ -1,			0,		0 }
};

static struct resource_spec jme_irq_spec_legacy[] = {
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};

static struct resource_spec jme_irq_spec_msi[] = {
	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
	{ SYS_RES_IRQ,		2,		RF_ACTIVE },
	{ SYS_RES_IRQ,		3,		RF_ACTIVE },
	{ SYS_RES_IRQ,		4,		RF_ACTIVE },
	{ SYS_RES_IRQ,		5,		RF_ACTIVE },
	{ SYS_RES_IRQ,		6,		RF_ACTIVE },
	{ SYS_RES_IRQ,		7,		RF_ACTIVE },
	{ SYS_RES_IRQ,		8,		RF_ACTIVE },
	{ -1,			0,		0 }
};
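/*
 * Note: the MSI/MSI-X spec above provides one entry per message (rids
 * 1 through 8); jme_attach() only switches to it when the allocated
 * message count matches JME_MSI_MESSAGES/JME_MSIX_MESSAGES.
 */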

/*
 *	Read a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_readreg(device_t dev, int phy, int reg)
{
	struct jme_softc *sc;
	uint32_t val;
	int i;

	sc = device_get_softc(dev);

	/* For FPGA version, PHY address 0 should be ignored. */
	if ((sc->jme_flags & JME_FLAG_FPGA) != 0) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "phy read timeout : %d\n", reg);
		return (0);
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}

/*
 *	Write a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct jme_softc *sc;
	int i;

	sc = device_get_softc(dev);

	/* For FPGA version, PHY address 0 should be ignored. */
	if ((sc->jme_flags & JME_FLAG_FPGA) != 0) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}

	if (i == 0)
		device_printf(sc->jme_dev, "phy write timeout : %d\n", reg);

	return (0);
}

/*
 *	Callback from the MII layer when the link state changes;
 *	defer the MAC reconfiguration to jme_link_task.
 */
static void
jme_miibus_statchg(device_t dev)
{
	struct jme_softc *sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc->jme_link_task);
}

/*
 *	Get the current interface media status.
 */
static void
jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct jme_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	JME_LOCK(sc);
	mii = device_get_softc(sc->jme_miibus);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
	JME_UNLOCK(sc);
}

/*
 *	Set hardware to newly-selected media.
 */
static int
jme_mediachange(struct ifnet *ifp)
{
	struct jme_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	JME_LOCK(sc);
	mii = device_get_softc(sc->jme_miibus);
	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);
	JME_UNLOCK(sc);

	return (error);
}

static int
jme_probe(device_t dev)
{
	struct jme_dev *sp;
	int i;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	sp = jme_devs;
	for (i = 0; i < sizeof(jme_devs) / sizeof(jme_devs[0]);
	    i++, sp++) {
		if (vendor == sp->jme_vendorid &&
		    devid == sp->jme_deviceid) {
			device_set_desc(dev, sp->jme_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	*val = 0;
	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
		return (ETIMEDOUT);
	}

	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
		return (ETIMEDOUT);
	}

	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}

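/*
 * Walk the EEPROM descriptors and collect the six PAR (station
 * address) register bytes written there by the vendor configuration.
 */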
static int
jme_eeprom_macaddr(struct jme_softc *sc)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Try the next EEPROM descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN) {
		bcopy(eaddr, sc->jme_eaddr, ETHER_ADDR_LEN);
		return (0);
	}

	return (ENOENT);
}

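/*
 * Fall back to whatever station address the firmware left in the PAR
 * registers; fabricate a random one under the JMicron OUI if the
 * registers look uninitialized (all zeros or all ones).
 */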
static void
jme_reg_macaddr(struct jme_softc *sc)
{
	uint32_t par0, par1;

	/* Read station address. */
	par0 = CSR_READ_4(sc, JME_PAR0);
	par1 = CSR_READ_4(sc, JME_PAR1);
	par1 &= 0xFFFF;
	if ((par0 == 0 && par1 == 0) ||
	    (par0 == 0xFFFFFFFF && par1 == 0xFFFF)) {
		device_printf(sc->jme_dev,
		    "generating fake ethernet address.\n");
		par0 = arc4random();
		/* Set OUI to JMicron. */
		sc->jme_eaddr[0] = 0x00;
		sc->jme_eaddr[1] = 0x1B;
		sc->jme_eaddr[2] = 0x8C;
		sc->jme_eaddr[3] = (par0 >> 16) & 0xff;
		sc->jme_eaddr[4] = (par0 >> 8) & 0xff;
		sc->jme_eaddr[5] = par0 & 0xff;
	} else {
		sc->jme_eaddr[0] = (par0 >> 0) & 0xFF;
		sc->jme_eaddr[1] = (par0 >> 8) & 0xFF;
		sc->jme_eaddr[2] = (par0 >> 16) & 0xFF;
		sc->jme_eaddr[3] = (par0 >> 24) & 0xFF;
		sc->jme_eaddr[4] = (par1 >> 0) & 0xFF;
		sc->jme_eaddr[5] = (par1 >> 8) & 0xFF;
	}
}

static void
jme_map_intr_vector(struct jme_softc *sc)
{
	uint32_t map[MSINUM_NUM_INTR_SOURCE / JME_MSI_MESSAGES];

	bzero(map, sizeof(map));

	/* Map Tx interrupt sources to MSI/MSIX vector 2. */
	map[MSINUM_REG_INDEX(N_INTR_TXQ0_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ0_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ1_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ1_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ2_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ2_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ3_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ3_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ4_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ4_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ5_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ5_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ6_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ6_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ7_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ7_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL);
	map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL_TO)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL_TO);

	/* Map Rx interrupt sources to MSI/MSIX vector 1. */
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COMP)] |=
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COMP)] |=
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COMP)] |=
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COMP)] |=
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_DESC_EMPTY)] |=
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_DESC_EMPTY)] |=
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_DESC_EMPTY)] |=
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_DESC_EMPTY)] |=
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL)] |=
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL)] |=
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL)] |=
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL)] |=
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL_TO)] |=
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL_TO);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL_TO)] |=
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL_TO);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL_TO)] |=
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL_TO);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL_TO)] |=
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL_TO);

	/*
	 * All other interrupt sources stay on MSI/MSIX vector 0.
	 * Program the per-source vector mapping registers.
	 */
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 0, map[0]);
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 1, map[1]);
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 2, map[2]);
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 3, map[3]);
}

static int
jme_attach(device_t dev)
{
	struct jme_softc *sc;
	struct ifnet *ifp;
	struct mii_softc *miisc;
	struct mii_data *mii;
	uint32_t reg;
	uint16_t burst;
	int error, i, msic, msixc, pmc;

	error = 0;
	sc = device_get_softc(dev);
	sc->jme_dev = dev;

	mtx_init(&sc->jme_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->jme_tick_ch, &sc->jme_mtx, 0);
	TASK_INIT(&sc->jme_int_task, 0, jme_int_task, sc);
	TASK_INIT(&sc->jme_link_task, 0, jme_link_task, sc);

	/*
	 * Map the device. The JMC250 supports both memory mapped and
	 * I/O register space access. Because I/O register access would
	 * require a different BAR, it's a waste of time to use I/O
	 * register space access. The JMC250 maps its entire register
	 * space into 16KB of memory space.
	 */
	pci_enable_busmaster(dev);
	sc->jme_res_spec = jme_res_spec_mem;
	sc->jme_irq_spec = jme_irq_spec_legacy;
	error = bus_alloc_resources(dev, sc->jme_res_spec, sc->jme_res);
	if (error != 0) {
		device_printf(dev, "cannot allocate memory resources.\n");
		goto fail;
	}

	/* Allocate IRQ resources. */
	msixc = pci_msix_count(dev);
	msic = pci_msi_count(dev);
	if (bootverbose) {
		device_printf(dev, "MSIX count : %d\n", msixc);
		device_printf(dev, "MSI count : %d\n", msic);
	}

	/* Prefer MSIX over MSI. */
	if (msix_disable == 0 || msi_disable == 0) {
		if (msix_disable == 0 && msixc == JME_MSIX_MESSAGES &&
		    pci_alloc_msix(dev, &msixc) == 0) {
			if (msixc == JME_MSIX_MESSAGES) {
				device_printf(dev, "Using %d MSIX messages.\n",
				    msixc);
				sc->jme_flags |= JME_FLAG_MSIX;
				sc->jme_irq_spec = jme_irq_spec_msi;
			} else
				pci_release_msi(dev);
		}
		if (msi_disable == 0 && (sc->jme_flags & JME_FLAG_MSIX) == 0 &&
		    msic == JME_MSI_MESSAGES &&
		    pci_alloc_msi(dev, &msic) == 0) {
			if (msic == JME_MSI_MESSAGES) {
				device_printf(dev, "Using %d MSI messages.\n",
				    msic);
				sc->jme_flags |= JME_FLAG_MSI;
				sc->jme_irq_spec = jme_irq_spec_msi;
			} else
				pci_release_msi(dev);
		}
		/* Map interrupt vectors 0, 1 and 2. */
		if ((sc->jme_flags & JME_FLAG_MSI) != 0 ||
		    (sc->jme_flags & JME_FLAG_MSIX) != 0)
			jme_map_intr_vector(sc);
	}

	error = bus_alloc_resources(dev, sc->jme_irq_spec, sc->jme_irq);
	if (error != 0) {
		device_printf(dev, "cannot allocate IRQ resources.\n");
		goto fail;
	}

	sc->jme_rev = pci_get_device(dev);
	if ((sc->jme_rev & DEVICEID_JMC2XX_MASK) == DEVICEID_JMC260) {
		sc->jme_flags |= JME_FLAG_FASTETH;
		sc->jme_flags |= JME_FLAG_NOJUMBO;
	}
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	sc->jme_chip_rev = (reg & CHIPMODE_REV_MASK) >> CHIPMODE_REV_SHIFT;
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA)
		sc->jme_flags |= JME_FLAG_FPGA;
	if (bootverbose) {
		device_printf(dev, "PCI device revision : 0x%04x\n",
		    sc->jme_rev);
		device_printf(dev, "Chip revision : 0x%02x\n",
		    sc->jme_chip_rev);
		if ((sc->jme_flags & JME_FLAG_FPGA) != 0)
			device_printf(dev, "FPGA revision : 0x%04x\n",
			    (reg & CHIPMODE_FPGA_REV_MASK) >>
			    CHIPMODE_FPGA_REV_SHIFT);
	}
	if (sc->jme_chip_rev == 0xFF) {
		device_printf(dev, "Unknown chip revision : 0x%02x\n",
		    sc->jme_rev);
		error = ENXIO;
		goto fail;
	}

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Get station address. */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if ((reg & SMBCSR_EEPROM_PRESENT) != 0)
		error = jme_eeprom_macaddr(sc);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && bootverbose)
			device_printf(sc->jme_dev,
			    "ethernet hardware address not found in EEPROM.\n");
		jme_reg_macaddr(sc);
	}

	/*
	 * Save PHY address.
	 * Integrated JR0211 has fixed PHY address whereas FPGA version
	 * requires PHY probing to get correct PHY address.
	 */
	if ((sc->jme_flags & JME_FLAG_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (bootverbose)
			device_printf(dev, "PHY is at address %d.\n",
			    sc->jme_phyaddr);
	} else
		sc->jme_phyaddr = 0;

	/* Set max allowable DMA size. */
	if (pci_find_extcap(dev, PCIY_EXPRESS, &i) == 0) {
		sc->jme_flags |= JME_FLAG_PCIE;
		burst = pci_read_config(dev, i + 0x08, 2);
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((burst >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((burst >> 5) & 0x07));
		}
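		/*
		 * Offset 0x08 from the PCIe capability is the device
		 * control register; its 3-bit MRRS (bits 14:12) and MPS
		 * (bits 7:5) fields encode a size of 128 << n bytes.
		 * Derive the Tx DMA burst size from the configured read
		 * request size below.
		 */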
		switch ((burst >> 12) & 0x07) {
		case 0:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
			break;
		case 1:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
			break;
		default:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
			break;
		}
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	} else {
		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	}
	/* Create coalescing sysctl node. */
	jme_sysctl_node(sc);
	if ((error = jme_dma_alloc(sc)) != 0)
		goto fail;

	ifp = sc->jme_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure.\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
	ifp->if_init = jme_init;
	ifp->if_snd.ifq_drv_maxlen = JME_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	/* JMC250 supports Tx/Rx checksum offload as well as TSO. */
	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4;
	ifp->if_hwassist = JME_CSUM_FEATURES | CSUM_TSO;
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) {
		sc->jme_flags |= JME_FLAG_PMCAP;
		ifp->if_capabilities |= IFCAP_WOL_MAGIC;
	}
	ifp->if_capenable = ifp->if_capabilities;

	/* Set up MII bus. */
	if ((error = mii_phy_probe(dev, &sc->jme_miibus, jme_mediachange,
	    jme_mediastatus)) != 0) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}

	/*
	 * Force PHY to FPGA mode.
	 */
	if ((sc->jme_flags & JME_FLAG_FPGA) != 0) {
		mii = device_get_softc(sc->jme_miibus);
		if (mii->mii_instance != 0) {
			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
				if (miisc->mii_phy != 0) {
					sc->jme_phyaddr = miisc->mii_phy;
					break;
				}
			}
			if (sc->jme_phyaddr != 0) {
				device_printf(sc->jme_dev,
				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
				/* vendor magic. */
				jme_miibus_writereg(dev, sc->jme_phyaddr, 27,
				    0x0004);
			}
		}
	}

	ether_ifattach(ifp, sc->jme_eaddr);

	/* VLAN capability setup */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/* Create local taskq. */
	TASK_INIT(&sc->jme_tx_task, 1, jme_tx_task, ifp);
	sc->jme_tq = taskqueue_create_fast("jme_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->jme_tq);
	if (sc->jme_tq == NULL) {
		device_printf(dev, "could not create taskqueue.\n");
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}
	taskqueue_start_threads(&sc->jme_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->jme_dev));

	if ((sc->jme_flags & JME_FLAG_MSIX) != 0)
		msic = JME_MSIX_MESSAGES;
	else if ((sc->jme_flags & JME_FLAG_MSI) != 0)
		msic = JME_MSI_MESSAGES;
	else
		msic = 1;
	for (i = 0; i < msic; i++) {
		error = bus_setup_intr(dev, sc->jme_irq[i],
		    INTR_TYPE_NET | INTR_MPSAFE, jme_intr, NULL, sc,
		    &sc->jme_intrhand[i]);
		if (error != 0)
			break;
	}

	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		taskqueue_free(sc->jme_tq);
		sc->jme_tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error != 0)
		jme_detach(dev);

	return (error);
}

static int
jme_detach(device_t dev)
{
	struct jme_softc *sc;
	struct ifnet *ifp;
	int i, msic;

	sc = device_get_softc(dev);

	ifp = sc->jme_ifp;
	if (device_is_attached(dev)) {
		JME_LOCK(sc);
		sc->jme_flags |= JME_FLAG_DETACH;
		jme_stop(sc);
		JME_UNLOCK(sc);
		callout_drain(&sc->jme_tick_ch);
		taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
		taskqueue_drain(sc->jme_tq, &sc->jme_tx_task);
		taskqueue_drain(taskqueue_swi, &sc->jme_link_task);
		ether_ifdetach(ifp);
	}

	if (sc->jme_tq != NULL) {
		taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
		taskqueue_free(sc->jme_tq);
		sc->jme_tq = NULL;
	}

	if (sc->jme_miibus != NULL) {
		device_delete_child(dev, sc->jme_miibus);
		sc->jme_miibus = NULL;
	}
	bus_generic_detach(dev);
	jme_dma_free(sc);

	if (ifp != NULL) {
		if_free(ifp);
		sc->jme_ifp = NULL;
	}

	if ((sc->jme_flags & JME_FLAG_MSIX) != 0)
		msic = JME_MSIX_MESSAGES;
	else if ((sc->jme_flags & JME_FLAG_MSI) != 0)
		msic = JME_MSI_MESSAGES;
	else
		msic = 1;
	for (i = 0; i < msic; i++) {
		if (sc->jme_intrhand[i] != NULL) {
			bus_teardown_intr(dev, sc->jme_irq[i],
			    sc->jme_intrhand[i]);
			sc->jme_intrhand[i] = NULL;
		}
	}

	bus_release_resources(dev, sc->jme_irq_spec, sc->jme_irq);
	if ((sc->jme_flags & (JME_FLAG_MSIX | JME_FLAG_MSI)) != 0)
		pci_release_msi(dev);
	bus_release_resources(dev, sc->jme_res_spec, sc->jme_res);
	mtx_destroy(&sc->jme_mtx);

	return (0);
}

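/*
 * Register the per-device coalescing/process-limit sysctl knobs and
 * pull in any hints set via device.hints(5) or loader tunables.
 */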
static void
jme_sysctl_node(struct jme_softc *sc)
{
	int error;

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->jme_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev)), OID_AUTO,
	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_tx_coal_to,
	    0, sysctl_hw_jme_tx_coal_to, "I", "jme tx coalescing timeout");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->jme_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev)), OID_AUTO,
	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_tx_coal_pkt,
	    0, sysctl_hw_jme_tx_coal_pkt, "I", "jme tx coalescing packet");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->jme_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev)), OID_AUTO,
	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_rx_coal_to,
	    0, sysctl_hw_jme_rx_coal_to, "I", "jme rx coalescing timeout");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->jme_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev)), OID_AUTO,
	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_rx_coal_pkt,
	    0, sysctl_hw_jme_rx_coal_pkt, "I", "jme rx coalescing packet");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->jme_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev)), OID_AUTO,
	    "process_limit", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_process_limit,
	    0, sysctl_hw_jme_proc_limit, "I",
	    "max number of Rx events to process");
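
	/*
	 * These knobs are also adjustable at runtime, e.g.
	 * "sysctl dev.jme.0.process_limit=<n>".
	 */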

	/* Pull in device tunables. */
	sc->jme_process_limit = JME_PROC_DEFAULT;
	error = resource_int_value(device_get_name(sc->jme_dev),
	    device_get_unit(sc->jme_dev), "process_limit",
	    &sc->jme_process_limit);
	if (error == 0) {
		if (sc->jme_process_limit < JME_PROC_MIN ||
		    sc->jme_process_limit > JME_PROC_MAX) {
			device_printf(sc->jme_dev,
			    "process_limit value out of range; "
			    "using default: %d\n", JME_PROC_DEFAULT);
			sc->jme_process_limit = JME_PROC_DEFAULT;
		}
	}

	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
	error = resource_int_value(device_get_name(sc->jme_dev),
	    device_get_unit(sc->jme_dev), "tx_coal_to", &sc->jme_tx_coal_to);
	if (error == 0) {
		if (sc->jme_tx_coal_to < PCCTX_COAL_TO_MIN ||
		    sc->jme_tx_coal_to > PCCTX_COAL_TO_MAX) {
			device_printf(sc->jme_dev,
			    "tx_coal_to value out of range; "
			    "using default: %d\n", PCCTX_COAL_TO_DEFAULT);
			sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
		}
	}

	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
	error = resource_int_value(device_get_name(sc->jme_dev),
	    device_get_unit(sc->jme_dev), "tx_coal_pkt", &sc->jme_tx_coal_pkt);
	if (error == 0) {
		if (sc->jme_tx_coal_pkt < PCCTX_COAL_PKT_MIN ||
		    sc->jme_tx_coal_pkt > PCCTX_COAL_PKT_MAX) {
			device_printf(sc->jme_dev,
			    "tx_coal_pkt value out of range; "
			    "using default: %d\n", PCCTX_COAL_PKT_DEFAULT);
			sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
		}
	}

	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
	error = resource_int_value(device_get_name(sc->jme_dev),
	    device_get_unit(sc->jme_dev), "rx_coal_to", &sc->jme_rx_coal_to);
	if (error == 0) {
		if (sc->jme_rx_coal_to < PCCRX_COAL_TO_MIN ||
		    sc->jme_rx_coal_to > PCCRX_COAL_TO_MAX) {
			device_printf(sc->jme_dev,
			    "rx_coal_to value out of range; "
			    "using default: %d\n", PCCRX_COAL_TO_DEFAULT);
			sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
		}
	}

	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
	error = resource_int_value(device_get_name(sc->jme_dev),
	    device_get_unit(sc->jme_dev), "rx_coal_pkt", &sc->jme_rx_coal_pkt);
	if (error == 0) {
		if (sc->jme_rx_coal_pkt < PCCRX_COAL_PKT_MIN ||
		    sc->jme_rx_coal_pkt > PCCRX_COAL_PKT_MAX) {
			device_printf(sc->jme_dev,
			    "rx_coal_pkt value out of range; "
			    "using default: %d\n", PCCRX_COAL_PKT_DEFAULT);
			sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
		}
	}
}

struct jme_dmamap_arg {
	bus_addr_t	jme_busaddr;
};

static void
jme_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct jme_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct jme_dmamap_arg *)arg;
	ctx->jme_busaddr = segs[0].ds_addr;
}

static int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_dmamap_arg ctx;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	bus_addr_t lowaddr, rx_ring_end, tx_ring_end;
	int error, i;

	lowaddr = BUS_SPACE_MAXADDR;

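	/*
	 * First try with an unrestricted address window; if the rings
	 * turn out to straddle a 4GB boundary, the check further below
	 * frees everything, drops lowaddr to 32 bits and retries here.
	 */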
again:
	/* Create parent ring tag. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */
	    1, 0,			/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_ring_tag);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		goto fail;
	}
	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
	    JME_TX_RING_ALIGN, 0,	/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_TX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    JME_TX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not allocate Tx ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
	    JME_RX_RING_ALIGN, 0,	/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_RX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    JME_RX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not allocate Rx ring DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->jme_cdata.jme_tx_ring_tag,
	    (void **)&sc->jme_rdata.jme_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->jme_cdata.jme_tx_ring_map);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		goto fail;
	}

	ctx.jme_busaddr = 0;
	error = bus_dmamap_load(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
	    JME_TX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.jme_busaddr == 0) {
		device_printf(sc->jme_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		goto fail;
	}
	sc->jme_rdata.jme_tx_ring_paddr = ctx.jme_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->jme_cdata.jme_rx_ring_tag,
	    (void **)&sc->jme_rdata.jme_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->jme_cdata.jme_rx_ring_map);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not allocate DMA'able memory for Rx ring.\n");
		goto fail;
	}

	ctx.jme_busaddr = 0;
	error = bus_dmamap_load(sc->jme_cdata.jme_rx_ring_tag,
	    sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
	    JME_RX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.jme_busaddr == 0) {
		device_printf(sc->jme_dev,
		    "could not load DMA'able memory for Rx ring.\n");
		goto fail;
	}
	sc->jme_rdata.jme_rx_ring_paddr = ctx.jme_busaddr;

	/* The Tx/Rx descriptor rings must not cross a 4GB boundary. */
	tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr + JME_TX_RING_SIZE;
	rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr + JME_RX_RING_SIZE;
	if ((JME_ADDR_HI(tx_ring_end) !=
	    JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
	    (JME_ADDR_HI(rx_ring_end) !=
	    JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
		device_printf(sc->jme_dev, "4GB boundary crossed, "
		    "switching to 32bit DMA address mode.\n");
		jme_dma_free(sc);
		/* Limit DMA address space to 32bit and try again. */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}

	/* Create parent buffer tag. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_buffer_tag);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not create parent buffer DMA tag.\n");
		goto fail;
	}

	/* Create shadow status block tag. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    JME_SSB_ALIGN, 0,		/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_SSB_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    JME_SSB_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_ssb_tag);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not create shared status block DMA tag.\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_TSO_MAXSIZE,		/* maxsize */
	    JME_MAXTXSEGS,		/* nsegments */
	    JME_TSO_MAXSEGSIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_tx_tag);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_rx_tag);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not create Rx DMA tag.\n");
		goto fail;
	}

	/*
	 * Allocate DMA'able memory and load the DMA map for shared
	 * status block.
	 */
	error = bus_dmamem_alloc(sc->jme_cdata.jme_ssb_tag,
	    (void **)&sc->jme_rdata.jme_ssb_block,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->jme_cdata.jme_ssb_map);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not allocate DMA'able "
		    "memory for shared status block.\n");
		goto fail;
	}

	ctx.jme_busaddr = 0;
	error = bus_dmamap_load(sc->jme_cdata.jme_ssb_tag,
	    sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,
	    JME_SSB_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.jme_busaddr == 0) {
		device_printf(sc->jme_dev, "could not load DMA'able memory "
		    "for shared status block.\n");
		goto fail;
	}
	sc->jme_rdata.jme_ssb_block_paddr = ctx.jme_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->jme_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
	    &sc->jme_cdata.jme_rx_sparemap)) != 0) {
		device_printf(sc->jme_dev,
		    "could not create spare Rx dmamap.\n");
		goto fail;
	}
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->jme_dev,
			    "could not create Rx dmamap.\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	/* Tx ring */
	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
		if (sc->jme_cdata.jme_tx_ring_map)
			bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
			    sc->jme_cdata.jme_tx_ring_map);
		if (sc->jme_cdata.jme_tx_ring_map &&
		    sc->jme_rdata.jme_tx_ring)
			bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
			    sc->jme_rdata.jme_tx_ring,
			    sc->jme_cdata.jme_tx_ring_map);
		sc->jme_rdata.jme_tx_ring = NULL;
		sc->jme_cdata.jme_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
	}
	/* Rx ring */
	if (sc->jme_cdata.jme_rx_ring_tag != NULL) {
		if (sc->jme_cdata.jme_rx_ring_map)
			bus_dmamap_unload(sc->jme_cdata.jme_rx_ring_tag,
			    sc->jme_cdata.jme_rx_ring_map);
		if (sc->jme_cdata.jme_rx_ring_map &&
		    sc->jme_rdata.jme_rx_ring)
			bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag,
			    sc->jme_rdata.jme_rx_ring,
			    sc->jme_cdata.jme_rx_ring_map);
		sc->jme_rdata.jme_rx_ring = NULL;
		sc->jme_cdata.jme_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
		sc->jme_cdata.jme_rx_ring_tag = NULL;
	}
	/* Tx buffers */
	if (sc->jme_cdata.jme_tx_tag != NULL) {
		for (i = 0; i < JME_TX_RING_CNT; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
		sc->jme_cdata.jme_tx_tag = NULL;
	}
	/* Rx buffers */
	if (sc->jme_cdata.jme_rx_tag != NULL) {
		for (i = 0; i < JME_RX_RING_CNT; i++) {
			rxd = &sc->jme_cdata.jme_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->jme_cdata.jme_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
			    sc->jme_cdata.jme_rx_sparemap);
			sc->jme_cdata.jme_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
		sc->jme_cdata.jme_rx_tag = NULL;
	}

	/* Shared status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		if (sc->jme_cdata.jme_ssb_map)
			bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
			    sc->jme_cdata.jme_ssb_map);
		if (sc->jme_cdata.jme_ssb_map && sc->jme_rdata.jme_ssb_block)
			bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
			    sc->jme_rdata.jme_ssb_block,
			    sc->jme_cdata.jme_ssb_map);
		sc->jme_rdata.jme_ssb_block = NULL;
		sc->jme_cdata.jme_ssb_map = NULL;
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}
}

/*
 *	Make sure the interface is stopped at reboot time.
 */
static int
jme_shutdown(device_t dev)
{

	return (jme_suspend(dev));
}

/*
 * Unlike other ethernet controllers, the JMC250 requires the link
 * speed to be explicitly reset to 10/100Mbps before suspend, because
 * a gigabit link draws more than the 375mA at 3.3V permitted by the
 * PCI specification, which would result in power to the controller
 * being shut off completely. We reset the link speed to 10/100Mbps
 * with auto-negotiation, but we cannot tell whether that operation
 * succeeds, as we have no control after powering off; if the
 * renegotiation fails, WOL may not work.
 *
 * TODO
 *  Save the currently negotiated media speed/duplex/flow-control
 *  to the softc and restore the same link again after resuming.
 *  PHY handling such as powering down/resetting to 100Mbps
 *  may be better handled in the phy driver's suspend method.
 */
static void
jme_setlinkspeed(struct jme_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	JME_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->jme_miibus);
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
		default:
			break;
		}
	}
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/* Poll link state until jme(4) gets a 10/100 link. */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & IFM_AVALID) != 0) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					jme_mac_config(sc);
					return;
				default:
					break;
				}
			}
			JME_UNLOCK(sc);
			pause("jmelnk", hz);
			JME_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			device_printf(sc->jme_dev, "establishing link failed, "
			    "WOL may not work!\n");
	}
	/*
	 * No link, force MAC to have 100Mbps, full-duplex link.
	 * This is the last resort and may/may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	jme_mac_config(sc);
}

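/*
 * Arm PME/WOL before suspend: enable magic-frame wakeup when
 * IFCAP_WOL_MAGIC is set, otherwise power the PHY down to save power.
 */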
static void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	JME_LOCK_ASSERT(sc);

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	ifp = sc->jme_ifp;
	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_flags & JME_FLAG_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME. */
	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}

static int
jme_suspend(device_t dev)
{
	struct jme_softc *sc;

	sc = device_get_softc(dev);

	JME_LOCK(sc);
	jme_stop(sc);
	jme_setwol(sc);
	JME_UNLOCK(sc);

	return (0);
}

static int
jme_resume(device_t dev)
{
	struct jme_softc *sc;
	struct ifnet *ifp;
	uint16_t pmstat;
	int pmc;

	sc = device_get_softc(dev);

	JME_LOCK(sc);
	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) == 0) {
		pmstat = pci_read_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		/* Disable PME and clear PME status. */
		pmstat &= ~PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, pmstat, 2);
	}
	ifp = sc->jme_ifp;
	if ((ifp->if_flags & IFF_UP) != 0)
		jme_init_locked(sc);

	JME_UNLOCK(sc);

	return (0);
}

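/*
 * Enqueue a packet for transmission: for TSO frames, rewrite the TCP
 * pseudo checksum the controller expects, then DMA-map the mbuf chain
 * and construct the Tx descriptors.
 */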
static int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int error, i, nsegs, prod;
	uint32_t cflags, tso_segsz;

	JME_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		/*
		 * Due to its adherence to the NDIS specification, the
		 * JMC250 assumes the upper stack computed the TCP pseudo
		 * checksum without including the payload length. This
		 * breaks checksum offload for the TSO case, so recompute
		 * the TCP pseudo checksum for the JMC250. Hopefully this
		 * isn't much of a burden on modern CPUs.
		 */
		struct ether_header *eh;
		struct ip *ip;
		struct tcphdr *tcp;
		uint32_t ip_off, poff;

		if (M_WRITABLE(*m_head) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_DONTWAIT);
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}
		ip_off = sizeof(struct ether_header);
		m = m_pullup(*m_head, ip_off);
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		eh = mtod(m, struct ether_header *);
		/* Check the existence of VLAN tag. */
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			ip_off = sizeof(struct ether_vlan_header);
			m = m_pullup(m, ip_off);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		m = m_pullup(m, ip_off + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, char *) + ip_off);
		poff = ip_off + (ip->ip_hl << 2);
		m = m_pullup(m, poff + sizeof(struct tcphdr));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		tcp = (struct tcphdr *)(mtod(m, char *) + poff);
		/*
		 * Reset the IP checksum and recompute the TCP pseudo
		 * checksum that the NDIS specification requires.
		 */
		ip->ip_sum = 0;
		if (poff + (tcp->th_off << 2) == m->m_pkthdr.len) {
			tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr,
			    htons((tcp->th_off << 2) + IPPROTO_TCP));
			/* No need to TSO, force IP checksum offload. */
			m->m_pkthdr.csum_flags &= ~CSUM_TSO;
			m->m_pkthdr.csum_flags |= CSUM_IP;
		} else
			tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		*m_head = m;
	}

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_DONTWAIT, JME_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/*
	 * Check descriptor overrun. Leave one free descriptor.
	 * Since we always use 64bit address mode for transmitting,
	 * each Tx request requires one more dummy descriptor.
	 */
	if (sc->jme_cdata.jme_tx_cnt + nsegs + 1 > JME_TX_RING_CNT - 1) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}

	m = *m_head;
	cflags = 0;
	tso_segsz = 0;
	/* Configure checksum offload and TSO. */
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz <<
		    JME_TD_MSS_SHIFT;
		cflags |= JME_TD_TSO;
	} else {
		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
			cflags |= JME_TD_IPCSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
			cflags |= JME_TD_TCPCSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			cflags |= JME_TD_UDPCSUM;
	}
	/* Configure VLAN. */
	if ((m->m_flags & M_VLANTAG) != 0) {
		cflags |= (m->m_pkthdr.ether_vtag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}

	desc = &sc->jme_rdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->buflen = htole32(tso_segsz);
	desc->addr_hi = htole32(m->m_pkthdr.len);
	desc->addr_lo = 0;
	sc->jme_cdata.jme_tx_cnt++;
	JME_DESC_INC(prod, JME_TX_RING_CNT);
	for (i = 0; i < nsegs; i++) {
		desc = &sc->jme_rdata.jme_tx_ring[prod];
		desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT);
		desc->buflen = htole32(txsegs[i].ds_len);
		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
		sc->jme_cdata.jme_tx_cnt++;
		JME_DESC_INC(prod, JME_TX_RING_CNT);
	}

1688 	/* Update producer index. */
1689 	sc->jme_cdata.jme_tx_prod = prod;
1690 	/*
1691 	 * Finally, request an interrupt and give ownership of the
1692 	 * first descriptor to the hardware.
1693 	 */
1694 	desc = txd->tx_desc;
1695 	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
1696 
1697 	txd->tx_m = m;
1698 	txd->tx_ndesc = nsegs + 1;
1699 
1700 	/* Sync descriptors. */
1701 	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
1702 	    BUS_DMASYNC_PREWRITE);
1703 	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
1704 	    sc->jme_cdata.jme_tx_ring_map,
1705 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1706 
1707 	return (0);
1708 }
1709 
1710 static void
1711 jme_tx_task(void *arg, int pending)
1712 {
1713 	struct ifnet *ifp;
1714 
1715 	ifp = (struct ifnet *)arg;
1716 	jme_start(ifp);
1717 }
1718 
1719 static void
1720 jme_start(struct ifnet *ifp)
1721 {
1722 	struct jme_softc *sc;
1723 	struct mbuf *m_head;
1724 	int enq;
1725 
1726 	sc = ifp->if_softc;
1727 
1728 	JME_LOCK(sc);
1729 
1730 	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT)
1731 		jme_txeof(sc);
1732 
1733 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1734 	    IFF_DRV_RUNNING || (sc->jme_flags & JME_FLAG_LINK) == 0) {
1735 		JME_UNLOCK(sc);
1736 		return;
1737 	}
1738 
1739 	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
1740 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1741 		if (m_head == NULL)
1742 			break;
1743 		/*
1744 		 * Pack the data into the transmit ring. If we
1745 		 * don't have room, set the OACTIVE flag and wait
1746 		 * for the NIC to drain the ring.
1747 		 */
1748 		if (jme_encap(sc, &m_head)) {
1749 			if (m_head == NULL)
1750 				break;
1751 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1752 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1753 			break;
1754 		}
1755 
1756 		enq++;
1757 		/*
1758 		 * If there's a BPF listener, bounce a copy of this frame
1759 		 * to him.
1760 		 */
1761 		ETHER_BPF_MTAP(ifp, m_head);
1762 	}
1763 
1764 	if (enq > 0) {
1765 		/*
1766 		 * Reading TXCSR takes a very long time under heavy load,
1767 		 * so cache the TXCSR value and write it, ORed with the
1768 		 * kick command, back to TXCSR. This saves one register
1769 		 * access cycle.
1770 		 */
1771 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1772 		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
1773 		/* Set a timeout in case the chip goes out to lunch. */
1774 		sc->jme_watchdog_timer = JME_TX_TIMEOUT;
1775 	}
1776 
1777 	JME_UNLOCK(sc);
1778 }
1779 
1780 static void
1781 jme_watchdog(struct jme_softc *sc)
1782 {
1783 	struct ifnet *ifp;
1784 
1785 	JME_LOCK_ASSERT(sc);
1786 
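	/*
	 * A timer value of 0 means the watchdog is disarmed; otherwise
	 * decrement it and only run the recovery code below once the
	 * count reaches 0.
	 */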
1787 	if (sc->jme_watchdog_timer == 0 || --sc->jme_watchdog_timer)
1788 		return;
1789 
1790 	ifp = sc->jme_ifp;
1791 	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1792 		if_printf(sc->jme_ifp, "watchdog timeout (missed link)\n");
1793 		ifp->if_oerrors++;
1794 		jme_init_locked(sc);
1795 		return;
1796 	}
1797 	jme_txeof(sc);
1798 	if (sc->jme_cdata.jme_tx_cnt == 0) {
1799 		if_printf(sc->jme_ifp,
1800 		    "watchdog timeout (missed Tx interrupts) -- recovering\n");
1801 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1802 			taskqueue_enqueue(sc->jme_tq, &sc->jme_tx_task);
1803 		return;
1804 	}
1805 
1806 	if_printf(sc->jme_ifp, "watchdog timeout\n");
1807 	ifp->if_oerrors++;
1808 	jme_init_locked(sc);
1809 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1810 		taskqueue_enqueue(sc->jme_tq, &sc->jme_tx_task);
1811 }
1812 
1813 static int
1814 jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1815 {
1816 	struct jme_softc *sc;
1817 	struct ifreq *ifr;
1818 	struct mii_data *mii;
1819 	uint32_t reg;
1820 	int error, mask;
1821 
1822 	sc = ifp->if_softc;
1823 	ifr = (struct ifreq *)data;
1824 	error = 0;
1825 	switch (cmd) {
1826 	case SIOCSIFMTU:
1827 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
1828 		    ((sc->jme_flags & JME_FLAG_NOJUMBO) != 0 &&
1829 		    ifr->ifr_mtu > JME_MAX_MTU)) {
1830 			error = EINVAL;
1831 			break;
1832 		}
1833 
1834 		if (ifp->if_mtu != ifr->ifr_mtu) {
1835 			/*
1836 			 * No special configuration is required when the
1837 			 * interface MTU is changed, but availability of TSO/Tx
1838 			 * checksum offload should be checked against the new
1839 			 * MTU size as the Tx FIFO is just 2K deep.
1840 			 */
1841 			JME_LOCK(sc);
1842 			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
1843 				ifp->if_capenable &=
1844 				    ~(IFCAP_TXCSUM | IFCAP_TSO4);
1845 				ifp->if_hwassist &=
1846 				    ~(JME_CSUM_FEATURES | CSUM_TSO);
1847 				VLAN_CAPABILITIES(ifp);
1848 			}
1849 			ifp->if_mtu = ifr->ifr_mtu;
1850 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1851 				jme_init_locked(sc);
1852 			JME_UNLOCK(sc);
1853 		}
1854 		break;
1855 	case SIOCSIFFLAGS:
1856 		JME_LOCK(sc);
1857 		if ((ifp->if_flags & IFF_UP) != 0) {
1858 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1859 				if (((ifp->if_flags ^ sc->jme_if_flags)
1860 				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
1861 					jme_set_filter(sc);
1862 			} else {
1863 				if ((sc->jme_flags & JME_FLAG_DETACH) == 0)
1864 					jme_init_locked(sc);
1865 			}
1866 		} else {
1867 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1868 				jme_stop(sc);
1869 		}
1870 		sc->jme_if_flags = ifp->if_flags;
1871 		JME_UNLOCK(sc);
1872 		break;
1873 	case SIOCADDMULTI:
1874 	case SIOCDELMULTI:
1875 		JME_LOCK(sc);
1876 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1877 			jme_set_filter(sc);
1878 		JME_UNLOCK(sc);
1879 		break;
1880 	case SIOCSIFMEDIA:
1881 	case SIOCGIFMEDIA:
1882 		mii = device_get_softc(sc->jme_miibus);
1883 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1884 		break;
1885 	case SIOCSIFCAP:
1886 		JME_LOCK(sc);
1887 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1888 		if ((mask & IFCAP_TXCSUM) != 0 &&
1889 		    ifp->if_mtu < JME_TX_FIFO_SIZE) {
1890 			if ((IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
1891 				ifp->if_capenable ^= IFCAP_TXCSUM;
1892 				if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
1893 					ifp->if_hwassist |= JME_CSUM_FEATURES;
1894 				else
1895 					ifp->if_hwassist &= ~JME_CSUM_FEATURES;
1896 			}
1897 		}
1898 		if ((mask & IFCAP_RXCSUM) != 0 &&
1899 		    (IFCAP_RXCSUM & ifp->if_capabilities) != 0) {
1900 			ifp->if_capenable ^= IFCAP_RXCSUM;
1901 			reg = CSR_READ_4(sc, JME_RXMAC);
1902 			reg &= ~RXMAC_CSUM_ENB;
1903 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
1904 				reg |= RXMAC_CSUM_ENB;
1905 			CSR_WRITE_4(sc, JME_RXMAC, reg);
1906 		}
1907 		if ((mask & IFCAP_TSO4) != 0 &&
1908 		    ifp->if_mtu < JME_TX_FIFO_SIZE) {
1909 			if ((IFCAP_TSO4 & ifp->if_capabilities) != 0) {
1910 				ifp->if_capenable ^= IFCAP_TSO4;
1911 				if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
1912 					ifp->if_hwassist |= CSUM_TSO;
1913 				else
1914 					ifp->if_hwassist &= ~CSUM_TSO;
1915 			}
1916 		}
1917 		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
1918 		    (IFCAP_WOL_MAGIC & ifp->if_capabilities) != 0)
1919 			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
1920 		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
1921 		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
1922 			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1923 		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
1924 		    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
1925 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1926 			jme_set_vlan(sc);
1927 		}
1928 		JME_UNLOCK(sc);
1929 		VLAN_CAPABILITIES(ifp);
1930 		break;
1931 	default:
1932 		error = ether_ioctl(ifp, cmd, data);
1933 		break;
1934 	}
1935 
1936 	return (error);
1937 }
1938 
1939 static void
1940 jme_mac_config(struct jme_softc *sc)
1941 {
1942 	struct mii_data *mii;
1943 	uint32_t ghc, gpreg, rxmac, txmac, txpause;
1944 
1945 	JME_LOCK_ASSERT(sc);
1946 
1947 	mii = device_get_softc(sc->jme_miibus);
1948 
1949 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
1950 	DELAY(10);
1951 	CSR_WRITE_4(sc, JME_GHC, 0);
1952 	ghc = 0;
1953 	rxmac = CSR_READ_4(sc, JME_RXMAC);
1954 	rxmac &= ~RXMAC_FC_ENB;
1955 	txmac = CSR_READ_4(sc, JME_TXMAC);
1956 	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
1957 	txpause = CSR_READ_4(sc, JME_TXPFC);
1958 	txpause &= ~TXPFC_PAUSE_ENB;
1959 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1960 		ghc |= GHC_FULL_DUPLEX;
1961 		rxmac &= ~RXMAC_COLL_DET_ENB;
1962 		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
1963 		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
1964 		    TXMAC_FRAME_BURST);
1965 #ifdef notyet
1966 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1967 			txpause |= TXPFC_PAUSE_ENB;
1968 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1969 			rxmac |= RXMAC_FC_ENB;
1970 #endif
1971 		/* Disable retry transmit timer/retry limit. */
1972 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
1973 		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
1974 	} else {
1975 		rxmac |= RXMAC_COLL_DET_ENB;
1976 		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
1977 		/* Enable retry transmit timer/retry limit. */
1978 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
1979 		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
1980 	}
1981 	/* Reprogram Tx/Rx MACs with resolved speed/duplex. */
1982 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1983 	case IFM_10_T:
1984 		ghc |= GHC_SPEED_10;
1985 		break;
1986 	case IFM_100_TX:
1987 		ghc |= GHC_SPEED_100;
1988 		break;
1989 	case IFM_1000_T:
1990 		if ((sc->jme_flags & JME_FLAG_FASTETH) != 0)
1991 			break;
1992 		ghc |= GHC_SPEED_1000;
1993 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
1994 			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
1995 		break;
1996 	default:
1997 		break;
1998 	}
1999 	if (sc->jme_rev == DEVICEID_JMC250 &&
2000 	    sc->jme_chip_rev == DEVICEREVID_JMC250_A2) {
2001 		/*
2002 		 * Workaround occasional packet loss issue of JMC250 A2
2003 		 * when it runs on half-duplex media.
2004 		 */
2005 		gpreg = CSR_READ_4(sc, JME_GPREG1);
2006 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
2007 			gpreg &= ~GPREG1_HDPX_FIX;
2008 		else
2009 			gpreg |= GPREG1_HDPX_FIX;
2010 		CSR_WRITE_4(sc, JME_GPREG1, gpreg);
2011 		/* Workaround CRC errors at 100Mbps on JMC250 A2. */
2012 		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
2013 			/* Extend interface FIFO depth. */
2014 			jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
2015 			    0x1B, 0x0000);
2016 		} else {
2017 			/* Select default interface FIFO depth. */
2018 			jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
2019 			    0x1B, 0x0004);
2020 		}
2021 	}
2022 	CSR_WRITE_4(sc, JME_GHC, ghc);
2023 	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
2024 	CSR_WRITE_4(sc, JME_TXMAC, txmac);
2025 	CSR_WRITE_4(sc, JME_TXPFC, txpause);
2026 }
2027 
2028 static void
2029 jme_link_task(void *arg, int pending)
2030 {
2031 	struct jme_softc *sc;
2032 	struct mii_data *mii;
2033 	struct ifnet *ifp;
2034 	struct jme_txdesc *txd;
2035 	bus_addr_t paddr;
2036 	int i;
2037 
2038 	sc = (struct jme_softc *)arg;
2039 
2040 	JME_LOCK(sc);
2041 	mii = device_get_softc(sc->jme_miibus);
2042 	ifp = sc->jme_ifp;
2043 	if (mii == NULL || ifp == NULL ||
2044 	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2045 		JME_UNLOCK(sc);
2046 		return;
2047 	}
2048 
2049 	sc->jme_flags &= ~JME_FLAG_LINK;
2050 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
2051 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
2052 		case IFM_10_T:
2053 		case IFM_100_TX:
2054 			sc->jme_flags |= JME_FLAG_LINK;
2055 			break;
2056 		case IFM_1000_T:
2057 			if ((sc->jme_flags & JME_FLAG_FASTETH) != 0)
2058 				break;
2059 			sc->jme_flags |= JME_FLAG_LINK;
2060 			break;
2061 		default:
2062 			break;
2063 		}
2064 	}
2065 
2066 	/*
2067 	 * Disabling the Rx/Tx MACs has the side-effect of resetting
2068 	 * the JME_TXNDA/JME_RXNDA registers to the first address of
2069 	 * the Tx/Rx descriptor rings, so the driver should reset its
2070 	 * internal producer/consumer pointers and reclaim any
2071 	 * allocated resources. Note that just saving the values of
2072 	 * the JME_TXNDA and JME_RXNDA registers before stopping the
2073 	 * MAC and restoring them afterwards is not sufficient to
2074 	 * guarantee correct MAC state, because stopping MAC
2075 	 * operation can take a while and the hardware might have
2076 	 * updated the JME_TXNDA/JME_RXNDA registers during the
2077 	 * stop operation.
2078 	 */
2079 	/* Block execution of task. */
2080 	taskqueue_block(sc->jme_tq);
2081 	/* Disable interrupts and stop driver. */
2082 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2083 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2084 	callout_stop(&sc->jme_tick_ch);
2085 	sc->jme_watchdog_timer = 0;
2086 
2087 	/* Stop receiver/transmitter. */
2088 	jme_stop_rx(sc);
2089 	jme_stop_tx(sc);
2090 
2091 	/* XXX Drain all queued tasks. */
2092 	JME_UNLOCK(sc);
2093 	taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
2094 	taskqueue_drain(sc->jme_tq, &sc->jme_tx_task);
2095 	JME_LOCK(sc);
2096 
2097 	jme_rxintr(sc, JME_RX_RING_CNT);
2098 	if (sc->jme_cdata.jme_rxhead != NULL)
2099 		m_freem(sc->jme_cdata.jme_rxhead);
2100 	JME_RXCHAIN_RESET(sc);
2101 	jme_txeof(sc);
2102 	if (sc->jme_cdata.jme_tx_cnt != 0) {
2103 		/* Remove queued packets for transmit. */
2104 		for (i = 0; i < JME_TX_RING_CNT; i++) {
2105 			txd = &sc->jme_cdata.jme_txdesc[i];
2106 			if (txd->tx_m != NULL) {
2107 				bus_dmamap_sync(
2108 				    sc->jme_cdata.jme_tx_tag,
2109 				    txd->tx_dmamap,
2110 				    BUS_DMASYNC_POSTWRITE);
2111 				bus_dmamap_unload(
2112 				    sc->jme_cdata.jme_tx_tag,
2113 				    txd->tx_dmamap);
2114 				m_freem(txd->tx_m);
2115 				txd->tx_m = NULL;
2116 				txd->tx_ndesc = 0;
2117 				ifp->if_oerrors++;
2118 			}
2119 		}
2120 	}
2121 
2122 	/*
2123 	 * Reuse the configured Rx descriptors and reset the
2124 	 * producer/consumer indices.
2125 	 */
2126 	sc->jme_cdata.jme_rx_cons = 0;
2127 	atomic_set_int(&sc->jme_morework, 0);
2128 	jme_init_tx_ring(sc);
2129 	/* Initialize shadow status block. */
2130 	jme_init_ssb(sc);
2131 
2132 	/* Program MAC with resolved speed/duplex/flow-control. */
2133 	if ((sc->jme_flags & JME_FLAG_LINK) != 0) {
2134 		jme_mac_config(sc);
2135 
2136 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
2137 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2138 
2139 		/* Set Tx ring address to the hardware. */
2140 		paddr = JME_TX_RING_ADDR(sc, 0);
2141 		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2142 		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2143 
2144 		/* Set Rx ring address to the hardware. */
2145 		paddr = JME_RX_RING_ADDR(sc, 0);
2146 		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2147 		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2148 
2149 		/* Restart receiver/transmitter. */
2150 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
2151 		    RXCSR_RXQ_START);
2152 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
2153 	}
2154 
2155 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2156 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2157 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2158 	/* Unblock execution of task. */
2159 	taskqueue_unblock(sc->jme_tq);
2160 	/* Reenable interrupts. */
2161 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2162 
2163 	JME_UNLOCK(sc);
2164 }
2165 
2166 static int
2167 jme_intr(void *arg)
2168 {
2169 	struct jme_softc *sc;
2170 	uint32_t status;
2171 
2172 	sc = (struct jme_softc *)arg;
2173 
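	/*
	 * This runs as a fast interrupt filter: reading all-ones
	 * typically means the device is gone or not responding, so it
	 * is treated as a stray interrupt; real work is deferred to
	 * jme_int_task via the taskqueue.
	 */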
2174 	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
2175 	if (status == 0 || status == 0xFFFFFFFF)
2176 		return (FILTER_STRAY);
2177 	/* Disable interrupts. */
2178 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2179 	taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);
2180 
2181 	return (FILTER_HANDLED);
2182 }
2183 
2184 static void
2185 jme_int_task(void *arg, int pending)
2186 {
2187 	struct jme_softc *sc;
2188 	struct ifnet *ifp;
2189 	uint32_t status;
2190 	int more;
2191 
2192 	sc = (struct jme_softc *)arg;
2193 	ifp = sc->jme_ifp;
2194 
2195 	status = CSR_READ_4(sc, JME_INTR_STATUS);
2196 	more = atomic_readandclear_int(&sc->jme_morework);
2197 	if (more != 0) {
2198 		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO;
2199 		more = 0;
2200 	}
2201 	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
2202 		goto done;
2203 	/* Reset PCC counter/timer and Ack interrupts. */
2204 	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
2205 	if ((status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) != 0)
2206 		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
2207 	if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0)
2208 		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
2209 	CSR_WRITE_4(sc, JME_INTR_STATUS, status);
2210 	more = 0;
2211 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2212 		if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0) {
2213 			more = jme_rxintr(sc, sc->jme_process_limit);
2214 			if (more != 0)
2215 				atomic_set_int(&sc->jme_morework, 1);
2216 		}
2217 		if ((status & INTR_RXQ_DESC_EMPTY) != 0) {
2218 			/*
2219 			 * Notify the hardware that new Rx buffers are
2220 			 * available.
2221 			 * Reading RXCSR takes a very long time under
2222 			 * heavy load, so cache the RXCSR value and
2223 			 * write it, ORed with the kick command, back
2224 			 * to RXCSR. This saves one register access
2225 			 * cycle.
2226 			 */
2227 			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
2228 			    RXCSR_RX_ENB | RXCSR_RXQ_START);
2229 		}
2230 		/*
2231 		 * Reclaiming Tx buffers is deferred so that jme(4) runs
2232 		 * without holding locks here.
2233 		 */
2234 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2235 			taskqueue_enqueue(sc->jme_tq, &sc->jme_tx_task);
2236 	}
2237 
2238 	if (more != 0 || (CSR_READ_4(sc, JME_INTR_STATUS) & JME_INTRS) != 0) {
2239 		taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);
2240 		return;
2241 	}
2242 done:
2243 	/* Reenable interrupts. */
2244 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2245 }
2246 
2247 static void
2248 jme_txeof(struct jme_softc *sc)
2249 {
2250 	struct ifnet *ifp;
2251 	struct jme_txdesc *txd;
2252 	uint32_t status;
2253 	int cons, nsegs;
2254 
2255 	JME_LOCK_ASSERT(sc);
2256 
2257 	ifp = sc->jme_ifp;
2258 
2259 	cons = sc->jme_cdata.jme_tx_cons;
2260 	if (cons == sc->jme_cdata.jme_tx_prod)
2261 		return;
2262 
2263 	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2264 	    sc->jme_cdata.jme_tx_ring_map,
2265 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2266 
2267 	/*
2268 	 * Go through our Tx list and free mbufs for those
2269 	 * frames which have been transmitted.
2270 	 */
2271 	for (; cons != sc->jme_cdata.jme_tx_prod;) {
2272 		txd = &sc->jme_cdata.jme_txdesc[cons];
2273 		status = le32toh(txd->tx_desc->flags);
2274 		if ((status & JME_TD_OWN) == JME_TD_OWN)
2275 			break;
2276 
2277 		if ((status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) != 0)
2278 			ifp->if_oerrors++;
2279 		else {
2280 			ifp->if_opackets++;
2281 			if ((status & JME_TD_COLLISION) != 0)
2282 				ifp->if_collisions +=
2283 				    le32toh(txd->tx_desc->buflen) &
2284 				    JME_TD_BUF_LEN_MASK;
2285 		}
2286 		/*
2287 		 * Only the first descriptor of a multi-descriptor
2288 		 * transmission is updated, so the driver has to skip the
2289 		 * entire chain of buffers for the transmitted frame. In
2290 		 * other words, the JME_TD_OWN bit is valid only in the
2291 		 * first descriptor of a multi-descriptor transmission.
2292 		 */
2293 		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
2294 			sc->jme_rdata.jme_tx_ring[cons].flags = 0;
2295 			JME_DESC_INC(cons, JME_TX_RING_CNT);
2296 		}
2297 
2298 		/* Reclaim transferred mbufs. */
2299 		bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
2300 		    BUS_DMASYNC_POSTWRITE);
2301 		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
2302 
2303 		KASSERT(txd->tx_m != NULL,
2304 		    ("%s: freeing NULL mbuf!\n", __func__));
2305 		m_freem(txd->tx_m);
2306 		txd->tx_m = NULL;
2307 		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
2308 		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
2309 		    ("%s: Active Tx desc counter was garbled\n", __func__));
2310 		txd->tx_ndesc = 0;
2311 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2312 	}
2313 	sc->jme_cdata.jme_tx_cons = cons;
2314 	/* Disarm watchdog timer when no descriptors are pending in queue. */
2315 	if (sc->jme_cdata.jme_tx_cnt == 0)
2316 		sc->jme_watchdog_timer = 0;
2317 
2318 	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2319 	    sc->jme_cdata.jme_tx_ring_map,
2320 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2321 }
2322 
2323 static __inline void
2324 jme_discard_rxbuf(struct jme_softc *sc, int cons)
2325 {
2326 	struct jme_desc *desc;
2327 
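
	/*
	 * Hand the current buffer back to the hardware unchanged: the
	 * frame it holds is dropped and the descriptor is simply
	 * re-armed with ownership returned to the chip.
	 */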
2328 	desc = &sc->jme_rdata.jme_rx_ring[cons];
2329 	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2330 	desc->buflen = htole32(MCLBYTES);
2331 }
2332 
2333 /* Receive a frame. */
2334 static void
2335 jme_rxeof(struct jme_softc *sc)
2336 {
2337 	struct ifnet *ifp;
2338 	struct jme_desc *desc;
2339 	struct jme_rxdesc *rxd;
2340 	struct mbuf *mp, *m;
2341 	uint32_t flags, status;
2342 	int cons, count, nsegs;
2343 
2344 	ifp = sc->jme_ifp;
2345 
2346 	cons = sc->jme_cdata.jme_rx_cons;
2347 	desc = &sc->jme_rdata.jme_rx_ring[cons];
2348 	flags = le32toh(desc->flags);
2349 	status = le32toh(desc->buflen);
2350 	nsegs = JME_RX_NSEGS(status);
2351 	sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2352 	if ((status & JME_RX_ERR_STAT) != 0) {
2353 		ifp->if_ierrors++;
2354 		jme_discard_rxbuf(sc, sc->jme_cdata.jme_rx_cons);
2355 #ifdef JME_SHOW_ERRORS
2356 		device_printf(sc->jme_dev, "%s: receive error = 0x%b\n",
2357 		    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
2358 #endif
2359 		sc->jme_cdata.jme_rx_cons += nsegs;
2360 		sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
2361 		return;
2362 	}
2363 
2364 	for (count = 0; count < nsegs; count++,
2365 	    JME_DESC_INC(cons, JME_RX_RING_CNT)) {
2366 		rxd = &sc->jme_cdata.jme_rxdesc[cons];
2367 		mp = rxd->rx_m;
2368 		/* Add a new receive buffer to the ring. */
2369 		if (jme_newbuf(sc, rxd) != 0) {
2370 			ifp->if_iqdrops++;
2371 			/* Reuse buffer. */
2372 			for (; count < nsegs; count++) {
2373 				jme_discard_rxbuf(sc, cons);
2374 				JME_DESC_INC(cons, JME_RX_RING_CNT);
2375 			}
2376 			if (sc->jme_cdata.jme_rxhead != NULL) {
2377 				m_freem(sc->jme_cdata.jme_rxhead);
2378 				JME_RXCHAIN_RESET(sc);
2379 			}
2380 			break;
2381 		}
2382 
2383 		/*
2384 		 * Assume we've received a full sized frame.
2385 		 * The actual size is fixed up when we encounter the end
2386 		 * of a multi-segmented frame.
2387 		 */
2388 		mp->m_len = MCLBYTES;
2389 
2390 		/* Chain received mbufs. */
2391 		if (sc->jme_cdata.jme_rxhead == NULL) {
2392 			sc->jme_cdata.jme_rxhead = mp;
2393 			sc->jme_cdata.jme_rxtail = mp;
2394 		} else {
2395 			/*
2396 			 * Receive processor can receive a maximum frame
2397 			 * size of 65535 bytes.
2398 			 */
2399 			mp->m_flags &= ~M_PKTHDR;
2400 			sc->jme_cdata.jme_rxtail->m_next = mp;
2401 			sc->jme_cdata.jme_rxtail = mp;
2402 		}
2403 
2404 		if (count == nsegs - 1) {
2405 			/* Last desc. for this frame. */
2406 			m = sc->jme_cdata.jme_rxhead;
2407 			m->m_flags |= M_PKTHDR;
2408 			m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
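			/*
			 * Fix up the per-mbuf lengths. For example, a
			 * 3000 byte frame plus 10 pad bytes spans
			 * nsegs = 2 clusters: the head mbuf holds
			 * MCLBYTES - JME_RX_PAD_BYTES = 2038 bytes and
			 * the tail mbuf holds the remaining 962 bytes
			 * (assuming 2K clusters).
			 */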
2409 			if (nsegs > 1) {
2410 				/* Set first mbuf size. */
2411 				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2412 				/* Set last mbuf size. */
2413 				mp->m_len = sc->jme_cdata.jme_rxlen -
2414 				    ((MCLBYTES - JME_RX_PAD_BYTES) +
2415 				    (MCLBYTES * (nsegs - 2)));
2416 			} else
2417 				m->m_len = sc->jme_cdata.jme_rxlen;
2418 			m->m_pkthdr.rcvif = ifp;
2419 
2420 			/*
2421 			 * Account for the 10 bytes of auto padding used to
2422 			 * align the IP header on a 32bit boundary. Also
2423 			 * note that the CRC bytes are automatically removed
2424 			 * by the hardware.
2425 			 */
2426 			m->m_data += JME_RX_PAD_BYTES;
2427 
2428 			/* Set checksum information. */
2429 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
2430 			    (flags & JME_RD_IPV4) != 0) {
2431 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2432 				if ((flags & JME_RD_IPCSUM) != 0)
2433 					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2434 				if (((flags & JME_RD_MORE_FRAG) == 0) &&
2435 				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2436 				    (JME_RD_TCP | JME_RD_TCPCSUM) ||
2437 				    (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2438 				    (JME_RD_UDP | JME_RD_UDPCSUM))) {
2439 					m->m_pkthdr.csum_flags |=
2440 					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2441 					m->m_pkthdr.csum_data = 0xffff;
2442 				}
2443 			}
2444 
2445 			/* Check for VLAN tagged packets. */
2446 			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
2447 			    (flags & JME_RD_VLAN_TAG) != 0) {
2448 				m->m_pkthdr.ether_vtag =
2449 				    flags & JME_RD_VLAN_MASK;
2450 				m->m_flags |= M_VLANTAG;
2451 			}
2452 
2453 			ifp->if_ipackets++;
2454 			/* Pass it on. */
2455 			(*ifp->if_input)(ifp, m);
2456 
2457 			/* Reset mbuf chains. */
2458 			JME_RXCHAIN_RESET(sc);
2459 		}
2460 	}
2461 
2462 	sc->jme_cdata.jme_rx_cons += nsegs;
2463 	sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
2464 }
2465 
2466 static int
2467 jme_rxintr(struct jme_softc *sc, int count)
2468 {
2469 	struct jme_desc *desc;
2470 	int nsegs, prog, pktlen;
2471 
2472 	bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2473 	    sc->jme_cdata.jme_rx_ring_map,
2474 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2475 
2476 	for (prog = 0; count > 0; prog++) {
2477 		desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
2478 		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2479 			break;
2480 		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2481 			break;
2482 		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2483 		/*
2484 		 * Check the number of segments against the received byte
2485 		 * count. A mismatch would indicate that the hardware is
2486 		 * still updating the Rx descriptors. I'm not sure whether
2487 		 * this check is needed.
2488 		 */
2489 		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2490 		if (nsegs != ((pktlen + (MCLBYTES - 1)) / MCLBYTES))
2491 			break;
2492 		prog++;
2493 		/* Received a frame. */
2494 		jme_rxeof(sc);
2495 		count -= nsegs;
2496 	}
2497 
2498 	if (prog > 0)
2499 		bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2500 		    sc->jme_cdata.jme_rx_ring_map,
2501 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2502 
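	/*
	 * A return value of EAGAIN tells the caller (jme_int_task) that
	 * the processing budget ran out and more Rx work is pending.
	 */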
2503 	return (count > 0 ? 0 : EAGAIN);
2504 }
2505 
2506 static void
2507 jme_tick(void *arg)
2508 {
2509 	struct jme_softc *sc;
2510 	struct mii_data *mii;
2511 
2512 	sc = (struct jme_softc *)arg;
2513 
2514 	JME_LOCK_ASSERT(sc);
2515 
2516 	mii = device_get_softc(sc->jme_miibus);
2517 	mii_tick(mii);
2518 	/*
2519 	 * Reclaim Tx buffers that have completed. It's not strictly
2520 	 * needed here but it releases allocated mbuf chains sooner and
2521 	 * limits the maximum reclaim delay to one hz tick.
2522 	 */
2523 	jme_txeof(sc);
2524 	jme_watchdog(sc);
2525 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2526 }
2527 
2528 static void
2529 jme_reset(struct jme_softc *sc)
2530 {
2531 
2532 	/* Stop receiver, transmitter. */
2533 	jme_stop_rx(sc);
2534 	jme_stop_tx(sc);
2535 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2536 	DELAY(10);
2537 	CSR_WRITE_4(sc, JME_GHC, 0);
2538 }
2539 
2540 static void
2541 jme_init(void *xsc)
2542 {
2543 	struct jme_softc *sc;
2544 
2545 	sc = (struct jme_softc *)xsc;
2546 	JME_LOCK(sc);
2547 	jme_init_locked(sc);
2548 	JME_UNLOCK(sc);
2549 }
2550 
2551 static void
2552 jme_init_locked(struct jme_softc *sc)
2553 {
2554 	struct ifnet *ifp;
2555 	struct mii_data *mii;
2556 	uint8_t eaddr[ETHER_ADDR_LEN];
2557 	bus_addr_t paddr;
2558 	uint32_t reg;
2559 	int error;
2560 
2561 	JME_LOCK_ASSERT(sc);
2562 
2563 	ifp = sc->jme_ifp;
2564 	mii = device_get_softc(sc->jme_miibus);
2565 
2566 	/*
2567 	 * Cancel any pending I/O.
2568 	 */
2569 	jme_stop(sc);
2570 
2571 	/*
2572 	 * Reset the chip to a known state.
2573 	 */
2574 	jme_reset(sc);
2575 
2576 	/* Init descriptors. */
2577 	error = jme_init_rx_ring(sc);
2578 	if (error != 0) {
2579 		device_printf(sc->jme_dev,
2580 		    "%s: initialization failed: no memory for Rx buffers.\n",
2581 		    __func__);
2582 		jme_stop(sc);
2583 		return;
2584 	}
2585 	jme_init_tx_ring(sc);
2586 	/* Initialize shadow status block. */
2587 	jme_init_ssb(sc);
2588 
2589 	/* Reprogram the station address. */
2590 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2591 	CSR_WRITE_4(sc, JME_PAR0,
2592 	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2593 	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
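	/*
	 * e.g. programming the (hypothetical) address 00:1d:92:aa:bb:cc
	 * yields PAR0 = 0xaa921d00 and PAR1 = 0x0000ccbb, i.e. the
	 * address is stored least-significant byte first.
	 */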
2594 
2595 	/*
2596 	 * Configure Tx queue.
2597 	 *  Tx priority queue weight value : 0
2598 	 *  Tx FIFO threshold for processing next packet : 16QW
2599 	 *  Maximum Tx DMA length : 512
2600 	 *  Allow Tx DMA burst.
2601 	 */
2602 	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2603 	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2604 	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2605 	sc->jme_txcsr |= sc->jme_tx_dma_size;
2606 	sc->jme_txcsr |= TXCSR_DMA_BURST;
2607 	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2608 
2609 	/* Set Tx descriptor counter. */
2610 	CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT);
2611 
2612 	/* Set Tx ring address to the hardware. */
2613 	paddr = JME_TX_RING_ADDR(sc, 0);
2614 	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2615 	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2616 
2617 	/* Configure TxMAC parameters. */
2618 	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2619 	reg |= TXMAC_THRESH_1_PKT;
2620 	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2621 	CSR_WRITE_4(sc, JME_TXMAC, reg);
2622 
2623 	/*
2624 	 * Configure Rx queue.
2625 	 *  FIFO full threshold for transmitting Tx pause packet : 128T
2626 	 *  FIFO threshold for processing next packet : 128QW
2627 	 *  Rx queue 0 select
2628 	 *  Max Rx DMA length : 128
2629 	 *  Rx descriptor retry : 32
2630 	 *  Rx descriptor retry time gap : 256ns
2631 	 *  Don't receive runt/bad frame.
2632 	 */
2633 	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2634 	/*
2635 	 * Since the Rx FIFO size is 4K bytes, receiving frames larger
2636 	 * than 4K bytes will suffer from Rx FIFO overruns, so decrease
2637 	 * the FIFO threshold to reduce overruns for frames larger
2638 	 * than 4000 bytes.
2639 	 * For best performance with standard MTU sized frames use the
2640 	 * maximum allowable FIFO threshold, 128QW.
2641 	 */
2642 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
2643 	    ETHER_CRC_LEN) > JME_RX_FIFO_SIZE)
2644 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2645 	else
2646 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2647 	sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
2648 	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2649 	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2650 	CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
2651 
2652 	/* Set Rx descriptor counter. */
2653 	CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT);
2654 
2655 	/* Set Rx ring address to the hardware. */
2656 	paddr = JME_RX_RING_ADDR(sc, 0);
2657 	CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2658 	CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2659 
2660 	/* Clear receive filter. */
2661 	CSR_WRITE_4(sc, JME_RXMAC, 0);
2662 	/* Set up the receive filter. */
2663 	jme_set_filter(sc);
2664 	jme_set_vlan(sc);
2665 
2666 	/*
2667 	 * Disable all WOL bits as WOL can interfere with normal Rx
2668 	 * operation. Also clear the WOL detection status bits.
2669 	 */
2670 	reg = CSR_READ_4(sc, JME_PMCS);
2671 	reg &= ~PMCS_WOL_ENB_MASK;
2672 	CSR_WRITE_4(sc, JME_PMCS, reg);
2673 
2674 	reg = CSR_READ_4(sc, JME_RXMAC);
2675 	/*
2676 	 * Pad 10 bytes right before the received frame. This greatly
2677 	 * helps Rx performance on strict-alignment architectures as
2678 	 * there is no need to copy the frame to align the payload.
2679 	 */
2680 	reg |= RXMAC_PAD_10BYTES;
2681 	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2682 		reg |= RXMAC_CSUM_ENB;
2683 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2684 
2685 	/* Configure general purpose reg0 */
2686 	reg = CSR_READ_4(sc, JME_GPREG0);
2687 	reg &= ~GPREG0_PCC_UNIT_MASK;
2688 	/* Set PCC timer resolution to micro-seconds unit. */
2689 	reg |= GPREG0_PCC_UNIT_US;
2690 	/*
2691 	 * Disable all shadow register posting as we have to read
2692 	 * JME_INTR_STATUS register in jme_int_task. Also it seems
2693 	 * that it's hard to synchronize interrupt status between
2694 	 * hardware and software with shadow posting due to
2695 	 * requirements of bus_dmamap_sync(9).
2696 	 */
2697 	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2698 	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2699 	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2700 	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2701 	/* Disable posting of DW0. */
2702 	reg &= ~GPREG0_POST_DW0_ENB;
2703 	/* Clear PME message. */
2704 	reg &= ~GPREG0_PME_ENB;
2705 	/* Set PHY address. */
2706 	reg &= ~GPREG0_PHY_ADDR_MASK;
2707 	reg |= sc->jme_phyaddr;
2708 	CSR_WRITE_4(sc, JME_GPREG0, reg);
2709 
2710 	/* Configure Tx queue 0 packet completion coalescing. */
2711 	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
2712 	    PCCTX_COAL_TO_MASK;
2713 	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
2714 	    PCCTX_COAL_PKT_MASK;
2715 	reg |= PCCTX_COAL_TXQ0;
2716 	CSR_WRITE_4(sc, JME_PCCTX, reg);
2717 
2718 	/* Configure Rx queue 0 packet completion coalescing. */
2719 	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
2720 	    PCCRX_COAL_TO_MASK;
2721 	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
2722 	    PCCRX_COAL_PKT_MASK;
2723 	CSR_WRITE_4(sc, JME_PCCRX0, reg);
2724 
2725 	/* Configure shadow status block but don't enable posting. */
2726 	paddr = sc->jme_rdata.jme_ssb_block_paddr;
2727 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2728 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2729 
2730 	/* Disable Timer 1 and Timer 2. */
2731 	CSR_WRITE_4(sc, JME_TIMER1, 0);
2732 	CSR_WRITE_4(sc, JME_TIMER2, 0);
2733 
2734 	/* Configure retry transmit period, retry limit value. */
2735 	CSR_WRITE_4(sc, JME_TXTRHD,
2736 	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2737 	    TXTRHD_RT_PERIOD_MASK) |
2738 	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2739 	    TXTRHD_RT_LIMIT_MASK));
2740 
2741 	/* Disable RSS. */
2742 	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
2743 
2744 	/* Initialize the interrupt mask. */
2745 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2746 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2747 
2748 	/*
2749 	 * Enabling Tx/Rx DMA engines and Rx queue processing is
2750 	 * done after detection of valid link in jme_link_task.
2751 	 */
2752 
2753 	sc->jme_flags &= ~JME_FLAG_LINK;
2754 	/* Set the current media. */
2755 	mii_mediachg(mii);
2756 
2757 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2758 
2759 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2760 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2761 }
2762 
2763 static void
2764 jme_stop(struct jme_softc *sc)
2765 {
2766 	struct ifnet *ifp;
2767 	struct jme_txdesc *txd;
2768 	struct jme_rxdesc *rxd;
2769 	int i;
2770 
2771 	JME_LOCK_ASSERT(sc);
2772 	/*
2773 	 * Mark the interface down and cancel the watchdog timer.
2774 	 */
2775 	ifp = sc->jme_ifp;
2776 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2777 	sc->jme_flags &= ~JME_FLAG_LINK;
2778 	callout_stop(&sc->jme_tick_ch);
2779 	sc->jme_watchdog_timer = 0;
2780 
2781 	/*
2782 	 * Disable interrupts.
2783 	 */
2784 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2785 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2786 
2787 	/* Disable updating shadow status block. */
2788 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2789 	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2790 
2791 	/* Stop receiver, transmitter. */
2792 	jme_stop_rx(sc);
2793 	jme_stop_tx(sc);
2794 
2795 	/* Reclaim Rx/Tx buffers that have been completed. */
2796 	jme_rxintr(sc, JME_RX_RING_CNT);
2797 	if (sc->jme_cdata.jme_rxhead != NULL)
2798 		m_freem(sc->jme_cdata.jme_rxhead);
2799 	JME_RXCHAIN_RESET(sc);
2800 	jme_txeof(sc);
2801 	/*
2802 	 * Free RX and TX mbufs still in the queues.
2803 	 */
2804 	for (i = 0; i < JME_RX_RING_CNT; i++) {
2805 		rxd = &sc->jme_cdata.jme_rxdesc[i];
2806 		if (rxd->rx_m != NULL) {
2807 			bus_dmamap_sync(sc->jme_cdata.jme_rx_tag,
2808 			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
2809 			bus_dmamap_unload(sc->jme_cdata.jme_rx_tag,
2810 			    rxd->rx_dmamap);
2811 			m_freem(rxd->rx_m);
2812 			rxd->rx_m = NULL;
2813 		}
2814 	}
2815 	for (i = 0; i < JME_TX_RING_CNT; i++) {
2816 		txd = &sc->jme_cdata.jme_txdesc[i];
2817 		if (txd->tx_m != NULL) {
2818 			bus_dmamap_sync(sc->jme_cdata.jme_tx_tag,
2819 			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2820 			bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
2821 			    txd->tx_dmamap);
2822 			m_freem(txd->tx_m);
2823 			txd->tx_m = NULL;
2824 			txd->tx_ndesc = 0;
2825 		}
2826 	}
2827 }
2828 
2829 static void
2830 jme_stop_tx(struct jme_softc *sc)
2831 {
2832 	uint32_t reg;
2833 	int i;
2834 
2835 	reg = CSR_READ_4(sc, JME_TXCSR);
2836 	if ((reg & TXCSR_TX_ENB) == 0)
2837 		return;
2838 	reg &= ~TXCSR_TX_ENB;
2839 	CSR_WRITE_4(sc, JME_TXCSR, reg);
2840 	for (i = JME_TIMEOUT; i > 0; i--) {
2841 		DELAY(1);
2842 		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2843 			break;
2844 	}
2845 	if (i == 0)
2846 		device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2847 }
2848 
2849 static void
2850 jme_stop_rx(struct jme_softc *sc)
2851 {
2852 	uint32_t reg;
2853 	int i;
2854 
2855 	reg = CSR_READ_4(sc, JME_RXCSR);
2856 	if ((reg & RXCSR_RX_ENB) == 0)
2857 		return;
2858 	reg &= ~RXCSR_RX_ENB;
2859 	CSR_WRITE_4(sc, JME_RXCSR, reg);
2860 	for (i = JME_TIMEOUT; i > 0; i--) {
2861 		DELAY(1);
2862 		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2863 			break;
2864 	}
2865 	if (i == 0)
2866 		device_printf(sc->jme_dev, "stopping receiver timeout!\n");
2867 }
2868 
2869 static void
2870 jme_init_tx_ring(struct jme_softc *sc)
2871 {
2872 	struct jme_ring_data *rd;
2873 	struct jme_txdesc *txd;
2874 	int i;
2875 
2876 	sc->jme_cdata.jme_tx_prod = 0;
2877 	sc->jme_cdata.jme_tx_cons = 0;
2878 	sc->jme_cdata.jme_tx_cnt = 0;
2879 
2880 	rd = &sc->jme_rdata;
2881 	bzero(rd->jme_tx_ring, JME_TX_RING_SIZE);
2882 	for (i = 0; i < JME_TX_RING_CNT; i++) {
2883 		txd = &sc->jme_cdata.jme_txdesc[i];
2884 		txd->tx_m = NULL;
2885 		txd->tx_desc = &rd->jme_tx_ring[i];
2886 		txd->tx_ndesc = 0;
2887 	}
2888 
2889 	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2890 	    sc->jme_cdata.jme_tx_ring_map,
2891 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2892 }
2893 
2894 static void
2895 jme_init_ssb(struct jme_softc *sc)
2896 {
2897 	struct jme_ring_data *rd;
2898 
2899 	rd = &sc->jme_rdata;
2900 	bzero(rd->jme_ssb_block, JME_SSB_SIZE);
2901 	bus_dmamap_sync(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map,
2902 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2903 }
2904 
2905 static int
2906 jme_init_rx_ring(struct jme_softc *sc)
2907 {
2908 	struct jme_ring_data *rd;
2909 	struct jme_rxdesc *rxd;
2910 	int i;
2911 
2912 	sc->jme_cdata.jme_rx_cons = 0;
2913 	JME_RXCHAIN_RESET(sc);
2914 	atomic_set_int(&sc->jme_morework, 0);
2915 
2916 	rd = &sc->jme_rdata;
2917 	bzero(rd->jme_rx_ring, JME_RX_RING_SIZE);
2918 	for (i = 0; i < JME_RX_RING_CNT; i++) {
2919 		rxd = &sc->jme_cdata.jme_rxdesc[i];
2920 		rxd->rx_m = NULL;
2921 		rxd->rx_desc = &rd->jme_rx_ring[i];
2922 		if (jme_newbuf(sc, rxd) != 0)
2923 			return (ENOBUFS);
2924 	}
2925 
2926 	bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2927 	    sc->jme_cdata.jme_rx_ring_map,
2928 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2929 
2930 	return (0);
2931 }
2932 
2933 static int
2934 jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd)
2935 {
2936 	struct jme_desc *desc;
2937 	struct mbuf *m;
2938 	bus_dma_segment_t segs[1];
2939 	bus_dmamap_t map;
2940 	int nsegs;
2941 
2942 	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2943 	if (m == NULL)
2944 		return (ENOBUFS);
2945 	/*
2946 	 * The JMC250 has a 64bit boundary alignment limitation, so
2947 	 * jme(4) takes advantage of the hardware's 10 byte padding
2948 	 * feature to avoid copying the entire frame just to align the
2949 	 * IP header on a 32bit boundary.
2950 	 */
2951 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2952 
2953 	if (bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_rx_tag,
2954 	    sc->jme_cdata.jme_rx_sparemap, m, segs, &nsegs, 0) != 0) {
2955 		m_freem(m);
2956 		return (ENOBUFS);
2957 	}
2958 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2959 
2960 	if (rxd->rx_m != NULL) {
2961 		bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
2962 		    BUS_DMASYNC_POSTREAD);
2963 		bus_dmamap_unload(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap);
2964 	}
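	/*
	 * The new mbuf was loaded into the spare map above, so the old
	 * map is recycled as the next spare; a DMA load failure can
	 * therefore never leave this ring entry unmapped.
	 */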
2965 	map = rxd->rx_dmamap;
2966 	rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
2967 	sc->jme_cdata.jme_rx_sparemap = map;
2968 	bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
2969 	    BUS_DMASYNC_PREREAD);
2970 	rxd->rx_m = m;
2971 
2972 	desc = rxd->rx_desc;
2973 	desc->buflen = htole32(segs[0].ds_len);
2974 	desc->addr_lo = htole32(JME_ADDR_LO(segs[0].ds_addr));
2975 	desc->addr_hi = htole32(JME_ADDR_HI(segs[0].ds_addr));
2976 	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2977 
2978 	return (0);
2979 }
2980 
2981 static void
2982 jme_set_vlan(struct jme_softc *sc)
2983 {
2984 	struct ifnet *ifp;
2985 	uint32_t reg;
2986 
2987 	JME_LOCK_ASSERT(sc);
2988 
2989 	ifp = sc->jme_ifp;
2990 	reg = CSR_READ_4(sc, JME_RXMAC);
2991 	reg &= ~RXMAC_VLAN_ENB;
2992 	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2993 		reg |= RXMAC_VLAN_ENB;
2994 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2995 }
2996 
2997 static void
2998 jme_set_filter(struct jme_softc *sc)
2999 {
3000 	struct ifnet *ifp;
3001 	struct ifmultiaddr *ifma;
3002 	uint32_t crc;
3003 	uint32_t mchash[2];
3004 	uint32_t rxcfg;
3005 
3006 	JME_LOCK_ASSERT(sc);
3007 
3008 	ifp = sc->jme_ifp;
3009 
3010 	rxcfg = CSR_READ_4(sc, JME_RXMAC);
3011 	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
3012 	    RXMAC_ALLMULTI);
3013 	/* Always accept frames destined to our station address. */
3014 	rxcfg |= RXMAC_UNICAST;
3015 	if ((ifp->if_flags & IFF_BROADCAST) != 0)
3016 		rxcfg |= RXMAC_BROADCAST;
3017 	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
3018 		if ((ifp->if_flags & IFF_PROMISC) != 0)
3019 			rxcfg |= RXMAC_PROMISC;
3020 		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
3021 			rxcfg |= RXMAC_ALLMULTI;
3022 		CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
3023 		CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
3024 		CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3025 		return;
3026 	}
3027 
3028 	/*
3029 	 * Set up the multicast address filter by passing all multicast
3030 	 * addresses through a CRC generator, and then using the low-order
3031 	 * 6 bits as an index into the 64 bit multicast hash table.  The
3032 	 * high order bits select the register, while the rest of the bits
3033 	 * select the bit within the register.
3034 	 */
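	/*
	 * Example: if the low 6 CRC bits are 0x2f (47), bit 47 >> 5 = 1
	 * selects mchash[1] and 47 & 0x1f = 15 selects bit 15 within it.
	 */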
3035 	rxcfg |= RXMAC_MULTICAST;
3036 	bzero(mchash, sizeof(mchash));
3037 
3038 	IF_ADDR_LOCK(ifp);
3039 	TAILQ_FOREACH(ifma, &sc->jme_ifp->if_multiaddrs, ifma_link) {
3040 		if (ifma->ifma_addr->sa_family != AF_LINK)
3041 			continue;
3042 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
3043 		    ifma->ifma_addr), ETHER_ADDR_LEN);
3044 
3045 		/* Just want the 6 least significant bits. */
3046 		crc &= 0x3f;
3047 
3048 		/* Set the corresponding bit in the hash table. */
3049 		mchash[crc >> 5] |= 1 << (crc & 0x1f);
3050 	}
3051 	IF_ADDR_UNLOCK(ifp);
3052 
3053 	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
3054 	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
3055 	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3056 }
3057 
3058 static int
3059 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3060 {
3061 	int error, value;
3062 
3063 	if (arg1 == NULL)
3064 		return (EINVAL);
3065 	value = *(int *)arg1;
3066 	error = sysctl_handle_int(oidp, &value, 0, req);
3067 	if (error || req->newptr == NULL)
3068 		return (error);
3069 	if (value < low || value > high)
3070 		return (EINVAL);
3071 	*(int *)arg1 = value;
3072 
3073 	return (0);
3074 }
3075 
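/*
 * The handlers below simply bind sysctl_int_range() to the limits of
 * each tunable so out-of-range values are rejected with EINVAL.
 */
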
3076 static int
3077 sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS)
3078 {
3079 	return (sysctl_int_range(oidp, arg1, arg2, req,
3080 	    PCCTX_COAL_TO_MIN, PCCTX_COAL_TO_MAX));
3081 }
3082 
3083 static int
3084 sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
3085 {
3086 	return (sysctl_int_range(oidp, arg1, arg2, req,
3087 	    PCCTX_COAL_PKT_MIN, PCCTX_COAL_PKT_MAX));
3088 }
3089 
3090 static int
3091 sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS)
3092 {
3093 	return (sysctl_int_range(oidp, arg1, arg2, req,
3094 	    PCCRX_COAL_TO_MIN, PCCRX_COAL_TO_MAX));
3095 }
3096 
3097 static int
3098 sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
3099 {
3100 	return (sysctl_int_range(oidp, arg1, arg2, req,
3101 	    PCCRX_COAL_PKT_MIN, PCCRX_COAL_PKT_MAX));
3102 }
3103 
3104 static int
3105 sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS)
3106 {
3107 	return (sysctl_int_range(oidp, arg1, arg2, req,
3108 	    JME_PROC_MIN, JME_PROC_MAX));
3109 }
3110