xref: /freebsd/sys/dev/jme/if_jme.c (revision f7c4bd95ba735bd6a5454b4953945a99cefbb80c)
1 /*-
2  * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/bus.h>
34 #include <sys/endian.h>
35 #include <sys/kernel.h>
36 #include <sys/malloc.h>
37 #include <sys/mbuf.h>
38 #include <sys/rman.h>
39 #include <sys/module.h>
40 #include <sys/proc.h>
41 #include <sys/queue.h>
42 #include <sys/socket.h>
43 #include <sys/sockio.h>
44 #include <sys/sysctl.h>
45 #include <sys/taskqueue.h>
46 
47 #include <net/bpf.h>
48 #include <net/if.h>
49 #include <net/if_arp.h>
50 #include <net/ethernet.h>
51 #include <net/if_dl.h>
52 #include <net/if_media.h>
53 #include <net/if_types.h>
54 #include <net/if_vlan_var.h>
55 
56 #include <netinet/in.h>
57 #include <netinet/in_systm.h>
58 #include <netinet/ip.h>
59 #include <netinet/tcp.h>
60 
61 #include <dev/mii/mii.h>
62 #include <dev/mii/miivar.h>
63 
64 #include <dev/pci/pcireg.h>
65 #include <dev/pci/pcivar.h>
66 
67 #include <machine/atomic.h>
68 #include <machine/bus.h>
69 #include <machine/in_cksum.h>
70 
71 #include <dev/jme/if_jmereg.h>
72 #include <dev/jme/if_jmevar.h>
73 
74 /* "device miibus" required.  See GENERIC if you get errors here. */
75 #include "miibus_if.h"
76 
77 /* Define the following to disable printing Rx errors. */
78 #undef	JME_SHOW_ERRORS
79 
80 #define	JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
81 
82 MODULE_DEPEND(jme, pci, 1, 1, 1);
83 MODULE_DEPEND(jme, ether, 1, 1, 1);
84 MODULE_DEPEND(jme, miibus, 1, 1, 1);
85 
86 /* Tunables. */
87 static int msi_disable = 0;
88 static int msix_disable = 0;
89 TUNABLE_INT("hw.jme.msi_disable", &msi_disable);
90 TUNABLE_INT("hw.jme.msix_disable", &msix_disable);
91 
92 /*
93  * Devices supported by this driver.
94  */
95 static struct jme_dev {
96 	uint16_t	jme_vendorid;
97 	uint16_t	jme_deviceid;
98 	const char	*jme_name;
99 } jme_devs[] = {
100 	{ VENDORID_JMICRON, DEVICEID_JMC250,
101 	    "JMicron Inc, JMC250 Gigabit Ethernet" },
102 	{ VENDORID_JMICRON, DEVICEID_JMC260,
103 	    "JMicron Inc, JMC260 Fast Ethernet" },
104 };
105 
106 static int jme_miibus_readreg(device_t, int, int);
107 static int jme_miibus_writereg(device_t, int, int, int);
108 static void jme_miibus_statchg(device_t);
109 static void jme_mediastatus(struct ifnet *, struct ifmediareq *);
110 static int jme_mediachange(struct ifnet *);
111 static int jme_probe(device_t);
112 static int jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
113 static int jme_eeprom_macaddr(struct jme_softc *);
114 static void jme_reg_macaddr(struct jme_softc *);
115 static void jme_map_intr_vector(struct jme_softc *);
116 static int jme_attach(device_t);
117 static int jme_detach(device_t);
118 static void jme_sysctl_node(struct jme_softc *);
119 static void jme_dmamap_cb(void *, bus_dma_segment_t *, int, int);
120 static int jme_dma_alloc(struct jme_softc *);
121 static void jme_dma_free(struct jme_softc *);
122 static int jme_shutdown(device_t);
123 static void jme_setlinkspeed(struct jme_softc *);
124 static void jme_setwol(struct jme_softc *);
125 static int jme_suspend(device_t);
126 static int jme_resume(device_t);
127 static int jme_encap(struct jme_softc *, struct mbuf **);
128 static void jme_tx_task(void *, int);
129 static void jme_start(struct ifnet *);
130 static void jme_watchdog(struct jme_softc *);
131 static int jme_ioctl(struct ifnet *, u_long, caddr_t);
132 static void jme_mac_config(struct jme_softc *);
133 static void jme_link_task(void *, int);
134 static int jme_intr(void *);
135 static void jme_int_task(void *, int);
136 static void jme_txeof(struct jme_softc *);
137 static __inline void jme_discard_rxbuf(struct jme_softc *, int);
138 static void jme_rxeof(struct jme_softc *);
139 static int jme_rxintr(struct jme_softc *, int);
140 static void jme_tick(void *);
141 static void jme_reset(struct jme_softc *);
142 static void jme_init(void *);
143 static void jme_init_locked(struct jme_softc *);
144 static void jme_stop(struct jme_softc *);
145 static void jme_stop_tx(struct jme_softc *);
146 static void jme_stop_rx(struct jme_softc *);
147 static int jme_init_rx_ring(struct jme_softc *);
148 static void jme_init_tx_ring(struct jme_softc *);
149 static void jme_init_ssb(struct jme_softc *);
150 static int jme_newbuf(struct jme_softc *, struct jme_rxdesc *);
151 static void jme_set_vlan(struct jme_softc *);
152 static void jme_set_filter(struct jme_softc *);
153 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
154 static int sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS);
155 static int sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
156 static int sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS);
157 static int sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
158 static int sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS);
159 
160 
161 static device_method_t jme_methods[] = {
162 	/* Device interface. */
163 	DEVMETHOD(device_probe,		jme_probe),
164 	DEVMETHOD(device_attach,	jme_attach),
165 	DEVMETHOD(device_detach,	jme_detach),
166 	DEVMETHOD(device_shutdown,	jme_shutdown),
167 	DEVMETHOD(device_suspend,	jme_suspend),
168 	DEVMETHOD(device_resume,	jme_resume),
169 
170 	/* MII interface. */
171 	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
172 	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
173 	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),
174 
175 	{ NULL, NULL }
176 };
177 
178 static driver_t jme_driver = {
179 	"jme",
180 	jme_methods,
181 	sizeof(struct jme_softc)
182 };
183 
184 static devclass_t jme_devclass;
185 
186 DRIVER_MODULE(jme, pci, jme_driver, jme_devclass, 0, 0);
187 DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, 0, 0);
188 
189 static struct resource_spec jme_res_spec_mem[] = {
190 	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
191 	{ -1,			0,		0 }
192 };
193 
194 static struct resource_spec jme_irq_spec_legacy[] = {
195 	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
196 	{ -1,			0,		0 }
197 };
198 
199 static struct resource_spec jme_irq_spec_msi[] = {
200 	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
201 	{ SYS_RES_IRQ,		2,		RF_ACTIVE },
202 	{ SYS_RES_IRQ,		3,		RF_ACTIVE },
203 	{ SYS_RES_IRQ,		4,		RF_ACTIVE },
204 	{ SYS_RES_IRQ,		5,		RF_ACTIVE },
205 	{ SYS_RES_IRQ,		6,		RF_ACTIVE },
206 	{ SYS_RES_IRQ,		7,		RF_ACTIVE },
207 	{ SYS_RES_IRQ,		8,		RF_ACTIVE },
208 	{ -1,			0,		0 }
209 };
210 
211 /*
212  *	Read a PHY register on the MII of the JMC250.
213  */
214 static int
215 jme_miibus_readreg(device_t dev, int phy, int reg)
216 {
217 	struct jme_softc *sc;
218 	uint32_t val;
219 	int i;
220 
221 	sc = device_get_softc(dev);
222 
223 	/* For FPGA version, PHY address 0 should be ignored. */
224 	if ((sc->jme_flags & JME_FLAG_FPGA) != 0) {
225 		if (phy == 0)
226 			return (0);
227 	} else {
228 		if (sc->jme_phyaddr != phy)
229 			return (0);
230 	}
231 
232 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
233 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
234 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
235 		DELAY(1);
236 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
237 			break;
238 	}
239 
240 	if (i == 0) {
241 		device_printf(sc->jme_dev, "phy read timeout : %d\n", reg);
242 		return (0);
243 	}
244 
245 	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
246 }
247 
248 /*
249  *	Write a PHY register on the MII of the JMC250.
250  */
251 static int
252 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
253 {
254 	struct jme_softc *sc;
255 	int i;
256 
257 	sc = device_get_softc(dev);
258 
259 	/* For FPGA version, PHY address 0 should be ignored. */
260 	if ((sc->jme_flags & JME_FLAG_FPGA) != 0) {
261 		if (phy == 0)
262 			return (0);
263 	} else {
264 		if (sc->jme_phyaddr != phy)
265 			return (0);
266 	}
267 
268 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
269 	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
270 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
271 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
272 		DELAY(1);
273 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
274 			break;
275 	}
276 
277 	if (i == 0)
278 		device_printf(sc->jme_dev, "phy write timeout : %d\n", reg);
279 
280 	return (0);
281 }
282 
/*
 *	Callback from MII layer when media changes.
 */
static void
jme_miibus_statchg(device_t dev)
{
	struct jme_softc *sc;

	sc = device_get_softc(dev);
	/* Defer the actual link-state processing to jme_link_task(). */
	taskqueue_enqueue(taskqueue_swi, &sc->jme_link_task);
}
294 
295 /*
296  *	Get the current interface media status.
297  */
298 static void
299 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
300 {
301 	struct jme_softc *sc;
302 	struct mii_data *mii;
303 
304 	sc = ifp->if_softc;
305 	JME_LOCK(sc);
306 	mii = device_get_softc(sc->jme_miibus);
307 
308 	mii_pollstat(mii);
309 	ifmr->ifm_status = mii->mii_media_status;
310 	ifmr->ifm_active = mii->mii_media_active;
311 	JME_UNLOCK(sc);
312 }
313 
314 /*
315  *	Set hardware to newly-selected media.
316  */
317 static int
318 jme_mediachange(struct ifnet *ifp)
319 {
320 	struct jme_softc *sc;
321 	struct mii_data *mii;
322 	struct mii_softc *miisc;
323 	int error;
324 
325 	sc = ifp->if_softc;
326 	JME_LOCK(sc);
327 	mii = device_get_softc(sc->jme_miibus);
328 	if (mii->mii_instance != 0) {
329 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
330 			mii_phy_reset(miisc);
331 	}
332 	error = mii_mediachg(mii);
333 	JME_UNLOCK(sc);
334 
335 	return (error);
336 }
337 
338 static int
339 jme_probe(device_t dev)
340 {
341 	struct jme_dev *sp;
342 	int i;
343 	uint16_t vendor, devid;
344 
345 	vendor = pci_get_vendor(dev);
346 	devid = pci_get_device(dev);
347 	sp = jme_devs;
348 	for (i = 0; i < sizeof(jme_devs) / sizeof(jme_devs[0]);
349 	    i++, sp++) {
350 		if (vendor == sp->jme_vendorid &&
351 		    devid == sp->jme_deviceid) {
352 			device_set_desc(dev, sp->jme_name);
353 			return (BUS_PROBE_DEFAULT);
354 		}
355 	}
356 
357 	return (ENXIO);
358 }
359 
/*
 * Read one byte from the SMB-attached EEPROM at the given address.
 * Returns 0 on success with the byte stored in *val, or ETIMEDOUT if
 * the SMB interface never goes idle or the read never completes.
 */
static int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	*val = 0;
	/* Wait for the SMB hardware to become idle. */
	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
		return (ETIMEDOUT);
	}

	/* Trigger a read at the requested EEPROM address... */
	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	/* ...and wait for the trigger bit to self-clear. */
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
		return (ETIMEDOUT);
	}

	/* Extract the returned data byte from the interface register. */
	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}
398 
/*
 * Try to recover the station address from the EEPROM descriptor table.
 * The EEPROM starts with a two-byte signature followed by 3-byte
 * descriptors (flags/function, register, value); we collect any
 * descriptors that target the PAR0..PAR0+5 station-address registers.
 * Returns 0 with sc->jme_eaddr filled on success, ENOENT otherwise.
 */
static int
jme_eeprom_macaddr(struct jme_softc *sc)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	offset = 0;
	/* Verify the two-byte EEPROM signature first. */
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Only function-0/page-BAR1 descriptors are of interest. */
		if ((uint8_t)JME_EEPROM_MKDESC(JME_EEPROM_FUNC0,
		    JME_EEPROM_PAGE_BAR1) == fup) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				/* One more byte of the station address. */
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	/* Succeed only if all six address bytes were found. */
	if (match == ETHER_ADDR_LEN) {
		bcopy(eaddr, sc->jme_eaddr, ETHER_ADDR_LEN);
		return (0);
	}

	return (ENOENT);
}
445 
446 static void
447 jme_reg_macaddr(struct jme_softc *sc)
448 {
449 	uint32_t par0, par1;
450 
451 	/* Read station address. */
452 	par0 = CSR_READ_4(sc, JME_PAR0);
453 	par1 = CSR_READ_4(sc, JME_PAR1);
454 	par1 &= 0xFFFF;
455 	if ((par0 == 0 && par1 == 0) ||
456 	    (par0 == 0xFFFFFFFF && par1 == 0xFFFF)) {
457 		device_printf(sc->jme_dev,
458 		    "generating fake ethernet address.\n");
459 		par0 = arc4random();
460 		/* Set OUI to JMicron. */
461 		sc->jme_eaddr[0] = 0x00;
462 		sc->jme_eaddr[1] = 0x1B;
463 		sc->jme_eaddr[2] = 0x8C;
464 		sc->jme_eaddr[3] = (par0 >> 16) & 0xff;
465 		sc->jme_eaddr[4] = (par0 >> 8) & 0xff;
466 		sc->jme_eaddr[5] = par0 & 0xff;
467 	} else {
468 		sc->jme_eaddr[0] = (par0 >> 0) & 0xFF;
469 		sc->jme_eaddr[1] = (par0 >> 8) & 0xFF;
470 		sc->jme_eaddr[2] = (par0 >> 16) & 0xFF;
471 		sc->jme_eaddr[3] = (par0 >> 24) & 0xFF;
472 		sc->jme_eaddr[4] = (par1 >> 0) & 0xFF;
473 		sc->jme_eaddr[5] = (par1 >> 8) & 0xFF;
474 	}
475 }
476 
477 static void
478 jme_map_intr_vector(struct jme_softc *sc)
479 {
480 	uint32_t map[MSINUM_NUM_INTR_SOURCE / JME_MSI_MESSAGES];
481 
482 	bzero(map, sizeof(map));
483 
484 	/* Map Tx interrupts source to MSI/MSIX vector 2. */
485 	map[MSINUM_REG_INDEX(N_INTR_TXQ0_COMP)] =
486 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ0_COMP);
487 	map[MSINUM_REG_INDEX(N_INTR_TXQ1_COMP)] |=
488 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ1_COMP);
489 	map[MSINUM_REG_INDEX(N_INTR_TXQ2_COMP)] |=
490 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ2_COMP);
491 	map[MSINUM_REG_INDEX(N_INTR_TXQ3_COMP)] |=
492 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ3_COMP);
493 	map[MSINUM_REG_INDEX(N_INTR_TXQ4_COMP)] |=
494 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ4_COMP);
495 	map[MSINUM_REG_INDEX(N_INTR_TXQ4_COMP)] |=
496 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ5_COMP);
497 	map[MSINUM_REG_INDEX(N_INTR_TXQ6_COMP)] |=
498 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ6_COMP);
499 	map[MSINUM_REG_INDEX(N_INTR_TXQ7_COMP)] |=
500 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ7_COMP);
501 	map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL)] |=
502 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL);
503 	map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL_TO)] |=
504 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL_TO);
505 
506 	/* Map Rx interrupts source to MSI/MSIX vector 1. */
507 	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COMP)] =
508 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COMP);
509 	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COMP)] =
510 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COMP);
511 	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COMP)] =
512 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COMP);
513 	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COMP)] =
514 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COMP);
515 	map[MSINUM_REG_INDEX(N_INTR_RXQ0_DESC_EMPTY)] =
516 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_DESC_EMPTY);
517 	map[MSINUM_REG_INDEX(N_INTR_RXQ1_DESC_EMPTY)] =
518 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_DESC_EMPTY);
519 	map[MSINUM_REG_INDEX(N_INTR_RXQ2_DESC_EMPTY)] =
520 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_DESC_EMPTY);
521 	map[MSINUM_REG_INDEX(N_INTR_RXQ3_DESC_EMPTY)] =
522 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_DESC_EMPTY);
523 	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL)] =
524 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL);
525 	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL)] =
526 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL);
527 	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL)] =
528 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL);
529 	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL)] =
530 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL);
531 	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL_TO)] =
532 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL_TO);
533 	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL_TO)] =
534 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL_TO);
535 	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL_TO)] =
536 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL_TO);
537 	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL_TO)] =
538 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL_TO);
539 
540 	/* Map all other interrupts source to MSI/MSIX vector 0. */
541 	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 0, map[0]);
542 	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 1, map[1]);
543 	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 2, map[2]);
544 	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 3, map[3]);
545 }
546 
547 static int
548 jme_attach(device_t dev)
549 {
550 	struct jme_softc *sc;
551 	struct ifnet *ifp;
552 	struct mii_softc *miisc;
553 	struct mii_data *mii;
554 	uint32_t reg;
555 	uint16_t burst;
556 	int error, i, msic, msixc, pmc;
557 
558 	error = 0;
559 	sc = device_get_softc(dev);
560 	sc->jme_dev = dev;
561 
562 	mtx_init(&sc->jme_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
563 	    MTX_DEF);
564 	callout_init_mtx(&sc->jme_tick_ch, &sc->jme_mtx, 0);
565 	TASK_INIT(&sc->jme_int_task, 0, jme_int_task, sc);
566 	TASK_INIT(&sc->jme_link_task, 0, jme_link_task, sc);
567 
568 	/*
569 	 * Map the device. JMC250 supports both memory mapped and I/O
570 	 * register space access. Because I/O register access should
571 	 * use different BARs to access registers it's waste of time
572 	 * to use I/O register spce access. JMC250 uses 16K to map
573 	 * entire memory space.
574 	 */
575 	pci_enable_busmaster(dev);
576 	sc->jme_res_spec = jme_res_spec_mem;
577 	sc->jme_irq_spec = jme_irq_spec_legacy;
578 	error = bus_alloc_resources(dev, sc->jme_res_spec, sc->jme_res);
579 	if (error != 0) {
580 		device_printf(dev, "cannot allocate memory resources.\n");
581 		goto fail;
582 	}
583 
584 	/* Allocate IRQ resources. */
585 	msixc = pci_msix_count(dev);
586 	msic = pci_msi_count(dev);
587 	if (bootverbose) {
588 		device_printf(dev, "MSIX count : %d\n", msixc);
589 		device_printf(dev, "MSI count : %d\n", msic);
590 	}
591 
592 	/* Prefer MSIX over MSI. */
593 	if (msix_disable == 0 || msi_disable == 0) {
594 		if (msix_disable == 0 && msixc == JME_MSIX_MESSAGES &&
595 		    pci_alloc_msix(dev, &msixc) == 0) {
596 			if (msic == JME_MSIX_MESSAGES) {
597 				device_printf(dev, "Using %d MSIX messages.\n",
598 				    msixc);
599 				sc->jme_flags |= JME_FLAG_MSIX;
600 				sc->jme_irq_spec = jme_irq_spec_msi;
601 			} else
602 				pci_release_msi(dev);
603 		}
604 		if (msi_disable == 0 && (sc->jme_flags & JME_FLAG_MSIX) == 0 &&
605 		    msic == JME_MSI_MESSAGES &&
606 		    pci_alloc_msi(dev, &msic) == 0) {
607 			if (msic == JME_MSI_MESSAGES) {
608 				device_printf(dev, "Using %d MSI messages.\n",
609 				    msic);
610 				sc->jme_flags |= JME_FLAG_MSI;
611 				sc->jme_irq_spec = jme_irq_spec_msi;
612 			} else
613 				pci_release_msi(dev);
614 		}
615 		/* Map interrupt vector 0, 1 and 2. */
616 		if ((sc->jme_flags & JME_FLAG_MSI) != 0 ||
617 		    (sc->jme_flags & JME_FLAG_MSIX) != 0)
618 			jme_map_intr_vector(sc);
619 	}
620 
621 	error = bus_alloc_resources(dev, sc->jme_irq_spec, sc->jme_irq);
622 	if (error != 0) {
623 		device_printf(dev, "cannot allocate IRQ resources.\n");
624 		goto fail;
625 	}
626 
627 	sc->jme_rev = pci_get_revid(dev);
628 	if (sc->jme_rev == DEVICEREVID_JMC260) {
629 		sc->jme_flags |= JME_FLAG_FASTETH;
630 		sc->jme_flags |= JME_FLAG_NOJUMBO;
631 	}
632 	reg = CSR_READ_4(sc, JME_CHIPMODE);
633 	sc->jme_chip_rev = (reg & CHIPMODE_REV_MASK) >> CHIPMODE_REV_SHIFT;
634 	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
635 	    CHIPMODE_NOT_FPGA)
636 		sc->jme_flags |= JME_FLAG_FPGA;
637 	if (bootverbose) {
638 		device_printf(dev, "PCI device revision : 0x%04x\n",
639 		    sc->jme_rev);
640 		device_printf(dev, "Chip revision : 0x%02x\n",
641 		    sc->jme_chip_rev);
642 		if ((sc->jme_flags & JME_FLAG_FPGA) != 0)
643 			device_printf(dev, "FPGA revision : 0x%04x\n",
644 			    (reg & CHIPMODE_FPGA_REV_MASK) >>
645 			    CHIPMODE_FPGA_REV_SHIFT);
646 	}
647 	if (sc->jme_chip_rev == 0xFF) {
648 		device_printf(dev, "Unknown chip revision : 0x%02x\n",
649 		    sc->jme_rev);
650 		error = ENXIO;
651 		goto fail;
652 	}
653 
654 	/* Reset the ethernet controller. */
655 	jme_reset(sc);
656 
657 	/* Get station address. */
658 	reg = CSR_READ_4(sc, JME_SMBCSR);
659 	if ((reg & SMBCSR_EEPROM_PRESENT) != 0)
660 		error = jme_eeprom_macaddr(sc);
661 	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
662 		if (error != 0 && (bootverbose))
663 			device_printf(sc->jme_dev,
664 			    "ethernet hardware address not found in EEPROM.\n");
665 		jme_reg_macaddr(sc);
666 	}
667 
668 	/*
669 	 * Save PHY address.
670 	 * Integrated JR0211 has fixed PHY address whereas FPGA version
671 	 * requires PHY probing to get correct PHY address.
672 	 */
673 	if ((sc->jme_flags & JME_FLAG_FPGA) == 0) {
674 		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
675 		    GPREG0_PHY_ADDR_MASK;
676 		if (bootverbose)
677 			device_printf(dev, "PHY is at address %d.\n",
678 			    sc->jme_phyaddr);
679 	} else
680 		sc->jme_phyaddr = 0;
681 
682 	/* Set max allowable DMA size. */
683 	if (pci_find_extcap(dev, PCIY_EXPRESS, &i) == 0) {
684 		sc->jme_flags |= JME_FLAG_PCIE;
685 		burst = pci_read_config(dev, i + 0x08, 2);
686 		if (bootverbose) {
687 			device_printf(dev, "Read request size : %d bytes.\n",
688 			    128 << ((burst >> 12) & 0x07));
689 			device_printf(dev, "TLP payload size : %d bytes.\n",
690 			    128 << ((burst >> 5) & 0x07));
691 		}
692 		switch ((burst >> 12) & 0x07) {
693 		case 0:
694 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
695 			break;
696 		case 1:
697 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
698 			break;
699 		default:
700 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
701 			break;
702 		}
703 		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
704 	} else {
705 		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
706 		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
707 	}
708 	/* Create coalescing sysctl node. */
709 	jme_sysctl_node(sc);
710 	if ((error = jme_dma_alloc(sc) != 0))
711 		goto fail;
712 
713 	ifp = sc->jme_ifp = if_alloc(IFT_ETHER);
714 	if (ifp == NULL) {
715 		device_printf(dev, "cannot allocate ifnet structure.\n");
716 		error = ENXIO;
717 		goto fail;
718 	}
719 
720 	ifp->if_softc = sc;
721 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
722 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
723 	ifp->if_ioctl = jme_ioctl;
724 	ifp->if_start = jme_start;
725 	ifp->if_init = jme_init;
726 	ifp->if_snd.ifq_drv_maxlen = JME_TX_RING_CNT - 1;
727 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
728 	IFQ_SET_READY(&ifp->if_snd);
729 	/* JMC250 supports Tx/Rx checksum offload as well as TSO. */
730 	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4;
731 	ifp->if_hwassist = JME_CSUM_FEATURES | CSUM_TSO;
732 	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) {
733 		sc->jme_flags |= JME_FLAG_PMCAP;
734 		ifp->if_capabilities |= IFCAP_WOL_MAGIC;
735 	}
736 	ifp->if_capenable = ifp->if_capabilities;
737 
738 	/* Set up MII bus. */
739 	if ((error = mii_phy_probe(dev, &sc->jme_miibus, jme_mediachange,
740 	    jme_mediastatus)) != 0) {
741 		device_printf(dev, "no PHY found!\n");
742 		goto fail;
743 	}
744 
745 	/*
746 	 * Force PHY to FPGA mode.
747 	 */
748 	if ((sc->jme_flags & JME_FLAG_FPGA) != 0) {
749 		mii = device_get_softc(sc->jme_miibus);
750 		if (mii->mii_instance != 0) {
751 			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
752 				if (miisc->mii_phy != 0) {
753 					sc->jme_phyaddr = miisc->mii_phy;
754 					break;
755 				}
756 			}
757 			if (sc->jme_phyaddr != 0) {
758 				device_printf(sc->jme_dev,
759 				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
760 				/* vendor magic. */
761 				jme_miibus_writereg(dev, sc->jme_phyaddr, 27,
762 				    0x0004);
763 			}
764 		}
765 	}
766 
767 	ether_ifattach(ifp, sc->jme_eaddr);
768 
769 	/* VLAN capability setup */
770 	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
771 	    IFCAP_VLAN_HWCSUM;
772 	ifp->if_capenable = ifp->if_capabilities;
773 
774 	/* Tell the upper layer(s) we support long frames. */
775 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
776 
777 	/* Create local taskq. */
778 	TASK_INIT(&sc->jme_tx_task, 1, jme_tx_task, ifp);
779 	sc->jme_tq = taskqueue_create_fast("jme_taskq", M_WAITOK,
780 	    taskqueue_thread_enqueue, &sc->jme_tq);
781 	if (sc->jme_tq == NULL) {
782 		device_printf(dev, "could not create taskqueue.\n");
783 		ether_ifdetach(ifp);
784 		error = ENXIO;
785 		goto fail;
786 	}
787 	taskqueue_start_threads(&sc->jme_tq, 1, PI_NET, "%s taskq",
788 	    device_get_nameunit(sc->jme_dev));
789 
790 	if ((sc->jme_flags & JME_FLAG_MSIX) != 0)
791 		msic = JME_MSIX_MESSAGES;
792 	else if ((sc->jme_flags & JME_FLAG_MSI) != 0)
793 		msic = JME_MSI_MESSAGES;
794 	else
795 		msic = 1;
796 	for (i = 0; i < msic; i++) {
797 		error = bus_setup_intr(dev, sc->jme_irq[i],
798 		    INTR_TYPE_NET | INTR_MPSAFE, jme_intr, NULL, sc,
799 		    &sc->jme_intrhand[i]);
800 		if (error != 0)
801 			break;
802 	}
803 
804 	if (error != 0) {
805 		device_printf(dev, "could not set up interrupt handler.\n");
806 		taskqueue_free(sc->jme_tq);
807 		sc->jme_tq = NULL;
808 		ether_ifdetach(ifp);
809 		goto fail;
810 	}
811 
812 fail:
813 	if (error != 0)
814 		jme_detach(dev);
815 
816 	return (error);
817 }
818 
819 static int
820 jme_detach(device_t dev)
821 {
822 	struct jme_softc *sc;
823 	struct ifnet *ifp;
824 	int i, msic;
825 
826 	sc = device_get_softc(dev);
827 
828 	ifp = sc->jme_ifp;
829 	if (device_is_attached(dev)) {
830 		JME_LOCK(sc);
831 		sc->jme_flags |= JME_FLAG_DETACH;
832 		jme_stop(sc);
833 		JME_UNLOCK(sc);
834 		callout_drain(&sc->jme_tick_ch);
835 		taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
836 		taskqueue_drain(sc->jme_tq, &sc->jme_tx_task);
837 		taskqueue_drain(taskqueue_swi, &sc->jme_link_task);
838 		ether_ifdetach(ifp);
839 	}
840 
841 	if (sc->jme_tq != NULL) {
842 		taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
843 		taskqueue_free(sc->jme_tq);
844 		sc->jme_tq = NULL;
845 	}
846 
847 	if (sc->jme_miibus != NULL) {
848 		device_delete_child(dev, sc->jme_miibus);
849 		sc->jme_miibus = NULL;
850 	}
851 	bus_generic_detach(dev);
852 	jme_dma_free(sc);
853 
854 	if (ifp != NULL) {
855 		if_free(ifp);
856 		sc->jme_ifp = NULL;
857 	}
858 
859 	msic = 1;
860 	if ((sc->jme_flags & JME_FLAG_MSIX) != 0)
861 		msic = JME_MSIX_MESSAGES;
862 	else if ((sc->jme_flags & JME_FLAG_MSI) != 0)
863 		msic = JME_MSI_MESSAGES;
864 	else
865 		msic = 1;
866 	for (i = 0; i < msic; i++) {
867 		if (sc->jme_intrhand[i] != NULL) {
868 			bus_teardown_intr(dev, sc->jme_irq[i],
869 			    sc->jme_intrhand[i]);
870 			sc->jme_intrhand[i] = NULL;
871 		}
872 	}
873 
874 	bus_release_resources(dev, sc->jme_irq_spec, sc->jme_irq);
875 	if ((sc->jme_flags & (JME_FLAG_MSIX | JME_FLAG_MSI)) != 0)
876 		pci_release_msi(dev);
877 	bus_release_resources(dev, sc->jme_res_spec, sc->jme_res);
878 	mtx_destroy(&sc->jme_mtx);
879 
880 	return (0);
881 }
882 
883 static void
884 jme_sysctl_node(struct jme_softc *sc)
885 {
886 	int error;
887 
888 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->jme_dev),
889 	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev)), OID_AUTO,
890 	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_tx_coal_to,
891 	    0, sysctl_hw_jme_tx_coal_to, "I", "jme tx coalescing timeout");
892 
893 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->jme_dev),
894 	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev)), OID_AUTO,
895 	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_tx_coal_pkt,
896 	    0, sysctl_hw_jme_tx_coal_pkt, "I", "jme tx coalescing packet");
897 
898 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->jme_dev),
899 	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev)), OID_AUTO,
900 	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_rx_coal_to,
901 	    0, sysctl_hw_jme_rx_coal_to, "I", "jme rx coalescing timeout");
902 
903 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->jme_dev),
904 	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev)), OID_AUTO,
905 	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_rx_coal_pkt,
906 	    0, sysctl_hw_jme_rx_coal_pkt, "I", "jme rx coalescing packet");
907 
908 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->jme_dev),
909 	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev)), OID_AUTO,
910 	    "process_limit", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_process_limit,
911 	    0, sysctl_hw_jme_proc_limit, "I",
912 	    "max number of Rx events to process");
913 
914 	/* Pull in device tunables. */
915 	sc->jme_process_limit = JME_PROC_DEFAULT;
916 	error = resource_int_value(device_get_name(sc->jme_dev),
917 	    device_get_unit(sc->jme_dev), "process_limit",
918 	    &sc->jme_process_limit);
919 	if (error == 0) {
920 		if (sc->jme_process_limit < JME_PROC_MIN ||
921 		    sc->jme_process_limit > JME_PROC_MAX) {
922 			device_printf(sc->jme_dev,
923 			    "process_limit value out of range; "
924 			    "using default: %d\n", JME_PROC_DEFAULT);
925 			sc->jme_process_limit = JME_PROC_DEFAULT;
926 		}
927 	}
928 
929 	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
930 	error = resource_int_value(device_get_name(sc->jme_dev),
931 	    device_get_unit(sc->jme_dev), "tx_coal_to", &sc->jme_tx_coal_to);
932 	if (error == 0) {
933 		if (sc->jme_tx_coal_to < PCCTX_COAL_TO_MIN ||
934 		    sc->jme_tx_coal_to > PCCTX_COAL_TO_MAX) {
935 			device_printf(sc->jme_dev,
936 			    "tx_coal_to value out of range; "
937 			    "using default: %d\n", PCCTX_COAL_TO_DEFAULT);
938 			sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
939 		}
940 	}
941 
942 	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
943 	error = resource_int_value(device_get_name(sc->jme_dev),
944 	    device_get_unit(sc->jme_dev), "tx_coal_pkt", &sc->jme_tx_coal_to);
945 	if (error == 0) {
946 		if (sc->jme_tx_coal_pkt < PCCTX_COAL_PKT_MIN ||
947 		    sc->jme_tx_coal_pkt > PCCTX_COAL_PKT_MAX) {
948 			device_printf(sc->jme_dev,
949 			    "tx_coal_pkt value out of range; "
950 			    "using default: %d\n", PCCTX_COAL_PKT_DEFAULT);
951 			sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
952 		}
953 	}
954 
955 	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
956 	error = resource_int_value(device_get_name(sc->jme_dev),
957 	    device_get_unit(sc->jme_dev), "rx_coal_to", &sc->jme_rx_coal_to);
958 	if (error == 0) {
959 		if (sc->jme_rx_coal_to < PCCRX_COAL_TO_MIN ||
960 		    sc->jme_rx_coal_to > PCCRX_COAL_TO_MAX) {
961 			device_printf(sc->jme_dev,
962 			    "rx_coal_to value out of range; "
963 			    "using default: %d\n", PCCRX_COAL_TO_DEFAULT);
964 			sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
965 		}
966 	}
967 
968 	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
969 	error = resource_int_value(device_get_name(sc->jme_dev),
970 	    device_get_unit(sc->jme_dev), "rx_coal_pkt", &sc->jme_rx_coal_to);
971 	if (error == 0) {
972 		if (sc->jme_rx_coal_pkt < PCCRX_COAL_PKT_MIN ||
973 		    sc->jme_rx_coal_pkt > PCCRX_COAL_PKT_MAX) {
974 			device_printf(sc->jme_dev,
975 			    "tx_coal_pkt value out of range; "
976 			    "using default: %d\n", PCCRX_COAL_PKT_DEFAULT);
977 			sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
978 		}
979 	}
980 }
981 
/*
 * Context passed to jme_dmamap_cb() by bus_dmamap_load(); carries the
 * bus address of the single loaded segment back to the caller.
 */
struct jme_dmamap_arg {
	bus_addr_t	jme_busaddr;
};
985 
986 static void
987 jme_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
988 {
989 	struct jme_dmamap_arg *ctx;
990 
991 	if (error != 0)
992 		return;
993 
994 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
995 
996 	ctx = (struct jme_dmamap_arg *)arg;
997 	ctx->jme_busaddr = segs[0].ds_addr;
998 }
999 
/*
 * Allocate and load every DMA resource the controller needs: the Tx/Rx
 * descriptor rings, the shared status block, and per-buffer DMA maps.
 * If the rings land across a 4GB boundary the whole allocation is
 * redone with the address space restricted to 32 bits.
 *
 * Returns 0 on success or a bus_dma error code.  On failure partially
 * created resources are left in the softc; the caller is expected to
 * release them with jme_dma_free() (which tolerates partial state).
 */
static int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_dmamap_arg ctx;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	bus_addr_t lowaddr, rx_ring_end, tx_ring_end;
	int error, i;

	/* Start optimistically with the full DMA address space. */
	lowaddr = BUS_SPACE_MAXADDR;

again:
	/* Create parent ring tag. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */
	    1, 0,			/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_ring_tag);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		goto fail;
	}
	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
	    JME_TX_RING_ALIGN, 0,	/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_TX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    JME_TX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not allocate Tx ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
	    JME_RX_RING_ALIGN, 0,	/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_RX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    JME_RX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not allocate Rx ring DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->jme_cdata.jme_tx_ring_tag,
	    (void **)&sc->jme_rdata.jme_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->jme_cdata.jme_tx_ring_map);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		goto fail;
	}

	ctx.jme_busaddr = 0;
	error = bus_dmamap_load(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
	    JME_TX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.jme_busaddr == 0) {
		device_printf(sc->jme_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		goto fail;
	}
	sc->jme_rdata.jme_tx_ring_paddr = ctx.jme_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->jme_cdata.jme_rx_ring_tag,
	    (void **)&sc->jme_rdata.jme_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->jme_cdata.jme_rx_ring_map);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not allocate DMA'able memory for Rx ring.\n");
		goto fail;
	}

	ctx.jme_busaddr = 0;
	error = bus_dmamap_load(sc->jme_cdata.jme_rx_ring_tag,
	    sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
	    JME_RX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.jme_busaddr == 0) {
		device_printf(sc->jme_dev,
		    "could not load DMA'able memory for Rx ring.\n");
		goto fail;
	}
	sc->jme_rdata.jme_rx_ring_paddr = ctx.jme_busaddr;

	/* Tx/Rx descriptor queue should reside within 4GB boundary. */
	tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr + JME_TX_RING_SIZE;
	rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr + JME_RX_RING_SIZE;
	if ((JME_ADDR_HI(tx_ring_end) !=
	    JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
	    (JME_ADDR_HI(rx_ring_end) !=
	    JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
		device_printf(sc->jme_dev, "4GB boundary crossed, "
		    "switching to 32bit DMA address mode.\n");
		jme_dma_free(sc);
		/* Limit DMA address space to 32bit and try again. */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}

	/* Create parent buffer tag. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_buffer_tag);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not create parent buffer DMA tag.\n");
		goto fail;
	}

	/* Create shadow status block tag. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    JME_SSB_ALIGN, 0,		/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_SSB_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    JME_SSB_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_ssb_tag);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not create shared status block DMA tag.\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_TSO_MAXSIZE,		/* maxsize */
	    JME_MAXTXSEGS,		/* nsegments */
	    JME_TSO_MAXSEGSIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_tx_tag);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_rx_tag);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not create Rx DMA tag.\n");
		goto fail;
	}

	/*
	 * Allocate DMA'able memory and load the DMA map for shared
	 * status block.
	 */
	error = bus_dmamem_alloc(sc->jme_cdata.jme_ssb_tag,
	    (void **)&sc->jme_rdata.jme_ssb_block,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->jme_cdata.jme_ssb_map);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not allocate DMA'able "
		    "memory for shared status block.\n");
		goto fail;
	}

	ctx.jme_busaddr = 0;
	error = bus_dmamap_load(sc->jme_cdata.jme_ssb_tag,
	    sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,
	    JME_SSB_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.jme_busaddr == 0) {
		device_printf(sc->jme_dev, "could not load DMA'able memory "
		    "for shared status block.\n");
		goto fail;
	}
	sc->jme_rdata.jme_ssb_block_paddr = ctx.jme_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->jme_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
	    &sc->jme_cdata.jme_rx_sparemap)) != 0) {
		device_printf(sc->jme_dev,
		    "could not create spare Rx dmamap.\n");
		goto fail;
	}
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->jme_dev,
			    "could not create Rx dmamap.\n");
			goto fail;
		}
	}

	/*
	 * No cleanup here; on error the caller unwinds via
	 * jme_dma_free().  Success path also exits through this label
	 * with error == 0.
	 */
fail:
	return (error);
}
1255 
/*
 * Tear down all DMA resources created by jme_dma_alloc().  Safe to
 * call with partially constructed state: every map/memory/tag pointer
 * is tested before use and NULLed afterwards.  Child resources are
 * released before their parent tags (rings and buffers first, then
 * the parent buffer/ring tags).
 */
static void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	/* Tx ring */
	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
		if (sc->jme_cdata.jme_tx_ring_map)
			bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
			    sc->jme_cdata.jme_tx_ring_map);
		if (sc->jme_cdata.jme_tx_ring_map &&
		    sc->jme_rdata.jme_tx_ring)
			bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
			    sc->jme_rdata.jme_tx_ring,
			    sc->jme_cdata.jme_tx_ring_map);
		sc->jme_rdata.jme_tx_ring = NULL;
		sc->jme_cdata.jme_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
	}
	/* Rx ring */
	if (sc->jme_cdata.jme_rx_ring_tag != NULL) {
		if (sc->jme_cdata.jme_rx_ring_map)
			bus_dmamap_unload(sc->jme_cdata.jme_rx_ring_tag,
			    sc->jme_cdata.jme_rx_ring_map);
		if (sc->jme_cdata.jme_rx_ring_map &&
		    sc->jme_rdata.jme_rx_ring)
			bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag,
			    sc->jme_rdata.jme_rx_ring,
			    sc->jme_cdata.jme_rx_ring_map);
		sc->jme_rdata.jme_rx_ring = NULL;
		sc->jme_cdata.jme_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
		sc->jme_cdata.jme_rx_ring_tag = NULL;
	}
	/* Tx buffers */
	if (sc->jme_cdata.jme_tx_tag != NULL) {
		for (i = 0; i < JME_TX_RING_CNT; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
		sc->jme_cdata.jme_tx_tag = NULL;
	}
	/* Rx buffers */
	if (sc->jme_cdata.jme_rx_tag != NULL) {
		for (i = 0; i < JME_RX_RING_CNT; i++) {
			rxd = &sc->jme_cdata.jme_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->jme_cdata.jme_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
			    sc->jme_cdata.jme_rx_sparemap);
			sc->jme_cdata.jme_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
		sc->jme_cdata.jme_rx_tag = NULL;
	}

	/* Shared status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		if (sc->jme_cdata.jme_ssb_map)
			bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
			    sc->jme_cdata.jme_ssb_map);
		if (sc->jme_cdata.jme_ssb_map && sc->jme_rdata.jme_ssb_block)
			bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
			    sc->jme_rdata.jme_ssb_block,
			    sc->jme_cdata.jme_ssb_map);
		sc->jme_rdata.jme_ssb_block = NULL;
		sc->jme_cdata.jme_ssb_map = NULL;
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	/* Parent tags go last, after all of their children. */
	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}
}
1349 
1350 /*
1351  *	Make sure the interface is stopped at reboot time.
1352  */
1353 static int
1354 jme_shutdown(device_t dev)
1355 {
1356 
1357 	return (jme_suspend(dev));
1358 }
1359 
/*
 * Unlike other ethernet controllers, JMC250 requires
 * explicitly resetting the link speed to 10/100Mbps as a gigabit
 * link will consume more power than 375mA.
 * Note, we reset the link speed to 10/100Mbps with
 * auto-negotiation but we don't know whether that operation
 * would succeed or not as we have no control after powering
 * off. If the renegotiation fails WOL may not work. Running
 * at 1Gbps draws more power than 375mA at 3.3V which is
 * specified in the PCI specification and that would result in
 * completely shutting down power to the ethernet controller.
 *
 * TODO
 *  Save the current negotiated media speed/duplex/flow-control
 *  to the softc and restore the same link again after resuming.
 *  PHY handling such as power down/resetting to 100Mbps
 *  may be better handled in the suspend method in the phy driver.
 */
1378 static void
1379 jme_setlinkspeed(struct jme_softc *sc)
1380 {
1381 	struct mii_data *mii;
1382 	int aneg, i;
1383 
1384 	JME_LOCK_ASSERT(sc);
1385 
1386 	mii = device_get_softc(sc->jme_miibus);
1387 	mii_pollstat(mii);
1388 	aneg = 0;
1389 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
1390 		switch IFM_SUBTYPE(mii->mii_media_active) {
1391 		case IFM_10_T:
1392 		case IFM_100_TX:
1393 			return;
1394 		case IFM_1000_T:
1395 			aneg++;
1396 		default:
1397 			break;
1398 		}
1399 	}
1400 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1401 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1402 	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1403 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1404 	    BMCR_AUTOEN | BMCR_STARTNEG);
1405 	DELAY(1000);
1406 	if (aneg != 0) {
1407 		/* Poll link state until jme(4) get a 10/100 link. */
1408 		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1409 			mii_pollstat(mii);
1410 			if ((mii->mii_media_status & IFM_AVALID) != 0) {
1411 				switch (IFM_SUBTYPE(mii->mii_media_active)) {
1412 				case IFM_10_T:
1413 				case IFM_100_TX:
1414 					jme_mac_config(sc);
1415 					return;
1416 				default:
1417 					break;
1418 				}
1419 			}
1420 			JME_UNLOCK(sc);
1421 			pause("jmelnk", hz);
1422 			JME_LOCK(sc);
1423 		}
1424 		if (i == MII_ANEGTICKS_GIGE)
1425 			device_printf(sc->jme_dev, "establishing link failed, "
1426 			    "WOL may not work!");
1427 	}
1428 	/*
1429 	 * No link, force MAC to have 100Mbps, full-duplex link.
1430 	 * This is the last resort and may/may not work.
1431 	 */
1432 	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1433 	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1434 	jme_mac_config(sc);
1435 }
1436 
/*
 * Program wake-on-LAN state prior to suspend/shutdown.  If the device
 * has no PME capability the PHY is simply powered down.  Otherwise the
 * magic-frame wakeup and PME-enable bits are set according to the
 * interface WOL capabilities; when WOL is disabled altogether the PHY
 * is powered down to save power.  Called with the softc lock held.
 */
static void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	JME_LOCK_ASSERT(sc);

	/* pci_find_extcap() returns non-zero when PCIY_PMG is absent. */
	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	ifp = sc->jme_ifp;
	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_flags & JME_FLAG_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME. */
	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
1482 
/*
 * Suspend method: quiesce the controller, then arm wake-on-LAN.
 * jme_stop() must precede jme_setwol() so the MAC/DMA engines are
 * idle before the WOL registers are programmed.
 */
static int
jme_suspend(device_t dev)
{
	struct jme_softc *sc;

	sc = device_get_softc(dev);

	JME_LOCK(sc);
	jme_stop(sc);
	jme_setwol(sc);
	JME_UNLOCK(sc);

	return (0);
}
1497 
1498 static int
1499 jme_resume(device_t dev)
1500 {
1501 	struct jme_softc *sc;
1502 	struct ifnet *ifp;
1503 	uint16_t pmstat;
1504 	int pmc;
1505 
1506 	sc = device_get_softc(dev);
1507 
1508 	JME_LOCK(sc);
1509 	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1510 		pmstat = pci_read_config(sc->jme_dev,
1511 		    pmc + PCIR_POWER_STATUS, 2);
1512 		/* Disable PME clear PME status. */
1513 		pmstat &= ~PCIM_PSTAT_PMEENABLE;
1514 		pci_write_config(sc->jme_dev,
1515 		    pmc + PCIR_POWER_STATUS, pmstat, 2);
1516 	}
1517 	ifp = sc->jme_ifp;
1518 	if ((ifp->if_flags & IFF_UP) != 0)
1519 		jme_init_locked(sc);
1520 
1521 	JME_UNLOCK(sc);
1522 
1523 	return (0);
1524 }
1525 
/*
 * Encapsulate an mbuf chain into the Tx descriptor ring.
 *
 * For TSO frames the TCP pseudo checksum is recomputed NDIS-style
 * first (see inline comment).  The chain is then DMA-loaded (with one
 * m_collapse() retry on EFBIG), checksum/VLAN flags are set, and one
 * descriptor per segment plus a leading parameter descriptor are
 * filled in.  Ownership of the first descriptor is given to the
 * hardware last so the chip never sees a half-built chain.
 *
 * Returns 0 on success or an errno; on most failures the mbuf is
 * freed and *m_head set to NULL, except ENOBUFS for a full ring,
 * where the caller may requeue *m_head.  Softc lock must be held.
 */
static int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int error, i, nsegs, prod;
	uint32_t cflags, tso_segsz;

	JME_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		/*
		 * Due to the adherence to NDIS specification JMC250
		 * assumes upper stack computed TCP pseudo checksum
		 * without including payload length. This breaks
		 * checksum offload for TSO case so recompute TCP
		 * pseudo checksum for JMC250. Hopefully this wouldn't
		 * be much burden on modern CPUs.
		 */
		struct ether_header *eh;
		struct ip *ip;
		struct tcphdr *tcp;
		uint32_t ip_off, poff;

		if (M_WRITABLE(*m_head) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_DONTWAIT);
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}
		/* Pull up through the Ethernet header. */
		ip_off = sizeof(struct ether_header);
		m = m_pullup(*m_head, ip_off);
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		eh = mtod(m, struct ether_header *);
		/* Check the existence of VLAN tag. */
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			ip_off = sizeof(struct ether_vlan_header);
			m = m_pullup(m, ip_off);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		m = m_pullup(m, ip_off + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, char *) + ip_off);
		poff = ip_off + (ip->ip_hl << 2);
		m = m_pullup(m, poff + sizeof(struct tcphdr));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		tcp = (struct tcphdr *)(mtod(m, char *) + poff);
		/*
		 * Reset IP checksum and recompute TCP pseudo
		 * checksum that NDIS specification requires.
		 */
		ip->ip_sum = 0;
		if (poff + (tcp->th_off << 2) == m->m_pkthdr.len) {
			/* Headers only, no payload: no TSO needed. */
			tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr,
			    htons((tcp->th_off << 2) + IPPROTO_TCP));
			/* No need to TSO, force IP checksum offload. */
			(*m_head)->m_pkthdr.csum_flags &= ~CSUM_TSO;
			(*m_head)->m_pkthdr.csum_flags |= CSUM_IP;
		} else
			tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		*m_head = m;
	}

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		/* Too many segments: collapse the chain and retry once. */
		m = m_collapse(*m_head, M_DONTWAIT, JME_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/*
	 * Check descriptor overrun. Leave one free descriptor.
	 * Since we always use 64bit address mode for transmitting,
	 * each Tx request requires one more dummy descriptor.
	 */
	if (sc->jme_cdata.jme_tx_cnt + nsegs + 1 > JME_TX_RING_CNT - 1) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}

	m = *m_head;
	cflags = 0;
	tso_segsz = 0;
	/* Configure checksum offload and TSO. */
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz <<
		    JME_TD_MSS_SHIFT;
		cflags |= JME_TD_TSO;
	} else {
		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
			cflags |= JME_TD_IPCSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
			cflags |= JME_TD_TCPCSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			cflags |= JME_TD_UDPCSUM;
	}
	/* Configure VLAN. */
	if ((m->m_flags & M_VLANTAG) != 0) {
		cflags |= (m->m_pkthdr.ether_vtag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}

	/* Leading parameter descriptor: flags, MSS and total length. */
	desc = &sc->jme_rdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->buflen = htole32(tso_segsz);
	desc->addr_hi = htole32(m->m_pkthdr.len);
	desc->addr_lo = 0;
	sc->jme_cdata.jme_tx_cnt++;
	JME_DESC_INC(prod, JME_TX_RING_CNT);
	/* One buffer descriptor per DMA segment. */
	for (i = 0; i < nsegs; i++) {
		desc = &sc->jme_rdata.jme_tx_ring[prod];
		desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT);
		desc->buflen = htole32(txsegs[i].ds_len);
		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
		sc->jme_cdata.jme_tx_cnt++;
		JME_DESC_INC(prod, JME_TX_RING_CNT);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
	/*
	 * Finally request interrupt and give the first descriptor
	 * ownership to hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc = nsegs + 1;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
1709 
/*
 * Taskqueue handler: kick the transmit path for the interface whose
 * ifnet pointer was registered as the task argument.
 */
static void
jme_tx_task(void *arg, int pending)
{

	jme_start((struct ifnet *)arg);
}
1718 
1719 static void
1720 jme_start(struct ifnet *ifp)
1721 {
1722         struct jme_softc *sc;
1723         struct mbuf *m_head;
1724 	int enq;
1725 
1726 	sc = ifp->if_softc;
1727 
1728 	JME_LOCK(sc);
1729 
1730 	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT)
1731 		jme_txeof(sc);
1732 
1733 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1734 	    IFF_DRV_RUNNING || (sc->jme_flags & JME_FLAG_LINK) == 0) {
1735 		JME_UNLOCK(sc);
1736 		return;
1737 	}
1738 
1739 	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
1740 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1741 		if (m_head == NULL)
1742 			break;
1743 		/*
1744 		 * Pack the data into the transmit ring. If we
1745 		 * don't have room, set the OACTIVE flag and wait
1746 		 * for the NIC to drain the ring.
1747 		 */
1748 		if (jme_encap(sc, &m_head)) {
1749 			if (m_head == NULL)
1750 				break;
1751 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1752 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1753 			break;
1754 		}
1755 
1756 		enq++;
1757 		/*
1758 		 * If there's a BPF listener, bounce a copy of this frame
1759 		 * to him.
1760 		 */
1761 		ETHER_BPF_MTAP(ifp, m_head);
1762 	}
1763 
1764 	if (enq > 0) {
1765 		/*
1766 		 * Reading TXCSR takes very long time under heavy load
1767 		 * so cache TXCSR value and writes the ORed value with
1768 		 * the kick command to the TXCSR. This saves one register
1769 		 * access cycle.
1770 		 */
1771 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1772 		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
1773 		/* Set a timeout in case the chip goes out to lunch. */
1774 		sc->jme_watchdog_timer = JME_TX_TIMEOUT;
1775 	}
1776 
1777 	JME_UNLOCK(sc);
1778 }
1779 
/*
 * Per-tick Tx watchdog, called with the softc lock held.  Counts the
 * timer down; on expiry distinguishes three cases: lost link (full
 * reinit), all descriptors actually completed (missed interrupt --
 * just restart transmission), or a genuine Tx hang (full reinit and
 * restart).
 */
static void
jme_watchdog(struct jme_softc *sc)
{
	struct ifnet *ifp;

	JME_LOCK_ASSERT(sc);

	/* Timer disarmed, or armed but not yet expired. */
	if (sc->jme_watchdog_timer == 0 || --sc->jme_watchdog_timer)
		return;

	ifp = sc->jme_ifp;
	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		if_printf(sc->jme_ifp, "watchdog timeout (missed link)\n");
		ifp->if_oerrors++;
		jme_init_locked(sc);
		return;
	}
	/* Reclaim completions; a missed interrupt may have left them. */
	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt == 0) {
		if_printf(sc->jme_ifp,
		    "watchdog timeout (missed Tx interrupts) -- recovering\n");
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			taskqueue_enqueue(sc->jme_tq, &sc->jme_tx_task);
		return;
	}

	/* Real hang: reinitialize and restart any pending transmission. */
	if_printf(sc->jme_ifp, "watchdog timeout\n");
	ifp->if_oerrors++;
	jme_init_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		taskqueue_enqueue(sc->jme_tq, &sc->jme_tx_task);
}
1812 
/*
 * Interface ioctl handler: MTU changes, interface flags, multicast
 * filter updates, media selection, and capability toggles.  Hardware
 * state is only touched under the softc lock.
 */
static int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct jme_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	uint32_t reg;
	int error, mask;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
		    ((sc->jme_flags & JME_FLAG_NOJUMBO) != 0 &&
		    ifr->ifr_mtu > JME_MAX_MTU)) {
			error = EINVAL;
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			/*
			 * No special configuration is required when interface
			 * MTU is changed but availability of TSO/Tx checksum
			 * offload should be checked against new MTU size as
			 * FIFO size is just 2K.
			 */
			JME_LOCK(sc);
			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
				ifp->if_capenable &=
				    ~(IFCAP_TXCSUM | IFCAP_TSO4);
				ifp->if_hwassist &=
				    ~(JME_CSUM_FEATURES | CSUM_TSO);
				VLAN_CAPABILITIES(ifp);
			}
			ifp->if_mtu = ifr->ifr_mtu;
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
				jme_init_locked(sc);
			JME_UNLOCK(sc);
		}
		break;
	case SIOCSIFFLAGS:
		JME_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				/* Re-filter only if PROMISC/ALLMULTI flipped. */
				if (((ifp->if_flags ^ sc->jme_if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					jme_set_filter(sc);
			} else {
				if ((sc->jme_flags & JME_FLAG_DETACH) == 0)
					jme_init_locked(sc);
			}
		} else {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
				jme_stop(sc);
		}
		sc->jme_if_flags = ifp->if_flags;
		JME_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		JME_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			jme_set_filter(sc);
		JME_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->jme_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		JME_LOCK(sc);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		/* Tx csum only allowed while MTU fits the 2K Tx FIFO. */
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    ifp->if_mtu < JME_TX_FIFO_SIZE) {
			if ((IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
				ifp->if_capenable ^= IFCAP_TXCSUM;
				if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
					ifp->if_hwassist |= JME_CSUM_FEATURES;
				else
					ifp->if_hwassist &= ~JME_CSUM_FEATURES;
			}
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (IFCAP_RXCSUM & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reg = CSR_READ_4(sc, JME_RXMAC);
			reg &= ~RXMAC_CSUM_ENB;
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
				reg |= RXMAC_CSUM_ENB;
			CSR_WRITE_4(sc, JME_RXMAC, reg);
		}
		if ((mask & IFCAP_TSO4) != 0 &&
		    ifp->if_mtu < JME_TX_FIFO_SIZE) {
			if ((IFCAP_TSO4 & ifp->if_capabilities) != 0) {
				ifp->if_capenable ^= IFCAP_TSO4;
				if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
					ifp->if_hwassist |= CSUM_TSO;
				else
					ifp->if_hwassist &= ~CSUM_TSO;
			}
		}
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (IFCAP_WOL_MAGIC & ifp->if_capabilities) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			jme_set_vlan(sc);
		}
		JME_UNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
1938 
/*
 * Reprogram GHC/RXMAC/TXMAC/TXPFC according to the speed and duplex
 * resolved by the PHY (miibus).  Called with the softc lock held.
 */
static void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause;

	JME_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->jme_miibus);

	/* Reset the MAC before reprogramming it. */
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		/* Full duplex: no collision handling required. */
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
#ifdef notyet
		/* Flow-control support, not enabled yet. */
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;
#endif
		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}
	/* Reprogram Tx/Rx MACs with resolved speed/duplex. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10;
		break;
	case IFM_100_TX:
		ghc |= GHC_SPEED_100;
		break;
	case IFM_1000_T:
		/* Fast-ethernet-only controllers can't do 1000baseT. */
		if ((sc->jme_flags & JME_FLAG_FASTETH) != 0)
			break;
		ghc |= GHC_SPEED_1000;
		/* Half-duplex gigabit needs carrier extension/bursting. */
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;
	default:
		break;
	}
	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);
}
2004 
/*
 * Taskqueue handler run after a PHY link-state change reported by
 * miibus.  Because stopping the Rx/Tx MACs resets the hardware
 * descriptor pointers (see comment below), the driver tears down and
 * rebuilds its ring state before restarting the MACs.
 */
static void
jme_link_task(void *arg, int pending)
{
	struct jme_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i;

	sc = (struct jme_softc *)arg;

	JME_LOCK(sc);
	mii = device_get_softc(sc->jme_miibus);
	ifp = sc->jme_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		JME_UNLOCK(sc);
		return;
	}

	/* Determine whether a usable link was established. */
	sc->jme_flags &= ~JME_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		case IFM_1000_T:
			/* Fast-ethernet-only controllers can't do 1000T. */
			if ((sc->jme_flags & JME_FLAG_FASTETH) != 0)
				break;
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling Rx/Tx MACs have a side-effect of resetting
	 * JME_TXNDA/JME_RXNDA register to the first address of
	 * Tx/Rx descriptor address. So driver should reset its
	 * internal producer/consumer pointer and reclaim any
	 * allocated resources. Note, just saving the value of
	 * JME_TXNDA and JME_RXNDA registers before stopping MAC
	 * and restoring JME_TXNDA/JME_RXNDA register is not
	 * sufficient to make sure correct MAC state because
	 * stopping MAC operation can take a while and hardware
	 * might have updated JME_TXNDA/JME_RXNDA registers
	 * during the stop operation.
	 */
	/* Block execution of task. */
	taskqueue_block(sc->jme_tq);
	/* Disable interrupts and stop driver. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	callout_stop(&sc->jme_tick_ch);
	sc->jme_watchdog_timer = 0;

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	/*
	 * XXX Drain all queued tasks.  The lock is dropped here
	 * because taskqueue_drain() may sleep.
	 */
	JME_UNLOCK(sc);
	taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
	taskqueue_drain(sc->jme_tq, &sc->jme_tx_task);
	JME_LOCK(sc);

	/* Reclaim buffers the hardware had completed before the stop. */
	jme_rxintr(sc, JME_RX_RING_CNT);
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);
	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < JME_TX_RING_CNT; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_sync(
				    sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(
				    sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				ifp->if_oerrors++;
			}
		}
	}

	/*
	 * Reuse configured Rx descriptors and reset
	 * producer/consumer index.
	 */
	sc->jme_cdata.jme_rx_cons = 0;
	atomic_set_int(&sc->jme_morework, 0);
	jme_init_tx_ring(sc);
	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if ((sc->jme_flags & JME_FLAG_LINK) != 0) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = JME_TX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		/* Set Rx ring address to the hardware. */
		paddr = JME_RX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
	/* Unblock execution of task. */
	taskqueue_unblock(sc->jme_tq);
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);

	JME_UNLOCK(sc);
}
2142 
2143 static int
2144 jme_intr(void *arg)
2145 {
2146 	struct jme_softc *sc;
2147 	uint32_t status;
2148 
2149 	sc = (struct jme_softc *)arg;
2150 
2151 	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
2152 	if (status == 0 || status == 0xFFFFFFFF)
2153 		return (FILTER_STRAY);
2154 	/* Disable interrupts. */
2155 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2156 	taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);
2157 
2158 	return (FILTER_HANDLED);
2159 }
2160 
/*
 * Deferred interrupt handler, run from the driver taskqueue after
 * jme_intr() masked interrupts.  Processes Rx completions directly,
 * defers Tx work to the Tx task, re-enqueues itself while work
 * remains and only unmasks interrupts once everything is drained.
 */
static void
jme_int_task(void *arg, int pending)
{
	struct jme_softc *sc;
	struct ifnet *ifp;
	uint32_t status;
	int more;

	sc = (struct jme_softc *)arg;
	ifp = sc->jme_ifp;

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	/* Fold in Rx work carried over from a previous run. */
	more = atomic_readandclear_int(&sc->jme_morework);
	if (more != 0) {
		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO;
		more = 0;
	}
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto done;
	/* Reset PCC counter/timer and Ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
	if ((status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) != 0)
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
	if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0)
		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
	CSR_WRITE_4(sc, JME_INTR_STATUS, status);
	more = 0;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0) {
			/* Bounded Rx processing; remember leftover work. */
			more = jme_rxintr(sc, sc->jme_process_limit);
			if (more != 0)
				atomic_set_int(&sc->jme_morework, 1);
		}
		if ((status & INTR_RXQ_DESC_EMPTY) != 0) {
			/*
			 * Notify hardware availability of new Rx
			 * buffers.
			 * Reading RXCSR takes very long time under
			 * heavy load so cache RXCSR value and writes
			 * the ORed value with the kick command to
			 * the RXCSR. This saves one register access
			 * cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}
		/*
		 * Reclaiming Tx buffers are deferred to make jme(4) run
		 * without locks held.
		 */
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			taskqueue_enqueue(sc->jme_tq, &sc->jme_tx_task);
	}

	/* Requeue ourselves instead of unmasking while work remains. */
	if (more != 0 || (CSR_READ_4(sc, JME_INTR_STATUS) & JME_INTRS) != 0) {
		taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);
		return;
	}
done:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
2223 
/*
 * Reclaim mbufs and descriptors for frames the hardware has finished
 * transmitting, update Tx statistics and disarm the watchdog when the
 * ring drains.  Called with the softc lock held.
 */
static void
jme_txeof(struct jme_softc *sc)
{
	struct ifnet *ifp;
	struct jme_txdesc *txd;
	uint32_t status;
	int cons, nsegs;

	JME_LOCK_ASSERT(sc);

	ifp = sc->jme_ifp;

	cons = sc->jme_cdata.jme_tx_cons;
	if (cons == sc->jme_cdata.jme_tx_prod)
		return;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (; cons != sc->jme_cdata.jme_tx_prod;) {
		txd = &sc->jme_cdata.jme_txdesc[cons];
		status = le32toh(txd->tx_desc->flags);
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if ((status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) != 0)
			ifp->if_oerrors++;
		else {
			ifp->if_opackets++;
			if ((status & JME_TD_COLLISION) != 0)
				ifp->if_collisions +=
				    le32toh(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK;
		}
		/*
		 * Only the first descriptor of multi-descriptor
		 * transmission is updated so driver have to skip entire
		 * chained buffers for the transmitted frame. In other
		 * words, JME_TD_OWN bit is valid only at the first
		 * descriptor of a multi-descriptor transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			sc->jme_rdata.jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, JME_TX_RING_CNT);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);

		KASSERT(txd->tx_m != NULL,
		    ("%s: freeing NULL mbuf!\n", __func__));
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
		    ("%s: Active Tx desc counter was garbled\n", __func__));
		txd->tx_ndesc = 0;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}
	sc->jme_cdata.jme_tx_cons = cons;
	/* Disarm watchdog timer when there are no pending descriptors. */
	if (sc->jme_cdata.jme_tx_cnt == 0)
		sc->jme_watchdog_timer = 0;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
2299 
2300 static __inline void
2301 jme_discard_rxbuf(struct jme_softc *sc, int cons)
2302 {
2303 	struct jme_desc *desc;
2304 
2305 	desc = &sc->jme_rdata.jme_rx_ring[cons];
2306 	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2307 	desc->buflen = htole32(MCLBYTES);
2308 }
2309 
/*
 * Receive one frame: walk its `nsegs' descriptors starting at the
 * current consumer index, chain the per-descriptor mbuf clusters into
 * a packet, fix up lengths and checksum/VLAN metadata, and hand the
 * result to the network stack.  On error or mbuf shortage the
 * affected descriptors are recycled in place via jme_discard_rxbuf().
 */
static void
jme_rxeof(struct jme_softc *sc)
{
	struct ifnet *ifp;
	struct jme_desc *desc;
	struct jme_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint32_t flags, status;
	int cons, count, nsegs;

	ifp = sc->jme_ifp;

	cons = sc->jme_cdata.jme_rx_cons;
	desc = &sc->jme_rdata.jme_rx_ring[cons];
	flags = le32toh(desc->flags);
	status = le32toh(desc->buflen);
	nsegs = JME_RX_NSEGS(status);
	sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
	if ((status & JME_RX_ERR_STAT) != 0) {
		/* Bad frame: recycle its descriptors and advance. */
		ifp->if_ierrors++;
		jme_discard_rxbuf(sc, sc->jme_cdata.jme_rx_cons);
#ifdef JME_SHOW_ERRORS
		device_printf(sc->jme_dev, "%s : receive error = 0x%b\n",
		    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
#endif
		sc->jme_cdata.jme_rx_cons += nsegs;
		sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
		return;
	}

	for (count = 0; count < nsegs; count++,
	    JME_DESC_INC(cons, JME_RX_RING_CNT)) {
		rxd = &sc->jme_cdata.jme_rxdesc[cons];
		mp = rxd->rx_m;
		/* Add a new receive buffer to the ring. */
		if (jme_newbuf(sc, rxd) != 0) {
			ifp->if_iqdrops++;
			/* Reuse buffer. */
			for (; count < nsegs; count++) {
				jme_discard_rxbuf(sc, cons);
				JME_DESC_INC(cons, JME_RX_RING_CNT);
			}
			/* Drop the partially assembled chain. */
			if (sc->jme_cdata.jme_rxhead != NULL) {
				m_freem(sc->jme_cdata.jme_rxhead);
				JME_RXCHAIN_RESET(sc);
			}
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * Actual size is fixed when we encounter the end of
		 * multi-segmented frame.
		 */
		mp->m_len = MCLBYTES;

		/* Chain received mbufs. */
		if (sc->jme_cdata.jme_rxhead == NULL) {
			sc->jme_cdata.jme_rxhead = mp;
			sc->jme_cdata.jme_rxtail = mp;
		} else {
			/*
			 * Receive processor can receive a maximum frame
			 * size of 65535 bytes.
			 */
			mp->m_flags &= ~M_PKTHDR;
			sc->jme_cdata.jme_rxtail->m_next = mp;
			sc->jme_cdata.jme_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/* Last desc. for this frame. */
			m = sc->jme_cdata.jme_rxhead;
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
			if (nsegs > 1) {
				/* Set first mbuf size. */
				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
				/* Set last mbuf size. */
				mp->m_len = sc->jme_cdata.jme_rxlen -
				    ((MCLBYTES - JME_RX_PAD_BYTES) +
				    (MCLBYTES * (nsegs - 2)));
			} else
				m->m_len = sc->jme_cdata.jme_rxlen;
			m->m_pkthdr.rcvif = ifp;

			/*
			 * Account for 10bytes auto padding which is used
			 * to align IP header on 32bit boundary. Also note,
			 * CRC bytes is automatically removed by the
			 * hardware.
			 */
			m->m_data += JME_RX_PAD_BYTES;

			/* Set checksum information. */
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
			    (flags & JME_RD_IPV4) != 0) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if ((flags & JME_RD_IPCSUM) != 0)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if (((flags & JME_RD_MORE_FRAG) == 0) &&
				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
				    (JME_RD_TCP | JME_RD_TCPCSUM) ||
				    (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
				    (JME_RD_UDP | JME_RD_UDPCSUM))) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

			/* Check for VLAN tagged packets. */
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
			    (flags & JME_RD_VLAN_TAG) != 0) {
				m->m_pkthdr.ether_vtag =
				    flags & JME_RD_VLAN_MASK;
				m->m_flags |= M_VLANTAG;
			}

			ifp->if_ipackets++;
			/* Pass it on. */
			(*ifp->if_input)(ifp, m);

			/* Reset mbuf chains. */
			JME_RXCHAIN_RESET(sc);
		}
	}

	sc->jme_cdata.jme_rx_cons += nsegs;
	sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
}
2442 
2443 static int
2444 jme_rxintr(struct jme_softc *sc, int count)
2445 {
2446 	struct jme_desc *desc;
2447 	int nsegs, prog, pktlen;
2448 
2449 	bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2450 	    sc->jme_cdata.jme_rx_ring_map,
2451 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2452 
2453 	for (prog = 0; count > 0; prog++) {
2454 		desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
2455 		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2456 			break;
2457 		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2458 			break;
2459 		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2460 		/*
2461 		 * Check number of segments against received bytes.
2462 		 * Non-matching value would indicate that hardware
2463 		 * is still trying to update Rx descriptors. I'm not
2464 		 * sure whether this check is needed.
2465 		 */
2466 		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2467 		if (nsegs != ((pktlen + (MCLBYTES - 1)) / MCLBYTES))
2468 			break;
2469 		prog++;
2470 		/* Received a frame. */
2471 		jme_rxeof(sc);
2472 		count -= nsegs;
2473 	}
2474 
2475 	if (prog > 0)
2476 		bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2477 		    sc->jme_cdata.jme_rx_ring_map,
2478 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2479 
2480 	return (count > 0 ? 0 : EAGAIN);
2481 }
2482 
2483 static void
2484 jme_tick(void *arg)
2485 {
2486 	struct jme_softc *sc;
2487 	struct mii_data *mii;
2488 
2489 	sc = (struct jme_softc *)arg;
2490 
2491 	JME_LOCK_ASSERT(sc);
2492 
2493 	mii = device_get_softc(sc->jme_miibus);
2494 	mii_tick(mii);
2495 	/*
2496 	 * Reclaim Tx buffers that have been completed. It's not
2497 	 * needed here but it would release allocated mbuf chains
2498 	 * faster and limit the maximum delay to a hz.
2499 	 */
2500 	jme_txeof(sc);
2501 	jme_watchdog(sc);
2502 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2503 }
2504 
/*
 * Put the chip into a known quiescent state: stop the Rx/Tx engines,
 * then pulse the global reset bit and release it.
 */
static void
jme_reset(struct jme_softc *sc)
{

	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);
	/* Assert global reset, hold it briefly, then release. */
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
}
2516 
/*
 * Locked wrapper around jme_init_locked(); used as the interface
 * if_init entry point.
 */
static void
jme_init(void *xsc)
{
	struct jme_softc *sc = (struct jme_softc *)xsc;

	JME_LOCK(sc);
	jme_init_locked(sc);
	JME_UNLOCK(sc);
}
2527 
2528 static void
2529 jme_init_locked(struct jme_softc *sc)
2530 {
2531 	struct ifnet *ifp;
2532 	struct mii_data *mii;
2533 	uint8_t eaddr[ETHER_ADDR_LEN];
2534 	bus_addr_t paddr;
2535 	uint32_t reg;
2536 	int error;
2537 
2538 	JME_LOCK_ASSERT(sc);
2539 
2540 	ifp = sc->jme_ifp;
2541 	mii = device_get_softc(sc->jme_miibus);
2542 
2543 	/*
2544 	 * Cancel any pending I/O.
2545 	 */
2546 	jme_stop(sc);
2547 
2548 	/*
2549 	 * Reset the chip to a known state.
2550 	 */
2551 	jme_reset(sc);
2552 
2553 	/* Init descriptors. */
2554 	error = jme_init_rx_ring(sc);
2555         if (error != 0) {
2556                 device_printf(sc->jme_dev,
2557                     "%s: initialization failed: no memory for Rx buffers.\n",
2558 		    __func__);
2559                 jme_stop(sc);
2560 		return;
2561         }
2562 	jme_init_tx_ring(sc);
2563 	/* Initialize shadow status block. */
2564 	jme_init_ssb(sc);
2565 
2566 	/* Reprogram the station address. */
2567 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2568 	CSR_WRITE_4(sc, JME_PAR0,
2569 	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2570 	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
2571 
2572 	/*
2573 	 * Configure Tx queue.
2574 	 *  Tx priority queue weight value : 0
2575 	 *  Tx FIFO threshold for processing next packet : 16QW
2576 	 *  Maximum Tx DMA length : 512
2577 	 *  Allow Tx DMA burst.
2578 	 */
2579 	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2580 	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2581 	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2582 	sc->jme_txcsr |= sc->jme_tx_dma_size;
2583 	sc->jme_txcsr |= TXCSR_DMA_BURST;
2584 	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2585 
2586 	/* Set Tx descriptor counter. */
2587 	CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT);
2588 
2589 	/* Set Tx ring address to the hardware. */
2590 	paddr = JME_TX_RING_ADDR(sc, 0);
2591 	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2592 	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2593 
2594 	/* Configure TxMAC parameters. */
2595 	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2596 	reg |= TXMAC_THRESH_1_PKT;
2597 	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2598 	CSR_WRITE_4(sc, JME_TXMAC, reg);
2599 
2600 	/*
2601 	 * Configure Rx queue.
2602 	 *  FIFO full threshold for transmitting Tx pause packet : 128T
2603 	 *  FIFO threshold for processing next packet : 128QW
2604 	 *  Rx queue 0 select
2605 	 *  Max Rx DMA length : 128
2606 	 *  Rx descriptor retry : 32
2607 	 *  Rx descriptor retry time gap : 256ns
2608 	 *  Don't receive runt/bad frame.
2609 	 */
2610 	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2611 	/*
2612 	 * Since Rx FIFO size is 4K bytes, receiving frames larger
2613 	 * than 4K bytes will suffer from Rx FIFO overruns. So
2614 	 * decrease FIFO threshold to reduce the FIFO overruns for
2615 	 * frames larger than 4000 bytes.
2616 	 * For best performance of standard MTU sized frames use
2617 	 * maximum allowable FIFO threshold, 128QW.
2618 	 */
2619 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
2620 	    ETHER_CRC_LEN) > JME_RX_FIFO_SIZE)
2621 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2622 	else
2623 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2624 	sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
2625 	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2626 	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2627 	CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
2628 
2629 	/* Set Rx descriptor counter. */
2630 	CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT);
2631 
2632 	/* Set Rx ring address to the hardware. */
2633 	paddr = JME_RX_RING_ADDR(sc, 0);
2634 	CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2635 	CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2636 
2637 	/* Clear receive filter. */
2638 	CSR_WRITE_4(sc, JME_RXMAC, 0);
2639 	/* Set up the receive filter. */
2640 	jme_set_filter(sc);
2641 	jme_set_vlan(sc);
2642 
2643 	/*
2644 	 * Disable all WOL bits as WOL can interfere normal Rx
2645 	 * operation. Also clear WOL detection status bits.
2646 	 */
2647 	reg = CSR_READ_4(sc, JME_PMCS);
2648 	reg &= ~PMCS_WOL_ENB_MASK;
2649 	CSR_WRITE_4(sc, JME_PMCS, reg);
2650 
2651 	reg = CSR_READ_4(sc, JME_RXMAC);
2652 	/*
2653 	 * Pad 10bytes right before received frame. This will greatly
2654 	 * help Rx performance on strict-alignment architectures as
2655 	 * it does not need to copy the frame to align the payload.
2656 	 */
2657 	reg |= RXMAC_PAD_10BYTES;
2658 	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2659 		reg |= RXMAC_CSUM_ENB;
2660 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2661 
2662 	/* Configure general purpose reg0 */
2663 	reg = CSR_READ_4(sc, JME_GPREG0);
2664 	reg &= ~GPREG0_PCC_UNIT_MASK;
2665 	/* Set PCC timer resolution to micro-seconds unit. */
2666 	reg |= GPREG0_PCC_UNIT_US;
2667 	/*
2668 	 * Disable all shadow register posting as we have to read
2669 	 * JME_INTR_STATUS register in jme_int_task. Also it seems
2670 	 * that it's hard to synchronize interrupt status between
2671 	 * hardware and software with shadow posting due to
2672 	 * requirements of bus_dmamap_sync(9).
2673 	 */
2674 	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2675 	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2676 	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2677 	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2678 	/* Disable posting of DW0. */
2679 	reg &= ~GPREG0_POST_DW0_ENB;
2680 	/* Clear PME message. */
2681 	reg &= ~GPREG0_PME_ENB;
2682 	/* Set PHY address. */
2683 	reg &= ~GPREG0_PHY_ADDR_MASK;
2684 	reg |= sc->jme_phyaddr;
2685 	CSR_WRITE_4(sc, JME_GPREG0, reg);
2686 
2687 	/* Configure Tx queue 0 packet completion coalescing. */
2688 	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
2689 	    PCCTX_COAL_TO_MASK;
2690 	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
2691 	    PCCTX_COAL_PKT_MASK;
2692 	reg |= PCCTX_COAL_TXQ0;
2693 	CSR_WRITE_4(sc, JME_PCCTX, reg);
2694 
2695 	/* Configure Rx queue 0 packet completion coalescing. */
2696 	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
2697 	    PCCRX_COAL_TO_MASK;
2698 	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
2699 	    PCCRX_COAL_PKT_MASK;
2700 	CSR_WRITE_4(sc, JME_PCCRX0, reg);
2701 
2702 	/* Configure shadow status block but don't enable posting. */
2703 	paddr = sc->jme_rdata.jme_ssb_block_paddr;
2704 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2705 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2706 
2707 	/* Disable Timer 1 and Timer 2. */
2708 	CSR_WRITE_4(sc, JME_TIMER1, 0);
2709 	CSR_WRITE_4(sc, JME_TIMER2, 0);
2710 
2711 	/* Configure retry transmit period, retry limit value. */
2712 	CSR_WRITE_4(sc, JME_TXTRHD,
2713 	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2714 	    TXTRHD_RT_PERIOD_MASK) |
2715 	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2716 	    TXTRHD_RT_LIMIT_SHIFT));
2717 
2718 	/* Disable RSS. */
2719 	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
2720 
2721 	/* Initialize the interrupt mask. */
2722 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2723 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2724 
2725 	/*
2726 	 * Enabling Tx/Rx DMA engines and Rx queue processing is
2727 	 * done after detection of valid link in jme_link_task.
2728 	 */
2729 
2730 	sc->jme_flags &= ~JME_FLAG_LINK;
2731 	/* Set the current media. */
2732 	mii_mediachg(mii);
2733 
2734 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2735 
2736 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2737 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2738 }
2739 
/*
 * Stop the controller: mark the interface down, disable interrupts
 * and shadow-status posting, halt the Rx/Tx engines, then free every
 * mbuf still held by the Rx and Tx rings.  Called with the softc
 * lock held.
 */
static void
jme_stop(struct jme_softc *sc)
{
	struct ifnet *ifp;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	JME_LOCK_ASSERT(sc);
	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp = sc->jme_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->jme_flags &= ~JME_FLAG_LINK;
	callout_stop(&sc->jme_tick_ch);
	sc->jme_watchdog_timer = 0;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/* Disable updating shadow status block. */
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);

	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	/* Reclaim Rx/Tx buffers that have been completed. */
	jme_rxintr(sc, JME_RX_RING_CNT);
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);
	jme_txeof(sc);
	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->jme_cdata.jme_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->jme_cdata.jme_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
        }
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_ndesc = 0;
		}
        }
}
2805 
2806 static void
2807 jme_stop_tx(struct jme_softc *sc)
2808 {
2809 	uint32_t reg;
2810 	int i;
2811 
2812 	reg = CSR_READ_4(sc, JME_TXCSR);
2813 	if ((reg & TXCSR_TX_ENB) == 0)
2814 		return;
2815 	reg &= ~TXCSR_TX_ENB;
2816 	CSR_WRITE_4(sc, JME_TXCSR, reg);
2817 	for (i = JME_TIMEOUT; i > 0; i--) {
2818 		DELAY(1);
2819 		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2820 			break;
2821 	}
2822 	if (i == 0)
2823 		device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2824 }
2825 
2826 static void
2827 jme_stop_rx(struct jme_softc *sc)
2828 {
2829 	uint32_t reg;
2830 	int i;
2831 
2832 	reg = CSR_READ_4(sc, JME_RXCSR);
2833 	if ((reg & RXCSR_RX_ENB) == 0)
2834 		return;
2835 	reg &= ~RXCSR_RX_ENB;
2836 	CSR_WRITE_4(sc, JME_RXCSR, reg);
2837 	for (i = JME_TIMEOUT; i > 0; i--) {
2838 		DELAY(1);
2839 		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2840 			break;
2841 	}
2842 	if (i == 0)
2843 		device_printf(sc->jme_dev, "stopping recevier timeout!\n");
2844 }
2845 
2846 static void
2847 jme_init_tx_ring(struct jme_softc *sc)
2848 {
2849 	struct jme_ring_data *rd;
2850 	struct jme_txdesc *txd;
2851 	int i;
2852 
2853 	sc->jme_cdata.jme_tx_prod = 0;
2854 	sc->jme_cdata.jme_tx_cons = 0;
2855 	sc->jme_cdata.jme_tx_cnt = 0;
2856 
2857 	rd = &sc->jme_rdata;
2858 	bzero(rd->jme_tx_ring, JME_TX_RING_SIZE);
2859 	for (i = 0; i < JME_TX_RING_CNT; i++) {
2860 		txd = &sc->jme_cdata.jme_txdesc[i];
2861 		txd->tx_m = NULL;
2862 		txd->tx_desc = &rd->jme_tx_ring[i];
2863 		txd->tx_ndesc = 0;
2864 	}
2865 
2866 	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2867 	    sc->jme_cdata.jme_tx_ring_map,
2868 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2869 }
2870 
2871 static void
2872 jme_init_ssb(struct jme_softc *sc)
2873 {
2874 	struct jme_ring_data *rd;
2875 
2876 	rd = &sc->jme_rdata;
2877 	bzero(rd->jme_ssb_block, JME_SSB_SIZE);
2878 	bus_dmamap_sync(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map,
2879 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2880 }
2881 
2882 static int
2883 jme_init_rx_ring(struct jme_softc *sc)
2884 {
2885 	struct jme_ring_data *rd;
2886 	struct jme_rxdesc *rxd;
2887 	int i;
2888 
2889 	sc->jme_cdata.jme_rx_cons = 0;
2890 	JME_RXCHAIN_RESET(sc);
2891 	atomic_set_int(&sc->jme_morework, 0);
2892 
2893 	rd = &sc->jme_rdata;
2894 	bzero(rd->jme_rx_ring, JME_RX_RING_SIZE);
2895 	for (i = 0; i < JME_RX_RING_CNT; i++) {
2896 		rxd = &sc->jme_cdata.jme_rxdesc[i];
2897 		rxd->rx_m = NULL;
2898 		rxd->rx_desc = &rd->jme_rx_ring[i];
2899 		if (jme_newbuf(sc, rxd) != 0)
2900 			return (ENOBUFS);
2901 	}
2902 
2903 	bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2904 	    sc->jme_cdata.jme_rx_ring_map,
2905 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2906 
2907 	return (0);
2908 }
2909 
/*
 * Allocate and DMA-map a new mbuf cluster for the given Rx slot and
 * hand its descriptor back to the hardware.  On failure the slot's
 * old buffer (if any) is left untouched so the caller may recycle it.
 * Returns 0 on success, ENOBUFS on allocation or mapping failure.
 */
static int
jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd)
{
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	/*
	 * JMC250 has 64bit boundary alignment limitation so jme(4)
	 * takes advantage of 10 bytes padding feature of hardware
	 * in order not to copy entire frame to align IP header on
	 * 32bit boundary.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/* Load the new mbuf into the spare map first. */
	if (bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_rx_tag,
	    sc->jme_cdata.jme_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	/* Tear down the mapping of the old buffer, if any. */
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap);
	}
	/* Swap the now-loaded spare map into the slot. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
	sc->jme_cdata.jme_rx_sparemap = map;
	bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;

	/* Re-arm the descriptor and return ownership to the chip. */
	desc = rxd->rx_desc;
	desc->buflen = htole32(segs[0].ds_len);
	desc->addr_lo = htole32(JME_ADDR_LO(segs[0].ds_addr));
	desc->addr_hi = htole32(JME_ADDR_HI(segs[0].ds_addr));
	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);

	return (0);
}
2957 
2958 static void
2959 jme_set_vlan(struct jme_softc *sc)
2960 {
2961 	struct ifnet *ifp;
2962 	uint32_t reg;
2963 
2964 	JME_LOCK_ASSERT(sc);
2965 
2966 	ifp = sc->jme_ifp;
2967 	reg = CSR_READ_4(sc, JME_RXMAC);
2968 	reg &= ~RXMAC_VLAN_ENB;
2969 	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2970 		reg |= RXMAC_VLAN_ENB;
2971 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2972 }
2973 
2974 static void
2975 jme_set_filter(struct jme_softc *sc)
2976 {
2977 	struct ifnet *ifp;
2978 	struct ifmultiaddr *ifma;
2979 	uint32_t crc;
2980 	uint32_t mchash[2];
2981 	uint32_t rxcfg;
2982 
2983 	JME_LOCK_ASSERT(sc);
2984 
2985 	ifp = sc->jme_ifp;
2986 
2987 	rxcfg = CSR_READ_4(sc, JME_RXMAC);
2988 	rxcfg &= ~ (RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
2989 	    RXMAC_ALLMULTI);
2990 	/* Always accept frames destined to our station address. */
2991 	rxcfg |= RXMAC_UNICAST;
2992 	if ((ifp->if_flags & IFF_BROADCAST) != 0)
2993 		rxcfg |= RXMAC_BROADCAST;
2994 	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2995 		if ((ifp->if_flags & IFF_PROMISC) != 0)
2996 			rxcfg |= RXMAC_PROMISC;
2997 		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
2998 			rxcfg |= RXMAC_ALLMULTI;
2999 		CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
3000 		CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
3001 		CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3002 		return;
3003 	}
3004 
3005 	/*
3006 	 * Set up the multicast address filter by passing all multicast
3007 	 * addresses through a CRC generator, and then using the low-order
3008 	 * 6 bits as an index into the 64 bit multicast hash table.  The
3009 	 * high order bits select the register, while the rest of the bits
3010 	 * select the bit within the register.
3011 	 */
3012 	rxcfg |= RXMAC_MULTICAST;
3013 	bzero(mchash, sizeof(mchash));
3014 
3015 	IF_ADDR_LOCK(ifp);
3016 	TAILQ_FOREACH(ifma, &sc->jme_ifp->if_multiaddrs, ifma_link) {
3017 		if (ifma->ifma_addr->sa_family != AF_LINK)
3018 			continue;
3019 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
3020 		    ifma->ifma_addr), ETHER_ADDR_LEN);
3021 
3022 		/* Just want the 6 least significant bits. */
3023 		crc &= 0x3f;
3024 
3025 		/* Set the corresponding bit in the hash table. */
3026 		mchash[crc >> 5] |= 1 << (crc & 0x1f);
3027 	}
3028 	IF_ADDR_UNLOCK(ifp);
3029 
3030 	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
3031 	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
3032 	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3033 }
3034 
3035 static int
3036 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3037 {
3038 	int error, value;
3039 
3040 	if (arg1 == NULL)
3041 		return (EINVAL);
3042 	value = *(int *)arg1;
3043 	error = sysctl_handle_int(oidp, &value, 0, req);
3044 	if (error || req->newptr == NULL)
3045 		return (error);
3046 	if (value < low || value > high)
3047 		return (EINVAL);
3048         *(int *)arg1 = value;
3049 
3050         return (0);
3051 }
3052 
/* Sysctl handler: TX interrupt coalescing timeout, bounds-checked. */
static int
sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    PCCTX_COAL_TO_MIN, PCCTX_COAL_TO_MAX));
}
3059 
/* Sysctl handler: TX interrupt coalescing packet count, bounds-checked. */
static int
sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    PCCTX_COAL_PKT_MIN, PCCTX_COAL_PKT_MAX));
}
3066 
/* Sysctl handler: RX interrupt coalescing timeout, bounds-checked. */
static int
sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    PCCRX_COAL_TO_MIN, PCCRX_COAL_TO_MAX));
}
3073 
/* Sysctl handler: RX interrupt coalescing packet count, bounds-checked. */
static int
sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    PCCRX_COAL_PKT_MIN, PCCRX_COAL_PKT_MAX));
}
3080 
/* Sysctl handler: per-interrupt RX processing limit, bounds-checked. */
static int
sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    JME_PROC_MIN, JME_PROC_MAX));
}
3087