xref: /freebsd/sys/dev/jme/if_jme.c (revision 0f454b93f8502e85e8d2b26a383e40b5dc50cd27)
1 /*-
2  * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/bus.h>
34 #include <sys/endian.h>
35 #include <sys/kernel.h>
36 #include <sys/malloc.h>
37 #include <sys/mbuf.h>
38 #include <sys/rman.h>
39 #include <sys/module.h>
40 #include <sys/proc.h>
41 #include <sys/queue.h>
42 #include <sys/socket.h>
43 #include <sys/sockio.h>
44 #include <sys/sysctl.h>
45 #include <sys/taskqueue.h>
46 
47 #include <net/bpf.h>
48 #include <net/if.h>
49 #include <net/if_arp.h>
50 #include <net/ethernet.h>
51 #include <net/if_dl.h>
52 #include <net/if_media.h>
53 #include <net/if_types.h>
54 #include <net/if_vlan_var.h>
55 
56 #include <netinet/in.h>
57 #include <netinet/in_systm.h>
58 #include <netinet/ip.h>
59 #include <netinet/tcp.h>
60 
61 #include <dev/mii/mii.h>
62 #include <dev/mii/miivar.h>
63 
64 #include <dev/pci/pcireg.h>
65 #include <dev/pci/pcivar.h>
66 
67 #include <machine/atomic.h>
68 #include <machine/bus.h>
69 #include <machine/in_cksum.h>
70 
71 #include <dev/jme/if_jmereg.h>
72 #include <dev/jme/if_jmevar.h>
73 
74 /* "device miibus" required.  See GENERIC if you get errors here. */
75 #include "miibus_if.h"
76 
77 /* Define JME_SHOW_ERRORS to enable printing of Rx errors. */
78 #undef	JME_SHOW_ERRORS
79 
80 #define	JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
81 
82 MODULE_DEPEND(jme, pci, 1, 1, 1);
83 MODULE_DEPEND(jme, ether, 1, 1, 1);
84 MODULE_DEPEND(jme, miibus, 1, 1, 1);
85 
86 /* Tunables. */
87 static int msi_disable = 0;
88 static int msix_disable = 0;
89 TUNABLE_INT("hw.jme.msi_disable", &msi_disable);
90 TUNABLE_INT("hw.jme.msix_disable", &msix_disable);
91 
92 /*
93  * Devices supported by this driver.
94  */
95 static struct jme_dev {
96 	uint16_t	jme_vendorid;
97 	uint16_t	jme_deviceid;
98 	const char	*jme_name;
99 } jme_devs[] = {
100 	{ VENDORID_JMICRON, DEVICEID_JMC250,
101 	    "JMicron Inc, JMC250 Gigabit Ethernet" },
102 	{ VENDORID_JMICRON, DEVICEID_JMC260,
103 	    "JMicron Inc, JMC260 Fast Ethernet" },
104 };
105 
106 static int jme_miibus_readreg(device_t, int, int);
107 static int jme_miibus_writereg(device_t, int, int, int);
108 static void jme_miibus_statchg(device_t);
109 static void jme_mediastatus(struct ifnet *, struct ifmediareq *);
110 static int jme_mediachange(struct ifnet *);
111 static int jme_probe(device_t);
112 static int jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
113 static int jme_eeprom_macaddr(struct jme_softc *);
114 static void jme_reg_macaddr(struct jme_softc *);
115 static void jme_map_intr_vector(struct jme_softc *);
116 static int jme_attach(device_t);
117 static int jme_detach(device_t);
118 static void jme_sysctl_node(struct jme_softc *);
119 static void jme_dmamap_cb(void *, bus_dma_segment_t *, int, int);
120 static int jme_dma_alloc(struct jme_softc *);
121 static void jme_dma_free(struct jme_softc *);
122 static int jme_shutdown(device_t);
123 static void jme_setlinkspeed(struct jme_softc *);
124 static void jme_setwol(struct jme_softc *);
125 static int jme_suspend(device_t);
126 static int jme_resume(device_t);
127 static int jme_encap(struct jme_softc *, struct mbuf **);
128 static void jme_tx_task(void *, int);
129 static void jme_start(struct ifnet *);
130 static void jme_watchdog(struct jme_softc *);
131 static int jme_ioctl(struct ifnet *, u_long, caddr_t);
132 static void jme_mac_config(struct jme_softc *);
133 static void jme_link_task(void *, int);
134 static int jme_intr(void *);
135 static void jme_int_task(void *, int);
136 static void jme_txeof(struct jme_softc *);
137 static __inline void jme_discard_rxbuf(struct jme_softc *, int);
138 static void jme_rxeof(struct jme_softc *);
139 static int jme_rxintr(struct jme_softc *, int);
140 static void jme_tick(void *);
141 static void jme_reset(struct jme_softc *);
142 static void jme_init(void *);
143 static void jme_init_locked(struct jme_softc *);
144 static void jme_stop(struct jme_softc *);
145 static void jme_stop_tx(struct jme_softc *);
146 static void jme_stop_rx(struct jme_softc *);
147 static int jme_init_rx_ring(struct jme_softc *);
148 static void jme_init_tx_ring(struct jme_softc *);
149 static void jme_init_ssb(struct jme_softc *);
150 static int jme_newbuf(struct jme_softc *, struct jme_rxdesc *);
151 static void jme_set_vlan(struct jme_softc *);
152 static void jme_set_filter(struct jme_softc *);
153 static void jme_stats_clear(struct jme_softc *);
154 static void jme_stats_save(struct jme_softc *);
155 static void jme_stats_update(struct jme_softc *);
156 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
157 static int sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS);
158 static int sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
159 static int sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS);
160 static int sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
161 static int sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS);
162 
163 
164 static device_method_t jme_methods[] = {
165 	/* Device interface. */
166 	DEVMETHOD(device_probe,		jme_probe),
167 	DEVMETHOD(device_attach,	jme_attach),
168 	DEVMETHOD(device_detach,	jme_detach),
169 	DEVMETHOD(device_shutdown,	jme_shutdown),
170 	DEVMETHOD(device_suspend,	jme_suspend),
171 	DEVMETHOD(device_resume,	jme_resume),
172 
173 	/* MII interface. */
174 	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
175 	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
176 	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),
177 
178 	{ NULL, NULL }
179 };
180 
181 static driver_t jme_driver = {
182 	"jme",
183 	jme_methods,
184 	sizeof(struct jme_softc)
185 };
186 
187 static devclass_t jme_devclass;
188 
189 DRIVER_MODULE(jme, pci, jme_driver, jme_devclass, 0, 0);
190 DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, 0, 0);
191 
192 static struct resource_spec jme_res_spec_mem[] = {
193 	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
194 	{ -1,			0,		0 }
195 };
196 
197 static struct resource_spec jme_irq_spec_legacy[] = {
198 	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
199 	{ -1,			0,		0 }
200 };
201 
202 static struct resource_spec jme_irq_spec_msi[] = {
203 	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
204 	{ SYS_RES_IRQ,		2,		RF_ACTIVE },
205 	{ SYS_RES_IRQ,		3,		RF_ACTIVE },
206 	{ SYS_RES_IRQ,		4,		RF_ACTIVE },
207 	{ SYS_RES_IRQ,		5,		RF_ACTIVE },
208 	{ SYS_RES_IRQ,		6,		RF_ACTIVE },
209 	{ SYS_RES_IRQ,		7,		RF_ACTIVE },
210 	{ SYS_RES_IRQ,		8,		RF_ACTIVE },
211 	{ -1,			0,		0 }
212 };
213 
214 /*
215  *	Read a PHY register on the MII of the JMC250.
216  */
217 static int
218 jme_miibus_readreg(device_t dev, int phy, int reg)
219 {
220 	struct jme_softc *sc;
221 	uint32_t val;
222 	int i;
223 
224 	sc = device_get_softc(dev);
225 
226 	/* For FPGA version, PHY address 0 should be ignored. */
227 	if ((sc->jme_flags & JME_FLAG_FPGA) != 0) {
228 		if (phy == 0)
229 			return (0);
230 	} else {
231 		if (sc->jme_phyaddr != phy)
232 			return (0);
233 	}
234 
235 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
236 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
237 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
238 		DELAY(1);
239 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
240 			break;
241 	}
242 
243 	if (i == 0) {
244 		device_printf(sc->jme_dev, "phy read timeout : %d\n", reg);
245 		return (0);
246 	}
247 
248 	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
249 }
250 
251 /*
252  *	Write a PHY register on the MII of the JMC250.
253  */
254 static int
255 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
256 {
257 	struct jme_softc *sc;
258 	int i;
259 
260 	sc = device_get_softc(dev);
261 
262 	/* For FPGA version, PHY address 0 should be ignored. */
263 	if ((sc->jme_flags & JME_FLAG_FPGA) != 0) {
264 		if (phy == 0)
265 			return (0);
266 	} else {
267 		if (sc->jme_phyaddr != phy)
268 			return (0);
269 	}
270 
271 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
272 	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
273 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
274 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
275 		DELAY(1);
276 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
277 			break;
278 	}
279 
280 	if (i == 0)
281 		device_printf(sc->jme_dev, "phy write timeout : %d\n", reg);
282 
283 	return (0);
284 }
285 
286 /*
287  *	Callback from MII layer when media changes.
288  */
289 static void
290 jme_miibus_statchg(device_t dev)
291 {
292 	struct jme_softc *sc;
293 
294 	sc = device_get_softc(dev);
295 	taskqueue_enqueue(taskqueue_swi, &sc->jme_link_task);
296 }
297 
298 /*
299  *	Get the current interface media status.
300  */
301 static void
302 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
303 {
304 	struct jme_softc *sc;
305 	struct mii_data *mii;
306 
307 	sc = ifp->if_softc;
308 	JME_LOCK(sc);
309 	if ((ifp->if_flags & IFF_UP) == 0) {
310 		JME_UNLOCK(sc);
311 		return;
312 	}
313 	mii = device_get_softc(sc->jme_miibus);
314 
315 	mii_pollstat(mii);
316 	ifmr->ifm_status = mii->mii_media_status;
317 	ifmr->ifm_active = mii->mii_media_active;
318 	JME_UNLOCK(sc);
319 }
320 
321 /*
322  *	Set hardware to newly-selected media.
323  */
324 static int
325 jme_mediachange(struct ifnet *ifp)
326 {
327 	struct jme_softc *sc;
328 	struct mii_data *mii;
329 	struct mii_softc *miisc;
330 	int error;
331 
332 	sc = ifp->if_softc;
333 	JME_LOCK(sc);
334 	mii = device_get_softc(sc->jme_miibus);
335 	if (mii->mii_instance != 0) {
336 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
337 			mii_phy_reset(miisc);
338 	}
339 	error = mii_mediachg(mii);
340 	JME_UNLOCK(sc);
341 
342 	return (error);
343 }
344 
345 static int
346 jme_probe(device_t dev)
347 {
348 	struct jme_dev *sp;
349 	int i;
350 	uint16_t vendor, devid;
351 
352 	vendor = pci_get_vendor(dev);
353 	devid = pci_get_device(dev);
354 	sp = jme_devs;
355 	for (i = 0; i < sizeof(jme_devs) / sizeof(jme_devs[0]);
356 	    i++, sp++) {
357 		if (vendor == sp->jme_vendorid &&
358 		    devid == sp->jme_deviceid) {
359 			device_set_desc(dev, sp->jme_name);
360 			return (BUS_PROBE_DEFAULT);
361 		}
362 	}
363 
364 	return (ENXIO);
365 }
366 
367 static int
368 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
369 {
370 	uint32_t reg;
371 	int i;
372 
373 	*val = 0;
374 	for (i = JME_TIMEOUT; i > 0; i--) {
375 		reg = CSR_READ_4(sc, JME_SMBCSR);
376 		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
377 			break;
378 		DELAY(1);
379 	}
380 
381 	if (i == 0) {
382 		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
383 		return (ETIMEDOUT);
384 	}
385 
386 	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
387 	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
388 	for (i = JME_TIMEOUT; i > 0; i--) {
389 		DELAY(1);
390 		reg = CSR_READ_4(sc, JME_SMBINTF);
391 		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
392 			break;
393 	}
394 
395 	if (i == 0) {
396 		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
397 		return (ETIMEDOUT);
398 	}
399 
400 	reg = CSR_READ_4(sc, JME_SMBINTF);
401 	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
402 
403 	return (0);
404 }
405 
406 static int
407 jme_eeprom_macaddr(struct jme_softc *sc)
408 {
409 	uint8_t eaddr[ETHER_ADDR_LEN];
410 	uint8_t fup, reg, val;
411 	uint32_t offset;
412 	int match;
413 
414 	offset = 0;
415 	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
416 	    fup != JME_EEPROM_SIG0)
417 		return (ENOENT);
418 	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
419 	    fup != JME_EEPROM_SIG1)
420 		return (ENOENT);
421 	match = 0;
422 	do {
423 		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
424 			break;
425 		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
426 		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
427 			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
428 				break;
429 			if (reg >= JME_PAR0 &&
430 			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
431 				if (jme_eeprom_read_byte(sc, offset + 2,
432 				    &val) != 0)
433 					break;
434 				eaddr[reg - JME_PAR0] = val;
435 				match++;
436 			}
437 		}
438 		/* Check for the end of EEPROM descriptor. */
439 		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
440 			break;
441 		/* Try next eeprom descriptor. */
442 		offset += JME_EEPROM_DESC_BYTES;
443 	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);
444 
445 	if (match == ETHER_ADDR_LEN) {
446 		bcopy(eaddr, sc->jme_eaddr, ETHER_ADDR_LEN);
447 		return (0);
448 	}
449 
450 	return (ENOENT);
451 }
452 
453 static void
454 jme_reg_macaddr(struct jme_softc *sc)
455 {
456 	uint32_t par0, par1;
457 
458 	/* Read station address. */
459 	par0 = CSR_READ_4(sc, JME_PAR0);
460 	par1 = CSR_READ_4(sc, JME_PAR1);
461 	par1 &= 0xFFFF;
462 	if ((par0 == 0 && par1 == 0) ||
463 	    (par0 == 0xFFFFFFFF && par1 == 0xFFFF)) {
464 		device_printf(sc->jme_dev,
465 		    "generating fake ethernet address.\n");
466 		par0 = arc4random();
467 		/* Set OUI to JMicron. */
468 		sc->jme_eaddr[0] = 0x02;	/* U/L bit set. */
469 		sc->jme_eaddr[1] = 0x1B;
470 		sc->jme_eaddr[2] = 0x8C;
471 		sc->jme_eaddr[3] = (par0 >> 16) & 0xff;
472 		sc->jme_eaddr[4] = (par0 >> 8) & 0xff;
473 		sc->jme_eaddr[5] = par0 & 0xff;
474 	} else {
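		/* The station address is stored little-endian in PAR0/PAR1. */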
475 		sc->jme_eaddr[0] = (par0 >> 0) & 0xFF;
476 		sc->jme_eaddr[1] = (par0 >> 8) & 0xFF;
477 		sc->jme_eaddr[2] = (par0 >> 16) & 0xFF;
478 		sc->jme_eaddr[3] = (par0 >> 24) & 0xFF;
479 		sc->jme_eaddr[4] = (par1 >> 0) & 0xFF;
480 		sc->jme_eaddr[5] = (par1 >> 8) & 0xFF;
481 	}
482 }
483 
484 static void
485 jme_map_intr_vector(struct jme_softc *sc)
486 {
487 	uint32_t map[MSINUM_NUM_INTR_SOURCE / JME_MSI_MESSAGES];
488 
489 	bzero(map, sizeof(map));
490 
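	/*
	 * Each 32-bit MSINUM register packs the vector assignments for
	 * several interrupt sources, so sources that share a register
	 * index must be OR-ed into the same word.
	 */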
491 	/* Map Tx interrupt sources to MSI/MSIX vector 2. */
492 	map[MSINUM_REG_INDEX(N_INTR_TXQ0_COMP)] =
493 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ0_COMP);
494 	map[MSINUM_REG_INDEX(N_INTR_TXQ1_COMP)] |=
495 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ1_COMP);
496 	map[MSINUM_REG_INDEX(N_INTR_TXQ2_COMP)] |=
497 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ2_COMP);
498 	map[MSINUM_REG_INDEX(N_INTR_TXQ3_COMP)] |=
499 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ3_COMP);
500 	map[MSINUM_REG_INDEX(N_INTR_TXQ4_COMP)] |=
501 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ4_COMP);
502 	map[MSINUM_REG_INDEX(N_INTR_TXQ5_COMP)] |=
503 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ5_COMP);
504 	map[MSINUM_REG_INDEX(N_INTR_TXQ6_COMP)] |=
505 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ6_COMP);
506 	map[MSINUM_REG_INDEX(N_INTR_TXQ7_COMP)] |=
507 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ7_COMP);
508 	map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL)] |=
509 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL);
510 	map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL_TO)] |=
511 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL_TO);
512 
513 	/* Map Rx interrupt sources to MSI/MSIX vector 1. */
514 	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COMP)] =
515 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COMP);
516 	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COMP)] =
517 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COMP);
518 	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COMP)] =
519 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COMP);
520 	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COMP)] =
521 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COMP);
522 	map[MSINUM_REG_INDEX(N_INTR_RXQ0_DESC_EMPTY)] =
523 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_DESC_EMPTY);
524 	map[MSINUM_REG_INDEX(N_INTR_RXQ1_DESC_EMPTY)] =
525 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_DESC_EMPTY);
526 	map[MSINUM_REG_INDEX(N_INTR_RXQ2_DESC_EMPTY)] =
527 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_DESC_EMPTY);
528 	map[MSINUM_REG_INDEX(N_INTR_RXQ3_DESC_EMPTY)] =
529 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_DESC_EMPTY);
530 	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL)] =
531 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL);
532 	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL)] =
533 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL);
534 	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL)] =
535 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL);
536 	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL)] =
537 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL);
538 	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL_TO)] =
539 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL_TO);
540 	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL_TO)] =
541 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL_TO);
542 	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL_TO)] =
543 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL_TO);
544 	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL_TO)] =
545 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL_TO);
546 
547 	/* All other interrupt sources stay on MSI/MSIX vector 0. */
548 	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 0, map[0]);
549 	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 1, map[1]);
550 	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 2, map[2]);
551 	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 3, map[3]);
552 }
553 
554 static int
555 jme_attach(device_t dev)
556 {
557 	struct jme_softc *sc;
558 	struct ifnet *ifp;
559 	struct mii_softc *miisc;
560 	struct mii_data *mii;
561 	uint32_t reg;
562 	uint16_t burst;
563 	int error, i, msic, msixc, pmc;
564 
565 	error = 0;
566 	sc = device_get_softc(dev);
567 	sc->jme_dev = dev;
568 
569 	mtx_init(&sc->jme_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
570 	    MTX_DEF);
571 	callout_init_mtx(&sc->jme_tick_ch, &sc->jme_mtx, 0);
572 	TASK_INIT(&sc->jme_int_task, 0, jme_int_task, sc);
573 	TASK_INIT(&sc->jme_link_task, 0, jme_link_task, sc);
574 
575 	/*
576 	 * Map the device. JMC250 supports both memory-mapped and I/O
577 	 * register space access. Because I/O register access would
578 	 * require juggling separate BARs, it is a waste of time to
579 	 * use I/O register space access. JMC250 maps its entire
580 	 * register space with a single 16KB memory BAR.
581 	 */
582 	pci_enable_busmaster(dev);
583 	sc->jme_res_spec = jme_res_spec_mem;
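	/* Default to legacy INTx; MSI/MSIX may be negotiated below. */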
584 	sc->jme_irq_spec = jme_irq_spec_legacy;
585 	error = bus_alloc_resources(dev, sc->jme_res_spec, sc->jme_res);
586 	if (error != 0) {
587 		device_printf(dev, "cannot allocate memory resources.\n");
588 		goto fail;
589 	}
590 
591 	/* Allocate IRQ resources. */
592 	msixc = pci_msix_count(dev);
593 	msic = pci_msi_count(dev);
594 	if (bootverbose) {
595 		device_printf(dev, "MSIX count : %d\n", msixc);
596 		device_printf(dev, "MSI count : %d\n", msic);
597 	}
598 
599 	/* Prefer MSIX over MSI. */
600 	if (msix_disable == 0 || msi_disable == 0) {
601 		if (msix_disable == 0 && msixc == JME_MSIX_MESSAGES &&
602 		    pci_alloc_msix(dev, &msixc) == 0) {
603 			if (msixc == JME_MSIX_MESSAGES) {
604 				device_printf(dev, "Using %d MSIX messages.\n",
605 				    msixc);
606 				sc->jme_flags |= JME_FLAG_MSIX;
607 				sc->jme_irq_spec = jme_irq_spec_msi;
608 			} else
609 				pci_release_msi(dev);
610 		}
611 		if (msi_disable == 0 && (sc->jme_flags & JME_FLAG_MSIX) == 0 &&
612 		    msic == JME_MSI_MESSAGES &&
613 		    pci_alloc_msi(dev, &msic) == 0) {
614 			if (msic == JME_MSI_MESSAGES) {
615 				device_printf(dev, "Using %d MSI messages.\n",
616 				    msic);
617 				sc->jme_flags |= JME_FLAG_MSI;
618 				sc->jme_irq_spec = jme_irq_spec_msi;
619 			} else
620 				pci_release_msi(dev);
621 		}
622 		/* Map interrupt vectors 0, 1 and 2. */
623 		if ((sc->jme_flags & JME_FLAG_MSI) != 0 ||
624 		    (sc->jme_flags & JME_FLAG_MSIX) != 0)
625 			jme_map_intr_vector(sc);
626 	}
627 
628 	error = bus_alloc_resources(dev, sc->jme_irq_spec, sc->jme_irq);
629 	if (error != 0) {
630 		device_printf(dev, "cannot allocate IRQ resources.\n");
631 		goto fail;
632 	}
633 
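	/*
	 * Note that jme_rev holds the PCI device ID; the chip revision
	 * is read from the CHIPMODE register below.
	 */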
634 	sc->jme_rev = pci_get_device(dev);
635 	if ((sc->jme_rev & DEVICEID_JMC2XX_MASK) == DEVICEID_JMC260) {
636 		sc->jme_flags |= JME_FLAG_FASTETH;
637 		sc->jme_flags |= JME_FLAG_NOJUMBO;
638 	}
639 	reg = CSR_READ_4(sc, JME_CHIPMODE);
640 	sc->jme_chip_rev = (reg & CHIPMODE_REV_MASK) >> CHIPMODE_REV_SHIFT;
641 	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
642 	    CHIPMODE_NOT_FPGA)
643 		sc->jme_flags |= JME_FLAG_FPGA;
644 	if (bootverbose) {
645 		device_printf(dev, "PCI device revision : 0x%04x\n",
646 		    sc->jme_rev);
647 		device_printf(dev, "Chip revision : 0x%02x\n",
648 		    sc->jme_chip_rev);
649 		if ((sc->jme_flags & JME_FLAG_FPGA) != 0)
650 			device_printf(dev, "FPGA revision : 0x%04x\n",
651 			    (reg & CHIPMODE_FPGA_REV_MASK) >>
652 			    CHIPMODE_FPGA_REV_SHIFT);
653 	}
654 	if (sc->jme_chip_rev == 0xFF) {
655 		device_printf(dev, "Unknown chip revision : 0x%02x\n",
656 		    sc->jme_rev);
657 		error = ENXIO;
658 		goto fail;
659 	}
660 
661 	if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 2) {
662 		if ((sc->jme_rev & DEVICEID_JMC2XX_MASK) == DEVICEID_JMC260 &&
663 		    CHIPMODE_REVFM(sc->jme_chip_rev) == 2)
664 			sc->jme_flags |= JME_FLAG_DMA32BIT;
665 		sc->jme_flags |= JME_FLAG_TXCLK;
666 		sc->jme_flags |= JME_FLAG_HWMIB;
667 	}
668 
669 	/* Reset the ethernet controller. */
670 	jme_reset(sc);
671 
672 	/* Get station address. */
673 	reg = CSR_READ_4(sc, JME_SMBCSR);
674 	if ((reg & SMBCSR_EEPROM_PRESENT) != 0)
675 		error = jme_eeprom_macaddr(sc);
676 	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
677 		if (error != 0 && (bootverbose))
678 			device_printf(sc->jme_dev,
679 			    "ethernet hardware address not found in EEPROM.\n");
680 		jme_reg_macaddr(sc);
681 	}
682 
683 	/*
684 	 * Save PHY address.
685 	 * The integrated JR0211 has a fixed PHY address, whereas the
686 	 * FPGA version requires PHY probing to get the correct address.
687 	 */
688 	if ((sc->jme_flags & JME_FLAG_FPGA) == 0) {
689 		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
690 		    GPREG0_PHY_ADDR_MASK;
691 		if (bootverbose)
692 			device_printf(dev, "PHY is at address %d.\n",
693 			    sc->jme_phyaddr);
694 	} else
695 		sc->jme_phyaddr = 0;
696 
697 	/* Set max allowable DMA size. */
698 	if (pci_find_extcap(dev, PCIY_EXPRESS, &i) == 0) {
699 		sc->jme_flags |= JME_FLAG_PCIE;
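		/*
		 * Offset 0x08 is the PCIe device control register: the
		 * max read request size lives in bits 14:12 and the max
		 * payload size in bits 7:5.
		 */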
700 		burst = pci_read_config(dev, i + 0x08, 2);
701 		if (bootverbose) {
702 			device_printf(dev, "Read request size : %d bytes.\n",
703 			    128 << ((burst >> 12) & 0x07));
704 			device_printf(dev, "TLP payload size : %d bytes.\n",
705 			    128 << ((burst >> 5) & 0x07));
706 		}
707 		switch ((burst >> 12) & 0x07) {
708 		case 0:
709 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
710 			break;
711 		case 1:
712 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
713 			break;
714 		default:
715 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
716 			break;
717 		}
718 		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
719 	} else {
720 		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
721 		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
722 	}
723 	/* Create coalescing sysctl node. */
724 	jme_sysctl_node(sc);
725 	if ((error = jme_dma_alloc(sc)) != 0)
726 		goto fail;
727 
728 	ifp = sc->jme_ifp = if_alloc(IFT_ETHER);
729 	if (ifp == NULL) {
730 		device_printf(dev, "cannot allocate ifnet structure.\n");
731 		error = ENXIO;
732 		goto fail;
733 	}
734 
735 	ifp->if_softc = sc;
736 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
737 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
738 	ifp->if_ioctl = jme_ioctl;
739 	ifp->if_start = jme_start;
740 	ifp->if_init = jme_init;
741 	ifp->if_snd.ifq_drv_maxlen = JME_TX_RING_CNT - 1;
742 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
743 	IFQ_SET_READY(&ifp->if_snd);
744 	/* JMC250 supports Tx/Rx checksum offload as well as TSO. */
745 	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4;
746 	ifp->if_hwassist = JME_CSUM_FEATURES | CSUM_TSO;
747 	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) {
748 		sc->jme_flags |= JME_FLAG_PMCAP;
749 		ifp->if_capabilities |= IFCAP_WOL_MAGIC;
750 	}
751 	ifp->if_capenable = ifp->if_capabilities;
752 
753 	/* Set up MII bus. */
754 	if ((error = mii_phy_probe(dev, &sc->jme_miibus, jme_mediachange,
755 	    jme_mediastatus)) != 0) {
756 		device_printf(dev, "no PHY found!\n");
757 		goto fail;
758 	}
759 
760 	/*
761 	 * Force PHY to FPGA mode.
762 	 */
763 	if ((sc->jme_flags & JME_FLAG_FPGA) != 0) {
764 		mii = device_get_softc(sc->jme_miibus);
765 		if (mii->mii_instance != 0) {
766 			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
767 				if (miisc->mii_phy != 0) {
768 					sc->jme_phyaddr = miisc->mii_phy;
769 					break;
770 				}
771 			}
772 			if (sc->jme_phyaddr != 0) {
773 				device_printf(sc->jme_dev,
774 				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
775 				/* vendor magic. */
776 				jme_miibus_writereg(dev, sc->jme_phyaddr, 27,
777 				    0x0004);
778 			}
779 		}
780 	}
781 
782 	ether_ifattach(ifp, sc->jme_eaddr);
783 
784 	/* VLAN capability setup */
785 	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
786 	    IFCAP_VLAN_HWCSUM;
787 	ifp->if_capenable = ifp->if_capabilities;
788 
789 	/* Tell the upper layer(s) we support long frames. */
790 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
791 
792 	/* Create local taskq. */
793 	TASK_INIT(&sc->jme_tx_task, 1, jme_tx_task, ifp);
794 	sc->jme_tq = taskqueue_create_fast("jme_taskq", M_WAITOK,
795 	    taskqueue_thread_enqueue, &sc->jme_tq);
796 	if (sc->jme_tq == NULL) {
797 		device_printf(dev, "could not create taskqueue.\n");
798 		ether_ifdetach(ifp);
799 		error = ENXIO;
800 		goto fail;
801 	}
802 	taskqueue_start_threads(&sc->jme_tq, 1, PI_NET, "%s taskq",
803 	    device_get_nameunit(sc->jme_dev));
804 
805 	if ((sc->jme_flags & JME_FLAG_MSIX) != 0)
806 		msic = JME_MSIX_MESSAGES;
807 	else if ((sc->jme_flags & JME_FLAG_MSI) != 0)
808 		msic = JME_MSI_MESSAGES;
809 	else
810 		msic = 1;
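	/* Install the same interrupt filter on every allocated message. */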
811 	for (i = 0; i < msic; i++) {
812 		error = bus_setup_intr(dev, sc->jme_irq[i],
813 		    INTR_TYPE_NET | INTR_MPSAFE, jme_intr, NULL, sc,
814 		    &sc->jme_intrhand[i]);
815 		if (error != 0)
816 			break;
817 	}
818 
819 	if (error != 0) {
820 		device_printf(dev, "could not set up interrupt handler.\n");
821 		taskqueue_free(sc->jme_tq);
822 		sc->jme_tq = NULL;
823 		ether_ifdetach(ifp);
824 		goto fail;
825 	}
826 
827 fail:
828 	if (error != 0)
829 		jme_detach(dev);
830 
831 	return (error);
832 }
833 
834 static int
835 jme_detach(device_t dev)
836 {
837 	struct jme_softc *sc;
838 	struct ifnet *ifp;
839 	int i, msic;
840 
841 	sc = device_get_softc(dev);
842 
843 	ifp = sc->jme_ifp;
844 	if (device_is_attached(dev)) {
845 		JME_LOCK(sc);
846 		sc->jme_flags |= JME_FLAG_DETACH;
847 		jme_stop(sc);
848 		JME_UNLOCK(sc);
849 		callout_drain(&sc->jme_tick_ch);
850 		taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
851 		taskqueue_drain(sc->jme_tq, &sc->jme_tx_task);
852 		taskqueue_drain(taskqueue_swi, &sc->jme_link_task);
853 		ether_ifdetach(ifp);
854 	}
855 
856 	if (sc->jme_tq != NULL) {
857 		taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
858 		taskqueue_free(sc->jme_tq);
859 		sc->jme_tq = NULL;
860 	}
861 
862 	if (sc->jme_miibus != NULL) {
863 		device_delete_child(dev, sc->jme_miibus);
864 		sc->jme_miibus = NULL;
865 	}
866 	bus_generic_detach(dev);
867 	jme_dma_free(sc);
868 
869 	if (ifp != NULL) {
870 		if_free(ifp);
871 		sc->jme_ifp = NULL;
872 	}
873 
874 	/* Determine how many interrupt messages were in use. */
875 	if ((sc->jme_flags & JME_FLAG_MSIX) != 0)
876 		msic = JME_MSIX_MESSAGES;
877 	else if ((sc->jme_flags & JME_FLAG_MSI) != 0)
878 		msic = JME_MSI_MESSAGES;
879 	else
880 		msic = 1;
881 	for (i = 0; i < msic; i++) {
882 		if (sc->jme_intrhand[i] != NULL) {
883 			bus_teardown_intr(dev, sc->jme_irq[i],
884 			    sc->jme_intrhand[i]);
885 			sc->jme_intrhand[i] = NULL;
886 		}
887 	}
888 
889 	bus_release_resources(dev, sc->jme_irq_spec, sc->jme_irq);
890 	if ((sc->jme_flags & (JME_FLAG_MSIX | JME_FLAG_MSI)) != 0)
891 		pci_release_msi(dev);
892 	bus_release_resources(dev, sc->jme_res_spec, sc->jme_res);
893 	mtx_destroy(&sc->jme_mtx);
894 
895 	return (0);
896 }
897 
898 #define	JME_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
899 	    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
900 
901 static void
902 jme_sysctl_node(struct jme_softc *sc)
903 {
904 	struct sysctl_ctx_list *ctx;
905 	struct sysctl_oid_list *child, *parent;
906 	struct sysctl_oid *tree;
907 	struct jme_hw_stats *stats;
908 	int error;
909 
910 	stats = &sc->jme_stats;
911 	ctx = device_get_sysctl_ctx(sc->jme_dev);
912 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev));
913 
914 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_coal_to",
915 	    CTLTYPE_INT | CTLFLAG_RW, &sc->jme_tx_coal_to, 0,
916 	    sysctl_hw_jme_tx_coal_to, "I", "jme tx coalescing timeout");
917 
918 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_coal_pkt",
919 	    CTLTYPE_INT | CTLFLAG_RW, &sc->jme_tx_coal_pkt, 0,
920 	    sysctl_hw_jme_tx_coal_pkt, "I", "jme tx coalescing packet");
921 
922 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_coal_to",
923 	    CTLTYPE_INT | CTLFLAG_RW, &sc->jme_rx_coal_to, 0,
924 	    sysctl_hw_jme_rx_coal_to, "I", "jme rx coalescing timeout");
925 
926 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_coal_pkt",
927 	    CTLTYPE_INT | CTLFLAG_RW, &sc->jme_rx_coal_pkt, 0,
928 	    sysctl_hw_jme_rx_coal_pkt, "I", "jme rx coalescing packet");
929 
930 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
931 	    CTLTYPE_INT | CTLFLAG_RW, &sc->jme_process_limit, 0,
932 	    sysctl_hw_jme_proc_limit, "I",
933 	    "max number of Rx events to process");
934 
935 	/* Pull in device tunables. */
936 	sc->jme_process_limit = JME_PROC_DEFAULT;
937 	error = resource_int_value(device_get_name(sc->jme_dev),
938 	    device_get_unit(sc->jme_dev), "process_limit",
939 	    &sc->jme_process_limit);
940 	if (error == 0) {
941 		if (sc->jme_process_limit < JME_PROC_MIN ||
942 		    sc->jme_process_limit > JME_PROC_MAX) {
943 			device_printf(sc->jme_dev,
944 			    "process_limit value out of range; "
945 			    "using default: %d\n", JME_PROC_DEFAULT);
946 			sc->jme_process_limit = JME_PROC_DEFAULT;
947 		}
948 	}
949 
950 	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
951 	error = resource_int_value(device_get_name(sc->jme_dev),
952 	    device_get_unit(sc->jme_dev), "tx_coal_to", &sc->jme_tx_coal_to);
953 	if (error == 0) {
954 		if (sc->jme_tx_coal_to < PCCTX_COAL_TO_MIN ||
955 		    sc->jme_tx_coal_to > PCCTX_COAL_TO_MAX) {
956 			device_printf(sc->jme_dev,
957 			    "tx_coal_to value out of range; "
958 			    "using default: %d\n", PCCTX_COAL_TO_DEFAULT);
959 			sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
960 		}
961 	}
962 
963 	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
964 	error = resource_int_value(device_get_name(sc->jme_dev),
965 	    device_get_unit(sc->jme_dev), "tx_coal_pkt", &sc->jme_tx_coal_to);
966 	if (error == 0) {
967 		if (sc->jme_tx_coal_pkt < PCCTX_COAL_PKT_MIN ||
968 		    sc->jme_tx_coal_pkt > PCCTX_COAL_PKT_MAX) {
969 			device_printf(sc->jme_dev,
970 			    "tx_coal_pkt value out of range; "
971 			    "using default: %d\n", PCCTX_COAL_PKT_DEFAULT);
972 			sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
973 		}
974 	}
975 
976 	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
977 	error = resource_int_value(device_get_name(sc->jme_dev),
978 	    device_get_unit(sc->jme_dev), "rx_coal_to", &sc->jme_rx_coal_to);
979 	if (error == 0) {
980 		if (sc->jme_rx_coal_to < PCCRX_COAL_TO_MIN ||
981 		    sc->jme_rx_coal_to > PCCRX_COAL_TO_MAX) {
982 			device_printf(sc->jme_dev,
983 			    "rx_coal_to value out of range; "
984 			    "using default: %d\n", PCCRX_COAL_TO_DEFAULT);
985 			sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
986 		}
987 	}
988 
989 	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
990 	error = resource_int_value(device_get_name(sc->jme_dev),
991 	    device_get_unit(sc->jme_dev), "rx_coal_pkt", &sc->jme_rx_coal_to);
992 	if (error == 0) {
993 		if (sc->jme_rx_coal_pkt < PCCRX_COAL_PKT_MIN ||
994 		    sc->jme_rx_coal_pkt > PCCRX_COAL_PKT_MAX) {
995 			device_printf(sc->jme_dev,
996 			    "tx_coal_pkt value out of range; "
997 			    "using default: %d\n", PCCRX_COAL_PKT_DEFAULT);
998 			sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
999 		}
1000 	}
1001 
1002 	if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
1003 		return;
1004 
1005 	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
1006 	    NULL, "JME statistics");
1007 	parent = SYSCTL_CHILDREN(tree);
1008 
1009 	/* Rx statistics. */
1010 	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
1011 	    NULL, "Rx MAC statistics");
1012 	child = SYSCTL_CHILDREN(tree);
1013 	JME_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
1014 	    &stats->rx_good_frames, "Good frames");
1015 	JME_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
1016 	    &stats->rx_crc_errs, "CRC errors");
1017 	JME_SYSCTL_STAT_ADD32(ctx, child, "mii_errs",
1018 	    &stats->rx_mii_errs, "MII errors");
1019 	JME_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
1020 	    &stats->rx_fifo_oflows, "FIFO overflows");
1021 	JME_SYSCTL_STAT_ADD32(ctx, child, "desc_empty",
1022 	    &stats->rx_desc_empty, "Descriptor empty");
1023 	JME_SYSCTL_STAT_ADD32(ctx, child, "bad_frames",
1024 	    &stats->rx_bad_frames, "Bad frames");
1025 
1026 	/* Tx statistics. */
1027 	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
1028 	    NULL, "Tx MAC statistics");
1029 	child = SYSCTL_CHILDREN(tree);
1030 	JME_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
1031 	    &stats->tx_good_frames, "Good frames");
1032 	JME_SYSCTL_STAT_ADD32(ctx, child, "bad_frames",
1033 	    &stats->tx_bad_frames, "Bad frames");
1034 }
1035 
1036 #undef	JME_SYSCTL_STAT_ADD32
1037 
1038 struct jme_dmamap_arg {
1039 	bus_addr_t	jme_busaddr;
1040 };
1041 
1042 static void
1043 jme_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1044 {
1045 	struct jme_dmamap_arg *ctx;
1046 
1047 	if (error != 0)
1048 		return;
1049 
1050 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1051 
1052 	ctx = (struct jme_dmamap_arg *)arg;
1053 	ctx->jme_busaddr = segs[0].ds_addr;
1054 }
1055 
1056 static int
1057 jme_dma_alloc(struct jme_softc *sc)
1058 {
1059 	struct jme_dmamap_arg ctx;
1060 	struct jme_txdesc *txd;
1061 	struct jme_rxdesc *rxd;
1062 	bus_addr_t lowaddr, rx_ring_end, tx_ring_end;
1063 	int error, i;
1064 
1065 	lowaddr = BUS_SPACE_MAXADDR;
1066 	if ((sc->jme_flags & JME_FLAG_DMA32BIT) != 0)
1067 		lowaddr = BUS_SPACE_MAXADDR_32BIT;
1068 
1069 again:
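	/*
	 * The ring allocations below may be retried from this point
	 * with a 32-bit lowaddr if a ring crosses a 4GB boundary.
	 */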
1070 	/* Create parent ring tag. */
1071 	error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */
1072 	    1, 0,			/* algnmnt, boundary */
1073 	    lowaddr,			/* lowaddr */
1074 	    BUS_SPACE_MAXADDR,		/* highaddr */
1075 	    NULL, NULL,			/* filter, filterarg */
1076 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1077 	    0,				/* nsegments */
1078 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1079 	    0,				/* flags */
1080 	    NULL, NULL,			/* lockfunc, lockarg */
1081 	    &sc->jme_cdata.jme_ring_tag);
1082 	if (error != 0) {
1083 		device_printf(sc->jme_dev,
1084 		    "could not create parent ring DMA tag.\n");
1085 		goto fail;
1086 	}
1087 	/* Create tag for Tx ring. */
1088 	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
1089 	    JME_TX_RING_ALIGN, 0,	/* algnmnt, boundary */
1090 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1091 	    BUS_SPACE_MAXADDR,		/* highaddr */
1092 	    NULL, NULL,			/* filter, filterarg */
1093 	    JME_TX_RING_SIZE,		/* maxsize */
1094 	    1,				/* nsegments */
1095 	    JME_TX_RING_SIZE,		/* maxsegsize */
1096 	    0,				/* flags */
1097 	    NULL, NULL,			/* lockfunc, lockarg */
1098 	    &sc->jme_cdata.jme_tx_ring_tag);
1099 	if (error != 0) {
1100 		device_printf(sc->jme_dev,
1101 		    "could not allocate Tx ring DMA tag.\n");
1102 		goto fail;
1103 	}
1104 
1105 	/* Create tag for Rx ring. */
1106 	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
1107 	    JME_RX_RING_ALIGN, 0,	/* algnmnt, boundary */
1108 	    lowaddr,			/* lowaddr */
1109 	    BUS_SPACE_MAXADDR,		/* highaddr */
1110 	    NULL, NULL,			/* filter, filterarg */
1111 	    JME_RX_RING_SIZE,		/* maxsize */
1112 	    1,				/* nsegments */
1113 	    JME_RX_RING_SIZE,		/* maxsegsize */
1114 	    0,				/* flags */
1115 	    NULL, NULL,			/* lockfunc, lockarg */
1116 	    &sc->jme_cdata.jme_rx_ring_tag);
1117 	if (error != 0) {
1118 		device_printf(sc->jme_dev,
1119 		    "could not allocate Rx ring DMA tag.\n");
1120 		goto fail;
1121 	}
1122 
1123 	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
1124 	error = bus_dmamem_alloc(sc->jme_cdata.jme_tx_ring_tag,
1125 	    (void **)&sc->jme_rdata.jme_tx_ring,
1126 	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1127 	    &sc->jme_cdata.jme_tx_ring_map);
1128 	if (error != 0) {
1129 		device_printf(sc->jme_dev,
1130 		    "could not allocate DMA'able memory for Tx ring.\n");
1131 		goto fail;
1132 	}
1133 
1134 	ctx.jme_busaddr = 0;
1135 	error = bus_dmamap_load(sc->jme_cdata.jme_tx_ring_tag,
1136 	    sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
1137 	    JME_TX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1138 	if (error != 0 || ctx.jme_busaddr == 0) {
1139 		device_printf(sc->jme_dev,
1140 		    "could not load DMA'able memory for Tx ring.\n");
1141 		goto fail;
1142 	}
1143 	sc->jme_rdata.jme_tx_ring_paddr = ctx.jme_busaddr;
1144 
1145 	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
1146 	error = bus_dmamem_alloc(sc->jme_cdata.jme_rx_ring_tag,
1147 	    (void **)&sc->jme_rdata.jme_rx_ring,
1148 	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1149 	    &sc->jme_cdata.jme_rx_ring_map);
1150 	if (error != 0) {
1151 		device_printf(sc->jme_dev,
1152 		    "could not allocate DMA'able memory for Rx ring.\n");
1153 		goto fail;
1154 	}
1155 
1156 	ctx.jme_busaddr = 0;
1157 	error = bus_dmamap_load(sc->jme_cdata.jme_rx_ring_tag,
1158 	    sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
1159 	    JME_RX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1160 	if (error != 0 || ctx.jme_busaddr == 0) {
1161 		device_printf(sc->jme_dev,
1162 		    "could not load DMA'able memory for Rx ring.\n");
1163 		goto fail;
1164 	}
1165 	sc->jme_rdata.jme_rx_ring_paddr = ctx.jme_busaddr;
1166 
1167 	if (lowaddr != BUS_SPACE_MAXADDR_32BIT) {
1168 		/* Tx/Rx descriptor queue should reside within 4GB boundary. */
1169 		tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr +
1170 		    JME_TX_RING_SIZE;
1171 		rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr +
1172 		    JME_RX_RING_SIZE;
1173 		if ((JME_ADDR_HI(tx_ring_end) !=
1174 		    JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
1175 		    (JME_ADDR_HI(rx_ring_end) !=
1176 		     JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
1177 			device_printf(sc->jme_dev, "4GB boundary crossed, "
1178 			    "switching to 32bit DMA address mode.\n");
1179 			jme_dma_free(sc);
1180 			/* Limit DMA address space to 32bit and try again. */
1181 			lowaddr = BUS_SPACE_MAXADDR_32BIT;
1182 			goto again;
1183 		}
1184 	}
1185 
1186 	lowaddr = BUS_SPACE_MAXADDR;
1187 	if ((sc->jme_flags & JME_FLAG_DMA32BIT) != 0)
1188 		lowaddr = BUS_SPACE_MAXADDR_32BIT;
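	/* Unlike the rings, buffers are constrained only by JME_FLAG_DMA32BIT. */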
1189 	/* Create parent buffer tag. */
1190 	error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */
1191 	    1, 0,			/* algnmnt, boundary */
1192 	    lowaddr,			/* lowaddr */
1193 	    BUS_SPACE_MAXADDR,		/* highaddr */
1194 	    NULL, NULL,			/* filter, filterarg */
1195 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1196 	    0,				/* nsegments */
1197 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1198 	    0,				/* flags */
1199 	    NULL, NULL,			/* lockfunc, lockarg */
1200 	    &sc->jme_cdata.jme_buffer_tag);
1201 	if (error != 0) {
1202 		device_printf(sc->jme_dev,
1203 		    "could not create parent buffer DMA tag.\n");
1204 		goto fail;
1205 	}
1206 
1207 	/* Create shadow status block tag. */
1208 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1209 	    JME_SSB_ALIGN, 0,		/* algnmnt, boundary */
1210 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1211 	    BUS_SPACE_MAXADDR,		/* highaddr */
1212 	    NULL, NULL,			/* filter, filterarg */
1213 	    JME_SSB_SIZE,		/* maxsize */
1214 	    1,				/* nsegments */
1215 	    JME_SSB_SIZE,		/* maxsegsize */
1216 	    0,				/* flags */
1217 	    NULL, NULL,			/* lockfunc, lockarg */
1218 	    &sc->jme_cdata.jme_ssb_tag);
1219 	if (error != 0) {
1220 		device_printf(sc->jme_dev,
1221 		    "could not create shared status block DMA tag.\n");
1222 		goto fail;
1223 	}
1224 
1225 	/* Create tag for Tx buffers. */
1226 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1227 	    1, 0,			/* algnmnt, boundary */
1228 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1229 	    BUS_SPACE_MAXADDR,		/* highaddr */
1230 	    NULL, NULL,			/* filter, filterarg */
1231 	    JME_TSO_MAXSIZE,		/* maxsize */
1232 	    JME_MAXTXSEGS,		/* nsegments */
1233 	    JME_TSO_MAXSEGSIZE,		/* maxsegsize */
1234 	    0,				/* flags */
1235 	    NULL, NULL,			/* lockfunc, lockarg */
1236 	    &sc->jme_cdata.jme_tx_tag);
1237 	if (error != 0) {
1238 		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
1239 		goto fail;
1240 	}
1241 
1242 	/* Create tag for Rx buffers. */
1243 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1244 	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
1245 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1246 	    BUS_SPACE_MAXADDR,		/* highaddr */
1247 	    NULL, NULL,			/* filter, filterarg */
1248 	    MCLBYTES,			/* maxsize */
1249 	    1,				/* nsegments */
1250 	    MCLBYTES,			/* maxsegsize */
1251 	    0,				/* flags */
1252 	    NULL, NULL,			/* lockfunc, lockarg */
1253 	    &sc->jme_cdata.jme_rx_tag);
1254 	if (error != 0) {
1255 		device_printf(sc->jme_dev, "could not create Rx DMA tag.\n");
1256 		goto fail;
1257 	}
1258 
1259 	/*
1260 	 * Allocate DMA'able memory and load the DMA map for shared
1261 	 * status block.
1262 	 */
1263 	error = bus_dmamem_alloc(sc->jme_cdata.jme_ssb_tag,
1264 	    (void **)&sc->jme_rdata.jme_ssb_block,
1265 	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1266 	    &sc->jme_cdata.jme_ssb_map);
1267 	if (error != 0) {
1268 		device_printf(sc->jme_dev, "could not allocate DMA'able "
1269 		    "memory for shared status block.\n");
1270 		goto fail;
1271 	}
1272 
1273 	ctx.jme_busaddr = 0;
1274 	error = bus_dmamap_load(sc->jme_cdata.jme_ssb_tag,
1275 	    sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,
1276 	    JME_SSB_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1277 	if (error != 0 || ctx.jme_busaddr == 0) {
1278 		device_printf(sc->jme_dev, "could not load DMA'able memory "
1279 		    "for shared status block.\n");
1280 		goto fail;
1281 	}
1282 	sc->jme_rdata.jme_ssb_block_paddr = ctx.jme_busaddr;
1283 
1284 	/* Create DMA maps for Tx buffers. */
1285 	for (i = 0; i < JME_TX_RING_CNT; i++) {
1286 		txd = &sc->jme_cdata.jme_txdesc[i];
1287 		txd->tx_m = NULL;
1288 		txd->tx_dmamap = NULL;
1289 		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag, 0,
1290 		    &txd->tx_dmamap);
1291 		if (error != 0) {
1292 			device_printf(sc->jme_dev,
1293 			    "could not create Tx dmamap.\n");
1294 			goto fail;
1295 		}
1296 	}
1297 	/* Create DMA maps for Rx buffers. */
1298 	if ((error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
1299 	    &sc->jme_cdata.jme_rx_sparemap)) != 0) {
1300 		device_printf(sc->jme_dev,
1301 		    "could not create spare Rx dmamap.\n");
1302 		goto fail;
1303 	}
1304 	for (i = 0; i < JME_RX_RING_CNT; i++) {
1305 		rxd = &sc->jme_cdata.jme_rxdesc[i];
1306 		rxd->rx_m = NULL;
1307 		rxd->rx_dmamap = NULL;
1308 		error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
1309 		    &rxd->rx_dmamap);
1310 		if (error != 0) {
1311 			device_printf(sc->jme_dev,
1312 			    "could not create Rx dmamap.\n");
1313 			goto fail;
1314 		}
1315 	}
1316 
1317 fail:
1318 	return (error);
1319 }
1320 
1321 static void
1322 jme_dma_free(struct jme_softc *sc)
1323 {
1324 	struct jme_txdesc *txd;
1325 	struct jme_rxdesc *rxd;
1326 	int i;
1327 
1328 	/* Tx ring */
1329 	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
1330 		if (sc->jme_cdata.jme_tx_ring_map)
1331 			bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
1332 			    sc->jme_cdata.jme_tx_ring_map);
1333 		if (sc->jme_cdata.jme_tx_ring_map &&
1334 		    sc->jme_rdata.jme_tx_ring)
1335 			bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
1336 			    sc->jme_rdata.jme_tx_ring,
1337 			    sc->jme_cdata.jme_tx_ring_map);
1338 		sc->jme_rdata.jme_tx_ring = NULL;
1339 		sc->jme_cdata.jme_tx_ring_map = NULL;
1340 		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
1341 		sc->jme_cdata.jme_tx_ring_tag = NULL;
1342 	}
1343 	/* Rx ring */
1344 	if (sc->jme_cdata.jme_rx_ring_tag != NULL) {
1345 		if (sc->jme_cdata.jme_rx_ring_map)
1346 			bus_dmamap_unload(sc->jme_cdata.jme_rx_ring_tag,
1347 			    sc->jme_cdata.jme_rx_ring_map);
1348 		if (sc->jme_cdata.jme_rx_ring_map &&
1349 		    sc->jme_rdata.jme_rx_ring)
1350 			bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag,
1351 			    sc->jme_rdata.jme_rx_ring,
1352 			    sc->jme_cdata.jme_rx_ring_map);
1353 		sc->jme_rdata.jme_rx_ring = NULL;
1354 		sc->jme_cdata.jme_rx_ring_map = NULL;
1355 		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
1356 		sc->jme_cdata.jme_rx_ring_tag = NULL;
1357 	}
1358 	/* Tx buffers */
1359 	if (sc->jme_cdata.jme_tx_tag != NULL) {
1360 		for (i = 0; i < JME_TX_RING_CNT; i++) {
1361 			txd = &sc->jme_cdata.jme_txdesc[i];
1362 			if (txd->tx_dmamap != NULL) {
1363 				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1364 				    txd->tx_dmamap);
1365 				txd->tx_dmamap = NULL;
1366 			}
1367 		}
1368 		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1369 		sc->jme_cdata.jme_tx_tag = NULL;
1370 	}
1371 	/* Rx buffers */
1372 	if (sc->jme_cdata.jme_rx_tag != NULL) {
1373 		for (i = 0; i < JME_RX_RING_CNT; i++) {
1374 			rxd = &sc->jme_cdata.jme_rxdesc[i];
1375 			if (rxd->rx_dmamap != NULL) {
1376 				bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
1377 				    rxd->rx_dmamap);
1378 				rxd->rx_dmamap = NULL;
1379 			}
1380 		}
1381 		if (sc->jme_cdata.jme_rx_sparemap != NULL) {
1382 			bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
1383 			    sc->jme_cdata.jme_rx_sparemap);
1384 			sc->jme_cdata.jme_rx_sparemap = NULL;
1385 		}
1386 		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
1387 		sc->jme_cdata.jme_rx_tag = NULL;
1388 	}
1389 
1390 	/* Shared status block. */
1391 	if (sc->jme_cdata.jme_ssb_tag != NULL) {
1392 		if (sc->jme_cdata.jme_ssb_map)
1393 			bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
1394 			    sc->jme_cdata.jme_ssb_map);
1395 		if (sc->jme_cdata.jme_ssb_map && sc->jme_rdata.jme_ssb_block)
1396 			bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
1397 			    sc->jme_rdata.jme_ssb_block,
1398 			    sc->jme_cdata.jme_ssb_map);
1399 		sc->jme_rdata.jme_ssb_block = NULL;
1400 		sc->jme_cdata.jme_ssb_map = NULL;
1401 		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1402 		sc->jme_cdata.jme_ssb_tag = NULL;
1403 	}
1404 
1405 	if (sc->jme_cdata.jme_buffer_tag != NULL) {
1406 		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
1407 		sc->jme_cdata.jme_buffer_tag = NULL;
1408 	}
1409 	if (sc->jme_cdata.jme_ring_tag != NULL) {
1410 		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
1411 		sc->jme_cdata.jme_ring_tag = NULL;
1412 	}
1413 }
1414 
1415 /*
1416  *	Make sure the interface is stopped at reboot time.
1417  */
1418 static int
1419 jme_shutdown(device_t dev)
1420 {
1421 
1422 	return (jme_suspend(dev));
1423 }
1424 
1425 /*
1426  * Unlike other ethernet controllers, JMC250 requires the link
1427  * speed to be explicitly reset to 10/100Mbps, as a gigabit
1428  * link consumes more than 375mA.
1429  * Note, we reset the link speed to 10/100Mbps with
1430  * auto-negotiation, but we don't know whether that operation
1431  * will succeed, as we have no control after powering off. If
1432  * the renegotiation fails, WOL may not work. Running at 1Gbps
1433  * draws more than the 375mA at 3.3V allowed by the PCI
1434  * specification, and that would result in power to the
1435  * ethernet controller being shut down completely.
1436  *
1437  * TODO
1438  *  Save current negotiated media speed/duplex/flow-control
1439  *  to softc and restore the same link again after resuming.
1440  *  PHY handling such as power down/resetting to 100Mbps may
1441  *  be better handled in the PHY driver's suspend method.
1442  */
1443 static void
1444 jme_setlinkspeed(struct jme_softc *sc)
1445 {
1446 	struct mii_data *mii;
1447 	int aneg, i;
1448 
1449 	JME_LOCK_ASSERT(sc);
1450 
1451 	mii = device_get_softc(sc->jme_miibus);
1452 	mii_pollstat(mii);
1453 	aneg = 0;
1454 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
1455 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
1456 		case IFM_10_T:
1457 		case IFM_100_TX:
1458 			return;
1459 		case IFM_1000_T:
1460 			aneg++;
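			/* FALLTHROUGH */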
1461 		default:
1462 			break;
1463 		}
1464 	}
1465 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1466 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1467 	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1468 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1469 	    BMCR_AUTOEN | BMCR_STARTNEG);
1470 	DELAY(1000);
1471 	if (aneg != 0) {
1472 		/* Poll link state until jme(4) get a 10/100 link. */
1473 		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1474 			mii_pollstat(mii);
1475 			if ((mii->mii_media_status & IFM_AVALID) != 0) {
1476 				switch (IFM_SUBTYPE(mii->mii_media_active)) {
1477 				case IFM_10_T:
1478 				case IFM_100_TX:
1479 					jme_mac_config(sc);
1480 					return;
1481 				default:
1482 					break;
1483 				}
1484 			}
1485 			JME_UNLOCK(sc);
1486 			pause("jmelnk", hz);
1487 			JME_LOCK(sc);
1488 		}
1489 		if (i == MII_ANEGTICKS_GIGE)
1490 			device_printf(sc->jme_dev, "establishing link failed, "
1491 			    "WOL may not work!");
1492 	}
1493 	/*
1494 	 * No link, force MAC to have 100Mbps, full-duplex link.
1495 	 * This is the last resort and may/may not work.
1496 	 */
1497 	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1498 	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1499 	jme_mac_config(sc);
1500 }
1501 
1502 static void
1503 jme_setwol(struct jme_softc *sc)
1504 {
1505 	struct ifnet *ifp;
1506 	uint32_t gpr, pmcs;
1507 	uint16_t pmstat;
1508 	int pmc;
1509 
1510 	JME_LOCK_ASSERT(sc);
1511 
1512 	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1513 		/* Remove Tx MAC/offload clock to save more power. */
1514 		if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
1515 			CSR_WRITE_4(sc, JME_GHC, CSR_READ_4(sc, JME_GHC) &
1516 			    ~(GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100 |
1517 			    GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000));
1518 		/* No PME capability, PHY power down. */
1519 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1520 		    MII_BMCR, BMCR_PDOWN);
1521 		return;
1522 	}
1523 
1524 	ifp = sc->jme_ifp;
1525 	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
1526 	pmcs = CSR_READ_4(sc, JME_PMCS);
1527 	pmcs &= ~PMCS_WOL_ENB_MASK;
1528 	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
1529 		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
1530 		/* Enable PME message. */
1531 		gpr |= GPREG0_PME_ENB;
1532 		/* For gigabit controllers, reset link speed to 10/100. */
1533 		if ((sc->jme_flags & JME_FLAG_FASTETH) == 0)
1534 			jme_setlinkspeed(sc);
1535 	}
1536 
1537 	CSR_WRITE_4(sc, JME_PMCS, pmcs);
1538 	CSR_WRITE_4(sc, JME_GPREG0, gpr);
1539 	/* Remove Tx MAC/offload clock to save more power. */
1540 	if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
1541 		CSR_WRITE_4(sc, JME_GHC, CSR_READ_4(sc, JME_GHC) &
1542 		    ~(GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100 |
1543 		    GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000));
1544 	/* Request PME. */
1545 	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
1546 	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1547 	if ((ifp->if_capenable & IFCAP_WOL) != 0)
1548 		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1549 	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1550 	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
1551 		/* No WOL, PHY power down. */
1552 		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1553 		    MII_BMCR, BMCR_PDOWN);
1554 	}
1555 }
1556 
1557 static int
1558 jme_suspend(device_t dev)
1559 {
1560 	struct jme_softc *sc;
1561 
1562 	sc = device_get_softc(dev);
1563 
1564 	JME_LOCK(sc);
1565 	jme_stop(sc);
1566 	jme_setwol(sc);
1567 	JME_UNLOCK(sc);
1568 
1569 	return (0);
1570 }
1571 
1572 static int
1573 jme_resume(device_t dev)
1574 {
1575 	struct jme_softc *sc;
1576 	struct ifnet *ifp;
1577 	uint16_t pmstat;
1578 	int pmc;
1579 
1580 	sc = device_get_softc(dev);
1581 
1582 	JME_LOCK(sc);
1583 	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) == 0) {
1584 		pmstat = pci_read_config(sc->jme_dev,
1585 		    pmc + PCIR_POWER_STATUS, 2);
1586 		/* Disable PME and clear PME status. */
1587 		pmstat &= ~PCIM_PSTAT_PMEENABLE;
1588 		pci_write_config(sc->jme_dev,
1589 		    pmc + PCIR_POWER_STATUS, pmstat, 2);
1590 	}
1591 	ifp = sc->jme_ifp;
1592 	if ((ifp->if_flags & IFF_UP) != 0) {
1593 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1594 		jme_init_locked(sc);
1595 	}
1596 
1597 	JME_UNLOCK(sc);
1598 
1599 	return (0);
1600 }
1601 
1602 static int
1603 jme_encap(struct jme_softc *sc, struct mbuf **m_head)
1604 {
1605 	struct jme_txdesc *txd;
1606 	struct jme_desc *desc;
1607 	struct mbuf *m;
1608 	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
1609 	int error, i, nsegs, prod;
1610 	uint32_t cflags, tso_segsz;
1611 
1612 	JME_LOCK_ASSERT(sc);
1613 
1614 	M_ASSERTPKTHDR((*m_head));
1615 
1616 	if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
1617 		/*
1618 		 * Due to the adherence to NDIS specification JMC250
1619 		 * assumes upper stack computed TCP pseudo checksum
1620 		 * without including payload length. This breaks
1621 		 * checksum offload for TSO case so recompute TCP
1622 		 * pseudo checksum for JMC250. Hopefully this wouldn't
1623 		 * be much burden on modern CPUs.
1624 		 */
1625 		struct ether_header *eh;
1626 		struct ip *ip;
1627 		struct tcphdr *tcp;
1628 		uint32_t ip_off, poff;
1629 
1630 		if (M_WRITABLE(*m_head) == 0) {
1631 			/* Get a writable copy. */
1632 			m = m_dup(*m_head, M_DONTWAIT);
1633 			m_freem(*m_head);
1634 			if (m == NULL) {
1635 				*m_head = NULL;
1636 				return (ENOBUFS);
1637 			}
1638 			*m_head = m;
1639 		}
1640 		ip_off = sizeof(struct ether_header);
1641 		m = m_pullup(*m_head, ip_off);
1642 		if (m == NULL) {
1643 			*m_head = NULL;
1644 			return (ENOBUFS);
1645 		}
1646 		eh = mtod(m, struct ether_header *);
1647 		/* Check the existence of VLAN tag. */
1648 		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1649 			ip_off = sizeof(struct ether_vlan_header);
1650 			m = m_pullup(m, ip_off);
1651 			if (m == NULL) {
1652 				*m_head = NULL;
1653 				return (ENOBUFS);
1654 			}
1655 		}
1656 		m = m_pullup(m, ip_off + sizeof(struct ip));
1657 		if (m == NULL) {
1658 			*m_head = NULL;
1659 			return (ENOBUFS);
1660 		}
1661 		ip = (struct ip *)(mtod(m, char *) + ip_off);
1662 		poff = ip_off + (ip->ip_hl << 2);
1663 		m = m_pullup(m, poff + sizeof(struct tcphdr));
1664 		if (m == NULL) {
1665 			*m_head = NULL;
1666 			return (ENOBUFS);
1667 		}
1668 		tcp = (struct tcphdr *)(mtod(m, char *) + poff);
1669 		/*
1670 		 * Reset IP checksum and recompute TCP pseudo
1671 		 * checksum that NDIS specification requires.
1672 		 */
1673 		ip->ip_sum = 0;
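		/*
		 * A TSO packet that carries no TCP payload needs no
		 * segmentation; downgrade it to plain checksum offload.
		 */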
1674 		if (poff + (tcp->th_off << 2) == m->m_pkthdr.len) {
1675 			tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
1676 			    ip->ip_dst.s_addr,
1677 			    htons((tcp->th_off << 2) + IPPROTO_TCP));
1678 			/* No need to TSO, force IP checksum offload. */
1679 			(*m_head)->m_pkthdr.csum_flags &= ~CSUM_TSO;
1680 			(*m_head)->m_pkthdr.csum_flags |= CSUM_IP;
1681 		} else
1682 			tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
1683 			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1684 		*m_head = m;
1685 	}
1686 
1687 	prod = sc->jme_cdata.jme_tx_prod;
1688 	txd = &sc->jme_cdata.jme_txdesc[prod];
1689 
1690 	error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
1691 	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
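	/*
	 * EFBIG means the mbuf chain needs more DMA segments than
	 * allowed, so collapse it into at most JME_MAXTXSEGS
	 * segments and retry the load once.
	 */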
1692 	if (error == EFBIG) {
1693 		m = m_collapse(*m_head, M_DONTWAIT, JME_MAXTXSEGS);
1694 		if (m == NULL) {
1695 			m_freem(*m_head);
1696 			*m_head = NULL;
1697 			return (ENOMEM);
1698 		}
1699 		*m_head = m;
1700 		error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
1701 		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1702 		if (error != 0) {
1703 			m_freem(*m_head);
1704 			*m_head = NULL;
1705 			return (error);
1706 		}
1707 	} else if (error != 0)
1708 		return (error);
1709 	if (nsegs == 0) {
1710 		m_freem(*m_head);
1711 		*m_head = NULL;
1712 		return (EIO);
1713 	}
1714 
1715 	/*
1716 	 * Check descriptor overrun. Leave one free descriptor.
1717 	 * Since we always use 64bit address mode for transmitting,
1718 	 * each Tx request requires one more dummy descriptor.
1719 	 */
1720 	if (sc->jme_cdata.jme_tx_cnt + nsegs + 1 > JME_TX_RING_CNT - 1) {
1721 		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
1722 		return (ENOBUFS);
1723 	}
1724 
1725 	m = *m_head;
1726 	cflags = 0;
1727 	tso_segsz = 0;
1728 	/* Configure checksum offload and TSO. */
1729 	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
1730 		tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz <<
1731 		    JME_TD_MSS_SHIFT;
1732 		cflags |= JME_TD_TSO;
1733 	} else {
1734 		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
1735 			cflags |= JME_TD_IPCSUM;
1736 		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
1737 			cflags |= JME_TD_TCPCSUM;
1738 		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
1739 			cflags |= JME_TD_UDPCSUM;
1740 	}
1741 	/* Configure VLAN. */
1742 	if ((m->m_flags & M_VLANTAG) != 0) {
1743 		cflags |= (m->m_pkthdr.ether_vtag & JME_TD_VLAN_MASK);
1744 		cflags |= JME_TD_VLAN_TAG;
1745 	}
1746 
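	/*
	 * Set up the first (command) descriptor. In 64bit address
	 * mode this slot carries no DMA address: buflen holds the
	 * TSO MSS (if any) and addr_hi the total packet length,
	 * while the payload segments follow in the descriptors
	 * chained below.
	 */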
1747 	desc = &sc->jme_rdata.jme_tx_ring[prod];
1748 	desc->flags = htole32(cflags);
1749 	desc->buflen = htole32(tso_segsz);
1750 	desc->addr_hi = htole32(m->m_pkthdr.len);
1751 	desc->addr_lo = 0;
1752 	sc->jme_cdata.jme_tx_cnt++;
1753 	JME_DESC_INC(prod, JME_TX_RING_CNT);
1754 	for (i = 0; i < nsegs; i++) {
1755 		desc = &sc->jme_rdata.jme_tx_ring[prod];
1756 		desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT);
1757 		desc->buflen = htole32(txsegs[i].ds_len);
1758 		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
1759 		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
1760 		sc->jme_cdata.jme_tx_cnt++;
1761 		JME_DESC_INC(prod, JME_TX_RING_CNT);
1762 	}
1763 
1764 	/* Update producer index. */
1765 	sc->jme_cdata.jme_tx_prod = prod;
1766 	/*
1767 	 * Finally request an interrupt and give ownership of the
1768 	 * first descriptor to the hardware.
1769 	 */
1770 	desc = txd->tx_desc;
1771 	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
1772 
1773 	txd->tx_m = m;
1774 	txd->tx_ndesc = nsegs + 1;
1775 
1776 	/* Sync descriptors. */
1777 	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
1778 	    BUS_DMASYNC_PREWRITE);
1779 	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
1780 	    sc->jme_cdata.jme_tx_ring_map,
1781 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1782 
1783 	return (0);
1784 }
1785 
1786 static void
1787 jme_tx_task(void *arg, int pending)
1788 {
1789 	struct ifnet *ifp;
1790 
1791 	ifp = (struct ifnet *)arg;
1792 	jme_start(ifp);
1793 }
1794 
1795 static void
1796 jme_start(struct ifnet *ifp)
1797 {
1798 	struct jme_softc *sc;
1799 	struct mbuf *m_head;
1800 	int enq;
1801 
1802 	sc = ifp->if_softc;
1803 
1804 	JME_LOCK(sc);
1805 
1806 	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT)
1807 		jme_txeof(sc);
1808 
1809 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1810 	    IFF_DRV_RUNNING || (sc->jme_flags & JME_FLAG_LINK) == 0) {
1811 		JME_UNLOCK(sc);
1812 		return;
1813 	}
1814 
1815 	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
1816 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1817 		if (m_head == NULL)
1818 			break;
1819 		/*
1820 		 * Pack the data into the transmit ring. If we
1821 		 * don't have room, set the OACTIVE flag and wait
1822 		 * for the NIC to drain the ring.
1823 		 */
1824 		if (jme_encap(sc, &m_head)) {
1825 			if (m_head == NULL)
1826 				break;
1827 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1828 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1829 			break;
1830 		}
1831 
1832 		enq++;
1833 		/*
1834 		 * If there's a BPF listener, bounce a copy of this frame
1835 		 * to him.
1836 		 */
1837 		ETHER_BPF_MTAP(ifp, m_head);
1838 	}
1839 
1840 	if (enq > 0) {
1841 		/*
1842 		 * Reading TXCSR takes a very long time under heavy load,
1843 		 * so cache the TXCSR value and write the ORed value with
1844 		 * the kick command to TXCSR. This saves one register
1845 		 * access cycle.
1846 		 */
1847 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1848 		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
1849 		/* Set a timeout in case the chip goes out to lunch. */
1850 		sc->jme_watchdog_timer = JME_TX_TIMEOUT;
1851 	}
1852 
1853 	JME_UNLOCK(sc);
1854 }
1855 
1856 static void
1857 jme_watchdog(struct jme_softc *sc)
1858 {
1859 	struct ifnet *ifp;
1860 
1861 	JME_LOCK_ASSERT(sc);
1862 
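	/*
	 * A zero timer means the watchdog is unarmed; a non-zero
	 * value after the decrement means it has not expired yet.
	 */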
1863 	if (sc->jme_watchdog_timer == 0 || --sc->jme_watchdog_timer)
1864 		return;
1865 
1866 	ifp = sc->jme_ifp;
1867 	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1868 		if_printf(sc->jme_ifp, "watchdog timeout (missed link)\n");
1869 		ifp->if_oerrors++;
1870 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1871 		jme_init_locked(sc);
1872 		return;
1873 	}
1874 	jme_txeof(sc);
1875 	if (sc->jme_cdata.jme_tx_cnt == 0) {
1876 		if_printf(sc->jme_ifp,
1877 		    "watchdog timeout (missed Tx interrupts) -- recovering\n");
1878 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1879 			taskqueue_enqueue(sc->jme_tq, &sc->jme_tx_task);
1880 		return;
1881 	}
1882 
1883 	if_printf(sc->jme_ifp, "watchdog timeout\n");
1884 	ifp->if_oerrors++;
1885 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1886 	jme_init_locked(sc);
1887 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1888 		taskqueue_enqueue(sc->jme_tq, &sc->jme_tx_task);
1889 }
1890 
1891 static int
1892 jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1893 {
1894 	struct jme_softc *sc;
1895 	struct ifreq *ifr;
1896 	struct mii_data *mii;
1897 	uint32_t reg;
1898 	int error, mask;
1899 
1900 	sc = ifp->if_softc;
1901 	ifr = (struct ifreq *)data;
1902 	error = 0;
1903 	switch (cmd) {
1904 	case SIOCSIFMTU:
1905 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
1906 		    ((sc->jme_flags & JME_FLAG_NOJUMBO) != 0 &&
1907 		    ifr->ifr_mtu > JME_MAX_MTU)) {
1908 			error = EINVAL;
1909 			break;
1910 		}
1911 
1912 		if (ifp->if_mtu != ifr->ifr_mtu) {
1913 			/*
1914 			 * No special configuration is required when the
1915 			 * interface MTU is changed, but the availability of
1916 			 * TSO/Tx checksum offload should be checked against
1917 			 * the new MTU size as the Tx FIFO is just 2K.
1918 			 */
1919 			JME_LOCK(sc);
1920 			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
1921 				ifp->if_capenable &=
1922 				    ~(IFCAP_TXCSUM | IFCAP_TSO4);
1923 				ifp->if_hwassist &=
1924 				    ~(JME_CSUM_FEATURES | CSUM_TSO);
1925 				VLAN_CAPABILITIES(ifp);
1926 			}
1927 			ifp->if_mtu = ifr->ifr_mtu;
1928 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1929 				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1930 				jme_init_locked(sc);
1931 			}
1932 			JME_UNLOCK(sc);
1933 		}
1934 		break;
1935 	case SIOCSIFFLAGS:
1936 		JME_LOCK(sc);
1937 		if ((ifp->if_flags & IFF_UP) != 0) {
1938 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1939 				if (((ifp->if_flags ^ sc->jme_if_flags)
1940 				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
1941 					jme_set_filter(sc);
1942 			} else {
1943 				if ((sc->jme_flags & JME_FLAG_DETACH) == 0)
1944 					jme_init_locked(sc);
1945 			}
1946 		} else {
1947 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1948 				jme_stop(sc);
1949 		}
1950 		sc->jme_if_flags = ifp->if_flags;
1951 		JME_UNLOCK(sc);
1952 		break;
1953 	case SIOCADDMULTI:
1954 	case SIOCDELMULTI:
1955 		JME_LOCK(sc);
1956 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1957 			jme_set_filter(sc);
1958 		JME_UNLOCK(sc);
1959 		break;
1960 	case SIOCSIFMEDIA:
1961 	case SIOCGIFMEDIA:
1962 		mii = device_get_softc(sc->jme_miibus);
1963 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1964 		break;
1965 	case SIOCSIFCAP:
1966 		JME_LOCK(sc);
1967 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1968 		if ((mask & IFCAP_TXCSUM) != 0 &&
1969 		    ifp->if_mtu < JME_TX_FIFO_SIZE) {
1970 			if ((IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
1971 				ifp->if_capenable ^= IFCAP_TXCSUM;
1972 				if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
1973 					ifp->if_hwassist |= JME_CSUM_FEATURES;
1974 				else
1975 					ifp->if_hwassist &= ~JME_CSUM_FEATURES;
1976 			}
1977 		}
1978 		if ((mask & IFCAP_RXCSUM) != 0 &&
1979 		    (IFCAP_RXCSUM & ifp->if_capabilities) != 0) {
1980 			ifp->if_capenable ^= IFCAP_RXCSUM;
1981 			reg = CSR_READ_4(sc, JME_RXMAC);
1982 			reg &= ~RXMAC_CSUM_ENB;
1983 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
1984 				reg |= RXMAC_CSUM_ENB;
1985 			CSR_WRITE_4(sc, JME_RXMAC, reg);
1986 		}
1987 		if ((mask & IFCAP_TSO4) != 0 &&
1988 		    ifp->if_mtu < JME_TX_FIFO_SIZE) {
1989 			if ((IFCAP_TSO4 & ifp->if_capabilities) != 0) {
1990 				ifp->if_capenable ^= IFCAP_TSO4;
1991 				if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
1992 					ifp->if_hwassist |= CSUM_TSO;
1993 				else
1994 					ifp->if_hwassist &= ~CSUM_TSO;
1995 			}
1996 		}
1997 		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
1998 		    (IFCAP_WOL_MAGIC & ifp->if_capabilities) != 0)
1999 			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
2000 		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
2001 		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
2002 			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
2003 		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
2004 		    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
2005 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2006 			jme_set_vlan(sc);
2007 		}
2008 		JME_UNLOCK(sc);
2009 		VLAN_CAPABILITIES(ifp);
2010 		break;
2011 	default:
2012 		error = ether_ioctl(ifp, cmd, data);
2013 		break;
2014 	}
2015 
2016 	return (error);
2017 }
2018 
2019 static void
2020 jme_mac_config(struct jme_softc *sc)
2021 {
2022 	struct mii_data *mii;
2023 	uint32_t ghc, gpreg, rxmac, txmac, txpause;
2024 	uint32_t txclk;
2025 
2026 	JME_LOCK_ASSERT(sc);
2027 
2028 	mii = device_get_softc(sc->jme_miibus);
2029 
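	/* Pulse a GHC reset, then reprogram the MAC from the resolved media. */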
2030 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2031 	DELAY(10);
2032 	CSR_WRITE_4(sc, JME_GHC, 0);
2033 	ghc = 0;
2034 	txclk = 0;
2035 	rxmac = CSR_READ_4(sc, JME_RXMAC);
2036 	rxmac &= ~RXMAC_FC_ENB;
2037 	txmac = CSR_READ_4(sc, JME_TXMAC);
2038 	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
2039 	txpause = CSR_READ_4(sc, JME_TXPFC);
2040 	txpause &= ~TXPFC_PAUSE_ENB;
2041 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
2042 		ghc |= GHC_FULL_DUPLEX;
2043 		rxmac &= ~RXMAC_COLL_DET_ENB;
2044 		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
2045 		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
2046 		    TXMAC_FRAME_BURST);
2047 #ifdef notyet
2048 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
2049 			txpause |= TXPFC_PAUSE_ENB;
2050 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
2051 			rxmac |= RXMAC_FC_ENB;
2052 #endif
2053 		/* Disable retry transmit timer/retry limit. */
2054 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
2055 		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
2056 	} else {
2057 		rxmac |= RXMAC_COLL_DET_ENB;
2058 		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
2059 		/* Enable retry transmit timer/retry limit. */
2060 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
2061 		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
2062 	}
2063 	/* Reprogram Tx/Rx MACs with resolved speed/duplex. */
2064 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
2065 	case IFM_10_T:
2066 		ghc |= GHC_SPEED_10;
2067 		txclk |= GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100;
2068 		break;
2069 	case IFM_100_TX:
2070 		ghc |= GHC_SPEED_100;
2071 		txclk |= GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100;
2072 		break;
2073 	case IFM_1000_T:
2074 		if ((sc->jme_flags & JME_FLAG_FASTETH) != 0)
2075 			break;
2076 		ghc |= GHC_SPEED_1000;
2077 		txclk |= GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000;
2078 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
2079 			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
2080 		break;
2081 	default:
2082 		break;
2083 	}
2084 	if (sc->jme_rev == DEVICEID_JMC250 &&
2085 	    sc->jme_chip_rev == DEVICEREVID_JMC250_A2) {
2086 		/*
2087 		 * Work around an occasional packet loss issue of the
2088 		 * JMC250 A2 when it runs on half-duplex media.
2089 		 */
2090 		gpreg = CSR_READ_4(sc, JME_GPREG1);
2091 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
2092 			gpreg &= ~GPREG1_HDPX_FIX;
2093 		else
2094 			gpreg |= GPREG1_HDPX_FIX;
2095 		CSR_WRITE_4(sc, JME_GPREG1, gpreg);
2096 		/* Workaround CRC errors at 100Mbps on JMC250 A2. */
2097 		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
2098 			/* Extend interface FIFO depth. */
2099 			jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
2100 			    0x1B, 0x0000);
2101 		} else {
2102 			/* Select default interface FIFO depth. */
2103 			jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
2104 			    0x1B, 0x0004);
2105 		}
2106 	}
2107 	if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
2108 		ghc |= txclk;
2109 	CSR_WRITE_4(sc, JME_GHC, ghc);
2110 	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
2111 	CSR_WRITE_4(sc, JME_TXMAC, txmac);
2112 	CSR_WRITE_4(sc, JME_TXPFC, txpause);
2113 }
2114 
2115 static void
2116 jme_link_task(void *arg, int pending)
2117 {
2118 	struct jme_softc *sc;
2119 	struct mii_data *mii;
2120 	struct ifnet *ifp;
2121 	struct jme_txdesc *txd;
2122 	bus_addr_t paddr;
2123 	int i;
2124 
2125 	sc = (struct jme_softc *)arg;
2126 
2127 	JME_LOCK(sc);
2128 	mii = device_get_softc(sc->jme_miibus);
2129 	ifp = sc->jme_ifp;
2130 	if (mii == NULL || ifp == NULL ||
2131 	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2132 		JME_UNLOCK(sc);
2133 		return;
2134 	}
2135 
2136 	sc->jme_flags &= ~JME_FLAG_LINK;
2137 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
2138 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
2139 		case IFM_10_T:
2140 		case IFM_100_TX:
2141 			sc->jme_flags |= JME_FLAG_LINK;
2142 			break;
2143 		case IFM_1000_T:
2144 			if ((sc->jme_flags & JME_FLAG_FASTETH) != 0)
2145 				break;
2146 			sc->jme_flags |= JME_FLAG_LINK;
2147 			break;
2148 		default:
2149 			break;
2150 		}
2151 	}
2152 
2153 	/*
2154 	 * Disabling the Rx/Tx MACs has a side effect of resetting
2155 	 * the JME_TXNDA/JME_RXNDA registers to the first address
2156 	 * of the Tx/Rx descriptor rings. So the driver should
2157 	 * reset its internal producer/consumer pointers and
2158 	 * reclaim any allocated resources. Note, just saving the
2159 	 * values of the JME_TXNDA and JME_RXNDA registers before
2160 	 * stopping the MAC and restoring them afterwards is not
2161 	 * sufficient to guarantee correct MAC state, because
2162 	 * stopping the MAC can take a while and the hardware
2163 	 * might have updated the JME_TXNDA/JME_RXNDA registers
2164 	 * during the stop operation.
2165 	 */
2166 	/* Block execution of task. */
2167 	taskqueue_block(sc->jme_tq);
2168 	/* Disable interrupts and stop driver. */
2169 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2170 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2171 	callout_stop(&sc->jme_tick_ch);
2172 	sc->jme_watchdog_timer = 0;
2173 
2174 	/* Stop receiver/transmitter. */
2175 	jme_stop_rx(sc);
2176 	jme_stop_tx(sc);
2177 
2178 	/* XXX Drain all queued tasks. */
2179 	JME_UNLOCK(sc);
2180 	taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
2181 	taskqueue_drain(sc->jme_tq, &sc->jme_tx_task);
2182 	JME_LOCK(sc);
2183 
2184 	jme_rxintr(sc, JME_RX_RING_CNT);
2185 	if (sc->jme_cdata.jme_rxhead != NULL)
2186 		m_freem(sc->jme_cdata.jme_rxhead);
2187 	JME_RXCHAIN_RESET(sc);
2188 	jme_txeof(sc);
2189 	if (sc->jme_cdata.jme_tx_cnt != 0) {
2190 		/* Remove queued packets for transmit. */
2191 		for (i = 0; i < JME_TX_RING_CNT; i++) {
2192 			txd = &sc->jme_cdata.jme_txdesc[i];
2193 			if (txd->tx_m != NULL) {
2194 				bus_dmamap_sync(
2195 				    sc->jme_cdata.jme_tx_tag,
2196 				    txd->tx_dmamap,
2197 				    BUS_DMASYNC_POSTWRITE);
2198 				bus_dmamap_unload(
2199 				    sc->jme_cdata.jme_tx_tag,
2200 				    txd->tx_dmamap);
2201 				m_freem(txd->tx_m);
2202 				txd->tx_m = NULL;
2203 				txd->tx_ndesc = 0;
2204 				ifp->if_oerrors++;
2205 			}
2206 		}
2207 	}
2208 
2209 	/*
2210 	 * Reuse configured Rx descriptors and reset the
2211 	 * producer/consumer indices.
2212 	 */
2213 	sc->jme_cdata.jme_rx_cons = 0;
2214 	atomic_set_int(&sc->jme_morework, 0);
2215 	jme_init_tx_ring(sc);
2216 	/* Initialize shadow status block. */
2217 	jme_init_ssb(sc);
2218 
2219 	/* Program MAC with resolved speed/duplex/flow-control. */
2220 	if ((sc->jme_flags & JME_FLAG_LINK) != 0) {
2221 		jme_mac_config(sc);
2222 		jme_stats_clear(sc);
2223 
2224 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
2225 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2226 
2227 		/* Set Tx ring address to the hardware. */
2228 		paddr = JME_TX_RING_ADDR(sc, 0);
2229 		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2230 		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2231 
2232 		/* Set Rx ring address to the hardware. */
2233 		paddr = JME_RX_RING_ADDR(sc, 0);
2234 		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2235 		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2236 
2237 		/* Restart receiver/transmitter. */
2238 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
2239 		    RXCSR_RXQ_START);
2240 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
2241 	}
2242 
2243 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2244 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2245 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2246 	/* Unblock execution of task. */
2247 	taskqueue_unblock(sc->jme_tq);
2248 	/* Reenable interrupts. */
2249 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2250 
2251 	JME_UNLOCK(sc);
2252 }
2253 
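/*
 * Interrupt filter routine: it runs in primary interrupt context,
 * so it only masks further interrupts and defers the actual
 * processing to jme_int_task() in taskqueue context.
 */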
2254 static int
2255 jme_intr(void *arg)
2256 {
2257 	struct jme_softc *sc;
2258 	uint32_t status;
2259 
2260 	sc = (struct jme_softc *)arg;
2261 
2262 	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
2263 	if (status == 0 || status == 0xFFFFFFFF)
2264 		return (FILTER_STRAY);
2265 	/* Disable interrupts. */
2266 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2267 	taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);
2268 
2269 	return (FILTER_HANDLED);
2270 }
2271 
2272 static void
2273 jme_int_task(void *arg, int pending)
2274 {
2275 	struct jme_softc *sc;
2276 	struct ifnet *ifp;
2277 	uint32_t status;
2278 	int more;
2279 
2280 	sc = (struct jme_softc *)arg;
2281 	ifp = sc->jme_ifp;
2282 
2283 	status = CSR_READ_4(sc, JME_INTR_STATUS);
2284 	more = atomic_readandclear_int(&sc->jme_morework);
2285 	if (more != 0) {
2286 		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO;
2287 		more = 0;
2288 	}
2289 	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
2290 		goto done;
2291 	/* Reset PCC counter/timer and Ack interrupts. */
2292 	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
2293 	if ((status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) != 0)
2294 		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
2295 	if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0)
2296 		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
2297 	CSR_WRITE_4(sc, JME_INTR_STATUS, status);
2298 	more = 0;
2299 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2300 		if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0) {
2301 			more = jme_rxintr(sc, sc->jme_process_limit);
2302 			if (more != 0)
2303 				atomic_set_int(&sc->jme_morework, 1);
2304 		}
2305 		if ((status & INTR_RXQ_DESC_EMPTY) != 0) {
2306 			/*
2307 			 * Notify the hardware of the availability
2308 			 * of new Rx buffers.
2309 			 * Reading RXCSR takes a very long time under
2310 			 * heavy load, so cache the RXCSR value and
2311 			 * write the ORed value with the kick command
2312 			 * to RXCSR. This saves one register access
2313 			 * cycle.
2314 			 */
2315 			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
2316 			    RXCSR_RX_ENB | RXCSR_RXQ_START);
2317 		}
2318 		/*
2319 		 * Reclaiming Tx buffers is deferred to make jme(4) run
2320 		 * without holding locks.
2321 		 */
2322 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2323 			taskqueue_enqueue(sc->jme_tq, &sc->jme_tx_task);
2324 	}
2325 
2326 	if (more != 0 || (CSR_READ_4(sc, JME_INTR_STATUS) & JME_INTRS) != 0) {
2327 		taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);
2328 		return;
2329 	}
2330 done:
2331 	/* Reenable interrupts. */
2332 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2333 }
2334 
2335 static void
2336 jme_txeof(struct jme_softc *sc)
2337 {
2338 	struct ifnet *ifp;
2339 	struct jme_txdesc *txd;
2340 	uint32_t status;
2341 	int cons, nsegs;
2342 
2343 	JME_LOCK_ASSERT(sc);
2344 
2345 	ifp = sc->jme_ifp;
2346 
2347 	cons = sc->jme_cdata.jme_tx_cons;
2348 	if (cons == sc->jme_cdata.jme_tx_prod)
2349 		return;
2350 
2351 	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2352 	    sc->jme_cdata.jme_tx_ring_map,
2353 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2354 
2355 	/*
2356 	 * Go through our Tx list and free mbufs for those
2357 	 * frames which have been transmitted.
2358 	 */
2359 	for (; cons != sc->jme_cdata.jme_tx_prod;) {
2360 		txd = &sc->jme_cdata.jme_txdesc[cons];
2361 		status = le32toh(txd->tx_desc->flags);
2362 		if ((status & JME_TD_OWN) == JME_TD_OWN)
2363 			break;
2364 
2365 		if ((status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) != 0)
2366 			ifp->if_oerrors++;
2367 		else {
2368 			ifp->if_opackets++;
2369 			if ((status & JME_TD_COLLISION) != 0)
2370 				ifp->if_collisions +=
2371 				    le32toh(txd->tx_desc->buflen) &
2372 				    JME_TD_BUF_LEN_MASK;
2373 		}
2374 		/*
2375 		 * Only the first descriptor of a multi-descriptor
2376 		 * transmission is updated, so the driver has to skip all
2377 		 * chained buffers of the transmitted frame. In other
2378 		 * words, the JME_TD_OWN bit is valid only in the first
2379 		 * descriptor of a multi-descriptor transmission.
2380 		 */
2381 		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
2382 			sc->jme_rdata.jme_tx_ring[cons].flags = 0;
2383 			JME_DESC_INC(cons, JME_TX_RING_CNT);
2384 		}
2385 
2386 		/* Reclaim transferred mbufs. */
2387 		bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
2388 		    BUS_DMASYNC_POSTWRITE);
2389 		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
2390 
2391 		KASSERT(txd->tx_m != NULL,
2392 		    ("%s: freeing NULL mbuf!\n", __func__));
2393 		m_freem(txd->tx_m);
2394 		txd->tx_m = NULL;
2395 		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
2396 		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
2397 		    ("%s: Active Tx desc counter was garbled\n", __func__));
2398 		txd->tx_ndesc = 0;
2399 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2400 	}
2401 	sc->jme_cdata.jme_tx_cons = cons;
2402 	/* Unarm the watchdog timer when no descriptors are pending. */
2403 	if (sc->jme_cdata.jme_tx_cnt == 0)
2404 		sc->jme_watchdog_timer = 0;
2405 
2406 	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2407 	    sc->jme_cdata.jme_tx_ring_map,
2408 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2409 }
2410 
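/*
 * Hand a descriptor back to the hardware unchanged so that its
 * existing mbuf is reused, e.g. after an Rx error or an mbuf
 * allocation failure.
 */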
2411 static __inline void
2412 jme_discard_rxbuf(struct jme_softc *sc, int cons)
2413 {
2414 	struct jme_desc *desc;
2415 
2416 	desc = &sc->jme_rdata.jme_rx_ring[cons];
2417 	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2418 	desc->buflen = htole32(MCLBYTES);
2419 }
2420 
2421 /* Receive a frame. */
2422 static void
2423 jme_rxeof(struct jme_softc *sc)
2424 {
2425 	struct ifnet *ifp;
2426 	struct jme_desc *desc;
2427 	struct jme_rxdesc *rxd;
2428 	struct mbuf *mp, *m;
2429 	uint32_t flags, status;
2430 	int cons, count, nsegs;
2431 
2432 	ifp = sc->jme_ifp;
2433 
2434 	cons = sc->jme_cdata.jme_rx_cons;
2435 	desc = &sc->jme_rdata.jme_rx_ring[cons];
2436 	flags = le32toh(desc->flags);
2437 	status = le32toh(desc->buflen);
2438 	nsegs = JME_RX_NSEGS(status);
2439 	sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2440 	if ((status & JME_RX_ERR_STAT) != 0) {
2441 		ifp->if_ierrors++;
2442 		jme_discard_rxbuf(sc, sc->jme_cdata.jme_rx_cons);
2443 #ifdef JME_SHOW_ERRORS
2444 		device_printf(sc->jme_dev, "%s: receive error = 0x%b\n",
2445 		    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
2446 #endif
2447 		sc->jme_cdata.jme_rx_cons += nsegs;
2448 		sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
2449 		return;
2450 	}
2451 
2452 	for (count = 0; count < nsegs; count++,
2453 	    JME_DESC_INC(cons, JME_RX_RING_CNT)) {
2454 		rxd = &sc->jme_cdata.jme_rxdesc[cons];
2455 		mp = rxd->rx_m;
2456 		/* Add a new receive buffer to the ring. */
2457 		if (jme_newbuf(sc, rxd) != 0) {
2458 			ifp->if_iqdrops++;
2459 			/* Reuse buffer. */
2460 			for (; count < nsegs; count++) {
2461 				jme_discard_rxbuf(sc, cons);
2462 				JME_DESC_INC(cons, JME_RX_RING_CNT);
2463 			}
2464 			if (sc->jme_cdata.jme_rxhead != NULL) {
2465 				m_freem(sc->jme_cdata.jme_rxhead);
2466 				JME_RXCHAIN_RESET(sc);
2467 			}
2468 			break;
2469 		}
2470 
2471 		/*
2472 		 * Assume we've received a full sized frame.
2473 		 * The actual size is fixed up when we encounter the end
2474 		 * of a multi-segmented frame.
2475 		 */
2476 		mp->m_len = MCLBYTES;
2477 
2478 		/* Chain received mbufs. */
2479 		if (sc->jme_cdata.jme_rxhead == NULL) {
2480 			sc->jme_cdata.jme_rxhead = mp;
2481 			sc->jme_cdata.jme_rxtail = mp;
2482 		} else {
2483 			/*
2484 			 * Receive processor can receive a maximum frame
2485 			 * size of 65535 bytes.
2486 			 */
2487 			mp->m_flags &= ~M_PKTHDR;
2488 			sc->jme_cdata.jme_rxtail->m_next = mp;
2489 			sc->jme_cdata.jme_rxtail = mp;
2490 		}
2491 
2492 		if (count == nsegs - 1) {
2493 			/* Last desc. for this frame. */
2494 			m = sc->jme_cdata.jme_rxhead;
2495 			m->m_flags |= M_PKTHDR;
2496 			m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
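			/*
			 * Fix up segment lengths: the first cluster
			 * loses JME_RX_PAD_BYTES to the alignment pad,
			 * middle clusters are full MCLBYTES and the
			 * last cluster holds the remainder.
			 */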
2497 			if (nsegs > 1) {
2498 				/* Set first mbuf size. */
2499 				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2500 				/* Set last mbuf size. */
2501 				mp->m_len = sc->jme_cdata.jme_rxlen -
2502 				    ((MCLBYTES - JME_RX_PAD_BYTES) +
2503 				    (MCLBYTES * (nsegs - 2)));
2504 			} else
2505 				m->m_len = sc->jme_cdata.jme_rxlen;
2506 			m->m_pkthdr.rcvif = ifp;
2507 
2508 			/*
2509 			 * Account for the 10 bytes of auto padding used
2510 			 * to align the IP header on a 32bit boundary.
2511 			 * Also note the CRC bytes are automatically
2512 			 * removed by the hardware.
2513 			 */
2514 			m->m_data += JME_RX_PAD_BYTES;
2515 
2516 			/* Set checksum information. */
2517 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
2518 			    (flags & JME_RD_IPV4) != 0) {
2519 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2520 				if ((flags & JME_RD_IPCSUM) != 0)
2521 					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2522 				if (((flags & JME_RD_MORE_FRAG) == 0) &&
2523 				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2524 				    (JME_RD_TCP | JME_RD_TCPCSUM) ||
2525 				    (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2526 				    (JME_RD_UDP | JME_RD_UDPCSUM))) {
2527 					m->m_pkthdr.csum_flags |=
2528 					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2529 					m->m_pkthdr.csum_data = 0xffff;
2530 				}
2531 			}
2532 
2533 			/* Check for VLAN tagged packets. */
2534 			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
2535 			    (flags & JME_RD_VLAN_TAG) != 0) {
2536 				m->m_pkthdr.ether_vtag =
2537 				    flags & JME_RD_VLAN_MASK;
2538 				m->m_flags |= M_VLANTAG;
2539 			}
2540 
2541 			ifp->if_ipackets++;
2542 			/* Pass it on. */
2543 			(*ifp->if_input)(ifp, m);
2544 
2545 			/* Reset mbuf chains. */
2546 			JME_RXCHAIN_RESET(sc);
2547 		}
2548 	}
2549 
2550 	sc->jme_cdata.jme_rx_cons += nsegs;
2551 	sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
2552 }
2553 
2554 static int
2555 jme_rxintr(struct jme_softc *sc, int count)
2556 {
2557 	struct jme_desc *desc;
2558 	int nsegs, prog, pktlen;
2559 
2560 	bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2561 	    sc->jme_cdata.jme_rx_ring_map,
2562 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2563 
2564 	for (prog = 0; count > 0; prog++) {
2565 		desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
2566 		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2567 			break;
2568 		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2569 			break;
2570 		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2571 		/*
2572 		 * Check the number of segments against received bytes.
2573 		 * A non-matching value would indicate that the hardware
2574 		 * is still trying to update the Rx descriptors. I'm not
2575 		 * sure whether this check is needed.
2576 		 */
2577 		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2578 		if (nsegs != ((pktlen + (MCLBYTES - 1)) / MCLBYTES))
2579 			break;
2580 		prog++;
2581 		/* Received a frame. */
2582 		jme_rxeof(sc);
2583 		count -= nsegs;
2584 	}
2585 
2586 	if (prog > 0)
2587 		bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2588 		    sc->jme_cdata.jme_rx_ring_map,
2589 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2590 
2591 	return (count > 0 ? 0 : EAGAIN);
2592 }
2593 
2594 static void
2595 jme_tick(void *arg)
2596 {
2597 	struct jme_softc *sc;
2598 	struct mii_data *mii;
2599 
2600 	sc = (struct jme_softc *)arg;
2601 
2602 	JME_LOCK_ASSERT(sc);
2603 
2604 	mii = device_get_softc(sc->jme_miibus);
2605 	mii_tick(mii);
2606 	/*
2607 	 * Reclaim Tx buffers that have been completed. It's not
2608 	 * strictly needed here, but it releases allocated mbuf
2609 	 * chains faster and bounds the maximum delay to one hz.
2610 	 */
2611 	jme_txeof(sc);
2612 	jme_stats_update(sc);
2613 	jme_watchdog(sc);
2614 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2615 }
2616 
2617 static void
2618 jme_reset(struct jme_softc *sc)
2619 {
2620 
2621 	/* Stop receiver, transmitter. */
2622 	jme_stop_rx(sc);
2623 	jme_stop_tx(sc);
2624 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2625 	DELAY(10);
2626 	CSR_WRITE_4(sc, JME_GHC, 0);
2627 }
2628 
2629 static void
2630 jme_init(void *xsc)
2631 {
2632 	struct jme_softc *sc;
2633 
2634 	sc = (struct jme_softc *)xsc;
2635 	JME_LOCK(sc);
2636 	jme_init_locked(sc);
2637 	JME_UNLOCK(sc);
2638 }
2639 
2640 static void
2641 jme_init_locked(struct jme_softc *sc)
2642 {
2643 	struct ifnet *ifp;
2644 	struct mii_data *mii;
2645 	uint8_t eaddr[ETHER_ADDR_LEN];
2646 	bus_addr_t paddr;
2647 	uint32_t reg;
2648 	int error;
2649 
2650 	JME_LOCK_ASSERT(sc);
2651 
2652 	ifp = sc->jme_ifp;
2653 	mii = device_get_softc(sc->jme_miibus);
2654 
2655 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2656 		return;
2657 	/*
2658 	 * Cancel any pending I/O.
2659 	 */
2660 	jme_stop(sc);
2661 
2662 	/*
2663 	 * Reset the chip to a known state.
2664 	 */
2665 	jme_reset(sc);
2666 
2667 	/* Init descriptors. */
2668 	error = jme_init_rx_ring(sc);
2669 	if (error != 0) {
2670 		device_printf(sc->jme_dev,
2671 		    "%s: initialization failed: no memory for Rx buffers.\n",
2672 		    __func__);
2673 		jme_stop(sc);
2674 		return;
2675 	}
2676 	jme_init_tx_ring(sc);
2677 	/* Initialize shadow status block. */
2678 	jme_init_ssb(sc);
2679 
2680 	/* Reprogram the station address. */
2681 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2682 	CSR_WRITE_4(sc, JME_PAR0,
2683 	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2684 	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
2685 
2686 	/*
2687 	 * Configure Tx queue.
2688 	 *  Tx priority queue weight value : 0
2689 	 *  Tx FIFO threshold for processing next packet : 16QW
2690 	 *  Maximum Tx DMA length : 512
2691 	 *  Allow Tx DMA burst.
2692 	 */
2693 	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2694 	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2695 	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2696 	sc->jme_txcsr |= sc->jme_tx_dma_size;
2697 	sc->jme_txcsr |= TXCSR_DMA_BURST;
2698 	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2699 
2700 	/* Set Tx descriptor counter. */
2701 	CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT);
2702 
2703 	/* Set Tx ring address to the hardware. */
2704 	paddr = JME_TX_RING_ADDR(sc, 0);
2705 	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2706 	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2707 
2708 	/* Configure TxMAC parameters. */
2709 	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2710 	reg |= TXMAC_THRESH_1_PKT;
2711 	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2712 	CSR_WRITE_4(sc, JME_TXMAC, reg);
2713 
2714 	/*
2715 	 * Configure Rx queue.
2716 	 *  FIFO full threshold for transmitting Tx pause packet : 128T
2717 	 *  FIFO threshold for processing next packet : 128QW
2718 	 *  Rx queue 0 select
2719 	 *  Max Rx DMA length : 128
2720 	 *  Rx descriptor retry : 32
2721 	 *  Rx descriptor retry time gap : 256ns
2722 	 *  Don't receive runt/bad frame.
2723 	 */
2724 	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2725 	/*
2726 	 * Since the Rx FIFO size is 4K bytes, receiving frames
2727 	 * larger than 4K bytes will suffer from Rx FIFO overruns.
2728 	 * So decrease the FIFO threshold to reduce FIFO overruns
2729 	 * for frames larger than 4000 bytes.
2730 	 * For the best performance with standard MTU sized frames
2731 	 * use the maximum allowable FIFO threshold, 128QW. Note
2732 	 * this does not hold on chips with full mask version >= 2;
2733 	 * for those controllers 64QW and 128QW are not valid values.
2734 	 */
2735 	if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 2)
2736 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2737 	else {
2738 		if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
2739 		    ETHER_CRC_LEN) > JME_RX_FIFO_SIZE)
2740 			sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2741 		else
2742 			sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2743 	}
2744 	sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
2745 	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2746 	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2747 	CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
2748 
2749 	/* Set Rx descriptor counter. */
2750 	CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT);
2751 
2752 	/* Set Rx ring address to the hardware. */
2753 	paddr = JME_RX_RING_ADDR(sc, 0);
2754 	CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2755 	CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2756 
2757 	/* Clear receive filter. */
2758 	CSR_WRITE_4(sc, JME_RXMAC, 0);
2759 	/* Set up the receive filter. */
2760 	jme_set_filter(sc);
2761 	jme_set_vlan(sc);
2762 
2763 	/*
2764 	 * Disable all WOL bits as WOL can interfere with normal Rx
2765 	 * operation. Also clear WOL detection status bits.
2766 	 */
2767 	reg = CSR_READ_4(sc, JME_PMCS);
2768 	reg &= ~PMCS_WOL_ENB_MASK;
2769 	CSR_WRITE_4(sc, JME_PMCS, reg);
2770 
2771 	reg = CSR_READ_4(sc, JME_RXMAC);
2772 	/*
2773 	 * Pad 10 bytes right before the received frame. This greatly
2774 	 * helps Rx performance on strict-alignment architectures as
2775 	 * the frame need not be copied to align the payload.
2776 	 */
2777 	reg |= RXMAC_PAD_10BYTES;
2778 	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2779 		reg |= RXMAC_CSUM_ENB;
2780 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2781 
2782 	/* Configure general purpose reg0 */
2783 	reg = CSR_READ_4(sc, JME_GPREG0);
2784 	reg &= ~GPREG0_PCC_UNIT_MASK;
2785 	/* Set PCC timer resolution to micro-seconds unit. */
2786 	reg |= GPREG0_PCC_UNIT_US;
2787 	/*
2788 	 * Disable all shadow register posting as we have to read
2789 	 * JME_INTR_STATUS register in jme_int_task. Also it seems
2790 	 * that it's hard to synchronize interrupt status between
2791 	 * hardware and software with shadow posting due to
2792 	 * requirements of bus_dmamap_sync(9).
2793 	 */
2794 	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2795 	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2796 	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2797 	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2798 	/* Disable posting of DW0. */
2799 	reg &= ~GPREG0_POST_DW0_ENB;
2800 	/* Clear PME message. */
2801 	reg &= ~GPREG0_PME_ENB;
2802 	/* Set PHY address. */
2803 	reg &= ~GPREG0_PHY_ADDR_MASK;
2804 	reg |= sc->jme_phyaddr;
2805 	CSR_WRITE_4(sc, JME_GPREG0, reg);
2806 
2807 	/* Configure Tx queue 0 packet completion coalescing. */
2808 	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
2809 	    PCCTX_COAL_TO_MASK;
2810 	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
2811 	    PCCTX_COAL_PKT_MASK;
2812 	reg |= PCCTX_COAL_TXQ0;
2813 	CSR_WRITE_4(sc, JME_PCCTX, reg);
2814 
2815 	/* Configure Rx queue 0 packet completion coalescing. */
2816 	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
2817 	    PCCRX_COAL_TO_MASK;
2818 	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
2819 	    PCCRX_COAL_PKT_MASK;
2820 	CSR_WRITE_4(sc, JME_PCCRX0, reg);
2821 
2822 	/* Configure shadow status block but don't enable posting. */
2823 	paddr = sc->jme_rdata.jme_ssb_block_paddr;
2824 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2825 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2826 
2827 	/* Disable Timer 1 and Timer 2. */
2828 	CSR_WRITE_4(sc, JME_TIMER1, 0);
2829 	CSR_WRITE_4(sc, JME_TIMER2, 0);
2830 
2831 	/* Configure retry transmit period, retry limit value. */
2832 	CSR_WRITE_4(sc, JME_TXTRHD,
2833 	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2834 	    TXTRHD_RT_PERIOD_MASK) |
2835 	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2836 	    TXTRHD_RT_LIMIT_MASK));
2837 
2838 	/* Disable RSS. */
2839 	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
2840 
2841 	/* Initialize the interrupt mask. */
2842 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2843 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2844 
2845 	/*
2846 	 * Enabling Tx/Rx DMA engines and Rx queue processing is
2847 	 * done after detection of valid link in jme_link_task.
2848 	 */
2849 
2850 	sc->jme_flags &= ~JME_FLAG_LINK;
2851 	/* Set the current media. */
2852 	mii_mediachg(mii);
2853 
2854 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2855 
2856 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2857 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2858 }
2859 
2860 static void
2861 jme_stop(struct jme_softc *sc)
2862 {
2863 	struct ifnet *ifp;
2864 	struct jme_txdesc *txd;
2865 	struct jme_rxdesc *rxd;
2866 	int i;
2867 
2868 	JME_LOCK_ASSERT(sc);
2869 	/*
2870 	 * Mark the interface down and cancel the watchdog timer.
2871 	 */
2872 	ifp = sc->jme_ifp;
2873 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2874 	sc->jme_flags &= ~JME_FLAG_LINK;
2875 	callout_stop(&sc->jme_tick_ch);
2876 	sc->jme_watchdog_timer = 0;
2877 
2878 	/*
2879 	 * Disable interrupts.
2880 	 */
2881 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2882 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2883 
2884 	/* Disable updating shadow status block. */
2885 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2886 	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2887 
2888 	/* Stop receiver, transmitter. */
2889 	jme_stop_rx(sc);
2890 	jme_stop_tx(sc);
2891 
2892 	/* Reclaim Rx/Tx buffers that have been completed. */
2893 	jme_rxintr(sc, JME_RX_RING_CNT);
2894 	if (sc->jme_cdata.jme_rxhead != NULL)
2895 		m_freem(sc->jme_cdata.jme_rxhead);
2896 	JME_RXCHAIN_RESET(sc);
2897 	jme_txeof(sc);
2898 	/*
2899 	 * Free RX and TX mbufs still in the queues.
2900 	 */
2901 	for (i = 0; i < JME_RX_RING_CNT; i++) {
2902 		rxd = &sc->jme_cdata.jme_rxdesc[i];
2903 		if (rxd->rx_m != NULL) {
2904 			bus_dmamap_sync(sc->jme_cdata.jme_rx_tag,
2905 			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
2906 			bus_dmamap_unload(sc->jme_cdata.jme_rx_tag,
2907 			    rxd->rx_dmamap);
2908 			m_freem(rxd->rx_m);
2909 			rxd->rx_m = NULL;
2910 		}
2911 	}
2912 	for (i = 0; i < JME_TX_RING_CNT; i++) {
2913 		txd = &sc->jme_cdata.jme_txdesc[i];
2914 		if (txd->tx_m != NULL) {
2915 			bus_dmamap_sync(sc->jme_cdata.jme_tx_tag,
2916 			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2917 			bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
2918 			    txd->tx_dmamap);
2919 			m_freem(txd->tx_m);
2920 			txd->tx_m = NULL;
2921 			txd->tx_ndesc = 0;
2922 		}
2923 	}
2924 	jme_stats_update(sc);
2925 	jme_stats_save(sc);
2926 }
2927 
2928 static void
2929 jme_stop_tx(struct jme_softc *sc)
2930 {
2931 	uint32_t reg;
2932 	int i;
2933 
2934 	reg = CSR_READ_4(sc, JME_TXCSR);
2935 	if ((reg & TXCSR_TX_ENB) == 0)
2936 		return;
2937 	reg &= ~TXCSR_TX_ENB;
2938 	CSR_WRITE_4(sc, JME_TXCSR, reg);
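	/* Poll up to JME_TIMEOUT microseconds for the transmitter to stop. */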
2939 	for (i = JME_TIMEOUT; i > 0; i--) {
2940 		DELAY(1);
2941 		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2942 			break;
2943 	}
2944 	if (i == 0)
2945 		device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2946 }
2947 
2948 static void
2949 jme_stop_rx(struct jme_softc *sc)
2950 {
2951 	uint32_t reg;
2952 	int i;
2953 
2954 	reg = CSR_READ_4(sc, JME_RXCSR);
2955 	if ((reg & RXCSR_RX_ENB) == 0)
2956 		return;
2957 	reg &= ~RXCSR_RX_ENB;
2958 	CSR_WRITE_4(sc, JME_RXCSR, reg);
2959 	for (i = JME_TIMEOUT; i > 0; i--) {
2960 		DELAY(1);
2961 		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2962 			break;
2963 	}
2964 	if (i == 0)
2965 		device_printf(sc->jme_dev, "stopping receiver timeout!\n");
2966 }
2967 
2968 static void
2969 jme_init_tx_ring(struct jme_softc *sc)
2970 {
2971 	struct jme_ring_data *rd;
2972 	struct jme_txdesc *txd;
2973 	int i;
2974 
2975 	sc->jme_cdata.jme_tx_prod = 0;
2976 	sc->jme_cdata.jme_tx_cons = 0;
2977 	sc->jme_cdata.jme_tx_cnt = 0;
2978 
2979 	rd = &sc->jme_rdata;
2980 	bzero(rd->jme_tx_ring, JME_TX_RING_SIZE);
2981 	for (i = 0; i < JME_TX_RING_CNT; i++) {
2982 		txd = &sc->jme_cdata.jme_txdesc[i];
2983 		txd->tx_m = NULL;
2984 		txd->tx_desc = &rd->jme_tx_ring[i];
2985 		txd->tx_ndesc = 0;
2986 	}
2987 
2988 	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2989 	    sc->jme_cdata.jme_tx_ring_map,
2990 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2991 }
2992 
2993 static void
2994 jme_init_ssb(struct jme_softc *sc)
2995 {
2996 	struct jme_ring_data *rd;
2997 
2998 	rd = &sc->jme_rdata;
2999 	bzero(rd->jme_ssb_block, JME_SSB_SIZE);
3000 	bus_dmamap_sync(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map,
3001 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3002 }
3003 
3004 static int
3005 jme_init_rx_ring(struct jme_softc *sc)
3006 {
3007 	struct jme_ring_data *rd;
3008 	struct jme_rxdesc *rxd;
3009 	int i;
3010 
3011 	sc->jme_cdata.jme_rx_cons = 0;
3012 	JME_RXCHAIN_RESET(sc);
3013 	atomic_set_int(&sc->jme_morework, 0);
3014 
3015 	rd = &sc->jme_rdata;
3016 	bzero(rd->jme_rx_ring, JME_RX_RING_SIZE);
3017 	for (i = 0; i < JME_RX_RING_CNT; i++) {
3018 		rxd = &sc->jme_cdata.jme_rxdesc[i];
3019 		rxd->rx_m = NULL;
3020 		rxd->rx_desc = &rd->jme_rx_ring[i];
3021 		if (jme_newbuf(sc, rxd) != 0)
3022 			return (ENOBUFS);
3023 	}
3024 
3025 	bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
3026 	    sc->jme_cdata.jme_rx_ring_map,
3027 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3028 
3029 	return (0);
3030 }
3031 
3032 static int
3033 jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd)
3034 {
3035 	struct jme_desc *desc;
3036 	struct mbuf *m;
3037 	bus_dma_segment_t segs[1];
3038 	bus_dmamap_t map;
3039 	int nsegs;
3040 
3041 	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3042 	if (m == NULL)
3043 		return (ENOBUFS);
3044 	/*
3045 	 * The JMC250 has a 64bit boundary alignment limitation, so
3046 	 * jme(4) takes advantage of the hardware's 10 byte padding
3047 	 * feature to avoid copying the entire frame just to align
3048 	 * the IP header on a 32bit boundary.
3049 	 */
3050 	m->m_len = m->m_pkthdr.len = MCLBYTES;
3051 
3052 	if (bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_rx_tag,
3053 	    sc->jme_cdata.jme_rx_sparemap, m, segs, &nsegs, 0) != 0) {
3054 		m_freem(m);
3055 		return (ENOBUFS);
3056 	}
3057 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
3058 
3059 	if (rxd->rx_m != NULL) {
3060 		bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
3061 		    BUS_DMASYNC_POSTREAD);
3062 		bus_dmamap_unload(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap);
3063 	}
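	/*
	 * Swap the just-loaded spare map into this ring slot and
	 * keep the slot's old map as the new spare.
	 */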
3064 	map = rxd->rx_dmamap;
3065 	rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
3066 	sc->jme_cdata.jme_rx_sparemap = map;
3067 	bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
3068 	    BUS_DMASYNC_PREREAD);
3069 	rxd->rx_m = m;
3070 
3071 	desc = rxd->rx_desc;
3072 	desc->buflen = htole32(segs[0].ds_len);
3073 	desc->addr_lo = htole32(JME_ADDR_LO(segs[0].ds_addr));
3074 	desc->addr_hi = htole32(JME_ADDR_HI(segs[0].ds_addr));
3075 	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
3076 
3077 	return (0);
3078 }
3079 
3080 static void
3081 jme_set_vlan(struct jme_softc *sc)
3082 {
3083 	struct ifnet *ifp;
3084 	uint32_t reg;
3085 
3086 	JME_LOCK_ASSERT(sc);
3087 
3088 	ifp = sc->jme_ifp;
3089 	reg = CSR_READ_4(sc, JME_RXMAC);
3090 	reg &= ~RXMAC_VLAN_ENB;
3091 	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3092 		reg |= RXMAC_VLAN_ENB;
3093 	CSR_WRITE_4(sc, JME_RXMAC, reg);
3094 }
3095 
3096 static void
3097 jme_set_filter(struct jme_softc *sc)
3098 {
3099 	struct ifnet *ifp;
3100 	struct ifmultiaddr *ifma;
3101 	uint32_t crc;
3102 	uint32_t mchash[2];
3103 	uint32_t rxcfg;
3104 
3105 	JME_LOCK_ASSERT(sc);
3106 
3107 	ifp = sc->jme_ifp;
3108 
3109 	rxcfg = CSR_READ_4(sc, JME_RXMAC);
3110 	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
3111 	    RXMAC_ALLMULTI);
3112 	/* Always accept frames destined to our station address. */
3113 	rxcfg |= RXMAC_UNICAST;
3114 	if ((ifp->if_flags & IFF_BROADCAST) != 0)
3115 		rxcfg |= RXMAC_BROADCAST;
3116 	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
3117 		if ((ifp->if_flags & IFF_PROMISC) != 0)
3118 			rxcfg |= RXMAC_PROMISC;
3119 		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
3120 			rxcfg |= RXMAC_ALLMULTI;
3121 		CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
3122 		CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
3123 		CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3124 		return;
3125 	}
3126 
3127 	/*
3128 	 * Set up the multicast address filter by passing all multicast
3129 	 * addresses through a CRC generator, and then using the low-order
3130 	 * 6 bits as an index into the 64 bit multicast hash table.  The
3131 	 * high order bits select the register, while the rest of the bits
3132 	 * select the bit within the register.
3133 	 */
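	/*
	 * For example, a CRC whose low 6 bits are 42 (0x2a) sets
	 * bit 10 (42 & 0x1f) of mchash[1] (42 >> 5).
	 */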
3134 	rxcfg |= RXMAC_MULTICAST;
3135 	bzero(mchash, sizeof(mchash));
3136 
3137 	if_maddr_rlock(ifp);
3138 	TAILQ_FOREACH(ifma, &sc->jme_ifp->if_multiaddrs, ifma_link) {
3139 		if (ifma->ifma_addr->sa_family != AF_LINK)
3140 			continue;
3141 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
3142 		    ifma->ifma_addr), ETHER_ADDR_LEN);
3143 
3144 		/* Just want the 6 least significant bits. */
3145 		crc &= 0x3f;
3146 
3147 		/* Set the corresponding bit in the hash table. */
3148 		mchash[crc >> 5] |= 1 << (crc & 0x1f);
3149 	}
3150 	if_maddr_runlock(ifp);
3151 
3152 	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
3153 	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
3154 	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3155 }
3156 
3157 static void
3158 jme_stats_clear(struct jme_softc *sc)
3159 {
3160 
3161 	JME_LOCK_ASSERT(sc);
3162 
3163 	if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
3164 		return;
3165 
3166 	/* Disable and clear counters. */
3167 	CSR_WRITE_4(sc, JME_STATCSR, 0xFFFFFFFF);
3168 	/* Activate hw counters. */
3169 	CSR_WRITE_4(sc, JME_STATCSR, 0);
3170 	CSR_READ_4(sc, JME_STATCSR);
3171 	bzero(&sc->jme_stats, sizeof(struct jme_hw_stats));
3172 }
3173 
3174 static void
3175 jme_stats_save(struct jme_softc *sc)
3176 {
3177 
3178 	JME_LOCK_ASSERT(sc);
3179 
3180 	if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
3181 		return;
3182 	/* Save current counters. */
3183 	bcopy(&sc->jme_stats, &sc->jme_ostats, sizeof(struct jme_hw_stats));
3184 	/* Disable and clear counters. */
3185 	CSR_WRITE_4(sc, JME_STATCSR, 0xFFFFFFFF);
3186 }
3187 
3188 static void
3189 jme_stats_update(struct jme_softc *sc)
3190 {
3191 	struct jme_hw_stats *stat, *ostat;
3192 	uint32_t reg;
3193 
3194 	JME_LOCK_ASSERT(sc);
3195 
3196 	if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
3197 		return;
3198 	stat = &sc->jme_stats;
3199 	ostat = &sc->jme_ostats;
3200 	stat->tx_good_frames = CSR_READ_4(sc, JME_STAT_TXGOOD);
3201 	stat->rx_good_frames = CSR_READ_4(sc, JME_STAT_RXGOOD);
3202 	reg = CSR_READ_4(sc, JME_STAT_CRCMII);
3203 	stat->rx_crc_errs = (reg & STAT_RX_CRC_ERR_MASK) >>
3204 	    STAT_RX_CRC_ERR_SHIFT;
3205 	stat->rx_mii_errs = (reg & STAT_RX_MII_ERR_MASK) >>
3206 	    STAT_RX_MII_ERR_SHIFT;
3207 	reg = CSR_READ_4(sc, JME_STAT_RXERR);
3208 	stat->rx_fifo_oflows = (reg & STAT_RXERR_OFLOW_MASK) >>
3209 	    STAT_RXERR_OFLOW_SHIFT;
3210 	stat->rx_desc_empty = (reg & STAT_RXERR_MPTY_MASK) >>
3211 	    STAT_RXERR_MPTY_SHIFT;
3212 	reg = CSR_READ_4(sc, JME_STAT_FAIL);
3213 	stat->rx_bad_frames = (reg & STAT_FAIL_RX_MASK) >> STAT_FAIL_RX_SHIFT;
3214 	stat->tx_bad_frames = (reg & STAT_FAIL_TX_MASK) >> STAT_FAIL_TX_SHIFT;
3215 
3216 	/* Account for previous counters. */
3217 	stat->rx_good_frames += ostat->rx_good_frames;
3218 	stat->rx_crc_errs += ostat->rx_crc_errs;
3219 	stat->rx_mii_errs += ostat->rx_mii_errs;
3220 	stat->rx_fifo_oflows += ostat->rx_fifo_oflows;
3221 	stat->rx_desc_empty += ostat->rx_desc_empty;
3222 	stat->rx_bad_frames += ostat->rx_bad_frames;
3223 	stat->tx_good_frames += ostat->tx_good_frames;
3224 	stat->tx_bad_frames += ostat->tx_bad_frames;
3225 }
3226 
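/*
 * Range-checked sysctl helper shared by the coalescing and process
 * limit handlers below; values outside [low, high] are rejected
 * with EINVAL.
 */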
3227 static int
3228 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3229 {
3230 	int error, value;
3231 
3232 	if (arg1 == NULL)
3233 		return (EINVAL);
3234 	value = *(int *)arg1;
3235 	error = sysctl_handle_int(oidp, &value, 0, req);
3236 	if (error || req->newptr == NULL)
3237 		return (error);
3238 	if (value < low || value > high)
3239 		return (EINVAL);
3240 	*(int *)arg1 = value;
3241 
3242 	return (0);
3243 }
3244 
3245 static int
3246 sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS)
3247 {
3248 	return (sysctl_int_range(oidp, arg1, arg2, req,
3249 	    PCCTX_COAL_TO_MIN, PCCTX_COAL_TO_MAX));
3250 }
3251 
3252 static int
3253 sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
3254 {
3255 	return (sysctl_int_range(oidp, arg1, arg2, req,
3256 	    PCCTX_COAL_PKT_MIN, PCCTX_COAL_PKT_MAX));
3257 }
3258 
3259 static int
3260 sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS)
3261 {
3262 	return (sysctl_int_range(oidp, arg1, arg2, req,
3263 	    PCCRX_COAL_TO_MIN, PCCRX_COAL_TO_MAX));
3264 }
3265 
3266 static int
3267 sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
3268 {
3269 	return (sysctl_int_range(oidp, arg1, arg2, req,
3270 	    PCCRX_COAL_PKT_MIN, PCCRX_COAL_PKT_MAX));
3271 }
3272 
3273 static int
3274 sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS)
3275 {
3276 	return (sysctl_int_range(oidp, arg1, arg2, req,
3277 	    JME_PROC_MIN, JME_PROC_MAX));
3278 }
3279