xref: /freebsd/sys/dev/jme/if_jme.c (revision 3fdef8e855027d5c9bec06e2a53e8f99f7d5694b)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice unmodified, this list of conditions, and the following
12  *    disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/bus.h>
33 #include <sys/endian.h>
34 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/mbuf.h>
37 #include <sys/rman.h>
38 #include <sys/module.h>
39 #include <sys/proc.h>
40 #include <sys/queue.h>
41 #include <sys/socket.h>
42 #include <sys/sockio.h>
43 #include <sys/sysctl.h>
44 #include <sys/taskqueue.h>
45 
46 #include <net/bpf.h>
47 #include <net/if.h>
48 #include <net/if_var.h>
49 #include <net/if_arp.h>
50 #include <net/ethernet.h>
51 #include <net/if_dl.h>
52 #include <net/if_media.h>
53 #include <net/if_types.h>
54 #include <net/if_vlan_var.h>
55 
56 #include <netinet/in.h>
57 #include <netinet/in_systm.h>
58 #include <netinet/ip.h>
59 #include <netinet/tcp.h>
60 
61 #include <dev/mii/mii.h>
62 #include <dev/mii/miivar.h>
63 
64 #include <dev/pci/pcireg.h>
65 #include <dev/pci/pcivar.h>
66 
67 #include <machine/bus.h>
68 #include <machine/in_cksum.h>
69 
70 #include <dev/jme/if_jmereg.h>
71 #include <dev/jme/if_jmevar.h>
72 
73 /* "device miibus" required.  See GENERIC if you get errors here. */
74 #include "miibus_if.h"
75 
76 /* Define the following to enable reporting of Rx errors. */
77 #undef	JME_SHOW_ERRORS
78 
79 #define	JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
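/*
 * Checksum-offload capabilities advertised to the stack; CSUM_TSO is
 * OR'd in separately when if_sethwassist() is called in jme_attach().
 */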
80 
81 MODULE_DEPEND(jme, pci, 1, 1, 1);
82 MODULE_DEPEND(jme, ether, 1, 1, 1);
83 MODULE_DEPEND(jme, miibus, 1, 1, 1);
84 
85 /* Tunables. */
86 static int msi_disable = 0;
87 static int msix_disable = 0;
88 TUNABLE_INT("hw.jme.msi_disable", &msi_disable);
89 TUNABLE_INT("hw.jme.msix_disable", &msix_disable);
90 
91 /*
92  * Devices supported by this driver.
93  */
94 static struct jme_dev {
95 	uint16_t	jme_vendorid;
96 	uint16_t	jme_deviceid;
97 	const char	*jme_name;
98 } jme_devs[] = {
99 	{ VENDORID_JMICRON, DEVICEID_JMC250,
100 	    "JMicron Inc, JMC25x Gigabit Ethernet" },
101 	{ VENDORID_JMICRON, DEVICEID_JMC260,
102 	    "JMicron Inc, JMC26x Fast Ethernet" },
103 };
104 
105 static int jme_miibus_readreg(device_t, int, int);
106 static int jme_miibus_writereg(device_t, int, int, int);
107 static void jme_miibus_statchg(device_t);
108 static void jme_mediastatus(if_t, struct ifmediareq *);
109 static int jme_mediachange(if_t);
110 static int jme_probe(device_t);
111 static int jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
112 static int jme_eeprom_macaddr(struct jme_softc *);
113 static int jme_efuse_macaddr(struct jme_softc *);
114 static void jme_reg_macaddr(struct jme_softc *);
115 static void jme_set_macaddr(struct jme_softc *, uint8_t *);
116 static void jme_map_intr_vector(struct jme_softc *);
117 static int jme_attach(device_t);
118 static int jme_detach(device_t);
119 static void jme_sysctl_node(struct jme_softc *);
120 static void jme_dmamap_cb(void *, bus_dma_segment_t *, int, int);
121 static int jme_dma_alloc(struct jme_softc *);
122 static void jme_dma_free(struct jme_softc *);
123 static int jme_shutdown(device_t);
124 static void jme_setlinkspeed(struct jme_softc *);
125 static void jme_setwol(struct jme_softc *);
126 static int jme_suspend(device_t);
127 static int jme_resume(device_t);
128 static int jme_encap(struct jme_softc *, struct mbuf **);
129 static void jme_start(if_t);
130 static void jme_start_locked(if_t);
131 static void jme_watchdog(struct jme_softc *);
132 static int jme_ioctl(if_t, u_long, caddr_t);
133 static void jme_mac_config(struct jme_softc *);
134 static void jme_link_task(void *, int);
135 static int jme_intr(void *);
136 static void jme_int_task(void *, int);
137 static void jme_txeof(struct jme_softc *);
138 static __inline void jme_discard_rxbuf(struct jme_softc *, int);
139 static void jme_rxeof(struct jme_softc *);
140 static int jme_rxintr(struct jme_softc *, int);
141 static void jme_tick(void *);
142 static void jme_reset(struct jme_softc *);
143 static void jme_init(void *);
144 static void jme_init_locked(struct jme_softc *);
145 static void jme_stop(struct jme_softc *);
146 static void jme_stop_tx(struct jme_softc *);
147 static void jme_stop_rx(struct jme_softc *);
148 static int jme_init_rx_ring(struct jme_softc *);
149 static void jme_init_tx_ring(struct jme_softc *);
150 static void jme_init_ssb(struct jme_softc *);
151 static int jme_newbuf(struct jme_softc *, struct jme_rxdesc *);
152 static void jme_set_vlan(struct jme_softc *);
153 static void jme_set_filter(struct jme_softc *);
154 static void jme_stats_clear(struct jme_softc *);
155 static void jme_stats_save(struct jme_softc *);
156 static void jme_stats_update(struct jme_softc *);
157 static void jme_phy_down(struct jme_softc *);
158 static void jme_phy_up(struct jme_softc *);
159 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
160 static int sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS);
161 static int sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
162 static int sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS);
163 static int sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
164 static int sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS);
165 
166 
167 static device_method_t jme_methods[] = {
168 	/* Device interface. */
169 	DEVMETHOD(device_probe,		jme_probe),
170 	DEVMETHOD(device_attach,	jme_attach),
171 	DEVMETHOD(device_detach,	jme_detach),
172 	DEVMETHOD(device_shutdown,	jme_shutdown),
173 	DEVMETHOD(device_suspend,	jme_suspend),
174 	DEVMETHOD(device_resume,	jme_resume),
175 
176 	/* MII interface. */
177 	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
178 	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
179 	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),
180 
181 	{ NULL, NULL }
182 };
183 
184 static driver_t jme_driver = {
185 	"jme",
186 	jme_methods,
187 	sizeof(struct jme_softc)
188 };
189 
190 DRIVER_MODULE(jme, pci, jme_driver, 0, 0);
191 DRIVER_MODULE(miibus, jme, miibus_driver, 0, 0);
192 
193 static struct resource_spec jme_res_spec_mem[] = {
194 	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
195 	{ -1,			0,		0 }
196 };
197 
198 static struct resource_spec jme_irq_spec_legacy[] = {
199 	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
200 	{ -1,			0,		0 }
201 };
202 
203 static struct resource_spec jme_irq_spec_msi[] = {
204 	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
205 	{ -1,			0,		0 }
206 };
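/*
 * Each resource_spec list is terminated by a -1 type entry.  The legacy
 * spec asks for IRQ rid 0 (the shareable INTx line) while the MSI/MSI-X
 * spec asks for rid 1, since message-signaled interrupt resources are
 * numbered from 1.
 */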
207 
208 /*
209  *	Read a PHY register on the MII of the JMC250.
210  */
211 static int
212 jme_miibus_readreg(device_t dev, int phy, int reg)
213 {
214 	struct jme_softc *sc;
215 	uint32_t val;
216 	int i;
217 
218 	sc = device_get_softc(dev);
219 
220 	/* For FPGA version, PHY address 0 should be ignored. */
221 	if ((sc->jme_flags & JME_FLAG_FPGA) != 0 && phy == 0)
222 		return (0);
223 
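	/*
	 * Encode the PHY and register addresses into an SMI read command,
	 * then busy-wait (up to JME_PHY_TIMEOUT 1us ticks) for the
	 * controller to clear SMI_OP_EXECUTE.
	 */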
224 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
225 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
226 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
227 		DELAY(1);
228 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
229 			break;
230 	}
231 
232 	if (i == 0) {
233 		device_printf(sc->jme_dev, "phy read timeout : %d\n", reg);
234 		return (0);
235 	}
236 
237 	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
238 }
239 
240 /*
241  *	Write a PHY register on the MII of the JMC250.
242  */
243 static int
244 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
245 {
246 	struct jme_softc *sc;
247 	int i;
248 
249 	sc = device_get_softc(dev);
250 
251 	/* For FPGA version, PHY address 0 should be ignored. */
252 	if ((sc->jme_flags & JME_FLAG_FPGA) != 0 && phy == 0)
253 		return (0);
254 
255 	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
256 	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
257 	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
258 	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
259 		DELAY(1);
260 		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
261 			break;
262 	}
263 
264 	if (i == 0)
265 		device_printf(sc->jme_dev, "phy write timeout : %d\n", reg);
266 
267 	return (0);
268 }
269 
270 /*
271  *	Callback from MII layer when media changes.
272  */
273 static void
274 jme_miibus_statchg(device_t dev)
275 {
276 	struct jme_softc *sc;
277 
278 	sc = device_get_softc(dev);
279 	taskqueue_enqueue(taskqueue_swi, &sc->jme_link_task);
280 }
281 
282 /*
283  *	Get the current interface media status.
284  */
285 static void
286 jme_mediastatus(if_t ifp, struct ifmediareq *ifmr)
287 {
288 	struct jme_softc *sc;
289 	struct mii_data *mii;
290 
291 	sc = if_getsoftc(ifp);
292 	JME_LOCK(sc);
293 	if ((if_getflags(ifp) & IFF_UP) == 0) {
294 		JME_UNLOCK(sc);
295 		return;
296 	}
297 	mii = device_get_softc(sc->jme_miibus);
298 
299 	mii_pollstat(mii);
300 	ifmr->ifm_status = mii->mii_media_status;
301 	ifmr->ifm_active = mii->mii_media_active;
302 	JME_UNLOCK(sc);
303 }
304 
305 /*
306  *	Set hardware to newly-selected media.
307  */
308 static int
309 jme_mediachange(if_t ifp)
310 {
311 	struct jme_softc *sc;
312 	struct mii_data *mii;
313 	struct mii_softc *miisc;
314 	int error;
315 
316 	sc = if_getsoftc(ifp);
317 	JME_LOCK(sc);
318 	mii = device_get_softc(sc->jme_miibus);
319 	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
320 		PHY_RESET(miisc);
321 	error = mii_mediachg(mii);
322 	JME_UNLOCK(sc);
323 
324 	return (error);
325 }
326 
327 static int
328 jme_probe(device_t dev)
329 {
330 	struct jme_dev *sp;
331 	int i;
332 	uint16_t vendor, devid;
333 
334 	vendor = pci_get_vendor(dev);
335 	devid = pci_get_device(dev);
336 	sp = jme_devs;
337 	for (i = 0; i < nitems(jme_devs); i++, sp++) {
338 		if (vendor == sp->jme_vendorid &&
339 		    devid == sp->jme_deviceid) {
340 			device_set_desc(dev, sp->jme_name);
341 			return (BUS_PROBE_DEFAULT);
342 		}
343 	}
344 
345 	return (ENXIO);
346 }
347 
348 static int
349 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
350 {
351 	uint32_t reg;
352 	int i;
353 
354 	*val = 0;
355 	for (i = JME_TIMEOUT; i > 0; i--) {
356 		reg = CSR_READ_4(sc, JME_SMBCSR);
357 		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
358 			break;
359 		DELAY(1);
360 	}
361 
362 	if (i == 0) {
363 		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
364 		return (ETIMEDOUT);
365 	}
366 
367 	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
368 	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
369 	for (i = JME_TIMEOUT; i > 0; i--) {
370 		DELAY(1);
371 		reg = CSR_READ_4(sc, JME_SMBINTF);
372 		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
373 			break;
374 	}
375 
376 	if (i == 0) {
377 		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
378 		return (ETIMEDOUT);
379 	}
380 
381 	reg = CSR_READ_4(sc, JME_SMBINTF);
382 	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
383 
384 	return (0);
385 }
386 
387 static int
388 jme_eeprom_macaddr(struct jme_softc *sc)
389 {
390 	uint8_t eaddr[ETHER_ADDR_LEN];
391 	uint8_t fup, reg, val;
392 	uint32_t offset;
393 	int match;
394 
395 	offset = 0;
396 	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
397 	    fup != JME_EEPROM_SIG0)
398 		return (ENOENT);
399 	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
400 	    fup != JME_EEPROM_SIG1)
401 		return (ENOENT);
402 	match = 0;
403 	do {
404 		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
405 			break;
406 		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
407 		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
408 			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
409 				break;
410 			if (reg >= JME_PAR0 &&
411 			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
412 				if (jme_eeprom_read_byte(sc, offset + 2,
413 				    &val) != 0)
414 					break;
415 				eaddr[reg - JME_PAR0] = val;
416 				match++;
417 			}
418 		}
419 		/* Check for the end of EEPROM descriptor. */
420 		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
421 			break;
422 		/* Try next eeprom descriptor. */
423 		offset += JME_EEPROM_DESC_BYTES;
424 	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);
425 
426 	if (match == ETHER_ADDR_LEN) {
427 		bcopy(eaddr, sc->jme_eaddr, ETHER_ADDR_LEN);
428 		return (0);
429 	}
430 
431 	return (ENOENT);
432 }
433 
434 static int
435 jme_efuse_macaddr(struct jme_softc *sc)
436 {
437 	uint32_t reg;
438 	int i;
439 
440 	reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4);
441 	if ((reg & (EFUSE_CTL1_AUTOLOAD_ERR | EFUSE_CTL1_AUTOLAOD_DONE)) !=
442 	    EFUSE_CTL1_AUTOLAOD_DONE)
443 		return (ENOENT);
444 	/* Reset eFuse controller. */
445 	reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL2, 4);
446 	reg |= EFUSE_CTL2_RESET;
447 	pci_write_config(sc->jme_dev, JME_EFUSE_CTL2, reg, 4);
448 	reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL2, 4);
449 	reg &= ~EFUSE_CTL2_RESET;
450 	pci_write_config(sc->jme_dev, JME_EFUSE_CTL2, reg, 4);
451 
452 	/* Have eFuse reload station address to MAC controller. */
453 	reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4);
454 	reg &= ~EFUSE_CTL1_CMD_MASK;
455 	reg |= EFUSE_CTL1_CMD_AUTOLOAD | EFUSE_CTL1_EXECUTE;
456 	pci_write_config(sc->jme_dev, JME_EFUSE_CTL1, reg, 4);
457 
458 	/*
459 	 * Verify completion of the eFuse autoload command.  It should be
460 	 * completed within 108us.
461 	 */
462 	DELAY(110);
463 	for (i = 10; i > 0; i--) {
464 		reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4);
465 		if ((reg & (EFUSE_CTL1_AUTOLOAD_ERR |
466 		    EFUSE_CTL1_AUTOLAOD_DONE)) != EFUSE_CTL1_AUTOLAOD_DONE) {
467 			DELAY(20);
468 			continue;
469 		}
470 		if ((reg & EFUSE_CTL1_EXECUTE) == 0)
471 			break;
472 		/* Station address loading is still in progress. */
473 		DELAY(20);
474 	}
475 	if (i == 0) {
476 		device_printf(sc->jme_dev, "eFuse autoload timed out.\n");
477 		return (ETIMEDOUT);
478 	}
479 
480 	return (0);
481 }
482 
483 static void
484 jme_reg_macaddr(struct jme_softc *sc)
485 {
486 	uint32_t par0, par1;
487 
488 	/* Read station address. */
489 	par0 = CSR_READ_4(sc, JME_PAR0);
490 	par1 = CSR_READ_4(sc, JME_PAR1);
491 	par1 &= 0xFFFF;
492 	if ((par0 == 0 && par1 == 0) ||
493 	    (par0 == 0xFFFFFFFF && par1 == 0xFFFF)) {
494 		device_printf(sc->jme_dev,
495 		    "Failed to retrieve Ethernet address.\n");
496 	} else {
497 		/*
498 		 * For controllers that use eFuse, the station address
499 		 * could also be extracted from JME_PCI_PAR0 and
500 		 * JME_PCI_PAR1 registers in PCI configuration space.
501 		 * Each register holds exactly half of the station address
502 		 * (24 bits), so use the JME_PAR0/JME_PAR1 registers instead.
503 		 */
504 		sc->jme_eaddr[0] = (par0 >> 0) & 0xFF;
505 		sc->jme_eaddr[1] = (par0 >> 8) & 0xFF;
506 		sc->jme_eaddr[2] = (par0 >> 16) & 0xFF;
507 		sc->jme_eaddr[3] = (par0 >> 24) & 0xFF;
508 		sc->jme_eaddr[4] = (par1 >> 0) & 0xFF;
509 		sc->jme_eaddr[5] = (par1 >> 8) & 0xFF;
510 	}
511 }
512 
513 static void
514 jme_set_macaddr(struct jme_softc *sc, uint8_t *eaddr)
515 {
516 	uint32_t val;
517 	int i;
518 
519 	if ((sc->jme_flags & JME_FLAG_EFUSE) != 0) {
520 		/*
521 		 * Avoid reprogramming the station address if it is the
522 		 * same as the previous one.  Note that a reprogrammed
523 		 * station address is permanent, as if it had been written
524 		 * to EEPROM.  So if the station address was changed by the
525 		 * administrator, it's possible to lose the factory-configured
526 		 * address if the driver fails to restore it
527 		 * (e.g. after a reboot or system crash).
528 		 */
529 		if (bcmp(eaddr, sc->jme_eaddr, ETHER_ADDR_LEN) != 0) {
530 			for (i = 0; i < ETHER_ADDR_LEN; i++) {
531 				val = JME_EFUSE_EEPROM_FUNC0 <<
532 				    JME_EFUSE_EEPROM_FUNC_SHIFT;
533 				val |= JME_EFUSE_EEPROM_PAGE_BAR1 <<
534 				    JME_EFUSE_EEPROM_PAGE_SHIFT;
535 				val |= (JME_PAR0 + i) <<
536 				    JME_EFUSE_EEPROM_ADDR_SHIFT;
537 				val |= eaddr[i] << JME_EFUSE_EEPROM_DATA_SHIFT;
538 				pci_write_config(sc->jme_dev, JME_EFUSE_EEPROM,
539 				    val | JME_EFUSE_EEPROM_WRITE, 4);
540 			}
541 		}
542 	} else {
543 		CSR_WRITE_4(sc, JME_PAR0,
544 		    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
545 		CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
546 	}
547 }
548 
549 static void
550 jme_map_intr_vector(struct jme_softc *sc)
551 {
552 	uint32_t map[MSINUM_NUM_INTR_SOURCE / JME_MSI_MESSAGES];
553 
554 	bzero(map, sizeof(map));
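	/*
	 * Each interrupt source owns a small field in one of the four
	 * 32-bit MSINUM registers: MSINUM_REG_INDEX() selects the register
	 * and MSINUM_INTR_SOURCE() shifts the vector number into that
	 * source's field.  Fields left at zero stay on vector 0.
	 */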
555 
556 	/* Map Tx interrupt sources to MSI/MSIX vector 2. */
557 	map[MSINUM_REG_INDEX(N_INTR_TXQ0_COMP)] =
558 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ0_COMP);
559 	map[MSINUM_REG_INDEX(N_INTR_TXQ1_COMP)] |=
560 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ1_COMP);
561 	map[MSINUM_REG_INDEX(N_INTR_TXQ2_COMP)] |=
562 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ2_COMP);
563 	map[MSINUM_REG_INDEX(N_INTR_TXQ3_COMP)] |=
564 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ3_COMP);
565 	map[MSINUM_REG_INDEX(N_INTR_TXQ4_COMP)] |=
566 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ4_COMP);
567 	map[MSINUM_REG_INDEX(N_INTR_TXQ5_COMP)] |=
568 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ5_COMP);
569 	map[MSINUM_REG_INDEX(N_INTR_TXQ6_COMP)] |=
570 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ6_COMP);
571 	map[MSINUM_REG_INDEX(N_INTR_TXQ7_COMP)] |=
572 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ7_COMP);
573 	map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL)] |=
574 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL);
575 	map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL_TO)] |=
576 	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL_TO);
577 
578 	/* Map Rx interrupt sources to MSI/MSIX vector 1. */
579 	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COMP)] =
580 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COMP);
581 	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COMP)] =
582 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COMP);
583 	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COMP)] =
584 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COMP);
585 	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COMP)] =
586 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COMP);
587 	map[MSINUM_REG_INDEX(N_INTR_RXQ0_DESC_EMPTY)] =
588 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_DESC_EMPTY);
589 	map[MSINUM_REG_INDEX(N_INTR_RXQ1_DESC_EMPTY)] =
590 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_DESC_EMPTY);
591 	map[MSINUM_REG_INDEX(N_INTR_RXQ2_DESC_EMPTY)] =
592 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_DESC_EMPTY);
593 	map[MSINUM_REG_INDEX(N_INTR_RXQ3_DESC_EMPTY)] =
594 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_DESC_EMPTY);
595 	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL)] =
596 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL);
597 	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL)] =
598 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL);
599 	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL)] =
600 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL);
601 	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL)] =
602 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL);
603 	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL_TO)] =
604 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL_TO);
605 	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL_TO)] =
606 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL_TO);
607 	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL_TO)] =
608 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL_TO);
609 	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL_TO)] =
610 	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL_TO);
611 
612 	/* All other interrupt sources are left mapped to MSI/MSIX vector 0. */
613 	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 0, map[0]);
614 	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 1, map[1]);
615 	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 2, map[2]);
616 	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 3, map[3]);
617 }
618 
619 static int
620 jme_attach(device_t dev)
621 {
622 	struct jme_softc *sc;
623 	if_t ifp;
624 	struct mii_softc *miisc;
625 	struct mii_data *mii;
626 	uint32_t reg;
627 	uint16_t burst;
628 	int error, i, mii_flags, msic, msixc, pmc;
629 
630 	error = 0;
631 	sc = device_get_softc(dev);
632 	sc->jme_dev = dev;
633 
634 	mtx_init(&sc->jme_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
635 	    MTX_DEF);
636 	callout_init_mtx(&sc->jme_tick_ch, &sc->jme_mtx, 0);
637 	TASK_INIT(&sc->jme_int_task, 0, jme_int_task, sc);
638 	TASK_INIT(&sc->jme_link_task, 0, jme_link_task, sc);
639 
640 	/*
641 	 * Map the device.  The JMC250 supports both memory-mapped and
642 	 * I/O register space access.  Because I/O register access would
643 	 * have to use different BARs to reach all registers, it's a
644 	 * waste of time to use I/O register space access.  The JMC250
645 	 * uses 16K to map the entire memory space.
646 	 */
647 	pci_enable_busmaster(dev);
648 	sc->jme_res_spec = jme_res_spec_mem;
649 	sc->jme_irq_spec = jme_irq_spec_legacy;
650 	error = bus_alloc_resources(dev, sc->jme_res_spec, sc->jme_res);
651 	if (error != 0) {
652 		device_printf(dev, "cannot allocate memory resources.\n");
653 		goto fail;
654 	}
655 
656 	/* Allocate IRQ resources. */
657 	msixc = pci_msix_count(dev);
658 	msic = pci_msi_count(dev);
659 	if (bootverbose) {
660 		device_printf(dev, "MSIX count : %d\n", msixc);
661 		device_printf(dev, "MSI count : %d\n", msic);
662 	}
663 
664 	/* Use 1 MSI/MSI-X. */
665 	if (msixc > 1)
666 		msixc = 1;
667 	if (msic > 1)
668 		msic = 1;
669 	/* Prefer MSIX over MSI. */
670 	if (msix_disable == 0 || msi_disable == 0) {
671 		if (msix_disable == 0 && msixc > 0 &&
672 		    pci_alloc_msix(dev, &msixc) == 0) {
673 			if (msixc == 1) {
674 				device_printf(dev, "Using %d MSIX messages.\n",
675 				    msixc);
676 				sc->jme_flags |= JME_FLAG_MSIX;
677 				sc->jme_irq_spec = jme_irq_spec_msi;
678 			} else
679 				pci_release_msi(dev);
680 		}
681 		if (msi_disable == 0 && (sc->jme_flags & JME_FLAG_MSIX) == 0 &&
682 		    msic > 0 && pci_alloc_msi(dev, &msic) == 0) {
683 			if (msic == 1) {
684 				device_printf(dev, "Using %d MSI messages.\n",
685 				    msic);
686 				sc->jme_flags |= JME_FLAG_MSI;
687 				sc->jme_irq_spec = jme_irq_spec_msi;
688 			} else
689 				pci_release_msi(dev);
690 		}
691 		/* Map interrupt vector 0, 1 and 2. */
692 		if ((sc->jme_flags & JME_FLAG_MSI) != 0 ||
693 		    (sc->jme_flags & JME_FLAG_MSIX) != 0)
694 			jme_map_intr_vector(sc);
695 	}
696 
697 	error = bus_alloc_resources(dev, sc->jme_irq_spec, sc->jme_irq);
698 	if (error != 0) {
699 		device_printf(dev, "cannot allocate IRQ resources.\n");
700 		goto fail;
701 	}
702 
703 	sc->jme_rev = pci_get_device(dev);
704 	if ((sc->jme_rev & DEVICEID_JMC2XX_MASK) == DEVICEID_JMC260) {
705 		sc->jme_flags |= JME_FLAG_FASTETH;
706 		sc->jme_flags |= JME_FLAG_NOJUMBO;
707 	}
708 	reg = CSR_READ_4(sc, JME_CHIPMODE);
709 	sc->jme_chip_rev = (reg & CHIPMODE_REV_MASK) >> CHIPMODE_REV_SHIFT;
710 	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
711 	    CHIPMODE_NOT_FPGA)
712 		sc->jme_flags |= JME_FLAG_FPGA;
713 	if (bootverbose) {
714 		device_printf(dev, "PCI device revision : 0x%04x\n",
715 		    sc->jme_rev);
716 		device_printf(dev, "Chip revision : 0x%02x\n",
717 		    sc->jme_chip_rev);
718 		if ((sc->jme_flags & JME_FLAG_FPGA) != 0)
719 			device_printf(dev, "FPGA revision : 0x%04x\n",
720 			    (reg & CHIPMODE_FPGA_REV_MASK) >>
721 			    CHIPMODE_FPGA_REV_SHIFT);
722 	}
723 	if (sc->jme_chip_rev == 0xFF) {
724 		device_printf(dev, "Unknown chip revision : 0x%02x\n",
725 		    sc->jme_rev);
726 		error = ENXIO;
727 		goto fail;
728 	}
729 
730 	/* Identify controller features and bugs. */
731 	if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 2) {
732 		if ((sc->jme_rev & DEVICEID_JMC2XX_MASK) == DEVICEID_JMC260 &&
733 		    CHIPMODE_REVFM(sc->jme_chip_rev) == 2)
734 			sc->jme_flags |= JME_FLAG_DMA32BIT;
735 		if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5)
736 			sc->jme_flags |= JME_FLAG_EFUSE | JME_FLAG_PCCPCD;
737 		sc->jme_flags |= JME_FLAG_TXCLK | JME_FLAG_RXCLK;
738 		sc->jme_flags |= JME_FLAG_HWMIB;
739 	}
740 
741 	/* Reset the ethernet controller. */
742 	jme_reset(sc);
743 
744 	/* Get station address. */
745 	if ((sc->jme_flags & JME_FLAG_EFUSE) != 0) {
746 		error = jme_efuse_macaddr(sc);
747 		if (error == 0)
748 			jme_reg_macaddr(sc);
749 	} else {
750 		error = ENOENT;
751 		reg = CSR_READ_4(sc, JME_SMBCSR);
752 		if ((reg & SMBCSR_EEPROM_PRESENT) != 0)
753 			error = jme_eeprom_macaddr(sc);
754 		if (error != 0 && bootverbose)
755 			device_printf(sc->jme_dev,
756 			    "ethernet hardware address not found in EEPROM.\n");
757 		if (error != 0)
758 			jme_reg_macaddr(sc);
759 	}
760 
761 	/*
762 	 * Save PHY address.
763 	 * The integrated JR0211 has a fixed PHY address, whereas the FPGA
764 	 * version requires PHY probing to get the correct PHY address.
765 	 */
766 	if ((sc->jme_flags & JME_FLAG_FPGA) == 0) {
767 		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
768 		    GPREG0_PHY_ADDR_MASK;
769 		if (bootverbose)
770 			device_printf(dev, "PHY is at address %d.\n",
771 			    sc->jme_phyaddr);
772 	} else
773 		sc->jme_phyaddr = 0;
774 
775 	/* Set max allowable DMA size. */
776 	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
777 		sc->jme_flags |= JME_FLAG_PCIE;
778 		burst = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
779 		if (bootverbose) {
780 			device_printf(dev, "Read request size : %d bytes.\n",
781 			    128 << ((burst >> 12) & 0x07));
782 			device_printf(dev, "TLP payload size : %d bytes.\n",
783 			    128 << ((burst >> 5) & 0x07));
784 		}
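		/*
		 * In the PCIe Device Control register, bits 14:12 encode
		 * the maximum read request size and bits 7:5 the maximum
		 * TLP payload size, each as (128 << n) bytes; the switch
		 * below caps the Tx DMA burst to the read request size.
		 */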
785 		switch ((burst >> 12) & 0x07) {
786 		case 0:
787 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
788 			break;
789 		case 1:
790 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
791 			break;
792 		default:
793 			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
794 			break;
795 		}
796 		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
797 	} else {
798 		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
799 		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
800 	}
801 	/* Create coalescing sysctl node. */
802 	jme_sysctl_node(sc);
803 	if ((error = jme_dma_alloc(sc)) != 0)
804 		goto fail;
805 
806 	ifp = sc->jme_ifp = if_alloc(IFT_ETHER);
807 	if_setsoftc(ifp, sc);
808 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
809 	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
810 	if_setioctlfn(ifp, jme_ioctl);
811 	if_setstartfn(ifp, jme_start);
812 	if_setinitfn(ifp, jme_init);
813 	if_setsendqlen(ifp, JME_TX_RING_CNT - 1);
814 	if_setsendqready(ifp);
815 	/* JMC250 supports Tx/Rx checksum offload as well as TSO. */
816 	if_setcapabilities(ifp, IFCAP_HWCSUM | IFCAP_TSO4);
817 	if_sethwassist(ifp, JME_CSUM_FEATURES | CSUM_TSO);
818 	if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) {
819 		sc->jme_flags |= JME_FLAG_PMCAP;
820 		if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0);
821 	}
822 	if_setcapenable(ifp, if_getcapabilities(ifp));
823 
824 	/* Wakeup PHY. */
825 	jme_phy_up(sc);
826 	mii_flags = MIIF_DOPAUSE;
827 	/* Ask the PHY driver to perform PHY calibration. */
828 	if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5)
829 		mii_flags |= MIIF_MACPRIV0;
830 	/* Set up MII bus. */
831 	error = mii_attach(dev, &sc->jme_miibus, ifp, jme_mediachange,
832 	    jme_mediastatus, BMSR_DEFCAPMASK,
833 	    sc->jme_flags & JME_FLAG_FPGA ? MII_PHY_ANY : sc->jme_phyaddr,
834 	    MII_OFFSET_ANY, mii_flags);
835 	if (error != 0) {
836 		device_printf(dev, "attaching PHYs failed\n");
837 		goto fail;
838 	}
839 
840 	/*
841 	 * Force PHY to FPGA mode.
842 	 */
843 	if ((sc->jme_flags & JME_FLAG_FPGA) != 0) {
844 		mii = device_get_softc(sc->jme_miibus);
845 		if (mii->mii_instance != 0) {
846 			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
847 				if (miisc->mii_phy != 0) {
848 					sc->jme_phyaddr = miisc->mii_phy;
849 					break;
850 				}
851 			}
852 			if (sc->jme_phyaddr != 0) {
853 				device_printf(sc->jme_dev,
854 				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
855 				/* vendor magic. */
856 				jme_miibus_writereg(dev, sc->jme_phyaddr, 27,
857 				    0x0004);
858 			}
859 		}
860 	}
861 
862 	ether_ifattach(ifp, sc->jme_eaddr);
863 
864 	/* VLAN capability setup */
865 	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
866 	    IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO, 0);
867 	if_setcapenable(ifp, if_getcapabilities(ifp));
868 
869 	/* Tell the upper layer(s) we support long frames. */
870 	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
871 
872 	/* Create local taskq. */
873 	sc->jme_tq = taskqueue_create_fast("jme_taskq", M_WAITOK,
874 	    taskqueue_thread_enqueue, &sc->jme_tq);
875 	taskqueue_start_threads(&sc->jme_tq, 1, PI_NET, "%s taskq",
876 	    device_get_nameunit(sc->jme_dev));
877 
878 	for (i = 0; i < 1; i++) {
879 		error = bus_setup_intr(dev, sc->jme_irq[i],
880 		    INTR_TYPE_NET | INTR_MPSAFE, jme_intr, NULL, sc,
881 		    &sc->jme_intrhand[i]);
882 		if (error != 0)
883 			break;
884 	}
885 
886 	if (error != 0) {
887 		device_printf(dev, "could not set up interrupt handler.\n");
888 		taskqueue_free(sc->jme_tq);
889 		sc->jme_tq = NULL;
890 		ether_ifdetach(ifp);
891 		goto fail;
892 	}
893 
894 fail:
895 	if (error != 0)
896 		jme_detach(dev);
897 
898 	return (error);
899 }
900 
901 static int
902 jme_detach(device_t dev)
903 {
904 	struct jme_softc *sc;
905 	if_t ifp;
906 	int i;
907 
908 	sc = device_get_softc(dev);
909 
910 	ifp = sc->jme_ifp;
911 	if (device_is_attached(dev)) {
912 		JME_LOCK(sc);
913 		sc->jme_flags |= JME_FLAG_DETACH;
914 		jme_stop(sc);
915 		JME_UNLOCK(sc);
916 		callout_drain(&sc->jme_tick_ch);
917 		taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
918 		taskqueue_drain(taskqueue_swi, &sc->jme_link_task);
919 		/* Restore possibly modified station address. */
920 		if ((sc->jme_flags & JME_FLAG_EFUSE) != 0)
921 			jme_set_macaddr(sc, sc->jme_eaddr);
922 		ether_ifdetach(ifp);
923 	}
924 
925 	if (sc->jme_tq != NULL) {
926 		taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
927 		taskqueue_free(sc->jme_tq);
928 		sc->jme_tq = NULL;
929 	}
930 
931 	if (sc->jme_miibus != NULL) {
932 		device_delete_child(dev, sc->jme_miibus);
933 		sc->jme_miibus = NULL;
934 	}
935 	bus_generic_detach(dev);
936 	jme_dma_free(sc);
937 
938 	if (ifp != NULL) {
939 		if_free(ifp);
940 		sc->jme_ifp = NULL;
941 	}
942 
943 	for (i = 0; i < 1; i++) {
944 		if (sc->jme_intrhand[i] != NULL) {
945 			bus_teardown_intr(dev, sc->jme_irq[i],
946 			    sc->jme_intrhand[i]);
947 			sc->jme_intrhand[i] = NULL;
948 		}
949 	}
950 
951 	if (sc->jme_irq[0] != NULL)
952 		bus_release_resources(dev, sc->jme_irq_spec, sc->jme_irq);
953 	if ((sc->jme_flags & (JME_FLAG_MSIX | JME_FLAG_MSI)) != 0)
954 		pci_release_msi(dev);
955 	if (sc->jme_res[0] != NULL)
956 		bus_release_resources(dev, sc->jme_res_spec, sc->jme_res);
957 	mtx_destroy(&sc->jme_mtx);
958 
959 	return (0);
960 }
961 
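/*
 * Convenience wrapper used below: adds a read-only unsigned 32-bit
 * counter, backed by the given pointer, under the parent sysctl node.
 */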
962 #define	JME_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
963 	    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
964 
965 static void
966 jme_sysctl_node(struct jme_softc *sc)
967 {
968 	struct sysctl_ctx_list *ctx;
969 	struct sysctl_oid_list *child, *parent;
970 	struct sysctl_oid *tree;
971 	struct jme_hw_stats *stats;
972 	int error;
973 
974 	stats = &sc->jme_stats;
975 	ctx = device_get_sysctl_ctx(sc->jme_dev);
976 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev));
977 
978 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_coal_to",
979 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->jme_tx_coal_to,
980 	    0, sysctl_hw_jme_tx_coal_to, "I", "jme tx coalescing timeout");
981 
982 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_coal_pkt",
983 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->jme_tx_coal_pkt,
984 	    0, sysctl_hw_jme_tx_coal_pkt, "I", "jme tx coalescing packet");
985 
986 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_coal_to",
987 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->jme_rx_coal_to,
988 	    0, sysctl_hw_jme_rx_coal_to, "I", "jme rx coalescing timeout");
989 
990 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_coal_pkt",
991 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->jme_rx_coal_pkt,
992 	    0, sysctl_hw_jme_rx_coal_pkt, "I", "jme rx coalescing packet");
993 
994 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
995 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
996 	    &sc->jme_process_limit, 0, sysctl_hw_jme_proc_limit, "I",
997 	    "max number of Rx events to process");
998 
999 	/* Pull in device tunables. */
1000 	sc->jme_process_limit = JME_PROC_DEFAULT;
1001 	error = resource_int_value(device_get_name(sc->jme_dev),
1002 	    device_get_unit(sc->jme_dev), "process_limit",
1003 	    &sc->jme_process_limit);
1004 	if (error == 0) {
1005 		if (sc->jme_process_limit < JME_PROC_MIN ||
1006 		    sc->jme_process_limit > JME_PROC_MAX) {
1007 			device_printf(sc->jme_dev,
1008 			    "process_limit value out of range; "
1009 			    "using default: %d\n", JME_PROC_DEFAULT);
1010 			sc->jme_process_limit = JME_PROC_DEFAULT;
1011 		}
1012 	}
1013 
1014 	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
1015 	error = resource_int_value(device_get_name(sc->jme_dev),
1016 	    device_get_unit(sc->jme_dev), "tx_coal_to", &sc->jme_tx_coal_to);
1017 	if (error == 0) {
1018 		if (sc->jme_tx_coal_to < PCCTX_COAL_TO_MIN ||
1019 		    sc->jme_tx_coal_to > PCCTX_COAL_TO_MAX) {
1020 			device_printf(sc->jme_dev,
1021 			    "tx_coal_to value out of range; "
1022 			    "using default: %d\n", PCCTX_COAL_TO_DEFAULT);
1023 			sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
1024 		}
1025 	}
1026 
1027 	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
1028 	error = resource_int_value(device_get_name(sc->jme_dev),
1029 	    device_get_unit(sc->jme_dev), "tx_coal_pkt", &sc->jme_tx_coal_to);
1030 	if (error == 0) {
1031 		if (sc->jme_tx_coal_pkt < PCCTX_COAL_PKT_MIN ||
1032 		    sc->jme_tx_coal_pkt > PCCTX_COAL_PKT_MAX) {
1033 			device_printf(sc->jme_dev,
1034 			    "tx_coal_pkt value out of range; "
1035 			    "using default: %d\n", PCCTX_COAL_PKT_DEFAULT);
1036 			sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
1037 		}
1038 	}
1039 
1040 	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
1041 	error = resource_int_value(device_get_name(sc->jme_dev),
1042 	    device_get_unit(sc->jme_dev), "rx_coal_to", &sc->jme_rx_coal_to);
1043 	if (error == 0) {
1044 		if (sc->jme_rx_coal_to < PCCRX_COAL_TO_MIN ||
1045 		    sc->jme_rx_coal_to > PCCRX_COAL_TO_MAX) {
1046 			device_printf(sc->jme_dev,
1047 			    "rx_coal_to value out of range; "
1048 			    "using default: %d\n", PCCRX_COAL_TO_DEFAULT);
1049 			sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
1050 		}
1051 	}
1052 
1053 	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
1054 	error = resource_int_value(device_get_name(sc->jme_dev),
1055 	    device_get_unit(sc->jme_dev), "rx_coal_pkt", &sc->jme_rx_coal_to);
1056 	if (error == 0) {
1057 		if (sc->jme_rx_coal_pkt < PCCRX_COAL_PKT_MIN ||
1058 		    sc->jme_rx_coal_pkt > PCCRX_COAL_PKT_MAX) {
1059 			device_printf(sc->jme_dev,
1060 			    "tx_coal_pkt value out of range; "
1061 			    "using default: %d\n", PCCRX_COAL_PKT_DEFAULT);
1062 			sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
1063 		}
1064 	}
1065 
1066 	if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
1067 		return;
1068 
1069 	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
1070 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "JME statistics");
1071 	parent = SYSCTL_CHILDREN(tree);
1072 
1073 	/* Rx statistics. */
1074 	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
1075 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Rx MAC statistics");
1076 	child = SYSCTL_CHILDREN(tree);
1077 	JME_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
1078 	    &stats->rx_good_frames, "Good frames");
1079 	JME_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
1080 	    &stats->rx_crc_errs, "CRC errors");
1081 	JME_SYSCTL_STAT_ADD32(ctx, child, "mii_errs",
1082 	    &stats->rx_mii_errs, "MII errors");
1083 	JME_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
1084 	    &stats->rx_fifo_oflows, "FIFO overflows");
1085 	JME_SYSCTL_STAT_ADD32(ctx, child, "desc_empty",
1086 	    &stats->rx_desc_empty, "Descriptor empty");
1087 	JME_SYSCTL_STAT_ADD32(ctx, child, "bad_frames",
1088 	    &stats->rx_bad_frames, "Bad frames");
1089 
1090 	/* Tx statistics. */
1091 	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx",
1092 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx MAC statistics");
1093 	child = SYSCTL_CHILDREN(tree);
1094 	JME_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
1095 	    &stats->tx_good_frames, "Good frames");
1096 	JME_SYSCTL_STAT_ADD32(ctx, child, "bad_frames",
1097 	    &stats->tx_bad_frames, "Bad frames");
1098 }
1099 
1100 #undef	JME_SYSCTL_STAT_ADD32
1101 
1102 struct jme_dmamap_arg {
1103 	bus_addr_t	jme_busaddr;
1104 };
1105 
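/*
 * busdma load callback: bus_dmamap_load() reports the physical segments
 * here.  The ring and status block tags below are created with
 * nsegments == 1, so exactly one segment is expected; its bus address is
 * returned through the jme_dmamap_arg cookie.
 */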
1106 static void
1107 jme_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1108 {
1109 	struct jme_dmamap_arg *ctx;
1110 
1111 	if (error != 0)
1112 		return;
1113 
1114 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1115 
1116 	ctx = (struct jme_dmamap_arg *)arg;
1117 	ctx->jme_busaddr = segs[0].ds_addr;
1118 }
1119 
1120 static int
1121 jme_dma_alloc(struct jme_softc *sc)
1122 {
1123 	struct jme_dmamap_arg ctx;
1124 	struct jme_txdesc *txd;
1125 	struct jme_rxdesc *rxd;
1126 	bus_addr_t lowaddr, rx_ring_end, tx_ring_end;
1127 	int error, i;
1128 
1129 	lowaddr = BUS_SPACE_MAXADDR;
1130 	if ((sc->jme_flags & JME_FLAG_DMA32BIT) != 0)
1131 		lowaddr = BUS_SPACE_MAXADDR_32BIT;
1132 
1133 again:
1134 	/* Create parent ring tag. */
1135 	error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */
1136 	    1, 0,			/* algnmnt, boundary */
1137 	    lowaddr,			/* lowaddr */
1138 	    BUS_SPACE_MAXADDR,		/* highaddr */
1139 	    NULL, NULL,			/* filter, filterarg */
1140 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1141 	    0,				/* nsegments */
1142 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1143 	    0,				/* flags */
1144 	    NULL, NULL,			/* lockfunc, lockarg */
1145 	    &sc->jme_cdata.jme_ring_tag);
1146 	if (error != 0) {
1147 		device_printf(sc->jme_dev,
1148 		    "could not create parent ring DMA tag.\n");
1149 		goto fail;
1150 	}
1151 	/* Create tag for Tx ring. */
1152 	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
1153 	    JME_TX_RING_ALIGN, 0,	/* algnmnt, boundary */
1154 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1155 	    BUS_SPACE_MAXADDR,		/* highaddr */
1156 	    NULL, NULL,			/* filter, filterarg */
1157 	    JME_TX_RING_SIZE,		/* maxsize */
1158 	    1,				/* nsegments */
1159 	    JME_TX_RING_SIZE,		/* maxsegsize */
1160 	    0,				/* flags */
1161 	    NULL, NULL,			/* lockfunc, lockarg */
1162 	    &sc->jme_cdata.jme_tx_ring_tag);
1163 	if (error != 0) {
1164 		device_printf(sc->jme_dev,
1165 		    "could not allocate Tx ring DMA tag.\n");
1166 		goto fail;
1167 	}
1168 
1169 	/* Create tag for Rx ring. */
1170 	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
1171 	    JME_RX_RING_ALIGN, 0,	/* algnmnt, boundary */
1172 	    lowaddr,			/* lowaddr */
1173 	    BUS_SPACE_MAXADDR,		/* highaddr */
1174 	    NULL, NULL,			/* filter, filterarg */
1175 	    JME_RX_RING_SIZE,		/* maxsize */
1176 	    1,				/* nsegments */
1177 	    JME_RX_RING_SIZE,		/* maxsegsize */
1178 	    0,				/* flags */
1179 	    NULL, NULL,			/* lockfunc, lockarg */
1180 	    &sc->jme_cdata.jme_rx_ring_tag);
1181 	if (error != 0) {
1182 		device_printf(sc->jme_dev,
1183 		    "could not allocate Rx ring DMA tag.\n");
1184 		goto fail;
1185 	}
1186 
1187 	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
1188 	error = bus_dmamem_alloc(sc->jme_cdata.jme_tx_ring_tag,
1189 	    (void **)&sc->jme_rdata.jme_tx_ring,
1190 	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1191 	    &sc->jme_cdata.jme_tx_ring_map);
1192 	if (error != 0) {
1193 		device_printf(sc->jme_dev,
1194 		    "could not allocate DMA'able memory for Tx ring.\n");
1195 		goto fail;
1196 	}
1197 
1198 	ctx.jme_busaddr = 0;
1199 	error = bus_dmamap_load(sc->jme_cdata.jme_tx_ring_tag,
1200 	    sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
1201 	    JME_TX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1202 	if (error != 0 || ctx.jme_busaddr == 0) {
1203 		device_printf(sc->jme_dev,
1204 		    "could not load DMA'able memory for Tx ring.\n");
1205 		goto fail;
1206 	}
1207 	sc->jme_rdata.jme_tx_ring_paddr = ctx.jme_busaddr;
1208 
1209 	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
1210 	error = bus_dmamem_alloc(sc->jme_cdata.jme_rx_ring_tag,
1211 	    (void **)&sc->jme_rdata.jme_rx_ring,
1212 	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1213 	    &sc->jme_cdata.jme_rx_ring_map);
1214 	if (error != 0) {
1215 		device_printf(sc->jme_dev,
1216 		    "could not allocate DMA'able memory for Rx ring.\n");
1217 		goto fail;
1218 	}
1219 
1220 	ctx.jme_busaddr = 0;
1221 	error = bus_dmamap_load(sc->jme_cdata.jme_rx_ring_tag,
1222 	    sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
1223 	    JME_RX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1224 	if (error != 0 || ctx.jme_busaddr == 0) {
1225 		device_printf(sc->jme_dev,
1226 		    "could not load DMA'able memory for Rx ring.\n");
1227 		goto fail;
1228 	}
1229 	sc->jme_rdata.jme_rx_ring_paddr = ctx.jme_busaddr;
1230 
1231 	if (lowaddr != BUS_SPACE_MAXADDR_32BIT) {
1232 		/* Tx/Rx descriptor rings must not cross a 4GB boundary. */
1233 		tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr +
1234 		    JME_TX_RING_SIZE;
1235 		rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr +
1236 		    JME_RX_RING_SIZE;
1237 		if ((JME_ADDR_HI(tx_ring_end) !=
1238 		    JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
1239 		    (JME_ADDR_HI(rx_ring_end) !=
1240 		     JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
1241 			device_printf(sc->jme_dev, "4GB boundary crossed, "
1242 			    "switching to 32bit DMA address mode.\n");
1243 			jme_dma_free(sc);
1244 			/* Limit DMA address space to 32bit and try again. */
1245 			lowaddr = BUS_SPACE_MAXADDR_32BIT;
1246 			goto again;
1247 		}
1248 	}
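	/*
	 * (The descriptor base registers presumably latch a single upper
	 * 32 bits per ring, so a ring straddling a 4GB boundary could not
	 * be addressed correctly; hence the 32-bit retry above.)
	 */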
1249 
1250 	lowaddr = BUS_SPACE_MAXADDR;
1251 	if ((sc->jme_flags & JME_FLAG_DMA32BIT) != 0)
1252 		lowaddr = BUS_SPACE_MAXADDR_32BIT;
1253 	/* Create parent buffer tag. */
1254 	error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */
1255 	    1, 0,			/* algnmnt, boundary */
1256 	    lowaddr,			/* lowaddr */
1257 	    BUS_SPACE_MAXADDR,		/* highaddr */
1258 	    NULL, NULL,			/* filter, filterarg */
1259 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1260 	    0,				/* nsegments */
1261 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1262 	    0,				/* flags */
1263 	    NULL, NULL,			/* lockfunc, lockarg */
1264 	    &sc->jme_cdata.jme_buffer_tag);
1265 	if (error != 0) {
1266 		device_printf(sc->jme_dev,
1267 		    "could not create parent buffer DMA tag.\n");
1268 		goto fail;
1269 	}
1270 
1271 	/* Create shadow status block tag. */
1272 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1273 	    JME_SSB_ALIGN, 0,		/* algnmnt, boundary */
1274 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1275 	    BUS_SPACE_MAXADDR,		/* highaddr */
1276 	    NULL, NULL,			/* filter, filterarg */
1277 	    JME_SSB_SIZE,		/* maxsize */
1278 	    1,				/* nsegments */
1279 	    JME_SSB_SIZE,		/* maxsegsize */
1280 	    0,				/* flags */
1281 	    NULL, NULL,			/* lockfunc, lockarg */
1282 	    &sc->jme_cdata.jme_ssb_tag);
1283 	if (error != 0) {
1284 		device_printf(sc->jme_dev,
1285 		    "could not create shared status block DMA tag.\n");
1286 		goto fail;
1287 	}
1288 
1289 	/* Create tag for Tx buffers. */
1290 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1291 	    1, 0,			/* algnmnt, boundary */
1292 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1293 	    BUS_SPACE_MAXADDR,		/* highaddr */
1294 	    NULL, NULL,			/* filter, filterarg */
1295 	    JME_TSO_MAXSIZE,		/* maxsize */
1296 	    JME_MAXTXSEGS,		/* nsegments */
1297 	    JME_TSO_MAXSEGSIZE,		/* maxsegsize */
1298 	    0,				/* flags */
1299 	    NULL, NULL,			/* lockfunc, lockarg */
1300 	    &sc->jme_cdata.jme_tx_tag);
1301 	if (error != 0) {
1302 		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
1303 		goto fail;
1304 	}
1305 
1306 	/* Create tag for Rx buffers. */
1307 	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1308 	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
1309 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1310 	    BUS_SPACE_MAXADDR,		/* highaddr */
1311 	    NULL, NULL,			/* filter, filterarg */
1312 	    MCLBYTES,			/* maxsize */
1313 	    1,				/* nsegments */
1314 	    MCLBYTES,			/* maxsegsize */
1315 	    0,				/* flags */
1316 	    NULL, NULL,			/* lockfunc, lockarg */
1317 	    &sc->jme_cdata.jme_rx_tag);
1318 	if (error != 0) {
1319 		device_printf(sc->jme_dev, "could not create Rx DMA tag.\n");
1320 		goto fail;
1321 	}
1322 
1323 	/*
1324 	 * Allocate DMA'able memory and load the DMA map for shared
1325 	 * status block.
1326 	 */
1327 	error = bus_dmamem_alloc(sc->jme_cdata.jme_ssb_tag,
1328 	    (void **)&sc->jme_rdata.jme_ssb_block,
1329 	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1330 	    &sc->jme_cdata.jme_ssb_map);
1331 	if (error != 0) {
1332 		device_printf(sc->jme_dev, "could not allocate DMA'able "
1333 		    "memory for shared status block.\n");
1334 		goto fail;
1335 	}
1336 
1337 	ctx.jme_busaddr = 0;
1338 	error = bus_dmamap_load(sc->jme_cdata.jme_ssb_tag,
1339 	    sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,
1340 	    JME_SSB_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1341 	if (error != 0 || ctx.jme_busaddr == 0) {
1342 		device_printf(sc->jme_dev, "could not load DMA'able memory "
1343 		    "for shared status block.\n");
1344 		goto fail;
1345 	}
1346 	sc->jme_rdata.jme_ssb_block_paddr = ctx.jme_busaddr;
1347 
1348 	/* Create DMA maps for Tx buffers. */
1349 	for (i = 0; i < JME_TX_RING_CNT; i++) {
1350 		txd = &sc->jme_cdata.jme_txdesc[i];
1351 		txd->tx_m = NULL;
1352 		txd->tx_dmamap = NULL;
1353 		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag, 0,
1354 		    &txd->tx_dmamap);
1355 		if (error != 0) {
1356 			device_printf(sc->jme_dev,
1357 			    "could not create Tx dmamap.\n");
1358 			goto fail;
1359 		}
1360 	}
1361 	/* Create DMA maps for Rx buffers. */
1362 	if ((error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
1363 	    &sc->jme_cdata.jme_rx_sparemap)) != 0) {
1364 		device_printf(sc->jme_dev,
1365 		    "could not create spare Rx dmamap.\n");
1366 		goto fail;
1367 	}
1368 	for (i = 0; i < JME_RX_RING_CNT; i++) {
1369 		rxd = &sc->jme_cdata.jme_rxdesc[i];
1370 		rxd->rx_m = NULL;
1371 		rxd->rx_dmamap = NULL;
1372 		error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
1373 		    &rxd->rx_dmamap);
1374 		if (error != 0) {
1375 			device_printf(sc->jme_dev,
1376 			    "could not create Rx dmamap.\n");
1377 			goto fail;
1378 		}
1379 	}
1380 
1381 fail:
1382 	return (error);
1383 }
1384 
1385 static void
1386 jme_dma_free(struct jme_softc *sc)
1387 {
1388 	struct jme_txdesc *txd;
1389 	struct jme_rxdesc *rxd;
1390 	int i;
1391 
1392 	/* Tx ring */
1393 	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
1394 		if (sc->jme_rdata.jme_tx_ring_paddr)
1395 			bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
1396 			    sc->jme_cdata.jme_tx_ring_map);
1397 		if (sc->jme_rdata.jme_tx_ring)
1398 			bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
1399 			    sc->jme_rdata.jme_tx_ring,
1400 			    sc->jme_cdata.jme_tx_ring_map);
1401 		sc->jme_rdata.jme_tx_ring = NULL;
1402 		sc->jme_rdata.jme_tx_ring_paddr = 0;
1403 		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
1404 		sc->jme_cdata.jme_tx_ring_tag = NULL;
1405 	}
1406 	/* Rx ring */
1407 	if (sc->jme_cdata.jme_rx_ring_tag != NULL) {
1408 		if (sc->jme_rdata.jme_rx_ring_paddr)
1409 			bus_dmamap_unload(sc->jme_cdata.jme_rx_ring_tag,
1410 			    sc->jme_cdata.jme_rx_ring_map);
1411 		if (sc->jme_rdata.jme_rx_ring)
1412 			bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag,
1413 			    sc->jme_rdata.jme_rx_ring,
1414 			    sc->jme_cdata.jme_rx_ring_map);
1415 		sc->jme_rdata.jme_rx_ring = NULL;
1416 		sc->jme_rdata.jme_rx_ring_paddr = 0;
1417 		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
1418 		sc->jme_cdata.jme_rx_ring_tag = NULL;
1419 	}
1420 	/* Tx buffers */
1421 	if (sc->jme_cdata.jme_tx_tag != NULL) {
1422 		for (i = 0; i < JME_TX_RING_CNT; i++) {
1423 			txd = &sc->jme_cdata.jme_txdesc[i];
1424 			if (txd->tx_dmamap != NULL) {
1425 				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1426 				    txd->tx_dmamap);
1427 				txd->tx_dmamap = NULL;
1428 			}
1429 		}
1430 		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1431 		sc->jme_cdata.jme_tx_tag = NULL;
1432 	}
1433 	/* Rx buffers */
1434 	if (sc->jme_cdata.jme_rx_tag != NULL) {
1435 		for (i = 0; i < JME_RX_RING_CNT; i++) {
1436 			rxd = &sc->jme_cdata.jme_rxdesc[i];
1437 			if (rxd->rx_dmamap != NULL) {
1438 				bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
1439 				    rxd->rx_dmamap);
1440 				rxd->rx_dmamap = NULL;
1441 			}
1442 		}
1443 		if (sc->jme_cdata.jme_rx_sparemap != NULL) {
1444 			bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
1445 			    sc->jme_cdata.jme_rx_sparemap);
1446 			sc->jme_cdata.jme_rx_sparemap = NULL;
1447 		}
1448 		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
1449 		sc->jme_cdata.jme_rx_tag = NULL;
1450 	}
1451 
1452 	/* Shared status block. */
1453 	if (sc->jme_cdata.jme_ssb_tag != NULL) {
1454 		if (sc->jme_rdata.jme_ssb_block_paddr)
1455 			bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
1456 			    sc->jme_cdata.jme_ssb_map);
1457 		if (sc->jme_rdata.jme_ssb_block)
1458 			bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
1459 			    sc->jme_rdata.jme_ssb_block,
1460 			    sc->jme_cdata.jme_ssb_map);
1461 		sc->jme_rdata.jme_ssb_block = NULL;
1462 		sc->jme_rdata.jme_ssb_block_paddr = 0;
1463 		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1464 		sc->jme_cdata.jme_ssb_tag = NULL;
1465 	}
1466 
1467 	if (sc->jme_cdata.jme_buffer_tag != NULL) {
1468 		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
1469 		sc->jme_cdata.jme_buffer_tag = NULL;
1470 	}
1471 	if (sc->jme_cdata.jme_ring_tag != NULL) {
1472 		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
1473 		sc->jme_cdata.jme_ring_tag = NULL;
1474 	}
1475 }
1476 
1477 /*
1478  *	Make sure the interface is stopped at reboot time.
1479  */
1480 static int
1481 jme_shutdown(device_t dev)
1482 {
1483 
1484 	return (jme_suspend(dev));
1485 }
1486 
1487 /*
1488  * Unlike other ethernet controllers, the JMC250 requires the
1489  * link speed to be explicitly reset to 10/100Mbps, as a gigabit
1490  * link will consume more power than 375mA.
1491  * Note, we reset the link speed to 10/100Mbps with
1492  * auto-negotiation, but we don't know whether that operation
1493  * will succeed or not, as we have no control after powering
1494  * off.  If the renegotiation fails, WOL may not work.  Running
1495  * at 1Gbps draws more power than the 375mA at 3.3V specified
1496  * in the PCI specification, and that could result in power to
1497  * the ethernet controller being shut down completely.
1498  *
1499  * TODO
1500  *  Save current negotiated media speed/duplex/flow-control
1501  *  to softc and restore the same link again after resuming.
1502  *  PHY handling such as power down/resetting to 100Mbps
1503  *  may be better handled in suspend method in phy driver.
1504  */
1505 static void
1506 jme_setlinkspeed(struct jme_softc *sc)
1507 {
1508 	struct mii_data *mii;
1509 	int aneg, i;
1510 
1511 	JME_LOCK_ASSERT(sc);
1512 
1513 	mii = device_get_softc(sc->jme_miibus);
1514 	mii_pollstat(mii);
1515 	aneg = 0;
1516 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
1517 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
1518 		case IFM_10_T:
1519 		case IFM_100_TX:
1520 			return;
1521 		case IFM_1000_T:
1522 			aneg++;
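			/* FALLTHROUGH */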
1523 		default:
1524 			break;
1525 		}
1526 	}
1527 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1528 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1529 	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1530 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1531 	    BMCR_AUTOEN | BMCR_STARTNEG);
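	/*
	 * The advertisement written above offers only 10/100 (MII_100T2CR
	 * was cleared, dropping 1000baseT), so restarting auto-negotiation
	 * should settle the link at a WOL-safe speed.
	 */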
1532 	DELAY(1000);
1533 	if (aneg != 0) {
1534 		/* Poll the link state until jme(4) gets a 10/100 link. */
1535 		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1536 			mii_pollstat(mii);
1537 			if ((mii->mii_media_status & IFM_AVALID) != 0) {
1538 				switch (IFM_SUBTYPE(mii->mii_media_active)) {
1539 				case IFM_10_T:
1540 				case IFM_100_TX:
1541 					jme_mac_config(sc);
1542 					return;
1543 				default:
1544 					break;
1545 				}
1546 			}
1547 			JME_UNLOCK(sc);
1548 			pause("jmelnk", hz);
1549 			JME_LOCK(sc);
1550 		}
1551 		if (i == MII_ANEGTICKS_GIGE)
1552 			device_printf(sc->jme_dev, "establishing link failed, "
1553 			    "WOL may not work!");
1554 	}
1555 	/*
1556 	 * No link, force MAC to have 100Mbps, full-duplex link.
1557 	 * This is the last resort and may/may not work.
1558 	 */
1559 	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1560 	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1561 	jme_mac_config(sc);
1562 }
1563 
1564 static void
1565 jme_setwol(struct jme_softc *sc)
1566 {
1567 	if_t ifp;
1568 	uint32_t gpr, pmcs;
1569 	uint16_t pmstat;
1570 	int pmc;
1571 
1572 	JME_LOCK_ASSERT(sc);
1573 
1574 	if (pci_find_cap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1575 		/* Remove Tx MAC/offload clock to save more power. */
1576 		if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
1577 			CSR_WRITE_4(sc, JME_GHC, CSR_READ_4(sc, JME_GHC) &
1578 			    ~(GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100 |
1579 			    GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000));
1580 		if ((sc->jme_flags & JME_FLAG_RXCLK) != 0)
1581 			CSR_WRITE_4(sc, JME_GPREG1,
1582 			    CSR_READ_4(sc, JME_GPREG1) | GPREG1_RX_MAC_CLK_DIS);
1583 		/* No PME capability, PHY power down. */
1584 		jme_phy_down(sc);
1585 		return;
1586 	}
1587 
1588 	ifp = sc->jme_ifp;
1589 	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
1590 	pmcs = CSR_READ_4(sc, JME_PMCS);
1591 	pmcs &= ~PMCS_WOL_ENB_MASK;
1592 	if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0) {
1593 		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
1594 		/* Enable PME message. */
1595 		gpr |= GPREG0_PME_ENB;
1596 		/* For gigabit controllers, reset link speed to 10/100. */
1597 		if ((sc->jme_flags & JME_FLAG_FASTETH) == 0)
1598 			jme_setlinkspeed(sc);
1599 	}
1600 
1601 	CSR_WRITE_4(sc, JME_PMCS, pmcs);
1602 	CSR_WRITE_4(sc, JME_GPREG0, gpr);
1603 	/* Remove Tx MAC/offload clock to save more power. */
1604 	if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
1605 		CSR_WRITE_4(sc, JME_GHC, CSR_READ_4(sc, JME_GHC) &
1606 		    ~(GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100 |
1607 		    GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000));
1608 	/* Request PME. */
1609 	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
1610 	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1611 	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
1612 		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1613 	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1614 	if ((if_getcapenable(ifp) & IFCAP_WOL) == 0) {
1615 		/* No WOL, PHY power down. */
1616 		jme_phy_down(sc);
1617 	}
1618 }
1619 
1620 static int
1621 jme_suspend(device_t dev)
1622 {
1623 	struct jme_softc *sc;
1624 
1625 	sc = device_get_softc(dev);
1626 
1627 	JME_LOCK(sc);
1628 	jme_stop(sc);
1629 	jme_setwol(sc);
1630 	JME_UNLOCK(sc);
1631 
1632 	return (0);
1633 }
1634 
1635 static int
1636 jme_resume(device_t dev)
1637 {
1638 	struct jme_softc *sc;
1639 	if_t ifp;
1640 	uint16_t pmstat;
1641 	int pmc;
1642 
1643 	sc = device_get_softc(dev);
1644 
1645 	JME_LOCK(sc);
1646 	if (pci_find_cap(sc->jme_dev, PCIY_PMG, &pmc) == 0) {
1647 		pmstat = pci_read_config(sc->jme_dev,
1648 		    pmc + PCIR_POWER_STATUS, 2);
1649 		/* Disable PME and clear PME status. */
1650 		pmstat &= ~PCIM_PSTAT_PMEENABLE;
1651 		pci_write_config(sc->jme_dev,
1652 		    pmc + PCIR_POWER_STATUS, pmstat, 2);
1653 	}
1654 	/* Wakeup PHY. */
1655 	jme_phy_up(sc);
1656 	ifp = sc->jme_ifp;
1657 	if ((if_getflags(ifp) & IFF_UP) != 0) {
1658 		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1659 		jme_init_locked(sc);
1660 	}
1661 
1662 	JME_UNLOCK(sc);
1663 
1664 	return (0);
1665 }
1666 
1667 static int
1668 jme_encap(struct jme_softc *sc, struct mbuf **m_head)
1669 {
1670 	struct jme_txdesc *txd;
1671 	struct jme_desc *desc;
1672 	struct mbuf *m;
1673 	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
1674 	int error, i, nsegs, prod;
1675 	uint32_t cflags, tsosegsz;
1676 
1677 	JME_LOCK_ASSERT(sc);
1678 
1679 	M_ASSERTPKTHDR((*m_head));
1680 
1681 	if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
1682 		/*
1683 		 * Due to its adherence to the NDIS specification, the
1684 		 * JMC250 assumes the upper stack computed the TCP
1685 		 * pseudo checksum without including the payload length.
1686 		 * This breaks checksum offload for the TSO case, so
1687 		 * recompute the TCP pseudo checksum for the JMC250.
1688 		 * Hopefully this isn't much of a burden on modern CPUs.
1689 		 */
1690 		struct ether_header *eh;
1691 		struct ip *ip;
1692 		struct tcphdr *tcp;
1693 		uint32_t ip_off, poff;
1694 
1695 		if (M_WRITABLE(*m_head) == 0) {
1696 			/* Get a writable copy. */
1697 			m = m_dup(*m_head, M_NOWAIT);
1698 			m_freem(*m_head);
1699 			if (m == NULL) {
1700 				*m_head = NULL;
1701 				return (ENOBUFS);
1702 			}
1703 			*m_head = m;
1704 		}
1705 		ip_off = sizeof(struct ether_header);
1706 		m = m_pullup(*m_head, ip_off);
1707 		if (m == NULL) {
1708 			*m_head = NULL;
1709 			return (ENOBUFS);
1710 		}
1711 		eh = mtod(m, struct ether_header *);
1712 		/* Check the existence of VLAN tag. */
1713 		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1714 			ip_off = sizeof(struct ether_vlan_header);
1715 			m = m_pullup(m, ip_off);
1716 			if (m == NULL) {
1717 				*m_head = NULL;
1718 				return (ENOBUFS);
1719 			}
1720 		}
1721 		m = m_pullup(m, ip_off + sizeof(struct ip));
1722 		if (m == NULL) {
1723 			*m_head = NULL;
1724 			return (ENOBUFS);
1725 		}
1726 		ip = (struct ip *)(mtod(m, char *) + ip_off);
1727 		poff = ip_off + (ip->ip_hl << 2);
1728 		m = m_pullup(m, poff + sizeof(struct tcphdr));
1729 		if (m == NULL) {
1730 			*m_head = NULL;
1731 			return (ENOBUFS);
1732 		}
1733 		/*
1734 		 * Reset IP checksum and recompute TCP pseudo
1735 		 * checksum that NDIS specification requires.
1736 		 */
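		/*
		 * A sketch of the arithmetic: in_pseudo() returns the
		 * partial one's-complement sum of the pseudo-header
		 * words.  For a header-only packet there is nothing to
		 * segment, so TSO is dropped in favor of plain checksum
		 * offload and the sum includes length + protocol.
		 * Otherwise only the protocol is folded in; the hardware
		 * presumably adds the per-segment TCP length itself, as
		 * the NDIS note above implies.
		 */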
1737 		ip = (struct ip *)(mtod(m, char *) + ip_off);
1738 		tcp = (struct tcphdr *)(mtod(m, char *) + poff);
1739 		ip->ip_sum = 0;
1740 		if (poff + (tcp->th_off << 2) == m->m_pkthdr.len) {
1741 			tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
1742 			    ip->ip_dst.s_addr,
1743 			    htons((tcp->th_off << 2) + IPPROTO_TCP));
1744 			/* No need for TSO; force IP checksum offload. */
1745 			(*m_head)->m_pkthdr.csum_flags &= ~CSUM_TSO;
1746 			(*m_head)->m_pkthdr.csum_flags |= CSUM_IP;
1747 		} else
1748 			tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
1749 			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1750 		*m_head = m;
1751 	}
1752 
1753 	prod = sc->jme_cdata.jme_tx_prod;
1754 	txd = &sc->jme_cdata.jme_txdesc[prod];
1755 
1756 	error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
1757 	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
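	/*
	 * EFBIG means the mbuf chain has more than JME_MAXTXSEGS
	 * scatter segments; m_collapse() compacts the chain into fewer
	 * clusters so the DMA load can be retried once before the
	 * frame is dropped.
	 */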
1758 	if (error == EFBIG) {
1759 		m = m_collapse(*m_head, M_NOWAIT, JME_MAXTXSEGS);
1760 		if (m == NULL) {
1761 			m_freem(*m_head);
1762 			*m_head = NULL;
1763 			return (ENOMEM);
1764 		}
1765 		*m_head = m;
1766 		error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
1767 		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1768 		if (error != 0) {
1769 			m_freem(*m_head);
1770 			*m_head = NULL;
1771 			return (error);
1772 		}
1773 	} else if (error != 0)
1774 		return (error);
1775 	if (nsegs == 0) {
1776 		m_freem(*m_head);
1777 		*m_head = NULL;
1778 		return (EIO);
1779 	}
1780 
1781 	/*
1782 	 * Check descriptor overrun. Leave one free descriptor.
1783 	 * Since we always use 64bit address mode for transmitting,
1784 	 * each Tx request requires one more dummy descriptor.
1785 	 */
1786 	if (sc->jme_cdata.jme_tx_cnt + nsegs + 1 > JME_TX_RING_CNT - 1) {
1787 		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
1788 		return (ENOBUFS);
1789 	}
1790 
1791 	m = *m_head;
1792 	cflags = 0;
1793 	tsosegsz = 0;
1794 	/* Configure checksum offload and TSO. */
1795 	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
1796 		tsosegsz = (uint32_t)m->m_pkthdr.tso_segsz <<
1797 		    JME_TD_MSS_SHIFT;
1798 		cflags |= JME_TD_TSO;
1799 	} else {
1800 		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
1801 			cflags |= JME_TD_IPCSUM;
1802 		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
1803 			cflags |= JME_TD_TCPCSUM;
1804 		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
1805 			cflags |= JME_TD_UDPCSUM;
1806 	}
1807 	/* Configure VLAN. */
1808 	if ((m->m_flags & M_VLANTAG) != 0) {
1809 		cflags |= (m->m_pkthdr.ether_vtag & JME_TD_VLAN_MASK);
1810 		cflags |= JME_TD_VLAN_TAG;
1811 	}
1812 
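	/*
	 * First comes the control descriptor (the extra descriptor
	 * accounted for above): it carries no buffer, holding the
	 * checksum/TSO flags, the TSO MSS in 'buflen', and the total
	 * packet length in 'addr_hi' instead.
	 */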
1813 	desc = &sc->jme_rdata.jme_tx_ring[prod];
1814 	desc->flags = htole32(cflags);
1815 	desc->buflen = htole32(tsosegsz);
1816 	desc->addr_hi = htole32(m->m_pkthdr.len);
1817 	desc->addr_lo = 0;
1818 	sc->jme_cdata.jme_tx_cnt++;
1819 	JME_DESC_INC(prod, JME_TX_RING_CNT);
1820 	for (i = 0; i < nsegs; i++) {
1821 		desc = &sc->jme_rdata.jme_tx_ring[prod];
1822 		desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT);
1823 		desc->buflen = htole32(txsegs[i].ds_len);
1824 		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
1825 		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
1826 		sc->jme_cdata.jme_tx_cnt++;
1827 		JME_DESC_INC(prod, JME_TX_RING_CNT);
1828 	}
1829 
1830 	/* Update producer index. */
1831 	sc->jme_cdata.jme_tx_prod = prod;
1832 	/*
1833 	 * Finally, request an interrupt and give ownership of the
1834 	 * first descriptor to the hardware.
1835 	 */
1836 	desc = txd->tx_desc;
1837 	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
1838 
1839 	txd->tx_m = m;
1840 	txd->tx_ndesc = nsegs + 1;
1841 
1842 	/* Sync descriptors. */
1843 	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
1844 	    BUS_DMASYNC_PREWRITE);
1845 	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
1846 	    sc->jme_cdata.jme_tx_ring_map,
1847 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1848 
1849 	return (0);
1850 }
1851 
1852 static void
1853 jme_start(if_t ifp)
1854 {
1855 	struct jme_softc *sc;
1856 
1857 	sc = if_getsoftc(ifp);
1858 	JME_LOCK(sc);
1859 	jme_start_locked(ifp);
1860 	JME_UNLOCK(sc);
1861 }
1862 
1863 static void
1864 jme_start_locked(if_t ifp)
1865 {
1866 	struct jme_softc *sc;
1867 	struct mbuf *m_head;
1868 	int enq;
1869 
1870 	sc = if_getsoftc(ifp);
1871 
1872 	JME_LOCK_ASSERT(sc);
1873 
1874 	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT)
1875 		jme_txeof(sc);
1876 
1877 	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1878 	    IFF_DRV_RUNNING || (sc->jme_flags & JME_FLAG_LINK) == 0)
1879 		return;
1880 
1881 	for (enq = 0; !if_sendq_empty(ifp); ) {
1882 		m_head = if_dequeue(ifp);
1883 		if (m_head == NULL)
1884 			break;
1885 		/*
1886 		 * Pack the data into the transmit ring. If we
1887 		 * don't have room, set the OACTIVE flag and wait
1888 		 * for the NIC to drain the ring.
1889 		 */
1890 		if (jme_encap(sc, &m_head)) {
1891 			if (m_head == NULL)
1892 				break;
1893 			if_sendq_prepend(ifp, m_head);
1894 			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
1895 			break;
1896 		}
1897 
1898 		enq++;
1899 		/*
1900 		 * If there's a BPF listener, bounce a copy of this frame
1901 		 * to him.
1902 		 */
1903 		ETHER_BPF_MTAP(ifp, m_head);
1904 	}
1905 
1906 	if (enq > 0) {
1907 		/*
1908 		 * Reading TXCSR takes a very long time under heavy load
1909 		 * so cache the TXCSR value and write the ORed value with
1910 		 * the kick command to the TXCSR. This saves one register
1911 		 * access cycle.
1912 		 */
1913 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1914 		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
1915 		/* Set a timeout in case the chip goes out to lunch. */
1916 		sc->jme_watchdog_timer = JME_TX_TIMEOUT;
1917 	}
1918 }
1919 
1920 static void
1921 jme_watchdog(struct jme_softc *sc)
1922 {
1923 	if_t ifp;
1924 
1925 	JME_LOCK_ASSERT(sc);
1926 
1927 	if (sc->jme_watchdog_timer == 0 || --sc->jme_watchdog_timer)
1928 		return;
1929 
1930 	ifp = sc->jme_ifp;
1931 	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1932 		if_printf(sc->jme_ifp, "watchdog timeout (missed link)\n");
1933 		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1934 		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1935 		jme_init_locked(sc);
1936 		return;
1937 	}
1938 	jme_txeof(sc);
1939 	if (sc->jme_cdata.jme_tx_cnt == 0) {
1940 		if_printf(sc->jme_ifp,
1941 		    "watchdog timeout (missed Tx interrupts) -- recovering\n");
1942 		if (!if_sendq_empty(ifp))
1943 			jme_start_locked(ifp);
1944 		return;
1945 	}
1946 
1947 	if_printf(sc->jme_ifp, "watchdog timeout\n");
1948 	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1949 	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1950 	jme_init_locked(sc);
1951 	if (!if_sendq_empty(ifp))
1952 		jme_start_locked(ifp);
1953 }
1954 
1955 static int
1956 jme_ioctl(if_t ifp, u_long cmd, caddr_t data)
1957 {
1958 	struct jme_softc *sc;
1959 	struct ifreq *ifr;
1960 	struct mii_data *mii;
1961 	uint32_t reg;
1962 	int error, mask;
1963 
1964 	sc = if_getsoftc(ifp);
1965 	ifr = (struct ifreq *)data;
1966 	error = 0;
1967 	switch (cmd) {
1968 	case SIOCSIFMTU:
1969 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
1970 		    ((sc->jme_flags & JME_FLAG_NOJUMBO) != 0 &&
1971 		    ifr->ifr_mtu > JME_MAX_MTU)) {
1972 			error = EINVAL;
1973 			break;
1974 		}
1975 
1976 		if (if_getmtu(ifp) != ifr->ifr_mtu) {
1977 			/*
1978 			 * No special configuration is required when the
1979 			 * interface MTU is changed, but the availability of
1980 			 * TSO/Tx checksum offload should be checked against
1981 			 * the new MTU size, as the FIFO size is just 2K.
1982 			 */
1983 			JME_LOCK(sc);
1984 			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
1985 				if_setcapenablebit(ifp, 0,
1986 				    IFCAP_TXCSUM | IFCAP_TSO4);
1987 				if_sethwassistbits(ifp, 0,
1988 				    JME_CSUM_FEATURES | CSUM_TSO);
1989 				VLAN_CAPABILITIES(ifp);
1990 			}
1991 			if_setmtu(ifp, ifr->ifr_mtu);
1992 			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
1993 				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1994 				jme_init_locked(sc);
1995 			}
1996 			JME_UNLOCK(sc);
1997 		}
1998 		break;
1999 	case SIOCSIFFLAGS:
2000 		JME_LOCK(sc);
2001 		if ((if_getflags(ifp) & IFF_UP) != 0) {
2002 			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2003 				if (((if_getflags(ifp) ^ sc->jme_if_flags)
2004 				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2005 					jme_set_filter(sc);
2006 			} else {
2007 				if ((sc->jme_flags & JME_FLAG_DETACH) == 0)
2008 					jme_init_locked(sc);
2009 			}
2010 		} else {
2011 			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
2012 				jme_stop(sc);
2013 		}
2014 		sc->jme_if_flags = if_getflags(ifp);
2015 		JME_UNLOCK(sc);
2016 		break;
2017 	case SIOCADDMULTI:
2018 	case SIOCDELMULTI:
2019 		JME_LOCK(sc);
2020 		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
2021 			jme_set_filter(sc);
2022 		JME_UNLOCK(sc);
2023 		break;
2024 	case SIOCSIFMEDIA:
2025 	case SIOCGIFMEDIA:
2026 		mii = device_get_softc(sc->jme_miibus);
2027 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
2028 		break;
2029 	case SIOCSIFCAP:
2030 		JME_LOCK(sc);
2031 		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
2032 		if ((mask & IFCAP_TXCSUM) != 0 &&
2033 		    if_getmtu(ifp) < JME_TX_FIFO_SIZE) {
2034 			if ((IFCAP_TXCSUM & if_getcapabilities(ifp)) != 0) {
2035 				if_togglecapenable(ifp, IFCAP_TXCSUM);
2036 				if ((IFCAP_TXCSUM & if_getcapenable(ifp)) != 0)
2037 					if_sethwassistbits(ifp, JME_CSUM_FEATURES, 0);
2038 				else
2039 					if_sethwassistbits(ifp, 0, JME_CSUM_FEATURES);
2040 			}
2041 		}
2042 		if ((mask & IFCAP_RXCSUM) != 0 &&
2043 		    (IFCAP_RXCSUM & if_getcapabilities(ifp)) != 0) {
2044 			if_togglecapenable(ifp, IFCAP_RXCSUM);
2045 			reg = CSR_READ_4(sc, JME_RXMAC);
2046 			reg &= ~RXMAC_CSUM_ENB;
2047 			if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
2048 				reg |= RXMAC_CSUM_ENB;
2049 			CSR_WRITE_4(sc, JME_RXMAC, reg);
2050 		}
2051 		if ((mask & IFCAP_TSO4) != 0 &&
2052 		    if_getmtu(ifp) < JME_TX_FIFO_SIZE) {
2053 			if ((IFCAP_TSO4 & if_getcapabilities(ifp)) != 0) {
2054 				if_togglecapenable(ifp, IFCAP_TSO4);
2055 				if ((IFCAP_TSO4 & if_getcapenable(ifp)) != 0)
2056 					if_sethwassistbits(ifp, CSUM_TSO, 0);
2057 				else
2058 					if_sethwassistbits(ifp, 0, CSUM_TSO);
2059 			}
2060 		}
2061 		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
2062 		    (IFCAP_WOL_MAGIC & if_getcapabilities(ifp)) != 0)
2063 			if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
2064 		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
2065 		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWCSUM) != 0)
2066 			if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM);
2067 		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
2068 		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0)
2069 			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
2070 		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
2071 		    (IFCAP_VLAN_HWTAGGING & if_getcapabilities(ifp)) != 0) {
2072 			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
2073 			jme_set_vlan(sc);
2074 		}
2075 		JME_UNLOCK(sc);
2076 		VLAN_CAPABILITIES(ifp);
2077 		break;
2078 	default:
2079 		error = ether_ioctl(ifp, cmd, data);
2080 		break;
2081 	}
2082 
2083 	return (error);
2084 }
2085 
2086 static void
2087 jme_mac_config(struct jme_softc *sc)
2088 {
2089 	struct mii_data *mii;
2090 	uint32_t ghc, gpreg, rxmac, txmac, txpause;
2091 	uint32_t txclk;
2092 
2093 	JME_LOCK_ASSERT(sc);
2094 
2095 	mii = device_get_softc(sc->jme_miibus);
2096 
2097 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2098 	DELAY(10);
2099 	CSR_WRITE_4(sc, JME_GHC, 0);
2100 	ghc = 0;
2101 	txclk = 0;
2102 	rxmac = CSR_READ_4(sc, JME_RXMAC);
2103 	rxmac &= ~RXMAC_FC_ENB;
2104 	txmac = CSR_READ_4(sc, JME_TXMAC);
2105 	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
2106 	txpause = CSR_READ_4(sc, JME_TXPFC);
2107 	txpause &= ~TXPFC_PAUSE_ENB;
2108 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
2109 		ghc |= GHC_FULL_DUPLEX;
2110 		rxmac &= ~RXMAC_COLL_DET_ENB;
2111 		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
2112 		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
2113 		    TXMAC_FRAME_BURST);
2114 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
2115 			txpause |= TXPFC_PAUSE_ENB;
2116 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
2117 			rxmac |= RXMAC_FC_ENB;
2118 		/* Disable retry transmit timer/retry limit. */
2119 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
2120 		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
2121 	} else {
2122 		rxmac |= RXMAC_COLL_DET_ENB;
2123 		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
2124 		/* Enable retry transmit timer/retry limit. */
2125 		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
2126 		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
2127 	}
2128 	/* Reprogram Tx/Rx MACs with resolved speed/duplex. */
2129 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
2130 	case IFM_10_T:
2131 		ghc |= GHC_SPEED_10;
2132 		txclk |= GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100;
2133 		break;
2134 	case IFM_100_TX:
2135 		ghc |= GHC_SPEED_100;
2136 		txclk |= GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100;
2137 		break;
2138 	case IFM_1000_T:
2139 		if ((sc->jme_flags & JME_FLAG_FASTETH) != 0)
2140 			break;
2141 		ghc |= GHC_SPEED_1000;
2142 		txclk |= GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000;
2143 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
2144 			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
2145 		break;
2146 	default:
2147 		break;
2148 	}
2149 	if (sc->jme_rev == DEVICEID_JMC250 &&
2150 	    sc->jme_chip_rev == DEVICEREVID_JMC250_A2) {
2151 		/*
2152 		 * Workaround occasional packet loss issue of JMC250 A2
2153 		 * when it runs on half-duplex media.
2154 		 */
2155 		gpreg = CSR_READ_4(sc, JME_GPREG1);
2156 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
2157 			gpreg &= ~GPREG1_HDPX_FIX;
2158 		else
2159 			gpreg |= GPREG1_HDPX_FIX;
2160 		CSR_WRITE_4(sc, JME_GPREG1, gpreg);
2161 		/* Workaround CRC errors at 100Mbps on JMC250 A2. */
2162 		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
2163 			/* Extend interface FIFO depth. */
2164 			jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
2165 			    0x1B, 0x0000);
2166 		} else {
2167 			/* Select default interface FIFO depth. */
2168 			jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
2169 			    0x1B, 0x0004);
2170 		}
2171 	}
2172 	if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
2173 		ghc |= txclk;
2174 	CSR_WRITE_4(sc, JME_GHC, ghc);
2175 	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
2176 	CSR_WRITE_4(sc, JME_TXMAC, txmac);
2177 	CSR_WRITE_4(sc, JME_TXPFC, txpause);
2178 }
2179 
2180 static void
2181 jme_link_task(void *arg, int pending)
2182 {
2183 	struct jme_softc *sc;
2184 	struct mii_data *mii;
2185 	if_t ifp;
2186 	struct jme_txdesc *txd;
2187 	bus_addr_t paddr;
2188 	int i;
2189 
2190 	sc = (struct jme_softc *)arg;
2191 
2192 	JME_LOCK(sc);
2193 	mii = device_get_softc(sc->jme_miibus);
2194 	ifp = sc->jme_ifp;
2195 	if (mii == NULL || ifp == NULL ||
2196 	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
2197 		JME_UNLOCK(sc);
2198 		return;
2199 	}
2200 
2201 	sc->jme_flags &= ~JME_FLAG_LINK;
2202 	if ((mii->mii_media_status & IFM_AVALID) != 0) {
2203 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
2204 		case IFM_10_T:
2205 		case IFM_100_TX:
2206 			sc->jme_flags |= JME_FLAG_LINK;
2207 			break;
2208 		case IFM_1000_T:
2209 			if ((sc->jme_flags & JME_FLAG_FASTETH) != 0)
2210 				break;
2211 			sc->jme_flags |= JME_FLAG_LINK;
2212 			break;
2213 		default:
2214 			break;
2215 		}
2216 	}
2217 
2218 	/*
2219 	 * Disabling the Rx/Tx MACs has the side effect of resetting
2220 	 * the JME_TXNDA/JME_RXNDA registers to the first address of
2221 	 * the Tx/Rx descriptor rings. So the driver should reset its
2222 	 * internal producer/consumer pointers and reclaim any
2223 	 * allocated resources. Note that just saving the value of
2224 	 * the JME_TXNDA and JME_RXNDA registers before stopping the
2225 	 * MAC and restoring them afterwards is not sufficient to
2226 	 * ensure a correct MAC state, because stopping MAC
2227 	 * operation can take a while and the hardware might have
2228 	 * updated the JME_TXNDA/JME_RXNDA registers during the
2229 	 * stop operation.
2230 	 */
2231 	/* Block execution of task. */
2232 	taskqueue_block(sc->jme_tq);
2233 	/* Disable interrupts and stop driver. */
2234 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2235 	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
2236 	callout_stop(&sc->jme_tick_ch);
2237 	sc->jme_watchdog_timer = 0;
2238 
2239 	/* Stop receiver/transmitter. */
2240 	jme_stop_rx(sc);
2241 	jme_stop_tx(sc);
2242 
2243 	/* XXX Drain all queued tasks. */
2244 	JME_UNLOCK(sc);
2245 	taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
2246 	JME_LOCK(sc);
2247 
2248 	if (sc->jme_cdata.jme_rxhead != NULL)
2249 		m_freem(sc->jme_cdata.jme_rxhead);
2250 	JME_RXCHAIN_RESET(sc);
2251 	jme_txeof(sc);
2252 	if (sc->jme_cdata.jme_tx_cnt != 0) {
2253 		/* Remove queued packets for transmit. */
2254 		for (i = 0; i < JME_TX_RING_CNT; i++) {
2255 			txd = &sc->jme_cdata.jme_txdesc[i];
2256 			if (txd->tx_m != NULL) {
2257 				bus_dmamap_sync(
2258 				    sc->jme_cdata.jme_tx_tag,
2259 				    txd->tx_dmamap,
2260 				    BUS_DMASYNC_POSTWRITE);
2261 				bus_dmamap_unload(
2262 				    sc->jme_cdata.jme_tx_tag,
2263 				    txd->tx_dmamap);
2264 				m_freem(txd->tx_m);
2265 				txd->tx_m = NULL;
2266 				txd->tx_ndesc = 0;
2267 				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2268 			}
2269 		}
2270 	}
2271 
2272 	/*
2273 	 * Reuse configured Rx descriptors and reset
2274 	 * producer/consumer index.
2275 	 */
2276 	sc->jme_cdata.jme_rx_cons = 0;
2277 	sc->jme_morework = 0;
2278 	jme_init_tx_ring(sc);
2279 	/* Initialize shadow status block. */
2280 	jme_init_ssb(sc);
2281 
2282 	/* Program MAC with resolved speed/duplex/flow-control. */
2283 	if ((sc->jme_flags & JME_FLAG_LINK) != 0) {
2284 		jme_mac_config(sc);
2285 		jme_stats_clear(sc);
2286 
2287 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
2288 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2289 
2290 		/* Set Tx ring address to the hardware. */
2291 		paddr = JME_TX_RING_ADDR(sc, 0);
2292 		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2293 		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2294 
2295 		/* Set Rx ring address to the hardware. */
2296 		paddr = JME_RX_RING_ADDR(sc, 0);
2297 		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2298 		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2299 
2300 		/* Restart receiver/transmitter. */
2301 		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
2302 		    RXCSR_RXQ_START);
2303 		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
2304 		/* Lastly enable TX/RX clock. */
2305 		if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
2306 			CSR_WRITE_4(sc, JME_GHC,
2307 			    CSR_READ_4(sc, JME_GHC) & ~GHC_TX_MAC_CLK_DIS);
2308 		if ((sc->jme_flags & JME_FLAG_RXCLK) != 0)
2309 			CSR_WRITE_4(sc, JME_GPREG1,
2310 			    CSR_READ_4(sc, JME_GPREG1) & ~GPREG1_RX_MAC_CLK_DIS);
2311 	}
2312 
2313 	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
2314 	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2315 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2316 	/* Unblock execution of task. */
2317 	taskqueue_unblock(sc->jme_tq);
2318 	/* Reenable interrupts. */
2319 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2320 
2321 	JME_UNLOCK(sc);
2322 }
2323 
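/*
 * Interrupt handling is split in two: jme_intr() runs as an interrupt
 * filter that only screens out stray interrupts and masks the rest,
 * deferring the real work to jme_int_task() on the driver taskqueue,
 * which re-enables interrupts once the queues have been serviced.
 */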
2324 static int
2325 jme_intr(void *arg)
2326 {
2327 	struct jme_softc *sc;
2328 	uint32_t status;
2329 
2330 	sc = (struct jme_softc *)arg;
2331 
2332 	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
2333 	if (status == 0 || status == 0xFFFFFFFF)
2334 		return (FILTER_STRAY);
2335 	/* Disable interrupts. */
2336 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2337 	taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);
2338 
2339 	return (FILTER_HANDLED);
2340 }
2341 
2342 static void
2343 jme_int_task(void *arg, int pending)
2344 {
2345 	struct jme_softc *sc;
2346 	if_t ifp;
2347 	uint32_t status;
2348 	int more;
2349 
2350 	sc = (struct jme_softc *)arg;
2351 	ifp = sc->jme_ifp;
2352 
2353 	JME_LOCK(sc);
2354 	status = CSR_READ_4(sc, JME_INTR_STATUS);
2355 	if (sc->jme_morework != 0) {
2356 		sc->jme_morework = 0;
2357 		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO;
2358 	}
2359 	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
2360 		goto done;
2361 	/* Reset PCC counter/timer and Ack interrupts. */
2362 	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
2363 	if ((status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) != 0)
2364 		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
2365 	if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0)
2366 		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
2367 	CSR_WRITE_4(sc, JME_INTR_STATUS, status);
2368 	more = 0;
2369 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2370 		if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0) {
2371 			more = jme_rxintr(sc, sc->jme_process_limit);
2372 			if (more != 0)
2373 				sc->jme_morework = 1;
2374 		}
2375 		if ((status & INTR_RXQ_DESC_EMPTY) != 0) {
2376 			/*
2377 			 * Notify the hardware of the availability of
2378 			 * new Rx buffers.
2379 			 * Reading RXCSR takes a very long time under
2380 			 * heavy load, so cache the RXCSR value and
2381 			 * write the ORed value with the kick command
2382 			 * to the RXCSR. This saves one register access
2383 			 * cycle.
2384 			 */
2385 			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
2386 			    RXCSR_RX_ENB | RXCSR_RXQ_START);
2387 		}
2388 		if (!if_sendq_empty(ifp))
2389 			jme_start_locked(ifp);
2390 	}
2391 
2392 	if (more != 0 || (CSR_READ_4(sc, JME_INTR_STATUS) & JME_INTRS) != 0) {
2393 		taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);
2394 		JME_UNLOCK(sc);
2395 		return;
2396 	}
2397 done:
2398 	JME_UNLOCK(sc);
2399 
2400 	/* Reenable interrupts. */
2401 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2402 }
2403 
2404 static void
2405 jme_txeof(struct jme_softc *sc)
2406 {
2407 	if_t ifp;
2408 	struct jme_txdesc *txd;
2409 	uint32_t status;
2410 	int cons, nsegs;
2411 
2412 	JME_LOCK_ASSERT(sc);
2413 
2414 	ifp = sc->jme_ifp;
2415 
2416 	cons = sc->jme_cdata.jme_tx_cons;
2417 	if (cons == sc->jme_cdata.jme_tx_prod)
2418 		return;
2419 
2420 	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2421 	    sc->jme_cdata.jme_tx_ring_map,
2422 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2423 
2424 	/*
2425 	 * Go through our Tx list and free mbufs for those
2426 	 * frames which have been transmitted.
2427 	 */
2428 	for (; cons != sc->jme_cdata.jme_tx_prod;) {
2429 		txd = &sc->jme_cdata.jme_txdesc[cons];
2430 		status = le32toh(txd->tx_desc->flags);
2431 		if ((status & JME_TD_OWN) == JME_TD_OWN)
2432 			break;
2433 
2434 		if ((status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) != 0)
2435 			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2436 		else {
2437 			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2438 			if ((status & JME_TD_COLLISION) != 0)
2439 				if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
2440 				    le32toh(txd->tx_desc->buflen) &
2441 				    JME_TD_BUF_LEN_MASK);
2442 		}
2443 		/*
2444 		 * Only the first descriptor of a multi-descriptor
2445 		 * transmission is updated, so the driver has to skip the
2446 		 * entire chained buffers for the transmitted frame. In
2447 		 * other words, the JME_TD_OWN bit is valid only at the
2448 		 * first descriptor of a multi-descriptor transmission.
2449 		 */
2450 		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
2451 			sc->jme_rdata.jme_tx_ring[cons].flags = 0;
2452 			JME_DESC_INC(cons, JME_TX_RING_CNT);
2453 		}
2454 
2455 		/* Reclaim transferred mbufs. */
2456 		bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
2457 		    BUS_DMASYNC_POSTWRITE);
2458 		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
2459 
2460 		KASSERT(txd->tx_m != NULL,
2461 		    ("%s: freeing NULL mbuf!\n", __func__));
2462 		m_freem(txd->tx_m);
2463 		txd->tx_m = NULL;
2464 		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
2465 		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
2466 		    ("%s: Active Tx desc counter was garbled\n", __func__));
2467 		txd->tx_ndesc = 0;
2468 		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2469 	}
2470 	sc->jme_cdata.jme_tx_cons = cons;
2471 	/* Unarm the watchdog timer when no descriptors are pending in the queue. */
2472 	if (sc->jme_cdata.jme_tx_cnt == 0)
2473 		sc->jme_watchdog_timer = 0;
2474 
2475 	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2476 	    sc->jme_cdata.jme_tx_ring_map,
2477 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2478 }
2479 
2480 static __inline void
2481 jme_discard_rxbuf(struct jme_softc *sc, int cons)
2482 {
2483 	struct jme_desc *desc;
2484 
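	/*
	 * Re-arm the descriptor in place: returning JME_RD_OWN to the
	 * hardware reuses the existing cluster, so the received frame
	 * is dropped without allocating a replacement buffer.
	 */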
2485 	desc = &sc->jme_rdata.jme_rx_ring[cons];
2486 	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2487 	desc->buflen = htole32(MCLBYTES);
2488 }
2489 
2490 /* Receive a frame. */
2491 static void
2492 jme_rxeof(struct jme_softc *sc)
2493 {
2494 	if_t ifp;
2495 	struct jme_desc *desc;
2496 	struct jme_rxdesc *rxd;
2497 	struct mbuf *mp, *m;
2498 	uint32_t flags, status;
2499 	int cons, count, nsegs;
2500 
2501 	JME_LOCK_ASSERT(sc);
2502 
2503 	ifp = sc->jme_ifp;
2504 
2505 	cons = sc->jme_cdata.jme_rx_cons;
2506 	desc = &sc->jme_rdata.jme_rx_ring[cons];
2507 	flags = le32toh(desc->flags);
2508 	status = le32toh(desc->buflen);
2509 	nsegs = JME_RX_NSEGS(status);
2510 	sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2511 	if ((status & JME_RX_ERR_STAT) != 0) {
2512 		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2513 		jme_discard_rxbuf(sc, sc->jme_cdata.jme_rx_cons);
2514 #ifdef JME_SHOW_ERRORS
2515 		device_printf(sc->jme_dev, "%s: receive error = 0x%b\n",
2516 		    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
2517 #endif
2518 		sc->jme_cdata.jme_rx_cons += nsegs;
2519 		sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
2520 		return;
2521 	}
2522 
2523 	for (count = 0; count < nsegs; count++,
2524 	    JME_DESC_INC(cons, JME_RX_RING_CNT)) {
2525 		rxd = &sc->jme_cdata.jme_rxdesc[cons];
2526 		mp = rxd->rx_m;
2527 		/* Add a new receive buffer to the ring. */
2528 		if (jme_newbuf(sc, rxd) != 0) {
2529 			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
2530 			/* Reuse buffer. */
2531 			for (; count < nsegs; count++) {
2532 				jme_discard_rxbuf(sc, cons);
2533 				JME_DESC_INC(cons, JME_RX_RING_CNT);
2534 			}
2535 			if (sc->jme_cdata.jme_rxhead != NULL) {
2536 				m_freem(sc->jme_cdata.jme_rxhead);
2537 				JME_RXCHAIN_RESET(sc);
2538 			}
2539 			break;
2540 		}
2541 
2542 		/*
2543 		 * Assume we've received a full-sized frame. The actual
2544 		 * size is fixed up when we encounter the end of a
2545 		 * multi-segment frame.
2546 		 */
2547 		mp->m_len = MCLBYTES;
2548 
2549 		/* Chain received mbufs. */
2550 		if (sc->jme_cdata.jme_rxhead == NULL) {
2551 			sc->jme_cdata.jme_rxhead = mp;
2552 			sc->jme_cdata.jme_rxtail = mp;
2553 		} else {
2554 			/*
2555 			 * Receive processor can receive a maximum frame
2556 			 * size of 65535 bytes.
2557 			 */
2558 			mp->m_flags &= ~M_PKTHDR;
2559 			sc->jme_cdata.jme_rxtail->m_next = mp;
2560 			sc->jme_cdata.jme_rxtail = mp;
2561 		}
2562 
2563 		if (count == nsegs - 1) {
2564 			/* Last desc. for this frame. */
2565 			m = sc->jme_cdata.jme_rxhead;
2566 			m->m_flags |= M_PKTHDR;
2567 			m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
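			/*
			 * Example of the length fixup below, assuming
			 * 2KB clusters and rxlen = 5000: nsegs = 3, so
			 * the first mbuf holds 2048 - 10 = 2038 bytes,
			 * the middle one 2048, and the last one
			 * 5000 - (2038 + 2048) = 914 bytes.
			 */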
2568 			if (nsegs > 1) {
2569 				/* Set first mbuf size. */
2570 				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2571 				/* Set last mbuf size. */
2572 				mp->m_len = sc->jme_cdata.jme_rxlen -
2573 				    ((MCLBYTES - JME_RX_PAD_BYTES) +
2574 				    (MCLBYTES * (nsegs - 2)));
2575 			} else
2576 				m->m_len = sc->jme_cdata.jme_rxlen;
2577 			m->m_pkthdr.rcvif = ifp;
2578 
2579 			/*
2580 			 * Account for the 10 bytes of auto padding used
2581 			 * to align the IP header on a 32bit boundary.
2582 			 * Also note that the CRC bytes are automatically
2583 			 * removed by the hardware.
2584 			 */
2585 			m->m_data += JME_RX_PAD_BYTES;
2586 
2587 			/* Set checksum information. */
2588 			if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
2589 			    (flags & JME_RD_IPV4) != 0) {
2590 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2591 				if ((flags & JME_RD_IPCSUM) != 0)
2592 					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2593 				if (((flags & JME_RD_MORE_FRAG) == 0) &&
2594 				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2595 				    (JME_RD_TCP | JME_RD_TCPCSUM) ||
2596 				    (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2597 				    (JME_RD_UDP | JME_RD_UDPCSUM))) {
2598 					m->m_pkthdr.csum_flags |=
2599 					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2600 					m->m_pkthdr.csum_data = 0xffff;
2601 				}
2602 			}
2603 
2604 			/* Check for VLAN tagged packets. */
2605 			if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0 &&
2606 			    (flags & JME_RD_VLAN_TAG) != 0) {
2607 				m->m_pkthdr.ether_vtag =
2608 				    flags & JME_RD_VLAN_MASK;
2609 				m->m_flags |= M_VLANTAG;
2610 			}
2611 
2612 			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2613 			/* Pass it on. */
2614 			JME_UNLOCK(sc);
2615 			if_input(ifp, m);
2616 			JME_LOCK(sc);
2617 
2618 			/* Reset mbuf chains. */
2619 			JME_RXCHAIN_RESET(sc);
2620 		}
2621 	}
2622 
2623 	sc->jme_cdata.jme_rx_cons += nsegs;
2624 	sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
2625 }
2626 
2627 static int
2628 jme_rxintr(struct jme_softc *sc, int count)
2629 {
2630 	struct jme_desc *desc;
2631 	int nsegs, prog, pktlen;
2632 
2633 	bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2634 	    sc->jme_cdata.jme_rx_ring_map,
2635 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2636 
2637 	for (prog = 0; count > 0; prog++) {
2638 		desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
2639 		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2640 			break;
2641 		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2642 			break;
2643 		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2644 		/*
2645 		 * Check the number of segments against the received
2646 		 * bytes. A non-matching value would indicate that the
2647 		 * hardware is still trying to update Rx descriptors.
2648 		 * I'm not sure whether this check is needed.
2649 		 */
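		/* e.g. pktlen = 5010 with 2KB clusters: howmany(5010, 2048) = 3 */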
2650 		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2651 		if (nsegs != howmany(pktlen, MCLBYTES))
2652 			break;
2653 		prog++;
2654 		/* Received a frame. */
2655 		jme_rxeof(sc);
2656 		count -= nsegs;
2657 	}
2658 
2659 	if (prog > 0)
2660 		bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2661 		    sc->jme_cdata.jme_rx_ring_map,
2662 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2663 
2664 	return (count > 0 ? 0 : EAGAIN);
2665 }
2666 
2667 static void
2668 jme_tick(void *arg)
2669 {
2670 	struct jme_softc *sc;
2671 	struct mii_data *mii;
2672 
2673 	sc = (struct jme_softc *)arg;
2674 
2675 	JME_LOCK_ASSERT(sc);
2676 
2677 	mii = device_get_softc(sc->jme_miibus);
2678 	mii_tick(mii);
2679 	/*
2680 	 * Reclaim Tx buffers that have been completed. It's not
2681 	 * strictly needed here, but it releases allocated mbuf chains
2682 	 * faster and limits the maximum delay to one hz tick.
2683 	 */
2684 	jme_txeof(sc);
2685 	jme_stats_update(sc);
2686 	jme_watchdog(sc);
2687 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2688 }
2689 
2690 static void
2691 jme_reset(struct jme_softc *sc)
2692 {
2693 	uint32_t ghc, gpreg;
2694 
2695 	/* Stop receiver, transmitter. */
2696 	jme_stop_rx(sc);
2697 	jme_stop_tx(sc);
2698 
2699 	/* Reset controller. */
2700 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2701 	CSR_READ_4(sc, JME_GHC);
2702 	DELAY(10);
2703 	/*
2704 	 * Work around Rx FIFO overruns seen under certain conditions.
2705 	 * Explicitly synchronize the TX/RX clocks.  The TX/RX clocks
2706 	 * should be enabled only after enabling the TX/RX MACs.
2707 	 */
2708 	if ((sc->jme_flags & (JME_FLAG_TXCLK | JME_FLAG_RXCLK)) != 0) {
2709 		/* Disable TX clock. */
2710 		CSR_WRITE_4(sc, JME_GHC, GHC_RESET | GHC_TX_MAC_CLK_DIS);
2711 		/* Disable RX clock. */
2712 		gpreg = CSR_READ_4(sc, JME_GPREG1);
2713 		CSR_WRITE_4(sc, JME_GPREG1, gpreg | GPREG1_RX_MAC_CLK_DIS);
2714 		gpreg = CSR_READ_4(sc, JME_GPREG1);
2715 		/* De-assert RESET but still disable TX clock. */
2716 		CSR_WRITE_4(sc, JME_GHC, GHC_TX_MAC_CLK_DIS);
2717 		ghc = CSR_READ_4(sc, JME_GHC);
2718 
2719 		/* Enable TX clock. */
2720 		CSR_WRITE_4(sc, JME_GHC, ghc & ~GHC_TX_MAC_CLK_DIS);
2721 		/* Enable RX clock. */
2722 		CSR_WRITE_4(sc, JME_GPREG1, gpreg & ~GPREG1_RX_MAC_CLK_DIS);
2723 		CSR_READ_4(sc, JME_GPREG1);
2724 
2725 		/* Disable TX/RX clock again. */
2726 		CSR_WRITE_4(sc, JME_GHC, GHC_TX_MAC_CLK_DIS);
2727 		CSR_WRITE_4(sc, JME_GPREG1, gpreg | GPREG1_RX_MAC_CLK_DIS);
2728 	} else
2729 		CSR_WRITE_4(sc, JME_GHC, 0);
2730 	CSR_READ_4(sc, JME_GHC);
2731 	DELAY(10);
2732 }
2733 
2734 static void
2735 jme_init(void *xsc)
2736 {
2737 	struct jme_softc *sc;
2738 
2739 	sc = (struct jme_softc *)xsc;
2740 	JME_LOCK(sc);
2741 	jme_init_locked(sc);
2742 	JME_UNLOCK(sc);
2743 }
2744 
2745 static void
2746 jme_init_locked(struct jme_softc *sc)
2747 {
2748 	if_t ifp;
2749 	struct mii_data *mii;
2750 	bus_addr_t paddr;
2751 	uint32_t reg;
2752 	int error;
2753 
2754 	JME_LOCK_ASSERT(sc);
2755 
2756 	ifp = sc->jme_ifp;
2757 	mii = device_get_softc(sc->jme_miibus);
2758 
2759 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
2760 		return;
2761 	/*
2762 	 * Cancel any pending I/O.
2763 	 */
2764 	jme_stop(sc);
2765 
2766 	/*
2767 	 * Reset the chip to a known state.
2768 	 */
2769 	jme_reset(sc);
2770 
2771 	/* Init descriptors. */
2772 	error = jme_init_rx_ring(sc);
2773 	if (error != 0) {
2774 		device_printf(sc->jme_dev,
2775 		    "%s: initialization failed: no memory for Rx buffers.\n",
2776 		    __func__);
2777 		jme_stop(sc);
2778 		return;
2779 	}
2780 	jme_init_tx_ring(sc);
2781 	/* Initialize shadow status block. */
2782 	jme_init_ssb(sc);
2783 
2784 	/* Reprogram the station address. */
2785 	jme_set_macaddr(sc, if_getlladdr(sc->jme_ifp));
2786 
2787 	/*
2788 	 * Configure Tx queue.
2789 	 *  Tx priority queue weight value : 0
2790 	 *  Tx FIFO threshold for processing next packet : 16QW
2791 	 *  Maximum Tx DMA length : 512
2792 	 *  Allow Tx DMA burst.
2793 	 */
2794 	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2795 	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2796 	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2797 	sc->jme_txcsr |= sc->jme_tx_dma_size;
2798 	sc->jme_txcsr |= TXCSR_DMA_BURST;
2799 	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2800 
2801 	/* Set Tx descriptor counter. */
2802 	CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT);
2803 
2804 	/* Set Tx ring address to the hardware. */
2805 	paddr = JME_TX_RING_ADDR(sc, 0);
2806 	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2807 	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2808 
2809 	/* Configure TxMAC parameters. */
2810 	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2811 	reg |= TXMAC_THRESH_1_PKT;
2812 	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2813 	CSR_WRITE_4(sc, JME_TXMAC, reg);
2814 
2815 	/*
2816 	 * Configure Rx queue.
2817 	 *  FIFO full threshold for transmitting Tx pause packet : 128T
2818 	 *  FIFO threshold for processing next packet : 128QW
2819 	 *  Rx queue 0 select
2820 	 *  Max Rx DMA length : 128
2821 	 *  Rx descriptor retry : 32
2822 	 *  Rx descriptor retry time gap : 256ns
2823 	 *  Don't receive runt/bad frame.
2824 	 */
2825 	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2826 	/*
2827 	 * Since the Rx FIFO size is 4K bytes, receiving frames larger
2828 	 * than 4K bytes will suffer from Rx FIFO overruns. So
2829 	 * decrease the FIFO threshold to reduce FIFO overruns for
2830 	 * frames larger than 4000 bytes.
2831 	 * For best performance with standard MTU sized frames use
2832 	 * the maximum allowable FIFO threshold, 128QW. Note these
2833 	 * do not hold for chip full mask version >= 2; for those
2834 	 * controllers 64QW and 128QW are not valid values.
2835 	 */
2836 	if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 2)
2837 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2838 	else {
2839 		if ((if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
2840 		    ETHER_CRC_LEN) > JME_RX_FIFO_SIZE)
2841 			sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2842 		else
2843 			sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2844 	}
2845 	sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
2846 	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2847 	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2848 	CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
2849 
2850 	/* Set Rx descriptor counter. */
2851 	CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT);
2852 
2853 	/* Set Rx ring address to the hardware. */
2854 	paddr = JME_RX_RING_ADDR(sc, 0);
2855 	CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2856 	CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2857 
2858 	/* Clear receive filter. */
2859 	CSR_WRITE_4(sc, JME_RXMAC, 0);
2860 	/* Set up the receive filter. */
2861 	jme_set_filter(sc);
2862 	jme_set_vlan(sc);
2863 
2864 	/*
2865 	 * Disable all WOL bits as WOL can interfere with normal Rx
2866 	 * operation. Also clear WOL detection status bits.
2867 	 */
2868 	reg = CSR_READ_4(sc, JME_PMCS);
2869 	reg &= ~PMCS_WOL_ENB_MASK;
2870 	CSR_WRITE_4(sc, JME_PMCS, reg);
2871 
2872 	reg = CSR_READ_4(sc, JME_RXMAC);
2873 	/*
2874 	 * Pad 10 bytes right before the received frame. This greatly
2875 	 * helps Rx performance on strict-alignment architectures as
2876 	 * the frame need not be copied to align the payload.
2877 	 */
2878 	reg |= RXMAC_PAD_10BYTES;
2879 	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
2880 		reg |= RXMAC_CSUM_ENB;
2881 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2882 
2883 	/* Configure general purpose reg0 */
2884 	reg = CSR_READ_4(sc, JME_GPREG0);
2885 	reg &= ~GPREG0_PCC_UNIT_MASK;
2886 	/* Set PCC timer resolution to micro-seconds unit. */
2887 	reg |= GPREG0_PCC_UNIT_US;
2888 	/*
2889 	 * Disable all shadow register posting as we have to read
2890 	 * JME_INTR_STATUS register in jme_int_task. Also it seems
2891 	 * that it's hard to synchronize interrupt status between
2892 	 * hardware and software with shadow posting due to
2893 	 * requirements of bus_dmamap_sync(9).
2894 	 */
2895 	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2896 	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2897 	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2898 	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2899 	/* Disable posting of DW0. */
2900 	reg &= ~GPREG0_POST_DW0_ENB;
2901 	/* Clear PME message. */
2902 	reg &= ~GPREG0_PME_ENB;
2903 	/* Set PHY address. */
2904 	reg &= ~GPREG0_PHY_ADDR_MASK;
2905 	reg |= sc->jme_phyaddr;
2906 	CSR_WRITE_4(sc, JME_GPREG0, reg);
2907 
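	/*
	 * PCC appears to batch completion interrupts: one interrupt per
	 * 'coal_pkt' completed packets or per 'coal_to' microseconds
	 * (the unit selected via GPREG0 above), whichever is reached
	 * first, trading a little latency for a lower interrupt rate.
	 */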
2908 	/* Configure Tx queue 0 packet completion coalescing. */
2909 	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
2910 	    PCCTX_COAL_TO_MASK;
2911 	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
2912 	    PCCTX_COAL_PKT_MASK;
2913 	reg |= PCCTX_COAL_TXQ0;
2914 	CSR_WRITE_4(sc, JME_PCCTX, reg);
2915 
2916 	/* Configure Rx queue 0 packet completion coalescing. */
2917 	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
2918 	    PCCRX_COAL_TO_MASK;
2919 	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
2920 	    PCCRX_COAL_PKT_MASK;
2921 	CSR_WRITE_4(sc, JME_PCCRX0, reg);
2922 
2923 	/*
2924 	 * Configure PCD (Packet Completion Deferring).  It seems PCD
2925 	 * generates an interrupt when the time interval between two
2926 	 * back-to-back incoming/outgoing packets is long enough for
2927 	 * its timer to count down to 0. The arrival of new packets
2928 	 * after the timer has started causes the PCD timer to restart.
2929 	 * Unfortunately, it's not clear how useful PCD is at the
2930 	 * moment, so just use the same values as the PCC parameters.
2931 	 */
2932 	if ((sc->jme_flags & JME_FLAG_PCCPCD) != 0) {
2933 		sc->jme_rx_pcd_to = sc->jme_rx_coal_to;
2934 		if (sc->jme_rx_coal_to > PCDRX_TO_MAX)
2935 			sc->jme_rx_pcd_to = PCDRX_TO_MAX;
2936 		sc->jme_tx_pcd_to = sc->jme_tx_coal_to;
2937 		if (sc->jme_tx_coal_to > PCDTX_TO_MAX)
2938 			sc->jme_tx_pcd_to = PCDTX_TO_MAX;
2939 		reg = sc->jme_rx_pcd_to << PCDRX0_TO_THROTTLE_SHIFT;
2940 		reg |= sc->jme_rx_pcd_to << PCDRX0_TO_SHIFT;
2941 		CSR_WRITE_4(sc, PCDRX_REG(0), reg);
2942 		reg = sc->jme_tx_pcd_to << PCDTX_TO_THROTTLE_SHIFT;
2943 		reg |= sc->jme_tx_pcd_to << PCDTX_TO_SHIFT;
2944 		CSR_WRITE_4(sc, JME_PCDTX, reg);
2945 	}
2946 
2947 	/* Configure shadow status block but don't enable posting. */
2948 	paddr = sc->jme_rdata.jme_ssb_block_paddr;
2949 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2950 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2951 
2952 	/* Disable Timer 1 and Timer 2. */
2953 	CSR_WRITE_4(sc, JME_TIMER1, 0);
2954 	CSR_WRITE_4(sc, JME_TIMER2, 0);
2955 
2956 	/* Configure retry transmit period, retry limit value. */
2957 	CSR_WRITE_4(sc, JME_TXTRHD,
2958 	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2959 	    TXTRHD_RT_PERIOD_MASK) |
2960 	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2961 	    TXTRHD_RT_LIMIT_MASK));
2962 
2963 	/* Disable RSS. */
2964 	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
2965 
2966 	/* Initialize the interrupt mask. */
2967 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2968 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2969 
2970 	/*
2971 	 * Enabling Tx/Rx DMA engines and Rx queue processing is
2972 	 * done after detection of valid link in jme_link_task.
2973 	 */
2974 
2975 	sc->jme_flags &= ~JME_FLAG_LINK;
2976 	/* Set the current media. */
2977 	mii_mediachg(mii);
2978 
2979 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2980 
2981 	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
2982 	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2983 }
2984 
2985 static void
2986 jme_stop(struct jme_softc *sc)
2987 {
2988 	if_t ifp;
2989 	struct jme_txdesc *txd;
2990 	struct jme_rxdesc *rxd;
2991 	int i;
2992 
2993 	JME_LOCK_ASSERT(sc);
2994 	/*
2995 	 * Mark the interface down and cancel the watchdog timer.
2996 	 */
2997 	ifp = sc->jme_ifp;
2998 	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
2999 	sc->jme_flags &= ~JME_FLAG_LINK;
3000 	callout_stop(&sc->jme_tick_ch);
3001 	sc->jme_watchdog_timer = 0;
3002 
3003 	/*
3004 	 * Disable interrupts.
3005 	 */
3006 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
3007 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
3008 
3009 	/* Disable updating shadow status block. */
3010 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
3011 	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
3012 
3013 	/* Stop receiver, transmitter. */
3014 	jme_stop_rx(sc);
3015 	jme_stop_tx(sc);
3016 
3017 	/* Reclaim Rx/Tx buffers that have been completed. */
3018 	jme_rxintr(sc, JME_RX_RING_CNT);
3019 	if (sc->jme_cdata.jme_rxhead != NULL)
3020 		m_freem(sc->jme_cdata.jme_rxhead);
3021 	JME_RXCHAIN_RESET(sc);
3022 	jme_txeof(sc);
3023 	/*
3024 	 * Free RX and TX mbufs still in the queues.
3025 	 */
3026 	for (i = 0; i < JME_RX_RING_CNT; i++) {
3027 		rxd = &sc->jme_cdata.jme_rxdesc[i];
3028 		if (rxd->rx_m != NULL) {
3029 			bus_dmamap_sync(sc->jme_cdata.jme_rx_tag,
3030 			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3031 			bus_dmamap_unload(sc->jme_cdata.jme_rx_tag,
3032 			    rxd->rx_dmamap);
3033 			m_freem(rxd->rx_m);
3034 			rxd->rx_m = NULL;
3035 		}
3036 	}
3037 	for (i = 0; i < JME_TX_RING_CNT; i++) {
3038 		txd = &sc->jme_cdata.jme_txdesc[i];
3039 		if (txd->tx_m != NULL) {
3040 			bus_dmamap_sync(sc->jme_cdata.jme_tx_tag,
3041 			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3042 			bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
3043 			    txd->tx_dmamap);
3044 			m_freem(txd->tx_m);
3045 			txd->tx_m = NULL;
3046 			txd->tx_ndesc = 0;
3047 		}
3048 	}
3049 	jme_stats_update(sc);
3050 	jme_stats_save(sc);
3051 }
3052 
3053 static void
3054 jme_stop_tx(struct jme_softc *sc)
3055 {
3056 	uint32_t reg;
3057 	int i;
3058 
3059 	reg = CSR_READ_4(sc, JME_TXCSR);
3060 	if ((reg & TXCSR_TX_ENB) == 0)
3061 		return;
3062 	reg &= ~TXCSR_TX_ENB;
3063 	CSR_WRITE_4(sc, JME_TXCSR, reg);
3064 	for (i = JME_TIMEOUT; i > 0; i--) {
3065 		DELAY(1);
3066 		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
3067 			break;
3068 	}
3069 	if (i == 0)
3070 		device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
3071 }
3072 
3073 static void
3074 jme_stop_rx(struct jme_softc *sc)
3075 {
3076 	uint32_t reg;
3077 	int i;
3078 
3079 	reg = CSR_READ_4(sc, JME_RXCSR);
3080 	if ((reg & RXCSR_RX_ENB) == 0)
3081 		return;
3082 	reg &= ~RXCSR_RX_ENB;
3083 	CSR_WRITE_4(sc, JME_RXCSR, reg);
3084 	for (i = JME_TIMEOUT; i > 0; i--) {
3085 		DELAY(1);
3086 		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
3087 			break;
3088 	}
3089 	if (i == 0)
3090 		device_printf(sc->jme_dev, "stopping receiver timeout!\n");
3091 }
3092 
3093 static void
3094 jme_init_tx_ring(struct jme_softc *sc)
3095 {
3096 	struct jme_ring_data *rd;
3097 	struct jme_txdesc *txd;
3098 	int i;
3099 
3100 	sc->jme_cdata.jme_tx_prod = 0;
3101 	sc->jme_cdata.jme_tx_cons = 0;
3102 	sc->jme_cdata.jme_tx_cnt = 0;
3103 
3104 	rd = &sc->jme_rdata;
3105 	bzero(rd->jme_tx_ring, JME_TX_RING_SIZE);
3106 	for (i = 0; i < JME_TX_RING_CNT; i++) {
3107 		txd = &sc->jme_cdata.jme_txdesc[i];
3108 		txd->tx_m = NULL;
3109 		txd->tx_desc = &rd->jme_tx_ring[i];
3110 		txd->tx_ndesc = 0;
3111 	}
3112 
3113 	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
3114 	    sc->jme_cdata.jme_tx_ring_map,
3115 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3116 }
3117 
3118 static void
3119 jme_init_ssb(struct jme_softc *sc)
3120 {
3121 	struct jme_ring_data *rd;
3122 
3123 	rd = &sc->jme_rdata;
3124 	bzero(rd->jme_ssb_block, JME_SSB_SIZE);
3125 	bus_dmamap_sync(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map,
3126 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3127 }
3128 
3129 static int
3130 jme_init_rx_ring(struct jme_softc *sc)
3131 {
3132 	struct jme_ring_data *rd;
3133 	struct jme_rxdesc *rxd;
3134 	int i;
3135 
3136 	sc->jme_cdata.jme_rx_cons = 0;
3137 	JME_RXCHAIN_RESET(sc);
3138 	sc->jme_morework = 0;
3139 
3140 	rd = &sc->jme_rdata;
3141 	bzero(rd->jme_rx_ring, JME_RX_RING_SIZE);
3142 	for (i = 0; i < JME_RX_RING_CNT; i++) {
3143 		rxd = &sc->jme_cdata.jme_rxdesc[i];
3144 		rxd->rx_m = NULL;
3145 		rxd->rx_desc = &rd->jme_rx_ring[i];
3146 		if (jme_newbuf(sc, rxd) != 0)
3147 			return (ENOBUFS);
3148 	}
3149 
3150 	bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
3151 	    sc->jme_cdata.jme_rx_ring_map,
3152 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3153 
3154 	return (0);
3155 }
3156 
3157 static int
3158 jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd)
3159 {
3160 	struct jme_desc *desc;
3161 	struct mbuf *m;
3162 	bus_dma_segment_t segs[1];
3163 	bus_dmamap_t map;
3164 	int nsegs;
3165 
3166 	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3167 	if (m == NULL)
3168 		return (ENOBUFS);
3169 	/*
3170 	 * The JMC250 has a 64bit boundary alignment limitation, so
3171 	 * jme(4) takes advantage of the hardware's 10 byte padding
3172 	 * feature to avoid copying the entire frame just to align
3173 	 * the IP header on a 32bit boundary.
3174 	 */
3175 	m->m_len = m->m_pkthdr.len = MCLBYTES;
3176 
3177 	if (bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_rx_tag,
3178 	    sc->jme_cdata.jme_rx_sparemap, m, segs, &nsegs, 0) != 0) {
3179 		m_freem(m);
3180 		return (ENOBUFS);
3181 	}
3182 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
3183 
3184 	if (rxd->rx_m != NULL) {
3185 		bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
3186 		    BUS_DMASYNC_POSTREAD);
3187 		bus_dmamap_unload(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap);
3188 	}
3189 	map = rxd->rx_dmamap;
3190 	rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
3191 	sc->jme_cdata.jme_rx_sparemap = map;
3192 	bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
3193 	    BUS_DMASYNC_PREREAD);
3194 	rxd->rx_m = m;
3195 
3196 	desc = rxd->rx_desc;
3197 	desc->buflen = htole32(segs[0].ds_len);
3198 	desc->addr_lo = htole32(JME_ADDR_LO(segs[0].ds_addr));
3199 	desc->addr_hi = htole32(JME_ADDR_HI(segs[0].ds_addr));
3200 	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
3201 
3202 	return (0);
3203 }
3204 
3205 static void
3206 jme_set_vlan(struct jme_softc *sc)
3207 {
3208 	if_t ifp;
3209 	uint32_t reg;
3210 
3211 	JME_LOCK_ASSERT(sc);
3212 
3213 	ifp = sc->jme_ifp;
3214 	reg = CSR_READ_4(sc, JME_RXMAC);
3215 	reg &= ~RXMAC_VLAN_ENB;
3216 	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
3217 		reg |= RXMAC_VLAN_ENB;
3218 	CSR_WRITE_4(sc, JME_RXMAC, reg);
3219 }
3220 
3221 static u_int
3222 jme_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
3223 {
3224 	uint32_t crc, *mchash = arg;
3225 
3226 	crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
3227 
3228 	/* Just want the 6 least significant bits. */
3229 	crc &= 0x3f;
3230 
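	/*
	 * Of those 6 bits, bit 5 selects the 32bit hash register (MAR0
	 * or MAR1) and bits 0-4 select the bit position within it.
	 */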
3231 	/* Set the corresponding bit in the hash table. */
3232 	mchash[crc >> 5] |= 1 << (crc & 0x1f);
3233 
3234 	return (1);
3235 }
3236 
3237 static void
3238 jme_set_filter(struct jme_softc *sc)
3239 {
3240 	if_t ifp;
3241 	uint32_t mchash[2];
3242 	uint32_t rxcfg;
3243 
3244 	JME_LOCK_ASSERT(sc);
3245 
3246 	ifp = sc->jme_ifp;
3247 
3248 	rxcfg = CSR_READ_4(sc, JME_RXMAC);
3249 	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
3250 	    RXMAC_ALLMULTI);
3251 	/* Always accept frames destined to our station address. */
3252 	rxcfg |= RXMAC_UNICAST;
3253 	if ((if_getflags(ifp) & IFF_BROADCAST) != 0)
3254 		rxcfg |= RXMAC_BROADCAST;
3255 	if ((if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
3256 		if ((if_getflags(ifp) & IFF_PROMISC) != 0)
3257 			rxcfg |= RXMAC_PROMISC;
3258 		if ((if_getflags(ifp) & IFF_ALLMULTI) != 0)
3259 			rxcfg |= RXMAC_ALLMULTI;
3260 		CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
3261 		CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
3262 		CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3263 		return;
3264 	}
3265 
3266 	/*
3267 	 * Set up the multicast address filter by passing all multicast
3268 	 * addresses through a CRC generator, and then using the low-order
3269 	 * 6 bits as an index into the 64 bit multicast hash table.  The
3270 	 * high order bits select the register, while the rest of the bits
3271 	 * select the bit within the register.
3272 	 */
3273 	rxcfg |= RXMAC_MULTICAST;
3274 	bzero(mchash, sizeof(mchash));
3275 	if_foreach_llmaddr(ifp, jme_hash_maddr, &mchash);
3276 
3277 	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
3278 	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
3279 	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3280 }
3281 
3282 static void
3283 jme_stats_clear(struct jme_softc *sc)
3284 {
3285 
3286 	JME_LOCK_ASSERT(sc);
3287 
3288 	if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
3289 		return;
3290 
3291 	/* Disable and clear counters. */
3292 	CSR_WRITE_4(sc, JME_STATCSR, 0xFFFFFFFF);
3293 	/* Activate hw counters. */
3294 	CSR_WRITE_4(sc, JME_STATCSR, 0);
3295 	CSR_READ_4(sc, JME_STATCSR);
3296 	bzero(&sc->jme_stats, sizeof(struct jme_hw_stats));
3297 }
3298 
3299 static void
3300 jme_stats_save(struct jme_softc *sc)
3301 {
3302 
3303 	JME_LOCK_ASSERT(sc);
3304 
3305 	if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
3306 		return;
3307 	/* Save current counters. */
3308 	bcopy(&sc->jme_stats, &sc->jme_ostats, sizeof(struct jme_hw_stats));
3309 	/* Disable and clear counters. */
3310 	CSR_WRITE_4(sc, JME_STATCSR, 0xFFFFFFFF);
3311 }
3312 
3313 static void
3314 jme_stats_update(struct jme_softc *sc)
3315 {
3316 	struct jme_hw_stats *stat, *ostat;
3317 	uint32_t reg;
3318 
3319 	JME_LOCK_ASSERT(sc);
3320 
3321 	if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
3322 		return;
3323 	stat = &sc->jme_stats;
3324 	ostat = &sc->jme_ostats;
3325 	stat->tx_good_frames = CSR_READ_4(sc, JME_STAT_TXGOOD);
3326 	stat->rx_good_frames = CSR_READ_4(sc, JME_STAT_RXGOOD);
3327 	reg = CSR_READ_4(sc, JME_STAT_CRCMII);
3328 	stat->rx_crc_errs = (reg & STAT_RX_CRC_ERR_MASK) >>
3329 	    STAT_RX_CRC_ERR_SHIFT;
3330 	stat->rx_mii_errs = (reg & STAT_RX_MII_ERR_MASK) >>
3331 	    STAT_RX_MII_ERR_SHIFT;
3332 	reg = CSR_READ_4(sc, JME_STAT_RXERR);
3333 	stat->rx_fifo_oflows = (reg & STAT_RXERR_OFLOW_MASK) >>
3334 	    STAT_RXERR_OFLOW_SHIFT;
3335 	stat->rx_desc_empty = (reg & STAT_RXERR_MPTY_MASK) >>
3336 	    STAT_RXERR_MPTY_SHIFT;
3337 	reg = CSR_READ_4(sc, JME_STAT_FAIL);
3338 	stat->rx_bad_frames = (reg & STAT_FAIL_RX_MASK) >> STAT_FAIL_RX_SHIFT;
3339 	stat->tx_bad_frames = (reg & STAT_FAIL_TX_MASK) >> STAT_FAIL_TX_SHIFT;
3340 
3341 	/* Account for previous counters. */
3342 	stat->rx_good_frames += ostat->rx_good_frames;
3343 	stat->rx_crc_errs += ostat->rx_crc_errs;
3344 	stat->rx_mii_errs += ostat->rx_mii_errs;
3345 	stat->rx_fifo_oflows += ostat->rx_fifo_oflows;
3346 	stat->rx_desc_empty += ostat->rx_desc_empty;
3347 	stat->rx_bad_frames += ostat->rx_bad_frames;
3348 	stat->tx_good_frames += ostat->tx_good_frames;
3349 	stat->tx_bad_frames += ostat->tx_bad_frames;
3350 }
3351 
3352 static void
3353 jme_phy_down(struct jme_softc *sc)
3354 {
3355 	uint32_t reg;
3356 
3357 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, BMCR_PDOWN);
3358 	if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5) {
3359 		reg = CSR_READ_4(sc, JME_PHYPOWDN);
3360 		reg |= 0x0000000F;
3361 		CSR_WRITE_4(sc, JME_PHYPOWDN, reg);
3362 		reg = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4);
3363 		reg &= ~PE1_GIGA_PDOWN_MASK;
3364 		reg |= PE1_GIGA_PDOWN_D3;
3365 		pci_write_config(sc->jme_dev, JME_PCI_PE1, reg, 4);
3366 	}
3367 }
3368 
3369 static void
3370 jme_phy_up(struct jme_softc *sc)
3371 {
3372 	uint32_t reg;
3373 	uint16_t bmcr;
3374 
3375 	bmcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR);
3376 	bmcr &= ~BMCR_PDOWN;
3377 	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, bmcr);
3378 	if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5) {
3379 		reg = CSR_READ_4(sc, JME_PHYPOWDN);
3380 		reg &= ~0x0000000F;
3381 		CSR_WRITE_4(sc, JME_PHYPOWDN, reg);
3382 		reg = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4);
3383 		reg &= ~PE1_GIGA_PDOWN_MASK;
3384 		reg |= PE1_GIGA_PDOWN_DIS;
3385 		pci_write_config(sc->jme_dev, JME_PCI_PE1, reg, 4);
3386 	}
3387 }
3388 
3389 static int
3390 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3391 {
3392 	int error, value;
3393 
3394 	if (arg1 == NULL)
3395 		return (EINVAL);
3396 	value = *(int *)arg1;
3397 	error = sysctl_handle_int(oidp, &value, 0, req);
3398 	if (error || req->newptr == NULL)
3399 		return (error);
3400 	if (value < low || value > high)
3401 		return (EINVAL);
3402 	*(int *)arg1 = value;
3403 
3404 	return (0);
3405 }
3406 
3407 static int
3408 sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS)
3409 {
3410 	return (sysctl_int_range(oidp, arg1, arg2, req,
3411 	    PCCTX_COAL_TO_MIN, PCCTX_COAL_TO_MAX));
3412 }
3413 
3414 static int
3415 sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
3416 {
3417 	return (sysctl_int_range(oidp, arg1, arg2, req,
3418 	    PCCTX_COAL_PKT_MIN, PCCTX_COAL_PKT_MAX));
3419 }
3420 
3421 static int
3422 sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS)
3423 {
3424 	return (sysctl_int_range(oidp, arg1, arg2, req,
3425 	    PCCRX_COAL_TO_MIN, PCCRX_COAL_TO_MAX));
3426 }
3427 
3428 static int
3429 sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
3430 {
3431 	return (sysctl_int_range(oidp, arg1, arg2, req,
3432 	    PCCRX_COAL_PKT_MIN, PCCRX_COAL_PKT_MAX));
3433 }
3434 
3435 static int
3436 sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS)
3437 {
3438 	return (sysctl_int_range(oidp, arg1, arg2, req,
3439 	    JME_PROC_MIN, JME_PROC_MAX));
3440 }
3441