/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>

#include <dev/jme/if_jmereg.h>
#include <dev/jme/if_jmevar.h>

/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"

/* Define JME_SHOW_ERRORS to enable printing of Rx errors. */
#undef	JME_SHOW_ERRORS

#define	JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

MODULE_DEPEND(jme, pci, 1, 1, 1);
MODULE_DEPEND(jme, ether, 1, 1, 1);
MODULE_DEPEND(jme, miibus, 1, 1, 1);

/* Tunables. */
static int msi_disable = 0;
static int msix_disable = 0;
TUNABLE_INT("hw.jme.msi_disable", &msi_disable);
TUNABLE_INT("hw.jme.msix_disable", &msix_disable);

/*
 * Devices supported by this driver.
 */
static struct jme_dev {
	uint16_t	jme_vendorid;
	uint16_t	jme_deviceid;
	const char	*jme_name;
} jme_devs[] = {
	{ VENDORID_JMICRON, DEVICEID_JMC250,
	    "JMicron Inc, JMC25x Gigabit Ethernet" },
	{ VENDORID_JMICRON, DEVICEID_JMC260,
	    "JMicron Inc, JMC26x Fast Ethernet" },
};

static int	jme_miibus_readreg(device_t, int, int);
static int	jme_miibus_writereg(device_t, int, int, int);
static void	jme_miibus_statchg(device_t);
static void	jme_mediastatus(if_t, struct ifmediareq *);
static int	jme_mediachange(if_t);
static int	jme_probe(device_t);
static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
static int	jme_eeprom_macaddr(struct jme_softc *);
static int	jme_efuse_macaddr(struct jme_softc *);
static void	jme_reg_macaddr(struct jme_softc *);
static void	jme_set_macaddr(struct jme_softc *, uint8_t *);
static void	jme_map_intr_vector(struct jme_softc *);
static int	jme_attach(device_t);
static int	jme_detach(device_t);
static void	jme_sysctl_node(struct jme_softc *);
static void	jme_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int	jme_dma_alloc(struct jme_softc *);
static void	jme_dma_free(struct jme_softc *);
static int	jme_shutdown(device_t);
static void	jme_setlinkspeed(struct jme_softc *);
static void	jme_setwol(struct jme_softc *);
static int	jme_suspend(device_t);
static int	jme_resume(device_t);
static int	jme_encap(struct jme_softc *, struct mbuf **);
static void	jme_start(if_t);
static void	jme_start_locked(if_t);
static void	jme_watchdog(struct jme_softc *);
static int	jme_ioctl(if_t, u_long, caddr_t);
static void	jme_mac_config(struct jme_softc *);
static void	jme_link_task(void *, int);
static int	jme_intr(void *);
static void	jme_int_task(void *, int);
static void	jme_txeof(struct jme_softc *);
static __inline void jme_discard_rxbuf(struct jme_softc *, int);
static void	jme_rxeof(struct jme_softc *);
static int	jme_rxintr(struct jme_softc *, int);
static void	jme_tick(void *);
static void	jme_reset(struct jme_softc *);
static void	jme_init(void *);
static void	jme_init_locked(struct jme_softc *);
static void	jme_stop(struct jme_softc *);
static void	jme_stop_tx(struct jme_softc *);
static void	jme_stop_rx(struct jme_softc *);
static int	jme_init_rx_ring(struct jme_softc *);
static void	jme_init_tx_ring(struct jme_softc *);
static void	jme_init_ssb(struct jme_softc *);
static int	jme_newbuf(struct jme_softc *, struct jme_rxdesc *);
static void	jme_set_vlan(struct jme_softc *);
static void	jme_set_filter(struct jme_softc *);
static void	jme_stats_clear(struct jme_softc *);
static void	jme_stats_save(struct jme_softc *);
static void	jme_stats_update(struct jme_softc *);
static void	jme_phy_down(struct jme_softc *);
static void	jme_phy_up(struct jme_softc *);
static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int	sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS);
static int	sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int	sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS);
static int	sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int	sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS);

static device_method_t jme_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		jme_probe),
	DEVMETHOD(device_attach,	jme_attach),
	DEVMETHOD(device_detach,	jme_detach),
	DEVMETHOD(device_shutdown,	jme_shutdown),
	DEVMETHOD(device_suspend,	jme_suspend),
	DEVMETHOD(device_resume,	jme_resume),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),

	{ NULL, NULL }
};

static driver_t jme_driver = {
	"jme",
	jme_methods,
	sizeof(struct jme_softc)
};

DRIVER_MODULE(jme, pci, jme_driver, 0, 0);
DRIVER_MODULE(miibus, jme, miibus_driver, 0, 0);

static struct resource_spec jme_res_spec_mem[] = {
	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
	{ -1,			0,		0 }
};

static struct resource_spec jme_irq_spec_legacy[] = {
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};

static struct resource_spec jme_irq_spec_msi[] = {
	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
	{ -1,			0,		0 }
};

/*
 * Read a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_readreg(device_t dev, int phy, int reg)
{
	struct jme_softc *sc;
	uint32_t val;
	int i;

	sc = device_get_softc(dev);

	/* For FPGA version, PHY address 0 should be ignored. */
	if ((sc->jme_flags & JME_FLAG_FPGA) != 0 && phy == 0)
		return (0);

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "phy read timeout : %d\n", reg);
		return (0);
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}

/*
 * Write a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct jme_softc *sc;
	int i;

	sc = device_get_softc(dev);

	/* For FPGA version, PHY address 0 should be ignored. */
	if ((sc->jme_flags & JME_FLAG_FPGA) != 0 && phy == 0)
		return (0);

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}

	if (i == 0)
		device_printf(sc->jme_dev, "phy write timeout : %d\n", reg);

	return (0);
}

/*
 * Callback from MII layer when media changes.
 */
static void
jme_miibus_statchg(device_t dev)
{
	struct jme_softc *sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc->jme_link_task);
}

/*
 * Get the current interface media status.
 */
static void
jme_mediastatus(if_t ifp, struct ifmediareq *ifmr)
{
	struct jme_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	JME_LOCK(sc);
	if ((if_getflags(ifp) & IFF_UP) == 0) {
		JME_UNLOCK(sc);
		return;
	}
	mii = device_get_softc(sc->jme_miibus);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
	JME_UNLOCK(sc);
}

/*
 * Set hardware to newly-selected media.
 */
static int
jme_mediachange(if_t ifp)
{
	struct jme_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = if_getsoftc(ifp);
	JME_LOCK(sc);
	mii = device_get_softc(sc->jme_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);
	JME_UNLOCK(sc);

	return (error);
}

static int
jme_probe(device_t dev)
{
	struct jme_dev *sp;
	int i;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	sp = jme_devs;
	for (i = 0; i < nitems(jme_devs); i++, sp++) {
		if (vendor == sp->jme_vendorid &&
		    devid == sp->jme_deviceid) {
			device_set_desc(dev, sp->jme_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	*val = 0;
	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
		return (ETIMEDOUT);
	}

	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
		return (ETIMEDOUT);
	}

	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}

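/*
 * As the loop in jme_eeprom_macaddr() below implies, each EEPROM
 * descriptor appears to be a three-byte (fup, reg, val) tuple: a
 * function/page selector byte carrying an end-of-list flag, a target
 * register offset and a data byte.  The station address is collected
 * from the six entries targeting JME_PAR0 through JME_PAR0 + 5.
 */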
static int
jme_eeprom_macaddr(struct jme_softc *sc)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN) {
		bcopy(eaddr, sc->jme_eaddr, ETHER_ADDR_LEN);
		return (0);
	}

	return (ENOENT);
}

static int
jme_efuse_macaddr(struct jme_softc *sc)
{
	uint32_t reg;
	int i;

	reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4);
	if ((reg & (EFUSE_CTL1_AUTOLOAD_ERR | EFUSE_CTL1_AUTOLAOD_DONE)) !=
	    EFUSE_CTL1_AUTOLAOD_DONE)
		return (ENOENT);
	/* Reset eFuse controller. */
	reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL2, 4);
	reg |= EFUSE_CTL2_RESET;
	pci_write_config(sc->jme_dev, JME_EFUSE_CTL2, reg, 4);
	reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL2, 4);
	reg &= ~EFUSE_CTL2_RESET;
	pci_write_config(sc->jme_dev, JME_EFUSE_CTL2, reg, 4);

	/* Have eFuse reload station address to MAC controller. */
	reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4);
	reg &= ~EFUSE_CTL1_CMD_MASK;
	reg |= EFUSE_CTL1_CMD_AUTOLOAD | EFUSE_CTL1_EXECUTE;
	pci_write_config(sc->jme_dev, JME_EFUSE_CTL1, reg, 4);

	/*
	 * Verify completion of the eFuse autoload command.  It should
	 * complete within 108us.
	 */
	DELAY(110);
	for (i = 10; i > 0; i--) {
		reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4);
		if ((reg & (EFUSE_CTL1_AUTOLOAD_ERR |
		    EFUSE_CTL1_AUTOLAOD_DONE)) != EFUSE_CTL1_AUTOLAOD_DONE) {
			DELAY(20);
			continue;
		}
		if ((reg & EFUSE_CTL1_EXECUTE) == 0)
			break;
		/* Station address loading is still in progress. */
		DELAY(20);
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "eFuse autoload timed out.\n");
		return (ETIMEDOUT);
	}

	return (0);
}

static void
jme_reg_macaddr(struct jme_softc *sc)
{
	uint32_t par0, par1;

	/* Read station address. */
	par0 = CSR_READ_4(sc, JME_PAR0);
	par1 = CSR_READ_4(sc, JME_PAR1);
	par1 &= 0xFFFF;
	if ((par0 == 0 && par1 == 0) ||
	    (par0 == 0xFFFFFFFF && par1 == 0xFFFF)) {
		device_printf(sc->jme_dev,
		    "Failed to retrieve Ethernet address.\n");
	} else {
		/*
		 * For controllers that use eFuse, the station address
		 * could also be extracted from the JME_PCI_PAR0 and
		 * JME_PCI_PAR1 registers in PCI configuration space.
		 * Each of those registers holds exactly half of the
		 * station address (24 bits), so use the JME_PAR0 and
		 * JME_PAR1 CSRs instead.
		 */
		sc->jme_eaddr[0] = (par0 >> 0) & 0xFF;
		sc->jme_eaddr[1] = (par0 >> 8) & 0xFF;
		sc->jme_eaddr[2] = (par0 >> 16) & 0xFF;
		sc->jme_eaddr[3] = (par0 >> 24) & 0xFF;
		sc->jme_eaddr[4] = (par1 >> 0) & 0xFF;
		sc->jme_eaddr[5] = (par1 >> 8) & 0xFF;
	}
}

static void
jme_set_macaddr(struct jme_softc *sc, uint8_t *eaddr)
{
	uint32_t val;
	int i;

	if ((sc->jme_flags & JME_FLAG_EFUSE) != 0) {
		/*
		 * Avoid reprogramming the station address if it is the
		 * same as the previous one.  Note that a reprogrammed
		 * station address is permanent, as if it had been written
		 * to EEPROM.  So if the administrator changed the station
		 * address, it is possible to lose the factory configured
		 * address should the driver fail to restore it (e.g. on
		 * reboot or system crash).
		 */
		if (bcmp(eaddr, sc->jme_eaddr, ETHER_ADDR_LEN) != 0) {
			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				val = JME_EFUSE_EEPROM_FUNC0 <<
				    JME_EFUSE_EEPROM_FUNC_SHIFT;
				val |= JME_EFUSE_EEPROM_PAGE_BAR1 <<
				    JME_EFUSE_EEPROM_PAGE_SHIFT;
				val |= (JME_PAR0 + i) <<
				    JME_EFUSE_EEPROM_ADDR_SHIFT;
				val |= eaddr[i] << JME_EFUSE_EEPROM_DATA_SHIFT;
				pci_write_config(sc->jme_dev, JME_EFUSE_EEPROM,
				    val | JME_EFUSE_EEPROM_WRITE, 4);
			}
		}
	} else {
		CSR_WRITE_4(sc, JME_PAR0,
		    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
		CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
	}
}

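/*
 * Program the JME_MSINUM mapping registers: Rx interrupt sources are
 * steered to MSI/MSI-X vector 1, Tx sources to vector 2 and everything
 * else stays on vector 0.  MSINUM_REG_INDEX() selects the 32-bit
 * mapping register that covers a given source and MSINUM_INTR_SOURCE()
 * builds the vector field for that source within it.
 */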
static void
jme_map_intr_vector(struct jme_softc *sc)
{
	uint32_t map[MSINUM_NUM_INTR_SOURCE / JME_MSI_MESSAGES];

	bzero(map, sizeof(map));

	/* Map Tx interrupt sources to MSI/MSI-X vector 2. */
	map[MSINUM_REG_INDEX(N_INTR_TXQ0_COMP)] =
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ0_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ1_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ1_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ2_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ2_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ3_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ3_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ4_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ4_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ5_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ5_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ6_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ6_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ7_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ7_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL);
	map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL_TO)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL_TO);

	/* Map Rx interrupt sources to MSI/MSI-X vector 1. */
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COMP)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COMP)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COMP)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COMP)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_DESC_EMPTY)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_DESC_EMPTY)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_DESC_EMPTY)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_DESC_EMPTY)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL_TO)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL_TO);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL_TO)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL_TO);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL_TO)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL_TO);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL_TO)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL_TO);

	/* All other interrupt sources are mapped to MSI/MSI-X vector 0. */
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 0, map[0]);
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 1, map[1]);
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 2, map[2]);
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 3, map[3]);
}

static int
jme_attach(device_t dev)
{
	struct jme_softc *sc;
	if_t ifp;
	struct mii_softc *miisc;
	struct mii_data *mii;
	uint32_t reg;
	uint16_t burst;
	int error, i, mii_flags, msic, msixc;

	error = 0;
	sc = device_get_softc(dev);
	sc->jme_dev = dev;

	mtx_init(&sc->jme_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->jme_tick_ch, &sc->jme_mtx, 0);
	TASK_INIT(&sc->jme_int_task, 0, jme_int_task, sc);
	TASK_INIT(&sc->jme_link_task, 0, jme_link_task, sc);

	/*
	 * Map the device.  The JMC250 supports both memory-mapped and
	 * I/O register space access.  Since I/O register access would
	 * have to use a different BAR, it is a waste of time to use it;
	 * the JMC250 maps its entire register space into 16KB of memory
	 * space.
	 */
	pci_enable_busmaster(dev);
	sc->jme_res_spec = jme_res_spec_mem;
	sc->jme_irq_spec = jme_irq_spec_legacy;
	error = bus_alloc_resources(dev, sc->jme_res_spec, sc->jme_res);
	if (error != 0) {
		device_printf(dev, "cannot allocate memory resources.\n");
		goto fail;
	}

	/* Allocate IRQ resources. */
	msixc = pci_msix_count(dev);
	msic = pci_msi_count(dev);
	if (bootverbose) {
		device_printf(dev, "MSIX count : %d\n", msixc);
		device_printf(dev, "MSI count : %d\n", msic);
	}

	/* Use 1 MSI/MSI-X. */
	if (msixc > 1)
		msixc = 1;
	if (msic > 1)
		msic = 1;
	/* Prefer MSIX over MSI. */
	if (msix_disable == 0 || msi_disable == 0) {
		if (msix_disable == 0 && msixc > 0 &&
		    pci_alloc_msix(dev, &msixc) == 0) {
			if (msixc == 1) {
				device_printf(dev, "Using %d MSIX messages.\n",
				    msixc);
				sc->jme_flags |= JME_FLAG_MSIX;
				sc->jme_irq_spec = jme_irq_spec_msi;
			} else
				pci_release_msi(dev);
		}
		if (msi_disable == 0 && (sc->jme_flags & JME_FLAG_MSIX) == 0 &&
		    msic > 0 && pci_alloc_msi(dev, &msic) == 0) {
			if (msic == 1) {
				device_printf(dev, "Using %d MSI messages.\n",
				    msic);
				sc->jme_flags |= JME_FLAG_MSI;
				sc->jme_irq_spec = jme_irq_spec_msi;
			} else
				pci_release_msi(dev);
		}
		/* Map interrupt vectors 0, 1 and 2. */
		if ((sc->jme_flags & JME_FLAG_MSI) != 0 ||
		    (sc->jme_flags & JME_FLAG_MSIX) != 0)
			jme_map_intr_vector(sc);
	}

	error = bus_alloc_resources(dev, sc->jme_irq_spec, sc->jme_irq);
	if (error != 0) {
		device_printf(dev, "cannot allocate IRQ resources.\n");
		goto fail;
	}

	sc->jme_rev = pci_get_device(dev);
	if ((sc->jme_rev & DEVICEID_JMC2XX_MASK) == DEVICEID_JMC260) {
		sc->jme_flags |= JME_FLAG_FASTETH;
		sc->jme_flags |= JME_FLAG_NOJUMBO;
	}
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	sc->jme_chip_rev = (reg & CHIPMODE_REV_MASK) >> CHIPMODE_REV_SHIFT;
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA)
		sc->jme_flags |= JME_FLAG_FPGA;
	if (bootverbose) {
		device_printf(dev, "PCI device revision : 0x%04x\n",
		    sc->jme_rev);
		device_printf(dev, "Chip revision : 0x%02x\n",
		    sc->jme_chip_rev);
		if ((sc->jme_flags & JME_FLAG_FPGA) != 0)
			device_printf(dev, "FPGA revision : 0x%04x\n",
			    (reg & CHIPMODE_FPGA_REV_MASK) >>
			    CHIPMODE_FPGA_REV_SHIFT);
	}
	if (sc->jme_chip_rev == 0xFF) {
		device_printf(dev, "Unknown chip revision : 0x%02x\n",
		    sc->jme_rev);
		error = ENXIO;
		goto fail;
	}

	/* Identify controller features and bugs. */
	if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 2) {
		if ((sc->jme_rev & DEVICEID_JMC2XX_MASK) == DEVICEID_JMC260 &&
		    CHIPMODE_REVFM(sc->jme_chip_rev) == 2)
			sc->jme_flags |= JME_FLAG_DMA32BIT;
		if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5)
			sc->jme_flags |= JME_FLAG_EFUSE | JME_FLAG_PCCPCD;
		sc->jme_flags |= JME_FLAG_TXCLK | JME_FLAG_RXCLK;
		sc->jme_flags |= JME_FLAG_HWMIB;
	}

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Get station address. */
	if ((sc->jme_flags & JME_FLAG_EFUSE) != 0) {
		error = jme_efuse_macaddr(sc);
		if (error == 0)
			jme_reg_macaddr(sc);
	} else {
		error = ENOENT;
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_EEPROM_PRESENT) != 0)
			error = jme_eeprom_macaddr(sc);
		if (error != 0 && bootverbose)
			device_printf(sc->jme_dev,
			    "ethernet hardware address not found in EEPROM.\n");
		if (error != 0)
			jme_reg_macaddr(sc);
	}

	/*
	 * Save PHY address.
	 * The integrated JR0211 has a fixed PHY address, whereas the
	 * FPGA version requires PHY probing to get the correct address.
	 */
	if ((sc->jme_flags & JME_FLAG_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (bootverbose)
			device_printf(dev, "PHY is at address %d.\n",
			    sc->jme_phyaddr);
	} else
		sc->jme_phyaddr = 0;

	/* Set max allowable DMA size. */
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		sc->jme_flags |= JME_FLAG_PCIE;
		burst = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((burst >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((burst >> 5) & 0x07));
		}
		switch ((burst >> 12) & 0x07) {
		case 0:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
			break;
		case 1:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
			break;
		default:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
			break;
		}
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	} else {
		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	}
	/* Create coalescing sysctl node. */
	jme_sysctl_node(sc);
	if ((error = jme_dma_alloc(sc)) != 0)
		goto fail;

	ifp = sc->jme_ifp = if_alloc(IFT_ETHER);
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, jme_ioctl);
	if_setstartfn(ifp, jme_start);
	if_setinitfn(ifp, jme_init);
	if_setsendqlen(ifp, JME_TX_RING_CNT - 1);
	if_setsendqready(ifp);
	/* JMC250 supports Tx/Rx checksum offload as well as TSO. */
	if_setcapabilities(ifp, IFCAP_HWCSUM | IFCAP_TSO4);
	if_sethwassist(ifp, JME_CSUM_FEATURES | CSUM_TSO);
	if (pci_has_pm(dev)) {
		if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0);
	}
	if_setcapenable(ifp, if_getcapabilities(ifp));

	/* Wake up PHY. */
	jme_phy_up(sc);
	mii_flags = MIIF_DOPAUSE;
	/* Ask the PHY driver to perform PHY calibration. */
	if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5)
		mii_flags |= MIIF_MACPRIV0;
	/* Set up MII bus. */
	error = mii_attach(dev, &sc->jme_miibus, ifp, jme_mediachange,
	    jme_mediastatus, BMSR_DEFCAPMASK,
	    sc->jme_flags & JME_FLAG_FPGA ? MII_PHY_ANY : sc->jme_phyaddr,
	    MII_OFFSET_ANY, mii_flags);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	/*
	 * Force PHY to FPGA mode.
	 */
	if ((sc->jme_flags & JME_FLAG_FPGA) != 0) {
		mii = device_get_softc(sc->jme_miibus);
		if (mii->mii_instance != 0) {
			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
				if (miisc->mii_phy != 0) {
					sc->jme_phyaddr = miisc->mii_phy;
					break;
				}
			}
			if (sc->jme_phyaddr != 0) {
				device_printf(sc->jme_dev,
				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
				/* vendor magic. */
				jme_miibus_writereg(dev, sc->jme_phyaddr, 27,
				    0x0004);
			}
		}
	}

	ether_ifattach(ifp, sc->jme_eaddr);

	/* VLAN capability setup */
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));

	/* Tell the upper layer(s) we support long frames. */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	/* Create local taskq. */
	sc->jme_tq = taskqueue_create_fast("jme_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->jme_tq);
	taskqueue_start_threads(&sc->jme_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->jme_dev));

	for (i = 0; i < 1; i++) {
		error = bus_setup_intr(dev, sc->jme_irq[i],
		    INTR_TYPE_NET | INTR_MPSAFE, jme_intr, NULL, sc,
		    &sc->jme_intrhand[i]);
		if (error != 0)
			break;
	}

	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		taskqueue_free(sc->jme_tq);
		sc->jme_tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error != 0)
		jme_detach(dev);

	return (error);
}

static int
jme_detach(device_t dev)
{
	struct jme_softc *sc;
	if_t ifp;
	int i;

	sc = device_get_softc(dev);

	ifp = sc->jme_ifp;
	if (device_is_attached(dev)) {
		JME_LOCK(sc);
		sc->jme_flags |= JME_FLAG_DETACH;
		jme_stop(sc);
		JME_UNLOCK(sc);
		callout_drain(&sc->jme_tick_ch);
		taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
		taskqueue_drain(taskqueue_swi, &sc->jme_link_task);
		/* Restore possibly modified station address. */
		if ((sc->jme_flags & JME_FLAG_EFUSE) != 0)
			jme_set_macaddr(sc, sc->jme_eaddr);
		ether_ifdetach(ifp);
	}

	if (sc->jme_tq != NULL) {
		taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
		taskqueue_free(sc->jme_tq);
		sc->jme_tq = NULL;
	}

	bus_generic_detach(dev);
	jme_dma_free(sc);

	if (ifp != NULL) {
		if_free(ifp);
		sc->jme_ifp = NULL;
	}

	for (i = 0; i < 1; i++) {
		if (sc->jme_intrhand[i] != NULL) {
			bus_teardown_intr(dev, sc->jme_irq[i],
			    sc->jme_intrhand[i]);
			sc->jme_intrhand[i] = NULL;
		}
	}

	if (sc->jme_irq[0] != NULL)
		bus_release_resources(dev, sc->jme_irq_spec, sc->jme_irq);
	if ((sc->jme_flags & (JME_FLAG_MSIX | JME_FLAG_MSI)) != 0)
		pci_release_msi(dev);
	if (sc->jme_res[0] != NULL)
		bus_release_resources(dev, sc->jme_res_spec, sc->jme_res);
	mtx_destroy(&sc->jme_mtx);

	return (0);
}

#define	JME_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)

static void
jme_sysctl_node(struct jme_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct jme_hw_stats *stats;
	int error;

	stats = &sc->jme_stats;
	ctx = device_get_sysctl_ctx(sc->jme_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev));

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_coal_to",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->jme_tx_coal_to,
	    0, sysctl_hw_jme_tx_coal_to, "I", "jme tx coalescing timeout");

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_coal_pkt",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->jme_tx_coal_pkt,
	    0, sysctl_hw_jme_tx_coal_pkt, "I", "jme tx coalescing packet");

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_coal_to",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->jme_rx_coal_to,
	    0, sysctl_hw_jme_rx_coal_to, "I", "jme rx coalescing timeout");

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_coal_pkt",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->jme_rx_coal_pkt,
	    0, sysctl_hw_jme_rx_coal_pkt, "I", "jme rx coalescing packet");

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    &sc->jme_process_limit, 0, sysctl_hw_jme_proc_limit, "I",
	    "max number of Rx events to process");

	/* Pull in device tunables. */
	sc->jme_process_limit = JME_PROC_DEFAULT;
	error = resource_int_value(device_get_name(sc->jme_dev),
	    device_get_unit(sc->jme_dev), "process_limit",
	    &sc->jme_process_limit);
	if (error == 0) {
		if (sc->jme_process_limit < JME_PROC_MIN ||
		    sc->jme_process_limit > JME_PROC_MAX) {
			device_printf(sc->jme_dev,
			    "process_limit value out of range; "
			    "using default: %d\n", JME_PROC_DEFAULT);
			sc->jme_process_limit = JME_PROC_DEFAULT;
		}
	}

	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
	error = resource_int_value(device_get_name(sc->jme_dev),
	    device_get_unit(sc->jme_dev), "tx_coal_to", &sc->jme_tx_coal_to);
	if (error == 0) {
		if (sc->jme_tx_coal_to < PCCTX_COAL_TO_MIN ||
		    sc->jme_tx_coal_to > PCCTX_COAL_TO_MAX) {
			device_printf(sc->jme_dev,
			    "tx_coal_to value out of range; "
			    "using default: %d\n", PCCTX_COAL_TO_DEFAULT);
			sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
		}
	}

	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
	error = resource_int_value(device_get_name(sc->jme_dev),
	    device_get_unit(sc->jme_dev), "tx_coal_pkt", &sc->jme_tx_coal_pkt);
	if (error == 0) {
		if (sc->jme_tx_coal_pkt < PCCTX_COAL_PKT_MIN ||
		    sc->jme_tx_coal_pkt > PCCTX_COAL_PKT_MAX) {
			device_printf(sc->jme_dev,
			    "tx_coal_pkt value out of range; "
			    "using default: %d\n", PCCTX_COAL_PKT_DEFAULT);
			sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
		}
	}

	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
	error = resource_int_value(device_get_name(sc->jme_dev),
	    device_get_unit(sc->jme_dev), "rx_coal_to", &sc->jme_rx_coal_to);
	if (error == 0) {
		if (sc->jme_rx_coal_to < PCCRX_COAL_TO_MIN ||
		    sc->jme_rx_coal_to > PCCRX_COAL_TO_MAX) {
			device_printf(sc->jme_dev,
			    "rx_coal_to value out of range; "
			    "using default: %d\n", PCCRX_COAL_TO_DEFAULT);
			sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
		}
	}

	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
	error = resource_int_value(device_get_name(sc->jme_dev),
	    device_get_unit(sc->jme_dev), "rx_coal_pkt", &sc->jme_rx_coal_pkt);
	if (error == 0) {
		if (sc->jme_rx_coal_pkt < PCCRX_COAL_PKT_MIN ||
		    sc->jme_rx_coal_pkt > PCCRX_COAL_PKT_MAX) {
			device_printf(sc->jme_dev,
			    "rx_coal_pkt value out of range; "
			    "using default: %d\n", PCCRX_COAL_PKT_DEFAULT);
			sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
		}
	}

	if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
		return;

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "JME statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* Rx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Rx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	JME_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->rx_good_frames, "Good frames");
	JME_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
	    &stats->rx_crc_errs, "CRC errors");
	JME_SYSCTL_STAT_ADD32(ctx, child, "mii_errs",
	    &stats->rx_mii_errs, "MII errors");
	JME_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
	    &stats->rx_fifo_oflows, "FIFO overflows");
	JME_SYSCTL_STAT_ADD32(ctx, child, "desc_empty",
	    &stats->rx_desc_empty, "Descriptor empty");
	JME_SYSCTL_STAT_ADD32(ctx, child, "bad_frames",
	    &stats->rx_bad_frames, "Bad frames");

	/* Tx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	JME_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->tx_good_frames, "Good frames");
	JME_SYSCTL_STAT_ADD32(ctx, child, "bad_frames",
	    &stats->tx_bad_frames, "Bad frames");
}

#undef	JME_SYSCTL_STAT_ADD32

struct jme_dmamap_arg {
	bus_addr_t	jme_busaddr;
};

static void
jme_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct jme_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct jme_dmamap_arg *)arg;
	ctx->jme_busaddr = segs[0].ds_addr;
}

static int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_dmamap_arg ctx;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	bus_addr_t lowaddr, rx_ring_end, tx_ring_end;
	int error, i;

	lowaddr = BUS_SPACE_MAXADDR;
	if ((sc->jme_flags & JME_FLAG_DMA32BIT) != 0)
		lowaddr = BUS_SPACE_MAXADDR_32BIT;

again:
	/* Create parent ring tag. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */
	    1, 0,			/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_ring_tag);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		goto fail;
	}
	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
	    JME_TX_RING_ALIGN, 0,	/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_TX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    JME_TX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not allocate Tx ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
	    JME_RX_RING_ALIGN, 0,	/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_RX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    JME_RX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not allocate Rx ring DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->jme_cdata.jme_tx_ring_tag,
	    (void **)&sc->jme_rdata.jme_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->jme_cdata.jme_tx_ring_map);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		goto fail;
	}

	ctx.jme_busaddr = 0;
	error = bus_dmamap_load(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
	    JME_TX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.jme_busaddr == 0) {
		device_printf(sc->jme_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		goto fail;
	}
	sc->jme_rdata.jme_tx_ring_paddr = ctx.jme_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->jme_cdata.jme_rx_ring_tag,
	    (void **)&sc->jme_rdata.jme_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->jme_cdata.jme_rx_ring_map);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not allocate DMA'able memory for Rx ring.\n");
		goto fail;
	}

	ctx.jme_busaddr = 0;
	error = bus_dmamap_load(sc->jme_cdata.jme_rx_ring_tag,
	    sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
	    JME_RX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.jme_busaddr == 0) {
		device_printf(sc->jme_dev,
		    "could not load DMA'able memory for Rx ring.\n");
		goto fail;
	}
	sc->jme_rdata.jme_rx_ring_paddr = ctx.jme_busaddr;

	if (lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		/* Tx/Rx descriptor queue should reside within 4GB boundary. */
		tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr +
		    JME_TX_RING_SIZE;
		rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr +
		    JME_RX_RING_SIZE;
		if ((JME_ADDR_HI(tx_ring_end) !=
		    JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
		    (JME_ADDR_HI(rx_ring_end) !=
		    JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
			device_printf(sc->jme_dev, "4GB boundary crossed, "
			    "switching to 32bit DMA address mode.\n");
			jme_dma_free(sc);
			/* Limit DMA address space to 32bit and try again. */
			lowaddr = BUS_SPACE_MAXADDR_32BIT;
			goto again;
		}
	}
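
	/*
	 * Illustration of the check above, with hypothetical numbers: a
	 * Tx ring loaded at bus address 0xFFFFF000 with a 4KB ring size
	 * would cross 0x100000000, so JME_ADDR_HI() of the start and end
	 * addresses would differ and the rings are reallocated below 4GB.
	 */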

	lowaddr = BUS_SPACE_MAXADDR;
	if ((sc->jme_flags & JME_FLAG_DMA32BIT) != 0)
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
	/* Create parent buffer tag. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */
	    1, 0,			/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_buffer_tag);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not create parent buffer DMA tag.\n");
		goto fail;
	}

	/* Create shadow status block tag. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    JME_SSB_ALIGN, 0,		/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_SSB_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    JME_SSB_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_ssb_tag);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not create shared status block DMA tag.\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_TSO_MAXSIZE,		/* maxsize */
	    JME_MAXTXSEGS,		/* nsegments */
	    JME_TSO_MAXSEGSIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_tx_tag);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_rx_tag);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not create Rx DMA tag.\n");
		goto fail;
	}

	/*
	 * Allocate DMA'able memory and load the DMA map for shared
	 * status block.
	 */
	error = bus_dmamem_alloc(sc->jme_cdata.jme_ssb_tag,
	    (void **)&sc->jme_rdata.jme_ssb_block,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->jme_cdata.jme_ssb_map);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not allocate DMA'able "
		    "memory for shared status block.\n");
		goto fail;
	}

	ctx.jme_busaddr = 0;
	error = bus_dmamap_load(sc->jme_cdata.jme_ssb_tag,
	    sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,
	    JME_SSB_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.jme_busaddr == 0) {
		device_printf(sc->jme_dev, "could not load DMA'able memory "
		    "for shared status block.\n");
		goto fail;
	}
	sc->jme_rdata.jme_ssb_block_paddr = ctx.jme_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->jme_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
	    &sc->jme_cdata.jme_rx_sparemap)) != 0) {
		device_printf(sc->jme_dev,
		    "could not create spare Rx dmamap.\n");
		goto fail;
	}
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->jme_dev,
			    "could not create Rx dmamap.\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	/* Tx ring */
	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
		if (sc->jme_rdata.jme_tx_ring_paddr)
			bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
			    sc->jme_cdata.jme_tx_ring_map);
		if (sc->jme_rdata.jme_tx_ring)
			bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
			    sc->jme_rdata.jme_tx_ring,
			    sc->jme_cdata.jme_tx_ring_map);
		sc->jme_rdata.jme_tx_ring = NULL;
		sc->jme_rdata.jme_tx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
	}
	/* Rx ring */
	if (sc->jme_cdata.jme_rx_ring_tag != NULL) {
		if (sc->jme_rdata.jme_rx_ring_paddr)
			bus_dmamap_unload(sc->jme_cdata.jme_rx_ring_tag,
			    sc->jme_cdata.jme_rx_ring_map);
		if (sc->jme_rdata.jme_rx_ring)
			bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag,
			    sc->jme_rdata.jme_rx_ring,
			    sc->jme_cdata.jme_rx_ring_map);
		sc->jme_rdata.jme_rx_ring = NULL;
		sc->jme_rdata.jme_rx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
		sc->jme_cdata.jme_rx_ring_tag = NULL;
	}
	/* Tx buffers */
	if (sc->jme_cdata.jme_tx_tag != NULL) {
		for (i = 0; i < JME_TX_RING_CNT; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
		sc->jme_cdata.jme_tx_tag = NULL;
	}
	/* Rx buffers */
	if (sc->jme_cdata.jme_rx_tag != NULL) {
		for (i = 0; i < JME_RX_RING_CNT; i++) {
			rxd = &sc->jme_cdata.jme_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->jme_cdata.jme_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
			    sc->jme_cdata.jme_rx_sparemap);
			sc->jme_cdata.jme_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
		sc->jme_cdata.jme_rx_tag = NULL;
	}

	/* Shared status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		if (sc->jme_rdata.jme_ssb_block_paddr)
			bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
			    sc->jme_cdata.jme_ssb_map);
		if (sc->jme_rdata.jme_ssb_block)
			bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
			    sc->jme_rdata.jme_ssb_block,
			    sc->jme_cdata.jme_ssb_map);
		sc->jme_rdata.jme_ssb_block = NULL;
		sc->jme_rdata.jme_ssb_block_paddr = 0;
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}
}

/*
 * Make sure the interface is stopped at reboot time.
 */
static int
jme_shutdown(device_t dev)
{

	return (jme_suspend(dev));
}

/*
 * Unlike other ethernet controllers, the JMC250 requires the link
 * speed to be explicitly reset to 10/100Mbps, since a gigabit link
 * consumes more than 375mA.
 * Note, we reset the link speed to 10/100Mbps with auto-negotiation,
 * but we do not know whether that operation will succeed, as we have
 * no control after powering off.  If the renegotiation fails, WOL may
 * not work.  Running at 1Gbps draws more than the 375mA at 3.3V that
 * the PCI specification allows, and that would result in power to the
 * ethernet controller being shut down completely.
 *
 * TODO
 * Save the currently negotiated media speed/duplex/flow-control
 * in the softc and restore the same link again after resuming.
 * PHY handling such as powering down/resetting to 100Mbps
 * may be better handled in the suspend method of the phy driver.
 */
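
/*
 * For scale: the 375mA at 3.3V budget mentioned above works out to
 * roughly 0.375 * 3.3 ~= 1.24W of allowed draw.
 */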
static void
jme_setlinkspeed(struct jme_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	JME_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->jme_miibus);
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
		default:
			break;
		}
	}
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/* Poll link state until jme(4) gets a 10/100 link. */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & IFM_AVALID) != 0) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					jme_mac_config(sc);
					return;
				default:
					break;
				}
			}
			JME_UNLOCK(sc);
			pause("jmelnk", hz);
			JME_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			device_printf(sc->jme_dev, "establishing link failed, "
			    "WOL may not work!");
	}
	/*
	 * No link, force the MAC to a 100Mbps, full-duplex link.
	 * This is the last resort and may or may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	jme_mac_config(sc);
}

static void
jme_setwol(struct jme_softc *sc)
{
	if_t ifp;
	uint32_t gpr, pmcs;

	JME_LOCK_ASSERT(sc);

	if (!pci_has_pm(sc->jme_dev)) {
		/* Remove Tx MAC/offload clock to save more power. */
		if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
			CSR_WRITE_4(sc, JME_GHC, CSR_READ_4(sc, JME_GHC) &
			    ~(GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100 |
			    GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000));
		if ((sc->jme_flags & JME_FLAG_RXCLK) != 0)
			CSR_WRITE_4(sc, JME_GPREG1,
			    CSR_READ_4(sc, JME_GPREG1) | GPREG1_RX_MAC_CLK_DIS);
		/* No PME capability, PHY power down. */
		jme_phy_down(sc);
		return;
	}

	ifp = sc->jme_ifp;
	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_flags & JME_FLAG_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);
	/* Remove Tx MAC/offload clock to save more power. */
	if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
		CSR_WRITE_4(sc, JME_GHC, CSR_READ_4(sc, JME_GHC) &
		    ~(GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100 |
		    GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000));
	/* Request PME. */
	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
		pci_enable_pme(sc->jme_dev);
	if ((if_getcapenable(ifp) & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_phy_down(sc);
	}
}

static int
jme_suspend(device_t dev)
{
	struct jme_softc *sc;

	sc = device_get_softc(dev);

	JME_LOCK(sc);
	jme_stop(sc);
	jme_setwol(sc);
	JME_UNLOCK(sc);

	return (0);
}

static int
jme_resume(device_t dev)
{
	struct jme_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);

	/* Wake up PHY. */
	JME_LOCK(sc);
	jme_phy_up(sc);
	ifp = sc->jme_ifp;
	if ((if_getflags(ifp) & IFF_UP) != 0) {
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		jme_init_locked(sc);
	}

	JME_UNLOCK(sc);

	return (0);
}

static int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int error, i, nsegs, prod;
	uint32_t cflags, tsosegsz;

	JME_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		/*
		 * Due to its adherence to the NDIS specification, the
		 * JMC250 assumes the upper stack computed the TCP pseudo
		 * checksum without including the payload length.  This
		 * breaks checksum offload for the TSO case, so recompute
		 * the TCP pseudo checksum for the JMC250.  Hopefully
		 * this isn't much of a burden on modern CPUs.
		 */
		struct ether_header *eh;
		struct ip *ip;
		struct tcphdr *tcp;
		uint32_t ip_off, poff;

		if (M_WRITABLE(*m_head) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_NOWAIT);
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}
		ip_off = sizeof(struct ether_header);
		m = m_pullup(*m_head, ip_off);
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		eh = mtod(m, struct ether_header *);
		/* Check for the existence of a VLAN tag. */
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			ip_off = sizeof(struct ether_vlan_header);
			m = m_pullup(m, ip_off);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		m = m_pullup(m, ip_off + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, char *) + ip_off);
		poff = ip_off + (ip->ip_hl << 2);
		m = m_pullup(m, poff + sizeof(struct tcphdr));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		/*
		 * Reset the IP checksum and recompute the TCP pseudo
		 * checksum that the NDIS specification requires.
		 */
		ip = (struct ip *)(mtod(m, char *) + ip_off);
		tcp = (struct tcphdr *)(mtod(m, char *) + poff);
		ip->ip_sum = 0;
		if (poff + (tcp->th_off << 2) == m->m_pkthdr.len) {
			tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr,
			    htons((tcp->th_off << 2) + IPPROTO_TCP));
			/* No need for TSO; force IP checksum offload. */
			(*m_head)->m_pkthdr.csum_flags &= ~CSUM_TSO;
			(*m_head)->m_pkthdr.csum_flags |= CSUM_IP;
		} else
			tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		*m_head = m;
	}

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_NOWAIT, JME_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/*
	 * Check descriptor overrun.  Leave one free descriptor.
	 * Since we always use 64bit address mode for transmitting,
	 * each Tx request requires one more dummy descriptor.
	 */
	if (sc->jme_cdata.jme_tx_cnt + nsegs + 1 > JME_TX_RING_CNT - 1) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}
1770
1771 m = *m_head;
1772 cflags = 0;
1773 tsosegsz = 0;
1774 /* Configure checksum offload and TSO. */
1775 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
1776 tsosegsz = (uint32_t)m->m_pkthdr.tso_segsz <<
1777 JME_TD_MSS_SHIFT;
1778 cflags |= JME_TD_TSO;
1779 } else {
1780 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
1781 cflags |= JME_TD_IPCSUM;
1782 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
1783 cflags |= JME_TD_TCPCSUM;
1784 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
1785 cflags |= JME_TD_UDPCSUM;
1786 }
1787 /* Configure VLAN. */
1788 if ((m->m_flags & M_VLANTAG) != 0) {
1789 cflags |= (m->m_pkthdr.ether_vtag & JME_TD_VLAN_MASK);
1790 cflags |= JME_TD_VLAN_TAG;
1791 }
1792
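	/*
	 * Ring layout sketch (as implemented below): the first
	 * descriptor of each request carries the checksum/TSO/VLAN
	 * flags, the TSO segment size and the total packet length;
	 * the nsegs descriptors that follow carry the DMA segment
	 * addresses. A request therefore consumes nsegs + 1 ring
	 * entries, which matches the tx_ndesc accounting and the
	 * overrun check above.
	 */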
1793 desc = &sc->jme_rdata.jme_tx_ring[prod];
1794 desc->flags = htole32(cflags);
1795 desc->buflen = htole32(tsosegsz);
1796 desc->addr_hi = htole32(m->m_pkthdr.len);
1797 desc->addr_lo = 0;
1798 sc->jme_cdata.jme_tx_cnt++;
1799 JME_DESC_INC(prod, JME_TX_RING_CNT);
1800 for (i = 0; i < nsegs; i++) {
1801 desc = &sc->jme_rdata.jme_tx_ring[prod];
1802 desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT);
1803 desc->buflen = htole32(txsegs[i].ds_len);
1804 desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
1805 desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
1806 sc->jme_cdata.jme_tx_cnt++;
1807 JME_DESC_INC(prod, JME_TX_RING_CNT);
1808 }
1809
1810 /* Update producer index. */
1811 sc->jme_cdata.jme_tx_prod = prod;
1812 /*
1813 * Finally, request an interrupt and give ownership of
1814 * the first descriptor to the hardware.
1815 */
1816 desc = txd->tx_desc;
1817 desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
1818
1819 txd->tx_m = m;
1820 txd->tx_ndesc = nsegs + 1;
1821
1822 /* Sync descriptors. */
1823 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
1824 BUS_DMASYNC_PREWRITE);
1825 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
1826 sc->jme_cdata.jme_tx_ring_map,
1827 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1828
1829 return (0);
1830 }
1831
1832 static void
1833 jme_start(if_t ifp)
1834 {
1835 struct jme_softc *sc;
1836
1837 sc = if_getsoftc(ifp);
1838 JME_LOCK(sc);
1839 jme_start_locked(ifp);
1840 JME_UNLOCK(sc);
1841 }
1842
1843 static void
1844 jme_start_locked(if_t ifp)
1845 {
1846 struct jme_softc *sc;
1847 struct mbuf *m_head;
1848 int enq;
1849
1850 sc = if_getsoftc(ifp);
1851
1852 JME_LOCK_ASSERT(sc);
1853
1854 if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT)
1855 jme_txeof(sc);
1856
1857 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1858 IFF_DRV_RUNNING || (sc->jme_flags & JME_FLAG_LINK) == 0)
1859 return;
1860
1861 for (enq = 0; !if_sendq_empty(ifp); ) {
1862 m_head = if_dequeue(ifp);
1863 if (m_head == NULL)
1864 break;
1865 /*
1866 * Pack the data into the transmit ring. If we
1867 * don't have room, set the OACTIVE flag and wait
1868 * for the NIC to drain the ring.
1869 */
1870 if (jme_encap(sc, &m_head)) {
1871 if (m_head == NULL)
1872 break;
1873 if_sendq_prepend(ifp, m_head);
1874 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
1875 break;
1876 }
1877
1878 enq++;
1879 /*
1880 * If there's a BPF listener, bounce a copy of this frame
1881 * to him.
1882 */
1883 ETHER_BPF_MTAP(ifp, m_head);
1884 }
1885
1886 if (enq > 0) {
1887 /*
1888 * Reading TXCSR takes a very long time under heavy load,
1889 * so cache the TXCSR value and write the OR'ed value with
1890 * the kick command to TXCSR. This saves one register
1891 * access cycle.
1892 */
1893 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1894 TXCSR_TXQ_N_START(TXCSR_TXQ0));
1895 /* Set a timeout in case the chip goes out to lunch. */
1896 sc->jme_watchdog_timer = JME_TX_TIMEOUT;
1897 }
1898 }
1899
1900 static void
1901 jme_watchdog(struct jme_softc *sc)
1902 {
1903 if_t ifp;
1904
1905 JME_LOCK_ASSERT(sc);
1906
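	/*
	 * Watchdog timer convention: 0 means disarmed; a nonzero
	 * value is decremented once per call (driven by jme_tick()),
	 * and recovery runs only when the timer reaches zero here.
	 */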
1907 if (sc->jme_watchdog_timer == 0 || --sc->jme_watchdog_timer)
1908 return;
1909
1910 ifp = sc->jme_ifp;
1911 if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1912 if_printf(sc->jme_ifp, "watchdog timeout (missed link)\n");
1913 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1914 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1915 jme_init_locked(sc);
1916 return;
1917 }
1918 jme_txeof(sc);
1919 if (sc->jme_cdata.jme_tx_cnt == 0) {
1920 if_printf(sc->jme_ifp,
1921 "watchdog timeout (missed Tx interrupts) -- recovering\n");
1922 if (!if_sendq_empty(ifp))
1923 jme_start_locked(ifp);
1924 return;
1925 }
1926
1927 if_printf(sc->jme_ifp, "watchdog timeout\n");
1928 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1929 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1930 jme_init_locked(sc);
1931 if (!if_sendq_empty(ifp))
1932 jme_start_locked(ifp);
1933 }
1934
1935 static int
1936 jme_ioctl(if_t ifp, u_long cmd, caddr_t data)
1937 {
1938 struct jme_softc *sc;
1939 struct ifreq *ifr;
1940 struct mii_data *mii;
1941 uint32_t reg;
1942 int error, mask;
1943
1944 sc = if_getsoftc(ifp);
1945 ifr = (struct ifreq *)data;
1946 error = 0;
1947 switch (cmd) {
1948 case SIOCSIFMTU:
1949 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
1950 ((sc->jme_flags & JME_FLAG_NOJUMBO) != 0 &&
1951 ifr->ifr_mtu > JME_MAX_MTU)) {
1952 error = EINVAL;
1953 break;
1954 }
1955
1956 if (if_getmtu(ifp) != ifr->ifr_mtu) {
1957 /*
1958 * No special configuration is required when the interface
1959 * MTU is changed, but the availability of TSO/Tx checksum
1960 * offload should be checked against the new MTU size as
1961 * the Tx FIFO size is just 2K.
1962 */
1963 JME_LOCK(sc);
1964 if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
1965 if_setcapenablebit(ifp, 0,
1966 IFCAP_TXCSUM | IFCAP_TSO4);
1967 if_sethwassistbits(ifp, 0,
1968 JME_CSUM_FEATURES | CSUM_TSO);
1969 VLAN_CAPABILITIES(ifp);
1970 }
1971 if_setmtu(ifp, ifr->ifr_mtu);
1972 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
1973 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
1974 jme_init_locked(sc);
1975 }
1976 JME_UNLOCK(sc);
1977 }
1978 break;
1979 case SIOCSIFFLAGS:
1980 JME_LOCK(sc);
1981 if ((if_getflags(ifp) & IFF_UP) != 0) {
1982 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
1983 if (((if_getflags(ifp) ^ sc->jme_if_flags)
1984 & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
1985 jme_set_filter(sc);
1986 } else {
1987 if ((sc->jme_flags & JME_FLAG_DETACH) == 0)
1988 jme_init_locked(sc);
1989 }
1990 } else {
1991 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
1992 jme_stop(sc);
1993 }
1994 sc->jme_if_flags = if_getflags(ifp);
1995 JME_UNLOCK(sc);
1996 break;
1997 case SIOCADDMULTI:
1998 case SIOCDELMULTI:
1999 JME_LOCK(sc);
2000 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
2001 jme_set_filter(sc);
2002 JME_UNLOCK(sc);
2003 break;
2004 case SIOCSIFMEDIA:
2005 case SIOCGIFMEDIA:
2006 mii = device_get_softc(sc->jme_miibus);
2007 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
2008 break;
2009 case SIOCSIFCAP:
2010 JME_LOCK(sc);
2011 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
2012 if ((mask & IFCAP_TXCSUM) != 0 &&
2013 if_getmtu(ifp) < JME_TX_FIFO_SIZE) {
2014 if ((IFCAP_TXCSUM & if_getcapabilities(ifp)) != 0) {
2015 if_togglecapenable(ifp, IFCAP_TXCSUM);
2016 if ((IFCAP_TXCSUM & if_getcapenable(ifp)) != 0)
2017 if_sethwassistbits(ifp, JME_CSUM_FEATURES, 0);
2018 else
2019 if_sethwassistbits(ifp, 0, JME_CSUM_FEATURES);
2020 }
2021 }
2022 if ((mask & IFCAP_RXCSUM) != 0 &&
2023 (IFCAP_RXCSUM & if_getcapabilities(ifp)) != 0) {
2024 if_togglecapenable(ifp, IFCAP_RXCSUM);
2025 reg = CSR_READ_4(sc, JME_RXMAC);
2026 reg &= ~RXMAC_CSUM_ENB;
2027 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
2028 reg |= RXMAC_CSUM_ENB;
2029 CSR_WRITE_4(sc, JME_RXMAC, reg);
2030 }
2031 if ((mask & IFCAP_TSO4) != 0 &&
2032 if_getmtu(ifp) < JME_TX_FIFO_SIZE) {
2033 if ((IFCAP_TSO4 & if_getcapabilities(ifp)) != 0) {
2034 if_togglecapenable(ifp, IFCAP_TSO4);
2035 if ((IFCAP_TSO4 & if_getcapenable(ifp)) != 0)
2036 if_sethwassistbits(ifp, CSUM_TSO, 0);
2037 else
2038 if_sethwassistbits(ifp, 0, CSUM_TSO);
2039 }
2040 }
2041 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
2042 (IFCAP_WOL_MAGIC & if_getcapabilities(ifp)) != 0)
2043 if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
2044 if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
2045 (if_getcapabilities(ifp) & IFCAP_VLAN_HWCSUM) != 0)
2046 if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM);
2047 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
2048 (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0)
2049 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
2050 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
2051 (IFCAP_VLAN_HWTAGGING & if_getcapabilities(ifp)) != 0) {
2052 if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
2053 jme_set_vlan(sc);
2054 }
2055 JME_UNLOCK(sc);
2056 VLAN_CAPABILITIES(ifp);
2057 break;
2058 default:
2059 error = ether_ioctl(ifp, cmd, data);
2060 break;
2061 }
2062
2063 return (error);
2064 }
2065
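/*
 * Reprogram the MAC with the speed/duplex/flow-control parameters
 * resolved by the PHY, including the chip revision specific
 * workarounds applied below.
 */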
2066 static void
2067 jme_mac_config(struct jme_softc *sc)
2068 {
2069 struct mii_data *mii;
2070 uint32_t ghc, gpreg, rxmac, txmac, txpause;
2071 uint32_t txclk;
2072
2073 JME_LOCK_ASSERT(sc);
2074
2075 mii = device_get_softc(sc->jme_miibus);
2076
2077 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2078 DELAY(10);
2079 CSR_WRITE_4(sc, JME_GHC, 0);
2080 ghc = 0;
2081 txclk = 0;
2082 rxmac = CSR_READ_4(sc, JME_RXMAC);
2083 rxmac &= ~RXMAC_FC_ENB;
2084 txmac = CSR_READ_4(sc, JME_TXMAC);
2085 txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
2086 txpause = CSR_READ_4(sc, JME_TXPFC);
2087 txpause &= ~TXPFC_PAUSE_ENB;
2088 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
2089 ghc |= GHC_FULL_DUPLEX;
2090 rxmac &= ~RXMAC_COLL_DET_ENB;
2091 txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
2092 TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
2093 TXMAC_FRAME_BURST);
2094 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
2095 txpause |= TXPFC_PAUSE_ENB;
2096 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
2097 rxmac |= RXMAC_FC_ENB;
2098 /* Disable retry transmit timer/retry limit. */
2099 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
2100 ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
2101 } else {
2102 rxmac |= RXMAC_COLL_DET_ENB;
2103 txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
2104 /* Enable retry transmit timer/retry limit. */
2105 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
2106 TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
2107 }
2108 /* Reprogram Tx/Rx MACs with resolved speed/duplex. */
2109 switch (IFM_SUBTYPE(mii->mii_media_active)) {
2110 case IFM_10_T:
2111 ghc |= GHC_SPEED_10;
2112 txclk |= GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100;
2113 break;
2114 case IFM_100_TX:
2115 ghc |= GHC_SPEED_100;
2116 txclk |= GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100;
2117 break;
2118 case IFM_1000_T:
2119 if ((sc->jme_flags & JME_FLAG_FASTETH) != 0)
2120 break;
2121 ghc |= GHC_SPEED_1000;
2122 txclk |= GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000;
2123 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
2124 txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
2125 break;
2126 default:
2127 break;
2128 }
2129 if (sc->jme_rev == DEVICEID_JMC250 &&
2130 sc->jme_chip_rev == DEVICEREVID_JMC250_A2) {
2131 /*
2132 * Work around an occasional packet loss issue of the JMC250
2133 * A2 when it runs on half-duplex media.
2134 */
2135 gpreg = CSR_READ_4(sc, JME_GPREG1);
2136 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
2137 gpreg &= ~GPREG1_HDPX_FIX;
2138 else
2139 gpreg |= GPREG1_HDPX_FIX;
2140 CSR_WRITE_4(sc, JME_GPREG1, gpreg);
2141 /* Workaround CRC errors at 100Mbps on JMC250 A2. */
2142 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
2143 /* Extend interface FIFO depth. */
2144 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
2145 0x1B, 0x0000);
2146 } else {
2147 /* Select default interface FIFO depth. */
2148 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
2149 0x1B, 0x0004);
2150 }
2151 }
2152 if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
2153 ghc |= txclk;
2154 CSR_WRITE_4(sc, JME_GHC, ghc);
2155 CSR_WRITE_4(sc, JME_RXMAC, rxmac);
2156 CSR_WRITE_4(sc, JME_TXMAC, txmac);
2157 CSR_WRITE_4(sc, JME_TXPFC, txpause);
2158 }
2159
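/*
 * Deferred link-state handler (runs from a taskqueue). It can
 * therefore safely stop the MACs, reclaim in-flight Tx/Rx state and,
 * once a valid link is established, reprogram and restart the
 * hardware.
 */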
2160 static void
2161 jme_link_task(void *arg, int pending)
2162 {
2163 struct jme_softc *sc;
2164 struct mii_data *mii;
2165 if_t ifp;
2166 struct jme_txdesc *txd;
2167 bus_addr_t paddr;
2168 int i;
2169
2170 sc = (struct jme_softc *)arg;
2171
2172 JME_LOCK(sc);
2173 mii = device_get_softc(sc->jme_miibus);
2174 ifp = sc->jme_ifp;
2175 if (mii == NULL || ifp == NULL ||
2176 (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
2177 JME_UNLOCK(sc);
2178 return;
2179 }
2180
2181 sc->jme_flags &= ~JME_FLAG_LINK;
2182 if ((mii->mii_media_status & IFM_AVALID) != 0) {
2183 switch (IFM_SUBTYPE(mii->mii_media_active)) {
2184 case IFM_10_T:
2185 case IFM_100_TX:
2186 sc->jme_flags |= JME_FLAG_LINK;
2187 break;
2188 case IFM_1000_T:
2189 if ((sc->jme_flags & JME_FLAG_FASTETH) != 0)
2190 break;
2191 sc->jme_flags |= JME_FLAG_LINK;
2192 break;
2193 default:
2194 break;
2195 }
2196 }
2197
2198 /*
2199 * Disabling the Rx/Tx MACs has the side effect of resetting
2200 * the JME_TXNDA/JME_RXNDA registers to the first address of
2201 * the Tx/Rx descriptor rings, so the driver should reset its
2202 * internal producer/consumer pointers and reclaim any
2203 * allocated resources. Note that just saving the values of
2204 * the JME_TXNDA and JME_RXNDA registers before stopping the
2205 * MACs and restoring them afterwards is not sufficient to
2206 * guarantee a correct MAC state, because stopping MAC
2207 * operation can take a while and the hardware might have
2208 * updated the JME_TXNDA/JME_RXNDA registers during the stop
2209 * operation.
2210 */
2211 /* Block execution of task. */
2212 taskqueue_block(sc->jme_tq);
2213 /* Disable interrupts and stop driver. */
2214 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2215 if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
2216 callout_stop(&sc->jme_tick_ch);
2217 sc->jme_watchdog_timer = 0;
2218
2219 /* Stop receiver/transmitter. */
2220 jme_stop_rx(sc);
2221 jme_stop_tx(sc);
2222
2223 /* XXX Drain all queued tasks. */
2224 JME_UNLOCK(sc);
2225 taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
2226 JME_LOCK(sc);
2227
2228 if (sc->jme_cdata.jme_rxhead != NULL)
2229 m_freem(sc->jme_cdata.jme_rxhead);
2230 JME_RXCHAIN_RESET(sc);
2231 jme_txeof(sc);
2232 if (sc->jme_cdata.jme_tx_cnt != 0) {
2233 /* Remove queued packets for transmit. */
2234 for (i = 0; i < JME_TX_RING_CNT; i++) {
2235 txd = &sc->jme_cdata.jme_txdesc[i];
2236 if (txd->tx_m != NULL) {
2237 bus_dmamap_sync(
2238 sc->jme_cdata.jme_tx_tag,
2239 txd->tx_dmamap,
2240 BUS_DMASYNC_POSTWRITE);
2241 bus_dmamap_unload(
2242 sc->jme_cdata.jme_tx_tag,
2243 txd->tx_dmamap);
2244 m_freem(txd->tx_m);
2245 txd->tx_m = NULL;
2246 txd->tx_ndesc = 0;
2247 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2248 }
2249 }
2250 }
2251
2252 /*
2253 * Reuse configured Rx descriptors and reset
2254 * producer/consumer index.
2255 */
2256 sc->jme_cdata.jme_rx_cons = 0;
2257 sc->jme_morework = 0;
2258 jme_init_tx_ring(sc);
2259 /* Initialize shadow status block. */
2260 jme_init_ssb(sc);
2261
2262 /* Program MAC with resolved speed/duplex/flow-control. */
2263 if ((sc->jme_flags & JME_FLAG_LINK) != 0) {
2264 jme_mac_config(sc);
2265 jme_stats_clear(sc);
2266
2267 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
2268 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2269
2270 /* Set Tx ring address to the hardware. */
2271 paddr = JME_TX_RING_ADDR(sc, 0);
2272 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2273 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2274
2275 /* Set Rx ring address to the hardware. */
2276 paddr = JME_RX_RING_ADDR(sc, 0);
2277 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2278 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2279
2280 /* Restart receiver/transmitter. */
2281 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
2282 RXCSR_RXQ_START);
2283 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
2284 /* Lastly enable TX/RX clock. */
2285 if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
2286 CSR_WRITE_4(sc, JME_GHC,
2287 CSR_READ_4(sc, JME_GHC) & ~GHC_TX_MAC_CLK_DIS);
2288 if ((sc->jme_flags & JME_FLAG_RXCLK) != 0)
2289 CSR_WRITE_4(sc, JME_GPREG1,
2290 CSR_READ_4(sc, JME_GPREG1) & ~GPREG1_RX_MAC_CLK_DIS);
2291 }
2292
2293 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
2294 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2295 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2296 /* Unblock execution of task. */
2297 taskqueue_unblock(sc->jme_tq);
2298 /* Reenable interrupts. */
2299 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2300
2301 JME_UNLOCK(sc);
2302 }
2303
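/*
 * Interrupt filter: runs in interrupt context, so it only claims the
 * interrupt, masks further interrupts and defers the real work to
 * jme_int_task() on the taskqueue.
 */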
2304 static int
2305 jme_intr(void *arg)
2306 {
2307 struct jme_softc *sc;
2308 uint32_t status;
2309
2310 sc = (struct jme_softc *)arg;
2311
2312 status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
2313 if (status == 0 || status == 0xFFFFFFFF)
2314 return (FILTER_STRAY);
2315 /* Disable interrupts. */
2316 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2317 taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);
2318
2319 return (FILTER_HANDLED);
2320 }
2321
2322 static void
2323 jme_int_task(void *arg, int pending)
2324 {
2325 struct jme_softc *sc;
2326 if_t ifp;
2327 uint32_t status;
2328 int more;
2329
2330 sc = (struct jme_softc *)arg;
2331 ifp = sc->jme_ifp;
2332
2333 JME_LOCK(sc);
2334 status = CSR_READ_4(sc, JME_INTR_STATUS);
2335 if (sc->jme_morework != 0) {
2336 sc->jme_morework = 0;
2337 status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO;
2338 }
2339 if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
2340 goto done;
2341 /* Reset PCC counter/timer and Ack interrupts. */
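	/*
	 * Folding the COMP bits into the acknowledgement whenever the
	 * corresponding COAL bits are set appears to be what resets
	 * the PCC counters/timers for the next coalescing window.
	 */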
2342 status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
2343 if ((status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) != 0)
2344 status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
2345 if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0)
2346 status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
2347 CSR_WRITE_4(sc, JME_INTR_STATUS, status);
2348 more = 0;
2349 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2350 if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0) {
2351 more = jme_rxintr(sc, sc->jme_process_limit);
2352 if (more != 0)
2353 sc->jme_morework = 1;
2354 }
2355 if ((status & INTR_RXQ_DESC_EMPTY) != 0) {
2356 /*
2357 * Notify the hardware of the availability of
2358 * new Rx buffers.
2359 * Reading RXCSR takes a very long time under
2360 * heavy load, so cache the RXCSR value and
2361 * write the OR'ed value with the kick command
2362 * to RXCSR. This saves one register access
2363 * cycle.
2364 */
2365 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
2366 RXCSR_RX_ENB | RXCSR_RXQ_START);
2367 }
2368 if (!if_sendq_empty(ifp))
2369 jme_start_locked(ifp);
2370 }
2371
2372 if (more != 0 || (CSR_READ_4(sc, JME_INTR_STATUS) & JME_INTRS) != 0) {
2373 taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);
2374 JME_UNLOCK(sc);
2375 return;
2376 }
2377 done:
2378 JME_UNLOCK(sc);
2379
2380 /* Reenable interrupts. */
2381 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2382 }
2383
2384 static void
2385 jme_txeof(struct jme_softc *sc)
2386 {
2387 if_t ifp;
2388 struct jme_txdesc *txd;
2389 uint32_t status;
2390 int cons, nsegs;
2391
2392 JME_LOCK_ASSERT(sc);
2393
2394 ifp = sc->jme_ifp;
2395
2396 cons = sc->jme_cdata.jme_tx_cons;
2397 if (cons == sc->jme_cdata.jme_tx_prod)
2398 return;
2399
2400 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2401 sc->jme_cdata.jme_tx_ring_map,
2402 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2403
2404 /*
2405 * Go through our Tx list and free mbufs for those
2406 * frames which have been transmitted.
2407 */
2408 for (; cons != sc->jme_cdata.jme_tx_prod;) {
2409 txd = &sc->jme_cdata.jme_txdesc[cons];
2410 status = le32toh(txd->tx_desc->flags);
2411 if ((status & JME_TD_OWN) == JME_TD_OWN)
2412 break;
2413
2414 if ((status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) != 0)
2415 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2416 else {
2417 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2418 if ((status & JME_TD_COLLISION) != 0)
2419 if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
2420 le32toh(txd->tx_desc->buflen) &
2421 JME_TD_BUF_LEN_MASK);
2422 }
2423 /*
2424 * Only the first descriptor of a multi-descriptor
2425 * transmission is updated, so the driver has to skip the
2426 * entire chain of buffers for the transmitted frame. In
2427 * other words, the JME_TD_OWN bit is valid only in the
2428 * first descriptor of a multi-descriptor transmission.
2429 */
2430 for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
2431 sc->jme_rdata.jme_tx_ring[cons].flags = 0;
2432 JME_DESC_INC(cons, JME_TX_RING_CNT);
2433 }
2434
2435 /* Reclaim transferred mbufs. */
2436 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
2437 BUS_DMASYNC_POSTWRITE);
2438 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
2439
2440 KASSERT(txd->tx_m != NULL,
2441 ("%s: freeing NULL mbuf!\n", __func__));
2442 m_freem(txd->tx_m);
2443 txd->tx_m = NULL;
2444 sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
2445 KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
2446 ("%s: Active Tx desc counter was garbled\n", __func__));
2447 txd->tx_ndesc = 0;
2448 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2449 }
2450 sc->jme_cdata.jme_tx_cons = cons;
2451 /* Unarm the watchdog timer when there are no pending descriptors in the queue. */
2452 if (sc->jme_cdata.jme_tx_cnt == 0)
2453 sc->jme_watchdog_timer = 0;
2454
2455 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2456 sc->jme_cdata.jme_tx_ring_map,
2457 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2458 }
2459
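/*
 * Reinitialize an Rx descriptor in place, handing its existing
 * buffer back to the hardware; used when a frame is dropped or a
 * replacement buffer cannot be allocated.
 */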
2460 static __inline void
2461 jme_discard_rxbuf(struct jme_softc *sc, int cons)
2462 {
2463 struct jme_desc *desc;
2464
2465 desc = &sc->jme_rdata.jme_rx_ring[cons];
2466 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2467 desc->buflen = htole32(MCLBYTES);
2468 }
2469
2470 /* Receive a frame. */
2471 static void
2472 jme_rxeof(struct jme_softc *sc)
2473 {
2474 if_t ifp;
2475 struct jme_desc *desc;
2476 struct jme_rxdesc *rxd;
2477 struct mbuf *mp, *m;
2478 uint32_t flags, status;
2479 int cons, count, nsegs;
2480
2481 JME_LOCK_ASSERT(sc);
2482
2483 ifp = sc->jme_ifp;
2484
2485 cons = sc->jme_cdata.jme_rx_cons;
2486 desc = &sc->jme_rdata.jme_rx_ring[cons];
2487 flags = le32toh(desc->flags);
2488 status = le32toh(desc->buflen);
2489 nsegs = JME_RX_NSEGS(status);
2490 sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2491 if ((status & JME_RX_ERR_STAT) != 0) {
2492 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2493 jme_discard_rxbuf(sc, sc->jme_cdata.jme_rx_cons);
2494 #ifdef JME_SHOW_ERRORS
2495 device_printf(sc->jme_dev, "%s: receive error = 0x%b\n",
2496 __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
2497 #endif
2498 sc->jme_cdata.jme_rx_cons += nsegs;
2499 sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
2500 return;
2501 }
2502
2503 for (count = 0; count < nsegs; count++,
2504 JME_DESC_INC(cons, JME_RX_RING_CNT)) {
2505 rxd = &sc->jme_cdata.jme_rxdesc[cons];
2506 mp = rxd->rx_m;
2507 /* Add a new receive buffer to the ring. */
2508 if (jme_newbuf(sc, rxd) != 0) {
2509 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
2510 /* Reuse buffer. */
2511 for (; count < nsegs; count++) {
2512 jme_discard_rxbuf(sc, cons);
2513 JME_DESC_INC(cons, JME_RX_RING_CNT);
2514 }
2515 if (sc->jme_cdata.jme_rxhead != NULL) {
2516 m_freem(sc->jme_cdata.jme_rxhead);
2517 JME_RXCHAIN_RESET(sc);
2518 }
2519 break;
2520 }
2521
2522 /*
2523 * Assume we've received a full-sized frame.
2524 * The actual size is fixed up when we encounter the end
2525 * of a multi-segmented frame.
2526 */
2527 mp->m_len = MCLBYTES;
2528
2529 /* Chain received mbufs. */
2530 if (sc->jme_cdata.jme_rxhead == NULL) {
2531 sc->jme_cdata.jme_rxhead = mp;
2532 sc->jme_cdata.jme_rxtail = mp;
2533 } else {
2534 /*
2535 * The receive processor can handle a maximum frame
2536 * size of 65535 bytes.
2537 */
2538 mp->m_flags &= ~M_PKTHDR;
2539 sc->jme_cdata.jme_rxtail->m_next = mp;
2540 sc->jme_cdata.jme_rxtail = mp;
2541 }
2542
2543 if (count == nsegs - 1) {
2544 /* Last desc. for this frame. */
2545 m = sc->jme_cdata.jme_rxhead;
2546 m->m_flags |= M_PKTHDR;
2547 m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
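			/*
			 * Worked example (assuming 2048 byte clusters and
			 * 10 pad bytes): for a 5000 byte frame the hardware
			 * uses nsegs = 3 buffers, so the first mbuf holds
			 * 2048 - 10 = 2038 bytes, the middle one 2048, and
			 * the last 5000 - (2038 + 2048) = 914 bytes.
			 */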
2548 if (nsegs > 1) {
2549 /* Set first mbuf size. */
2550 m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2551 /* Set last mbuf size. */
2552 mp->m_len = sc->jme_cdata.jme_rxlen -
2553 ((MCLBYTES - JME_RX_PAD_BYTES) +
2554 (MCLBYTES * (nsegs - 2)));
2555 } else
2556 m->m_len = sc->jme_cdata.jme_rxlen;
2557 m->m_pkthdr.rcvif = ifp;
2558
2559 /*
2560 * Account for the 10 bytes of auto padding which are
2561 * used to align the IP header on a 32bit boundary.
2562 * Also note the CRC bytes are automatically removed
2563 * by the hardware.
2564 */
2565 m->m_data += JME_RX_PAD_BYTES;
2566
2567 /* Set checksum information. */
2568 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
2569 (flags & JME_RD_IPV4) != 0) {
2570 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2571 if ((flags & JME_RD_IPCSUM) != 0)
2572 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2573 if (((flags & JME_RD_MORE_FRAG) == 0) &&
2574 ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2575 (JME_RD_TCP | JME_RD_TCPCSUM) ||
2576 (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2577 (JME_RD_UDP | JME_RD_UDPCSUM))) {
2578 m->m_pkthdr.csum_flags |=
2579 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2580 m->m_pkthdr.csum_data = 0xffff;
2581 }
2582 }
2583
2584 /* Check for VLAN tagged packets. */
2585 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0 &&
2586 (flags & JME_RD_VLAN_TAG) != 0) {
2587 m->m_pkthdr.ether_vtag =
2588 flags & JME_RD_VLAN_MASK;
2589 m->m_flags |= M_VLANTAG;
2590 }
2591
2592 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2593 /* Pass it on. */
2594 JME_UNLOCK(sc);
2595 if_input(ifp, m);
2596 JME_LOCK(sc);
2597
2598 /* Reset mbuf chains. */
2599 JME_RXCHAIN_RESET(sc);
2600 }
2601 }
2602
2603 sc->jme_cdata.jme_rx_cons += nsegs;
2604 sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
2605 }
2606
2607 static int
2608 jme_rxintr(struct jme_softc *sc, int count)
2609 {
2610 struct jme_desc *desc;
2611 int nsegs, prog, pktlen;
2612
2613 bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2614 sc->jme_cdata.jme_rx_ring_map,
2615 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2616
2617 for (prog = 0; count > 0; prog++) {
2618 desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
2619 if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2620 break;
2621 if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2622 break;
2623 nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2624 /*
2625 * Check the number of segments against the received
2626 * byte count. A non-matching value would indicate that
2627 * the hardware is still trying to update the Rx
2628 * descriptors. I'm not sure whether this check is needed.
2629 */
2630 pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2631 if (nsegs != howmany(pktlen, MCLBYTES))
2632 break;
2633 prog++;
2634 /* Received a frame. */
2635 jme_rxeof(sc);
2636 count -= nsegs;
2637 }
2638
2639 if (prog > 0)
2640 bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2641 sc->jme_cdata.jme_rx_ring_map,
2642 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2643
2644 return (count > 0 ? 0 : EAGAIN);
2645 }
2646
2647 static void
2648 jme_tick(void *arg)
2649 {
2650 struct jme_softc *sc;
2651 struct mii_data *mii;
2652
2653 sc = (struct jme_softc *)arg;
2654
2655 JME_LOCK_ASSERT(sc);
2656
2657 mii = device_get_softc(sc->jme_miibus);
2658 mii_tick(mii);
2659 /*
2660 * Reclaim Tx buffers that have been completed. It's not
2661 * strictly needed here, but it releases allocated mbuf
2662 * chains faster and limits the maximum reclaim delay to
2663 * one second (the callout period).
2663 */
2664 jme_txeof(sc);
2665 jme_stats_update(sc);
2666 jme_watchdog(sc);
2667 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2668 }
2669
2670 static void
2671 jme_reset(struct jme_softc *sc)
2672 {
2673 uint32_t ghc, gpreg;
2674
2675 /* Stop receiver, transmitter. */
2676 jme_stop_rx(sc);
2677 jme_stop_tx(sc);
2678
2679 /* Reset controller. */
2680 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2681 CSR_READ_4(sc, JME_GHC);
2682 DELAY(10);
2683 /*
2684 * Workaround Rx FIFO overruns seen under certain conditions.
2685 * Explicitly synchronize the TX/RX clocks. The TX/RX clocks
2686 * should be enabled only after enabling the TX/RX MACs.
2687 */
2688 if ((sc->jme_flags & (JME_FLAG_TXCLK | JME_FLAG_RXCLK)) != 0) {
2689 /* Disable TX clock. */
2690 CSR_WRITE_4(sc, JME_GHC, GHC_RESET | GHC_TX_MAC_CLK_DIS);
2691 /* Disable RX clock. */
2692 gpreg = CSR_READ_4(sc, JME_GPREG1);
2693 CSR_WRITE_4(sc, JME_GPREG1, gpreg | GPREG1_RX_MAC_CLK_DIS);
2694 gpreg = CSR_READ_4(sc, JME_GPREG1);
2695 /* De-assert RESET but still disable TX clock. */
2696 CSR_WRITE_4(sc, JME_GHC, GHC_TX_MAC_CLK_DIS);
2697 ghc = CSR_READ_4(sc, JME_GHC);
2698
2699 /* Enable TX clock. */
2700 CSR_WRITE_4(sc, JME_GHC, ghc & ~GHC_TX_MAC_CLK_DIS);
2701 /* Enable RX clock. */
2702 CSR_WRITE_4(sc, JME_GPREG1, gpreg & ~GPREG1_RX_MAC_CLK_DIS);
2703 CSR_READ_4(sc, JME_GPREG1);
2704
2705 /* Disable TX/RX clock again. */
2706 CSR_WRITE_4(sc, JME_GHC, GHC_TX_MAC_CLK_DIS);
2707 CSR_WRITE_4(sc, JME_GPREG1, gpreg | GPREG1_RX_MAC_CLK_DIS);
2708 } else
2709 CSR_WRITE_4(sc, JME_GHC, 0);
2710 CSR_READ_4(sc, JME_GHC);
2711 DELAY(10);
2712 }
2713
2714 static void
2715 jme_init(void *xsc)
2716 {
2717 struct jme_softc *sc;
2718
2719 sc = (struct jme_softc *)xsc;
2720 JME_LOCK(sc);
2721 jme_init_locked(sc);
2722 JME_UNLOCK(sc);
2723 }
2724
2725 static void
2726 jme_init_locked(struct jme_softc *sc)
2727 {
2728 if_t ifp;
2729 struct mii_data *mii;
2730 bus_addr_t paddr;
2731 uint32_t reg;
2732 int error;
2733
2734 JME_LOCK_ASSERT(sc);
2735
2736 ifp = sc->jme_ifp;
2737 mii = device_get_softc(sc->jme_miibus);
2738
2739 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
2740 return;
2741 /*
2742 * Cancel any pending I/O.
2743 */
2744 jme_stop(sc);
2745
2746 /*
2747 * Reset the chip to a known state.
2748 */
2749 jme_reset(sc);
2750
2751 /* Init descriptors. */
2752 error = jme_init_rx_ring(sc);
2753 if (error != 0) {
2754 device_printf(sc->jme_dev,
2755 "%s: initialization failed: no memory for Rx buffers.\n",
2756 __func__);
2757 jme_stop(sc);
2758 return;
2759 }
2760 jme_init_tx_ring(sc);
2761 /* Initialize shadow status block. */
2762 jme_init_ssb(sc);
2763
2764 /* Reprogram the station address. */
2765 jme_set_macaddr(sc, if_getlladdr(sc->jme_ifp));
2766
2767 /*
2768 * Configure Tx queue.
2769 * Tx priority queue weight value : 0
2770 * Tx FIFO threshold for processing next packet : 16QW
2771 * Maximum Tx DMA length : 512
2772 * Allow Tx DMA burst.
2773 */
2774 sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2775 sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2776 sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2777 sc->jme_txcsr |= sc->jme_tx_dma_size;
2778 sc->jme_txcsr |= TXCSR_DMA_BURST;
2779 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2780
2781 /* Set Tx descriptor counter. */
2782 CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT);
2783
2784 /* Set Tx ring address to the hardware. */
2785 paddr = JME_TX_RING_ADDR(sc, 0);
2786 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2787 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2788
2789 /* Configure TxMAC parameters. */
2790 reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2791 reg |= TXMAC_THRESH_1_PKT;
2792 reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2793 CSR_WRITE_4(sc, JME_TXMAC, reg);
2794
2795 /*
2796 * Configure Rx queue.
2797 * FIFO full threshold for transmitting Tx pause packet : 128T
2798 * FIFO threshold for processing next packet : 128QW
2799 * Rx queue 0 select
2800 * Max Rx DMA length : 128
2801 * Rx descriptor retry : 32
2802 * Rx descriptor retry time gap : 256ns
2803 * Don't receive runt/bad frame.
2804 */
2805 sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2806 /*
2807 * Since the Rx FIFO size is 4K bytes, receiving frames larger
2808 * than 4K bytes will suffer from Rx FIFO overruns. So
2809 * decrease the FIFO threshold to reduce FIFO overruns for
2810 * frames larger than 4000 bytes.
2811 * For the best performance with standard MTU sized frames use
2812 * the maximum allowable FIFO threshold, 128QW. Note that this
2813 * does not hold for controllers with a full mask revision >= 2;
2814 * on those, 64QW and 128QW are not valid values.
2815 */
2816 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 2)
2817 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2818 else {
2819 if ((if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
2820 ETHER_CRC_LEN) > JME_RX_FIFO_SIZE)
2821 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2822 else
2823 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2824 }
2825 sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
2826 sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2827 sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2828 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
2829
2830 /* Set Rx descriptor counter. */
2831 CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT);
2832
2833 /* Set Rx ring address to the hardware. */
2834 paddr = JME_RX_RING_ADDR(sc, 0);
2835 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2836 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2837
2838 /* Clear receive filter. */
2839 CSR_WRITE_4(sc, JME_RXMAC, 0);
2840 /* Set up the receive filter. */
2841 jme_set_filter(sc);
2842 jme_set_vlan(sc);
2843
2844 /*
2845 * Disable all WOL bits as WOL can interfere with normal Rx
2846 * operation. Also clear the WOL detection status bits.
2847 */
2848 reg = CSR_READ_4(sc, JME_PMCS);
2849 reg &= ~PMCS_WOL_ENB_MASK;
2850 CSR_WRITE_4(sc, JME_PMCS, reg);
2851
2852 reg = CSR_READ_4(sc, JME_RXMAC);
2853 /*
2854 * Pad 10 bytes right before the received frame. This greatly
2855 * helps Rx performance on strict-alignment architectures as
2856 * the driver does not need to copy the frame to align the payload.
2857 */
2858 reg |= RXMAC_PAD_10BYTES;
2859 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
2860 reg |= RXMAC_CSUM_ENB;
2861 CSR_WRITE_4(sc, JME_RXMAC, reg);
2862
2863 /* Configure general purpose reg0 */
2864 reg = CSR_READ_4(sc, JME_GPREG0);
2865 reg &= ~GPREG0_PCC_UNIT_MASK;
2866 /* Set PCC timer resolution to micro-seconds unit. */
2867 reg |= GPREG0_PCC_UNIT_US;
2868 /*
2869 * Disable all shadow register posting as we have to read the
2870 * JME_INTR_STATUS register in jme_int_task. Also, it seems
2871 * hard to synchronize the interrupt status between hardware
2872 * and software with shadow posting due to the requirements
2873 * of bus_dmamap_sync(9).
2874 */
2875 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2876 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2877 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2878 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2879 /* Disable posting of DW0. */
2880 reg &= ~GPREG0_POST_DW0_ENB;
2881 /* Clear PME message. */
2882 reg &= ~GPREG0_PME_ENB;
2883 /* Set PHY address. */
2884 reg &= ~GPREG0_PHY_ADDR_MASK;
2885 reg |= sc->jme_phyaddr;
2886 CSR_WRITE_4(sc, JME_GPREG0, reg);
2887
2888 /* Configure Tx queue 0 packet completion coalescing. */
2889 reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
2890 PCCTX_COAL_TO_MASK;
2891 reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
2892 PCCTX_COAL_PKT_MASK;
2893 reg |= PCCTX_COAL_TXQ0;
2894 CSR_WRITE_4(sc, JME_PCCTX, reg);
2895
2896 /* Configure Rx queue 0 packet completion coalescing. */
2897 reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
2898 PCCRX_COAL_TO_MASK;
2899 reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
2900 PCCRX_COAL_PKT_MASK;
2901 CSR_WRITE_4(sc, JME_PCCRX0, reg);
2902
2903 /*
2904 * Configure PCD (Packet Completion Deferring). It seems PCD
2905 * generates an interrupt when the time interval between two
2906 * back-to-back incoming/outgoing packets is long enough for
2907 * its timer to count down to 0. The arrival of new packets
2908 * after the timer has started causes the PCD timer to restart.
2909 * Unfortunately, it's not clear how useful PCD is at the
2910 * moment, so just reuse the PCC parameters.
2911 */
2912 if ((sc->jme_flags & JME_FLAG_PCCPCD) != 0) {
2913 sc->jme_rx_pcd_to = sc->jme_rx_coal_to;
2914 if (sc->jme_rx_coal_to > PCDRX_TO_MAX)
2915 sc->jme_rx_pcd_to = PCDRX_TO_MAX;
2916 sc->jme_tx_pcd_to = sc->jme_tx_coal_to;
2917 if (sc->jme_tx_coal_to > PCDTX_TO_MAX)
2918 sc->jme_tx_pcd_to = PCDTX_TO_MAX;
2919 reg = sc->jme_rx_pcd_to << PCDRX0_TO_THROTTLE_SHIFT;
2920 reg |= sc->jme_rx_pcd_to << PCDRX0_TO_SHIFT;
2921 CSR_WRITE_4(sc, PCDRX_REG(0), reg);
2922 reg = sc->jme_tx_pcd_to << PCDTX_TO_THROTTLE_SHIFT;
2923 reg |= sc->jme_tx_pcd_to << PCDTX_TO_SHIFT;
2924 CSR_WRITE_4(sc, JME_PCDTX, reg);
2925 }
2926
2927 /* Configure shadow status block but don't enable posting. */
2928 paddr = sc->jme_rdata.jme_ssb_block_paddr;
2929 CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2930 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2931
2932 /* Disable Timer 1 and Timer 2. */
2933 CSR_WRITE_4(sc, JME_TIMER1, 0);
2934 CSR_WRITE_4(sc, JME_TIMER2, 0);
2935
2936 /* Configure retry transmit period, retry limit value. */
2937 CSR_WRITE_4(sc, JME_TXTRHD,
2938 ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2939 TXTRHD_RT_PERIOD_MASK) |
2940 ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2941 TXTRHD_RT_LIMIT_MASK));
2942
2943 /* Disable RSS. */
2944 CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
2945
2946 /* Initialize the interrupt mask. */
2947 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2948 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2949
2950 /*
2951 * Enabling the Tx/Rx DMA engines and Rx queue processing is
2952 * done after detection of a valid link in jme_link_task.
2953 */
2954
2955 sc->jme_flags &= ~JME_FLAG_LINK;
2956 /* Set the current media. */
2957 mii_mediachg(mii);
2958
2959 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2960
2961 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
2962 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2963 }
2964
2965 static void
2966 jme_stop(struct jme_softc *sc)
2967 {
2968 if_t ifp;
2969 struct jme_txdesc *txd;
2970 struct jme_rxdesc *rxd;
2971 int i;
2972
2973 JME_LOCK_ASSERT(sc);
2974 /*
2975 * Mark the interface down and cancel the watchdog timer.
2976 */
2977 ifp = sc->jme_ifp;
2978 if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
2979 sc->jme_flags &= ~JME_FLAG_LINK;
2980 callout_stop(&sc->jme_tick_ch);
2981 sc->jme_watchdog_timer = 0;
2982
2983 /*
2984 * Disable interrupts.
2985 */
2986 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2987 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2988
2989 /* Disable updating shadow status block. */
2990 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2991 CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2992
2993 /* Stop receiver, transmitter. */
2994 jme_stop_rx(sc);
2995 jme_stop_tx(sc);
2996
2997 /* Reclaim Rx/Tx buffers that have been completed. */
2998 jme_rxintr(sc, JME_RX_RING_CNT);
2999 if (sc->jme_cdata.jme_rxhead != NULL)
3000 m_freem(sc->jme_cdata.jme_rxhead);
3001 JME_RXCHAIN_RESET(sc);
3002 jme_txeof(sc);
3003 /*
3004 * Free RX and TX mbufs still in the queues.
3005 */
3006 for (i = 0; i < JME_RX_RING_CNT; i++) {
3007 rxd = &sc->jme_cdata.jme_rxdesc[i];
3008 if (rxd->rx_m != NULL) {
3009 bus_dmamap_sync(sc->jme_cdata.jme_rx_tag,
3010 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3011 bus_dmamap_unload(sc->jme_cdata.jme_rx_tag,
3012 rxd->rx_dmamap);
3013 m_freem(rxd->rx_m);
3014 rxd->rx_m = NULL;
3015 }
3016 }
3017 for (i = 0; i < JME_TX_RING_CNT; i++) {
3018 txd = &sc->jme_cdata.jme_txdesc[i];
3019 if (txd->tx_m != NULL) {
3020 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag,
3021 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3022 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
3023 txd->tx_dmamap);
3024 m_freem(txd->tx_m);
3025 txd->tx_m = NULL;
3026 txd->tx_ndesc = 0;
3027 }
3028 }
3029 jme_stats_update(sc);
3030 jme_stats_save(sc);
3031 }
3032
3033 static void
3034 jme_stop_tx(struct jme_softc *sc)
3035 {
3036 uint32_t reg;
3037 int i;
3038
3039 reg = CSR_READ_4(sc, JME_TXCSR);
3040 if ((reg & TXCSR_TX_ENB) == 0)
3041 return;
3042 reg &= ~TXCSR_TX_ENB;
3043 CSR_WRITE_4(sc, JME_TXCSR, reg);
3044 for (i = JME_TIMEOUT; i > 0; i--) {
3045 DELAY(1);
3046 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
3047 break;
3048 }
3049 if (i == 0)
3050 device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
3051 }
3052
3053 static void
3054 jme_stop_rx(struct jme_softc *sc)
3055 {
3056 uint32_t reg;
3057 int i;
3058
3059 reg = CSR_READ_4(sc, JME_RXCSR);
3060 if ((reg & RXCSR_RX_ENB) == 0)
3061 return;
3062 reg &= ~RXCSR_RX_ENB;
3063 CSR_WRITE_4(sc, JME_RXCSR, reg);
3064 for (i = JME_TIMEOUT; i > 0; i--) {
3065 DELAY(1);
3066 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
3067 break;
3068 }
3069 if (i == 0)
3070 device_printf(sc->jme_dev, "stopping receiver timeout!\n");
3071 }
3072
3073 static void
3074 jme_init_tx_ring(struct jme_softc *sc)
3075 {
3076 struct jme_ring_data *rd;
3077 struct jme_txdesc *txd;
3078 int i;
3079
3080 sc->jme_cdata.jme_tx_prod = 0;
3081 sc->jme_cdata.jme_tx_cons = 0;
3082 sc->jme_cdata.jme_tx_cnt = 0;
3083
3084 rd = &sc->jme_rdata;
3085 bzero(rd->jme_tx_ring, JME_TX_RING_SIZE);
3086 for (i = 0; i < JME_TX_RING_CNT; i++) {
3087 txd = &sc->jme_cdata.jme_txdesc[i];
3088 txd->tx_m = NULL;
3089 txd->tx_desc = &rd->jme_tx_ring[i];
3090 txd->tx_ndesc = 0;
3091 }
3092
3093 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
3094 sc->jme_cdata.jme_tx_ring_map,
3095 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3096 }
3097
3098 static void
3099 jme_init_ssb(struct jme_softc *sc)
3100 {
3101 struct jme_ring_data *rd;
3102
3103 rd = &sc->jme_rdata;
3104 bzero(rd->jme_ssb_block, JME_SSB_SIZE);
3105 bus_dmamap_sync(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map,
3106 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3107 }
3108
3109 static int
3110 jme_init_rx_ring(struct jme_softc *sc)
3111 {
3112 struct jme_ring_data *rd;
3113 struct jme_rxdesc *rxd;
3114 int i;
3115
3116 sc->jme_cdata.jme_rx_cons = 0;
3117 JME_RXCHAIN_RESET(sc);
3118 sc->jme_morework = 0;
3119
3120 rd = &sc->jme_rdata;
3121 bzero(rd->jme_rx_ring, JME_RX_RING_SIZE);
3122 for (i = 0; i < JME_RX_RING_CNT; i++) {
3123 rxd = &sc->jme_cdata.jme_rxdesc[i];
3124 rxd->rx_m = NULL;
3125 rxd->rx_desc = &rd->jme_rx_ring[i];
3126 if (jme_newbuf(sc, rxd) != 0)
3127 return (ENOBUFS);
3128 }
3129
3130 bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
3131 sc->jme_cdata.jme_rx_ring_map,
3132 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3133
3134 return (0);
3135 }
3136
3137 static int
3138 jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd)
3139 {
3140 struct jme_desc *desc;
3141 struct mbuf *m;
3142 bus_dma_segment_t segs[1];
3143 bus_dmamap_t map;
3144 int nsegs;
3145
3146 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3147 if (m == NULL)
3148 return (ENOBUFS);
3149 /*
3150 * The JMC250 has a 64bit boundary alignment limitation, so
3151 * jme(4) takes advantage of the hardware's 10 byte padding
3152 * feature in order not to copy the entire frame to align the
3153 * IP header on a 32bit boundary.
3154 */
3155 m->m_len = m->m_pkthdr.len = MCLBYTES;
3156
3157 if (bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_rx_tag,
3158 sc->jme_cdata.jme_rx_sparemap, m, segs, &nsegs, 0) != 0) {
3159 m_freem(m);
3160 return (ENOBUFS);
3161 }
3162 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
3163
3164 if (rxd->rx_m != NULL) {
3165 bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
3166 BUS_DMASYNC_POSTREAD);
3167 bus_dmamap_unload(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap);
3168 }
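	/*
	 * Swap the just-loaded spare map into the descriptor and keep
	 * the old map as the new spare; a failed load above therefore
	 * never disturbs the buffer the ring entry currently owns.
	 */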
3169 map = rxd->rx_dmamap;
3170 rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
3171 sc->jme_cdata.jme_rx_sparemap = map;
3172 bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
3173 BUS_DMASYNC_PREREAD);
3174 rxd->rx_m = m;
3175
3176 desc = rxd->rx_desc;
3177 desc->buflen = htole32(segs[0].ds_len);
3178 desc->addr_lo = htole32(JME_ADDR_LO(segs[0].ds_addr));
3179 desc->addr_hi = htole32(JME_ADDR_HI(segs[0].ds_addr));
3180 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
3181
3182 return (0);
3183 }
3184
3185 static void
3186 jme_set_vlan(struct jme_softc *sc)
3187 {
3188 if_t ifp;
3189 uint32_t reg;
3190
3191 JME_LOCK_ASSERT(sc);
3192
3193 ifp = sc->jme_ifp;
3194 reg = CSR_READ_4(sc, JME_RXMAC);
3195 reg &= ~RXMAC_VLAN_ENB;
3196 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
3197 reg |= RXMAC_VLAN_ENB;
3198 CSR_WRITE_4(sc, JME_RXMAC, reg);
3199 }
3200
3201 static u_int
3202 jme_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
3203 {
3204 uint32_t crc, *mchash = arg;
3205
3206 crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
3207
3208 /* Just want the 6 least significant bits. */
3209 crc &= 0x3f;
3210
3211 /* Set the corresponding bit in the hash table. */
3212 mchash[crc >> 5] |= 1 << (crc & 0x1f);
3213
3214 return (1);
3215 }
3216
3217 static void
3218 jme_set_filter(struct jme_softc *sc)
3219 {
3220 if_t ifp;
3221 uint32_t mchash[2];
3222 uint32_t rxcfg;
3223
3224 JME_LOCK_ASSERT(sc);
3225
3226 ifp = sc->jme_ifp;
3227
3228 rxcfg = CSR_READ_4(sc, JME_RXMAC);
3229 rxcfg &= ~ (RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
3230 RXMAC_ALLMULTI);
3231 /* Always accept frames destined to our station address. */
3232 rxcfg |= RXMAC_UNICAST;
3233 if ((if_getflags(ifp) & IFF_BROADCAST) != 0)
3234 rxcfg |= RXMAC_BROADCAST;
3235 if ((if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
3236 if ((if_getflags(ifp) & IFF_PROMISC) != 0)
3237 rxcfg |= RXMAC_PROMISC;
3238 if ((if_getflags(ifp) & IFF_ALLMULTI) != 0)
3239 rxcfg |= RXMAC_ALLMULTI;
3240 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
3241 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
3242 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3243 return;
3244 }
3245
3246 /*
3247 * Set up the multicast address filter by passing all multicast
3248 * addresses through a CRC generator, and then using the low-order
3249 * 6 bits as an index into the 64 bit multicast hash table. The
3250 * high-order bit of the index selects the register, while the
3251 * low-order five bits select the bit within the register.
3252 */
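	/*
	 * Example (sketch): an address whose big-endian CRC32 is
	 * 0x1a2b3c4d keeps the low 6 bits, 0x0d, so jme_hash_maddr()
	 * sets bit 13 of MAR0 (mchash[0] |= 1 << 13).
	 */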
3253 rxcfg |= RXMAC_MULTICAST;
3254 bzero(mchash, sizeof(mchash));
3255 if_foreach_llmaddr(ifp, jme_hash_maddr, &mchash);
3256
3257 CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
3258 CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
3259 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3260 }
3261
3262 static void
3263 jme_stats_clear(struct jme_softc *sc)
3264 {
3265
3266 JME_LOCK_ASSERT(sc);
3267
3268 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
3269 return;
3270
3271 /* Disable and clear counters. */
3272 CSR_WRITE_4(sc, JME_STATCSR, 0xFFFFFFFF);
3273 /* Activate hw counters. */
3274 CSR_WRITE_4(sc, JME_STATCSR, 0);
3275 CSR_READ_4(sc, JME_STATCSR);
3276 bzero(&sc->jme_stats, sizeof(struct jme_hw_stats));
3277 }
3278
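/*
 * Snapshot the accumulated counters before the hardware MIB block is
 * cleared (e.g. across a stop cycle) so that jme_stats_update() can
 * keep reporting running totals.
 */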
3279 static void
3280 jme_stats_save(struct jme_softc *sc)
3281 {
3282
3283 JME_LOCK_ASSERT(sc);
3284
3285 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
3286 return;
3287 /* Save current counters. */
3288 bcopy(&sc->jme_stats, &sc->jme_ostats, sizeof(struct jme_hw_stats));
3289 /* Disable and clear counters. */
3290 CSR_WRITE_4(sc, JME_STATCSR, 0xFFFFFFFF);
3291 }
3292
3293 static void
3294 jme_stats_update(struct jme_softc *sc)
3295 {
3296 struct jme_hw_stats *stat, *ostat;
3297 uint32_t reg;
3298
3299 JME_LOCK_ASSERT(sc);
3300
3301 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
3302 return;
3303 stat = &sc->jme_stats;
3304 ostat = &sc->jme_ostats;
3305 stat->tx_good_frames = CSR_READ_4(sc, JME_STAT_TXGOOD);
3306 stat->rx_good_frames = CSR_READ_4(sc, JME_STAT_RXGOOD);
3307 reg = CSR_READ_4(sc, JME_STAT_CRCMII);
3308 stat->rx_crc_errs = (reg & STAT_RX_CRC_ERR_MASK) >>
3309 STAT_RX_CRC_ERR_SHIFT;
3310 stat->rx_mii_errs = (reg & STAT_RX_MII_ERR_MASK) >>
3311 STAT_RX_MII_ERR_SHIFT;
3312 reg = CSR_READ_4(sc, JME_STAT_RXERR);
3313 stat->rx_fifo_oflows = (reg & STAT_RXERR_OFLOW_MASK) >>
3314 STAT_RXERR_OFLOW_SHIFT;
3315 stat->rx_desc_empty = (reg & STAT_RXERR_MPTY_MASK) >>
3316 STAT_RXERR_MPTY_SHIFT;
3317 reg = CSR_READ_4(sc, JME_STAT_FAIL);
3318 stat->rx_bad_frames = (reg & STAT_FAIL_RX_MASK) >> STAT_FAIL_RX_SHIFT;
3319 stat->tx_bad_frames = (reg & STAT_FAIL_TX_MASK) >> STAT_FAIL_TX_SHIFT;
3320
3321 /* Account for previous counters. */
3322 stat->rx_good_frames += ostat->rx_good_frames;
3323 stat->rx_crc_errs += ostat->rx_crc_errs;
3324 stat->rx_mii_errs += ostat->rx_mii_errs;
3325 stat->rx_fifo_oflows += ostat->rx_fifo_oflows;
3326 stat->rx_desc_empty += ostat->rx_desc_empty;
3327 stat->rx_bad_frames += ostat->rx_bad_frames;
3328 stat->tx_good_frames += ostat->tx_good_frames;
3329 stat->tx_bad_frames += ostat->tx_bad_frames;
3330 }
3331
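/*
 * Power down the internal PHY; on controllers with a full mask
 * revision >= 5 the PHY power-down register and the PE1 PCI config
 * register are programmed as well.
 */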
3332 static void
3333 jme_phy_down(struct jme_softc *sc)
3334 {
3335 uint32_t reg;
3336
3337 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, BMCR_PDOWN);
3338 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5) {
3339 reg = CSR_READ_4(sc, JME_PHYPOWDN);
3340 reg |= 0x0000000F;
3341 CSR_WRITE_4(sc, JME_PHYPOWDN, reg);
3342 reg = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4);
3343 reg &= ~PE1_GIGA_PDOWN_MASK;
3344 reg |= PE1_GIGA_PDOWN_D3;
3345 pci_write_config(sc->jme_dev, JME_PCI_PE1, reg, 4);
3346 }
3347 }
3348
3349 static void
3350 jme_phy_up(struct jme_softc *sc)
3351 {
3352 uint32_t reg;
3353 uint16_t bmcr;
3354
3355 bmcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR);
3356 bmcr &= ~BMCR_PDOWN;
3357 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, bmcr);
3358 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5) {
3359 reg = CSR_READ_4(sc, JME_PHYPOWDN);
3360 reg &= ~0x0000000F;
3361 CSR_WRITE_4(sc, JME_PHYPOWDN, reg);
3362 reg = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4);
3363 reg &= ~PE1_GIGA_PDOWN_MASK;
3364 reg |= PE1_GIGA_PDOWN_DIS;
3365 pci_write_config(sc->jme_dev, JME_PCI_PE1, reg, 4);
3366 }
3367 }
3368
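/*
 * Common clamp helper for the hw.jme.* coalescing and processing
 * limit sysctls below; values outside [low, high] are rejected with
 * EINVAL.
 */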
3369 static int
3370 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3371 {
3372 int error, value;
3373
3374 if (arg1 == NULL)
3375 return (EINVAL);
3376 value = *(int *)arg1;
3377 error = sysctl_handle_int(oidp, &value, 0, req);
3378 if (error || req->newptr == NULL)
3379 return (error);
3380 if (value < low || value > high)
3381 return (EINVAL);
3382 *(int *)arg1 = value;
3383
3384 return (0);
3385 }
3386
3387 static int
3388 sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS)
3389 {
3390 return (sysctl_int_range(oidp, arg1, arg2, req,
3391 PCCTX_COAL_TO_MIN, PCCTX_COAL_TO_MAX));
3392 }
3393
3394 static int
3395 sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
3396 {
3397 return (sysctl_int_range(oidp, arg1, arg2, req,
3398 PCCTX_COAL_PKT_MIN, PCCTX_COAL_PKT_MAX));
3399 }
3400
3401 static int
3402 sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS)
3403 {
3404 return (sysctl_int_range(oidp, arg1, arg2, req,
3405 PCCRX_COAL_TO_MIN, PCCRX_COAL_TO_MAX));
3406 }
3407
3408 static int
3409 sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
3410 {
3411 return (sysctl_int_range(oidp, arg1, arg2, req,
3412 PCCRX_COAL_PKT_MIN, PCCRX_COAL_PKT_MAX));
3413 }
3414
3415 static int
3416 sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS)
3417 {
3418 return (sysctl_int_range(oidp, arg1, arg2, req,
3419 JME_PROC_MIN, JME_PROC_MAX));
3420 }
3421