/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>

#include <dev/jme/if_jmereg.h>
#include <dev/jme/if_jmevar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/* Define the following to disable printing Rx errors. */
#undef	JME_SHOW_ERRORS

#define	JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

MODULE_DEPEND(jme, pci, 1, 1, 1);
MODULE_DEPEND(jme, ether, 1, 1, 1);
MODULE_DEPEND(jme, miibus, 1, 1, 1);

/* Tunables. */
static int msi_disable = 0;
static int msix_disable = 0;
TUNABLE_INT("hw.jme.msi_disable", &msi_disable);
TUNABLE_INT("hw.jme.msix_disable", &msix_disable);
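
/*
 * These are loader tunables, so the interrupt mode can be forced from
 * /boot/loader.conf before the driver attaches, e.g.:
 *
 *	hw.jme.msi_disable=1
 *	hw.jme.msix_disable=1
 */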

/*
 * Devices supported by this driver.
 */
static struct jme_dev {
	uint16_t	jme_vendorid;
	uint16_t	jme_deviceid;
	const char	*jme_name;
} jme_devs[] = {
	{ VENDORID_JMICRON, DEVICEID_JMC250,
	    "JMicron Inc, JMC25x Gigabit Ethernet" },
	{ VENDORID_JMICRON, DEVICEID_JMC260,
	    "JMicron Inc, JMC26x Fast Ethernet" },
};

static int jme_miibus_readreg(device_t, int, int);
static int jme_miibus_writereg(device_t, int, int, int);
static void jme_miibus_statchg(device_t);
static void jme_mediastatus(struct ifnet *, struct ifmediareq *);
static int jme_mediachange(struct ifnet *);
static int jme_probe(device_t);
static int jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
static int jme_eeprom_macaddr(struct jme_softc *);
static int jme_efuse_macaddr(struct jme_softc *);
static void jme_reg_macaddr(struct jme_softc *);
static void jme_set_macaddr(struct jme_softc *, uint8_t *);
static void jme_map_intr_vector(struct jme_softc *);
static int jme_attach(device_t);
static int jme_detach(device_t);
static void jme_sysctl_node(struct jme_softc *);
static void jme_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int jme_dma_alloc(struct jme_softc *);
static void jme_dma_free(struct jme_softc *);
static int jme_shutdown(device_t);
static void jme_setlinkspeed(struct jme_softc *);
static void jme_setwol(struct jme_softc *);
static int jme_suspend(device_t);
static int jme_resume(device_t);
static int jme_encap(struct jme_softc *, struct mbuf **);
static void jme_tx_task(void *, int);
static void jme_start(struct ifnet *);
static void jme_watchdog(struct jme_softc *);
static int jme_ioctl(struct ifnet *, u_long, caddr_t);
static void jme_mac_config(struct jme_softc *);
static void jme_link_task(void *, int);
static int jme_intr(void *);
static void jme_int_task(void *, int);
static void jme_txeof(struct jme_softc *);
static __inline void jme_discard_rxbuf(struct jme_softc *, int);
static void jme_rxeof(struct jme_softc *);
static int jme_rxintr(struct jme_softc *, int);
static void jme_tick(void *);
static void jme_reset(struct jme_softc *);
static void jme_init(void *);
static void jme_init_locked(struct jme_softc *);
static void jme_stop(struct jme_softc *);
static void jme_stop_tx(struct jme_softc *);
static void jme_stop_rx(struct jme_softc *);
static int jme_init_rx_ring(struct jme_softc *);
static void jme_init_tx_ring(struct jme_softc *);
static void jme_init_ssb(struct jme_softc *);
static int jme_newbuf(struct jme_softc *, struct jme_rxdesc *);
static void jme_set_vlan(struct jme_softc *);
static void jme_set_filter(struct jme_softc *);
static void jme_stats_clear(struct jme_softc *);
static void jme_stats_save(struct jme_softc *);
static void jme_stats_update(struct jme_softc *);
static void jme_phy_down(struct jme_softc *);
static void jme_phy_up(struct jme_softc *);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS);
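
/*
 * Standard newbus glue follows: the device_* methods implement the bus
 * interface used by the PCI bus code, while the miibus_* methods let
 * the miibus child call back into this driver for PHY register access
 * and link state change notification.
 */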
static device_method_t jme_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		jme_probe),
	DEVMETHOD(device_attach,	jme_attach),
	DEVMETHOD(device_detach,	jme_detach),
	DEVMETHOD(device_shutdown,	jme_shutdown),
	DEVMETHOD(device_suspend,	jme_suspend),
	DEVMETHOD(device_resume,	jme_resume),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),

	{ NULL, NULL }
};

static driver_t jme_driver = {
	"jme",
	jme_methods,
	sizeof(struct jme_softc)
};

static devclass_t jme_devclass;

DRIVER_MODULE(jme, pci, jme_driver, jme_devclass, 0, 0);
DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, 0, 0);

static struct resource_spec jme_res_spec_mem[] = {
	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
	{ -1,			0,		0 }
};

static struct resource_spec jme_irq_spec_legacy[] = {
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};

static struct resource_spec jme_irq_spec_msi[] = {
	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
	{ -1,			0,		0 }
};

/*
 * Read a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_readreg(device_t dev, int phy, int reg)
{
	struct jme_softc *sc;
	uint32_t val;
	int i;

	sc = device_get_softc(dev);

	/* For FPGA version, PHY address 0 should be ignored. */
	if ((sc->jme_flags & JME_FLAG_FPGA) != 0 && phy == 0)
		return (0);

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "phy read timeout : %d\n", reg);
		return (0);
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}

/*
 * Write a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct jme_softc *sc;
	int i;

	sc = device_get_softc(dev);

	/* For FPGA version, PHY address 0 should be ignored. */
	if ((sc->jme_flags & JME_FLAG_FPGA) != 0 && phy == 0)
		return (0);

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}

	if (i == 0)
		device_printf(sc->jme_dev, "phy write timeout : %d\n", reg);

	return (0);
}

/*
 * Callback from MII layer when media changes.
 */
static void
jme_miibus_statchg(device_t dev)
{
	struct jme_softc *sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc->jme_link_task);
}

/*
 * Get the current interface media status.
 */
static void
jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct jme_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	JME_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		JME_UNLOCK(sc);
		return;
	}
	mii = device_get_softc(sc->jme_miibus);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
	JME_UNLOCK(sc);
}
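
/*
 * jme_mediachange() and jme_mediastatus() are the ifmedia hooks handed
 * to mii_attach() in jme_attach().  Note that when more than one PHY
 * instance is attached, every PHY is reset before the media change is
 * propagated with mii_mediachg().
 */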
/*
 * Set hardware to newly-selected media.
 */
static int
jme_mediachange(struct ifnet *ifp)
{
	struct jme_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	JME_LOCK(sc);
	mii = device_get_softc(sc->jme_miibus);
	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);
	JME_UNLOCK(sc);

	return (error);
}

static int
jme_probe(device_t dev)
{
	struct jme_dev *sp;
	int i;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	sp = jme_devs;
	for (i = 0; i < sizeof(jme_devs) / sizeof(jme_devs[0]);
	    i++, sp++) {
		if (vendor == sp->jme_vendorid &&
		    devid == sp->jme_deviceid) {
			device_set_desc(dev, sp->jme_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	*val = 0;
	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
		return (ETIMEDOUT);
	}

	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
		return (ETIMEDOUT);
	}

	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}

static int
jme_eeprom_macaddr(struct jme_softc *sc)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN) {
		bcopy(eaddr, sc->jme_eaddr, ETHER_ADDR_LEN);
		return (0);
	}

	return (ENOENT);
}
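
/*
 * To summarize jme_attach()'s use of these helpers: eFuse-equipped
 * parts (chip rev >= 5) run jme_efuse_macaddr() below and then read
 * the autoloaded result back with jme_reg_macaddr(), while older parts
 * try the EEPROM descriptor walk above and fall back to whatever the
 * PAR0/PAR1 registers already hold.
 */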
static int
jme_efuse_macaddr(struct jme_softc *sc)
{
	uint32_t reg;
	int i;

	reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4);
	if ((reg & (EFUSE_CTL1_AUTOLOAD_ERR | EFUSE_CTL1_AUTOLAOD_DONE)) !=
	    EFUSE_CTL1_AUTOLAOD_DONE)
		return (ENOENT);
	/* Reset eFuse controller. */
	reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL2, 4);
	reg |= EFUSE_CTL2_RESET;
	pci_write_config(sc->jme_dev, JME_EFUSE_CTL2, reg, 4);
	reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL2, 4);
	reg &= ~EFUSE_CTL2_RESET;
	pci_write_config(sc->jme_dev, JME_EFUSE_CTL2, reg, 4);

	/* Have eFuse reload station address to MAC controller. */
	reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4);
	reg &= ~EFUSE_CTL1_CMD_MASK;
	reg |= EFUSE_CTL1_CMD_AUTOLOAD | EFUSE_CTL1_EXECUTE;
	pci_write_config(sc->jme_dev, JME_EFUSE_CTL1, reg, 4);

	/*
	 * Verify completion of the eFuse autoload command.  It should
	 * be completed within 108us.
	 */
	DELAY(110);
	for (i = 10; i > 0; i--) {
		reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4);
		if ((reg & (EFUSE_CTL1_AUTOLOAD_ERR |
		    EFUSE_CTL1_AUTOLAOD_DONE)) != EFUSE_CTL1_AUTOLAOD_DONE) {
			DELAY(20);
			continue;
		}
		if ((reg & EFUSE_CTL1_EXECUTE) == 0)
			break;
		/* Station address loading is still in progress. */
		DELAY(20);
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "eFuse autoload timed out.\n");
		return (ETIMEDOUT);
	}

	return (0);
}

static void
jme_reg_macaddr(struct jme_softc *sc)
{
	uint32_t par0, par1;

	/* Read station address. */
	par0 = CSR_READ_4(sc, JME_PAR0);
	par1 = CSR_READ_4(sc, JME_PAR1);
	par1 &= 0xFFFF;
	if ((par0 == 0 && par1 == 0) ||
	    (par0 == 0xFFFFFFFF && par1 == 0xFFFF)) {
		device_printf(sc->jme_dev,
		    "Failed to retrieve Ethernet address.\n");
	} else {
		/*
		 * For controllers that use eFuse, the station address
		 * could also be extracted from the JME_PCI_PAR0 and
		 * JME_PCI_PAR1 registers in PCI configuration space.
		 * Each of those registers holds exactly half of the
		 * station address (24 bits), so use the JME_PAR0 and
		 * JME_PAR1 registers instead.
		 */
		sc->jme_eaddr[0] = (par0 >> 0) & 0xFF;
		sc->jme_eaddr[1] = (par0 >> 8) & 0xFF;
		sc->jme_eaddr[2] = (par0 >> 16) & 0xFF;
		sc->jme_eaddr[3] = (par0 >> 24) & 0xFF;
		sc->jme_eaddr[4] = (par1 >> 0) & 0xFF;
		sc->jme_eaddr[5] = (par1 >> 8) & 0xFF;
	}
}
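
/*
 * The station address is stored little-endian across the two
 * registers: PAR0 holds bytes 0-3 and the low 16 bits of PAR1 hold
 * bytes 4-5, mirroring the write path in jme_set_macaddr() below.
 */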
static void
jme_set_macaddr(struct jme_softc *sc, uint8_t *eaddr)
{
	uint32_t val;
	int i;

	if ((sc->jme_flags & JME_FLAG_EFUSE) != 0) {
		/*
		 * Avoid reprogramming the station address if it is the
		 * same as the previous one.  Note that a reprogrammed
		 * station address is permanent, as if it had been
		 * written to EEPROM.  So if the station address was
		 * changed by the administrator, it is possible to lose
		 * the factory-configured address if the driver fails to
		 * restore it (e.g. on reboot or system crash).
		 */
		if (bcmp(eaddr, sc->jme_eaddr, ETHER_ADDR_LEN) != 0) {
			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				val = JME_EFUSE_EEPROM_FUNC0 <<
				    JME_EFUSE_EEPROM_FUNC_SHIFT;
				val |= JME_EFUSE_EEPROM_PAGE_BAR1 <<
				    JME_EFUSE_EEPROM_PAGE_SHIFT;
				val |= (JME_PAR0 + i) <<
				    JME_EFUSE_EEPROM_ADDR_SHIFT;
				val |= eaddr[i] << JME_EFUSE_EEPROM_DATA_SHIFT;
				pci_write_config(sc->jme_dev, JME_EFUSE_EEPROM,
				    val | JME_EFUSE_EEPROM_WRITE, 4);
			}
		}
	} else {
		CSR_WRITE_4(sc, JME_PAR0,
		    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
		CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
	}
}

static void
jme_map_intr_vector(struct jme_softc *sc)
{
	uint32_t map[MSINUM_NUM_INTR_SOURCE / JME_MSI_MESSAGES];

	bzero(map, sizeof(map));

	/* Map Tx interrupt sources to MSI/MSI-X vector 2. */
	map[MSINUM_REG_INDEX(N_INTR_TXQ0_COMP)] =
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ0_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ1_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ1_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ2_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ2_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ3_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ3_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ4_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ4_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ5_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ5_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ6_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ6_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ7_COMP)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ7_COMP);
	map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL);
	map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL_TO)] |=
	    MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL_TO);

	/* Map Rx interrupt sources to MSI/MSI-X vector 1. */
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COMP)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COMP)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COMP)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COMP)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COMP);
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_DESC_EMPTY)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_DESC_EMPTY)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_DESC_EMPTY)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_DESC_EMPTY)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_DESC_EMPTY);
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL);
	map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL_TO)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL_TO);
	map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL_TO)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL_TO);
	map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL_TO)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL_TO);
	map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL_TO)] =
	    MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL_TO);
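
	/*
	 * Judging by how the macros above are used, each 32-bit MSINUM
	 * register packs the vector assignments for a group of
	 * interrupt sources: MSINUM_REG_INDEX() selects the register
	 * word for a source and MSINUM_INTR_SOURCE() places the vector
	 * number in that source's bit field.  Sources left at zero
	 * after the bzero() above therefore route to vector 0.
	 */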
	/* Map all other interrupt sources to MSI/MSI-X vector 0. */
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 0, map[0]);
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 1, map[1]);
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 2, map[2]);
	CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 3, map[3]);
}

static int
jme_attach(device_t dev)
{
	struct jme_softc *sc;
	struct ifnet *ifp;
	struct mii_softc *miisc;
	struct mii_data *mii;
	uint32_t reg;
	uint16_t burst;
	int error, i, mii_flags, msic, msixc, pmc;

	error = 0;
	sc = device_get_softc(dev);
	sc->jme_dev = dev;

	mtx_init(&sc->jme_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->jme_tick_ch, &sc->jme_mtx, 0);
	TASK_INIT(&sc->jme_int_task, 0, jme_int_task, sc);
	TASK_INIT(&sc->jme_link_task, 0, jme_link_task, sc);

	/*
	 * Map the device.  The JMC250 supports both memory-mapped and
	 * I/O register space access.  Because I/O register access
	 * would require different BARs for different registers, it is
	 * a waste of time to use it; the JMC250 maps the entire
	 * register space with a single 16K memory BAR.
	 */
	pci_enable_busmaster(dev);
	sc->jme_res_spec = jme_res_spec_mem;
	sc->jme_irq_spec = jme_irq_spec_legacy;
	error = bus_alloc_resources(dev, sc->jme_res_spec, sc->jme_res);
	if (error != 0) {
		device_printf(dev, "cannot allocate memory resources.\n");
		goto fail;
	}

	/* Allocate IRQ resources. */
	msixc = pci_msix_count(dev);
	msic = pci_msi_count(dev);
	if (bootverbose) {
		device_printf(dev, "MSIX count : %d\n", msixc);
		device_printf(dev, "MSI count : %d\n", msic);
	}

	/* Use 1 MSI/MSI-X message. */
	if (msixc > 1)
		msixc = 1;
	if (msic > 1)
		msic = 1;
	/* Prefer MSI-X over MSI. */
	if (msix_disable == 0 || msi_disable == 0) {
		if (msix_disable == 0 && msixc > 0 &&
		    pci_alloc_msix(dev, &msixc) == 0) {
			if (msixc == 1) {
				device_printf(dev, "Using %d MSIX messages.\n",
				    msixc);
				sc->jme_flags |= JME_FLAG_MSIX;
				sc->jme_irq_spec = jme_irq_spec_msi;
			} else
				pci_release_msi(dev);
		}
		if (msi_disable == 0 && (sc->jme_flags & JME_FLAG_MSIX) == 0 &&
		    msic > 0 && pci_alloc_msi(dev, &msic) == 0) {
			if (msic == 1) {
				device_printf(dev, "Using %d MSI messages.\n",
				    msic);
				sc->jme_flags |= JME_FLAG_MSI;
				sc->jme_irq_spec = jme_irq_spec_msi;
			} else
				pci_release_msi(dev);
		}
		/* Map interrupt vectors 0, 1 and 2. */
		if ((sc->jme_flags & JME_FLAG_MSI) != 0 ||
		    (sc->jme_flags & JME_FLAG_MSIX) != 0)
			jme_map_intr_vector(sc);
	}

	error = bus_alloc_resources(dev, sc->jme_irq_spec, sc->jme_irq);
	if (error != 0) {
		device_printf(dev, "cannot allocate IRQ resources.\n");
		goto fail;
	}

	sc->jme_rev = pci_get_device(dev);
	if ((sc->jme_rev & DEVICEID_JMC2XX_MASK) == DEVICEID_JMC260) {
		sc->jme_flags |= JME_FLAG_FASTETH;
		sc->jme_flags |= JME_FLAG_NOJUMBO;
	}
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	sc->jme_chip_rev = (reg & CHIPMODE_REV_MASK) >> CHIPMODE_REV_SHIFT;
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA)
		sc->jme_flags |= JME_FLAG_FPGA;
	if (bootverbose) {
		device_printf(dev, "PCI device revision : 0x%04x\n",
		    sc->jme_rev);
		device_printf(dev, "Chip revision : 0x%02x\n",
		    sc->jme_chip_rev);
		if ((sc->jme_flags & JME_FLAG_FPGA) != 0)
			device_printf(dev, "FPGA revision : 0x%04x\n",
			    (reg & CHIPMODE_FPGA_REV_MASK) >>
			    CHIPMODE_FPGA_REV_SHIFT);
	}
	if (sc->jme_chip_rev == 0xFF) {
		device_printf(dev, "Unknown chip revision : 0x%02x\n",
		    sc->jme_rev);
		error = ENXIO;
		goto fail;
	}

	/* Identify controller features and bugs. */
	if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 2) {
		if ((sc->jme_rev & DEVICEID_JMC2XX_MASK) == DEVICEID_JMC260 &&
		    CHIPMODE_REVFM(sc->jme_chip_rev) == 2)
			sc->jme_flags |= JME_FLAG_DMA32BIT;
		if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5)
			sc->jme_flags |= JME_FLAG_EFUSE | JME_FLAG_PCCPCD;
		sc->jme_flags |= JME_FLAG_TXCLK | JME_FLAG_RXCLK;
		sc->jme_flags |= JME_FLAG_HWMIB;
	}

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Get station address. */
	if ((sc->jme_flags & JME_FLAG_EFUSE) != 0) {
		error = jme_efuse_macaddr(sc);
		if (error == 0)
			jme_reg_macaddr(sc);
	} else {
		error = ENOENT;
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_EEPROM_PRESENT) != 0)
			error = jme_eeprom_macaddr(sc);
		if (error != 0 && bootverbose)
			device_printf(sc->jme_dev,
			    "ethernet hardware address not found in EEPROM.\n");
		if (error != 0)
			jme_reg_macaddr(sc);
	}

	/*
	 * Save PHY address.
	 * The integrated JR0211 has a fixed PHY address, whereas the
	 * FPGA version requires PHY probing to get the correct PHY
	 * address.
	 */
	if ((sc->jme_flags & JME_FLAG_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (bootverbose)
			device_printf(dev, "PHY is at address %d.\n",
			    sc->jme_phyaddr);
	} else
		sc->jme_phyaddr = 0;

	/* Set max allowable DMA size. */
	if (pci_find_extcap(dev, PCIY_EXPRESS, &i) == 0) {
		sc->jme_flags |= JME_FLAG_PCIE;
		burst = pci_read_config(dev, i + PCIR_EXPRESS_DEVICE_CTL, 2);
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((burst >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((burst >> 5) & 0x07));
		}
		switch ((burst >> 12) & 0x07) {
		case 0:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
			break;
		case 1:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
			break;
		default:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
			break;
		}
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	} else {
		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	}
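
	/*
	 * The switch above caps the Tx DMA burst at the PCIe Max Read
	 * Request Size read from the device control register (encoded
	 * field: 0 = 128 bytes, 1 = 256 bytes); anything larger is
	 * clamped to 512 bytes, and non-PCIe parts use 512 bytes too.
	 */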
	/* Create coalescing sysctl node. */
	jme_sysctl_node(sc);
	if ((error = jme_dma_alloc(sc)) != 0)
		goto fail;

	ifp = sc->jme_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure.\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
	ifp->if_init = jme_init;
	ifp->if_snd.ifq_drv_maxlen = JME_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	/* JMC250 supports Tx/Rx checksum offload as well as TSO. */
	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4;
	ifp->if_hwassist = JME_CSUM_FEATURES | CSUM_TSO;
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) {
		sc->jme_flags |= JME_FLAG_PMCAP;
		ifp->if_capabilities |= IFCAP_WOL_MAGIC;
	}
	ifp->if_capenable = ifp->if_capabilities;

	/* Wake up PHY. */
	jme_phy_up(sc);
	mii_flags = MIIF_DOPAUSE;
	/* Ask the PHY driver to perform PHY calibration. */
	if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5)
		mii_flags |= MIIF_MACPRIV0;
	/* Set up MII bus. */
	error = mii_attach(dev, &sc->jme_miibus, ifp, jme_mediachange,
	    jme_mediastatus, BMSR_DEFCAPMASK,
	    sc->jme_flags & JME_FLAG_FPGA ? MII_PHY_ANY : sc->jme_phyaddr,
	    MII_OFFSET_ANY, mii_flags);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	/*
	 * Force PHY to FPGA mode.
	 */
	if ((sc->jme_flags & JME_FLAG_FPGA) != 0) {
		mii = device_get_softc(sc->jme_miibus);
		if (mii->mii_instance != 0) {
			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
				if (miisc->mii_phy != 0) {
					sc->jme_phyaddr = miisc->mii_phy;
					break;
				}
			}
			if (sc->jme_phyaddr != 0) {
				device_printf(sc->jme_dev,
				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
				/* vendor magic. */
				jme_miibus_writereg(dev, sc->jme_phyaddr, 27,
				    0x0004);
			}
		}
	}

	ether_ifattach(ifp, sc->jme_eaddr);

	/* VLAN capability setup */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO;
	ifp->if_capenable = ifp->if_capabilities;

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	/* Create local taskq. */
	TASK_INIT(&sc->jme_tx_task, 1, jme_tx_task, ifp);
	sc->jme_tq = taskqueue_create_fast("jme_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->jme_tq);
	if (sc->jme_tq == NULL) {
		device_printf(dev, "could not create taskqueue.\n");
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}
	taskqueue_start_threads(&sc->jme_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->jme_dev));

	for (i = 0; i < 1; i++) {
		error = bus_setup_intr(dev, sc->jme_irq[i],
		    INTR_TYPE_NET | INTR_MPSAFE, jme_intr, NULL, sc,
		    &sc->jme_intrhand[i]);
		if (error != 0)
			break;
	}

	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		taskqueue_free(sc->jme_tq);
		sc->jme_tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error != 0)
		jme_detach(dev);

	return (error);
}

static int
jme_detach(device_t dev)
{
	struct jme_softc *sc;
	struct ifnet *ifp;
	int i;

	sc = device_get_softc(dev);

	ifp = sc->jme_ifp;
	if (device_is_attached(dev)) {
		JME_LOCK(sc);
		sc->jme_flags |= JME_FLAG_DETACH;
		jme_stop(sc);
		JME_UNLOCK(sc);
		callout_drain(&sc->jme_tick_ch);
		taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
		taskqueue_drain(sc->jme_tq, &sc->jme_tx_task);
		taskqueue_drain(taskqueue_swi, &sc->jme_link_task);
		/* Restore possibly modified station address. */
		if ((sc->jme_flags & JME_FLAG_EFUSE) != 0)
			jme_set_macaddr(sc, sc->jme_eaddr);
		ether_ifdetach(ifp);
	}

	if (sc->jme_tq != NULL) {
		taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
		taskqueue_free(sc->jme_tq);
		sc->jme_tq = NULL;
	}

	if (sc->jme_miibus != NULL) {
		device_delete_child(dev, sc->jme_miibus);
		sc->jme_miibus = NULL;
	}
	bus_generic_detach(dev);
	jme_dma_free(sc);

	if (ifp != NULL) {
		if_free(ifp);
		sc->jme_ifp = NULL;
	}

	for (i = 0; i < 1; i++) {
		if (sc->jme_intrhand[i] != NULL) {
			bus_teardown_intr(dev, sc->jme_irq[i],
			    sc->jme_intrhand[i]);
			sc->jme_intrhand[i] = NULL;
		}
	}

	if (sc->jme_irq[0] != NULL)
		bus_release_resources(dev, sc->jme_irq_spec, sc->jme_irq);
	if ((sc->jme_flags & (JME_FLAG_MSIX | JME_FLAG_MSI)) != 0)
		pci_release_msi(dev);
	if (sc->jme_res[0] != NULL)
		bus_release_resources(dev, sc->jme_res_spec, sc->jme_res);
	mtx_destroy(&sc->jme_mtx);

	return (0);
}

#define	JME_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)

static void
jme_sysctl_node(struct jme_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct jme_hw_stats *stats;
	int error;

	stats = &sc->jme_stats;
	ctx = device_get_sysctl_ctx(sc->jme_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev));

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_coal_to",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->jme_tx_coal_to, 0,
	    sysctl_hw_jme_tx_coal_to, "I", "jme tx coalescing timeout");

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_coal_pkt",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->jme_tx_coal_pkt, 0,
	    sysctl_hw_jme_tx_coal_pkt, "I", "jme tx coalescing packet");

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_coal_to",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->jme_rx_coal_to, 0,
	    sysctl_hw_jme_rx_coal_to, "I", "jme rx coalescing timeout");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_coal_pkt",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->jme_rx_coal_pkt, 0,
	    sysctl_hw_jme_rx_coal_pkt, "I", "jme rx coalescing packet");

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->jme_process_limit, 0,
	    sysctl_hw_jme_proc_limit, "I",
	    "max number of Rx events to process");

	/* Pull in device tunables. */
	sc->jme_process_limit = JME_PROC_DEFAULT;
	error = resource_int_value(device_get_name(sc->jme_dev),
	    device_get_unit(sc->jme_dev), "process_limit",
	    &sc->jme_process_limit);
	if (error == 0) {
		if (sc->jme_process_limit < JME_PROC_MIN ||
		    sc->jme_process_limit > JME_PROC_MAX) {
			device_printf(sc->jme_dev,
			    "process_limit value out of range; "
			    "using default: %d\n", JME_PROC_DEFAULT);
			sc->jme_process_limit = JME_PROC_DEFAULT;
		}
	}

	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
	error = resource_int_value(device_get_name(sc->jme_dev),
	    device_get_unit(sc->jme_dev), "tx_coal_to", &sc->jme_tx_coal_to);
	if (error == 0) {
		if (sc->jme_tx_coal_to < PCCTX_COAL_TO_MIN ||
		    sc->jme_tx_coal_to > PCCTX_COAL_TO_MAX) {
			device_printf(sc->jme_dev,
			    "tx_coal_to value out of range; "
			    "using default: %d\n", PCCTX_COAL_TO_DEFAULT);
			sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
		}
	}

	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
	error = resource_int_value(device_get_name(sc->jme_dev),
	    device_get_unit(sc->jme_dev), "tx_coal_pkt", &sc->jme_tx_coal_pkt);
	if (error == 0) {
		if (sc->jme_tx_coal_pkt < PCCTX_COAL_PKT_MIN ||
		    sc->jme_tx_coal_pkt > PCCTX_COAL_PKT_MAX) {
			device_printf(sc->jme_dev,
			    "tx_coal_pkt value out of range; "
			    "using default: %d\n", PCCTX_COAL_PKT_DEFAULT);
			sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
		}
	}

	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
	error = resource_int_value(device_get_name(sc->jme_dev),
	    device_get_unit(sc->jme_dev), "rx_coal_to", &sc->jme_rx_coal_to);
	if (error == 0) {
		if (sc->jme_rx_coal_to < PCCRX_COAL_TO_MIN ||
		    sc->jme_rx_coal_to > PCCRX_COAL_TO_MAX) {
			device_printf(sc->jme_dev,
			    "rx_coal_to value out of range; "
			    "using default: %d\n", PCCRX_COAL_TO_DEFAULT);
			sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
		}
	}

	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
	error = resource_int_value(device_get_name(sc->jme_dev),
	    device_get_unit(sc->jme_dev), "rx_coal_pkt", &sc->jme_rx_coal_pkt);
	if (error == 0) {
		if (sc->jme_rx_coal_pkt < PCCRX_COAL_PKT_MIN ||
		    sc->jme_rx_coal_pkt > PCCRX_COAL_PKT_MAX) {
			device_printf(sc->jme_dev,
			    "rx_coal_pkt value out of range; "
			    "using default: %d\n", PCCRX_COAL_PKT_DEFAULT);
			sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
		}
	}
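
	/*
	 * The resource_int_value() calls above consult per-device
	 * hints, so the defaults can be overridden from
	 * /boot/device.hints or loader.conf with entries such as
	 * (hint spelling assumed):
	 *
	 *	hint.jme.0.process_limit="128"
	 */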

	if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
		return;

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "JME statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* Rx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "Rx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	JME_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->rx_good_frames, "Good frames");
	JME_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
	    &stats->rx_crc_errs, "CRC errors");
	JME_SYSCTL_STAT_ADD32(ctx, child, "mii_errs",
	    &stats->rx_mii_errs, "MII errors");
	JME_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
	    &stats->rx_fifo_oflows, "FIFO overflows");
	JME_SYSCTL_STAT_ADD32(ctx, child, "desc_empty",
	    &stats->rx_desc_empty, "Descriptor empty");
	JME_SYSCTL_STAT_ADD32(ctx, child, "bad_frames",
	    &stats->rx_bad_frames, "Bad frames");

	/* Tx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "Tx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	JME_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->tx_good_frames, "Good frames");
	JME_SYSCTL_STAT_ADD32(ctx, child, "bad_frames",
	    &stats->tx_bad_frames, "Bad frames");
}

#undef	JME_SYSCTL_STAT_ADD32

struct jme_dmamap_arg {
	bus_addr_t	jme_busaddr;
};

static void
jme_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct jme_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct jme_dmamap_arg *)arg;
	ctx->jme_busaddr = segs[0].ds_addr;
}

static int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_dmamap_arg ctx;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	bus_addr_t lowaddr, rx_ring_end, tx_ring_end;
	int error, i;

	lowaddr = BUS_SPACE_MAXADDR;
	if ((sc->jme_flags & JME_FLAG_DMA32BIT) != 0)
		lowaddr = BUS_SPACE_MAXADDR_32BIT;

again:
	/* Create parent ring tag. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */
	    1, 0,			/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_ring_tag);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		goto fail;
	}
	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
	    JME_TX_RING_ALIGN, 0,	/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_TX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    JME_TX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not allocate Tx ring DMA tag.\n");
		goto fail;
	}
	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
	    JME_RX_RING_ALIGN, 0,	/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_RX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    JME_RX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not allocate Rx ring DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->jme_cdata.jme_tx_ring_tag,
	    (void **)&sc->jme_rdata.jme_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->jme_cdata.jme_tx_ring_map);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		goto fail;
	}

	ctx.jme_busaddr = 0;
	error = bus_dmamap_load(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
	    JME_TX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.jme_busaddr == 0) {
		device_printf(sc->jme_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		goto fail;
	}
	sc->jme_rdata.jme_tx_ring_paddr = ctx.jme_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->jme_cdata.jme_rx_ring_tag,
	    (void **)&sc->jme_rdata.jme_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->jme_cdata.jme_rx_ring_map);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not allocate DMA'able memory for Rx ring.\n");
		goto fail;
	}

	ctx.jme_busaddr = 0;
	error = bus_dmamap_load(sc->jme_cdata.jme_rx_ring_tag,
	    sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
	    JME_RX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.jme_busaddr == 0) {
		device_printf(sc->jme_dev,
		    "could not load DMA'able memory for Rx ring.\n");
		goto fail;
	}
	sc->jme_rdata.jme_rx_ring_paddr = ctx.jme_busaddr;

	if (lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		/* Tx/Rx descriptor queue should reside within 4GB boundary. */
		tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr +
		    JME_TX_RING_SIZE;
		rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr +
		    JME_RX_RING_SIZE;
		if ((JME_ADDR_HI(tx_ring_end) !=
		    JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
		    (JME_ADDR_HI(rx_ring_end) !=
		    JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
			device_printf(sc->jme_dev, "4GB boundary crossed, "
			    "switching to 32bit DMA address mode.\n");
			jme_dma_free(sc);
			/* Limit DMA address space to 32bit and try again. */
			lowaddr = BUS_SPACE_MAXADDR_32BIT;
			goto again;
		}
	}

	lowaddr = BUS_SPACE_MAXADDR;
	if ((sc->jme_flags & JME_FLAG_DMA32BIT) != 0)
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
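
	/*
	 * The check above enforces that each ring does not cross a 4GB
	 * boundary, i.e. the upper 32 bits of its first and last byte
	 * must match.  If a ring straddles a boundary, everything is
	 * freed and re-allocated below the 32-bit limit, where the
	 * constraint is trivially satisfied.
	 */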
	/* Create parent buffer tag. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */
	    1, 0,			/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_buffer_tag);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not create parent buffer DMA tag.\n");
		goto fail;
	}

	/* Create shadow status block tag. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    JME_SSB_ALIGN, 0,		/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_SSB_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    JME_SSB_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_ssb_tag);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "could not create shared status block DMA tag.\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_TSO_MAXSIZE,		/* maxsize */
	    JME_MAXTXSEGS,		/* nsegments */
	    JME_TSO_MAXSEGSIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_tx_tag);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->jme_cdata.jme_rx_tag);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not create Rx DMA tag.\n");
		goto fail;
	}

	/*
	 * Allocate DMA'able memory and load the DMA map for shared
	 * status block.
	 */
	error = bus_dmamem_alloc(sc->jme_cdata.jme_ssb_tag,
	    (void **)&sc->jme_rdata.jme_ssb_block,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->jme_cdata.jme_ssb_map);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not allocate DMA'able "
		    "memory for shared status block.\n");
		goto fail;
	}

	ctx.jme_busaddr = 0;
	error = bus_dmamap_load(sc->jme_cdata.jme_ssb_tag,
	    sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,
	    JME_SSB_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.jme_busaddr == 0) {
		device_printf(sc->jme_dev, "could not load DMA'able memory "
		    "for shared status block.\n");
		goto fail;
	}
	sc->jme_rdata.jme_ssb_block_paddr = ctx.jme_busaddr;
	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->jme_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
	    &sc->jme_cdata.jme_rx_sparemap)) != 0) {
		device_printf(sc->jme_dev,
		    "could not create spare Rx dmamap.\n");
		goto fail;
	}
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->jme_dev,
			    "could not create Rx dmamap.\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	/* Tx ring */
	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
		if (sc->jme_cdata.jme_tx_ring_map)
			bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
			    sc->jme_cdata.jme_tx_ring_map);
		if (sc->jme_cdata.jme_tx_ring_map &&
		    sc->jme_rdata.jme_tx_ring)
			bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
			    sc->jme_rdata.jme_tx_ring,
			    sc->jme_cdata.jme_tx_ring_map);
		sc->jme_rdata.jme_tx_ring = NULL;
		sc->jme_cdata.jme_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
	}
	/* Rx ring */
	if (sc->jme_cdata.jme_rx_ring_tag != NULL) {
		if (sc->jme_cdata.jme_rx_ring_map)
			bus_dmamap_unload(sc->jme_cdata.jme_rx_ring_tag,
			    sc->jme_cdata.jme_rx_ring_map);
		if (sc->jme_cdata.jme_rx_ring_map &&
		    sc->jme_rdata.jme_rx_ring)
			bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag,
			    sc->jme_rdata.jme_rx_ring,
			    sc->jme_cdata.jme_rx_ring_map);
		sc->jme_rdata.jme_rx_ring = NULL;
		sc->jme_cdata.jme_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
		sc->jme_cdata.jme_rx_ring_tag = NULL;
	}
	/* Tx buffers */
	if (sc->jme_cdata.jme_tx_tag != NULL) {
		for (i = 0; i < JME_TX_RING_CNT; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
		sc->jme_cdata.jme_tx_tag = NULL;
	}
	/* Rx buffers */
	if (sc->jme_cdata.jme_rx_tag != NULL) {
		for (i = 0; i < JME_RX_RING_CNT; i++) {
			rxd = &sc->jme_cdata.jme_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->jme_cdata.jme_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
			    sc->jme_cdata.jme_rx_sparemap);
			sc->jme_cdata.jme_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
		sc->jme_cdata.jme_rx_tag = NULL;
	}
	/* Shared status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		if (sc->jme_cdata.jme_ssb_map)
			bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
			    sc->jme_cdata.jme_ssb_map);
		if (sc->jme_cdata.jme_ssb_map && sc->jme_rdata.jme_ssb_block)
			bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
			    sc->jme_rdata.jme_ssb_block,
			    sc->jme_cdata.jme_ssb_map);
		sc->jme_rdata.jme_ssb_block = NULL;
		sc->jme_cdata.jme_ssb_map = NULL;
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}
}

/*
 * Make sure the interface is stopped at reboot time.
 */
static int
jme_shutdown(device_t dev)
{

	return (jme_suspend(dev));
}

/*
 * Unlike other ethernet controllers, the JMC250 requires the link
 * speed to be explicitly reset to 10/100Mbps, as a gigabit link will
 * consume more power than 375mA.
 * Note that we reset the link speed to 10/100Mbps with
 * auto-negotiation, but we don't know whether that operation will
 * succeed or not, as we have no control after powering off.  If the
 * renegotiation fails, WOL may not work.  Running at 1Gbps draws more
 * power than the 375mA at 3.3V specified in the PCI specification,
 * and that could result in power to the ethernet controller being
 * shut down completely.
 *
 * TODO
 * Save the currently negotiated media speed/duplex/flow-control to
 * the softc and restore the same link again after resuming.  PHY
 * handling such as powering down/resetting to 100Mbps may be better
 * handled in the suspend method of the phy driver.
 */
static void
jme_setlinkspeed(struct jme_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	JME_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->jme_miibus);
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
			/* FALLTHROUGH */
		default:
			break;
		}
	}
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/* Poll link state until jme(4) gets a 10/100 link. */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & IFM_AVALID) != 0) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					jme_mac_config(sc);
					return;
				default:
					break;
				}
			}
			JME_UNLOCK(sc);
			pause("jmelnk", hz);
			JME_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			device_printf(sc->jme_dev, "establishing link failed, "
			    "WOL may not work!\n");
	}
	/*
	 * No link, force the MAC to have a 100Mbps, full-duplex link.
	 * This is the last resort and may or may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	jme_mac_config(sc);
}

static void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	JME_LOCK_ASSERT(sc);

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		/* Remove Tx MAC/offload clock to save more power. */
		if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
			CSR_WRITE_4(sc, JME_GHC, CSR_READ_4(sc, JME_GHC) &
			    ~(GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100 |
			    GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000));
		if ((sc->jme_flags & JME_FLAG_RXCLK) != 0)
			CSR_WRITE_4(sc, JME_GPREG1,
			    CSR_READ_4(sc, JME_GPREG1) | GPREG1_RX_MAC_CLK_DIS);
		/* No PME capability, PHY power down. */
		jme_phy_down(sc);
		return;
	}

	ifp = sc->jme_ifp;
	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_flags & JME_FLAG_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);
	/* Remove Tx MAC/offload clock to save more power. */
	if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
		CSR_WRITE_4(sc, JME_GHC, CSR_READ_4(sc, JME_GHC) &
		    ~(GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100 |
		    GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000));
	/* Request PME. */
	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_phy_down(sc);
	}
}

static int
jme_suspend(device_t dev)
{
	struct jme_softc *sc;

	sc = device_get_softc(dev);

	JME_LOCK(sc);
	jme_stop(sc);
	jme_setwol(sc);
	JME_UNLOCK(sc);

	return (0);
}

static int
jme_resume(device_t dev)
{
	struct jme_softc *sc;
	struct ifnet *ifp;
	uint16_t pmstat;
	int pmc;

	sc = device_get_softc(dev);

	JME_LOCK(sc);
	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) == 0) {
		pmstat = pci_read_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		/* Disable PME and clear PME status. */
		pmstat &= ~PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, pmstat, 2);
	}
	/* Wake up PHY. */
	jme_phy_up(sc);
	ifp = sc->jme_ifp;
	if ((ifp->if_flags & IFF_UP) != 0) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		jme_init_locked(sc);
	}

	JME_UNLOCK(sc);

	return (0);
}

static int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int error, i, nsegs, prod;
	uint32_t cflags, tso_segsz;

	JME_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		/*
		 * Due to its adherence to the NDIS specification, the
		 * JMC250 assumes the upper stack computed the TCP
		 * pseudo checksum without including the payload
		 * length.  This breaks checksum offload in the TSO
		 * case, so recompute the TCP pseudo checksum for the
		 * JMC250.  Hopefully this isn't much of a burden on
		 * modern CPUs.
		 */
		struct ether_header *eh;
		struct ip *ip;
		struct tcphdr *tcp;
		uint32_t ip_off, poff;

		if (M_WRITABLE(*m_head) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_DONTWAIT);
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}
		ip_off = sizeof(struct ether_header);
		m = m_pullup(*m_head, ip_off);
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		eh = mtod(m, struct ether_header *);
		/* Check for the existence of a VLAN tag. */
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			ip_off = sizeof(struct ether_vlan_header);
			m = m_pullup(m, ip_off);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		m = m_pullup(m, ip_off + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, char *) + ip_off);
		poff = ip_off + (ip->ip_hl << 2);
		m = m_pullup(m, poff + sizeof(struct tcphdr));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		/*
		 * Reset the IP checksum and recompute the TCP pseudo
		 * checksum that the NDIS specification requires.
		 */
		ip = (struct ip *)(mtod(m, char *) + ip_off);
		tcp = (struct tcphdr *)(mtod(m, char *) + poff);
		ip->ip_sum = 0;
		if (poff + (tcp->th_off << 2) == m->m_pkthdr.len) {
			tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr,
			    htons((tcp->th_off << 2) + IPPROTO_TCP));
			/* No need for TSO; force IP checksum offload. */
			(*m_head)->m_pkthdr.csum_flags &= ~CSUM_TSO;
			(*m_head)->m_pkthdr.csum_flags |= CSUM_IP;
		} else
			tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		*m_head = m;
	}

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_DONTWAIT, JME_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/*
	 * Check for descriptor overrun.  Leave one free descriptor.
	 * Since we always use 64-bit address mode for transmitting,
	 * each Tx request requires one more dummy descriptor.
	 */
	if (sc->jme_cdata.jme_tx_cnt + nsegs + 1 > JME_TX_RING_CNT - 1) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}

	m = *m_head;
	cflags = 0;
	tso_segsz = 0;
	/* Configure checksum offload and TSO. */
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz <<
		    JME_TD_MSS_SHIFT;
		cflags |= JME_TD_TSO;
	} else {
		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
			cflags |= JME_TD_IPCSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
			cflags |= JME_TD_TCPCSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			cflags |= JME_TD_UDPCSUM;
	}
	/* Configure VLAN. */
	if ((m->m_flags & M_VLANTAG) != 0) {
		cflags |= (m->m_pkthdr.ether_vtag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}

	desc = &sc->jme_rdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->buflen = htole32(tso_segsz);
	desc->addr_hi = htole32(m->m_pkthdr.len);
	desc->addr_lo = 0;
	sc->jme_cdata.jme_tx_cnt++;
	JME_DESC_INC(prod, JME_TX_RING_CNT);
	for (i = 0; i < nsegs; i++) {
		desc = &sc->jme_rdata.jme_tx_ring[prod];
		desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT);
		desc->buflen = htole32(txsegs[i].ds_len);
		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
		sc->jme_cdata.jme_tx_cnt++;
		JME_DESC_INC(prod, JME_TX_RING_CNT);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
	/*
	 * Finally, request an interrupt and give ownership of the
	 * first descriptor to the hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc = nsegs + 1;
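
	/*
	 * The first descriptor written above is the "dummy" descriptor
	 * required by 64-bit address mode: it carries the checksum/TSO/
	 * VLAN flags, the TSO MSS in its buflen field and the total
	 * packet length in addr_hi, while the actual buffer addresses
	 * follow in the next nsegs descriptors.  That is why each Tx
	 * request consumes nsegs + 1 ring entries.
	 */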
	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
jme_tx_task(void *arg, int pending)
{
	struct ifnet *ifp;

	ifp = (struct ifnet *)arg;
	jme_start(ifp);
}

static void
jme_start(struct ifnet *ifp)
{
	struct jme_softc *sc;
	struct mbuf *m_head;
	int enq;

	sc = ifp->if_softc;

	JME_LOCK(sc);

	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT)
		jme_txeof(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->jme_flags & JME_FLAG_LINK) == 0) {
		JME_UNLOCK(sc);
		return;
	}

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/*
		 * Reading TXCSR takes a very long time under heavy load,
		 * so cache the TXCSR value and write the ORed value with
		 * the kick command to TXCSR.  This saves one register
		 * access cycle.
		 */
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
		/* Set a timeout in case the chip goes out to lunch. */
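		/*
		 * The counter armed here is decremented by jme_watchdog()
		 * from the once-per-second jme_tick() callout, so
		 * JME_TX_TIMEOUT is effectively measured in seconds;
		 * jme_txeof() disarms it once the Tx ring drains.
		 */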
		sc->jme_watchdog_timer = JME_TX_TIMEOUT;
	}

	JME_UNLOCK(sc);
}

static void
jme_watchdog(struct jme_softc *sc)
{
	struct ifnet *ifp;

	JME_LOCK_ASSERT(sc);

	if (sc->jme_watchdog_timer == 0 || --sc->jme_watchdog_timer)
		return;

	ifp = sc->jme_ifp;
	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		if_printf(sc->jme_ifp, "watchdog timeout (missed link)\n");
		ifp->if_oerrors++;
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		jme_init_locked(sc);
		return;
	}
	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt == 0) {
		if_printf(sc->jme_ifp,
		    "watchdog timeout (missed Tx interrupts) -- recovering\n");
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			taskqueue_enqueue(sc->jme_tq, &sc->jme_tx_task);
		return;
	}

	if_printf(sc->jme_ifp, "watchdog timeout\n");
	ifp->if_oerrors++;
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	jme_init_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		taskqueue_enqueue(sc->jme_tq, &sc->jme_tx_task);
}

static int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct jme_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	uint32_t reg;
	int error, mask;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
		    ((sc->jme_flags & JME_FLAG_NOJUMBO) != 0 &&
		    ifr->ifr_mtu > JME_MAX_MTU)) {
			error = EINVAL;
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			/*
			 * No special configuration is required when the
			 * interface MTU is changed, but the availability of
			 * TSO/Tx checksum offload should be checked against
			 * the new MTU size, as the FIFO size is just 2K.
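			 * For example, with an 8K MTU a full-sized frame
			 * can never fit in the Tx FIFO, so Tx checksum
			 * offload and TSO are dropped below once the MTU
			 * reaches JME_TX_FIFO_SIZE; Rx checksum offload is
			 * unaffected.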
			 */
			JME_LOCK(sc);
			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
				ifp->if_capenable &=
				    ~(IFCAP_TXCSUM | IFCAP_TSO4);
				ifp->if_hwassist &=
				    ~(JME_CSUM_FEATURES | CSUM_TSO);
				VLAN_CAPABILITIES(ifp);
			}
			ifp->if_mtu = ifr->ifr_mtu;
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				jme_init_locked(sc);
			}
			JME_UNLOCK(sc);
		}
		break;
	case SIOCSIFFLAGS:
		JME_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc->jme_if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					jme_set_filter(sc);
			} else {
				if ((sc->jme_flags & JME_FLAG_DETACH) == 0)
					jme_init_locked(sc);
			}
		} else {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
				jme_stop(sc);
		}
		sc->jme_if_flags = ifp->if_flags;
		JME_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		JME_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			jme_set_filter(sc);
		JME_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->jme_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		JME_LOCK(sc);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    ifp->if_mtu < JME_TX_FIFO_SIZE) {
			if ((IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
				ifp->if_capenable ^= IFCAP_TXCSUM;
				if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
					ifp->if_hwassist |= JME_CSUM_FEATURES;
				else
					ifp->if_hwassist &= ~JME_CSUM_FEATURES;
			}
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (IFCAP_RXCSUM & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reg = CSR_READ_4(sc, JME_RXMAC);
			reg &= ~RXMAC_CSUM_ENB;
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
				reg |= RXMAC_CSUM_ENB;
			CSR_WRITE_4(sc, JME_RXMAC, reg);
		}
		if ((mask & IFCAP_TSO4) != 0 &&
		    ifp->if_mtu < JME_TX_FIFO_SIZE) {
			if ((IFCAP_TSO4 & ifp->if_capabilities) != 0) {
				ifp->if_capenable ^= IFCAP_TSO4;
				if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
					ifp->if_hwassist |= CSUM_TSO;
				else
					ifp->if_hwassist &= ~CSUM_TSO;
			}
		}
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (IFCAP_WOL_MAGIC & ifp->if_capabilities) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			jme_set_vlan(sc);
		}
		JME_UNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

static void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, gpreg, rxmac, txmac, txpause;
	uint32_t txclk;

	JME_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->jme_miibus);

	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	txclk = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;
		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}
	/* Reprogram the Tx/Rx MACs with the resolved speed/duplex. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10;
		txclk |= GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100;
		break;
	case IFM_100_TX:
		ghc |= GHC_SPEED_100;
		txclk |= GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100;
		break;
	case IFM_1000_T:
		if ((sc->jme_flags & JME_FLAG_FASTETH) != 0)
			break;
		ghc |= GHC_SPEED_1000;
		txclk |= GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;
	default:
		break;
	}
	if (sc->jme_rev == DEVICEID_JMC250 &&
	    sc->jme_chip_rev == DEVICEREVID_JMC250_A2) {
		/*
		 * Work around an occasional packet loss issue of the
		 * JMC250 A2 when it runs on half-duplex media.
		 */
		gpreg = CSR_READ_4(sc, JME_GPREG1);
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
			gpreg &= ~GPREG1_HDPX_FIX;
		else
			gpreg |= GPREG1_HDPX_FIX;
		CSR_WRITE_4(sc, JME_GPREG1, gpreg);
		/* Work around CRC errors at 100Mbps on the JMC250 A2. */
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
			/* Extend the interface FIFO depth. */
			jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
			    0x1B, 0x0000);
		} else {
			/* Select the default interface FIFO depth. */
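			/*
			 * PHY register 0x1B is not one of the standard MII
			 * registers; the 0x0000/0x0004 values used here and
			 * above are presumably vendor-specific FIFO depth
			 * settings taken from the JMicron errata for this
			 * chip step.
			 */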
			jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
			    0x1B, 0x0004);
		}
	}
	if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
		ghc |= txclk;
	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);
}

static void
jme_link_task(void *arg, int pending)
{
	struct jme_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i;

	sc = (struct jme_softc *)arg;

	JME_LOCK(sc);
	mii = device_get_softc(sc->jme_miibus);
	ifp = sc->jme_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		JME_UNLOCK(sc);
		return;
	}

	sc->jme_flags &= ~JME_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		case IFM_1000_T:
			if ((sc->jme_flags & JME_FLAG_FASTETH) != 0)
				break;
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling the Rx/Tx MACs has a side effect of resetting the
	 * JME_TXNDA/JME_RXNDA registers to the first address of the
	 * Tx/Rx descriptor rings.  So the driver should reset its
	 * internal producer/consumer pointers and reclaim any
	 * allocated resources.  Note that just saving the values of
	 * the JME_TXNDA and JME_RXNDA registers before stopping the
	 * MACs and restoring them afterwards is not sufficient to
	 * ensure a correct MAC state, because stopping MAC operation
	 * can take a while and the hardware might have updated the
	 * JME_TXNDA/JME_RXNDA registers during the stop operation.
	 */
	/* Block execution of tasks. */
	taskqueue_block(sc->jme_tq);
	/* Disable interrupts and stop the driver. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	callout_stop(&sc->jme_tick_ch);
	sc->jme_watchdog_timer = 0;

	/* Stop the receiver and transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	/* XXX Drain all queued tasks. */
	JME_UNLOCK(sc);
	taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
	taskqueue_drain(sc->jme_tq, &sc->jme_tx_task);
	JME_LOCK(sc);

	jme_rxintr(sc, JME_RX_RING_CNT);
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);
	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove packets queued for transmit. */
		for (i = 0; i < JME_TX_RING_CNT; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_sync(
				    sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(
				    sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				ifp->if_oerrors++;
			}
		}
	}

	/*
	 * Reuse the configured Rx descriptors and reset the
	 * producer/consumer index.
	 */
	sc->jme_cdata.jme_rx_cons = 0;
	sc->jme_morework = 0;
	jme_init_tx_ring(sc);
	/* Initialize the shadow status block. */
	jme_init_ssb(sc);

	/* Program the MAC with the resolved speed/duplex/flow-control. */
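	/*
	 * Re-latching the ring base addresses below keeps the hardware's
	 * JME_TXNDA/JME_RXNDA pointers (reset to the ring base when the
	 * MACs were stopped, see above) in agreement with the software
	 * producer/consumer indices that were just reset.
	 */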
	if ((sc->jme_flags & JME_FLAG_LINK) != 0) {
		jme_mac_config(sc);
		jme_stats_clear(sc);

		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set the Tx ring address to the hardware. */
		paddr = JME_TX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		/* Set the Rx ring address to the hardware. */
		paddr = JME_RX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));

		/* Restart the receiver and transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
		/* Lastly, enable the TX/RX clocks. */
		if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
			CSR_WRITE_4(sc, JME_GHC,
			    CSR_READ_4(sc, JME_GHC) & ~GHC_TX_MAC_CLK_DIS);
		if ((sc->jme_flags & JME_FLAG_RXCLK) != 0)
			CSR_WRITE_4(sc, JME_GPREG1,
			    CSR_READ_4(sc, JME_GPREG1) & ~GPREG1_RX_MAC_CLK_DIS);
	}

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
	/* Unblock execution of tasks. */
	taskqueue_unblock(sc->jme_tq);
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);

	JME_UNLOCK(sc);
}

static int
jme_intr(void *arg)
{
	struct jme_softc *sc;
	uint32_t status;

	sc = (struct jme_softc *)arg;

	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return (FILTER_STRAY);
	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
	taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);

	return (FILTER_HANDLED);
}

static void
jme_int_task(void *arg, int pending)
{
	struct jme_softc *sc;
	struct ifnet *ifp;
	uint32_t status;
	int more;

	sc = (struct jme_softc *)arg;
	ifp = sc->jme_ifp;

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if (sc->jme_morework != 0) {
		sc->jme_morework = 0;
		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO;
	}
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto done;
	/* Reset the PCC counter/timer and ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
	if ((status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) != 0)
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
	if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0)
		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
	CSR_WRITE_4(sc, JME_INTR_STATUS, status);
	more = 0;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0) {
			more = jme_rxintr(sc, sc->jme_process_limit);
			if (more != 0)
				sc->jme_morework = 1;
		}
		if ((status & INTR_RXQ_DESC_EMPTY) != 0) {
			/*
			 * Notify the hardware of the availability of new
			 * Rx buffers.
			 * Reading RXCSR takes a very long time under
			 * heavy load, so cache the RXCSR value and write
			 * the ORed value with the kick command to RXCSR.
			 * This saves one register access cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}
		/*
		 * Reclaiming Tx buffers is deferred to make jme(4) run
		 * without locks held.
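		 * jme_txeof() asserts the softc mutex, so rather than
		 * taking that lock here the driver enqueues jme_tx_task;
		 * jme_start() then reclaims descriptors under JME_LOCK
		 * once jme_tx_cnt crosses JME_TX_DESC_HIWAT.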
		 */
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			taskqueue_enqueue(sc->jme_tq, &sc->jme_tx_task);
	}

	if (more != 0 || (CSR_READ_4(sc, JME_INTR_STATUS) & JME_INTRS) != 0) {
		taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);
		return;
	}
done:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}

static void
jme_txeof(struct jme_softc *sc)
{
	struct ifnet *ifp;
	struct jme_txdesc *txd;
	uint32_t status;
	int cons, nsegs;

	JME_LOCK_ASSERT(sc);

	ifp = sc->jme_ifp;

	cons = sc->jme_cdata.jme_tx_cons;
	if (cons == sc->jme_cdata.jme_tx_prod)
		return;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (; cons != sc->jme_cdata.jme_tx_prod;) {
		txd = &sc->jme_cdata.jme_txdesc[cons];
		status = le32toh(txd->tx_desc->flags);
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if ((status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) != 0)
			ifp->if_oerrors++;
		else {
			ifp->if_opackets++;
			if ((status & JME_TD_COLLISION) != 0)
				ifp->if_collisions +=
				    le32toh(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK;
		}
		/*
		 * Only the first descriptor of a multi-descriptor
		 * transmission is updated, so the driver has to skip the
		 * entire chain of buffers for the transmitted frame.  In
		 * other words, the JME_TD_OWN bit is valid only at the
		 * first descriptor of a multi-descriptor transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			sc->jme_rdata.jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, JME_TX_RING_CNT);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);

		KASSERT(txd->tx_m != NULL,
		    ("%s: freeing NULL mbuf!\n", __func__));
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
		    ("%s: Active Tx desc counter was garbled\n", __func__));
		txd->tx_ndesc = 0;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}
	sc->jme_cdata.jme_tx_cons = cons;
	/*
	 * Disarm the watchdog timer when there are no pending
	 * descriptors in the queue.
	 */
	if (sc->jme_cdata.jme_tx_cnt == 0)
		sc->jme_watchdog_timer = 0;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static __inline void
jme_discard_rxbuf(struct jme_softc *sc, int cons)
{
	struct jme_desc *desc;

	desc = &sc->jme_rdata.jme_rx_ring[cons];
	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
	desc->buflen = htole32(MCLBYTES);
}

/* Receive a frame. */
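/*
 * A frame larger than one cluster arrives as nsegs back-to-back
 * descriptors and is reassembled into an mbuf chain here.  Assuming 2K
 * clusters (MCLBYTES) and a 10-byte pad (JME_RX_PAD_BYTES): a 4,000-byte
 * frame is reported as 4,010 bytes in two segments, the first mbuf
 * carries MCLBYTES - 10 = 2,038 bytes and the last carries the
 * remaining 4,000 - 2,038 = 1,962 bytes, so the chain totals the
 * jme_rxlen computed below.
 */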
static void
jme_rxeof(struct jme_softc *sc)
{
	struct ifnet *ifp;
	struct jme_desc *desc;
	struct jme_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint32_t flags, status;
	int cons, count, nsegs;

	ifp = sc->jme_ifp;

	cons = sc->jme_cdata.jme_rx_cons;
	desc = &sc->jme_rdata.jme_rx_ring[cons];
	flags = le32toh(desc->flags);
	status = le32toh(desc->buflen);
	nsegs = JME_RX_NSEGS(status);
	sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
	if ((status & JME_RX_ERR_STAT) != 0) {
		ifp->if_ierrors++;
		jme_discard_rxbuf(sc, sc->jme_cdata.jme_rx_cons);
#ifdef JME_SHOW_ERRORS
		device_printf(sc->jme_dev, "%s : receive error = 0x%b\n",
		    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
#endif
		sc->jme_cdata.jme_rx_cons += nsegs;
		sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
		return;
	}

	for (count = 0; count < nsegs; count++,
	    JME_DESC_INC(cons, JME_RX_RING_CNT)) {
		rxd = &sc->jme_cdata.jme_rxdesc[cons];
		mp = rxd->rx_m;
		/* Add a new receive buffer to the ring. */
		if (jme_newbuf(sc, rxd) != 0) {
			ifp->if_iqdrops++;
			/* Reuse the buffer. */
			for (; count < nsegs; count++) {
				jme_discard_rxbuf(sc, cons);
				JME_DESC_INC(cons, JME_RX_RING_CNT);
			}
			if (sc->jme_cdata.jme_rxhead != NULL) {
				m_freem(sc->jme_cdata.jme_rxhead);
				JME_RXCHAIN_RESET(sc);
			}
			break;
		}

		/*
		 * Assume we've received a full-sized frame.
		 * The actual size is fixed up when we encounter the end
		 * of a multi-segmented frame.
		 */
		mp->m_len = MCLBYTES;

		/* Chain received mbufs. */
		if (sc->jme_cdata.jme_rxhead == NULL) {
			sc->jme_cdata.jme_rxhead = mp;
			sc->jme_cdata.jme_rxtail = mp;
		} else {
			/*
			 * The receive processor can receive a maximum
			 * frame size of 65535 bytes.
			 */
			mp->m_flags &= ~M_PKTHDR;
			sc->jme_cdata.jme_rxtail->m_next = mp;
			sc->jme_cdata.jme_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/* Last desc. for this frame. */
			m = sc->jme_cdata.jme_rxhead;
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
			if (nsegs > 1) {
				/* Set the first mbuf size. */
				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
				/* Set the last mbuf size. */
				mp->m_len = sc->jme_cdata.jme_rxlen -
				    ((MCLBYTES - JME_RX_PAD_BYTES) +
				    (MCLBYTES * (nsegs - 2)));
			} else
				m->m_len = sc->jme_cdata.jme_rxlen;
			m->m_pkthdr.rcvif = ifp;

			/*
			 * Account for the 10-byte auto padding which is
			 * used to align the IP header on a 32-bit
			 * boundary.  Also note the CRC bytes are
			 * automatically removed by the hardware.
			 */
			m->m_data += JME_RX_PAD_BYTES;

			/* Set checksum information. */
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
			    (flags & JME_RD_IPV4) != 0) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if ((flags & JME_RD_IPCSUM) != 0)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if (((flags & JME_RD_MORE_FRAG) == 0) &&
				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
				    (JME_RD_TCP | JME_RD_TCPCSUM) ||
				    (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
				    (JME_RD_UDP | JME_RD_UDPCSUM))) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

			/* Check for VLAN-tagged packets. */
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
			    (flags & JME_RD_VLAN_TAG) != 0) {
				m->m_pkthdr.ether_vtag =
				    flags & JME_RD_VLAN_MASK;
				m->m_flags |= M_VLANTAG;
			}

			ifp->if_ipackets++;
			/* Pass it on. */
			(*ifp->if_input)(ifp, m);

			/* Reset the mbuf chain. */
			JME_RXCHAIN_RESET(sc);
		}
	}

	sc->jme_cdata.jme_rx_cons += nsegs;
	sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
}

static int
jme_rxintr(struct jme_softc *sc, int count)
{
	struct jme_desc *desc;
	int nsegs, prog, pktlen;

	bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
	    sc->jme_cdata.jme_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (prog = 0; count > 0; prog++) {
		desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
			break;
		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
			break;
		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
		/*
		 * Check the number of segments against the received
		 * bytes.  A non-matching value would indicate that the
		 * hardware is still trying to update the Rx descriptors.
		 * I'm not sure whether this check is needed.
		 */
		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
		if (nsegs != ((pktlen + (MCLBYTES - 1)) / MCLBYTES))
			break;
		prog++;
		/* Received a frame. */
		jme_rxeof(sc);
		count -= nsegs;
	}

	if (prog > 0)
		bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
		    sc->jme_cdata.jme_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (count > 0 ? 0 : EAGAIN);
}

static void
jme_tick(void *arg)
{
	struct jme_softc *sc;
	struct mii_data *mii;

	sc = (struct jme_softc *)arg;

	JME_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->jme_miibus);
	mii_tick(mii);
	/*
	 * Reclaim Tx buffers that have been completed.  It's not
	 * needed here, but it releases allocated mbuf chains faster
	 * and limits the maximum delay to one hz tick.
	 */
	jme_txeof(sc);
	jme_stats_update(sc);
	jme_watchdog(sc);
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
}

static void
jme_reset(struct jme_softc *sc)
{
	uint32_t ghc, gpreg;

	/* Stop the receiver and transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	/* Reset the controller. */
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	CSR_READ_4(sc, JME_GHC);
	DELAY(10);
	/*
	 * Work around Rx FIFO overruns seen under certain conditions.
	 * Explicitly synchronize the TX/RX clocks.  The TX/RX clocks
	 * should be enabled only after enabling the TX/RX MACs.
	 */
	if ((sc->jme_flags & (JME_FLAG_TXCLK | JME_FLAG_RXCLK)) != 0) {
		/* Disable the TX clock. */
		CSR_WRITE_4(sc, JME_GHC, GHC_RESET | GHC_TX_MAC_CLK_DIS);
		/* Disable the RX clock. */
		gpreg = CSR_READ_4(sc, JME_GPREG1);
		CSR_WRITE_4(sc, JME_GPREG1, gpreg | GPREG1_RX_MAC_CLK_DIS);
		gpreg = CSR_READ_4(sc, JME_GPREG1);
		/* De-assert RESET but still disable the TX clock. */
		CSR_WRITE_4(sc, JME_GHC, GHC_TX_MAC_CLK_DIS);
		ghc = CSR_READ_4(sc, JME_GHC);

		/* Enable the TX clock. */
		CSR_WRITE_4(sc, JME_GHC, ghc & ~GHC_TX_MAC_CLK_DIS);
		/* Enable the RX clock. */
		CSR_WRITE_4(sc, JME_GPREG1, gpreg & ~GPREG1_RX_MAC_CLK_DIS);
		CSR_READ_4(sc, JME_GPREG1);

		/* Disable the TX/RX clocks again. */
		CSR_WRITE_4(sc, JME_GHC, GHC_TX_MAC_CLK_DIS);
		CSR_WRITE_4(sc, JME_GPREG1, gpreg | GPREG1_RX_MAC_CLK_DIS);
	} else
		CSR_WRITE_4(sc, JME_GHC, 0);
	CSR_READ_4(sc, JME_GHC);
	DELAY(10);
}

static void
jme_init(void *xsc)
{
	struct jme_softc *sc;

	sc = (struct jme_softc *)xsc;
	JME_LOCK(sc);
	jme_init_locked(sc);
	JME_UNLOCK(sc);
}

static void
jme_init_locked(struct jme_softc *sc)
{
	struct ifnet *ifp;
	struct mii_data *mii;
	bus_addr_t paddr;
	uint32_t reg;
	int error;

	JME_LOCK_ASSERT(sc);

	ifp = sc->jme_ifp;
	mii = device_get_softc(sc->jme_miibus);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;
	/*
	 * Cancel any pending I/O.
	 */
	jme_stop(sc);

	/*
	 * Reset the chip to a known state.
	 */
	jme_reset(sc);

	/* Init descriptors. */
	error = jme_init_rx_ring(sc);
	if (error != 0) {
		device_printf(sc->jme_dev,
		    "%s: initialization failed: no memory for Rx buffers.\n",
		    __func__);
		jme_stop(sc);
		return;
	}
	jme_init_tx_ring(sc);
	/* Initialize the shadow status block. */
	jme_init_ssb(sc);

	/* Reprogram the station address. */
	jme_set_macaddr(sc, IF_LLADDR(sc->jme_ifp));

	/*
	 * Configure the Tx queue.
	 * Tx priority queue weight value : 0
	 * Tx FIFO threshold for processing next packet : 16QW
	 * Maximum Tx DMA length : 512
	 * Allow Tx DMA burst.
	 */
	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
	sc->jme_txcsr |= sc->jme_tx_dma_size;
	sc->jme_txcsr |= TXCSR_DMA_BURST;
	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

	/* Set the Tx descriptor counter. */
	CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT);

	/* Set the Tx ring address to the hardware. */
	paddr = JME_TX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

	/* Configure TxMAC parameters. */
	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
	reg |= TXMAC_THRESH_1_PKT;
	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
	CSR_WRITE_4(sc, JME_TXMAC, reg);

	/*
	 * Configure the Rx queue.
	 * FIFO full threshold for transmitting Tx pause packet : 128T
	 * FIFO threshold for processing next packet : 128QW
	 * Rx queue 0 select
	 * Max Rx DMA length : 128
	 * Rx descriptor retry : 32
	 * Rx descriptor retry time gap : 256ns
	 * Don't receive runt/bad frames.
	 */
	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
	/*
	 * Since the Rx FIFO size is 4K bytes, receiving frames larger
	 * than 4K bytes will suffer from Rx FIFO overruns.  So
	 * decrease the FIFO threshold to reduce the FIFO overruns for
	 * frames larger than 4000 bytes.
	 * For best performance of standard MTU-sized frames use the
	 * maximum allowable FIFO threshold, 128QW.  Note these do
	 * not hold on chip full mask version >= 2.  For these
	 * controllers 64QW and 128QW are not valid values.
	 */
	if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 2)
		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
	else {
		if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
		    ETHER_CRC_LEN) > JME_RX_FIFO_SIZE)
			sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
		else
			sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
	}
	sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
	CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);

	/* Set the Rx descriptor counter. */
	CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT);

	/* Set the Rx ring address to the hardware. */
	paddr = JME_RX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));

	/* Clear the receive filter. */
	CSR_WRITE_4(sc, JME_RXMAC, 0);
	/* Set up the receive filter. */
	jme_set_filter(sc);
	jme_set_vlan(sc);

	/*
	 * Disable all WOL bits as WOL can interfere with normal Rx
	 * operation.  Also clear the WOL detection status bits.
	 */
	reg = CSR_READ_4(sc, JME_PMCS);
	reg &= ~PMCS_WOL_ENB_MASK;
	CSR_WRITE_4(sc, JME_PMCS, reg);

	reg = CSR_READ_4(sc, JME_RXMAC);
	/*
	 * Pad 10 bytes right before the received frame.  This will
	 * greatly help Rx performance on strict-alignment
	 * architectures as the driver does not need to copy the frame
	 * to align the payload.
	 */
	reg |= RXMAC_PAD_10BYTES;
	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
		reg |= RXMAC_CSUM_ENB;
	CSR_WRITE_4(sc, JME_RXMAC, reg);

	/* Configure general purpose register 0. */
	reg = CSR_READ_4(sc, JME_GPREG0);
	reg &= ~GPREG0_PCC_UNIT_MASK;
	/* Set the PCC timer resolution to microseconds. */
	reg |= GPREG0_PCC_UNIT_US;
	/*
	 * Disable all shadow register posting as we have to read the
	 * JME_INTR_STATUS register in jme_int_task.  Also it seems
	 * that it's hard to synchronize interrupt status between
	 * hardware and software with shadow posting due to
	 * requirements of bus_dmamap_sync(9).
	 */
	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
	/* Disable posting of DW0. */
	reg &= ~GPREG0_POST_DW0_ENB;
	/* Clear PME message. */
	reg &= ~GPREG0_PME_ENB;
	/* Set the PHY address. */
	reg &= ~GPREG0_PHY_ADDR_MASK;
	reg |= sc->jme_phyaddr;
	CSR_WRITE_4(sc, JME_GPREG0, reg);

	/* Configure Tx queue 0 packet completion coalescing. */
	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
	    PCCTX_COAL_TO_MASK;
	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
	    PCCTX_COAL_PKT_MASK;
	reg |= PCCTX_COAL_TXQ0;
	CSR_WRITE_4(sc, JME_PCCTX, reg);

	/* Configure Rx queue 0 packet completion coalescing. */
	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
	    PCCRX_COAL_TO_MASK;
	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
	    PCCRX_COAL_PKT_MASK;
	CSR_WRITE_4(sc, JME_PCCRX0, reg);

	/*
	 * Configure PCD (Packet Completion Deferring).  It seems PCD
	 * generates an interrupt when the time interval between two
	 * back-to-back incoming/outgoing packets is long enough for
	 * the timer to reach its value 0.  The arrival of new packets
	 * after the timer has started restarts the PCD timer.
	 * Unfortunately it's not clear how PCD is useful at this
	 * moment, so just use the same values as the PCC parameters.
	 */
	if ((sc->jme_flags & JME_FLAG_PCCPCD) != 0) {
		sc->jme_rx_pcd_to = sc->jme_rx_coal_to;
		if (sc->jme_rx_coal_to > PCDRX_TO_MAX)
			sc->jme_rx_pcd_to = PCDRX_TO_MAX;
		sc->jme_tx_pcd_to = sc->jme_tx_coal_to;
		if (sc->jme_tx_coal_to > PCDTX_TO_MAX)
			sc->jme_tx_pcd_to = PCDTX_TO_MAX;
		reg = sc->jme_rx_pcd_to << PCDRX0_TO_THROTTLE_SHIFT;
		reg |= sc->jme_rx_pcd_to << PCDRX0_TO_SHIFT;
		CSR_WRITE_4(sc, PCDRX_REG(0), reg);
		reg = sc->jme_tx_pcd_to << PCDTX_TO_THROTTLE_SHIFT;
		reg |= sc->jme_tx_pcd_to << PCDTX_TO_SHIFT;
		CSR_WRITE_4(sc, JME_PCDTX, reg);
	}

	/* Configure the shadow status block but don't enable posting. */
	paddr = sc->jme_rdata.jme_ssb_block_paddr;
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));

	/* Disable Timer 1 and Timer 2. */
	CSR_WRITE_4(sc, JME_TIMER1, 0);
	CSR_WRITE_4(sc, JME_TIMER2, 0);

	/* Configure the retry transmit period and retry limit value. */
	CSR_WRITE_4(sc, JME_TXTRHD,
	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
	    TXTRHD_RT_PERIOD_MASK) |
	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
	    TXTRHD_RT_LIMIT_MASK));

	/* Disable RSS. */
	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);

	/* Initialize the interrupt mask. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/*
	 * Enabling the Tx/Rx DMA engines and Rx queue processing is
	 * done after detection of a valid link in jme_link_task.
	 */

	sc->jme_flags &= ~JME_FLAG_LINK;
	/* Set the current media. */
	mii_mediachg(mii);

	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}

static void
jme_stop(struct jme_softc *sc)
{
	struct ifnet *ifp;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	JME_LOCK_ASSERT(sc);
	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp = sc->jme_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->jme_flags &= ~JME_FLAG_LINK;
	callout_stop(&sc->jme_tick_ch);
	sc->jme_watchdog_timer = 0;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/* Disable updating of the shadow status block. */
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);

	/* Stop the receiver and transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	/* Reclaim Rx/Tx buffers that have been completed. */
	jme_rxintr(sc, JME_RX_RING_CNT);
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);
	jme_txeof(sc);
	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->jme_cdata.jme_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->jme_cdata.jme_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_ndesc = 0;
		}
	}
	jme_stats_update(sc);
	jme_stats_save(sc);
}

static void
jme_stop_tx(struct jme_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, JME_TXCSR);
	if ((reg & TXCSR_TX_ENB) == 0)
		return;
	reg &= ~TXCSR_TX_ENB;
	CSR_WRITE_4(sc, JME_TXCSR, reg);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
}

static void
jme_stop_rx(struct jme_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, JME_RXCSR);
	if ((reg & RXCSR_RX_ENB) == 0)
		return;
	reg &= ~RXCSR_RX_ENB;
	CSR_WRITE_4(sc, JME_RXCSR, reg);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->jme_dev, "stopping receiver timeout!\n");
}

static void
jme_init_tx_ring(struct jme_softc *sc)
{
	struct jme_ring_data *rd;
	struct jme_txdesc *txd;
	int i;

	sc->jme_cdata.jme_tx_prod = 0;
	sc->jme_cdata.jme_tx_cons = 0;
	sc->jme_cdata.jme_tx_cnt = 0;

	rd = &sc->jme_rdata;
	bzero(rd->jme_tx_ring, JME_TX_RING_SIZE);
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_desc = &rd->jme_tx_ring[i];
		txd->tx_ndesc = 0;
	}

	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static void
jme_init_ssb(struct jme_softc *sc)
{
	struct jme_ring_data *rd;

	rd = &sc->jme_rdata;
	bzero(rd->jme_ssb_block, JME_SSB_SIZE);
	bus_dmamap_sync(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static int
jme_init_rx_ring(struct jme_softc *sc)
{
	struct jme_ring_data *rd;
	struct jme_rxdesc *rxd;
	int i;

	sc->jme_cdata.jme_rx_cons = 0;
	JME_RXCHAIN_RESET(sc);
	sc->jme_morework = 0;

	rd = &sc->jme_rdata;
	bzero(rd->jme_rx_ring, JME_RX_RING_SIZE);
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->jme_rx_ring[i];
		if (jme_newbuf(sc, rxd) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
	    sc->jme_cdata.jme_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static int
jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd)
{
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	/*
	 * The JMC250 has a 64-bit boundary alignment limitation, so
	 * jme(4) takes advantage of the hardware's 10-byte padding
	 * feature in order not to copy the entire frame to align the
	 * IP header on a 32-bit boundary.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	if (bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_rx_tag,
	    sc->jme_cdata.jme_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
	sc->jme_cdata.jme_rx_sparemap = map;
	bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;

	desc = rxd->rx_desc;
	desc->buflen = htole32(segs[0].ds_len);
	desc->addr_lo = htole32(JME_ADDR_LO(segs[0].ds_addr));
	desc->addr_hi = htole32(JME_ADDR_HI(segs[0].ds_addr));
	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);

	return (0);
}

static void
jme_set_vlan(struct jme_softc *sc)
{
	struct ifnet *ifp;
	uint32_t reg;

	JME_LOCK_ASSERT(sc);

	ifp = sc->jme_ifp;
	reg = CSR_READ_4(sc, JME_RXMAC);
	reg &= ~RXMAC_VLAN_ENB;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		reg |= RXMAC_VLAN_ENB;
	CSR_WRITE_4(sc, JME_RXMAC, reg);
}

static void
jme_set_filter(struct jme_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	JME_LOCK_ASSERT(sc);

	ifp = sc->jme_ifp;

	rxcfg = CSR_READ_4(sc, JME_RXMAC);
	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
	    RXMAC_ALLMULTI);
	/* Always accept frames destined to our station address. */
	rxcfg |= RXMAC_UNICAST;
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		rxcfg |= RXMAC_BROADCAST;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			rxcfg |= RXMAC_PROMISC;
		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
			rxcfg |= RXMAC_ALLMULTI;
		CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
		CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
		return;
	}

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the low-order
	 * 6 bits as an index into the 64-bit multicast hash table.  The
	 * high-order bits select the register, while the rest of the bits
	 * select the bit within the register.
	 */
	rxcfg |= RXMAC_MULTICAST;
	bzero(mchash, sizeof(mchash));

	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &sc->jme_ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);

		/* Just want the 6 least significant bits. */
		crc &= 0x3f;

		/* Set the corresponding bit in the hash table. */
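		/*
		 * For example, a CRC whose low 6 bits are 0x2a (101010b)
		 * has bit 5 set, selecting mchash[1] (MAR1), and its low
		 * 5 bits select bit 10 within that register.
		 */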
		mchash[crc >> 5] |= 1 << (crc & 0x1f);
	}
	if_maddr_runlock(ifp);

	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
}

static void
jme_stats_clear(struct jme_softc *sc)
{

	JME_LOCK_ASSERT(sc);

	if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
		return;

	/* Disable and clear counters. */
	CSR_WRITE_4(sc, JME_STATCSR, 0xFFFFFFFF);
	/* Activate hw counters. */
	CSR_WRITE_4(sc, JME_STATCSR, 0);
	CSR_READ_4(sc, JME_STATCSR);
	bzero(&sc->jme_stats, sizeof(struct jme_hw_stats));
}

static void
jme_stats_save(struct jme_softc *sc)
{

	JME_LOCK_ASSERT(sc);

	if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
		return;
	/* Save current counters. */
	bcopy(&sc->jme_stats, &sc->jme_ostats, sizeof(struct jme_hw_stats));
	/* Disable and clear counters. */
	CSR_WRITE_4(sc, JME_STATCSR, 0xFFFFFFFF);
}

static void
jme_stats_update(struct jme_softc *sc)
{
	struct jme_hw_stats *stat, *ostat;
	uint32_t reg;

	JME_LOCK_ASSERT(sc);

	if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
		return;
	stat = &sc->jme_stats;
	ostat = &sc->jme_ostats;
	stat->tx_good_frames = CSR_READ_4(sc, JME_STAT_TXGOOD);
	stat->rx_good_frames = CSR_READ_4(sc, JME_STAT_RXGOOD);
	reg = CSR_READ_4(sc, JME_STAT_CRCMII);
	stat->rx_crc_errs = (reg & STAT_RX_CRC_ERR_MASK) >>
	    STAT_RX_CRC_ERR_SHIFT;
	stat->rx_mii_errs = (reg & STAT_RX_MII_ERR_MASK) >>
	    STAT_RX_MII_ERR_SHIFT;
	reg = CSR_READ_4(sc, JME_STAT_RXERR);
	stat->rx_fifo_oflows = (reg & STAT_RXERR_OFLOW_MASK) >>
	    STAT_RXERR_OFLOW_SHIFT;
	stat->rx_desc_empty = (reg & STAT_RXERR_MPTY_MASK) >>
	    STAT_RXERR_MPTY_SHIFT;
	reg = CSR_READ_4(sc, JME_STAT_FAIL);
	stat->rx_bad_frames = (reg & STAT_FAIL_RX_MASK) >> STAT_FAIL_RX_SHIFT;
	stat->tx_bad_frames = (reg & STAT_FAIL_TX_MASK) >> STAT_FAIL_TX_SHIFT;

	/* Account for previous counters. */
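	/*
	 * jme_stats_save() snapshots the running totals into jme_ostats
	 * and clears the hardware MIB counters, so the values read above
	 * are deltas since the last save; adding the saved totals keeps
	 * the statistics monotonically increasing across MAC resets.
	 */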
	stat->rx_good_frames += ostat->rx_good_frames;
	stat->rx_crc_errs += ostat->rx_crc_errs;
	stat->rx_mii_errs += ostat->rx_mii_errs;
	stat->rx_fifo_oflows += ostat->rx_fifo_oflows;
	stat->rx_desc_empty += ostat->rx_desc_empty;
	stat->rx_bad_frames += ostat->rx_bad_frames;
	stat->tx_good_frames += ostat->tx_good_frames;
	stat->tx_bad_frames += ostat->tx_bad_frames;
}

static void
jme_phy_down(struct jme_softc *sc)
{
	uint32_t reg;

	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, BMCR_PDOWN);
	if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5) {
		reg = CSR_READ_4(sc, JME_PHYPOWDN);
		reg |= 0x0000000F;
		CSR_WRITE_4(sc, JME_PHYPOWDN, reg);
		reg = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4);
		reg &= ~PE1_GIGA_PDOWN_MASK;
		reg |= PE1_GIGA_PDOWN_D3;
		pci_write_config(sc->jme_dev, JME_PCI_PE1, reg, 4);
	}
}

static void
jme_phy_up(struct jme_softc *sc)
{
	uint32_t reg;
	uint16_t bmcr;

	bmcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR);
	bmcr &= ~BMCR_PDOWN;
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, bmcr);
	if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5) {
		reg = CSR_READ_4(sc, JME_PHYPOWDN);
		reg &= ~0x0000000F;
		CSR_WRITE_4(sc, JME_PHYPOWDN, reg);
		reg = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4);
		reg &= ~PE1_GIGA_PDOWN_MASK;
		reg |= PE1_GIGA_PDOWN_DIS;
		pci_write_config(sc->jme_dev, JME_PCI_PE1, reg, 4);
	}
}

static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}

static int
sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    PCCTX_COAL_TO_MIN, PCCTX_COAL_TO_MAX));
}

static int
sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    PCCTX_COAL_PKT_MIN, PCCTX_COAL_PKT_MAX));
}

static int
sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    PCCRX_COAL_TO_MIN, PCCRX_COAL_TO_MAX));
}

static int
sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    PCCRX_COAL_PKT_MIN, PCCRX_COAL_PKT_MAX));
}

static int
sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    JME_PROC_MIN, JME_PROC_MAX));
}