1 /*- 2 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 #include <sys/param.h> 32 #include <sys/systm.h> 33 #include <sys/bus.h> 34 #include <sys/endian.h> 35 #include <sys/kernel.h> 36 #include <sys/malloc.h> 37 #include <sys/mbuf.h> 38 #include <sys/rman.h> 39 #include <sys/module.h> 40 #include <sys/proc.h> 41 #include <sys/queue.h> 42 #include <sys/socket.h> 43 #include <sys/sockio.h> 44 #include <sys/sysctl.h> 45 #include <sys/taskqueue.h> 46 47 #include <net/bpf.h> 48 #include <net/if.h> 49 #include <net/if_var.h> 50 #include <net/if_arp.h> 51 #include <net/ethernet.h> 52 #include <net/if_dl.h> 53 #include <net/if_media.h> 54 #include <net/if_types.h> 55 #include <net/if_vlan_var.h> 56 57 #include <netinet/in.h> 58 #include <netinet/in_systm.h> 59 #include <netinet/ip.h> 60 #include <netinet/tcp.h> 61 62 #include <dev/mii/mii.h> 63 #include <dev/mii/miivar.h> 64 65 #include <dev/pci/pcireg.h> 66 #include <dev/pci/pcivar.h> 67 68 #include <machine/bus.h> 69 #include <machine/in_cksum.h> 70 71 #include <dev/jme/if_jmereg.h> 72 #include <dev/jme/if_jmevar.h> 73 74 /* "device miibus" required. See GENERIC if you get errors here. */ 75 #include "miibus_if.h" 76 77 /* Define the following to disable printing Rx errors. */ 78 #undef JME_SHOW_ERRORS 79 80 #define JME_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 81 82 MODULE_DEPEND(jme, pci, 1, 1, 1); 83 MODULE_DEPEND(jme, ether, 1, 1, 1); 84 MODULE_DEPEND(jme, miibus, 1, 1, 1); 85 86 /* Tunables. */ 87 static int msi_disable = 0; 88 static int msix_disable = 0; 89 TUNABLE_INT("hw.jme.msi_disable", &msi_disable); 90 TUNABLE_INT("hw.jme.msix_disable", &msix_disable); 91 92 /* 93 * Devices supported by this driver. 
94 */ 95 static struct jme_dev { 96 uint16_t jme_vendorid; 97 uint16_t jme_deviceid; 98 const char *jme_name; 99 } jme_devs[] = { 100 { VENDORID_JMICRON, DEVICEID_JMC250, 101 "JMicron Inc, JMC25x Gigabit Ethernet" }, 102 { VENDORID_JMICRON, DEVICEID_JMC260, 103 "JMicron Inc, JMC26x Fast Ethernet" }, 104 }; 105 106 static int jme_miibus_readreg(device_t, int, int); 107 static int jme_miibus_writereg(device_t, int, int, int); 108 static void jme_miibus_statchg(device_t); 109 static void jme_mediastatus(struct ifnet *, struct ifmediareq *); 110 static int jme_mediachange(struct ifnet *); 111 static int jme_probe(device_t); 112 static int jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *); 113 static int jme_eeprom_macaddr(struct jme_softc *); 114 static int jme_efuse_macaddr(struct jme_softc *); 115 static void jme_reg_macaddr(struct jme_softc *); 116 static void jme_set_macaddr(struct jme_softc *, uint8_t *); 117 static void jme_map_intr_vector(struct jme_softc *); 118 static int jme_attach(device_t); 119 static int jme_detach(device_t); 120 static void jme_sysctl_node(struct jme_softc *); 121 static void jme_dmamap_cb(void *, bus_dma_segment_t *, int, int); 122 static int jme_dma_alloc(struct jme_softc *); 123 static void jme_dma_free(struct jme_softc *); 124 static int jme_shutdown(device_t); 125 static void jme_setlinkspeed(struct jme_softc *); 126 static void jme_setwol(struct jme_softc *); 127 static int jme_suspend(device_t); 128 static int jme_resume(device_t); 129 static int jme_encap(struct jme_softc *, struct mbuf **); 130 static void jme_start(struct ifnet *); 131 static void jme_start_locked(struct ifnet *); 132 static void jme_watchdog(struct jme_softc *); 133 static int jme_ioctl(struct ifnet *, u_long, caddr_t); 134 static void jme_mac_config(struct jme_softc *); 135 static void jme_link_task(void *, int); 136 static int jme_intr(void *); 137 static void jme_int_task(void *, int); 138 static void jme_txeof(struct jme_softc *); 139 static __inline void jme_discard_rxbuf(struct jme_softc *, int); 140 static void jme_rxeof(struct jme_softc *); 141 static int jme_rxintr(struct jme_softc *, int); 142 static void jme_tick(void *); 143 static void jme_reset(struct jme_softc *); 144 static void jme_init(void *); 145 static void jme_init_locked(struct jme_softc *); 146 static void jme_stop(struct jme_softc *); 147 static void jme_stop_tx(struct jme_softc *); 148 static void jme_stop_rx(struct jme_softc *); 149 static int jme_init_rx_ring(struct jme_softc *); 150 static void jme_init_tx_ring(struct jme_softc *); 151 static void jme_init_ssb(struct jme_softc *); 152 static int jme_newbuf(struct jme_softc *, struct jme_rxdesc *); 153 static void jme_set_vlan(struct jme_softc *); 154 static void jme_set_filter(struct jme_softc *); 155 static void jme_stats_clear(struct jme_softc *); 156 static void jme_stats_save(struct jme_softc *); 157 static void jme_stats_update(struct jme_softc *); 158 static void jme_phy_down(struct jme_softc *); 159 static void jme_phy_up(struct jme_softc *); 160 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); 161 static int sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS); 162 static int sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS); 163 static int sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS); 164 static int sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS); 165 static int sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS); 166 167 168 static device_method_t jme_methods[] = { 169 /* Device interface. 
*/ 170 DEVMETHOD(device_probe, jme_probe), 171 DEVMETHOD(device_attach, jme_attach), 172 DEVMETHOD(device_detach, jme_detach), 173 DEVMETHOD(device_shutdown, jme_shutdown), 174 DEVMETHOD(device_suspend, jme_suspend), 175 DEVMETHOD(device_resume, jme_resume), 176 177 /* MII interface. */ 178 DEVMETHOD(miibus_readreg, jme_miibus_readreg), 179 DEVMETHOD(miibus_writereg, jme_miibus_writereg), 180 DEVMETHOD(miibus_statchg, jme_miibus_statchg), 181 182 { NULL, NULL } 183 }; 184 185 static driver_t jme_driver = { 186 "jme", 187 jme_methods, 188 sizeof(struct jme_softc) 189 }; 190 191 static devclass_t jme_devclass; 192 193 DRIVER_MODULE(jme, pci, jme_driver, jme_devclass, 0, 0); 194 DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, 0, 0); 195 196 static struct resource_spec jme_res_spec_mem[] = { 197 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE }, 198 { -1, 0, 0 } 199 }; 200 201 static struct resource_spec jme_irq_spec_legacy[] = { 202 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, 203 { -1, 0, 0 } 204 }; 205 206 static struct resource_spec jme_irq_spec_msi[] = { 207 { SYS_RES_IRQ, 1, RF_ACTIVE }, 208 { -1, 0, 0 } 209 }; 210 211 /* 212 * Read a PHY register on the MII of the JMC250. 213 */ 214 static int 215 jme_miibus_readreg(device_t dev, int phy, int reg) 216 { 217 struct jme_softc *sc; 218 uint32_t val; 219 int i; 220 221 sc = device_get_softc(dev); 222 223 /* For FPGA version, PHY address 0 should be ignored. */ 224 if ((sc->jme_flags & JME_FLAG_FPGA) != 0 && phy == 0) 225 return (0); 226 227 CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE | 228 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg)); 229 for (i = JME_PHY_TIMEOUT; i > 0; i--) { 230 DELAY(1); 231 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0) 232 break; 233 } 234 235 if (i == 0) { 236 device_printf(sc->jme_dev, "phy read timeout : %d\n", reg); 237 return (0); 238 } 239 240 return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT); 241 } 242 243 /* 244 * Write a PHY register on the MII of the JMC250. 245 */ 246 static int 247 jme_miibus_writereg(device_t dev, int phy, int reg, int val) 248 { 249 struct jme_softc *sc; 250 int i; 251 252 sc = device_get_softc(dev); 253 254 /* For FPGA version, PHY address 0 should be ignored. */ 255 if ((sc->jme_flags & JME_FLAG_FPGA) != 0 && phy == 0) 256 return (0); 257 258 CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE | 259 ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) | 260 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg)); 261 for (i = JME_PHY_TIMEOUT; i > 0; i--) { 262 DELAY(1); 263 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0) 264 break; 265 } 266 267 if (i == 0) 268 device_printf(sc->jme_dev, "phy write timeout : %d\n", reg); 269 270 return (0); 271 } 272 273 /* 274 * Callback from MII layer when media changes. 275 */ 276 static void 277 jme_miibus_statchg(device_t dev) 278 { 279 struct jme_softc *sc; 280 281 sc = device_get_softc(dev); 282 taskqueue_enqueue(taskqueue_swi, &sc->jme_link_task); 283 } 284 285 /* 286 * Get the current interface media status. 287 */ 288 static void 289 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 290 { 291 struct jme_softc *sc; 292 struct mii_data *mii; 293 294 sc = ifp->if_softc; 295 JME_LOCK(sc); 296 if ((ifp->if_flags & IFF_UP) == 0) { 297 JME_UNLOCK(sc); 298 return; 299 } 300 mii = device_get_softc(sc->jme_miibus); 301 302 mii_pollstat(mii); 303 ifmr->ifm_status = mii->mii_media_status; 304 ifmr->ifm_active = mii->mii_media_active; 305 JME_UNLOCK(sc); 306 } 307 308 /* 309 * Set hardware to newly-selected media. 
 */
static int
jme_mediachange(struct ifnet *ifp)
{
	struct jme_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	JME_LOCK(sc);
	mii = device_get_softc(sc->jme_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);
	JME_UNLOCK(sc);

	return (error);
}

static int
jme_probe(device_t dev)
{
	struct jme_dev *sp;
	int i;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	sp = jme_devs;
	for (i = 0; i < sizeof(jme_devs) / sizeof(jme_devs[0]);
	    i++, sp++) {
		if (vendor == sp->jme_vendorid &&
		    devid == sp->jme_deviceid) {
			device_set_desc(dev, sp->jme_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	*val = 0;
	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
		return (ETIMEDOUT);
	}

	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
		return (ETIMEDOUT);
	}

	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}

static int
jme_eeprom_macaddr(struct jme_softc *sc)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN) {
		bcopy(eaddr, sc->jme_eaddr, ETHER_ADDR_LEN);
		return (0);
	}

	return (ENOENT);
}

static int
jme_efuse_macaddr(struct jme_softc *sc)
{
	uint32_t reg;
	int i;

	reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4);
	if ((reg & (EFUSE_CTL1_AUTOLOAD_ERR | EFUSE_CTL1_AUTOLAOD_DONE)) !=
	    EFUSE_CTL1_AUTOLAOD_DONE)
		return (ENOENT);
	/* Reset eFuse controller.
 */
	reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL2, 4);
	reg |= EFUSE_CTL2_RESET;
	pci_write_config(sc->jme_dev, JME_EFUSE_CTL2, reg, 4);
	reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL2, 4);
	reg &= ~EFUSE_CTL2_RESET;
	pci_write_config(sc->jme_dev, JME_EFUSE_CTL2, reg, 4);

	/* Have eFuse reload station address to MAC controller. */
	reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4);
	reg &= ~EFUSE_CTL1_CMD_MASK;
	reg |= EFUSE_CTL1_CMD_AUTOLOAD | EFUSE_CTL1_EXECUTE;
	pci_write_config(sc->jme_dev, JME_EFUSE_CTL1, reg, 4);

	/*
	 * Verify completion of the eFuse autoload command. It should
	 * complete within 108us.
	 */
	DELAY(110);
	for (i = 10; i > 0; i--) {
		reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4);
		if ((reg & (EFUSE_CTL1_AUTOLOAD_ERR |
		    EFUSE_CTL1_AUTOLAOD_DONE)) != EFUSE_CTL1_AUTOLAOD_DONE) {
			DELAY(20);
			continue;
		}
		if ((reg & EFUSE_CTL1_EXECUTE) == 0)
			break;
		/* Station address loading is still in progress. */
		DELAY(20);
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "eFuse autoload timed out.\n");
		return (ETIMEDOUT);
	}

	return (0);
}

static void
jme_reg_macaddr(struct jme_softc *sc)
{
	uint32_t par0, par1;

	/* Read station address. */
	par0 = CSR_READ_4(sc, JME_PAR0);
	par1 = CSR_READ_4(sc, JME_PAR1);
	par1 &= 0xFFFF;
	if ((par0 == 0 && par1 == 0) ||
	    (par0 == 0xFFFFFFFF && par1 == 0xFFFF)) {
		device_printf(sc->jme_dev,
		    "Failed to retrieve Ethernet address.\n");
	} else {
		/*
		 * For controllers that use eFuse, the station address
		 * could also be extracted from the JME_PCI_PAR0 and
		 * JME_PCI_PAR1 registers in PCI configuration space.
		 * Each of those registers holds exactly half of the
		 * station address (24 bits), so use the JME_PAR0 and
		 * JME_PAR1 CSRs instead.
		 */
		sc->jme_eaddr[0] = (par0 >> 0) & 0xFF;
		sc->jme_eaddr[1] = (par0 >> 8) & 0xFF;
		sc->jme_eaddr[2] = (par0 >> 16) & 0xFF;
		sc->jme_eaddr[3] = (par0 >> 24) & 0xFF;
		sc->jme_eaddr[4] = (par1 >> 0) & 0xFF;
		sc->jme_eaddr[5] = (par1 >> 8) & 0xFF;
	}
}

static void
jme_set_macaddr(struct jme_softc *sc, uint8_t *eaddr)
{
	uint32_t val;
	int i;

	if ((sc->jme_flags & JME_FLAG_EFUSE) != 0) {
		/*
		 * Avoid reprogramming the station address if it is the
		 * same as the previous one. Note that a reprogrammed
		 * station address is permanent, as if it had been written
		 * to EEPROM, so if the station address was changed by the
		 * administrator it is possible to lose the factory
		 * configured address when the driver fails to restore it
		 * (e.g.
reboot or system crash) 532 */ 533 if (bcmp(eaddr, sc->jme_eaddr, ETHER_ADDR_LEN) != 0) { 534 for (i = 0; i < ETHER_ADDR_LEN; i++) { 535 val = JME_EFUSE_EEPROM_FUNC0 << 536 JME_EFUSE_EEPROM_FUNC_SHIFT; 537 val |= JME_EFUSE_EEPROM_PAGE_BAR1 << 538 JME_EFUSE_EEPROM_PAGE_SHIFT; 539 val |= (JME_PAR0 + i) << 540 JME_EFUSE_EEPROM_ADDR_SHIFT; 541 val |= eaddr[i] << JME_EFUSE_EEPROM_DATA_SHIFT; 542 pci_write_config(sc->jme_dev, JME_EFUSE_EEPROM, 543 val | JME_EFUSE_EEPROM_WRITE, 4); 544 } 545 } 546 } else { 547 CSR_WRITE_4(sc, JME_PAR0, 548 eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]); 549 CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]); 550 } 551 } 552 553 static void 554 jme_map_intr_vector(struct jme_softc *sc) 555 { 556 uint32_t map[MSINUM_NUM_INTR_SOURCE / JME_MSI_MESSAGES]; 557 558 bzero(map, sizeof(map)); 559 560 /* Map Tx interrupts source to MSI/MSIX vector 2. */ 561 map[MSINUM_REG_INDEX(N_INTR_TXQ0_COMP)] = 562 MSINUM_INTR_SOURCE(2, N_INTR_TXQ0_COMP); 563 map[MSINUM_REG_INDEX(N_INTR_TXQ1_COMP)] |= 564 MSINUM_INTR_SOURCE(2, N_INTR_TXQ1_COMP); 565 map[MSINUM_REG_INDEX(N_INTR_TXQ2_COMP)] |= 566 MSINUM_INTR_SOURCE(2, N_INTR_TXQ2_COMP); 567 map[MSINUM_REG_INDEX(N_INTR_TXQ3_COMP)] |= 568 MSINUM_INTR_SOURCE(2, N_INTR_TXQ3_COMP); 569 map[MSINUM_REG_INDEX(N_INTR_TXQ4_COMP)] |= 570 MSINUM_INTR_SOURCE(2, N_INTR_TXQ4_COMP); 571 map[MSINUM_REG_INDEX(N_INTR_TXQ4_COMP)] |= 572 MSINUM_INTR_SOURCE(2, N_INTR_TXQ5_COMP); 573 map[MSINUM_REG_INDEX(N_INTR_TXQ6_COMP)] |= 574 MSINUM_INTR_SOURCE(2, N_INTR_TXQ6_COMP); 575 map[MSINUM_REG_INDEX(N_INTR_TXQ7_COMP)] |= 576 MSINUM_INTR_SOURCE(2, N_INTR_TXQ7_COMP); 577 map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL)] |= 578 MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL); 579 map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL_TO)] |= 580 MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL_TO); 581 582 /* Map Rx interrupts source to MSI/MSIX vector 1. */ 583 map[MSINUM_REG_INDEX(N_INTR_RXQ0_COMP)] = 584 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COMP); 585 map[MSINUM_REG_INDEX(N_INTR_RXQ1_COMP)] = 586 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COMP); 587 map[MSINUM_REG_INDEX(N_INTR_RXQ2_COMP)] = 588 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COMP); 589 map[MSINUM_REG_INDEX(N_INTR_RXQ3_COMP)] = 590 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COMP); 591 map[MSINUM_REG_INDEX(N_INTR_RXQ0_DESC_EMPTY)] = 592 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_DESC_EMPTY); 593 map[MSINUM_REG_INDEX(N_INTR_RXQ1_DESC_EMPTY)] = 594 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_DESC_EMPTY); 595 map[MSINUM_REG_INDEX(N_INTR_RXQ2_DESC_EMPTY)] = 596 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_DESC_EMPTY); 597 map[MSINUM_REG_INDEX(N_INTR_RXQ3_DESC_EMPTY)] = 598 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_DESC_EMPTY); 599 map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL)] = 600 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL); 601 map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL)] = 602 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL); 603 map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL)] = 604 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL); 605 map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL)] = 606 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL); 607 map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL_TO)] = 608 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL_TO); 609 map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL_TO)] = 610 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL_TO); 611 map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL_TO)] = 612 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL_TO); 613 map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL_TO)] = 614 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL_TO); 615 616 /* Map all other interrupts source to MSI/MSIX vector 0. 
*/ 617 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 0, map[0]); 618 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 1, map[1]); 619 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 2, map[2]); 620 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 3, map[3]); 621 } 622 623 static int 624 jme_attach(device_t dev) 625 { 626 struct jme_softc *sc; 627 struct ifnet *ifp; 628 struct mii_softc *miisc; 629 struct mii_data *mii; 630 uint32_t reg; 631 uint16_t burst; 632 int error, i, mii_flags, msic, msixc, pmc; 633 634 error = 0; 635 sc = device_get_softc(dev); 636 sc->jme_dev = dev; 637 638 mtx_init(&sc->jme_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 639 MTX_DEF); 640 callout_init_mtx(&sc->jme_tick_ch, &sc->jme_mtx, 0); 641 TASK_INIT(&sc->jme_int_task, 0, jme_int_task, sc); 642 TASK_INIT(&sc->jme_link_task, 0, jme_link_task, sc); 643 644 /* 645 * Map the device. JMC250 supports both memory mapped and I/O 646 * register space access. Because I/O register access should 647 * use different BARs to access registers it's waste of time 648 * to use I/O register spce access. JMC250 uses 16K to map 649 * entire memory space. 650 */ 651 pci_enable_busmaster(dev); 652 sc->jme_res_spec = jme_res_spec_mem; 653 sc->jme_irq_spec = jme_irq_spec_legacy; 654 error = bus_alloc_resources(dev, sc->jme_res_spec, sc->jme_res); 655 if (error != 0) { 656 device_printf(dev, "cannot allocate memory resources.\n"); 657 goto fail; 658 } 659 660 /* Allocate IRQ resources. */ 661 msixc = pci_msix_count(dev); 662 msic = pci_msi_count(dev); 663 if (bootverbose) { 664 device_printf(dev, "MSIX count : %d\n", msixc); 665 device_printf(dev, "MSI count : %d\n", msic); 666 } 667 668 /* Use 1 MSI/MSI-X. */ 669 if (msixc > 1) 670 msixc = 1; 671 if (msic > 1) 672 msic = 1; 673 /* Prefer MSIX over MSI. */ 674 if (msix_disable == 0 || msi_disable == 0) { 675 if (msix_disable == 0 && msixc > 0 && 676 pci_alloc_msix(dev, &msixc) == 0) { 677 if (msixc == 1) { 678 device_printf(dev, "Using %d MSIX messages.\n", 679 msixc); 680 sc->jme_flags |= JME_FLAG_MSIX; 681 sc->jme_irq_spec = jme_irq_spec_msi; 682 } else 683 pci_release_msi(dev); 684 } 685 if (msi_disable == 0 && (sc->jme_flags & JME_FLAG_MSIX) == 0 && 686 msic > 0 && pci_alloc_msi(dev, &msic) == 0) { 687 if (msic == 1) { 688 device_printf(dev, "Using %d MSI messages.\n", 689 msic); 690 sc->jme_flags |= JME_FLAG_MSI; 691 sc->jme_irq_spec = jme_irq_spec_msi; 692 } else 693 pci_release_msi(dev); 694 } 695 /* Map interrupt vector 0, 1 and 2. 
*/ 696 if ((sc->jme_flags & JME_FLAG_MSI) != 0 || 697 (sc->jme_flags & JME_FLAG_MSIX) != 0) 698 jme_map_intr_vector(sc); 699 } 700 701 error = bus_alloc_resources(dev, sc->jme_irq_spec, sc->jme_irq); 702 if (error != 0) { 703 device_printf(dev, "cannot allocate IRQ resources.\n"); 704 goto fail; 705 } 706 707 sc->jme_rev = pci_get_device(dev); 708 if ((sc->jme_rev & DEVICEID_JMC2XX_MASK) == DEVICEID_JMC260) { 709 sc->jme_flags |= JME_FLAG_FASTETH; 710 sc->jme_flags |= JME_FLAG_NOJUMBO; 711 } 712 reg = CSR_READ_4(sc, JME_CHIPMODE); 713 sc->jme_chip_rev = (reg & CHIPMODE_REV_MASK) >> CHIPMODE_REV_SHIFT; 714 if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) != 715 CHIPMODE_NOT_FPGA) 716 sc->jme_flags |= JME_FLAG_FPGA; 717 if (bootverbose) { 718 device_printf(dev, "PCI device revision : 0x%04x\n", 719 sc->jme_rev); 720 device_printf(dev, "Chip revision : 0x%02x\n", 721 sc->jme_chip_rev); 722 if ((sc->jme_flags & JME_FLAG_FPGA) != 0) 723 device_printf(dev, "FPGA revision : 0x%04x\n", 724 (reg & CHIPMODE_FPGA_REV_MASK) >> 725 CHIPMODE_FPGA_REV_SHIFT); 726 } 727 if (sc->jme_chip_rev == 0xFF) { 728 device_printf(dev, "Unknown chip revision : 0x%02x\n", 729 sc->jme_rev); 730 error = ENXIO; 731 goto fail; 732 } 733 734 /* Identify controller features and bugs. */ 735 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 2) { 736 if ((sc->jme_rev & DEVICEID_JMC2XX_MASK) == DEVICEID_JMC260 && 737 CHIPMODE_REVFM(sc->jme_chip_rev) == 2) 738 sc->jme_flags |= JME_FLAG_DMA32BIT; 739 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5) 740 sc->jme_flags |= JME_FLAG_EFUSE | JME_FLAG_PCCPCD; 741 sc->jme_flags |= JME_FLAG_TXCLK | JME_FLAG_RXCLK; 742 sc->jme_flags |= JME_FLAG_HWMIB; 743 } 744 745 /* Reset the ethernet controller. */ 746 jme_reset(sc); 747 748 /* Get station address. */ 749 if ((sc->jme_flags & JME_FLAG_EFUSE) != 0) { 750 error = jme_efuse_macaddr(sc); 751 if (error == 0) 752 jme_reg_macaddr(sc); 753 } else { 754 error = ENOENT; 755 reg = CSR_READ_4(sc, JME_SMBCSR); 756 if ((reg & SMBCSR_EEPROM_PRESENT) != 0) 757 error = jme_eeprom_macaddr(sc); 758 if (error != 0 && bootverbose) 759 device_printf(sc->jme_dev, 760 "ethernet hardware address not found in EEPROM.\n"); 761 if (error != 0) 762 jme_reg_macaddr(sc); 763 } 764 765 /* 766 * Save PHY address. 767 * Integrated JR0211 has fixed PHY address whereas FPGA version 768 * requires PHY probing to get correct PHY address. 769 */ 770 if ((sc->jme_flags & JME_FLAG_FPGA) == 0) { 771 sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) & 772 GPREG0_PHY_ADDR_MASK; 773 if (bootverbose) 774 device_printf(dev, "PHY is at address %d.\n", 775 sc->jme_phyaddr); 776 } else 777 sc->jme_phyaddr = 0; 778 779 /* Set max allowable DMA size. */ 780 if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) { 781 sc->jme_flags |= JME_FLAG_PCIE; 782 burst = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2); 783 if (bootverbose) { 784 device_printf(dev, "Read request size : %d bytes.\n", 785 128 << ((burst >> 12) & 0x07)); 786 device_printf(dev, "TLP payload size : %d bytes.\n", 787 128 << ((burst >> 5) & 0x07)); 788 } 789 switch ((burst >> 12) & 0x07) { 790 case 0: 791 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128; 792 break; 793 case 1: 794 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256; 795 break; 796 default: 797 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512; 798 break; 799 } 800 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128; 801 } else { 802 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512; 803 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128; 804 } 805 /* Create coalescing sysctl node. 
 */
	jme_sysctl_node(sc);
	if ((error = jme_dma_alloc(sc)) != 0)
		goto fail;

	ifp = sc->jme_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure.\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
	ifp->if_init = jme_init;
	ifp->if_snd.ifq_drv_maxlen = JME_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	/* JMC250 supports Tx/Rx checksum offload as well as TSO. */
	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4;
	ifp->if_hwassist = JME_CSUM_FEATURES | CSUM_TSO;
	if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) {
		sc->jme_flags |= JME_FLAG_PMCAP;
		ifp->if_capabilities |= IFCAP_WOL_MAGIC;
	}
	ifp->if_capenable = ifp->if_capabilities;

	/* Wakeup PHY. */
	jme_phy_up(sc);
	mii_flags = MIIF_DOPAUSE;
	/* Ask the PHY driver to perform PHY calibration. */
	if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5)
		mii_flags |= MIIF_MACPRIV0;
	/* Set up MII bus. */
	error = mii_attach(dev, &sc->jme_miibus, ifp, jme_mediachange,
	    jme_mediastatus, BMSR_DEFCAPMASK,
	    sc->jme_flags & JME_FLAG_FPGA ? MII_PHY_ANY : sc->jme_phyaddr,
	    MII_OFFSET_ANY, mii_flags);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	/*
	 * Force PHY to FPGA mode.
	 */
	if ((sc->jme_flags & JME_FLAG_FPGA) != 0) {
		mii = device_get_softc(sc->jme_miibus);
		if (mii->mii_instance != 0) {
			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
				if (miisc->mii_phy != 0) {
					sc->jme_phyaddr = miisc->mii_phy;
					break;
				}
			}
			if (sc->jme_phyaddr != 0) {
				device_printf(sc->jme_dev,
				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
				/* vendor magic. */
				jme_miibus_writereg(dev, sc->jme_phyaddr, 27,
				    0x0004);
			}
		}
	}

	ether_ifattach(ifp, sc->jme_eaddr);

	/* VLAN capability setup */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO;
	ifp->if_capenable = ifp->if_capabilities;

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/* Create local taskq.
*/ 884 sc->jme_tq = taskqueue_create_fast("jme_taskq", M_WAITOK, 885 taskqueue_thread_enqueue, &sc->jme_tq); 886 if (sc->jme_tq == NULL) { 887 device_printf(dev, "could not create taskqueue.\n"); 888 ether_ifdetach(ifp); 889 error = ENXIO; 890 goto fail; 891 } 892 taskqueue_start_threads(&sc->jme_tq, 1, PI_NET, "%s taskq", 893 device_get_nameunit(sc->jme_dev)); 894 895 for (i = 0; i < 1; i++) { 896 error = bus_setup_intr(dev, sc->jme_irq[i], 897 INTR_TYPE_NET | INTR_MPSAFE, jme_intr, NULL, sc, 898 &sc->jme_intrhand[i]); 899 if (error != 0) 900 break; 901 } 902 903 if (error != 0) { 904 device_printf(dev, "could not set up interrupt handler.\n"); 905 taskqueue_free(sc->jme_tq); 906 sc->jme_tq = NULL; 907 ether_ifdetach(ifp); 908 goto fail; 909 } 910 911 fail: 912 if (error != 0) 913 jme_detach(dev); 914 915 return (error); 916 } 917 918 static int 919 jme_detach(device_t dev) 920 { 921 struct jme_softc *sc; 922 struct ifnet *ifp; 923 int i; 924 925 sc = device_get_softc(dev); 926 927 ifp = sc->jme_ifp; 928 if (device_is_attached(dev)) { 929 JME_LOCK(sc); 930 sc->jme_flags |= JME_FLAG_DETACH; 931 jme_stop(sc); 932 JME_UNLOCK(sc); 933 callout_drain(&sc->jme_tick_ch); 934 taskqueue_drain(sc->jme_tq, &sc->jme_int_task); 935 taskqueue_drain(taskqueue_swi, &sc->jme_link_task); 936 /* Restore possibly modified station address. */ 937 if ((sc->jme_flags & JME_FLAG_EFUSE) != 0) 938 jme_set_macaddr(sc, sc->jme_eaddr); 939 ether_ifdetach(ifp); 940 } 941 942 if (sc->jme_tq != NULL) { 943 taskqueue_drain(sc->jme_tq, &sc->jme_int_task); 944 taskqueue_free(sc->jme_tq); 945 sc->jme_tq = NULL; 946 } 947 948 if (sc->jme_miibus != NULL) { 949 device_delete_child(dev, sc->jme_miibus); 950 sc->jme_miibus = NULL; 951 } 952 bus_generic_detach(dev); 953 jme_dma_free(sc); 954 955 if (ifp != NULL) { 956 if_free(ifp); 957 sc->jme_ifp = NULL; 958 } 959 960 for (i = 0; i < 1; i++) { 961 if (sc->jme_intrhand[i] != NULL) { 962 bus_teardown_intr(dev, sc->jme_irq[i], 963 sc->jme_intrhand[i]); 964 sc->jme_intrhand[i] = NULL; 965 } 966 } 967 968 if (sc->jme_irq[0] != NULL) 969 bus_release_resources(dev, sc->jme_irq_spec, sc->jme_irq); 970 if ((sc->jme_flags & (JME_FLAG_MSIX | JME_FLAG_MSI)) != 0) 971 pci_release_msi(dev); 972 if (sc->jme_res[0] != NULL) 973 bus_release_resources(dev, sc->jme_res_spec, sc->jme_res); 974 mtx_destroy(&sc->jme_mtx); 975 976 return (0); 977 } 978 979 #define JME_SYSCTL_STAT_ADD32(c, h, n, p, d) \ 980 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) 981 982 static void 983 jme_sysctl_node(struct jme_softc *sc) 984 { 985 struct sysctl_ctx_list *ctx; 986 struct sysctl_oid_list *child, *parent; 987 struct sysctl_oid *tree; 988 struct jme_hw_stats *stats; 989 int error; 990 991 stats = &sc->jme_stats; 992 ctx = device_get_sysctl_ctx(sc->jme_dev); 993 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev)); 994 995 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_coal_to", 996 CTLTYPE_INT | CTLFLAG_RW, &sc->jme_tx_coal_to, 0, 997 sysctl_hw_jme_tx_coal_to, "I", "jme tx coalescing timeout"); 998 999 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_coal_pkt", 1000 CTLTYPE_INT | CTLFLAG_RW, &sc->jme_tx_coal_pkt, 0, 1001 sysctl_hw_jme_tx_coal_pkt, "I", "jme tx coalescing packet"); 1002 1003 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_coal_to", 1004 CTLTYPE_INT | CTLFLAG_RW, &sc->jme_rx_coal_to, 0, 1005 sysctl_hw_jme_rx_coal_to, "I", "jme rx coalescing timeout"); 1006 1007 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_coal_pkt", 1008 CTLTYPE_INT | CTLFLAG_RW, &sc->jme_rx_coal_pkt, 0, 1009 sysctl_hw_jme_rx_coal_pkt, 
"I", "jme rx coalescing packet"); 1010 1011 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit", 1012 CTLTYPE_INT | CTLFLAG_RW, &sc->jme_process_limit, 0, 1013 sysctl_hw_jme_proc_limit, "I", 1014 "max number of Rx events to process"); 1015 1016 /* Pull in device tunables. */ 1017 sc->jme_process_limit = JME_PROC_DEFAULT; 1018 error = resource_int_value(device_get_name(sc->jme_dev), 1019 device_get_unit(sc->jme_dev), "process_limit", 1020 &sc->jme_process_limit); 1021 if (error == 0) { 1022 if (sc->jme_process_limit < JME_PROC_MIN || 1023 sc->jme_process_limit > JME_PROC_MAX) { 1024 device_printf(sc->jme_dev, 1025 "process_limit value out of range; " 1026 "using default: %d\n", JME_PROC_DEFAULT); 1027 sc->jme_process_limit = JME_PROC_DEFAULT; 1028 } 1029 } 1030 1031 sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT; 1032 error = resource_int_value(device_get_name(sc->jme_dev), 1033 device_get_unit(sc->jme_dev), "tx_coal_to", &sc->jme_tx_coal_to); 1034 if (error == 0) { 1035 if (sc->jme_tx_coal_to < PCCTX_COAL_TO_MIN || 1036 sc->jme_tx_coal_to > PCCTX_COAL_TO_MAX) { 1037 device_printf(sc->jme_dev, 1038 "tx_coal_to value out of range; " 1039 "using default: %d\n", PCCTX_COAL_TO_DEFAULT); 1040 sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT; 1041 } 1042 } 1043 1044 sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT; 1045 error = resource_int_value(device_get_name(sc->jme_dev), 1046 device_get_unit(sc->jme_dev), "tx_coal_pkt", &sc->jme_tx_coal_to); 1047 if (error == 0) { 1048 if (sc->jme_tx_coal_pkt < PCCTX_COAL_PKT_MIN || 1049 sc->jme_tx_coal_pkt > PCCTX_COAL_PKT_MAX) { 1050 device_printf(sc->jme_dev, 1051 "tx_coal_pkt value out of range; " 1052 "using default: %d\n", PCCTX_COAL_PKT_DEFAULT); 1053 sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT; 1054 } 1055 } 1056 1057 sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT; 1058 error = resource_int_value(device_get_name(sc->jme_dev), 1059 device_get_unit(sc->jme_dev), "rx_coal_to", &sc->jme_rx_coal_to); 1060 if (error == 0) { 1061 if (sc->jme_rx_coal_to < PCCRX_COAL_TO_MIN || 1062 sc->jme_rx_coal_to > PCCRX_COAL_TO_MAX) { 1063 device_printf(sc->jme_dev, 1064 "rx_coal_to value out of range; " 1065 "using default: %d\n", PCCRX_COAL_TO_DEFAULT); 1066 sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT; 1067 } 1068 } 1069 1070 sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT; 1071 error = resource_int_value(device_get_name(sc->jme_dev), 1072 device_get_unit(sc->jme_dev), "rx_coal_pkt", &sc->jme_rx_coal_to); 1073 if (error == 0) { 1074 if (sc->jme_rx_coal_pkt < PCCRX_COAL_PKT_MIN || 1075 sc->jme_rx_coal_pkt > PCCRX_COAL_PKT_MAX) { 1076 device_printf(sc->jme_dev, 1077 "tx_coal_pkt value out of range; " 1078 "using default: %d\n", PCCRX_COAL_PKT_DEFAULT); 1079 sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT; 1080 } 1081 } 1082 1083 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0) 1084 return; 1085 1086 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, 1087 NULL, "JME statistics"); 1088 parent = SYSCTL_CHILDREN(tree); 1089 1090 /* Rx statistics. 
*/ 1091 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD, 1092 NULL, "Rx MAC statistics"); 1093 child = SYSCTL_CHILDREN(tree); 1094 JME_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 1095 &stats->rx_good_frames, "Good frames"); 1096 JME_SYSCTL_STAT_ADD32(ctx, child, "crc_errs", 1097 &stats->rx_crc_errs, "CRC errors"); 1098 JME_SYSCTL_STAT_ADD32(ctx, child, "mii_errs", 1099 &stats->rx_mii_errs, "MII errors"); 1100 JME_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows", 1101 &stats->rx_fifo_oflows, "FIFO overflows"); 1102 JME_SYSCTL_STAT_ADD32(ctx, child, "desc_empty", 1103 &stats->rx_desc_empty, "Descriptor empty"); 1104 JME_SYSCTL_STAT_ADD32(ctx, child, "bad_frames", 1105 &stats->rx_bad_frames, "Bad frames"); 1106 1107 /* Tx statistics. */ 1108 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD, 1109 NULL, "Tx MAC statistics"); 1110 child = SYSCTL_CHILDREN(tree); 1111 JME_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 1112 &stats->tx_good_frames, "Good frames"); 1113 JME_SYSCTL_STAT_ADD32(ctx, child, "bad_frames", 1114 &stats->tx_bad_frames, "Bad frames"); 1115 } 1116 1117 #undef JME_SYSCTL_STAT_ADD32 1118 1119 struct jme_dmamap_arg { 1120 bus_addr_t jme_busaddr; 1121 }; 1122 1123 static void 1124 jme_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 1125 { 1126 struct jme_dmamap_arg *ctx; 1127 1128 if (error != 0) 1129 return; 1130 1131 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1132 1133 ctx = (struct jme_dmamap_arg *)arg; 1134 ctx->jme_busaddr = segs[0].ds_addr; 1135 } 1136 1137 static int 1138 jme_dma_alloc(struct jme_softc *sc) 1139 { 1140 struct jme_dmamap_arg ctx; 1141 struct jme_txdesc *txd; 1142 struct jme_rxdesc *rxd; 1143 bus_addr_t lowaddr, rx_ring_end, tx_ring_end; 1144 int error, i; 1145 1146 lowaddr = BUS_SPACE_MAXADDR; 1147 if ((sc->jme_flags & JME_FLAG_DMA32BIT) != 0) 1148 lowaddr = BUS_SPACE_MAXADDR_32BIT; 1149 1150 again: 1151 /* Create parent ring tag. */ 1152 error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */ 1153 1, 0, /* algnmnt, boundary */ 1154 lowaddr, /* lowaddr */ 1155 BUS_SPACE_MAXADDR, /* highaddr */ 1156 NULL, NULL, /* filter, filterarg */ 1157 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1158 0, /* nsegments */ 1159 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1160 0, /* flags */ 1161 NULL, NULL, /* lockfunc, lockarg */ 1162 &sc->jme_cdata.jme_ring_tag); 1163 if (error != 0) { 1164 device_printf(sc->jme_dev, 1165 "could not create parent ring DMA tag.\n"); 1166 goto fail; 1167 } 1168 /* Create tag for Tx ring. */ 1169 error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */ 1170 JME_TX_RING_ALIGN, 0, /* algnmnt, boundary */ 1171 BUS_SPACE_MAXADDR, /* lowaddr */ 1172 BUS_SPACE_MAXADDR, /* highaddr */ 1173 NULL, NULL, /* filter, filterarg */ 1174 JME_TX_RING_SIZE, /* maxsize */ 1175 1, /* nsegments */ 1176 JME_TX_RING_SIZE, /* maxsegsize */ 1177 0, /* flags */ 1178 NULL, NULL, /* lockfunc, lockarg */ 1179 &sc->jme_cdata.jme_tx_ring_tag); 1180 if (error != 0) { 1181 device_printf(sc->jme_dev, 1182 "could not allocate Tx ring DMA tag.\n"); 1183 goto fail; 1184 } 1185 1186 /* Create tag for Rx ring. 
*/ 1187 error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */ 1188 JME_RX_RING_ALIGN, 0, /* algnmnt, boundary */ 1189 lowaddr, /* lowaddr */ 1190 BUS_SPACE_MAXADDR, /* highaddr */ 1191 NULL, NULL, /* filter, filterarg */ 1192 JME_RX_RING_SIZE, /* maxsize */ 1193 1, /* nsegments */ 1194 JME_RX_RING_SIZE, /* maxsegsize */ 1195 0, /* flags */ 1196 NULL, NULL, /* lockfunc, lockarg */ 1197 &sc->jme_cdata.jme_rx_ring_tag); 1198 if (error != 0) { 1199 device_printf(sc->jme_dev, 1200 "could not allocate Rx ring DMA tag.\n"); 1201 goto fail; 1202 } 1203 1204 /* Allocate DMA'able memory and load the DMA map for Tx ring. */ 1205 error = bus_dmamem_alloc(sc->jme_cdata.jme_tx_ring_tag, 1206 (void **)&sc->jme_rdata.jme_tx_ring, 1207 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 1208 &sc->jme_cdata.jme_tx_ring_map); 1209 if (error != 0) { 1210 device_printf(sc->jme_dev, 1211 "could not allocate DMA'able memory for Tx ring.\n"); 1212 goto fail; 1213 } 1214 1215 ctx.jme_busaddr = 0; 1216 error = bus_dmamap_load(sc->jme_cdata.jme_tx_ring_tag, 1217 sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring, 1218 JME_TX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 1219 if (error != 0 || ctx.jme_busaddr == 0) { 1220 device_printf(sc->jme_dev, 1221 "could not load DMA'able memory for Tx ring.\n"); 1222 goto fail; 1223 } 1224 sc->jme_rdata.jme_tx_ring_paddr = ctx.jme_busaddr; 1225 1226 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ 1227 error = bus_dmamem_alloc(sc->jme_cdata.jme_rx_ring_tag, 1228 (void **)&sc->jme_rdata.jme_rx_ring, 1229 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 1230 &sc->jme_cdata.jme_rx_ring_map); 1231 if (error != 0) { 1232 device_printf(sc->jme_dev, 1233 "could not allocate DMA'able memory for Rx ring.\n"); 1234 goto fail; 1235 } 1236 1237 ctx.jme_busaddr = 0; 1238 error = bus_dmamap_load(sc->jme_cdata.jme_rx_ring_tag, 1239 sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring, 1240 JME_RX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 1241 if (error != 0 || ctx.jme_busaddr == 0) { 1242 device_printf(sc->jme_dev, 1243 "could not load DMA'able memory for Rx ring.\n"); 1244 goto fail; 1245 } 1246 sc->jme_rdata.jme_rx_ring_paddr = ctx.jme_busaddr; 1247 1248 if (lowaddr != BUS_SPACE_MAXADDR_32BIT) { 1249 /* Tx/Rx descriptor queue should reside within 4GB boundary. */ 1250 tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr + 1251 JME_TX_RING_SIZE; 1252 rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr + 1253 JME_RX_RING_SIZE; 1254 if ((JME_ADDR_HI(tx_ring_end) != 1255 JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) || 1256 (JME_ADDR_HI(rx_ring_end) != 1257 JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) { 1258 device_printf(sc->jme_dev, "4GB boundary crossed, " 1259 "switching to 32bit DMA address mode.\n"); 1260 jme_dma_free(sc); 1261 /* Limit DMA address space to 32bit and try again. */ 1262 lowaddr = BUS_SPACE_MAXADDR_32BIT; 1263 goto again; 1264 } 1265 } 1266 1267 lowaddr = BUS_SPACE_MAXADDR; 1268 if ((sc->jme_flags & JME_FLAG_DMA32BIT) != 0) 1269 lowaddr = BUS_SPACE_MAXADDR_32BIT; 1270 /* Create parent buffer tag. 
*/ 1271 error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */ 1272 1, 0, /* algnmnt, boundary */ 1273 lowaddr, /* lowaddr */ 1274 BUS_SPACE_MAXADDR, /* highaddr */ 1275 NULL, NULL, /* filter, filterarg */ 1276 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1277 0, /* nsegments */ 1278 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1279 0, /* flags */ 1280 NULL, NULL, /* lockfunc, lockarg */ 1281 &sc->jme_cdata.jme_buffer_tag); 1282 if (error != 0) { 1283 device_printf(sc->jme_dev, 1284 "could not create parent buffer DMA tag.\n"); 1285 goto fail; 1286 } 1287 1288 /* Create shadow status block tag. */ 1289 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */ 1290 JME_SSB_ALIGN, 0, /* algnmnt, boundary */ 1291 BUS_SPACE_MAXADDR, /* lowaddr */ 1292 BUS_SPACE_MAXADDR, /* highaddr */ 1293 NULL, NULL, /* filter, filterarg */ 1294 JME_SSB_SIZE, /* maxsize */ 1295 1, /* nsegments */ 1296 JME_SSB_SIZE, /* maxsegsize */ 1297 0, /* flags */ 1298 NULL, NULL, /* lockfunc, lockarg */ 1299 &sc->jme_cdata.jme_ssb_tag); 1300 if (error != 0) { 1301 device_printf(sc->jme_dev, 1302 "could not create shared status block DMA tag.\n"); 1303 goto fail; 1304 } 1305 1306 /* Create tag for Tx buffers. */ 1307 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */ 1308 1, 0, /* algnmnt, boundary */ 1309 BUS_SPACE_MAXADDR, /* lowaddr */ 1310 BUS_SPACE_MAXADDR, /* highaddr */ 1311 NULL, NULL, /* filter, filterarg */ 1312 JME_TSO_MAXSIZE, /* maxsize */ 1313 JME_MAXTXSEGS, /* nsegments */ 1314 JME_TSO_MAXSEGSIZE, /* maxsegsize */ 1315 0, /* flags */ 1316 NULL, NULL, /* lockfunc, lockarg */ 1317 &sc->jme_cdata.jme_tx_tag); 1318 if (error != 0) { 1319 device_printf(sc->jme_dev, "could not create Tx DMA tag.\n"); 1320 goto fail; 1321 } 1322 1323 /* Create tag for Rx buffers. */ 1324 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */ 1325 JME_RX_BUF_ALIGN, 0, /* algnmnt, boundary */ 1326 BUS_SPACE_MAXADDR, /* lowaddr */ 1327 BUS_SPACE_MAXADDR, /* highaddr */ 1328 NULL, NULL, /* filter, filterarg */ 1329 MCLBYTES, /* maxsize */ 1330 1, /* nsegments */ 1331 MCLBYTES, /* maxsegsize */ 1332 0, /* flags */ 1333 NULL, NULL, /* lockfunc, lockarg */ 1334 &sc->jme_cdata.jme_rx_tag); 1335 if (error != 0) { 1336 device_printf(sc->jme_dev, "could not create Rx DMA tag.\n"); 1337 goto fail; 1338 } 1339 1340 /* 1341 * Allocate DMA'able memory and load the DMA map for shared 1342 * status block. 1343 */ 1344 error = bus_dmamem_alloc(sc->jme_cdata.jme_ssb_tag, 1345 (void **)&sc->jme_rdata.jme_ssb_block, 1346 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 1347 &sc->jme_cdata.jme_ssb_map); 1348 if (error != 0) { 1349 device_printf(sc->jme_dev, "could not allocate DMA'able " 1350 "memory for shared status block.\n"); 1351 goto fail; 1352 } 1353 1354 ctx.jme_busaddr = 0; 1355 error = bus_dmamap_load(sc->jme_cdata.jme_ssb_tag, 1356 sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block, 1357 JME_SSB_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 1358 if (error != 0 || ctx.jme_busaddr == 0) { 1359 device_printf(sc->jme_dev, "could not load DMA'able memory " 1360 "for shared status block.\n"); 1361 goto fail; 1362 } 1363 sc->jme_rdata.jme_ssb_block_paddr = ctx.jme_busaddr; 1364 1365 /* Create DMA maps for Tx buffers. 
*/ 1366 for (i = 0; i < JME_TX_RING_CNT; i++) { 1367 txd = &sc->jme_cdata.jme_txdesc[i]; 1368 txd->tx_m = NULL; 1369 txd->tx_dmamap = NULL; 1370 error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag, 0, 1371 &txd->tx_dmamap); 1372 if (error != 0) { 1373 device_printf(sc->jme_dev, 1374 "could not create Tx dmamap.\n"); 1375 goto fail; 1376 } 1377 } 1378 /* Create DMA maps for Rx buffers. */ 1379 if ((error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0, 1380 &sc->jme_cdata.jme_rx_sparemap)) != 0) { 1381 device_printf(sc->jme_dev, 1382 "could not create spare Rx dmamap.\n"); 1383 goto fail; 1384 } 1385 for (i = 0; i < JME_RX_RING_CNT; i++) { 1386 rxd = &sc->jme_cdata.jme_rxdesc[i]; 1387 rxd->rx_m = NULL; 1388 rxd->rx_dmamap = NULL; 1389 error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0, 1390 &rxd->rx_dmamap); 1391 if (error != 0) { 1392 device_printf(sc->jme_dev, 1393 "could not create Rx dmamap.\n"); 1394 goto fail; 1395 } 1396 } 1397 1398 fail: 1399 return (error); 1400 } 1401 1402 static void 1403 jme_dma_free(struct jme_softc *sc) 1404 { 1405 struct jme_txdesc *txd; 1406 struct jme_rxdesc *rxd; 1407 int i; 1408 1409 /* Tx ring */ 1410 if (sc->jme_cdata.jme_tx_ring_tag != NULL) { 1411 if (sc->jme_cdata.jme_tx_ring_map) 1412 bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag, 1413 sc->jme_cdata.jme_tx_ring_map); 1414 if (sc->jme_cdata.jme_tx_ring_map && 1415 sc->jme_rdata.jme_tx_ring) 1416 bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag, 1417 sc->jme_rdata.jme_tx_ring, 1418 sc->jme_cdata.jme_tx_ring_map); 1419 sc->jme_rdata.jme_tx_ring = NULL; 1420 sc->jme_cdata.jme_tx_ring_map = NULL; 1421 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag); 1422 sc->jme_cdata.jme_tx_ring_tag = NULL; 1423 } 1424 /* Rx ring */ 1425 if (sc->jme_cdata.jme_rx_ring_tag != NULL) { 1426 if (sc->jme_cdata.jme_rx_ring_map) 1427 bus_dmamap_unload(sc->jme_cdata.jme_rx_ring_tag, 1428 sc->jme_cdata.jme_rx_ring_map); 1429 if (sc->jme_cdata.jme_rx_ring_map && 1430 sc->jme_rdata.jme_rx_ring) 1431 bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag, 1432 sc->jme_rdata.jme_rx_ring, 1433 sc->jme_cdata.jme_rx_ring_map); 1434 sc->jme_rdata.jme_rx_ring = NULL; 1435 sc->jme_cdata.jme_rx_ring_map = NULL; 1436 bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag); 1437 sc->jme_cdata.jme_rx_ring_tag = NULL; 1438 } 1439 /* Tx buffers */ 1440 if (sc->jme_cdata.jme_tx_tag != NULL) { 1441 for (i = 0; i < JME_TX_RING_CNT; i++) { 1442 txd = &sc->jme_cdata.jme_txdesc[i]; 1443 if (txd->tx_dmamap != NULL) { 1444 bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag, 1445 txd->tx_dmamap); 1446 txd->tx_dmamap = NULL; 1447 } 1448 } 1449 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag); 1450 sc->jme_cdata.jme_tx_tag = NULL; 1451 } 1452 /* Rx buffers */ 1453 if (sc->jme_cdata.jme_rx_tag != NULL) { 1454 for (i = 0; i < JME_RX_RING_CNT; i++) { 1455 rxd = &sc->jme_cdata.jme_rxdesc[i]; 1456 if (rxd->rx_dmamap != NULL) { 1457 bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag, 1458 rxd->rx_dmamap); 1459 rxd->rx_dmamap = NULL; 1460 } 1461 } 1462 if (sc->jme_cdata.jme_rx_sparemap != NULL) { 1463 bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag, 1464 sc->jme_cdata.jme_rx_sparemap); 1465 sc->jme_cdata.jme_rx_sparemap = NULL; 1466 } 1467 bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag); 1468 sc->jme_cdata.jme_rx_tag = NULL; 1469 } 1470 1471 /* Shared status block. 
 */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		if (sc->jme_cdata.jme_ssb_map)
			bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
			    sc->jme_cdata.jme_ssb_map);
		if (sc->jme_cdata.jme_ssb_map && sc->jme_rdata.jme_ssb_block)
			bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
			    sc->jme_rdata.jme_ssb_block,
			    sc->jme_cdata.jme_ssb_map);
		sc->jme_rdata.jme_ssb_block = NULL;
		sc->jme_cdata.jme_ssb_map = NULL;
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}
}

/*
 * Make sure the interface is stopped at reboot time.
 */
static int
jme_shutdown(device_t dev)
{

	return (jme_suspend(dev));
}

/*
 * Unlike other ethernet controllers, the JMC250 requires the link
 * speed to be explicitly reset to 10/100Mbps, because a gigabit link
 * consumes more than 375mA.
 * Note, we reset the link speed to 10/100Mbps with auto-negotiation,
 * but we don't know whether that operation will succeed since we
 * have no control after powering off. If the renegotiation fails,
 * WOL may not work. Running at 1Gbps draws more power than the
 * 375mA at 3.3V specified in the PCI specification, and that could
 * result in power to the ethernet controller being cut completely.
 *
 * TODO
 * Save current negotiated media speed/duplex/flow-control
 * to softc and restore the same link again after resuming.
 * PHY handling such as power down/resetting to 100Mbps
 * may be better handled in suspend method in phy driver.
 */
static void
jme_setlinkspeed(struct jme_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	JME_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->jme_miibus);
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
		default:
			break;
		}
	}
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/* Poll link state until jme(4) gets a 10/100 link. */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & IFM_AVALID) != 0) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					jme_mac_config(sc);
					return;
				default:
					break;
				}
			}
			JME_UNLOCK(sc);
			pause("jmelnk", hz);
			JME_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			device_printf(sc->jme_dev, "establishing link failed, "
			    "WOL may not work!");
	}
	/*
	 * No link, force MAC to have 100Mbps, full-duplex link.
	 * This is the last resort and may/may not work.
1577 */ 1578 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; 1579 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; 1580 jme_mac_config(sc); 1581 } 1582 1583 static void 1584 jme_setwol(struct jme_softc *sc) 1585 { 1586 struct ifnet *ifp; 1587 uint32_t gpr, pmcs; 1588 uint16_t pmstat; 1589 int pmc; 1590 1591 JME_LOCK_ASSERT(sc); 1592 1593 if (pci_find_cap(sc->jme_dev, PCIY_PMG, &pmc) != 0) { 1594 /* Remove Tx MAC/offload clock to save more power. */ 1595 if ((sc->jme_flags & JME_FLAG_TXCLK) != 0) 1596 CSR_WRITE_4(sc, JME_GHC, CSR_READ_4(sc, JME_GHC) & 1597 ~(GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100 | 1598 GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000)); 1599 if ((sc->jme_flags & JME_FLAG_RXCLK) != 0) 1600 CSR_WRITE_4(sc, JME_GPREG1, 1601 CSR_READ_4(sc, JME_GPREG1) | GPREG1_RX_MAC_CLK_DIS); 1602 /* No PME capability, PHY power down. */ 1603 jme_phy_down(sc); 1604 return; 1605 } 1606 1607 ifp = sc->jme_ifp; 1608 gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB; 1609 pmcs = CSR_READ_4(sc, JME_PMCS); 1610 pmcs &= ~PMCS_WOL_ENB_MASK; 1611 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) { 1612 pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB; 1613 /* Enable PME message. */ 1614 gpr |= GPREG0_PME_ENB; 1615 /* For gigabit controllers, reset link speed to 10/100. */ 1616 if ((sc->jme_flags & JME_FLAG_FASTETH) == 0) 1617 jme_setlinkspeed(sc); 1618 } 1619 1620 CSR_WRITE_4(sc, JME_PMCS, pmcs); 1621 CSR_WRITE_4(sc, JME_GPREG0, gpr); 1622 /* Remove Tx MAC/offload clock to save more power. */ 1623 if ((sc->jme_flags & JME_FLAG_TXCLK) != 0) 1624 CSR_WRITE_4(sc, JME_GHC, CSR_READ_4(sc, JME_GHC) & 1625 ~(GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100 | 1626 GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000)); 1627 /* Request PME. */ 1628 pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2); 1629 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 1630 if ((ifp->if_capenable & IFCAP_WOL) != 0) 1631 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 1632 pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 1633 if ((ifp->if_capenable & IFCAP_WOL) == 0) { 1634 /* No WOL, PHY power down. */ 1635 jme_phy_down(sc); 1636 } 1637 } 1638 1639 static int 1640 jme_suspend(device_t dev) 1641 { 1642 struct jme_softc *sc; 1643 1644 sc = device_get_softc(dev); 1645 1646 JME_LOCK(sc); 1647 jme_stop(sc); 1648 jme_setwol(sc); 1649 JME_UNLOCK(sc); 1650 1651 return (0); 1652 } 1653 1654 static int 1655 jme_resume(device_t dev) 1656 { 1657 struct jme_softc *sc; 1658 struct ifnet *ifp; 1659 uint16_t pmstat; 1660 int pmc; 1661 1662 sc = device_get_softc(dev); 1663 1664 JME_LOCK(sc); 1665 if (pci_find_cap(sc->jme_dev, PCIY_PMG, &pmc) == 0) { 1666 pmstat = pci_read_config(sc->jme_dev, 1667 pmc + PCIR_POWER_STATUS, 2); 1668 /* Disable PME clear PME status. */ 1669 pmstat &= ~PCIM_PSTAT_PMEENABLE; 1670 pci_write_config(sc->jme_dev, 1671 pmc + PCIR_POWER_STATUS, pmstat, 2); 1672 } 1673 /* Wakeup PHY. 
*/ 1674 jme_phy_up(sc); 1675 ifp = sc->jme_ifp; 1676 if ((ifp->if_flags & IFF_UP) != 0) { 1677 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1678 jme_init_locked(sc); 1679 } 1680 1681 JME_UNLOCK(sc); 1682 1683 return (0); 1684 } 1685 1686 static int 1687 jme_encap(struct jme_softc *sc, struct mbuf **m_head) 1688 { 1689 struct jme_txdesc *txd; 1690 struct jme_desc *desc; 1691 struct mbuf *m; 1692 bus_dma_segment_t txsegs[JME_MAXTXSEGS]; 1693 int error, i, nsegs, prod; 1694 uint32_t cflags, tsosegsz; 1695 1696 JME_LOCK_ASSERT(sc); 1697 1698 M_ASSERTPKTHDR((*m_head)); 1699 1700 if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 1701 /* 1702 * Due to the adherence to NDIS specification JMC250 1703 * assumes upper stack computed TCP pseudo checksum 1704 * without including payload length. This breaks 1705 * checksum offload for TSO case so recompute TCP 1706 * pseudo checksum for JMC250. Hopefully this wouldn't 1707 * be much burden on modern CPUs. 1708 */ 1709 struct ether_header *eh; 1710 struct ip *ip; 1711 struct tcphdr *tcp; 1712 uint32_t ip_off, poff; 1713 1714 if (M_WRITABLE(*m_head) == 0) { 1715 /* Get a writable copy. */ 1716 m = m_dup(*m_head, M_NOWAIT); 1717 m_freem(*m_head); 1718 if (m == NULL) { 1719 *m_head = NULL; 1720 return (ENOBUFS); 1721 } 1722 *m_head = m; 1723 } 1724 ip_off = sizeof(struct ether_header); 1725 m = m_pullup(*m_head, ip_off); 1726 if (m == NULL) { 1727 *m_head = NULL; 1728 return (ENOBUFS); 1729 } 1730 eh = mtod(m, struct ether_header *); 1731 /* Check the existence of VLAN tag. */ 1732 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 1733 ip_off = sizeof(struct ether_vlan_header); 1734 m = m_pullup(m, ip_off); 1735 if (m == NULL) { 1736 *m_head = NULL; 1737 return (ENOBUFS); 1738 } 1739 } 1740 m = m_pullup(m, ip_off + sizeof(struct ip)); 1741 if (m == NULL) { 1742 *m_head = NULL; 1743 return (ENOBUFS); 1744 } 1745 ip = (struct ip *)(mtod(m, char *) + ip_off); 1746 poff = ip_off + (ip->ip_hl << 2); 1747 m = m_pullup(m, poff + sizeof(struct tcphdr)); 1748 if (m == NULL) { 1749 *m_head = NULL; 1750 return (ENOBUFS); 1751 } 1752 /* 1753 * Reset IP checksum and recompute TCP pseudo 1754 * checksum that NDIS specification requires. 1755 */ 1756 ip = (struct ip *)(mtod(m, char *) + ip_off); 1757 tcp = (struct tcphdr *)(mtod(m, char *) + poff); 1758 ip->ip_sum = 0; 1759 if (poff + (tcp->th_off << 2) == m->m_pkthdr.len) { 1760 tcp->th_sum = in_pseudo(ip->ip_src.s_addr, 1761 ip->ip_dst.s_addr, 1762 htons((tcp->th_off << 2) + IPPROTO_TCP)); 1763 /* No need to TSO, force IP checksum offload. 
*/ 1764 (*m_head)->m_pkthdr.csum_flags &= ~CSUM_TSO; 1765 (*m_head)->m_pkthdr.csum_flags |= CSUM_IP; 1766 } else 1767 tcp->th_sum = in_pseudo(ip->ip_src.s_addr, 1768 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 1769 *m_head = m; 1770 } 1771 1772 prod = sc->jme_cdata.jme_tx_prod; 1773 txd = &sc->jme_cdata.jme_txdesc[prod]; 1774 1775 error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag, 1776 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1777 if (error == EFBIG) { 1778 m = m_collapse(*m_head, M_NOWAIT, JME_MAXTXSEGS); 1779 if (m == NULL) { 1780 m_freem(*m_head); 1781 *m_head = NULL; 1782 return (ENOMEM); 1783 } 1784 *m_head = m; 1785 error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag, 1786 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1787 if (error != 0) { 1788 m_freem(*m_head); 1789 *m_head = NULL; 1790 return (error); 1791 } 1792 } else if (error != 0) 1793 return (error); 1794 if (nsegs == 0) { 1795 m_freem(*m_head); 1796 *m_head = NULL; 1797 return (EIO); 1798 } 1799 1800 /* 1801 * Check descriptor overrun. Leave one free descriptor. 1802 * Since we always use 64bit address mode for transmitting, 1803 * each Tx request requires one more dummy descriptor. 1804 */ 1805 if (sc->jme_cdata.jme_tx_cnt + nsegs + 1 > JME_TX_RING_CNT - 1) { 1806 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap); 1807 return (ENOBUFS); 1808 } 1809 1810 m = *m_head; 1811 cflags = 0; 1812 tsosegsz = 0; 1813 /* Configure checksum offload and TSO. */ 1814 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 1815 tsosegsz = (uint32_t)m->m_pkthdr.tso_segsz << 1816 JME_TD_MSS_SHIFT; 1817 cflags |= JME_TD_TSO; 1818 } else { 1819 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) 1820 cflags |= JME_TD_IPCSUM; 1821 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) 1822 cflags |= JME_TD_TCPCSUM; 1823 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 1824 cflags |= JME_TD_UDPCSUM; 1825 } 1826 /* Configure VLAN. */ 1827 if ((m->m_flags & M_VLANTAG) != 0) { 1828 cflags |= (m->m_pkthdr.ether_vtag & JME_TD_VLAN_MASK); 1829 cflags |= JME_TD_VLAN_TAG; 1830 } 1831 1832 desc = &sc->jme_rdata.jme_tx_ring[prod]; 1833 desc->flags = htole32(cflags); 1834 desc->buflen = htole32(tsosegsz); 1835 desc->addr_hi = htole32(m->m_pkthdr.len); 1836 desc->addr_lo = 0; 1837 sc->jme_cdata.jme_tx_cnt++; 1838 JME_DESC_INC(prod, JME_TX_RING_CNT); 1839 for (i = 0; i < nsegs; i++) { 1840 desc = &sc->jme_rdata.jme_tx_ring[prod]; 1841 desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT); 1842 desc->buflen = htole32(txsegs[i].ds_len); 1843 desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr)); 1844 desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr)); 1845 sc->jme_cdata.jme_tx_cnt++; 1846 JME_DESC_INC(prod, JME_TX_RING_CNT); 1847 } 1848 1849 /* Update producer index. */ 1850 sc->jme_cdata.jme_tx_prod = prod; 1851 /* 1852 * Finally request interrupt and give the first descriptor 1853 * owenership to hardware. 1854 */ 1855 desc = txd->tx_desc; 1856 desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR); 1857 1858 txd->tx_m = m; 1859 txd->tx_ndesc = nsegs + 1; 1860 1861 /* Sync descriptors. 
*/ 1862 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap, 1863 BUS_DMASYNC_PREWRITE); 1864 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag, 1865 sc->jme_cdata.jme_tx_ring_map, 1866 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1867 1868 return (0); 1869 } 1870 1871 static void 1872 jme_start(struct ifnet *ifp) 1873 { 1874 struct jme_softc *sc; 1875 1876 sc = ifp->if_softc; 1877 JME_LOCK(sc); 1878 jme_start_locked(ifp); 1879 JME_UNLOCK(sc); 1880 } 1881 1882 static void 1883 jme_start_locked(struct ifnet *ifp) 1884 { 1885 struct jme_softc *sc; 1886 struct mbuf *m_head; 1887 int enq; 1888 1889 sc = ifp->if_softc; 1890 1891 JME_LOCK_ASSERT(sc); 1892 1893 if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT) 1894 jme_txeof(sc); 1895 1896 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1897 IFF_DRV_RUNNING || (sc->jme_flags & JME_FLAG_LINK) == 0) 1898 return; 1899 1900 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) { 1901 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 1902 if (m_head == NULL) 1903 break; 1904 /* 1905 * Pack the data into the transmit ring. If we 1906 * don't have room, set the OACTIVE flag and wait 1907 * for the NIC to drain the ring. 1908 */ 1909 if (jme_encap(sc, &m_head)) { 1910 if (m_head == NULL) 1911 break; 1912 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 1913 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1914 break; 1915 } 1916 1917 enq++; 1918 /* 1919 * If there's a BPF listener, bounce a copy of this frame 1920 * to him. 1921 */ 1922 ETHER_BPF_MTAP(ifp, m_head); 1923 } 1924 1925 if (enq > 0) { 1926 /* 1927 * Reading TXCSR takes very long time under heavy load 1928 * so cache TXCSR value and writes the ORed value with 1929 * the kick command to the TXCSR. This saves one register 1930 * access cycle. 1931 */ 1932 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB | 1933 TXCSR_TXQ_N_START(TXCSR_TXQ0)); 1934 /* Set a timeout in case the chip goes out to lunch. 
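 * The timer is decremented once per second by jme_watchdog() from the
 * jme_tick() callout and is disarmed by jme_txeof() once the Tx ring
 * drains, so it only fires when descriptors stay outstanding for
 * JME_TX_TIMEOUT consecutive ticks.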
*/ 1935 sc->jme_watchdog_timer = JME_TX_TIMEOUT; 1936 } 1937 } 1938 1939 static void 1940 jme_watchdog(struct jme_softc *sc) 1941 { 1942 struct ifnet *ifp; 1943 1944 JME_LOCK_ASSERT(sc); 1945 1946 if (sc->jme_watchdog_timer == 0 || --sc->jme_watchdog_timer) 1947 return; 1948 1949 ifp = sc->jme_ifp; 1950 if ((sc->jme_flags & JME_FLAG_LINK) == 0) { 1951 if_printf(sc->jme_ifp, "watchdog timeout (missed link)\n"); 1952 ifp->if_oerrors++; 1953 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1954 jme_init_locked(sc); 1955 return; 1956 } 1957 jme_txeof(sc); 1958 if (sc->jme_cdata.jme_tx_cnt == 0) { 1959 if_printf(sc->jme_ifp, 1960 "watchdog timeout (missed Tx interrupts) -- recovering\n"); 1961 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1962 jme_start_locked(ifp); 1963 return; 1964 } 1965 1966 if_printf(sc->jme_ifp, "watchdog timeout\n"); 1967 ifp->if_oerrors++; 1968 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1969 jme_init_locked(sc); 1970 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1971 jme_start_locked(ifp); 1972 } 1973 1974 static int 1975 jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1976 { 1977 struct jme_softc *sc; 1978 struct ifreq *ifr; 1979 struct mii_data *mii; 1980 uint32_t reg; 1981 int error, mask; 1982 1983 sc = ifp->if_softc; 1984 ifr = (struct ifreq *)data; 1985 error = 0; 1986 switch (cmd) { 1987 case SIOCSIFMTU: 1988 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU || 1989 ((sc->jme_flags & JME_FLAG_NOJUMBO) != 0 && 1990 ifr->ifr_mtu > JME_MAX_MTU)) { 1991 error = EINVAL; 1992 break; 1993 } 1994 1995 if (ifp->if_mtu != ifr->ifr_mtu) { 1996 /* 1997 * No special configuration is required when interface 1998 * MTU is changed but availability of TSO/Tx checksum 1999 * offload should be chcked against new MTU size as 2000 * FIFO size is just 2K. 2001 */ 2002 JME_LOCK(sc); 2003 if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) { 2004 ifp->if_capenable &= 2005 ~(IFCAP_TXCSUM | IFCAP_TSO4); 2006 ifp->if_hwassist &= 2007 ~(JME_CSUM_FEATURES | CSUM_TSO); 2008 VLAN_CAPABILITIES(ifp); 2009 } 2010 ifp->if_mtu = ifr->ifr_mtu; 2011 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 2012 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2013 jme_init_locked(sc); 2014 } 2015 JME_UNLOCK(sc); 2016 } 2017 break; 2018 case SIOCSIFFLAGS: 2019 JME_LOCK(sc); 2020 if ((ifp->if_flags & IFF_UP) != 0) { 2021 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 2022 if (((ifp->if_flags ^ sc->jme_if_flags) 2023 & (IFF_PROMISC | IFF_ALLMULTI)) != 0) 2024 jme_set_filter(sc); 2025 } else { 2026 if ((sc->jme_flags & JME_FLAG_DETACH) == 0) 2027 jme_init_locked(sc); 2028 } 2029 } else { 2030 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2031 jme_stop(sc); 2032 } 2033 sc->jme_if_flags = ifp->if_flags; 2034 JME_UNLOCK(sc); 2035 break; 2036 case SIOCADDMULTI: 2037 case SIOCDELMULTI: 2038 JME_LOCK(sc); 2039 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2040 jme_set_filter(sc); 2041 JME_UNLOCK(sc); 2042 break; 2043 case SIOCSIFMEDIA: 2044 case SIOCGIFMEDIA: 2045 mii = device_get_softc(sc->jme_miibus); 2046 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 2047 break; 2048 case SIOCSIFCAP: 2049 JME_LOCK(sc); 2050 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2051 if ((mask & IFCAP_TXCSUM) != 0 && 2052 ifp->if_mtu < JME_TX_FIFO_SIZE) { 2053 if ((IFCAP_TXCSUM & ifp->if_capabilities) != 0) { 2054 ifp->if_capenable ^= IFCAP_TXCSUM; 2055 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) 2056 ifp->if_hwassist |= JME_CSUM_FEATURES; 2057 else 2058 ifp->if_hwassist &= ~JME_CSUM_FEATURES; 2059 } 2060 } 2061 if ((mask & IFCAP_RXCSUM) != 0 && 2062 (IFCAP_RXCSUM 
& ifp->if_capabilities) != 0) { 2063 ifp->if_capenable ^= IFCAP_RXCSUM; 2064 reg = CSR_READ_4(sc, JME_RXMAC); 2065 reg &= ~RXMAC_CSUM_ENB; 2066 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 2067 reg |= RXMAC_CSUM_ENB; 2068 CSR_WRITE_4(sc, JME_RXMAC, reg); 2069 } 2070 if ((mask & IFCAP_TSO4) != 0 && 2071 ifp->if_mtu < JME_TX_FIFO_SIZE) { 2072 if ((IFCAP_TSO4 & ifp->if_capabilities) != 0) { 2073 ifp->if_capenable ^= IFCAP_TSO4; 2074 if ((IFCAP_TSO4 & ifp->if_capenable) != 0) 2075 ifp->if_hwassist |= CSUM_TSO; 2076 else 2077 ifp->if_hwassist &= ~CSUM_TSO; 2078 } 2079 } 2080 if ((mask & IFCAP_WOL_MAGIC) != 0 && 2081 (IFCAP_WOL_MAGIC & ifp->if_capabilities) != 0) 2082 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 2083 if ((mask & IFCAP_VLAN_HWCSUM) != 0 && 2084 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0) 2085 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; 2086 if ((mask & IFCAP_VLAN_HWTSO) != 0 && 2087 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0) 2088 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 2089 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 2090 (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) { 2091 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2092 jme_set_vlan(sc); 2093 } 2094 JME_UNLOCK(sc); 2095 VLAN_CAPABILITIES(ifp); 2096 break; 2097 default: 2098 error = ether_ioctl(ifp, cmd, data); 2099 break; 2100 } 2101 2102 return (error); 2103 } 2104 2105 static void 2106 jme_mac_config(struct jme_softc *sc) 2107 { 2108 struct mii_data *mii; 2109 uint32_t ghc, gpreg, rxmac, txmac, txpause; 2110 uint32_t txclk; 2111 2112 JME_LOCK_ASSERT(sc); 2113 2114 mii = device_get_softc(sc->jme_miibus); 2115 2116 CSR_WRITE_4(sc, JME_GHC, GHC_RESET); 2117 DELAY(10); 2118 CSR_WRITE_4(sc, JME_GHC, 0); 2119 ghc = 0; 2120 txclk = 0; 2121 rxmac = CSR_READ_4(sc, JME_RXMAC); 2122 rxmac &= ~RXMAC_FC_ENB; 2123 txmac = CSR_READ_4(sc, JME_TXMAC); 2124 txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST); 2125 txpause = CSR_READ_4(sc, JME_TXPFC); 2126 txpause &= ~TXPFC_PAUSE_ENB; 2127 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 2128 ghc |= GHC_FULL_DUPLEX; 2129 rxmac &= ~RXMAC_COLL_DET_ENB; 2130 txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | 2131 TXMAC_BACKOFF | TXMAC_CARRIER_EXT | 2132 TXMAC_FRAME_BURST); 2133 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) 2134 txpause |= TXPFC_PAUSE_ENB; 2135 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) 2136 rxmac |= RXMAC_FC_ENB; 2137 /* Disable retry transmit timer/retry limit. */ 2138 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) & 2139 ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB)); 2140 } else { 2141 rxmac |= RXMAC_COLL_DET_ENB; 2142 txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF; 2143 /* Enable retry transmit timer/retry limit. */ 2144 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) | 2145 TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB); 2146 } 2147 /* Reprogram Tx/Rx MACs with resolved speed/duplex. 
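 * The txclk bits collected in the switch below select the Tx MAC/offload
 * clocks matching the negotiated speed; they are folded into GHC further
 * down only when the controller requires explicit Tx clock selection
 * (JME_FLAG_TXCLK).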
*/ 2148 switch (IFM_SUBTYPE(mii->mii_media_active)) { 2149 case IFM_10_T: 2150 ghc |= GHC_SPEED_10; 2151 txclk |= GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100; 2152 break; 2153 case IFM_100_TX: 2154 ghc |= GHC_SPEED_100; 2155 txclk |= GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100; 2156 break; 2157 case IFM_1000_T: 2158 if ((sc->jme_flags & JME_FLAG_FASTETH) != 0) 2159 break; 2160 ghc |= GHC_SPEED_1000; 2161 txclk |= GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000; 2162 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0) 2163 txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST; 2164 break; 2165 default: 2166 break; 2167 } 2168 if (sc->jme_rev == DEVICEID_JMC250 && 2169 sc->jme_chip_rev == DEVICEREVID_JMC250_A2) { 2170 /* 2171 * Workaround occasional packet loss issue of JMC250 A2 2172 * when it runs on half-duplex media. 2173 */ 2174 gpreg = CSR_READ_4(sc, JME_GPREG1); 2175 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) 2176 gpreg &= ~GPREG1_HDPX_FIX; 2177 else 2178 gpreg |= GPREG1_HDPX_FIX; 2179 CSR_WRITE_4(sc, JME_GPREG1, gpreg); 2180 /* Workaround CRC errors at 100Mbps on JMC250 A2. */ 2181 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) { 2182 /* Extend interface FIFO depth. */ 2183 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, 2184 0x1B, 0x0000); 2185 } else { 2186 /* Select default interface FIFO depth. */ 2187 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, 2188 0x1B, 0x0004); 2189 } 2190 } 2191 if ((sc->jme_flags & JME_FLAG_TXCLK) != 0) 2192 ghc |= txclk; 2193 CSR_WRITE_4(sc, JME_GHC, ghc); 2194 CSR_WRITE_4(sc, JME_RXMAC, rxmac); 2195 CSR_WRITE_4(sc, JME_TXMAC, txmac); 2196 CSR_WRITE_4(sc, JME_TXPFC, txpause); 2197 } 2198 2199 static void 2200 jme_link_task(void *arg, int pending) 2201 { 2202 struct jme_softc *sc; 2203 struct mii_data *mii; 2204 struct ifnet *ifp; 2205 struct jme_txdesc *txd; 2206 bus_addr_t paddr; 2207 int i; 2208 2209 sc = (struct jme_softc *)arg; 2210 2211 JME_LOCK(sc); 2212 mii = device_get_softc(sc->jme_miibus); 2213 ifp = sc->jme_ifp; 2214 if (mii == NULL || ifp == NULL || 2215 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 2216 JME_UNLOCK(sc); 2217 return; 2218 } 2219 2220 sc->jme_flags &= ~JME_FLAG_LINK; 2221 if ((mii->mii_media_status & IFM_AVALID) != 0) { 2222 switch (IFM_SUBTYPE(mii->mii_media_active)) { 2223 case IFM_10_T: 2224 case IFM_100_TX: 2225 sc->jme_flags |= JME_FLAG_LINK; 2226 break; 2227 case IFM_1000_T: 2228 if ((sc->jme_flags & JME_FLAG_FASTETH) != 0) 2229 break; 2230 sc->jme_flags |= JME_FLAG_LINK; 2231 break; 2232 default: 2233 break; 2234 } 2235 } 2236 2237 /* 2238 * Disabling Rx/Tx MACs have a side-effect of resetting 2239 * JME_TXNDA/JME_RXNDA register to the first address of 2240 * Tx/Rx descriptor address. So driver should reset its 2241 * internal procucer/consumer pointer and reclaim any 2242 * allocated resources. Note, just saving the value of 2243 * JME_TXNDA and JME_RXNDA registers before stopping MAC 2244 * and restoring JME_TXNDA/JME_RXNDA register is not 2245 * sufficient to make sure correct MAC state because 2246 * stopping MAC operation can take a while and hardware 2247 * might have updated JME_TXNDA/JME_RXNDA registers 2248 * during the stop operation. 2249 */ 2250 /* Block execution of task. */ 2251 taskqueue_block(sc->jme_tq); 2252 /* Disable interrupts and stop driver. */ 2253 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS); 2254 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2255 callout_stop(&sc->jme_tick_ch); 2256 sc->jme_watchdog_timer = 0; 2257 2258 /* Stop receiver/transmitter. 
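 * Per the note above, stopping the MACs resets JME_TXNDA/JME_RXNDA, so
 * the Tx ring is re-initialized, the Rx producer/consumer state is
 * reset, and the descriptor base addresses are re-programmed below
 * before the MACs are restarted.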
*/ 2259 jme_stop_rx(sc); 2260 jme_stop_tx(sc); 2261 2262 /* XXX Drain all queued tasks. */ 2263 JME_UNLOCK(sc); 2264 taskqueue_drain(sc->jme_tq, &sc->jme_int_task); 2265 JME_LOCK(sc); 2266 2267 if (sc->jme_cdata.jme_rxhead != NULL) 2268 m_freem(sc->jme_cdata.jme_rxhead); 2269 JME_RXCHAIN_RESET(sc); 2270 jme_txeof(sc); 2271 if (sc->jme_cdata.jme_tx_cnt != 0) { 2272 /* Remove queued packets for transmit. */ 2273 for (i = 0; i < JME_TX_RING_CNT; i++) { 2274 txd = &sc->jme_cdata.jme_txdesc[i]; 2275 if (txd->tx_m != NULL) { 2276 bus_dmamap_sync( 2277 sc->jme_cdata.jme_tx_tag, 2278 txd->tx_dmamap, 2279 BUS_DMASYNC_POSTWRITE); 2280 bus_dmamap_unload( 2281 sc->jme_cdata.jme_tx_tag, 2282 txd->tx_dmamap); 2283 m_freem(txd->tx_m); 2284 txd->tx_m = NULL; 2285 txd->tx_ndesc = 0; 2286 ifp->if_oerrors++; 2287 } 2288 } 2289 } 2290 2291 /* 2292 * Reuse configured Rx descriptors and reset 2293 * producer/consumer index. 2294 */ 2295 sc->jme_cdata.jme_rx_cons = 0; 2296 sc->jme_morework = 0; 2297 jme_init_tx_ring(sc); 2298 /* Initialize shadow status block. */ 2299 jme_init_ssb(sc); 2300 2301 /* Program MAC with resolved speed/duplex/flow-control. */ 2302 if ((sc->jme_flags & JME_FLAG_LINK) != 0) { 2303 jme_mac_config(sc); 2304 jme_stats_clear(sc); 2305 2306 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr); 2307 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr); 2308 2309 /* Set Tx ring address to the hardware. */ 2310 paddr = JME_TX_RING_ADDR(sc, 0); 2311 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr)); 2312 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr)); 2313 2314 /* Set Rx ring address to the hardware. */ 2315 paddr = JME_RX_RING_ADDR(sc, 0); 2316 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr)); 2317 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr)); 2318 2319 /* Restart receiver/transmitter. */ 2320 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB | 2321 RXCSR_RXQ_START); 2322 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB); 2323 /* Lastly enable TX/RX clock. */ 2324 if ((sc->jme_flags & JME_FLAG_TXCLK) != 0) 2325 CSR_WRITE_4(sc, JME_GHC, 2326 CSR_READ_4(sc, JME_GHC) & ~GHC_TX_MAC_CLK_DIS); 2327 if ((sc->jme_flags & JME_FLAG_RXCLK) != 0) 2328 CSR_WRITE_4(sc, JME_GPREG1, 2329 CSR_READ_4(sc, JME_GPREG1) & ~GPREG1_RX_MAC_CLK_DIS); 2330 } 2331 2332 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2333 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2334 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc); 2335 /* Unblock execution of task. */ 2336 taskqueue_unblock(sc->jme_tq); 2337 /* Reenable interrupts. */ 2338 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS); 2339 2340 JME_UNLOCK(sc); 2341 } 2342 2343 static int 2344 jme_intr(void *arg) 2345 { 2346 struct jme_softc *sc; 2347 uint32_t status; 2348 2349 sc = (struct jme_softc *)arg; 2350 2351 status = CSR_READ_4(sc, JME_INTR_REQ_STATUS); 2352 if (status == 0 || status == 0xFFFFFFFF) 2353 return (FILTER_STRAY); 2354 /* Disable interrupts. 
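 * jme_intr() runs as an interrupt filter: it only masks further
 * interrupts and defers all processing to jme_int_task() on the driver
 * taskqueue, which is why it can return FILTER_HANDLED right after the
 * enqueue below.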
*/
2355 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2356 taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);
2357
2358 return (FILTER_HANDLED);
2359 }
2360
2361 static void
2362 jme_int_task(void *arg, int pending)
2363 {
2364 struct jme_softc *sc;
2365 struct ifnet *ifp;
2366 uint32_t status;
2367 int more;
2368
2369 sc = (struct jme_softc *)arg;
2370 ifp = sc->jme_ifp;
2371
2372 JME_LOCK(sc);
2373 status = CSR_READ_4(sc, JME_INTR_STATUS);
2374 if (sc->jme_morework != 0) {
2375 sc->jme_morework = 0;
2376 status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO;
2377 }
2378 if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
2379 goto done;
2380 /* Reset PCC counter/timer and Ack interrupts. */
2381 status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
2382 if ((status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) != 0)
2383 status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
2384 if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0)
2385 status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
2386 CSR_WRITE_4(sc, JME_INTR_STATUS, status);
2387 more = 0;
2388 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2389 if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0) {
2390 more = jme_rxintr(sc, sc->jme_process_limit);
2391 if (more != 0)
2392 sc->jme_morework = 1;
2393 }
2394 if ((status & INTR_RXQ_DESC_EMPTY) != 0) {
2395 /*
2396 * Notify hardware availability of new Rx
2397 * buffers.
2398 * Reading RXCSR takes very long time under
2399 * heavy load, so cache the RXCSR value and write
2400 * the ORed value with the kick command to
2401 * the RXCSR. This saves one register access
2402 * cycle.
2403 */
2404 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
2405 RXCSR_RX_ENB | RXCSR_RXQ_START);
2406 }
2407 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2408 jme_start_locked(ifp);
2409 }
2410
2411 if (more != 0 || (CSR_READ_4(sc, JME_INTR_STATUS) & JME_INTRS) != 0) {
2412 taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);
2413 JME_UNLOCK(sc);
2414 return;
2415 }
2416 done:
2417 JME_UNLOCK(sc);
2418
2419 /* Reenable interrupts. */
2420 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2421 }
2422
2423 static void
2424 jme_txeof(struct jme_softc *sc)
2425 {
2426 struct ifnet *ifp;
2427 struct jme_txdesc *txd;
2428 uint32_t status;
2429 int cons, nsegs;
2430
2431 JME_LOCK_ASSERT(sc);
2432
2433 ifp = sc->jme_ifp;
2434
2435 cons = sc->jme_cdata.jme_tx_cons;
2436 if (cons == sc->jme_cdata.jme_tx_prod)
2437 return;
2438
2439 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2440 sc->jme_cdata.jme_tx_ring_map,
2441 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2442
2443 /*
2444 * Go through our Tx list and free mbufs for those
2445 * frames which have been transmitted.
2446 */
2447 for (; cons != sc->jme_cdata.jme_tx_prod;) {
2448 txd = &sc->jme_cdata.jme_txdesc[cons];
2449 status = le32toh(txd->tx_desc->flags);
2450 if ((status & JME_TD_OWN) == JME_TD_OWN)
2451 break;
2452
2453 if ((status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) != 0)
2454 ifp->if_oerrors++;
2455 else {
2456 ifp->if_opackets++;
2457 if ((status & JME_TD_COLLISION) != 0)
2458 ifp->if_collisions +=
2459 le32toh(txd->tx_desc->buflen) &
2460 JME_TD_BUF_LEN_MASK;
2461 }
2462 /*
2463 * Only the first descriptor of a multi-descriptor
2464 * transmission is updated, so the driver has to skip the
2465 * entire chain of buffers for the transmitted frame. In other
2466 * words, the JME_TD_OWN bit is valid only at the first
2467 * descriptor of a multi-descriptor transmission.
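 * Note that tx_ndesc was set to nsegs + 1 in jme_encap() to account for
 * the leading descriptor that carries the per-frame flags, so the loop
 * below clears and reclaims every descriptor that belonged to the frame.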
2468 */ 2469 for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) { 2470 sc->jme_rdata.jme_tx_ring[cons].flags = 0; 2471 JME_DESC_INC(cons, JME_TX_RING_CNT); 2472 } 2473 2474 /* Reclaim transferred mbufs. */ 2475 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap, 2476 BUS_DMASYNC_POSTWRITE); 2477 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap); 2478 2479 KASSERT(txd->tx_m != NULL, 2480 ("%s: freeing NULL mbuf!\n", __func__)); 2481 m_freem(txd->tx_m); 2482 txd->tx_m = NULL; 2483 sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc; 2484 KASSERT(sc->jme_cdata.jme_tx_cnt >= 0, 2485 ("%s: Active Tx desc counter was garbled\n", __func__)); 2486 txd->tx_ndesc = 0; 2487 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2488 } 2489 sc->jme_cdata.jme_tx_cons = cons; 2490 /* Unarm watchog timer when there is no pending descriptors in queue. */ 2491 if (sc->jme_cdata.jme_tx_cnt == 0) 2492 sc->jme_watchdog_timer = 0; 2493 2494 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag, 2495 sc->jme_cdata.jme_tx_ring_map, 2496 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2497 } 2498 2499 static __inline void 2500 jme_discard_rxbuf(struct jme_softc *sc, int cons) 2501 { 2502 struct jme_desc *desc; 2503 2504 desc = &sc->jme_rdata.jme_rx_ring[cons]; 2505 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT); 2506 desc->buflen = htole32(MCLBYTES); 2507 } 2508 2509 /* Receive a frame. */ 2510 static void 2511 jme_rxeof(struct jme_softc *sc) 2512 { 2513 struct ifnet *ifp; 2514 struct jme_desc *desc; 2515 struct jme_rxdesc *rxd; 2516 struct mbuf *mp, *m; 2517 uint32_t flags, status; 2518 int cons, count, nsegs; 2519 2520 JME_LOCK_ASSERT(sc); 2521 2522 ifp = sc->jme_ifp; 2523 2524 cons = sc->jme_cdata.jme_rx_cons; 2525 desc = &sc->jme_rdata.jme_rx_ring[cons]; 2526 flags = le32toh(desc->flags); 2527 status = le32toh(desc->buflen); 2528 nsegs = JME_RX_NSEGS(status); 2529 sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES; 2530 if ((status & JME_RX_ERR_STAT) != 0) { 2531 ifp->if_ierrors++; 2532 jme_discard_rxbuf(sc, sc->jme_cdata.jme_rx_cons); 2533 #ifdef JME_SHOW_ERRORS 2534 device_printf(sc->jme_dev, "%s : receive error = 0x%b\n", 2535 __func__, JME_RX_ERR(status), JME_RX_ERR_BITS); 2536 #endif 2537 sc->jme_cdata.jme_rx_cons += nsegs; 2538 sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT; 2539 return; 2540 } 2541 2542 for (count = 0; count < nsegs; count++, 2543 JME_DESC_INC(cons, JME_RX_RING_CNT)) { 2544 rxd = &sc->jme_cdata.jme_rxdesc[cons]; 2545 mp = rxd->rx_m; 2546 /* Add a new receive buffer to the ring. */ 2547 if (jme_newbuf(sc, rxd) != 0) { 2548 ifp->if_iqdrops++; 2549 /* Reuse buffer. */ 2550 for (; count < nsegs; count++) { 2551 jme_discard_rxbuf(sc, cons); 2552 JME_DESC_INC(cons, JME_RX_RING_CNT); 2553 } 2554 if (sc->jme_cdata.jme_rxhead != NULL) { 2555 m_freem(sc->jme_cdata.jme_rxhead); 2556 JME_RXCHAIN_RESET(sc); 2557 } 2558 break; 2559 } 2560 2561 /* 2562 * Assume we've received a full sized frame. 2563 * Actual size is fixed when we encounter the end of 2564 * multi-segmented frame. 2565 */ 2566 mp->m_len = MCLBYTES; 2567 2568 /* Chain received mbufs. */ 2569 if (sc->jme_cdata.jme_rxhead == NULL) { 2570 sc->jme_cdata.jme_rxhead = mp; 2571 sc->jme_cdata.jme_rxtail = mp; 2572 } else { 2573 /* 2574 * Receive processor can receive a maximum frame 2575 * size of 65535 bytes. 2576 */ 2577 mp->m_flags &= ~M_PKTHDR; 2578 sc->jme_cdata.jme_rxtail->m_next = mp; 2579 sc->jme_cdata.jme_rxtail = mp; 2580 } 2581 2582 if (count == nsegs - 1) { 2583 /* Last desc. for this frame. 
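 * The length fixup below splits jme_rxlen across the chained clusters.
 * A worked example, assuming 2 kB clusters (MCLBYTES) and the 10 byte
 * leading pad: a 4990 byte frame is DMA'd as 5000 bytes into 3 clusters,
 * so the first mbuf carries 2048 - 10 = 2038 bytes, the middle one 2048
 * and the last one 4990 - (2038 + 2048) = 904 bytes.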
*/ 2584 m = sc->jme_cdata.jme_rxhead; 2585 m->m_flags |= M_PKTHDR; 2586 m->m_pkthdr.len = sc->jme_cdata.jme_rxlen; 2587 if (nsegs > 1) { 2588 /* Set first mbuf size. */ 2589 m->m_len = MCLBYTES - JME_RX_PAD_BYTES; 2590 /* Set last mbuf size. */ 2591 mp->m_len = sc->jme_cdata.jme_rxlen - 2592 ((MCLBYTES - JME_RX_PAD_BYTES) + 2593 (MCLBYTES * (nsegs - 2))); 2594 } else 2595 m->m_len = sc->jme_cdata.jme_rxlen; 2596 m->m_pkthdr.rcvif = ifp; 2597 2598 /* 2599 * Account for 10bytes auto padding which is used 2600 * to align IP header on 32bit boundary. Also note, 2601 * CRC bytes is automatically removed by the 2602 * hardware. 2603 */ 2604 m->m_data += JME_RX_PAD_BYTES; 2605 2606 /* Set checksum information. */ 2607 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 && 2608 (flags & JME_RD_IPV4) != 0) { 2609 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2610 if ((flags & JME_RD_IPCSUM) != 0) 2611 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2612 if (((flags & JME_RD_MORE_FRAG) == 0) && 2613 ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) == 2614 (JME_RD_TCP | JME_RD_TCPCSUM) || 2615 (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) == 2616 (JME_RD_UDP | JME_RD_UDPCSUM))) { 2617 m->m_pkthdr.csum_flags |= 2618 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 2619 m->m_pkthdr.csum_data = 0xffff; 2620 } 2621 } 2622 2623 /* Check for VLAN tagged packets. */ 2624 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && 2625 (flags & JME_RD_VLAN_TAG) != 0) { 2626 m->m_pkthdr.ether_vtag = 2627 flags & JME_RD_VLAN_MASK; 2628 m->m_flags |= M_VLANTAG; 2629 } 2630 2631 ifp->if_ipackets++; 2632 /* Pass it on. */ 2633 JME_UNLOCK(sc); 2634 (*ifp->if_input)(ifp, m); 2635 JME_LOCK(sc); 2636 2637 /* Reset mbuf chains. */ 2638 JME_RXCHAIN_RESET(sc); 2639 } 2640 } 2641 2642 sc->jme_cdata.jme_rx_cons += nsegs; 2643 sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT; 2644 } 2645 2646 static int 2647 jme_rxintr(struct jme_softc *sc, int count) 2648 { 2649 struct jme_desc *desc; 2650 int nsegs, prog, pktlen; 2651 2652 bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag, 2653 sc->jme_cdata.jme_rx_ring_map, 2654 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2655 2656 for (prog = 0; count > 0; prog++) { 2657 desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons]; 2658 if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN) 2659 break; 2660 if ((le32toh(desc->buflen) & JME_RD_VALID) == 0) 2661 break; 2662 nsegs = JME_RX_NSEGS(le32toh(desc->buflen)); 2663 /* 2664 * Check number of segments against received bytes. 2665 * Non-matching value would indicate that hardware 2666 * is still trying to update Rx descriptors. I'm not 2667 * sure whether this check is needed. 2668 */ 2669 pktlen = JME_RX_BYTES(le32toh(desc->buflen)); 2670 if (nsegs != ((pktlen + (MCLBYTES - 1)) / MCLBYTES)) 2671 break; 2672 prog++; 2673 /* Received a frame. */ 2674 jme_rxeof(sc); 2675 count -= nsegs; 2676 } 2677 2678 if (prog > 0) 2679 bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag, 2680 sc->jme_cdata.jme_rx_ring_map, 2681 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2682 2683 return (count > 0 ? 0 : EAGAIN); 2684 } 2685 2686 static void 2687 jme_tick(void *arg) 2688 { 2689 struct jme_softc *sc; 2690 struct mii_data *mii; 2691 2692 sc = (struct jme_softc *)arg; 2693 2694 JME_LOCK_ASSERT(sc); 2695 2696 mii = device_get_softc(sc->jme_miibus); 2697 mii_tick(mii); 2698 /* 2699 * Reclaim Tx buffers that have been completed. It's not 2700 * needed here but it would release allocated mbuf chains 2701 * faster and limit the maximum delay to a hz. 
2702 */ 2703 jme_txeof(sc); 2704 jme_stats_update(sc); 2705 jme_watchdog(sc); 2706 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc); 2707 } 2708 2709 static void 2710 jme_reset(struct jme_softc *sc) 2711 { 2712 uint32_t ghc, gpreg; 2713 2714 /* Stop receiver, transmitter. */ 2715 jme_stop_rx(sc); 2716 jme_stop_tx(sc); 2717 2718 /* Reset controller. */ 2719 CSR_WRITE_4(sc, JME_GHC, GHC_RESET); 2720 CSR_READ_4(sc, JME_GHC); 2721 DELAY(10); 2722 /* 2723 * Workaround Rx FIFO overruns seen under certain conditions. 2724 * Explicitly synchorize TX/RX clock. TX/RX clock should be 2725 * enabled only after enabling TX/RX MACs. 2726 */ 2727 if ((sc->jme_flags & (JME_FLAG_TXCLK | JME_FLAG_RXCLK)) != 0) { 2728 /* Disable TX clock. */ 2729 CSR_WRITE_4(sc, JME_GHC, GHC_RESET | GHC_TX_MAC_CLK_DIS); 2730 /* Disable RX clock. */ 2731 gpreg = CSR_READ_4(sc, JME_GPREG1); 2732 CSR_WRITE_4(sc, JME_GPREG1, gpreg | GPREG1_RX_MAC_CLK_DIS); 2733 gpreg = CSR_READ_4(sc, JME_GPREG1); 2734 /* De-assert RESET but still disable TX clock. */ 2735 CSR_WRITE_4(sc, JME_GHC, GHC_TX_MAC_CLK_DIS); 2736 ghc = CSR_READ_4(sc, JME_GHC); 2737 2738 /* Enable TX clock. */ 2739 CSR_WRITE_4(sc, JME_GHC, ghc & ~GHC_TX_MAC_CLK_DIS); 2740 /* Enable RX clock. */ 2741 CSR_WRITE_4(sc, JME_GPREG1, gpreg & ~GPREG1_RX_MAC_CLK_DIS); 2742 CSR_READ_4(sc, JME_GPREG1); 2743 2744 /* Disable TX/RX clock again. */ 2745 CSR_WRITE_4(sc, JME_GHC, GHC_TX_MAC_CLK_DIS); 2746 CSR_WRITE_4(sc, JME_GPREG1, gpreg | GPREG1_RX_MAC_CLK_DIS); 2747 } else 2748 CSR_WRITE_4(sc, JME_GHC, 0); 2749 CSR_READ_4(sc, JME_GHC); 2750 DELAY(10); 2751 } 2752 2753 static void 2754 jme_init(void *xsc) 2755 { 2756 struct jme_softc *sc; 2757 2758 sc = (struct jme_softc *)xsc; 2759 JME_LOCK(sc); 2760 jme_init_locked(sc); 2761 JME_UNLOCK(sc); 2762 } 2763 2764 static void 2765 jme_init_locked(struct jme_softc *sc) 2766 { 2767 struct ifnet *ifp; 2768 struct mii_data *mii; 2769 bus_addr_t paddr; 2770 uint32_t reg; 2771 int error; 2772 2773 JME_LOCK_ASSERT(sc); 2774 2775 ifp = sc->jme_ifp; 2776 mii = device_get_softc(sc->jme_miibus); 2777 2778 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2779 return; 2780 /* 2781 * Cancel any pending I/O. 2782 */ 2783 jme_stop(sc); 2784 2785 /* 2786 * Reset the chip to a known state. 2787 */ 2788 jme_reset(sc); 2789 2790 /* Init descriptors. */ 2791 error = jme_init_rx_ring(sc); 2792 if (error != 0) { 2793 device_printf(sc->jme_dev, 2794 "%s: initialization failed: no memory for Rx buffers.\n", 2795 __func__); 2796 jme_stop(sc); 2797 return; 2798 } 2799 jme_init_tx_ring(sc); 2800 /* Initialize shadow status block. */ 2801 jme_init_ssb(sc); 2802 2803 /* Reprogram the station address. */ 2804 jme_set_macaddr(sc, IF_LLADDR(sc->jme_ifp)); 2805 2806 /* 2807 * Configure Tx queue. 2808 * Tx priority queue weight value : 0 2809 * Tx FIFO threshold for processing next packet : 16QW 2810 * Maximum Tx DMA length : 512 2811 * Allow Tx DMA burst. 2812 */ 2813 sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0); 2814 sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN); 2815 sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW; 2816 sc->jme_txcsr |= sc->jme_tx_dma_size; 2817 sc->jme_txcsr |= TXCSR_DMA_BURST; 2818 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr); 2819 2820 /* Set Tx descriptor counter. */ 2821 CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT); 2822 2823 /* Set Tx ring address to the hardware. 
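 * JME_ADDR_HI()/JME_ADDR_LO() split the 64-bit DMA address of the ring
 * across the two base address registers written below.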
*/
2824 paddr = JME_TX_RING_ADDR(sc, 0);
2825 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2826 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2827
2828 /* Configure TxMAC parameters. */
2829 reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2830 reg |= TXMAC_THRESH_1_PKT;
2831 reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2832 CSR_WRITE_4(sc, JME_TXMAC, reg);
2833
2834 /*
2835 * Configure Rx queue.
2836 * FIFO full threshold for transmitting Tx pause packet : 128T
2837 * FIFO threshold for processing next packet : 128QW
2838 * Rx queue 0 select
2839 * Max Rx DMA length : 128
2840 * Rx descriptor retry : 32
2841 * Rx descriptor retry time gap : 256ns
2842 * Don't receive runt/bad frames.
2843 */
2844 sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2845 /*
2846 * Since Rx FIFO size is 4K bytes, receiving frames larger
2847 * than 4K bytes will suffer from Rx FIFO overruns. So
2848 * decrease FIFO threshold to reduce the FIFO overruns for
2849 * frames larger than 4000 bytes.
2850 * For best performance of standard MTU sized frames use
2851 * maximum allowable FIFO threshold, 128QW. Note these do
2852 * not hold for chips with full mask revision >= 2. For these
2853 * controllers 64QW and 128QW are not valid values.
2854 */
2855 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 2)
2856 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2857 else {
2858 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
2859 ETHER_CRC_LEN) > JME_RX_FIFO_SIZE)
2860 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2861 else
2862 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2863 }
2864 sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
2865 sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2866 sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2867 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
2868
2869 /* Set Rx descriptor counter. */
2870 CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT);
2871
2872 /* Set Rx ring address to the hardware. */
2873 paddr = JME_RX_RING_ADDR(sc, 0);
2874 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2875 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2876
2877 /* Clear receive filter. */
2878 CSR_WRITE_4(sc, JME_RXMAC, 0);
2879 /* Set up the receive filter. */
2880 jme_set_filter(sc);
2881 jme_set_vlan(sc);
2882
2883 /*
2884 * Disable all WOL bits as WOL can interfere with normal Rx
2885 * operation. Also clear WOL detection status bits.
2886 */
2887 reg = CSR_READ_4(sc, JME_PMCS);
2888 reg &= ~PMCS_WOL_ENB_MASK;
2889 CSR_WRITE_4(sc, JME_PMCS, reg);
2890
2891 reg = CSR_READ_4(sc, JME_RXMAC);
2892 /*
2893 * Pad 10 bytes right before the received frame. This will greatly
2894 * help Rx performance on strict-alignment architectures as
2895 * the driver does not need to copy the frame to align the payload.
2896 */
2897 reg |= RXMAC_PAD_10BYTES;
2898 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2899 reg |= RXMAC_CSUM_ENB;
2900 CSR_WRITE_4(sc, JME_RXMAC, reg);
2901
2902 /* Configure general purpose reg0. */
2903 reg = CSR_READ_4(sc, JME_GPREG0);
2904 reg &= ~GPREG0_PCC_UNIT_MASK;
2905 /* Set PCC timer resolution to microsecond units. */
2906 reg |= GPREG0_PCC_UNIT_US;
2907 /*
2908 * Disable all shadow register posting as we have to read
2909 * the JME_INTR_STATUS register in jme_int_task. Also it seems
2910 * that it's hard to synchronize interrupt status between
2911 * hardware and software with shadow posting due to
2912 * requirements of bus_dmamap_sync(9).
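 * Consequently all of the GPREG0_SH_POST_DW*_DIS bits are set below, and
 * the shadow status block address programmed later in this function is
 * left with posting disabled.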
2913 */ 2914 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS | 2915 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS | 2916 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS | 2917 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS; 2918 /* Disable posting of DW0. */ 2919 reg &= ~GPREG0_POST_DW0_ENB; 2920 /* Clear PME message. */ 2921 reg &= ~GPREG0_PME_ENB; 2922 /* Set PHY address. */ 2923 reg &= ~GPREG0_PHY_ADDR_MASK; 2924 reg |= sc->jme_phyaddr; 2925 CSR_WRITE_4(sc, JME_GPREG0, reg); 2926 2927 /* Configure Tx queue 0 packet completion coalescing. */ 2928 reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) & 2929 PCCTX_COAL_TO_MASK; 2930 reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) & 2931 PCCTX_COAL_PKT_MASK; 2932 reg |= PCCTX_COAL_TXQ0; 2933 CSR_WRITE_4(sc, JME_PCCTX, reg); 2934 2935 /* Configure Rx queue 0 packet completion coalescing. */ 2936 reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) & 2937 PCCRX_COAL_TO_MASK; 2938 reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) & 2939 PCCRX_COAL_PKT_MASK; 2940 CSR_WRITE_4(sc, JME_PCCRX0, reg); 2941 2942 /* 2943 * Configure PCD(Packet Completion Deferring). It seems PCD 2944 * generates an interrupt when the time interval between two 2945 * back-to-back incoming/outgoing packet is long enough for 2946 * it to reach its timer value 0. The arrival of new packets 2947 * after timer has started causes the PCD timer to restart. 2948 * Unfortunately, it's not clear how PCD is useful at this 2949 * moment, so just use the same of PCC parameters. 2950 */ 2951 if ((sc->jme_flags & JME_FLAG_PCCPCD) != 0) { 2952 sc->jme_rx_pcd_to = sc->jme_rx_coal_to; 2953 if (sc->jme_rx_coal_to > PCDRX_TO_MAX) 2954 sc->jme_rx_pcd_to = PCDRX_TO_MAX; 2955 sc->jme_tx_pcd_to = sc->jme_tx_coal_to; 2956 if (sc->jme_tx_coal_to > PCDTX_TO_MAX) 2957 sc->jme_tx_pcd_to = PCDTX_TO_MAX; 2958 reg = sc->jme_rx_pcd_to << PCDRX0_TO_THROTTLE_SHIFT; 2959 reg |= sc->jme_rx_pcd_to << PCDRX0_TO_SHIFT; 2960 CSR_WRITE_4(sc, PCDRX_REG(0), reg); 2961 reg = sc->jme_tx_pcd_to << PCDTX_TO_THROTTLE_SHIFT; 2962 reg |= sc->jme_tx_pcd_to << PCDTX_TO_SHIFT; 2963 CSR_WRITE_4(sc, JME_PCDTX, reg); 2964 } 2965 2966 /* Configure shadow status block but don't enable posting. */ 2967 paddr = sc->jme_rdata.jme_ssb_block_paddr; 2968 CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr)); 2969 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr)); 2970 2971 /* Disable Timer 1 and Timer 2. */ 2972 CSR_WRITE_4(sc, JME_TIMER1, 0); 2973 CSR_WRITE_4(sc, JME_TIMER2, 0); 2974 2975 /* Configure retry transmit period, retry limit value. */ 2976 CSR_WRITE_4(sc, JME_TXTRHD, 2977 ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) & 2978 TXTRHD_RT_PERIOD_MASK) | 2979 ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) & 2980 TXTRHD_RT_LIMIT_SHIFT)); 2981 2982 /* Disable RSS. */ 2983 CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS); 2984 2985 /* Initialize the interrupt mask. */ 2986 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS); 2987 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF); 2988 2989 /* 2990 * Enabling Tx/Rx DMA engines and Rx queue processing is 2991 * done after detection of valid link in jme_link_task. 2992 */ 2993 2994 sc->jme_flags &= ~JME_FLAG_LINK; 2995 /* Set the current media. 
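 * mii_mediachg() applies the currently selected media; once the PHY
 * reports a valid link, jme_link_task() programs the resolved
 * speed/duplex via jme_mac_config() and enables the Rx/Tx engines, as
 * noted in the comment above.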
*/ 2996 mii_mediachg(mii); 2997 2998 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc); 2999 3000 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3001 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3002 } 3003 3004 static void 3005 jme_stop(struct jme_softc *sc) 3006 { 3007 struct ifnet *ifp; 3008 struct jme_txdesc *txd; 3009 struct jme_rxdesc *rxd; 3010 int i; 3011 3012 JME_LOCK_ASSERT(sc); 3013 /* 3014 * Mark the interface down and cancel the watchdog timer. 3015 */ 3016 ifp = sc->jme_ifp; 3017 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 3018 sc->jme_flags &= ~JME_FLAG_LINK; 3019 callout_stop(&sc->jme_tick_ch); 3020 sc->jme_watchdog_timer = 0; 3021 3022 /* 3023 * Disable interrupts. 3024 */ 3025 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS); 3026 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF); 3027 3028 /* Disable updating shadow status block. */ 3029 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, 3030 CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB); 3031 3032 /* Stop receiver, transmitter. */ 3033 jme_stop_rx(sc); 3034 jme_stop_tx(sc); 3035 3036 /* Reclaim Rx/Tx buffers that have been completed. */ 3037 jme_rxintr(sc, JME_RX_RING_CNT); 3038 if (sc->jme_cdata.jme_rxhead != NULL) 3039 m_freem(sc->jme_cdata.jme_rxhead); 3040 JME_RXCHAIN_RESET(sc); 3041 jme_txeof(sc); 3042 /* 3043 * Free RX and TX mbufs still in the queues. 3044 */ 3045 for (i = 0; i < JME_RX_RING_CNT; i++) { 3046 rxd = &sc->jme_cdata.jme_rxdesc[i]; 3047 if (rxd->rx_m != NULL) { 3048 bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, 3049 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 3050 bus_dmamap_unload(sc->jme_cdata.jme_rx_tag, 3051 rxd->rx_dmamap); 3052 m_freem(rxd->rx_m); 3053 rxd->rx_m = NULL; 3054 } 3055 } 3056 for (i = 0; i < JME_TX_RING_CNT; i++) { 3057 txd = &sc->jme_cdata.jme_txdesc[i]; 3058 if (txd->tx_m != NULL) { 3059 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, 3060 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 3061 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, 3062 txd->tx_dmamap); 3063 m_freem(txd->tx_m); 3064 txd->tx_m = NULL; 3065 txd->tx_ndesc = 0; 3066 } 3067 } 3068 jme_stats_update(sc); 3069 jme_stats_save(sc); 3070 } 3071 3072 static void 3073 jme_stop_tx(struct jme_softc *sc) 3074 { 3075 uint32_t reg; 3076 int i; 3077 3078 reg = CSR_READ_4(sc, JME_TXCSR); 3079 if ((reg & TXCSR_TX_ENB) == 0) 3080 return; 3081 reg &= ~TXCSR_TX_ENB; 3082 CSR_WRITE_4(sc, JME_TXCSR, reg); 3083 for (i = JME_TIMEOUT; i > 0; i--) { 3084 DELAY(1); 3085 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0) 3086 break; 3087 } 3088 if (i == 0) 3089 device_printf(sc->jme_dev, "stopping transmitter timeout!\n"); 3090 } 3091 3092 static void 3093 jme_stop_rx(struct jme_softc *sc) 3094 { 3095 uint32_t reg; 3096 int i; 3097 3098 reg = CSR_READ_4(sc, JME_RXCSR); 3099 if ((reg & RXCSR_RX_ENB) == 0) 3100 return; 3101 reg &= ~RXCSR_RX_ENB; 3102 CSR_WRITE_4(sc, JME_RXCSR, reg); 3103 for (i = JME_TIMEOUT; i > 0; i--) { 3104 DELAY(1); 3105 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0) 3106 break; 3107 } 3108 if (i == 0) 3109 device_printf(sc->jme_dev, "stopping recevier timeout!\n"); 3110 } 3111 3112 static void 3113 jme_init_tx_ring(struct jme_softc *sc) 3114 { 3115 struct jme_ring_data *rd; 3116 struct jme_txdesc *txd; 3117 int i; 3118 3119 sc->jme_cdata.jme_tx_prod = 0; 3120 sc->jme_cdata.jme_tx_cons = 0; 3121 sc->jme_cdata.jme_tx_cnt = 0; 3122 3123 rd = &sc->jme_rdata; 3124 bzero(rd->jme_tx_ring, JME_TX_RING_SIZE); 3125 for (i = 0; i < JME_TX_RING_CNT; i++) { 3126 txd = &sc->jme_cdata.jme_txdesc[i]; 3127 txd->tx_m = NULL; 3128 txd->tx_desc = &rd->jme_tx_ring[i]; 
3129 txd->tx_ndesc = 0; 3130 } 3131 3132 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag, 3133 sc->jme_cdata.jme_tx_ring_map, 3134 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3135 } 3136 3137 static void 3138 jme_init_ssb(struct jme_softc *sc) 3139 { 3140 struct jme_ring_data *rd; 3141 3142 rd = &sc->jme_rdata; 3143 bzero(rd->jme_ssb_block, JME_SSB_SIZE); 3144 bus_dmamap_sync(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map, 3145 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3146 } 3147 3148 static int 3149 jme_init_rx_ring(struct jme_softc *sc) 3150 { 3151 struct jme_ring_data *rd; 3152 struct jme_rxdesc *rxd; 3153 int i; 3154 3155 sc->jme_cdata.jme_rx_cons = 0; 3156 JME_RXCHAIN_RESET(sc); 3157 sc->jme_morework = 0; 3158 3159 rd = &sc->jme_rdata; 3160 bzero(rd->jme_rx_ring, JME_RX_RING_SIZE); 3161 for (i = 0; i < JME_RX_RING_CNT; i++) { 3162 rxd = &sc->jme_cdata.jme_rxdesc[i]; 3163 rxd->rx_m = NULL; 3164 rxd->rx_desc = &rd->jme_rx_ring[i]; 3165 if (jme_newbuf(sc, rxd) != 0) 3166 return (ENOBUFS); 3167 } 3168 3169 bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag, 3170 sc->jme_cdata.jme_rx_ring_map, 3171 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3172 3173 return (0); 3174 } 3175 3176 static int 3177 jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd) 3178 { 3179 struct jme_desc *desc; 3180 struct mbuf *m; 3181 bus_dma_segment_t segs[1]; 3182 bus_dmamap_t map; 3183 int nsegs; 3184 3185 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 3186 if (m == NULL) 3187 return (ENOBUFS); 3188 /* 3189 * JMC250 has 64bit boundary alignment limitation so jme(4) 3190 * takes advantage of 10 bytes padding feature of hardware 3191 * in order not to copy entire frame to align IP header on 3192 * 32bit boundary. 3193 */ 3194 m->m_len = m->m_pkthdr.len = MCLBYTES; 3195 3196 if (bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_rx_tag, 3197 sc->jme_cdata.jme_rx_sparemap, m, segs, &nsegs, 0) != 0) { 3198 m_freem(m); 3199 return (ENOBUFS); 3200 } 3201 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 3202 3203 if (rxd->rx_m != NULL) { 3204 bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap, 3205 BUS_DMASYNC_POSTREAD); 3206 bus_dmamap_unload(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap); 3207 } 3208 map = rxd->rx_dmamap; 3209 rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap; 3210 sc->jme_cdata.jme_rx_sparemap = map; 3211 bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap, 3212 BUS_DMASYNC_PREREAD); 3213 rxd->rx_m = m; 3214 3215 desc = rxd->rx_desc; 3216 desc->buflen = htole32(segs[0].ds_len); 3217 desc->addr_lo = htole32(JME_ADDR_LO(segs[0].ds_addr)); 3218 desc->addr_hi = htole32(JME_ADDR_HI(segs[0].ds_addr)); 3219 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT); 3220 3221 return (0); 3222 } 3223 3224 static void 3225 jme_set_vlan(struct jme_softc *sc) 3226 { 3227 struct ifnet *ifp; 3228 uint32_t reg; 3229 3230 JME_LOCK_ASSERT(sc); 3231 3232 ifp = sc->jme_ifp; 3233 reg = CSR_READ_4(sc, JME_RXMAC); 3234 reg &= ~RXMAC_VLAN_ENB; 3235 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 3236 reg |= RXMAC_VLAN_ENB; 3237 CSR_WRITE_4(sc, JME_RXMAC, reg); 3238 } 3239 3240 static void 3241 jme_set_filter(struct jme_softc *sc) 3242 { 3243 struct ifnet *ifp; 3244 struct ifmultiaddr *ifma; 3245 uint32_t crc; 3246 uint32_t mchash[2]; 3247 uint32_t rxcfg; 3248 3249 JME_LOCK_ASSERT(sc); 3250 3251 ifp = sc->jme_ifp; 3252 3253 rxcfg = CSR_READ_4(sc, JME_RXMAC); 3254 rxcfg &= ~ (RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST | 3255 RXMAC_ALLMULTI); 3256 /* Always accept frames destined to our 
station address. */ 3257 rxcfg |= RXMAC_UNICAST; 3258 if ((ifp->if_flags & IFF_BROADCAST) != 0) 3259 rxcfg |= RXMAC_BROADCAST; 3260 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 3261 if ((ifp->if_flags & IFF_PROMISC) != 0) 3262 rxcfg |= RXMAC_PROMISC; 3263 if ((ifp->if_flags & IFF_ALLMULTI) != 0) 3264 rxcfg |= RXMAC_ALLMULTI; 3265 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF); 3266 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF); 3267 CSR_WRITE_4(sc, JME_RXMAC, rxcfg); 3268 return; 3269 } 3270 3271 /* 3272 * Set up the multicast address filter by passing all multicast 3273 * addresses through a CRC generator, and then using the low-order 3274 * 6 bits as an index into the 64 bit multicast hash table. The 3275 * high order bits select the register, while the rest of the bits 3276 * select the bit within the register. 3277 */ 3278 rxcfg |= RXMAC_MULTICAST; 3279 bzero(mchash, sizeof(mchash)); 3280 3281 if_maddr_rlock(ifp); 3282 TAILQ_FOREACH(ifma, &sc->jme_ifp->if_multiaddrs, ifma_link) { 3283 if (ifma->ifma_addr->sa_family != AF_LINK) 3284 continue; 3285 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *) 3286 ifma->ifma_addr), ETHER_ADDR_LEN); 3287 3288 /* Just want the 6 least significant bits. */ 3289 crc &= 0x3f; 3290 3291 /* Set the corresponding bit in the hash table. */ 3292 mchash[crc >> 5] |= 1 << (crc & 0x1f); 3293 } 3294 if_maddr_runlock(ifp); 3295 3296 CSR_WRITE_4(sc, JME_MAR0, mchash[0]); 3297 CSR_WRITE_4(sc, JME_MAR1, mchash[1]); 3298 CSR_WRITE_4(sc, JME_RXMAC, rxcfg); 3299 } 3300 3301 static void 3302 jme_stats_clear(struct jme_softc *sc) 3303 { 3304 3305 JME_LOCK_ASSERT(sc); 3306 3307 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0) 3308 return; 3309 3310 /* Disable and clear counters. */ 3311 CSR_WRITE_4(sc, JME_STATCSR, 0xFFFFFFFF); 3312 /* Activate hw counters. */ 3313 CSR_WRITE_4(sc, JME_STATCSR, 0); 3314 CSR_READ_4(sc, JME_STATCSR); 3315 bzero(&sc->jme_stats, sizeof(struct jme_hw_stats)); 3316 } 3317 3318 static void 3319 jme_stats_save(struct jme_softc *sc) 3320 { 3321 3322 JME_LOCK_ASSERT(sc); 3323 3324 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0) 3325 return; 3326 /* Save current counters. */ 3327 bcopy(&sc->jme_stats, &sc->jme_ostats, sizeof(struct jme_hw_stats)); 3328 /* Disable and clear counters. */ 3329 CSR_WRITE_4(sc, JME_STATCSR, 0xFFFFFFFF); 3330 } 3331 3332 static void 3333 jme_stats_update(struct jme_softc *sc) 3334 { 3335 struct jme_hw_stats *stat, *ostat; 3336 uint32_t reg; 3337 3338 JME_LOCK_ASSERT(sc); 3339 3340 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0) 3341 return; 3342 stat = &sc->jme_stats; 3343 ostat = &sc->jme_ostats; 3344 stat->tx_good_frames = CSR_READ_4(sc, JME_STAT_TXGOOD); 3345 stat->rx_good_frames = CSR_READ_4(sc, JME_STAT_RXGOOD); 3346 reg = CSR_READ_4(sc, JME_STAT_CRCMII); 3347 stat->rx_crc_errs = (reg & STAT_RX_CRC_ERR_MASK) >> 3348 STAT_RX_CRC_ERR_SHIFT; 3349 stat->rx_mii_errs = (reg & STAT_RX_MII_ERR_MASK) >> 3350 STAT_RX_MII_ERR_SHIFT; 3351 reg = CSR_READ_4(sc, JME_STAT_RXERR); 3352 stat->rx_fifo_oflows = (reg & STAT_RXERR_OFLOW_MASK) >> 3353 STAT_RXERR_OFLOW_SHIFT; 3354 stat->rx_desc_empty = (reg & STAT_RXERR_MPTY_MASK) >> 3355 STAT_RXERR_MPTY_SHIFT; 3356 reg = CSR_READ_4(sc, JME_STAT_FAIL); 3357 stat->rx_bad_frames = (reg & STAT_FAIL_RX_MASK) >> STAT_FAIL_RX_SHIFT; 3358 stat->tx_bad_frames = (reg & STAT_FAIL_TX_MASK) >> STAT_FAIL_TX_SHIFT; 3359 3360 /* Account for previous counters. 
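 * jme_stats_save() copied the running totals into jme_ostats and then
 * cleared the hardware MIB counters, so adding the saved values back
 * here keeps the reported statistics monotonic across MAC stops.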
*/ 3361 stat->rx_good_frames += ostat->rx_good_frames; 3362 stat->rx_crc_errs += ostat->rx_crc_errs; 3363 stat->rx_mii_errs += ostat->rx_mii_errs; 3364 stat->rx_fifo_oflows += ostat->rx_fifo_oflows; 3365 stat->rx_desc_empty += ostat->rx_desc_empty; 3366 stat->rx_bad_frames += ostat->rx_bad_frames; 3367 stat->tx_good_frames += ostat->tx_good_frames; 3368 stat->tx_bad_frames += ostat->tx_bad_frames; 3369 } 3370 3371 static void 3372 jme_phy_down(struct jme_softc *sc) 3373 { 3374 uint32_t reg; 3375 3376 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, BMCR_PDOWN); 3377 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5) { 3378 reg = CSR_READ_4(sc, JME_PHYPOWDN); 3379 reg |= 0x0000000F; 3380 CSR_WRITE_4(sc, JME_PHYPOWDN, reg); 3381 reg = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4); 3382 reg &= ~PE1_GIGA_PDOWN_MASK; 3383 reg |= PE1_GIGA_PDOWN_D3; 3384 pci_write_config(sc->jme_dev, JME_PCI_PE1, reg, 4); 3385 } 3386 } 3387 3388 static void 3389 jme_phy_up(struct jme_softc *sc) 3390 { 3391 uint32_t reg; 3392 uint16_t bmcr; 3393 3394 bmcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR); 3395 bmcr &= ~BMCR_PDOWN; 3396 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, bmcr); 3397 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5) { 3398 reg = CSR_READ_4(sc, JME_PHYPOWDN); 3399 reg &= ~0x0000000F; 3400 CSR_WRITE_4(sc, JME_PHYPOWDN, reg); 3401 reg = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4); 3402 reg &= ~PE1_GIGA_PDOWN_MASK; 3403 reg |= PE1_GIGA_PDOWN_DIS; 3404 pci_write_config(sc->jme_dev, JME_PCI_PE1, reg, 4); 3405 } 3406 } 3407 3408 static int 3409 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 3410 { 3411 int error, value; 3412 3413 if (arg1 == NULL) 3414 return (EINVAL); 3415 value = *(int *)arg1; 3416 error = sysctl_handle_int(oidp, &value, 0, req); 3417 if (error || req->newptr == NULL) 3418 return (error); 3419 if (value < low || value > high) 3420 return (EINVAL); 3421 *(int *)arg1 = value; 3422 3423 return (0); 3424 } 3425 3426 static int 3427 sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS) 3428 { 3429 return (sysctl_int_range(oidp, arg1, arg2, req, 3430 PCCTX_COAL_TO_MIN, PCCTX_COAL_TO_MAX)); 3431 } 3432 3433 static int 3434 sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS) 3435 { 3436 return (sysctl_int_range(oidp, arg1, arg2, req, 3437 PCCTX_COAL_PKT_MIN, PCCTX_COAL_PKT_MAX)); 3438 } 3439 3440 static int 3441 sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS) 3442 { 3443 return (sysctl_int_range(oidp, arg1, arg2, req, 3444 PCCRX_COAL_TO_MIN, PCCRX_COAL_TO_MAX)); 3445 } 3446 3447 static int 3448 sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS) 3449 { 3450 return (sysctl_int_range(oidp, arg1, arg2, req, 3451 PCCRX_COAL_PKT_MIN, PCCRX_COAL_PKT_MAX)); 3452 } 3453 3454 static int 3455 sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS) 3456 { 3457 return (sysctl_int_range(oidp, arg1, arg2, req, 3458 JME_PROC_MIN, JME_PROC_MAX)); 3459 } 3460
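/*
 * Illustration only, not part of the driver: the range-checked handlers
 * above are meant to back the per-device coalescing/processing sysctl
 * nodes created in jme_sysctl_node().  A sketch of how one of them could
 * be registered is shown below; it is kept under #if 0 because "sc" is
 * only in scope inside the attach path, and the node name/description
 * used here are illustrative.  The resulting node would then be tunable
 * at runtime with sysctl(8).
 */
#if 0
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->jme_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev)), OID_AUTO,
	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_tx_coal_to, 0,
	    sysctl_hw_jme_tx_coal_to, "I", "jme Tx coalescing timeout");
#endif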