/*-
 * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
 * All rights reserved.
 *
 * Developed by Semihalf.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of MARVELL nor the names of contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <sys/sockio.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/mge/if_mgevar.h>
#include <arm/mv/mvreg.h>
#include <arm/mv/mvvar.h>

#include "miibus_if.h"

static int mge_probe(device_t dev);
static int mge_attach(device_t dev);
static int mge_detach(device_t dev);
static int mge_shutdown(device_t dev);
static int mge_suspend(device_t dev);
static int mge_resume(device_t dev);

static int mge_miibus_readreg(device_t dev, int phy, int reg);
static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);

static int mge_ifmedia_upd(struct ifnet *ifp);
static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);

static void mge_init(void *arg);
static void mge_init_locked(void *arg);
static void mge_start(struct ifnet *ifp);
static void mge_start_locked(struct ifnet *ifp);
static void mge_watchdog(struct mge_softc *sc);
static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data);

static uint32_t mge_tfut_ipg(uint32_t val, int ver);
static uint32_t mge_rx_ipg(uint32_t val, int ver);
static void mge_ver_params(struct mge_softc *sc);

static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
static void mge_intr_rxtx(void *arg);
static void mge_intr_rx(void *arg);
static void mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
    uint32_t int_cause_ext);
static int mge_intr_rx_locked(struct mge_softc *sc, int count);
static void mge_intr_tx(void *arg);
static void mge_intr_tx_locked(struct mge_softc *sc);
static void mge_intr_misc(void *arg);
static void mge_intr_sum(void *arg);
static void mge_intr_err(void *arg);
static void mge_stop(struct mge_softc *sc);
static void mge_tick(void *msc);
static uint32_t mge_set_port_serial_control(uint32_t media);
static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
static void mge_set_mac_address(struct mge_softc *sc);
static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
    uint8_t queue);
static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
static int mge_allocate_dma(struct mge_softc *sc);
static int mge_alloc_desc_dma(struct mge_softc *sc,
    struct mge_desc_wrapper* desc_tab, uint32_t size,
    bus_dma_tag_t *buffer_tag);
static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
    struct mbuf **mbufp, bus_addr_t *paddr);
static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg,
    int error);
static void mge_free_dma(struct mge_softc *sc);
static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
    uint32_t status, uint16_t bufsize);
static void mge_offload_setup_descriptor(struct mge_softc *sc,
    struct mge_desc_wrapper *dw);
static uint8_t mge_crc8(uint8_t *data, int size);
static void mge_setup_multicast(struct mge_softc *sc);
static void mge_set_rxic(struct mge_softc *sc);
static void mge_set_txic(struct mge_softc *sc);
static void mge_add_sysctls(struct mge_softc *sc);
static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);

static device_method_t mge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, mge_probe),
	DEVMETHOD(device_attach, mge_attach),
	DEVMETHOD(device_detach, mge_detach),
	DEVMETHOD(device_shutdown, mge_shutdown),
	DEVMETHOD(device_suspend, mge_suspend),
	DEVMETHOD(device_resume, mge_resume),
	/* MII interface */
	DEVMETHOD(miibus_readreg, mge_miibus_readreg),
	DEVMETHOD(miibus_writereg, mge_miibus_writereg),
	{ 0, 0 }
};

static driver_t mge_driver = {
	"mge",
	mge_methods,
	sizeof(struct mge_softc),
};

static devclass_t mge_devclass;

DRIVER_MODULE(mge, simplebus, mge_driver, mge_devclass, 0, 0);
DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(mge, ether, 1, 1, 1);
MODULE_DEPEND(mge, miibus, 1, 1, 1);

static struct resource_spec res_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
	{ SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};
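/*
 * Interrupt handler dispatch table. Slot 0 is the single aggregated RX/TX
 * handler used when the controller exposes only one IRQ line
 * (sc->mge_intr_cnt == 1); the remaining slots serve the per-cause IRQ
 * resources on controllers that split them out.
 */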
static struct {
	driver_intr_t *handler;
	char *description;
} mge_intrs[MGE_INTR_COUNT + 1] = {
	{ mge_intr_rxtx, "GbE aggregated interrupt" },
	{ mge_intr_rx, "GbE receive interrupt" },
	{ mge_intr_tx, "GbE transmit interrupt" },
	{ mge_intr_misc, "GbE misc interrupt" },
	{ mge_intr_sum, "GbE summary interrupt" },
	{ mge_intr_err, "GbE error interrupt" },
};

static void
mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
{
	uint32_t mac_l, mac_h;
	uint8_t lmac[6];
	int i, valid;

	/*
	 * Retrieve hw address from the device tree.
	 */
	i = OF_getprop(sc->node, "local-mac-address", (void *)lmac, 6);
	if (i == 6) {
		valid = 0;
		for (i = 0; i < 6; i++)
			if (lmac[i] != 0) {
				valid = 1;
				break;
			}

		if (valid) {
			bcopy(lmac, addr, 6);
			return;
		}
	}

	/*
	 * Fall back -- use the currently programmed address.
	 */
	mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
	mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);

	addr[0] = (mac_h & 0xff000000) >> 24;
	addr[1] = (mac_h & 0x00ff0000) >> 16;
	addr[2] = (mac_h & 0x0000ff00) >> 8;
	addr[3] = (mac_h & 0x000000ff);
	addr[4] = (mac_l & 0x0000ff00) >> 8;
	addr[5] = (mac_l & 0x000000ff);
}

static uint32_t
mge_tfut_ipg(uint32_t val, int ver)
{

	switch (ver) {
	case 1:
		return ((val & 0x3fff) << 4);
	case 2:
	default:
		return ((val & 0xffff) << 4);
	}
}

static uint32_t
mge_rx_ipg(uint32_t val, int ver)
{

	switch (ver) {
	case 1:
		return ((val & 0x3fff) << 8);
	case 2:
	default:
		return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
	}
}

static void
mge_ver_params(struct mge_softc *sc)
{
	uint32_t d, r;

	soc_id(&d, &r);
	if (d == MV_DEV_88F6281 || d == MV_DEV_88F6781 ||
	    d == MV_DEV_88F6282 ||
	    d == MV_DEV_MV78100 ||
	    d == MV_DEV_MV78100_Z0 ||
	    (d & MV_DEV_FAMILY_MASK) == MV_DEV_DISCOVERY) {
		sc->mge_ver = 2;
		sc->mge_mtu = 0x4e8;
		sc->mge_tfut_ipg_max = 0xFFFF;
		sc->mge_rx_ipg_max = 0xFFFF;
		sc->mge_tx_arb_cfg = 0xFC0000FF;
		sc->mge_tx_tok_cfg = 0xFFFF7FFF;
		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
	} else {
		sc->mge_ver = 1;
		sc->mge_mtu = 0x458;
		sc->mge_tfut_ipg_max = 0x3FFF;
		sc->mge_rx_ipg_max = 0x3FFF;
		sc->mge_tx_arb_cfg = 0x000000FF;
		sc->mge_tx_tok_cfg = 0x3FFFFFFF;
		sc->mge_tx_tok_cnt = 0x3FFFFFFF;
	}
	if (d == MV_DEV_88RC8180)
		sc->mge_intr_cnt = 1;
	else
		sc->mge_intr_cnt = 2;

	if (d == MV_DEV_MV78160 || d == MV_DEV_MV78260 || d == MV_DEV_MV78460)
		sc->mge_hw_csum = 0;
	else
		sc->mge_hw_csum = 1;
}

static void
mge_set_mac_address(struct mge_softc *sc)
{
	char *if_mac;
	uint32_t mac_l, mac_h;

	MGE_GLOBAL_LOCK_ASSERT(sc);

	if_mac = (char *)IF_LLADDR(sc->ifp);

	mac_l = (if_mac[4] << 8) | (if_mac[5]);
	mac_h = (if_mac[0] << 24) | (if_mac[1] << 16) |
	    (if_mac[2] << 8) | (if_mac[3] << 0);

	MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
	MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);

	mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
}

static void
mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
{
	uint32_t reg_idx, reg_off, reg_val, i;

	last_byte &= 0xf;
	reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
	reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
	reg_val = (1 | (queue << 1)) << reg_off;

	for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
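		/*
		 * Each 32-bit filter register packs four one-byte entries
		 * (a pass bit ORed with the queue number shifted left by
		 * one). Program only the entry indexed by the low nibble
		 * of the address' last byte and clear every other slot.
		 */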
		if (i == reg_idx)
			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
		else
			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
	}
}

static void
mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
{
	uint32_t port_config;
	uint32_t reg_val, i;

	/* Enable or disable promiscuous mode as needed */
	if (sc->ifp->if_flags & IFF_PROMISC) {
		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
		port_config |= PORT_CONFIG_UPM;
		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);

		reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
		    (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);

		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
		}

		for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
			MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);

	} else {
		port_config = MGE_READ(sc, MGE_PORT_CONFIG);
		port_config &= ~PORT_CONFIG_UPM;
		MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);

		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
			MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
			MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
		}

		mge_set_mac_address(sc);
	}
}

static void
mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	u_int32_t *paddr;

	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
	paddr = arg;

	*paddr = segs->ds_addr;
}

static int
mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
    bus_addr_t *paddr)
{
	struct mbuf *new_mbuf;
	bus_dma_segment_t seg[1];
	int error;
	int nsegs;

	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));

	new_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (new_mbuf == NULL)
		return (ENOBUFS);
	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;

	if (*mbufp) {
		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(tag, map);
	}

	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
	    BUS_DMA_NOWAIT);
	KASSERT(nsegs == 1, ("Too many segments returned!"));
	if (nsegs != 1 || error)
		panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);

	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);

	(*mbufp) = new_mbuf;
	(*paddr) = seg->ds_addr;
	return (0);
}

static int
mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t *buffer_tag)
{
	struct mge_desc_wrapper *dw;
	bus_addr_t desc_paddr;
	int i, error;

	desc_paddr = 0;
	for (i = size - 1; i >= 0; i--) {
		dw = &(tab[i]);
		error = bus_dmamem_alloc(sc->mge_desc_dtag,
		    (void**)&(dw->mge_desc),
		    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
		    &(dw->desc_dmap));

		if (error) {
			if_printf(sc->ifp, "failed to allocate DMA memory\n");
			dw->mge_desc = NULL;
			return (ENXIO);
		}

		error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
		    dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
		    &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);

		if (error) {
			if_printf(sc->ifp, "can't load descriptor\n");
			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
			    dw->desc_dmap);
			dw->mge_desc = NULL;
			return (ENXIO);
		}

		/* Chain descriptors */
		dw->mge_desc->next_desc = desc_paddr;
		desc_paddr = dw->mge_desc_paddr;
	}
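	/*
	 * The loop above walked backwards, so each descriptor already points
	 * at its successor and desc_paddr now holds the bus address of
	 * tab[0]; link the last descriptor back to the first to close the
	 * ring.
	 */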
	tab[size - 1].mge_desc->next_desc = desc_paddr;

	/* Allocate a busdma tag for mbufs. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MCLBYTES, 1,			/* maxsize, nsegments */
	    MCLBYTES, 0,			/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    buffer_tag);			/* dmat */
	if (error) {
		if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
		return (ENXIO);
	}

	/* Create TX busdma maps */
	for (i = 0; i < size; i++) {
		dw = &(tab[i]);
		error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
		if (error) {
			if_printf(sc->ifp, "failed to create map for mbuf\n");
			return (ENXIO);
		}

		dw->buffer = (struct mbuf*)NULL;
		dw->mge_desc->buffer = (bus_addr_t)NULL;
	}

	return (0);
}

static int
mge_allocate_dma(struct mge_softc *sc)
{
	int error;
	struct mge_desc_wrapper *dw;
	int i;

	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),	/* parent */
	    16, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    sizeof(struct mge_desc), 1,		/* maxsize, nsegments */
	    sizeof(struct mge_desc), 0,		/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->mge_desc_dtag);		/* dmat */

	mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
	    &sc->mge_tx_dtag);
	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
	    &sc->mge_rx_dtag);

	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &(sc->mge_rx_desc[i]);
		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
		    &dw->mge_desc->buffer);
	}

	sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;

	return (0);
}

static void
mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
    uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
{
	struct mge_desc_wrapper *dw;
	int i;

	for (i = 0; i < size; i++) {
		/* Free RX mbuf */
		dw = &(tab[i]);

		if (dw->buffer_dmap) {
			if (free_mbufs) {
				bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
			}
			bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
			if (free_mbufs)
				m_freem(dw->buffer);
		}
		/* Free RX descriptors */
		if (dw->desc_dmap) {
			bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
			bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
			    dw->desc_dmap);
		}
	}
}

static void
mge_free_dma(struct mge_softc *sc)
{
	/* Free descriptors and mbufs */
	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
	mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);

	/* Destroy mbuf dma tag */
	bus_dma_tag_destroy(sc->mge_tx_dtag);
	bus_dma_tag_destroy(sc->mge_rx_dtag);
	/* Destroy descriptors tag */
	bus_dma_tag_destroy(sc->mge_desc_dtag);
}

static void
mge_reinit_rx(struct mge_softc *sc)
{
	struct mge_desc_wrapper *dw;
	int i;

	MGE_RECEIVE_LOCK_ASSERT(sc);
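	/*
	 * Called on an RX resource error: drop the whole RX descriptor ring
	 * and rebuild it from scratch, then point the controller at the new
	 * ring and re-enable the queue.
	 */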
	mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);

	mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
	    &sc->mge_rx_dtag);

	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &(sc->mge_rx_desc[i]);
		mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
		    &dw->mge_desc->buffer);
	}

	sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
	sc->rx_desc_curr = 0;

	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
	    sc->rx_desc_start);

	/* Enable RX queue */
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
}

#ifdef DEVICE_POLLING
static poll_handler_t mge_poll;

static int
mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct mge_softc *sc = ifp->if_softc;
	uint32_t int_cause, int_cause_ext;
	int rx_npkts = 0;

	MGE_GLOBAL_LOCK(sc);

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		MGE_GLOBAL_UNLOCK(sc);
		return (rx_npkts);
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
		int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

		/* Check for resource error */
		if (int_cause & MGE_PORT_INT_RXERRQ0)
			mge_reinit_rx(sc);

		if (int_cause || int_cause_ext) {
			MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
			MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
		}
	}

	mge_intr_tx_locked(sc);
	rx_npkts = mge_intr_rx_locked(sc, count);

	MGE_GLOBAL_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

static int
mge_attach(device_t dev)
{
	struct mge_softc *sc;
	struct mii_softc *miisc;
	struct ifnet *ifp;
	uint8_t hwaddr[ETHER_ADDR_LEN];
	int i, error, phy;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->node = ofw_bus_get_node(dev);

	/* Set chip version-dependent parameters */
	mge_ver_params(sc);

	/* Get phy address and used softc from fdt */
	if (fdt_get_phyaddr(sc->node, sc->dev, &phy, (void **)&sc->phy_sc) != 0)
		return (ENXIO);

	/* Initialize mutexes */
	mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock",
	    MTX_DEF);
	mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock",
	    MTX_DEF);

	/* Allocate IO and IRQ resources */
	error = bus_alloc_resources(dev, res_spec, sc->res);
	if (error) {
		device_printf(dev, "could not allocate resources\n");
		mge_detach(dev);
		return (ENXIO);
	}

	/* Allocate DMA, buffers, buffer descriptors */
	error = mge_allocate_dma(sc);
	if (error) {
		mge_detach(dev);
		return (ENXIO);
	}

	sc->tx_desc_curr = 0;
	sc->rx_desc_curr = 0;
	sc->tx_desc_used_idx = 0;
	sc->tx_desc_used_count = 0;

	/* Configure defaults for interrupts coalescing */
	sc->rx_ic_time = 768;
	sc->tx_ic_time = 768;
	mge_add_sysctls(sc);

	/* Allocate network interface */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "if_alloc() failed\n");
		mge_detach(dev);
		return (ENOMEM);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	if (sc->mge_hw_csum) {
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
	}
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	/* Advertise that polling is supported */
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	ifp->if_init = mge_init;
	ifp->if_start = mge_start;
	ifp->if_ioctl = mge_ioctl;

	ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	mge_get_mac_address(sc, hwaddr);
	ether_ifattach(ifp, hwaddr);
	callout_init(&sc->wd_callout, 0);

	/* Attach PHY(s) */
	error = mii_attach(dev, &sc->miibus, ifp, mge_ifmedia_upd,
	    mge_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
	if (error) {
		device_printf(dev, "attaching PHYs failed\n");
		mge_detach(dev);
		return (error);
	}
	sc->mii = device_get_softc(sc->miibus);

	/* Tell the MAC where to find the PHY so autoneg works */
	miisc = LIST_FIRST(&sc->mii->mii_phys);
	MGE_WRITE(sc, MGE_REG_PHYDEV, miisc->mii_phy);

	/* Attach interrupt handlers */
	/* TODO: review flags, in part. mark RX as INTR_ENTROPY ? */
	for (i = 1; i <= sc->mge_intr_cnt; ++i) {
		error = bus_setup_intr(dev, sc->res[i],
		    INTR_TYPE_NET | INTR_MPSAFE,
		    NULL, *mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].handler,
		    sc, &sc->ih_cookie[i - 1]);
		if (error) {
			device_printf(dev, "could not setup %s\n",
			    mge_intrs[(sc->mge_intr_cnt == 1 ?
			    0 : i)].description);
			mge_detach(dev);
			return (error);
		}
	}

	return (0);
}

static int
mge_detach(device_t dev)
{
	struct mge_softc *sc;
	int error, i;

	sc = device_get_softc(dev);

	/* Stop controller and free TX queue */
	if (sc->ifp)
		mge_shutdown(dev);

	/* Wait for stopping ticks */
	callout_drain(&sc->wd_callout);

	/* Stop and release all interrupts */
	for (i = 0; i < sc->mge_intr_cnt; ++i) {
		if (!sc->ih_cookie[i])
			continue;

		error = bus_teardown_intr(dev, sc->res[1 + i],
		    sc->ih_cookie[i]);
		if (error)
			device_printf(dev, "could not release %s\n",
			    mge_intrs[(sc->mge_intr_cnt == 1 ?
			    0 : i + 1)].description);
	}

	/* Detach network interface */
	if (sc->ifp) {
		ether_ifdetach(sc->ifp);
		if_free(sc->ifp);
	}

	/* Free DMA resources */
	mge_free_dma(sc);

	/* Free IO memory handler */
	bus_release_resources(dev, res_spec, sc->res);

	/* Destroy mutexes */
	mtx_destroy(&sc->receive_lock);
	mtx_destroy(&sc->transmit_lock);

	return (0);
}

static void
mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct mge_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	MGE_TRANSMIT_LOCK(sc);

	mii = sc->mii;
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	MGE_TRANSMIT_UNLOCK(sc);
}

static uint32_t
mge_set_port_serial_control(uint32_t media)
{
	uint32_t port_config;

	port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
	    PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);

	if (IFM_TYPE(media) == IFM_ETHER) {
		switch (IFM_SUBTYPE(media)) {
		case IFM_AUTO:
			break;
		case IFM_1000_T:
			port_config |= (PORT_SERIAL_GMII_SPEED_1000 |
			    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
			    PORT_SERIAL_SPEED_AUTONEG);
			break;
		case IFM_100_TX:
			port_config |= (PORT_SERIAL_MII_SPEED_100 |
			    PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC |
			    PORT_SERIAL_SPEED_AUTONEG);
			break;
		case IFM_10_T:
			port_config |= (PORT_SERIAL_AUTONEG |
			    PORT_SERIAL_AUTONEG_FC |
			    PORT_SERIAL_SPEED_AUTONEG);
			break;
		}
		if (media & IFM_FDX)
			port_config |= PORT_SERIAL_FULL_DUPLEX;
	}
	return (port_config);
}

static int
mge_ifmedia_upd(struct ifnet *ifp)
{
	struct mge_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP) {
		MGE_GLOBAL_LOCK(sc);

		sc->mge_media_status = sc->mii->mii_media.ifm_media;
		mii_mediachg(sc->mii);
		mge_init_locked(sc);

		MGE_GLOBAL_UNLOCK(sc);
	}

	return (0);
}

static void
mge_init(void *arg)
{
	struct mge_softc *sc = arg;

	MGE_GLOBAL_LOCK(sc);

	mge_init_locked(arg);

	MGE_GLOBAL_UNLOCK(sc);
}

static void
mge_init_locked(void *arg)
{
	struct mge_softc *sc = arg;
	struct mge_desc_wrapper *dw;
	volatile uint32_t reg_val;
	int i, count;

	MGE_GLOBAL_LOCK_ASSERT(sc);

	/* Stop interface */
	mge_stop(sc);

	/* Disable interrupts */
	mge_intrs_ctrl(sc, 0);

	/* Set MAC address */
	mge_set_mac_address(sc);

	/* Setup multicast filters */
	mge_setup_multicast(sc);

	if (sc->mge_ver == 2) {
		MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
		MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
	}

	/* Initialize TX queue configuration registers */
	MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
	MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
	MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);

	/* Clear TX queue configuration registers for unused queues */
	for (i = 1; i < 7; i++) {
		MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
		MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
		MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
	}

	/* Set default MTU */
	MGE_WRITE(sc, sc->mge_mtu, 0);

	/* Port configuration */
	MGE_WRITE(sc, MGE_PORT_CONFIG,
	    PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
	    PORT_CONFIG_ARO_RXQ(0));
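	/* Clear the extended port configuration */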
	MGE_WRITE(sc, MGE_PORT_EXT_CONFIG, 0x0);

	/* Setup port configuration */
	reg_val = mge_set_port_serial_control(sc->mge_media_status);
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);

	/* Setup SDMA configuration */
	MGE_WRITE(sc, MGE_SDMA_CONFIG, MGE_SDMA_RX_BYTE_SWAP |
	    MGE_SDMA_TX_BYTE_SWAP |
	    MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
	    MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));

	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);

	MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
	MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
	    sc->rx_desc_start);

	/* Reset descriptor indexes */
	sc->tx_desc_curr = 0;
	sc->rx_desc_curr = 0;
	sc->tx_desc_used_idx = 0;
	sc->tx_desc_used_count = 0;

	/* Enable RX descriptors */
	for (i = 0; i < MGE_RX_DESC_NUM; i++) {
		dw = &sc->mge_rx_desc[i];
		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
		dw->mge_desc->buff_size = MCLBYTES;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* Enable RX queue */
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));

	/* Enable port */
	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
	reg_val |= PORT_SERIAL_ENABLE;
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
	count = 0x100000;
	for (;;) {
		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
		if (reg_val & MGE_STATUS_LINKUP)
			break;
		DELAY(100);
		if (--count == 0) {
			if_printf(sc->ifp, "Timeout on link-up\n");
			break;
		}
	}

	/* Setup interrupts coalescing */
	mge_set_rxic(sc);
	mge_set_txic(sc);

	/* Enable interrupts */
#ifdef DEVICE_POLLING
	/*
	 * ...only if polling is not turned on. Disable interrupts
	 * explicitly if polling is enabled.
	 */
	if (sc->ifp->if_capenable & IFCAP_POLLING)
		mge_intrs_ctrl(sc, 0);
	else
#endif /* DEVICE_POLLING */
		mge_intrs_ctrl(sc, 1);

	/* Activate network interface */
	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->wd_timer = 0;

	/* Schedule watchdog timeout */
	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
}

static void
mge_intr_rxtx(void *arg)
{
	struct mge_softc *sc = arg;
	uint32_t int_cause, int_cause_ext;

	MGE_GLOBAL_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING) {
		MGE_GLOBAL_UNLOCK(sc);
		return;
	}
#endif

	/* Get interrupt cause */
	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

	/* Check for Transmit interrupt */
	if (int_cause_ext & (MGE_PORT_INT_EXT_TXBUF0 |
	    MGE_PORT_INT_EXT_TXUR)) {
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
		    (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
		mge_intr_tx_locked(sc);
	}

	MGE_TRANSMIT_UNLOCK(sc);

	/* Check for Receive interrupt */
	mge_intr_rx_check(sc, int_cause, int_cause_ext);

	MGE_RECEIVE_UNLOCK(sc);
}

static void
mge_intr_err(void *arg)
{
	struct mge_softc *sc = arg;
	struct ifnet *ifp;

	ifp = sc->ifp;
	if_printf(ifp, "%s\n", __FUNCTION__);
}

static void
mge_intr_misc(void *arg)
{
	struct mge_softc *sc = arg;
	struct ifnet *ifp;

	ifp = sc->ifp;
	if_printf(ifp, "%s\n", __FUNCTION__);
}

static void
mge_intr_rx(void *arg)
{
	struct mge_softc *sc = arg;
	uint32_t int_cause, int_cause_ext;

	MGE_RECEIVE_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING) {
		MGE_RECEIVE_UNLOCK(sc);
		return;
	}
#endif

	/* Get interrupt cause */
	int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);

	mge_intr_rx_check(sc, int_cause, int_cause_ext);

	MGE_RECEIVE_UNLOCK(sc);
}

static void
mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
    uint32_t int_cause_ext)
{
	/* Check for resource error */
	if (int_cause & MGE_PORT_INT_RXERRQ0) {
		mge_reinit_rx(sc);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
		    ~(int_cause & MGE_PORT_INT_RXERRQ0));
	}

	int_cause &= MGE_PORT_INT_RXQ0;
	int_cause_ext &= MGE_PORT_INT_EXT_RXOR;

	if (int_cause || int_cause_ext) {
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
		mge_intr_rx_locked(sc, -1);
	}
}

static int
mge_intr_rx_locked(struct mge_softc *sc, int count)
{
	struct ifnet *ifp = sc->ifp;
	uint32_t status;
	uint16_t bufsize;
	struct mge_desc_wrapper* dw;
	struct mbuf *mb;
	int rx_npkts = 0;

	MGE_RECEIVE_LOCK_ASSERT(sc);

	while (count != 0) {
		dw = &sc->mge_rx_desc[sc->rx_desc_curr];
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get status */
		status = dw->mge_desc->cmd_status;
		bufsize = dw->mge_desc->buff_size;
		if ((status & MGE_DMA_OWNED) != 0)
			break;

		if (dw->mge_desc->byte_count &&
		    !(status & MGE_ERR_SUMMARY)) {
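			/*
			 * m_devget() copies the frame into a fresh mbuf
			 * chain, so the receive buffer can be handed
			 * straight back to the controller below.
			 */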
			bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
			    BUS_DMASYNC_POSTREAD);

			mb = m_devget(dw->buffer->m_data,
			    dw->mge_desc->byte_count - ETHER_CRC_LEN,
			    0, ifp, NULL);

			if (mb == NULL)
				/* Give up if no mbufs */
				break;

			mb->m_len -= 2;
			mb->m_pkthdr.len -= 2;
			mb->m_data += 2;

			mge_offload_process_frame(ifp, mb, status,
			    bufsize);

			MGE_RECEIVE_UNLOCK(sc);
			(*ifp->if_input)(ifp, mb);
			MGE_RECEIVE_LOCK(sc);
			rx_npkts++;
		}

		dw->mge_desc->byte_count = 0;
		dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
		sc->rx_desc_curr = (sc->rx_desc_curr + 1) % MGE_RX_DESC_NUM;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		if (count > 0)
			count -= 1;
	}

	return (rx_npkts);
}

static void
mge_intr_sum(void *arg)
{
	struct mge_softc *sc = arg;
	struct ifnet *ifp;

	ifp = sc->ifp;
	if_printf(ifp, "%s\n", __FUNCTION__);
}

static void
mge_intr_tx(void *arg)
{
	struct mge_softc *sc = arg;
	uint32_t int_cause_ext;

	MGE_TRANSMIT_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING) {
		MGE_TRANSMIT_UNLOCK(sc);
		return;
	}
#endif

	/* Ack the interrupt */
	int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
	MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
	    (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));

	mge_intr_tx_locked(sc);

	MGE_TRANSMIT_UNLOCK(sc);
}

static void
mge_intr_tx_locked(struct mge_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct mge_desc_wrapper *dw;
	struct mge_desc *desc;
	uint32_t status;
	int send = 0;

	MGE_TRANSMIT_LOCK_ASSERT(sc);

	/* Disable watchdog */
	sc->wd_timer = 0;

	while (sc->tx_desc_used_count) {
		/* Get the descriptor */
		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
		desc = dw->mge_desc;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get descriptor status */
		status = desc->cmd_status;

		if (status & MGE_DMA_OWNED)
			break;

		sc->tx_desc_used_idx =
		    (sc->tx_desc_used_idx + 1) % MGE_TX_DESC_NUM;
		sc->tx_desc_used_count--;

		/* Update collision statistics */
		if (status & MGE_ERR_SUMMARY) {
			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
				ifp->if_collisions++;
			if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
				ifp->if_collisions += 16;
		}

		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
		m_freem(dw->buffer);
		dw->buffer = (struct mbuf*)NULL;
		send++;

		ifp->if_opackets++;
	}

	if (send) {
		/* Now send anything that was pending */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		mge_start_locked(ifp);
	}
}

static int
mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct mge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask, error;
	uint32_t flags;

	error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		MGE_GLOBAL_LOCK(sc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
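				/*
				 * XOR the saved flags against the new ones
				 * so only the bits that actually changed
				 * are acted upon.
				 */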
				flags = ifp->if_flags ^ sc->mge_if_flags;
				if (flags & IFF_PROMISC)
					mge_set_prom_mode(sc,
					    MGE_RX_DEFAULT_QUEUE);

				if (flags & IFF_ALLMULTI)
					mge_setup_multicast(sc);
			} else
				mge_init_locked(sc);
		}
		else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			mge_stop(sc);

		sc->mge_if_flags = ifp->if_flags;
		MGE_GLOBAL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			MGE_GLOBAL_LOCK(sc);
			mge_setup_multicast(sc);
			MGE_GLOBAL_UNLOCK(sc);
		}
		break;
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable &= ~IFCAP_HWCSUM;
			ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist = MGE_CHECKSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(mge_poll, ifp);
				if (error)
					return (error);

				MGE_GLOBAL_LOCK(sc);
				mge_intrs_ctrl(sc, 0);
				ifp->if_capenable |= IFCAP_POLLING;
				MGE_GLOBAL_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				MGE_GLOBAL_LOCK(sc);
				mge_intrs_ctrl(sc, 1);
				ifp->if_capenable &= ~IFCAP_POLLING;
				MGE_GLOBAL_UNLOCK(sc);
			}
		}
#endif
		break;
	case SIOCGIFMEDIA: /* fall through */
	case SIOCSIFMEDIA:
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T
		    && !(ifr->ifr_media & IFM_FDX)) {
			device_printf(sc->dev,
			    "1000baseTX half-duplex unsupported\n");
			return (0);
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}
	return (error);
}

static int
mge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct mge_softc *sc;
	uint32_t retries;

	sc = device_get_softc(dev);

	MGE_WRITE(sc->phy_sc, MGE_REG_SMI, 0x1fffffff &
	    (MGE_SMI_READ | (reg << 21) | (phy << 16)));

	retries = MGE_SMI_READ_RETRIES;
	while (--retries &&
	    !(MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_READVALID))
		DELAY(MGE_SMI_READ_DELAY);

	if (retries == 0)
		device_printf(dev, "Timeout while reading from PHY\n");

	return (MGE_READ(sc->phy_sc, MGE_REG_SMI) & 0xffff);
}

static int
mge_miibus_writereg(device_t dev, int phy, int reg, int value)
{
	struct mge_softc *sc;
	uint32_t retries;

	sc = device_get_softc(dev);

	MGE_WRITE(sc->phy_sc, MGE_REG_SMI, 0x1fffffff &
	    (MGE_SMI_WRITE | (reg << 21) | (phy << 16) | (value & 0xffff)));

	retries = MGE_SMI_WRITE_RETRIES;
	while (--retries && MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_BUSY)
		DELAY(MGE_SMI_WRITE_DELAY);

	if (retries == 0)
		device_printf(dev, "Timeout while writing to PHY\n");
	return (0);
}

static int
mge_probe(device_t dev)
{

	if (!ofw_bus_is_compatible(dev, "mrvl,ge"))
		return (ENXIO);

	device_set_desc(dev, "Marvell Gigabit Ethernet controller");
	return (BUS_PROBE_DEFAULT);
}

static int
mge_resume(device_t dev)
{

	device_printf(dev, "%s\n", __FUNCTION__);
	return (0);
}

static int
mge_shutdown(device_t dev)
{
	struct mge_softc *sc = device_get_softc(dev);

	MGE_GLOBAL_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(sc->ifp);
#endif

	mge_stop(sc);

	MGE_GLOBAL_UNLOCK(sc);

	return (0);
}

static int
mge_encap(struct mge_softc *sc, struct mbuf *m0)
{
	struct mge_desc_wrapper *dw = NULL;
	struct ifnet *ifp;
	bus_dma_segment_t segs[MGE_TX_DESC_NUM];
	bus_dmamap_t mapp;
	int error;
	int seg, nsegs;
	int desc_no;

	ifp = sc->ifp;

	/* Check for free descriptors */
	if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
		/* No free descriptors */
		return (-1);
	}

	/* Fetch unused map */
	desc_no = sc->tx_desc_curr;
	dw = &sc->mge_tx_desc[desc_no];
	mapp = dw->buffer_dmap;

	/* Create mapping in DMA memory */
	error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error != 0 || nsegs != 1) {
		bus_dmamap_unload(sc->mge_tx_dtag, mapp);
		return ((error != 0) ? error : -1);
	}

	bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);

	/* Everything is ok, now we can send buffers */
	for (seg = 0; seg < nsegs; seg++) {
		dw->mge_desc->byte_count = segs[seg].ds_len;
		dw->mge_desc->buffer = segs[seg].ds_addr;
		dw->buffer = m0;
		dw->mge_desc->cmd_status = 0;
		if (seg == 0)
			mge_offload_setup_descriptor(sc, dw);
		dw->mge_desc->cmd_status |= MGE_TX_LAST | MGE_TX_FIRST |
		    MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
		    MGE_DMA_OWNED;
	}

	bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->tx_desc_curr = (sc->tx_desc_curr + 1) % MGE_TX_DESC_NUM;
	sc->tx_desc_used_count++;
	return (0);
}

static void
mge_tick(void *msc)
{
	struct mge_softc *sc = msc;

	/* Check for TX timeout */
	mge_watchdog(sc);

	mii_tick(sc->mii);

	/* Check for media type change */
	if (sc->mge_media_status != sc->mii->mii_media.ifm_media)
		mge_ifmedia_upd(sc->ifp);

	/* Schedule another timeout one second from now */
	callout_reset(&sc->wd_callout, hz, mge_tick, sc);
}

static void
mge_watchdog(struct mge_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->ifp;

	MGE_GLOBAL_LOCK(sc);

	if (sc->wd_timer == 0 || --sc->wd_timer) {
		MGE_GLOBAL_UNLOCK(sc);
		return;
	}

	ifp->if_oerrors++;
	if_printf(ifp, "watchdog timeout\n");

	mge_stop(sc);
	mge_init_locked(sc);

	MGE_GLOBAL_UNLOCK(sc);
}

static void
mge_start(struct ifnet *ifp)
{
	struct mge_softc *sc = ifp->if_softc;

	MGE_TRANSMIT_LOCK(sc);

	mge_start_locked(ifp);

	MGE_TRANSMIT_UNLOCK(sc);
}

static void
mge_start_locked(struct ifnet *ifp)
{
	struct mge_softc *sc;
	struct mbuf *m0, *mtmp;
	uint32_t reg_val, queued = 0;

	sc = ifp->if_softc;

	MGE_TRANSMIT_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	for (;;) {
		/* Get packet from the queue */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		mtmp = m_defrag(m0, M_NOWAIT);
		if (mtmp)
			m0 = mtmp;

		if (mge_encap(sc, m0)) {
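			/*
			 * Out of TX descriptors: put the packet back at the
			 * head of the queue and stall output until a TX
			 * completion frees descriptors.
			 */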
			IF_PREPEND(&ifp->if_snd, m0);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		queued++;
		BPF_MTAP(ifp, m0);
	}

	if (queued) {
		/* Enable transmitter and watchdog timer */
		reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
		MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
		sc->wd_timer = 5;
	}
}

static void
mge_stop(struct mge_softc *sc)
{
	struct ifnet *ifp;
	volatile uint32_t reg_val, status;
	struct mge_desc_wrapper *dw;
	struct mge_desc *desc;
	int count;

	ifp = sc->ifp;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	/* Stop tick engine */
	callout_stop(&sc->wd_callout);

	/* Disable interface */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->wd_timer = 0;

	/* Disable interrupts */
	mge_intrs_ctrl(sc, 0);

	/* Disable Rx and Tx */
	reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
	MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
	MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);

	/* Remove pending data from TX queue */
	while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
	    sc->tx_desc_used_count) {
		/* Get the descriptor */
		dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
		desc = dw->mge_desc;
		bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
		    BUS_DMASYNC_POSTREAD);

		/* Get descriptor status */
		status = desc->cmd_status;

		if (status & MGE_DMA_OWNED)
			break;

		sc->tx_desc_used_idx = (sc->tx_desc_used_idx + 1) %
		    MGE_TX_DESC_NUM;
		sc->tx_desc_used_count--;

		bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);

		m_freem(dw->buffer);
		dw->buffer = (struct mbuf*)NULL;
	}

	/* Wait for end of transmission */
	count = 0x100000;
	while (count--) {
		reg_val = MGE_READ(sc, MGE_PORT_STATUS);
		if (!(reg_val & MGE_STATUS_TX_IN_PROG) &&
		    (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
			break;
		DELAY(100);
	}

	if (!count)
		if_printf(ifp,
		    "%s: timeout while waiting for end of transmission\n",
		    __FUNCTION__);

	reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
	reg_val &= ~(PORT_SERIAL_ENABLE);
	MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
}

static int
mge_suspend(device_t dev)
{

	device_printf(dev, "%s\n", __FUNCTION__);
	return (0);
}

static void
mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame,
    uint32_t status, uint16_t bufsize)
{
	int csum_flags = 0;

	if (ifp->if_capenable & IFCAP_RXCSUM) {
		if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
			csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

		if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
		    (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
		    (status & MGE_RX_L4_CSUM_OK)) {
			csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			frame->m_pkthdr.csum_data = 0xFFFF;
		}

		frame->m_pkthdr.csum_flags = csum_flags;
	}
}

static void
mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
{
	struct mbuf *m0 = dw->buffer;
	struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
	int csum_flags = m0->m_pkthdr.csum_flags;
	int cmd_status = 0;
	struct ip *ip;
	int ehlen, etype;

	if (csum_flags) {
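		/*
		 * Look past an optional 802.1Q tag to find the IP header,
		 * whose length has to be programmed into the descriptor for
		 * the controller to generate checksums.
		 */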
		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
			etype = ntohs(eh->evl_proto);
			ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
			csum_flags |= MGE_TX_VLAN_TAGGED;
		} else {
			etype = ntohs(eh->evl_encap_proto);
			ehlen = ETHER_HDR_LEN;
		}

		if (etype != ETHERTYPE_IP) {
			if_printf(sc->ifp,
			    "TCP/IP Offload enabled for unsupported "
			    "protocol!\n");
			return;
		}

		ip = (struct ip *)(m0->m_data + ehlen);
		cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);
		cmd_status |= MGE_TX_NOT_FRAGMENT;
	}

	if (csum_flags & CSUM_IP)
		cmd_status |= MGE_TX_GEN_IP_CSUM;

	if (csum_flags & CSUM_TCP)
		cmd_status |= MGE_TX_GEN_L4_CSUM;

	if (csum_flags & CSUM_UDP)
		cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;

	dw->mge_desc->cmd_status |= cmd_status;
}

static void
mge_intrs_ctrl(struct mge_softc *sc, int enable)
{

	if (enable) {
		MGE_WRITE(sc, MGE_PORT_INT_MASK, MGE_PORT_INT_RXQ0 |
		    MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, MGE_PORT_INT_EXT_TXERR0 |
		    MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
		    MGE_PORT_INT_EXT_TXBUF0);
	} else {
		MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
		MGE_WRITE(sc, MGE_INT_MASK, 0x0);

		MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
		MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);

		MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
		MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
	}
}

static uint8_t
mge_crc8(uint8_t *data, int size)
{
	uint8_t crc = 0;
	static const uint8_t ct[256] = {
		0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
		0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
		0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
		0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
		0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
		0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
		0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
		0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
		0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
		0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
		0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
		0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
		0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
		0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
		0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
		0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
		0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
		0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
		0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
		0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
		0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
		0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
		0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
		0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
		0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
		0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
		0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
		0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
		0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
		0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
		0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
		0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
	};

	while (size--)
		crc = ct[crc ^ *(data++)];

	return (crc);
}

static void
mge_setup_multicast(struct mge_softc *sc)
{
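	/*
	 * Addresses of the form 01:00:5E:00:00:xx (see 'special' below) are
	 * filtered through the special multicast table, indexed directly by
	 * the last address byte; all other multicast addresses are CRC-8
	 * hashed into the other multicast table.
	 */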
	uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
	uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
	uint32_t smt[MGE_MCAST_REG_NUMBER];
	uint32_t omt[MGE_MCAST_REG_NUMBER];
	struct ifnet *ifp = sc->ifp;
	struct ifmultiaddr *ifma;
	uint8_t *mac;
	int i;

	if (ifp->if_flags & IFF_ALLMULTI) {
		for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
			smt[i] = omt[i] = (v << 24) | (v << 16) | (v << 8) | v;
	} else {
		memset(smt, 0, sizeof(smt));
		memset(omt, 0, sizeof(omt));

		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;

			mac = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
			if (memcmp(mac, special, sizeof(special)) == 0) {
				i = mac[5];
				smt[i >> 2] |= v << ((i & 0x03) << 3);
			} else {
				i = mge_crc8(mac, ETHER_ADDR_LEN);
				omt[i >> 2] |= v << ((i & 0x03) << 3);
			}
		}
		if_maddr_runlock(ifp);
	}

	for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
		MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), smt[i]);
		MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), omt[i]);
	}
}

static void
mge_set_rxic(struct mge_softc *sc)
{
	uint32_t reg;

	if (sc->rx_ic_time > sc->mge_rx_ipg_max)
		sc->rx_ic_time = sc->mge_rx_ipg_max;

	reg = MGE_READ(sc, MGE_SDMA_CONFIG);
	reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
	reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
	MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
}

static void
mge_set_txic(struct mge_softc *sc)
{
	uint32_t reg;

	if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
		sc->tx_ic_time = sc->mge_tfut_ipg_max;

	reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
	reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
	reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
	MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
}

static int
mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
{
	struct mge_softc *sc = (struct mge_softc *)arg1;
	uint32_t time;
	int error;

	time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
	error = sysctl_handle_int(oidp, &time, 0, req);
	if (error != 0)
		return (error);

	MGE_GLOBAL_LOCK(sc);
	if (arg2 == MGE_IC_RX) {
		sc->rx_ic_time = time;
		mge_set_rxic(sc);
	} else {
		sc->tx_ic_time = time;
		mge_set_txic(sc);
	}
	MGE_GLOBAL_UNLOCK(sc);

	return (0);
}

static void
mge_add_sysctls(struct mge_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
	    CTLFLAG_RD, 0, "MGE Interrupts coalescing");
	children = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_RX, mge_sysctl_ic,
	    "I", "IC RX time threshold");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_TX, mge_sysctl_ic,
	    "I", "IC TX time threshold");
}