1 /*- 2 * Copyright (C) 2001 Eduardo Horvath. 3 * Copyright (c) 2001-2003 Thomas Moestl 4 * Copyright (c) 2007-2009 Marius Strobl <marius@FreeBSD.org> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 * 28 * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp 29 * from: FreeBSD: if_gem.c 182060 2008-08-23 15:03:26Z marius 30 */ 31 32 #include <sys/cdefs.h> 33 __FBSDID("$FreeBSD$"); 34 35 /* 36 * driver for Sun Cassini/Cassini+ and National Semiconductor DP83065 37 * Saturn Gigabit Ethernet controllers 38 */ 39 40 #if 0 41 #define CAS_DEBUG 42 #endif 43 44 #include <sys/param.h> 45 #include <sys/systm.h> 46 #include <sys/bus.h> 47 #include <sys/callout.h> 48 #include <sys/endian.h> 49 #include <sys/mbuf.h> 50 #include <sys/malloc.h> 51 #include <sys/kernel.h> 52 #include <sys/lock.h> 53 #include <sys/module.h> 54 #include <sys/mutex.h> 55 #include <sys/refcount.h> 56 #include <sys/resource.h> 57 #include <sys/rman.h> 58 #include <sys/socket.h> 59 #include <sys/sockio.h> 60 #include <sys/taskqueue.h> 61 62 #include <net/bpf.h> 63 #include <net/ethernet.h> 64 #include <net/if.h> 65 #include <net/if_arp.h> 66 #include <net/if_dl.h> 67 #include <net/if_media.h> 68 #include <net/if_types.h> 69 #include <net/if_vlan_var.h> 70 71 #include <netinet/in.h> 72 #include <netinet/in_systm.h> 73 #include <netinet/ip.h> 74 #include <netinet/tcp.h> 75 #include <netinet/udp.h> 76 77 #include <machine/bus.h> 78 #if defined(__powerpc__) || defined(__sparc64__) 79 #include <dev/ofw/openfirm.h> 80 #include <machine/ofw_machdep.h> 81 #endif 82 #include <machine/resource.h> 83 84 #include <dev/mii/mii.h> 85 #include <dev/mii/miivar.h> 86 87 #include <dev/cas/if_casreg.h> 88 #include <dev/cas/if_casvar.h> 89 90 #include <dev/pci/pcireg.h> 91 #include <dev/pci/pcivar.h> 92 93 #include "miibus_if.h" 94 95 #define RINGASSERT(n , min, max) \ 96 CTASSERT(powerof2(n) && (n) >= (min) && (n) <= (max)) 97 98 RINGASSERT(CAS_NRXCOMP, 128, 32768); 99 RINGASSERT(CAS_NRXDESC, 32, 8192); 100 RINGASSERT(CAS_NRXDESC2, 32, 8192); 101 RINGASSERT(CAS_NTXDESC, 32, 8192); 102 103 #undef RINGASSERT 104 105 #define CCDASSERT(m, a) \ 106 CTASSERT((offsetof(struct cas_control_data, m) & ((a) - 1)) == 0) 107 108 CCDASSERT(ccd_rxcomps, CAS_RX_COMP_ALIGN); 109 CCDASSERT(ccd_rxdescs, CAS_RX_DESC_ALIGN); 110 CCDASSERT(ccd_rxdescs2, CAS_RX_DESC_ALIGN); 111 112 #undef CCDASSERT 113 114 #define CAS_TRIES 10000 115 116 /* 117 * According to documentation, the hardware has support for basic TCP 118 * checksum offloading only, in practice this can be also used for UDP 119 * however (i.e. the problem of previous Sun NICs that a checksum of 0x0 120 * is not converted to 0xffff no longer exists). 121 */ 122 #define CAS_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) 123 124 static inline void cas_add_rxdesc(struct cas_softc *sc, u_int idx); 125 static int cas_attach(struct cas_softc *sc); 126 static int cas_bitwait(struct cas_softc *sc, bus_addr_t r, uint32_t clr, 127 uint32_t set); 128 static void cas_cddma_callback(void *xsc, bus_dma_segment_t *segs, 129 int nsegs, int error); 130 static void cas_detach(struct cas_softc *sc); 131 static int cas_disable_rx(struct cas_softc *sc); 132 static int cas_disable_tx(struct cas_softc *sc); 133 static void cas_eint(struct cas_softc *sc, u_int status); 134 static void cas_free(void *arg1, void* arg2); 135 static void cas_init(void *xsc); 136 static void cas_init_locked(struct cas_softc *sc); 137 static void cas_init_regs(struct cas_softc *sc); 138 static int cas_intr(void *v); 139 static void cas_intr_task(void *arg, int pending __unused); 140 static int cas_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); 141 static int cas_load_txmbuf(struct cas_softc *sc, struct mbuf **m_head); 142 static int cas_mediachange(struct ifnet *ifp); 143 static void cas_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr); 144 static void cas_meminit(struct cas_softc *sc); 145 static void cas_mifinit(struct cas_softc *sc); 146 static int cas_mii_readreg(device_t dev, int phy, int reg); 147 static void cas_mii_statchg(device_t dev); 148 static int cas_mii_writereg(device_t dev, int phy, int reg, int val); 149 static void cas_reset(struct cas_softc *sc); 150 static int cas_reset_rx(struct cas_softc *sc); 151 static int cas_reset_tx(struct cas_softc *sc); 152 static void cas_resume(struct cas_softc *sc); 153 static u_int cas_descsize(u_int sz); 154 static void cas_rint(struct cas_softc *sc); 155 static void cas_rint_timeout(void *arg); 156 static inline void cas_rxcksum(struct mbuf *m, uint16_t cksum); 157 static inline void cas_rxcompinit(struct cas_rx_comp *rxcomp); 158 static u_int cas_rxcompsize(u_int sz); 159 static void cas_rxdma_callback(void *xsc, bus_dma_segment_t *segs, 160 int nsegs, int error); 161 static void cas_setladrf(struct cas_softc *sc); 162 static void cas_start(struct ifnet *ifp); 163 static void cas_stop(struct ifnet *ifp); 164 static void cas_suspend(struct cas_softc *sc); 165 static void cas_tick(void *arg); 166 static void cas_tint(struct cas_softc *sc); 167 static void cas_tx_task(void *arg, int pending __unused); 168 static inline void cas_txkick(struct cas_softc *sc); 169 static void cas_watchdog(struct cas_softc *sc); 170 171 static devclass_t cas_devclass; 172 173 MODULE_DEPEND(cas, ether, 1, 1, 1); 174 MODULE_DEPEND(cas, miibus, 1, 1, 1); 175 176 #ifdef CAS_DEBUG 177 #include <sys/ktr.h> 178 #define KTR_CAS KTR_CT2 179 #endif 180 181 static int 182 cas_attach(struct cas_softc *sc) 183 { 184 struct cas_txsoft *txs; 185 struct ifnet *ifp; 186 int error, i; 187 uint32_t v; 188 189 /* Set up ifnet structure. */ 190 ifp = sc->sc_ifp = if_alloc(IFT_ETHER); 191 if (ifp == NULL) 192 return (ENOSPC); 193 ifp->if_softc = sc; 194 if_initname(ifp, device_get_name(sc->sc_dev), 195 device_get_unit(sc->sc_dev)); 196 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 197 ifp->if_start = cas_start; 198 ifp->if_ioctl = cas_ioctl; 199 ifp->if_init = cas_init; 200 IFQ_SET_MAXLEN(&ifp->if_snd, CAS_TXQUEUELEN); 201 ifp->if_snd.ifq_drv_maxlen = CAS_TXQUEUELEN; 202 IFQ_SET_READY(&ifp->if_snd); 203 204 callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0); 205 callout_init(&sc->sc_rx_ch, 1); 206 /* Create local taskq. */ 207 TASK_INIT(&sc->sc_intr_task, 0, cas_intr_task, sc); 208 TASK_INIT(&sc->sc_tx_task, 1, cas_tx_task, ifp); 209 sc->sc_tq = taskqueue_create_fast("cas_taskq", M_WAITOK, 210 taskqueue_thread_enqueue, &sc->sc_tq); 211 if (sc->sc_tq == NULL) { 212 device_printf(sc->sc_dev, "could not create taskqueue\n"); 213 error = ENXIO; 214 goto fail_ifnet; 215 } 216 taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq", 217 device_get_nameunit(sc->sc_dev)); 218 219 /* Make sure the chip is stopped. */ 220 cas_reset(sc); 221 222 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 223 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 224 BUS_SPACE_MAXSIZE, 0, BUS_SPACE_MAXSIZE, 0, NULL, NULL, 225 &sc->sc_pdmatag); 226 if (error != 0) 227 goto fail_taskq; 228 229 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, 230 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 231 CAS_PAGE_SIZE, 1, CAS_PAGE_SIZE, 0, NULL, NULL, &sc->sc_rdmatag); 232 if (error != 0) 233 goto fail_ptag; 234 235 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, 236 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 237 MCLBYTES * CAS_NTXSEGS, CAS_NTXSEGS, MCLBYTES, 238 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag); 239 if (error != 0) 240 goto fail_rtag; 241 242 error = bus_dma_tag_create(sc->sc_pdmatag, CAS_TX_DESC_ALIGN, 0, 243 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 244 sizeof(struct cas_control_data), 1, 245 sizeof(struct cas_control_data), 0, 246 NULL, NULL, &sc->sc_cdmatag); 247 if (error != 0) 248 goto fail_ttag; 249 250 /* 251 * Allocate the control data structures, create and load the 252 * DMA map for it. 253 */ 254 if ((error = bus_dmamem_alloc(sc->sc_cdmatag, 255 (void **)&sc->sc_control_data, 256 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 257 &sc->sc_cddmamap)) != 0) { 258 device_printf(sc->sc_dev, 259 "unable to allocate control data, error = %d\n", error); 260 goto fail_ctag; 261 } 262 263 sc->sc_cddma = 0; 264 if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap, 265 sc->sc_control_data, sizeof(struct cas_control_data), 266 cas_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) { 267 device_printf(sc->sc_dev, 268 "unable to load control data DMA map, error = %d\n", 269 error); 270 goto fail_cmem; 271 } 272 273 /* 274 * Initialize the transmit job descriptors. 275 */ 276 STAILQ_INIT(&sc->sc_txfreeq); 277 STAILQ_INIT(&sc->sc_txdirtyq); 278 279 /* 280 * Create the transmit buffer DMA maps. 281 */ 282 error = ENOMEM; 283 for (i = 0; i < CAS_TXQUEUELEN; i++) { 284 txs = &sc->sc_txsoft[i]; 285 txs->txs_mbuf = NULL; 286 txs->txs_ndescs = 0; 287 if ((error = bus_dmamap_create(sc->sc_tdmatag, 0, 288 &txs->txs_dmamap)) != 0) { 289 device_printf(sc->sc_dev, 290 "unable to create TX DMA map %d, error = %d\n", 291 i, error); 292 goto fail_txd; 293 } 294 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 295 } 296 297 /* 298 * Allocate the receive buffers, create and load the DMA maps 299 * for them. 300 */ 301 for (i = 0; i < CAS_NRXDESC; i++) { 302 if ((error = bus_dmamem_alloc(sc->sc_rdmatag, 303 &sc->sc_rxdsoft[i].rxds_buf, BUS_DMA_WAITOK, 304 &sc->sc_rxdsoft[i].rxds_dmamap)) != 0) { 305 device_printf(sc->sc_dev, 306 "unable to allocate RX buffer %d, error = %d\n", 307 i, error); 308 goto fail_rxmem; 309 } 310 311 sc->sc_rxdptr = i; 312 sc->sc_rxdsoft[i].rxds_paddr = 0; 313 if ((error = bus_dmamap_load(sc->sc_rdmatag, 314 sc->sc_rxdsoft[i].rxds_dmamap, sc->sc_rxdsoft[i].rxds_buf, 315 CAS_PAGE_SIZE, cas_rxdma_callback, sc, 0)) != 0 || 316 sc->sc_rxdsoft[i].rxds_paddr == 0) { 317 device_printf(sc->sc_dev, 318 "unable to load RX DMA map %d, error = %d\n", 319 i, error); 320 goto fail_rxmap; 321 } 322 } 323 324 CAS_WRITE_4(sc, CAS_PCS_DATAPATH, CAS_PCS_DATAPATH_MII); 325 326 cas_mifinit(sc); 327 328 /* 329 * Look for an external PHY. 330 */ 331 error = ENXIO; 332 v = CAS_READ_4(sc, CAS_MIF_CONF); 333 if ((v & CAS_MIF_CONF_MDI1) != 0) { 334 v |= CAS_MIF_CONF_PHY_SELECT; 335 CAS_WRITE_4(sc, CAS_MIF_CONF, v); 336 switch (sc->sc_variant) { 337 default: 338 sc->sc_phyad = -1; 339 break; 340 } 341 error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, 342 cas_mediachange, cas_mediastatus); 343 } 344 345 /* 346 * Fall back on an internal PHY if no external PHY was found. 347 */ 348 if (error != 0 && (v & CAS_MIF_CONF_MDI0) != 0) { 349 v &= ~CAS_MIF_CONF_PHY_SELECT; 350 CAS_WRITE_4(sc, CAS_MIF_CONF, v); 351 switch (sc->sc_variant) { 352 default: 353 sc->sc_phyad = -1; 354 break; 355 } 356 error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, 357 cas_mediachange, cas_mediastatus); 358 } 359 360 /* 361 * Try the external PCS SERDES if we didn't find any PHYs. 362 */ 363 if (error != 0) { 364 CAS_WRITE_4(sc, CAS_PCS_DATAPATH, CAS_PCS_DATAPATH_SERDES); 365 CAS_WRITE_4(sc, CAS_PCS_SERDES_CTRL, CAS_PCS_SERDES_CTRL_ESD); 366 CAS_WRITE_4(sc, CAS_PCS_CONF_EN, CAS_PCS_CONF_EN); 367 sc->sc_flags |= CAS_SERDES; 368 sc->sc_phyad = CAS_PHYAD_EXTERNAL; 369 error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, 370 cas_mediachange, cas_mediastatus); 371 } 372 373 if (error != 0) { 374 device_printf(sc->sc_dev, "PHY probe failed: %d\n", error); 375 goto fail_rxmap; 376 } 377 sc->sc_mii = device_get_softc(sc->sc_miibus); 378 379 /* 380 * From this point forward, the attachment cannot fail. A failure 381 * before this point releases all resources that may have been 382 * allocated. 383 */ 384 385 /* Announce FIFO sizes. */ 386 v = CAS_READ_4(sc, CAS_TX_FIFO_SIZE); 387 device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n", 388 CAS_RX_FIFO_SIZE / 1024, v / 16); 389 390 /* Attach the interface. */ 391 ether_ifattach(ifp, sc->sc_enaddr); 392 393 /* 394 * Tell the upper layer(s) we support long frames/checksum offloads. 395 */ 396 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 397 ifp->if_capabilities = IFCAP_VLAN_MTU; 398 if ((sc->sc_flags & CAS_NO_CSUM) == 0) { 399 ifp->if_capabilities |= IFCAP_HWCSUM; 400 ifp->if_hwassist = CAS_CSUM_FEATURES; 401 } 402 ifp->if_capenable = ifp->if_capabilities; 403 404 return (0); 405 406 /* 407 * Free any resources we've allocated during the failed attach 408 * attempt. Do this in reverse order and fall through. 409 */ 410 fail_rxmap: 411 for (i = 0; i < CAS_NRXDESC; i++) 412 if (sc->sc_rxdsoft[i].rxds_paddr != 0) 413 bus_dmamap_unload(sc->sc_rdmatag, 414 sc->sc_rxdsoft[i].rxds_dmamap); 415 fail_rxmem: 416 for (i = 0; i < CAS_NRXDESC; i++) 417 if (sc->sc_rxdsoft[i].rxds_buf != NULL) 418 bus_dmamem_free(sc->sc_rdmatag, 419 sc->sc_rxdsoft[i].rxds_buf, 420 sc->sc_rxdsoft[i].rxds_dmamap); 421 fail_txd: 422 for (i = 0; i < CAS_TXQUEUELEN; i++) 423 if (sc->sc_txsoft[i].txs_dmamap != NULL) 424 bus_dmamap_destroy(sc->sc_tdmatag, 425 sc->sc_txsoft[i].txs_dmamap); 426 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); 427 fail_cmem: 428 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, 429 sc->sc_cddmamap); 430 fail_ctag: 431 bus_dma_tag_destroy(sc->sc_cdmatag); 432 fail_ttag: 433 bus_dma_tag_destroy(sc->sc_tdmatag); 434 fail_rtag: 435 bus_dma_tag_destroy(sc->sc_rdmatag); 436 fail_ptag: 437 bus_dma_tag_destroy(sc->sc_pdmatag); 438 fail_taskq: 439 taskqueue_free(sc->sc_tq); 440 fail_ifnet: 441 if_free(ifp); 442 return (error); 443 } 444 445 static void 446 cas_detach(struct cas_softc *sc) 447 { 448 struct ifnet *ifp = sc->sc_ifp; 449 int i; 450 451 ether_ifdetach(ifp); 452 CAS_LOCK(sc); 453 cas_stop(ifp); 454 CAS_UNLOCK(sc); 455 callout_drain(&sc->sc_tick_ch); 456 callout_drain(&sc->sc_rx_ch); 457 taskqueue_drain(sc->sc_tq, &sc->sc_intr_task); 458 taskqueue_drain(sc->sc_tq, &sc->sc_tx_task); 459 if_free(ifp); 460 taskqueue_free(sc->sc_tq); 461 device_delete_child(sc->sc_dev, sc->sc_miibus); 462 463 for (i = 0; i < CAS_NRXDESC; i++) 464 if (sc->sc_rxdsoft[i].rxds_dmamap != NULL) 465 bus_dmamap_sync(sc->sc_rdmatag, 466 sc->sc_rxdsoft[i].rxds_dmamap, 467 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 468 for (i = 0; i < CAS_NRXDESC; i++) 469 if (sc->sc_rxdsoft[i].rxds_paddr != 0) 470 bus_dmamap_unload(sc->sc_rdmatag, 471 sc->sc_rxdsoft[i].rxds_dmamap); 472 for (i = 0; i < CAS_NRXDESC; i++) 473 if (sc->sc_rxdsoft[i].rxds_buf != NULL) 474 bus_dmamem_free(sc->sc_rdmatag, 475 sc->sc_rxdsoft[i].rxds_buf, 476 sc->sc_rxdsoft[i].rxds_dmamap); 477 for (i = 0; i < CAS_TXQUEUELEN; i++) 478 if (sc->sc_txsoft[i].txs_dmamap != NULL) 479 bus_dmamap_destroy(sc->sc_tdmatag, 480 sc->sc_txsoft[i].txs_dmamap); 481 CAS_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 482 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); 483 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, 484 sc->sc_cddmamap); 485 bus_dma_tag_destroy(sc->sc_cdmatag); 486 bus_dma_tag_destroy(sc->sc_tdmatag); 487 bus_dma_tag_destroy(sc->sc_rdmatag); 488 bus_dma_tag_destroy(sc->sc_pdmatag); 489 } 490 491 static void 492 cas_suspend(struct cas_softc *sc) 493 { 494 struct ifnet *ifp = sc->sc_ifp; 495 496 CAS_LOCK(sc); 497 cas_stop(ifp); 498 CAS_UNLOCK(sc); 499 } 500 501 static void 502 cas_resume(struct cas_softc *sc) 503 { 504 struct ifnet *ifp = sc->sc_ifp; 505 506 CAS_LOCK(sc); 507 /* 508 * On resume all registers have to be initialized again like 509 * after power-on. 510 */ 511 sc->sc_flags &= ~CAS_INITED; 512 if (ifp->if_flags & IFF_UP) 513 cas_init_locked(sc); 514 CAS_UNLOCK(sc); 515 } 516 517 static inline void 518 cas_rxcksum(struct mbuf *m, uint16_t cksum) 519 { 520 struct ether_header *eh; 521 struct ip *ip; 522 struct udphdr *uh; 523 uint16_t *opts; 524 int32_t hlen, len, pktlen; 525 uint32_t temp32; 526 527 pktlen = m->m_pkthdr.len; 528 if (pktlen < sizeof(struct ether_header) + sizeof(struct ip)) 529 return; 530 eh = mtod(m, struct ether_header *); 531 if (eh->ether_type != htons(ETHERTYPE_IP)) 532 return; 533 ip = (struct ip *)(eh + 1); 534 if (ip->ip_v != IPVERSION) 535 return; 536 537 hlen = ip->ip_hl << 2; 538 pktlen -= sizeof(struct ether_header); 539 if (hlen < sizeof(struct ip)) 540 return; 541 if (ntohs(ip->ip_len) < hlen) 542 return; 543 if (ntohs(ip->ip_len) != pktlen) 544 return; 545 if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) 546 return; /* Cannot handle fragmented packet. */ 547 548 switch (ip->ip_p) { 549 case IPPROTO_TCP: 550 if (pktlen < (hlen + sizeof(struct tcphdr))) 551 return; 552 break; 553 case IPPROTO_UDP: 554 if (pktlen < (hlen + sizeof(struct udphdr))) 555 return; 556 uh = (struct udphdr *)((uint8_t *)ip + hlen); 557 if (uh->uh_sum == 0) 558 return; /* no checksum */ 559 break; 560 default: 561 return; 562 } 563 564 cksum = ~cksum; 565 /* checksum fixup for IP options */ 566 len = hlen - sizeof(struct ip); 567 if (len > 0) { 568 opts = (uint16_t *)(ip + 1); 569 for (; len > 0; len -= sizeof(uint16_t), opts++) { 570 temp32 = cksum - *opts; 571 temp32 = (temp32 >> 16) + (temp32 & 65535); 572 cksum = temp32 & 65535; 573 } 574 } 575 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; 576 m->m_pkthdr.csum_data = cksum; 577 } 578 579 static void 580 cas_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) 581 { 582 struct cas_softc *sc = xsc; 583 584 if (error != 0) 585 return; 586 if (nsegs != 1) 587 panic("%s: bad control buffer segment count", __func__); 588 sc->sc_cddma = segs[0].ds_addr; 589 } 590 591 static void 592 cas_rxdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) 593 { 594 struct cas_softc *sc = xsc; 595 596 if (error != 0) 597 return; 598 if (nsegs != 1) 599 panic("%s: bad RX buffer segment count", __func__); 600 sc->sc_rxdsoft[sc->sc_rxdptr].rxds_paddr = segs[0].ds_addr; 601 } 602 603 static void 604 cas_tick(void *arg) 605 { 606 struct cas_softc *sc = arg; 607 struct ifnet *ifp = sc->sc_ifp; 608 uint32_t v; 609 610 CAS_LOCK_ASSERT(sc, MA_OWNED); 611 612 /* 613 * Unload collision and error counters. 614 */ 615 ifp->if_collisions += 616 CAS_READ_4(sc, CAS_MAC_NORM_COLL_CNT) + 617 CAS_READ_4(sc, CAS_MAC_FIRST_COLL_CNT); 618 v = CAS_READ_4(sc, CAS_MAC_EXCESS_COLL_CNT) + 619 CAS_READ_4(sc, CAS_MAC_LATE_COLL_CNT); 620 ifp->if_collisions += v; 621 ifp->if_oerrors += v; 622 ifp->if_ierrors += 623 CAS_READ_4(sc, CAS_MAC_RX_LEN_ERR_CNT) + 624 CAS_READ_4(sc, CAS_MAC_RX_ALIGN_ERR) + 625 CAS_READ_4(sc, CAS_MAC_RX_CRC_ERR_CNT) + 626 CAS_READ_4(sc, CAS_MAC_RX_CODE_VIOL); 627 628 /* 629 * Then clear the hardware counters. 630 */ 631 CAS_WRITE_4(sc, CAS_MAC_NORM_COLL_CNT, 0); 632 CAS_WRITE_4(sc, CAS_MAC_FIRST_COLL_CNT, 0); 633 CAS_WRITE_4(sc, CAS_MAC_EXCESS_COLL_CNT, 0); 634 CAS_WRITE_4(sc, CAS_MAC_LATE_COLL_CNT, 0); 635 CAS_WRITE_4(sc, CAS_MAC_RX_LEN_ERR_CNT, 0); 636 CAS_WRITE_4(sc, CAS_MAC_RX_ALIGN_ERR, 0); 637 CAS_WRITE_4(sc, CAS_MAC_RX_CRC_ERR_CNT, 0); 638 CAS_WRITE_4(sc, CAS_MAC_RX_CODE_VIOL, 0); 639 640 mii_tick(sc->sc_mii); 641 642 if (sc->sc_txfree != CAS_MAXTXFREE) 643 cas_tint(sc); 644 645 cas_watchdog(sc); 646 647 callout_reset(&sc->sc_tick_ch, hz, cas_tick, sc); 648 } 649 650 static int 651 cas_bitwait(struct cas_softc *sc, bus_addr_t r, uint32_t clr, uint32_t set) 652 { 653 int i; 654 uint32_t reg; 655 656 for (i = CAS_TRIES; i--; DELAY(100)) { 657 reg = CAS_READ_4(sc, r); 658 if ((reg & clr) == 0 && (reg & set) == set) 659 return (1); 660 } 661 return (0); 662 } 663 664 static void 665 cas_reset(struct cas_softc *sc) 666 { 667 668 #ifdef CAS_DEBUG 669 CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__); 670 #endif 671 /* Disable all interrupts in order to avoid spurious ones. */ 672 CAS_WRITE_4(sc, CAS_INTMASK, 0xffffffff); 673 674 cas_reset_rx(sc); 675 cas_reset_tx(sc); 676 677 /* 678 * Do a full reset modulo the result of the last auto-negotiation 679 * when using the SERDES. 680 */ 681 CAS_WRITE_4(sc, CAS_RESET, CAS_RESET_RX | CAS_RESET_TX | 682 ((sc->sc_flags & CAS_SERDES) != 0 ? CAS_RESET_PCS_DIS : 0)); 683 CAS_BARRIER(sc, CAS_RESET, 4, 684 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 685 DELAY(3000); 686 if (!cas_bitwait(sc, CAS_RESET, CAS_RESET_RX | CAS_RESET_TX, 0)) 687 device_printf(sc->sc_dev, "cannot reset device\n"); 688 } 689 690 static void 691 cas_stop(struct ifnet *ifp) 692 { 693 struct cas_softc *sc = ifp->if_softc; 694 struct cas_txsoft *txs; 695 696 #ifdef CAS_DEBUG 697 CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__); 698 #endif 699 700 callout_stop(&sc->sc_tick_ch); 701 callout_stop(&sc->sc_rx_ch); 702 703 /* Disable all interrupts in order to avoid spurious ones. */ 704 CAS_WRITE_4(sc, CAS_INTMASK, 0xffffffff); 705 706 cas_reset_tx(sc); 707 cas_reset_rx(sc); 708 709 /* 710 * Release any queued transmit buffers. 711 */ 712 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 713 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 714 if (txs->txs_ndescs != 0) { 715 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 716 BUS_DMASYNC_POSTWRITE); 717 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 718 if (txs->txs_mbuf != NULL) { 719 m_freem(txs->txs_mbuf); 720 txs->txs_mbuf = NULL; 721 } 722 } 723 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 724 } 725 726 /* 727 * Mark the interface down and cancel the watchdog timer. 728 */ 729 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 730 sc->sc_flags &= ~CAS_LINK; 731 sc->sc_wdog_timer = 0; 732 } 733 734 static int 735 cas_reset_rx(struct cas_softc *sc) 736 { 737 738 /* 739 * Resetting while DMA is in progress can cause a bus hang, so we 740 * disable DMA first. 741 */ 742 cas_disable_rx(sc); 743 CAS_WRITE_4(sc, CAS_RX_CONF, 0); 744 CAS_BARRIER(sc, CAS_RX_CONF, 4, 745 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 746 if (!cas_bitwait(sc, CAS_RX_CONF, CAS_RX_CONF_RXDMA_EN, 0)) 747 device_printf(sc->sc_dev, "cannot disable RX DMA\n"); 748 749 /* Finally, reset the ERX. */ 750 CAS_WRITE_4(sc, CAS_RESET, CAS_RESET_RX | 751 ((sc->sc_flags & CAS_SERDES) != 0 ? CAS_RESET_PCS_DIS : 0)); 752 CAS_BARRIER(sc, CAS_RESET, 4, 753 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 754 if (!cas_bitwait(sc, CAS_RESET, CAS_RESET_RX | CAS_RESET_TX, 0)) { 755 device_printf(sc->sc_dev, "cannot reset receiver\n"); 756 return (1); 757 } 758 return (0); 759 } 760 761 static int 762 cas_reset_tx(struct cas_softc *sc) 763 { 764 765 /* 766 * Resetting while DMA is in progress can cause a bus hang, so we 767 * disable DMA first. 768 */ 769 cas_disable_tx(sc); 770 CAS_WRITE_4(sc, CAS_TX_CONF, 0); 771 CAS_BARRIER(sc, CAS_TX_CONF, 4, 772 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 773 if (!cas_bitwait(sc, CAS_TX_CONF, CAS_TX_CONF_TXDMA_EN, 0)) 774 device_printf(sc->sc_dev, "cannot disable TX DMA\n"); 775 776 /* Finally, reset the ETX. */ 777 CAS_WRITE_4(sc, CAS_RESET, CAS_RESET_TX | 778 ((sc->sc_flags & CAS_SERDES) != 0 ? CAS_RESET_PCS_DIS : 0)); 779 CAS_BARRIER(sc, CAS_RESET, 4, 780 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 781 if (!cas_bitwait(sc, CAS_RESET, CAS_RESET_RX | CAS_RESET_TX, 0)) { 782 device_printf(sc->sc_dev, "cannot reset transmitter\n"); 783 return (1); 784 } 785 return (0); 786 } 787 788 static int 789 cas_disable_rx(struct cas_softc *sc) 790 { 791 792 CAS_WRITE_4(sc, CAS_MAC_RX_CONF, 793 CAS_READ_4(sc, CAS_MAC_RX_CONF) & ~CAS_MAC_RX_CONF_EN); 794 CAS_BARRIER(sc, CAS_MAC_RX_CONF, 4, 795 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 796 return (cas_bitwait(sc, CAS_MAC_RX_CONF, CAS_MAC_RX_CONF_EN, 0)); 797 } 798 799 static int 800 cas_disable_tx(struct cas_softc *sc) 801 { 802 803 CAS_WRITE_4(sc, CAS_MAC_TX_CONF, 804 CAS_READ_4(sc, CAS_MAC_TX_CONF) & ~CAS_MAC_TX_CONF_EN); 805 CAS_BARRIER(sc, CAS_MAC_TX_CONF, 4, 806 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 807 return (cas_bitwait(sc, CAS_MAC_TX_CONF, CAS_MAC_TX_CONF_EN, 0)); 808 } 809 810 static inline void 811 cas_rxcompinit(struct cas_rx_comp *rxcomp) 812 { 813 814 rxcomp->crc_word1 = 0; 815 rxcomp->crc_word2 = 0; 816 rxcomp->crc_word3 = 817 htole64(CAS_SET(ETHER_HDR_LEN + sizeof(struct ip), CAS_RC3_CSO)); 818 rxcomp->crc_word4 = htole64(CAS_RC4_ZERO); 819 } 820 821 static void 822 cas_meminit(struct cas_softc *sc) 823 { 824 int i; 825 826 CAS_LOCK_ASSERT(sc, MA_OWNED); 827 828 /* 829 * Initialize the transmit descriptor ring. 830 */ 831 for (i = 0; i < CAS_NTXDESC; i++) { 832 sc->sc_txdescs[i].cd_flags = 0; 833 sc->sc_txdescs[i].cd_buf_ptr = 0; 834 } 835 sc->sc_txfree = CAS_MAXTXFREE; 836 sc->sc_txnext = 0; 837 sc->sc_txwin = 0; 838 839 /* 840 * Initialize the receive completion ring. 841 */ 842 for (i = 0; i < CAS_NRXCOMP; i++) 843 cas_rxcompinit(&sc->sc_rxcomps[i]); 844 sc->sc_rxcptr = 0; 845 846 /* 847 * Initialize the first receive descriptor ring. We leave 848 * the second one zeroed as we don't actually use it. 849 */ 850 for (i = 0; i < CAS_NRXDESC; i++) 851 CAS_INIT_RXDESC(sc, i, i); 852 sc->sc_rxdptr = 0; 853 854 CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 855 } 856 857 static u_int 858 cas_descsize(u_int sz) 859 { 860 861 switch (sz) { 862 case 32: 863 return (CAS_DESC_32); 864 case 64: 865 return (CAS_DESC_64); 866 case 128: 867 return (CAS_DESC_128); 868 case 256: 869 return (CAS_DESC_256); 870 case 512: 871 return (CAS_DESC_512); 872 case 1024: 873 return (CAS_DESC_1K); 874 case 2048: 875 return (CAS_DESC_2K); 876 case 4096: 877 return (CAS_DESC_4K); 878 case 8192: 879 return (CAS_DESC_8K); 880 default: 881 printf("%s: invalid descriptor ring size %d\n", __func__, sz); 882 return (CAS_DESC_32); 883 } 884 } 885 886 static u_int 887 cas_rxcompsize(u_int sz) 888 { 889 890 switch (sz) { 891 case 128: 892 return (CAS_RX_CONF_COMP_128); 893 case 256: 894 return (CAS_RX_CONF_COMP_256); 895 case 512: 896 return (CAS_RX_CONF_COMP_512); 897 case 1024: 898 return (CAS_RX_CONF_COMP_1K); 899 case 2048: 900 return (CAS_RX_CONF_COMP_2K); 901 case 4096: 902 return (CAS_RX_CONF_COMP_4K); 903 case 8192: 904 return (CAS_RX_CONF_COMP_8K); 905 case 16384: 906 return (CAS_RX_CONF_COMP_16K); 907 case 32768: 908 return (CAS_RX_CONF_COMP_32K); 909 default: 910 printf("%s: invalid dcompletion ring size %d\n", __func__, sz); 911 return (CAS_RX_CONF_COMP_128); 912 } 913 } 914 915 static void 916 cas_init(void *xsc) 917 { 918 struct cas_softc *sc = xsc; 919 920 CAS_LOCK(sc); 921 cas_init_locked(sc); 922 CAS_UNLOCK(sc); 923 } 924 925 /* 926 * Initialization of interface; set up initialization block 927 * and transmit/receive descriptor rings. 928 */ 929 static void 930 cas_init_locked(struct cas_softc *sc) 931 { 932 struct ifnet *ifp = sc->sc_ifp; 933 uint32_t v; 934 935 CAS_LOCK_ASSERT(sc, MA_OWNED); 936 937 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 938 return; 939 940 #ifdef CAS_DEBUG 941 CTR2(KTR_CAS, "%s: %s: calling stop", device_get_name(sc->sc_dev), 942 __func__); 943 #endif 944 /* 945 * Initialization sequence. The numbered steps below correspond 946 * to the sequence outlined in section 6.3.5.1 in the Ethernet 947 * Channel Engine manual (part of the PCIO manual). 948 * See also the STP2002-STQ document from Sun Microsystems. 949 */ 950 951 /* step 1 & 2. Reset the Ethernet Channel. */ 952 cas_stop(ifp); 953 cas_reset(sc); 954 #ifdef CAS_DEBUG 955 CTR2(KTR_CAS, "%s: %s: restarting", device_get_name(sc->sc_dev), 956 __func__); 957 #endif 958 959 /* Re-initialize the MIF. */ 960 cas_mifinit(sc); 961 962 /* step 3. Setup data structures in host memory. */ 963 cas_meminit(sc); 964 965 /* step 4. TX MAC registers & counters */ 966 cas_init_regs(sc); 967 968 /* step 5. RX MAC registers & counters */ 969 cas_setladrf(sc); 970 971 /* step 6 & 7. Program Ring Base Addresses. */ 972 CAS_WRITE_4(sc, CAS_TX_DESC3_BASE_HI, 973 (((uint64_t)CAS_CDTXDADDR(sc, 0)) >> 32)); 974 CAS_WRITE_4(sc, CAS_TX_DESC3_BASE_LO, 975 CAS_CDTXDADDR(sc, 0) & 0xffffffff); 976 977 CAS_WRITE_4(sc, CAS_RX_COMP_BASE_HI, 978 (((uint64_t)CAS_CDRXCADDR(sc, 0)) >> 32)); 979 CAS_WRITE_4(sc, CAS_RX_COMP_BASE_LO, 980 CAS_CDRXCADDR(sc, 0) & 0xffffffff); 981 982 CAS_WRITE_4(sc, CAS_RX_DESC_BASE_HI, 983 (((uint64_t)CAS_CDRXDADDR(sc, 0)) >> 32)); 984 CAS_WRITE_4(sc, CAS_RX_DESC_BASE_LO, 985 CAS_CDRXDADDR(sc, 0) & 0xffffffff); 986 987 if ((sc->sc_flags & CAS_REG_PLUS) != 0) { 988 CAS_WRITE_4(sc, CAS_RX_DESC2_BASE_HI, 989 (((uint64_t)CAS_CDRXD2ADDR(sc, 0)) >> 32)); 990 CAS_WRITE_4(sc, CAS_RX_DESC2_BASE_LO, 991 CAS_CDRXD2ADDR(sc, 0) & 0xffffffff); 992 } 993 994 #ifdef CAS_DEBUG 995 CTR5(KTR_CAS, 996 "loading TXDR %lx, RXCR %lx, RXDR %lx, RXD2R %lx, cddma %lx", 997 CAS_CDTXDADDR(sc, 0), CAS_CDRXCADDR(sc, 0), CAS_CDRXDADDR(sc, 0), 998 CAS_CDRXD2ADDR(sc, 0), sc->sc_cddma); 999 #endif 1000 1001 /* step 8. Global Configuration & Interrupt Masks */ 1002 1003 /* Disable weighted round robin. */ 1004 CAS_WRITE_4(sc, CAS_CAW, CAS_CAW_RR_DIS); 1005 1006 /* 1007 * Enable infinite bursts for revisions without PCI issues if 1008 * applicable. Doing so greatly improves the TX performance on 1009 * !__sparc64__. 1010 */ 1011 CAS_WRITE_4(sc, CAS_INF_BURST, 1012 #if !defined(__sparc64__) 1013 (sc->sc_flags & CAS_TABORT) == 0 ? CAS_INF_BURST_EN : 1014 #endif 1015 0); 1016 1017 /* Set up interrupts. */ 1018 CAS_WRITE_4(sc, CAS_INTMASK, 1019 ~(CAS_INTR_TX_INT_ME | CAS_INTR_TX_TAG_ERR | 1020 CAS_INTR_RX_DONE | CAS_INTR_RX_BUF_NA | CAS_INTR_RX_TAG_ERR | 1021 CAS_INTR_RX_COMP_FULL | CAS_INTR_RX_BUF_AEMPTY | 1022 CAS_INTR_RX_COMP_AFULL | CAS_INTR_RX_LEN_MMATCH | 1023 CAS_INTR_PCI_ERROR_INT 1024 #ifdef CAS_DEBUG 1025 | CAS_INTR_PCS_INT | CAS_INTR_MIF 1026 #endif 1027 )); 1028 /* Don't clear top level interrupts when CAS_STATUS_ALIAS is read. */ 1029 CAS_WRITE_4(sc, CAS_CLEAR_ALIAS, 0); 1030 CAS_WRITE_4(sc, CAS_MAC_RX_MASK, ~CAS_MAC_RX_OVERFLOW); 1031 CAS_WRITE_4(sc, CAS_MAC_TX_MASK, 1032 ~(CAS_MAC_TX_UNDERRUN | CAS_MAC_TX_MAX_PKT_ERR)); 1033 #ifdef CAS_DEBUG 1034 CAS_WRITE_4(sc, CAS_MAC_CTRL_MASK, 1035 ~(CAS_MAC_CTRL_PAUSE_RCVD | CAS_MAC_CTRL_PAUSE | 1036 CAS_MAC_CTRL_NON_PAUSE)); 1037 #else 1038 CAS_WRITE_4(sc, CAS_MAC_CTRL_MASK, 1039 CAS_MAC_CTRL_PAUSE_RCVD | CAS_MAC_CTRL_PAUSE | 1040 CAS_MAC_CTRL_NON_PAUSE); 1041 #endif 1042 1043 /* Enable PCI error interrupts. */ 1044 CAS_WRITE_4(sc, CAS_ERROR_MASK, 1045 ~(CAS_ERROR_DTRTO | CAS_ERROR_OTHER | CAS_ERROR_DMAW_ZERO | 1046 CAS_ERROR_DMAR_ZERO | CAS_ERROR_RTRTO)); 1047 1048 /* Enable PCI error interrupts in BIM configuration. */ 1049 CAS_WRITE_4(sc, CAS_BIM_CONF, 1050 CAS_BIM_CONF_DPAR_EN | CAS_BIM_CONF_RMA_EN | CAS_BIM_CONF_RTA_EN); 1051 1052 /* 1053 * step 9. ETX Configuration: encode receive descriptor ring size, 1054 * enable DMA and disable pre-interrupt writeback completion. 1055 */ 1056 v = cas_descsize(CAS_NTXDESC) << CAS_TX_CONF_DESC3_SHFT; 1057 CAS_WRITE_4(sc, CAS_TX_CONF, v | CAS_TX_CONF_TXDMA_EN | 1058 CAS_TX_CONF_RDPP_DIS | CAS_TX_CONF_PICWB_DIS); 1059 1060 /* step 10. ERX Configuration */ 1061 1062 /* 1063 * Encode receive completion and descriptor ring sizes, set the 1064 * swivel offset. 1065 */ 1066 v = cas_rxcompsize(CAS_NRXCOMP) << CAS_RX_CONF_COMP_SHFT; 1067 v |= cas_descsize(CAS_NRXDESC) << CAS_RX_CONF_DESC_SHFT; 1068 if ((sc->sc_flags & CAS_REG_PLUS) != 0) 1069 v |= cas_descsize(CAS_NRXDESC2) << CAS_RX_CONF_DESC2_SHFT; 1070 CAS_WRITE_4(sc, CAS_RX_CONF, 1071 v | (ETHER_ALIGN << CAS_RX_CONF_SOFF_SHFT)); 1072 1073 /* Set the PAUSE thresholds. We use the maximum OFF threshold. */ 1074 CAS_WRITE_4(sc, CAS_RX_PTHRS, 1075 ((111 * 64) << CAS_RX_PTHRS_XOFF_SHFT) | 1076 ((15 * 64) << CAS_RX_PTHRS_XON_SHFT)); 1077 1078 /* RX blanking */ 1079 CAS_WRITE_4(sc, CAS_RX_BLANK, 1080 (15 << CAS_RX_BLANK_TIME_SHFT) | (5 << CAS_RX_BLANK_PKTS_SHFT)); 1081 1082 /* Set RX_COMP_AFULL threshold to half of the RX completions. */ 1083 CAS_WRITE_4(sc, CAS_RX_AEMPTY_THRS, 1084 (CAS_NRXCOMP / 2) << CAS_RX_AEMPTY_COMP_SHFT); 1085 1086 /* Initialize the RX page size register as appropriate for 8k. */ 1087 CAS_WRITE_4(sc, CAS_RX_PSZ, 1088 (CAS_RX_PSZ_8K << CAS_RX_PSZ_SHFT) | 1089 (4 << CAS_RX_PSZ_MB_CNT_SHFT) | 1090 (CAS_RX_PSZ_MB_STRD_2K << CAS_RX_PSZ_MB_STRD_SHFT) | 1091 (CAS_RX_PSZ_MB_OFF_64 << CAS_RX_PSZ_MB_OFF_SHFT)); 1092 1093 /* Disable RX random early detection. */ 1094 CAS_WRITE_4(sc, CAS_RX_RED, 0); 1095 1096 /* Zero the RX reassembly DMA table. */ 1097 for (v = 0; v <= CAS_RX_REAS_DMA_ADDR_LC; v++) { 1098 CAS_WRITE_4(sc, CAS_RX_REAS_DMA_ADDR, v); 1099 CAS_WRITE_4(sc, CAS_RX_REAS_DMA_DATA_LO, 0); 1100 CAS_WRITE_4(sc, CAS_RX_REAS_DMA_DATA_MD, 0); 1101 CAS_WRITE_4(sc, CAS_RX_REAS_DMA_DATA_HI, 0); 1102 } 1103 1104 /* Ensure the RX control FIFO and RX IPP FIFO addresses are zero. */ 1105 CAS_WRITE_4(sc, CAS_RX_CTRL_FIFO, 0); 1106 CAS_WRITE_4(sc, CAS_RX_IPP_ADDR, 0); 1107 1108 /* Finally, enable RX DMA. */ 1109 CAS_WRITE_4(sc, CAS_RX_CONF, 1110 CAS_READ_4(sc, CAS_RX_CONF) | CAS_RX_CONF_RXDMA_EN); 1111 1112 /* step 11. Configure Media. */ 1113 1114 /* step 12. RX_MAC Configuration Register */ 1115 v = CAS_READ_4(sc, CAS_MAC_RX_CONF) & ~CAS_MAC_RX_CONF_STRPPAD; 1116 v |= CAS_MAC_RX_CONF_EN | CAS_MAC_RX_CONF_STRPFCS; 1117 CAS_WRITE_4(sc, CAS_MAC_RX_CONF, 0); 1118 CAS_BARRIER(sc, CAS_MAC_RX_CONF, 4, 1119 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 1120 if (!cas_bitwait(sc, CAS_MAC_RX_CONF, CAS_MAC_RX_CONF_EN, 0)) 1121 device_printf(sc->sc_dev, "cannot configure RX MAC\n"); 1122 CAS_WRITE_4(sc, CAS_MAC_RX_CONF, v); 1123 1124 /* step 13. TX_MAC Configuration Register */ 1125 v = CAS_READ_4(sc, CAS_MAC_TX_CONF); 1126 v |= CAS_MAC_TX_CONF_EN; 1127 CAS_WRITE_4(sc, CAS_MAC_TX_CONF, 0); 1128 CAS_BARRIER(sc, CAS_MAC_TX_CONF, 4, 1129 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 1130 if (!cas_bitwait(sc, CAS_MAC_TX_CONF, CAS_MAC_TX_CONF_EN, 0)) 1131 device_printf(sc->sc_dev, "cannot configure TX MAC\n"); 1132 CAS_WRITE_4(sc, CAS_MAC_TX_CONF, v); 1133 1134 /* step 14. Issue Transmit Pending command. */ 1135 1136 /* step 15. Give the reciever a swift kick. */ 1137 CAS_WRITE_4(sc, CAS_RX_KICK, CAS_NRXDESC - 4); 1138 CAS_WRITE_4(sc, CAS_RX_COMP_TAIL, 0); 1139 if ((sc->sc_flags & CAS_REG_PLUS) != 0) 1140 CAS_WRITE_4(sc, CAS_RX_KICK2, CAS_NRXDESC2 - 4); 1141 1142 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1143 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1144 1145 mii_mediachg(sc->sc_mii); 1146 1147 /* Start the one second timer. */ 1148 sc->sc_wdog_timer = 0; 1149 callout_reset(&sc->sc_tick_ch, hz, cas_tick, sc); 1150 } 1151 1152 static int 1153 cas_load_txmbuf(struct cas_softc *sc, struct mbuf **m_head) 1154 { 1155 bus_dma_segment_t txsegs[CAS_NTXSEGS]; 1156 struct cas_txsoft *txs; 1157 struct ip *ip; 1158 struct mbuf *m; 1159 uint64_t cflags; 1160 int error, nexttx, nsegs, offset, seg; 1161 1162 CAS_LOCK_ASSERT(sc, MA_OWNED); 1163 1164 /* Get a work queue entry. */ 1165 if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) { 1166 /* Ran out of descriptors. */ 1167 return (ENOBUFS); 1168 } 1169 1170 cflags = 0; 1171 if (((*m_head)->m_pkthdr.csum_flags & CAS_CSUM_FEATURES) != 0) { 1172 if (M_WRITABLE(*m_head) == 0) { 1173 m = m_dup(*m_head, M_DONTWAIT); 1174 m_freem(*m_head); 1175 *m_head = m; 1176 if (m == NULL) 1177 return (ENOBUFS); 1178 } 1179 offset = sizeof(struct ether_header); 1180 m = m_pullup(*m_head, offset + sizeof(struct ip)); 1181 if (m == NULL) { 1182 *m_head = NULL; 1183 return (ENOBUFS); 1184 } 1185 ip = (struct ip *)(mtod(m, caddr_t) + offset); 1186 offset += (ip->ip_hl << 2); 1187 cflags = (offset << CAS_TD_CKSUM_START_SHFT) | 1188 ((offset + m->m_pkthdr.csum_data) << 1189 CAS_TD_CKSUM_STUFF_SHFT) | CAS_TD_CKSUM_EN; 1190 *m_head = m; 1191 } 1192 1193 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap, 1194 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 1195 if (error == EFBIG) { 1196 m = m_collapse(*m_head, M_DONTWAIT, CAS_NTXSEGS); 1197 if (m == NULL) { 1198 m_freem(*m_head); 1199 *m_head = NULL; 1200 return (ENOBUFS); 1201 } 1202 *m_head = m; 1203 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, 1204 txs->txs_dmamap, *m_head, txsegs, &nsegs, 1205 BUS_DMA_NOWAIT); 1206 if (error != 0) { 1207 m_freem(*m_head); 1208 *m_head = NULL; 1209 return (error); 1210 } 1211 } else if (error != 0) 1212 return (error); 1213 /* If nsegs is wrong then the stack is corrupt. */ 1214 KASSERT(nsegs <= CAS_NTXSEGS, 1215 ("%s: too many DMA segments (%d)", __func__, nsegs)); 1216 if (nsegs == 0) { 1217 m_freem(*m_head); 1218 *m_head = NULL; 1219 return (EIO); 1220 } 1221 1222 /* 1223 * Ensure we have enough descriptors free to describe 1224 * the packet. Note, we always reserve one descriptor 1225 * at the end of the ring as a termination point, in 1226 * order to prevent wrap-around. 1227 */ 1228 if (nsegs > sc->sc_txfree - 1) { 1229 txs->txs_ndescs = 0; 1230 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 1231 return (ENOBUFS); 1232 } 1233 1234 txs->txs_ndescs = nsegs; 1235 txs->txs_firstdesc = sc->sc_txnext; 1236 nexttx = txs->txs_firstdesc; 1237 for (seg = 0; seg < nsegs; seg++, nexttx = CAS_NEXTTX(nexttx)) { 1238 #ifdef CAS_DEBUG 1239 CTR6(KTR_CAS, 1240 "%s: mapping seg %d (txd %d), len %lx, addr %#lx (%#lx)", 1241 __func__, seg, nexttx, txsegs[seg].ds_len, 1242 txsegs[seg].ds_addr, htole64(txsegs[seg].ds_addr)); 1243 #endif 1244 sc->sc_txdescs[nexttx].cd_buf_ptr = 1245 htole64(txsegs[seg].ds_addr); 1246 KASSERT(txsegs[seg].ds_len < 1247 CAS_TD_BUF_LEN_MASK >> CAS_TD_BUF_LEN_SHFT, 1248 ("%s: segment size too large!", __func__)); 1249 sc->sc_txdescs[nexttx].cd_flags = 1250 htole64(txsegs[seg].ds_len << CAS_TD_BUF_LEN_SHFT); 1251 txs->txs_lastdesc = nexttx; 1252 } 1253 1254 /* Set EOF on the last descriptor. */ 1255 #ifdef CAS_DEBUG 1256 CTR3(KTR_CAS, "%s: end of frame at segment %d, TX %d", 1257 __func__, seg, nexttx); 1258 #endif 1259 sc->sc_txdescs[txs->txs_lastdesc].cd_flags |= 1260 htole64(CAS_TD_END_OF_FRAME); 1261 1262 /* Lastly set SOF on the first descriptor. */ 1263 #ifdef CAS_DEBUG 1264 CTR3(KTR_CAS, "%s: start of frame at segment %d, TX %d", 1265 __func__, seg, nexttx); 1266 #endif 1267 if (sc->sc_txwin += nsegs > CAS_MAXTXFREE * 2 / 3) { 1268 sc->sc_txwin = 0; 1269 sc->sc_txdescs[txs->txs_firstdesc].cd_flags |= 1270 htole64(cflags | CAS_TD_START_OF_FRAME | CAS_TD_INT_ME); 1271 } else 1272 sc->sc_txdescs[txs->txs_firstdesc].cd_flags |= 1273 htole64(cflags | CAS_TD_START_OF_FRAME); 1274 1275 /* Sync the DMA map. */ 1276 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 1277 BUS_DMASYNC_PREWRITE); 1278 1279 #ifdef CAS_DEBUG 1280 CTR4(KTR_CAS, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d", 1281 __func__, txs->txs_firstdesc, txs->txs_lastdesc, 1282 txs->txs_ndescs); 1283 #endif 1284 STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q); 1285 STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q); 1286 txs->txs_mbuf = *m_head; 1287 1288 sc->sc_txnext = CAS_NEXTTX(txs->txs_lastdesc); 1289 sc->sc_txfree -= txs->txs_ndescs; 1290 1291 return (0); 1292 } 1293 1294 static void 1295 cas_init_regs(struct cas_softc *sc) 1296 { 1297 int i; 1298 const u_char *laddr = IF_LLADDR(sc->sc_ifp); 1299 1300 CAS_LOCK_ASSERT(sc, MA_OWNED); 1301 1302 /* These registers are not cleared on reset. */ 1303 if ((sc->sc_flags & CAS_INITED) == 0) { 1304 /* magic values */ 1305 CAS_WRITE_4(sc, CAS_MAC_IPG0, 0); 1306 CAS_WRITE_4(sc, CAS_MAC_IPG1, 8); 1307 CAS_WRITE_4(sc, CAS_MAC_IPG2, 4); 1308 1309 /* min frame length */ 1310 CAS_WRITE_4(sc, CAS_MAC_MIN_FRAME, ETHER_MIN_LEN); 1311 /* max frame length and max burst size */ 1312 CAS_WRITE_4(sc, CAS_MAC_MAX_BF, 1313 ((ETHER_MAX_LEN_JUMBO + ETHER_VLAN_ENCAP_LEN) << 1314 CAS_MAC_MAX_BF_FRM_SHFT) | 1315 (0x2000 << CAS_MAC_MAX_BF_BST_SHFT)); 1316 1317 /* more magic values */ 1318 CAS_WRITE_4(sc, CAS_MAC_PREAMBLE_LEN, 0x7); 1319 CAS_WRITE_4(sc, CAS_MAC_JAM_SIZE, 0x4); 1320 CAS_WRITE_4(sc, CAS_MAC_ATTEMPT_LIMIT, 0x10); 1321 CAS_WRITE_4(sc, CAS_MAC_CTRL_TYPE, 0x8088); 1322 1323 /* random number seed */ 1324 CAS_WRITE_4(sc, CAS_MAC_RANDOM_SEED, 1325 ((laddr[5] << 8) | laddr[4]) & 0x3ff); 1326 1327 /* secondary MAC addresses: 0:0:0:0:0:0 */ 1328 for (i = CAS_MAC_ADDR3; i <= CAS_MAC_ADDR41; 1329 i += CAS_MAC_ADDR4 - CAS_MAC_ADDR3) 1330 CAS_WRITE_4(sc, i, 0); 1331 1332 /* MAC control address: 01:80:c2:00:00:01 */ 1333 CAS_WRITE_4(sc, CAS_MAC_ADDR42, 0x0001); 1334 CAS_WRITE_4(sc, CAS_MAC_ADDR43, 0xc200); 1335 CAS_WRITE_4(sc, CAS_MAC_ADDR44, 0x0180); 1336 1337 /* MAC filter address: 0:0:0:0:0:0 */ 1338 CAS_WRITE_4(sc, CAS_MAC_AFILTER0, 0); 1339 CAS_WRITE_4(sc, CAS_MAC_AFILTER1, 0); 1340 CAS_WRITE_4(sc, CAS_MAC_AFILTER2, 0); 1341 CAS_WRITE_4(sc, CAS_MAC_AFILTER_MASK1_2, 0); 1342 CAS_WRITE_4(sc, CAS_MAC_AFILTER_MASK0, 0); 1343 1344 /* Zero the hash table. */ 1345 for (i = CAS_MAC_HASH0; i <= CAS_MAC_HASH15; 1346 i += CAS_MAC_HASH1 - CAS_MAC_HASH0) 1347 CAS_WRITE_4(sc, i, 0); 1348 1349 sc->sc_flags |= CAS_INITED; 1350 } 1351 1352 /* Counters need to be zeroed. */ 1353 CAS_WRITE_4(sc, CAS_MAC_NORM_COLL_CNT, 0); 1354 CAS_WRITE_4(sc, CAS_MAC_FIRST_COLL_CNT, 0); 1355 CAS_WRITE_4(sc, CAS_MAC_EXCESS_COLL_CNT, 0); 1356 CAS_WRITE_4(sc, CAS_MAC_LATE_COLL_CNT, 0); 1357 CAS_WRITE_4(sc, CAS_MAC_DEFER_TMR_CNT, 0); 1358 CAS_WRITE_4(sc, CAS_MAC_PEAK_ATTEMPTS, 0); 1359 CAS_WRITE_4(sc, CAS_MAC_RX_FRAME_COUNT, 0); 1360 CAS_WRITE_4(sc, CAS_MAC_RX_LEN_ERR_CNT, 0); 1361 CAS_WRITE_4(sc, CAS_MAC_RX_ALIGN_ERR, 0); 1362 CAS_WRITE_4(sc, CAS_MAC_RX_CRC_ERR_CNT, 0); 1363 CAS_WRITE_4(sc, CAS_MAC_RX_CODE_VIOL, 0); 1364 1365 /* Set XOFF PAUSE time. */ 1366 CAS_WRITE_4(sc, CAS_MAC_SPC, 0x1BF0 << CAS_MAC_SPC_TIME_SHFT); 1367 1368 /* Set the station address. */ 1369 CAS_WRITE_4(sc, CAS_MAC_ADDR0, (laddr[4] << 8) | laddr[5]); 1370 CAS_WRITE_4(sc, CAS_MAC_ADDR1, (laddr[2] << 8) | laddr[3]); 1371 CAS_WRITE_4(sc, CAS_MAC_ADDR2, (laddr[0] << 8) | laddr[1]); 1372 1373 /* Enable MII outputs. */ 1374 CAS_WRITE_4(sc, CAS_MAC_XIF_CONF, CAS_MAC_XIF_CONF_TX_OE); 1375 } 1376 1377 static void 1378 cas_tx_task(void *arg, int pending __unused) 1379 { 1380 struct ifnet *ifp; 1381 1382 ifp = (struct ifnet *)arg; 1383 cas_start(ifp); 1384 } 1385 1386 static inline void 1387 cas_txkick(struct cas_softc *sc) 1388 { 1389 1390 /* 1391 * Update the TX kick register. This register has to point to the 1392 * descriptor after the last valid one and for optimum performance 1393 * should be incremented in multiples of 4 (the DMA engine fetches/ 1394 * updates descriptors in batches of 4). 1395 */ 1396 #ifdef CAS_DEBUG 1397 CTR3(KTR_CAS, "%s: %s: kicking TX %d", 1398 device_get_name(sc->sc_dev), __func__, sc->sc_txnext); 1399 #endif 1400 CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1401 CAS_WRITE_4(sc, CAS_TX_KICK3, sc->sc_txnext); 1402 } 1403 1404 static void 1405 cas_start(struct ifnet *ifp) 1406 { 1407 struct cas_softc *sc = ifp->if_softc; 1408 struct mbuf *m; 1409 int kicked, ntx; 1410 1411 CAS_LOCK(sc); 1412 1413 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1414 IFF_DRV_RUNNING || (sc->sc_flags & CAS_LINK) == 0) { 1415 CAS_UNLOCK(sc); 1416 return; 1417 } 1418 1419 if (sc->sc_txfree < CAS_MAXTXFREE / 4) 1420 cas_tint(sc); 1421 1422 #ifdef CAS_DEBUG 1423 CTR4(KTR_CAS, "%s: %s: txfree %d, txnext %d", 1424 device_get_name(sc->sc_dev), __func__, sc->sc_txfree, 1425 sc->sc_txnext); 1426 #endif 1427 ntx = 0; 1428 kicked = 0; 1429 for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->sc_txfree > 1;) { 1430 IFQ_DRV_DEQUEUE(&ifp->if_snd, m); 1431 if (m == NULL) 1432 break; 1433 if (cas_load_txmbuf(sc, &m) != 0) { 1434 if (m == NULL) 1435 break; 1436 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1437 IFQ_DRV_PREPEND(&ifp->if_snd, m); 1438 break; 1439 } 1440 if ((sc->sc_txnext % 4) == 0) { 1441 cas_txkick(sc); 1442 kicked = 1; 1443 } else 1444 kicked = 0; 1445 ntx++; 1446 BPF_MTAP(ifp, m); 1447 } 1448 1449 if (ntx > 0) { 1450 if (kicked == 0) 1451 cas_txkick(sc); 1452 #ifdef CAS_DEBUG 1453 CTR2(KTR_CAS, "%s: packets enqueued, OWN on %d", 1454 device_get_name(sc->sc_dev), sc->sc_txnext); 1455 #endif 1456 1457 /* Set a watchdog timer in case the chip flakes out. */ 1458 sc->sc_wdog_timer = 5; 1459 #ifdef CAS_DEBUG 1460 CTR3(KTR_CAS, "%s: %s: watchdog %d", 1461 device_get_name(sc->sc_dev), __func__, 1462 sc->sc_wdog_timer); 1463 #endif 1464 } 1465 1466 CAS_UNLOCK(sc); 1467 } 1468 1469 static void 1470 cas_tint(struct cas_softc *sc) 1471 { 1472 struct ifnet *ifp = sc->sc_ifp; 1473 struct cas_txsoft *txs; 1474 int progress; 1475 uint32_t txlast; 1476 #ifdef CAS_DEBUG 1477 int i; 1478 1479 CAS_LOCK_ASSERT(sc, MA_OWNED); 1480 1481 CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__); 1482 #endif 1483 1484 /* 1485 * Go through our TX list and free mbufs for those 1486 * frames that have been transmitted. 1487 */ 1488 progress = 0; 1489 CAS_CDSYNC(sc, BUS_DMASYNC_POSTREAD); 1490 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 1491 #ifdef CAS_DEBUG 1492 if ((ifp->if_flags & IFF_DEBUG) != 0) { 1493 printf(" txsoft %p transmit chain:\n", txs); 1494 for (i = txs->txs_firstdesc;; i = CAS_NEXTTX(i)) { 1495 printf("descriptor %d: ", i); 1496 printf("cd_flags: 0x%016llx\t", 1497 (long long)le64toh( 1498 sc->sc_txdescs[i].cd_flags)); 1499 printf("cd_buf_ptr: 0x%016llx\n", 1500 (long long)le64toh( 1501 sc->sc_txdescs[i].cd_buf_ptr)); 1502 if (i == txs->txs_lastdesc) 1503 break; 1504 } 1505 } 1506 #endif 1507 1508 /* 1509 * In theory, we could harvest some descriptors before 1510 * the ring is empty, but that's a bit complicated. 1511 * 1512 * CAS_TX_COMPn points to the last descriptor 1513 * processed + 1. 1514 */ 1515 txlast = CAS_READ_4(sc, CAS_TX_COMP3); 1516 #ifdef CAS_DEBUG 1517 CTR4(KTR_CAS, "%s: txs->txs_firstdesc = %d, " 1518 "txs->txs_lastdesc = %d, txlast = %d", 1519 __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast); 1520 #endif 1521 if (txs->txs_firstdesc <= txs->txs_lastdesc) { 1522 if ((txlast >= txs->txs_firstdesc) && 1523 (txlast <= txs->txs_lastdesc)) 1524 break; 1525 } else { 1526 /* Ick -- this command wraps. */ 1527 if ((txlast >= txs->txs_firstdesc) || 1528 (txlast <= txs->txs_lastdesc)) 1529 break; 1530 } 1531 1532 #ifdef CAS_DEBUG 1533 CTR1(KTR_CAS, "%s: releasing a descriptor", __func__); 1534 #endif 1535 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 1536 1537 sc->sc_txfree += txs->txs_ndescs; 1538 1539 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 1540 BUS_DMASYNC_POSTWRITE); 1541 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 1542 if (txs->txs_mbuf != NULL) { 1543 m_freem(txs->txs_mbuf); 1544 txs->txs_mbuf = NULL; 1545 } 1546 1547 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 1548 1549 ifp->if_opackets++; 1550 progress = 1; 1551 } 1552 1553 #ifdef CAS_DEBUG 1554 CTR4(KTR_CAS, "%s: CAS_TX_STATE_MACHINE %x CAS_TX_DESC_BASE %llx " 1555 "CAS_TX_COMP3 %x", 1556 __func__, CAS_READ_4(sc, CAS_TX_STATE_MACHINE), 1557 ((long long)CAS_READ_4(sc, CAS_TX_DESC_BASE_HI3) << 32) | 1558 CAS_READ_4(sc, CAS_TX_DESC_BASE_LO3), 1559 CAS_READ_4(sc, CAS_TX_COMP3)); 1560 #endif 1561 1562 if (progress) { 1563 /* We freed some descriptors, so reset IFF_DRV_OACTIVE. */ 1564 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1565 if (STAILQ_EMPTY(&sc->sc_txdirtyq)) 1566 sc->sc_wdog_timer = 0; 1567 } 1568 1569 #ifdef CAS_DEBUG 1570 CTR3(KTR_CAS, "%s: %s: watchdog %d", 1571 device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer); 1572 #endif 1573 } 1574 1575 static void 1576 cas_rint_timeout(void *arg) 1577 { 1578 struct cas_softc *sc = arg; 1579 1580 CAS_LOCK_ASSERT(sc, MA_NOTOWNED); 1581 1582 cas_rint(sc); 1583 } 1584 1585 static void 1586 cas_rint(struct cas_softc *sc) 1587 { 1588 struct cas_rxdsoft *rxds, *rxds2; 1589 struct ifnet *ifp = sc->sc_ifp; 1590 struct mbuf *m, *m2; 1591 uint64_t word1, word2, word3, word4; 1592 uint32_t rxhead; 1593 u_int idx, idx2, len, off, skip; 1594 1595 CAS_LOCK_ASSERT(sc, MA_NOTOWNED); 1596 1597 callout_stop(&sc->sc_rx_ch); 1598 1599 #ifdef CAS_DEBUG 1600 CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__); 1601 #endif 1602 1603 #define PRINTWORD(n, delimiter) \ 1604 printf("word ## n: 0x%016llx%c", (long long)word ## n, delimiter) 1605 1606 #define SKIPASSERT(n) \ 1607 KASSERT(sc->sc_rxcomps[sc->sc_rxcptr].crc_word ## n == 0, \ 1608 ("%s: word ## n not 0", __func__)) 1609 1610 #define WORDTOH(n) \ 1611 word ## n = le64toh(sc->sc_rxcomps[sc->sc_rxcptr].crc_word ## n) 1612 1613 /* 1614 * Read the completion head register once. This limits 1615 * how long the following loop can execute. 1616 */ 1617 rxhead = CAS_READ_4(sc, CAS_RX_COMP_HEAD); 1618 #ifdef CAS_DEBUG 1619 CTR4(KTR_CAS, "%s: sc->sc_rxcptr %d, sc->sc_rxdptr %d, head %d", 1620 __func__, sc->rxcptr, sc->sc_rxdptr, rxhead); 1621 #endif 1622 skip = 0; 1623 CAS_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1624 for (; sc->sc_rxcptr != rxhead; 1625 sc->sc_rxcptr = CAS_NEXTRXCOMP(sc->sc_rxcptr)) { 1626 if (skip != 0) { 1627 SKIPASSERT(1); 1628 SKIPASSERT(2); 1629 SKIPASSERT(3); 1630 1631 --skip; 1632 goto skip; 1633 } 1634 1635 WORDTOH(1); 1636 WORDTOH(2); 1637 WORDTOH(3); 1638 WORDTOH(4); 1639 1640 #ifdef CAS_DEBUG 1641 if ((ifp->if_flags & IFF_DEBUG) != 0) { 1642 printf(" completion %d: ", sc->sc_rxcptr); 1643 PRINTWORD(1, '\t'); 1644 PRINTWORD(2, '\t'); 1645 PRINTWORD(3, '\t'); 1646 PRINTWORD(4, '\n'); 1647 } 1648 #endif 1649 1650 if (__predict_false( 1651 (word1 & CAS_RC1_TYPE_MASK) == CAS_RC1_TYPE_HW || 1652 (word4 & CAS_RC4_ZERO) != 0)) { 1653 /* 1654 * The descriptor is still marked as owned, although 1655 * it is supposed to have completed. This has been 1656 * observed on some machines. Just exiting here 1657 * might leave the packet sitting around until another 1658 * one arrives to trigger a new interrupt, which is 1659 * generally undesirable, so set up a timeout. 1660 */ 1661 callout_reset(&sc->sc_rx_ch, CAS_RXOWN_TICKS, 1662 cas_rint_timeout, sc); 1663 break; 1664 } 1665 1666 if (__predict_false( 1667 (word4 & (CAS_RC4_BAD | CAS_RC4_LEN_MMATCH)) != 0)) { 1668 ifp->if_ierrors++; 1669 device_printf(sc->sc_dev, 1670 "receive error: CRC error\n"); 1671 continue; 1672 } 1673 1674 KASSERT(CAS_GET(word1, CAS_RC1_DATA_SIZE) == 0 || 1675 CAS_GET(word2, CAS_RC2_HDR_SIZE) == 0, 1676 ("%s: data and header present", __func__)); 1677 KASSERT((word1 & CAS_RC1_SPLIT_PKT) == 0 || 1678 CAS_GET(word2, CAS_RC2_HDR_SIZE) == 0, 1679 ("%s: split and header present", __func__)); 1680 KASSERT(CAS_GET(word1, CAS_RC1_DATA_SIZE) == 0 || 1681 (word1 & CAS_RC1_RELEASE_HDR) == 0, 1682 ("%s: data present but header release", __func__)); 1683 KASSERT(CAS_GET(word2, CAS_RC2_HDR_SIZE) == 0 || 1684 (word1 & CAS_RC1_RELEASE_DATA) == 0, 1685 ("%s: header present but data release", __func__)); 1686 1687 if ((len = CAS_GET(word2, CAS_RC2_HDR_SIZE)) != 0) { 1688 idx = CAS_GET(word2, CAS_RC2_HDR_INDEX); 1689 off = CAS_GET(word2, CAS_RC2_HDR_OFF); 1690 #ifdef CAS_DEBUG 1691 CTR4(KTR_CAS, "%s: hdr at idx %d, off %d, len %d", 1692 __func__, idx, off, len); 1693 #endif 1694 rxds = &sc->sc_rxdsoft[idx]; 1695 MGETHDR(m, M_DONTWAIT, MT_DATA); 1696 if (m != NULL) { 1697 refcount_acquire(&rxds->rxds_refcount); 1698 bus_dmamap_sync(sc->sc_rdmatag, 1699 rxds->rxds_dmamap, BUS_DMASYNC_POSTREAD); 1700 #if __FreeBSD_version < 800016 1701 MEXTADD(m, (caddr_t)rxds->rxds_buf + 1702 off * 256 + ETHER_ALIGN, len, cas_free, 1703 rxds, M_RDONLY, EXT_NET_DRV); 1704 #else 1705 MEXTADD(m, (caddr_t)rxds->rxds_buf + 1706 off * 256 + ETHER_ALIGN, len, cas_free, 1707 sc, (void *)(uintptr_t)idx, 1708 M_RDONLY, EXT_NET_DRV); 1709 #endif 1710 if ((m->m_flags & M_EXT) == 0) { 1711 m_freem(m); 1712 m = NULL; 1713 } 1714 } 1715 if (m != NULL) { 1716 m->m_pkthdr.rcvif = ifp; 1717 m->m_pkthdr.len = m->m_len = len; 1718 ifp->if_ipackets++; 1719 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 1720 cas_rxcksum(m, CAS_GET(word4, 1721 CAS_RC4_TCP_CSUM)); 1722 /* Pass it on. */ 1723 (*ifp->if_input)(ifp, m); 1724 } else 1725 ifp->if_ierrors++; 1726 1727 if ((word1 & CAS_RC1_RELEASE_HDR) != 0 && 1728 refcount_release(&rxds->rxds_refcount) != 0) 1729 cas_add_rxdesc(sc, idx); 1730 } else if ((len = CAS_GET(word1, CAS_RC1_DATA_SIZE)) != 0) { 1731 idx = CAS_GET(word1, CAS_RC1_DATA_INDEX); 1732 off = CAS_GET(word1, CAS_RC1_DATA_OFF); 1733 #ifdef CAS_DEBUG 1734 CTR4(KTR_CAS, "%s: data at idx %d, off %d, len %d", 1735 __func__, idx, off, len); 1736 #endif 1737 rxds = &sc->sc_rxdsoft[idx]; 1738 MGETHDR(m, M_DONTWAIT, MT_DATA); 1739 if (m != NULL) { 1740 refcount_acquire(&rxds->rxds_refcount); 1741 off += ETHER_ALIGN; 1742 m->m_len = min(CAS_PAGE_SIZE - off, len); 1743 bus_dmamap_sync(sc->sc_rdmatag, 1744 rxds->rxds_dmamap, BUS_DMASYNC_POSTREAD); 1745 #if __FreeBSD_version < 800016 1746 MEXTADD(m, (caddr_t)rxds->rxds_buf + off, 1747 m->m_len, cas_free, rxds, M_RDONLY, 1748 EXT_NET_DRV); 1749 #else 1750 MEXTADD(m, (caddr_t)rxds->rxds_buf + off, 1751 m->m_len, cas_free, sc, 1752 (void *)(uintptr_t)idx, M_RDONLY, 1753 EXT_NET_DRV); 1754 #endif 1755 if ((m->m_flags & M_EXT) == 0) { 1756 m_freem(m); 1757 m = NULL; 1758 } 1759 } 1760 idx2 = 0; 1761 rxds2 = NULL; 1762 if ((word1 & CAS_RC1_SPLIT_PKT) != 0) { 1763 KASSERT((word1 & CAS_RC1_RELEASE_NEXT) != 0, 1764 ("%s: split but no release next", 1765 __func__)); 1766 1767 idx2 = CAS_GET(word2, CAS_RC2_NEXT_INDEX); 1768 #ifdef CAS_DEBUG 1769 CTR2(KTR_CAS, "%s: split at idx %d", 1770 __func__, idx2); 1771 #endif 1772 rxds2 = &sc->sc_rxdsoft[idx2]; 1773 MGET(m2, M_DONTWAIT, MT_DATA); 1774 if (m2 != NULL) { 1775 refcount_acquire( 1776 &rxds2->rxds_refcount); 1777 m2->m_len = len - m->m_len; 1778 bus_dmamap_sync(sc->sc_rdmatag, 1779 rxds2->rxds_dmamap, 1780 BUS_DMASYNC_POSTREAD); 1781 #if __FreeBSD_version < 800016 1782 MEXTADD(m2, (caddr_t)rxds2->rxds_buf, 1783 m2->m_len, cas_free, rxds2, 1784 M_RDONLY, EXT_NET_DRV); 1785 #else 1786 MEXTADD(m2, (caddr_t)rxds2->rxds_buf, 1787 m2->m_len, cas_free, 1788 sc, (void *)(uintptr_t)idx2, 1789 M_RDONLY, EXT_NET_DRV); 1790 #endif 1791 if ((m2->m_flags & M_EXT) == 0) { 1792 m_freem(m2); 1793 m2 = NULL; 1794 } 1795 } 1796 if (m2 != NULL) 1797 m->m_next = m2; 1798 else { 1799 m_freem(m); 1800 m = NULL; 1801 } 1802 } 1803 if (m != NULL) { 1804 m->m_pkthdr.rcvif = ifp; 1805 m->m_pkthdr.len = len; 1806 ifp->if_ipackets++; 1807 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 1808 cas_rxcksum(m, CAS_GET(word4, 1809 CAS_RC4_TCP_CSUM)); 1810 /* Pass it on. */ 1811 (*ifp->if_input)(ifp, m); 1812 } else 1813 ifp->if_ierrors++; 1814 1815 if ((word1 & CAS_RC1_RELEASE_DATA) != 0 && 1816 refcount_release(&rxds->rxds_refcount) != 0) 1817 cas_add_rxdesc(sc, idx); 1818 if ((word1 & CAS_RC1_SPLIT_PKT) != 0 && 1819 refcount_release(&rxds2->rxds_refcount) != 0) 1820 cas_add_rxdesc(sc, idx2); 1821 } 1822 1823 skip = CAS_GET(word1, CAS_RC1_SKIP); 1824 1825 skip: 1826 cas_rxcompinit(&sc->sc_rxcomps[sc->sc_rxcptr]); 1827 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1828 break; 1829 } 1830 CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1831 CAS_WRITE_4(sc, CAS_RX_COMP_TAIL, sc->sc_rxcptr); 1832 1833 #undef PRINTWORD 1834 #undef SKIPASSERT 1835 #undef WORDTOH 1836 1837 #ifdef CAS_DEBUG 1838 CTR4(KTR_CAS, "%s: done sc->sc_rxcptr %d, sc->sc_rxdptr %d, head %d", 1839 __func__, sc->rxcptr, sc->sc_rxdptr, 1840 CAS_READ_4(sc, CAS_RX_COMP_HEAD)); 1841 #endif 1842 } 1843 1844 static void 1845 cas_free(void *arg1, void *arg2) 1846 { 1847 struct cas_rxdsoft *rxds; 1848 struct cas_softc *sc; 1849 u_int idx; 1850 1851 #if __FreeBSD_version < 800016 1852 rxds = arg2; 1853 sc = rxds->rxds_sc; 1854 idx = rxds->rxds_idx; 1855 #else 1856 sc = arg1; 1857 idx = (uintptr_t)arg2; 1858 rxds = &sc->sc_rxdsoft[idx]; 1859 #endif 1860 if (refcount_release(&rxds->rxds_refcount) == 0) 1861 return; 1862 1863 /* 1864 * NB: this function can be called via m_freem(9) within 1865 * this driver! 1866 */ 1867 1868 cas_add_rxdesc(sc, idx); 1869 } 1870 1871 static inline void 1872 cas_add_rxdesc(struct cas_softc *sc, u_int idx) 1873 { 1874 u_int locked; 1875 1876 if ((locked = CAS_LOCK_OWNED(sc)) == 0) 1877 CAS_LOCK(sc); 1878 1879 bus_dmamap_sync(sc->sc_rdmatag, sc->sc_rxdsoft[idx].rxds_dmamap, 1880 BUS_DMASYNC_PREREAD); 1881 CAS_UPDATE_RXDESC(sc, sc->sc_rxdptr, idx); 1882 sc->sc_rxdptr = CAS_NEXTRXDESC(sc->sc_rxdptr); 1883 1884 /* 1885 * Update the RX kick register. This register has to point to the 1886 * descriptor after the last valid one (before the current batch) 1887 * and for optimum performance should be incremented in multiples 1888 * of 4 (the DMA engine fetches/updates descriptors in batches of 4). 1889 */ 1890 if ((sc->sc_rxdptr % 4) == 0) { 1891 CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1892 CAS_WRITE_4(sc, CAS_RX_KICK, 1893 (sc->sc_rxdptr + CAS_NRXDESC - 4) & CAS_NRXDESC_MASK); 1894 } 1895 1896 if (locked == 0) 1897 CAS_UNLOCK(sc); 1898 } 1899 1900 static void 1901 cas_eint(struct cas_softc *sc, u_int status) 1902 { 1903 struct ifnet *ifp = sc->sc_ifp; 1904 1905 CAS_LOCK_ASSERT(sc, MA_NOTOWNED); 1906 1907 ifp->if_ierrors++; 1908 1909 device_printf(sc->sc_dev, "%s: status 0x%x", __func__, status); 1910 if ((status & CAS_INTR_PCI_ERROR_INT) != 0) { 1911 status = CAS_READ_4(sc, CAS_ERROR_STATUS); 1912 printf(", PCI bus error 0x%x", status); 1913 if ((status & CAS_ERROR_OTHER) != 0) { 1914 status = pci_read_config(sc->sc_dev, PCIR_STATUS, 2); 1915 printf(", PCI status 0x%x", status); 1916 pci_write_config(sc->sc_dev, PCIR_STATUS, status, 2); 1917 } 1918 } 1919 printf("\n"); 1920 1921 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1922 cas_init(sc); 1923 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1924 taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task); 1925 } 1926 1927 static int 1928 cas_intr(void *v) 1929 { 1930 struct cas_softc *sc = v; 1931 1932 if (__predict_false((CAS_READ_4(sc, CAS_STATUS_ALIAS) & 1933 CAS_INTR_SUMMARY) == 0)) 1934 return (FILTER_STRAY); 1935 1936 /* Disable interrupts. */ 1937 CAS_WRITE_4(sc, CAS_INTMASK, 0xffffffff); 1938 taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task); 1939 1940 return (FILTER_HANDLED); 1941 } 1942 1943 static void 1944 cas_intr_task(void *arg, int pending __unused) 1945 { 1946 struct cas_softc *sc = arg; 1947 struct ifnet *ifp = sc->sc_ifp; 1948 uint32_t status, status2; 1949 1950 CAS_LOCK_ASSERT(sc, MA_NOTOWNED); 1951 1952 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1953 return; 1954 1955 status = CAS_READ_4(sc, CAS_STATUS); 1956 if (__predict_false((status & CAS_INTR_SUMMARY) == 0)) 1957 goto done; 1958 1959 #ifdef CAS_DEBUG 1960 CTR4(KTR_CAS, "%s: %s: cplt %x, status %x", 1961 device_get_name(sc->sc_dev), __func__, 1962 (status >> CAS_STATUS_TX_COMP3_SHIFT), (u_int)status); 1963 1964 /* 1965 * PCS interrupts must be cleared, otherwise no traffic is passed! 1966 */ 1967 if ((status & CAS_INTR_PCS_INT) != 0) { 1968 status2 = 1969 CAS_READ_4(sc, CAS_PCS_INTR_STATUS) | 1970 CAS_READ_4(sc, CAS_PCS_INTR_STATUS); 1971 if ((status2 & CAS_PCS_INTR_LINK) != 0) 1972 device_printf(sc->sc_dev, 1973 "%s: PCS link status changed\n", __func__); 1974 } 1975 if ((status & CAS_MAC_CTRL_STATUS) != 0) { 1976 status2 = CAS_READ_4(sc, CAS_MAC_CTRL_STATUS); 1977 if ((status2 & CAS_MAC_CTRL_PAUSE) != 0) 1978 device_printf(sc->sc_dev, 1979 "%s: PAUSE received (PAUSE time %d slots)\n", 1980 __func__, 1981 (status2 & CAS_MAC_CTRL_STATUS_PT_MASK) >> 1982 CAS_MAC_CTRL_STATUS_PT_SHFT); 1983 if ((status2 & CAS_MAC_CTRL_PAUSE) != 0) 1984 device_printf(sc->sc_dev, 1985 "%s: transited to PAUSE state\n", __func__); 1986 if ((status2 & CAS_MAC_CTRL_NON_PAUSE) != 0) 1987 device_printf(sc->sc_dev, 1988 "%s: transited to non-PAUSE state\n", __func__); 1989 } 1990 if ((status & CAS_INTR_MIF) != 0) 1991 device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__); 1992 #endif 1993 1994 if (__predict_false((status & 1995 (CAS_INTR_TX_TAG_ERR | CAS_INTR_RX_TAG_ERR | 1996 CAS_INTR_RX_LEN_MMATCH | CAS_INTR_PCI_ERROR_INT)) != 0)) { 1997 cas_eint(sc, status); 1998 return; 1999 } 2000 2001 if (__predict_false(status & CAS_INTR_TX_MAC_INT)) { 2002 status2 = CAS_READ_4(sc, CAS_MAC_TX_STATUS); 2003 if ((status2 & 2004 (CAS_MAC_TX_UNDERRUN | CAS_MAC_TX_MAX_PKT_ERR)) != 0) 2005 sc->sc_ifp->if_oerrors++; 2006 else if ((status2 & ~CAS_MAC_TX_FRAME_XMTD) != 0) 2007 device_printf(sc->sc_dev, 2008 "MAC TX fault, status %x\n", status2); 2009 } 2010 2011 if (__predict_false(status & CAS_INTR_RX_MAC_INT)) { 2012 status2 = CAS_READ_4(sc, CAS_MAC_RX_STATUS); 2013 if ((status2 & CAS_MAC_RX_OVERFLOW) != 0) 2014 sc->sc_ifp->if_ierrors++; 2015 else if ((status2 & ~CAS_MAC_RX_FRAME_RCVD) != 0) 2016 device_printf(sc->sc_dev, 2017 "MAC RX fault, status %x\n", status2); 2018 } 2019 2020 if ((status & 2021 (CAS_INTR_RX_DONE | CAS_INTR_RX_BUF_NA | CAS_INTR_RX_COMP_FULL | 2022 CAS_INTR_RX_BUF_AEMPTY | CAS_INTR_RX_COMP_AFULL)) != 0) { 2023 cas_rint(sc); 2024 #ifdef CAS_DEBUG 2025 if (__predict_false((status & 2026 (CAS_INTR_RX_BUF_NA | CAS_INTR_RX_COMP_FULL | 2027 CAS_INTR_RX_BUF_AEMPTY | CAS_INTR_RX_COMP_AFULL)) != 0)) 2028 device_printf(sc->sc_dev, 2029 "RX fault, status %x\n", status); 2030 #endif 2031 } 2032 2033 if ((status & 2034 (CAS_INTR_TX_INT_ME | CAS_INTR_TX_ALL | CAS_INTR_TX_DONE)) != 0) { 2035 CAS_LOCK(sc); 2036 cas_tint(sc); 2037 CAS_UNLOCK(sc); 2038 } 2039 2040 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 2041 return; 2042 else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2043 taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task); 2044 2045 status = CAS_READ_4(sc, CAS_STATUS_ALIAS); 2046 if (__predict_false((status & CAS_INTR_SUMMARY) != 0)) { 2047 taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task); 2048 return; 2049 } 2050 2051 done: 2052 /* Re-enable interrupts. */ 2053 CAS_WRITE_4(sc, CAS_INTMASK, 2054 ~(CAS_INTR_TX_INT_ME | CAS_INTR_TX_TAG_ERR | 2055 CAS_INTR_RX_DONE | CAS_INTR_RX_BUF_NA | CAS_INTR_RX_TAG_ERR | 2056 CAS_INTR_RX_COMP_FULL | CAS_INTR_RX_BUF_AEMPTY | 2057 CAS_INTR_RX_COMP_AFULL | CAS_INTR_RX_LEN_MMATCH | 2058 CAS_INTR_PCI_ERROR_INT 2059 #ifdef CAS_DEBUG 2060 | CAS_INTR_PCS_INT | CAS_INTR_MIF 2061 #endif 2062 )); 2063 } 2064 2065 static void 2066 cas_watchdog(struct cas_softc *sc) 2067 { 2068 struct ifnet *ifp = sc->sc_ifp; 2069 2070 CAS_LOCK_ASSERT(sc, MA_OWNED); 2071 2072 #ifdef CAS_DEBUG 2073 CTR4(KTR_CAS, 2074 "%s: CAS_RX_CONFIG %x CAS_MAC_RX_STATUS %x CAS_MAC_RX_CONFIG %x", 2075 __func__, CAS_READ_4(sc, CAS_RX_CONFIG), 2076 CAS_READ_4(sc, CAS_MAC_RX_STATUS), 2077 CAS_READ_4(sc, CAS_MAC_RX_CONFIG)); 2078 CTR4(KTR_CAS, 2079 "%s: CAS_TX_CONFIG %x CAS_MAC_TX_STATUS %x CAS_MAC_TX_CONFIG %x", 2080 __func__, CAS_READ_4(sc, CAS_TX_CONFIG), 2081 CAS_READ_4(sc, CAS_MAC_TX_STATUS), 2082 CAS_READ_4(sc, CAS_MAC_TX_CONFIG)); 2083 #endif 2084 2085 if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0) 2086 return; 2087 2088 if ((sc->sc_flags & CAS_LINK) != 0) 2089 device_printf(sc->sc_dev, "device timeout\n"); 2090 else if (bootverbose) 2091 device_printf(sc->sc_dev, "device timeout (no link)\n"); 2092 ++ifp->if_oerrors; 2093 2094 /* Try to get more packets going. */ 2095 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2096 cas_init_locked(sc); 2097 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2098 taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task); 2099 } 2100 2101 static void 2102 cas_mifinit(struct cas_softc *sc) 2103 { 2104 2105 /* Configure the MIF in frame mode. */ 2106 CAS_WRITE_4(sc, CAS_MIF_CONF, 2107 CAS_READ_4(sc, CAS_MIF_CONF) & ~CAS_MIF_CONF_BB_MODE); 2108 } 2109 2110 /* 2111 * MII interface 2112 * 2113 * The MII interface supports at least three different operating modes: 2114 * 2115 * Bitbang mode is implemented using data, clock and output enable registers. 2116 * 2117 * Frame mode is implemented by loading a complete frame into the frame 2118 * register and polling the valid bit for completion. 2119 * 2120 * Polling mode uses the frame register but completion is indicated by 2121 * an interrupt. 2122 * 2123 */ 2124 static int 2125 cas_mii_readreg(device_t dev, int phy, int reg) 2126 { 2127 struct cas_softc *sc; 2128 int n; 2129 uint32_t v; 2130 2131 #ifdef CAS_DEBUG_PHY 2132 printf("%s: phy %d reg %d\n", __func__, phy, reg); 2133 #endif 2134 2135 sc = device_get_softc(dev); 2136 if (sc->sc_phyad != -1 && phy != sc->sc_phyad) 2137 return (0); 2138 2139 if ((sc->sc_flags & CAS_SERDES) != 0) { 2140 switch (reg) { 2141 case MII_BMCR: 2142 reg = CAS_PCS_CTRL; 2143 break; 2144 case MII_BMSR: 2145 reg = CAS_PCS_STATUS; 2146 break; 2147 case MII_PHYIDR1: 2148 case MII_PHYIDR2: 2149 return (0); 2150 case MII_ANAR: 2151 reg = CAS_PCS_ANAR; 2152 break; 2153 case MII_ANLPAR: 2154 reg = CAS_PCS_ANLPAR; 2155 break; 2156 case MII_EXTSR: 2157 return (EXTSR_1000XFDX | EXTSR_1000XHDX); 2158 default: 2159 device_printf(sc->sc_dev, 2160 "%s: unhandled register %d\n", __func__, reg); 2161 return (0); 2162 } 2163 return (CAS_READ_4(sc, reg)); 2164 } 2165 2166 /* Construct the frame command. */ 2167 v = CAS_MIF_FRAME_READ | 2168 (phy << CAS_MIF_FRAME_PHY_SHFT) | 2169 (reg << CAS_MIF_FRAME_REG_SHFT); 2170 2171 CAS_WRITE_4(sc, CAS_MIF_FRAME, v); 2172 CAS_BARRIER(sc, CAS_MIF_FRAME, 4, 2173 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 2174 for (n = 0; n < 100; n++) { 2175 DELAY(1); 2176 v = CAS_READ_4(sc, CAS_MIF_FRAME); 2177 if (v & CAS_MIF_FRAME_TA_LSB) 2178 return (v & CAS_MIF_FRAME_DATA); 2179 } 2180 2181 device_printf(sc->sc_dev, "%s: timed out\n", __func__); 2182 return (0); 2183 } 2184 2185 static int 2186 cas_mii_writereg(device_t dev, int phy, int reg, int val) 2187 { 2188 struct cas_softc *sc; 2189 int n; 2190 uint32_t v; 2191 2192 #ifdef CAS_DEBUG_PHY 2193 printf("%s: phy %d reg %d val %x\n", phy, reg, val, __func__); 2194 #endif 2195 2196 sc = device_get_softc(dev); 2197 if (sc->sc_phyad != -1 && phy != sc->sc_phyad) 2198 return (0); 2199 2200 if ((sc->sc_flags & CAS_SERDES) != 0) { 2201 switch (reg) { 2202 case MII_BMSR: 2203 reg = CAS_PCS_STATUS; 2204 break; 2205 case MII_BMCR: 2206 reg = CAS_PCS_CTRL; 2207 if ((val & CAS_PCS_CTRL_RESET) == 0) 2208 break; 2209 CAS_WRITE_4(sc, CAS_PCS_CTRL, val); 2210 CAS_BARRIER(sc, CAS_PCS_CTRL, 4, 2211 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 2212 if (!cas_bitwait(sc, CAS_PCS_CTRL, 2213 CAS_PCS_CTRL_RESET, 0)) 2214 device_printf(sc->sc_dev, 2215 "cannot reset PCS\n"); 2216 /* FALLTHROUGH */ 2217 case MII_ANAR: 2218 CAS_WRITE_4(sc, CAS_PCS_CONF, 0); 2219 CAS_BARRIER(sc, CAS_PCS_CONF, 4, 2220 BUS_SPACE_BARRIER_WRITE); 2221 CAS_WRITE_4(sc, CAS_PCS_ANAR, val); 2222 CAS_WRITE_4(sc, CAS_PCS_SERDES_CTRL, 2223 CAS_PCS_SERDES_CTRL_ESD); 2224 CAS_WRITE_4(sc, CAS_PCS_CONF, 2225 CAS_PCS_CONF_EN); 2226 return (0); 2227 case MII_ANLPAR: 2228 reg = CAS_PCS_ANLPAR; 2229 break; 2230 default: 2231 device_printf(sc->sc_dev, 2232 "%s: unhandled register %d\n", __func__, reg); 2233 return (0); 2234 } 2235 CAS_WRITE_4(sc, reg, val); 2236 return (0); 2237 } 2238 2239 /* Construct the frame command. */ 2240 v = CAS_MIF_FRAME_WRITE | 2241 (phy << CAS_MIF_FRAME_PHY_SHFT) | 2242 (reg << CAS_MIF_FRAME_REG_SHFT) | 2243 (val & CAS_MIF_FRAME_DATA); 2244 2245 CAS_WRITE_4(sc, CAS_MIF_FRAME, v); 2246 CAS_BARRIER(sc, CAS_MIF_FRAME, 4, 2247 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 2248 for (n = 0; n < 100; n++) { 2249 DELAY(1); 2250 v = CAS_READ_4(sc, CAS_MIF_FRAME); 2251 if (v & CAS_MIF_FRAME_TA_LSB) 2252 return (1); 2253 } 2254 2255 device_printf(sc->sc_dev, "%s: timed out\n", __func__); 2256 return (0); 2257 } 2258 2259 static void 2260 cas_mii_statchg(device_t dev) 2261 { 2262 struct cas_softc *sc; 2263 struct ifnet *ifp; 2264 int gigabit; 2265 uint32_t rxcfg, txcfg, v; 2266 2267 sc = device_get_softc(dev); 2268 ifp = sc->sc_ifp; 2269 2270 CAS_LOCK_ASSERT(sc, MA_OWNED); 2271 2272 #ifdef CAS_DEBUG 2273 if ((ifp->if_flags & IFF_DEBUG) != 0) 2274 device_printf(sc->sc_dev, "%s: status change: PHY = %d\n", 2275 __func__, sc->sc_phyad); 2276 #endif 2277 2278 if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 && 2279 IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE) 2280 sc->sc_flags |= CAS_LINK; 2281 else 2282 sc->sc_flags &= ~CAS_LINK; 2283 2284 switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) { 2285 case IFM_1000_SX: 2286 case IFM_1000_LX: 2287 case IFM_1000_CX: 2288 case IFM_1000_T: 2289 gigabit = 1; 2290 break; 2291 default: 2292 gigabit = 0; 2293 } 2294 2295 /* 2296 * The configuration done here corresponds to the steps F) and 2297 * G) and as far as enabling of RX and TX MAC goes also step H) 2298 * of the initialization sequence outlined in section 11.2.1 of 2299 * the Cassini+ ASIC Specification. 2300 */ 2301 2302 rxcfg = CAS_READ_4(sc, CAS_MAC_RX_CONF); 2303 rxcfg &= ~(CAS_MAC_RX_CONF_EN | CAS_MAC_RX_CONF_CARR); 2304 txcfg = CAS_MAC_TX_CONF_EN_IPG0 | CAS_MAC_TX_CONF_NGU | 2305 CAS_MAC_TX_CONF_NGUL; 2306 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) 2307 txcfg |= CAS_MAC_TX_CONF_ICARR | CAS_MAC_TX_CONF_ICOLLIS; 2308 else if (gigabit != 0) { 2309 rxcfg |= CAS_MAC_RX_CONF_CARR; 2310 txcfg |= CAS_MAC_TX_CONF_CARR; 2311 } 2312 CAS_WRITE_4(sc, CAS_MAC_TX_CONF, 0); 2313 CAS_BARRIER(sc, CAS_MAC_TX_CONF, 4, 2314 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 2315 if (!cas_bitwait(sc, CAS_MAC_TX_CONF, CAS_MAC_TX_CONF_EN, 0)) 2316 device_printf(sc->sc_dev, "cannot disable TX MAC\n"); 2317 CAS_WRITE_4(sc, CAS_MAC_TX_CONF, txcfg); 2318 CAS_WRITE_4(sc, CAS_MAC_RX_CONF, 0); 2319 CAS_BARRIER(sc, CAS_MAC_RX_CONF, 4, 2320 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 2321 if (!cas_bitwait(sc, CAS_MAC_RX_CONF, CAS_MAC_RX_CONF_EN, 0)) 2322 device_printf(sc->sc_dev, "cannot disable RX MAC\n"); 2323 CAS_WRITE_4(sc, CAS_MAC_RX_CONF, rxcfg); 2324 2325 v = CAS_READ_4(sc, CAS_MAC_CTRL_CONF) & 2326 ~(CAS_MAC_CTRL_CONF_TXP | CAS_MAC_CTRL_CONF_RXP); 2327 #ifdef notyet 2328 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & 2329 IFM_ETH_RXPAUSE) != 0) 2330 v |= CAS_MAC_CTRL_CONF_RXP; 2331 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & 2332 IFM_ETH_TXPAUSE) != 0) 2333 v |= CAS_MAC_CTRL_CONF_TXP; 2334 #endif 2335 CAS_WRITE_4(sc, CAS_MAC_CTRL_CONF, v); 2336 2337 /* 2338 * All supported chips have a bug causing incorrect checksum 2339 * to be calculated when letting them strip the FCS in half- 2340 * duplex mode. In theory we could disable FCS stripping and 2341 * manually adjust the checksum accordingly. It seems to make 2342 * more sense to optimze for the common case and just disable 2343 * hardware checksumming in half-duplex mode though. 2344 */ 2345 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0) { 2346 ifp->if_capenable &= ~IFCAP_HWCSUM; 2347 ifp->if_hwassist = 0; 2348 } else if ((sc->sc_flags & CAS_NO_CSUM) == 0) { 2349 ifp->if_capenable = ifp->if_capabilities; 2350 ifp->if_hwassist = CAS_CSUM_FEATURES; 2351 } 2352 2353 if (sc->sc_variant == CAS_SATURN) { 2354 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0) 2355 /* silicon bug workaround */ 2356 CAS_WRITE_4(sc, CAS_MAC_PREAMBLE_LEN, 0x41); 2357 else 2358 CAS_WRITE_4(sc, CAS_MAC_PREAMBLE_LEN, 0x7); 2359 } 2360 2361 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 && 2362 gigabit != 0) 2363 CAS_WRITE_4(sc, CAS_MAC_SLOT_TIME, 2364 CAS_MAC_SLOT_TIME_CARR); 2365 else 2366 CAS_WRITE_4(sc, CAS_MAC_SLOT_TIME, 2367 CAS_MAC_SLOT_TIME_NORM); 2368 2369 /* XIF Configuration */ 2370 v = CAS_MAC_XIF_CONF_TX_OE | CAS_MAC_XIF_CONF_LNKLED; 2371 if ((sc->sc_flags & CAS_SERDES) == 0) { 2372 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0) 2373 v |= CAS_MAC_XIF_CONF_NOECHO; 2374 v |= CAS_MAC_XIF_CONF_BUF_OE; 2375 } 2376 if (gigabit != 0) 2377 v |= CAS_MAC_XIF_CONF_GMII; 2378 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) 2379 v |= CAS_MAC_XIF_CONF_FDXLED; 2380 CAS_WRITE_4(sc, CAS_MAC_XIF_CONF, v); 2381 2382 if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && 2383 (sc->sc_flags & CAS_LINK) != 0) { 2384 CAS_WRITE_4(sc, CAS_MAC_TX_CONF, 2385 txcfg | CAS_MAC_TX_CONF_EN); 2386 CAS_WRITE_4(sc, CAS_MAC_RX_CONF, 2387 rxcfg | CAS_MAC_RX_CONF_EN); 2388 } 2389 } 2390 2391 static int 2392 cas_mediachange(struct ifnet *ifp) 2393 { 2394 struct cas_softc *sc = ifp->if_softc; 2395 int error; 2396 2397 /* XXX add support for serial media. */ 2398 2399 CAS_LOCK(sc); 2400 error = mii_mediachg(sc->sc_mii); 2401 CAS_UNLOCK(sc); 2402 return (error); 2403 } 2404 2405 static void 2406 cas_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 2407 { 2408 struct cas_softc *sc = ifp->if_softc; 2409 2410 CAS_LOCK(sc); 2411 if ((ifp->if_flags & IFF_UP) == 0) { 2412 CAS_UNLOCK(sc); 2413 return; 2414 } 2415 2416 mii_pollstat(sc->sc_mii); 2417 ifmr->ifm_active = sc->sc_mii->mii_media_active; 2418 ifmr->ifm_status = sc->sc_mii->mii_media_status; 2419 CAS_UNLOCK(sc); 2420 } 2421 2422 static int 2423 cas_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 2424 { 2425 struct cas_softc *sc = ifp->if_softc; 2426 struct ifreq *ifr = (struct ifreq *)data; 2427 int error; 2428 2429 error = 0; 2430 switch (cmd) { 2431 case SIOCSIFFLAGS: 2432 CAS_LOCK(sc); 2433 if ((ifp->if_flags & IFF_UP) != 0) { 2434 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && 2435 ((ifp->if_flags ^ sc->sc_ifflags) & 2436 (IFF_ALLMULTI | IFF_PROMISC)) != 0) 2437 cas_setladrf(sc); 2438 else 2439 cas_init_locked(sc); 2440 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2441 cas_stop(ifp); 2442 sc->sc_ifflags = ifp->if_flags; 2443 CAS_UNLOCK(sc); 2444 break; 2445 case SIOCSIFCAP: 2446 CAS_LOCK(sc); 2447 if ((sc->sc_flags & CAS_NO_CSUM) != 0) { 2448 error = EINVAL; 2449 CAS_UNLOCK(sc); 2450 break; 2451 } 2452 ifp->if_capenable = ifr->ifr_reqcap; 2453 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 2454 ifp->if_hwassist = CAS_CSUM_FEATURES; 2455 else 2456 ifp->if_hwassist = 0; 2457 CAS_UNLOCK(sc); 2458 break; 2459 case SIOCADDMULTI: 2460 case SIOCDELMULTI: 2461 CAS_LOCK(sc); 2462 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2463 cas_setladrf(sc); 2464 CAS_UNLOCK(sc); 2465 break; 2466 case SIOCSIFMTU: 2467 if ((ifr->ifr_mtu < ETHERMIN) || 2468 (ifr->ifr_mtu > ETHERMTU_JUMBO)) 2469 error = EINVAL; 2470 else 2471 ifp->if_mtu = ifr->ifr_mtu; 2472 break; 2473 case SIOCGIFMEDIA: 2474 case SIOCSIFMEDIA: 2475 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd); 2476 break; 2477 default: 2478 error = ether_ioctl(ifp, cmd, data); 2479 break; 2480 } 2481 2482 return (error); 2483 } 2484 2485 static void 2486 cas_setladrf(struct cas_softc *sc) 2487 { 2488 struct ifnet *ifp = sc->sc_ifp; 2489 struct ifmultiaddr *inm; 2490 int i; 2491 uint32_t hash[16]; 2492 uint32_t crc, v; 2493 2494 CAS_LOCK_ASSERT(sc, MA_OWNED); 2495 2496 /* Get the current RX configuration. */ 2497 v = CAS_READ_4(sc, CAS_MAC_RX_CONF); 2498 2499 /* 2500 * Turn off promiscuous mode, promiscuous group mode (all multicast), 2501 * and hash filter. Depending on the case, the right bit will be 2502 * enabled. 2503 */ 2504 v &= ~(CAS_MAC_RX_CONF_PROMISC | CAS_MAC_RX_CONF_HFILTER | 2505 CAS_MAC_RX_CONF_PGRP); 2506 2507 CAS_WRITE_4(sc, CAS_MAC_RX_CONF, v); 2508 CAS_BARRIER(sc, CAS_MAC_RX_CONF, 4, 2509 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 2510 if (!cas_bitwait(sc, CAS_MAC_RX_CONF, CAS_MAC_RX_CONF_HFILTER, 0)) 2511 device_printf(sc->sc_dev, "cannot disable RX hash filter\n"); 2512 2513 if ((ifp->if_flags & IFF_PROMISC) != 0) { 2514 v |= CAS_MAC_RX_CONF_PROMISC; 2515 goto chipit; 2516 } 2517 if ((ifp->if_flags & IFF_ALLMULTI) != 0) { 2518 v |= CAS_MAC_RX_CONF_PGRP; 2519 goto chipit; 2520 } 2521 2522 /* 2523 * Set up multicast address filter by passing all multicast 2524 * addresses through a crc generator, and then using the high 2525 * order 8 bits as an index into the 256 bit logical address 2526 * filter. The high order 4 bits selects the word, while the 2527 * other 4 bits select the bit within the word (where bit 0 2528 * is the MSB). 2529 */ 2530 2531 /* Clear the hash table. */ 2532 memset(hash, 0, sizeof(hash)); 2533 2534 if_maddr_rlock(ifp); 2535 TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) { 2536 if (inm->ifma_addr->sa_family != AF_LINK) 2537 continue; 2538 crc = ether_crc32_le(LLADDR((struct sockaddr_dl *) 2539 inm->ifma_addr), ETHER_ADDR_LEN); 2540 2541 /* We just want the 8 most significant bits. */ 2542 crc >>= 24; 2543 2544 /* Set the corresponding bit in the filter. */ 2545 hash[crc >> 4] |= 1 << (15 - (crc & 15)); 2546 } 2547 if_maddr_runlock(ifp); 2548 2549 v |= CAS_MAC_RX_CONF_HFILTER; 2550 2551 /* Now load the hash table into the chip (if we are using it). */ 2552 for (i = 0; i < 16; i++) 2553 CAS_WRITE_4(sc, 2554 CAS_MAC_HASH0 + i * (CAS_MAC_HASH1 - CAS_MAC_HASH0), 2555 hash[i]); 2556 2557 chipit: 2558 CAS_WRITE_4(sc, CAS_MAC_RX_CONF, v); 2559 } 2560 2561 static int cas_pci_attach(device_t dev); 2562 static int cas_pci_detach(device_t dev); 2563 static int cas_pci_probe(device_t dev); 2564 static int cas_pci_resume(device_t dev); 2565 static int cas_pci_suspend(device_t dev); 2566 2567 static device_method_t cas_pci_methods[] = { 2568 /* Device interface */ 2569 DEVMETHOD(device_probe, cas_pci_probe), 2570 DEVMETHOD(device_attach, cas_pci_attach), 2571 DEVMETHOD(device_detach, cas_pci_detach), 2572 DEVMETHOD(device_suspend, cas_pci_suspend), 2573 DEVMETHOD(device_resume, cas_pci_resume), 2574 /* Use the suspend handler here, it is all that is required. */ 2575 DEVMETHOD(device_shutdown, cas_pci_suspend), 2576 2577 /* bus interface */ 2578 DEVMETHOD(bus_print_child, bus_generic_print_child), 2579 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 2580 2581 /* MII interface */ 2582 DEVMETHOD(miibus_readreg, cas_mii_readreg), 2583 DEVMETHOD(miibus_writereg, cas_mii_writereg), 2584 DEVMETHOD(miibus_statchg, cas_mii_statchg), 2585 2586 KOBJMETHOD_END 2587 }; 2588 2589 static driver_t cas_pci_driver = { 2590 "cas", 2591 cas_pci_methods, 2592 sizeof(struct cas_softc) 2593 }; 2594 2595 DRIVER_MODULE(cas, pci, cas_pci_driver, cas_devclass, 0, 0); 2596 DRIVER_MODULE(miibus, cas, miibus_driver, miibus_devclass, 0, 0); 2597 MODULE_DEPEND(cas, pci, 1, 1, 1); 2598 2599 static const struct cas_pci_dev { 2600 uint32_t cpd_devid; 2601 uint8_t cpd_revid; 2602 int cpd_variant; 2603 const char *cpd_desc; 2604 } const cas_pci_devlist[] = { 2605 { 0x0035100b, 0x0, CAS_SATURN, "NS DP83065 Saturn Gigabit Ethernet" }, 2606 { 0xabba108e, 0x10, CAS_CASPLUS, "Sun Cassini+ Gigabit Ethernet" }, 2607 { 0xabba108e, 0x0, CAS_CAS, "Sun Cassini Gigabit Ethernet" }, 2608 { 0, 0, 0, NULL } 2609 }; 2610 2611 static int 2612 cas_pci_probe(device_t dev) 2613 { 2614 int i; 2615 2616 for (i = 0; cas_pci_devlist[i].cpd_desc != NULL; i++) { 2617 if (pci_get_devid(dev) == cas_pci_devlist[i].cpd_devid && 2618 pci_get_revid(dev) >= cas_pci_devlist[i].cpd_revid) { 2619 device_set_desc(dev, cas_pci_devlist[i].cpd_desc); 2620 return (BUS_PROBE_DEFAULT); 2621 } 2622 } 2623 2624 return (ENXIO); 2625 } 2626 2627 static struct resource_spec cas_pci_res_spec[] = { 2628 { SYS_RES_IRQ, 0, RF_SHAREABLE | RF_ACTIVE }, /* CAS_RES_INTR */ 2629 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE }, /* CAS_RES_MEM */ 2630 { -1, 0 } 2631 }; 2632 2633 static int 2634 cas_pci_attach(device_t dev) 2635 { 2636 struct cas_softc *sc; 2637 int i; 2638 #if !(defined(__powerpc__) || defined(__sparc64__)) 2639 u_char enaddr[4][ETHER_ADDR_LEN]; 2640 char lma[sizeof("local-mac-address")]; 2641 int found, j; 2642 #endif 2643 2644 sc = device_get_softc(dev); 2645 sc->sc_variant = CAS_UNKNOWN; 2646 for (i = 0; cas_pci_devlist[i].cpd_desc != NULL; i++) { 2647 if (pci_get_devid(dev) == cas_pci_devlist[i].cpd_devid && 2648 pci_get_revid(dev) >= cas_pci_devlist[i].cpd_revid) { 2649 sc->sc_variant = cas_pci_devlist[i].cpd_variant; 2650 break; 2651 } 2652 } 2653 if (sc->sc_variant == CAS_UNKNOWN) { 2654 device_printf(dev, "unknown adaptor\n"); 2655 return (ENXIO); 2656 } 2657 2658 pci_enable_busmaster(dev); 2659 2660 sc->sc_dev = dev; 2661 if (sc->sc_variant == CAS_CAS && pci_get_devid(dev) < 0x02) 2662 /* Hardware checksumming may hang TX. */ 2663 sc->sc_flags |= CAS_NO_CSUM; 2664 if (sc->sc_variant == CAS_CASPLUS || sc->sc_variant == CAS_SATURN) 2665 sc->sc_flags |= CAS_REG_PLUS; 2666 if (sc->sc_variant == CAS_CAS || 2667 (sc->sc_variant == CAS_CASPLUS && pci_get_revid(dev) < 0x11)) 2668 sc->sc_flags |= CAS_TABORT; 2669 if (bootverbose) 2670 device_printf(dev, "flags=0x%x\n", sc->sc_flags); 2671 2672 if (bus_alloc_resources(dev, cas_pci_res_spec, sc->sc_res)) { 2673 device_printf(dev, "failed to allocate resources\n"); 2674 bus_release_resources(dev, cas_pci_res_spec, sc->sc_res); 2675 return (ENXIO); 2676 } 2677 2678 CAS_LOCK_INIT(sc, device_get_nameunit(dev)); 2679 2680 #if defined(__powerpc__) || defined(__sparc64__) 2681 OF_getetheraddr(dev, sc->sc_enaddr); 2682 #else 2683 /* 2684 * Dig out VPD (vital product data) and read the MAX address. 2685 * The VPD resides in the PCI Expansion ROM (PCI FCode) and 2686 * can't be accessed via the PCI capability pointer. 2687 * SUNW,pci-ce and SUNW,pci-qge use the Enhanced VPD format 2688 * described in US Patent 7149820. 2689 */ 2690 2691 #define PCI_ROMHDR_SIZE 0x1c 2692 #define PCI_ROMHDR_SIG 0x00 2693 #define PCI_ROMHDR_SIG_MAGIC 0xaa55 /* little endian */ 2694 #define PCI_ROMHDR_PTR_DATA 0x18 2695 #define PCI_ROM_SIZE 0x18 2696 #define PCI_ROM_SIG 0x00 2697 #define PCI_ROM_SIG_MAGIC 0x52494350 /* "PCIR", endian */ 2698 /* reversed */ 2699 #define PCI_ROM_VENDOR 0x04 2700 #define PCI_ROM_DEVICE 0x06 2701 #define PCI_ROM_PTR_VPD 0x08 2702 #define PCI_VPDRES_BYTE0 0x00 2703 #define PCI_VPDRES_ISLARGE(x) ((x) & 0x80) 2704 #define PCI_VPDRES_LARGE_NAME(x) ((x) & 0x7f) 2705 #define PCI_VPDRES_LARGE_LEN_LSB 0x01 2706 #define PCI_VPDRES_LARGE_LEN_MSB 0x02 2707 #define PCI_VPDRES_LARGE_SIZE 0x03 2708 #define PCI_VPDRES_TYPE_ID_STRING 0x02 /* large */ 2709 #define PCI_VPDRES_TYPE_VPD 0x10 /* large */ 2710 #define PCI_VPD_KEY0 0x00 2711 #define PCI_VPD_KEY1 0x01 2712 #define PCI_VPD_LEN 0x02 2713 #define PCI_VPD_SIZE 0x03 2714 2715 #define CAS_ROM_READ_1(sc, offs) \ 2716 CAS_READ_1((sc), CAS_PCI_ROM_OFFSET + (offs)) 2717 #define CAS_ROM_READ_2(sc, offs) \ 2718 CAS_READ_2((sc), CAS_PCI_ROM_OFFSET + (offs)) 2719 #define CAS_ROM_READ_4(sc, offs) \ 2720 CAS_READ_4((sc), CAS_PCI_ROM_OFFSET + (offs)) 2721 2722 found = 0; 2723 /* Enable PCI Expansion ROM access. */ 2724 CAS_WRITE_4(sc, CAS_BIM_LDEV_OEN, 2725 CAS_BIM_LDEV_OEN_PAD | CAS_BIM_LDEV_OEN_PROM); 2726 2727 /* Read PCI Expansion ROM header. */ 2728 if (CAS_ROM_READ_2(sc, PCI_ROMHDR_SIG) != PCI_ROMHDR_SIG_MAGIC || 2729 (i = CAS_ROM_READ_2(sc, PCI_ROMHDR_PTR_DATA)) < 2730 PCI_ROMHDR_SIZE) { 2731 device_printf(dev, "unexpected PCI Expansion ROM header\n"); 2732 goto fail_prom; 2733 } 2734 2735 /* Read PCI Expansion ROM data. */ 2736 if (CAS_ROM_READ_4(sc, i + PCI_ROM_SIG) != PCI_ROM_SIG_MAGIC || 2737 CAS_ROM_READ_2(sc, i + PCI_ROM_VENDOR) != pci_get_vendor(dev) || 2738 CAS_ROM_READ_2(sc, i + PCI_ROM_DEVICE) != pci_get_device(dev) || 2739 (j = CAS_ROM_READ_2(sc, i + PCI_ROM_PTR_VPD)) < 2740 i + PCI_ROM_SIZE) { 2741 device_printf(dev, "unexpected PCI Expansion ROM data\n"); 2742 goto fail_prom; 2743 } 2744 2745 /* Read PCI VPD. */ 2746 next: 2747 if (PCI_VPDRES_ISLARGE(CAS_ROM_READ_1(sc, 2748 j + PCI_VPDRES_BYTE0)) == 0) { 2749 device_printf(dev, "no large PCI VPD\n"); 2750 goto fail_prom; 2751 } 2752 2753 i = (CAS_ROM_READ_1(sc, j + PCI_VPDRES_LARGE_LEN_MSB) << 8) | 2754 CAS_ROM_READ_1(sc, j + PCI_VPDRES_LARGE_LEN_LSB); 2755 switch (PCI_VPDRES_LARGE_NAME(CAS_ROM_READ_1(sc, 2756 j + PCI_VPDRES_BYTE0))) { 2757 case PCI_VPDRES_TYPE_ID_STRING: 2758 /* Skip identifier string. */ 2759 j += PCI_VPDRES_LARGE_SIZE + i; 2760 goto next; 2761 case PCI_VPDRES_TYPE_VPD: 2762 for (j += PCI_VPDRES_LARGE_SIZE; i > 0; 2763 i -= PCI_VPD_SIZE + CAS_ROM_READ_1(sc, j + PCI_VPD_LEN), 2764 j += PCI_VPD_SIZE + CAS_ROM_READ_1(sc, j + PCI_VPD_LEN)) { 2765 if (CAS_ROM_READ_1(sc, j + PCI_VPD_KEY0) != 'Z') 2766 /* no Enhanced VPD */ 2767 continue; 2768 if (CAS_ROM_READ_1(sc, j + PCI_VPD_SIZE) != 'I') 2769 /* no instance property */ 2770 continue; 2771 if (CAS_ROM_READ_1(sc, j + PCI_VPD_SIZE + 3) != 'B') 2772 /* no byte array */ 2773 continue; 2774 if (CAS_ROM_READ_1(sc, j + PCI_VPD_SIZE + 4) != 2775 ETHER_ADDR_LEN) 2776 continue; 2777 bus_read_region_1(sc->sc_res[CAS_RES_MEM], 2778 CAS_PCI_ROM_OFFSET + j + PCI_VPD_SIZE + 5, 2779 lma, sizeof(lma)); 2780 if (strcmp(lma, "local-mac-address") != 0) 2781 continue; 2782 bus_read_region_1(sc->sc_res[CAS_RES_MEM], 2783 CAS_PCI_ROM_OFFSET + j + PCI_VPD_SIZE + 5 + 2784 sizeof(lma), enaddr[found], 2785 sizeof(enaddr[found])); 2786 if (found++ == 4) 2787 break; 2788 } 2789 break; 2790 default: 2791 device_printf(dev, "unexpected PCI VPD\n"); 2792 goto fail_prom; 2793 } 2794 2795 fail_prom: 2796 CAS_WRITE_4(sc, CAS_BIM_LDEV_OEN, 0); 2797 2798 if (found == 0) { 2799 device_printf(dev, "could not determine Ethernet address\n"); 2800 goto fail; 2801 } 2802 i = 0; 2803 if (found > 1 && pci_get_slot(dev) < sizeof(enaddr) / sizeof(*enaddr)) 2804 i = pci_get_slot(dev); 2805 memcpy(sc->sc_enaddr, enaddr[i], ETHER_ADDR_LEN); 2806 #endif 2807 2808 if (cas_attach(sc) != 0) { 2809 device_printf(dev, "could not be attached\n"); 2810 goto fail; 2811 } 2812 2813 if (bus_setup_intr(dev, sc->sc_res[CAS_RES_INTR], INTR_TYPE_NET | 2814 INTR_MPSAFE, cas_intr, NULL, sc, &sc->sc_ih) != 0) { 2815 device_printf(dev, "failed to set up interrupt\n"); 2816 cas_detach(sc); 2817 goto fail; 2818 } 2819 return (0); 2820 2821 fail: 2822 CAS_LOCK_DESTROY(sc); 2823 bus_release_resources(dev, cas_pci_res_spec, sc->sc_res); 2824 return (ENXIO); 2825 } 2826 2827 static int 2828 cas_pci_detach(device_t dev) 2829 { 2830 struct cas_softc *sc; 2831 2832 sc = device_get_softc(dev); 2833 bus_teardown_intr(dev, sc->sc_res[CAS_RES_INTR], sc->sc_ih); 2834 cas_detach(sc); 2835 CAS_LOCK_DESTROY(sc); 2836 bus_release_resources(dev, cas_pci_res_spec, sc->sc_res); 2837 return (0); 2838 } 2839 2840 static int 2841 cas_pci_suspend(device_t dev) 2842 { 2843 2844 cas_suspend(device_get_softc(dev)); 2845 return (0); 2846 } 2847 2848 static int 2849 cas_pci_resume(device_t dev) 2850 { 2851 2852 cas_resume(device_get_softc(dev)); 2853 return (0); 2854 } 2855