1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (C) 2001 Eduardo Horvath. 5 * Copyright (c) 2001-2003 Thomas Moestl 6 * Copyright (c) 2007-2009 Marius Strobl <marius@FreeBSD.org> 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 * 30 * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp 31 * from: FreeBSD: if_gem.c 182060 2008-08-23 15:03:26Z marius 32 */ 33 34 #include <sys/cdefs.h> 35 __FBSDID("$FreeBSD$"); 36 37 /* 38 * driver for Sun Cassini/Cassini+ and National Semiconductor DP83065 39 * Saturn Gigabit Ethernet controllers 40 */ 41 42 #if 0 43 #define CAS_DEBUG 44 #endif 45 46 #include <sys/param.h> 47 #include <sys/systm.h> 48 #include <sys/bus.h> 49 #include <sys/callout.h> 50 #include <sys/endian.h> 51 #include <sys/mbuf.h> 52 #include <sys/malloc.h> 53 #include <sys/kernel.h> 54 #include <sys/lock.h> 55 #include <sys/module.h> 56 #include <sys/mutex.h> 57 #include <sys/refcount.h> 58 #include <sys/resource.h> 59 #include <sys/rman.h> 60 #include <sys/socket.h> 61 #include <sys/sockio.h> 62 #include <sys/taskqueue.h> 63 64 #include <net/bpf.h> 65 #include <net/ethernet.h> 66 #include <net/if.h> 67 #include <net/if_var.h> 68 #include <net/if_arp.h> 69 #include <net/if_dl.h> 70 #include <net/if_media.h> 71 #include <net/if_types.h> 72 #include <net/if_vlan_var.h> 73 74 #include <netinet/in.h> 75 #include <netinet/in_systm.h> 76 #include <netinet/ip.h> 77 #include <netinet/tcp.h> 78 #include <netinet/udp.h> 79 80 #include <machine/bus.h> 81 #if defined(__powerpc__) 82 #include <dev/ofw/ofw_bus.h> 83 #include <dev/ofw/openfirm.h> 84 #include <machine/ofw_machdep.h> 85 #endif 86 #include <machine/resource.h> 87 88 #include <dev/mii/mii.h> 89 #include <dev/mii/miivar.h> 90 91 #include <dev/cas/if_casreg.h> 92 #include <dev/cas/if_casvar.h> 93 94 #include <dev/pci/pcireg.h> 95 #include <dev/pci/pcivar.h> 96 97 #include "miibus_if.h" 98 99 #define RINGASSERT(n , min, max) \ 100 CTASSERT(powerof2(n) && (n) >= (min) && (n) <= (max)) 101 102 RINGASSERT(CAS_NRXCOMP, 128, 32768); 103 RINGASSERT(CAS_NRXDESC, 32, 8192); 104 RINGASSERT(CAS_NRXDESC2, 32, 8192); 105 RINGASSERT(CAS_NTXDESC, 32, 8192); 106 107 #undef RINGASSERT 108 109 #define CCDASSERT(m, a) \ 110 CTASSERT((offsetof(struct cas_control_data, m) & ((a) - 1)) == 0) 111 112 CCDASSERT(ccd_rxcomps, CAS_RX_COMP_ALIGN); 113 CCDASSERT(ccd_rxdescs, CAS_RX_DESC_ALIGN); 114 CCDASSERT(ccd_rxdescs2, CAS_RX_DESC_ALIGN); 115 116 #undef CCDASSERT 117 118 #define CAS_TRIES 10000 119 120 /* 121 * According to documentation, the hardware has support for basic TCP 122 * checksum offloading only, in practice this can be also used for UDP 123 * however (i.e. the problem of previous Sun NICs that a checksum of 0x0 124 * is not converted to 0xffff no longer exists). 125 */ 126 #define CAS_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) 127 128 static inline void cas_add_rxdesc(struct cas_softc *sc, u_int idx); 129 static int cas_attach(struct cas_softc *sc); 130 static int cas_bitwait(struct cas_softc *sc, bus_addr_t r, uint32_t clr, 131 uint32_t set); 132 static void cas_cddma_callback(void *xsc, bus_dma_segment_t *segs, 133 int nsegs, int error); 134 static void cas_detach(struct cas_softc *sc); 135 static int cas_disable_rx(struct cas_softc *sc); 136 static int cas_disable_tx(struct cas_softc *sc); 137 static void cas_eint(struct cas_softc *sc, u_int status); 138 static void cas_free(struct mbuf *m); 139 static void cas_init(void *xsc); 140 static void cas_init_locked(struct cas_softc *sc); 141 static void cas_init_regs(struct cas_softc *sc); 142 static int cas_intr(void *v); 143 static void cas_intr_task(void *arg, int pending __unused); 144 static int cas_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); 145 static int cas_load_txmbuf(struct cas_softc *sc, struct mbuf **m_head); 146 static int cas_mediachange(struct ifnet *ifp); 147 static void cas_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr); 148 static void cas_meminit(struct cas_softc *sc); 149 static void cas_mifinit(struct cas_softc *sc); 150 static int cas_mii_readreg(device_t dev, int phy, int reg); 151 static void cas_mii_statchg(device_t dev); 152 static int cas_mii_writereg(device_t dev, int phy, int reg, int val); 153 static void cas_reset(struct cas_softc *sc); 154 static int cas_reset_rx(struct cas_softc *sc); 155 static int cas_reset_tx(struct cas_softc *sc); 156 static void cas_resume(struct cas_softc *sc); 157 static u_int cas_descsize(u_int sz); 158 static void cas_rint(struct cas_softc *sc); 159 static void cas_rint_timeout(void *arg); 160 static inline void cas_rxcksum(struct mbuf *m, uint16_t cksum); 161 static inline void cas_rxcompinit(struct cas_rx_comp *rxcomp); 162 static u_int cas_rxcompsize(u_int sz); 163 static void cas_rxdma_callback(void *xsc, bus_dma_segment_t *segs, 164 int nsegs, int error); 165 static void cas_setladrf(struct cas_softc *sc); 166 static void cas_start(struct ifnet *ifp); 167 static void cas_stop(struct ifnet *ifp); 168 static void cas_suspend(struct cas_softc *sc); 169 static void cas_tick(void *arg); 170 static void cas_tint(struct cas_softc *sc); 171 static void cas_tx_task(void *arg, int pending __unused); 172 static inline void cas_txkick(struct cas_softc *sc); 173 static void cas_watchdog(struct cas_softc *sc); 174 175 static devclass_t cas_devclass; 176 177 MODULE_DEPEND(cas, ether, 1, 1, 1); 178 MODULE_DEPEND(cas, miibus, 1, 1, 1); 179 180 #ifdef CAS_DEBUG 181 #include <sys/ktr.h> 182 #define KTR_CAS KTR_SPARE2 183 #endif 184 185 static int 186 cas_attach(struct cas_softc *sc) 187 { 188 struct cas_txsoft *txs; 189 struct ifnet *ifp; 190 int error, i; 191 uint32_t v; 192 193 /* Set up ifnet structure. */ 194 ifp = sc->sc_ifp = if_alloc(IFT_ETHER); 195 if (ifp == NULL) 196 return (ENOSPC); 197 ifp->if_softc = sc; 198 if_initname(ifp, device_get_name(sc->sc_dev), 199 device_get_unit(sc->sc_dev)); 200 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 201 ifp->if_start = cas_start; 202 ifp->if_ioctl = cas_ioctl; 203 ifp->if_init = cas_init; 204 IFQ_SET_MAXLEN(&ifp->if_snd, CAS_TXQUEUELEN); 205 ifp->if_snd.ifq_drv_maxlen = CAS_TXQUEUELEN; 206 IFQ_SET_READY(&ifp->if_snd); 207 208 callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0); 209 callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0); 210 /* Create local taskq. */ 211 NET_TASK_INIT(&sc->sc_intr_task, 0, cas_intr_task, sc); 212 TASK_INIT(&sc->sc_tx_task, 1, cas_tx_task, ifp); 213 sc->sc_tq = taskqueue_create_fast("cas_taskq", M_WAITOK, 214 taskqueue_thread_enqueue, &sc->sc_tq); 215 if (sc->sc_tq == NULL) { 216 device_printf(sc->sc_dev, "could not create taskqueue\n"); 217 error = ENXIO; 218 goto fail_ifnet; 219 } 220 error = taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq", 221 device_get_nameunit(sc->sc_dev)); 222 if (error != 0) { 223 device_printf(sc->sc_dev, "could not start threads\n"); 224 goto fail_taskq; 225 } 226 227 /* Make sure the chip is stopped. */ 228 cas_reset(sc); 229 230 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 231 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 232 BUS_SPACE_MAXSIZE, 0, BUS_SPACE_MAXSIZE, 0, NULL, NULL, 233 &sc->sc_pdmatag); 234 if (error != 0) 235 goto fail_taskq; 236 237 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, 238 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 239 CAS_PAGE_SIZE, 1, CAS_PAGE_SIZE, 0, NULL, NULL, &sc->sc_rdmatag); 240 if (error != 0) 241 goto fail_ptag; 242 243 error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, 244 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 245 MCLBYTES * CAS_NTXSEGS, CAS_NTXSEGS, MCLBYTES, 246 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag); 247 if (error != 0) 248 goto fail_rtag; 249 250 error = bus_dma_tag_create(sc->sc_pdmatag, CAS_TX_DESC_ALIGN, 0, 251 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 252 sizeof(struct cas_control_data), 1, 253 sizeof(struct cas_control_data), 0, 254 NULL, NULL, &sc->sc_cdmatag); 255 if (error != 0) 256 goto fail_ttag; 257 258 /* 259 * Allocate the control data structures, create and load the 260 * DMA map for it. 261 */ 262 if ((error = bus_dmamem_alloc(sc->sc_cdmatag, 263 (void **)&sc->sc_control_data, 264 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 265 &sc->sc_cddmamap)) != 0) { 266 device_printf(sc->sc_dev, 267 "unable to allocate control data, error = %d\n", error); 268 goto fail_ctag; 269 } 270 271 sc->sc_cddma = 0; 272 if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap, 273 sc->sc_control_data, sizeof(struct cas_control_data), 274 cas_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) { 275 device_printf(sc->sc_dev, 276 "unable to load control data DMA map, error = %d\n", 277 error); 278 goto fail_cmem; 279 } 280 281 /* 282 * Initialize the transmit job descriptors. 283 */ 284 STAILQ_INIT(&sc->sc_txfreeq); 285 STAILQ_INIT(&sc->sc_txdirtyq); 286 287 /* 288 * Create the transmit buffer DMA maps. 289 */ 290 error = ENOMEM; 291 for (i = 0; i < CAS_TXQUEUELEN; i++) { 292 txs = &sc->sc_txsoft[i]; 293 txs->txs_mbuf = NULL; 294 txs->txs_ndescs = 0; 295 if ((error = bus_dmamap_create(sc->sc_tdmatag, 0, 296 &txs->txs_dmamap)) != 0) { 297 device_printf(sc->sc_dev, 298 "unable to create TX DMA map %d, error = %d\n", 299 i, error); 300 goto fail_txd; 301 } 302 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 303 } 304 305 /* 306 * Allocate the receive buffers, create and load the DMA maps 307 * for them. 308 */ 309 for (i = 0; i < CAS_NRXDESC; i++) { 310 if ((error = bus_dmamem_alloc(sc->sc_rdmatag, 311 &sc->sc_rxdsoft[i].rxds_buf, BUS_DMA_WAITOK, 312 &sc->sc_rxdsoft[i].rxds_dmamap)) != 0) { 313 device_printf(sc->sc_dev, 314 "unable to allocate RX buffer %d, error = %d\n", 315 i, error); 316 goto fail_rxmem; 317 } 318 319 sc->sc_rxdptr = i; 320 sc->sc_rxdsoft[i].rxds_paddr = 0; 321 if ((error = bus_dmamap_load(sc->sc_rdmatag, 322 sc->sc_rxdsoft[i].rxds_dmamap, sc->sc_rxdsoft[i].rxds_buf, 323 CAS_PAGE_SIZE, cas_rxdma_callback, sc, 0)) != 0 || 324 sc->sc_rxdsoft[i].rxds_paddr == 0) { 325 device_printf(sc->sc_dev, 326 "unable to load RX DMA map %d, error = %d\n", 327 i, error); 328 goto fail_rxmap; 329 } 330 } 331 332 if ((sc->sc_flags & CAS_SERDES) == 0) { 333 CAS_WRITE_4(sc, CAS_PCS_DATAPATH, CAS_PCS_DATAPATH_MII); 334 CAS_BARRIER(sc, CAS_PCS_DATAPATH, 4, 335 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 336 cas_mifinit(sc); 337 /* 338 * Look for an external PHY. 339 */ 340 error = ENXIO; 341 v = CAS_READ_4(sc, CAS_MIF_CONF); 342 if ((v & CAS_MIF_CONF_MDI1) != 0) { 343 v |= CAS_MIF_CONF_PHY_SELECT; 344 CAS_WRITE_4(sc, CAS_MIF_CONF, v); 345 CAS_BARRIER(sc, CAS_MIF_CONF, 4, 346 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 347 /* Enable/unfreeze the GMII pins of Saturn. */ 348 if (sc->sc_variant == CAS_SATURN) { 349 CAS_WRITE_4(sc, CAS_SATURN_PCFG, 350 CAS_READ_4(sc, CAS_SATURN_PCFG) & 351 ~CAS_SATURN_PCFG_FSI); 352 CAS_BARRIER(sc, CAS_SATURN_PCFG, 4, 353 BUS_SPACE_BARRIER_READ | 354 BUS_SPACE_BARRIER_WRITE); 355 DELAY(10000); 356 } 357 error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, 358 cas_mediachange, cas_mediastatus, BMSR_DEFCAPMASK, 359 MII_PHY_ANY, MII_OFFSET_ANY, MIIF_DOPAUSE); 360 } 361 /* 362 * Fall back on an internal PHY if no external PHY was found. 363 */ 364 if (error != 0 && (v & CAS_MIF_CONF_MDI0) != 0) { 365 v &= ~CAS_MIF_CONF_PHY_SELECT; 366 CAS_WRITE_4(sc, CAS_MIF_CONF, v); 367 CAS_BARRIER(sc, CAS_MIF_CONF, 4, 368 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 369 /* Freeze the GMII pins of Saturn for saving power. */ 370 if (sc->sc_variant == CAS_SATURN) { 371 CAS_WRITE_4(sc, CAS_SATURN_PCFG, 372 CAS_READ_4(sc, CAS_SATURN_PCFG) | 373 CAS_SATURN_PCFG_FSI); 374 CAS_BARRIER(sc, CAS_SATURN_PCFG, 4, 375 BUS_SPACE_BARRIER_READ | 376 BUS_SPACE_BARRIER_WRITE); 377 DELAY(10000); 378 } 379 error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, 380 cas_mediachange, cas_mediastatus, BMSR_DEFCAPMASK, 381 MII_PHY_ANY, MII_OFFSET_ANY, MIIF_DOPAUSE); 382 } 383 } else { 384 /* 385 * Use the external PCS SERDES. 386 */ 387 CAS_WRITE_4(sc, CAS_PCS_DATAPATH, CAS_PCS_DATAPATH_SERDES); 388 CAS_BARRIER(sc, CAS_PCS_DATAPATH, 4, BUS_SPACE_BARRIER_WRITE); 389 /* Enable/unfreeze the SERDES pins of Saturn. */ 390 if (sc->sc_variant == CAS_SATURN) { 391 CAS_WRITE_4(sc, CAS_SATURN_PCFG, 0); 392 CAS_BARRIER(sc, CAS_SATURN_PCFG, 4, 393 BUS_SPACE_BARRIER_WRITE); 394 } 395 CAS_WRITE_4(sc, CAS_PCS_SERDES_CTRL, CAS_PCS_SERDES_CTRL_ESD); 396 CAS_BARRIER(sc, CAS_PCS_SERDES_CTRL, 4, 397 BUS_SPACE_BARRIER_WRITE); 398 CAS_WRITE_4(sc, CAS_PCS_CONF, CAS_PCS_CONF_EN); 399 CAS_BARRIER(sc, CAS_PCS_CONF, 4, 400 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 401 error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, 402 cas_mediachange, cas_mediastatus, BMSR_DEFCAPMASK, 403 CAS_PHYAD_EXTERNAL, MII_OFFSET_ANY, MIIF_DOPAUSE); 404 } 405 if (error != 0) { 406 device_printf(sc->sc_dev, "attaching PHYs failed\n"); 407 goto fail_rxmap; 408 } 409 sc->sc_mii = device_get_softc(sc->sc_miibus); 410 411 /* 412 * From this point forward, the attachment cannot fail. A failure 413 * before this point releases all resources that may have been 414 * allocated. 415 */ 416 417 /* Announce FIFO sizes. */ 418 v = CAS_READ_4(sc, CAS_TX_FIFO_SIZE); 419 device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n", 420 CAS_RX_FIFO_SIZE / 1024, v / 16); 421 422 /* Attach the interface. */ 423 ether_ifattach(ifp, sc->sc_enaddr); 424 425 /* 426 * Tell the upper layer(s) we support long frames/checksum offloads. 427 */ 428 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 429 ifp->if_capabilities = IFCAP_VLAN_MTU; 430 if ((sc->sc_flags & CAS_NO_CSUM) == 0) { 431 ifp->if_capabilities |= IFCAP_HWCSUM; 432 ifp->if_hwassist = CAS_CSUM_FEATURES; 433 } 434 ifp->if_capenable = ifp->if_capabilities; 435 436 return (0); 437 438 /* 439 * Free any resources we've allocated during the failed attach 440 * attempt. Do this in reverse order and fall through. 441 */ 442 fail_rxmap: 443 for (i = 0; i < CAS_NRXDESC; i++) 444 if (sc->sc_rxdsoft[i].rxds_paddr != 0) 445 bus_dmamap_unload(sc->sc_rdmatag, 446 sc->sc_rxdsoft[i].rxds_dmamap); 447 fail_rxmem: 448 for (i = 0; i < CAS_NRXDESC; i++) 449 if (sc->sc_rxdsoft[i].rxds_buf != NULL) 450 bus_dmamem_free(sc->sc_rdmatag, 451 sc->sc_rxdsoft[i].rxds_buf, 452 sc->sc_rxdsoft[i].rxds_dmamap); 453 fail_txd: 454 for (i = 0; i < CAS_TXQUEUELEN; i++) 455 if (sc->sc_txsoft[i].txs_dmamap != NULL) 456 bus_dmamap_destroy(sc->sc_tdmatag, 457 sc->sc_txsoft[i].txs_dmamap); 458 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); 459 fail_cmem: 460 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, 461 sc->sc_cddmamap); 462 fail_ctag: 463 bus_dma_tag_destroy(sc->sc_cdmatag); 464 fail_ttag: 465 bus_dma_tag_destroy(sc->sc_tdmatag); 466 fail_rtag: 467 bus_dma_tag_destroy(sc->sc_rdmatag); 468 fail_ptag: 469 bus_dma_tag_destroy(sc->sc_pdmatag); 470 fail_taskq: 471 taskqueue_free(sc->sc_tq); 472 fail_ifnet: 473 if_free(ifp); 474 return (error); 475 } 476 477 static void 478 cas_detach(struct cas_softc *sc) 479 { 480 struct ifnet *ifp = sc->sc_ifp; 481 int i; 482 483 ether_ifdetach(ifp); 484 CAS_LOCK(sc); 485 cas_stop(ifp); 486 CAS_UNLOCK(sc); 487 callout_drain(&sc->sc_tick_ch); 488 callout_drain(&sc->sc_rx_ch); 489 taskqueue_drain(sc->sc_tq, &sc->sc_intr_task); 490 taskqueue_drain(sc->sc_tq, &sc->sc_tx_task); 491 if_free(ifp); 492 taskqueue_free(sc->sc_tq); 493 device_delete_child(sc->sc_dev, sc->sc_miibus); 494 495 for (i = 0; i < CAS_NRXDESC; i++) 496 if (sc->sc_rxdsoft[i].rxds_dmamap != NULL) 497 bus_dmamap_sync(sc->sc_rdmatag, 498 sc->sc_rxdsoft[i].rxds_dmamap, 499 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 500 for (i = 0; i < CAS_NRXDESC; i++) 501 if (sc->sc_rxdsoft[i].rxds_paddr != 0) 502 bus_dmamap_unload(sc->sc_rdmatag, 503 sc->sc_rxdsoft[i].rxds_dmamap); 504 for (i = 0; i < CAS_NRXDESC; i++) 505 if (sc->sc_rxdsoft[i].rxds_buf != NULL) 506 bus_dmamem_free(sc->sc_rdmatag, 507 sc->sc_rxdsoft[i].rxds_buf, 508 sc->sc_rxdsoft[i].rxds_dmamap); 509 for (i = 0; i < CAS_TXQUEUELEN; i++) 510 if (sc->sc_txsoft[i].txs_dmamap != NULL) 511 bus_dmamap_destroy(sc->sc_tdmatag, 512 sc->sc_txsoft[i].txs_dmamap); 513 CAS_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 514 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); 515 bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, 516 sc->sc_cddmamap); 517 bus_dma_tag_destroy(sc->sc_cdmatag); 518 bus_dma_tag_destroy(sc->sc_tdmatag); 519 bus_dma_tag_destroy(sc->sc_rdmatag); 520 bus_dma_tag_destroy(sc->sc_pdmatag); 521 } 522 523 static void 524 cas_suspend(struct cas_softc *sc) 525 { 526 struct ifnet *ifp = sc->sc_ifp; 527 528 CAS_LOCK(sc); 529 cas_stop(ifp); 530 CAS_UNLOCK(sc); 531 } 532 533 static void 534 cas_resume(struct cas_softc *sc) 535 { 536 struct ifnet *ifp = sc->sc_ifp; 537 538 CAS_LOCK(sc); 539 /* 540 * On resume all registers have to be initialized again like 541 * after power-on. 542 */ 543 sc->sc_flags &= ~CAS_INITED; 544 if (ifp->if_flags & IFF_UP) 545 cas_init_locked(sc); 546 CAS_UNLOCK(sc); 547 } 548 549 static inline void 550 cas_rxcksum(struct mbuf *m, uint16_t cksum) 551 { 552 struct ether_header *eh; 553 struct ip *ip; 554 struct udphdr *uh; 555 uint16_t *opts; 556 int32_t hlen, len, pktlen; 557 uint32_t temp32; 558 559 pktlen = m->m_pkthdr.len; 560 if (pktlen < sizeof(struct ether_header) + sizeof(struct ip)) 561 return; 562 eh = mtod(m, struct ether_header *); 563 if (eh->ether_type != htons(ETHERTYPE_IP)) 564 return; 565 ip = (struct ip *)(eh + 1); 566 if (ip->ip_v != IPVERSION) 567 return; 568 569 hlen = ip->ip_hl << 2; 570 pktlen -= sizeof(struct ether_header); 571 if (hlen < sizeof(struct ip)) 572 return; 573 if (ntohs(ip->ip_len) < hlen) 574 return; 575 if (ntohs(ip->ip_len) != pktlen) 576 return; 577 if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) 578 return; /* Cannot handle fragmented packet. */ 579 580 switch (ip->ip_p) { 581 case IPPROTO_TCP: 582 if (pktlen < (hlen + sizeof(struct tcphdr))) 583 return; 584 break; 585 case IPPROTO_UDP: 586 if (pktlen < (hlen + sizeof(struct udphdr))) 587 return; 588 uh = (struct udphdr *)((uint8_t *)ip + hlen); 589 if (uh->uh_sum == 0) 590 return; /* no checksum */ 591 break; 592 default: 593 return; 594 } 595 596 cksum = ~cksum; 597 /* checksum fixup for IP options */ 598 len = hlen - sizeof(struct ip); 599 if (len > 0) { 600 opts = (uint16_t *)(ip + 1); 601 for (; len > 0; len -= sizeof(uint16_t), opts++) { 602 temp32 = cksum - *opts; 603 temp32 = (temp32 >> 16) + (temp32 & 65535); 604 cksum = temp32 & 65535; 605 } 606 } 607 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; 608 m->m_pkthdr.csum_data = cksum; 609 } 610 611 static void 612 cas_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) 613 { 614 struct cas_softc *sc = xsc; 615 616 if (error != 0) 617 return; 618 if (nsegs != 1) 619 panic("%s: bad control buffer segment count", __func__); 620 sc->sc_cddma = segs[0].ds_addr; 621 } 622 623 static void 624 cas_rxdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) 625 { 626 struct cas_softc *sc = xsc; 627 628 if (error != 0) 629 return; 630 if (nsegs != 1) 631 panic("%s: bad RX buffer segment count", __func__); 632 sc->sc_rxdsoft[sc->sc_rxdptr].rxds_paddr = segs[0].ds_addr; 633 } 634 635 static void 636 cas_tick(void *arg) 637 { 638 struct cas_softc *sc = arg; 639 struct ifnet *ifp = sc->sc_ifp; 640 uint32_t v; 641 642 CAS_LOCK_ASSERT(sc, MA_OWNED); 643 644 /* 645 * Unload collision and error counters. 646 */ 647 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 648 CAS_READ_4(sc, CAS_MAC_NORM_COLL_CNT) + 649 CAS_READ_4(sc, CAS_MAC_FIRST_COLL_CNT)); 650 v = CAS_READ_4(sc, CAS_MAC_EXCESS_COLL_CNT) + 651 CAS_READ_4(sc, CAS_MAC_LATE_COLL_CNT); 652 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, v); 653 if_inc_counter(ifp, IFCOUNTER_OERRORS, v); 654 if_inc_counter(ifp, IFCOUNTER_IERRORS, 655 CAS_READ_4(sc, CAS_MAC_RX_LEN_ERR_CNT) + 656 CAS_READ_4(sc, CAS_MAC_RX_ALIGN_ERR) + 657 CAS_READ_4(sc, CAS_MAC_RX_CRC_ERR_CNT) + 658 CAS_READ_4(sc, CAS_MAC_RX_CODE_VIOL)); 659 660 /* 661 * Then clear the hardware counters. 662 */ 663 CAS_WRITE_4(sc, CAS_MAC_NORM_COLL_CNT, 0); 664 CAS_WRITE_4(sc, CAS_MAC_FIRST_COLL_CNT, 0); 665 CAS_WRITE_4(sc, CAS_MAC_EXCESS_COLL_CNT, 0); 666 CAS_WRITE_4(sc, CAS_MAC_LATE_COLL_CNT, 0); 667 CAS_WRITE_4(sc, CAS_MAC_RX_LEN_ERR_CNT, 0); 668 CAS_WRITE_4(sc, CAS_MAC_RX_ALIGN_ERR, 0); 669 CAS_WRITE_4(sc, CAS_MAC_RX_CRC_ERR_CNT, 0); 670 CAS_WRITE_4(sc, CAS_MAC_RX_CODE_VIOL, 0); 671 672 mii_tick(sc->sc_mii); 673 674 if (sc->sc_txfree != CAS_MAXTXFREE) 675 cas_tint(sc); 676 677 cas_watchdog(sc); 678 679 callout_reset(&sc->sc_tick_ch, hz, cas_tick, sc); 680 } 681 682 static int 683 cas_bitwait(struct cas_softc *sc, bus_addr_t r, uint32_t clr, uint32_t set) 684 { 685 int i; 686 uint32_t reg; 687 688 for (i = CAS_TRIES; i--; DELAY(100)) { 689 reg = CAS_READ_4(sc, r); 690 if ((reg & clr) == 0 && (reg & set) == set) 691 return (1); 692 } 693 return (0); 694 } 695 696 static void 697 cas_reset(struct cas_softc *sc) 698 { 699 700 #ifdef CAS_DEBUG 701 CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__); 702 #endif 703 /* Disable all interrupts in order to avoid spurious ones. */ 704 CAS_WRITE_4(sc, CAS_INTMASK, 0xffffffff); 705 706 cas_reset_rx(sc); 707 cas_reset_tx(sc); 708 709 /* 710 * Do a full reset modulo the result of the last auto-negotiation 711 * when using the SERDES. 712 */ 713 CAS_WRITE_4(sc, CAS_RESET, CAS_RESET_RX | CAS_RESET_TX | 714 ((sc->sc_flags & CAS_SERDES) != 0 ? CAS_RESET_PCS_DIS : 0)); 715 CAS_BARRIER(sc, CAS_RESET, 4, 716 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 717 DELAY(3000); 718 if (!cas_bitwait(sc, CAS_RESET, CAS_RESET_RX | CAS_RESET_TX, 0)) 719 device_printf(sc->sc_dev, "cannot reset device\n"); 720 } 721 722 static void 723 cas_stop(struct ifnet *ifp) 724 { 725 struct cas_softc *sc = ifp->if_softc; 726 struct cas_txsoft *txs; 727 728 #ifdef CAS_DEBUG 729 CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__); 730 #endif 731 732 callout_stop(&sc->sc_tick_ch); 733 callout_stop(&sc->sc_rx_ch); 734 735 /* Disable all interrupts in order to avoid spurious ones. */ 736 CAS_WRITE_4(sc, CAS_INTMASK, 0xffffffff); 737 738 cas_reset_tx(sc); 739 cas_reset_rx(sc); 740 741 /* 742 * Release any queued transmit buffers. 743 */ 744 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 745 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 746 if (txs->txs_ndescs != 0) { 747 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 748 BUS_DMASYNC_POSTWRITE); 749 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 750 if (txs->txs_mbuf != NULL) { 751 m_freem(txs->txs_mbuf); 752 txs->txs_mbuf = NULL; 753 } 754 } 755 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 756 } 757 758 /* 759 * Mark the interface down and cancel the watchdog timer. 760 */ 761 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 762 sc->sc_flags &= ~CAS_LINK; 763 sc->sc_wdog_timer = 0; 764 } 765 766 static int 767 cas_reset_rx(struct cas_softc *sc) 768 { 769 770 /* 771 * Resetting while DMA is in progress can cause a bus hang, so we 772 * disable DMA first. 773 */ 774 (void)cas_disable_rx(sc); 775 CAS_WRITE_4(sc, CAS_RX_CONF, 0); 776 CAS_BARRIER(sc, CAS_RX_CONF, 4, 777 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 778 if (!cas_bitwait(sc, CAS_RX_CONF, CAS_RX_CONF_RXDMA_EN, 0)) 779 device_printf(sc->sc_dev, "cannot disable RX DMA\n"); 780 781 /* Finally, reset the ERX. */ 782 CAS_WRITE_4(sc, CAS_RESET, CAS_RESET_RX | 783 ((sc->sc_flags & CAS_SERDES) != 0 ? CAS_RESET_PCS_DIS : 0)); 784 CAS_BARRIER(sc, CAS_RESET, 4, 785 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 786 if (!cas_bitwait(sc, CAS_RESET, CAS_RESET_RX, 0)) { 787 device_printf(sc->sc_dev, "cannot reset receiver\n"); 788 return (1); 789 } 790 return (0); 791 } 792 793 static int 794 cas_reset_tx(struct cas_softc *sc) 795 { 796 797 /* 798 * Resetting while DMA is in progress can cause a bus hang, so we 799 * disable DMA first. 800 */ 801 (void)cas_disable_tx(sc); 802 CAS_WRITE_4(sc, CAS_TX_CONF, 0); 803 CAS_BARRIER(sc, CAS_TX_CONF, 4, 804 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 805 if (!cas_bitwait(sc, CAS_TX_CONF, CAS_TX_CONF_TXDMA_EN, 0)) 806 device_printf(sc->sc_dev, "cannot disable TX DMA\n"); 807 808 /* Finally, reset the ETX. */ 809 CAS_WRITE_4(sc, CAS_RESET, CAS_RESET_TX | 810 ((sc->sc_flags & CAS_SERDES) != 0 ? CAS_RESET_PCS_DIS : 0)); 811 CAS_BARRIER(sc, CAS_RESET, 4, 812 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 813 if (!cas_bitwait(sc, CAS_RESET, CAS_RESET_TX, 0)) { 814 device_printf(sc->sc_dev, "cannot reset transmitter\n"); 815 return (1); 816 } 817 return (0); 818 } 819 820 static int 821 cas_disable_rx(struct cas_softc *sc) 822 { 823 824 CAS_WRITE_4(sc, CAS_MAC_RX_CONF, 825 CAS_READ_4(sc, CAS_MAC_RX_CONF) & ~CAS_MAC_RX_CONF_EN); 826 CAS_BARRIER(sc, CAS_MAC_RX_CONF, 4, 827 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 828 if (cas_bitwait(sc, CAS_MAC_RX_CONF, CAS_MAC_RX_CONF_EN, 0)) 829 return (1); 830 if (bootverbose) 831 device_printf(sc->sc_dev, "cannot disable RX MAC\n"); 832 return (0); 833 } 834 835 static int 836 cas_disable_tx(struct cas_softc *sc) 837 { 838 839 CAS_WRITE_4(sc, CAS_MAC_TX_CONF, 840 CAS_READ_4(sc, CAS_MAC_TX_CONF) & ~CAS_MAC_TX_CONF_EN); 841 CAS_BARRIER(sc, CAS_MAC_TX_CONF, 4, 842 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 843 if (cas_bitwait(sc, CAS_MAC_TX_CONF, CAS_MAC_TX_CONF_EN, 0)) 844 return (1); 845 if (bootverbose) 846 device_printf(sc->sc_dev, "cannot disable TX MAC\n"); 847 return (0); 848 } 849 850 static inline void 851 cas_rxcompinit(struct cas_rx_comp *rxcomp) 852 { 853 854 rxcomp->crc_word1 = 0; 855 rxcomp->crc_word2 = 0; 856 rxcomp->crc_word3 = 857 htole64(CAS_SET(ETHER_HDR_LEN + sizeof(struct ip), CAS_RC3_CSO)); 858 rxcomp->crc_word4 = htole64(CAS_RC4_ZERO); 859 } 860 861 static void 862 cas_meminit(struct cas_softc *sc) 863 { 864 int i; 865 866 CAS_LOCK_ASSERT(sc, MA_OWNED); 867 868 /* 869 * Initialize the transmit descriptor ring. 870 */ 871 for (i = 0; i < CAS_NTXDESC; i++) { 872 sc->sc_txdescs[i].cd_flags = 0; 873 sc->sc_txdescs[i].cd_buf_ptr = 0; 874 } 875 sc->sc_txfree = CAS_MAXTXFREE; 876 sc->sc_txnext = 0; 877 sc->sc_txwin = 0; 878 879 /* 880 * Initialize the receive completion ring. 881 */ 882 for (i = 0; i < CAS_NRXCOMP; i++) 883 cas_rxcompinit(&sc->sc_rxcomps[i]); 884 sc->sc_rxcptr = 0; 885 886 /* 887 * Initialize the first receive descriptor ring. We leave 888 * the second one zeroed as we don't actually use it. 889 */ 890 for (i = 0; i < CAS_NRXDESC; i++) 891 CAS_INIT_RXDESC(sc, i, i); 892 sc->sc_rxdptr = 0; 893 894 CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 895 } 896 897 static u_int 898 cas_descsize(u_int sz) 899 { 900 901 switch (sz) { 902 case 32: 903 return (CAS_DESC_32); 904 case 64: 905 return (CAS_DESC_64); 906 case 128: 907 return (CAS_DESC_128); 908 case 256: 909 return (CAS_DESC_256); 910 case 512: 911 return (CAS_DESC_512); 912 case 1024: 913 return (CAS_DESC_1K); 914 case 2048: 915 return (CAS_DESC_2K); 916 case 4096: 917 return (CAS_DESC_4K); 918 case 8192: 919 return (CAS_DESC_8K); 920 default: 921 printf("%s: invalid descriptor ring size %d\n", __func__, sz); 922 return (CAS_DESC_32); 923 } 924 } 925 926 static u_int 927 cas_rxcompsize(u_int sz) 928 { 929 930 switch (sz) { 931 case 128: 932 return (CAS_RX_CONF_COMP_128); 933 case 256: 934 return (CAS_RX_CONF_COMP_256); 935 case 512: 936 return (CAS_RX_CONF_COMP_512); 937 case 1024: 938 return (CAS_RX_CONF_COMP_1K); 939 case 2048: 940 return (CAS_RX_CONF_COMP_2K); 941 case 4096: 942 return (CAS_RX_CONF_COMP_4K); 943 case 8192: 944 return (CAS_RX_CONF_COMP_8K); 945 case 16384: 946 return (CAS_RX_CONF_COMP_16K); 947 case 32768: 948 return (CAS_RX_CONF_COMP_32K); 949 default: 950 printf("%s: invalid dcompletion ring size %d\n", __func__, sz); 951 return (CAS_RX_CONF_COMP_128); 952 } 953 } 954 955 static void 956 cas_init(void *xsc) 957 { 958 struct cas_softc *sc = xsc; 959 960 CAS_LOCK(sc); 961 cas_init_locked(sc); 962 CAS_UNLOCK(sc); 963 } 964 965 /* 966 * Initialization of interface; set up initialization block 967 * and transmit/receive descriptor rings. 968 */ 969 static void 970 cas_init_locked(struct cas_softc *sc) 971 { 972 struct ifnet *ifp = sc->sc_ifp; 973 uint32_t v; 974 975 CAS_LOCK_ASSERT(sc, MA_OWNED); 976 977 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 978 return; 979 980 #ifdef CAS_DEBUG 981 CTR2(KTR_CAS, "%s: %s: calling stop", device_get_name(sc->sc_dev), 982 __func__); 983 #endif 984 /* 985 * Initialization sequence. The numbered steps below correspond 986 * to the sequence outlined in section 6.3.5.1 in the Ethernet 987 * Channel Engine manual (part of the PCIO manual). 988 * See also the STP2002-STQ document from Sun Microsystems. 989 */ 990 991 /* step 1 & 2. Reset the Ethernet Channel. */ 992 cas_stop(ifp); 993 cas_reset(sc); 994 #ifdef CAS_DEBUG 995 CTR2(KTR_CAS, "%s: %s: restarting", device_get_name(sc->sc_dev), 996 __func__); 997 #endif 998 999 if ((sc->sc_flags & CAS_SERDES) == 0) 1000 /* Re-initialize the MIF. */ 1001 cas_mifinit(sc); 1002 1003 /* step 3. Setup data structures in host memory. */ 1004 cas_meminit(sc); 1005 1006 /* step 4. TX MAC registers & counters */ 1007 cas_init_regs(sc); 1008 1009 /* step 5. RX MAC registers & counters */ 1010 1011 /* step 6 & 7. Program Ring Base Addresses. */ 1012 CAS_WRITE_4(sc, CAS_TX_DESC3_BASE_HI, 1013 (((uint64_t)CAS_CDTXDADDR(sc, 0)) >> 32)); 1014 CAS_WRITE_4(sc, CAS_TX_DESC3_BASE_LO, 1015 CAS_CDTXDADDR(sc, 0) & 0xffffffff); 1016 1017 CAS_WRITE_4(sc, CAS_RX_COMP_BASE_HI, 1018 (((uint64_t)CAS_CDRXCADDR(sc, 0)) >> 32)); 1019 CAS_WRITE_4(sc, CAS_RX_COMP_BASE_LO, 1020 CAS_CDRXCADDR(sc, 0) & 0xffffffff); 1021 1022 CAS_WRITE_4(sc, CAS_RX_DESC_BASE_HI, 1023 (((uint64_t)CAS_CDRXDADDR(sc, 0)) >> 32)); 1024 CAS_WRITE_4(sc, CAS_RX_DESC_BASE_LO, 1025 CAS_CDRXDADDR(sc, 0) & 0xffffffff); 1026 1027 if ((sc->sc_flags & CAS_REG_PLUS) != 0) { 1028 CAS_WRITE_4(sc, CAS_RX_DESC2_BASE_HI, 1029 (((uint64_t)CAS_CDRXD2ADDR(sc, 0)) >> 32)); 1030 CAS_WRITE_4(sc, CAS_RX_DESC2_BASE_LO, 1031 CAS_CDRXD2ADDR(sc, 0) & 0xffffffff); 1032 } 1033 1034 #ifdef CAS_DEBUG 1035 CTR5(KTR_CAS, 1036 "loading TXDR %lx, RXCR %lx, RXDR %lx, RXD2R %lx, cddma %lx", 1037 CAS_CDTXDADDR(sc, 0), CAS_CDRXCADDR(sc, 0), CAS_CDRXDADDR(sc, 0), 1038 CAS_CDRXD2ADDR(sc, 0), sc->sc_cddma); 1039 #endif 1040 1041 /* step 8. Global Configuration & Interrupt Masks */ 1042 1043 /* Disable weighted round robin. */ 1044 CAS_WRITE_4(sc, CAS_CAW, CAS_CAW_RR_DIS); 1045 1046 /* 1047 * Enable infinite bursts for revisions without PCI issues if 1048 * applicable. Doing so greatly improves the TX performance. 1049 */ 1050 CAS_WRITE_4(sc, CAS_INF_BURST, 1051 (sc->sc_flags & CAS_TABORT) == 0 ? CAS_INF_BURST_EN : 1052 0); 1053 1054 /* Set up interrupts. */ 1055 CAS_WRITE_4(sc, CAS_INTMASK, 1056 ~(CAS_INTR_TX_INT_ME | CAS_INTR_TX_TAG_ERR | 1057 CAS_INTR_RX_DONE | CAS_INTR_RX_BUF_NA | CAS_INTR_RX_TAG_ERR | 1058 CAS_INTR_RX_COMP_FULL | CAS_INTR_RX_BUF_AEMPTY | 1059 CAS_INTR_RX_COMP_AFULL | CAS_INTR_RX_LEN_MMATCH | 1060 CAS_INTR_PCI_ERROR_INT 1061 #ifdef CAS_DEBUG 1062 | CAS_INTR_PCS_INT | CAS_INTR_MIF 1063 #endif 1064 )); 1065 /* Don't clear top level interrupts when CAS_STATUS_ALIAS is read. */ 1066 CAS_WRITE_4(sc, CAS_CLEAR_ALIAS, 0); 1067 CAS_WRITE_4(sc, CAS_MAC_RX_MASK, ~CAS_MAC_RX_OVERFLOW); 1068 CAS_WRITE_4(sc, CAS_MAC_TX_MASK, 1069 ~(CAS_MAC_TX_UNDERRUN | CAS_MAC_TX_MAX_PKT_ERR)); 1070 #ifdef CAS_DEBUG 1071 CAS_WRITE_4(sc, CAS_MAC_CTRL_MASK, 1072 ~(CAS_MAC_CTRL_PAUSE_RCVD | CAS_MAC_CTRL_PAUSE | 1073 CAS_MAC_CTRL_NON_PAUSE)); 1074 #else 1075 CAS_WRITE_4(sc, CAS_MAC_CTRL_MASK, 1076 CAS_MAC_CTRL_PAUSE_RCVD | CAS_MAC_CTRL_PAUSE | 1077 CAS_MAC_CTRL_NON_PAUSE); 1078 #endif 1079 1080 /* Enable PCI error interrupts. */ 1081 CAS_WRITE_4(sc, CAS_ERROR_MASK, 1082 ~(CAS_ERROR_DTRTO | CAS_ERROR_OTHER | CAS_ERROR_DMAW_ZERO | 1083 CAS_ERROR_DMAR_ZERO | CAS_ERROR_RTRTO)); 1084 1085 /* Enable PCI error interrupts in BIM configuration. */ 1086 CAS_WRITE_4(sc, CAS_BIM_CONF, 1087 CAS_BIM_CONF_DPAR_EN | CAS_BIM_CONF_RMA_EN | CAS_BIM_CONF_RTA_EN); 1088 1089 /* 1090 * step 9. ETX Configuration: encode receive descriptor ring size, 1091 * enable DMA and disable pre-interrupt writeback completion. 1092 */ 1093 v = cas_descsize(CAS_NTXDESC) << CAS_TX_CONF_DESC3_SHFT; 1094 CAS_WRITE_4(sc, CAS_TX_CONF, v | CAS_TX_CONF_TXDMA_EN | 1095 CAS_TX_CONF_RDPP_DIS | CAS_TX_CONF_PICWB_DIS); 1096 1097 /* step 10. ERX Configuration */ 1098 1099 /* 1100 * Encode receive completion and descriptor ring sizes, set the 1101 * swivel offset. 1102 */ 1103 v = cas_rxcompsize(CAS_NRXCOMP) << CAS_RX_CONF_COMP_SHFT; 1104 v |= cas_descsize(CAS_NRXDESC) << CAS_RX_CONF_DESC_SHFT; 1105 if ((sc->sc_flags & CAS_REG_PLUS) != 0) 1106 v |= cas_descsize(CAS_NRXDESC2) << CAS_RX_CONF_DESC2_SHFT; 1107 CAS_WRITE_4(sc, CAS_RX_CONF, 1108 v | (ETHER_ALIGN << CAS_RX_CONF_SOFF_SHFT)); 1109 1110 /* Set the PAUSE thresholds. We use the maximum OFF threshold. */ 1111 CAS_WRITE_4(sc, CAS_RX_PTHRS, 1112 (111 << CAS_RX_PTHRS_XOFF_SHFT) | (15 << CAS_RX_PTHRS_XON_SHFT)); 1113 1114 /* RX blanking */ 1115 CAS_WRITE_4(sc, CAS_RX_BLANK, 1116 (15 << CAS_RX_BLANK_TIME_SHFT) | (5 << CAS_RX_BLANK_PKTS_SHFT)); 1117 1118 /* Set RX_COMP_AFULL threshold to half of the RX completions. */ 1119 CAS_WRITE_4(sc, CAS_RX_AEMPTY_THRS, 1120 (CAS_NRXCOMP / 2) << CAS_RX_AEMPTY_COMP_SHFT); 1121 1122 /* Initialize the RX page size register as appropriate for 8k. */ 1123 CAS_WRITE_4(sc, CAS_RX_PSZ, 1124 (CAS_RX_PSZ_8K << CAS_RX_PSZ_SHFT) | 1125 (4 << CAS_RX_PSZ_MB_CNT_SHFT) | 1126 (CAS_RX_PSZ_MB_STRD_2K << CAS_RX_PSZ_MB_STRD_SHFT) | 1127 (CAS_RX_PSZ_MB_OFF_64 << CAS_RX_PSZ_MB_OFF_SHFT)); 1128 1129 /* Disable RX random early detection. */ 1130 CAS_WRITE_4(sc, CAS_RX_RED, 0); 1131 1132 /* Zero the RX reassembly DMA table. */ 1133 for (v = 0; v <= CAS_RX_REAS_DMA_ADDR_LC; v++) { 1134 CAS_WRITE_4(sc, CAS_RX_REAS_DMA_ADDR, v); 1135 CAS_WRITE_4(sc, CAS_RX_REAS_DMA_DATA_LO, 0); 1136 CAS_WRITE_4(sc, CAS_RX_REAS_DMA_DATA_MD, 0); 1137 CAS_WRITE_4(sc, CAS_RX_REAS_DMA_DATA_HI, 0); 1138 } 1139 1140 /* Ensure the RX control FIFO and RX IPP FIFO addresses are zero. */ 1141 CAS_WRITE_4(sc, CAS_RX_CTRL_FIFO, 0); 1142 CAS_WRITE_4(sc, CAS_RX_IPP_ADDR, 0); 1143 1144 /* Finally, enable RX DMA. */ 1145 CAS_WRITE_4(sc, CAS_RX_CONF, 1146 CAS_READ_4(sc, CAS_RX_CONF) | CAS_RX_CONF_RXDMA_EN); 1147 1148 /* step 11. Configure Media. */ 1149 1150 /* step 12. RX_MAC Configuration Register */ 1151 v = CAS_READ_4(sc, CAS_MAC_RX_CONF); 1152 v &= ~(CAS_MAC_RX_CONF_STRPPAD | CAS_MAC_RX_CONF_EN); 1153 v |= CAS_MAC_RX_CONF_STRPFCS; 1154 sc->sc_mac_rxcfg = v; 1155 /* 1156 * Clear the RX filter and reprogram it. This will also set the 1157 * current RX MAC configuration and enable it. 1158 */ 1159 cas_setladrf(sc); 1160 1161 /* step 13. TX_MAC Configuration Register */ 1162 v = CAS_READ_4(sc, CAS_MAC_TX_CONF); 1163 v |= CAS_MAC_TX_CONF_EN; 1164 (void)cas_disable_tx(sc); 1165 CAS_WRITE_4(sc, CAS_MAC_TX_CONF, v); 1166 1167 /* step 14. Issue Transmit Pending command. */ 1168 1169 /* step 15. Give the receiver a swift kick. */ 1170 CAS_WRITE_4(sc, CAS_RX_KICK, CAS_NRXDESC - 4); 1171 CAS_WRITE_4(sc, CAS_RX_COMP_TAIL, 0); 1172 if ((sc->sc_flags & CAS_REG_PLUS) != 0) 1173 CAS_WRITE_4(sc, CAS_RX_KICK2, CAS_NRXDESC2 - 4); 1174 1175 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1176 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1177 1178 mii_mediachg(sc->sc_mii); 1179 1180 /* Start the one second timer. */ 1181 sc->sc_wdog_timer = 0; 1182 callout_reset(&sc->sc_tick_ch, hz, cas_tick, sc); 1183 } 1184 1185 static int 1186 cas_load_txmbuf(struct cas_softc *sc, struct mbuf **m_head) 1187 { 1188 bus_dma_segment_t txsegs[CAS_NTXSEGS]; 1189 struct cas_txsoft *txs; 1190 struct ip *ip; 1191 struct mbuf *m; 1192 uint64_t cflags; 1193 int error, nexttx, nsegs, offset, seg; 1194 1195 CAS_LOCK_ASSERT(sc, MA_OWNED); 1196 1197 /* Get a work queue entry. */ 1198 if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) { 1199 /* Ran out of descriptors. */ 1200 return (ENOBUFS); 1201 } 1202 1203 cflags = 0; 1204 if (((*m_head)->m_pkthdr.csum_flags & CAS_CSUM_FEATURES) != 0) { 1205 if (M_WRITABLE(*m_head) == 0) { 1206 m = m_dup(*m_head, M_NOWAIT); 1207 m_freem(*m_head); 1208 *m_head = m; 1209 if (m == NULL) 1210 return (ENOBUFS); 1211 } 1212 offset = sizeof(struct ether_header); 1213 m = m_pullup(*m_head, offset + sizeof(struct ip)); 1214 if (m == NULL) { 1215 *m_head = NULL; 1216 return (ENOBUFS); 1217 } 1218 ip = (struct ip *)(mtod(m, caddr_t) + offset); 1219 offset += (ip->ip_hl << 2); 1220 cflags = (offset << CAS_TD_CKSUM_START_SHFT) | 1221 ((offset + m->m_pkthdr.csum_data) << 1222 CAS_TD_CKSUM_STUFF_SHFT) | CAS_TD_CKSUM_EN; 1223 *m_head = m; 1224 } 1225 1226 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap, 1227 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 1228 if (error == EFBIG) { 1229 m = m_collapse(*m_head, M_NOWAIT, CAS_NTXSEGS); 1230 if (m == NULL) { 1231 m_freem(*m_head); 1232 *m_head = NULL; 1233 return (ENOBUFS); 1234 } 1235 *m_head = m; 1236 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, 1237 txs->txs_dmamap, *m_head, txsegs, &nsegs, 1238 BUS_DMA_NOWAIT); 1239 if (error != 0) { 1240 m_freem(*m_head); 1241 *m_head = NULL; 1242 return (error); 1243 } 1244 } else if (error != 0) 1245 return (error); 1246 /* If nsegs is wrong then the stack is corrupt. */ 1247 KASSERT(nsegs <= CAS_NTXSEGS, 1248 ("%s: too many DMA segments (%d)", __func__, nsegs)); 1249 if (nsegs == 0) { 1250 m_freem(*m_head); 1251 *m_head = NULL; 1252 return (EIO); 1253 } 1254 1255 /* 1256 * Ensure we have enough descriptors free to describe 1257 * the packet. Note, we always reserve one descriptor 1258 * at the end of the ring as a termination point, in 1259 * order to prevent wrap-around. 1260 */ 1261 if (nsegs > sc->sc_txfree - 1) { 1262 txs->txs_ndescs = 0; 1263 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 1264 return (ENOBUFS); 1265 } 1266 1267 txs->txs_ndescs = nsegs; 1268 txs->txs_firstdesc = sc->sc_txnext; 1269 nexttx = txs->txs_firstdesc; 1270 for (seg = 0; seg < nsegs; seg++, nexttx = CAS_NEXTTX(nexttx)) { 1271 #ifdef CAS_DEBUG 1272 CTR6(KTR_CAS, 1273 "%s: mapping seg %d (txd %d), len %lx, addr %#lx (%#lx)", 1274 __func__, seg, nexttx, txsegs[seg].ds_len, 1275 txsegs[seg].ds_addr, htole64(txsegs[seg].ds_addr)); 1276 #endif 1277 sc->sc_txdescs[nexttx].cd_buf_ptr = 1278 htole64(txsegs[seg].ds_addr); 1279 KASSERT(txsegs[seg].ds_len < 1280 CAS_TD_BUF_LEN_MASK >> CAS_TD_BUF_LEN_SHFT, 1281 ("%s: segment size too large!", __func__)); 1282 sc->sc_txdescs[nexttx].cd_flags = 1283 htole64(txsegs[seg].ds_len << CAS_TD_BUF_LEN_SHFT); 1284 txs->txs_lastdesc = nexttx; 1285 } 1286 1287 /* Set EOF on the last descriptor. */ 1288 #ifdef CAS_DEBUG 1289 CTR3(KTR_CAS, "%s: end of frame at segment %d, TX %d", 1290 __func__, seg, nexttx); 1291 #endif 1292 sc->sc_txdescs[txs->txs_lastdesc].cd_flags |= 1293 htole64(CAS_TD_END_OF_FRAME); 1294 1295 /* Lastly set SOF on the first descriptor. */ 1296 #ifdef CAS_DEBUG 1297 CTR3(KTR_CAS, "%s: start of frame at segment %d, TX %d", 1298 __func__, seg, nexttx); 1299 #endif 1300 if (sc->sc_txwin += nsegs > CAS_MAXTXFREE * 2 / 3) { 1301 sc->sc_txwin = 0; 1302 sc->sc_txdescs[txs->txs_firstdesc].cd_flags |= 1303 htole64(cflags | CAS_TD_START_OF_FRAME | CAS_TD_INT_ME); 1304 } else 1305 sc->sc_txdescs[txs->txs_firstdesc].cd_flags |= 1306 htole64(cflags | CAS_TD_START_OF_FRAME); 1307 1308 /* Sync the DMA map. */ 1309 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 1310 BUS_DMASYNC_PREWRITE); 1311 1312 #ifdef CAS_DEBUG 1313 CTR4(KTR_CAS, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d", 1314 __func__, txs->txs_firstdesc, txs->txs_lastdesc, 1315 txs->txs_ndescs); 1316 #endif 1317 STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q); 1318 STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q); 1319 txs->txs_mbuf = *m_head; 1320 1321 sc->sc_txnext = CAS_NEXTTX(txs->txs_lastdesc); 1322 sc->sc_txfree -= txs->txs_ndescs; 1323 1324 return (0); 1325 } 1326 1327 static void 1328 cas_init_regs(struct cas_softc *sc) 1329 { 1330 int i; 1331 const u_char *laddr = IF_LLADDR(sc->sc_ifp); 1332 1333 CAS_LOCK_ASSERT(sc, MA_OWNED); 1334 1335 /* These registers are not cleared on reset. */ 1336 if ((sc->sc_flags & CAS_INITED) == 0) { 1337 /* magic values */ 1338 CAS_WRITE_4(sc, CAS_MAC_IPG0, 0); 1339 CAS_WRITE_4(sc, CAS_MAC_IPG1, 8); 1340 CAS_WRITE_4(sc, CAS_MAC_IPG2, 4); 1341 1342 /* min frame length */ 1343 CAS_WRITE_4(sc, CAS_MAC_MIN_FRAME, ETHER_MIN_LEN); 1344 /* max frame length and max burst size */ 1345 CAS_WRITE_4(sc, CAS_MAC_MAX_BF, 1346 ((ETHER_MAX_LEN_JUMBO + ETHER_VLAN_ENCAP_LEN) << 1347 CAS_MAC_MAX_BF_FRM_SHFT) | 1348 (0x2000 << CAS_MAC_MAX_BF_BST_SHFT)); 1349 1350 /* more magic values */ 1351 CAS_WRITE_4(sc, CAS_MAC_PREAMBLE_LEN, 0x7); 1352 CAS_WRITE_4(sc, CAS_MAC_JAM_SIZE, 0x4); 1353 CAS_WRITE_4(sc, CAS_MAC_ATTEMPT_LIMIT, 0x10); 1354 CAS_WRITE_4(sc, CAS_MAC_CTRL_TYPE, 0x8808); 1355 1356 /* random number seed */ 1357 CAS_WRITE_4(sc, CAS_MAC_RANDOM_SEED, 1358 ((laddr[5] << 8) | laddr[4]) & 0x3ff); 1359 1360 /* secondary MAC addresses: 0:0:0:0:0:0 */ 1361 for (i = CAS_MAC_ADDR3; i <= CAS_MAC_ADDR41; 1362 i += CAS_MAC_ADDR4 - CAS_MAC_ADDR3) 1363 CAS_WRITE_4(sc, i, 0); 1364 1365 /* MAC control address: 01:80:c2:00:00:01 */ 1366 CAS_WRITE_4(sc, CAS_MAC_ADDR42, 0x0001); 1367 CAS_WRITE_4(sc, CAS_MAC_ADDR43, 0xc200); 1368 CAS_WRITE_4(sc, CAS_MAC_ADDR44, 0x0180); 1369 1370 /* MAC filter address: 0:0:0:0:0:0 */ 1371 CAS_WRITE_4(sc, CAS_MAC_AFILTER0, 0); 1372 CAS_WRITE_4(sc, CAS_MAC_AFILTER1, 0); 1373 CAS_WRITE_4(sc, CAS_MAC_AFILTER2, 0); 1374 CAS_WRITE_4(sc, CAS_MAC_AFILTER_MASK1_2, 0); 1375 CAS_WRITE_4(sc, CAS_MAC_AFILTER_MASK0, 0); 1376 1377 /* Zero the hash table. */ 1378 for (i = CAS_MAC_HASH0; i <= CAS_MAC_HASH15; 1379 i += CAS_MAC_HASH1 - CAS_MAC_HASH0) 1380 CAS_WRITE_4(sc, i, 0); 1381 1382 sc->sc_flags |= CAS_INITED; 1383 } 1384 1385 /* Counters need to be zeroed. */ 1386 CAS_WRITE_4(sc, CAS_MAC_NORM_COLL_CNT, 0); 1387 CAS_WRITE_4(sc, CAS_MAC_FIRST_COLL_CNT, 0); 1388 CAS_WRITE_4(sc, CAS_MAC_EXCESS_COLL_CNT, 0); 1389 CAS_WRITE_4(sc, CAS_MAC_LATE_COLL_CNT, 0); 1390 CAS_WRITE_4(sc, CAS_MAC_DEFER_TMR_CNT, 0); 1391 CAS_WRITE_4(sc, CAS_MAC_PEAK_ATTEMPTS, 0); 1392 CAS_WRITE_4(sc, CAS_MAC_RX_FRAME_COUNT, 0); 1393 CAS_WRITE_4(sc, CAS_MAC_RX_LEN_ERR_CNT, 0); 1394 CAS_WRITE_4(sc, CAS_MAC_RX_ALIGN_ERR, 0); 1395 CAS_WRITE_4(sc, CAS_MAC_RX_CRC_ERR_CNT, 0); 1396 CAS_WRITE_4(sc, CAS_MAC_RX_CODE_VIOL, 0); 1397 1398 /* Set XOFF PAUSE time. */ 1399 CAS_WRITE_4(sc, CAS_MAC_SPC, 0x1BF0 << CAS_MAC_SPC_TIME_SHFT); 1400 1401 /* Set the station address. */ 1402 CAS_WRITE_4(sc, CAS_MAC_ADDR0, (laddr[4] << 8) | laddr[5]); 1403 CAS_WRITE_4(sc, CAS_MAC_ADDR1, (laddr[2] << 8) | laddr[3]); 1404 CAS_WRITE_4(sc, CAS_MAC_ADDR2, (laddr[0] << 8) | laddr[1]); 1405 1406 /* Enable MII outputs. */ 1407 CAS_WRITE_4(sc, CAS_MAC_XIF_CONF, CAS_MAC_XIF_CONF_TX_OE); 1408 } 1409 1410 static void 1411 cas_tx_task(void *arg, int pending __unused) 1412 { 1413 struct ifnet *ifp; 1414 1415 ifp = (struct ifnet *)arg; 1416 cas_start(ifp); 1417 } 1418 1419 static inline void 1420 cas_txkick(struct cas_softc *sc) 1421 { 1422 1423 /* 1424 * Update the TX kick register. This register has to point to the 1425 * descriptor after the last valid one and for optimum performance 1426 * should be incremented in multiples of 4 (the DMA engine fetches/ 1427 * updates descriptors in batches of 4). 1428 */ 1429 #ifdef CAS_DEBUG 1430 CTR3(KTR_CAS, "%s: %s: kicking TX %d", 1431 device_get_name(sc->sc_dev), __func__, sc->sc_txnext); 1432 #endif 1433 CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1434 CAS_WRITE_4(sc, CAS_TX_KICK3, sc->sc_txnext); 1435 } 1436 1437 static void 1438 cas_start(struct ifnet *ifp) 1439 { 1440 struct cas_softc *sc = ifp->if_softc; 1441 struct mbuf *m; 1442 int kicked, ntx; 1443 1444 CAS_LOCK(sc); 1445 1446 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1447 IFF_DRV_RUNNING || (sc->sc_flags & CAS_LINK) == 0) { 1448 CAS_UNLOCK(sc); 1449 return; 1450 } 1451 1452 if (sc->sc_txfree < CAS_MAXTXFREE / 4) 1453 cas_tint(sc); 1454 1455 #ifdef CAS_DEBUG 1456 CTR4(KTR_CAS, "%s: %s: txfree %d, txnext %d", 1457 device_get_name(sc->sc_dev), __func__, sc->sc_txfree, 1458 sc->sc_txnext); 1459 #endif 1460 ntx = 0; 1461 kicked = 0; 1462 for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->sc_txfree > 1;) { 1463 IFQ_DRV_DEQUEUE(&ifp->if_snd, m); 1464 if (m == NULL) 1465 break; 1466 if (cas_load_txmbuf(sc, &m) != 0) { 1467 if (m == NULL) 1468 break; 1469 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1470 IFQ_DRV_PREPEND(&ifp->if_snd, m); 1471 break; 1472 } 1473 if ((sc->sc_txnext % 4) == 0) { 1474 cas_txkick(sc); 1475 kicked = 1; 1476 } else 1477 kicked = 0; 1478 ntx++; 1479 BPF_MTAP(ifp, m); 1480 } 1481 1482 if (ntx > 0) { 1483 if (kicked == 0) 1484 cas_txkick(sc); 1485 #ifdef CAS_DEBUG 1486 CTR2(KTR_CAS, "%s: packets enqueued, OWN on %d", 1487 device_get_name(sc->sc_dev), sc->sc_txnext); 1488 #endif 1489 1490 /* Set a watchdog timer in case the chip flakes out. */ 1491 sc->sc_wdog_timer = 5; 1492 #ifdef CAS_DEBUG 1493 CTR3(KTR_CAS, "%s: %s: watchdog %d", 1494 device_get_name(sc->sc_dev), __func__, 1495 sc->sc_wdog_timer); 1496 #endif 1497 } 1498 1499 CAS_UNLOCK(sc); 1500 } 1501 1502 static void 1503 cas_tint(struct cas_softc *sc) 1504 { 1505 struct ifnet *ifp = sc->sc_ifp; 1506 struct cas_txsoft *txs; 1507 int progress; 1508 uint32_t txlast; 1509 #ifdef CAS_DEBUG 1510 int i; 1511 1512 CAS_LOCK_ASSERT(sc, MA_OWNED); 1513 1514 CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__); 1515 #endif 1516 1517 /* 1518 * Go through our TX list and free mbufs for those 1519 * frames that have been transmitted. 1520 */ 1521 progress = 0; 1522 CAS_CDSYNC(sc, BUS_DMASYNC_POSTREAD); 1523 while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 1524 #ifdef CAS_DEBUG 1525 if ((ifp->if_flags & IFF_DEBUG) != 0) { 1526 printf(" txsoft %p transmit chain:\n", txs); 1527 for (i = txs->txs_firstdesc;; i = CAS_NEXTTX(i)) { 1528 printf("descriptor %d: ", i); 1529 printf("cd_flags: 0x%016llx\t", 1530 (long long)le64toh( 1531 sc->sc_txdescs[i].cd_flags)); 1532 printf("cd_buf_ptr: 0x%016llx\n", 1533 (long long)le64toh( 1534 sc->sc_txdescs[i].cd_buf_ptr)); 1535 if (i == txs->txs_lastdesc) 1536 break; 1537 } 1538 } 1539 #endif 1540 1541 /* 1542 * In theory, we could harvest some descriptors before 1543 * the ring is empty, but that's a bit complicated. 1544 * 1545 * CAS_TX_COMPn points to the last descriptor 1546 * processed + 1. 1547 */ 1548 txlast = CAS_READ_4(sc, CAS_TX_COMP3); 1549 #ifdef CAS_DEBUG 1550 CTR4(KTR_CAS, "%s: txs->txs_firstdesc = %d, " 1551 "txs->txs_lastdesc = %d, txlast = %d", 1552 __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast); 1553 #endif 1554 if (txs->txs_firstdesc <= txs->txs_lastdesc) { 1555 if ((txlast >= txs->txs_firstdesc) && 1556 (txlast <= txs->txs_lastdesc)) 1557 break; 1558 } else { 1559 /* Ick -- this command wraps. */ 1560 if ((txlast >= txs->txs_firstdesc) || 1561 (txlast <= txs->txs_lastdesc)) 1562 break; 1563 } 1564 1565 #ifdef CAS_DEBUG 1566 CTR1(KTR_CAS, "%s: releasing a descriptor", __func__); 1567 #endif 1568 STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 1569 1570 sc->sc_txfree += txs->txs_ndescs; 1571 1572 bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 1573 BUS_DMASYNC_POSTWRITE); 1574 bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 1575 if (txs->txs_mbuf != NULL) { 1576 m_freem(txs->txs_mbuf); 1577 txs->txs_mbuf = NULL; 1578 } 1579 1580 STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 1581 1582 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 1583 progress = 1; 1584 } 1585 1586 #ifdef CAS_DEBUG 1587 CTR5(KTR_CAS, "%s: CAS_TX_SM1 %x CAS_TX_SM2 %x CAS_TX_DESC_BASE %llx " 1588 "CAS_TX_COMP3 %x", 1589 __func__, CAS_READ_4(sc, CAS_TX_SM1), CAS_READ_4(sc, CAS_TX_SM2), 1590 ((long long)CAS_READ_4(sc, CAS_TX_DESC3_BASE_HI) << 32) | 1591 CAS_READ_4(sc, CAS_TX_DESC3_BASE_LO), 1592 CAS_READ_4(sc, CAS_TX_COMP3)); 1593 #endif 1594 1595 if (progress) { 1596 /* We freed some descriptors, so reset IFF_DRV_OACTIVE. */ 1597 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1598 if (STAILQ_EMPTY(&sc->sc_txdirtyq)) 1599 sc->sc_wdog_timer = 0; 1600 } 1601 1602 #ifdef CAS_DEBUG 1603 CTR3(KTR_CAS, "%s: %s: watchdog %d", 1604 device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer); 1605 #endif 1606 } 1607 1608 static void 1609 cas_rint_timeout(void *arg) 1610 { 1611 struct epoch_tracker et; 1612 struct cas_softc *sc = arg; 1613 1614 CAS_LOCK_ASSERT(sc, MA_OWNED); 1615 1616 NET_EPOCH_ENTER(et); 1617 cas_rint(sc); 1618 NET_EPOCH_EXIT(et); 1619 } 1620 1621 static void 1622 cas_rint(struct cas_softc *sc) 1623 { 1624 struct cas_rxdsoft *rxds, *rxds2; 1625 struct ifnet *ifp = sc->sc_ifp; 1626 struct mbuf *m, *m2; 1627 uint64_t word1, word2, word3, word4; 1628 uint32_t rxhead; 1629 u_int idx, idx2, len, off, skip; 1630 1631 CAS_LOCK_ASSERT(sc, MA_OWNED); 1632 1633 callout_stop(&sc->sc_rx_ch); 1634 1635 #ifdef CAS_DEBUG 1636 CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__); 1637 #endif 1638 1639 #define PRINTWORD(n, delimiter) \ 1640 printf("word ## n: 0x%016llx%c", (long long)word ## n, delimiter) 1641 1642 #define SKIPASSERT(n) \ 1643 KASSERT(sc->sc_rxcomps[sc->sc_rxcptr].crc_word ## n == 0, \ 1644 ("%s: word ## n not 0", __func__)) 1645 1646 #define WORDTOH(n) \ 1647 word ## n = le64toh(sc->sc_rxcomps[sc->sc_rxcptr].crc_word ## n) 1648 1649 /* 1650 * Read the completion head register once. This limits 1651 * how long the following loop can execute. 1652 */ 1653 rxhead = CAS_READ_4(sc, CAS_RX_COMP_HEAD); 1654 #ifdef CAS_DEBUG 1655 CTR4(KTR_CAS, "%s: sc->sc_rxcptr %d, sc->sc_rxdptr %d, head %d", 1656 __func__, sc->sc_rxcptr, sc->sc_rxdptr, rxhead); 1657 #endif 1658 skip = 0; 1659 CAS_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1660 for (; sc->sc_rxcptr != rxhead; 1661 sc->sc_rxcptr = CAS_NEXTRXCOMP(sc->sc_rxcptr)) { 1662 if (skip != 0) { 1663 SKIPASSERT(1); 1664 SKIPASSERT(2); 1665 SKIPASSERT(3); 1666 1667 --skip; 1668 goto skip; 1669 } 1670 1671 WORDTOH(1); 1672 WORDTOH(2); 1673 WORDTOH(3); 1674 WORDTOH(4); 1675 1676 #ifdef CAS_DEBUG 1677 if ((ifp->if_flags & IFF_DEBUG) != 0) { 1678 printf(" completion %d: ", sc->sc_rxcptr); 1679 PRINTWORD(1, '\t'); 1680 PRINTWORD(2, '\t'); 1681 PRINTWORD(3, '\t'); 1682 PRINTWORD(4, '\n'); 1683 } 1684 #endif 1685 1686 if (__predict_false( 1687 (word1 & CAS_RC1_TYPE_MASK) == CAS_RC1_TYPE_HW || 1688 (word4 & CAS_RC4_ZERO) != 0)) { 1689 /* 1690 * The descriptor is still marked as owned, although 1691 * it is supposed to have completed. This has been 1692 * observed on some machines. Just exiting here 1693 * might leave the packet sitting around until another 1694 * one arrives to trigger a new interrupt, which is 1695 * generally undesirable, so set up a timeout. 1696 */ 1697 callout_reset(&sc->sc_rx_ch, CAS_RXOWN_TICKS, 1698 cas_rint_timeout, sc); 1699 break; 1700 } 1701 1702 if (__predict_false( 1703 (word4 & (CAS_RC4_BAD | CAS_RC4_LEN_MMATCH)) != 0)) { 1704 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 1705 device_printf(sc->sc_dev, 1706 "receive error: CRC error\n"); 1707 continue; 1708 } 1709 1710 KASSERT(CAS_GET(word1, CAS_RC1_DATA_SIZE) == 0 || 1711 CAS_GET(word2, CAS_RC2_HDR_SIZE) == 0, 1712 ("%s: data and header present", __func__)); 1713 KASSERT((word1 & CAS_RC1_SPLIT_PKT) == 0 || 1714 CAS_GET(word2, CAS_RC2_HDR_SIZE) == 0, 1715 ("%s: split and header present", __func__)); 1716 KASSERT(CAS_GET(word1, CAS_RC1_DATA_SIZE) == 0 || 1717 (word1 & CAS_RC1_RELEASE_HDR) == 0, 1718 ("%s: data present but header release", __func__)); 1719 KASSERT(CAS_GET(word2, CAS_RC2_HDR_SIZE) == 0 || 1720 (word1 & CAS_RC1_RELEASE_DATA) == 0, 1721 ("%s: header present but data release", __func__)); 1722 1723 if ((len = CAS_GET(word2, CAS_RC2_HDR_SIZE)) != 0) { 1724 idx = CAS_GET(word2, CAS_RC2_HDR_INDEX); 1725 off = CAS_GET(word2, CAS_RC2_HDR_OFF); 1726 #ifdef CAS_DEBUG 1727 CTR4(KTR_CAS, "%s: hdr at idx %d, off %d, len %d", 1728 __func__, idx, off, len); 1729 #endif 1730 rxds = &sc->sc_rxdsoft[idx]; 1731 MGETHDR(m, M_NOWAIT, MT_DATA); 1732 if (m != NULL) { 1733 refcount_acquire(&rxds->rxds_refcount); 1734 bus_dmamap_sync(sc->sc_rdmatag, 1735 rxds->rxds_dmamap, BUS_DMASYNC_POSTREAD); 1736 m_extadd(m, (char *)rxds->rxds_buf + 1737 off * 256 + ETHER_ALIGN, len, cas_free, 1738 sc, (void *)(uintptr_t)idx, 1739 M_RDONLY, EXT_NET_DRV); 1740 if ((m->m_flags & M_EXT) == 0) { 1741 m_freem(m); 1742 m = NULL; 1743 } 1744 } 1745 if (m != NULL) { 1746 m->m_pkthdr.rcvif = ifp; 1747 m->m_pkthdr.len = m->m_len = len; 1748 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 1749 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 1750 cas_rxcksum(m, CAS_GET(word4, 1751 CAS_RC4_TCP_CSUM)); 1752 /* Pass it on. */ 1753 CAS_UNLOCK(sc); 1754 (*ifp->if_input)(ifp, m); 1755 CAS_LOCK(sc); 1756 } else 1757 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 1758 1759 if ((word1 & CAS_RC1_RELEASE_HDR) != 0 && 1760 refcount_release(&rxds->rxds_refcount) != 0) 1761 cas_add_rxdesc(sc, idx); 1762 } else if ((len = CAS_GET(word1, CAS_RC1_DATA_SIZE)) != 0) { 1763 idx = CAS_GET(word1, CAS_RC1_DATA_INDEX); 1764 off = CAS_GET(word1, CAS_RC1_DATA_OFF); 1765 #ifdef CAS_DEBUG 1766 CTR4(KTR_CAS, "%s: data at idx %d, off %d, len %d", 1767 __func__, idx, off, len); 1768 #endif 1769 rxds = &sc->sc_rxdsoft[idx]; 1770 MGETHDR(m, M_NOWAIT, MT_DATA); 1771 if (m != NULL) { 1772 refcount_acquire(&rxds->rxds_refcount); 1773 off += ETHER_ALIGN; 1774 m->m_len = min(CAS_PAGE_SIZE - off, len); 1775 bus_dmamap_sync(sc->sc_rdmatag, 1776 rxds->rxds_dmamap, BUS_DMASYNC_POSTREAD); 1777 m_extadd(m, (char *)rxds->rxds_buf + off, 1778 m->m_len, cas_free, sc, 1779 (void *)(uintptr_t)idx, M_RDONLY, 1780 EXT_NET_DRV); 1781 if ((m->m_flags & M_EXT) == 0) { 1782 m_freem(m); 1783 m = NULL; 1784 } 1785 } 1786 idx2 = 0; 1787 m2 = NULL; 1788 rxds2 = NULL; 1789 if ((word1 & CAS_RC1_SPLIT_PKT) != 0) { 1790 KASSERT((word1 & CAS_RC1_RELEASE_NEXT) != 0, 1791 ("%s: split but no release next", 1792 __func__)); 1793 1794 idx2 = CAS_GET(word2, CAS_RC2_NEXT_INDEX); 1795 #ifdef CAS_DEBUG 1796 CTR2(KTR_CAS, "%s: split at idx %d", 1797 __func__, idx2); 1798 #endif 1799 rxds2 = &sc->sc_rxdsoft[idx2]; 1800 if (m != NULL) { 1801 MGET(m2, M_NOWAIT, MT_DATA); 1802 if (m2 != NULL) { 1803 refcount_acquire( 1804 &rxds2->rxds_refcount); 1805 m2->m_len = len - m->m_len; 1806 bus_dmamap_sync( 1807 sc->sc_rdmatag, 1808 rxds2->rxds_dmamap, 1809 BUS_DMASYNC_POSTREAD); 1810 m_extadd(m2, 1811 (char *)rxds2->rxds_buf, 1812 m2->m_len, cas_free, sc, 1813 (void *)(uintptr_t)idx2, 1814 M_RDONLY, EXT_NET_DRV); 1815 if ((m2->m_flags & M_EXT) == 1816 0) { 1817 m_freem(m2); 1818 m2 = NULL; 1819 } 1820 } 1821 } 1822 if (m2 != NULL) 1823 m->m_next = m2; 1824 else if (m != NULL) { 1825 m_freem(m); 1826 m = NULL; 1827 } 1828 } 1829 if (m != NULL) { 1830 m->m_pkthdr.rcvif = ifp; 1831 m->m_pkthdr.len = len; 1832 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 1833 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 1834 cas_rxcksum(m, CAS_GET(word4, 1835 CAS_RC4_TCP_CSUM)); 1836 /* Pass it on. */ 1837 CAS_UNLOCK(sc); 1838 (*ifp->if_input)(ifp, m); 1839 CAS_LOCK(sc); 1840 } else 1841 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 1842 1843 if ((word1 & CAS_RC1_RELEASE_DATA) != 0 && 1844 refcount_release(&rxds->rxds_refcount) != 0) 1845 cas_add_rxdesc(sc, idx); 1846 if ((word1 & CAS_RC1_SPLIT_PKT) != 0 && 1847 refcount_release(&rxds2->rxds_refcount) != 0) 1848 cas_add_rxdesc(sc, idx2); 1849 } 1850 1851 skip = CAS_GET(word1, CAS_RC1_SKIP); 1852 1853 skip: 1854 cas_rxcompinit(&sc->sc_rxcomps[sc->sc_rxcptr]); 1855 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1856 break; 1857 } 1858 CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1859 CAS_WRITE_4(sc, CAS_RX_COMP_TAIL, sc->sc_rxcptr); 1860 1861 #undef PRINTWORD 1862 #undef SKIPASSERT 1863 #undef WORDTOH 1864 1865 #ifdef CAS_DEBUG 1866 CTR4(KTR_CAS, "%s: done sc->sc_rxcptr %d, sc->sc_rxdptr %d, head %d", 1867 __func__, sc->sc_rxcptr, sc->sc_rxdptr, 1868 CAS_READ_4(sc, CAS_RX_COMP_HEAD)); 1869 #endif 1870 } 1871 1872 static void 1873 cas_free(struct mbuf *m) 1874 { 1875 struct cas_rxdsoft *rxds; 1876 struct cas_softc *sc; 1877 u_int idx, locked; 1878 1879 sc = m->m_ext.ext_arg1; 1880 idx = (uintptr_t)m->m_ext.ext_arg2; 1881 rxds = &sc->sc_rxdsoft[idx]; 1882 if (refcount_release(&rxds->rxds_refcount) == 0) 1883 return; 1884 1885 /* 1886 * NB: this function can be called via m_freem(9) within 1887 * this driver! 1888 */ 1889 if ((locked = CAS_LOCK_OWNED(sc)) == 0) 1890 CAS_LOCK(sc); 1891 cas_add_rxdesc(sc, idx); 1892 if (locked == 0) 1893 CAS_UNLOCK(sc); 1894 } 1895 1896 static inline void 1897 cas_add_rxdesc(struct cas_softc *sc, u_int idx) 1898 { 1899 1900 CAS_LOCK_ASSERT(sc, MA_OWNED); 1901 1902 bus_dmamap_sync(sc->sc_rdmatag, sc->sc_rxdsoft[idx].rxds_dmamap, 1903 BUS_DMASYNC_PREREAD); 1904 CAS_UPDATE_RXDESC(sc, sc->sc_rxdptr, idx); 1905 sc->sc_rxdptr = CAS_NEXTRXDESC(sc->sc_rxdptr); 1906 1907 /* 1908 * Update the RX kick register. This register has to point to the 1909 * descriptor after the last valid one (before the current batch) 1910 * and for optimum performance should be incremented in multiples 1911 * of 4 (the DMA engine fetches/updates descriptors in batches of 4). 1912 */ 1913 if ((sc->sc_rxdptr % 4) == 0) { 1914 CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1915 CAS_WRITE_4(sc, CAS_RX_KICK, 1916 (sc->sc_rxdptr + CAS_NRXDESC - 4) & CAS_NRXDESC_MASK); 1917 } 1918 } 1919 1920 static void 1921 cas_eint(struct cas_softc *sc, u_int status) 1922 { 1923 struct ifnet *ifp = sc->sc_ifp; 1924 1925 CAS_LOCK_ASSERT(sc, MA_OWNED); 1926 1927 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 1928 1929 device_printf(sc->sc_dev, "%s: status 0x%x", __func__, status); 1930 if ((status & CAS_INTR_PCI_ERROR_INT) != 0) { 1931 status = CAS_READ_4(sc, CAS_ERROR_STATUS); 1932 printf(", PCI bus error 0x%x", status); 1933 if ((status & CAS_ERROR_OTHER) != 0) { 1934 status = pci_read_config(sc->sc_dev, PCIR_STATUS, 2); 1935 printf(", PCI status 0x%x", status); 1936 pci_write_config(sc->sc_dev, PCIR_STATUS, status, 2); 1937 } 1938 } 1939 printf("\n"); 1940 1941 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1942 cas_init_locked(sc); 1943 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1944 taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task); 1945 } 1946 1947 static int 1948 cas_intr(void *v) 1949 { 1950 struct cas_softc *sc = v; 1951 1952 if (__predict_false((CAS_READ_4(sc, CAS_STATUS_ALIAS) & 1953 CAS_INTR_SUMMARY) == 0)) 1954 return (FILTER_STRAY); 1955 1956 /* Disable interrupts. */ 1957 CAS_WRITE_4(sc, CAS_INTMASK, 0xffffffff); 1958 taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task); 1959 1960 return (FILTER_HANDLED); 1961 } 1962 1963 static void 1964 cas_intr_task(void *arg, int pending __unused) 1965 { 1966 struct cas_softc *sc = arg; 1967 struct ifnet *ifp = sc->sc_ifp; 1968 uint32_t status, status2; 1969 1970 CAS_LOCK_ASSERT(sc, MA_NOTOWNED); 1971 1972 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1973 return; 1974 1975 status = CAS_READ_4(sc, CAS_STATUS); 1976 if (__predict_false((status & CAS_INTR_SUMMARY) == 0)) 1977 goto done; 1978 1979 CAS_LOCK(sc); 1980 #ifdef CAS_DEBUG 1981 CTR4(KTR_CAS, "%s: %s: cplt %x, status %x", 1982 device_get_name(sc->sc_dev), __func__, 1983 (status >> CAS_STATUS_TX_COMP3_SHFT), (u_int)status); 1984 1985 /* 1986 * PCS interrupts must be cleared, otherwise no traffic is passed! 1987 */ 1988 if ((status & CAS_INTR_PCS_INT) != 0) { 1989 status2 = 1990 CAS_READ_4(sc, CAS_PCS_INTR_STATUS) | 1991 CAS_READ_4(sc, CAS_PCS_INTR_STATUS); 1992 if ((status2 & CAS_PCS_INTR_LINK) != 0) 1993 device_printf(sc->sc_dev, 1994 "%s: PCS link status changed\n", __func__); 1995 } 1996 if ((status & CAS_MAC_CTRL_STATUS) != 0) { 1997 status2 = CAS_READ_4(sc, CAS_MAC_CTRL_STATUS); 1998 if ((status2 & CAS_MAC_CTRL_PAUSE) != 0) 1999 device_printf(sc->sc_dev, 2000 "%s: PAUSE received (PAUSE time %d slots)\n", 2001 __func__, 2002 (status2 & CAS_MAC_CTRL_STATUS_PT_MASK) >> 2003 CAS_MAC_CTRL_STATUS_PT_SHFT); 2004 if ((status2 & CAS_MAC_CTRL_PAUSE) != 0) 2005 device_printf(sc->sc_dev, 2006 "%s: transited to PAUSE state\n", __func__); 2007 if ((status2 & CAS_MAC_CTRL_NON_PAUSE) != 0) 2008 device_printf(sc->sc_dev, 2009 "%s: transited to non-PAUSE state\n", __func__); 2010 } 2011 if ((status & CAS_INTR_MIF) != 0) 2012 device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__); 2013 #endif 2014 2015 if (__predict_false((status & 2016 (CAS_INTR_TX_TAG_ERR | CAS_INTR_RX_TAG_ERR | 2017 CAS_INTR_RX_LEN_MMATCH | CAS_INTR_PCI_ERROR_INT)) != 0)) { 2018 cas_eint(sc, status); 2019 CAS_UNLOCK(sc); 2020 return; 2021 } 2022 2023 if (__predict_false(status & CAS_INTR_TX_MAC_INT)) { 2024 status2 = CAS_READ_4(sc, CAS_MAC_TX_STATUS); 2025 if ((status2 & 2026 (CAS_MAC_TX_UNDERRUN | CAS_MAC_TX_MAX_PKT_ERR)) != 0) 2027 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2028 else if ((status2 & ~CAS_MAC_TX_FRAME_XMTD) != 0) 2029 device_printf(sc->sc_dev, 2030 "MAC TX fault, status %x\n", status2); 2031 } 2032 2033 if (__predict_false(status & CAS_INTR_RX_MAC_INT)) { 2034 status2 = CAS_READ_4(sc, CAS_MAC_RX_STATUS); 2035 if ((status2 & CAS_MAC_RX_OVERFLOW) != 0) 2036 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 2037 else if ((status2 & ~CAS_MAC_RX_FRAME_RCVD) != 0) 2038 device_printf(sc->sc_dev, 2039 "MAC RX fault, status %x\n", status2); 2040 } 2041 2042 if ((status & 2043 (CAS_INTR_RX_DONE | CAS_INTR_RX_BUF_NA | CAS_INTR_RX_COMP_FULL | 2044 CAS_INTR_RX_BUF_AEMPTY | CAS_INTR_RX_COMP_AFULL)) != 0) { 2045 cas_rint(sc); 2046 #ifdef CAS_DEBUG 2047 if (__predict_false((status & 2048 (CAS_INTR_RX_BUF_NA | CAS_INTR_RX_COMP_FULL | 2049 CAS_INTR_RX_BUF_AEMPTY | CAS_INTR_RX_COMP_AFULL)) != 0)) 2050 device_printf(sc->sc_dev, 2051 "RX fault, status %x\n", status); 2052 #endif 2053 } 2054 2055 if ((status & 2056 (CAS_INTR_TX_INT_ME | CAS_INTR_TX_ALL | CAS_INTR_TX_DONE)) != 0) 2057 cas_tint(sc); 2058 2059 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 2060 CAS_UNLOCK(sc); 2061 return; 2062 } else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2063 taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task); 2064 CAS_UNLOCK(sc); 2065 2066 status = CAS_READ_4(sc, CAS_STATUS_ALIAS); 2067 if (__predict_false((status & CAS_INTR_SUMMARY) != 0)) { 2068 taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task); 2069 return; 2070 } 2071 2072 done: 2073 /* Re-enable interrupts. */ 2074 CAS_WRITE_4(sc, CAS_INTMASK, 2075 ~(CAS_INTR_TX_INT_ME | CAS_INTR_TX_TAG_ERR | 2076 CAS_INTR_RX_DONE | CAS_INTR_RX_BUF_NA | CAS_INTR_RX_TAG_ERR | 2077 CAS_INTR_RX_COMP_FULL | CAS_INTR_RX_BUF_AEMPTY | 2078 CAS_INTR_RX_COMP_AFULL | CAS_INTR_RX_LEN_MMATCH | 2079 CAS_INTR_PCI_ERROR_INT 2080 #ifdef CAS_DEBUG 2081 | CAS_INTR_PCS_INT | CAS_INTR_MIF 2082 #endif 2083 )); 2084 } 2085 2086 static void 2087 cas_watchdog(struct cas_softc *sc) 2088 { 2089 struct ifnet *ifp = sc->sc_ifp; 2090 2091 CAS_LOCK_ASSERT(sc, MA_OWNED); 2092 2093 #ifdef CAS_DEBUG 2094 CTR4(KTR_CAS, 2095 "%s: CAS_RX_CONF %x CAS_MAC_RX_STATUS %x CAS_MAC_RX_CONF %x", 2096 __func__, CAS_READ_4(sc, CAS_RX_CONF), 2097 CAS_READ_4(sc, CAS_MAC_RX_STATUS), 2098 CAS_READ_4(sc, CAS_MAC_RX_CONF)); 2099 CTR4(KTR_CAS, 2100 "%s: CAS_TX_CONF %x CAS_MAC_TX_STATUS %x CAS_MAC_TX_CONF %x", 2101 __func__, CAS_READ_4(sc, CAS_TX_CONF), 2102 CAS_READ_4(sc, CAS_MAC_TX_STATUS), 2103 CAS_READ_4(sc, CAS_MAC_TX_CONF)); 2104 #endif 2105 2106 if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0) 2107 return; 2108 2109 if ((sc->sc_flags & CAS_LINK) != 0) 2110 device_printf(sc->sc_dev, "device timeout\n"); 2111 else if (bootverbose) 2112 device_printf(sc->sc_dev, "device timeout (no link)\n"); 2113 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2114 2115 /* Try to get more packets going. */ 2116 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2117 cas_init_locked(sc); 2118 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2119 taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task); 2120 } 2121 2122 static void 2123 cas_mifinit(struct cas_softc *sc) 2124 { 2125 2126 /* Configure the MIF in frame mode. */ 2127 CAS_WRITE_4(sc, CAS_MIF_CONF, 2128 CAS_READ_4(sc, CAS_MIF_CONF) & ~CAS_MIF_CONF_BB_MODE); 2129 CAS_BARRIER(sc, CAS_MIF_CONF, 4, 2130 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 2131 } 2132 2133 /* 2134 * MII interface 2135 * 2136 * The MII interface supports at least three different operating modes: 2137 * 2138 * Bitbang mode is implemented using data, clock and output enable registers. 2139 * 2140 * Frame mode is implemented by loading a complete frame into the frame 2141 * register and polling the valid bit for completion. 2142 * 2143 * Polling mode uses the frame register but completion is indicated by 2144 * an interrupt. 2145 * 2146 */ 2147 static int 2148 cas_mii_readreg(device_t dev, int phy, int reg) 2149 { 2150 struct cas_softc *sc; 2151 int n; 2152 uint32_t v; 2153 2154 #ifdef CAS_DEBUG_PHY 2155 printf("%s: phy %d reg %d\n", __func__, phy, reg); 2156 #endif 2157 2158 sc = device_get_softc(dev); 2159 if ((sc->sc_flags & CAS_SERDES) != 0) { 2160 switch (reg) { 2161 case MII_BMCR: 2162 reg = CAS_PCS_CTRL; 2163 break; 2164 case MII_BMSR: 2165 reg = CAS_PCS_STATUS; 2166 break; 2167 case MII_PHYIDR1: 2168 case MII_PHYIDR2: 2169 return (0); 2170 case MII_ANAR: 2171 reg = CAS_PCS_ANAR; 2172 break; 2173 case MII_ANLPAR: 2174 reg = CAS_PCS_ANLPAR; 2175 break; 2176 case MII_EXTSR: 2177 return (EXTSR_1000XFDX | EXTSR_1000XHDX); 2178 default: 2179 device_printf(sc->sc_dev, 2180 "%s: unhandled register %d\n", __func__, reg); 2181 return (0); 2182 } 2183 return (CAS_READ_4(sc, reg)); 2184 } 2185 2186 /* Construct the frame command. */ 2187 v = CAS_MIF_FRAME_READ | 2188 (phy << CAS_MIF_FRAME_PHY_SHFT) | 2189 (reg << CAS_MIF_FRAME_REG_SHFT); 2190 2191 CAS_WRITE_4(sc, CAS_MIF_FRAME, v); 2192 CAS_BARRIER(sc, CAS_MIF_FRAME, 4, 2193 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 2194 for (n = 0; n < 100; n++) { 2195 DELAY(1); 2196 v = CAS_READ_4(sc, CAS_MIF_FRAME); 2197 if (v & CAS_MIF_FRAME_TA_LSB) 2198 return (v & CAS_MIF_FRAME_DATA); 2199 } 2200 2201 device_printf(sc->sc_dev, "%s: timed out\n", __func__); 2202 return (0); 2203 } 2204 2205 static int 2206 cas_mii_writereg(device_t dev, int phy, int reg, int val) 2207 { 2208 struct cas_softc *sc; 2209 int n; 2210 uint32_t v; 2211 2212 #ifdef CAS_DEBUG_PHY 2213 printf("%s: phy %d reg %d val %x\n", phy, reg, val, __func__); 2214 #endif 2215 2216 sc = device_get_softc(dev); 2217 if ((sc->sc_flags & CAS_SERDES) != 0) { 2218 switch (reg) { 2219 case MII_BMSR: 2220 reg = CAS_PCS_STATUS; 2221 break; 2222 case MII_BMCR: 2223 reg = CAS_PCS_CTRL; 2224 if ((val & CAS_PCS_CTRL_RESET) == 0) 2225 break; 2226 CAS_WRITE_4(sc, CAS_PCS_CTRL, val); 2227 CAS_BARRIER(sc, CAS_PCS_CTRL, 4, 2228 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 2229 if (!cas_bitwait(sc, CAS_PCS_CTRL, 2230 CAS_PCS_CTRL_RESET, 0)) 2231 device_printf(sc->sc_dev, 2232 "cannot reset PCS\n"); 2233 /* FALLTHROUGH */ 2234 case MII_ANAR: 2235 CAS_WRITE_4(sc, CAS_PCS_CONF, 0); 2236 CAS_BARRIER(sc, CAS_PCS_CONF, 4, 2237 BUS_SPACE_BARRIER_WRITE); 2238 CAS_WRITE_4(sc, CAS_PCS_ANAR, val); 2239 CAS_BARRIER(sc, CAS_PCS_ANAR, 4, 2240 BUS_SPACE_BARRIER_WRITE); 2241 CAS_WRITE_4(sc, CAS_PCS_SERDES_CTRL, 2242 CAS_PCS_SERDES_CTRL_ESD); 2243 CAS_BARRIER(sc, CAS_PCS_CONF, 4, 2244 BUS_SPACE_BARRIER_WRITE); 2245 CAS_WRITE_4(sc, CAS_PCS_CONF, 2246 CAS_PCS_CONF_EN); 2247 CAS_BARRIER(sc, CAS_PCS_CONF, 4, 2248 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 2249 return (0); 2250 case MII_ANLPAR: 2251 reg = CAS_PCS_ANLPAR; 2252 break; 2253 default: 2254 device_printf(sc->sc_dev, 2255 "%s: unhandled register %d\n", __func__, reg); 2256 return (0); 2257 } 2258 CAS_WRITE_4(sc, reg, val); 2259 CAS_BARRIER(sc, reg, 4, 2260 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 2261 return (0); 2262 } 2263 2264 /* Construct the frame command. */ 2265 v = CAS_MIF_FRAME_WRITE | 2266 (phy << CAS_MIF_FRAME_PHY_SHFT) | 2267 (reg << CAS_MIF_FRAME_REG_SHFT) | 2268 (val & CAS_MIF_FRAME_DATA); 2269 2270 CAS_WRITE_4(sc, CAS_MIF_FRAME, v); 2271 CAS_BARRIER(sc, CAS_MIF_FRAME, 4, 2272 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 2273 for (n = 0; n < 100; n++) { 2274 DELAY(1); 2275 v = CAS_READ_4(sc, CAS_MIF_FRAME); 2276 if (v & CAS_MIF_FRAME_TA_LSB) 2277 return (1); 2278 } 2279 2280 device_printf(sc->sc_dev, "%s: timed out\n", __func__); 2281 return (0); 2282 } 2283 2284 static void 2285 cas_mii_statchg(device_t dev) 2286 { 2287 struct cas_softc *sc; 2288 struct ifnet *ifp; 2289 int gigabit; 2290 uint32_t rxcfg, txcfg, v; 2291 2292 sc = device_get_softc(dev); 2293 ifp = sc->sc_ifp; 2294 2295 CAS_LOCK_ASSERT(sc, MA_OWNED); 2296 2297 #ifdef CAS_DEBUG 2298 if ((ifp->if_flags & IFF_DEBUG) != 0) 2299 device_printf(sc->sc_dev, "%s: status changen", __func__); 2300 #endif 2301 2302 if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 && 2303 IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE) 2304 sc->sc_flags |= CAS_LINK; 2305 else 2306 sc->sc_flags &= ~CAS_LINK; 2307 2308 switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) { 2309 case IFM_1000_SX: 2310 case IFM_1000_LX: 2311 case IFM_1000_CX: 2312 case IFM_1000_T: 2313 gigabit = 1; 2314 break; 2315 default: 2316 gigabit = 0; 2317 } 2318 2319 /* 2320 * The configuration done here corresponds to the steps F) and 2321 * G) and as far as enabling of RX and TX MAC goes also step H) 2322 * of the initialization sequence outlined in section 11.2.1 of 2323 * the Cassini+ ASIC Specification. 2324 */ 2325 2326 rxcfg = sc->sc_mac_rxcfg; 2327 rxcfg &= ~CAS_MAC_RX_CONF_CARR; 2328 txcfg = CAS_MAC_TX_CONF_EN_IPG0 | CAS_MAC_TX_CONF_NGU | 2329 CAS_MAC_TX_CONF_NGUL; 2330 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) 2331 txcfg |= CAS_MAC_TX_CONF_ICARR | CAS_MAC_TX_CONF_ICOLLIS; 2332 else if (gigabit != 0) { 2333 rxcfg |= CAS_MAC_RX_CONF_CARR; 2334 txcfg |= CAS_MAC_TX_CONF_CARR; 2335 } 2336 (void)cas_disable_tx(sc); 2337 CAS_WRITE_4(sc, CAS_MAC_TX_CONF, txcfg); 2338 (void)cas_disable_rx(sc); 2339 CAS_WRITE_4(sc, CAS_MAC_RX_CONF, rxcfg); 2340 2341 v = CAS_READ_4(sc, CAS_MAC_CTRL_CONF) & 2342 ~(CAS_MAC_CTRL_CONF_TXP | CAS_MAC_CTRL_CONF_RXP); 2343 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & 2344 IFM_ETH_RXPAUSE) != 0) 2345 v |= CAS_MAC_CTRL_CONF_RXP; 2346 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & 2347 IFM_ETH_TXPAUSE) != 0) 2348 v |= CAS_MAC_CTRL_CONF_TXP; 2349 CAS_WRITE_4(sc, CAS_MAC_CTRL_CONF, v); 2350 2351 /* 2352 * All supported chips have a bug causing incorrect checksum 2353 * to be calculated when letting them strip the FCS in half- 2354 * duplex mode. In theory we could disable FCS stripping and 2355 * manually adjust the checksum accordingly. It seems to make 2356 * more sense to optimze for the common case and just disable 2357 * hardware checksumming in half-duplex mode though. 2358 */ 2359 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0) { 2360 ifp->if_capenable &= ~IFCAP_HWCSUM; 2361 ifp->if_hwassist = 0; 2362 } else if ((sc->sc_flags & CAS_NO_CSUM) == 0) { 2363 ifp->if_capenable = ifp->if_capabilities; 2364 ifp->if_hwassist = CAS_CSUM_FEATURES; 2365 } 2366 2367 if (sc->sc_variant == CAS_SATURN) { 2368 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0) 2369 /* silicon bug workaround */ 2370 CAS_WRITE_4(sc, CAS_MAC_PREAMBLE_LEN, 0x41); 2371 else 2372 CAS_WRITE_4(sc, CAS_MAC_PREAMBLE_LEN, 0x7); 2373 } 2374 2375 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 && 2376 gigabit != 0) 2377 CAS_WRITE_4(sc, CAS_MAC_SLOT_TIME, 2378 CAS_MAC_SLOT_TIME_CARR); 2379 else 2380 CAS_WRITE_4(sc, CAS_MAC_SLOT_TIME, 2381 CAS_MAC_SLOT_TIME_NORM); 2382 2383 /* XIF Configuration */ 2384 v = CAS_MAC_XIF_CONF_TX_OE | CAS_MAC_XIF_CONF_LNKLED; 2385 if ((sc->sc_flags & CAS_SERDES) == 0) { 2386 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0) 2387 v |= CAS_MAC_XIF_CONF_NOECHO; 2388 v |= CAS_MAC_XIF_CONF_BUF_OE; 2389 } 2390 if (gigabit != 0) 2391 v |= CAS_MAC_XIF_CONF_GMII; 2392 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) 2393 v |= CAS_MAC_XIF_CONF_FDXLED; 2394 CAS_WRITE_4(sc, CAS_MAC_XIF_CONF, v); 2395 2396 sc->sc_mac_rxcfg = rxcfg; 2397 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && 2398 (sc->sc_flags & CAS_LINK) != 0) { 2399 CAS_WRITE_4(sc, CAS_MAC_TX_CONF, 2400 txcfg | CAS_MAC_TX_CONF_EN); 2401 CAS_WRITE_4(sc, CAS_MAC_RX_CONF, 2402 rxcfg | CAS_MAC_RX_CONF_EN); 2403 } 2404 } 2405 2406 static int 2407 cas_mediachange(struct ifnet *ifp) 2408 { 2409 struct cas_softc *sc = ifp->if_softc; 2410 int error; 2411 2412 /* XXX add support for serial media. */ 2413 2414 CAS_LOCK(sc); 2415 error = mii_mediachg(sc->sc_mii); 2416 CAS_UNLOCK(sc); 2417 return (error); 2418 } 2419 2420 static void 2421 cas_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 2422 { 2423 struct cas_softc *sc = ifp->if_softc; 2424 2425 CAS_LOCK(sc); 2426 if ((ifp->if_flags & IFF_UP) == 0) { 2427 CAS_UNLOCK(sc); 2428 return; 2429 } 2430 2431 mii_pollstat(sc->sc_mii); 2432 ifmr->ifm_active = sc->sc_mii->mii_media_active; 2433 ifmr->ifm_status = sc->sc_mii->mii_media_status; 2434 CAS_UNLOCK(sc); 2435 } 2436 2437 static int 2438 cas_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 2439 { 2440 struct cas_softc *sc = ifp->if_softc; 2441 struct ifreq *ifr = (struct ifreq *)data; 2442 int error; 2443 2444 error = 0; 2445 switch (cmd) { 2446 case SIOCSIFFLAGS: 2447 CAS_LOCK(sc); 2448 if ((ifp->if_flags & IFF_UP) != 0) { 2449 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && 2450 ((ifp->if_flags ^ sc->sc_ifflags) & 2451 (IFF_ALLMULTI | IFF_PROMISC)) != 0) 2452 cas_setladrf(sc); 2453 else 2454 cas_init_locked(sc); 2455 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2456 cas_stop(ifp); 2457 sc->sc_ifflags = ifp->if_flags; 2458 CAS_UNLOCK(sc); 2459 break; 2460 case SIOCSIFCAP: 2461 CAS_LOCK(sc); 2462 if ((sc->sc_flags & CAS_NO_CSUM) != 0) { 2463 error = EINVAL; 2464 CAS_UNLOCK(sc); 2465 break; 2466 } 2467 ifp->if_capenable = ifr->ifr_reqcap; 2468 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 2469 ifp->if_hwassist = CAS_CSUM_FEATURES; 2470 else 2471 ifp->if_hwassist = 0; 2472 CAS_UNLOCK(sc); 2473 break; 2474 case SIOCADDMULTI: 2475 case SIOCDELMULTI: 2476 CAS_LOCK(sc); 2477 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2478 cas_setladrf(sc); 2479 CAS_UNLOCK(sc); 2480 break; 2481 case SIOCSIFMTU: 2482 if ((ifr->ifr_mtu < ETHERMIN) || 2483 (ifr->ifr_mtu > ETHERMTU_JUMBO)) 2484 error = EINVAL; 2485 else 2486 ifp->if_mtu = ifr->ifr_mtu; 2487 break; 2488 case SIOCGIFMEDIA: 2489 case SIOCSIFMEDIA: 2490 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd); 2491 break; 2492 default: 2493 error = ether_ioctl(ifp, cmd, data); 2494 break; 2495 } 2496 2497 return (error); 2498 } 2499 2500 static u_int 2501 cas_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 2502 { 2503 uint32_t crc, *hash = arg; 2504 2505 crc = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN); 2506 /* We just want the 8 most significant bits. */ 2507 crc >>= 24; 2508 /* Set the corresponding bit in the filter. */ 2509 hash[crc >> 4] |= 1 << (15 - (crc & 15)); 2510 2511 return (1); 2512 } 2513 2514 static void 2515 cas_setladrf(struct cas_softc *sc) 2516 { 2517 struct ifnet *ifp = sc->sc_ifp; 2518 int i; 2519 uint32_t hash[16]; 2520 uint32_t v; 2521 2522 CAS_LOCK_ASSERT(sc, MA_OWNED); 2523 2524 /* 2525 * Turn off the RX MAC and the hash filter as required by the Sun 2526 * Cassini programming restrictions. 2527 */ 2528 v = sc->sc_mac_rxcfg & ~(CAS_MAC_RX_CONF_HFILTER | 2529 CAS_MAC_RX_CONF_EN); 2530 CAS_WRITE_4(sc, CAS_MAC_RX_CONF, v); 2531 CAS_BARRIER(sc, CAS_MAC_RX_CONF, 4, 2532 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 2533 if (!cas_bitwait(sc, CAS_MAC_RX_CONF, CAS_MAC_RX_CONF_HFILTER | 2534 CAS_MAC_RX_CONF_EN, 0)) 2535 device_printf(sc->sc_dev, 2536 "cannot disable RX MAC or hash filter\n"); 2537 2538 v &= ~(CAS_MAC_RX_CONF_PROMISC | CAS_MAC_RX_CONF_PGRP); 2539 if ((ifp->if_flags & IFF_PROMISC) != 0) { 2540 v |= CAS_MAC_RX_CONF_PROMISC; 2541 goto chipit; 2542 } 2543 if ((ifp->if_flags & IFF_ALLMULTI) != 0) { 2544 v |= CAS_MAC_RX_CONF_PGRP; 2545 goto chipit; 2546 } 2547 2548 /* 2549 * Set up multicast address filter by passing all multicast 2550 * addresses through a crc generator, and then using the high 2551 * order 8 bits as an index into the 256 bit logical address 2552 * filter. The high order 4 bits selects the word, while the 2553 * other 4 bits select the bit within the word (where bit 0 2554 * is the MSB). 2555 */ 2556 2557 memset(hash, 0, sizeof(hash)); 2558 if_foreach_llmaddr(ifp, cas_hash_maddr, &hash); 2559 2560 v |= CAS_MAC_RX_CONF_HFILTER; 2561 2562 /* Now load the hash table into the chip (if we are using it). */ 2563 for (i = 0; i < 16; i++) 2564 CAS_WRITE_4(sc, 2565 CAS_MAC_HASH0 + i * (CAS_MAC_HASH1 - CAS_MAC_HASH0), 2566 hash[i]); 2567 2568 chipit: 2569 sc->sc_mac_rxcfg = v; 2570 CAS_WRITE_4(sc, CAS_MAC_RX_CONF, v | CAS_MAC_RX_CONF_EN); 2571 } 2572 2573 static int cas_pci_attach(device_t dev); 2574 static int cas_pci_detach(device_t dev); 2575 static int cas_pci_probe(device_t dev); 2576 static int cas_pci_resume(device_t dev); 2577 static int cas_pci_suspend(device_t dev); 2578 2579 static device_method_t cas_pci_methods[] = { 2580 /* Device interface */ 2581 DEVMETHOD(device_probe, cas_pci_probe), 2582 DEVMETHOD(device_attach, cas_pci_attach), 2583 DEVMETHOD(device_detach, cas_pci_detach), 2584 DEVMETHOD(device_suspend, cas_pci_suspend), 2585 DEVMETHOD(device_resume, cas_pci_resume), 2586 /* Use the suspend handler here, it is all that is required. */ 2587 DEVMETHOD(device_shutdown, cas_pci_suspend), 2588 2589 /* MII interface */ 2590 DEVMETHOD(miibus_readreg, cas_mii_readreg), 2591 DEVMETHOD(miibus_writereg, cas_mii_writereg), 2592 DEVMETHOD(miibus_statchg, cas_mii_statchg), 2593 2594 DEVMETHOD_END 2595 }; 2596 2597 static driver_t cas_pci_driver = { 2598 "cas", 2599 cas_pci_methods, 2600 sizeof(struct cas_softc) 2601 }; 2602 2603 static const struct cas_pci_dev { 2604 uint32_t cpd_devid; 2605 uint8_t cpd_revid; 2606 int cpd_variant; 2607 const char *cpd_desc; 2608 } cas_pci_devlist[] = { 2609 { 0x0035100b, 0x0, CAS_SATURN, "NS DP83065 Saturn Gigabit Ethernet" }, 2610 { 0xabba108e, 0x10, CAS_CASPLUS, "Sun Cassini+ Gigabit Ethernet" }, 2611 { 0xabba108e, 0x0, CAS_CAS, "Sun Cassini Gigabit Ethernet" }, 2612 { 0, 0, 0, NULL } 2613 }; 2614 2615 DRIVER_MODULE(cas, pci, cas_pci_driver, cas_devclass, 0, 0); 2616 MODULE_PNP_INFO("W32:vendor/device", pci, cas, cas_pci_devlist, 2617 nitems(cas_pci_devlist) - 1); 2618 DRIVER_MODULE(miibus, cas, miibus_driver, miibus_devclass, 0, 0); 2619 MODULE_DEPEND(cas, pci, 1, 1, 1); 2620 2621 static int 2622 cas_pci_probe(device_t dev) 2623 { 2624 int i; 2625 2626 for (i = 0; cas_pci_devlist[i].cpd_desc != NULL; i++) { 2627 if (pci_get_devid(dev) == cas_pci_devlist[i].cpd_devid && 2628 pci_get_revid(dev) >= cas_pci_devlist[i].cpd_revid) { 2629 device_set_desc(dev, cas_pci_devlist[i].cpd_desc); 2630 return (BUS_PROBE_DEFAULT); 2631 } 2632 } 2633 2634 return (ENXIO); 2635 } 2636 2637 static struct resource_spec cas_pci_res_spec[] = { 2638 { SYS_RES_IRQ, 0, RF_SHAREABLE | RF_ACTIVE }, /* CAS_RES_INTR */ 2639 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE }, /* CAS_RES_MEM */ 2640 { -1, 0 } 2641 }; 2642 2643 #define CAS_LOCAL_MAC_ADDRESS "local-mac-address" 2644 #define CAS_PHY_INTERFACE "phy-interface" 2645 #define CAS_PHY_TYPE "phy-type" 2646 #define CAS_PHY_TYPE_PCS "pcs" 2647 2648 static int 2649 cas_pci_attach(device_t dev) 2650 { 2651 char buf[sizeof(CAS_LOCAL_MAC_ADDRESS)]; 2652 struct cas_softc *sc; 2653 int i; 2654 #if !defined(__powerpc__) 2655 u_char enaddr[4][ETHER_ADDR_LEN]; 2656 u_int j, k, lma, pcs[4], phy; 2657 #endif 2658 2659 sc = device_get_softc(dev); 2660 sc->sc_variant = CAS_UNKNOWN; 2661 for (i = 0; cas_pci_devlist[i].cpd_desc != NULL; i++) { 2662 if (pci_get_devid(dev) == cas_pci_devlist[i].cpd_devid && 2663 pci_get_revid(dev) >= cas_pci_devlist[i].cpd_revid) { 2664 sc->sc_variant = cas_pci_devlist[i].cpd_variant; 2665 break; 2666 } 2667 } 2668 if (sc->sc_variant == CAS_UNKNOWN) { 2669 device_printf(dev, "unknown adaptor\n"); 2670 return (ENXIO); 2671 } 2672 2673 /* PCI configuration */ 2674 pci_write_config(dev, PCIR_COMMAND, 2675 pci_read_config(dev, PCIR_COMMAND, 2) | PCIM_CMD_BUSMASTEREN | 2676 PCIM_CMD_MWRICEN | PCIM_CMD_PERRESPEN | PCIM_CMD_SERRESPEN, 2); 2677 2678 sc->sc_dev = dev; 2679 if (sc->sc_variant == CAS_CAS && pci_get_devid(dev) < 0x02) 2680 /* Hardware checksumming may hang TX. */ 2681 sc->sc_flags |= CAS_NO_CSUM; 2682 if (sc->sc_variant == CAS_CASPLUS || sc->sc_variant == CAS_SATURN) 2683 sc->sc_flags |= CAS_REG_PLUS; 2684 if (sc->sc_variant == CAS_CAS || 2685 (sc->sc_variant == CAS_CASPLUS && pci_get_revid(dev) < 0x11)) 2686 sc->sc_flags |= CAS_TABORT; 2687 if (bootverbose) 2688 device_printf(dev, "flags=0x%x\n", sc->sc_flags); 2689 2690 if (bus_alloc_resources(dev, cas_pci_res_spec, sc->sc_res)) { 2691 device_printf(dev, "failed to allocate resources\n"); 2692 bus_release_resources(dev, cas_pci_res_spec, sc->sc_res); 2693 return (ENXIO); 2694 } 2695 2696 CAS_LOCK_INIT(sc, device_get_nameunit(dev)); 2697 2698 #if defined(__powerpc__) 2699 OF_getetheraddr(dev, sc->sc_enaddr); 2700 if (OF_getprop(ofw_bus_get_node(dev), CAS_PHY_INTERFACE, buf, 2701 sizeof(buf)) > 0 || OF_getprop(ofw_bus_get_node(dev), 2702 CAS_PHY_TYPE, buf, sizeof(buf)) > 0) { 2703 buf[sizeof(buf) - 1] = '\0'; 2704 if (strcmp(buf, CAS_PHY_TYPE_PCS) == 0) 2705 sc->sc_flags |= CAS_SERDES; 2706 } 2707 #else 2708 /* 2709 * Dig out VPD (vital product data) and read the MAC address as well 2710 * as the PHY type. The VPD resides in the PCI Expansion ROM (PCI 2711 * FCode) and can't be accessed via the PCI capability pointer. 2712 * SUNW,pci-ce and SUNW,pci-qge use the Enhanced VPD format described 2713 * in the free US Patent 7149820. 2714 */ 2715 2716 #define PCI_ROMHDR_SIZE 0x1c 2717 #define PCI_ROMHDR_SIG 0x00 2718 #define PCI_ROMHDR_SIG_MAGIC 0xaa55 /* little endian */ 2719 #define PCI_ROMHDR_PTR_DATA 0x18 2720 #define PCI_ROM_SIZE 0x18 2721 #define PCI_ROM_SIG 0x00 2722 #define PCI_ROM_SIG_MAGIC 0x52494350 /* "PCIR", endian */ 2723 /* reversed */ 2724 #define PCI_ROM_VENDOR 0x04 2725 #define PCI_ROM_DEVICE 0x06 2726 #define PCI_ROM_PTR_VPD 0x08 2727 #define PCI_VPDRES_BYTE0 0x00 2728 #define PCI_VPDRES_ISLARGE(x) ((x) & 0x80) 2729 #define PCI_VPDRES_LARGE_NAME(x) ((x) & 0x7f) 2730 #define PCI_VPDRES_LARGE_LEN_LSB 0x01 2731 #define PCI_VPDRES_LARGE_LEN_MSB 0x02 2732 #define PCI_VPDRES_LARGE_SIZE 0x03 2733 #define PCI_VPDRES_TYPE_ID_STRING 0x02 /* large */ 2734 #define PCI_VPDRES_TYPE_VPD 0x10 /* large */ 2735 #define PCI_VPD_KEY0 0x00 2736 #define PCI_VPD_KEY1 0x01 2737 #define PCI_VPD_LEN 0x02 2738 #define PCI_VPD_SIZE 0x03 2739 2740 #define CAS_ROM_READ_1(sc, offs) \ 2741 CAS_READ_1((sc), CAS_PCI_ROM_OFFSET + (offs)) 2742 #define CAS_ROM_READ_2(sc, offs) \ 2743 CAS_READ_2((sc), CAS_PCI_ROM_OFFSET + (offs)) 2744 #define CAS_ROM_READ_4(sc, offs) \ 2745 CAS_READ_4((sc), CAS_PCI_ROM_OFFSET + (offs)) 2746 2747 lma = phy = 0; 2748 memset(enaddr, 0, sizeof(enaddr)); 2749 memset(pcs, 0, sizeof(pcs)); 2750 2751 /* Enable PCI Expansion ROM access. */ 2752 CAS_WRITE_4(sc, CAS_BIM_LDEV_OEN, 2753 CAS_BIM_LDEV_OEN_PAD | CAS_BIM_LDEV_OEN_PROM); 2754 2755 /* Read PCI Expansion ROM header. */ 2756 if (CAS_ROM_READ_2(sc, PCI_ROMHDR_SIG) != PCI_ROMHDR_SIG_MAGIC || 2757 (i = CAS_ROM_READ_2(sc, PCI_ROMHDR_PTR_DATA)) < 2758 PCI_ROMHDR_SIZE) { 2759 device_printf(dev, "unexpected PCI Expansion ROM header\n"); 2760 goto fail_prom; 2761 } 2762 2763 /* Read PCI Expansion ROM data. */ 2764 if (CAS_ROM_READ_4(sc, i + PCI_ROM_SIG) != PCI_ROM_SIG_MAGIC || 2765 CAS_ROM_READ_2(sc, i + PCI_ROM_VENDOR) != pci_get_vendor(dev) || 2766 CAS_ROM_READ_2(sc, i + PCI_ROM_DEVICE) != pci_get_device(dev) || 2767 (j = CAS_ROM_READ_2(sc, i + PCI_ROM_PTR_VPD)) < 2768 i + PCI_ROM_SIZE) { 2769 device_printf(dev, "unexpected PCI Expansion ROM data\n"); 2770 goto fail_prom; 2771 } 2772 2773 /* Read PCI VPD. */ 2774 next: 2775 if (PCI_VPDRES_ISLARGE(CAS_ROM_READ_1(sc, 2776 j + PCI_VPDRES_BYTE0)) == 0) { 2777 device_printf(dev, "no large PCI VPD\n"); 2778 goto fail_prom; 2779 } 2780 2781 i = (CAS_ROM_READ_1(sc, j + PCI_VPDRES_LARGE_LEN_MSB) << 8) | 2782 CAS_ROM_READ_1(sc, j + PCI_VPDRES_LARGE_LEN_LSB); 2783 switch (PCI_VPDRES_LARGE_NAME(CAS_ROM_READ_1(sc, 2784 j + PCI_VPDRES_BYTE0))) { 2785 case PCI_VPDRES_TYPE_ID_STRING: 2786 /* Skip identifier string. */ 2787 j += PCI_VPDRES_LARGE_SIZE + i; 2788 goto next; 2789 case PCI_VPDRES_TYPE_VPD: 2790 for (j += PCI_VPDRES_LARGE_SIZE; i > 0; 2791 i -= PCI_VPD_SIZE + CAS_ROM_READ_1(sc, j + PCI_VPD_LEN), 2792 j += PCI_VPD_SIZE + CAS_ROM_READ_1(sc, j + PCI_VPD_LEN)) { 2793 if (CAS_ROM_READ_1(sc, j + PCI_VPD_KEY0) != 'Z') 2794 /* no Enhanced VPD */ 2795 continue; 2796 if (CAS_ROM_READ_1(sc, j + PCI_VPD_SIZE) != 'I') 2797 /* no instance property */ 2798 continue; 2799 if (CAS_ROM_READ_1(sc, j + PCI_VPD_SIZE + 3) == 'B') { 2800 /* byte array */ 2801 if (CAS_ROM_READ_1(sc, 2802 j + PCI_VPD_SIZE + 4) != ETHER_ADDR_LEN) 2803 continue; 2804 bus_read_region_1(sc->sc_res[CAS_RES_MEM], 2805 CAS_PCI_ROM_OFFSET + j + PCI_VPD_SIZE + 5, 2806 buf, sizeof(buf)); 2807 buf[sizeof(buf) - 1] = '\0'; 2808 if (strcmp(buf, CAS_LOCAL_MAC_ADDRESS) != 0) 2809 continue; 2810 bus_read_region_1(sc->sc_res[CAS_RES_MEM], 2811 CAS_PCI_ROM_OFFSET + j + PCI_VPD_SIZE + 2812 5 + sizeof(CAS_LOCAL_MAC_ADDRESS), 2813 enaddr[lma], sizeof(enaddr[lma])); 2814 lma++; 2815 if (lma == 4 && phy == 4) 2816 break; 2817 } else if (CAS_ROM_READ_1(sc, j + PCI_VPD_SIZE + 3) == 2818 'S') { 2819 /* string */ 2820 if (CAS_ROM_READ_1(sc, 2821 j + PCI_VPD_SIZE + 4) != 2822 sizeof(CAS_PHY_TYPE_PCS)) 2823 continue; 2824 bus_read_region_1(sc->sc_res[CAS_RES_MEM], 2825 CAS_PCI_ROM_OFFSET + j + PCI_VPD_SIZE + 5, 2826 buf, sizeof(buf)); 2827 buf[sizeof(buf) - 1] = '\0'; 2828 if (strcmp(buf, CAS_PHY_INTERFACE) == 0) 2829 k = sizeof(CAS_PHY_INTERFACE); 2830 else if (strcmp(buf, CAS_PHY_TYPE) == 0) 2831 k = sizeof(CAS_PHY_TYPE); 2832 else 2833 continue; 2834 bus_read_region_1(sc->sc_res[CAS_RES_MEM], 2835 CAS_PCI_ROM_OFFSET + j + PCI_VPD_SIZE + 2836 5 + k, buf, sizeof(buf)); 2837 buf[sizeof(buf) - 1] = '\0'; 2838 if (strcmp(buf, CAS_PHY_TYPE_PCS) == 0) 2839 pcs[phy] = 1; 2840 phy++; 2841 if (lma == 4 && phy == 4) 2842 break; 2843 } 2844 } 2845 break; 2846 default: 2847 device_printf(dev, "unexpected PCI VPD\n"); 2848 goto fail_prom; 2849 } 2850 2851 fail_prom: 2852 CAS_WRITE_4(sc, CAS_BIM_LDEV_OEN, 0); 2853 2854 if (lma == 0) { 2855 device_printf(dev, "could not determine Ethernet address\n"); 2856 goto fail; 2857 } 2858 i = 0; 2859 if (lma > 1 && pci_get_slot(dev) < nitems(enaddr)) 2860 i = pci_get_slot(dev); 2861 memcpy(sc->sc_enaddr, enaddr[i], ETHER_ADDR_LEN); 2862 2863 if (phy == 0) { 2864 device_printf(dev, "could not determine PHY type\n"); 2865 goto fail; 2866 } 2867 i = 0; 2868 if (phy > 1 && pci_get_slot(dev) < nitems(pcs)) 2869 i = pci_get_slot(dev); 2870 if (pcs[i] != 0) 2871 sc->sc_flags |= CAS_SERDES; 2872 #endif 2873 2874 if (cas_attach(sc) != 0) { 2875 device_printf(dev, "could not be attached\n"); 2876 goto fail; 2877 } 2878 2879 if (bus_setup_intr(dev, sc->sc_res[CAS_RES_INTR], INTR_TYPE_NET | 2880 INTR_MPSAFE, cas_intr, NULL, sc, &sc->sc_ih) != 0) { 2881 device_printf(dev, "failed to set up interrupt\n"); 2882 cas_detach(sc); 2883 goto fail; 2884 } 2885 return (0); 2886 2887 fail: 2888 CAS_LOCK_DESTROY(sc); 2889 bus_release_resources(dev, cas_pci_res_spec, sc->sc_res); 2890 return (ENXIO); 2891 } 2892 2893 static int 2894 cas_pci_detach(device_t dev) 2895 { 2896 struct cas_softc *sc; 2897 2898 sc = device_get_softc(dev); 2899 bus_teardown_intr(dev, sc->sc_res[CAS_RES_INTR], sc->sc_ih); 2900 cas_detach(sc); 2901 CAS_LOCK_DESTROY(sc); 2902 bus_release_resources(dev, cas_pci_res_spec, sc->sc_res); 2903 return (0); 2904 } 2905 2906 static int 2907 cas_pci_suspend(device_t dev) 2908 { 2909 2910 cas_suspend(device_get_softc(dev)); 2911 return (0); 2912 } 2913 2914 static int 2915 cas_pci_resume(device_t dev) 2916 { 2917 2918 cas_resume(device_get_softc(dev)); 2919 return (0); 2920 } 2921