/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2001 Eduardo Horvath.
 * Copyright (c) 2001-2003 Thomas Moestl
 * Copyright (c) 2007-2009 Marius Strobl <marius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
 * from: FreeBSD: if_gem.c 182060 2008-08-23 15:03:26Z marius
 */

#include <sys/cdefs.h>
/*
 * driver for Sun Cassini/Cassini+ and National Semiconductor DP83065
 * Saturn Gigabit Ethernet controllers
 */

#if 0
#define	CAS_DEBUG
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/resource.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <machine/bus.h>
#if defined(__powerpc__)
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>
#endif
#include <machine/resource.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/cas/if_casreg.h>
#include <dev/cas/if_casvar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "miibus_if.h"

#define	RINGASSERT(n, min, max)						\
	CTASSERT(powerof2(n) && (n) >= (min) && (n) <= (max))

RINGASSERT(CAS_NRXCOMP, 128, 32768);
RINGASSERT(CAS_NRXDESC, 32, 8192);
RINGASSERT(CAS_NRXDESC2, 32, 8192);
RINGASSERT(CAS_NTXDESC, 32, 8192);

#undef RINGASSERT

#define	CCDASSERT(m, a)							\
	CTASSERT((offsetof(struct cas_control_data, m) & ((a) - 1)) == 0)

CCDASSERT(ccd_rxcomps, CAS_RX_COMP_ALIGN);
CCDASSERT(ccd_rxdescs, CAS_RX_DESC_ALIGN);
CCDASSERT(ccd_rxdescs2, CAS_RX_DESC_ALIGN);

#undef CCDASSERT

#define	CAS_TRIES	10000

/*
 * According to the documentation, the hardware supports basic TCP
 * checksum offloading only.  In practice it can also be used for UDP,
 * however (i.e. the problem of earlier Sun NICs, where a checksum of
 * 0x0 was not converted to 0xffff, no longer exists).
 */
#define	CAS_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)

static inline void cas_add_rxdesc(struct cas_softc *sc, u_int idx);
static int	cas_attach(struct cas_softc *sc);
static int	cas_bitwait(struct cas_softc *sc, bus_addr_t r, uint32_t clr,
		    uint32_t set);
static void	cas_cddma_callback(void *xsc, bus_dma_segment_t *segs,
		    int nsegs, int error);
static void	cas_detach(struct cas_softc *sc);
static int	cas_disable_rx(struct cas_softc *sc);
static int	cas_disable_tx(struct cas_softc *sc);
static void	cas_eint(struct cas_softc *sc, u_int status);
static void	cas_free(struct mbuf *m);
static void	cas_init(void *xsc);
static void	cas_init_locked(struct cas_softc *sc);
static void	cas_init_regs(struct cas_softc *sc);
static int	cas_intr(void *v);
static void	cas_intr_task(void *arg, int pending __unused);
static int	cas_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int	cas_load_txmbuf(struct cas_softc *sc, struct mbuf **m_head);
static int	cas_mediachange(if_t ifp);
static void	cas_mediastatus(if_t ifp, struct ifmediareq *ifmr);
static void	cas_meminit(struct cas_softc *sc);
static void	cas_mifinit(struct cas_softc *sc);
static int	cas_mii_readreg(device_t dev, int phy, int reg);
static void	cas_mii_statchg(device_t dev);
static int	cas_mii_writereg(device_t dev, int phy, int reg, int val);
static void	cas_reset(struct cas_softc *sc);
static int	cas_reset_rx(struct cas_softc *sc);
static int	cas_reset_tx(struct cas_softc *sc);
static void	cas_resume(struct cas_softc *sc);
static u_int	cas_descsize(u_int sz);
static void	cas_rint(struct cas_softc *sc);
static void	cas_rint_timeout(void *arg);
static inline void cas_rxcksum(struct mbuf *m, uint16_t cksum);
static inline void cas_rxcompinit(struct cas_rx_comp *rxcomp);
static u_int	cas_rxcompsize(u_int sz);
static void	cas_rxdma_callback(void *xsc, bus_dma_segment_t *segs,
		    int nsegs, int error);
static void	cas_setladrf(struct cas_softc *sc);
static void	cas_start(if_t ifp);
static void	cas_stop(if_t ifp);
static void	cas_suspend(struct cas_softc *sc);
static void	cas_tick(void *arg);
static void	cas_tint(struct cas_softc *sc);
static void	cas_tx_task(void *arg, int pending __unused);
static inline void cas_txkick(struct cas_softc *sc);
static void	cas_watchdog(struct cas_softc *sc);

MODULE_DEPEND(cas, ether, 1, 1, 1);
MODULE_DEPEND(cas, miibus, 1, 1, 1);

#ifdef CAS_DEBUG
#include <sys/ktr.h>
#define	KTR_CAS		KTR_SPARE2
#endif

static int
cas_attach(struct cas_softc *sc)
{
	struct cas_txsoft *txs;
	if_t ifp;
	int error, i;
	uint32_t v;

	/* Set up ifnet structure. */
	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		return (ENOSPC);
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setstartfn(ifp, cas_start);
	if_setioctlfn(ifp, cas_ioctl);
	if_setinitfn(ifp, cas_init);
	if_setsendqlen(ifp, CAS_TXQUEUELEN);
	if_setsendqready(ifp);

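	/*
	 * Note that interrupt handling is split in two: cas_intr()
	 * merely acts as a fast filter that masks further interrupts
	 * and defers the actual work to cas_intr_task(), which runs
	 * on the private taskqueue created below.
	 */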
	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0);
	/* Create local taskq. */
	NET_TASK_INIT(&sc->sc_intr_task, 0, cas_intr_task, sc);
	TASK_INIT(&sc->sc_tx_task, 1, cas_tx_task, ifp);
	sc->sc_tq = taskqueue_create_fast("cas_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->sc_tq);
	if (sc->sc_tq == NULL) {
		device_printf(sc->sc_dev, "could not create taskqueue\n");
		error = ENXIO;
		goto fail_ifnet;
	}
	error = taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->sc_dev));
	if (error != 0) {
		device_printf(sc->sc_dev, "could not start threads\n");
		goto fail_taskq;
	}

	/* Make sure the chip is stopped. */
	cas_reset(sc);

	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE, 0, BUS_SPACE_MAXSIZE, 0, NULL, NULL,
	    &sc->sc_pdmatag);
	if (error != 0)
		goto fail_taskq;

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    CAS_PAGE_SIZE, 1, CAS_PAGE_SIZE, 0, NULL, NULL, &sc->sc_rdmatag);
	if (error != 0)
		goto fail_ptag;

	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES * CAS_NTXSEGS, CAS_NTXSEGS, MCLBYTES,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag);
	if (error != 0)
		goto fail_rtag;

	error = bus_dma_tag_create(sc->sc_pdmatag, CAS_TX_DESC_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct cas_control_data), 1,
	    sizeof(struct cas_control_data), 0,
	    NULL, NULL, &sc->sc_cdmatag);
	if (error != 0)
		goto fail_ttag;

	/*
	 * Allocate the control data structures, create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
	    (void **)&sc->sc_control_data,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->sc_cddmamap)) != 0) {
		device_printf(sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_ctag;
	}

	sc->sc_cddma = 0;
	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct cas_control_data),
	    cas_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
		device_printf(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_cmem;
	}

	/*
	 * Initialize the transmit job descriptors.
	 */
	STAILQ_INIT(&sc->sc_txfreeq);
	STAILQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Create the transmit buffer DMA maps.
	 */
	error = ENOMEM;
	for (i = 0; i < CAS_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		txs->txs_ndescs = 0;
		if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
		    &txs->txs_dmamap)) != 0) {
			device_printf(sc->sc_dev,
			    "unable to create TX DMA map %d, error = %d\n",
			    i, error);
			goto fail_txd;
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Allocate the receive buffers, create and load the DMA maps
	 * for them.
	 */
	for (i = 0; i < CAS_NRXDESC; i++) {
		if ((error = bus_dmamem_alloc(sc->sc_rdmatag,
		    &sc->sc_rxdsoft[i].rxds_buf, BUS_DMA_WAITOK,
		    &sc->sc_rxdsoft[i].rxds_dmamap)) != 0) {
			device_printf(sc->sc_dev,
			    "unable to allocate RX buffer %d, error = %d\n",
			    i, error);
			goto fail_rxmem;
		}

		sc->sc_rxdptr = i;
		sc->sc_rxdsoft[i].rxds_paddr = 0;
		if ((error = bus_dmamap_load(sc->sc_rdmatag,
		    sc->sc_rxdsoft[i].rxds_dmamap, sc->sc_rxdsoft[i].rxds_buf,
		    CAS_PAGE_SIZE, cas_rxdma_callback, sc, 0)) != 0 ||
		    sc->sc_rxdsoft[i].rxds_paddr == 0) {
			device_printf(sc->sc_dev,
			    "unable to load RX DMA map %d, error = %d\n",
			    i, error);
			goto fail_rxmap;
		}
	}

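	/*
	 * The PHY probing below relies on the MIF's MDI0/MDI1 sense
	 * bits: MDI1 is checked for an external PHY, MDI0 for the
	 * internal one, and CAS_MIF_CONF_PHY_SELECT switches between
	 * the two MDIO buses accordingly.
	 */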
	if ((sc->sc_flags & CAS_SERDES) == 0) {
		CAS_WRITE_4(sc, CAS_PCS_DATAPATH, CAS_PCS_DATAPATH_MII);
		CAS_BARRIER(sc, CAS_PCS_DATAPATH, 4,
		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
		cas_mifinit(sc);
		/*
		 * Look for an external PHY.
		 */
		error = ENXIO;
		v = CAS_READ_4(sc, CAS_MIF_CONF);
		if ((v & CAS_MIF_CONF_MDI1) != 0) {
			v |= CAS_MIF_CONF_PHY_SELECT;
			CAS_WRITE_4(sc, CAS_MIF_CONF, v);
			CAS_BARRIER(sc, CAS_MIF_CONF, 4,
			    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
			/* Enable/unfreeze the GMII pins of Saturn. */
			if (sc->sc_variant == CAS_SATURN) {
				CAS_WRITE_4(sc, CAS_SATURN_PCFG,
				    CAS_READ_4(sc, CAS_SATURN_PCFG) &
				    ~CAS_SATURN_PCFG_FSI);
				CAS_BARRIER(sc, CAS_SATURN_PCFG, 4,
				    BUS_SPACE_BARRIER_READ |
				    BUS_SPACE_BARRIER_WRITE);
				DELAY(10000);
			}
			error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
			    cas_mediachange, cas_mediastatus, BMSR_DEFCAPMASK,
			    MII_PHY_ANY, MII_OFFSET_ANY, MIIF_DOPAUSE);
		}
		/*
		 * Fall back on an internal PHY if no external PHY was found.
		 */
		if (error != 0 && (v & CAS_MIF_CONF_MDI0) != 0) {
			v &= ~CAS_MIF_CONF_PHY_SELECT;
			CAS_WRITE_4(sc, CAS_MIF_CONF, v);
			CAS_BARRIER(sc, CAS_MIF_CONF, 4,
			    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
			/* Freeze the GMII pins of Saturn for saving power. */
			if (sc->sc_variant == CAS_SATURN) {
				CAS_WRITE_4(sc, CAS_SATURN_PCFG,
				    CAS_READ_4(sc, CAS_SATURN_PCFG) |
				    CAS_SATURN_PCFG_FSI);
				CAS_BARRIER(sc, CAS_SATURN_PCFG, 4,
				    BUS_SPACE_BARRIER_READ |
				    BUS_SPACE_BARRIER_WRITE);
				DELAY(10000);
			}
			error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
			    cas_mediachange, cas_mediastatus, BMSR_DEFCAPMASK,
			    MII_PHY_ANY, MII_OFFSET_ANY, MIIF_DOPAUSE);
		}
	} else {
		/*
		 * Use the external PCS SERDES.
		 */
		CAS_WRITE_4(sc, CAS_PCS_DATAPATH, CAS_PCS_DATAPATH_SERDES);
		CAS_BARRIER(sc, CAS_PCS_DATAPATH, 4, BUS_SPACE_BARRIER_WRITE);
		/* Enable/unfreeze the SERDES pins of Saturn. */
		if (sc->sc_variant == CAS_SATURN) {
			CAS_WRITE_4(sc, CAS_SATURN_PCFG, 0);
			CAS_BARRIER(sc, CAS_SATURN_PCFG, 4,
			    BUS_SPACE_BARRIER_WRITE);
		}
		CAS_WRITE_4(sc, CAS_PCS_SERDES_CTRL, CAS_PCS_SERDES_CTRL_ESD);
		CAS_BARRIER(sc, CAS_PCS_SERDES_CTRL, 4,
		    BUS_SPACE_BARRIER_WRITE);
		CAS_WRITE_4(sc, CAS_PCS_CONF, CAS_PCS_CONF_EN);
		CAS_BARRIER(sc, CAS_PCS_CONF, 4,
		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
		error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp,
		    cas_mediachange, cas_mediastatus, BMSR_DEFCAPMASK,
		    CAS_PHYAD_EXTERNAL, MII_OFFSET_ANY, MIIF_DOPAUSE);
	}
	if (error != 0) {
		device_printf(sc->sc_dev, "attaching PHYs failed\n");
		goto fail_rxmap;
	}
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Announce FIFO sizes. */
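	/*
	 * The TX FIFO size register presumably counts in units of
	 * 64 bytes, hence v / 16 below yields kB; the RX FIFO size
	 * is a compile-time constant here.
	 */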
	v = CAS_READ_4(sc, CAS_TX_FIFO_SIZE);
	device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n",
	    CAS_RX_FIFO_SIZE / 1024, v / 16);

	/* Attach the interface. */
	ether_ifattach(ifp, sc->sc_enaddr);

	/*
	 * Tell the upper layer(s) we support long frames/checksum offloads.
	 */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
	if_setcapabilities(ifp, IFCAP_VLAN_MTU);
	if ((sc->sc_flags & CAS_NO_CSUM) == 0) {
		if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0);
		if_sethwassist(ifp, CAS_CSUM_FEATURES);
	}
	if_setcapenable(ifp, if_getcapabilities(ifp));

	return (0);

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_rxmap:
	for (i = 0; i < CAS_NRXDESC; i++)
		if (sc->sc_rxdsoft[i].rxds_paddr != 0)
			bus_dmamap_unload(sc->sc_rdmatag,
			    sc->sc_rxdsoft[i].rxds_dmamap);
fail_rxmem:
	for (i = 0; i < CAS_NRXDESC; i++)
		if (sc->sc_rxdsoft[i].rxds_buf != NULL)
			bus_dmamem_free(sc->sc_rdmatag,
			    sc->sc_rxdsoft[i].rxds_buf,
			    sc->sc_rxdsoft[i].rxds_dmamap);
fail_txd:
	for (i = 0; i < CAS_TXQUEUELEN; i++)
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
fail_cmem:
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
fail_ctag:
	bus_dma_tag_destroy(sc->sc_cdmatag);
fail_ttag:
	bus_dma_tag_destroy(sc->sc_tdmatag);
fail_rtag:
	bus_dma_tag_destroy(sc->sc_rdmatag);
fail_ptag:
	bus_dma_tag_destroy(sc->sc_pdmatag);
fail_taskq:
	taskqueue_free(sc->sc_tq);
fail_ifnet:
	if_free(ifp);
	return (error);
}

static void
cas_detach(struct cas_softc *sc)
{
	if_t ifp = sc->sc_ifp;
	int i;

	ether_ifdetach(ifp);
	CAS_LOCK(sc);
	cas_stop(ifp);
	CAS_UNLOCK(sc);
	callout_drain(&sc->sc_tick_ch);
	callout_drain(&sc->sc_rx_ch);
	taskqueue_drain(sc->sc_tq, &sc->sc_intr_task);
	taskqueue_drain(sc->sc_tq, &sc->sc_tx_task);
	if_free(ifp);
	taskqueue_free(sc->sc_tq);
	device_delete_child(sc->sc_dev, sc->sc_miibus);

	for (i = 0; i < CAS_NRXDESC; i++)
		if (sc->sc_rxdsoft[i].rxds_dmamap != NULL)
			bus_dmamap_sync(sc->sc_rdmatag,
			    sc->sc_rxdsoft[i].rxds_dmamap,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	for (i = 0; i < CAS_NRXDESC; i++)
		if (sc->sc_rxdsoft[i].rxds_paddr != 0)
			bus_dmamap_unload(sc->sc_rdmatag,
			    sc->sc_rxdsoft[i].rxds_dmamap);
	for (i = 0; i < CAS_NRXDESC; i++)
		if (sc->sc_rxdsoft[i].rxds_buf != NULL)
			bus_dmamem_free(sc->sc_rdmatag,
			    sc->sc_rxdsoft[i].rxds_buf,
			    sc->sc_rxdsoft[i].rxds_dmamap);
	for (i = 0; i < CAS_TXQUEUELEN; i++)
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	CAS_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
	bus_dma_tag_destroy(sc->sc_cdmatag);
	bus_dma_tag_destroy(sc->sc_tdmatag);
	bus_dma_tag_destroy(sc->sc_rdmatag);
	bus_dma_tag_destroy(sc->sc_pdmatag);
}

static void
cas_suspend(struct cas_softc *sc)
{
	if_t ifp = sc->sc_ifp;

	CAS_LOCK(sc);
	cas_stop(ifp);
	CAS_UNLOCK(sc);
}

static void
cas_resume(struct cas_softc *sc)
{
	if_t ifp = sc->sc_ifp;

	CAS_LOCK(sc);
	/*
	 * On resume all registers have to be initialized again like
	 * after power-on.
	 */
	sc->sc_flags &= ~CAS_INITED;
	if (if_getflags(ifp) & IFF_UP)
		cas_init_locked(sc);
	CAS_UNLOCK(sc);
}

static inline void
cas_rxcksum(struct mbuf *m, uint16_t cksum)
{
	struct ether_header *eh;
	struct ip *ip;
	struct udphdr *uh;
	uint16_t *opts;
	int32_t hlen, len, pktlen;
	uint32_t temp32;

	pktlen = m->m_pkthdr.len;
	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
		return;
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type != htons(ETHERTYPE_IP))
		return;
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)
		return;

	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	if (hlen < sizeof(struct ip))
		return;
	if (ntohs(ip->ip_len) < hlen)
		return;
	if (ntohs(ip->ip_len) != pktlen)
		return;
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
		return;	/* Cannot handle fragmented packet. */

	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (pktlen < (hlen + sizeof(struct tcphdr)))
			return;
		break;
	case IPPROTO_UDP:
		if (pktlen < (hlen + sizeof(struct udphdr)))
			return;
		uh = (struct udphdr *)((uint8_t *)ip + hlen);
		if (uh->uh_sum == 0)
			return;	/* no checksum */
		break;
	default:
		return;
	}

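	/*
	 * The chip sums the payload starting at a fixed offset (the
	 * checksum start offset programmed in cas_rxcompinit() covers
	 * the Ethernet header plus a standard 20-byte IP header), so
	 * any IP option words were included in the hardware sum and
	 * have to be subtracted out again below, using one's complement
	 * arithmetic with end-around carry.
	 */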
	cksum = ~cksum;
	/* checksum fixup for IP options */
	len = hlen - sizeof(struct ip);
	if (len > 0) {
		opts = (uint16_t *)(ip + 1);
		for (; len > 0; len -= sizeof(uint16_t), opts++) {
			temp32 = cksum - *opts;
			temp32 = (temp32 >> 16) + (temp32 & 65535);
			cksum = temp32 & 65535;
		}
	}
	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
	m->m_pkthdr.csum_data = cksum;
}

static void
cas_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct cas_softc *sc = xsc;

	if (error != 0)
		return;
	if (nsegs != 1)
		panic("%s: bad control buffer segment count", __func__);
	sc->sc_cddma = segs[0].ds_addr;
}

static void
cas_rxdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct cas_softc *sc = xsc;

	if (error != 0)
		return;
	if (nsegs != 1)
		panic("%s: bad RX buffer segment count", __func__);
	sc->sc_rxdsoft[sc->sc_rxdptr].rxds_paddr = segs[0].ds_addr;
}

static void
cas_tick(void *arg)
{
	struct cas_softc *sc = arg;
	if_t ifp = sc->sc_ifp;
	uint32_t v;

	CAS_LOCK_ASSERT(sc, MA_OWNED);

	/*
	 * Unload collision and error counters.
	 */
	if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
	    CAS_READ_4(sc, CAS_MAC_NORM_COLL_CNT) +
	    CAS_READ_4(sc, CAS_MAC_FIRST_COLL_CNT));
	v = CAS_READ_4(sc, CAS_MAC_EXCESS_COLL_CNT) +
	    CAS_READ_4(sc, CAS_MAC_LATE_COLL_CNT);
	if_inc_counter(ifp, IFCOUNTER_COLLISIONS, v);
	if_inc_counter(ifp, IFCOUNTER_OERRORS, v);
	if_inc_counter(ifp, IFCOUNTER_IERRORS,
	    CAS_READ_4(sc, CAS_MAC_RX_LEN_ERR_CNT) +
	    CAS_READ_4(sc, CAS_MAC_RX_ALIGN_ERR) +
	    CAS_READ_4(sc, CAS_MAC_RX_CRC_ERR_CNT) +
	    CAS_READ_4(sc, CAS_MAC_RX_CODE_VIOL));

	/*
	 * Then clear the hardware counters.
	 */
	CAS_WRITE_4(sc, CAS_MAC_NORM_COLL_CNT, 0);
	CAS_WRITE_4(sc, CAS_MAC_FIRST_COLL_CNT, 0);
	CAS_WRITE_4(sc, CAS_MAC_EXCESS_COLL_CNT, 0);
	CAS_WRITE_4(sc, CAS_MAC_LATE_COLL_CNT, 0);
	CAS_WRITE_4(sc, CAS_MAC_RX_LEN_ERR_CNT, 0);
	CAS_WRITE_4(sc, CAS_MAC_RX_ALIGN_ERR, 0);
	CAS_WRITE_4(sc, CAS_MAC_RX_CRC_ERR_CNT, 0);
	CAS_WRITE_4(sc, CAS_MAC_RX_CODE_VIOL, 0);

	mii_tick(sc->sc_mii);

	if (sc->sc_txfree != CAS_MAXTXFREE)
		cas_tint(sc);

	cas_watchdog(sc);

	callout_reset(&sc->sc_tick_ch, hz, cas_tick, sc);
}

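/*
 * Busy-wait until the bits in clr read back as zero and the bits in set
 * read back as one; with CAS_TRIES iterations of 100us each this amounts
 * to a timeout of roughly one second.
 */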
static int
cas_bitwait(struct cas_softc *sc, bus_addr_t r, uint32_t clr, uint32_t set)
{
	int i;
	uint32_t reg;

	for (i = CAS_TRIES; i--; DELAY(100)) {
		reg = CAS_READ_4(sc, r);
		if ((reg & clr) == 0 && (reg & set) == set)
			return (1);
	}
	return (0);
}

static void
cas_reset(struct cas_softc *sc)
{

#ifdef CAS_DEBUG
	CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif
	/* Disable all interrupts in order to avoid spurious ones. */
	CAS_WRITE_4(sc, CAS_INTMASK, 0xffffffff);

	cas_reset_rx(sc);
	cas_reset_tx(sc);

	/*
	 * Do a full reset modulo the result of the last auto-negotiation
	 * when using the SERDES.
	 */
	CAS_WRITE_4(sc, CAS_RESET, CAS_RESET_RX | CAS_RESET_TX |
	    ((sc->sc_flags & CAS_SERDES) != 0 ? CAS_RESET_PCS_DIS : 0));
	CAS_BARRIER(sc, CAS_RESET, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	DELAY(3000);
	if (!cas_bitwait(sc, CAS_RESET, CAS_RESET_RX | CAS_RESET_TX, 0))
		device_printf(sc->sc_dev, "cannot reset device\n");
}

static void
cas_stop(if_t ifp)
{
	struct cas_softc *sc = if_getsoftc(ifp);
	struct cas_txsoft *txs;

#ifdef CAS_DEBUG
	CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif

	callout_stop(&sc->sc_tick_ch);
	callout_stop(&sc->sc_rx_ch);

	/* Disable all interrupts in order to avoid spurious ones. */
	CAS_WRITE_4(sc, CAS_INTMASK, 0xffffffff);

	cas_reset_tx(sc);
	cas_reset_rx(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		if (txs->txs_ndescs != 0) {
			bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
			if (txs->txs_mbuf != NULL) {
				m_freem(txs->txs_mbuf);
				txs->txs_mbuf = NULL;
			}
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	sc->sc_flags &= ~CAS_LINK;
	sc->sc_wdog_timer = 0;
}

static int
cas_reset_rx(struct cas_softc *sc)
{

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	(void)cas_disable_rx(sc);
	CAS_WRITE_4(sc, CAS_RX_CONF, 0);
	CAS_BARRIER(sc, CAS_RX_CONF, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!cas_bitwait(sc, CAS_RX_CONF, CAS_RX_CONF_RXDMA_EN, 0))
		device_printf(sc->sc_dev, "cannot disable RX DMA\n");

	/* Finally, reset the ERX. */
	CAS_WRITE_4(sc, CAS_RESET, CAS_RESET_RX |
	    ((sc->sc_flags & CAS_SERDES) != 0 ? CAS_RESET_PCS_DIS : 0));
	CAS_BARRIER(sc, CAS_RESET, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!cas_bitwait(sc, CAS_RESET, CAS_RESET_RX, 0)) {
		device_printf(sc->sc_dev, "cannot reset receiver\n");
		return (1);
	}
	return (0);
}

static int
cas_reset_tx(struct cas_softc *sc)
{

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	(void)cas_disable_tx(sc);
	CAS_WRITE_4(sc, CAS_TX_CONF, 0);
	CAS_BARRIER(sc, CAS_TX_CONF, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!cas_bitwait(sc, CAS_TX_CONF, CAS_TX_CONF_TXDMA_EN, 0))
		device_printf(sc->sc_dev, "cannot disable TX DMA\n");

	/* Finally, reset the ETX. */
	CAS_WRITE_4(sc, CAS_RESET, CAS_RESET_TX |
	    ((sc->sc_flags & CAS_SERDES) != 0 ? CAS_RESET_PCS_DIS : 0));
	CAS_BARRIER(sc, CAS_RESET, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!cas_bitwait(sc, CAS_RESET, CAS_RESET_TX, 0)) {
		device_printf(sc->sc_dev, "cannot reset transmitter\n");
		return (1);
	}
	return (0);
}

static int
cas_disable_rx(struct cas_softc *sc)
{

	CAS_WRITE_4(sc, CAS_MAC_RX_CONF,
	    CAS_READ_4(sc, CAS_MAC_RX_CONF) & ~CAS_MAC_RX_CONF_EN);
	CAS_BARRIER(sc, CAS_MAC_RX_CONF, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (cas_bitwait(sc, CAS_MAC_RX_CONF, CAS_MAC_RX_CONF_EN, 0))
		return (1);
	if (bootverbose)
		device_printf(sc->sc_dev, "cannot disable RX MAC\n");
	return (0);
}

static int
cas_disable_tx(struct cas_softc *sc)
{

	CAS_WRITE_4(sc, CAS_MAC_TX_CONF,
	    CAS_READ_4(sc, CAS_MAC_TX_CONF) & ~CAS_MAC_TX_CONF_EN);
	CAS_BARRIER(sc, CAS_MAC_TX_CONF, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (cas_bitwait(sc, CAS_MAC_TX_CONF, CAS_MAC_TX_CONF_EN, 0))
		return (1);
	if (bootverbose)
		device_printf(sc->sc_dev, "cannot disable TX MAC\n");
	return (0);
}

static inline void
cas_rxcompinit(struct cas_rx_comp *rxcomp)
{

	rxcomp->crc_word1 = 0;
	rxcomp->crc_word2 = 0;
	rxcomp->crc_word3 =
	    htole64(CAS_SET(ETHER_HDR_LEN + sizeof(struct ip), CAS_RC3_CSO));
	rxcomp->crc_word4 = htole64(CAS_RC4_ZERO);
}

static void
cas_meminit(struct cas_softc *sc)
{
	int i;

	CAS_LOCK_ASSERT(sc, MA_OWNED);

	/*
	 * Initialize the transmit descriptor ring.
	 */
	for (i = 0; i < CAS_NTXDESC; i++) {
		sc->sc_txdescs[i].cd_flags = 0;
		sc->sc_txdescs[i].cd_buf_ptr = 0;
	}
	sc->sc_txfree = CAS_MAXTXFREE;
	sc->sc_txnext = 0;
	sc->sc_txwin = 0;

	/*
	 * Initialize the receive completion ring.
	 */
	for (i = 0; i < CAS_NRXCOMP; i++)
		cas_rxcompinit(&sc->sc_rxcomps[i]);
	sc->sc_rxcptr = 0;

	/*
	 * Initialize the first receive descriptor ring.  We leave
	 * the second one zeroed as we don't actually use it.
	 */
	for (i = 0; i < CAS_NRXDESC; i++)
		CAS_INIT_RXDESC(sc, i, i);
	sc->sc_rxdptr = 0;

	CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static u_int
cas_descsize(u_int sz)
{

	switch (sz) {
	case 32:
		return (CAS_DESC_32);
	case 64:
		return (CAS_DESC_64);
	case 128:
		return (CAS_DESC_128);
	case 256:
		return (CAS_DESC_256);
	case 512:
		return (CAS_DESC_512);
	case 1024:
		return (CAS_DESC_1K);
	case 2048:
		return (CAS_DESC_2K);
	case 4096:
		return (CAS_DESC_4K);
	case 8192:
		return (CAS_DESC_8K);
	default:
		printf("%s: invalid descriptor ring size %d\n", __func__, sz);
		return (CAS_DESC_32);
	}
}

static u_int
cas_rxcompsize(u_int sz)
{

	switch (sz) {
	case 128:
		return (CAS_RX_CONF_COMP_128);
	case 256:
		return (CAS_RX_CONF_COMP_256);
	case 512:
		return (CAS_RX_CONF_COMP_512);
	case 1024:
		return (CAS_RX_CONF_COMP_1K);
	case 2048:
		return (CAS_RX_CONF_COMP_2K);
	case 4096:
		return (CAS_RX_CONF_COMP_4K);
	case 8192:
		return (CAS_RX_CONF_COMP_8K);
	case 16384:
		return (CAS_RX_CONF_COMP_16K);
	case 32768:
		return (CAS_RX_CONF_COMP_32K);
	default:
		printf("%s: invalid completion ring size %d\n", __func__, sz);
		return (CAS_RX_CONF_COMP_128);
	}
}

static void
cas_init(void *xsc)
{
	struct cas_softc *sc = xsc;

	CAS_LOCK(sc);
	cas_init_locked(sc);
	CAS_UNLOCK(sc);
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
static void
cas_init_locked(struct cas_softc *sc)
{
	if_t ifp = sc->sc_ifp;
	uint32_t v;

	CAS_LOCK_ASSERT(sc, MA_OWNED);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		return;

#ifdef CAS_DEBUG
	CTR2(KTR_CAS, "%s: %s: calling stop", device_get_name(sc->sc_dev),
	    __func__);
#endif
	/*
	 * Initialization sequence.  The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2.  Reset the Ethernet Channel. */
	cas_stop(ifp);
	cas_reset(sc);
#ifdef CAS_DEBUG
	CTR2(KTR_CAS, "%s: %s: restarting", device_get_name(sc->sc_dev),
	    __func__);
#endif

	if ((sc->sc_flags & CAS_SERDES) == 0)
		/* Re-initialize the MIF. */
		cas_mifinit(sc);

	/* step 3.  Setup data structures in host memory. */
	cas_meminit(sc);

	/* step 4.  TX MAC registers & counters */
	cas_init_regs(sc);

	/* step 5.  RX MAC registers & counters */

	/* step 6 & 7.  Program Ring Base Addresses. */
	CAS_WRITE_4(sc, CAS_TX_DESC3_BASE_HI,
	    (((uint64_t)CAS_CDTXDADDR(sc, 0)) >> 32));
	CAS_WRITE_4(sc, CAS_TX_DESC3_BASE_LO,
	    CAS_CDTXDADDR(sc, 0) & 0xffffffff);

	CAS_WRITE_4(sc, CAS_RX_COMP_BASE_HI,
	    (((uint64_t)CAS_CDRXCADDR(sc, 0)) >> 32));
	CAS_WRITE_4(sc, CAS_RX_COMP_BASE_LO,
	    CAS_CDRXCADDR(sc, 0) & 0xffffffff);

	CAS_WRITE_4(sc, CAS_RX_DESC_BASE_HI,
	    (((uint64_t)CAS_CDRXDADDR(sc, 0)) >> 32));
	CAS_WRITE_4(sc, CAS_RX_DESC_BASE_LO,
	    CAS_CDRXDADDR(sc, 0) & 0xffffffff);

	if ((sc->sc_flags & CAS_REG_PLUS) != 0) {
		CAS_WRITE_4(sc, CAS_RX_DESC2_BASE_HI,
		    (((uint64_t)CAS_CDRXD2ADDR(sc, 0)) >> 32));
		CAS_WRITE_4(sc, CAS_RX_DESC2_BASE_LO,
		    CAS_CDRXD2ADDR(sc, 0) & 0xffffffff);
	}

#ifdef CAS_DEBUG
	CTR5(KTR_CAS,
	    "loading TXDR %lx, RXCR %lx, RXDR %lx, RXD2R %lx, cddma %lx",
	    CAS_CDTXDADDR(sc, 0), CAS_CDRXCADDR(sc, 0), CAS_CDRXDADDR(sc, 0),
	    CAS_CDRXD2ADDR(sc, 0), sc->sc_cddma);
#endif

	/* step 8.  Global Configuration & Interrupt Masks */

	/* Disable weighted round robin. */
	CAS_WRITE_4(sc, CAS_CAW, CAS_CAW_RR_DIS);

	/*
	 * Enable infinite bursts for revisions without PCI issues if
	 * applicable.  Doing so greatly improves the TX performance.
	 */
	CAS_WRITE_4(sc, CAS_INF_BURST,
	    (sc->sc_flags & CAS_TABORT) == 0 ? CAS_INF_BURST_EN : 0);

	/* Set up interrupts. */
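	/*
	 * CAS_INTMASK is a disable mask: a set bit masks the
	 * corresponding interrupt, so the complement of the wanted set
	 * of interrupts is written here.
	 */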
	CAS_WRITE_4(sc, CAS_INTMASK,
	    ~(CAS_INTR_TX_INT_ME | CAS_INTR_TX_TAG_ERR |
	    CAS_INTR_RX_DONE | CAS_INTR_RX_BUF_NA | CAS_INTR_RX_TAG_ERR |
	    CAS_INTR_RX_COMP_FULL | CAS_INTR_RX_BUF_AEMPTY |
	    CAS_INTR_RX_COMP_AFULL | CAS_INTR_RX_LEN_MMATCH |
	    CAS_INTR_PCI_ERROR_INT
#ifdef CAS_DEBUG
	    | CAS_INTR_PCS_INT | CAS_INTR_MIF
#endif
	    ));
	/* Don't clear top level interrupts when CAS_STATUS_ALIAS is read. */
	CAS_WRITE_4(sc, CAS_CLEAR_ALIAS, 0);
	CAS_WRITE_4(sc, CAS_MAC_RX_MASK, ~CAS_MAC_RX_OVERFLOW);
	CAS_WRITE_4(sc, CAS_MAC_TX_MASK,
	    ~(CAS_MAC_TX_UNDERRUN | CAS_MAC_TX_MAX_PKT_ERR));
#ifdef CAS_DEBUG
	CAS_WRITE_4(sc, CAS_MAC_CTRL_MASK,
	    ~(CAS_MAC_CTRL_PAUSE_RCVD | CAS_MAC_CTRL_PAUSE |
	    CAS_MAC_CTRL_NON_PAUSE));
#else
	CAS_WRITE_4(sc, CAS_MAC_CTRL_MASK,
	    CAS_MAC_CTRL_PAUSE_RCVD | CAS_MAC_CTRL_PAUSE |
	    CAS_MAC_CTRL_NON_PAUSE);
#endif

	/* Enable PCI error interrupts. */
	CAS_WRITE_4(sc, CAS_ERROR_MASK,
	    ~(CAS_ERROR_DTRTO | CAS_ERROR_OTHER | CAS_ERROR_DMAW_ZERO |
	    CAS_ERROR_DMAR_ZERO | CAS_ERROR_RTRTO));

	/* Enable PCI error interrupts in BIM configuration. */
	CAS_WRITE_4(sc, CAS_BIM_CONF,
	    CAS_BIM_CONF_DPAR_EN | CAS_BIM_CONF_RMA_EN | CAS_BIM_CONF_RTA_EN);

	/*
	 * step 9.  ETX Configuration: encode transmit descriptor ring size,
	 * enable DMA and disable pre-interrupt writeback completion.
	 */
	v = cas_descsize(CAS_NTXDESC) << CAS_TX_CONF_DESC3_SHFT;
	CAS_WRITE_4(sc, CAS_TX_CONF, v | CAS_TX_CONF_TXDMA_EN |
	    CAS_TX_CONF_RDPP_DIS | CAS_TX_CONF_PICWB_DIS);

	/* step 10.  ERX Configuration */

	/*
	 * Encode receive completion and descriptor ring sizes, set the
	 * swivel offset.
	 */
	v = cas_rxcompsize(CAS_NRXCOMP) << CAS_RX_CONF_COMP_SHFT;
	v |= cas_descsize(CAS_NRXDESC) << CAS_RX_CONF_DESC_SHFT;
	if ((sc->sc_flags & CAS_REG_PLUS) != 0)
		v |= cas_descsize(CAS_NRXDESC2) << CAS_RX_CONF_DESC2_SHFT;
	CAS_WRITE_4(sc, CAS_RX_CONF,
	    v | (ETHER_ALIGN << CAS_RX_CONF_SOFF_SHFT));

	/* Set the PAUSE thresholds.  We use the maximum OFF threshold. */
	CAS_WRITE_4(sc, CAS_RX_PTHRS,
	    (111 << CAS_RX_PTHRS_XOFF_SHFT) | (15 << CAS_RX_PTHRS_XON_SHFT));

	/* RX blanking */
	CAS_WRITE_4(sc, CAS_RX_BLANK,
	    (15 << CAS_RX_BLANK_TIME_SHFT) | (5 << CAS_RX_BLANK_PKTS_SHFT));

	/* Set RX_COMP_AFULL threshold to half of the RX completions. */
	CAS_WRITE_4(sc, CAS_RX_AEMPTY_THRS,
	    (CAS_NRXCOMP / 2) << CAS_RX_AEMPTY_COMP_SHFT);

	/* Initialize the RX page size register as appropriate for 8k. */
	CAS_WRITE_4(sc, CAS_RX_PSZ,
	    (CAS_RX_PSZ_8K << CAS_RX_PSZ_SHFT) |
	    (4 << CAS_RX_PSZ_MB_CNT_SHFT) |
	    (CAS_RX_PSZ_MB_STRD_2K << CAS_RX_PSZ_MB_STRD_SHFT) |
	    (CAS_RX_PSZ_MB_OFF_64 << CAS_RX_PSZ_MB_OFF_SHFT));

	/* Disable RX random early detection. */
	CAS_WRITE_4(sc, CAS_RX_RED, 0);

	/* Zero the RX reassembly DMA table. */
	for (v = 0; v <= CAS_RX_REAS_DMA_ADDR_LC; v++) {
		CAS_WRITE_4(sc, CAS_RX_REAS_DMA_ADDR, v);
		CAS_WRITE_4(sc, CAS_RX_REAS_DMA_DATA_LO, 0);
		CAS_WRITE_4(sc, CAS_RX_REAS_DMA_DATA_MD, 0);
		CAS_WRITE_4(sc, CAS_RX_REAS_DMA_DATA_HI, 0);
	}

	/* Ensure the RX control FIFO and RX IPP FIFO addresses are zero. */
	CAS_WRITE_4(sc, CAS_RX_CTRL_FIFO, 0);
	CAS_WRITE_4(sc, CAS_RX_IPP_ADDR, 0);

	/* Finally, enable RX DMA. */
	CAS_WRITE_4(sc, CAS_RX_CONF,
	    CAS_READ_4(sc, CAS_RX_CONF) | CAS_RX_CONF_RXDMA_EN);

	/* step 11.  Configure Media. */

	/* step 12.  RX_MAC Configuration Register */
	v = CAS_READ_4(sc, CAS_MAC_RX_CONF);
	v &= ~(CAS_MAC_RX_CONF_STRPPAD | CAS_MAC_RX_CONF_EN);
	v |= CAS_MAC_RX_CONF_STRPFCS;
	sc->sc_mac_rxcfg = v;
	/*
	 * Clear the RX filter and reprogram it.  This will also set the
	 * current RX MAC configuration and enable it.
	 */
	cas_setladrf(sc);

	/* step 13.  TX_MAC Configuration Register */
	v = CAS_READ_4(sc, CAS_MAC_TX_CONF);
	v |= CAS_MAC_TX_CONF_EN;
	(void)cas_disable_tx(sc);
	CAS_WRITE_4(sc, CAS_MAC_TX_CONF, v);

	/* step 14.  Issue Transmit Pending command. */

	/* step 15.  Give the receiver a swift kick. */
	CAS_WRITE_4(sc, CAS_RX_KICK, CAS_NRXDESC - 4);
	CAS_WRITE_4(sc, CAS_RX_COMP_TAIL, 0);
	if ((sc->sc_flags & CAS_REG_PLUS) != 0)
		CAS_WRITE_4(sc, CAS_RX_KICK2, CAS_NRXDESC2 - 4);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

	mii_mediachg(sc->sc_mii);

	/* Start the one second timer. */
	sc->sc_wdog_timer = 0;
	callout_reset(&sc->sc_tick_ch, hz, cas_tick, sc);
}

static int
cas_load_txmbuf(struct cas_softc *sc, struct mbuf **m_head)
{
	bus_dma_segment_t txsegs[CAS_NTXSEGS];
	struct cas_txsoft *txs;
	struct ip *ip;
	struct mbuf *m;
	uint64_t cflags;
	int error, nexttx, nsegs, offset, seg;

	CAS_LOCK_ASSERT(sc, MA_OWNED);

	/* Get a work queue entry. */
	if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
		/* Ran out of descriptors. */
		return (ENOBUFS);
	}

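	/*
	 * For offloaded checksums the chip needs two byte offsets into
	 * the frame: where to start summing (the end of the IP header)
	 * and where to stuff the result; csum_data supplies the offset
	 * of the checksum field within the TCP/UDP header.
	 */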
	cflags = 0;
	if (((*m_head)->m_pkthdr.csum_flags & CAS_CSUM_FEATURES) != 0) {
		if (M_WRITABLE(*m_head) == 0) {
			m = m_dup(*m_head, M_NOWAIT);
			m_freem(*m_head);
			*m_head = m;
			if (m == NULL)
				return (ENOBUFS);
		}
		offset = sizeof(struct ether_header);
		m = m_pullup(*m_head, offset + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, caddr_t) + offset);
		offset += (ip->ip_hl << 2);
		cflags = (offset << CAS_TD_CKSUM_START_SHFT) |
		    ((offset + m->m_pkthdr.csum_data) <<
		    CAS_TD_CKSUM_STUFF_SHFT) | CAS_TD_CKSUM_EN;
		*m_head = m;
	}

	error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_NOWAIT, CAS_NTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag,
		    txs->txs_dmamap, *m_head, txsegs, &nsegs,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs <= CAS_NTXSEGS,
	    ("%s: too many DMA segments (%d)", __func__, nsegs));
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/*
	 * Ensure we have enough descriptors free to describe
	 * the packet.  Note, we always reserve one descriptor
	 * at the end of the ring as a termination point, in
	 * order to prevent wrap-around.
	 */
	if (nsegs > sc->sc_txfree - 1) {
		txs->txs_ndescs = 0;
		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
		return (ENOBUFS);
	}

	txs->txs_ndescs = nsegs;
	txs->txs_firstdesc = sc->sc_txnext;
	nexttx = txs->txs_firstdesc;
	for (seg = 0; seg < nsegs; seg++, nexttx = CAS_NEXTTX(nexttx)) {
#ifdef CAS_DEBUG
		CTR6(KTR_CAS,
		    "%s: mapping seg %d (txd %d), len %lx, addr %#lx (%#lx)",
		    __func__, seg, nexttx, txsegs[seg].ds_len,
		    txsegs[seg].ds_addr, htole64(txsegs[seg].ds_addr));
#endif
		sc->sc_txdescs[nexttx].cd_buf_ptr =
		    htole64(txsegs[seg].ds_addr);
		KASSERT(txsegs[seg].ds_len <
		    CAS_TD_BUF_LEN_MASK >> CAS_TD_BUF_LEN_SHFT,
		    ("%s: segment size too large!", __func__));
		sc->sc_txdescs[nexttx].cd_flags =
		    htole64(txsegs[seg].ds_len << CAS_TD_BUF_LEN_SHFT);
		txs->txs_lastdesc = nexttx;
	}

	/* Set EOF on the last descriptor. */
#ifdef CAS_DEBUG
	CTR3(KTR_CAS, "%s: end of frame at segment %d, TX %d",
	    __func__, seg, nexttx);
#endif
	sc->sc_txdescs[txs->txs_lastdesc].cd_flags |=
	    htole64(CAS_TD_END_OF_FRAME);

	/* Lastly set SOF on the first descriptor. */
#ifdef CAS_DEBUG
	CTR3(KTR_CAS, "%s: start of frame at segment %d, TX %d",
	    __func__, seg, nexttx);
#endif
	/*
	 * Request a completion interrupt roughly every two thirds of
	 * a ring's worth of segments.
	 */
	sc->sc_txwin += nsegs;
	if (sc->sc_txwin > CAS_MAXTXFREE * 2 / 3) {
		sc->sc_txwin = 0;
		sc->sc_txdescs[txs->txs_firstdesc].cd_flags |=
		    htole64(cflags | CAS_TD_START_OF_FRAME | CAS_TD_INT_ME);
	} else
		sc->sc_txdescs[txs->txs_firstdesc].cd_flags |=
		    htole64(cflags | CAS_TD_START_OF_FRAME);

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
	    BUS_DMASYNC_PREWRITE);

#ifdef CAS_DEBUG
	CTR4(KTR_CAS, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d",
	    __func__, txs->txs_firstdesc, txs->txs_lastdesc,
	    txs->txs_ndescs);
#endif
	STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
	STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
	txs->txs_mbuf = *m_head;

	sc->sc_txnext = CAS_NEXTTX(txs->txs_lastdesc);
	sc->sc_txfree -= txs->txs_ndescs;

	return (0);
}

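/*
 * Program the MAC registers; the registers guarded by CAS_INITED
 * survive a chip reset and only need to be set up once, while the
 * statistics counters are re-zeroed on every call.
 */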
static void
cas_init_regs(struct cas_softc *sc)
{
	int i;
	const u_char *laddr = if_getlladdr(sc->sc_ifp);

	CAS_LOCK_ASSERT(sc, MA_OWNED);

	/* These registers are not cleared on reset. */
	if ((sc->sc_flags & CAS_INITED) == 0) {
		/* magic values */
		CAS_WRITE_4(sc, CAS_MAC_IPG0, 0);
		CAS_WRITE_4(sc, CAS_MAC_IPG1, 8);
		CAS_WRITE_4(sc, CAS_MAC_IPG2, 4);

		/* min frame length */
		CAS_WRITE_4(sc, CAS_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* max frame length and max burst size */
		CAS_WRITE_4(sc, CAS_MAC_MAX_BF,
		    ((ETHER_MAX_LEN_JUMBO + ETHER_VLAN_ENCAP_LEN) <<
		    CAS_MAC_MAX_BF_FRM_SHFT) |
		    (0x2000 << CAS_MAC_MAX_BF_BST_SHFT));

		/* more magic values */
		CAS_WRITE_4(sc, CAS_MAC_PREAMBLE_LEN, 0x7);
		CAS_WRITE_4(sc, CAS_MAC_JAM_SIZE, 0x4);
		CAS_WRITE_4(sc, CAS_MAC_ATTEMPT_LIMIT, 0x10);
		CAS_WRITE_4(sc, CAS_MAC_CTRL_TYPE, 0x8808);

		/* random number seed */
		CAS_WRITE_4(sc, CAS_MAC_RANDOM_SEED,
		    ((laddr[5] << 8) | laddr[4]) & 0x3ff);

		/* secondary MAC addresses: 0:0:0:0:0:0 */
		for (i = CAS_MAC_ADDR3; i <= CAS_MAC_ADDR41;
		    i += CAS_MAC_ADDR4 - CAS_MAC_ADDR3)
			CAS_WRITE_4(sc, i, 0);

		/* MAC control address: 01:80:c2:00:00:01 */
		CAS_WRITE_4(sc, CAS_MAC_ADDR42, 0x0001);
		CAS_WRITE_4(sc, CAS_MAC_ADDR43, 0xc200);
		CAS_WRITE_4(sc, CAS_MAC_ADDR44, 0x0180);

		/* MAC filter address: 0:0:0:0:0:0 */
		CAS_WRITE_4(sc, CAS_MAC_AFILTER0, 0);
		CAS_WRITE_4(sc, CAS_MAC_AFILTER1, 0);
		CAS_WRITE_4(sc, CAS_MAC_AFILTER2, 0);
		CAS_WRITE_4(sc, CAS_MAC_AFILTER_MASK1_2, 0);
		CAS_WRITE_4(sc, CAS_MAC_AFILTER_MASK0, 0);

		/* Zero the hash table. */
		for (i = CAS_MAC_HASH0; i <= CAS_MAC_HASH15;
		    i += CAS_MAC_HASH1 - CAS_MAC_HASH0)
			CAS_WRITE_4(sc, i, 0);

		sc->sc_flags |= CAS_INITED;
	}

	/* Counters need to be zeroed. */
	CAS_WRITE_4(sc, CAS_MAC_NORM_COLL_CNT, 0);
	CAS_WRITE_4(sc, CAS_MAC_FIRST_COLL_CNT, 0);
	CAS_WRITE_4(sc, CAS_MAC_EXCESS_COLL_CNT, 0);
	CAS_WRITE_4(sc, CAS_MAC_LATE_COLL_CNT, 0);
	CAS_WRITE_4(sc, CAS_MAC_DEFER_TMR_CNT, 0);
	CAS_WRITE_4(sc, CAS_MAC_PEAK_ATTEMPTS, 0);
	CAS_WRITE_4(sc, CAS_MAC_RX_FRAME_COUNT, 0);
	CAS_WRITE_4(sc, CAS_MAC_RX_LEN_ERR_CNT, 0);
	CAS_WRITE_4(sc, CAS_MAC_RX_ALIGN_ERR, 0);
	CAS_WRITE_4(sc, CAS_MAC_RX_CRC_ERR_CNT, 0);
	CAS_WRITE_4(sc, CAS_MAC_RX_CODE_VIOL, 0);

	/* Set XOFF PAUSE time. */
	CAS_WRITE_4(sc, CAS_MAC_SPC, 0x1BF0 << CAS_MAC_SPC_TIME_SHFT);

	/* Set the station address. */
	CAS_WRITE_4(sc, CAS_MAC_ADDR0, (laddr[4] << 8) | laddr[5]);
	CAS_WRITE_4(sc, CAS_MAC_ADDR1, (laddr[2] << 8) | laddr[3]);
	CAS_WRITE_4(sc, CAS_MAC_ADDR2, (laddr[0] << 8) | laddr[1]);

	/* Enable MII outputs. */
	CAS_WRITE_4(sc, CAS_MAC_XIF_CONF, CAS_MAC_XIF_CONF_TX_OE);
}

static void
cas_tx_task(void *arg, int pending __unused)
{
	if_t ifp;

	ifp = (if_t)arg;
	cas_start(ifp);
}

static inline void
cas_txkick(struct cas_softc *sc)
{

	/*
	 * Update the TX kick register.  This register has to point to the
	 * descriptor after the last valid one and for optimum performance
	 * should be incremented in multiples of 4 (the DMA engine fetches/
	 * updates descriptors in batches of 4).
	 */
#ifdef CAS_DEBUG
	CTR3(KTR_CAS, "%s: %s: kicking TX %d",
	    device_get_name(sc->sc_dev), __func__, sc->sc_txnext);
#endif
	CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	CAS_WRITE_4(sc, CAS_TX_KICK3, sc->sc_txnext);
}

static void
cas_start(if_t ifp)
{
	struct cas_softc *sc = if_getsoftc(ifp);
	struct mbuf *m;
	int kicked, ntx;

	CAS_LOCK(sc);

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->sc_flags & CAS_LINK) == 0) {
		CAS_UNLOCK(sc);
		return;
	}

	if (sc->sc_txfree < CAS_MAXTXFREE / 4)
		cas_tint(sc);

#ifdef CAS_DEBUG
	CTR4(KTR_CAS, "%s: %s: txfree %d, txnext %d",
	    device_get_name(sc->sc_dev), __func__, sc->sc_txfree,
	    sc->sc_txnext);
#endif
	ntx = 0;
	kicked = 0;
	for (; !if_sendq_empty(ifp) && sc->sc_txfree > 1;) {
		m = if_dequeue(ifp);
		if (m == NULL)
			break;
		if (cas_load_txmbuf(sc, &m) != 0) {
			if (m == NULL)
				break;
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			if_sendq_prepend(ifp, m);
			break;
		}
		if ((sc->sc_txnext % 4) == 0) {
			cas_txkick(sc);
			kicked = 1;
		} else
			kicked = 0;
		ntx++;
		BPF_MTAP(ifp, m);
	}

	if (ntx > 0) {
		if (kicked == 0)
			cas_txkick(sc);
#ifdef CAS_DEBUG
		CTR2(KTR_CAS, "%s: packets enqueued, OWN on %d",
		    device_get_name(sc->sc_dev), sc->sc_txnext);
#endif

		/* Set a watchdog timer in case the chip flakes out. */
		sc->sc_wdog_timer = 5;
#ifdef CAS_DEBUG
		CTR3(KTR_CAS, "%s: %s: watchdog %d",
		    device_get_name(sc->sc_dev), __func__,
		    sc->sc_wdog_timer);
#endif
	}

	CAS_UNLOCK(sc);
}

static void
cas_tint(struct cas_softc *sc)
{
	if_t ifp = sc->sc_ifp;
	struct cas_txsoft *txs;
	int progress;
	uint32_t txlast;
#ifdef CAS_DEBUG
	int i;

	CAS_LOCK_ASSERT(sc, MA_OWNED);

	CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif

	/*
	 * Go through our TX list and free mbufs for those
	 * frames that have been transmitted.
	 */
	progress = 0;
	CAS_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
#ifdef CAS_DEBUG
		if ((if_getflags(ifp) & IFF_DEBUG) != 0) {
			printf("    txsoft %p transmit chain:\n", txs);
			for (i = txs->txs_firstdesc;; i = CAS_NEXTTX(i)) {
				printf("descriptor %d: ", i);
				printf("cd_flags: 0x%016llx\t",
				    (long long)le64toh(
				    sc->sc_txdescs[i].cd_flags));
				printf("cd_buf_ptr: 0x%016llx\n",
				    (long long)le64toh(
				    sc->sc_txdescs[i].cd_buf_ptr));
				if (i == txs->txs_lastdesc)
					break;
			}
		}
#endif

		/*
		 * In theory, we could harvest some descriptors before
		 * the ring is empty, but that's a bit complicated.
		 *
		 * CAS_TX_COMPn points to the last descriptor
		 * processed + 1.
		 */
		txlast = CAS_READ_4(sc, CAS_TX_COMP3);
#ifdef CAS_DEBUG
		CTR4(KTR_CAS, "%s: txs->txs_firstdesc = %d, "
		    "txs->txs_lastdesc = %d, txlast = %d",
		    __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast);
#endif
		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
			if ((txlast >= txs->txs_firstdesc) &&
			    (txlast <= txs->txs_lastdesc))
				break;
		} else {
			/* Ick -- this command wraps. */
			if ((txlast >= txs->txs_firstdesc) ||
			    (txlast <= txs->txs_lastdesc))
				break;
		}

#ifdef CAS_DEBUG
		CTR1(KTR_CAS, "%s: releasing a descriptor", __func__);
#endif
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		sc->sc_txfree += txs->txs_ndescs;

		bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
		if (txs->txs_mbuf != NULL) {
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		progress = 1;
	}

#ifdef CAS_DEBUG
	CTR5(KTR_CAS, "%s: CAS_TX_SM1 %x CAS_TX_SM2 %x CAS_TX_DESC_BASE %llx "
	    "CAS_TX_COMP3 %x",
	    __func__, CAS_READ_4(sc, CAS_TX_SM1), CAS_READ_4(sc, CAS_TX_SM2),
	    ((long long)CAS_READ_4(sc, CAS_TX_DESC3_BASE_HI) << 32) |
	    CAS_READ_4(sc, CAS_TX_DESC3_BASE_LO),
	    CAS_READ_4(sc, CAS_TX_COMP3));
#endif

	if (progress) {
		/* We freed some descriptors, so reset IFF_DRV_OACTIVE. */
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		if (STAILQ_EMPTY(&sc->sc_txdirtyq))
			sc->sc_wdog_timer = 0;
	}

#ifdef CAS_DEBUG
	CTR3(KTR_CAS, "%s: %s: watchdog %d",
	    device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer);
#endif
}

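/*
 * cas_rint() may hand packets to if_input(), which has to be called
 * from within a network epoch section; the taskqueue path already runs
 * in one courtesy of NET_TASK_INIT(), so only this callout wrapper has
 * to enter it explicitly.
 */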
static void
cas_rint_timeout(void *arg)
{
	struct epoch_tracker et;
	struct cas_softc *sc = arg;

	CAS_LOCK_ASSERT(sc, MA_OWNED);

	NET_EPOCH_ENTER(et);
	cas_rint(sc);
	NET_EPOCH_EXIT(et);
}

static void
cas_rint(struct cas_softc *sc)
{
	struct cas_rxdsoft *rxds, *rxds2;
	if_t ifp = sc->sc_ifp;
	struct mbuf *m, *m2;
	uint64_t word1, word2, word3 __unused, word4;
	uint32_t rxhead;
	u_int idx, idx2, len, off, skip;

	CAS_LOCK_ASSERT(sc, MA_OWNED);

	callout_stop(&sc->sc_rx_ch);

#ifdef CAS_DEBUG
	CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif

#define	PRINTWORD(n, delimiter)						\
	printf("word ## n: 0x%016llx%c", (long long)word ## n, delimiter)

#define	SKIPASSERT(n)							\
	KASSERT(sc->sc_rxcomps[sc->sc_rxcptr].crc_word ## n == 0,	\
	    ("%s: word ## n not 0", __func__))

#define	WORDTOH(n)							\
	word ## n = le64toh(sc->sc_rxcomps[sc->sc_rxcptr].crc_word ## n)

	/*
	 * Read the completion head register once.  This limits
	 * how long the following loop can execute.
	 */
	rxhead = CAS_READ_4(sc, CAS_RX_COMP_HEAD);
#ifdef CAS_DEBUG
	CTR4(KTR_CAS, "%s: sc->sc_rxcptr %d, sc->sc_rxdptr %d, head %d",
	    __func__, sc->sc_rxcptr, sc->sc_rxdptr, rxhead);
#endif
	skip = 0;
	CAS_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	for (; sc->sc_rxcptr != rxhead;
	    sc->sc_rxcptr = CAS_NEXTRXCOMP(sc->sc_rxcptr)) {
		if (skip != 0) {
			SKIPASSERT(1);
			SKIPASSERT(2);
			SKIPASSERT(3);

			--skip;
			goto skip;
		}

		WORDTOH(1);
		WORDTOH(2);
		WORDTOH(3);
		WORDTOH(4);

#ifdef CAS_DEBUG
		if ((if_getflags(ifp) & IFF_DEBUG) != 0) {
			printf("    completion %d: ", sc->sc_rxcptr);
			PRINTWORD(1, '\t');
			PRINTWORD(2, '\t');
			PRINTWORD(3, '\t');
			PRINTWORD(4, '\n');
		}
#endif

		if (__predict_false(
		    (word1 & CAS_RC1_TYPE_MASK) == CAS_RC1_TYPE_HW ||
		    (word4 & CAS_RC4_ZERO) != 0)) {
			/*
			 * The descriptor is still marked as owned, although
			 * it is supposed to have completed.  This has been
			 * observed on some machines.  Just exiting here
			 * might leave the packet sitting around until another
			 * one arrives to trigger a new interrupt, which is
			 * generally undesirable, so set up a timeout.
			 */
			callout_reset(&sc->sc_rx_ch, CAS_RXOWN_TICKS,
			    cas_rint_timeout, sc);
			break;
		}

		if (__predict_false(
		    (word4 & (CAS_RC4_BAD | CAS_RC4_LEN_MMATCH)) != 0)) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			device_printf(sc->sc_dev,
			    "receive error: CRC error\n");
			continue;
		}

		KASSERT(CAS_GET(word1, CAS_RC1_DATA_SIZE) == 0 ||
		    CAS_GET(word2, CAS_RC2_HDR_SIZE) == 0,
		    ("%s: data and header present", __func__));
		KASSERT((word1 & CAS_RC1_SPLIT_PKT) == 0 ||
		    CAS_GET(word2, CAS_RC2_HDR_SIZE) == 0,
		    ("%s: split and header present", __func__));
		KASSERT(CAS_GET(word1, CAS_RC1_DATA_SIZE) == 0 ||
		    (word1 & CAS_RC1_RELEASE_HDR) == 0,
		    ("%s: data present but header release", __func__));
		KASSERT(CAS_GET(word2, CAS_RC2_HDR_SIZE) == 0 ||
		    (word1 & CAS_RC1_RELEASE_DATA) == 0,
		    ("%s: header present but data release", __func__));

		if ((len = CAS_GET(word2, CAS_RC2_HDR_SIZE)) != 0) {
			idx = CAS_GET(word2, CAS_RC2_HDR_INDEX);
			off = CAS_GET(word2, CAS_RC2_HDR_OFF);
#ifdef CAS_DEBUG
			CTR4(KTR_CAS, "%s: hdr at idx %d, off %d, len %d",
			    __func__, idx, off, len);
#endif
			rxds = &sc->sc_rxdsoft[idx];
			MGETHDR(m, M_NOWAIT, MT_DATA);
			if (m != NULL) {
				refcount_acquire(&rxds->rxds_refcount);
				bus_dmamap_sync(sc->sc_rdmatag,
				    rxds->rxds_dmamap, BUS_DMASYNC_POSTREAD);
				m_extadd(m, (char *)rxds->rxds_buf +
				    off * 256 + ETHER_ALIGN, len, cas_free,
				    sc, (void *)(uintptr_t)idx,
				    M_RDONLY, EXT_NET_DRV);
				if ((m->m_flags & M_EXT) == 0) {
					m_freem(m);
					m = NULL;
				}
			}
			if (m != NULL) {
				m->m_pkthdr.rcvif = ifp;
				m->m_pkthdr.len = m->m_len = len;
				if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
				if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
					cas_rxcksum(m, CAS_GET(word4,
					    CAS_RC4_TCP_CSUM));
				/* Pass it on. */
				CAS_UNLOCK(sc);
				if_input(ifp, m);
				CAS_LOCK(sc);
			} else
				if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);

			if ((word1 & CAS_RC1_RELEASE_HDR) != 0 &&
			    refcount_release(&rxds->rxds_refcount) != 0)
				cas_add_rxdesc(sc, idx);
		} else if ((len = CAS_GET(word1, CAS_RC1_DATA_SIZE)) != 0) {
			idx = CAS_GET(word1, CAS_RC1_DATA_INDEX);
			off = CAS_GET(word1, CAS_RC1_DATA_OFF);
#ifdef CAS_DEBUG
			CTR4(KTR_CAS, "%s: data at idx %d, off %d, len %d",
			    __func__, idx, off, len);
#endif
			rxds = &sc->sc_rxdsoft[idx];
			MGETHDR(m, M_NOWAIT, MT_DATA);
			if (m != NULL) {
				refcount_acquire(&rxds->rxds_refcount);
				off += ETHER_ALIGN;
				m->m_len = min(CAS_PAGE_SIZE - off, len);
				bus_dmamap_sync(sc->sc_rdmatag,
				    rxds->rxds_dmamap, BUS_DMASYNC_POSTREAD);
				m_extadd(m, (char *)rxds->rxds_buf + off,
				    m->m_len, cas_free, sc,
				    (void *)(uintptr_t)idx, M_RDONLY,
				    EXT_NET_DRV);
				if ((m->m_flags & M_EXT) == 0) {
					m_freem(m);
					m = NULL;
				}
			}
			idx2 = 0;
			m2 = NULL;
			rxds2 = NULL;
			if ((word1 & CAS_RC1_SPLIT_PKT) != 0) {
				KASSERT((word1 & CAS_RC1_RELEASE_NEXT) != 0,
				    ("%s: split but no release next",
				    __func__));

				idx2 = CAS_GET(word2, CAS_RC2_NEXT_INDEX);
#ifdef CAS_DEBUG
				CTR2(KTR_CAS, "%s: split at idx %d",
				    __func__, idx2);
#endif
				rxds2 = &sc->sc_rxdsoft[idx2];
				if (m != NULL) {
					MGET(m2, M_NOWAIT, MT_DATA);
					if (m2 != NULL) {
						refcount_acquire(
						    &rxds2->rxds_refcount);
						m2->m_len = len - m->m_len;
						bus_dmamap_sync(
						    sc->sc_rdmatag,
						    rxds2->rxds_dmamap,
						    BUS_DMASYNC_POSTREAD);
						m_extadd(m2,
						    (char *)rxds2->rxds_buf,
						    m2->m_len, cas_free, sc,
						    (void *)(uintptr_t)idx2,
						    M_RDONLY, EXT_NET_DRV);
						if ((m2->m_flags & M_EXT) ==
						    0) {
							m_freem(m2);
							m2 = NULL;
						}
					}
				}
				if (m2 != NULL)
					m->m_next = m2;
				else if (m != NULL) {
					m_freem(m);
					m = NULL;
				}
			}
			if (m != NULL) {
				m->m_pkthdr.rcvif = ifp;
				m->m_pkthdr.len = len;
				if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
				if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
					cas_rxcksum(m, CAS_GET(word4,
					    CAS_RC4_TCP_CSUM));
				/* Pass it on. */
				CAS_UNLOCK(sc);
				if_input(ifp, m);
				CAS_LOCK(sc);
			} else
				if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);

			if ((word1 & CAS_RC1_RELEASE_DATA) != 0 &&
			    refcount_release(&rxds->rxds_refcount) != 0)
				cas_add_rxdesc(sc, idx);
			if ((word1 & CAS_RC1_SPLIT_PKT) != 0 &&
			    refcount_release(&rxds2->rxds_refcount) != 0)
				cas_add_rxdesc(sc, idx2);
		}

		skip = CAS_GET(word1, CAS_RC1_SKIP);

skip:
		cas_rxcompinit(&sc->sc_rxcomps[sc->sc_rxcptr]);
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
			break;
	}
	CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	CAS_WRITE_4(sc, CAS_RX_COMP_TAIL, sc->sc_rxcptr);

#undef PRINTWORD
#undef SKIPASSERT
#undef WORDTOH

#ifdef CAS_DEBUG
	CTR4(KTR_CAS, "%s: done sc->sc_rxcptr %d, sc->sc_rxdptr %d, head %d",
	    __func__, sc->sc_rxcptr, sc->sc_rxdptr,
	    CAS_READ_4(sc, CAS_RX_COMP_HEAD));
#endif
}

static void
cas_free(struct mbuf *m)
{
	struct cas_rxdsoft *rxds;
	struct cas_softc *sc;
	u_int idx, locked;

	sc = m->m_ext.ext_arg1;
	idx = (uintptr_t)m->m_ext.ext_arg2;
	rxds = &sc->sc_rxdsoft[idx];
	if (refcount_release(&rxds->rxds_refcount) == 0)
		return;

	/*
	 * NB: this function can be called via m_freem(9) within
	 * this driver!
	 */
	if ((locked = CAS_LOCK_OWNED(sc)) == 0)
		CAS_LOCK(sc);
	cas_add_rxdesc(sc, idx);
	if (locked == 0)
		CAS_UNLOCK(sc);
}

static inline void
cas_add_rxdesc(struct cas_softc *sc, u_int idx)
{

	CAS_LOCK_ASSERT(sc, MA_OWNED);

	bus_dmamap_sync(sc->sc_rdmatag, sc->sc_rxdsoft[idx].rxds_dmamap,
	    BUS_DMASYNC_PREREAD);
	CAS_UPDATE_RXDESC(sc, sc->sc_rxdptr, idx);
	sc->sc_rxdptr = CAS_NEXTRXDESC(sc->sc_rxdptr);

	/*
	 * Update the RX kick register.  This register has to point to the
	 * descriptor after the last valid one (before the current batch)
	 * and for optimum performance should be incremented in multiples
	 * of 4 (the DMA engine fetches/updates descriptors in batches of 4).
	 */
	if ((sc->sc_rxdptr % 4) == 0) {
		CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		CAS_WRITE_4(sc, CAS_RX_KICK,
		    (sc->sc_rxdptr + CAS_NRXDESC - 4) & CAS_NRXDESC_MASK);
	}
}

static void
cas_eint(struct cas_softc *sc, u_int status)
{
	if_t ifp = sc->sc_ifp;

	CAS_LOCK_ASSERT(sc, MA_OWNED);

	if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);

	device_printf(sc->sc_dev, "%s: status 0x%x", __func__, status);
	if ((status & CAS_INTR_PCI_ERROR_INT) != 0) {
		status = CAS_READ_4(sc, CAS_ERROR_STATUS);
		printf(", PCI bus error 0x%x", status);
		if ((status & CAS_ERROR_OTHER) != 0) {
			status = pci_read_config(sc->sc_dev, PCIR_STATUS, 2);
			printf(", PCI status 0x%x", status);
			pci_write_config(sc->sc_dev, PCIR_STATUS, status, 2);
		}
	}
	printf("\n");

	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	cas_init_locked(sc);
	if (!if_sendq_empty(ifp))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task);
}

static int
cas_intr(void *v)
{
	struct cas_softc *sc = v;

	if (__predict_false((CAS_READ_4(sc, CAS_STATUS_ALIAS) &
	    CAS_INTR_SUMMARY) == 0))
		return (FILTER_STRAY);

	/* Disable interrupts. */
	CAS_WRITE_4(sc, CAS_INTMASK, 0xffffffff);
	taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task);

	return (FILTER_HANDLED);
}


static void
cas_eint(struct cas_softc *sc, u_int status)
{
	if_t ifp = sc->sc_ifp;

	CAS_LOCK_ASSERT(sc, MA_OWNED);

	if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);

	device_printf(sc->sc_dev, "%s: status 0x%x", __func__, status);
	if ((status & CAS_INTR_PCI_ERROR_INT) != 0) {
		status = CAS_READ_4(sc, CAS_ERROR_STATUS);
		printf(", PCI bus error 0x%x", status);
		if ((status & CAS_ERROR_OTHER) != 0) {
			status = pci_read_config(sc->sc_dev, PCIR_STATUS, 2);
			printf(", PCI status 0x%x", status);
			pci_write_config(sc->sc_dev, PCIR_STATUS, status, 2);
		}
	}
	printf("\n");

	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	cas_init_locked(sc);
	if (!if_sendq_empty(ifp))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task);
}

static int
cas_intr(void *v)
{
	struct cas_softc *sc = v;

	if (__predict_false((CAS_READ_4(sc, CAS_STATUS_ALIAS) &
	    CAS_INTR_SUMMARY) == 0))
		return (FILTER_STRAY);

	/* Disable interrupts. */
	CAS_WRITE_4(sc, CAS_INTMASK, 0xffffffff);
	taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task);

	return (FILTER_HANDLED);
}
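
/*
 * cas_intr() above is registered as an interrupt filter: it only samples
 * CAS_STATUS_ALIAS, masks the chip via CAS_INTMASK and defers all real
 * work to cas_intr_task() on the driver taskqueue.  For illustration, a
 * sketch of how such a filter/task pair is wired up, mirroring the
 * bus_setup_intr() call in cas_pci_attach() below (not additional code):
 */
#if 0
	error = bus_setup_intr(dev, sc->sc_res[CAS_RES_INTR],
	    INTR_TYPE_NET | INTR_MPSAFE, cas_intr /* filter */,
	    NULL /* no interrupt thread handler */, sc, &sc->sc_ih);
#endif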

static void
cas_intr_task(void *arg, int pending __unused)
{
	struct cas_softc *sc = arg;
	if_t ifp = sc->sc_ifp;
	uint32_t status, status2;

	CAS_LOCK_ASSERT(sc, MA_NOTOWNED);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	status = CAS_READ_4(sc, CAS_STATUS);
	if (__predict_false((status & CAS_INTR_SUMMARY) == 0))
		goto done;

	CAS_LOCK(sc);
#ifdef CAS_DEBUG
	CTR4(KTR_CAS, "%s: %s: cplt %x, status %x",
	    device_get_name(sc->sc_dev), __func__,
	    (status >> CAS_STATUS_TX_COMP3_SHFT), (u_int)status);

	/*
	 * PCS interrupts must be cleared, otherwise no traffic is passed!
	 */
	if ((status & CAS_INTR_PCS_INT) != 0) {
		/* The reads themselves clear the latched PCS interrupt. */
		status2 =
		    CAS_READ_4(sc, CAS_PCS_INTR_STATUS) |
		    CAS_READ_4(sc, CAS_PCS_INTR_STATUS);
		if ((status2 & CAS_PCS_INTR_LINK) != 0)
			device_printf(sc->sc_dev,
			    "%s: PCS link status changed\n", __func__);
	}
	if ((status & CAS_MAC_CTRL_STATUS) != 0) {
		status2 = CAS_READ_4(sc, CAS_MAC_CTRL_STATUS);
		if ((status2 & CAS_MAC_CTRL_PAUSE) != 0)
			device_printf(sc->sc_dev,
			    "%s: PAUSE received (PAUSE time %d slots)\n",
			    __func__,
			    (status2 & CAS_MAC_CTRL_STATUS_PT_MASK) >>
			    CAS_MAC_CTRL_STATUS_PT_SHFT);
		if ((status2 & CAS_MAC_CTRL_PAUSE) != 0)
			device_printf(sc->sc_dev,
			    "%s: transited to PAUSE state\n", __func__);
		if ((status2 & CAS_MAC_CTRL_NON_PAUSE) != 0)
			device_printf(sc->sc_dev,
			    "%s: transited to non-PAUSE state\n", __func__);
	}
	if ((status & CAS_INTR_MIF) != 0)
		device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__);
#endif

	if (__predict_false((status &
	    (CAS_INTR_TX_TAG_ERR | CAS_INTR_RX_TAG_ERR |
	    CAS_INTR_RX_LEN_MMATCH | CAS_INTR_PCI_ERROR_INT)) != 0)) {
		cas_eint(sc, status);
		CAS_UNLOCK(sc);
		return;
	}

	if (__predict_false((status & CAS_INTR_TX_MAC_INT) != 0)) {
		status2 = CAS_READ_4(sc, CAS_MAC_TX_STATUS);
		if ((status2 &
		    (CAS_MAC_TX_UNDERRUN | CAS_MAC_TX_MAX_PKT_ERR)) != 0)
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		else if ((status2 & ~CAS_MAC_TX_FRAME_XMTD) != 0)
			device_printf(sc->sc_dev,
			    "MAC TX fault, status %x\n", status2);
	}

	if (__predict_false((status & CAS_INTR_RX_MAC_INT) != 0)) {
		status2 = CAS_READ_4(sc, CAS_MAC_RX_STATUS);
		if ((status2 & CAS_MAC_RX_OVERFLOW) != 0)
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		else if ((status2 & ~CAS_MAC_RX_FRAME_RCVD) != 0)
			device_printf(sc->sc_dev,
			    "MAC RX fault, status %x\n", status2);
	}

	if ((status &
	    (CAS_INTR_RX_DONE | CAS_INTR_RX_BUF_NA | CAS_INTR_RX_COMP_FULL |
	    CAS_INTR_RX_BUF_AEMPTY | CAS_INTR_RX_COMP_AFULL)) != 0) {
		cas_rint(sc);
#ifdef CAS_DEBUG
		if (__predict_false((status &
		    (CAS_INTR_RX_BUF_NA | CAS_INTR_RX_COMP_FULL |
		    CAS_INTR_RX_BUF_AEMPTY | CAS_INTR_RX_COMP_AFULL)) != 0))
			device_printf(sc->sc_dev,
			    "RX fault, status %x\n", status);
#endif
	}

	if ((status &
	    (CAS_INTR_TX_INT_ME | CAS_INTR_TX_ALL | CAS_INTR_TX_DONE)) != 0)
		cas_tint(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		CAS_UNLOCK(sc);
		return;
	} else if (!if_sendq_empty(ifp))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task);
	CAS_UNLOCK(sc);

	status = CAS_READ_4(sc, CAS_STATUS_ALIAS);
	if (__predict_false((status & CAS_INTR_SUMMARY) != 0)) {
		taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task);
		return;
	}

 done:
	/* Re-enable interrupts. */
	CAS_WRITE_4(sc, CAS_INTMASK,
	    ~(CAS_INTR_TX_INT_ME | CAS_INTR_TX_TAG_ERR |
	    CAS_INTR_RX_DONE | CAS_INTR_RX_BUF_NA | CAS_INTR_RX_TAG_ERR |
	    CAS_INTR_RX_COMP_FULL | CAS_INTR_RX_BUF_AEMPTY |
	    CAS_INTR_RX_COMP_AFULL | CAS_INTR_RX_LEN_MMATCH |
	    CAS_INTR_PCI_ERROR_INT
#ifdef CAS_DEBUG
	    | CAS_INTR_PCS_INT | CAS_INTR_MIF
#endif
	    ));
}

static void
cas_watchdog(struct cas_softc *sc)
{
	if_t ifp = sc->sc_ifp;

	CAS_LOCK_ASSERT(sc, MA_OWNED);

#ifdef CAS_DEBUG
	CTR4(KTR_CAS,
	    "%s: CAS_RX_CONF %x CAS_MAC_RX_STATUS %x CAS_MAC_RX_CONF %x",
	    __func__, CAS_READ_4(sc, CAS_RX_CONF),
	    CAS_READ_4(sc, CAS_MAC_RX_STATUS),
	    CAS_READ_4(sc, CAS_MAC_RX_CONF));
	CTR4(KTR_CAS,
	    "%s: CAS_TX_CONF %x CAS_MAC_TX_STATUS %x CAS_MAC_TX_CONF %x",
	    __func__, CAS_READ_4(sc, CAS_TX_CONF),
	    CAS_READ_4(sc, CAS_MAC_TX_STATUS),
	    CAS_READ_4(sc, CAS_MAC_TX_CONF));
#endif

	if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
		return;

	if ((sc->sc_flags & CAS_LINK) != 0)
		device_printf(sc->sc_dev, "device timeout\n");
	else if (bootverbose)
		device_printf(sc->sc_dev, "device timeout (no link)\n");
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

	/* Try to get more packets going. */
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	cas_init_locked(sc);
	if (!if_sendq_empty(ifp))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task);
}

static void
cas_mifinit(struct cas_softc *sc)
{

	/* Configure the MIF in frame mode. */
	CAS_WRITE_4(sc, CAS_MIF_CONF,
	    CAS_READ_4(sc, CAS_MIF_CONF) & ~CAS_MIF_CONF_BB_MODE);
	CAS_BARRIER(sc, CAS_MIF_CONF, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}

/*
 * MII interface
 *
 * The MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
 */
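
/*
 * An illustration of frame mode as implemented below (a sketch using the
 * driver's own register definitions, not extra functionality): the opcode,
 * PHY address and register number are packed into a single 32-bit frame,
 * written to CAS_MIF_FRAME, and completion is detected by polling for the
 * turnaround LSB to read back as 1.
 */
#if 0
	/* Read PHY 1, register MII_BMSR. */
	v = CAS_MIF_FRAME_READ |
	    (1 << CAS_MIF_FRAME_PHY_SHFT) |
	    (MII_BMSR << CAS_MIF_FRAME_REG_SHFT);
	CAS_WRITE_4(sc, CAS_MIF_FRAME, v);
	/* Poll CAS_MIF_FRAME_TA_LSB; the result is in CAS_MIF_FRAME_DATA. */
#endif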

static int
cas_mii_readreg(device_t dev, int phy, int reg)
{
	struct cas_softc *sc;
	int n;
	uint32_t v;

#ifdef CAS_DEBUG_PHY
	printf("%s: phy %d reg %d\n", __func__, phy, reg);
#endif

	sc = device_get_softc(dev);
	if ((sc->sc_flags & CAS_SERDES) != 0) {
		switch (reg) {
		case MII_BMCR:
			reg = CAS_PCS_CTRL;
			break;
		case MII_BMSR:
			reg = CAS_PCS_STATUS;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			return (0);
		case MII_ANAR:
			reg = CAS_PCS_ANAR;
			break;
		case MII_ANLPAR:
			reg = CAS_PCS_ANLPAR;
			break;
		case MII_EXTSR:
			return (EXTSR_1000XFDX | EXTSR_1000XHDX);
		default:
			device_printf(sc->sc_dev,
			    "%s: unhandled register %d\n", __func__, reg);
			return (0);
		}
		return (CAS_READ_4(sc, reg));
	}

	/* Construct the frame command. */
	v = CAS_MIF_FRAME_READ |
	    (phy << CAS_MIF_FRAME_PHY_SHFT) |
	    (reg << CAS_MIF_FRAME_REG_SHFT);

	CAS_WRITE_4(sc, CAS_MIF_FRAME, v);
	CAS_BARRIER(sc, CAS_MIF_FRAME, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = CAS_READ_4(sc, CAS_MIF_FRAME);
		if (v & CAS_MIF_FRAME_TA_LSB)
			return (v & CAS_MIF_FRAME_DATA);
	}

	device_printf(sc->sc_dev, "%s: timed out\n", __func__);
	return (0);
}

static int
cas_mii_writereg(device_t dev, int phy, int reg, int val)
{
	struct cas_softc *sc;
	int n;
	uint32_t v;

#ifdef CAS_DEBUG_PHY
	printf("%s: phy %d reg %d val %x\n", __func__, phy, reg, val);
#endif

	sc = device_get_softc(dev);
	if ((sc->sc_flags & CAS_SERDES) != 0) {
		switch (reg) {
		case MII_BMSR:
			reg = CAS_PCS_STATUS;
			break;
		case MII_BMCR:
			reg = CAS_PCS_CTRL;
			if ((val & CAS_PCS_CTRL_RESET) == 0)
				break;
			CAS_WRITE_4(sc, CAS_PCS_CTRL, val);
			CAS_BARRIER(sc, CAS_PCS_CTRL, 4,
			    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
			if (!cas_bitwait(sc, CAS_PCS_CTRL,
			    CAS_PCS_CTRL_RESET, 0))
				device_printf(sc->sc_dev,
				    "cannot reset PCS\n");
			/* FALLTHROUGH */
		case MII_ANAR:
			CAS_WRITE_4(sc, CAS_PCS_CONF, 0);
			CAS_BARRIER(sc, CAS_PCS_CONF, 4,
			    BUS_SPACE_BARRIER_WRITE);
			CAS_WRITE_4(sc, CAS_PCS_ANAR, val);
			CAS_BARRIER(sc, CAS_PCS_ANAR, 4,
			    BUS_SPACE_BARRIER_WRITE);
			CAS_WRITE_4(sc, CAS_PCS_SERDES_CTRL,
			    CAS_PCS_SERDES_CTRL_ESD);
			CAS_BARRIER(sc, CAS_PCS_CONF, 4,
			    BUS_SPACE_BARRIER_WRITE);
			CAS_WRITE_4(sc, CAS_PCS_CONF,
			    CAS_PCS_CONF_EN);
			CAS_BARRIER(sc, CAS_PCS_CONF, 4,
			    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
			return (0);
		case MII_ANLPAR:
			reg = CAS_PCS_ANLPAR;
			break;
		default:
			device_printf(sc->sc_dev,
			    "%s: unhandled register %d\n", __func__, reg);
			return (0);
		}
		CAS_WRITE_4(sc, reg, val);
		CAS_BARRIER(sc, reg, 4,
		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
		return (0);
	}

	/* Construct the frame command. */
	v = CAS_MIF_FRAME_WRITE |
	    (phy << CAS_MIF_FRAME_PHY_SHFT) |
	    (reg << CAS_MIF_FRAME_REG_SHFT) |
	    (val & CAS_MIF_FRAME_DATA);

	CAS_WRITE_4(sc, CAS_MIF_FRAME, v);
	CAS_BARRIER(sc, CAS_MIF_FRAME, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = CAS_READ_4(sc, CAS_MIF_FRAME);
		if (v & CAS_MIF_FRAME_TA_LSB)
			return (1);
	}

	device_printf(sc->sc_dev, "%s: timed out\n", __func__);
	return (0);
}

static void
cas_mii_statchg(device_t dev)
{
	struct cas_softc *sc;
	if_t ifp;
	int gigabit;
	uint32_t rxcfg, txcfg, v;

	sc = device_get_softc(dev);
	ifp = sc->sc_ifp;

	CAS_LOCK_ASSERT(sc, MA_OWNED);

#ifdef CAS_DEBUG
	if ((if_getflags(ifp) & IFF_DEBUG) != 0)
		device_printf(sc->sc_dev, "%s: status change\n", __func__);
#endif

	if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
	    IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
		sc->sc_flags |= CAS_LINK;
	else
		sc->sc_flags &= ~CAS_LINK;

	switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) {
	case IFM_1000_SX:
	case IFM_1000_LX:
	case IFM_1000_CX:
	case IFM_1000_T:
		gigabit = 1;
		break;
	default:
		gigabit = 0;
	}

	/*
	 * The configuration done here corresponds to the steps F) and
	 * G) and as far as enabling of RX and TX MAC goes also step H)
	 * of the initialization sequence outlined in section 11.2.1 of
	 * the Cassini+ ASIC Specification.
	 */

	rxcfg = sc->sc_mac_rxcfg;
	rxcfg &= ~CAS_MAC_RX_CONF_CARR;
	txcfg = CAS_MAC_TX_CONF_EN_IPG0 | CAS_MAC_TX_CONF_NGU |
	    CAS_MAC_TX_CONF_NGUL;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		txcfg |= CAS_MAC_TX_CONF_ICARR | CAS_MAC_TX_CONF_ICOLLIS;
	else if (gigabit != 0) {
		rxcfg |= CAS_MAC_RX_CONF_CARR;
		txcfg |= CAS_MAC_TX_CONF_CARR;
	}
	(void)cas_disable_tx(sc);
	CAS_WRITE_4(sc, CAS_MAC_TX_CONF, txcfg);
	(void)cas_disable_rx(sc);
	CAS_WRITE_4(sc, CAS_MAC_RX_CONF, rxcfg);

	v = CAS_READ_4(sc, CAS_MAC_CTRL_CONF) &
	    ~(CAS_MAC_CTRL_CONF_TXP | CAS_MAC_CTRL_CONF_RXP);
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
	    IFM_ETH_RXPAUSE) != 0)
		v |= CAS_MAC_CTRL_CONF_RXP;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) &
	    IFM_ETH_TXPAUSE) != 0)
		v |= CAS_MAC_CTRL_CONF_TXP;
	CAS_WRITE_4(sc, CAS_MAC_CTRL_CONF, v);

	/*
	 * All supported chips have a bug causing incorrect checksums
	 * to be calculated when letting them strip the FCS in half-
	 * duplex mode.  In theory we could disable FCS stripping and
	 * manually adjust the checksum accordingly.  It seems to make
	 * more sense to optimize for the common case and just disable
	 * hardware checksumming in half-duplex mode though.
	 */
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0) {
		if_setcapenablebit(ifp, 0, IFCAP_HWCSUM);
		if_sethwassist(ifp, 0);
	} else if ((sc->sc_flags & CAS_NO_CSUM) == 0) {
		if_setcapenable(ifp, if_getcapabilities(ifp));
		if_sethwassist(ifp, CAS_CSUM_FEATURES);
	}

	if (sc->sc_variant == CAS_SATURN) {
		if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0)
			/* silicon bug workaround */
			CAS_WRITE_4(sc, CAS_MAC_PREAMBLE_LEN, 0x41);
		else
			CAS_WRITE_4(sc, CAS_MAC_PREAMBLE_LEN, 0x7);
	}

	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 &&
	    gigabit != 0)
		CAS_WRITE_4(sc, CAS_MAC_SLOT_TIME,
		    CAS_MAC_SLOT_TIME_CARR);
	else
		CAS_WRITE_4(sc, CAS_MAC_SLOT_TIME,
		    CAS_MAC_SLOT_TIME_NORM);

	/* XIF Configuration */
	v = CAS_MAC_XIF_CONF_TX_OE | CAS_MAC_XIF_CONF_LNKLED;
	if ((sc->sc_flags & CAS_SERDES) == 0) {
		if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0)
			v |= CAS_MAC_XIF_CONF_NOECHO;
		v |= CAS_MAC_XIF_CONF_BUF_OE;
	}
	if (gigabit != 0)
		v |= CAS_MAC_XIF_CONF_GMII;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		v |= CAS_MAC_XIF_CONF_FDXLED;
	CAS_WRITE_4(sc, CAS_MAC_XIF_CONF, v);

	sc->sc_mac_rxcfg = rxcfg;
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 &&
	    (sc->sc_flags & CAS_LINK) != 0) {
		CAS_WRITE_4(sc, CAS_MAC_TX_CONF,
		    txcfg | CAS_MAC_TX_CONF_EN);
		CAS_WRITE_4(sc, CAS_MAC_RX_CONF,
		    rxcfg | CAS_MAC_RX_CONF_EN);
	}
}

static int
cas_mediachange(if_t ifp)
{
	struct cas_softc *sc = if_getsoftc(ifp);
	int error;

	/* XXX add support for serial media. */

	CAS_LOCK(sc);
	error = mii_mediachg(sc->sc_mii);
	CAS_UNLOCK(sc);
	return (error);
}

static void
cas_mediastatus(if_t ifp, struct ifmediareq *ifmr)
{
	struct cas_softc *sc = if_getsoftc(ifp);

	CAS_LOCK(sc);
	if ((if_getflags(ifp) & IFF_UP) == 0) {
		CAS_UNLOCK(sc);
		return;
	}

	mii_pollstat(sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii->mii_media_active;
	ifmr->ifm_status = sc->sc_mii->mii_media_status;
	CAS_UNLOCK(sc);
}

static int
cas_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct cas_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *)data;
	int error;

	error = 0;
	switch (cmd) {
	case SIOCSIFFLAGS:
		CAS_LOCK(sc);
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 &&
			    ((if_getflags(ifp) ^ sc->sc_ifflags) &
			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				cas_setladrf(sc);
			else
				cas_init_locked(sc);
		} else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
			cas_stop(ifp);
		sc->sc_ifflags = if_getflags(ifp);
		CAS_UNLOCK(sc);
		break;
	case SIOCSIFCAP:
		CAS_LOCK(sc);
		if ((sc->sc_flags & CAS_NO_CSUM) != 0) {
			error = EINVAL;
			CAS_UNLOCK(sc);
			break;
		}
		if_setcapenable(ifp, ifr->ifr_reqcap);
		if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
			if_sethwassist(ifp, CAS_CSUM_FEATURES);
		else
			if_sethwassist(ifp, 0);
		CAS_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		CAS_LOCK(sc);
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
			cas_setladrf(sc);
		CAS_UNLOCK(sc);
		break;
	case SIOCSIFMTU:
		if ((ifr->ifr_mtu < ETHERMIN) ||
		    (ifr->ifr_mtu > ETHERMTU_JUMBO))
			error = EINVAL;
		else
			if_setmtu(ifp, ifr->ifr_mtu);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

static u_int
cas_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t crc, *hash = arg;

	crc = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN);
	/* We just want the 8 most significant bits. */
	crc >>= 24;
	/* Set the corresponding bit in the filter. */
	hash[crc >> 4] |= 1 << (15 - (crc & 15));

	return (1);
}

static void
cas_setladrf(struct cas_softc *sc)
{
	if_t ifp = sc->sc_ifp;
	int i;
	uint32_t hash[16];
	uint32_t v;

	CAS_LOCK_ASSERT(sc, MA_OWNED);

	/*
	 * Turn off the RX MAC and the hash filter as required by the Sun
	 * Cassini programming restrictions.
	 */
	v = sc->sc_mac_rxcfg & ~(CAS_MAC_RX_CONF_HFILTER |
	    CAS_MAC_RX_CONF_EN);
	CAS_WRITE_4(sc, CAS_MAC_RX_CONF, v);
	CAS_BARRIER(sc, CAS_MAC_RX_CONF, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (!cas_bitwait(sc, CAS_MAC_RX_CONF, CAS_MAC_RX_CONF_HFILTER |
	    CAS_MAC_RX_CONF_EN, 0))
		device_printf(sc->sc_dev,
		    "cannot disable RX MAC or hash filter\n");

	v &= ~(CAS_MAC_RX_CONF_PROMISC | CAS_MAC_RX_CONF_PGRP);
	if ((if_getflags(ifp) & IFF_PROMISC) != 0) {
		v |= CAS_MAC_RX_CONF_PROMISC;
		goto chipit;
	}
	if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) {
		v |= CAS_MAC_RX_CONF_PGRP;
		goto chipit;
	}

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a crc generator, and then using the high
	 * order 8 bits as an index into the 256 bit logical address
	 * filter.  The high order 4 bits select the word, while the
	 * other 4 bits select the bit within the word (where bit 0
	 * is the MSB).
	 */

	memset(hash, 0, sizeof(hash));
	if_foreach_llmaddr(ifp, cas_hash_maddr, &hash);

	v |= CAS_MAC_RX_CONF_HFILTER;

	/* Now load the hash table into the chip (if we are using it). */
	for (i = 0; i < 16; i++)
		CAS_WRITE_4(sc,
		    CAS_MAC_HASH0 + i * (CAS_MAC_HASH1 - CAS_MAC_HASH0),
		    hash[i]);

 chipit:
	sc->sc_mac_rxcfg = v;
	CAS_WRITE_4(sc, CAS_MAC_RX_CONF, v | CAS_MAC_RX_CONF_EN);
}
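
/*
 * A worked example of the hash mapping above, with a CRC value assumed
 * purely for illustration: if ether_crc32_le() returns 0xa7xxxxxx, then
 * after "crc >>= 24" crc is 0xa7, the word index is 0xa7 >> 4 == 0xa and
 * the bit within that word is 1 << (15 - (0xa7 & 15)) == 1 << 8, so bit 8
 * of hash[10] gets set and cas_setladrf() writes it to the eleventh hash
 * register.
 */
#if 0
	crc = 0xa7000000;				/* assumed CRC */
	crc >>= 24;					/* crc == 0xa7 */
	hash[crc >> 4] |= 1 << (15 - (crc & 15));	/* hash[0xa] |= 0x0100 */
#endif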

static int cas_pci_attach(device_t dev);
static int cas_pci_detach(device_t dev);
static int cas_pci_probe(device_t dev);
static int cas_pci_resume(device_t dev);
static int cas_pci_suspend(device_t dev);

static device_method_t cas_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		cas_pci_probe),
	DEVMETHOD(device_attach,	cas_pci_attach),
	DEVMETHOD(device_detach,	cas_pci_detach),
	DEVMETHOD(device_suspend,	cas_pci_suspend),
	DEVMETHOD(device_resume,	cas_pci_resume),
	/* Use the suspend handler here, it is all that is required. */
	DEVMETHOD(device_shutdown,	cas_pci_suspend),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	cas_mii_readreg),
	DEVMETHOD(miibus_writereg,	cas_mii_writereg),
	DEVMETHOD(miibus_statchg,	cas_mii_statchg),

	DEVMETHOD_END
};

static driver_t cas_pci_driver = {
	"cas",
	cas_pci_methods,
	sizeof(struct cas_softc)
};

static const struct cas_pci_dev {
	uint32_t	cpd_devid;
	uint8_t		cpd_revid;
	int		cpd_variant;
	const char	*cpd_desc;
} cas_pci_devlist[] = {
	{ 0x0035100b, 0x0, CAS_SATURN, "NS DP83065 Saturn Gigabit Ethernet" },
	{ 0xabba108e, 0x10, CAS_CASPLUS, "Sun Cassini+ Gigabit Ethernet" },
	{ 0xabba108e, 0x0, CAS_CAS, "Sun Cassini Gigabit Ethernet" },
	{ 0, 0, 0, NULL }
};

DRIVER_MODULE(cas, pci, cas_pci_driver, 0, 0);
MODULE_PNP_INFO("W32:vendor/device", pci, cas, cas_pci_devlist,
    nitems(cas_pci_devlist) - 1);
DRIVER_MODULE(miibus, cas, miibus_driver, 0, 0);
MODULE_DEPEND(cas, pci, 1, 1, 1);

static int
cas_pci_probe(device_t dev)
{
	int i;

	for (i = 0; cas_pci_devlist[i].cpd_desc != NULL; i++) {
		if (pci_get_devid(dev) == cas_pci_devlist[i].cpd_devid &&
		    pci_get_revid(dev) >= cas_pci_devlist[i].cpd_revid) {
			device_set_desc(dev, cas_pci_devlist[i].cpd_desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static struct resource_spec cas_pci_res_spec[] = {
	{ SYS_RES_IRQ, 0, RF_SHAREABLE | RF_ACTIVE },	/* CAS_RES_INTR */
	{ SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },	/* CAS_RES_MEM */
	{ -1, 0 }
};

#define	CAS_LOCAL_MAC_ADDRESS	"local-mac-address"
#define	CAS_PHY_INTERFACE	"phy-interface"
#define	CAS_PHY_TYPE		"phy-type"
#define	CAS_PHY_TYPE_PCS	"pcs"

static int
cas_pci_attach(device_t dev)
{
	char buf[sizeof(CAS_LOCAL_MAC_ADDRESS)];
	struct cas_softc *sc;
	int i;
#if !defined(__powerpc__)
	u_char enaddr[4][ETHER_ADDR_LEN];
	u_int j, k, lma, pcs[4], phy;
#endif

	sc = device_get_softc(dev);
	sc->sc_variant = CAS_UNKNOWN;
	for (i = 0; cas_pci_devlist[i].cpd_desc != NULL; i++) {
		if (pci_get_devid(dev) == cas_pci_devlist[i].cpd_devid &&
		    pci_get_revid(dev) >= cas_pci_devlist[i].cpd_revid) {
			sc->sc_variant = cas_pci_devlist[i].cpd_variant;
			break;
		}
	}
	if (sc->sc_variant == CAS_UNKNOWN) {
		device_printf(dev, "unknown adaptor\n");
		return (ENXIO);
	}

	/* PCI configuration */
	pci_write_config(dev, PCIR_COMMAND,
	    pci_read_config(dev, PCIR_COMMAND, 2) | PCIM_CMD_BUSMASTEREN |
	    PCIM_CMD_MWRICEN | PCIM_CMD_PERRESPEN | PCIM_CMD_SERRESPEN, 2);

	sc->sc_dev = dev;
	if (sc->sc_variant == CAS_CAS && pci_get_revid(dev) < 0x02)
		/* Hardware checksumming may hang TX. */
		sc->sc_flags |= CAS_NO_CSUM;
	if (sc->sc_variant == CAS_CASPLUS || sc->sc_variant == CAS_SATURN)
		sc->sc_flags |= CAS_REG_PLUS;
	if (sc->sc_variant == CAS_CAS ||
	    (sc->sc_variant == CAS_CASPLUS && pci_get_revid(dev) < 0x11))
		sc->sc_flags |= CAS_TABORT;
	if (bootverbose)
		device_printf(dev, "flags=0x%x\n", sc->sc_flags);

	if (bus_alloc_resources(dev, cas_pci_res_spec, sc->sc_res)) {
		device_printf(dev, "failed to allocate resources\n");
		bus_release_resources(dev, cas_pci_res_spec, sc->sc_res);
		return (ENXIO);
	}

	CAS_LOCK_INIT(sc, device_get_nameunit(dev));

#if defined(__powerpc__)
	OF_getetheraddr(dev, sc->sc_enaddr);
	if (OF_getprop(ofw_bus_get_node(dev), CAS_PHY_INTERFACE, buf,
	    sizeof(buf)) > 0 || OF_getprop(ofw_bus_get_node(dev),
	    CAS_PHY_TYPE, buf, sizeof(buf)) > 0) {
		buf[sizeof(buf) - 1] = '\0';
		if (strcmp(buf, CAS_PHY_TYPE_PCS) == 0)
			sc->sc_flags |= CAS_SERDES;
	}
#else
	/*
	 * Dig out VPD (vital product data) and read the MAC address as well
	 * as the PHY type.  The VPD resides in the PCI Expansion ROM (PCI
	 * FCode) and can't be accessed via the PCI capability pointer.
	 * SUNW,pci-ce and SUNW,pci-qge use the Enhanced VPD format described
	 * in the free US Patent 7149820.
	 */

#define	PCI_ROMHDR_SIZE			0x1c
#define	PCI_ROMHDR_SIG			0x00
#define	PCI_ROMHDR_SIG_MAGIC		0xaa55		/* little endian */
#define	PCI_ROMHDR_PTR_DATA		0x18
#define	PCI_ROM_SIZE			0x18
#define	PCI_ROM_SIG			0x00
#define	PCI_ROM_SIG_MAGIC		0x52494350	/* "PCIR", endian reversed */
#define	PCI_ROM_VENDOR			0x04
#define	PCI_ROM_DEVICE			0x06
#define	PCI_ROM_PTR_VPD			0x08
#define	PCI_VPDRES_BYTE0		0x00
#define	PCI_VPDRES_ISLARGE(x)		((x) & 0x80)
#define	PCI_VPDRES_LARGE_NAME(x)	((x) & 0x7f)
#define	PCI_VPDRES_LARGE_LEN_LSB	0x01
#define	PCI_VPDRES_LARGE_LEN_MSB	0x02
#define	PCI_VPDRES_LARGE_SIZE		0x03
#define	PCI_VPDRES_TYPE_ID_STRING	0x02		/* large */
#define	PCI_VPDRES_TYPE_VPD		0x10		/* large */
#define	PCI_VPD_KEY0			0x00
#define	PCI_VPD_KEY1			0x01
#define	PCI_VPD_LEN			0x02
#define	PCI_VPD_SIZE			0x03

#define	CAS_ROM_READ_1(sc, offs)					\
	CAS_READ_1((sc), CAS_PCI_ROM_OFFSET + (offs))
#define	CAS_ROM_READ_2(sc, offs)					\
	CAS_READ_2((sc), CAS_PCI_ROM_OFFSET + (offs))
#define	CAS_ROM_READ_4(sc, offs)					\
	CAS_READ_4((sc), CAS_PCI_ROM_OFFSET + (offs))

	lma = phy = 0;
	memset(enaddr, 0, sizeof(enaddr));
	memset(pcs, 0, sizeof(pcs));

	/* Enable PCI Expansion ROM access. */
	CAS_WRITE_4(sc, CAS_BIM_LDEV_OEN,
	    CAS_BIM_LDEV_OEN_PAD | CAS_BIM_LDEV_OEN_PROM);

	/* Read PCI Expansion ROM header. */
	if (CAS_ROM_READ_2(sc, PCI_ROMHDR_SIG) != PCI_ROMHDR_SIG_MAGIC ||
	    (i = CAS_ROM_READ_2(sc, PCI_ROMHDR_PTR_DATA)) <
	    PCI_ROMHDR_SIZE) {
		device_printf(dev, "unexpected PCI Expansion ROM header\n");
		goto fail_prom;
	}

	/* Read PCI Expansion ROM data. */
	if (CAS_ROM_READ_4(sc, i + PCI_ROM_SIG) != PCI_ROM_SIG_MAGIC ||
	    CAS_ROM_READ_2(sc, i + PCI_ROM_VENDOR) != pci_get_vendor(dev) ||
	    CAS_ROM_READ_2(sc, i + PCI_ROM_DEVICE) != pci_get_device(dev) ||
	    (j = CAS_ROM_READ_2(sc, i + PCI_ROM_PTR_VPD)) <
	    i + PCI_ROM_SIZE) {
		device_printf(dev, "unexpected PCI Expansion ROM data\n");
		goto fail_prom;
	}

	/* Read PCI VPD. */
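	/*
	 * The record layout the loop below expects, reconstructed from the
	 * offsets it reads (shown for orientation only; the authoritative
	 * description is the Enhanced VPD format referenced above):
	 *
	 *	+0	'Z'		Enhanced VPD keyword
	 *	+2	record length
	 *	+3	'I'		instance property
	 *	+6	'B' or 'S'	property type (byte array or string)
	 *	+7	property length
	 *	+8	NUL-terminated property name, followed by the data
	 */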
 next:
	if (PCI_VPDRES_ISLARGE(CAS_ROM_READ_1(sc,
	    j + PCI_VPDRES_BYTE0)) == 0) {
		device_printf(dev, "no large PCI VPD\n");
		goto fail_prom;
	}

	i = (CAS_ROM_READ_1(sc, j + PCI_VPDRES_LARGE_LEN_MSB) << 8) |
	    CAS_ROM_READ_1(sc, j + PCI_VPDRES_LARGE_LEN_LSB);
	switch (PCI_VPDRES_LARGE_NAME(CAS_ROM_READ_1(sc,
	    j + PCI_VPDRES_BYTE0))) {
	case PCI_VPDRES_TYPE_ID_STRING:
		/* Skip identifier string. */
		j += PCI_VPDRES_LARGE_SIZE + i;
		goto next;
	case PCI_VPDRES_TYPE_VPD:
		for (j += PCI_VPDRES_LARGE_SIZE; i > 0;
		    i -= PCI_VPD_SIZE + CAS_ROM_READ_1(sc, j + PCI_VPD_LEN),
		    j += PCI_VPD_SIZE + CAS_ROM_READ_1(sc, j + PCI_VPD_LEN)) {
			if (CAS_ROM_READ_1(sc, j + PCI_VPD_KEY0) != 'Z')
				/* no Enhanced VPD */
				continue;
			if (CAS_ROM_READ_1(sc, j + PCI_VPD_SIZE) != 'I')
				/* no instance property */
				continue;
			if (CAS_ROM_READ_1(sc, j + PCI_VPD_SIZE + 3) == 'B') {
				/* byte array */
				if (CAS_ROM_READ_1(sc,
				    j + PCI_VPD_SIZE + 4) != ETHER_ADDR_LEN)
					continue;
				bus_read_region_1(sc->sc_res[CAS_RES_MEM],
				    CAS_PCI_ROM_OFFSET + j + PCI_VPD_SIZE + 5,
				    buf, sizeof(buf));
				buf[sizeof(buf) - 1] = '\0';
				if (strcmp(buf, CAS_LOCAL_MAC_ADDRESS) != 0)
					continue;
				bus_read_region_1(sc->sc_res[CAS_RES_MEM],
				    CAS_PCI_ROM_OFFSET + j + PCI_VPD_SIZE +
				    5 + sizeof(CAS_LOCAL_MAC_ADDRESS),
				    enaddr[lma], sizeof(enaddr[lma]));
				lma++;
				if (lma == 4 && phy == 4)
					break;
			} else if (CAS_ROM_READ_1(sc, j + PCI_VPD_SIZE + 3) ==
			    'S') {
				/* string */
				if (CAS_ROM_READ_1(sc,
				    j + PCI_VPD_SIZE + 4) !=
				    sizeof(CAS_PHY_TYPE_PCS))
					continue;
				bus_read_region_1(sc->sc_res[CAS_RES_MEM],
				    CAS_PCI_ROM_OFFSET + j + PCI_VPD_SIZE + 5,
				    buf, sizeof(buf));
				buf[sizeof(buf) - 1] = '\0';
				if (strcmp(buf, CAS_PHY_INTERFACE) == 0)
					k = sizeof(CAS_PHY_INTERFACE);
				else if (strcmp(buf, CAS_PHY_TYPE) == 0)
					k = sizeof(CAS_PHY_TYPE);
				else
					continue;
				bus_read_region_1(sc->sc_res[CAS_RES_MEM],
				    CAS_PCI_ROM_OFFSET + j + PCI_VPD_SIZE +
				    5 + k, buf, sizeof(buf));
				buf[sizeof(buf) - 1] = '\0';
				if (strcmp(buf, CAS_PHY_TYPE_PCS) == 0)
					pcs[phy] = 1;
				phy++;
				if (lma == 4 && phy == 4)
					break;
			}
		}
		break;
	default:
		device_printf(dev, "unexpected PCI VPD\n");
		goto fail_prom;
	}

 fail_prom:
	CAS_WRITE_4(sc, CAS_BIM_LDEV_OEN, 0);

	if (lma == 0) {
		device_printf(dev, "could not determine Ethernet address\n");
		goto fail;
	}
	i = 0;
	if (lma > 1 && pci_get_slot(dev) < nitems(enaddr))
		i = pci_get_slot(dev);
	memcpy(sc->sc_enaddr, enaddr[i], ETHER_ADDR_LEN);

	if (phy == 0) {
		device_printf(dev, "could not determine PHY type\n");
		goto fail;
	}
	i = 0;
	if (phy > 1 && pci_get_slot(dev) < nitems(pcs))
		i = pci_get_slot(dev);
	if (pcs[i] != 0)
		sc->sc_flags |= CAS_SERDES;
#endif

	if (cas_attach(sc) != 0) {
		device_printf(dev, "could not be attached\n");
		goto fail;
	}

	if (bus_setup_intr(dev, sc->sc_res[CAS_RES_INTR], INTR_TYPE_NET |
	    INTR_MPSAFE, cas_intr, NULL, sc, &sc->sc_ih) != 0) {
		device_printf(dev, "failed to set up interrupt\n");
		cas_detach(sc);
		goto fail;
	}
	return (0);

 fail:
	CAS_LOCK_DESTROY(sc);
	bus_release_resources(dev, cas_pci_res_spec, sc->sc_res);
	return (ENXIO);
}

static int
cas_pci_detach(device_t dev)
{
	struct cas_softc *sc;

	sc = device_get_softc(dev);
	bus_teardown_intr(dev, sc->sc_res[CAS_RES_INTR], sc->sc_ih);
	cas_detach(sc);
	CAS_LOCK_DESTROY(sc);
	bus_release_resources(dev, cas_pci_res_spec, sc->sc_res);
	return (0);
}

static int
cas_pci_suspend(device_t dev)
{

	cas_suspend(device_get_softc(dev));
	return (0);
}

static int
cas_pci_resume(device_t dev)
{

	cas_resume(device_get_softc(dev));
	return (0);
}