/*-
 * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <machine/bus.h>

#include <dev/extres/clk/clk.h>
#include <dev/extres/hwreset/hwreset.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/dwc/if_dwcvar.h>
#include <dev/dwc/dwc1000_reg.h>
#include <dev/dwc/dwc1000_dma.h>

#define	WATCHDOG_TIMEOUT_SECS	5
#define	DMA_RESET_TIMEOUT	100

/* TX descriptors - TDESC0 is almost unified */
#define	TDESC0_OWN		(1U << 31)
#define	TDESC0_IHE		(1U << 16)	/* IP Header Error */
#define	TDESC0_ES		(1U << 15)	/* Error Summary */
#define	TDESC0_JT		(1U << 14)	/* Jabber Timeout */
#define	TDESC0_FF		(1U << 13)	/* Frame Flushed */
#define	TDESC0_PCE		(1U << 12)	/* Payload Checksum Error */
#define	TDESC0_LOC		(1U << 11)	/* Loss of Carrier */
#define	TDESC0_NC		(1U << 10)	/* No Carrier */
#define	TDESC0_LC		(1U << 9)	/* Late Collision */
#define	TDESC0_EC		(1U << 8)	/* Excessive Collision */
#define	TDESC0_VF		(1U << 7)	/* VLAN Frame */
#define	TDESC0_CC_MASK		0xf
#define	TDESC0_CC_SHIFT		3		/* Collision Count */
#define	TDESC0_ED		(1U << 2)	/* Excessive Deferral */
#define	TDESC0_UF		(1U << 1)	/* Underflow Error */
#define	TDESC0_DB		(1U << 0)	/* Deferred Bit */
/* TX descriptors - TDESC0 extended format only */
#define	ETDESC0_IC		(1U << 30)	/* Interrupt on Completion */
#define	ETDESC0_LS		(1U << 29)	/* Last Segment */
#define	ETDESC0_FS		(1U << 28)	/* First Segment */
#define	ETDESC0_DC		(1U << 27)	/* Disable CRC */
#define	ETDESC0_DP		(1U << 26)	/* Disable Padding */
#define	ETDESC0_CIC_NONE	(0U << 22)	/* Checksum Insertion Control */
#define	ETDESC0_CIC_HDR		(1U << 22)
#define	ETDESC0_CIC_SEG		(2U << 22)
#define	ETDESC0_CIC_FULL	(3U << 22)
#define	ETDESC0_TER		(1U << 21)	/* Transmit End of Ring */
#define	ETDESC0_TCH		(1U << 20)	/* Second Address Chained */

/* TX descriptors - TDESC1 normal format */
#define	NTDESC1_IC		(1U << 31)	/* Interrupt on Completion */
#define	NTDESC1_LS		(1U << 30)	/* Last Segment */
#define	NTDESC1_FS		(1U << 29)	/* First Segment */
#define	NTDESC1_CIC_NONE	(0U << 27)	/* Checksum Insertion Control */
#define	NTDESC1_CIC_HDR		(1U << 27)
#define	NTDESC1_CIC_SEG		(2U << 27)
#define	NTDESC1_CIC_FULL	(3U << 27)
#define	NTDESC1_DC		(1U << 26)	/* Disable CRC */
#define	NTDESC1_TER		(1U << 25)	/* Transmit End of Ring */
#define	NTDESC1_TCH		(1U << 24)	/* Second Address Chained */
/* TX descriptors - TDESC1 extended format */
#define	ETDESC1_DP		(1U << 23)	/* Disable Padding */
#define	ETDESC1_TBS2_MASK	0x7ff
#define	ETDESC1_TBS2_SHIFT	11		/* Transmit Buffer 2 Size */
#define	ETDESC1_TBS1_MASK	0x7ff
#define	ETDESC1_TBS1_SHIFT	0		/* Transmit Buffer 1 Size */

/* RX descriptor - RDESC0 is unified */
#define	RDESC0_OWN		(1U << 31)
#define	RDESC0_AFM		(1U << 30)	/* Dest. Address Filter Fail */
#define	RDESC0_FL_MASK		0x3fff
#define	RDESC0_FL_SHIFT		16		/* Frame Length */
#define	RDESC0_ES		(1U << 15)	/* Error Summary */
#define	RDESC0_DE		(1U << 14)	/* Descriptor Error */
#define	RDESC0_SAF		(1U << 13)	/* Source Address Filter Fail */
#define	RDESC0_LE		(1U << 12)	/* Length Error */
#define	RDESC0_OE		(1U << 11)	/* Overflow Error */
#define	RDESC0_VLAN		(1U << 10)	/* VLAN Tag */
#define	RDESC0_FS		(1U << 9)	/* First Descriptor */
#define	RDESC0_LS		(1U << 8)	/* Last Descriptor */
#define	RDESC0_ICE		(1U << 7)	/* IPC Checksum Error */
#define	RDESC0_LC		(1U << 6)	/* Late Collision */
#define	RDESC0_FT		(1U << 5)	/* Frame Type */
#define	RDESC0_RWT		(1U << 4)	/* Receive Watchdog Timeout */
#define	RDESC0_RE		(1U << 3)	/* Receive Error */
#define	RDESC0_DBE		(1U << 2)	/* Dribble Bit Error */
#define	RDESC0_CE		(1U << 1)	/* CRC Error */
#define	RDESC0_PCE		(1U << 0)	/* Payload Checksum Error */
#define	RDESC0_RXMA		(1U << 0)	/* Rx MAC Address */

/* RX descriptors - RDESC1 normal format */
#define	NRDESC1_DIC		(1U << 31)	/* Disable Intr on Completion */
#define	NRDESC1_RER		(1U << 25)	/* Receive End of Ring */
#define	NRDESC1_RCH		(1U << 24)	/* Second Address Chained */
#define	NRDESC1_RBS2_MASK	0x7ff
#define	NRDESC1_RBS2_SHIFT	11		/* Receive Buffer 2 Size */
#define	NRDESC1_RBS1_MASK	0x7ff
#define	NRDESC1_RBS1_SHIFT	0		/* Receive Buffer 1 Size */

/* RX descriptors - RDESC1 enhanced format */
#define	ERDESC1_DIC		(1U << 31)	/* Disable Intr on Completion */
#define	ERDESC1_RBS2_MASK	0x7ffff
#define	ERDESC1_RBS2_SHIFT	16		/* Receive Buffer 2 Size */
#define	ERDESC1_RER		(1U << 15)	/* Receive End of Ring */
#define	ERDESC1_RCH		(1U << 14)	/* Second Address Chained */
#define	ERDESC1_RBS1_MASK	0x7ffff
#define	ERDESC1_RBS1_SHIFT	0		/* Receive Buffer 1 Size */
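
/*
 * Editorial note on the two descriptor layouts (a sketch derived from
 * dwc_setup_txdesc() below; the Synopsys GMAC databook is authoritative):
 * the core supports a "normal" and an "extended" descriptor format, and
 * dma1000_init() selects the extended one when the HW_FEATURE register
 * advertises it.  In the normal format the driver's TX control bits
 * (FS/LS/IC/TCH and checksum insertion) share desc1 with the buffer
 * length; in the extended format they move to desc0 and desc1 holds only
 * the length.  A single-segment frame of 'len' bytes with no checksum
 * offload is thus composed roughly as:
 *
 *   normal:   desc0 = 0;
 *             desc1 = NTDESC1_TCH | NTDESC1_FS | NTDESC1_LS |
 *                     NTDESC1_IC | len;
 *   extended: desc0 = ETDESC0_TCH | ETDESC0_FS | ETDESC0_LS | ETDESC0_IC;
 *             desc1 = len;
 *
 * In both formats TDESC0_OWN is set last to hand the descriptor to the
 * DMA engine.
 */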

/*
 * The hardware imposes alignment restrictions on various objects involved in
 * DMA transfers.  These values are expressed in bytes (not bits).
 */
#define	DWC_DESC_RING_ALIGN	2048

static inline uint32_t
next_txidx(struct dwc_softc *sc, uint32_t curidx)
{

	return ((curidx + 1) % TX_DESC_COUNT);
}

static inline uint32_t
next_rxidx(struct dwc_softc *sc, uint32_t curidx)
{

	return ((curidx + 1) % RX_DESC_COUNT);
}

static void
dwc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

	if (error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

inline static void
dwc_set_owner(struct dwc_softc *sc, int idx)
{
	wmb();
	sc->txdesc_ring[idx].desc0 |= TDESC0_OWN;
	wmb();
}

inline static void
dwc_setup_txdesc(struct dwc_softc *sc, int idx, bus_addr_t paddr,
    uint32_t len, uint32_t flags, bool first, bool last)
{
	uint32_t desc0, desc1;

	/* Addr/len 0 means we're clearing the descriptor after xmit done. */
	if (paddr == 0 || len == 0) {
		desc0 = 0;
		desc1 = 0;
		--sc->tx_desccount;
	} else {
		if (!sc->dma_ext_desc) {
			desc0 = 0;
			desc1 = NTDESC1_TCH | len | flags;
			if (first)
				desc1 |= NTDESC1_FS;
			if (last)
				desc1 |= NTDESC1_LS | NTDESC1_IC;
		} else {
			desc0 = ETDESC0_TCH | flags;
			if (first)
				desc0 |= ETDESC0_FS;
			if (last)
				desc0 |= ETDESC0_LS | ETDESC0_IC;
			desc1 = len;
		}
		++sc->tx_desccount;
	}

	sc->txdesc_ring[idx].addr1 = (uint32_t)(paddr);
	sc->txdesc_ring[idx].desc0 = desc0;
	sc->txdesc_ring[idx].desc1 = desc1;
}

inline static uint32_t
dwc_setup_rxdesc(struct dwc_softc *sc, int idx, bus_addr_t paddr)
{
	uint32_t nidx;

	sc->rxdesc_ring[idx].addr1 = (uint32_t)paddr;
	nidx = next_rxidx(sc, idx);
	sc->rxdesc_ring[idx].addr2 = sc->rxdesc_ring_paddr +
	    (nidx * sizeof(struct dwc_hwdesc));
	if (!sc->dma_ext_desc)
		sc->rxdesc_ring[idx].desc1 = NRDESC1_RCH |
		    MIN(MCLBYTES, NRDESC1_RBS1_MASK);
	else
		sc->rxdesc_ring[idx].desc1 = ERDESC1_RCH |
		    MIN(MCLBYTES, ERDESC1_RBS1_MASK);

	wmb();
	sc->rxdesc_ring[idx].desc0 = RDESC0_OWN;
	wmb();
	return (nidx);
}
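
/*
 * Editorial note on descriptor ownership: a descriptor belongs to the DMA
 * engine while TDESC0_OWN/RDESC0_OWN is set and to the driver otherwise.
 * dwc_set_owner() and dwc_setup_rxdesc() therefore publish a descriptor in
 * two steps: fill in the addresses, sizes, and control bits first, then
 * issue a write barrier (wmb()) before setting OWN, so the engine can never
 * observe a half-initialized descriptor.  The trailing wmb() keeps the OWN
 * store from being reordered past later stores, such as the transmit
 * poll-demand register write that kicks the engine.
 */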

int
dma1000_setup_txbuf(struct dwc_softc *sc, int idx, struct mbuf **mp)
{
	struct bus_dma_segment segs[TX_MAP_MAX_SEGS];
	int error, nsegs;
	struct mbuf *m;
	uint32_t flags = 0;
	int i;
	int first, last;

	error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map,
	    *mp, segs, &nsegs, 0);
	if (error == EFBIG) {
		/*
		 * The map may be partially mapped from the first call.
		 * Make sure to reset it.
		 */
		bus_dmamap_unload(sc->txbuf_tag, sc->txbuf_map[idx].map);
		if ((m = m_defrag(*mp, M_NOWAIT)) == NULL)
			return (ENOMEM);
		*mp = m;
		error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag,
		    sc->txbuf_map[idx].map, *mp, segs, &nsegs, 0);
	}
	if (error != 0)
		return (ENOMEM);

	if (sc->tx_desccount + nsegs > TX_DESC_COUNT) {
		bus_dmamap_unload(sc->txbuf_tag, sc->txbuf_map[idx].map);
		return (ENOMEM);
	}

	m = *mp;

	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) {
		if ((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) != 0) {
			if (!sc->dma_ext_desc)
				flags = NTDESC1_CIC_FULL;
			else
				flags = ETDESC0_CIC_FULL;
		} else {
			if (!sc->dma_ext_desc)
				flags = NTDESC1_CIC_HDR;
			else
				flags = ETDESC0_CIC_HDR;
		}
	}

	bus_dmamap_sync(sc->txbuf_tag, sc->txbuf_map[idx].map,
	    BUS_DMASYNC_PREWRITE);

	sc->txbuf_map[idx].mbuf = m;

	first = sc->tx_desc_head;
	for (i = 0; i < nsegs; i++) {
		dwc_setup_txdesc(sc, sc->tx_desc_head,
		    segs[i].ds_addr, segs[i].ds_len,
		    (i == 0) ? flags : 0, /* only first desc needs flags */
		    (i == 0),
		    (i == nsegs - 1));
		if (i > 0)
			dwc_set_owner(sc, sc->tx_desc_head);
		last = sc->tx_desc_head;
		sc->tx_desc_head = next_txidx(sc, sc->tx_desc_head);
	}

	sc->txbuf_map[idx].last_desc_idx = last;

	dwc_set_owner(sc, first);

	return (0);
}

static int
dma1000_setup_rxbuf(struct dwc_softc *sc, int idx, struct mbuf *m)
{
	struct bus_dma_segment seg;
	int error, nsegs;

	m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_sg(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
	    m, &seg, &nsegs, 0);
	if (error != 0)
		return (error);

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	bus_dmamap_sync(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
	    BUS_DMASYNC_PREREAD);

	sc->rxbuf_map[idx].mbuf = m;
	dwc_setup_rxdesc(sc, idx, seg.ds_addr);

	return (0);
}

static struct mbuf *
dwc_alloc_mbufcl(struct dwc_softc *sc)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m != NULL)
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	return (m);
}
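
/*
 * Editorial note on multi-segment transmit: dma1000_setup_txbuf() above
 * writes one descriptor per DMA segment, granting OWN to the non-first
 * descriptors as it goes and to the first descriptor only at the very end.
 * Since the engine starts a frame at its first descriptor, the entire chain
 * becomes visible to the hardware atomically when the head's OWN bit is
 * finally set.
 */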

static struct mbuf *
dwc_rxfinish_one(struct dwc_softc *sc, struct dwc_hwdesc *desc,
    struct dwc_bufmap *map)
{
	if_t ifp;
	struct mbuf *m, *m0;
	int len;
	uint32_t rdesc0;

	m = map->mbuf;
	ifp = sc->ifp;
	rdesc0 = desc->desc0;

	if ((rdesc0 & (RDESC0_FS | RDESC0_LS)) !=
	    (RDESC0_FS | RDESC0_LS)) {
		/*
		 * Something went very wrong: the whole packet should have
		 * been received in a single descriptor.  Report the problem.
		 */
		device_printf(sc->dev,
		    "%s: RX descriptor without FIRST and LAST bit set: 0x%08X\n",
		    __func__, rdesc0);
		return (NULL);
	}

	len = (rdesc0 >> RDESC0_FL_SHIFT) & RDESC0_FL_MASK;
	if (len < 64) {
		/*
		 * Length is invalid; recycle the old mbuf.
		 * Probably an impossible case.
		 */
		return (NULL);
	}

	/* Allocate a new buffer. */
	m0 = dwc_alloc_mbufcl(sc);
	if (m0 == NULL) {
		/* No new mbuf available, recycle the old one. */
		if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, 1);
		return (NULL);
	}
	/* Do the dma sync for the newly received packet. */
	bus_dmamap_sync(sc->rxbuf_tag, map->map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->rxbuf_tag, map->map);

	/* The received packet is valid, process it. */
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = len;
	m->m_len = len;
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
	    (rdesc0 & RDESC0_FT) != 0) {
		m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
		if ((rdesc0 & RDESC0_ICE) == 0)
			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		if ((rdesc0 & RDESC0_PCE) == 0) {
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}
	}

	/* Remove the trailing FCS. */
	m_adj(m, -ETHER_CRC_LEN);

	DWC_UNLOCK(sc);
	if_input(ifp, m);
	DWC_LOCK(sc);
	return (m0);
}

void
dma1000_txfinish_locked(struct dwc_softc *sc)
{
	struct dwc_bufmap *bmap;
	struct dwc_hwdesc *desc;
	if_t ifp;
	int idx, last_idx;
	bool map_finished;

	DWC_ASSERT_LOCKED(sc);

	ifp = sc->ifp;
	/* Check if all descriptors of the map are done. */
	while (sc->tx_map_tail != sc->tx_map_head) {
		map_finished = true;
		bmap = &sc->txbuf_map[sc->tx_map_tail];
		idx = sc->tx_desc_tail;
		last_idx = next_txidx(sc, bmap->last_desc_idx);
		while (idx != last_idx) {
			desc = &sc->txdesc_ring[idx];
			if ((desc->desc0 & TDESC0_OWN) != 0) {
				map_finished = false;
				break;
			}
			idx = next_txidx(sc, idx);
		}

		if (!map_finished)
			break;
		bus_dmamap_sync(sc->txbuf_tag, bmap->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->txbuf_tag, bmap->map);
		m_freem(bmap->mbuf);
		bmap->mbuf = NULL;
		sc->tx_mapcount--;
		while (sc->tx_desc_tail != last_idx) {
			dwc_setup_txdesc(sc, sc->tx_desc_tail, 0, 0, 0,
			    false, false);
			sc->tx_desc_tail = next_txidx(sc, sc->tx_desc_tail);
		}
		sc->tx_map_tail = next_txidx(sc, sc->tx_map_tail);
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	}

	/* If there are no buffers outstanding, muzzle the watchdog. */
	if (sc->tx_desc_tail == sc->tx_desc_head) {
		sc->tx_watchdog_count = 0;
	}
}
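
/*
 * Editorial note on the RX recycle protocol: dwc_rxfinish_one() returns
 * either a fresh empty cluster (the received mbuf was handed up the stack)
 * or NULL (the frame was bad or no replacement mbuf was available).  In the
 * NULL case the caller simply gives the untouched old buffer back to the
 * hardware by re-setting RDESC0_OWN, so the ring never develops a hole.
 */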

void
dma1000_txstart(struct dwc_softc *sc)
{
	int enqueued;
	struct mbuf *m;

	enqueued = 0;

	for (;;) {
		if (sc->tx_desccount > (TX_DESC_COUNT - TX_MAP_MAX_SEGS + 1)) {
			if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		if (sc->tx_mapcount == (TX_MAP_COUNT - 1)) {
			if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		m = if_dequeue(sc->ifp);
		if (m == NULL)
			break;
		if (dma1000_setup_txbuf(sc, sc->tx_map_head, &m) != 0) {
			if_sendq_prepend(sc->ifp, m);
			if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
			break;
		}
		bpf_mtap_if(sc->ifp, m);
		sc->tx_map_head = next_txidx(sc, sc->tx_map_head);
		sc->tx_mapcount++;
		++enqueued;
	}

	if (enqueued != 0) {
		WRITE4(sc, TRANSMIT_POLL_DEMAND, 0x1);
		sc->tx_watchdog_count = WATCHDOG_TIMEOUT_SECS;
	}
}

void
dma1000_rxfinish_locked(struct dwc_softc *sc)
{
	struct mbuf *m;
	int error, idx;
	struct dwc_hwdesc *desc;

	DWC_ASSERT_LOCKED(sc);
	for (;;) {
		idx = sc->rx_idx;
		desc = sc->rxdesc_ring + idx;
		if ((desc->desc0 & RDESC0_OWN) != 0)
			break;

		m = dwc_rxfinish_one(sc, desc, sc->rxbuf_map + idx);
		if (m == NULL) {
			wmb();
			desc->desc0 = RDESC0_OWN;
			wmb();
		} else {
			/* We cannot create a hole in the RX ring. */
			error = dma1000_setup_rxbuf(sc, idx, m);
			if (error != 0)
				panic("dma1000_setup_rxbuf failed: error %d\n",
				    error);
		}
		sc->rx_idx = next_rxidx(sc, sc->rx_idx);
	}
}

/*
 * Start the DMA controller.
 */
void
dma1000_start(struct dwc_softc *sc)
{
	uint32_t reg;

	DWC_ASSERT_LOCKED(sc);

	/* Initialize DMA and enable transmitters. */
	reg = READ4(sc, OPERATION_MODE);
	reg |= (MODE_TSF | MODE_OSF | MODE_FUF);
	reg &= ~(MODE_RSF);
	reg |= (MODE_RTC_LEV32 << MODE_RTC_SHIFT);
	WRITE4(sc, OPERATION_MODE, reg);

	WRITE4(sc, INTERRUPT_ENABLE, INT_EN_DEFAULT);

	/* Start DMA. */
	reg = READ4(sc, OPERATION_MODE);
	reg |= (MODE_ST | MODE_SR);
	WRITE4(sc, OPERATION_MODE, reg);
}

/*
 * Stop the DMA controller.
 */
void
dma1000_stop(struct dwc_softc *sc)
{
	uint32_t reg;

	DWC_ASSERT_LOCKED(sc);

	/* Stop DMA TX. */
	reg = READ4(sc, OPERATION_MODE);
	reg &= ~(MODE_ST);
	WRITE4(sc, OPERATION_MODE, reg);

	/* Flush TX. */
	reg = READ4(sc, OPERATION_MODE);
	reg |= (MODE_FTF);
	WRITE4(sc, OPERATION_MODE, reg);

	/* Stop DMA RX. */
	reg = READ4(sc, OPERATION_MODE);
	reg &= ~(MODE_SR);
	WRITE4(sc, OPERATION_MODE, reg);
}

int
dma1000_reset(struct dwc_softc *sc)
{
	uint32_t reg;
	int i;

	reg = READ4(sc, BUS_MODE);
	reg |= (BUS_MODE_SWR);
	WRITE4(sc, BUS_MODE, reg);

	for (i = 0; i < DMA_RESET_TIMEOUT; i++) {
		if ((READ4(sc, BUS_MODE) & BUS_MODE_SWR) == 0)
			break;
		DELAY(10);
	}
	if (i >= DMA_RESET_TIMEOUT) {
		return (ENXIO);
	}

	return (0);
}
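
/*
 * Editorial note on transmit backpressure: dma1000_txstart() above stops
 * dequeuing and sets IFF_DRV_OACTIVE when the descriptor ring can no longer
 * be guaranteed to hold a worst-case TX_MAP_MAX_SEGS-segment frame, or when
 * the buffer-map array is nearly full; dma1000_txfinish_locked() clears the
 * flag again as completed descriptors are reclaimed.
 */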

/*
 * Create the bus_dma resources.
 */
int
dma1000_init(struct dwc_softc *sc)
{
	struct mbuf *m;
	uint32_t reg;
	int error;
	int nidx;
	int idx;

	reg = BUS_MODE_USP;
	if (!sc->nopblx8)
		reg |= BUS_MODE_EIGHTXPBL;
	reg |= (sc->txpbl << BUS_MODE_PBL_SHIFT);
	reg |= (sc->rxpbl << BUS_MODE_RPBL_SHIFT);
	if (sc->fixed_burst)
		reg |= BUS_MODE_FIXEDBURST;
	if (sc->mixed_burst)
		reg |= BUS_MODE_MIXEDBURST;
	if (sc->aal)
		reg |= BUS_MODE_AAL;

	WRITE4(sc, BUS_MODE, reg);

	reg = READ4(sc, HW_FEATURE);
	if (reg & HW_FEATURE_EXT_DESCRIPTOR)
		sc->dma_ext_desc = true;

	/*
	 * DMA must be stopped while changing descriptor list addresses.
	 */
	reg = READ4(sc, OPERATION_MODE);
	reg &= ~(MODE_ST | MODE_SR);
	WRITE4(sc, OPERATION_MODE, reg);

	/*
	 * Set up TX descriptor ring, descriptors, and dma maps.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    DWC_DESC_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    TX_DESC_SIZE, 1,		/* maxsize, nsegments */
	    TX_DESC_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->txdesc_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create TX ring DMA tag.\n");
		goto out;
	}

	error = bus_dmamem_alloc(sc->txdesc_tag, (void **)&sc->txdesc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->txdesc_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate TX descriptor ring.\n");
		goto out;
	}

	error = bus_dmamap_load(sc->txdesc_tag, sc->txdesc_map,
	    sc->txdesc_ring, TX_DESC_SIZE, dwc_get1paddr,
	    &sc->txdesc_ring_paddr, 0);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not load TX descriptor ring map.\n");
		goto out;
	}

	for (idx = 0; idx < TX_DESC_COUNT; idx++) {
		nidx = next_txidx(sc, idx);
		sc->txdesc_ring[idx].addr2 = sc->txdesc_ring_paddr +
		    (nidx * sizeof(struct dwc_hwdesc));
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * TX_MAP_MAX_SEGS,	/* maxsize */
	    TX_MAP_MAX_SEGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->txbuf_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create TX buffer DMA tag.\n");
		goto out;
	}

	for (idx = 0; idx < TX_MAP_COUNT; idx++) {
		error = bus_dmamap_create(sc->txbuf_tag, BUS_DMA_COHERENT,
		    &sc->txbuf_map[idx].map);
		if (error != 0) {
			device_printf(sc->dev,
			    "could not create TX buffer DMA map.\n");
			goto out;
		}
	}

	for (idx = 0; idx < TX_DESC_COUNT; idx++)
		dwc_setup_txdesc(sc, idx, 0, 0, 0, false, false);

	WRITE4(sc, TX_DESCR_LIST_ADDR, sc->txdesc_ring_paddr);
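
	/*
	 * Editorial note: the RX ring below is built with the same tag
	 * recipe as the TX ring above.  DWC_DESC_RING_ALIGN satisfies the
	 * hardware's descriptor-list alignment restriction,
	 * BUS_SPACE_MAXADDR_32BIT keeps the rings within reach of the
	 * 32-bit addr1/addr2 descriptor fields, and BUS_DMA_COHERENT lets
	 * us skip explicit bus_dmamap_sync() calls on the descriptor memory
	 * itself on platforms that honor it.
	 */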

	/*
	 * Set up RX descriptor ring, descriptors, dma maps, and mbufs.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    DWC_DESC_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    RX_DESC_SIZE, 1,		/* maxsize, nsegments */
	    RX_DESC_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rxdesc_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create RX ring DMA tag.\n");
		goto out;
	}

	error = bus_dmamem_alloc(sc->rxdesc_tag, (void **)&sc->rxdesc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->rxdesc_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate RX descriptor ring.\n");
		goto out;
	}

	error = bus_dmamap_load(sc->rxdesc_tag, sc->rxdesc_map,
	    sc->rxdesc_ring, RX_DESC_SIZE, dwc_get1paddr,
	    &sc->rxdesc_ring_paddr, 0);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not load RX descriptor ring map.\n");
		goto out;
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rxbuf_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create RX buffer DMA tag.\n");
		goto out;
	}

	for (idx = 0; idx < RX_DESC_COUNT; idx++) {
		error = bus_dmamap_create(sc->rxbuf_tag, BUS_DMA_COHERENT,
		    &sc->rxbuf_map[idx].map);
		if (error != 0) {
			device_printf(sc->dev,
			    "could not create RX buffer DMA map.\n");
			goto out;
		}
		if ((m = dwc_alloc_mbufcl(sc)) == NULL) {
			device_printf(sc->dev, "could not alloc mbuf\n");
			error = ENOMEM;
			goto out;
		}
		if ((error = dma1000_setup_rxbuf(sc, idx, m)) != 0) {
			device_printf(sc->dev,
			    "could not create new RX buffer.\n");
			goto out;
		}
	}
	WRITE4(sc, RX_DESCR_LIST_ADDR, sc->rxdesc_ring_paddr);

out:
	if (error != 0)
		return (ENXIO);

	return (0);
}

/*
 * Free the bus_dma resources.
 */
void
dma1000_free(struct dwc_softc *sc)
{
	bus_dmamap_t map;
	int idx;

	/* Clean up RX DMA resources and free mbufs. */
	for (idx = 0; idx < RX_DESC_COUNT; ++idx) {
		if ((map = sc->rxbuf_map[idx].map) != NULL) {
			bus_dmamap_unload(sc->rxbuf_tag, map);
			bus_dmamap_destroy(sc->rxbuf_tag, map);
			m_freem(sc->rxbuf_map[idx].mbuf);
		}
	}
	if (sc->rxbuf_tag != NULL)
		bus_dma_tag_destroy(sc->rxbuf_tag);
	if (sc->rxdesc_map != NULL) {
		bus_dmamap_unload(sc->rxdesc_tag, sc->rxdesc_map);
		bus_dmamem_free(sc->rxdesc_tag, sc->rxdesc_ring,
		    sc->rxdesc_map);
	}
	if (sc->rxdesc_tag != NULL)
		bus_dma_tag_destroy(sc->rxdesc_tag);

	/* Clean up TX DMA resources. */
	for (idx = 0; idx < TX_DESC_COUNT; ++idx) {
		if ((map = sc->txbuf_map[idx].map) != NULL) {
			/* TX maps are already unloaded. */
			bus_dmamap_destroy(sc->txbuf_tag, map);
		}
	}
	if (sc->txbuf_tag != NULL)
		bus_dma_tag_destroy(sc->txbuf_tag);
	if (sc->txdesc_map != NULL) {
		bus_dmamap_unload(sc->txdesc_tag, sc->txdesc_map);
		bus_dmamem_free(sc->txdesc_tag, sc->txdesc_ring,
		    sc->txdesc_map);
	}
	if (sc->txdesc_tag != NULL)
		bus_dma_tag_destroy(sc->txdesc_tag);
}
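
/*
 * Editorial note: dma1000_free() checks every tag and map for NULL before
 * touching it, so it tolerates the partially constructed state left behind
 * when dma1000_init() bails out early; presumably this is what lets callers
 * use it for error-path cleanup as well as for detach.
 */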

/*
 * Interrupt function.
 */
int
dma1000_intr(struct dwc_softc *sc)
{
	uint32_t reg;
	int rv;

	DWC_ASSERT_LOCKED(sc);

	rv = 0;
	reg = READ4(sc, DMA_STATUS);
	if (reg & DMA_STATUS_NIS) {
		if (reg & DMA_STATUS_RI)
			dma1000_rxfinish_locked(sc);

		if (reg & DMA_STATUS_TI) {
			dma1000_txfinish_locked(sc);
			dma1000_txstart(sc);
		}
	}

	if (reg & DMA_STATUS_AIS) {
		if (reg & DMA_STATUS_FBI) {
			/* Fatal bus error */
			rv = EIO;
		}
	}

	WRITE4(sc, DMA_STATUS, reg & DMA_STATUS_INTR_MASK);
	return (rv);
}
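
/*
 * Editorial note on interrupt acknowledgement: DMA_STATUS bits are
 * write-one-to-clear on this core (as is conventional for the DesignWare
 * GMAC), so writing back the value we read, masked to the interrupt bits,
 * acknowledges exactly the events handled above.  NIS and AIS are summary
 * bits covering the normal (RI/TI) and abnormal (e.g. FBI) groups.
 */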