/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef	__QCOM_ESS_EDMA_VAR_H__
#define	__QCOM_ESS_EDMA_VAR_H__

#define	EDMA_LOCK(_sc)			mtx_lock(&(_sc)->sc_mtx)
#define	EDMA_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define	EDMA_LOCK_ASSERT(_sc)		mtx_assert(&(_sc)->sc_mtx, MA_OWNED)

#define	EDMA_RING_LOCK(_ring)		mtx_lock(&(_ring)->mtx)
#define	EDMA_RING_UNLOCK(_ring)		mtx_unlock(&(_ring)->mtx)
#define	EDMA_RING_LOCK_ASSERT(_ring)	mtx_assert(&(_ring)->mtx, MA_OWNED)

/*
 * Register space access macros.
 */
#define	EDMA_REG_WRITE(sc, reg, val)	do {			\
	    bus_write_4(sc->sc_mem_res, (reg), (val));		\
	    } while (0)

#define	EDMA_REG_READ(sc, reg)		bus_read_4(sc->sc_mem_res, (reg))

#define	EDMA_REG_SET_BITS(sc, reg, bits)	\
	    EDMA_REG_WRITE(sc, reg, EDMA_REG_READ(sc, (reg)) | (bits))

#define	EDMA_REG_CLEAR_BITS(sc, reg, bits)	\
	    EDMA_REG_WRITE(sc, reg, EDMA_REG_READ(sc, (reg)) & ~(bits))

#define	EDMA_REG_BARRIER_WRITE(sc)	bus_barrier((sc)->sc_mem_res,	\
	    0, (sc)->sc_mem_res_size, BUS_SPACE_BARRIER_WRITE)
#define	EDMA_REG_BARRIER_READ(sc)	bus_barrier((sc)->sc_mem_res,	\
	    0, (sc)->sc_mem_res_size, BUS_SPACE_BARRIER_READ)
#define	EDMA_REG_BARRIER_RW(sc)		bus_barrier((sc)->sc_mem_res,	\
	    0, (sc)->sc_mem_res_size,					\
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE)

/*
 * Fixed number of interrupts - 16 TX, 8 RX.
 *
 * The Linux driver supports 4 or 8 RX queues.
 */
#define	QCOM_ESS_EDMA_NUM_TX_IRQS	16
#define	QCOM_ESS_EDMA_NUM_RX_IRQS	8

#define	QCOM_ESS_EDMA_NUM_TX_RINGS	16
#define	QCOM_ESS_EDMA_NUM_RX_RINGS	8

#define	EDMA_TX_RING_SIZE		128
#define	EDMA_RX_RING_SIZE		128

#define	EDMA_TX_BUFRING_SIZE		512

/* Maximum number of GMAC instances */
#define	QCOM_ESS_EDMA_MAX_NUM_GMACS	5

/* Maximum number of ports to support mapping to GMACs */
#define	QCOM_ESS_EDMA_MAX_NUM_PORTS	8

#define	QCOM_ESS_EDMA_MAX_TXFRAGS	8

struct qcom_ess_edma_softc;
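/*
 * Illustrative use of the register access macros above.  This sketch
 * is not part of the driver; EDMA_REG_EXAMPLE and the bit value are
 * placeholder names, not registers defined by this header:
 *
 *	EDMA_REG_BARRIER_READ(sc);
 *	val = EDMA_REG_READ(sc, EDMA_REG_EXAMPLE);
 *	EDMA_REG_SET_BITS(sc, EDMA_REG_EXAMPLE, 0x1);
 *	EDMA_REG_BARRIER_WRITE(sc);
 *
 * Note that EDMA_REG_SET_BITS/EDMA_REG_CLEAR_BITS expand to a
 * read-modify-write sequence and are not atomic, so callers are
 * expected to serialise access themselves (e.g. via EDMA_LOCK).
 */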
/*
 * An instance of an interrupt queue.
 */
struct qcom_ess_edma_intr {
	struct qcom_ess_edma_softc	*sc;
	struct resource			*irq_res;
	int				irq_rid;
	void				*irq_intr;

	struct {
		uint64_t	num_intr;
	} stats;
};

/*
 * A TX/RX descriptor ring.
 */
struct qcom_ess_edma_desc_ring {
	bus_dma_tag_t		hw_ring_dma_tag;	/* tag for hw ring */
	bus_dma_tag_t		buffer_dma_tag;		/* tag for mbufs */
	char			*label;

	struct mtx		mtx;

	bus_dmamap_t		hw_desc_map;
	bus_addr_t		hw_desc_paddr;
	void			*hw_desc;

	void			*sw_desc;
	int			hw_entry_size;	/* hw desc entry size */
	int			sw_entry_size;	/* sw desc entry size */
	int			ring_count;	/* Number of entries */
	int			buffer_align;
	int			ring_align;

	uint16_t		next_to_fill;
	uint16_t		next_to_clean;
	uint16_t		pending_fill;

	struct {
		uint64_t	num_added;
		uint64_t	num_cleaned;
		uint64_t	num_dropped;
		uint64_t	num_enqueue_full;
		uint64_t	num_rx_no_gmac;
		uint64_t	num_rx_ok;
		uint64_t	num_tx_ok;
		uint64_t	num_tx_maxfrags;
		uint64_t	num_tx_mapfail;
		uint64_t	num_rx_csum_ok;
		uint64_t	num_rx_csum_fail;
		uint64_t	num_tx_complete;
		uint64_t	num_tx_xmit_defer;
		uint64_t	num_tx_xmit_task;
	} stats;
};

/*
 * Structs for transmit and receive software
 * ring entries.
 */
struct qcom_ess_edma_sw_desc_tx {
	struct mbuf	*m;
	bus_dmamap_t	m_dmamap;
	uint32_t	is_first:1;
	uint32_t	is_last:1;
};

struct qcom_ess_edma_sw_desc_rx {
	struct mbuf	*m;
	bus_dmamap_t	m_dmamap;
	bus_addr_t	m_physaddr;
};

#define	QCOM_ESS_EDMA_LABEL_SZ		16

/*
 * Per transmit ring TX state for TX queue / buf_ring stuff.
 */
struct qcom_ess_edma_tx_state {
	struct task		completion_task;
	struct task		xmit_task;
	struct buf_ring		*br;
	struct taskqueue	*completion_tq;
	struct qcom_ess_edma_softc *sc;
	char			label[QCOM_ESS_EDMA_LABEL_SZ];
	int			enqueue_is_running;
	int			queue_id;
};

/*
 * Per receive ring RX state for taskqueue stuff.
 */
struct qcom_ess_edma_rx_state {
	struct task		completion_task;
	struct taskqueue	*completion_tq;
	struct qcom_ess_edma_softc *sc;
	char			label[QCOM_ESS_EDMA_LABEL_SZ];
	int			queue_id;
};
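/*
 * Sketch of how the ring indices in qcom_ess_edma_desc_ring are
 * typically advanced, assuming the conventional producer/consumer
 * scheme (this helper is hypothetical, not a function provided by
 * the driver):
 *
 *	static inline uint16_t
 *	edma_ring_next(struct qcom_ess_edma_desc_ring *ring, uint16_t idx)
 *	{
 *		return ((idx + 1) % ring->ring_count);
 *	}
 *
 * next_to_fill is the producer index (where the next descriptor is
 * written) and next_to_clean is the consumer index (where completed
 * descriptors are reaped); in such a scheme the ring is treated as
 * full when advancing next_to_fill would make it equal next_to_clean.
 */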
struct qcom_ess_edma_gmac {
	struct qcom_ess_edma_softc	*sc;
	int				id;
	bool				enabled;
	/* Native VLAN ID */
	int				vlan_id;
	/* Switch portmask for this instance */
	int				port_mask;
	/* MAC address for this ifnet (from device tree) */
	struct ether_addr		eaddr;
	/* ifnet interface! */
	if_t				ifp;
	/* media interface */
	struct ifmedia			ifm;
};

struct qcom_ess_edma_softc {
	device_t		sc_dev;
	struct mtx		sc_mtx;
	struct resource		*sc_mem_res;
	size_t			sc_mem_res_size;
	int			sc_mem_rid;
	uint32_t		sc_debug;
	bus_dma_tag_t		sc_dma_tag;

	struct qcom_ess_edma_intr sc_tx_irq[QCOM_ESS_EDMA_NUM_TX_IRQS];
	struct qcom_ess_edma_intr sc_rx_irq[QCOM_ESS_EDMA_NUM_RX_IRQS];

	struct qcom_ess_edma_desc_ring sc_tx_ring[QCOM_ESS_EDMA_NUM_TX_RINGS];
	struct qcom_ess_edma_desc_ring sc_rx_ring[QCOM_ESS_EDMA_NUM_RX_RINGS];
	struct qcom_ess_edma_tx_state sc_tx_state[QCOM_ESS_EDMA_NUM_TX_RINGS];
	struct qcom_ess_edma_rx_state sc_rx_state[QCOM_ESS_EDMA_NUM_RX_RINGS];
	struct qcom_ess_edma_gmac sc_gmac[QCOM_ESS_EDMA_MAX_NUM_GMACS];

	int			sc_gmac_port_map[QCOM_ESS_EDMA_MAX_NUM_PORTS];

	struct {
		uint32_t	num_gmac;
		uint32_t	mdio_supported;
		uint32_t	poll_required;
		uint32_t	rss_type;

		uint32_t	rx_buf_size;
		bool		rx_buf_ether_align;

		uint32_t	tx_intr_mask;
		uint32_t	rx_intr_mask;

		/* number of tx/rx descriptor entries in each ring */
		uint32_t	rx_ring_count;
		uint32_t	tx_ring_count;

		/* how many queues for each CPU */
		uint32_t	num_tx_queue_per_cpu;
	} sc_config;

	struct {
		uint32_t	misc_intr_mask;
		uint32_t	wol_intr_mask;
		uint32_t	intr_sw_idx_w;
	} sc_state;
};
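/*
 * Illustrative example of the port -> GMAC mapping above; the actual
 * mapping is populated from the device tree, and the -1 sentinel here
 * is an assumption for the sketch:
 *
 *	sc->sc_gmac_port_map[port] = gmac_id;	(or -1 if unmapped)
 *
 * An RX frame tagged with source switch port `port' would then be
 * handed to sc_gmac[sc->sc_gmac_port_map[port]].ifp, after checking
 * that the map entry is valid.
 */

#endif	/* __QCOM_ESS_EDMA_VAR_H__ */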