/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2016-2019 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _DEV_XDMA_XDMA_H_
#define _DEV_XDMA_XDMA_H_

#include <sys/proc.h>
#include <sys/vmem.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#endif

#include <vm/vm.h>
#include <vm/pmap.h>

enum xdma_direction {
	XDMA_MEM_TO_MEM,
	XDMA_MEM_TO_DEV,
	XDMA_DEV_TO_MEM,
	XDMA_DEV_TO_DEV,
};

enum xdma_operation_type {
	XDMA_MEMCPY,
	XDMA_CYCLIC,
	XDMA_FIFO,
	XDMA_SG,
};

enum xdma_request_type {
	XR_TYPE_PHYS,
	XR_TYPE_VIRT,
	XR_TYPE_MBUF,
	XR_TYPE_BIO,
};

enum xdma_command {
	XDMA_CMD_BEGIN,
	XDMA_CMD_PAUSE,
	XDMA_CMD_TERMINATE,
};

struct xdma_transfer_status {
	uint32_t	transferred;
	int		error;
};

typedef struct xdma_transfer_status xdma_transfer_status_t;

struct xdma_controller {
	device_t dev;		/* DMA consumer device_t. */
	device_t dma_dev;	/* A real DMA device_t. */
	void *data;		/* OFW MD part. */
	vmem_t *vmem;		/* Bounce memory. */

	/* List of virtual channels allocated. */
	TAILQ_HEAD(xdma_channel_list, xdma_channel) channels;
};

typedef struct xdma_controller xdma_controller_t;
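/*
 * Example: obtaining a controller handle from a consumer driver's
 * attach routine via xdma_ofw_get(), declared below.  Illustrative
 * sketch only; the softc layout and the "tx" xdma property name are
 * assumptions, not part of this header.
 *
 *	sc->xdma_tx = xdma_ofw_get(sc->dev, "tx");
 *	if (sc->xdma_tx == NULL)
 *		return (ENXIO);
 */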
struct xchan_buf {
	bus_dmamap_t	map;
	uint32_t	nsegs;
	uint32_t	nsegs_left;
	vm_offset_t	vaddr;
	vm_offset_t	paddr;
	vm_size_t	size;
};

struct xdma_request {
	struct mbuf			*m;
	struct bio			*bp;
	enum xdma_operation_type	operation;
	enum xdma_request_type		req_type;
	enum xdma_direction		direction;
	bus_addr_t			src_addr;
	bus_addr_t			dst_addr;
	uint8_t				src_width;
	uint8_t				dst_width;
	bus_size_t			block_num;
	bus_size_t			block_len;
	xdma_transfer_status_t		status;
	void				*user;
	TAILQ_ENTRY(xdma_request)	xr_next;
	struct xchan_buf		buf;
};
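/*
 * Example: a minimal memory-to-memory copy request, submitted with
 * xdma_request(), declared below.  Illustrative sketch only: the
 * xchan channel and the src, dst and len variables are assumptions,
 * and error handling is omitted.
 *
 *	struct xdma_request req;
 *
 *	bzero(&req, sizeof(req));
 *	req.operation = XDMA_MEMCPY;
 *	req.req_type = XR_TYPE_VIRT;
 *	req.direction = XDMA_MEM_TO_MEM;
 *	req.src_addr = (bus_addr_t)(uintptr_t)src;
 *	req.dst_addr = (bus_addr_t)(uintptr_t)dst;
 *	req.block_len = len;
 *	req.block_num = 1;
 *	error = xdma_request(xchan, &req);
 */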
struct xdma_sglist {
	bus_addr_t		src_addr;
	bus_addr_t		dst_addr;
	size_t			len;
	uint8_t			src_width;
	uint8_t			dst_width;
	enum xdma_direction	direction;
	bool			first;
	bool			last;
};

struct xdma_iommu {
	struct pmap p;
	vmem_t *vmem;		/* VA space */
	device_t dev;		/* IOMMU device */
};

struct xdma_channel {
	xdma_controller_t		*xdma;
	vmem_t				*vmem;

	uint32_t			flags;
#define	XCHAN_BUFS_ALLOCATED		(1 << 0)
#define	XCHAN_SGLIST_ALLOCATED		(1 << 1)
#define	XCHAN_CONFIGURED		(1 << 2)
#define	XCHAN_TYPE_CYCLIC		(1 << 3)
#define	XCHAN_TYPE_MEMCPY		(1 << 4)
#define	XCHAN_TYPE_FIFO			(1 << 5)
#define	XCHAN_TYPE_SG			(1 << 6)

	uint32_t			caps;
#define	XCHAN_CAP_BUSDMA		(1 << 0)
#define	XCHAN_CAP_NOSEG			(1 << 1)
#define	XCHAN_CAP_BOUNCE		(1 << 2)
#define	XCHAN_CAP_IOMMU			(1 << 3)

	/* A real hardware driver channel. */
	void				*chan;

	/* Interrupt handlers. */
	TAILQ_HEAD(, xdma_intr_handler)	ie_handlers;
	TAILQ_ENTRY(xdma_channel)	xchan_next;

	struct mtx			mtx_lock;
	struct mtx			mtx_qin_lock;
	struct mtx			mtx_qout_lock;
	struct mtx			mtx_bank_lock;
	struct mtx			mtx_proc_lock;

	/* Request queue. */
	bus_dma_tag_t			dma_tag_bufs;
	struct xdma_request		*xr_mem;
	uint32_t			xr_num;

	/* Bus dma tag options. */
	bus_size_t			maxsegsize;
	bus_size_t			maxnsegs;
	bus_size_t			alignment;
	bus_addr_t			boundary;
	bus_addr_t			lowaddr;
	bus_addr_t			highaddr;

	struct xdma_sglist		*sg;

	TAILQ_HEAD(, xdma_request)	bank;
	TAILQ_HEAD(, xdma_request)	queue_in;
	TAILQ_HEAD(, xdma_request)	queue_out;
	TAILQ_HEAD(, xdma_request)	processing;

	/* iommu */
	struct xdma_iommu		xio;
};

typedef struct xdma_channel xdma_channel_t;

struct xdma_intr_handler {
	int	(*cb)(void *cb_user, xdma_transfer_status_t *status);
	int	flags;
#define	XDMA_INTR_NET	(1 << 0)
	void	*cb_user;
	TAILQ_ENTRY(xdma_intr_handler)	ih_next;
};

static MALLOC_DEFINE(M_XDMA, "xdma", "xDMA framework");

#define	XCHAN_LOCK(xchan)		mtx_lock(&(xchan)->mtx_lock)
#define	XCHAN_UNLOCK(xchan)		mtx_unlock(&(xchan)->mtx_lock)
#define	XCHAN_ASSERT_LOCKED(xchan)	\
	mtx_assert(&(xchan)->mtx_lock, MA_OWNED)

#define	QUEUE_IN_LOCK(xchan)		mtx_lock(&(xchan)->mtx_qin_lock)
#define	QUEUE_IN_UNLOCK(xchan)		mtx_unlock(&(xchan)->mtx_qin_lock)
#define	QUEUE_IN_ASSERT_LOCKED(xchan)	\
	mtx_assert(&(xchan)->mtx_qin_lock, MA_OWNED)

#define	QUEUE_OUT_LOCK(xchan)		mtx_lock(&(xchan)->mtx_qout_lock)
#define	QUEUE_OUT_UNLOCK(xchan)		mtx_unlock(&(xchan)->mtx_qout_lock)
#define	QUEUE_OUT_ASSERT_LOCKED(xchan)	\
	mtx_assert(&(xchan)->mtx_qout_lock, MA_OWNED)

#define	QUEUE_BANK_LOCK(xchan)		mtx_lock(&(xchan)->mtx_bank_lock)
#define	QUEUE_BANK_UNLOCK(xchan)	mtx_unlock(&(xchan)->mtx_bank_lock)
#define	QUEUE_BANK_ASSERT_LOCKED(xchan)	\
	mtx_assert(&(xchan)->mtx_bank_lock, MA_OWNED)

#define	QUEUE_PROC_LOCK(xchan)		mtx_lock(&(xchan)->mtx_proc_lock)
#define	QUEUE_PROC_UNLOCK(xchan)	mtx_unlock(&(xchan)->mtx_proc_lock)
#define	QUEUE_PROC_ASSERT_LOCKED(xchan)	\
	mtx_assert(&(xchan)->mtx_proc_lock, MA_OWNED)

#define	XDMA_SGLIST_MAXLEN	2048
#define	XDMA_MAX_SEG		128

/* xDMA controller ops */
xdma_controller_t *xdma_ofw_get(device_t dev, const char *prop);
xdma_controller_t *xdma_get(device_t dev, device_t dma_dev);
int xdma_put(xdma_controller_t *xdma);
vmem_t * xdma_get_memory(device_t dev);
void xdma_put_memory(vmem_t *vmem);
#ifdef FDT
int xdma_handle_mem_node(vmem_t *vmem, phandle_t memory);
#endif

/* xDMA channel ops */
xdma_channel_t * xdma_channel_alloc(xdma_controller_t *, uint32_t caps);
int xdma_channel_free(xdma_channel_t *);
int xdma_request(xdma_channel_t *xchan, struct xdma_request *r);
void xchan_set_memory(xdma_channel_t *xchan, vmem_t *vmem);

/* SG interface */
int xdma_prep_sg(xdma_channel_t *, uint32_t,
    bus_size_t, bus_size_t, bus_size_t, bus_addr_t, bus_addr_t, bus_addr_t);
void xdma_channel_free_sg(xdma_channel_t *xchan);
int xdma_queue_submit_sg(xdma_channel_t *xchan);
void xchan_seg_done(xdma_channel_t *xchan, xdma_transfer_status_t *);
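/*
 * Example: typical scatter-gather channel bring-up and transmit path,
 * using the queue operations declared below.  Illustrative sketch
 * only: the softc fields, callback name, queue depth, segment limits
 * and access widths are assumptions, not part of this header.  The
 * xdma_prep_sg() arguments after xr_num follow the bus dma tag
 * options of struct xdma_channel: maxsegsize, maxnsegs, alignment,
 * boundary, lowaddr, highaddr.
 *
 *	sc->xchan_tx = xdma_channel_alloc(sc->xdma_tx, XCHAN_CAP_BUSDMA);
 *	if (sc->xchan_tx == NULL)
 *		return (ENXIO);
 *
 *	error = xdma_prep_sg(sc->xchan_tx, 64, MCLBYTES, XDMA_MAX_SEG,
 *	    16, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR);
 *
 *	error = xdma_setup_intr(sc->xchan_tx, 0, my_tx_intr, sc,
 *	    &sc->ih_tx);
 *
 * Transmit path, where m is an mbuf chain:
 *
 *	xdma_enqueue_mbuf(sc->xchan_tx, &m, 0, 4, 4, XDMA_MEM_TO_DEV);
 *	xdma_queue_submit(sc->xchan_tx);
 */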
/* Queue operations */
int xdma_dequeue_mbuf(xdma_channel_t *xchan, struct mbuf **m,
    xdma_transfer_status_t *);
int xdma_enqueue_mbuf(xdma_channel_t *xchan, struct mbuf **m, uintptr_t addr,
    uint8_t, uint8_t, enum xdma_direction dir);
int xdma_dequeue_bio(xdma_channel_t *xchan, struct bio **bp,
    xdma_transfer_status_t *status);
int xdma_enqueue_bio(xdma_channel_t *xchan, struct bio **bp, bus_addr_t addr,
    uint8_t, uint8_t, enum xdma_direction dir);
int xdma_dequeue(xdma_channel_t *xchan, void **user,
    xdma_transfer_status_t *status);
int xdma_enqueue(xdma_channel_t *xchan, uintptr_t src, uintptr_t dst,
    uint8_t, uint8_t, bus_size_t, enum xdma_direction dir, void *);
int xdma_queue_submit(xdma_channel_t *xchan);

/* Mbuf operations */
uint32_t xdma_mbuf_defrag(xdma_channel_t *xchan, struct xdma_request *xr);
uint32_t xdma_mbuf_chain_count(struct mbuf *m0);

/* Channel Control */
int xdma_control(xdma_channel_t *xchan, enum xdma_command cmd);

/* Interrupt callback */
int xdma_setup_intr(xdma_channel_t *xchan, int flags, int (*cb)(void *,
    xdma_transfer_status_t *), void *arg, void **);
int xdma_teardown_intr(xdma_channel_t *xchan, struct xdma_intr_handler *ih);
int xdma_teardown_all_intr(xdma_channel_t *xchan);
void xdma_callback(struct xdma_channel *xchan, xdma_transfer_status_t *status);

/* Sglist */
int xchan_sglist_alloc(xdma_channel_t *xchan);
void xchan_sglist_free(xdma_channel_t *xchan);
int xdma_sglist_add(struct xdma_sglist *sg, struct bus_dma_segment *seg,
    uint32_t nsegs, struct xdma_request *xr);

/* Requests bank */
void xchan_bank_init(xdma_channel_t *xchan);
int xchan_bank_free(xdma_channel_t *xchan);
struct xdma_request * xchan_bank_get(xdma_channel_t *xchan);
int xchan_bank_put(xdma_channel_t *xchan, struct xdma_request *xr);

/* IOMMU */
void xdma_iommu_add_entry(xdma_channel_t *xchan, vm_offset_t *va,
    vm_paddr_t pa, vm_size_t size, vm_prot_t prot);
void xdma_iommu_remove_entry(xdma_channel_t *xchan, vm_offset_t va);
int xdma_iommu_init(struct xdma_iommu *xio);
int xdma_iommu_release(struct xdma_iommu *xio);

#endif /* !_DEV_XDMA_XDMA_H_ */