/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2016-2019 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _DEV_XDMA_XDMA_H_
#define _DEV_XDMA_XDMA_H_

#include <sys/proc.h>
#include <sys/vmem.h>

enum xdma_direction {
	XDMA_MEM_TO_MEM,
	XDMA_MEM_TO_DEV,
	XDMA_DEV_TO_MEM,
	XDMA_DEV_TO_DEV,
};

enum xdma_operation_type {
	XDMA_MEMCPY,
	XDMA_CYCLIC,
	XDMA_FIFO,
	XDMA_SG,
};

enum xdma_request_type {
	XR_TYPE_PHYS,
	XR_TYPE_VIRT,
	XR_TYPE_MBUF,
	XR_TYPE_BIO,
};

enum xdma_command {
	XDMA_CMD_BEGIN,
	XDMA_CMD_PAUSE,
	XDMA_CMD_TERMINATE,
};

struct xdma_transfer_status {
	uint32_t	transferred;
	int		error;
};

typedef struct xdma_transfer_status xdma_transfer_status_t;

struct xdma_controller {
	device_t dev;		/* DMA consumer device_t. */
	device_t dma_dev;	/* A real DMA device_t. */
	void *data;		/* OFW MD part. */
	vmem_t *vmem;		/* Bounce memory. */

	/* List of virtual channels allocated. */
	TAILQ_HEAD(xdma_channel_list, xdma_channel) channels;
};

typedef struct xdma_controller xdma_controller_t;
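
/*
 * Typical consumer lifecycle, as a hedged sketch: the controller handle
 * is looked up via the consumer's OFW node, a virtual channel is
 * allocated on it, and both are released in reverse order.  The "dma"
 * property name and the error handling are illustrative, not mandated
 * by this header.
 *
 *	xdma_controller_t *xdma;
 *	xdma_channel_t *xchan;
 *
 *	xdma = xdma_ofw_get(dev, "dma");
 *	if (xdma == NULL)
 *		return (ENXIO);
 *	xchan = xdma_channel_alloc(xdma, XCHAN_CAP_BUSDMA);
 *	if (xchan == NULL) {
 *		xdma_put(xdma);
 *		return (ENXIO);
 *	}
 *	...
 *	xdma_channel_free(xchan);
 *	xdma_put(xdma);
 */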

struct xchan_buf {
	bus_dmamap_t map;
	uint32_t nsegs;
	uint32_t nsegs_left;
	vm_offset_t vaddr;
	vm_offset_t paddr;
	vm_size_t size;
};

struct xdma_request {
	struct mbuf *m;
	struct bio *bp;
	enum xdma_operation_type operation;
	enum xdma_request_type req_type;
	enum xdma_direction direction;
	bus_addr_t src_addr;
	bus_addr_t dst_addr;
	uint8_t src_width;
	uint8_t dst_width;
	bus_size_t block_num;
	bus_size_t block_len;
	xdma_transfer_status_t status;
	void *user;
	TAILQ_ENTRY(xdma_request) xr_next;
	struct xchan_buf buf;
};

struct xdma_sglist {
	bus_addr_t src_addr;
	bus_addr_t dst_addr;
	size_t len;
	uint8_t src_width;
	uint8_t dst_width;
	enum xdma_direction direction;
	bool first;
	bool last;
};
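
/*
 * Building a one-block memcpy request by hand, as a sketch; src_phys,
 * dst_phys and len are assumed to be physically contiguous addresses
 * and a length supplied by the caller.
 *
 *	struct xdma_request req;
 *	int error;
 *
 *	bzero(&req, sizeof(req));
 *	req.operation = XDMA_MEMCPY;
 *	req.req_type = XR_TYPE_PHYS;
 *	req.direction = XDMA_MEM_TO_MEM;
 *	req.src_addr = src_phys;
 *	req.dst_addr = dst_phys;
 *	req.block_num = 1;
 *	req.block_len = len;
 *	error = xdma_request(xchan, &req);
 */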

struct xdma_channel {
	xdma_controller_t *xdma;
	vmem_t *vmem;

	uint32_t flags;
#define	XCHAN_BUFS_ALLOCATED	(1 << 0)
#define	XCHAN_SGLIST_ALLOCATED	(1 << 1)
#define	XCHAN_CONFIGURED	(1 << 2)
#define	XCHAN_TYPE_CYCLIC	(1 << 3)
#define	XCHAN_TYPE_MEMCPY	(1 << 4)
#define	XCHAN_TYPE_FIFO		(1 << 5)
#define	XCHAN_TYPE_SG		(1 << 6)

	uint32_t caps;
#define	XCHAN_CAP_BUSDMA	(1 << 0)
#define	XCHAN_CAP_NOSEG		(1 << 1)
#define	XCHAN_CAP_NOBUFS	(1 << 2)

	/* A real hardware driver channel. */
	void *chan;

	/* Interrupt handlers. */
	TAILQ_HEAD(, xdma_intr_handler) ie_handlers;
	TAILQ_ENTRY(xdma_channel) xchan_next;

	struct mtx mtx_lock;
	struct mtx mtx_qin_lock;
	struct mtx mtx_qout_lock;
	struct mtx mtx_bank_lock;
	struct mtx mtx_proc_lock;

	/* Request queue. */
	bus_dma_tag_t dma_tag_bufs;
	struct xdma_request *xr_mem;
	uint32_t xr_num;

	/* Bus dma tag options. */
	bus_size_t maxsegsize;
	bus_size_t maxnsegs;
	bus_size_t alignment;
	bus_addr_t boundary;
	bus_addr_t lowaddr;
	bus_addr_t highaddr;

	struct xdma_sglist *sg;

	TAILQ_HEAD(, xdma_request) bank;
	TAILQ_HEAD(, xdma_request) queue_in;
	TAILQ_HEAD(, xdma_request) queue_out;
	TAILQ_HEAD(, xdma_request) processing;
};

typedef struct xdma_channel xdma_channel_t;

struct xdma_intr_handler {
	int (*cb)(void *cb_user, xdma_transfer_status_t *status);
	void *cb_user;
	TAILQ_ENTRY(xdma_intr_handler) ih_next;
};

static MALLOC_DEFINE(M_XDMA, "xdma", "xDMA framework");

#define	XCHAN_LOCK(xchan)		mtx_lock(&(xchan)->mtx_lock)
#define	XCHAN_UNLOCK(xchan)		mtx_unlock(&(xchan)->mtx_lock)
#define	XCHAN_ASSERT_LOCKED(xchan)	\
    mtx_assert(&(xchan)->mtx_lock, MA_OWNED)

#define	QUEUE_IN_LOCK(xchan)		mtx_lock(&(xchan)->mtx_qin_lock)
#define	QUEUE_IN_UNLOCK(xchan)		mtx_unlock(&(xchan)->mtx_qin_lock)
#define	QUEUE_IN_ASSERT_LOCKED(xchan)	\
    mtx_assert(&(xchan)->mtx_qin_lock, MA_OWNED)

#define	QUEUE_OUT_LOCK(xchan)		mtx_lock(&(xchan)->mtx_qout_lock)
#define	QUEUE_OUT_UNLOCK(xchan)		mtx_unlock(&(xchan)->mtx_qout_lock)
#define	QUEUE_OUT_ASSERT_LOCKED(xchan)	\
    mtx_assert(&(xchan)->mtx_qout_lock, MA_OWNED)

#define	QUEUE_BANK_LOCK(xchan)		mtx_lock(&(xchan)->mtx_bank_lock)
#define	QUEUE_BANK_UNLOCK(xchan)	mtx_unlock(&(xchan)->mtx_bank_lock)
#define	QUEUE_BANK_ASSERT_LOCKED(xchan)	\
    mtx_assert(&(xchan)->mtx_bank_lock, MA_OWNED)

#define	QUEUE_PROC_LOCK(xchan)		mtx_lock(&(xchan)->mtx_proc_lock)
#define	QUEUE_PROC_UNLOCK(xchan)	mtx_unlock(&(xchan)->mtx_proc_lock)
#define	QUEUE_PROC_ASSERT_LOCKED(xchan)	\
    mtx_assert(&(xchan)->mtx_proc_lock, MA_OWNED)

#define	XDMA_SGLIST_MAXLEN	2048
#define	XDMA_MAX_SEG		128

/* xDMA controller ops */
xdma_controller_t *xdma_ofw_get(device_t dev, const char *prop);
int xdma_put(xdma_controller_t *xdma);
vmem_t * xdma_get_memory(device_t dev);
void xdma_put_memory(vmem_t *vmem);

/* xDMA channel ops */
xdma_channel_t * xdma_channel_alloc(xdma_controller_t *, uint32_t caps);
int xdma_channel_free(xdma_channel_t *);
int xdma_request(xdma_channel_t *xchan, struct xdma_request *r);
void xchan_set_memory(xdma_channel_t *xchan, vmem_t *vmem);

/* SG interface */
int xdma_prep_sg(xdma_channel_t *, uint32_t xr_num, bus_size_t maxsegsize,
    bus_size_t maxnsegs, bus_size_t alignment, bus_addr_t boundary,
    bus_addr_t lowaddr, bus_addr_t highaddr);
void xdma_channel_free_sg(xdma_channel_t *xchan);
int xdma_queue_submit_sg(xdma_channel_t *xchan);
void xchan_seg_done(xdma_channel_t *xchan, xdma_transfer_status_t *);

/* Queue operations */
int xdma_dequeue_mbuf(xdma_channel_t *xchan, struct mbuf **m,
    xdma_transfer_status_t *);
int xdma_enqueue_mbuf(xdma_channel_t *xchan, struct mbuf **m, uintptr_t addr,
    uint8_t src_width, uint8_t dst_width, enum xdma_direction dir);
int xdma_dequeue_bio(xdma_channel_t *xchan, struct bio **bp,
    xdma_transfer_status_t *status);
int xdma_enqueue_bio(xdma_channel_t *xchan, struct bio **bp, bus_addr_t addr,
    uint8_t src_width, uint8_t dst_width, enum xdma_direction dir);
int xdma_dequeue(xdma_channel_t *xchan, void **user,
    xdma_transfer_status_t *status);
int xdma_enqueue(xdma_channel_t *xchan, uintptr_t src, uintptr_t dst,
    uint8_t src_width, uint8_t dst_width, bus_size_t len,
    enum xdma_direction dir, void *user);
int xdma_queue_submit(xdma_channel_t *xchan);
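
/*
 * Mbuf/SG path, as a hedged sketch: the channel is prepared once with
 * busdma tag limits (the values and the my_softc type here are
 * illustrative), mbufs are enqueued and the queue is submitted;
 * completions come back through the callback installed with
 * xdma_setup_intr().
 *
 *	static int
 *	my_dma_intr(void *arg, xdma_transfer_status_t *st)
 *	{
 *		struct my_softc *sc;
 *		struct mbuf *m;
 *		xdma_transfer_status_t mst;
 *
 *		sc = arg;
 *		while (xdma_dequeue_mbuf(sc->xchan, &m, &mst) == 0)
 *			m_freem(m);
 *		return (0);
 *	}
 *
 *	xdma_setup_intr(xchan, my_dma_intr, sc, &ih);
 *	xdma_prep_sg(xchan, 32, MCLBYTES, 8, 16, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR);
 *	xdma_enqueue_mbuf(xchan, &m, 0, 4, 4, XDMA_MEM_TO_DEV);
 *	xdma_queue_submit(xchan);
 */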

/* Mbuf operations */
uint32_t xdma_mbuf_defrag(xdma_channel_t *xchan, struct xdma_request *xr);
uint32_t xdma_mbuf_chain_count(struct mbuf *m0);

/* Channel Control */
int xdma_control(xdma_channel_t *xchan, enum xdma_command cmd);

/* Interrupt callback */
int xdma_setup_intr(xdma_channel_t *xchan, int (*cb)(void *,
    xdma_transfer_status_t *), void *arg, void **ihandler);
int xdma_teardown_intr(xdma_channel_t *xchan, struct xdma_intr_handler *ih);
int xdma_teardown_all_intr(xdma_channel_t *xchan);
void xdma_callback(struct xdma_channel *xchan, xdma_transfer_status_t *status);

/* Sglist */
int xchan_sglist_alloc(xdma_channel_t *xchan);
void xchan_sglist_free(xdma_channel_t *xchan);
int xdma_sglist_add(struct xdma_sglist *sg, struct bus_dma_segment *seg,
    uint32_t nsegs, struct xdma_request *xr);

/* Requests bank */
void xchan_bank_init(xdma_channel_t *xchan);
int xchan_bank_free(xdma_channel_t *xchan);
struct xdma_request * xchan_bank_get(xdma_channel_t *xchan);
int xchan_bank_put(xdma_channel_t *xchan, struct xdma_request *xr);

#endif /* !_DEV_XDMA_XDMA_H_ */