/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2016-2019 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _DEV_XDMA_XDMA_H_
#define _DEV_XDMA_XDMA_H_

#include <sys/proc.h>
#include <sys/vmem.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#endif

#include <vm/vm.h>
#include <vm/pmap.h>

/* DMA transfer direction. */
enum xdma_direction {
	XDMA_MEM_TO_MEM,
	XDMA_MEM_TO_DEV,
	XDMA_DEV_TO_MEM,
	XDMA_DEV_TO_DEV,
};

/* Type of DMA operation. */
enum xdma_operation_type {
	XDMA_MEMCPY,
	XDMA_CYCLIC,
	XDMA_FIFO,
	XDMA_SG,
};

/* How a request describes its data: physical or virtual address, mbuf, bio. */
enum xdma_request_type {
	XR_TYPE_PHYS,
	XR_TYPE_VIRT,
	XR_TYPE_MBUF,
	XR_TYPE_BIO,
};

/* Channel control commands for xdma_control(). */
enum xdma_command {
	XDMA_CMD_BEGIN,
	XDMA_CMD_PAUSE,
	XDMA_CMD_TERMINATE,
};

struct xdma_transfer_status {
	uint32_t	transferred;
	int		error;
};

typedef struct xdma_transfer_status xdma_transfer_status_t;

struct xdma_controller {
	device_t dev;		/* DMA consumer device_t. */
	device_t dma_dev;	/* A real DMA device_t. */
	void *data;		/* OFW MD part. */
	vmem_t *vmem;		/* Bounce memory. */

	/* List of virtual channels allocated. */
	TAILQ_HEAD(xdma_channel_list, xdma_channel)	channels;
};

typedef struct xdma_controller xdma_controller_t;

struct xchan_buf {
	bus_dmamap_t			map;
	uint32_t			nsegs;
	uint32_t			nsegs_left;
	vm_offset_t			vaddr;
	vm_offset_t			paddr;
	vm_size_t			size;
};

struct xdma_request {
	struct mbuf			*m;
	struct bio			*bp;
	enum xdma_operation_type	operation;
	enum xdma_request_type		req_type;
	enum xdma_direction		direction;
	bus_addr_t			src_addr;
	bus_addr_t			dst_addr;
	uint8_t				src_width;
	uint8_t				dst_width;
	bus_size_t			block_num;
	bus_size_t			block_len;
	xdma_transfer_status_t		status;
	void				*user;
	TAILQ_ENTRY(xdma_request)	xr_next;
	struct xchan_buf		buf;
};

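/*
 * Usage sketch (informational, not part of the original header): filling a
 * physical-address memcpy request and handing it to xdma_request(), declared
 * below.  The xchan, the source/destination addresses and the length are
 * illustrative assumptions.
 *
 *	struct xdma_request req;
 *	int error;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.operation = XDMA_MEMCPY;
 *	req.req_type = XR_TYPE_PHYS;
 *	req.direction = XDMA_MEM_TO_MEM;
 *	req.src_addr = src_phys;	(hypothetical bus_addr_t)
 *	req.dst_addr = dst_phys;	(hypothetical bus_addr_t)
 *	req.block_len = len;		(bytes per block)
 *	req.block_num = 1;
 *	error = xdma_request(xchan, &req);
 */
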
struct xdma_sglist {
	bus_addr_t			src_addr;
	bus_addr_t			dst_addr;
	size_t				len;
	uint8_t				src_width;
	uint8_t				dst_width;
	enum xdma_direction		direction;
	bool				first;
	bool				last;
};

struct xdma_iommu {
	struct pmap p;
	vmem_t *vmem;		/* VA space */
	device_t dev;		/* IOMMU device */
};

struct xdma_channel {
	xdma_controller_t		*xdma;
	vmem_t				*vmem;

	uint32_t			flags;
#define	XCHAN_BUFS_ALLOCATED		(1 << 0)
#define	XCHAN_SGLIST_ALLOCATED		(1 << 1)
#define	XCHAN_CONFIGURED		(1 << 2)
#define	XCHAN_TYPE_CYCLIC		(1 << 3)
#define	XCHAN_TYPE_MEMCPY		(1 << 4)
#define	XCHAN_TYPE_FIFO			(1 << 5)
#define	XCHAN_TYPE_SG			(1 << 6)

	uint32_t			caps;
#define	XCHAN_CAP_BUSDMA		(1 << 0)
#define	XCHAN_CAP_NOSEG			(1 << 1)
#define	XCHAN_CAP_BOUNCE		(1 << 2)
#define	XCHAN_CAP_IOMMU			(1 << 3)

	/* A real hardware driver channel. */
	void				*chan;

	/* Interrupt handlers. */
	TAILQ_HEAD(, xdma_intr_handler)	ie_handlers;
	TAILQ_ENTRY(xdma_channel)	xchan_next;

	struct mtx			mtx_lock;
	struct mtx			mtx_qin_lock;
	struct mtx			mtx_qout_lock;
	struct mtx			mtx_bank_lock;
	struct mtx			mtx_proc_lock;

	/* Request queue. */
	bus_dma_tag_t			dma_tag_bufs;
	struct xdma_request		*xr_mem;
	uint32_t			xr_num;

	/* Bus dma tag options. */
	bus_size_t			maxsegsize;
	bus_size_t			maxnsegs;
	bus_size_t			alignment;
	bus_addr_t			boundary;
	bus_addr_t			lowaddr;
	bus_addr_t			highaddr;

	struct xdma_sglist		*sg;

	TAILQ_HEAD(, xdma_request)	bank;
	TAILQ_HEAD(, xdma_request)	queue_in;
	TAILQ_HEAD(, xdma_request)	queue_out;
	TAILQ_HEAD(, xdma_request)	processing;

	/* iommu */
	struct xdma_iommu		xio;
};

typedef struct xdma_channel xdma_channel_t;

struct xdma_intr_handler {
	int		(*cb)(void *cb_user, xdma_transfer_status_t *status);
	void		*cb_user;
	TAILQ_ENTRY(xdma_intr_handler)	ih_next;
};

static MALLOC_DEFINE(M_XDMA, "xdma", "xDMA framework");

#define	XCHAN_LOCK(xchan)		mtx_lock(&(xchan)->mtx_lock)
#define	XCHAN_UNLOCK(xchan)		mtx_unlock(&(xchan)->mtx_lock)
#define	XCHAN_ASSERT_LOCKED(xchan)	\
    mtx_assert(&(xchan)->mtx_lock, MA_OWNED)

#define	QUEUE_IN_LOCK(xchan)		mtx_lock(&(xchan)->mtx_qin_lock)
#define	QUEUE_IN_UNLOCK(xchan)		mtx_unlock(&(xchan)->mtx_qin_lock)
#define	QUEUE_IN_ASSERT_LOCKED(xchan)	\
    mtx_assert(&(xchan)->mtx_qin_lock, MA_OWNED)

#define	QUEUE_OUT_LOCK(xchan)		mtx_lock(&(xchan)->mtx_qout_lock)
#define	QUEUE_OUT_UNLOCK(xchan)		mtx_unlock(&(xchan)->mtx_qout_lock)
#define	QUEUE_OUT_ASSERT_LOCKED(xchan)	\
    mtx_assert(&(xchan)->mtx_qout_lock, MA_OWNED)

#define	QUEUE_BANK_LOCK(xchan)		mtx_lock(&(xchan)->mtx_bank_lock)
#define	QUEUE_BANK_UNLOCK(xchan)	mtx_unlock(&(xchan)->mtx_bank_lock)
#define	QUEUE_BANK_ASSERT_LOCKED(xchan)	\
    mtx_assert(&(xchan)->mtx_bank_lock, MA_OWNED)

#define	QUEUE_PROC_LOCK(xchan)		mtx_lock(&(xchan)->mtx_proc_lock)
#define	QUEUE_PROC_UNLOCK(xchan)	mtx_unlock(&(xchan)->mtx_proc_lock)
#define	QUEUE_PROC_ASSERT_LOCKED(xchan)	\
    mtx_assert(&(xchan)->mtx_proc_lock, MA_OWNED)

#define	XDMA_SGLIST_MAXLEN	2048
#define	XDMA_MAX_SEG		128

/* xDMA controller ops */
xdma_controller_t *xdma_ofw_get(device_t dev, const char *prop);
int xdma_put(xdma_controller_t *xdma);
vmem_t * xdma_get_memory(device_t dev);
void xdma_put_memory(vmem_t *vmem);
#ifdef FDT
int xdma_handle_mem_node(vmem_t *vmem, phandle_t memory);
#endif

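/*
 * Usage sketch (informational, not part of the original header): a
 * hypothetical FDT consumer looking up its DMA controller during attach and
 * releasing it on detach.  The softc and the property name "tx" are
 * assumptions made for illustration.
 *
 *	xdma_controller_t *xdma;
 *
 *	xdma = xdma_ofw_get(sc->dev, "tx");
 *	if (xdma == NULL)
 *		return (ENXIO);
 *	...
 *	xdma_put(xdma);
 */
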
/* xDMA channel ops */
xdma_channel_t * xdma_channel_alloc(xdma_controller_t *, uint32_t caps);
int xdma_channel_free(xdma_channel_t *);
int xdma_request(xdma_channel_t *xchan, struct xdma_request *r);
void xchan_set_memory(xdma_channel_t *xchan, vmem_t *vmem);

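/*
 * Usage sketch (assumptions noted): allocating a virtual channel on the
 * controller obtained above, asking for bus_dma-managed buffers via the
 * XCHAN_CAP_BUSDMA capability, and freeing it again when done.
 *
 *	xdma_channel_t *xchan;
 *
 *	xchan = xdma_channel_alloc(xdma, XCHAN_CAP_BUSDMA);
 *	if (xchan == NULL)
 *		return (ENXIO);
 *	...
 *	xdma_channel_free(xchan);
 */
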
/* SG interface */
int xdma_prep_sg(xdma_channel_t *, uint32_t,
    bus_size_t, bus_size_t, bus_size_t, bus_addr_t, bus_addr_t, bus_addr_t);
void xdma_channel_free_sg(xdma_channel_t *xchan);
int xdma_queue_submit_sg(xdma_channel_t *xchan);
void xchan_seg_done(xdma_channel_t *xchan, xdma_transfer_status_t *);

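/*
 * Usage sketch: configuring a channel for scatter-gather operation.  The
 * prototype above leaves the parameters unnamed; the order shown here
 * (request count, then the bus_dma tag limits mirroring the xr_num,
 * maxsegsize, maxnsegs, alignment, boundary, lowaddr and highaddr fields of
 * struct xdma_channel) is an inference, so verify it against the
 * implementation.  The values themselves are illustrative.
 *
 *	error = xdma_prep_sg(xchan,
 *	    512,			(requests to preallocate)
 *	    MCLBYTES,			(maximum segment size)
 *	    8,				(maximum segments per request)
 *	    16,				(alignment)
 *	    0,				(boundary)
 *	    BUS_SPACE_MAXADDR_32BIT,	(lowaddr)
 *	    BUS_SPACE_MAXADDR);		(highaddr)
 */
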
/* Queue operations */
int xdma_dequeue_mbuf(xdma_channel_t *xchan, struct mbuf **m,
    xdma_transfer_status_t *);
int xdma_enqueue_mbuf(xdma_channel_t *xchan, struct mbuf **m, uintptr_t addr,
    uint8_t, uint8_t, enum xdma_direction dir);
int xdma_dequeue_bio(xdma_channel_t *xchan, struct bio **bp,
    xdma_transfer_status_t *status);
int xdma_enqueue_bio(xdma_channel_t *xchan, struct bio **bp, bus_addr_t addr,
    uint8_t, uint8_t, enum xdma_direction dir);
int xdma_dequeue(xdma_channel_t *xchan, void **user,
    xdma_transfer_status_t *status);
int xdma_enqueue(xdma_channel_t *xchan, uintptr_t src, uintptr_t dst,
    uint8_t, uint8_t, bus_size_t, enum xdma_direction dir, void *);
int xdma_queue_submit(xdma_channel_t *xchan);

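/*
 * Usage sketch: a transmit path queueing an mbuf chain towards a device FIFO
 * and collecting completed mbufs later.  The FIFO address and the width
 * arguments (4, 4) are illustrative assumptions; the two uint8_t width
 * parameters are unnamed in the prototypes above.
 *
 *	error = xdma_enqueue_mbuf(xchan, &m, fifo_addr, 4, 4,
 *	    XDMA_MEM_TO_DEV);
 *	if (error == 0)
 *		xdma_queue_submit(xchan);
 *
 *	Later, typically from the completion callback:
 *
 *	while (xdma_dequeue_mbuf(xchan, &m, &st) == 0)
 *		m_freem(m);
 */
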
/* Mbuf operations */
uint32_t xdma_mbuf_defrag(xdma_channel_t *xchan, struct xdma_request *xr);
uint32_t xdma_mbuf_chain_count(struct mbuf *m0);

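/*
 * Usage sketch: collapsing an mbuf chain before handing it to the channel
 * when the chain has more fragments than the channel can describe; tying
 * this to the XCHAN_CAP_NOSEG capability is an assumption made for
 * illustration.
 *
 *	if ((xchan->caps & XCHAN_CAP_NOSEG) != 0 &&
 *	    xdma_mbuf_chain_count(xr->m) > 1)
 *		xdma_mbuf_defrag(xchan, xr);
 */
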
/* Channel Control */
int xdma_control(xdma_channel_t *xchan, enum xdma_command cmd);

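/*
 * Usage sketch: starting and later terminating a configured (e.g. cyclic)
 * transfer using the commands from enum xdma_command above.
 *
 *	xdma_control(xchan, XDMA_CMD_BEGIN);
 *	...
 *	xdma_control(xchan, XDMA_CMD_TERMINATE);
 */
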
/* Interrupt callback */
int xdma_setup_intr(xdma_channel_t *xchan, int (*cb)(void *,
    xdma_transfer_status_t *), void *arg, void **);
int xdma_teardown_intr(xdma_channel_t *xchan, struct xdma_intr_handler *ih);
int xdma_teardown_all_intr(xdma_channel_t *xchan);
void xdma_callback(struct xdma_channel *xchan, xdma_transfer_status_t *status);

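/*
 * Usage sketch: registering a per-channel completion handler.  The callback
 * name, the softc and the cookie variable are hypothetical; the final
 * argument returns an opaque handle usable with xdma_teardown_intr().
 *
 *	static int
 *	my_dma_intr(void *arg, xdma_transfer_status_t *status)
 *	{
 *		struct my_softc *sc = arg;
 *
 *		(dequeue and complete finished requests here)
 *		return (0);
 *	}
 *
 *	void *ih;
 *	error = xdma_setup_intr(xchan, my_dma_intr, sc, &ih);
 */
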
/* Sglist */
int xchan_sglist_alloc(xdma_channel_t *xchan);
void xchan_sglist_free(xdma_channel_t *xchan);
int xdma_sglist_add(struct xdma_sglist *sg, struct bus_dma_segment *seg,
    uint32_t nsegs, struct xdma_request *xr);

/* Requests bank */
void xchan_bank_init(xdma_channel_t *xchan);
int xchan_bank_free(xdma_channel_t *xchan);
struct xdma_request * xchan_bank_get(xdma_channel_t *xchan);
int xchan_bank_put(xdma_channel_t *xchan, struct xdma_request *xr);

/* IOMMU */
void xdma_iommu_add_entry(xdma_channel_t *xchan, vm_offset_t *va,
    vm_paddr_t pa, vm_size_t size, vm_prot_t prot);
void xdma_iommu_remove_entry(xdma_channel_t *xchan, vm_offset_t va);
int xdma_iommu_init(struct xdma_iommu *xio);
int xdma_iommu_release(struct xdma_iommu *xio);

#endif /* !_DEV_XDMA_XDMA_H_ */