/*-
 * Copyright (c) 2016-2018 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _DEV_XDMA_XDMA_H_
#define _DEV_XDMA_XDMA_H_

#include <sys/proc.h>

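/* Direction of a DMA transfer. */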
enum xdma_direction {
	XDMA_MEM_TO_MEM,
	XDMA_MEM_TO_DEV,
	XDMA_DEV_TO_MEM,
	XDMA_DEV_TO_DEV,
};

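/* Type of operation a channel is set up to perform. */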
enum xdma_operation_type {
	XDMA_MEMCPY,
	XDMA_CYCLIC,
	XDMA_FIFO,
	XDMA_SG,
};

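/* How the data of a request is described: physical or virtual address, mbuf, or bio. */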
enum xdma_request_type {
	XR_TYPE_PHYS,
	XR_TYPE_VIRT,
	XR_TYPE_MBUF,
	XR_TYPE_BIO,
};

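/* Channel control commands accepted by xdma_control(). */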
enum xdma_command {
	XDMA_CMD_BEGIN,
	XDMA_CMD_PAUSE,
	XDMA_CMD_TERMINATE,
};

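/* Completion status of a transfer, as reported to interrupt handlers. */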
struct xdma_transfer_status {
	uint32_t	transferred;
	int		error;
};

typedef struct xdma_transfer_status xdma_transfer_status_t;

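/* A DMA engine as seen by a single consumer device. */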
struct xdma_controller {
	device_t dev;		/* DMA consumer device_t. */
	device_t dma_dev;	/* A real DMA device_t. */
	void *data;		/* OFW MD part. */

	/* List of virtual channels allocated. */
	TAILQ_HEAD(xdma_channel_list, xdma_channel)	channels;
};

typedef struct xdma_controller xdma_controller_t;

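/* Per-request busdma state. */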
struct xchan_buf {
	bus_dmamap_t			map;
	uint32_t			nsegs;
	uint32_t			nsegs_left;
	void				*cbuf;
};

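/* A single transfer request; the fields consulted depend on req_type and operation. */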
struct xdma_request {
	struct mbuf			*m;
	struct bio			*bp;
	enum xdma_operation_type	operation;
	enum xdma_request_type		req_type;
	enum xdma_direction		direction;
	bus_addr_t			src_addr;
	bus_addr_t			dst_addr;
	uint8_t				src_width;
	uint8_t				dst_width;
	bus_size_t			block_num;
	bus_size_t			block_len;
	xdma_transfer_status_t		status;
	void				*user;
	TAILQ_ENTRY(xdma_request)	xr_next;
	struct xchan_buf		buf;
};

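/* One entry of the scatter/gather list handed to the DMA driver. */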
struct xdma_sglist {
	bus_addr_t			src_addr;
	bus_addr_t			dst_addr;
	size_t				len;
	uint8_t				src_width;
	uint8_t				dst_width;
	enum xdma_direction		direction;
	bool				first;
	bool				last;
};

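/* A virtual DMA channel. */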
struct xdma_channel {
	xdma_controller_t		*xdma;

	uint32_t			flags;
#define	XCHAN_BUFS_ALLOCATED		(1 << 0)
#define	XCHAN_SGLIST_ALLOCATED		(1 << 1)
#define	XCHAN_CONFIGURED		(1 << 2)
#define	XCHAN_TYPE_CYCLIC		(1 << 3)
#define	XCHAN_TYPE_MEMCPY		(1 << 4)
#define	XCHAN_TYPE_FIFO			(1 << 5)
#define	XCHAN_TYPE_SG			(1 << 6)

	uint32_t			caps;
#define	XCHAN_CAP_BUSDMA		(1 << 0)
#define	XCHAN_CAP_BUSDMA_NOSEG		(1 << 1)

	/* A real hardware driver channel. */
	void				*chan;

	/* Interrupt handlers. */
	TAILQ_HEAD(, xdma_intr_handler)	ie_handlers;
	TAILQ_ENTRY(xdma_channel)	xchan_next;

	struct sx			sx_lock;
	struct sx			sx_qin_lock;
	struct sx			sx_qout_lock;
	struct sx			sx_bank_lock;
	struct sx			sx_proc_lock;

	/* Request queue. */
	bus_dma_tag_t			dma_tag_bufs;
	struct xdma_request		*xr_mem;
	uint32_t			xr_num;

	/* Bus dma tag options. */
	bus_size_t			maxsegsize;
	bus_size_t			maxnsegs;
	bus_size_t			alignment;
	bus_addr_t			boundary;
	bus_addr_t			lowaddr;
	bus_addr_t			highaddr;

	struct xdma_sglist		*sg;

	TAILQ_HEAD(, xdma_request)	bank;
	TAILQ_HEAD(, xdma_request)	queue_in;
	TAILQ_HEAD(, xdma_request)	queue_out;
	TAILQ_HEAD(, xdma_request)	processing;
};

typedef struct xdma_channel xdma_channel_t;

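/* A consumer completion handler registered with xdma_setup_intr(). */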
struct xdma_intr_handler {
	int		(*cb)(void *cb_user, xdma_transfer_status_t *status);
	void		*cb_user;
	TAILQ_ENTRY(xdma_intr_handler)	ih_next;
};

static MALLOC_DEFINE(M_XDMA, "xdma", "xDMA framework");

#define	XCHAN_LOCK(xchan)		sx_xlock(&(xchan)->sx_lock)
#define	XCHAN_UNLOCK(xchan)		sx_xunlock(&(xchan)->sx_lock)
#define	XCHAN_ASSERT_LOCKED(xchan)	\
    sx_assert(&(xchan)->sx_lock, SX_XLOCKED)

#define	QUEUE_IN_LOCK(xchan)		sx_xlock(&(xchan)->sx_qin_lock)
#define	QUEUE_IN_UNLOCK(xchan)		sx_xunlock(&(xchan)->sx_qin_lock)
#define	QUEUE_IN_ASSERT_LOCKED(xchan)	\
    sx_assert(&(xchan)->sx_qin_lock, SX_XLOCKED)

#define	QUEUE_OUT_LOCK(xchan)		sx_xlock(&(xchan)->sx_qout_lock)
#define	QUEUE_OUT_UNLOCK(xchan)		sx_xunlock(&(xchan)->sx_qout_lock)
#define	QUEUE_OUT_ASSERT_LOCKED(xchan)	\
    sx_assert(&(xchan)->sx_qout_lock, SX_XLOCKED)

#define	QUEUE_BANK_LOCK(xchan)		sx_xlock(&(xchan)->sx_bank_lock)
#define	QUEUE_BANK_UNLOCK(xchan)	sx_xunlock(&(xchan)->sx_bank_lock)
#define	QUEUE_BANK_ASSERT_LOCKED(xchan)	\
    sx_assert(&(xchan)->sx_bank_lock, SX_XLOCKED)

#define	QUEUE_PROC_LOCK(xchan)		sx_xlock(&(xchan)->sx_proc_lock)
#define	QUEUE_PROC_UNLOCK(xchan)	sx_xunlock(&(xchan)->sx_proc_lock)
#define	QUEUE_PROC_ASSERT_LOCKED(xchan)	\
    sx_assert(&(xchan)->sx_proc_lock, SX_XLOCKED)

#define	XDMA_SGLIST_MAXLEN	2048
#define	XDMA_MAX_SEG		128

/* xDMA controller ops */
xdma_controller_t *xdma_ofw_get(device_t dev, const char *prop);
int xdma_put(xdma_controller_t *xdma);

/* xDMA channel ops */
xdma_channel_t * xdma_channel_alloc(xdma_controller_t *, uint32_t caps);
int xdma_channel_free(xdma_channel_t *);
int xdma_request(xdma_channel_t *xchan, struct xdma_request *r);

/* SG interface */
int xdma_prep_sg(xdma_channel_t *, uint32_t,
    bus_size_t, bus_size_t, bus_size_t, bus_addr_t, bus_addr_t, bus_addr_t);
void xdma_channel_free_sg(xdma_channel_t *xchan);
int xdma_queue_submit_sg(xdma_channel_t *xchan);
void xchan_seg_done(xdma_channel_t *xchan, xdma_transfer_status_t *);

/* Queue operations */
int xdma_dequeue_mbuf(xdma_channel_t *xchan, struct mbuf **m,
    xdma_transfer_status_t *);
int xdma_enqueue_mbuf(xdma_channel_t *xchan, struct mbuf **m, uintptr_t addr,
    uint8_t, uint8_t, enum xdma_direction dir);
int xdma_dequeue_bio(xdma_channel_t *xchan, struct bio **bp,
    xdma_transfer_status_t *status);
int xdma_enqueue_bio(xdma_channel_t *xchan, struct bio **bp, bus_addr_t addr,
    uint8_t, uint8_t, enum xdma_direction dir);
int xdma_dequeue(xdma_channel_t *xchan, void **user,
    xdma_transfer_status_t *status);
int xdma_enqueue(xdma_channel_t *xchan, uintptr_t src, uintptr_t dst,
    uint8_t, uint8_t, bus_size_t, enum xdma_direction dir, void *);
int xdma_queue_submit(xdma_channel_t *xchan);

/* Mbuf operations */
uint32_t xdma_mbuf_defrag(xdma_channel_t *xchan, struct xdma_request *xr);
uint32_t xdma_mbuf_chain_count(struct mbuf *m0);

/* Channel Control */
int xdma_control(xdma_channel_t *xchan, enum xdma_command cmd);

/* Interrupt callback */
int xdma_setup_intr(xdma_channel_t *xchan, int (*cb)(void *,
    xdma_transfer_status_t *), void *arg, void **);
int xdma_teardown_intr(xdma_channel_t *xchan, struct xdma_intr_handler *ih);
int xdma_teardown_all_intr(xdma_channel_t *xchan);
void xdma_callback(struct xdma_channel *xchan, xdma_transfer_status_t *status);

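/*
 * A completion handler registered with xdma_setup_intr() has the shape
 * sketched below (illustrative only; the function name and softc fields
 * are hypothetical).  It typically defers the real work and later drains
 * finished requests with xdma_dequeue_mbuf(), xdma_dequeue_bio() or
 * xdma_dequeue():
 *
 *	static int
 *	mydev_xdma_tx_intr(void *arg, xdma_transfer_status_t *status)
 *	{
 *		struct mydev_softc *sc;
 *
 *		sc = arg;
 *		taskqueue_enqueue(sc->tq, &sc->tx_task);
 *
 *		return (0);
 *	}
 */
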
/* Sglist */
int xchan_sglist_alloc(xdma_channel_t *xchan);
void xchan_sglist_free(xdma_channel_t *xchan);
int xdma_sglist_add(struct xdma_sglist *sg, struct bus_dma_segment *seg,
    uint32_t nsegs, struct xdma_request *xr);

/* Requests bank */
void xchan_bank_init(xdma_channel_t *xchan);
int xchan_bank_free(xdma_channel_t *xchan);
struct xdma_request * xchan_bank_get(xdma_channel_t *xchan);
int xchan_bank_put(xdma_channel_t *xchan, struct xdma_request *xr);

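/*
 * Typical consumer setup for an SG-capable channel, in sketch form only:
 * the "tx" channel name, queue depth, segment limits, bus widths and the
 * softc fields are hypothetical and hardware dependent; the xdma_prep_sg()
 * arguments are passed in the order of the bus dma tag options above.
 *
 *	sc->xdma_tx = xdma_ofw_get(sc->dev, "tx");
 *	sc->xchan_tx = xdma_channel_alloc(sc->xdma_tx, XCHAN_CAP_BUSDMA);
 *	xdma_setup_intr(sc->xchan_tx, mydev_xdma_tx_intr, sc, &sc->ih_tx);
 *	xdma_prep_sg(sc->xchan_tx, 64, MCLBYTES, 8, 16, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR);
 *
 * Per-packet transmit then enqueues and submits requests:
 *
 *	xdma_enqueue_mbuf(sc->xchan_tx, &m, fifo_paddr, 4, 4,
 *	    XDMA_MEM_TO_DEV);
 *	xdma_queue_submit(sc->xchan_tx);
 *
 * Completed mbufs are recovered with xdma_dequeue_mbuf() from the
 * completion handler path, and xdma_channel_free()/xdma_put() undo the
 * setup on detach.
 */
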
#endif /* !_DEV_XDMA_XDMA_H_ */