1 /*-
2 * Copyright (c) 2015, 2019 Marcel Moolenaar
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
17 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
20 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
21 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 #include <sys/param.h>
27 #include <sys/systm.h>
28 #include <machine/bus.h>
29 #include <machine/bus_dma.h>
30 #include <machine/resource.h>
31 #include <sys/bus.h>
32 #include <sys/conf.h>
33 #include <sys/kernel.h>
34 #include <sys/malloc.h>
35 #include <sys/module.h>
36 #include <sys/proc.h>
37 #include <sys/queue.h>
38 #include <sys/rman.h>
39 #include <sys/sbuf.h>
40 #include <sys/sx.h>
41 #include <sys/uio.h>
42 #include <vm/vm.h>
43 #include <vm/pmap.h>
44 #include <vm/vm_map.h>
45
46 #include <dev/proto/proto.h>
47 #include <dev/proto/proto_dev.h>
48 #include <dev/proto/proto_busdma.h>
49
/* Allocation type for all proto_busdma bookkeeping structures. */
MALLOC_DEFINE(M_PROTO_BUSDMA, "proto_busdma", "DMA management data");

/*
 * Combine two boundary constraints.  A boundary of 0 means "no boundary",
 * so prefer the non-zero one; when both are non-zero the smaller
 * (stricter) boundary wins.
 */
#define	BNDRY_MIN(a, b)		\
	(((a) == 0) ? (b) : (((b) == 0) ? (a) : MIN((a), (b))))

/* Argument bundle passed to the bus_dmamap_load*() callbacks. */
struct proto_callback_bundle {
	struct proto_busdma *busdma;	/* owning busdma instance */
	struct proto_md *md;		/* memory descriptor being loaded */
	struct proto_ioc_busdma *ioc;	/* ioctl request to report results in */
};
60
/*
 * Create a DMA tag: a root tag when parent is NULL, otherwise a tag
 * derived from 'parent'.  For derived tags the requested constraints are
 * clamped to the parent's (a child can only be equally or more
 * restrictive) and the effective constraints are written back into the
 * ioctl so the caller learns what it actually got.  On success the
 * kernel pointer of the new tag is returned in ioc->result as the key
 * for later lookups.  Returns 0 or EINVAL.
 */
static int
proto_busdma_tag_create(struct proto_busdma *busdma, struct proto_tag *parent,
    struct proto_ioc_busdma *ioc)
{
	struct proto_tag *tag;

	/* Make sure that when a boundary is specified, it's a power of 2 */
	if (ioc->u.tag.bndry != 0 &&
	    (ioc->u.tag.bndry & (ioc->u.tag.bndry - 1)) != 0)
		return (EINVAL);

	/*
	 * If nsegs is 1, ignore maxsegsz. What this means is that if we have
	 * just 1 segment, then maxsz should be equal to maxsegsz. To keep it
	 * simple for us, limit maxsegsz to maxsz in any case.
	 */
	if (ioc->u.tag.maxsegsz > ioc->u.tag.maxsz || ioc->u.tag.nsegs == 1)
		ioc->u.tag.maxsegsz = ioc->u.tag.maxsz;

	tag = malloc(sizeof(*tag), M_PROTO_BUSDMA, M_WAITOK | M_ZERO);
	if (parent != NULL) {
		tag->parent = parent;
		LIST_INSERT_HEAD(&parent->children, tag, peers);
		/* Clamp every requested constraint to the parent's. */
		tag->align = MAX(ioc->u.tag.align, parent->align);
		tag->bndry = BNDRY_MIN(ioc->u.tag.bndry, parent->bndry);
		tag->maxaddr = MIN(ioc->u.tag.maxaddr, parent->maxaddr);
		tag->maxsz = MIN(ioc->u.tag.maxsz, parent->maxsz);
		tag->maxsegsz = MIN(ioc->u.tag.maxsegsz, parent->maxsegsz);
		tag->nsegs = MIN(ioc->u.tag.nsegs, parent->nsegs);
		tag->datarate = MIN(ioc->u.tag.datarate, parent->datarate);
		/* Write constraints back */
		ioc->u.tag.align = tag->align;
		ioc->u.tag.bndry = tag->bndry;
		ioc->u.tag.maxaddr = tag->maxaddr;
		ioc->u.tag.maxsz = tag->maxsz;
		ioc->u.tag.maxsegsz = tag->maxsegsz;
		ioc->u.tag.nsegs = tag->nsegs;
		ioc->u.tag.datarate = tag->datarate;
	} else {
		/* Root tag: take the caller's constraints as-is. */
		tag->align = ioc->u.tag.align;
		tag->bndry = ioc->u.tag.bndry;
		tag->maxaddr = ioc->u.tag.maxaddr;
		tag->maxsz = ioc->u.tag.maxsz;
		tag->maxsegsz = ioc->u.tag.maxsegsz;
		tag->nsegs = ioc->u.tag.nsegs;
		tag->datarate = ioc->u.tag.datarate;
	}
	LIST_INSERT_HEAD(&busdma->tags, tag, tags);
	ioc->result = (uintptr_t)(void *)tag;
	return (0);
}
112
113 static int
proto_busdma_tag_destroy(struct proto_busdma * busdma,struct proto_tag * tag)114 proto_busdma_tag_destroy(struct proto_busdma *busdma, struct proto_tag *tag)
115 {
116
117 if (!LIST_EMPTY(&tag->mds))
118 return (EBUSY);
119 if (!LIST_EMPTY(&tag->children))
120 return (EBUSY);
121
122 if (tag->parent != NULL) {
123 LIST_REMOVE(tag, peers);
124 tag->parent = NULL;
125 }
126 LIST_REMOVE(tag, tags);
127 free(tag, M_PROTO_BUSDMA);
128 return (0);
129 }
130
131 static struct proto_tag *
proto_busdma_tag_lookup(struct proto_busdma * busdma,u_long key)132 proto_busdma_tag_lookup(struct proto_busdma *busdma, u_long key)
133 {
134 struct proto_tag *tag;
135
136 LIST_FOREACH(tag, &busdma->tags, tags) {
137 if ((void *)tag == (void *)key)
138 return (tag);
139 }
140 return (NULL);
141 }
142
/*
 * Common teardown for a memory descriptor: unlink it from both lists,
 * undo whatever busdma state it holds (map load, allocated memory or
 * bare map) and free it.  Shared by the MEM_FREE and MD_DESTROY ioctl
 * paths and by proto_busdma_cleanup().  Always returns 0.
 */
static int
proto_busdma_md_destroy_internal(struct proto_busdma *busdma,
    struct proto_md *md)
{

	LIST_REMOVE(md, mds);
	LIST_REMOVE(md, peers);
	/* A non-zero physaddr means the map is currently loaded. */
	if (md->physaddr)
		bus_dmamap_unload(md->bd_tag, md->bd_map);
	/*
	 * Memory allocated by mem_alloc (virtaddr != NULL) is released
	 * with bus_dmamem_free(), which takes care of the map as well.
	 * A map created by md_create has no backing memory and is
	 * destroyed explicitly.
	 */
	if (md->virtaddr != NULL)
		bus_dmamem_free(md->bd_tag, md->virtaddr, md->bd_map);
	else
		bus_dmamap_destroy(md->bd_tag, md->bd_map);
	bus_dma_tag_destroy(md->bd_tag);
	free(md, M_PROTO_BUSDMA);
	return (0);
}
160
/*
 * Busdma load callback for kernel-allocated memory: report the segment
 * count and the bus address of the first segment back to userland via
 * the ioctl structure.
 * NOTE(review): the 'error' argument is ignored and segs[0] is read
 * unconditionally -- presumably safe because the load is synchronous
 * (BUS_DMA_NOWAIT) on memory allocated against the same tag; confirm.
 */
static void
proto_busdma_mem_alloc_callback(void *arg, bus_dma_segment_t *segs, int nseg,
    int error)
{
	struct proto_callback_bundle *pcb = arg;

	pcb->ioc->u.md.bus_nsegs = nseg;
	pcb->ioc->u.md.bus_addr = segs[0].ds_addr;
}
170
171 static int
proto_busdma_mem_alloc(struct proto_busdma * busdma,struct proto_tag * tag,struct proto_ioc_busdma * ioc)172 proto_busdma_mem_alloc(struct proto_busdma *busdma, struct proto_tag *tag,
173 struct proto_ioc_busdma *ioc)
174 {
175 struct proto_callback_bundle pcb;
176 struct proto_md *md;
177 int error;
178
179 md = malloc(sizeof(*md), M_PROTO_BUSDMA, M_WAITOK | M_ZERO);
180 md->tag = tag;
181
182 error = bus_dma_tag_create(busdma->bd_roottag, tag->align, tag->bndry,
183 tag->maxaddr, BUS_SPACE_MAXADDR, NULL, NULL, tag->maxsz,
184 tag->nsegs, tag->maxsegsz, 0, NULL, NULL, &md->bd_tag);
185 if (error) {
186 free(md, M_PROTO_BUSDMA);
187 return (error);
188 }
189 error = bus_dmamem_alloc(md->bd_tag, &md->virtaddr, 0, &md->bd_map);
190 if (error) {
191 bus_dma_tag_destroy(md->bd_tag);
192 free(md, M_PROTO_BUSDMA);
193 return (error);
194 }
195 md->physaddr = pmap_kextract((uintptr_t)(md->virtaddr));
196 pcb.busdma = busdma;
197 pcb.md = md;
198 pcb.ioc = ioc;
199 error = bus_dmamap_load(md->bd_tag, md->bd_map, md->virtaddr,
200 tag->maxsz, proto_busdma_mem_alloc_callback, &pcb, BUS_DMA_NOWAIT);
201 if (error) {
202 bus_dmamem_free(md->bd_tag, md->virtaddr, md->bd_map);
203 bus_dma_tag_destroy(md->bd_tag);
204 free(md, M_PROTO_BUSDMA);
205 return (error);
206 }
207 LIST_INSERT_HEAD(&tag->mds, md, peers);
208 LIST_INSERT_HEAD(&busdma->mds, md, mds);
209 ioc->u.md.virt_addr = (uintptr_t)md->virtaddr;
210 ioc->u.md.virt_size = tag->maxsz;
211 ioc->u.md.phys_nsegs = 1;
212 ioc->u.md.phys_addr = md->physaddr;
213 ioc->result = (uintptr_t)(void *)md;
214 return (0);
215 }
216
217 static int
proto_busdma_mem_free(struct proto_busdma * busdma,struct proto_md * md)218 proto_busdma_mem_free(struct proto_busdma *busdma, struct proto_md *md)
219 {
220
221 if (md->virtaddr == NULL)
222 return (ENXIO);
223 return (proto_busdma_md_destroy_internal(busdma, md));
224 }
225
226 static int
proto_busdma_md_create(struct proto_busdma * busdma,struct proto_tag * tag,struct proto_ioc_busdma * ioc)227 proto_busdma_md_create(struct proto_busdma *busdma, struct proto_tag *tag,
228 struct proto_ioc_busdma *ioc)
229 {
230 struct proto_md *md;
231 int error;
232
233 md = malloc(sizeof(*md), M_PROTO_BUSDMA, M_WAITOK | M_ZERO);
234 md->tag = tag;
235
236 error = bus_dma_tag_create(busdma->bd_roottag, tag->align, tag->bndry,
237 tag->maxaddr, BUS_SPACE_MAXADDR, NULL, NULL, tag->maxsz,
238 tag->nsegs, tag->maxsegsz, 0, NULL, NULL, &md->bd_tag);
239 if (error) {
240 free(md, M_PROTO_BUSDMA);
241 return (error);
242 }
243 error = bus_dmamap_create(md->bd_tag, 0, &md->bd_map);
244 if (error) {
245 bus_dma_tag_destroy(md->bd_tag);
246 free(md, M_PROTO_BUSDMA);
247 return (error);
248 }
249
250 LIST_INSERT_HEAD(&tag->mds, md, peers);
251 LIST_INSERT_HEAD(&busdma->mds, md, mds);
252 ioc->result = (uintptr_t)(void *)md;
253 return (0);
254 }
255
256 static int
proto_busdma_md_destroy(struct proto_busdma * busdma,struct proto_md * md)257 proto_busdma_md_destroy(struct proto_busdma *busdma, struct proto_md *md)
258 {
259
260 if (md->virtaddr != NULL)
261 return (ENXIO);
262 return (proto_busdma_md_destroy_internal(busdma, md));
263 }
264
/*
 * Busdma load callback for user memory (bus_dmamap_load_uio): report
 * the segment count and the bus address of the first segment back to
 * userland via the ioctl structure.
 * NOTE(review): the 'error' and 'sz' arguments are ignored and segs[0]
 * is read unconditionally -- presumably safe for a BUS_DMA_NOWAIT load
 * whose failure is also reported via the load function's return value;
 * confirm.
 */
static void
proto_busdma_md_load_callback(void *arg, bus_dma_segment_t *segs, int nseg,
    bus_size_t sz, int error)
{
	struct proto_callback_bundle *pcb = arg;

	pcb->ioc->u.md.bus_nsegs = nseg;
	pcb->ioc->u.md.bus_addr = segs[0].ds_addr;
}
274
/*
 * Load a userspace buffer, described by virt_addr/virt_size in the
 * ioctl, into the MD's busdma map.  The buffer is wrapped in a
 * single-entry uio so bus_dmamap_load_uio() can walk the calling
 * thread's address space.  On success the physical address of the start
 * of the buffer is reported back through the ioctl.
 */
static int
proto_busdma_md_load(struct proto_busdma *busdma, struct proto_md *md,
    struct proto_ioc_busdma *ioc, struct thread *td)
{
	struct proto_callback_bundle pcb;
	struct iovec iov;
	struct uio uio;
	pmap_t pmap;
	int error;

	/* Describe the user buffer with a single-segment uio. */
	iov.iov_base = (void *)(uintptr_t)ioc->u.md.virt_addr;
	iov.iov_len = ioc->u.md.virt_size;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = iov.iov_len;
	uio.uio_segflg = UIO_USERSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	pcb.busdma = busdma;
	pcb.md = md;
	pcb.ioc = ioc;
	error = bus_dmamap_load_uio(md->bd_tag, md->bd_map, &uio,
	    proto_busdma_md_load_callback, &pcb, BUS_DMA_NOWAIT);
	if (error)
		return (error);

	/* XXX determine *all* physical memory segments */
	pmap = vmspace_pmap(td->td_proc->p_vmspace);
	md->physaddr = pmap_extract(pmap, ioc->u.md.virt_addr);
	ioc->u.md.phys_nsegs = 1;	/* XXX */
	ioc->u.md.phys_addr = md->physaddr;
	return (0);
}
310
311 static int
proto_busdma_md_unload(struct proto_busdma * busdma,struct proto_md * md)312 proto_busdma_md_unload(struct proto_busdma *busdma, struct proto_md *md)
313 {
314
315 if (!md->physaddr)
316 return (ENXIO);
317 bus_dmamap_unload(md->bd_tag, md->bd_map);
318 md->physaddr = 0;
319 return (0);
320 }
321
322 static int
proto_busdma_sync(struct proto_busdma * busdma,struct proto_md * md,struct proto_ioc_busdma * ioc)323 proto_busdma_sync(struct proto_busdma *busdma, struct proto_md *md,
324 struct proto_ioc_busdma *ioc)
325 {
326 u_int ops;
327
328 ops = BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE |
329 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE;
330 if (ioc->u.sync.op & ~ops)
331 return (EINVAL);
332 if (!md->physaddr)
333 return (ENXIO);
334 bus_dmamap_sync(md->bd_tag, md->bd_map, ioc->u.sync.op);
335 return (0);
336 }
337
338 static struct proto_md *
proto_busdma_md_lookup(struct proto_busdma * busdma,u_long key)339 proto_busdma_md_lookup(struct proto_busdma *busdma, u_long key)
340 {
341 struct proto_md *md;
342
343 LIST_FOREACH(md, &busdma->mds, mds) {
344 if ((void *)md == (void *)key)
345 return (md);
346 }
347 return (NULL);
348 }
349
350 struct proto_busdma *
proto_busdma_attach(struct proto_softc * sc)351 proto_busdma_attach(struct proto_softc *sc)
352 {
353 struct proto_busdma *busdma;
354
355 busdma = malloc(sizeof(*busdma), M_PROTO_BUSDMA, M_WAITOK | M_ZERO);
356 sx_init(&busdma->sxlck, "proto-busdma");
357 return (busdma);
358 }
359
/*
 * Tear down the per-device busdma state: destroy any remaining MDs and
 * tags, then the lock and the structure itself.  Always returns 0.
 */
int
proto_busdma_detach(struct proto_softc *sc, struct proto_busdma *busdma)
{

	proto_busdma_cleanup(sc, busdma);
	sx_destroy(&busdma->sxlck);
	free(busdma, M_PROTO_BUSDMA);
	return (0);
}
369
370 int
proto_busdma_cleanup(struct proto_softc * sc,struct proto_busdma * busdma)371 proto_busdma_cleanup(struct proto_softc *sc, struct proto_busdma *busdma)
372 {
373 struct proto_md *md, *md1;
374 struct proto_tag *tag, *tag1;
375
376 sx_xlock(&busdma->sxlck);
377 LIST_FOREACH_SAFE(md, &busdma->mds, mds, md1)
378 proto_busdma_md_destroy_internal(busdma, md);
379 LIST_FOREACH_SAFE(tag, &busdma->tags, tags, tag1)
380 proto_busdma_tag_destroy(busdma, tag);
381 sx_xunlock(&busdma->sxlck);
382 return (0);
383 }
384
/*
 * Dispatch a busdma ioctl request.  Objects are referenced by key:
 * tag-typed requests look the key up in the tag list, MD-typed requests
 * in the MD list, so a stale or forged key yields EINVAL rather than a
 * wild pointer.  Note that MEM_ALLOC and MD_CREATE take their tag key
 * from ioc->u.md.tag while all other requests use ioc->key.  All work
 * is serialized by the per-instance sx lock.
 */
int
proto_busdma_ioctl(struct proto_softc *sc, struct proto_busdma *busdma,
    struct proto_ioc_busdma *ioc, struct thread *td)
{
	struct proto_tag *tag;
	struct proto_md *md;
	int error;

	sx_xlock(&busdma->sxlck);

	error = 0;
	switch (ioc->request) {
	case PROTO_IOC_BUSDMA_TAG_CREATE:
		/* Root tags derive from the device's busdma tag. */
		busdma->bd_roottag = bus_get_dma_tag(sc->sc_dev);
		error = proto_busdma_tag_create(busdma, NULL, ioc);
		break;
	case PROTO_IOC_BUSDMA_TAG_DERIVE:
		tag = proto_busdma_tag_lookup(busdma, ioc->key);
		if (tag == NULL) {
			error = EINVAL;
			break;
		}
		error = proto_busdma_tag_create(busdma, tag, ioc);
		break;
	case PROTO_IOC_BUSDMA_TAG_DESTROY:
		tag = proto_busdma_tag_lookup(busdma, ioc->key);
		if (tag == NULL) {
			error = EINVAL;
			break;
		}
		error = proto_busdma_tag_destroy(busdma, tag);
		break;
	case PROTO_IOC_BUSDMA_MEM_ALLOC:
		/* The tag key comes in u.md.tag for MD-creating requests. */
		tag = proto_busdma_tag_lookup(busdma, ioc->u.md.tag);
		if (tag == NULL) {
			error = EINVAL;
			break;
		}
		error = proto_busdma_mem_alloc(busdma, tag, ioc);
		break;
	case PROTO_IOC_BUSDMA_MEM_FREE:
		md = proto_busdma_md_lookup(busdma, ioc->key);
		if (md == NULL) {
			error = EINVAL;
			break;
		}
		error = proto_busdma_mem_free(busdma, md);
		break;
	case PROTO_IOC_BUSDMA_MD_CREATE:
		/* The tag key comes in u.md.tag for MD-creating requests. */
		tag = proto_busdma_tag_lookup(busdma, ioc->u.md.tag);
		if (tag == NULL) {
			error = EINVAL;
			break;
		}
		error = proto_busdma_md_create(busdma, tag, ioc);
		break;
	case PROTO_IOC_BUSDMA_MD_DESTROY:
		md = proto_busdma_md_lookup(busdma, ioc->key);
		if (md == NULL) {
			error = EINVAL;
			break;
		}
		error = proto_busdma_md_destroy(busdma, md);
		break;
	case PROTO_IOC_BUSDMA_MD_LOAD:
		md = proto_busdma_md_lookup(busdma, ioc->key);
		if (md == NULL) {
			error = EINVAL;
			break;
		}
		error = proto_busdma_md_load(busdma, md, ioc, td);
		break;
	case PROTO_IOC_BUSDMA_MD_UNLOAD:
		md = proto_busdma_md_lookup(busdma, ioc->key);
		if (md == NULL) {
			error = EINVAL;
			break;
		}
		error = proto_busdma_md_unload(busdma, md);
		break;
	case PROTO_IOC_BUSDMA_SYNC:
		md = proto_busdma_md_lookup(busdma, ioc->key);
		if (md == NULL) {
			error = EINVAL;
			break;
		}
		error = proto_busdma_sync(busdma, md, ioc);
		break;
	default:
		error = EINVAL;
		break;
	}

	sx_xunlock(&busdma->sxlck);

	return (error);
}
482
483 int
proto_busdma_mmap_allowed(struct proto_busdma * busdma,vm_paddr_t physaddr)484 proto_busdma_mmap_allowed(struct proto_busdma *busdma, vm_paddr_t physaddr)
485 {
486 struct proto_md *md;
487 int result;
488
489 sx_xlock(&busdma->sxlck);
490
491 result = 0;
492 LIST_FOREACH(md, &busdma->mds, mds) {
493 if (physaddr >= trunc_page(md->physaddr) &&
494 physaddr <= trunc_page(md->physaddr + md->tag->maxsz)) {
495 result = 1;
496 break;
497 }
498 }
499
500 sx_xunlock(&busdma->sxlck);
501
502 return (result);
503 }
504