/*-
 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/pctrie.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/stdarg.h>

#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/compat.h>

static device_probe_t linux_pci_probe;
static device_attach_t linux_pci_attach;
static device_detach_t linux_pci_detach;
static device_suspend_t linux_pci_suspend;
static device_resume_t linux_pci_resume;
static device_shutdown_t linux_pci_shutdown;

static device_method_t pci_methods[] = {
	DEVMETHOD(device_probe, linux_pci_probe),
	DEVMETHOD(device_attach, linux_pci_attach),
	DEVMETHOD(device_detach, linux_pci_detach),
	DEVMETHOD(device_suspend, linux_pci_suspend),
	DEVMETHOD(device_resume, linux_pci_resume),
	DEVMETHOD(device_shutdown, linux_pci_shutdown),
	DEVMETHOD_END
};

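/*
 * Per-device DMA state, hung off dev.dma_priv at attach time.  The
 * pctrie maps bus (DMA) addresses back to their linux_dma_obj so that
 * unmap calls, which only carry a dma_addr_t, can recover the
 * bus_dmamap_t that must be unloaded and destroyed.
 */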
struct linux_dma_priv {
	uint64_t	dma_mask;
	struct mtx	lock;
	bus_dma_tag_t	dmat;
	struct pctrie	ptree;
};
#define	DMA_PRIV_LOCK(priv) mtx_lock(&(priv)->lock)
#define	DMA_PRIV_UNLOCK(priv) mtx_unlock(&(priv)->lock)

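/*
 * Allocate and free the per-device DMA bookkeeping.  The DMA tag itself
 * is created lazily by linux_dma_tag_init() once the driver sets a DMA
 * mask, so only the lock and the pctrie are set up here.
 */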
static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;

	priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);
	pdev->dev.dma_priv = priv;

	mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);

	pctrie_init(&priv->ptree);

	return (0);
}

static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;

	priv = pdev->dev.dma_priv;
	if (priv->dmat)
		bus_dma_tag_destroy(priv->dmat);
	mtx_destroy(&priv->lock);
	free(priv, M_DEVBUF);
	pdev->dev.dma_priv = NULL;
	return (0);
}

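/*
 * (Re-)create the device's DMA tag when the driver sets a new DMA mask
 * (the dma_set_mask() family ends up here).  Returns a negative errno
 * on failure, matching the Linux convention expected by the callers.
 */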
int
linux_dma_tag_init(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat) {
		if (priv->dma_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat);
	}

	priv->dma_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat);
	return (-error);
}

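/*
 * Scan the registered Linux PCI drivers for an id_table entry matching
 * the given FreeBSD device's vendor/device/subsystem IDs.  On a match,
 * return the driver and pass the matching table entry back via *idp.
 */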
static struct pci_driver *
linux_pci_find(device_t dev, const struct pci_device_id **idp)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;
	uint16_t subdevice;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);
	subvendor = pci_get_subvendor(dev);
	subdevice = pci_get_subdevice(dev);

	spin_lock(&pci_lock);
	list_for_each_entry(pdrv, &pci_drivers, links) {
		for (id = pdrv->id_table; id->vendor != 0; id++) {
			if (vendor == id->vendor &&
			    (PCI_ANY_ID == id->device || device == id->device) &&
			    (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) &&
			    (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) {
				*idp = id;
				spin_unlock(&pci_lock);
				return (pdrv);
			}
		}
	}
	spin_unlock(&pci_lock);
	return (NULL);
}

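/*
 * Succeed only when probing on behalf of the Linux driver whose
 * id_table actually matched this device.
 */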
static int
linux_pci_probe(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;

	if ((pdrv = linux_pci_find(dev, &id)) == NULL)
		return (ENXIO);
	if (device_get_driver(dev) != &pdrv->bsddriver)
		return (ENXIO);
	device_set_desc(dev, pdrv->name);
	return (0);
}

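/*
 * Construct the Linux view of the device (struct pci_dev, the softc)
 * around the FreeBSD device_t, set up DMA state, and finally hand the
 * device to the Linux driver's probe() routine.  DRM drivers attach to
 * a child of vgapci, so they borrow the parent's ivars for config data.
 */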
static int
linux_pci_attach(device_t dev)
{
	struct resource_list_entry *rle;
	struct pci_bus *pbus;
	struct pci_dev *pdev;
	struct pci_devinfo *dinfo;
	struct pci_driver *pdrv;
	const struct pci_device_id *id;
	device_t parent;
	devclass_t devclass;
	int error;

	linux_set_current(curthread);

	pdrv = linux_pci_find(dev, &id);
	pdev = device_get_softc(dev);

	parent = device_get_parent(dev);
	devclass = device_get_devclass(parent);
	if (pdrv->isdrm) {
		dinfo = device_get_ivars(parent);
		device_set_ivars(dev, dinfo);
	} else {
		dinfo = device_get_ivars(dev);
	}

	pdev->dev.parent = &linux_root_device;
	pdev->dev.bsddev = dev;
	INIT_LIST_HEAD(&pdev->dev.irqents);
	pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));
	pdev->device = dinfo->cfg.device;
	pdev->vendor = dinfo->cfg.vendor;
	pdev->subsystem_vendor = dinfo->cfg.subvendor;
	pdev->subsystem_device = dinfo->cfg.subdevice;
	pdev->class = pci_get_class(dev);
	pdev->revision = pci_get_revid(dev);
	pdev->pdrv = pdrv;
	kobject_init(&pdev->dev.kobj, &linux_dev_ktype);
	kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev));
	kobject_add(&pdev->dev.kobj, &linux_root_device.kobj,
	    kobject_name(&pdev->dev.kobj));
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0);
	if (rle != NULL)
		pdev->dev.irq = rle->start;
	else
		pdev->dev.irq = LINUX_IRQ_INVALID;
	pdev->irq = pdev->dev.irq;
	error = linux_pdev_dma_init(pdev);
	if (error)
		goto out;

	if (pdev->bus == NULL) {
		pbus = malloc(sizeof(*pbus), M_DEVBUF, M_WAITOK | M_ZERO);
		pbus->self = pdev;
		pbus->number = pci_get_bus(dev);
		pdev->bus = pbus;
	}

	spin_lock(&pci_lock);
	list_add(&pdev->links, &pci_devices);
	spin_unlock(&pci_lock);

	error = pdrv->probe(pdev, id);
out:
	if (error) {
		spin_lock(&pci_lock);
		list_del(&pdev->links);
		spin_unlock(&pci_lock);
		put_device(&pdev->dev);
		error = -error;
	}
	return (error);
}

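/*
 * Tear down in reverse: let the Linux driver's remove() run first, then
 * release the DMA state and drop the last kobject reference.
 */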
static int
linux_pci_detach(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	pdev->pdrv->remove(pdev);
	linux_pdev_dma_uninit(pdev);

	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	device_set_desc(dev, NULL);
	put_device(&pdev->dev);

	return (0);
}

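/*
 * Suspend glue.  A legacy pci_driver suspend hook takes precedence over
 * dev_pm_ops; the "late" stage is folded into this single FreeBSD
 * suspend event.  Linux negative-errno returns are flipped positive.
 */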
static int
linux_pci_suspend(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pm_message pm = { };
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->suspend != NULL)
		error = -pdev->pdrv->suspend(pdev, pm);
	else if (pmops != NULL && pmops->suspend != NULL) {
		error = -pmops->suspend(&pdev->dev);
		if (error == 0 && pmops->suspend_late != NULL)
			error = -pmops->suspend_late(&pdev->dev);
	}
	return (error);
}

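/* Resume counterpart: resume_early (if any) runs before resume. */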
static int
linux_pci_resume(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->resume != NULL)
		error = -pdev->pdrv->resume(pdev);
	else if (pmops != NULL && pmops->resume != NULL) {
		if (pmops->resume_early != NULL)
			error = -pmops->resume_early(&pdev->dev);
		if (error == 0 && pmops->resume != NULL)
			error = -pmops->resume(&pdev->dev);
	}
	return (error);
}

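/* Forward a system shutdown to the Linux driver's shutdown hook, if any. */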
static int
linux_pci_shutdown(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->shutdown != NULL)
		pdev->pdrv->shutdown(pdev);
	return (0);
}

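/*
 * Common registration path: enqueue the driver on pci_drivers, point
 * its embedded BSD driver at the shared pci_methods table (with struct
 * pci_dev as the softc), and add it to the target devclass under Giant.
 */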
static int
_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)
{
	int error;

	linux_set_current(curthread);
	spin_lock(&pci_lock);
	list_add(&pdrv->links, &pci_drivers);
	spin_unlock(&pci_lock);
	pdrv->bsddriver.name = pdrv->name;
	pdrv->bsddriver.methods = pci_methods;
	pdrv->bsddriver.size = sizeof(struct pci_dev);

	mtx_lock(&Giant);
	error = devclass_add_driver(dc, &pdrv->bsddriver,
	    BUS_PASS_DEFAULT, &pdrv->bsdclass);
	mtx_unlock(&Giant);
	return (-error);
}

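/*
 * Register a Linux-style PCI driver with the "pci" devclass.  A minimal
 * sketch of a caller, with hypothetical mydrv_* names (not part of this
 * file):
 *
 *	static const struct pci_device_id mydrv_ids[] = {
 *		{ .vendor = 0x8086, .device = PCI_ANY_ID,
 *		  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID },
 *		{ 0, }		(terminator; matching stops at vendor 0)
 *	};
 *	static struct pci_driver mydrv_driver = {
 *		.name = "mydrv",
 *		.id_table = mydrv_ids,
 *		.probe = mydrv_probe,
 *		.remove = mydrv_remove,
 *	};
 *
 *	error = linux_pci_register_driver(&mydrv_driver);
 */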
int
linux_pci_register_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_find("pci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = false;
	return (_linux_pci_register_driver(pdrv, dc));
}

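/*
 * DRM drivers register against the "vgapci" devclass instead, and are
 * renamed to "drmn" so their devices attach as drmn<unit>.
 */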
int
linux_pci_register_drm_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_create("vgapci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = true;
	pdrv->name = "drmn";
	return (_linux_pci_register_driver(pdrv, dc));
}

void
linux_pci_unregister_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("pci");

	spin_lock(&pci_lock);
	list_del(&pdrv->links);
	spin_unlock(&pci_lock);
	mtx_lock(&Giant);
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	mtx_unlock(&Giant);
}

CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));

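/*
 * One mapping tracked in the per-device (or per-pool) pctrie: the KVA,
 * the bus address handed back to the driver, and the busdma map that
 * backs it.  dma_addr doubles as the pctrie key.
 */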
struct linux_dma_obj {
	void		*vaddr;
	uint64_t	dma_addr;
	bus_dmamap_t	dmamap;
};

static uma_zone_t linux_dma_trie_zone;
static uma_zone_t linux_dma_obj_zone;

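/*
 * UMA zones for pctrie nodes and DMA mapping objects; created at boot
 * (or module load) and destroyed again by the matching SYSUNINIT.
 */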
static void
linux_dma_init(void *arg)
{

	linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
	    pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
	    UMA_ALIGN_PTR, 0);
	linux_dma_obj_zone = uma_zcreate("linux_dma_object",
	    sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
}
SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);

static void
linux_dma_uninit(void *arg)
{

	uma_zdestroy(linux_dma_obj_zone);
	uma_zdestroy(linux_dma_trie_zone);
}
SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);

static void *
linux_dma_trie_alloc(struct pctrie *ptree)
{

	return (uma_zalloc(linux_dma_trie_zone, 0));
}

static void
linux_dma_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(linux_dma_trie_zone, node);
}

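/*
 * Generate the LINUX_DMA_PCTRIE_{INSERT,LOOKUP,REMOVE}() helpers, keyed
 * on the dma_addr field of struct linux_dma_obj.
 */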
PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
    linux_dma_trie_free);

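/*
 * dma_alloc_coherent(): allocate physically contiguous memory below the
 * device's DMA mask (or below 4GB when GFP_DMA32 is set), aligned to
 * the power-of-two allocation size, and establish its bus mapping.
 */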
void *
linux_dma_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t flag)
{
	struct linux_dma_priv *priv;
	vm_paddr_t high;
	size_t align;
	void *mem;

	if (dev == NULL || dev->dma_priv == NULL) {
		*dma_handle = 0;
		return (NULL);
	}
	priv = dev->dma_priv;
	if (priv->dma_mask)
		high = priv->dma_mask;
	else if (flag & GFP_DMA32)
		high = BUS_SPACE_MAXADDR_32BIT;
	else
		high = BUS_SPACE_MAXADDR;
	align = PAGE_SIZE << get_order(size);
	mem = (void *)kmem_alloc_contig(size, flag, 0, high, align, 0,
	    VM_MEMATTR_DEFAULT);
	if (mem != NULL) {
		*dma_handle = linux_dma_map_phys(dev, vtophys(mem), size);
		if (*dma_handle == 0) {
			kmem_free((vm_offset_t)mem, size);
			mem = NULL;
		}
	} else {
		*dma_handle = 0;
	}
	return (mem);
}

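/*
 * Map a physically contiguous range for DMA and remember it in the
 * pctrie.  Returns the bus address, or 0 on failure (a bus address of
 * 0 is treated as invalid throughout this file).
 */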
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	obj = uma_zalloc(linux_dma_obj_zone, 0);

	DMA_PRIV_LOCK(priv);
	if (bus_dmamap_create(priv->dmat, 0, &obj->dmamap) != 0) {
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	nseg = -1;
	if (_bus_dmamap_load_phys(priv->dmat, obj->dmamap, phys, len,
	    BUS_DMA_NOWAIT, &seg, &nseg) != 0) {
		bus_dmamap_destroy(priv->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
	if (error != 0) {
		bus_dmamap_unload(priv->dmat, obj->dmamap);
		bus_dmamap_destroy(priv->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}
	DMA_PRIV_UNLOCK(priv);
	return (obj->dma_addr);
}

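/*
 * Undo linux_dma_map_phys(): look the mapping up by bus address, drop
 * it from the pctrie and release its busdma resources.
 */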
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);
	bus_dmamap_unload(priv->dmat, obj->dmamap);
	bus_dmamap_destroy(priv->dmat, obj->dmamap);
	DMA_PRIV_UNLOCK(priv);

	uma_zfree(linux_dma_obj_zone, obj);
}

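/*
 * Map a whole scatterlist through a single busdma map stored in the
 * first S/G entry.  Returns nents on success or 0 on failure, per the
 * Linux dma_map_sg() convention; each entry must load as one segment.
 */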
int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
    enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct linux_dma_priv *priv;
	struct scatterlist *sg;
	int i, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	/* create common DMA map in the first S/G entry */
	if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {
		DMA_PRIV_UNLOCK(priv);
		return (0);
	}

	/* load all S/G list entries */
	for_each_sg(sgl, sg, nents, i) {
		nseg = -1;
		if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,
		    sg_phys(sg), sg->length, BUS_DMA_NOWAIT,
		    &seg, &nseg) != 0) {
			bus_dmamap_unload(priv->dmat, sgl->dma_map);
			bus_dmamap_destroy(priv->dmat, sgl->dma_map);
			DMA_PRIV_UNLOCK(priv);
			return (0);
		}
		KASSERT(nseg == 0,
		    ("More than one segment (nseg=%d)", nseg + 1));

		sg_dma_address(sg) = seg.ds_addr;
	}
	DMA_PRIV_UNLOCK(priv);

	return (nents);
}

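/* Release the shared busdma map created by linux_dma_map_sg_attrs(). */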
void
linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);
	bus_dmamap_unload(priv->dmat, sgl->dma_map);
	bus_dmamap_destroy(priv->dmat, sgl->dma_map);
	DMA_PRIV_UNLOCK(priv);
}

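/*
 * dma_pool emulation: a UMA cache zone keeps preloaded DMA buffers of
 * pool_entry_size warm, while the pctrie provides the dma_addr-to-object
 * lookup needed by dma_pool_free().
 */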
struct dma_pool {
	struct device  *pool_device;
	uma_zone_t	pool_zone;
	struct mtx	pool_lock;
	bus_dma_tag_t	pool_dmat;
	size_t		pool_entry_size;
	struct pctrie	pool_ptree;
};

#define	DMA_POOL_LOCK(pool) mtx_lock(&(pool)->pool_lock)
#define	DMA_POOL_UNLOCK(pool) mtx_unlock(&(pool)->pool_lock)

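/*
 * Cache-zone ctor/dtor: load the DMA map when an object is allocated
 * from the pool and unload it again when the object is freed back, so
 * objects parked in the cache do not hold a loaded mapping.
 */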
static inline int
dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;
	int error, nseg;
	bus_dma_segment_t seg;

	nseg = -1;
	DMA_POOL_LOCK(pool);
	error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,
	    vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,
	    &seg, &nseg);
	DMA_POOL_UNLOCK(pool);
	if (error != 0) {
		return (error);
	}
	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	return (0);
}

static void
dma_pool_obj_dtor(void *mem, int size, void *arg)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;

	DMA_POOL_LOCK(pool);
	bus_dmamap_unload(pool->pool_dmat, obj->dmamap);
	DMA_POOL_UNLOCK(pool);
}

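/*
 * UMA import/release: allocate and free the actual DMA buffers backing
 * the cache, one bus_dmamem_alloc() per object.
 */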
static int
dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
	struct dma_pool *pool = arg;
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, i;

	priv = pool->pool_device->dma_priv;
	for (i = 0; i < count; i++) {
		obj = uma_zalloc(linux_dma_obj_zone, flags);
		if (obj == NULL)
			break;

		error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
		    BUS_DMA_NOWAIT, &obj->dmamap);
		if (error != 0) {
			uma_zfree(linux_dma_obj_zone, obj);
			break;
		}

		store[i] = obj;
	}

	return (i);
}

static void
dma_pool_obj_release(void *arg, void **store, int count)
{
	struct dma_pool *pool = arg;
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int i;

	priv = pool->pool_device->dma_priv;
	for (i = 0; i < count; i++) {
		obj = store[i];
		bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);
		uma_zfree(linux_dma_obj_zone, obj);
	}
}

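/*
 * Create a dma_pool of fixed-size DMA buffers.  A minimal usage sketch,
 * assuming the usual linux/dmapool.h wrappers map the Linux dma_pool_*()
 * names onto the linux_dma_pool_*() functions below (descriptor size and
 * names are illustrative only):
 *
 *	struct dma_pool *pool;
 *	dma_addr_t busaddr;
 *	void *desc;
 *
 *	pool = dma_pool_create("mydesc", &pdev->dev, 64, 64, 0);
 *	desc = dma_pool_alloc(pool, GFP_KERNEL, &busaddr);
 *	...
 *	dma_pool_free(pool, desc, busaddr);
 *	dma_pool_destroy(pool);
 */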
struct dma_pool *
linux_dma_pool_create(char *name, struct device *dev, size_t size,
    size_t align, size_t boundary)
{
	struct linux_dma_priv *priv;
	struct dma_pool *pool;

	priv = dev->dma_priv;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	pool->pool_device = dev;
	pool->pool_entry_size = size;

	if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    align, boundary,		/* alignment, boundary */
	    priv->dma_mask,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &pool->pool_dmat)) {
		kfree(pool);
		return (NULL);
	}

	pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor,
	    dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import,
	    dma_pool_obj_release, pool, 0);

	mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF);
	pctrie_init(&pool->pool_ptree);

	return (pool);
}

void
linux_dma_pool_destroy(struct dma_pool *pool)
{

	uma_zdestroy(pool->pool_zone);
	bus_dma_tag_destroy(pool->pool_dmat);
	mtx_destroy(&pool->pool_lock);
	kfree(pool);
}

void *
linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
    dma_addr_t *handle)
{
	struct linux_dma_obj *obj;

	obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags);
	if (obj == NULL)
		return (NULL);

	DMA_POOL_LOCK(pool);
	if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {
		DMA_POOL_UNLOCK(pool);
		uma_zfree_arg(pool->pool_zone, obj, pool);
		return (NULL);
	}
	DMA_POOL_UNLOCK(pool);

	*handle = obj->dma_addr;
	return (obj->vaddr);
}

void
linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
{
	struct linux_dma_obj *obj;

	DMA_POOL_LOCK(pool);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
	if (obj == NULL) {
		DMA_POOL_UNLOCK(pool);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
	DMA_POOL_UNLOCK(pool);

	uma_zfree_arg(pool->pool_zone, obj, pool);
}
790