/*-
 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/pciio.h>
#include <sys/pctrie.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/stdarg.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <dev/pci/pci_iov.h>
#include <dev/backlight/backlight.h>

#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/compat.h>

#include <linux/backlight.h>

#include "backlight_if.h"
#include "pcib_if.h"

/* Undef the Linux function macro defined in linux/pci.h. */
#undef pci_get_class

static device_probe_t linux_pci_probe;
static device_attach_t linux_pci_attach;
static device_detach_t linux_pci_detach;
static device_suspend_t linux_pci_suspend;
static device_resume_t linux_pci_resume;
static device_shutdown_t linux_pci_shutdown;
static pci_iov_init_t linux_pci_iov_init;
static pci_iov_uninit_t linux_pci_iov_uninit;
static pci_iov_add_vf_t linux_pci_iov_add_vf;
static int linux_backlight_get_status(device_t dev, struct backlight_props *props);
static int linux_backlight_update_status(device_t dev, struct backlight_props *props);
static int linux_backlight_get_info(device_t dev, struct backlight_info *info);

static device_method_t pci_methods[] = {
	DEVMETHOD(device_probe, linux_pci_probe),
	DEVMETHOD(device_attach, linux_pci_attach),
	DEVMETHOD(device_detach, linux_pci_detach),
	DEVMETHOD(device_suspend, linux_pci_suspend),
	DEVMETHOD(device_resume, linux_pci_resume),
	DEVMETHOD(device_shutdown, linux_pci_shutdown),
	DEVMETHOD(pci_iov_init, linux_pci_iov_init),
	DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf),

	/* backlight interface */
	DEVMETHOD(backlight_update_status, linux_backlight_update_status),
	DEVMETHOD(backlight_get_status, linux_backlight_get_status),
	DEVMETHOD(backlight_get_info, linux_backlight_get_info),
	DEVMETHOD_END
};

struct linux_dma_priv {
	uint64_t	dma_mask;
	struct mtx	lock;
	bus_dma_tag_t	dmat;
	struct pctrie	ptree;
};
#define	DMA_PRIV_LOCK(priv) mtx_lock(&(priv)->lock)
#define	DMA_PRIV_UNLOCK(priv) mtx_unlock(&(priv)->lock)

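/*
 * Per-device DMA bookkeeping, hung off dev->dma_priv: a default
 * bus_dma tag derived from the device's DMA mask, plus a pctrie of
 * outstanding mappings indexed by DMA address (see the LINUX_DMA
 * pctrie defined further down), all serialized by the embedded lock.
 */
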
static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;
	int error;

	priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);
	pdev->dev.dma_priv = priv;

	mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);

	pctrie_init(&priv->ptree);

	/* create a default DMA tag */
	error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
	if (error) {
		mtx_destroy(&priv->lock);
		free(priv, M_DEVBUF);
		pdev->dev.dma_priv = NULL;
	}
	return (error);
}

static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;

	priv = pdev->dev.dma_priv;
	if (priv->dmat)
		bus_dma_tag_destroy(priv->dmat);
	mtx_destroy(&priv->lock);
	free(priv, M_DEVBUF);
	pdev->dev.dma_priv = NULL;
	return (0);
}

int
linux_dma_tag_init(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat) {
		if (priv->dma_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat);
	}

	priv->dma_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat);
	return (-error);
}

static struct pci_driver *
linux_pci_find(device_t dev, const struct pci_device_id **idp)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;
	uint16_t subdevice;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);
	subvendor = pci_get_subvendor(dev);
	subdevice = pci_get_subdevice(dev);

	spin_lock(&pci_lock);
	list_for_each_entry(pdrv, &pci_drivers, links) {
		for (id = pdrv->id_table; id->vendor != 0; id++) {
			if (vendor == id->vendor &&
			    (PCI_ANY_ID == id->device || device == id->device) &&
			    (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) &&
			    (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) {
				*idp = id;
				spin_unlock(&pci_lock);
				return (pdrv);
			}
		}
	}
	spin_unlock(&pci_lock);
	return (NULL);
}

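/*
 * linux_pci_find() walks each registered driver's id_table up to the
 * terminating all-zero entry (id->vendor == 0).  A table that this
 * loop can match would look roughly like the following sketch (the
 * vendor/device values are hypothetical):
 *
 *	static const struct pci_device_id mydrv_ids[] = {
 *		{ .vendor = 0x15b3, .device = 0x1013,
 *		  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID },
 *		{ 0, }
 *	};
 */
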
static void
lkpifill_pci_dev(device_t dev, struct pci_dev *pdev)
{

	pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));
	pdev->vendor = pci_get_vendor(dev);
	pdev->device = pci_get_device(dev);
	pdev->subsystem_vendor = pci_get_subvendor(dev);
	pdev->subsystem_device = pci_get_subdevice(dev);
	pdev->class = pci_get_class(dev);
	pdev->revision = pci_get_revid(dev);
	pdev->bus = malloc(sizeof(*pdev->bus), M_DEVBUF, M_WAITOK | M_ZERO);
	pdev->bus->self = pdev;
	pdev->bus->number = pci_get_bus(dev);
	pdev->bus->domain = pci_get_domain(dev);
	pdev->dev.bsddev = dev;
	pdev->dev.parent = &linux_root_device;
	INIT_LIST_HEAD(&pdev->dev.irqents);
	kobject_init(&pdev->dev.kobj, &linux_dev_ktype);
	kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev));
	kobject_add(&pdev->dev.kobj, &linux_root_device.kobj,
	    kobject_name(&pdev->dev.kobj));
}

static void
lkpinew_pci_dev_release(struct device *dev)
{
	struct pci_dev *pdev;

	pdev = to_pci_dev(dev);
	free(pdev->bus, M_DEVBUF);
	free(pdev, M_DEVBUF);
}

static struct pci_dev *
lkpinew_pci_dev(device_t dev)
{
	struct pci_dev *pdev;

	pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK | M_ZERO);
	lkpifill_pci_dev(dev, pdev);
	pdev->dev.release = lkpinew_pci_dev_release;

	return (pdev);
}

struct pci_dev *
lkpi_pci_get_class(unsigned int class, struct pci_dev *from)
{
	device_t dev;
	device_t devfrom = NULL;
	struct pci_dev *pdev;

	if (from != NULL)
		devfrom = from->dev.bsddev;

	dev = pci_find_class_from(class >> 16, (class >> 8) & 0xFF, devfrom);
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}

struct pci_dev *
lkpi_pci_get_domain_bus_and_slot(int domain, unsigned int bus,
    unsigned int devfn)
{
	device_t dev;
	struct pci_dev *pdev;

	dev = pci_find_dbsf(domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}

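/*
 * Both lookup helpers above hand back a freshly allocated pci_dev
 * whose release method (lkpinew_pci_dev_release()) frees it on the
 * final reference drop, so callers are expected to release it when
 * done, e.g. (an illustrative sketch; "class" is whatever
 * Linux-encoded class code the caller is searching for):
 *
 *	pdev = lkpi_pci_get_class(class, NULL);
 *	if (pdev != NULL) {
 *		...
 *		put_device(&pdev->dev);
 *	}
 */
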
static int
linux_pci_probe(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;

	if ((pdrv = linux_pci_find(dev, &id)) == NULL)
		return (ENXIO);
	if (device_get_driver(dev) != &pdrv->bsddriver)
		return (ENXIO);
	device_set_desc(dev, pdrv->name);
	return (0);
}

static int
linux_pci_attach(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	struct pci_dev *pdev;

	pdrv = linux_pci_find(dev, &id);
	pdev = device_get_softc(dev);

	MPASS(pdrv != NULL);
	MPASS(pdev != NULL);

	return (linux_pci_attach_device(dev, pdrv, id, pdev));
}

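/*
 * Attach glue shared with other consumers (hence not static): fill in
 * the pci_dev from the newbus device, set up per-device DMA state,
 * link the device onto pci_devices and finally invoke the Linux
 * driver's probe() callback.  For DRM drivers the softc hangs off a
 * "drmn" child, so the PCI ivars and RID are taken from the parent.
 */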
int
linux_pci_attach_device(device_t dev, struct pci_driver *pdrv,
    const struct pci_device_id *id, struct pci_dev *pdev)
{
	struct resource_list_entry *rle;
	device_t parent;
	uintptr_t rid;
	int error;
	bool isdrm;

	linux_set_current(curthread);

	parent = device_get_parent(dev);
	isdrm = pdrv != NULL && pdrv->isdrm;

	if (isdrm) {
		struct pci_devinfo *dinfo;

		dinfo = device_get_ivars(parent);
		device_set_ivars(dev, dinfo);
	}

	lkpifill_pci_dev(dev, pdev);
	if (isdrm)
		PCI_GET_ID(device_get_parent(parent), parent, PCI_ID_RID, &rid);
	else
		PCI_GET_ID(parent, dev, PCI_ID_RID, &rid);
	pdev->devfn = rid;
	pdev->pdrv = pdrv;
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0);
	if (rle != NULL)
		pdev->dev.irq = rle->start;
	else
		pdev->dev.irq = LINUX_IRQ_INVALID;
	pdev->irq = pdev->dev.irq;
	error = linux_pdev_dma_init(pdev);
	if (error)
		goto out_dma_init;

	TAILQ_INIT(&pdev->mmio);

	spin_lock(&pci_lock);
	list_add(&pdev->links, &pci_devices);
	spin_unlock(&pci_lock);

	if (pdrv != NULL) {
		error = pdrv->probe(pdev, id);
		if (error)
			goto out_probe;
	}
	return (0);

out_probe:
	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);
	/* The device is only on pci_devices once DMA init has succeeded. */
	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
out_dma_init:
	put_device(&pdev->dev);
	return (-error);
}

static int
linux_pci_detach(device_t dev)
{
	struct pci_dev *pdev;

	pdev = device_get_softc(dev);

	MPASS(pdev != NULL);

	device_set_desc(dev, NULL);

	return (linux_pci_detach_device(pdev));
}

int
linux_pci_detach_device(struct pci_dev *pdev)
{

	linux_set_current(curthread);

	if (pdev->pdrv != NULL)
		pdev->pdrv->remove(pdev);

	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);

	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);

	return (0);
}

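/*
 * Power-management glue: prefer the legacy pci_driver suspend/resume
 * entry points and fall back to dev_pm_ops, approximating the Linux
 * phase ordering (suspend then suspend_late going down, resume_early
 * then resume coming up).
 */
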
static int
linux_pci_suspend(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pm_message pm = { };
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->suspend != NULL)
		error = -pdev->pdrv->suspend(pdev, pm);
	else if (pmops != NULL && pmops->suspend != NULL) {
		error = -pmops->suspend(&pdev->dev);
		if (error == 0 && pmops->suspend_late != NULL)
			error = -pmops->suspend_late(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_resume(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->resume != NULL)
		error = -pdev->pdrv->resume(pdev);
	else if (pmops != NULL && pmops->resume != NULL) {
		if (pmops->resume_early != NULL)
			error = -pmops->resume_early(&pdev->dev);
		if (error == 0 && pmops->resume != NULL)
			error = -pmops->resume(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_shutdown(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->shutdown != NULL)
		pdev->pdrv->shutdown(pdev);
	return (0);
}

static int
linux_pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_init != NULL)
		error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config);
	else
		error = EINVAL;
	return (error);
}

static void
linux_pci_iov_uninit(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_uninit != NULL)
		pdev->pdrv->bsd_iov_uninit(dev);
}

static int
linux_pci_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_add_vf != NULL)
		error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, vf_config);
	else
		error = EINVAL;
	return (error);
}

static int
_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)
{
	int error;

	linux_set_current(curthread);
	spin_lock(&pci_lock);
	list_add(&pdrv->links, &pci_drivers);
	spin_unlock(&pci_lock);
	pdrv->bsddriver.name = pdrv->name;
	pdrv->bsddriver.methods = pci_methods;
	pdrv->bsddriver.size = sizeof(struct pci_dev);

	mtx_lock(&Giant);
	error = devclass_add_driver(dc, &pdrv->bsddriver,
	    BUS_PASS_DEFAULT, &pdrv->bsdclass);
	mtx_unlock(&Giant);
	return (-error);
}

int
linux_pci_register_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_find("pci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = false;
	return (_linux_pci_register_driver(pdrv, dc));
}

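/*
 * Registration as a LinuxKPI consumer sees it (an illustrative
 * sketch; the "mydrv" names are hypothetical):
 *
 *	static struct pci_driver mydrv_driver = {
 *		.name = "mydrv",
 *		.id_table = mydrv_ids,
 *		.probe = mydrv_probe,
 *		.remove = mydrv_remove,
 *	};
 *
 *	error = linux_pci_register_driver(&mydrv_driver);
 *
 * As with the Linux original, the return value is 0 or a negative
 * errno (note the negation of the devclass_add_driver() result in
 * _linux_pci_register_driver() above).
 */
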
unsigned long
pci_resource_start(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;
	rman_res_t newstart;
	device_t dev;

	if ((rle = linux_pci_get_bar(pdev, bar)) == NULL)
		return (0);
	dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
	    device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
	if (BUS_TRANSLATE_RESOURCE(dev, rle->type, rle->start, &newstart)) {
		device_printf(pdev->dev.bsddev, "translate of %#jx failed\n",
		    (uintmax_t)rle->start);
		return (0);
	}
	return (newstart);
}

unsigned long
pci_resource_len(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;

	if ((rle = linux_pci_get_bar(pdev, bar)) == NULL)
		return (0);
	return (rle->count);
}

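/*
 * Together the two accessors above back the usual BAR-mapping
 * sequence in a driver, e.g. (an illustrative sketch; names are
 * hypothetical):
 *
 *	start = pci_resource_start(pdev, 0);
 *	len = pci_resource_len(pdev, 0);
 *	regs = ioremap(start, len);
 *
 * Both return 0 for a BAR that was never enumerated, so callers
 * should check before mapping.
 */
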
int
linux_pci_register_drm_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_create("vgapci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = true;
	pdrv->name = "drmn";
	return (_linux_pci_register_driver(pdrv, dc));
}

void
linux_pci_unregister_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("pci");

	spin_lock(&pci_lock);
	list_del(&pdrv->links);
	spin_unlock(&pci_lock);
	mtx_lock(&Giant);
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	mtx_unlock(&Giant);
}

void
linux_pci_unregister_drm_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("vgapci");

	spin_lock(&pci_lock);
	list_del(&pdrv->links);
	spin_unlock(&pci_lock);
	mtx_lock(&Giant);
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	mtx_unlock(&Giant);
}

CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));

struct linux_dma_obj {
	void		*vaddr;
	uint64_t	dma_addr;
	bus_dmamap_t	dmamap;
};

static uma_zone_t linux_dma_trie_zone;
static uma_zone_t linux_dma_obj_zone;

static void
linux_dma_init(void *arg)
{

	linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
	    pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
	    UMA_ALIGN_PTR, 0);
	linux_dma_obj_zone = uma_zcreate("linux_dma_object",
	    sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
}
SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);

static void
linux_dma_uninit(void *arg)
{

	uma_zdestroy(linux_dma_obj_zone);
	uma_zdestroy(linux_dma_trie_zone);
}
SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);

static void *
linux_dma_trie_alloc(struct pctrie *ptree)
{

	return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT));
}

static void
linux_dma_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(linux_dma_trie_zone, node);
}

PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
    linux_dma_trie_free);

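/*
 * The trie defined above is keyed by the bus (DMA) address of each
 * mapping (the dma_addr field named in PCTRIE_DEFINE()), which lets
 * linux_dma_unmap() and linux_dma_pool_free() recover the tracking
 * object from nothing more than the dma_addr_t handed back to the
 * driver.
 */
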
void *
linux_dma_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t flag)
{
	struct linux_dma_priv *priv;
	vm_paddr_t high;
	size_t align;
	void *mem;

	if (dev == NULL || dev->dma_priv == NULL) {
		*dma_handle = 0;
		return (NULL);
	}
	priv = dev->dma_priv;
	if (priv->dma_mask)
		high = priv->dma_mask;
	else if (flag & GFP_DMA32)
		high = BUS_SPACE_MAXADDR_32BIT;
	else
		high = BUS_SPACE_MAXADDR;
	align = PAGE_SIZE << get_order(size);
	mem = (void *)kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high,
	    align, 0, VM_MEMATTR_DEFAULT);
	if (mem != NULL) {
		*dma_handle = linux_dma_map_phys(dev, vtophys(mem), size);
		if (*dma_handle == 0) {
			kmem_free((vm_offset_t)mem, size);
			mem = NULL;
		}
	} else {
		*dma_handle = 0;
	}
	return (mem);
}

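/*
 * Typical driver-side use of the allocator above, reached through the
 * Linux-style dma_alloc_coherent() name that the linuxkpi headers map
 * onto it (an illustrative sketch; "sc" and the ring names are
 * hypothetical):
 *
 *	sc->ring = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
 *	    &sc->ring_dma, GFP_KERNEL);
 *	if (sc->ring == NULL)
 *		return (-ENOMEM);
 *
 * On failure *dma_handle is set to 0 as well, so the handle never
 * carries stale state.
 */
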
#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	/*
	 * If the resultant mapping will be entirely 1:1 with the
	 * physical address, short-circuit the remainder of the
	 * bus_dma API.  This avoids tracking collisions in the pctrie
	 * with the additional benefit of reducing overhead.
	 */
	if (bus_dma_id_mapped(priv->dmat, phys, len))
		return (phys);

	obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT);
	if (obj == NULL)
		return (0);

	DMA_PRIV_LOCK(priv);
	if (bus_dmamap_create(priv->dmat, 0, &obj->dmamap) != 0) {
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	nseg = -1;
	if (_bus_dmamap_load_phys(priv->dmat, obj->dmamap, phys, len,
	    BUS_DMA_NOWAIT, &seg, &nseg) != 0) {
		bus_dmamap_destroy(priv->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
	if (error != 0) {
		bus_dmamap_unload(priv->dmat, obj->dmamap);
		bus_dmamap_destroy(priv->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}
	DMA_PRIV_UNLOCK(priv);
	return (obj->dma_addr);
}
#else
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	return (phys);
}
#endif

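/*
 * The identity-mapping fast path in linux_dma_map_phys() above never
 * inserts anything into the pctrie, so for such addresses the lookup
 * in linux_dma_unmap() below simply misses and the function returns
 * without touching bus_dma state; on platforms taking the #else
 * branches there is nothing to undo at all.
 */
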
#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);
	bus_dmamap_unload(priv->dmat, obj->dmamap);
	bus_dmamap_destroy(priv->dmat, obj->dmamap);
	DMA_PRIV_UNLOCK(priv);

	uma_zfree(linux_dma_obj_zone, obj);
}
#else
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
}
#endif

int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
    enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct linux_dma_priv *priv;
	struct scatterlist *sg;
	int i, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	/* create common DMA map in the first S/G entry */
	if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {
		DMA_PRIV_UNLOCK(priv);
		return (0);
	}

	/* load all S/G list entries */
	for_each_sg(sgl, sg, nents, i) {
		nseg = -1;
		if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,
		    sg_phys(sg), sg->length, BUS_DMA_NOWAIT,
		    &seg, &nseg) != 0) {
			bus_dmamap_unload(priv->dmat, sgl->dma_map);
			bus_dmamap_destroy(priv->dmat, sgl->dma_map);
			DMA_PRIV_UNLOCK(priv);
			return (0);
		}
		KASSERT(nseg == 0,
		    ("More than one segment (nseg=%d)", nseg + 1));

		sg_dma_address(sg) = seg.ds_addr;
	}
	DMA_PRIV_UNLOCK(priv);

	return (nents);
}

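/*
 * Note that linux_dma_map_sg_attrs() stores the one bus_dma map it
 * creates in the first scatterlist entry; the unmap routine below
 * therefore has to be called with the same list head in order to
 * find and destroy that map.
 */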
void
linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);
	bus_dmamap_unload(priv->dmat, sgl->dma_map);
	bus_dmamap_destroy(priv->dmat, sgl->dma_map);
	DMA_PRIV_UNLOCK(priv);
}

struct dma_pool {
	struct device  *pool_device;
	uma_zone_t	pool_zone;
	struct mtx	pool_lock;
	bus_dma_tag_t	pool_dmat;
	size_t		pool_entry_size;
	struct pctrie	pool_ptree;
};

#define	DMA_POOL_LOCK(pool) mtx_lock(&(pool)->pool_lock)
#define	DMA_POOL_UNLOCK(pool) mtx_unlock(&(pool)->pool_lock)

static inline int
dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;
	int error, nseg;
	bus_dma_segment_t seg;

	nseg = -1;
	DMA_POOL_LOCK(pool);
	error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,
	    vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,
	    &seg, &nseg);
	DMA_POOL_UNLOCK(pool);
	if (error != 0)
		return (error);
	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	return (0);
}

static void
dma_pool_obj_dtor(void *mem, int size, void *arg)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;

	DMA_POOL_LOCK(pool);
	bus_dmamap_unload(pool->pool_dmat, obj->dmamap);
	DMA_POOL_UNLOCK(pool);
}

static int
dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
	struct dma_pool *pool = arg;
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, i;

	priv = pool->pool_device->dma_priv;
	for (i = 0; i < count; i++) {
		obj = uma_zalloc(linux_dma_obj_zone, flags);
		if (obj == NULL)
			break;

		error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
		    BUS_DMA_NOWAIT, &obj->dmamap);
		if (error != 0) {
			uma_zfree(linux_dma_obj_zone, obj);
			break;
		}

		store[i] = obj;
	}

	return (i);
}

static void
dma_pool_obj_release(void *arg, void **store, int count)
{
	struct dma_pool *pool = arg;
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int i;

	priv = pool->pool_device->dma_priv;
	for (i = 0; i < count; i++) {
		obj = store[i];
		bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);
		uma_zfree(linux_dma_obj_zone, obj);
	}
}

struct dma_pool *
linux_dma_pool_create(char *name, struct device *dev, size_t size,
    size_t align, size_t boundary)
{
	struct linux_dma_priv *priv;
	struct dma_pool *pool;

	priv = dev->dma_priv;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	pool->pool_device = dev;
	pool->pool_entry_size = size;

	if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    align, boundary,		/* alignment, boundary */
	    priv->dma_mask,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &pool->pool_dmat)) {
		kfree(pool);
		return (NULL);
	}

	pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor,
	    dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import,
	    dma_pool_obj_release, pool, 0);

	mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF);
	pctrie_init(&pool->pool_ptree);

	return (pool);
}

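/*
 * Illustrative use of the pool implemented above, through the
 * Linux-style names that the linuxkpi headers map onto the
 * linux_dma_pool_*() functions (the "mydrv" names below are
 * hypothetical):
 *
 *	pool = dma_pool_create("mydrv_desc", &pdev->dev, 64, 64, 0);
 *	desc = dma_pool_alloc(pool, GFP_KERNEL, &desc_dma);
 *	...
 *	dma_pool_free(pool, desc, desc_dma);
 *	dma_pool_destroy(pool);
 */
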
void
linux_dma_pool_destroy(struct dma_pool *pool)
{

	uma_zdestroy(pool->pool_zone);
	bus_dma_tag_destroy(pool->pool_dmat);
	mtx_destroy(&pool->pool_lock);
	kfree(pool);
}

void *
linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
    dma_addr_t *handle)
{
	struct linux_dma_obj *obj;

	obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags & GFP_NATIVE_MASK);
	if (obj == NULL)
		return (NULL);

	DMA_POOL_LOCK(pool);
	if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {
		DMA_POOL_UNLOCK(pool);
		uma_zfree_arg(pool->pool_zone, obj, pool);
		return (NULL);
	}
	DMA_POOL_UNLOCK(pool);

	*handle = obj->dma_addr;
	return (obj->vaddr);
}

void
linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
{
	struct linux_dma_obj *obj;

	DMA_POOL_LOCK(pool);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
	if (obj == NULL) {
		DMA_POOL_UNLOCK(pool);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
	DMA_POOL_UNLOCK(pool);

	uma_zfree_arg(pool->pool_zone, obj, pool);
}

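/*
 * backlight(9) bridge: the methods below forward to the Linux
 * backlight_device registered by the driver, converting brightness
 * between the device's native 0..max_brightness range and the
 * 0..100 scale used by backlight(9).
 */
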
static int
linux_backlight_get_status(device_t dev, struct backlight_props *props)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	props->brightness = pdev->dev.bd->props.brightness;
	props->brightness = props->brightness * 100 / pdev->dev.bd->props.max_brightness;
	props->nlevels = 0;

	return (0);
}

static int
linux_backlight_get_info(device_t dev, struct backlight_info *info)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	info->type = BACKLIGHT_TYPE_PANEL;
	strlcpy(info->name, pdev->dev.bd->name, BACKLIGHTMAXNAMELENGTH);
	return (0);
}

static int
linux_backlight_update_status(device_t dev, struct backlight_props *props)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	pdev->dev.bd->props.brightness = pdev->dev.bd->props.max_brightness *
		props->brightness / 100;
	return (pdev->dev.bd->ops->update_status(pdev->dev.bd));
}

struct backlight_device *
linux_backlight_device_register(const char *name, struct device *dev,
    void *data, const struct backlight_ops *ops, struct backlight_properties *props)
{

	dev->bd = malloc(sizeof(*dev->bd), M_DEVBUF, M_WAITOK | M_ZERO);
	dev->bd->ops = ops;
	dev->bd->props.type = props->type;
	dev->bd->props.max_brightness = props->max_brightness;
	dev->bd->props.brightness = props->brightness;
	dev->bd->props.power = props->power;
	dev->bd->data = data;
	dev->bd->dev = dev;
	dev->bd->name = strdup(name, M_DEVBUF);

	dev->backlight_dev = backlight_register(name, dev->bsddev);

	return (dev->bd);
}

void
linux_backlight_device_unregister(struct backlight_device *bd)
{

	backlight_destroy(bd->dev->backlight_dev);
	free(bd->name, M_DEVBUF);
	free(bd, M_DEVBUF);
}