/*-
 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/pciio.h>
#include <sys/pctrie.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/stdarg.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <dev/pci/pci_iov.h>
#include <dev/backlight/backlight.h>

#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/compat.h>

#include <linux/backlight.h>

#include "backlight_if.h"
#include "pcib_if.h"

/* Undef the linux function macro defined in linux/pci.h */
#undef pci_get_class

static device_probe_t linux_pci_probe;
static device_attach_t linux_pci_attach;
static device_detach_t linux_pci_detach;
static device_suspend_t linux_pci_suspend;
static device_resume_t linux_pci_resume;
static device_shutdown_t linux_pci_shutdown;
static pci_iov_init_t linux_pci_iov_init;
static pci_iov_uninit_t linux_pci_iov_uninit;
static pci_iov_add_vf_t linux_pci_iov_add_vf;
static int linux_backlight_get_status(device_t dev,
    struct backlight_props *props);
static int linux_backlight_update_status(device_t dev,
    struct backlight_props *props);
static int linux_backlight_get_info(device_t dev,
    struct backlight_info *info);
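
/*
 * Newbus glue: every Linux pci_driver registered through this layer is
 * fronted by a FreeBSD driver built from the methods below.
 */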
static device_method_t pci_methods[] = {
	DEVMETHOD(device_probe, linux_pci_probe),
	DEVMETHOD(device_attach, linux_pci_attach),
	DEVMETHOD(device_detach, linux_pci_detach),
	DEVMETHOD(device_suspend, linux_pci_suspend),
	DEVMETHOD(device_resume, linux_pci_resume),
	DEVMETHOD(device_shutdown, linux_pci_shutdown),
	DEVMETHOD(pci_iov_init, linux_pci_iov_init),
	DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf),

	/* backlight interface */
	DEVMETHOD(backlight_update_status, linux_backlight_update_status),
	DEVMETHOD(backlight_get_status, linux_backlight_get_status),
	DEVMETHOD(backlight_get_info, linux_backlight_get_info),
	DEVMETHOD_END
};

struct linux_dma_priv {
	uint64_t	dma_mask;
	struct mtx	lock;
	bus_dma_tag_t	dmat;
	struct pctrie	ptree;
};
#define	DMA_PRIV_LOCK(priv)	mtx_lock(&(priv)->lock)
#define	DMA_PRIV_UNLOCK(priv)	mtx_unlock(&(priv)->lock)

static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;
	int error;

	priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);
	pdev->dev.dma_priv = priv;

	mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);

	pctrie_init(&priv->ptree);

	/* create a default DMA tag */
	error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
	if (error) {
		mtx_destroy(&priv->lock);
		free(priv, M_DEVBUF);
		pdev->dev.dma_priv = NULL;
	}
	return (error);
}

static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;

	priv = pdev->dev.dma_priv;
	if (priv->dmat)
		bus_dma_tag_destroy(priv->dmat);
	mtx_destroy(&priv->lock);
	free(priv, M_DEVBUF);
	pdev->dev.dma_priv = NULL;
	return (0);
}

int
linux_dma_tag_init(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat) {
		if (priv->dma_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat);
	}

	priv->dma_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat);
	return (-error);
}

static struct pci_driver *
linux_pci_find(device_t dev, const struct pci_device_id **idp)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;
	uint16_t subdevice;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);
	subvendor = pci_get_subvendor(dev);
	subdevice = pci_get_subdevice(dev);

	spin_lock(&pci_lock);
	list_for_each_entry(pdrv, &pci_drivers, links) {
		for (id = pdrv->id_table; id->vendor != 0; id++) {
			if (vendor == id->vendor &&
			    (PCI_ANY_ID == id->device ||
			     device == id->device) &&
			    (PCI_ANY_ID == id->subvendor ||
			     subvendor == id->subvendor) &&
			    (PCI_ANY_ID == id->subdevice ||
			     subdevice == id->subdevice)) {
				*idp = id;
				spin_unlock(&pci_lock);
				return (pdrv);
			}
		}
	}
	spin_unlock(&pci_lock);
	return (NULL);
}
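
/*
 * Populate the Linux pci_dev (devfn, vendor/device IDs, class, revision
 * and bus coordinates) from the backing newbus device_t.
 */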
static void
lkpifill_pci_dev(device_t dev, struct pci_dev *pdev)
{

	pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));
	pdev->vendor = pci_get_vendor(dev);
	pdev->device = pci_get_device(dev);
	pdev->class = pci_get_class(dev);
	pdev->revision = pci_get_revid(dev);
	pdev->dev.bsddev = dev;
	pdev->bus->self = pdev;
	pdev->bus->number = pci_get_bus(dev);
	pdev->bus->domain = pci_get_domain(dev);
}

static struct pci_dev *
lkpinew_pci_dev(device_t dev)
{
	struct pci_dev *pdev;
	struct pci_bus *pbus;

	pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK|M_ZERO);
	pbus = malloc(sizeof(*pbus), M_DEVBUF, M_WAITOK|M_ZERO);
	pdev->bus = pbus;
	lkpifill_pci_dev(dev, pdev);
	return (pdev);
}

struct pci_dev *
lkpi_pci_get_class(unsigned int class, struct pci_dev *from)
{
	device_t dev;
	device_t devfrom = NULL;
	struct pci_dev *pdev;

	if (from != NULL)
		devfrom = from->dev.bsddev;

	dev = pci_find_class_from(class >> 16, (class >> 8) & 0xFF, devfrom);
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}

struct pci_dev *
lkpi_pci_get_domain_bus_and_slot(int domain, unsigned int bus,
    unsigned int devfn)
{
	device_t dev;
	struct pci_dev *pdev;

	dev = pci_find_dbsf(domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}

static int
linux_pci_probe(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;

	if ((pdrv = linux_pci_find(dev, &id)) == NULL)
		return (ENXIO);
	if (device_get_driver(dev) != &pdrv->bsddriver)
		return (ENXIO);
	device_set_desc(dev, pdrv->name);
	return (0);
}

static int
linux_pci_attach(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	struct pci_dev *pdev;

	pdrv = linux_pci_find(dev, &id);
	pdev = device_get_softc(dev);

	MPASS(pdrv != NULL);
	MPASS(pdev != NULL);

	return (linux_pci_attach_device(dev, pdrv, id, pdev));
}
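
/*
 * Attach path shared by regular PCI and DRM drivers.  For DRM devices
 * the PCI ivars live on the vgapci parent, so the parent's ivars are
 * duplicated onto this device before the Linux-side pci_dev is filled
 * in and the driver's probe() callback is invoked.
 */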
int
linux_pci_attach_device(device_t dev, struct pci_driver *pdrv,
    const struct pci_device_id *id, struct pci_dev *pdev)
{
	struct resource_list_entry *rle;
	struct pci_devinfo *dinfo;
	device_t parent;
	uintptr_t rid;
	int error;
	bool isdrm;

	linux_set_current(curthread);

	parent = device_get_parent(dev);
	isdrm = pdrv != NULL && pdrv->isdrm;

	if (isdrm) {
		dinfo = device_get_ivars(parent);
		device_set_ivars(dev, dinfo);
	} else {
		dinfo = device_get_ivars(dev);
	}

	pdev->bus = malloc(sizeof(*pdev->bus), M_DEVBUF, M_WAITOK | M_ZERO);
	lkpifill_pci_dev(dev, pdev);
	pdev->dev.parent = &linux_root_device;
	INIT_LIST_HEAD(&pdev->dev.irqents);
	if (isdrm)
		PCI_GET_ID(device_get_parent(parent), parent, PCI_ID_RID, &rid);
	else
		PCI_GET_ID(parent, dev, PCI_ID_RID, &rid);
	pdev->devfn = rid;
	pdev->device = dinfo->cfg.device;
	pdev->vendor = dinfo->cfg.vendor;
	pdev->subsystem_vendor = dinfo->cfg.subvendor;
	pdev->subsystem_device = dinfo->cfg.subdevice;
	pdev->pdrv = pdrv;
	kobject_init(&pdev->dev.kobj, &linux_dev_ktype);
	kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev));
	kobject_add(&pdev->dev.kobj, &linux_root_device.kobj,
	    kobject_name(&pdev->dev.kobj));
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0);
	if (rle != NULL)
		pdev->dev.irq = rle->start;
	else
		pdev->dev.irq = LINUX_IRQ_INVALID;
	pdev->irq = pdev->dev.irq;
	error = linux_pdev_dma_init(pdev);
	if (error)
		goto out_dma_init;

	TAILQ_INIT(&pdev->mmio);

	spin_lock(&pci_lock);
	list_add(&pdev->links, &pci_devices);
	spin_unlock(&pci_lock);

	if (pdrv != NULL) {
		error = pdrv->probe(pdev, id);
		if (error)
			goto out_probe;
	}
	return (0);

out_probe:
	/* The device is on the global list only if DMA init succeeded. */
	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	linux_pdev_dma_uninit(pdev);
out_dma_init:
	free(pdev->bus, M_DEVBUF);
	put_device(&pdev->dev);
	return (-error);
}

static int
linux_pci_detach(device_t dev)
{
	struct pci_dev *pdev;

	pdev = device_get_softc(dev);

	MPASS(pdev != NULL);

	device_set_desc(dev, NULL);

	return (linux_pci_detach_device(pdev));
}

int
linux_pci_detach_device(struct pci_dev *pdev)
{

	linux_set_current(curthread);

	if (pdev->pdrv != NULL)
		pdev->pdrv->remove(pdev);

	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);

	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);

	return (0);
}

static int
linux_pci_suspend(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pm_message pm = { };
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->suspend != NULL)
		error = -pdev->pdrv->suspend(pdev, pm);
	else if (pmops != NULL && pmops->suspend != NULL) {
		error = -pmops->suspend(&pdev->dev);
		if (error == 0 && pmops->suspend_late != NULL)
			error = -pmops->suspend_late(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_resume(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->resume != NULL)
		error = -pdev->pdrv->resume(pdev);
	else if (pmops != NULL && pmops->resume != NULL) {
		if (pmops->resume_early != NULL)
			error = -pmops->resume_early(&pdev->dev);
		if (error == 0 && pmops->resume != NULL)
			error = -pmops->resume(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_shutdown(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->shutdown != NULL)
		pdev->pdrv->shutdown(pdev);
	return (0);
}

static int
linux_pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_init != NULL)
		error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config);
	else
		error = EINVAL;
	return (error);
}

static void
linux_pci_iov_uninit(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_uninit != NULL)
		pdev->pdrv->bsd_iov_uninit(dev);
}

static int
linux_pci_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_add_vf != NULL)
		error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, vf_config);
	else
		error = EINVAL;
	return (error);
}
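
/*
 * Common registration path: the Linux pci_driver is added to the global
 * driver list and wrapped in a newbus driver (pci_methods above) that
 * is attached to the given devclass under Giant.
 */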
static int
_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)
{
	int error;

	linux_set_current(curthread);
	spin_lock(&pci_lock);
	list_add(&pdrv->links, &pci_drivers);
	spin_unlock(&pci_lock);
	pdrv->bsddriver.name = pdrv->name;
	pdrv->bsddriver.methods = pci_methods;
	pdrv->bsddriver.size = sizeof(struct pci_dev);

	mtx_lock(&Giant);
	error = devclass_add_driver(dc, &pdrv->bsddriver,
	    BUS_PASS_DEFAULT, &pdrv->bsdclass);
	mtx_unlock(&Giant);
	return (-error);
}

int
linux_pci_register_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_find("pci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = false;
	return (_linux_pci_register_driver(pdrv, dc));
}

unsigned long
pci_resource_start(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;
	rman_res_t newstart;
	device_t dev;

	if ((rle = linux_pci_get_bar(pdev, bar)) == NULL)
		return (0);
	dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
	    device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
	if (BUS_TRANSLATE_RESOURCE(dev, rle->type, rle->start, &newstart)) {
		device_printf(pdev->dev.bsddev, "translate of %#jx failed\n",
		    (uintmax_t)rle->start);
		return (0);
	}
	return (newstart);
}

unsigned long
pci_resource_len(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;

	if ((rle = linux_pci_get_bar(pdev, bar)) == NULL)
		return (0);
	return (rle->count);
}

int
linux_pci_register_drm_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_create("vgapci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = true;
	pdrv->name = "drmn";
	return (_linux_pci_register_driver(pdrv, dc));
}

void
linux_pci_unregister_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("pci");

	spin_lock(&pci_lock);
	list_del(&pdrv->links);
	spin_unlock(&pci_lock);
	mtx_lock(&Giant);
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	mtx_unlock(&Giant);
}

void
linux_pci_unregister_drm_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("vgapci");

	spin_lock(&pci_lock);
	list_del(&pdrv->links);
	spin_unlock(&pci_lock);
	mtx_lock(&Giant);
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	mtx_unlock(&Giant);
}

CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));

struct linux_dma_obj {
	void		*vaddr;
	uint64_t	dma_addr;
	bus_dmamap_t	dmamap;
};

static uma_zone_t linux_dma_trie_zone;
static uma_zone_t linux_dma_obj_zone;

static void
linux_dma_init(void *arg)
{

	linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
	    pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
	    UMA_ALIGN_PTR, 0);
	linux_dma_obj_zone = uma_zcreate("linux_dma_object",
	    sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
}
SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);

static void
linux_dma_uninit(void *arg)
{

	uma_zdestroy(linux_dma_obj_zone);
	uma_zdestroy(linux_dma_trie_zone);
}
SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);
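
/*
 * pctrie(9) node allocation for the per-device DMA trie, which maps a
 * dma_addr_t handle back to its linux_dma_obj so that unmap can recover
 * the bus_dma state.  Allocations use M_NOWAIT because the trie is
 * modified while the DMA_PRIV lock is held.
 */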
static void *
linux_dma_trie_alloc(struct pctrie *ptree)
{

	return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT));
}

static void
linux_dma_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(linux_dma_trie_zone, node);
}

PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
    linux_dma_trie_free);

void *
linux_dma_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t flag)
{
	struct linux_dma_priv *priv;
	vm_paddr_t high;
	size_t align;
	void *mem;

	if (dev == NULL || dev->dma_priv == NULL) {
		*dma_handle = 0;
		return (NULL);
	}
	priv = dev->dma_priv;
	if (priv->dma_mask)
		high = priv->dma_mask;
	else if (flag & GFP_DMA32)
		high = BUS_SPACE_MAXADDR_32BIT;
	else
		high = BUS_SPACE_MAXADDR;
	align = PAGE_SIZE << get_order(size);
	mem = (void *)kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high,
	    align, 0, VM_MEMATTR_DEFAULT);
	if (mem != NULL) {
		*dma_handle = linux_dma_map_phys(dev, vtophys(mem), size);
		if (*dma_handle == 0) {
			kmem_free((vm_offset_t)mem, size);
			mem = NULL;
		}
	} else {
		*dma_handle = 0;
	}
	return (mem);
}

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	/*
	 * If the resultant mapping will be entirely 1:1 with the
	 * physical address, short-circuit the remainder of the
	 * bus_dma API.  This avoids tracking collisions in the pctrie
	 * with the additional benefit of reducing overhead.
	 */
	if (bus_dma_id_mapped(priv->dmat, phys, len))
		return (phys);

	obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT);
	if (obj == NULL)
		return (0);

	DMA_PRIV_LOCK(priv);
	if (bus_dmamap_create(priv->dmat, 0, &obj->dmamap) != 0) {
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	nseg = -1;
	if (_bus_dmamap_load_phys(priv->dmat, obj->dmamap, phys, len,
	    BUS_DMA_NOWAIT, &seg, &nseg) != 0) {
		bus_dmamap_destroy(priv->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
	if (error != 0) {
		bus_dmamap_unload(priv->dmat, obj->dmamap);
		bus_dmamap_destroy(priv->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}
	DMA_PRIV_UNLOCK(priv);
	return (obj->dma_addr);
}
#else
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	return (phys);
}
#endif
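
/*
 * Undo linux_dma_map_phys(): identity (1:1) mappings were never
 * inserted into the trie, so a failed lookup simply returns; otherwise
 * the bus_dma map is unloaded and destroyed and the tracking object is
 * freed.
 */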
#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);
	bus_dmamap_unload(priv->dmat, obj->dmamap);
	bus_dmamap_destroy(priv->dmat, obj->dmamap);
	DMA_PRIV_UNLOCK(priv);

	uma_zfree(linux_dma_obj_zone, obj);
}
#else
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
}
#endif

int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
    enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct linux_dma_priv *priv;
	struct scatterlist *sg;
	int i, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	/* create common DMA map in the first S/G entry */
	if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {
		DMA_PRIV_UNLOCK(priv);
		return (0);
	}

	/* load all S/G list entries */
	for_each_sg(sgl, sg, nents, i) {
		nseg = -1;
		if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,
		    sg_phys(sg), sg->length, BUS_DMA_NOWAIT,
		    &seg, &nseg) != 0) {
			bus_dmamap_unload(priv->dmat, sgl->dma_map);
			bus_dmamap_destroy(priv->dmat, sgl->dma_map);
			DMA_PRIV_UNLOCK(priv);
			return (0);
		}
		KASSERT(nseg == 0,
		    ("More than one segment (nseg=%d)", nseg + 1));

		sg_dma_address(sg) = seg.ds_addr;
	}
	DMA_PRIV_UNLOCK(priv);

	return (nents);
}

void
linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);
	bus_dmamap_unload(priv->dmat, sgl->dma_map);
	bus_dmamap_destroy(priv->dmat, sgl->dma_map);
	DMA_PRIV_UNLOCK(priv);
}

struct dma_pool {
	struct device	*pool_device;
	uma_zone_t	pool_zone;
	struct mtx	pool_lock;
	bus_dma_tag_t	pool_dmat;
	size_t		pool_entry_size;
	struct pctrie	pool_ptree;
};

#define	DMA_POOL_LOCK(pool)	mtx_lock(&(pool)->pool_lock)
#define	DMA_POOL_UNLOCK(pool)	mtx_unlock(&(pool)->pool_lock)

static inline int
dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;
	int error, nseg;
	bus_dma_segment_t seg;

	nseg = -1;
	DMA_POOL_LOCK(pool);
	error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,
	    vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,
	    &seg, &nseg);
	DMA_POOL_UNLOCK(pool);
	if (error != 0)
		return (error);
	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	return (0);
}

static void
dma_pool_obj_dtor(void *mem, int size, void *arg)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;

	DMA_POOL_LOCK(pool);
	bus_dmamap_unload(pool->pool_dmat, obj->dmamap);
	DMA_POOL_UNLOCK(pool);
}
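
/*
 * UMA cache-zone import/release for dma_pool: DMA-able buffers are
 * allocated and freed in batches via bus_dmamem_alloc()/free() as the
 * per-pool cache grows and shrinks, while the ctor/dtor above load and
 * unload each buffer's map as objects move in and out of the cache.
 */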
static int
dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
	struct dma_pool *pool = arg;
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, i;

	priv = pool->pool_device->dma_priv;
	for (i = 0; i < count; i++) {
		obj = uma_zalloc(linux_dma_obj_zone, flags);
		if (obj == NULL)
			break;

		error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
		    BUS_DMA_NOWAIT, &obj->dmamap);
		if (error != 0) {
			uma_zfree(linux_dma_obj_zone, obj);
			break;
		}

		store[i] = obj;
	}

	return (i);
}

static void
dma_pool_obj_release(void *arg, void **store, int count)
{
	struct dma_pool *pool = arg;
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int i;

	priv = pool->pool_device->dma_priv;
	for (i = 0; i < count; i++) {
		obj = store[i];
		bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);
		uma_zfree(linux_dma_obj_zone, obj);
	}
}

struct dma_pool *
linux_dma_pool_create(char *name, struct device *dev, size_t size,
    size_t align, size_t boundary)
{
	struct linux_dma_priv *priv;
	struct dma_pool *pool;

	priv = dev->dma_priv;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	pool->pool_device = dev;
	pool->pool_entry_size = size;

	if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    align, boundary,		/* alignment, boundary */
	    priv->dma_mask,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &pool->pool_dmat)) {
		kfree(pool);
		return (NULL);
	}

	pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor,
	    dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import,
	    dma_pool_obj_release, pool, 0);

	mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF);
	pctrie_init(&pool->pool_ptree);

	return (pool);
}

void
linux_dma_pool_destroy(struct dma_pool *pool)
{

	uma_zdestroy(pool->pool_zone);
	bus_dma_tag_destroy(pool->pool_dmat);
	mtx_destroy(&pool->pool_lock);
	kfree(pool);
}

void *
linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
    dma_addr_t *handle)
{
	struct linux_dma_obj *obj;

	obj = uma_zalloc_arg(pool->pool_zone, pool,
	    mem_flags & GFP_NATIVE_MASK);
	if (obj == NULL)
		return (NULL);

	DMA_POOL_LOCK(pool);
	if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {
		DMA_POOL_UNLOCK(pool);
		uma_zfree_arg(pool->pool_zone, obj, pool);
		return (NULL);
	}
	DMA_POOL_UNLOCK(pool);

	*handle = obj->dma_addr;
	return (obj->vaddr);
}

void
linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
{
	struct linux_dma_obj *obj;

	DMA_POOL_LOCK(pool);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
	if (obj == NULL) {
		DMA_POOL_UNLOCK(pool);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
	DMA_POOL_UNLOCK(pool);

	uma_zfree_arg(pool->pool_zone, obj, pool);
}

static int
linux_backlight_get_status(device_t dev, struct backlight_props *props)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	props->brightness = pdev->dev.bd->props.brightness;
	props->brightness = props->brightness * 100 /
	    pdev->dev.bd->props.max_brightness;
	props->nlevels = 0;

	return (0);
}

static int
linux_backlight_get_info(device_t dev, struct backlight_info *info)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	info->type = BACKLIGHT_TYPE_PANEL;
	strlcpy(info->name, pdev->dev.bd->name, BACKLIGHTMAXNAMELENGTH);
	return (0);
}
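
/*
 * backlight(9) reports brightness in percent while the Linux side uses
 * driver-specific units, so values are rescaled against max_brightness
 * here and in linux_backlight_get_status() above.
 */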
static int
linux_backlight_update_status(device_t dev, struct backlight_props *props)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	pdev->dev.bd->props.brightness = pdev->dev.bd->props.max_brightness *
	    props->brightness / 100;
	return (pdev->dev.bd->ops->update_status(pdev->dev.bd));
}

struct backlight_device *
linux_backlight_device_register(const char *name, struct device *dev,
    void *data, const struct backlight_ops *ops,
    struct backlight_properties *props)
{

	dev->bd = malloc(sizeof(*dev->bd), M_DEVBUF, M_WAITOK | M_ZERO);
	dev->bd->ops = ops;
	dev->bd->props.type = props->type;
	dev->bd->props.max_brightness = props->max_brightness;
	dev->bd->props.brightness = props->brightness;
	dev->bd->props.power = props->power;
	dev->bd->data = data;
	dev->bd->dev = dev;
	dev->bd->name = strdup(name, M_DEVBUF);

	dev->backlight_dev = backlight_register(name, dev->bsddev);

	return (dev->bd);
}

void
linux_backlight_device_unregister(struct backlight_device *bd)
{

	backlight_destroy(bd->dev->backlight_dev);
	free(bd->name, M_DEVBUF);
	free(bd, M_DEVBUF);
}