/*-
 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 * Copyright (c) 2020-2022 The FreeBSD Foundation
 *
 * Portions of this software were developed by Björn Zeeb
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/pciio.h>
#include <sys/pctrie.h>
#include <sys/rman.h>
#include <sys/rwlock.h>
#include <sys/stdarg.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <dev/pci/pci_iov.h>
#include <dev/backlight/backlight.h>

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/compat.h>

#include <linux/backlight.h>

#include "backlight_if.h"
#include "pcib_if.h"

/* Undef the linux function macro defined in linux/pci.h */
#undef pci_get_class

extern int linuxkpi_debug;

SYSCTL_DECL(_compat_linuxkpi);

static counter_u64_t lkpi_pci_nseg1_fail;
SYSCTL_COUNTER_U64(_compat_linuxkpi, OID_AUTO, lkpi_pci_nseg1_fail, CTLFLAG_RD,
    &lkpi_pci_nseg1_fail, "Count of busdma mapping failures of single-segment");

static device_probe_t linux_pci_probe;
static device_attach_t linux_pci_attach;
static device_detach_t linux_pci_detach;
static device_suspend_t linux_pci_suspend;
static device_resume_t linux_pci_resume;
static device_shutdown_t linux_pci_shutdown;
static pci_iov_init_t linux_pci_iov_init;
static pci_iov_uninit_t linux_pci_iov_uninit;
static pci_iov_add_vf_t linux_pci_iov_add_vf;
static int linux_backlight_get_status(device_t dev, struct backlight_props *props);
static int linux_backlight_update_status(device_t dev, struct backlight_props *props);
static int linux_backlight_get_info(device_t dev, struct backlight_info *info);
static void lkpi_pcim_iomap_table_release(struct device *, void *);

static device_method_t pci_methods[] = {
	DEVMETHOD(device_probe, linux_pci_probe),
	DEVMETHOD(device_attach, linux_pci_attach),
	DEVMETHOD(device_detach, linux_pci_detach),
	DEVMETHOD(device_suspend, linux_pci_suspend),
	DEVMETHOD(device_resume, linux_pci_resume),
	DEVMETHOD(device_shutdown, linux_pci_shutdown),
	DEVMETHOD(pci_iov_init, linux_pci_iov_init),
	DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf),

	/* backlight interface */
	DEVMETHOD(backlight_update_status, linux_backlight_update_status),
	DEVMETHOD(backlight_get_status, linux_backlight_get_status),
	DEVMETHOD(backlight_get_info, linux_backlight_get_info),
	DEVMETHOD_END
};

const char *pci_power_names[] = {
	"UNKNOWN", "D0", "D1", "D2", "D3hot", "D3cold"
};

/* We need some meta-struct to keep track of these for devres. */
struct pci_devres {
	bool		enable_io;
	/* PCIR_MAX_BAR_0 + 1 = 6 => BIT(0..5). */
	uint8_t		region_mask;
	struct resource	*region_table[PCIR_MAX_BAR_0 + 1]; /* Not needed. */
};
struct pcim_iomap_devres {
	void		*mmio_table[PCIR_MAX_BAR_0 + 1];
	struct resource	*res_table[PCIR_MAX_BAR_0 + 1];
};

struct linux_dma_priv {
	uint64_t	dma_mask;
	bus_dma_tag_t	dmat;
	uint64_t	dma_coherent_mask;
	bus_dma_tag_t	dmat_coherent;
	struct mtx	lock;
	struct pctrie	ptree;
};
#define	DMA_PRIV_LOCK(priv)	mtx_lock(&(priv)->lock)
#define	DMA_PRIV_UNLOCK(priv)	mtx_unlock(&(priv)->lock)

static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;

	priv = pdev->dev.dma_priv;
	if (priv->dmat)
		bus_dma_tag_destroy(priv->dmat);
	if (priv->dmat_coherent)
		bus_dma_tag_destroy(priv->dmat_coherent);
	mtx_destroy(&priv->lock);
	pdev->dev.dma_priv = NULL;
	free(priv, M_DEVBUF);
	return (0);
}

static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;
	int error;

	priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);

	mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);
	pctrie_init(&priv->ptree);

	pdev->dev.dma_priv = priv;

	/* Create default DMA tags. */
	error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
	if (error != 0)
		goto err;
	/* Coherent is lower 32bit only by default in Linux. */
	error = linux_dma_tag_init_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (error != 0)
		goto err;

	return (error);

err:
	linux_pdev_dma_uninit(pdev);
	return (error);
}

int
linux_dma_tag_init(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat) {
		if (priv->dma_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat);
	}

	priv->dma_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat);
	return (-error);
}

int
linux_dma_tag_init_coherent(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat_coherent) {
		if (priv->dma_coherent_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat_coherent);
	}

	priv->dma_coherent_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat_coherent);
	return (-error);
}

static struct pci_driver *
linux_pci_find(device_t dev, const struct pci_device_id **idp)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;
	uint16_t subdevice;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);
	subvendor = pci_get_subvendor(dev);
	subdevice = pci_get_subdevice(dev);

	spin_lock(&pci_lock);
	list_for_each_entry(pdrv, &pci_drivers, node) {
		for (id = pdrv->id_table; id->vendor != 0; id++) {
			if (vendor == id->vendor &&
			    (PCI_ANY_ID == id->device || device == id->device) &&
			    (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) &&
			    (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) {
				*idp = id;
				spin_unlock(&pci_lock);
				return (pdrv);
			}
		}
	}
	spin_unlock(&pci_lock);
	return (NULL);
}

struct pci_dev *
lkpi_pci_get_device(uint16_t vendor, uint16_t device, struct pci_dev *odev)
{
	struct pci_dev *pdev, *found;

	KASSERT(odev == NULL, ("%s: odev argument not yet supported\n", __func__));

	found = NULL;
	spin_lock(&pci_lock);
	list_for_each_entry(pdev, &pci_devices, links) {
		if (pdev->vendor == vendor && pdev->device == device) {
			found = pdev;
			break;
		}
	}
	pci_dev_get(found);
	spin_unlock(&pci_lock);

	return (found);
}

static void
lkpi_pci_dev_release(struct device *dev)
{

	lkpi_devres_release_free_list(dev);
	spin_lock_destroy(&dev->devres_lock);
}

static int
lkpifill_pci_dev(device_t dev, struct pci_dev *pdev)
{
	int error;

	error = kobject_init_and_add(&pdev->dev.kobj, &linux_dev_ktype,
	    &linux_root_device.kobj, device_get_nameunit(dev));
	if (error != 0) {
		printf("%s:%d: kobject_init_and_add returned %d\n",
		    __func__, __LINE__, error);
		return (error);
	}

	pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));
	pdev->vendor = pci_get_vendor(dev);
	pdev->device = pci_get_device(dev);
	pdev->subsystem_vendor = pci_get_subvendor(dev);
	pdev->subsystem_device = pci_get_subdevice(dev);
	pdev->class = pci_get_class(dev);
	pdev->revision = pci_get_revid(dev);
	pdev->path_name = kasprintf(GFP_KERNEL, "%04d:%02d:%02d.%d",
	    pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
	    pci_get_function(dev));
	pdev->bus = malloc(sizeof(*pdev->bus), M_DEVBUF, M_WAITOK | M_ZERO);
	/*
	 * This should be the upstream bridge; pci_upstream_bridge()
	 * handles that case on demand as otherwise we'll shadow the
	 * entire PCI hierarchy.
	 */
	pdev->bus->self = pdev;
	pdev->bus->number = pci_get_bus(dev);
	pdev->bus->domain = pci_get_domain(dev);
	pdev->dev.bsddev = dev;
	pdev->dev.parent = &linux_root_device;
	pdev->dev.release = lkpi_pci_dev_release;
	INIT_LIST_HEAD(&pdev->dev.irqents);

	if (pci_msi_count(dev) > 0)
		pdev->msi_desc = malloc(pci_msi_count(dev) *
		    sizeof(*pdev->msi_desc), M_DEVBUF, M_WAITOK | M_ZERO);

	spin_lock_init(&pdev->dev.devres_lock);
	INIT_LIST_HEAD(&pdev->dev.devres_head);

	return (0);
}

static void
lkpinew_pci_dev_release(struct device *dev)
{
	struct pci_dev *pdev;
	int i;

	pdev = to_pci_dev(dev);
	if (pdev->root != NULL)
		pci_dev_put(pdev->root);
	if (pdev->bus->self != pdev)
		pci_dev_put(pdev->bus->self);
	free(pdev->bus, M_DEVBUF);
	if (pdev->msi_desc != NULL) {
		for (i = pci_msi_count(pdev->dev.bsddev) - 1; i >= 0; i--)
			free(pdev->msi_desc[i], M_DEVBUF);
		free(pdev->msi_desc, M_DEVBUF);
	}
	kfree(pdev->path_name);
	free(pdev, M_DEVBUF);
}

struct pci_dev *
lkpinew_pci_dev(device_t dev)
{
	struct pci_dev *pdev;
	int error;

	pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK|M_ZERO);
	error = lkpifill_pci_dev(dev, pdev);
	if (error != 0) {
		free(pdev, M_DEVBUF);
		return (NULL);
	}
	pdev->dev.release = lkpinew_pci_dev_release;

	return (pdev);
}

struct pci_dev *
lkpi_pci_get_class(unsigned int class, struct pci_dev *from)
{
	device_t dev;
	device_t devfrom = NULL;
	struct pci_dev *pdev;

	if (from != NULL)
		devfrom = from->dev.bsddev;

	dev = pci_find_class_from(class >> 16, (class >> 8) & 0xFF, devfrom);
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}

struct pci_dev *
lkpi_pci_get_base_class(unsigned int baseclass, struct pci_dev *from)
{
	device_t dev;
	device_t devfrom = NULL;
	struct pci_dev *pdev;

	if (from != NULL)
		devfrom = from->dev.bsddev;

	dev = pci_find_base_class_from(baseclass, devfrom);
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}

struct pci_dev *
lkpi_pci_get_domain_bus_and_slot(int domain, unsigned int bus,
    unsigned int devfn)
{
	device_t dev;
	struct pci_dev *pdev;

	dev = pci_find_dbsf(domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}
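
/*
 * Newbus probe method: match the device against the registered LinuxKPI
 * PCI drivers and return the driver's configured probe priority.
 */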
static int
linux_pci_probe(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;

	if ((pdrv = linux_pci_find(dev, &id)) == NULL)
		return (ENXIO);
	if (device_get_driver(dev) != &pdrv->bsddriver)
		return (ENXIO);
	device_set_desc(dev, pdrv->name);

	/* Assume BSS initialized (should never return BUS_PROBE_SPECIFIC). */
	if (pdrv->bsd_probe_return == 0)
		return (BUS_PROBE_DEFAULT);
	else
		return (pdrv->bsd_probe_return);
}

static int
linux_pci_attach(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	struct pci_dev *pdev;

	pdrv = linux_pci_find(dev, &id);
	pdev = device_get_softc(dev);

	MPASS(pdrv != NULL);
	MPASS(pdev != NULL);

	return (linux_pci_attach_device(dev, pdrv, id, pdev));
}

static struct resource_list_entry *
linux_pci_reserve_bar(struct pci_dev *pdev, struct resource_list *rl,
    int type, int rid)
{
	device_t dev;
	struct resource *res;

	KASSERT(type == SYS_RES_IOPORT || type == SYS_RES_MEMORY,
	    ("trying to reserve non-BAR type %d", type));

	dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
	    device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
	res = pci_reserve_map(device_get_parent(dev), dev, type, &rid, 0, ~0,
	    1, 1, 0);
	if (res == NULL)
		return (NULL);
	return (resource_list_find(rl, type, rid));
}

static struct resource_list_entry *
linux_pci_get_rle(struct pci_dev *pdev, int type, int rid, bool reserve_bar)
{
	struct pci_devinfo *dinfo;
	struct resource_list *rl;
	struct resource_list_entry *rle;

	dinfo = device_get_ivars(pdev->dev.bsddev);
	rl = &dinfo->resources;
	rle = resource_list_find(rl, type, rid);
	/* Reserve resources for this BAR if needed. */
	if (rle == NULL && reserve_bar)
		rle = linux_pci_reserve_bar(pdev, rl, type, rid);
	return (rle);
}

int
linux_pci_attach_device(device_t dev, struct pci_driver *pdrv,
    const struct pci_device_id *id, struct pci_dev *pdev)
{
	struct resource_list_entry *rle;
	device_t parent;
	uintptr_t rid;
	int error;
	bool isdrm;

	linux_set_current(curthread);

	parent = device_get_parent(dev);
	isdrm = pdrv != NULL && pdrv->isdrm;

	if (isdrm) {
		struct pci_devinfo *dinfo;

		dinfo = device_get_ivars(parent);
		device_set_ivars(dev, dinfo);
	}

	error = lkpifill_pci_dev(dev, pdev);
	if (error != 0)
		return (error);

	if (isdrm)
		PCI_GET_ID(device_get_parent(parent), parent, PCI_ID_RID, &rid);
	else
		PCI_GET_ID(parent, dev, PCI_ID_RID, &rid);
	pdev->devfn = rid;
	pdev->pdrv = pdrv;
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0, false);
	if (rle != NULL)
		pdev->dev.irq = rle->start;
	else
		pdev->dev.irq = LINUX_IRQ_INVALID;
	pdev->irq = pdev->dev.irq;
	error = linux_pdev_dma_init(pdev);
	if (error)
		goto out_dma_init;

	TAILQ_INIT(&pdev->mmio);
	spin_lock_init(&pdev->pcie_cap_lock);

	spin_lock(&pci_lock);
	list_add(&pdev->links, &pci_devices);
	spin_unlock(&pci_lock);

	if (pdrv != NULL) {
		error = pdrv->probe(pdev, id);
		if (error)
			goto out_probe;
	}
	return (0);

out_probe:
	free(pdev->bus, M_DEVBUF);
	spin_lock_destroy(&pdev->pcie_cap_lock);
	linux_pdev_dma_uninit(pdev);
out_dma_init:
	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);
	return (-error);
}

static int
linux_pci_detach(device_t dev)
{
	struct pci_dev *pdev;

	pdev = device_get_softc(dev);

	MPASS(pdev != NULL);

	device_set_desc(dev, NULL);

	return (linux_pci_detach_device(pdev));
}

int
linux_pci_detach_device(struct pci_dev *pdev)
{

	linux_set_current(curthread);

	if (pdev->pdrv != NULL)
		pdev->pdrv->remove(pdev);

	if (pdev->root != NULL)
		pci_dev_put(pdev->root);
	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);

	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	spin_lock_destroy(&pdev->pcie_cap_lock);
	put_device(&pdev->dev);

	return (0);
}

static int
lkpi_pci_disable_dev(struct device *dev)
{

	(void) pci_disable_io(dev->bsddev, SYS_RES_MEMORY);
	(void) pci_disable_io(dev->bsddev, SYS_RES_IOPORT);
	return (0);
}

static struct pci_devres *
lkpi_pci_devres_get_alloc(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = lkpi_devres_find(&pdev->dev, lkpi_pci_devres_release, NULL, NULL);
	if (dr == NULL) {
		dr = lkpi_devres_alloc(lkpi_pci_devres_release, sizeof(*dr),
		    GFP_KERNEL | __GFP_ZERO);
		if (dr != NULL)
			lkpi_devres_add(&pdev->dev, dr);
	}

	return (dr);
}

static struct pci_devres *
lkpi_pci_devres_find(struct pci_dev *pdev)
{
	if (!pdev->managed)
		return (NULL);

	return (lkpi_pci_devres_get_alloc(pdev));
}

void
lkpi_pci_devres_release(struct device *dev, void *p)
{
	struct pci_devres *dr;
	struct pci_dev *pdev;
	int bar;

	pdev = to_pci_dev(dev);
	dr = p;

	if (pdev->msix_enabled)
		lkpi_pci_disable_msix(pdev);
	if (pdev->msi_enabled)
		lkpi_pci_disable_msi(pdev);

	if (dr->enable_io && lkpi_pci_disable_dev(dev) == 0)
		dr->enable_io = false;

	if (dr->region_mask == 0)
		return;
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {

		if ((dr->region_mask & (1 << bar)) == 0)
			continue;
		pci_release_region(pdev, bar);
	}
}

int
linuxkpi_pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int error;

	/* Here we cannot run through the pdev->managed check. */
	dr = lkpi_pci_devres_get_alloc(pdev);
	if (dr == NULL)
		return (-ENOMEM);

	/* If resources were enabled before do not do it again. */
	if (dr->enable_io)
		return (0);

	error = pci_enable_device(pdev);
	if (error == 0)
		dr->enable_io = true;

	/* Mark the device managed from now on. */
	pdev->managed = true;

	return (error);
}

static struct pcim_iomap_devres *
lkpi_pcim_iomap_devres_find(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr;

	dr = lkpi_devres_find(&pdev->dev, lkpi_pcim_iomap_table_release,
	    NULL, NULL);
	if (dr == NULL) {
		dr = lkpi_devres_alloc(lkpi_pcim_iomap_table_release,
		    sizeof(*dr), GFP_KERNEL | __GFP_ZERO);
		if (dr != NULL)
			lkpi_devres_add(&pdev->dev, dr);
	}

	if (dr == NULL)
		device_printf(pdev->dev.bsddev, "%s: NULL\n", __func__);

	return (dr);
}

void __iomem **
linuxkpi_pcim_iomap_table(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr;

	dr = lkpi_pcim_iomap_devres_find(pdev);
	if (dr == NULL)
		return (NULL);

	/*
	 * If the driver has manually set a flag to be able to request the
	 * resource to use bus_read/write_<n>, return the shadow table.
	 */
	if (pdev->want_iomap_res)
		return ((void **)dr->res_table);

	/* This is the Linux default. */
	return (dr->mmio_table);
}

static struct resource *
_lkpi_pci_iomap(struct pci_dev *pdev, int bar, int mmio_size __unused)
{
	struct pci_mmio_region *mmio, *p;
	int type;

	type = pci_resource_type(pdev, bar);
	if (type < 0) {
		device_printf(pdev->dev.bsddev, "%s: bar %d type %d\n",
		    __func__, bar, type);
		return (NULL);
	}

	/*
	 * Check for duplicate mappings.
	 * This can happen if a driver calls pci_request_region() first.
	 */
	TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
		if (mmio->type == type && mmio->rid == PCIR_BAR(bar)) {
			return (mmio->res);
		}
	}

	mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO);
	mmio->rid = PCIR_BAR(bar);
	mmio->type = type;
	mmio->res = bus_alloc_resource_any(pdev->dev.bsddev, mmio->type,
	    &mmio->rid, RF_ACTIVE|RF_SHAREABLE);
	if (mmio->res == NULL) {
		device_printf(pdev->dev.bsddev, "%s: failed to alloc "
		    "bar %d type %d rid %d\n",
		    __func__, bar, type, PCIR_BAR(bar));
		free(mmio, M_DEVBUF);
		return (NULL);
	}
	TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);

	return (mmio->res);
}

void *
linuxkpi_pci_iomap_range(struct pci_dev *pdev, int mmio_bar,
    unsigned long mmio_off, unsigned long mmio_size)
{
	struct resource *res;

	res = _lkpi_pci_iomap(pdev, mmio_bar, mmio_size);
	if (res == NULL)
		return (NULL);
	/* This is a FreeBSD extension so we can use bus_*(). */
	if (pdev->want_iomap_res)
		return (res);
	MPASS(mmio_off < rman_get_size(res));
	return ((void *)(rman_get_bushandle(res) + mmio_off));
}

void *
linuxkpi_pci_iomap(struct pci_dev *pdev, int mmio_bar, int mmio_size)
{
	return (linuxkpi_pci_iomap_range(pdev, mmio_bar, 0, mmio_size));
}

void
linuxkpi_pci_iounmap(struct pci_dev *pdev, void *res)
{
	struct pci_mmio_region *mmio, *p;
	bus_space_handle_t bh = (bus_space_handle_t)res;

	TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
		if (pdev->want_iomap_res) {
			if (res != mmio->res)
				continue;
		} else {
			if (bh < rman_get_bushandle(mmio->res) ||
			    bh >= rman_get_bushandle(mmio->res) +
				rman_get_size(mmio->res))
				continue;
		}
		bus_release_resource(pdev->dev.bsddev,
		    mmio->type, mmio->rid, mmio->res);
		TAILQ_REMOVE(&pdev->mmio, mmio, next);
		free(mmio, M_DEVBUF);
		return;
	}
}

int
linuxkpi_pcim_iomap_regions(struct pci_dev *pdev, uint32_t mask, const char *name)
{
	struct pcim_iomap_devres *dr;
	void *res;
	uint32_t mappings;
	int bar;

	dr = lkpi_pcim_iomap_devres_find(pdev);
	if (dr == NULL)
		return (-ENOMEM);

	/* Now iomap all the requested (by "mask") ones. */
	for (bar = mappings = 0; mappings != mask; bar++) {
		if ((mask & (1 << bar)) == 0)
			continue;

		/* Requesting the same BAR twice is not allowed. */
		if (dr->mmio_table[bar] != NULL) {
			device_printf(pdev->dev.bsddev, "%s: bar %d %p\n",
			    __func__, bar, dr->mmio_table[bar]);
			goto err;
		}

		res = _lkpi_pci_iomap(pdev, bar, 0);
		if (res == NULL)
			goto err;
		dr->mmio_table[bar] = (void *)rman_get_bushandle(res);
		dr->res_table[bar] = res;

		mappings |= (1 << bar);
	}

	return (0);
err:
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {
		if ((mappings & (1 << bar)) != 0) {
			res = dr->mmio_table[bar];
			if (res == NULL)
				continue;
			pci_iounmap(pdev, res);
		}
	}

	return (-EINVAL);
}

static void
lkpi_pcim_iomap_table_release(struct device *dev, void *p)
{
	struct pcim_iomap_devres *dr;
	struct pci_dev *pdev;
	int bar;

	dr = p;
	pdev = to_pci_dev(dev);
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {

		if (dr->mmio_table[bar] == NULL)
			continue;

		pci_iounmap(pdev, dr->mmio_table[bar]);
	}
}

static int
linux_pci_suspend(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pm_message pm = { };
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->suspend != NULL)
		error = -pdev->pdrv->suspend(pdev, pm);
	else if (pmops != NULL && pmops->suspend != NULL) {
		error = -pmops->suspend(&pdev->dev);
		if (error == 0 && pmops->suspend_late != NULL)
			error = -pmops->suspend_late(&pdev->dev);
		if (error == 0 && pmops->suspend_noirq != NULL)
			error = -pmops->suspend_noirq(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_resume(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->resume != NULL)
		error = -pdev->pdrv->resume(pdev);
	else if (pmops != NULL && pmops->resume != NULL) {
		if (pmops->resume_early != NULL)
			error = -pmops->resume_early(&pdev->dev);
		if (error == 0 && pmops->resume != NULL)
			error = -pmops->resume(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_shutdown(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->shutdown != NULL)
		pdev->pdrv->shutdown(pdev);
	return (0);
}

static int
linux_pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_init != NULL)
		error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config);
	else
		error = EINVAL;
	return (error);
}

static void
linux_pci_iov_uninit(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_uninit != NULL)
		pdev->pdrv->bsd_iov_uninit(dev);
}

static int
linux_pci_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_add_vf != NULL)
		error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, vf_config);
	else
		error = EINVAL;
	return (error);
}
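
/*
 * Common registration path: put the driver on the LinuxKPI pci_drivers list
 * and register its newbus shim driver with the given devclass.
 */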
static int
_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)
{
	int error;

	linux_set_current(curthread);
	spin_lock(&pci_lock);
	list_add(&pdrv->node, &pci_drivers);
	spin_unlock(&pci_lock);
	if (pdrv->bsddriver.name == NULL)
		pdrv->bsddriver.name = pdrv->name;
	pdrv->bsddriver.methods = pci_methods;
	pdrv->bsddriver.size = sizeof(struct pci_dev);

	bus_topo_lock();
	error = devclass_add_driver(dc, &pdrv->bsddriver,
	    BUS_PASS_DEFAULT, &pdrv->bsdclass);
	bus_topo_unlock();
	return (-error);
}

int
linux_pci_register_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	pdrv->isdrm = strcmp(pdrv->name, "drmn") == 0;
	dc = pdrv->isdrm ? devclass_create("vgapci") : devclass_find("pci");
	if (dc == NULL)
		return (-ENXIO);
	return (_linux_pci_register_driver(pdrv, dc));
}

static struct resource_list_entry *
lkpi_pci_get_bar(struct pci_dev *pdev, int bar, bool reserve)
{
	int type;

	type = pci_resource_type(pdev, bar);
	if (type < 0)
		return (NULL);
	bar = PCIR_BAR(bar);
	return (linux_pci_get_rle(pdev, type, bar, reserve));
}

struct device *
lkpi_pci_find_irq_dev(unsigned int irq)
{
	struct pci_dev *pdev;
	struct device *found;

	found = NULL;
	spin_lock(&pci_lock);
	list_for_each_entry(pdev, &pci_devices, links) {
		if (irq == pdev->dev.irq ||
		    (irq >= pdev->dev.irq_start && irq < pdev->dev.irq_end)) {
			found = &pdev->dev;
			break;
		}
	}
	spin_unlock(&pci_lock);
	return (found);
}

unsigned long
pci_resource_start(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;
	rman_res_t newstart;
	device_t dev;
	int error;

	if ((rle = lkpi_pci_get_bar(pdev, bar, true)) == NULL)
		return (0);
	dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
	    device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
	error = bus_translate_resource(dev, rle->type, rle->start, &newstart);
	if (error != 0) {
		device_printf(pdev->dev.bsddev,
		    "translate of %#jx failed: %d\n",
		    (uintmax_t)rle->start, error);
		return (0);
	}
	return (newstart);
}

unsigned long
pci_resource_len(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;

	if ((rle = lkpi_pci_get_bar(pdev, bar, true)) == NULL)
		return (0);
	return (rle->count);
}

int
pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	struct resource *res;
	struct pci_devres *dr;
	struct pci_mmio_region *mmio;
	int rid;
	int type;

	type = pci_resource_type(pdev, bar);
	if (type < 0)
		return (-ENODEV);
	rid = PCIR_BAR(bar);
	res = bus_alloc_resource_any(pdev->dev.bsddev, type, &rid,
	    RF_ACTIVE|RF_SHAREABLE);
	if (res == NULL) {
		device_printf(pdev->dev.bsddev, "%s: failed to alloc "
		    "bar %d type %d rid %d\n",
		    __func__, bar, type, PCIR_BAR(bar));
		return (-ENODEV);
	}

	/*
	 * It seems there is an implicit devres tracking on these if the device
	 * is managed; otherwise the resources are not automatically freed on
	 * FreeBSD/LinuxKPI though they should be/are expected to be by Linux
	 * drivers.
	 */
	dr = lkpi_pci_devres_find(pdev);
	if (dr != NULL) {
		dr->region_mask |= (1 << bar);
		dr->region_table[bar] = res;
	}

	/* Even if the device is not managed we need to track it for iomap. */
	mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO);
	mmio->rid = PCIR_BAR(bar);
	mmio->type = type;
	mmio->res = res;
	TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);

	return (0);
}

int
linuxkpi_pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
	int error;
	int i;

	for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
		error = pci_request_region(pdev, i, res_name);
		if (error && error != -ENODEV) {
			pci_release_regions(pdev);
			return (error);
		}
	}
	return (0);
}

void
linuxkpi_pci_release_region(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;
	struct pci_devres *dr;
	struct pci_mmio_region *mmio, *p;

	if ((rle = lkpi_pci_get_bar(pdev, bar, false)) == NULL)
		return;

	/*
	 * As we implicitly track the requests we also need to clear them on
	 * release.  Do clear before resource release.
	 */
	dr = lkpi_pci_devres_find(pdev);
	if (dr != NULL) {
		KASSERT(dr->region_table[bar] == rle->res, ("%s: pdev %p bar %d"
		    " region_table res %p != rle->res %p\n", __func__, pdev,
		    bar, dr->region_table[bar], rle->res));
		dr->region_table[bar] = NULL;
		dr->region_mask &= ~(1 << bar);
	}

	TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
		if (rle->res != (void *)rman_get_bushandle(mmio->res))
			continue;
		TAILQ_REMOVE(&pdev->mmio, mmio, next);
		free(mmio, M_DEVBUF);
	}

	bus_release_resource(pdev->dev.bsddev, rle->type, rle->rid, rle->res);
}

void
linuxkpi_pci_release_regions(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i <= PCIR_MAX_BAR_0; i++)
		pci_release_region(pdev, i);
}

int
linux_pci_register_drm_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_create("vgapci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = true;
	pdrv->name = "drmn";
	return (_linux_pci_register_driver(pdrv, dc));
}

void
linux_pci_unregister_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find(pdrv->isdrm ? "vgapci" : "pci");

	spin_lock(&pci_lock);
	list_del(&pdrv->node);
	spin_unlock(&pci_lock);
	bus_topo_lock();
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	bus_topo_unlock();
}

void
linux_pci_unregister_drm_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("vgapci");

	spin_lock(&pci_lock);
	list_del(&pdrv->node);
	spin_unlock(&pci_lock);
	bus_topo_lock();
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	bus_topo_unlock();
}

int
linuxkpi_pci_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
    int nreq)
{
	struct resource_list_entry *rle;
	int error;
	int avail;
	int i;

	avail = pci_msix_count(pdev->dev.bsddev);
	if (avail < nreq) {
		if (avail == 0)
			return -EINVAL;
		return avail;
	}
	avail = nreq;
	if ((error = -pci_alloc_msix(pdev->dev.bsddev, &avail)) != 0)
		return error;
	/*
	 * Handle the case where pci_alloc_msix() may allocate fewer
	 * interrupts than requested and return with no error:
	 */
	if (avail < nreq) {
		pci_release_msi(pdev->dev.bsddev);
		return avail;
	}
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false);
	pdev->dev.irq_start = rle->start;
	pdev->dev.irq_end = rle->start + avail;
	for (i = 0; i < nreq; i++)
		entries[i].vector = pdev->dev.irq_start + i;
	pdev->msix_enabled = true;
	return (0);
}

int
_lkpi_pci_enable_msi_range(struct pci_dev *pdev, int minvec, int maxvec)
{
	struct resource_list_entry *rle;
	int error;
	int nvec;

	if (maxvec < minvec)
		return (-EINVAL);

	nvec = pci_msi_count(pdev->dev.bsddev);
	if (nvec < 1 || nvec < minvec)
		return (-ENOSPC);

	nvec = min(nvec, maxvec);
	if ((error = -pci_alloc_msi(pdev->dev.bsddev, &nvec)) != 0)
		return error;

	/* Native PCI might only ever ask for 32 vectors. */
	if (nvec < minvec) {
		pci_release_msi(pdev->dev.bsddev);
		return (-ENOSPC);
	}

	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false);
	pdev->dev.irq_start = rle->start;
	pdev->dev.irq_end = rle->start + nvec;
	pdev->irq = rle->start;
	pdev->msi_enabled = true;
	return (0);
}

int
pci_alloc_irq_vectors(struct pci_dev *pdev, int minv, int maxv,
    unsigned int flags)
{
	int error;

	if (flags & PCI_IRQ_MSIX) {
		struct msix_entry *entries;
		int i;

		entries = kcalloc(maxv, sizeof(*entries), GFP_KERNEL);
		if (entries == NULL) {
			error = -ENOMEM;
			goto out;
		}
		for (i = 0; i < maxv; ++i)
			entries[i].entry = i;
		error = pci_enable_msix(pdev, entries, maxv);
out:
		kfree(entries);
		if (error == 0 && pdev->msix_enabled)
			return (pdev->dev.irq_end - pdev->dev.irq_start);
	}
	if (flags & PCI_IRQ_MSI) {
		if (pci_msi_count(pdev->dev.bsddev) < minv)
			return (-ENOSPC);
		error = _lkpi_pci_enable_msi_range(pdev, minv, maxv);
		if (error == 0 && pdev->msi_enabled)
			return (pdev->dev.irq_end - pdev->dev.irq_start);
	}
	if (flags & PCI_IRQ_INTX) {
		if (pdev->irq)
			return (1);
	}

	return (-EINVAL);
}

struct msi_desc *
lkpi_pci_msi_desc_alloc(int irq)
{
	struct device *dev;
	struct pci_dev *pdev;
	struct msi_desc *desc;
	struct pci_devinfo *dinfo;
	struct pcicfg_msi *msi;
	int vec;

	dev = lkpi_pci_find_irq_dev(irq);
	if (dev == NULL)
		return (NULL);

	pdev = to_pci_dev(dev);

	if (pdev->msi_desc == NULL)
		return (NULL);

	if (irq < pdev->dev.irq_start || irq >= pdev->dev.irq_end)
		return (NULL);

	vec = irq - pdev->dev.irq_start;

	if (pdev->msi_desc[vec] != NULL)
		return (pdev->msi_desc[vec]);

	dinfo = device_get_ivars(dev->bsddev);
	msi = &dinfo->cfg.msi;

	desc = malloc(sizeof(*desc), M_DEVBUF, M_WAITOK | M_ZERO);

	desc->pci.msi_attrib.is_64 =
	    (msi->msi_ctrl & PCIM_MSICTRL_64BIT) ? true : false;
	desc->msg.data = msi->msi_data;

	pdev->msi_desc[vec] = desc;

	return (desc);
}

bool
pci_device_is_present(struct pci_dev *pdev)
{
	device_t dev;

	dev = pdev->dev.bsddev;

	return (bus_child_present(dev));
}

CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));

struct linux_dma_obj {
	void		*vaddr;
	uint64_t	dma_addr;
	bus_dmamap_t	dmamap;
	bus_dma_tag_t	dmat;
};

static uma_zone_t linux_dma_trie_zone;
static uma_zone_t linux_dma_obj_zone;

static void
linux_dma_init(void *arg)
{

	linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
	    pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
	    UMA_ALIGN_PTR, 0);
	linux_dma_obj_zone = uma_zcreate("linux_dma_object",
	    sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	lkpi_pci_nseg1_fail = counter_u64_alloc(M_WAITOK);
}
SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);

static void
linux_dma_uninit(void *arg)
{

	counter_u64_free(lkpi_pci_nseg1_fail);
	uma_zdestroy(linux_dma_obj_zone);
	uma_zdestroy(linux_dma_trie_zone);
}
SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);

static void *
linux_dma_trie_alloc(struct pctrie *ptree)
{

	return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT));
}

static void
linux_dma_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(linux_dma_trie_zone, node);
}

PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
    linux_dma_trie_free);

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
static dma_addr_t
linux_dma_map_phys_common(struct device *dev, vm_paddr_t phys, size_t len,
    bus_dma_tag_t dmat)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	/*
	 * If the resultant mapping will be entirely 1:1 with the
	 * physical address, short-circuit the remainder of the
	 * bus_dma API.  This avoids tracking collisions in the pctrie
	 * with the additional benefit of reducing overhead.
	 */
	if (bus_dma_id_mapped(dmat, phys, len))
		return (phys);

	obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT);
	if (obj == NULL) {
		return (0);
	}
	obj->dmat = dmat;

	DMA_PRIV_LOCK(priv);
	if (bus_dmamap_create(obj->dmat, 0, &obj->dmamap) != 0) {
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	nseg = -1;
	error = _bus_dmamap_load_phys(obj->dmat, obj->dmamap, phys, len,
	    BUS_DMA_NOWAIT, &seg, &nseg);
	if (error != 0) {
		bus_dmamap_destroy(obj->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		counter_u64_add(lkpi_pci_nseg1_fail, 1);
		if (linuxkpi_debug) {
			device_printf(dev->bsddev, "%s: _bus_dmamap_load_phys "
			    "error %d, phys %#018jx len %zu\n", __func__,
			    error, (uintmax_t)phys, len);
			dump_stack();
		}
		return (0);
	}

	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
	if (error != 0) {
		bus_dmamap_unload(obj->dmat, obj->dmamap);
		bus_dmamap_destroy(obj->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}
	DMA_PRIV_UNLOCK(priv);
	return (obj->dma_addr);
}
#else
static dma_addr_t
linux_dma_map_phys_common(struct device *dev __unused, vm_paddr_t phys,
    size_t len __unused, bus_dma_tag_t dmat __unused)
{
	return (phys);
}
#endif

dma_addr_t
lkpi_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len,
    enum dma_data_direction direction, unsigned long attrs)
{
	struct linux_dma_priv *priv;
	dma_addr_t dma;

	priv = dev->dma_priv;
	dma = linux_dma_map_phys_common(dev, phys, len, priv->dmat);
	if (dma_mapping_error(dev, dma))
		return (dma);

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		dma_sync_single_for_device(dev, dma, len, direction);

	return (dma);
}

/* For backward compat only so we can MFC this.  Remove before 15. */
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	return (lkpi_dma_map_phys(dev, phys, len, DMA_NONE, 0));
}

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
void
lkpi_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len,
    enum dma_data_direction direction, unsigned long attrs)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		dma_sync_single_for_cpu(dev, dma_addr, len, direction);

	bus_dmamap_unload(obj->dmat, obj->dmamap);
	bus_dmamap_destroy(obj->dmat, obj->dmamap);
	DMA_PRIV_UNLOCK(priv);

	uma_zfree(linux_dma_obj_zone, obj);
}
#else
void
lkpi_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len,
    enum dma_data_direction direction, unsigned long attrs)
{
}
#endif

/* For backward compat only so we can MFC this.  Remove before 15. */
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
	lkpi_dma_unmap(dev, dma_addr, len, DMA_NONE, 0);
}

void *
linux_dma_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t flag)
{
	struct linux_dma_priv *priv;
	vm_paddr_t high;
	size_t align;
	void *mem;

	if (dev == NULL || dev->dma_priv == NULL) {
		*dma_handle = 0;
		return (NULL);
	}
	priv = dev->dma_priv;
	if (priv->dma_coherent_mask)
		high = priv->dma_coherent_mask;
	else
		/* Coherent is lower 32bit only by default in Linux. */
		high = BUS_SPACE_MAXADDR_32BIT;
	align = PAGE_SIZE << get_order(size);
	/* Always zero the allocation. */
	flag |= M_ZERO;
	mem = kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high,
	    align, 0, VM_MEMATTR_DEFAULT);
	if (mem != NULL) {
		*dma_handle = linux_dma_map_phys_common(dev, vtophys(mem), size,
		    priv->dmat_coherent);
		if (*dma_handle == 0) {
			kmem_free(mem, size);
			mem = NULL;
		}
	} else {
		*dma_handle = 0;
	}
	return (mem);
}

struct lkpi_devres_dmam_coherent {
	size_t size;
	dma_addr_t *handle;
	void *mem;
};

static void
lkpi_dmam_free_coherent(struct device *dev, void *p)
{
	struct lkpi_devres_dmam_coherent *dr;

	dr = p;
	dma_free_coherent(dev, dr->size, dr->mem, *dr->handle);
}

void *
linuxkpi_dmam_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
    gfp_t flag)
{
	struct lkpi_devres_dmam_coherent *dr;

	dr = lkpi_devres_alloc(lkpi_dmam_free_coherent,
	    sizeof(*dr), GFP_KERNEL | __GFP_ZERO);

	if (dr == NULL)
		return (NULL);

	dr->size = size;
	dr->mem = linux_dma_alloc_coherent(dev, size, dma_handle, flag);
	dr->handle = dma_handle;
	if (dr->mem == NULL) {
		lkpi_devres_free(dr);
		return (NULL);
	}

	lkpi_devres_add(dev, dr);
	return (dr->mem);
}

void
linuxkpi_dma_sync(struct device *dev, dma_addr_t dma_addr, size_t size,
    bus_dmasync_op_t op)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}

	bus_dmamap_sync(obj->dmat, obj->dmamap, op);
	DMA_PRIV_UNLOCK(priv);
}

int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
    enum dma_data_direction direction, unsigned long attrs)
{
	struct linux_dma_priv *priv;
	struct scatterlist *sg;
	int i, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	/* create common DMA map in the first S/G entry */
	if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {
		DMA_PRIV_UNLOCK(priv);
		return (0);
	}

	/* load all S/G list entries */
	for_each_sg(sgl, sg, nents, i) {
		nseg = -1;
		if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,
		    sg_phys(sg), sg->length, BUS_DMA_NOWAIT,
		    &seg, &nseg) != 0) {
			bus_dmamap_unload(priv->dmat, sgl->dma_map);
			bus_dmamap_destroy(priv->dmat, sgl->dma_map);
			DMA_PRIV_UNLOCK(priv);
			return (0);
		}
		KASSERT(nseg == 0,
		    ("More than one segment (nseg=%d)", nseg + 1));

		sg_dma_address(sg) = seg.ds_addr;
	}

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) != 0)
		goto skip_sync;

	switch (direction) {
	case DMA_BIDIRECTIONAL:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE);
		break;
	case DMA_TO_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD);
		break;
	case DMA_FROM_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE);
		break;
	default:
		break;
	}
skip_sync:

	DMA_PRIV_UNLOCK(priv);

	return (nents);
}

void
linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents __unused, enum dma_data_direction direction,
    unsigned long attrs)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) != 0)
		goto skip_sync;

	switch (direction) {
	case DMA_BIDIRECTIONAL:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD);
		break;
	case DMA_TO_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTWRITE);
		break;
	case DMA_FROM_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD);
		break;
	default:
		break;
	}
skip_sync:

	bus_dmamap_unload(priv->dmat, sgl->dma_map);
	bus_dmamap_destroy(priv->dmat, sgl->dma_map);
	DMA_PRIV_UNLOCK(priv);
}

struct dma_pool {
	struct device	*pool_device;
	uma_zone_t	pool_zone;
	struct mtx	pool_lock;
	bus_dma_tag_t	pool_dmat;
	size_t		pool_entry_size;
	struct pctrie	pool_ptree;
};

#define	DMA_POOL_LOCK(pool)	mtx_lock(&(pool)->pool_lock)
#define	DMA_POOL_UNLOCK(pool)	mtx_unlock(&(pool)->pool_lock)

static inline int
dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;
	int error, nseg;
	bus_dma_segment_t seg;

	nseg = -1;
	DMA_POOL_LOCK(pool);
	error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,
	    vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,
	    &seg, &nseg);
	DMA_POOL_UNLOCK(pool);
	if (error != 0) {
		return (error);
	}
	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	return (0);
}

static void
dma_pool_obj_dtor(void *mem, int size, void *arg)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;

	DMA_POOL_LOCK(pool);
	bus_dmamap_unload(pool->pool_dmat, obj->dmamap);
	DMA_POOL_UNLOCK(pool);
}

static int
dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
	struct dma_pool *pool = arg;
	struct linux_dma_obj *obj;
	int error, i;

	for (i = 0; i < count; i++) {
		obj = uma_zalloc(linux_dma_obj_zone, flags);
		if (obj == NULL)
			break;

		error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
		    BUS_DMA_NOWAIT, &obj->dmamap);
		if (error != 0) {
			uma_zfree(linux_dma_obj_zone, obj);
			break;
		}

		store[i] = obj;
	}

	return (i);
}

static void
dma_pool_obj_release(void *arg, void **store, int count)
{
	struct dma_pool *pool = arg;
	struct linux_dma_obj *obj;
	int i;

	for (i = 0; i < count; i++) {
		obj = store[i];
		bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);
		uma_zfree(linux_dma_obj_zone, obj);
	}
}

struct dma_pool *
linux_dma_pool_create(char *name, struct device *dev, size_t size,
    size_t align, size_t boundary)
{
	struct linux_dma_priv *priv;
	struct dma_pool *pool;

	priv = dev->dma_priv;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	pool->pool_device = dev;
	pool->pool_entry_size = size;

	if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    align, boundary,		/* alignment, boundary */
	    priv->dma_mask,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &pool->pool_dmat)) {
		kfree(pool);
		return (NULL);
	}

	pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor,
	    dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import,
	    dma_pool_obj_release, pool, 0);

	mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF);
	pctrie_init(&pool->pool_ptree);

	return (pool);
}

void
linux_dma_pool_destroy(struct dma_pool *pool)
{

	uma_zdestroy(pool->pool_zone);
	bus_dma_tag_destroy(pool->pool_dmat);
	mtx_destroy(&pool->pool_lock);
	kfree(pool);
}

void
lkpi_dmam_pool_destroy(struct device *dev, void *p)
{
	struct dma_pool *pool;

	pool = *(struct dma_pool **)p;
	LINUX_DMA_PCTRIE_RECLAIM(&pool->pool_ptree);
	linux_dma_pool_destroy(pool);
}

void *
linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
    dma_addr_t *handle)
{
	struct linux_dma_obj *obj;

	obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags & GFP_NATIVE_MASK);
	if (obj == NULL)
		return (NULL);

	DMA_POOL_LOCK(pool);
	if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {
		DMA_POOL_UNLOCK(pool);
		uma_zfree_arg(pool->pool_zone, obj, pool);
		return (NULL);
	}
	DMA_POOL_UNLOCK(pool);

	*handle = obj->dma_addr;
	return (obj->vaddr);
}

void
linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
{
	struct linux_dma_obj *obj;

	DMA_POOL_LOCK(pool);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
	if (obj == NULL) {
		DMA_POOL_UNLOCK(pool);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
	DMA_POOL_UNLOCK(pool);

	uma_zfree_arg(pool->pool_zone, obj, pool);
}

static int
linux_backlight_get_status(device_t dev, struct backlight_props *props)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	props->brightness = pdev->dev.bd->props.brightness;
	props->brightness = props->brightness * 100 / pdev->dev.bd->props.max_brightness;
	props->nlevels = 0;

	return (0);
}

static int
linux_backlight_get_info(device_t dev, struct backlight_info *info)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	info->type = BACKLIGHT_TYPE_PANEL;
	strlcpy(info->name, pdev->dev.bd->name, BACKLIGHTMAXNAMELENGTH);
	return (0);
}
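
/*
 * backlight(9) update_status method: scale the 0-100 brightness to the
 * Linux device's max_brightness and hand it to the driver's update_status.
 */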
static int
linux_backlight_update_status(device_t dev, struct backlight_props *props)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	pdev->dev.bd->props.brightness = pdev->dev.bd->props.max_brightness *
	    props->brightness / 100;
	pdev->dev.bd->props.power = props->brightness == 0 ?
	    4/* FB_BLANK_POWERDOWN */ : 0/* FB_BLANK_UNBLANK */;
	return (pdev->dev.bd->ops->update_status(pdev->dev.bd));
}

struct backlight_device *
linux_backlight_device_register(const char *name, struct device *dev,
    void *data, const struct backlight_ops *ops, struct backlight_properties *props)
{

	dev->bd = malloc(sizeof(*dev->bd), M_DEVBUF, M_WAITOK | M_ZERO);
	dev->bd->ops = ops;
	dev->bd->props.type = props->type;
	dev->bd->props.max_brightness = props->max_brightness;
	dev->bd->props.brightness = props->brightness;
	dev->bd->props.power = props->power;
	dev->bd->data = data;
	dev->bd->dev = dev;
	dev->bd->name = strdup(name, M_DEVBUF);

	dev->backlight_dev = backlight_register(name, dev->bsddev);

	return (dev->bd);
}

void
linux_backlight_device_unregister(struct backlight_device *bd)
{

	backlight_destroy(bd->dev->backlight_dev);
	free(bd->name, M_DEVBUF);
	free(bd, M_DEVBUF);
}