/* drm_pci.c -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
/**
 * \file drm_pci.c
 * \brief Functions and ioctls to manage PCI memory
 *
 * \warning These interfaces aren't stable yet.
 *
 * \todo Implement the remaining ioctl's for the PCI pools.
 * \todo The wrappers here are so thin that they would be better off inlined.
 *
 * \author José Fonseca <jrfonseca@tungstengraphics.com>
 * \author Leif Delgass <ldelgass@retinalburn.net>
 */

/*
 * Copyright 2003 José Fonseca.
 * Copyright 2003 Leif Delgass.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>

static int drm_msi = 1;	/* Enable by default. */
SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
SYSCTL_INT(_hw_drm, OID_AUTO, msi, CTLFLAG_RDTUN, &drm_msi, 1,
    "Enable MSI interrupts for drm devices");
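
/*
 * Note: because the sysctl above is declared CTLFLAG_RDTUN, hw.drm.msi is a
 * boot-time tunable rather than a runtime-writable sysctl.  For example
 * (illustrative only), MSI can be disabled for all drm devices by adding the
 * following line to /boot/loader.conf:
 *
 *	hw.drm.msi="0"
 */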

/**********************************************************************/
/** \name PCI memory */
/*@{*/

static void
drm_pci_busdma_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	drm_dma_handle_t *dmah = arg;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("drm_pci_busdma_callback: bad dma segment count"));
	dmah->busaddr = segs[0].ds_addr;
}

/**
 * \brief Allocate a PCI consistent memory block, for DMA.
 */
drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size,
    size_t align, dma_addr_t maxaddr)
{
	drm_dma_handle_t *dmah;
	int ret;

	/* Need power-of-two alignment, so fail the allocation if it isn't. */
	if ((align & (align - 1)) != 0) {
		DRM_ERROR("drm_pci_alloc with non-power-of-two alignment %d\n",
		    (int)align);
		return NULL;
	}

	dmah = malloc(sizeof(drm_dma_handle_t), DRM_MEM_DMA, M_ZERO | M_NOWAIT);
	if (dmah == NULL)
		return NULL;

	/* Make sure we aren't holding mutexes here */
	mtx_assert(&dev->dma_lock, MA_NOTOWNED);
	if (mtx_owned(&dev->dma_lock))
		DRM_ERROR("called while holding dma_lock\n");

	ret = bus_dma_tag_create(
	    bus_get_dma_tag(dev->dev),	/* parent */
	    align, 0,			/* align, boundary */
	    maxaddr, BUS_SPACE_MAXADDR,	/* lowaddr, highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncargs */
	    size, 1, size,		/* maxsize, nsegs, maxsegsize */
	    0, NULL, NULL,		/* flags, lockfunc, lockfuncargs */
	    &dmah->tag);
	if (ret != 0) {
		free(dmah, DRM_MEM_DMA);
		return NULL;
	}

	ret = bus_dmamem_alloc(dmah->tag, &dmah->vaddr,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_NOCACHE, &dmah->map);
	if (ret != 0) {
		bus_dma_tag_destroy(dmah->tag);
		free(dmah, DRM_MEM_DMA);
		return NULL;
	}

	ret = bus_dmamap_load(dmah->tag, dmah->map, dmah->vaddr, size,
	    drm_pci_busdma_callback, dmah, BUS_DMA_NOWAIT);
	if (ret != 0) {
		bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
		bus_dma_tag_destroy(dmah->tag);
		free(dmah, DRM_MEM_DMA);
		return NULL;
	}

	return dmah;
}

EXPORT_SYMBOL(drm_pci_alloc);

/**
 * \brief Free a PCI consistent memory block without freeing its descriptor.
 *
 * This function is for internal use in the Linux-specific DRM core code.
 */
void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
	if (dmah == NULL)
		return;

	bus_dmamap_unload(dmah->tag, dmah->map);
	bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
	bus_dma_tag_destroy(dmah->tag);
}

/**
 * \brief Free a PCI consistent memory block
 */
void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
	__drm_pci_free(dev, dmah);
	free(dmah, DRM_MEM_DMA);
}

EXPORT_SYMBOL(drm_pci_free);
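
/**
 * Example (illustrative only, not part of the API): a typical
 * allocate/program/free cycle for a small DMA buffer.  The names "ddev" and
 * "ring" are hypothetical; the field names follow drm_dma_handle_t as used
 * above.
 *
 * \code
 *	drm_dma_handle_t *ring;
 *
 *	ring = drm_pci_alloc(ddev, 64 * 1024, PAGE_SIZE,
 *	    BUS_SPACE_MAXADDR_32BIT);
 *	if (ring == NULL)
 *		return -ENOMEM;
 *
 *	// CPU access goes through ring->vaddr; the device is programmed
 *	// with the bus address in ring->busaddr.
 *
 *	drm_pci_free(ddev, ring);
 * \endcode
 */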

static int drm_get_pci_domain(struct drm_device *dev)
{
	return dev->pci_domain;
}

static int drm_pci_get_irq(struct drm_device *dev)
{

	if (dev->irqr)
		return (dev->irq);

	dev->irqr = bus_alloc_resource_any(dev->dev, SYS_RES_IRQ,
	    &dev->irqrid, RF_SHAREABLE);
	if (!dev->irqr) {
		dev_err(dev->dev, "Failed to allocate IRQ\n");
		return (0);
	}

	dev->irq = (int) rman_get_start(dev->irqr);

	return (dev->irq);
}

static void drm_pci_free_irq(struct drm_device *dev)
{
	if (dev->irqr == NULL)
		return;

	bus_release_resource(dev->dev, SYS_RES_IRQ,
	    dev->irqrid, dev->irqr);

	dev->irqr = NULL;
	dev->irq = 0;
}

static const char *drm_pci_get_name(struct drm_device *dev)
{
	return dev->driver->name;
}

int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
{
	int len, ret;
	master->unique_len = 40;
	master->unique_size = master->unique_len;
	master->unique = malloc(master->unique_size, DRM_MEM_DRIVER, M_NOWAIT);
	if (master->unique == NULL)
		return -ENOMEM;

	len = snprintf(master->unique, master->unique_len,
	    "pci:%04x:%02x:%02x.%d",
	    dev->pci_domain,
	    dev->pci_bus,
	    dev->pci_slot,
	    dev->pci_func);

	if (len >= master->unique_len) {
		DRM_ERROR("buffer overflow");
		ret = -EINVAL;
		goto err;
	} else
		master->unique_len = len;

	return 0;
err:
	return ret;
}

int drm_pci_set_unique(struct drm_device *dev,
		       struct drm_master *master,
		       struct drm_unique *u)
{
	int domain, bus, slot, func, ret;

	master->unique_len = u->unique_len;
	master->unique_size = u->unique_len + 1;
	master->unique = malloc(master->unique_size, DRM_MEM_DRIVER, M_WAITOK);
	if (!master->unique) {
		ret = -ENOMEM;
		goto err;
	}

	if (copy_from_user(master->unique, u->unique, master->unique_len)) {
		ret = -EFAULT;
		goto err;
	}

	master->unique[master->unique_len] = '\0';

	/* Return error if the busid submitted doesn't match the device's
	 * actual busid.
	 */
	ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
	if (ret != 3) {
		ret = -EINVAL;
		goto err;
	}

	domain = bus >> 8;
	bus &= 0xff;

	if ((domain != dev->pci_domain) ||
	    (bus != dev->pci_bus) ||
	    (slot != dev->pci_slot) ||
	    (func != dev->pci_func)) {
		ret = -EINVAL;
		goto err;
	}
	return 0;
err:
	return ret;
}
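
/*
 * Note on busid formats (example strings for illustration only):
 * drm_pci_set_busid() generates strings of the form "pci:0000:02:00.0"
 * (domain:bus:slot.function), while drm_pci_set_unique() accepts the older
 * "PCI:%d:%d:%d" form, in which the domain is packed into the upper bits of
 * the bus number; that is why the code above unpacks it with
 * "domain = bus >> 8; bus &= 0xff;".
 */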

static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
	if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
	    (p->busnum & 0xff) != dev->pci_bus ||
	    p->devnum != dev->pci_slot || p->funcnum != dev->pci_func)
		return -EINVAL;

	p->irq = dev->irq;

	DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
	    p->irq);
	return 0;
}

int drm_pci_agp_init(struct drm_device *dev)
{
	if (drm_core_has_AGP(dev)) {
		if (drm_pci_device_is_agp(dev))
			dev->agp = drm_agp_init(dev);
		if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
		    && (dev->agp == NULL)) {
			DRM_ERROR("Cannot initialize the agpgart module.\n");
			return -EINVAL;
		}
		if (drm_core_has_MTRR(dev)) {
			if (dev->agp && dev->agp->agp_info.ai_aperture_base != 0) {
				if (drm_mtrr_add(dev->agp->agp_info.ai_aperture_base,
				    dev->agp->agp_info.ai_aperture_size, DRM_MTRR_WC) == 0)
					dev->agp->agp_mtrr = 1;
				else
					dev->agp->agp_mtrr = -1;
			}
		}
	}
	return 0;
}

static struct drm_bus drm_pci_bus = {
	.bus_type = DRIVER_BUS_PCI,
	.get_irq = drm_pci_get_irq,
	.free_irq = drm_pci_free_irq,
	.get_name = drm_pci_get_name,
	.set_busid = drm_pci_set_busid,
	.set_unique = drm_pci_set_unique,
	.irq_by_busid = drm_pci_irq_by_busid,
	.agp_init = drm_pci_agp_init,
};

/**
 * Register.
 *
 * \param kdev bus device handle for the PCI device
 * \param dev DRM device structure
 * \param driver DRM driver entry points and feature flags
 * \return zero on success or a negative number on failure.
 *
 * Attempts to get inter-module "drm" information.  If we are first,
 * register the character device and inter-module information.
 * Try to register; if we fail to register, back out the previous work.
 */
int drm_get_pci_dev(device_t kdev, struct drm_device *dev,
    struct drm_driver *driver)
{
	int ret;

	DRM_DEBUG("\n");

	driver->bus = &drm_pci_bus;

	dev->dev = kdev;

	dev->pci_domain = pci_get_domain(dev->dev);
	dev->pci_bus = pci_get_bus(dev->dev);
	dev->pci_slot = pci_get_slot(dev->dev);
	dev->pci_func = pci_get_function(dev->dev);

	dev->pci_vendor = pci_get_vendor(dev->dev);
	dev->pci_device = pci_get_device(dev->dev);
	dev->pci_subvendor = pci_get_subvendor(dev->dev);
	dev->pci_subdevice = pci_get_subdevice(dev->dev);

	sx_xlock(&drm_global_mutex);

	if ((ret = drm_fill_in_dev(dev, driver))) {
		DRM_ERROR("Failed to fill in dev: %d\n", ret);
		goto err_g1;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
		if (ret)
			goto err_g2;
	}

	if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
		goto err_g3;

	if (dev->driver->load) {
		ret = dev->driver->load(dev,
		    dev->id_entry->driver_private);
		if (ret)
			goto err_g4;
	}

	/* setup the grouping for the legacy output */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_mode_group_init_legacy_group(dev,
		    &dev->primary->mode_group);
		if (ret)
			goto err_g5;
	}

#ifdef FREEBSD_NOTYET
	list_add_tail(&dev->driver_item, &driver->device_list);
#endif /* FREEBSD_NOTYET */

	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
	    driver->name, driver->major, driver->minor, driver->patchlevel,
	    driver->date, device_get_nameunit(dev->dev), dev->primary->index);

	sx_xunlock(&drm_global_mutex);
	return 0;

err_g5:
	if (dev->driver->unload)
		dev->driver->unload(dev);
err_g4:
	drm_put_minor(&dev->primary);
err_g3:
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_put_minor(&dev->control);
err_g2:
	drm_cancel_fill_in_dev(dev);
err_g1:
	sx_xunlock(&drm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_get_pci_dev);

int
drm_pci_enable_msi(struct drm_device *dev)
{
	int msicount, ret;

	if (!drm_msi)
		return (-ENOENT);

	msicount = pci_msi_count(dev->dev);
	DRM_DEBUG("MSI count = %d\n", msicount);
	if (msicount > 1)
		msicount = 1;

	ret = pci_alloc_msi(dev->dev, &msicount);
	if (ret == 0) {
		DRM_INFO("MSI enabled %d message(s)\n", msicount);
		dev->msi_enabled = 1;
		dev->irqrid = 1;
	}

	return (-ret);
}

void
drm_pci_disable_msi(struct drm_device *dev)
{

	if (!dev->msi_enabled)
		return;

	pci_release_msi(dev->dev);
	dev->msi_enabled = 0;
	dev->irqrid = 0;
}
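
/*
 * Illustrative sketch (not taken from a real driver): how a driver's
 * interrupt setup path might pair the two helpers above.  "ddev" is a
 * hypothetical struct drm_device pointer.
 *
 *	if (drm_pci_enable_msi(ddev) != 0)
 *		DRM_INFO("MSI unavailable, using INTx interrupts\n");
 *	...
 *	drm_pci_disable_msi(ddev);	// on interrupt uninstall / detach
 */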

int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
	device_t root;
	int pos;
	u32 lnkcap = 0, lnkcap2 = 0;

	*mask = 0;
	if (!drm_pci_device_is_pcie(dev))
		return -EINVAL;

	root =
	    device_get_parent( /* pcib          */
	    device_get_parent( /*  `-- pci      */
	    device_get_parent( /*   `-- vgapci  */
	    dev->dev)));       /*    `-- drmn   */

	pos = 0;
	pci_find_cap(root, PCIY_EXPRESS, &pos);
	if (!pos)
		return -EINVAL;

	/* we've been informed VIA and ServerWorks don't make the cut */
	if (pci_get_vendor(root) == PCI_VENDOR_ID_VIA ||
	    pci_get_vendor(root) == PCI_VENDOR_ID_SERVERWORKS)
		return -EINVAL;

	lnkcap = pci_read_config(root, pos + PCIER_LINK_CAP, 4);
	lnkcap2 = pci_read_config(root, pos + PCIER_LINK_CAP2, 4);

	lnkcap &= PCIEM_LINK_CAP_MAX_SPEED;
	lnkcap2 &= 0xfe;

#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x02	/* Supported Link Speed 2.5GT/s */
#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x04	/* Supported Link Speed 5.0GT/s */
#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x08	/* Supported Link Speed 8.0GT/s */

	if (lnkcap2) { /* PCIE GEN 3.0 */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			*mask |= DRM_PCIE_SPEED_25;
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			*mask |= DRM_PCIE_SPEED_50;
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			*mask |= DRM_PCIE_SPEED_80;
	} else {
		if (lnkcap & 1)
			*mask |= DRM_PCIE_SPEED_25;
		if (lnkcap & 2)
			*mask |= DRM_PCIE_SPEED_50;
	}

	DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n",
	    pci_get_vendor(root), pci_get_device(root), lnkcap, lnkcap2);
	return 0;
}
EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);
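
/**
 * Example (illustrative only): querying the link-speed mask returned by
 * drm_pcie_get_speed_cap_mask() above.  "ddev" is a hypothetical
 * struct drm_device pointer.
 *
 * \code
 *	u32 mask;
 *
 *	if (drm_pcie_get_speed_cap_mask(ddev, &mask) == 0) {
 *		if (mask & DRM_PCIE_SPEED_80)
 *			DRM_INFO("PCIe 8.0 GT/s supported\n");
 *		else if (mask & DRM_PCIE_SPEED_50)
 *			DRM_INFO("PCIe 5.0 GT/s supported\n");
 *	}
 * \endcode
 */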