/* drm_pci.c -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
/**
 * \file drm_pci.c
 * \brief Functions and ioctls to manage PCI memory
 *
 * \warning These interfaces aren't stable yet.
 *
 * \todo Implement the remaining ioctls for the PCI pools.
 * \todo The wrappers here are so thin that they would be better off inlined.
 *
 * \author José Fonseca <jrfonseca@tungstengraphics.com>
 * \author Leif Delgass <ldelgass@retinalburn.net>
 */

/*
 * Copyright 2003 José Fonseca.
 * Copyright 2003 Leif Delgass.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <drm/drmP.h>

/**********************************************************************/
/** \name PCI memory */
/*@{*/
/**
 * \brief Allocate a PCI consistent memory block, for DMA.
 */
drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
{
	drm_dma_handle_t *dmah;
#if 1
	unsigned long addr;
	size_t sz;
#endif

	/* dma_alloc_coherent only guarantees alignment to the smallest
	 * PAGE_SIZE order which is greater than or equal to the requested size.
	 * Return NULL here for now to make sure nobody tries for larger alignment.
	 */
	if (align > size)
		return NULL;

	dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
	if (!dmah)
		return NULL;

	dmah->size = size;
	dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);

	if (dmah->vaddr == NULL) {
		kfree(dmah);
		return NULL;
	}

	memset(dmah->vaddr, 0, size);

	/* XXX - Is virt_to_page() legal for consistent mem? */
	/* Reserve */
	for (addr = (unsigned long)dmah->vaddr, sz = size;
	     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
		SetPageReserved(virt_to_page(addr));
	}

	return dmah;
}

EXPORT_SYMBOL(drm_pci_alloc);
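
/*
 * Illustrative sketch (not part of the original code): a driver would
 * typically allocate a small DMA buffer like this and release it with
 * drm_pci_free() on teardown.  The variable name "dmah" below is only an
 * example.
 *
 *	drm_dma_handle_t *dmah;
 *
 *	dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
 *	if (!dmah)
 *		return -ENOMEM;
 *	// dmah->vaddr is the kernel mapping, dmah->busaddr the bus address
 *	drm_pci_free(dev, dmah);
 */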

/**
 * \brief Free a PCI consistent memory block without freeing its descriptor.
 *
 * This function is for internal use in the Linux-specific DRM core code.
 */
void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
#if 1
	unsigned long addr;
	size_t sz;
#endif

	if (dmah->vaddr) {
		/* XXX - Is virt_to_page() legal for consistent mem? */
		/* Unreserve */
		for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
		     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
			ClearPageReserved(virt_to_page(addr));
		}
		dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
				  dmah->busaddr);
	}
}

/**
 * \brief Free a PCI consistent memory block
 */
void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
	__drm_pci_free(dev, dmah);
	kfree(dmah);
}

EXPORT_SYMBOL(drm_pci_free);
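
/*
 * Illustrative note (not from the original code): callers that manage the
 * drm_dma_handle_t descriptor themselves, as the DRM core does internally,
 * use __drm_pci_free() to release only the backing memory, whereas
 * drm_pci_free() also kfree()s the handle returned by drm_pci_alloc().
 */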

#ifdef CONFIG_PCI

static int drm_get_pci_domain(struct drm_device *dev)
{
#ifndef __alpha__
	/* For historical reasons, drm_get_pci_domain() is busticated
	 * on most archs and has to remain so for userspace interface
	 * < 1.4, except on alpha which was right from the beginning
	 */
	if (dev->if_version < 0x10004)
		return 0;
#endif /* __alpha__ */

	return pci_domain_nr(dev->pdev->bus);
}

static int drm_pci_get_irq(struct drm_device *dev)
{
	return dev->pdev->irq;
}

static const char *drm_pci_get_name(struct drm_device *dev)
{
	struct pci_driver *pdriver = dev->driver->kdriver.pci;
	return pdriver->name;
}

int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
{
	int len, ret;
	struct pci_driver *pdriver = dev->driver->kdriver.pci;
	master->unique_len = 40;
	master->unique_size = master->unique_len;
	master->unique = kmalloc(master->unique_size, GFP_KERNEL);
	if (master->unique == NULL)
		return -ENOMEM;

	len = snprintf(master->unique, master->unique_len,
		       "pci:%04x:%02x:%02x.%d",
		       drm_get_pci_domain(dev),
		       dev->pdev->bus->number,
		       PCI_SLOT(dev->pdev->devfn),
		       PCI_FUNC(dev->pdev->devfn));

	if (len >= master->unique_len) {
		DRM_ERROR("buffer overflow");
		ret = -EINVAL;
		goto err;
	} else
		master->unique_len = len;

	dev->devname =
		kmalloc(strlen(pdriver->name) +
			master->unique_len + 2, GFP_KERNEL);

	if (dev->devname == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	sprintf(dev->devname, "%s@%s", pdriver->name,
		master->unique);

	return 0;
err:
	return ret;
}
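
/*
 * For illustration (not in the original source): with the format string
 * above, a device in domain 0, bus 2, slot 0, function 1 gets the unique
 * string "pci:0000:02:00.1".  Domain, bus and slot are printed in
 * hexadecimal and the function in decimal.
 */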

int drm_pci_set_unique(struct drm_device *dev,
		       struct drm_master *master,
		       struct drm_unique *u)
{
	int domain, bus, slot, func, ret;
	const char *bus_name;

	master->unique_len = u->unique_len;
	master->unique_size = u->unique_len + 1;
	master->unique = kmalloc(master->unique_size, GFP_KERNEL);
	if (!master->unique) {
		ret = -ENOMEM;
		goto err;
	}

	if (copy_from_user(master->unique, u->unique, master->unique_len)) {
		ret = -EFAULT;
		goto err;
	}

	master->unique[master->unique_len] = '\0';

	bus_name = dev->driver->bus->get_name(dev);
	dev->devname = kmalloc(strlen(bus_name) +
			       strlen(master->unique) + 2, GFP_KERNEL);
	if (!dev->devname) {
		ret = -ENOMEM;
		goto err;
	}

	sprintf(dev->devname, "%s@%s", bus_name,
		master->unique);

	/* Return error if the busid submitted doesn't match the device's actual
	 * busid.
	 */
	ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
	if (ret != 3) {
		ret = -EINVAL;
		goto err;
	}

	domain = bus >> 8;
	bus &= 0xff;

	if ((domain != drm_get_pci_domain(dev)) ||
	    (bus != dev->pdev->bus->number) ||
	    (slot != PCI_SLOT(dev->pdev->devfn)) ||
	    (func != PCI_FUNC(dev->pdev->devfn))) {
		ret = -EINVAL;
		goto err;
	}
	return 0;
err:
	return ret;
}
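
/*
 * For illustration (not in the original source): the legacy unique string
 * parsed above has the form "PCI:bus:slot:func", e.g. "PCI:2:0:1" for bus 2,
 * slot 0, function 1.  The PCI domain, if any, is carried in the upper bits
 * of the bus field, which is why the code splits it with "bus >> 8".
 */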

static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
	if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
	    (p->busnum & 0xff) != dev->pdev->bus->number ||
	    p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
		return -EINVAL;

	p->irq = dev->pdev->irq;

	DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
		  p->irq);
	return 0;
}

int drm_pci_agp_init(struct drm_device *dev)
{
	if (drm_core_has_AGP(dev)) {
		if (drm_pci_device_is_agp(dev))
			dev->agp = drm_agp_init(dev);
		if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
		    && (dev->agp == NULL)) {
			DRM_ERROR("Cannot initialize the agpgart module.\n");
			return -EINVAL;
		}
		if (drm_core_has_MTRR(dev)) {
			if (dev->agp)
				dev->agp->agp_mtrr =
					mtrr_add(dev->agp->agp_info.aper_base,
						 dev->agp->agp_info.aper_size *
						 1024 * 1024, MTRR_TYPE_WRCOMB, 1);
		}
	}
	return 0;
}

static struct drm_bus drm_pci_bus = {
	.bus_type = DRIVER_BUS_PCI,
	.get_irq = drm_pci_get_irq,
	.get_name = drm_pci_get_name,
	.set_busid = drm_pci_set_busid,
	.set_unique = drm_pci_set_unique,
	.irq_by_busid = drm_pci_irq_by_busid,
	.agp_init = drm_pci_agp_init,
};

/**
 * Register a PCI device with the DRM subsystem.
 *
 * \param pdev PCI device structure
 * \param ent entry from the PCI ID table with device type flags
 * \return zero on success or a negative number on failure.
 *
 * Attempt to get the inter-module "drm" information. If we are first,
 * register the character device and the inter-module information.
 * Try to register; if we fail to register, back out the previous work.
 */
int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
		    struct drm_driver *driver)
{
	struct drm_device *dev;
	int ret;

	DRM_DEBUG("\n");

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	ret = pci_enable_device(pdev);
	if (ret)
		goto err_g1;

	dev->pdev = pdev;
	dev->dev = &pdev->dev;

	dev->pci_device = pdev->device;
	dev->pci_vendor = pdev->vendor;

#ifdef __alpha__
	dev->hose = pdev->sysdata;
#endif

	mutex_lock(&drm_global_mutex);

	if ((ret = drm_fill_in_dev(dev, ent, driver))) {
		printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
		goto err_g2;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		pci_set_drvdata(pdev, dev);
		ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
		if (ret)
			goto err_g2;
	}

	if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
		goto err_g3;

	if (dev->driver->load) {
		ret = dev->driver->load(dev, ent->driver_data);
		if (ret)
			goto err_g4;
	}

	/* setup the grouping for the legacy output */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_mode_group_init_legacy_group(dev,
						&dev->primary->mode_group);
		if (ret)
			goto err_g4;
	}

	list_add_tail(&dev->driver_item, &driver->device_list);

	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
		 driver->name, driver->major, driver->minor, driver->patchlevel,
		 driver->date, pci_name(pdev), dev->primary->index);

	mutex_unlock(&drm_global_mutex);
	return 0;

err_g4:
	drm_put_minor(&dev->primary);
err_g3:
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_put_minor(&dev->control);
err_g2:
	pci_disable_device(pdev);
err_g1:
	kfree(dev);
	mutex_unlock(&drm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_get_pci_dev);
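
/*
 * Minimal sketch of how a modesetting driver would call drm_get_pci_dev()
 * from its PCI probe callback; "foo_pci_probe" and "foo_driver" are
 * hypothetical names, not part of this file.
 *
 *	static int foo_pci_probe(struct pci_dev *pdev,
 *				 const struct pci_device_id *ent)
 *	{
 *		return drm_get_pci_dev(pdev, ent, &foo_driver);
 *	}
 */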

/**
 * PCI device initialization. Called directly from modules at load time.
 *
 * \return zero on success or a negative number on failure.
 *
 * Initializes a drm_device structure, registering the
 * stubs and initializing the AGP device.
 *
 * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
 * after the initialization for driver customization.
 */
int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
	struct pci_dev *pdev = NULL;
	const struct pci_device_id *pid;
	int i;

	DRM_DEBUG("\n");

	INIT_LIST_HEAD(&driver->device_list);
	driver->kdriver.pci = pdriver;
	driver->bus = &drm_pci_bus;

	if (driver->driver_features & DRIVER_MODESET)
		return pci_register_driver(pdriver);

	/* If not using KMS, fall back to stealth mode manual scanning. */
	for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
		pid = &pdriver->id_table[i];

		/* Loop around setting up a DRM device for each PCI device
		 * matching our ID and device class.  If we had the internal
		 * function that pci_get_subsys and pci_get_class used, we'd
		 * be able to just pass pid in instead of doing a two-stage
		 * thing.
		 */
		pdev = NULL;
		while ((pdev =
			pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
				       pid->subdevice, pdev)) != NULL) {
			if ((pdev->class & pid->class_mask) != pid->class)
				continue;

			/* stealth mode requires a manual probe */
			pci_dev_get(pdev);
			drm_get_pci_dev(pdev, pid, driver);
		}
	}
	return 0;
}

#else

int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
	return -1;
}

#endif

EXPORT_SYMBOL(drm_pci_init);
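
/*
 * Minimal sketch of the expected call pattern (not part of the original
 * file): a driver module registers and unregisters itself through
 * drm_pci_init()/drm_pci_exit().  "foo_driver", "foo_pci_driver", "foo_init"
 * and "foo_exit" are hypothetical names.
 *
 *	static int __init foo_init(void)
 *	{
 *		return drm_pci_init(&foo_driver, &foo_pci_driver);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		drm_pci_exit(&foo_driver, &foo_pci_driver);
 *	}
 *
 *	module_init(foo_init);
 *	module_exit(foo_exit);
 */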

/*@}*/

void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
{
	struct drm_device *dev, *tmp;
	DRM_DEBUG("\n");

	if (driver->driver_features & DRIVER_MODESET) {
		pci_unregister_driver(pdriver);
	} else {
		list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
			drm_put_dev(dev);
	}
	DRM_INFO("Module unloaded\n");
}
EXPORT_SYMBOL(drm_pci_exit);

int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
	struct pci_dev *root;
	int pos;
	u32 lnkcap, lnkcap2;

	*mask = 0;
	if (!dev->pdev)
		return -EINVAL;

	if (!pci_is_pcie(dev->pdev))
		return -EINVAL;

	root = dev->pdev->bus->self;

	pos = pci_pcie_cap(root);
	if (!pos)
		return -EINVAL;

	/* we've been informed that VIA and ServerWorks don't make the cut */
	if (root->vendor == PCI_VENDOR_ID_VIA ||
	    root->vendor == PCI_VENDOR_ID_SERVERWORKS)
		return -EINVAL;

	pci_read_config_dword(root, pos + PCI_EXP_LNKCAP, &lnkcap);
	pci_read_config_dword(root, pos + PCI_EXP_LNKCAP2, &lnkcap2);

	lnkcap &= PCI_EXP_LNKCAP_SLS;
	lnkcap2 &= 0xfe;

	if (lnkcap2) { /* PCIE GEN 3.0 */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			*mask |= DRM_PCIE_SPEED_25;
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			*mask |= DRM_PCIE_SPEED_50;
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			*mask |= DRM_PCIE_SPEED_80;
	} else {
		if (lnkcap & 1)
			*mask |= DRM_PCIE_SPEED_25;
		if (lnkcap & 2)
			*mask |= DRM_PCIE_SPEED_50;
	}

	DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", root->vendor, root->device, lnkcap, lnkcap2);
	return 0;
}
EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);
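
/*
 * Illustrative usage (not from the original code): a driver probing for
 * PCIe gen2 support on its upstream bridge might do the following;
 * "speed_mask" is a hypothetical local variable.
 *
 *	u32 speed_mask;
 *
 *	if (drm_pcie_get_speed_cap_mask(dev, &speed_mask) == 0 &&
 *	    (speed_mask & DRM_PCIE_SPEED_50))
 *		DRM_DEBUG("upstream bridge supports 5.0 GT/s\n");
 */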
517