xref: /freebsd/sys/dev/drm2/drm_pci.c (revision 685dc743dc3b5645e34836464128e1c0558b404b)
/* drm_pci.c -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
/**
 * \file drm_pci.c
 * \brief Functions and ioctls to manage PCI memory
 *
 * \warning These interfaces aren't stable yet.
 *
 * \todo Implement the remaining ioctls for the PCI pools.
 * \todo The wrappers here are so thin that they would be better off inlined.
 *
 * \author José Fonseca <jrfonseca@tungstengraphics.com>
 * \author Leif Delgass <ldelgass@retinalburn.net>
 */

/*
 * Copyright 2003 José Fonseca.
 * Copyright 2003 Leif Delgass.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
#include <dev/drm2/drmP.h>

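/*
 * hw.drm.msi is a loader tunable (CTLFLAG_RDTUN): set hw.drm.msi="0" in
 * loader.conf(5) to force legacy INTx interrupts instead of MSI.
 */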
static int drm_msi = 1;	/* Enable by default. */
SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
    "DRM device");
SYSCTL_INT(_hw_drm, OID_AUTO, msi, CTLFLAG_RDTUN, &drm_msi, 1,
    "Enable MSI interrupts for drm devices");

/**********************************************************************/
/** \name PCI memory */
/*@{*/

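/*
 * bus_dmamap_load() callback: record the bus address of the single
 * resolved segment in the DMA handle.  The tag created in drm_pci_alloc()
 * allows only one segment, so the allocation is physically contiguous.
 */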
static void
drm_pci_busdma_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	drm_dma_handle_t *dmah = arg;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("drm_pci_busdma_callback: bad dma segment count"));
	dmah->busaddr = segs[0].ds_addr;
}

/**
 * \brief Allocate a PCI-consistent memory block for DMA.
 */
drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size,
    size_t align, dma_addr_t maxaddr)
{
	drm_dma_handle_t *dmah;
	int ret;

	/* Need power-of-two alignment, so fail the allocation if it isn't. */
	if ((align & (align - 1)) != 0) {
		DRM_ERROR("drm_pci_alloc with non-power-of-two alignment %d\n",
		    (int)align);
		return NULL;
	}

	dmah = malloc(sizeof(drm_dma_handle_t), DRM_MEM_DMA, M_ZERO | M_NOWAIT);
	if (dmah == NULL)
		return NULL;

	/* Make sure we aren't holding mutexes here */
	mtx_assert(&dev->dma_lock, MA_NOTOWNED);
	if (mtx_owned(&dev->dma_lock))
	    DRM_ERROR("called while holding dma_lock\n");

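	/*
	 * Describe this single allocation to busdma: the requested
	 * power-of-two alignment, one contiguous segment of at most 'size'
	 * bytes, and an upper bound of 'maxaddr' so the buffer stays
	 * addressable by the device.
	 */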
	ret = bus_dma_tag_create(
	    bus_get_dma_tag(dev->dev), /* parent */
	    align, 0, /* align, boundary */
	    maxaddr, BUS_SPACE_MAXADDR, /* lowaddr, highaddr */
	    NULL, NULL, /* filtfunc, filtfuncargs */
	    size, 1, size, /* maxsize, nsegs, maxsegsize */
	    0, NULL, NULL, /* flags, lockfunc, lockfuncargs */
	    &dmah->tag);
	if (ret != 0) {
		free(dmah, DRM_MEM_DMA);
		return NULL;
	}

	ret = bus_dmamem_alloc(dmah->tag, &dmah->vaddr,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_NOCACHE, &dmah->map);
	if (ret != 0) {
		bus_dma_tag_destroy(dmah->tag);
		free(dmah, DRM_MEM_DMA);
		return NULL;
	}

	ret = bus_dmamap_load(dmah->tag, dmah->map, dmah->vaddr, size,
	    drm_pci_busdma_callback, dmah, BUS_DMA_NOWAIT);
	if (ret != 0) {
		bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
		bus_dma_tag_destroy(dmah->tag);
		free(dmah, DRM_MEM_DMA);
		return NULL;
	}

	return dmah;
}

EXPORT_SYMBOL(drm_pci_alloc);

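/*
 * Example (a sketch, not code from this file): a driver needing a
 * page-sized, page-aligned buffer reachable with 32-bit device addressing
 * might use the pair of calls below; dmah->vaddr is the kernel mapping and
 * dmah->busaddr the address to program into the device.
 *
 *	drm_dma_handle_t *dmah;
 *
 *	dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE,
 *	    BUS_SPACE_MAXADDR_32BIT);
 *	if (dmah == NULL)
 *		return (-ENOMEM);
 *	...
 *	drm_pci_free(dev, dmah);
 */
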
/**
 * \brief Free a PCI consistent memory block without freeing its descriptor.
 *
 * This function is for internal use in the Linux-specific DRM core code.
 */
void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
	if (dmah == NULL)
		return;

	bus_dmamap_unload(dmah->tag, dmah->map);
	bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
	bus_dma_tag_destroy(dmah->tag);
}

/**
 * \brief Free a PCI consistent memory block
 */
void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
	__drm_pci_free(dev, dmah);
	free(dmah, DRM_MEM_DMA);
}

EXPORT_SYMBOL(drm_pci_free);

static int drm_get_pci_domain(struct drm_device *dev)
{
	return dev->pci_domain;
}

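/*
 * Return the device interrupt line, lazily allocating the IRQ resource on
 * first use.  dev->irqrid selects it: rid 0 is the legacy INTx line, rid 1
 * is used once drm_pci_enable_msi() has allocated an MSI vector.
 */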
static int drm_pci_get_irq(struct drm_device *dev)
{

	if (dev->irqr)
		return (dev->irq);

	dev->irqr = bus_alloc_resource_any(dev->dev, SYS_RES_IRQ,
	    &dev->irqrid, RF_SHAREABLE);
	if (!dev->irqr) {
		dev_err(dev->dev, "Failed to allocate IRQ\n");
		return (0);
	}

	dev->irq = (int) rman_get_start(dev->irqr);

	return (dev->irq);
}

static void drm_pci_free_irq(struct drm_device *dev)
{
	if (dev->irqr == NULL)
		return;

	bus_release_resource(dev->dev, SYS_RES_IRQ,
	    dev->irqrid, dev->irqr);

	dev->irqr = NULL;
	dev->irq = 0;
}

static const char *drm_pci_get_name(struct drm_device *dev)
{
	return dev->driver->name;
}

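/*
 * Build the canonical bus id string for this device, e.g. "pci:0000:02:00.0"
 * (domain:bus:slot.function), which userland can compare when opening a
 * device by bus id.
 */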
int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
{
	int len, ret;
	master->unique_len = 40;
	master->unique_size = master->unique_len;
	master->unique = malloc(master->unique_size, DRM_MEM_DRIVER, M_NOWAIT);
	if (master->unique == NULL)
		return -ENOMEM;

	len = snprintf(master->unique, master->unique_len,
		       "pci:%04x:%02x:%02x.%d",
		       dev->pci_domain,
		       dev->pci_bus,
		       dev->pci_slot,
		       dev->pci_func);

	if (len >= master->unique_len) {
		DRM_ERROR("buffer overflow");
		ret = -EINVAL;
		goto err;
	} else
		master->unique_len = len;

	return 0;
err:
	return ret;
}

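/*
 * Adopt a bus id string supplied by userland.  The legacy format is
 * "PCI:bus:slot:func" with the domain packed into the upper bits of the
 * bus number; it must match this device or the call fails with EINVAL.
 */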
int drm_pci_set_unique(struct drm_device *dev,
		       struct drm_master *master,
		       struct drm_unique *u)
{
	int domain, bus, slot, func, ret;

	master->unique_len = u->unique_len;
	master->unique_size = u->unique_len + 1;
	master->unique = malloc(master->unique_size, DRM_MEM_DRIVER, M_WAITOK);
	if (!master->unique) {
		ret = -ENOMEM;
		goto err;
	}

	if (copy_from_user(master->unique, u->unique, master->unique_len)) {
		ret = -EFAULT;
		goto err;
	}

	master->unique[master->unique_len] = '\0';

	/* Return error if the busid submitted doesn't match the device's actual
	 * busid.
	 */
	ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
	if (ret != 3) {
		ret = -EINVAL;
		goto err;
	}

	domain = bus >> 8;
	bus &= 0xff;

	if ((domain != dev->pci_domain) ||
	    (bus != dev->pci_bus) ||
	    (slot != dev->pci_slot) ||
	    (func != dev->pci_func)) {
		ret = -EINVAL;
		goto err;
	}
	return 0;
err:
	return ret;
}

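/*
 * Report the IRQ for the device identified by busnum/devnum/funcnum, but
 * only if that bus id names this device.
 */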
static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
	if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
	    (p->busnum & 0xff) != dev->pci_bus ||
	    p->devnum != dev->pci_slot || p->funcnum != dev->pci_func)
		return -EINVAL;

	p->irq = dev->irq;

	DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
		  p->irq);
	return 0;
}

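/*
 * Set up AGP for devices that sit on an AGP bus: initialize the agpgart
 * module and, when MTRRs are available, map the aperture write-combining.
 * agp_mtrr records whether that mapping succeeded (1) or failed (-1).
 */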
int drm_pci_agp_init(struct drm_device *dev)
{
	if (drm_core_has_AGP(dev)) {
		if (drm_pci_device_is_agp(dev))
			dev->agp = drm_agp_init(dev);
		if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
		    && (dev->agp == NULL)) {
			DRM_ERROR("Cannot initialize the agpgart module.\n");
			return -EINVAL;
		}
		if (drm_core_has_MTRR(dev)) {
			if (dev->agp && dev->agp->agp_info.ai_aperture_base != 0) {
				if (drm_mtrr_add(dev->agp->agp_info.ai_aperture_base,
				    dev->agp->agp_info.ai_aperture_size, DRM_MTRR_WC) == 0)
					dev->agp->agp_mtrr = 1;
				else
					dev->agp->agp_mtrr = -1;
			}
		}
	}
	return 0;
}

static struct drm_bus drm_pci_bus = {
	.bus_type = DRIVER_BUS_PCI,
	.get_irq = drm_pci_get_irq,
	.free_irq = drm_pci_free_irq,
	.get_name = drm_pci_get_name,
	.set_busid = drm_pci_set_busid,
	.set_unique = drm_pci_set_unique,
	.irq_by_busid = drm_pci_irq_by_busid,
	.agp_init = drm_pci_agp_init,
};

/**
 * Register.
 *
 * \param kdev newbus device handle (device_t)
 * \param dev DRM device structure
 * \param driver DRM driver for this device
 * \return zero on success or a negative number on failure.
 *
 * Attempts to get inter-module "drm" information.  If we are first,
 * register the character device and inter-module information.
 * Try to register; if we fail to register, back out previous work.
 */
int drm_get_pci_dev(device_t kdev, struct drm_device *dev,
		    struct drm_driver *driver)
{
	int ret;

	DRM_DEBUG("\n");

	driver->bus = &drm_pci_bus;

	dev->dev = kdev;

	dev->pci_domain = pci_get_domain(dev->dev);
	dev->pci_bus = pci_get_bus(dev->dev);
	dev->pci_slot = pci_get_slot(dev->dev);
	dev->pci_func = pci_get_function(dev->dev);

	dev->pci_vendor = pci_get_vendor(dev->dev);
	dev->pci_device = pci_get_device(dev->dev);
	dev->pci_subvendor = pci_get_subvendor(dev->dev);
	dev->pci_subdevice = pci_get_subdevice(dev->dev);

	sx_xlock(&drm_global_mutex);

	if ((ret = drm_fill_in_dev(dev, driver))) {
		DRM_ERROR("Failed to fill in dev: %d\n", ret);
		goto err_g1;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
		if (ret)
			goto err_g2;
	}

	if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
		goto err_g3;

	if (dev->driver->load) {
		ret = dev->driver->load(dev,
		    dev->id_entry->driver_private);
		if (ret)
			goto err_g4;
	}

	/* setup the grouping for the legacy output */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_mode_group_init_legacy_group(dev,
						&dev->primary->mode_group);
		if (ret)
			goto err_g5;
	}

#ifdef FREEBSD_NOTYET
	list_add_tail(&dev->driver_item, &driver->device_list);
#endif /* FREEBSD_NOTYET */

	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
		 driver->name, driver->major, driver->minor, driver->patchlevel,
		 driver->date, device_get_nameunit(dev->dev), dev->primary->index);

	sx_xunlock(&drm_global_mutex);
	return 0;

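/* Error paths: unwind in the reverse order of the setup above. */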
err_g5:
	if (dev->driver->unload)
		dev->driver->unload(dev);
err_g4:
	drm_put_minor(&dev->primary);
err_g3:
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_put_minor(&dev->control);
err_g2:
	drm_cancel_fill_in_dev(dev);
err_g1:
	sx_xunlock(&drm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_get_pci_dev);

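/*
 * Switch the device from legacy INTx to MSI.  Only a single message is
 * requested even if the hardware advertises more, since DRM uses one
 * interrupt; on success the IRQ resource id is moved to rid 1.
 */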
int
drm_pci_enable_msi(struct drm_device *dev)
{
	int msicount, ret;

	if (!drm_msi)
		return (-ENOENT);

	msicount = pci_msi_count(dev->dev);
	DRM_DEBUG("MSI count = %d\n", msicount);
	if (msicount > 1)
		msicount = 1;

	ret = pci_alloc_msi(dev->dev, &msicount);
	if (ret == 0) {
		DRM_INFO("MSI enabled %d message(s)\n", msicount);
		dev->msi_enabled = 1;
		dev->irqrid = 1;
	}

	return (-ret);
}

void
drm_pci_disable_msi(struct drm_device *dev)
{

	if (!dev->msi_enabled)
		return;

	pci_release_msi(dev->dev);
	dev->msi_enabled = 0;
	dev->irqrid = 0;
}

int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
	device_t root;
	int pos;
	u32 lnkcap = 0, lnkcap2 = 0;

	*mask = 0;
	if (!drm_pci_device_is_pcie(dev))
		return -EINVAL;

	root =
	    device_get_parent( /* pcib             */
	    device_get_parent( /* `-- pci          */
	    device_get_parent( /*     `-- vgapci   */
	    dev->dev)));       /*         `-- drmn */

	pos = 0;
	pci_find_cap(root, PCIY_EXPRESS, &pos);
	if (!pos)
		return -EINVAL;

	/* We've been told VIA and ServerWorks bridges don't make the cut. */
	if (pci_get_vendor(root) == PCI_VENDOR_ID_VIA ||
	    pci_get_vendor(root) == PCI_VENDOR_ID_SERVERWORKS)
		return -EINVAL;

	lnkcap = pci_read_config(root, pos + PCIER_LINK_CAP, 4);
	lnkcap2 = pci_read_config(root, pos + PCIER_LINK_CAP2, 4);

	lnkcap &= PCIEM_LINK_CAP_MAX_SPEED;
	lnkcap2 &= 0xfe;

#define	PCI_EXP_LNKCAP2_SLS_2_5GB 0x02	/* Supported Link Speed 2.5GT/s */
#define	PCI_EXP_LNKCAP2_SLS_5_0GB 0x04	/* Supported Link Speed 5.0GT/s */
#define	PCI_EXP_LNKCAP2_SLS_8_0GB 0x08	/* Supported Link Speed 8.0GT/s */

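	/*
	 * A non-zero Link Capabilities 2 register (PCIe 3.0 and later) lists
	 * every supported speed; otherwise fall back to the Max Link Speed
	 * field of the original Link Capabilities register.
	 */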
	if (lnkcap2) { /* PCIE GEN 3.0 */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			*mask |= DRM_PCIE_SPEED_25;
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			*mask |= DRM_PCIE_SPEED_50;
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			*mask |= DRM_PCIE_SPEED_80;
	} else {
		if (lnkcap & 1)
			*mask |= DRM_PCIE_SPEED_25;
		if (lnkcap & 2)
			*mask |= DRM_PCIE_SPEED_50;
	}

	DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n",
	    pci_get_vendor(root), pci_get_device(root), lnkcap, lnkcap2);
	return 0;
}
EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);