/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vga_switcheroo.h>

#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drv.h"
#include "radeon_kms.h"

#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_has_atpx(void);
#else
static inline bool radeon_has_atpx(void) { return false; }
#endif

/**
 * radeon_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 * It calls radeon_modeset_fini() to tear down the
 * displays, and radeon_device_fini() to tear down
 * the rest of the device (CP, writeback, etc.).
 */
void radeon_driver_unload_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	if (rdev == NULL)
		return;

	if (rdev->rmmio == NULL)
		goto done_free;

	if (radeon_is_px(dev)) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	radeon_acpi_fini(rdev);

	radeon_modeset_fini(rdev);
	radeon_device_fini(rdev);

	if (rdev->agp)
		arch_phys_wc_del(rdev->agp->agp_mtrr);
	kfree(rdev->agp);
	rdev->agp = NULL;

done_free:
	kfree(rdev);
	dev->dev_private = NULL;
}

/**
 * radeon_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * It calls radeon_device_init() to set up the non-display
 * parts of the chip (asic init, CP, writeback, etc.), and
 * radeon_modeset_init() to set up the display parts
 * (crtcs, encoders, hotplug detect, etc.).
 * Returns 0 on success, error on failure.
 */
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct radeon_device *rdev = dev->dev_private;
	int r, acpi_status;

#ifdef __alpha__
	rdev->hose = pdev->sysdata;
#endif

	if (pci_find_capability(pdev, PCI_CAP_ID_AGP))
		rdev->agp = radeon_agp_head_init(dev);
	if (rdev->agp) {
		rdev->agp->agp_mtrr = arch_phys_wc_add(
			rdev->agp->agp_info.aper_base,
			rdev->agp->agp_info.aper_size *
			1024 * 1024);
	}

	/* update BUS flag */
	if (pci_find_capability(pdev, PCI_CAP_ID_AGP)) {
		flags |= RADEON_IS_AGP;
	} else if (pci_is_pcie(pdev)) {
		flags |= RADEON_IS_PCIE;
	} else {
		flags |= RADEON_IS_PCI;
	}

	if ((radeon_runtime_pm != 0) &&
	    radeon_has_atpx() &&
	    ((flags & RADEON_IS_IGP) == 0) &&
	    !pci_is_thunderbolt_attached(pdev))
		flags |= RADEON_IS_PX;

	/* radeon_device_init() should report only fatal errors,
	 * such as memory allocation, iomapping or memory manager
	 * initialization failures.  It must properly initialize the
	 * GPU MC controller and permit VRAM allocation.
	 */
	r = radeon_device_init(rdev, dev, pdev, flags);
	if (r) {
		dev_err(dev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Likewise, modeset init should fail only on a fatal error;
	 * otherwise it should provide enough functionality for
	 * shadowfb to run.
	 */
	r = radeon_modeset_init(rdev);
	if (r)
		dev_err(dev->dev, "Fatal error during modeset init\n");

	/* Call ACPI methods: they require modeset init,
	 * but failure is not fatal.
	 */
	if (!r) {
		acpi_status = radeon_acpi_init(rdev);
		if (acpi_status)
			dev_dbg(dev->dev, "Error during ACPI methods call\n");
	}

	if (radeon_is_px(dev)) {
		dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

out:
	if (r)
		radeon_driver_unload_kms(dev);

	return r;
}

/**
 * radeon_set_filp_rights - Set filp rights.
 *
 * @dev: drm dev pointer
 * @owner: pointer to the drm file that currently owns the right
 * @applier: drm file requesting or releasing the right
 * @value: on input, 1 to request or 0 to release the right;
 *         on output, 1 if @applier owns the right, 0 otherwise
 *
 * Sets the filp rights for the device (all asics).
 */
static void radeon_set_filp_rights(struct drm_device *dev,
				   struct drm_file **owner,
				   struct drm_file *applier,
				   uint32_t *value)
{
	struct radeon_device *rdev = dev->dev_private;

	mutex_lock(&rdev->gem.mutex);
	if (*value == 1) {
		/* wants rights */
		if (!*owner)
			*owner = applier;
	} else if (*value == 0) {
		/* revokes rights */
		if (*owner == applier)
			*owner = NULL;
	}
	*value = *owner == applier ? 1 : 0;
	mutex_unlock(&rdev->gem.mutex);
}

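/*
 * Illustrative sketch (an assumption, not part of the driver): the
 * RADEON_INFO_WANT_HYPERZ and RADEON_INFO_WANT_CMASK requests below use
 * radeon_set_filp_rights() to grant exclusive access to a single drm file.
 * Because the value is both an input and an output, a userspace
 * request/release through the libdrm wrapper looks roughly like this
 * (error handling omitted):
 *
 *	struct drm_radeon_info info = {};
 *	uint32_t want = 1;	// 1 = request access, 0 = release it
 *
 *	info.request = RADEON_INFO_WANT_HYPERZ;
 *	info.value = (uintptr_t)&want;
 *	drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
 *	// 'want' is now 1 if this fd owns Hyper-Z access, 0 otherwise.
 */
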
/*
 * Userspace get information ioctl
 */
/**
 * radeon_info_ioctl - answer a device specific request.
 *
 * @dev: drm device pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers.  Examples include: pci device id, pipeline params, tiling params,
 * etc. (all asics).
 * Returns 0 on success, negative error code on failure.
 */
int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_info *info = data;
	struct radeon_mode_info *minfo = &rdev->mode_info;
	uint32_t *value, value_tmp, *value_ptr, value_size;
	struct ttm_resource_manager *man;
	uint64_t value64;
	struct drm_crtc *crtc;
	int i, found;

	value_ptr = (uint32_t *)((unsigned long)info->value);
	value = &value_tmp;
	value_size = sizeof(uint32_t);

	switch (info->request) {
	case RADEON_INFO_DEVICE_ID:
		*value = to_pci_dev(dev->dev)->device;
		break;
	case RADEON_INFO_NUM_GB_PIPES:
		*value = rdev->num_gb_pipes;
		break;
	case RADEON_INFO_NUM_Z_PIPES:
		*value = rdev->num_z_pipes;
		break;
	case RADEON_INFO_ACCEL_WORKING:
		/* xf86-video-ati 6.13.0 relies on this being false for evergreen */
		if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
			*value = false;
		else
			*value = rdev->accel_working;
		break;
	case RADEON_INFO_CRTC_FROM_ID:
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		for (i = 0, found = 0; i < rdev->num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == *value) {
				struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
				*value = radeon_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", *value);
			return -EINVAL;
		}
		break;
	case RADEON_INFO_ACCEL_WORKING2:
		if (rdev->family == CHIP_HAWAII) {
			if (rdev->accel_working) {
				if (rdev->new_fw)
					*value = 3;
				else
					*value = 2;
			} else {
				*value = 0;
			}
		} else {
			*value = rdev->accel_working;
		}
		break;
	case RADEON_INFO_TILING_CONFIG:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.tile_config;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.tile_config;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.tile_config;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.tile_config;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.tile_config;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.tile_config;
		else {
			DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_WANT_HYPERZ:
		/* The "value" here is both an input and output parameter.
		 * If the input value is 1, filp requests hyper-z access.
		 * If the input value is 0, filp revokes its hyper-z access.
		 *
		 * When returning, the value is 1 if filp owns hyper-z access,
		 * 0 otherwise. */
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		if (*value >= 2) {
			DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", *value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, value);
		break;
	case RADEON_INFO_WANT_CMASK:
		/* The same logic as Hyper-Z. */
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		if (*value >= 2) {
			DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", *value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, value);
		break;
	case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
		/* return clock value in kHz */
		if (rdev->asic->get_xclk)
			*value = radeon_get_xclk(rdev) * 10;
		else
			*value = rdev->clock.spll.reference_freq * 10;
		break;
	case RADEON_INFO_NUM_BACKENDS:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_backends_per_se *
				rdev->config.cik.max_shader_engines;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_backends_per_se *
				rdev->config.si.max_shader_engines;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_backends_per_se *
				rdev->config.cayman.max_shader_engines;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.max_backends;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.max_backends;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.max_backends;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_NUM_TILE_PIPES:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_tile_pipes;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_tile_pipes;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_tile_pipes;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.max_tile_pipes;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.max_tile_pipes;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.max_tile_pipes;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_FUSION_GART_WORKING:
		*value = 1;
		break;
	case RADEON_INFO_BACKEND_MAP:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.backend_map;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.backend_map;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.backend_map;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.backend_map;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.backend_map;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.backend_map;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_VA_START:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		*value = RADEON_VA_RESERVED_SIZE;
		break;
	case RADEON_INFO_IB_VM_MAX_SIZE:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		*value = RADEON_IB_VM_MAX_SIZE;
		break;
	case RADEON_INFO_MAX_PIPES:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_cu_per_sh;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_cu_per_sh;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_pipes_per_simd;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.max_pipes;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.max_pipes;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.max_pipes;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_TIMESTAMP:
		if (rdev->family < CHIP_R600) {
			DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
			return -EINVAL;
		}
		value = (uint32_t *)&value64;
		value_size = sizeof(uint64_t);
		value64 = radeon_get_gpu_clock_counter(rdev);
		break;
	case RADEON_INFO_MAX_SE:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_shader_engines;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_shader_engines;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_shader_engines;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.num_ses;
		else
			*value = 1;
		break;
	case RADEON_INFO_MAX_SH_PER_SE:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_sh_per_se;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_sh_per_se;
		else
			return -EINVAL;
		break;
	case RADEON_INFO_FASTFB_WORKING:
		*value = rdev->fastfb_working;
		break;
	case RADEON_INFO_RING_WORKING:
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		switch (*value) {
		case RADEON_CS_RING_GFX:
		case RADEON_CS_RING_COMPUTE:
			*value = rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready;
			break;
		case RADEON_CS_RING_DMA:
			*value = rdev->ring[R600_RING_TYPE_DMA_INDEX].ready;
			*value |= rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready;
			break;
		case RADEON_CS_RING_UVD:
			*value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready;
			break;
		case RADEON_CS_RING_VCE:
			*value = rdev->ring[TN_RING_TYPE_VCE1_INDEX].ready;
			break;
		default:
			return -EINVAL;
		}
		break;
	case RADEON_INFO_SI_TILE_MODE_ARRAY:
		if (rdev->family >= CHIP_BONAIRE) {
			value = rdev->config.cik.tile_mode_array;
			value_size = sizeof(uint32_t)*32;
		} else if (rdev->family >= CHIP_TAHITI) {
			value = rdev->config.si.tile_mode_array;
			value_size = sizeof(uint32_t)*32;
		} else {
			DRM_DEBUG_KMS("tile mode array is si+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY:
		if (rdev->family >= CHIP_BONAIRE) {
			value = rdev->config.cik.macrotile_mode_array;
			value_size = sizeof(uint32_t)*16;
		} else {
			DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_SI_CP_DMA_COMPUTE:
		*value = 1;
		break;
	case RADEON_INFO_SI_BACKEND_ENABLED_MASK:
		if (rdev->family >= CHIP_BONAIRE) {
			*value = rdev->config.cik.backend_enable_mask;
		} else if (rdev->family >= CHIP_TAHITI) {
			*value = rdev->config.si.backend_enable_mask;
		} else {
			DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_MAX_SCLK:
		if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
		    rdev->pm.dpm_enabled)
			*value = rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10;
		else
			*value = rdev->pm.default_sclk * 10;
		break;
	case RADEON_INFO_VCE_FW_VERSION:
		*value = rdev->vce.fw_version;
		break;
	case RADEON_INFO_VCE_FB_VERSION:
		*value = rdev->vce.fb_version;
		break;
	case RADEON_INFO_NUM_BYTES_MOVED:
		value = (uint32_t *)&value64;
		value_size = sizeof(uint64_t);
		value64 = atomic64_read(&rdev->num_bytes_moved);
		break;
	case RADEON_INFO_VRAM_USAGE:
		value = (uint32_t *)&value64;
		value_size = sizeof(uint64_t);
		man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);
		value64 = ttm_resource_manager_usage(man);
		break;
	case RADEON_INFO_GTT_USAGE:
		value = (uint32_t *)&value64;
		value_size = sizeof(uint64_t);
		man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_TT);
		value64 = ttm_resource_manager_usage(man);
		break;
	case RADEON_INFO_ACTIVE_CU_COUNT:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.active_cus;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.active_cus;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.active_simds;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.active_simds;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.active_simds;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.active_simds;
		else
			*value = 1;
		break;
	case RADEON_INFO_CURRENT_GPU_TEMP:
		/* get temperature in millidegrees C */
		if (rdev->asic->pm.get_temperature)
			*value = radeon_get_temperature(rdev);
		else
			*value = 0;
		break;
	case RADEON_INFO_CURRENT_GPU_SCLK:
		/* get sclk in MHz */
		if (rdev->pm.dpm_enabled)
			*value = radeon_dpm_get_current_sclk(rdev) / 100;
		else
			*value = rdev->pm.current_sclk / 100;
		break;
	case RADEON_INFO_CURRENT_GPU_MCLK:
		/* get mclk in MHz */
		if (rdev->pm.dpm_enabled)
			*value = radeon_dpm_get_current_mclk(rdev) / 100;
		else
			*value = rdev->pm.current_mclk / 100;
		break;
	case RADEON_INFO_READ_REG:
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		if (radeon_get_allowed_info_register(rdev, *value, value))
			return -EINVAL;
		break;
	case RADEON_INFO_VA_UNMAP_WORKING:
		*value = true;
		break;
	case RADEON_INFO_GPU_RESET_COUNTER:
		*value = atomic_read(&rdev->gpu_reset_counter);
		break;
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
		return -EINVAL;
	}
	if (copy_to_user(value_ptr, (char *)value, value_size)) {
		DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
		return -EFAULT;
	}
	return 0;
}

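/*
 * Usage sketch (an assumption, not part of this file): userspace typically
 * drives radeon_info_ioctl() through the libdrm wrapper, passing a pointer
 * to the result buffer in the 64-bit 'value' field.  Note that requests
 * such as RADEON_INFO_TIMESTAMP or RADEON_INFO_VRAM_USAGE copy back a full
 * 64-bit result, so the buffer must be sized accordingly:
 *
 *	struct drm_radeon_info info = {};
 *	uint64_t timestamp = 0;
 *
 *	info.request = RADEON_INFO_TIMESTAMP;
 *	info.value = (uintptr_t)&timestamp;
 *	if (drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info)) == 0)
 *		printf("GPU clock counter: %llu\n",
 *		       (unsigned long long)timestamp);
 */
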
/**
 * radeon_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv;
	struct radeon_vm *vm;
	int r;

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(dev->dev);
		return r;
	}

	/* newer GPUs have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN) {
		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
		if (unlikely(!fpriv)) {
			r = -ENOMEM;
			goto err_suspend;
		}

		if (rdev->accel_working) {
			vm = &fpriv->vm;
			r = radeon_vm_init(rdev, vm);
			if (r)
				goto err_fpriv;

			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
			if (r)
				goto err_vm_fini;

			/* map the ib pool buffer read only into
			 * virtual address space */
			vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
							rdev->ring_tmp_bo.bo);
			if (!vm->ib_bo_va) {
				r = -ENOMEM;
				goto err_vm_fini;
			}

			r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
						  RADEON_VA_IB_OFFSET,
						  RADEON_VM_PAGE_READABLE |
						  RADEON_VM_PAGE_SNOOPED);
			if (r)
				goto err_vm_fini;
		}
		file_priv->driver_priv = fpriv;
	}

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
	return 0;

err_vm_fini:
	radeon_vm_fini(rdev, vm);
err_fpriv:
	kfree(fpriv);

err_suspend:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
	return r;
}

/**
 * radeon_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device close, tear down the hyperz and cmask filps on r1xx-r5xx,
 * free the UVD and VCE handles, and tear down the vm on cayman+
 * (all asics).
 */
void radeon_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	pm_runtime_get_sync(dev->dev);

	mutex_lock(&rdev->gem.mutex);
	if (rdev->hyperz_filp == file_priv)
		rdev->hyperz_filp = NULL;
	if (rdev->cmask_filp == file_priv)
		rdev->cmask_filp = NULL;
	mutex_unlock(&rdev->gem.mutex);

	radeon_uvd_free_handles(rdev, file_priv);
	radeon_vce_free_handles(rdev, file_priv);

	/* newer GPUs have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
		struct radeon_fpriv *fpriv = file_priv->driver_priv;
		struct radeon_vm *vm = &fpriv->vm;
		int r;

		if (rdev->accel_working) {
			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
			if (!r) {
				if (vm->ib_bo_va)
					radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
				radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
			}
			radeon_vm_fini(rdev, vm);
		}

		kfree(fpriv);
		file_priv->driver_priv = NULL;
	}
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}

/*
 * VBlank related functions.
 */
/**
 * radeon_get_vblank_counter_kms - get frame count
 *
 * @crtc: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 radeon_get_vblank_counter_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	int vpos, hpos, stat;
	u32 count;
	struct radeon_device *rdev = dev->dev_private;

	if (pipe >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
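	/* Worked example of the adjustment below: if the hw counter reads N
	 * and we are inside vblank but before vsync (vpos >= 0), the hw has
	 * not incremented yet, so N + 1 is returned.  Once vsync starts,
	 * vpos turns negative and the hw counter itself has already moved
	 * to N + 1, so no adjustment is applied.  Either way the caller
	 * sees the counter step at the start of vblank.
	 */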
	if (rdev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			count = radeon_get_vblank_counter(rdev, pipe);
			/* Ask radeon_get_crtc_scanoutpos to return vpos as
			 * distance to start of vblank, instead of regular
			 * vertical scanout pos.
			 */
			stat = radeon_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&rdev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != radeon_get_vblank_counter(rdev, pipe));

		if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		} else {
			DRM_DEBUG_VBL("crtc %u: dist from vblank start %d\n",
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	} else {
		/* Fallback to use value as is. */
		count = radeon_get_vblank_counter(rdev, pipe);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}

/**
 * radeon_enable_vblank_kms - enable vblank interrupt
 *
 * @crtc: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int radeon_enable_vblank_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct radeon_device *rdev = dev->dev_private;
	unsigned long irqflags;
	int r;

	if (pipe >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.crtc_vblank_int[pipe] = true;
	r = radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	return r;
}

/**
 * radeon_disable_vblank_kms - disable vblank interrupt
 *
 * @crtc: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void radeon_disable_vblank_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct radeon_device *rdev = dev->dev_private;
	unsigned long irqflags;

	if (pipe >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return;
	}

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.crtc_vblank_int[pipe] = false;
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}