/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vga_switcheroo.h>

#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drv.h"
#include "radeon_kms.h"

#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_has_atpx(void);
#else
static inline bool radeon_has_atpx(void) { return false; }
#endif

/**
 * radeon_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 * It calls radeon_modeset_fini() to tear down the
 * displays, and radeon_device_fini() to tear down
 * the rest of the device (CP, writeback, etc.).
 */
void radeon_driver_unload_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	if (rdev == NULL)
		return;

	if (rdev->rmmio == NULL)
		goto done_free;

	if (radeon_is_px(dev)) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	radeon_acpi_fini(rdev);

	radeon_modeset_fini(rdev);
	radeon_device_fini(rdev);

	if (rdev->agp)
		arch_phys_wc_del(rdev->agp->agp_mtrr);
	kfree(rdev->agp);
	rdev->agp = NULL;

done_free:
	dev->dev_private = NULL;
}

/**
 * radeon_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * It calls radeon_device_init() to set up the non-display
 * parts of the chip (asic init, CP, writeback, etc.), and
 * radeon_modeset_init() to set up the display parts
 * (crtcs, encoders, hotplug detect, etc.).
 * Returns 0 on success, error on failure.
 */
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct radeon_device *rdev = dev->dev_private;
	int r, acpi_status;

#ifdef __alpha__
	rdev->hose = pdev->sysdata;
#endif

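	/* On AGP boards, set up the AGP aperture and cover it with a
	 * write-combining MTRR (aper_size is in MB).
	 */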
	if (pci_find_capability(pdev, PCI_CAP_ID_AGP))
		rdev->agp = radeon_agp_head_init(dev);
	if (rdev->agp) {
		rdev->agp->agp_mtrr = arch_phys_wc_add(
			rdev->agp->agp_info.aper_base,
			rdev->agp->agp_info.aper_size *
			1024 * 1024);
	}

	/* update BUS flag */
	if (pci_find_capability(pdev, PCI_CAP_ID_AGP)) {
		flags |= RADEON_IS_AGP;
	} else if (pci_is_pcie(pdev)) {
		flags |= RADEON_IS_PCIE;
	} else {
		flags |= RADEON_IS_PCI;
	}

	if ((radeon_runtime_pm != 0) &&
	    radeon_has_atpx() &&
	    ((flags & RADEON_IS_IGP) == 0) &&
	    !pci_is_thunderbolt_attached(pdev))
		flags |= RADEON_IS_PX;

	/* radeon_device_init() should report only fatal errors
	 * (memory allocation, iomapping or memory manager
	 * initialization failures); it must properly initialize
	 * the GPU MC controller and permit VRAM allocation.
	 */
	r = radeon_device_init(rdev, dev, pdev, flags);
	if (r) {
		dev_err(dev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Likewise, modeset init should fail only on fatal errors;
	 * otherwise it should provide enough functionality for
	 * shadowfb to run.
	 */
	r = radeon_modeset_init(rdev);
	if (r)
		dev_err(dev->dev, "Fatal error during modeset init\n");

	/* Calling the ACPI methods requires modeset init,
	 * but failure is not fatal.
	 */
	if (!r) {
		acpi_status = radeon_acpi_init(rdev);
		if (acpi_status)
			dev_dbg(dev->dev, "Error during ACPI methods call\n");
	}

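	/* On PowerXpress (hybrid graphics) systems, enable runtime PM so the
	 * dGPU can be powered down when idle, with a 5 second autosuspend delay.
	 */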
	if (radeon_is_px(dev)) {
		dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

out:
	if (r)
		radeon_driver_unload_kms(dev);

	return r;
}

/**
 * radeon_set_filp_rights - Set filp rights.
 *
 * @dev: drm dev pointer
 * @owner: pointer to the drm file that currently owns the rights
 * @applier: drm file requesting or dropping the rights
 * @value: 1 to request the rights, 0 to drop them; on return, 1 if
 *         @applier owns the rights, 0 otherwise
 *
 * Sets the filp rights for the device (all asics).
 */
static void radeon_set_filp_rights(struct drm_device *dev,
				   struct drm_file **owner,
				   struct drm_file *applier,
				   uint32_t *value)
{
	struct radeon_device *rdev = dev->dev_private;

	mutex_lock(&rdev->gem.mutex);
	if (*value == 1) {
		/* wants rights */
		if (!*owner)
			*owner = applier;
	} else if (*value == 0) {
		/* revokes rights */
		if (*owner == applier)
			*owner = NULL;
	}
	*value = *owner == applier ? 1 : 0;
	mutex_unlock(&rdev->gem.mutex);
}

/*
 * Userspace get information ioctl
 */
/**
 * radeon_info_ioctl - answer a device specific request.
 *
 * @dev: drm device pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers.  Examples include: pci device id, pipeline parms, tiling params,
 * etc. (all asics).
 * Returns 0 on success, negative error code on failure.
 */
int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_info *info = data;
	struct radeon_mode_info *minfo = &rdev->mode_info;
	uint32_t *value, value_tmp, *value_ptr, value_size;
	struct ttm_resource_manager *man;
	uint64_t value64;
	struct drm_crtc *crtc;
	int i, found;
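
	/* Most requests return a single 32-bit value through the on-stack
	 * value_tmp; requests that return 64-bit or array data repoint
	 * value and adjust value_size before the final copy_to_user().
	 */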
	value_ptr = (uint32_t *)((unsigned long)info->value);
	value = &value_tmp;
	value_size = sizeof(uint32_t);

	switch (info->request) {
	case RADEON_INFO_DEVICE_ID:
		*value = to_pci_dev(dev->dev)->device;
		break;
	case RADEON_INFO_NUM_GB_PIPES:
		*value = rdev->num_gb_pipes;
		break;
	case RADEON_INFO_NUM_Z_PIPES:
		*value = rdev->num_z_pipes;
		break;
	case RADEON_INFO_ACCEL_WORKING:
		/* xf86-video-ati 6.13.0 relies on this being false for evergreen */
		if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
			*value = false;
		else
			*value = rdev->accel_working;
		break;
	case RADEON_INFO_CRTC_FROM_ID:
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		for (i = 0, found = 0; i < rdev->num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == *value) {
				struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
				*value = radeon_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", *value);
			return -EINVAL;
		}
		break;
	case RADEON_INFO_ACCEL_WORKING2:
		if (rdev->family == CHIP_HAWAII) {
			if (rdev->accel_working) {
				if (rdev->new_fw)
					*value = 3;
				else
					*value = 2;
			} else {
				*value = 0;
			}
		} else {
			*value = rdev->accel_working;
		}
		break;
	case RADEON_INFO_TILING_CONFIG:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.tile_config;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.tile_config;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.tile_config;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.tile_config;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.tile_config;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.tile_config;
		else {
			DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_WANT_HYPERZ:
		/* The "value" here is both an input and output parameter.
		 * If the input value is 1, filp requests hyper-z access.
		 * If the input value is 0, filp revokes its hyper-z access.
		 *
		 * When returning, the value is 1 if filp owns hyper-z access,
		 * 0 otherwise. */
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		if (*value >= 2) {
			DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", *value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, value);
		break;
	case RADEON_INFO_WANT_CMASK:
		/* The same logic as Hyper-Z. */
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		if (*value >= 2) {
			DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", *value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, value);
		break;
	case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
		/* return clock value in KHz */
		if (rdev->asic->get_xclk)
			*value = radeon_get_xclk(rdev) * 10;
		else
			*value = rdev->clock.spll.reference_freq * 10;
		break;
	case RADEON_INFO_NUM_BACKENDS:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_backends_per_se *
				rdev->config.cik.max_shader_engines;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_backends_per_se *
				rdev->config.si.max_shader_engines;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_backends_per_se *
				rdev->config.cayman.max_shader_engines;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.max_backends;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.max_backends;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.max_backends;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_NUM_TILE_PIPES:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_tile_pipes;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_tile_pipes;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_tile_pipes;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.max_tile_pipes;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.max_tile_pipes;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.max_tile_pipes;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_FUSION_GART_WORKING:
		*value = 1;
		break;
	case RADEON_INFO_BACKEND_MAP:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.backend_map;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.backend_map;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.backend_map;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.backend_map;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.backend_map;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.backend_map;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_VA_START:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		*value = RADEON_VA_RESERVED_SIZE;
		break;
	case RADEON_INFO_IB_VM_MAX_SIZE:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		*value = RADEON_IB_VM_MAX_SIZE;
		break;
	case RADEON_INFO_MAX_PIPES:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_cu_per_sh;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_cu_per_sh;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_pipes_per_simd;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.max_pipes;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.max_pipes;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.max_pipes;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_TIMESTAMP:
		if (rdev->family < CHIP_R600) {
			DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
			return -EINVAL;
		}
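		/* The GPU clock counter is 64-bit: return it through value64
		 * and widen the copy size accordingly.
		 */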
		value = (uint32_t *)&value64;
		value_size = sizeof(uint64_t);
		value64 = radeon_get_gpu_clock_counter(rdev);
		break;
	case RADEON_INFO_MAX_SE:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_shader_engines;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_shader_engines;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_shader_engines;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.num_ses;
		else
			*value = 1;
		break;
	case RADEON_INFO_MAX_SH_PER_SE:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_sh_per_se;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_sh_per_se;
		else
			return -EINVAL;
		break;
	case RADEON_INFO_FASTFB_WORKING:
		*value = rdev->fastfb_working;
		break;
	case RADEON_INFO_RING_WORKING:
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		switch (*value) {
		case RADEON_CS_RING_GFX:
		case RADEON_CS_RING_COMPUTE:
			*value = rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready;
			break;
		case RADEON_CS_RING_DMA:
			*value = rdev->ring[R600_RING_TYPE_DMA_INDEX].ready;
			*value |= rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready;
			break;
		case RADEON_CS_RING_UVD:
			*value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready;
			break;
		case RADEON_CS_RING_VCE:
			*value = rdev->ring[TN_RING_TYPE_VCE1_INDEX].ready;
			break;
		default:
			return -EINVAL;
		}
		break;
	case RADEON_INFO_SI_TILE_MODE_ARRAY:
		if (rdev->family >= CHIP_BONAIRE) {
			value = rdev->config.cik.tile_mode_array;
			value_size = sizeof(uint32_t)*32;
		} else if (rdev->family >= CHIP_TAHITI) {
			value = rdev->config.si.tile_mode_array;
			value_size = sizeof(uint32_t)*32;
		} else {
			DRM_DEBUG_KMS("tile mode array is si+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY:
		if (rdev->family >= CHIP_BONAIRE) {
			value = rdev->config.cik.macrotile_mode_array;
			value_size = sizeof(uint32_t)*16;
		} else {
			DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_SI_CP_DMA_COMPUTE:
		*value = 1;
		break;
	case RADEON_INFO_SI_BACKEND_ENABLED_MASK:
		if (rdev->family >= CHIP_BONAIRE) {
			*value = rdev->config.cik.backend_enable_mask;
		} else if (rdev->family >= CHIP_TAHITI) {
			*value = rdev->config.si.backend_enable_mask;
		} else {
			DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_MAX_SCLK:
		if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
		    rdev->pm.dpm_enabled)
			*value = rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10;
		else
			*value = rdev->pm.default_sclk * 10;
		break;
	case RADEON_INFO_VCE_FW_VERSION:
		*value = rdev->vce.fw_version;
		break;
	case RADEON_INFO_VCE_FB_VERSION:
		*value = rdev->vce.fb_version;
		break;
	case RADEON_INFO_NUM_BYTES_MOVED:
		value = (uint32_t *)&value64;
		value_size = sizeof(uint64_t);
		value64 = atomic64_read(&rdev->num_bytes_moved);
		break;
	case RADEON_INFO_VRAM_USAGE:
		value = (uint32_t *)&value64;
		value_size = sizeof(uint64_t);
		man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);
		value64 = ttm_resource_manager_usage(man);
		break;
	case RADEON_INFO_GTT_USAGE:
		value = (uint32_t *)&value64;
		value_size = sizeof(uint64_t);
		man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_TT);
		value64 = ttm_resource_manager_usage(man);
		break;
	case RADEON_INFO_ACTIVE_CU_COUNT:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.active_cus;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.active_cus;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.active_simds;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.active_simds;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.active_simds;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.active_simds;
		else
			*value = 1;
		break;
	case RADEON_INFO_CURRENT_GPU_TEMP:
		/* get temperature in millidegrees C */
		if (rdev->asic->pm.get_temperature)
			*value = radeon_get_temperature(rdev);
		else
			*value = 0;
		break;
	case RADEON_INFO_CURRENT_GPU_SCLK:
		/* get sclk in MHz */
		if (rdev->pm.dpm_enabled)
			*value = radeon_dpm_get_current_sclk(rdev) / 100;
		else
			*value = rdev->pm.current_sclk / 100;
		break;
	case RADEON_INFO_CURRENT_GPU_MCLK:
		/* get mclk in MHz */
		if (rdev->pm.dpm_enabled)
			*value = radeon_dpm_get_current_mclk(rdev) / 100;
		else
			*value = rdev->pm.current_mclk / 100;
		break;
	case RADEON_INFO_READ_REG:
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
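		/* On input *value holds the register offset; only registers
		 * whitelisted by radeon_get_allowed_info_register() can be read.
		 */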
		if (radeon_get_allowed_info_register(rdev, *value, value))
			return -EINVAL;
		break;
	case RADEON_INFO_VA_UNMAP_WORKING:
		*value = true;
		break;
	case RADEON_INFO_GPU_RESET_COUNTER:
		*value = atomic_read(&rdev->gpu_reset_counter);
		break;
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
		return -EINVAL;
	}
	if (copy_to_user(value_ptr, (char *)value, value_size)) {
		DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
		return -EFAULT;
	}
	return 0;
}

/**
 * radeon_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv;
	struct radeon_vm *vm;
	int r;

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
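		/* pm_runtime_get_sync() increments the usage count even on
		 * failure, so balance it before bailing out.
		 */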
		pm_runtime_put_autosuspend(dev->dev);
		return r;
	}

	/* newer GPUs have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN) {
		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
		if (unlikely(!fpriv)) {
			r = -ENOMEM;
			goto err_suspend;
		}

		if (rdev->accel_working) {
			vm = &fpriv->vm;
			r = radeon_vm_init(rdev, vm);
			if (r)
				goto err_fpriv;

			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
			if (r)
				goto err_vm_fini;

			/* map the ib pool buffer read only into
			 * virtual address space */
			vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
							rdev->ring_tmp_bo.bo);
			if (!vm->ib_bo_va) {
				r = -ENOMEM;
				goto err_vm_fini;
			}

			r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
						  RADEON_VA_IB_OFFSET,
						  RADEON_VM_PAGE_READABLE |
						  RADEON_VM_PAGE_SNOOPED);
			if (r)
				goto err_vm_fini;
		}
		file_priv->driver_priv = fpriv;
	}

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
	return 0;

err_vm_fini:
	radeon_vm_fini(rdev, vm);
err_fpriv:
	kfree(fpriv);

err_suspend:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
	return r;
}

/**
 * radeon_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device close, tear down the hyperz and cmask filps on r1xx-r5xx
 * (all asics), and tear down the vm on cayman+ (all asics).
 */
void radeon_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	pm_runtime_get_sync(dev->dev);

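	/* Drop any hyper-z / cmask ownership held by this file. */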
	mutex_lock(&rdev->gem.mutex);
	if (rdev->hyperz_filp == file_priv)
		rdev->hyperz_filp = NULL;
	if (rdev->cmask_filp == file_priv)
		rdev->cmask_filp = NULL;
	mutex_unlock(&rdev->gem.mutex);

	radeon_uvd_free_handles(rdev, file_priv);
	radeon_vce_free_handles(rdev, file_priv);

	/* newer GPUs have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
		struct radeon_fpriv *fpriv = file_priv->driver_priv;
		struct radeon_vm *vm = &fpriv->vm;
		int r;

		if (rdev->accel_working) {
			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
			if (!r) {
				if (vm->ib_bo_va)
					radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
				radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
			}
			radeon_vm_fini(rdev, vm);
		}

		kfree(fpriv);
		file_priv->driver_priv = NULL;
	}
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}

/*
 * VBlank related functions.
 */
/**
 * radeon_get_vblank_counter_kms - get frame count
 *
 * @crtc: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 radeon_get_vblank_counter_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	int vpos, hpos, stat;
	u32 count;
	struct radeon_device *rdev = dev->dev_private;

	if (pipe >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
	if (rdev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			count = radeon_get_vblank_counter(rdev, pipe);
			/* Ask radeon_get_crtc_scanoutpos to return vpos as
			 * distance to start of vblank, instead of regular
			 * vertical scanout pos.
			 */
			stat = radeon_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&rdev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != radeon_get_vblank_counter(rdev, pipe));

		if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		} else {
			DRM_DEBUG_VBL("crtc %u: dist from vblank start %d\n",
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	} else {
		/* Fallback to use value as is. */
		count = radeon_get_vblank_counter(rdev, pipe);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}

/**
 * radeon_enable_vblank_kms - enable vblank interrupt
 *
 * @crtc: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int radeon_enable_vblank_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct radeon_device *rdev = dev->dev_private;
	unsigned long irqflags;
	int r;

	if (pipe >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.crtc_vblank_int[pipe] = true;
	r = radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	return r;
}

/**
 * radeon_disable_vblank_kms - disable vblank interrupt
 *
 * @crtc: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void radeon_disable_vblank_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct radeon_device *rdev = dev->dev_private;
	unsigned long irqflags;

	if (pipe >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return;
	}

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.crtc_vblank_int[pipe] = false;
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
868