xref: /linux/drivers/gpu/drm/radeon/radeon_device.c (revision 6000fc4d6f3e55ad52cce8d76317187fe01af2aa)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/console.h>
29 #include <drm/drmP.h>
30 #include <drm/drm_crtc_helper.h>
31 #include <drm/radeon_drm.h>
32 #include "radeon_reg.h"
33 #include "radeon.h"
34 #include "radeon_asic.h"
35 #include "atom.h"
36 
37 /*
38  * Clear GPU surface registers.
39  */
40 static void radeon_surface_init(struct radeon_device *rdev)
41 {
42 	/* FIXME: check this out */
43 	if (rdev->family < CHIP_R600) {
44 		int i;
45 
46 		for (i = 0; i < 8; i++) {
47 			WREG32(RADEON_SURFACE0_INFO +
48 			       i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO),
49 			       0);
50 		}
51 		/* enable surfaces */
52 		WREG32(RADEON_SURFACE_CNTL, 0);
53 	}
54 }
55 
56 /*
57  * GPU scratch registers helpers function.
58  */
59 static void radeon_scratch_init(struct radeon_device *rdev)
60 {
61 	int i;
62 
63 	/* FIXME: check this out */
64 	if (rdev->family < CHIP_R300) {
65 		rdev->scratch.num_reg = 5;
66 	} else {
67 		rdev->scratch.num_reg = 7;
68 	}
69 	for (i = 0; i < rdev->scratch.num_reg; i++) {
70 		rdev->scratch.free[i] = true;
71 		rdev->scratch.reg[i] = RADEON_SCRATCH_REG0 + (i * 4);
72 	}
73 }
74 
75 int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
76 {
77 	int i;
78 
79 	for (i = 0; i < rdev->scratch.num_reg; i++) {
80 		if (rdev->scratch.free[i]) {
81 			rdev->scratch.free[i] = false;
82 			*reg = rdev->scratch.reg[i];
83 			return 0;
84 		}
85 	}
86 	return -EINVAL;
87 }
88 
89 void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
90 {
91 	int i;
92 
93 	for (i = 0; i < rdev->scratch.num_reg; i++) {
94 		if (rdev->scratch.reg[i] == reg) {
95 			rdev->scratch.free[i] = true;
96 			return;
97 		}
98 	}
99 }
100 
101 /*
102  * MC common functions
103  */
/*
 * Lay out VRAM and GTT in the GPU's 32-bit MC address space.  Whichever
 * of the two was preset (!= 0xFFFFFFFF) is kept; the other is placed
 * around it, aligned to its own size.  Returns 0 or -EINVAL when the
 * two ranges cannot both fit below 4G.
 */
int radeon_mc_setup(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* Some chips have an "issue" with the memory controller, the
	 * location must be aligned to the size. We just align it down,
	 * too bad if we walk over the top of system memory, we don't
	 * use DMA without a remapped anyway.
	 * Affected chips are rv280, all r3xx, and all r4xx, but not IGP
	 */
	/* FGLRX seems to setup like this, VRAM a 0, then GART.
	 */
	/*
	 * Note: from R6xx the address space is 40bits but here we only
	 * use 32bits (still have to see a card which would exhaust 4G
	 * address space).
	 */
	if (rdev->mc.vram_location != 0xFFFFFFFFUL) {
		/* vram location was already setup try to put gtt after
		 * if it fits */
		tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
		/* round the GTT start up to a multiple of gtt_size
		 * (assumes gtt_size is a power of two) */
		tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
		if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
			rdev->mc.gtt_location = tmp;
		} else {
			/* no room after VRAM: try GTT at 0, in front of VRAM */
			if (rdev->mc.gtt_size >= rdev->mc.vram_location) {
				printk(KERN_ERR "[drm] GTT too big to fit "
				       "before or after vram location.\n");
				return -EINVAL;
			}
			rdev->mc.gtt_location = 0;
		}
	} else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) {
		/* gtt location was already setup try to put vram before
		 * if it fits */
		if (rdev->mc.mc_vram_size < rdev->mc.gtt_location) {
			rdev->mc.vram_location = 0;
		} else {
			/* place VRAM after the GTT, aligned to vram size */
			tmp = rdev->mc.gtt_location + rdev->mc.gtt_size;
			tmp += (rdev->mc.mc_vram_size - 1);
			tmp &= ~(rdev->mc.mc_vram_size - 1);
			if ((0xFFFFFFFFUL - tmp) >= rdev->mc.mc_vram_size) {
				rdev->mc.vram_location = tmp;
			} else {
				printk(KERN_ERR "[drm] vram too big to fit "
				       "before or after GTT location.\n");
				return -EINVAL;
			}
		}
	} else {
		/* neither preset: VRAM at 0, GTT right after (size-aligned) */
		rdev->mc.vram_location = 0;
		tmp = rdev->mc.mc_vram_size;
		tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
		rdev->mc.gtt_location = tmp;
	}
	DRM_INFO("radeon: VRAM %uM\n", rdev->mc.real_vram_size >> 20);
	DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n",
		 rdev->mc.vram_location,
		 rdev->mc.vram_location + rdev->mc.mc_vram_size - 1);
	if (rdev->mc.real_vram_size != rdev->mc.mc_vram_size)
		DRM_INFO("radeon: VRAM less than aperture workaround enabled\n");
	DRM_INFO("radeon: GTT %uM\n", rdev->mc.gtt_size >> 20);
	DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n",
		 rdev->mc.gtt_location,
		 rdev->mc.gtt_location + rdev->mc.gtt_size - 1);
	return 0;
}
171 
172 
173 /*
174  * GPU helpers function.
175  */
176 static bool radeon_card_posted(struct radeon_device *rdev)
177 {
178 	uint32_t reg;
179 
180 	/* first check CRTCs */
181 	if (ASIC_IS_AVIVO(rdev)) {
182 		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
183 		      RREG32(AVIVO_D2CRTC_CONTROL);
184 		if (reg & AVIVO_CRTC_EN) {
185 			return true;
186 		}
187 	} else {
188 		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
189 		      RREG32(RADEON_CRTC2_GEN_CNTL);
190 		if (reg & RADEON_CRTC_EN) {
191 			return true;
192 		}
193 	}
194 
195 	/* then check MEM_SIZE, in case the crtcs are off */
196 	if (rdev->family >= CHIP_R600)
197 		reg = RREG32(R600_CONFIG_MEMSIZE);
198 	else
199 		reg = RREG32(RADEON_CONFIG_MEMSIZE);
200 
201 	if (reg)
202 		return true;
203 
204 	return false;
205 
206 }
207 
208 
209 /*
210  * Registers accessors functions.
211  */
212 uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
213 {
214 	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
215 	BUG_ON(1);
216 	return 0;
217 }
218 
219 void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
220 {
221 	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
222 		  reg, v);
223 	BUG_ON(1);
224 }
225 
/*
 * Install the indirect-register accessor callbacks for this chip
 * family.  Everything starts as the "invalid" trap handlers; families
 * that implement a block then override them.  Later tests deliberately
 * override earlier ones (e.g. IGP MC accessors), so ordering matters.
 */
void radeon_register_accessor_init(struct radeon_device *rdev)
{
	rdev->mc_rreg = &radeon_invalid_rreg;
	rdev->mc_wreg = &radeon_invalid_wreg;
	rdev->pll_rreg = &radeon_invalid_rreg;
	rdev->pll_wreg = &radeon_invalid_wreg;
	rdev->pciep_rreg = &radeon_invalid_rreg;
	rdev->pciep_wreg = &radeon_invalid_wreg;

	/* Don't change order as we are overridding accessor. */
	if (rdev->family < CHIP_RV515) {
		rdev->pcie_reg_mask = 0xff;
	} else {
		rdev->pcie_reg_mask = 0x7ff;
	}
	/* FIXME: not sure here */
	if (rdev->family <= CHIP_R580) {
		rdev->pll_rreg = &r100_pll_rreg;
		rdev->pll_wreg = &r100_pll_wreg;
	}
	if (rdev->family >= CHIP_RV515) {
		rdev->mc_rreg = &rv515_mc_rreg;
		rdev->mc_wreg = &rv515_mc_wreg;
	}
	/* IGP families replace the MC accessors chosen above. */
	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
		rdev->mc_rreg = &rs400_mc_rreg;
		rdev->mc_wreg = &rs400_mc_wreg;
	}
	if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
		rdev->mc_rreg = &rs690_mc_rreg;
		rdev->mc_wreg = &rs690_mc_wreg;
	}
	if (rdev->family == CHIP_RS600) {
		rdev->mc_rreg = &rs600_mc_rreg;
		rdev->mc_wreg = &rs600_mc_wreg;
	}
	if (rdev->family >= CHIP_R600) {
		rdev->pciep_rreg = &r600_pciep_rreg;
		rdev->pciep_wreg = &r600_pciep_wreg;
	}
}
267 
268 
269 /*
270  * ASIC
271  */
/*
 * Pick the per-family ASIC function table for this device.
 * Returns -EINVAL for families not yet supported by KMS (R600+).
 */
int radeon_asic_init(struct radeon_device *rdev)
{
	radeon_register_accessor_init(rdev);
	switch (rdev->family) {
	/* r100-class parts all share one function table */
	case CHIP_R100:
	case CHIP_RV100:
	case CHIP_RS100:
	case CHIP_RV200:
	case CHIP_RS200:
	case CHIP_R200:
	case CHIP_RV250:
	case CHIP_RS300:
	case CHIP_RV280:
		rdev->asic = &r100_asic;
		break;
	case CHIP_R300:
	case CHIP_R350:
	case CHIP_RV350:
	case CHIP_RV380:
		rdev->asic = &r300_asic;
		break;
	case CHIP_R420:
	case CHIP_R423:
	case CHIP_RV410:
		rdev->asic = &r420_asic;
		break;
	/* IGP families get dedicated tables */
	case CHIP_RS400:
	case CHIP_RS480:
		rdev->asic = &rs400_asic;
		break;
	case CHIP_RS600:
		rdev->asic = &rs600_asic;
		break;
	case CHIP_RS690:
	case CHIP_RS740:
		rdev->asic = &rs690_asic;
		break;
	case CHIP_RV515:
		rdev->asic = &rv515_asic;
		break;
	case CHIP_R520:
	case CHIP_RV530:
	case CHIP_RV560:
	case CHIP_RV570:
	case CHIP_R580:
		rdev->asic = &r520_asic;
		break;
	/* R600 and newer fall through to the unsupported path */
	case CHIP_R600:
	case CHIP_RV610:
	case CHIP_RV630:
	case CHIP_RV620:
	case CHIP_RV635:
	case CHIP_RV670:
	case CHIP_RS780:
	case CHIP_RV770:
	case CHIP_RV730:
	case CHIP_RV710:
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}
	return 0;
}
335 
336 
337 /*
338  * Wrapper around modesetting bits.
339  */
340 int radeon_clocks_init(struct radeon_device *rdev)
341 {
342 	int r;
343 
344 	radeon_get_clock_info(rdev->ddev);
345 	r = radeon_static_clocks_init(rdev->ddev);
346 	if (r) {
347 		return r;
348 	}
349 	DRM_INFO("Clocks initialized !\n");
350 	return 0;
351 }
352 
void radeon_clocks_fini(struct radeon_device *rdev)
{
	/* Intentionally empty: there is currently no clock state to undo. */
}
356 
357 /* ATOM accessor methods */
358 static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
359 {
360 	struct radeon_device *rdev = info->dev->dev_private;
361 	uint32_t r;
362 
363 	r = rdev->pll_rreg(rdev, reg);
364 	return r;
365 }
366 
/* ATOM interpreter callback: PLL write, forwarded to the per-ASIC accessor. */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}
373 
374 static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
375 {
376 	struct radeon_device *rdev = info->dev->dev_private;
377 	uint32_t r;
378 
379 	r = rdev->mc_rreg(rdev, reg);
380 	return r;
381 }
382 
/* ATOM interpreter callback: MC write, forwarded to the per-ASIC accessor. */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}
389 
/* ATOM interpreter callback: MMIO write.  The interpreter passes the
 * register as a dword index, hence the *4 byte-offset conversion. */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}
396 
397 static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
398 {
399 	struct radeon_device *rdev = info->dev->dev_private;
400 	uint32_t r;
401 
402 	r = RREG32(reg*4);
403 	return r;
404 }
405 
406 static struct card_info atom_card_info = {
407 	.dev = NULL,
408 	.reg_read = cail_reg_read,
409 	.reg_write = cail_reg_write,
410 	.mc_read = cail_mc_read,
411 	.mc_write = cail_mc_write,
412 	.pll_read = cail_pll_read,
413 	.pll_write = cail_pll_write,
414 };
415 
/* Bind the ATOM interpreter to this device, parse the BIOS image and
 * initialize the BIOS scratch registers.  Always returns 0. */
int radeon_atombios_init(struct radeon_device *rdev)
{
	/* atom_card_info is a file-scope singleton — NOTE(review): this
	 * presumably assumes one ATOM device at a time; verify for
	 * multi-card configurations. */
	atom_card_info.dev = rdev->ddev;
	rdev->mode_info.atom_context = atom_parse(&atom_card_info, rdev->bios);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}
423 
/* Release the ATOM context created in radeon_atombios_init(). */
void radeon_atombios_fini(struct radeon_device *rdev)
{
	kfree(rdev->mode_info.atom_context);
}
428 
/* Legacy (COM BIOS) counterpart of radeon_atombios_init(): only the
 * BIOS scratch registers need setting up.  Always returns 0. */
int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}
434 
void radeon_combios_fini(struct radeon_device *rdev)
{
	/* Intentionally empty: radeon_combios_init() allocates nothing. */
}
438 
439 int radeon_modeset_init(struct radeon_device *rdev);
440 void radeon_modeset_fini(struct radeon_device *rdev);
441 
442 
443 /*
444  * Radeon device.
445  */
/**
 * radeon_device_init - bring up a radeon KMS device.
 * @rdev: caller-allocated device structure to initialize
 * @ddev: associated DRM device
 * @pdev: associated PCI device
 * @flags: driver flags; the low bits carry the chip family
 *
 * Performs the full, strictly-ordered bring-up: locking, DMA mask,
 * MMIO mapping, BIOS parse/post, clocks, MC, fences, IRQ, memory
 * manager, GART/CP/IB acceleration and finally modesetting.
 * Returns 0 on success or a negative error code; an acceleration-only
 * failure is returned while modesetting is still brought up.
 */
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, ret;
	int dma_bits;

	DRM_INFO("radeon: Initializing kernel modesetting.\n");
	rdev->shutdown = false;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	/* chip family is packed into the low bits of the driver flags */
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	rdev->gpu_lockup = false;
	/* mutex initialization are all done here so we
	 * can recall function without having locking issues */
	mutex_init(&rdev->cs_mutex);
	mutex_init(&rdev->ib_pool.mutex);
	mutex_init(&rdev->cp.mutex);
	rwlock_init(&rdev->fence_drv.lock);

	/* module parameter requested AGP disabled: fall back to the
	 * PCIE or PCI path instead */
	if (radeon_agpmode == -1) {
		rdev->flags &= ~RADEON_IS_AGP;
		if (rdev->family > CHIP_RV515 ||
		    rdev->family == CHIP_RV380 ||
		    rdev->family == CHIP_RV410 ||
		    rdev->family == CHIP_R423) {
			DRM_INFO("Forcing AGP to PCIE mode\n");
			rdev->flags |= RADEON_IS_PCIE;
		} else {
			DRM_INFO("Forcing AGP to PCI mode\n");
			rdev->flags |= RADEON_IS_PCI;
		}
	}

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r) {
		return r;
	}
	r = radeon_init(rdev);
	if (r) {
		return r;
	}

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits (in theory)
	 * AGP - generally dma32 is safest
	 * PCI - only dma32
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if (rdev->flags & RADEON_IS_PCI)
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		/* non-fatal: continue with whatever mask is in effect */
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	rdev->rmmio_base = drm_get_resource_start(rdev->ddev, 2);
	rdev->rmmio_size = drm_get_resource_len(rdev->ddev, 2);
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* Setup errata flags */
	radeon_errata(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);

	/* TODO: disable VGA need to use VGA request */
	/* BIOS*/
	if (!radeon_get_bios(rdev)) {
		/* AVIVO parts cannot be brought up without a BIOS image */
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		r = radeon_atombios_init(rdev);
		if (r) {
			return r;
		}
	} else {
		r = radeon_combios_init(rdev);
		if (r) {
			return r;
		}
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_gpu_reset(rdev)) {
		/* FIXME: what do we want to do here ? */
	}
	/* check if cards are posted or not */
	if (!radeon_card_posted(rdev) && rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		if (rdev->is_atom_bios) {
			atom_asic_init(rdev->mode_info.atom_context);
		} else {
			radeon_combios_asic_init(rdev->ddev);
		}
	}
	/* Initialize clocks */
	r = radeon_clocks_init(rdev);
	if (r) {
		return r;
	}
	/* Get vram informations */
	radeon_vram_info(rdev);

	/* Add an MTRR for the VRAM */
	rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
				      MTRR_TYPE_WRCOMB, 1);
	DRM_INFO("Detected VRAM RAM=%uM, BAR=%uM\n",
		 rdev->mc.real_vram_size >> 20,
		 (unsigned)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
		 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	/* Initialize memory controller (also test AGP) */
	r = radeon_mc_init(rdev);
	if (r) {
		return r;
	}
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r) {
		return r;
	}
	r = radeon_irq_kms_init(rdev);
	if (r) {
		return r;
	}
	/* Memory manager */
	r = radeon_object_init(rdev);
	if (r) {
		return r;
	}
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	r = radeon_gart_enable(rdev);
	if (!r) {
		r = radeon_gem_init(rdev);
	}

	/* 1M ring buffer */
	if (!r) {
		r = radeon_cp_init(rdev, 1024 * 1024);
	}
	if (!r) {
		r = radeon_wb_init(rdev);
		if (r) {
			DRM_ERROR("radeon: failled initializing WB (%d).\n", r);
			return r;
		}
	}
	if (!r) {
		r = radeon_ib_pool_init(rdev);
		if (r) {
			DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
			return r;
		}
	}
	if (!r) {
		r = radeon_ib_test(rdev);
		if (r) {
			DRM_ERROR("radeon: failled testing IB (%d).\n", r);
			return r;
		}
	}
	/* remember acceleration status: modesetting below still proceeds
	 * even when GART/CP/IB setup failed */
	ret = r;
	r = radeon_modeset_init(rdev);
	if (r) {
		return r;
	}
	if (!ret) {
		DRM_INFO("radeon: kernel modesetting successfully initialized.\n");
	}
	if (radeon_testing) {
		radeon_test_moves(rdev);
	}
	if (radeon_benchmarking) {
		radeon_benchmark(rdev);
	}
	return ret;
}
644 
/*
 * Tear down everything radeon_device_init() set up, in reverse order.
 * Safe on a partially-initialized device: bails out when the MMIO
 * mapping was never established.
 */
void radeon_device_fini(struct radeon_device *rdev)
{
	if (rdev == NULL || rdev->rmmio == NULL) {
		return;
	}
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* Order matter so becarefull if you rearrange anythings */
	radeon_modeset_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_cp_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_object_fini(rdev);
	/* mc_fini must be after object_fini */
	radeon_mc_fini(rdev);
#if __OS_HAS_AGP
	radeon_agp_fini(rdev);
#endif
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_clocks_fini(rdev);
	/* free whichever BIOS-path state was created during init */
	if (rdev->is_atom_bios) {
		radeon_atombios_fini(rdev);
	} else {
		radeon_combios_fini(rdev);
	}
	kfree(rdev->bios);
	rdev->bios = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
}
677 
678 
679 /*
680  * Suspend & resume.
681  */
/*
 * KMS suspend entry point: unpin scanout buffers, evict VRAM, quiesce
 * the GPU (CP/GART/IRQ) and put the PCI device to sleep.
 */
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_crtc *crtc;

	if (dev == NULL || rdev == NULL) {
		return -ENODEV;
	}
	/* nothing to do for the pre-thaw phase of hibernation */
	if (state.event == PM_EVENT_PRETHAW) {
		return 0;
	}
	/* unpin the front buffers */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
		struct radeon_object *robj;

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = rfb->obj->driver_private;
		/* the fbdev console buffer stays pinned across suspend */
		if (robj != rdev->fbdev_robj) {
			radeon_object_unpin(robj);
		}
	}
	/* evict vram memory */
	radeon_object_evict_vram(rdev);
	/* wait for gpu to finish processing current batch */
	radeon_fence_wait_last(rdev);

	radeon_cp_disable(rdev);
	radeon_gart_disable(rdev);

	/* evict remaining vram memory */
	radeon_object_evict_vram(rdev);

	/* mask the software interrupt before sleeping */
	rdev->irq.sw_int = false;
	radeon_irq_set(rdev);

	pci_save_state(dev->pdev);
	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}
	/* park the fbdev console under the console lock */
	acquire_console_sem();
	fb_set_suspend(rdev->fbdev_info, 1);
	release_console_sem();
	return 0;
}
731 
/*
 * KMS resume entry point: power the PCI device back up, re-post the
 * card, and restart clocks, IRQ, MC, GART and the CP, then restore
 * the display configuration.
 */
int radeon_resume_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	int r;

	acquire_console_sem();
	pci_set_power_state(dev->pdev, PCI_D0);
	pci_restore_state(dev->pdev);
	if (pci_enable_device(dev->pdev)) {
		release_console_sem();
		return -1;
	}
	pci_set_master(dev->pdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_gpu_reset(rdev)) {
		/* FIXME: what do we want to do here ? */
	}
	/* post card */
	if (rdev->is_atom_bios) {
		atom_asic_init(rdev->mode_info.atom_context);
	} else {
		radeon_combios_asic_init(rdev->ddev);
	}
	/* Initialize clocks */
	r = radeon_clocks_init(rdev);
	if (r) {
		release_console_sem();
		return r;
	}
	/* Enable IRQ */
	rdev->irq.sw_int = true;
	radeon_irq_set(rdev);
	/* Initialize GPU Memory Controller */
	r = radeon_mc_init(rdev);
	if (r) {
		goto out;
	}
	r = radeon_gart_enable(rdev);
	if (r) {
		goto out;
	}
	r = radeon_cp_init(rdev, rdev->cp.ring_size);
	if (r) {
		goto out;
	}
	/* NOTE(review): failures that jump to "out" are not propagated —
	 * the function still returns 0 below.  Confirm whether resume is
	 * intentionally best-effort here. */
out:
	fb_set_suspend(rdev->fbdev_info, 0);
	release_console_sem();

	/* blat the mode back in */
	drm_helper_resume_force_mode(dev);
	return 0;
}
785 
786 
787 /*
788  * Debugfs
789  */
/* One registration made through radeon_debugfs_add_files(). */
struct radeon_debugfs {
	struct drm_info_list	*files;
	unsigned		num_files;
};
/* Table of registrations and the number of entries currently used. */
static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_NUM_FILES];
static unsigned _radeon_debugfs_count = 0;
796 
/*
 * Register an array of debugfs files for this device.  Re-registering
 * the same array is a no-op.  Returns 0 or -EINVAL when the table is
 * full.
 */
int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < _radeon_debugfs_count; i++) {
		if (_radeon_debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}
	/* NOTE(review): the bound adds nfiles, but the table consumes one
	 * slot per *registration* — verify the intended capacity semantics
	 * of RADEON_DEBUGFS_MAX_NUM_FILES. */
	if ((_radeon_debugfs_count + nfiles) > RADEON_DEBUGFS_MAX_NUM_FILES) {
		DRM_ERROR("Reached maximum number of debugfs files.\n");
		DRM_ERROR("Report so we increase RADEON_DEBUGFS_MAX_NUM_FILES.\n");
		return -EINVAL;
	}
	_radeon_debugfs[_radeon_debugfs_count].files = files;
	_radeon_debugfs[_radeon_debugfs_count].num_files = nfiles;
	_radeon_debugfs_count++;
#if defined(CONFIG_DEBUG_FS)
	/* expose the files on both the control and primary minors */
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->control->debugfs_root,
				 rdev->ddev->control);
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->primary->debugfs_root,
				 rdev->ddev->primary);
#endif
	return 0;
}
827 
828 #if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor)
{
	/* Nothing to do here: files are registered on demand through
	 * radeon_debugfs_add_files(). */
	return 0;
}
833 
834 void radeon_debugfs_cleanup(struct drm_minor *minor)
835 {
836 	unsigned i;
837 
838 	for (i = 0; i < _radeon_debugfs_count; i++) {
839 		drm_debugfs_remove_files(_radeon_debugfs[i].files,
840 					 _radeon_debugfs[i].num_files, minor);
841 	}
842 }
843 #endif
844