xref: /linux/drivers/gpu/drm/radeon/r100.c (revision ff10fca5ceacf7bc59636f5ab808e775d1717167)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
30 #include "drmP.h"
31 #include "drm.h"
32 #include "radeon_drm.h"
33 #include "radeon_reg.h"
34 #include "radeon.h"
35 #include "radeon_asic.h"
36 #include "r100d.h"
37 #include "rs100d.h"
38 #include "rv200d.h"
39 #include "rv250d.h"
40 #include "atom.h"
41 
42 #include <linux/firmware.h>
43 #include <linux/platform_device.h>
44 
45 #include "r100_reg_safe.h"
46 #include "rn50_reg_safe.h"
47 
48 /* Firmware Names */
49 #define FIRMWARE_R100		"radeon/R100_cp.bin"
50 #define FIRMWARE_R200		"radeon/R200_cp.bin"
51 #define FIRMWARE_R300		"radeon/R300_cp.bin"
52 #define FIRMWARE_R420		"radeon/R420_cp.bin"
53 #define FIRMWARE_RS690		"radeon/RS690_cp.bin"
54 #define FIRMWARE_RS600		"radeon/RS600_cp.bin"
55 #define FIRMWARE_R520		"radeon/R520_cp.bin"
56 
57 MODULE_FIRMWARE(FIRMWARE_R100);
58 MODULE_FIRMWARE(FIRMWARE_R200);
59 MODULE_FIRMWARE(FIRMWARE_R300);
60 MODULE_FIRMWARE(FIRMWARE_R420);
61 MODULE_FIRMWARE(FIRMWARE_RS690);
62 MODULE_FIRMWARE(FIRMWARE_RS600);
63 MODULE_FIRMWARE(FIRMWARE_R520);
64 
65 #include "r100_track.h"
66 
67 /* This file gathers functions specific to:
68  * r100, rv100, rs100, rv200, rs200, r200, rv250, rs300, rv280
69  */
70 
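/* Select rdev->pm.requested_power_state_index for the planned dynpm action
 * (minimum, downclock, upclock or default), skipping single-display-only
 * states when more than one CRTC is active, and update the
 * dynpm_can_upclock/dynpm_can_downclock flags accordingly.
 */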
71 void r100_pm_get_dynpm_state(struct radeon_device *rdev)
72 {
73 	int i;
74 	rdev->pm.dynpm_can_upclock = true;
75 	rdev->pm.dynpm_can_downclock = true;
76 
77 	switch (rdev->pm.dynpm_planned_action) {
78 	case DYNPM_ACTION_MINIMUM:
79 		rdev->pm.requested_power_state_index = 0;
80 		rdev->pm.dynpm_can_downclock = false;
81 		break;
82 	case DYNPM_ACTION_DOWNCLOCK:
83 		if (rdev->pm.current_power_state_index == 0) {
84 			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
85 			rdev->pm.dynpm_can_downclock = false;
86 		} else {
87 			if (rdev->pm.active_crtc_count > 1) {
88 				for (i = 0; i < rdev->pm.num_power_states; i++) {
89 					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
90 						continue;
91 					else if (i >= rdev->pm.current_power_state_index) {
92 						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
93 						break;
94 					} else {
95 						rdev->pm.requested_power_state_index = i;
96 						break;
97 					}
98 				}
99 			} else
100 				rdev->pm.requested_power_state_index =
101 					rdev->pm.current_power_state_index - 1;
102 		}
103 		/* don't use the power state if crtcs are active and no display flag is set */
104 		if ((rdev->pm.active_crtc_count > 0) &&
105 		    (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
106 		     RADEON_PM_MODE_NO_DISPLAY)) {
107 			rdev->pm.requested_power_state_index++;
108 		}
109 		break;
110 	case DYNPM_ACTION_UPCLOCK:
111 		if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
112 			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
113 			rdev->pm.dynpm_can_upclock = false;
114 		} else {
115 			if (rdev->pm.active_crtc_count > 1) {
116 				for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
117 					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
118 						continue;
119 					else if (i <= rdev->pm.current_power_state_index) {
120 						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
121 						break;
122 					} else {
123 						rdev->pm.requested_power_state_index = i;
124 						break;
125 					}
126 				}
127 			} else
128 				rdev->pm.requested_power_state_index =
129 					rdev->pm.current_power_state_index + 1;
130 		}
131 		break;
132 	case DYNPM_ACTION_DEFAULT:
133 		rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
134 		rdev->pm.dynpm_can_upclock = false;
135 		break;
136 	case DYNPM_ACTION_NONE:
137 	default:
138 		DRM_ERROR("Requested mode for undefined action\n");
139 		return;
140 	}
141 	/* only one clock mode per power state */
142 	rdev->pm.requested_clock_mode_index = 0;
143 
144 	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
145 		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
146 		  clock_info[rdev->pm.requested_clock_mode_index].sclk,
147 		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
148 		  clock_info[rdev->pm.requested_clock_mode_index].mclk,
149 		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
150 		  pcie_lanes);
151 }
152 
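/* Fill in the static power-management profiles: every profile uses clock
 * mode 0, with its dpms-off and dpms-on entries mapped to either power
 * state 0 or the default power state.
 */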
153 void r100_pm_init_profile(struct radeon_device *rdev)
154 {
155 	/* default */
156 	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
157 	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
158 	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
159 	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
160 	/* low sh */
161 	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
162 	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
163 	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
164 	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
165 	/* mid sh */
166 	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
167 	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
168 	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
169 	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
170 	/* high sh */
171 	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
172 	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
173 	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
174 	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
175 	/* low mh */
176 	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
177 	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
178 	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
179 	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
180 	/* mid mh */
181 	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
182 	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
183 	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
184 	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
185 	/* high mh */
186 	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
187 	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
188 	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
189 	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
190 }
191 
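/* Apply the non-clock parts of the requested power state: toggle the
 * voltage GPIO if one is used, program the SCLK power-saving controls
 * (reduced-speed SCLK, dynamic voltage drop, HDP forcing) and update the
 * PCIe lane count when it differs from the current state.
 */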
192 void r100_pm_misc(struct radeon_device *rdev)
193 {
194 	int requested_index = rdev->pm.requested_power_state_index;
195 	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
196 	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
197 	u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;
198 
199 	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
200 		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
201 			tmp = RREG32(voltage->gpio.reg);
202 			if (voltage->active_high)
203 				tmp |= voltage->gpio.mask;
204 			else
205 				tmp &= ~(voltage->gpio.mask);
206 			WREG32(voltage->gpio.reg, tmp);
207 			if (voltage->delay)
208 				udelay(voltage->delay);
209 		} else {
210 			tmp = RREG32(voltage->gpio.reg);
211 			if (voltage->active_high)
212 				tmp &= ~voltage->gpio.mask;
213 			else
214 				tmp |= voltage->gpio.mask;
215 			WREG32(voltage->gpio.reg, tmp);
216 			if (voltage->delay)
217 				udelay(voltage->delay);
218 		}
219 	}
220 
221 	sclk_cntl = RREG32_PLL(SCLK_CNTL);
222 	sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
223 	sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
224 	sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
225 	sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
226 	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
227 		sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
228 		if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
229 			sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
230 		else
231 			sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
232 		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
233 			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
234 		else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
235 			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
236 	} else
237 		sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;
238 
239 	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
240 		sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
241 		if (voltage->delay) {
242 			sclk_more_cntl |= VOLTAGE_DROP_SYNC;
243 			switch (voltage->delay) {
244 			case 33:
245 				sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
246 				break;
247 			case 66:
248 				sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
249 				break;
250 			case 99:
251 				sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
252 				break;
253 			case 132:
254 				sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
255 				break;
256 			}
257 		} else
258 			sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
259 	} else
260 		sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;
261 
262 	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
263 		sclk_cntl &= ~FORCE_HDP;
264 	else
265 		sclk_cntl |= FORCE_HDP;
266 
267 	WREG32_PLL(SCLK_CNTL, sclk_cntl);
268 	WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
269 	WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);
270 
271 	/* set pcie lanes */
272 	if ((rdev->flags & RADEON_IS_PCIE) &&
273 	    !(rdev->flags & RADEON_IS_IGP) &&
274 	    rdev->asic->set_pcie_lanes &&
275 	    (ps->pcie_lanes !=
276 	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
277 		radeon_set_pcie_lanes(rdev,
278 				      ps->pcie_lanes);
279 		DRM_DEBUG_DRIVER("Setting: p: %d\n", ps->pcie_lanes);
280 	}
281 }
282 
283 void r100_pm_prepare(struct radeon_device *rdev)
284 {
285 	struct drm_device *ddev = rdev->ddev;
286 	struct drm_crtc *crtc;
287 	struct radeon_crtc *radeon_crtc;
288 	u32 tmp;
289 
290 	/* disable any active CRTCs */
291 	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
292 		radeon_crtc = to_radeon_crtc(crtc);
293 		if (radeon_crtc->enabled) {
294 			if (radeon_crtc->crtc_id) {
295 				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
296 				tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
297 				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
298 			} else {
299 				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
300 				tmp |= RADEON_CRTC_DISP_REQ_EN_B;
301 				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
302 			}
303 		}
304 	}
305 }
306 
307 void r100_pm_finish(struct radeon_device *rdev)
308 {
309 	struct drm_device *ddev = rdev->ddev;
310 	struct drm_crtc *crtc;
311 	struct radeon_crtc *radeon_crtc;
312 	u32 tmp;
313 
314 	/* enable any active CRTCs */
315 	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
316 		radeon_crtc = to_radeon_crtc(crtc);
317 		if (radeon_crtc->enabled) {
318 			if (radeon_crtc->crtc_id) {
319 				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
320 				tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
321 				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
322 			} else {
323 				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
324 				tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
325 				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
326 			}
327 		}
328 	}
329 }
330 
331 bool r100_gui_idle(struct radeon_device *rdev)
332 {
333 	if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
334 		return false;
335 	else
336 		return true;
337 }
338 
339 /* hpd for digital panel detect/disconnect */
340 bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
341 {
342 	bool connected = false;
343 
344 	switch (hpd) {
345 	case RADEON_HPD_1:
346 		if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)
347 			connected = true;
348 		break;
349 	case RADEON_HPD_2:
350 		if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)
351 			connected = true;
352 		break;
353 	default:
354 		break;
355 	}
356 	return connected;
357 }
358 
359 void r100_hpd_set_polarity(struct radeon_device *rdev,
360 			   enum radeon_hpd_id hpd)
361 {
362 	u32 tmp;
363 	bool connected = r100_hpd_sense(rdev, hpd);
364 
365 	switch (hpd) {
366 	case RADEON_HPD_1:
367 		tmp = RREG32(RADEON_FP_GEN_CNTL);
368 		if (connected)
369 			tmp &= ~RADEON_FP_DETECT_INT_POL;
370 		else
371 			tmp |= RADEON_FP_DETECT_INT_POL;
372 		WREG32(RADEON_FP_GEN_CNTL, tmp);
373 		break;
374 	case RADEON_HPD_2:
375 		tmp = RREG32(RADEON_FP2_GEN_CNTL);
376 		if (connected)
377 			tmp &= ~RADEON_FP2_DETECT_INT_POL;
378 		else
379 			tmp |= RADEON_FP2_DETECT_INT_POL;
380 		WREG32(RADEON_FP2_GEN_CNTL, tmp);
381 		break;
382 	default:
383 		break;
384 	}
385 }
386 
387 void r100_hpd_init(struct radeon_device *rdev)
388 {
389 	struct drm_device *dev = rdev->ddev;
390 	struct drm_connector *connector;
391 
392 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
393 		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
394 		switch (radeon_connector->hpd.hpd) {
395 		case RADEON_HPD_1:
396 			rdev->irq.hpd[0] = true;
397 			break;
398 		case RADEON_HPD_2:
399 			rdev->irq.hpd[1] = true;
400 			break;
401 		default:
402 			break;
403 		}
404 	}
405 	if (rdev->irq.installed)
406 		r100_irq_set(rdev);
407 }
408 
409 void r100_hpd_fini(struct radeon_device *rdev)
410 {
411 	struct drm_device *dev = rdev->ddev;
412 	struct drm_connector *connector;
413 
414 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
415 		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
416 		switch (radeon_connector->hpd.hpd) {
417 		case RADEON_HPD_1:
418 			rdev->irq.hpd[0] = false;
419 			break;
420 		case RADEON_HPD_2:
421 			rdev->irq.hpd[1] = false;
422 			break;
423 		default:
424 			break;
425 		}
426 	}
427 }
428 
429 /*
430  * PCI GART
431  */
432 void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
433 {
434 	/* TODO: can we do something here? */
435 	/* The hw seems to cache only one entry, so we should discard this
436 	 * entry; otherwise, if the first GPU GART read hits it, the access
437 	 * could end up at the wrong address. */
438 }
439 
440 int r100_pci_gart_init(struct radeon_device *rdev)
441 {
442 	int r;
443 
444 	if (rdev->gart.table.ram.ptr) {
445 		WARN(1, "R100 PCI GART already initialized.\n");
446 		return 0;
447 	}
448 	/* Initialize common gart structure */
449 	r = radeon_gart_init(rdev);
450 	if (r)
451 		return r;
452 	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
453 	rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
454 	rdev->asic->gart_set_page = &r100_pci_gart_set_page;
455 	return radeon_gart_table_ram_alloc(rdev);
456 }
457 
458 /* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
459 void r100_enable_bm(struct radeon_device *rdev)
460 {
461 	uint32_t tmp;
462 	/* Enable bus mastering */
463 	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
464 	WREG32(RADEON_BUS_CNTL, tmp);
465 }
466 
467 int r100_pci_gart_enable(struct radeon_device *rdev)
468 {
469 	uint32_t tmp;
470 
471 	radeon_gart_restore(rdev);
472 	/* discard memory request outside of configured range */
473 	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
474 	WREG32(RADEON_AIC_CNTL, tmp);
475 	/* set address range for PCI address translate */
476 	WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start);
477 	WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end);
478 	/* set PCI GART page-table base address */
479 	WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
480 	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
481 	WREG32(RADEON_AIC_CNTL, tmp);
482 	r100_pci_gart_tlb_flush(rdev);
483 	rdev->gart.ready = true;
484 	return 0;
485 }
486 
487 void r100_pci_gart_disable(struct radeon_device *rdev)
488 {
489 	uint32_t tmp;
490 
491 	/* discard memory request outside of configured range */
492 	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
493 	WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
494 	WREG32(RADEON_AIC_LO_ADDR, 0);
495 	WREG32(RADEON_AIC_HI_ADDR, 0);
496 }
497 
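/* Write a single GART entry: the page table lives in system RAM and each
 * entry holds the lower 32 bits of the page address in little-endian form.
 */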
498 int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
499 {
500 	if (i < 0 || i > rdev->gart.num_gpu_pages) {
501 		return -EINVAL;
502 	}
503 	rdev->gart.table.ram.ptr[i] = cpu_to_le32(lower_32_bits(addr));
504 	return 0;
505 }
506 
507 void r100_pci_gart_fini(struct radeon_device *rdev)
508 {
509 	radeon_gart_fini(rdev);
510 	r100_pci_gart_disable(rdev);
511 	radeon_gart_table_ram_free(rdev);
512 }
513 
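/* Build the GEN_INT_CNTL enable mask from the interrupt sources currently
 * requested (SW, GUI idle, CRTC vblank, hotplug) and write it to the
 * hardware.
 */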
514 int r100_irq_set(struct radeon_device *rdev)
515 {
516 	uint32_t tmp = 0;
517 
518 	if (!rdev->irq.installed) {
519 		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
520 		WREG32(R_000040_GEN_INT_CNTL, 0);
521 		return -EINVAL;
522 	}
523 	if (rdev->irq.sw_int) {
524 		tmp |= RADEON_SW_INT_ENABLE;
525 	}
526 	if (rdev->irq.gui_idle) {
527 		tmp |= RADEON_GUI_IDLE_MASK;
528 	}
529 	if (rdev->irq.crtc_vblank_int[0]) {
530 		tmp |= RADEON_CRTC_VBLANK_MASK;
531 	}
532 	if (rdev->irq.crtc_vblank_int[1]) {
533 		tmp |= RADEON_CRTC2_VBLANK_MASK;
534 	}
535 	if (rdev->irq.hpd[0]) {
536 		tmp |= RADEON_FP_DETECT_MASK;
537 	}
538 	if (rdev->irq.hpd[1]) {
539 		tmp |= RADEON_FP2_DETECT_MASK;
540 	}
541 	WREG32(RADEON_GEN_INT_CNTL, tmp);
542 	return 0;
543 }
544 
545 void r100_irq_disable(struct radeon_device *rdev)
546 {
547 	u32 tmp;
548 
549 	WREG32(R_000040_GEN_INT_CNTL, 0);
550 	/* Wait and acknowledge irq */
551 	mdelay(1);
552 	tmp = RREG32(R_000044_GEN_INT_STATUS);
553 	WREG32(R_000044_GEN_INT_STATUS, tmp);
554 }
555 
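/* Read the pending interrupt bits, write them back to acknowledge them and
 * return the subset the driver handles (SW, vblank, hotplug, plus GUI idle
 * while a GUI-idle wait is outstanding).
 */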
556 static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
557 {
558 	uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
559 	uint32_t irq_mask = RADEON_SW_INT_TEST |
560 		RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
561 		RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
562 
563 	/* the interrupt works, but the status bit is permanently asserted */
564 	if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
565 		if (!rdev->irq.gui_idle_acked)
566 			irq_mask |= RADEON_GUI_IDLE_STAT;
567 	}
568 
569 	if (irqs) {
570 		WREG32(RADEON_GEN_INT_STATUS, irqs);
571 	}
572 	return irqs & irq_mask;
573 }
574 
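/* Interrupt handler body: acknowledge and dispatch SW, GUI-idle, vblank and
 * hotplug interrupts in a loop, queue the hotplug work if needed, and
 * re-arm MSI on chipsets that require it.
 */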
575 int r100_irq_process(struct radeon_device *rdev)
576 {
577 	uint32_t status, msi_rearm;
578 	bool queue_hotplug = false;
579 
580 	/* reset gui idle ack.  the status bit is broken */
581 	rdev->irq.gui_idle_acked = false;
582 
583 	status = r100_irq_ack(rdev);
584 	if (!status) {
585 		return IRQ_NONE;
586 	}
587 	if (rdev->shutdown) {
588 		return IRQ_NONE;
589 	}
590 	while (status) {
591 		/* SW interrupt */
592 		if (status & RADEON_SW_INT_TEST) {
593 			radeon_fence_process(rdev);
594 		}
595 		/* gui idle interrupt */
596 		if (status & RADEON_GUI_IDLE_STAT) {
597 			rdev->irq.gui_idle_acked = true;
598 			rdev->pm.gui_idle = true;
599 			wake_up(&rdev->irq.idle_queue);
600 		}
601 		/* Vertical blank interrupts */
602 		if (status & RADEON_CRTC_VBLANK_STAT) {
603 			drm_handle_vblank(rdev->ddev, 0);
604 			rdev->pm.vblank_sync = true;
605 			wake_up(&rdev->irq.vblank_queue);
606 		}
607 		if (status & RADEON_CRTC2_VBLANK_STAT) {
608 			drm_handle_vblank(rdev->ddev, 1);
609 			rdev->pm.vblank_sync = true;
610 			wake_up(&rdev->irq.vblank_queue);
611 		}
612 		if (status & RADEON_FP_DETECT_STAT) {
613 			queue_hotplug = true;
614 			DRM_DEBUG("HPD1\n");
615 		}
616 		if (status & RADEON_FP2_DETECT_STAT) {
617 			queue_hotplug = true;
618 			DRM_DEBUG("HPD2\n");
619 		}
620 		status = r100_irq_ack(rdev);
621 	}
622 	/* reset gui idle ack.  the status bit is broken */
623 	rdev->irq.gui_idle_acked = false;
624 	if (queue_hotplug)
625 		queue_work(rdev->wq, &rdev->hotplug_work);
626 	if (rdev->msi_enabled) {
627 		switch (rdev->family) {
628 		case CHIP_RS400:
629 		case CHIP_RS480:
630 			msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM;
631 			WREG32(RADEON_AIC_CNTL, msi_rearm);
632 			WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
633 			break;
634 		default:
635 			msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
636 			WREG32(RADEON_MSI_REARM_EN, msi_rearm);
637 			WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
638 			break;
639 		}
640 	}
641 	return IRQ_HANDLED;
642 }
643 
644 u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
645 {
646 	if (crtc == 0)
647 		return RREG32(RADEON_CRTC_CRNT_FRAME);
648 	else
649 		return RREG32(RADEON_CRTC2_CRNT_FRAME);
650 }
651 
652 /* Whoever calls radeon_fence_emit should call ring_lock and ask
653  * for enough space (today the callers are IB scheduling and buffer moves) */
654 void r100_fence_ring_emit(struct radeon_device *rdev,
655 			  struct radeon_fence *fence)
656 {
657 	/* We have to make sure that caches are flushed before
658 	 * the CPU might read something from VRAM. */
659 	radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
660 	radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
661 	radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
662 	radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
663 	/* Wait until IDLE & CLEAN */
664 	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
665 	radeon_ring_write(rdev, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
666 	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
667 	radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
668 				RADEON_HDP_READ_BUFFER_INVALIDATE);
669 	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
670 	radeon_ring_write(rdev, rdev->config.r100.hdp_cntl);
671 	/* Emit fence sequence & fire IRQ */
672 	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
673 	radeon_ring_write(rdev, fence->seq);
674 	radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
675 	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
676 }
677 
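/* Set up scratch register writeback: on first use allocate and pin one GTT
 * page and map it, then program the scratch address, the CP read-pointer
 * writeback address and the scratch write mask.
 */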
678 int r100_wb_init(struct radeon_device *rdev)
679 {
680 	int r;
681 
682 	if (rdev->wb.wb_obj == NULL) {
683 		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
684 					RADEON_GEM_DOMAIN_GTT,
685 					&rdev->wb.wb_obj);
686 		if (r) {
687 			dev_err(rdev->dev, "(%d) create WB buffer failed\n", r);
688 			return r;
689 		}
690 		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
691 		if (unlikely(r != 0))
692 			return r;
693 		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
694 					&rdev->wb.gpu_addr);
695 		if (r) {
696 			dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r);
697 			radeon_bo_unreserve(rdev->wb.wb_obj);
698 			return r;
699 		}
700 		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
701 		radeon_bo_unreserve(rdev->wb.wb_obj);
702 		if (r) {
703 			dev_err(rdev->dev, "(%d) map WB buffer failed\n", r);
704 			return r;
705 		}
706 	}
707 	WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr);
708 	WREG32(R_00070C_CP_RB_RPTR_ADDR,
709 		S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + 1024) >> 2));
710 	WREG32(R_000770_SCRATCH_UMSK, 0xff);
711 	return 0;
712 }
713 
714 void r100_wb_disable(struct radeon_device *rdev)
715 {
716 	WREG32(R_000770_SCRATCH_UMSK, 0);
717 }
718 
719 void r100_wb_fini(struct radeon_device *rdev)
720 {
721 	int r;
722 
723 	r100_wb_disable(rdev);
724 	if (rdev->wb.wb_obj) {
725 		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
726 		if (unlikely(r != 0)) {
727 			dev_err(rdev->dev, "(%d) can't finish WB\n", r);
728 			return;
729 		}
730 		radeon_bo_kunmap(rdev->wb.wb_obj);
731 		radeon_bo_unpin(rdev->wb.wb_obj);
732 		radeon_bo_unreserve(rdev->wb.wb_obj);
733 		radeon_bo_unref(&rdev->wb.wb_obj);
734 		rdev->wb.wb = NULL;
735 		rdev->wb.wb_obj = NULL;
736 	}
737 }
738 
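/* Copy num_pages pages between GPU addresses using the 2D blitter: each
 * BITBLT_MULTI packet handles up to 8191 pages, and the copy is followed by
 * a destination cache flush, an idle wait and an optional fence.
 */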
739 int r100_copy_blit(struct radeon_device *rdev,
740 		   uint64_t src_offset,
741 		   uint64_t dst_offset,
742 		   unsigned num_pages,
743 		   struct radeon_fence *fence)
744 {
745 	uint32_t cur_pages;
746 	uint32_t stride_bytes = PAGE_SIZE;
747 	uint32_t pitch;
748 	uint32_t stride_pixels;
749 	unsigned ndw;
750 	int num_loops;
751 	int r = 0;
752 
753 	/* radeon limited to 16k stride */
754 	stride_bytes &= 0x3fff;
755 	/* radeon pitch is /64 */
756 	pitch = stride_bytes / 64;
757 	stride_pixels = stride_bytes / 4;
758 	num_loops = DIV_ROUND_UP(num_pages, 8191);
759 
760 	/* Ask for enough room for blit + flush + fence */
761 	ndw = 64 + (10 * num_loops);
762 	r = radeon_ring_lock(rdev, ndw);
763 	if (r) {
764 		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
765 		return -EINVAL;
766 	}
767 	while (num_pages > 0) {
768 		cur_pages = num_pages;
769 		if (cur_pages > 8191) {
770 			cur_pages = 8191;
771 		}
772 		num_pages -= cur_pages;
773 
774 		/* pages are in the Y direction (height),
775 		   page width is in the X direction (width) */
776 		radeon_ring_write(rdev, PACKET3(PACKET3_BITBLT_MULTI, 8));
777 		radeon_ring_write(rdev,
778 				  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
779 				  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
780 				  RADEON_GMC_SRC_CLIPPING |
781 				  RADEON_GMC_DST_CLIPPING |
782 				  RADEON_GMC_BRUSH_NONE |
783 				  (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
784 				  RADEON_GMC_SRC_DATATYPE_COLOR |
785 				  RADEON_ROP3_S |
786 				  RADEON_DP_SRC_SOURCE_MEMORY |
787 				  RADEON_GMC_CLR_CMP_CNTL_DIS |
788 				  RADEON_GMC_WR_MSK_DIS);
789 		radeon_ring_write(rdev, (pitch << 22) | (src_offset >> 10));
790 		radeon_ring_write(rdev, (pitch << 22) | (dst_offset >> 10));
791 		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
792 		radeon_ring_write(rdev, 0);
793 		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
794 		radeon_ring_write(rdev, num_pages);
795 		radeon_ring_write(rdev, num_pages);
796 		radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
797 	}
798 	radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
799 	radeon_ring_write(rdev, RADEON_RB2D_DC_FLUSH_ALL);
800 	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
801 	radeon_ring_write(rdev,
802 			  RADEON_WAIT_2D_IDLECLEAN |
803 			  RADEON_WAIT_HOST_IDLECLEAN |
804 			  RADEON_WAIT_DMA_GUI_IDLE);
805 	if (fence) {
806 		r = radeon_fence_emit(rdev, fence);
807 	}
808 	radeon_ring_unlock_commit(rdev);
809 	return r;
810 }
811 
812 static int r100_cp_wait_for_idle(struct radeon_device *rdev)
813 {
814 	unsigned i;
815 	u32 tmp;
816 
817 	for (i = 0; i < rdev->usec_timeout; i++) {
818 		tmp = RREG32(R_000E40_RBBM_STATUS);
819 		if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) {
820 			return 0;
821 		}
822 		udelay(1);
823 	}
824 	return -1;
825 }
826 
827 void r100_ring_start(struct radeon_device *rdev)
828 {
829 	int r;
830 
831 	r = radeon_ring_lock(rdev, 2);
832 	if (r) {
833 		return;
834 	}
835 	radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
836 	radeon_ring_write(rdev,
837 			  RADEON_ISYNC_ANY2D_IDLE3D |
838 			  RADEON_ISYNC_ANY3D_IDLE2D |
839 			  RADEON_ISYNC_WAIT_IDLEGUI |
840 			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
841 	radeon_ring_unlock_commit(rdev);
842 }
843 
844 
845 /* Load the microcode for the CP */
846 static int r100_cp_init_microcode(struct radeon_device *rdev)
847 {
848 	struct platform_device *pdev;
849 	const char *fw_name = NULL;
850 	int err;
851 
852 	DRM_DEBUG_KMS("\n");
853 
854 	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
855 	err = IS_ERR(pdev);
856 	if (err) {
857 		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
858 		return -EINVAL;
859 	}
860 	if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
861 	    (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
862 	    (rdev->family == CHIP_RS200)) {
863 		DRM_INFO("Loading R100 Microcode\n");
864 		fw_name = FIRMWARE_R100;
865 	} else if ((rdev->family == CHIP_R200) ||
866 		   (rdev->family == CHIP_RV250) ||
867 		   (rdev->family == CHIP_RV280) ||
868 		   (rdev->family == CHIP_RS300)) {
869 		DRM_INFO("Loading R200 Microcode\n");
870 		fw_name = FIRMWARE_R200;
871 	} else if ((rdev->family == CHIP_R300) ||
872 		   (rdev->family == CHIP_R350) ||
873 		   (rdev->family == CHIP_RV350) ||
874 		   (rdev->family == CHIP_RV380) ||
875 		   (rdev->family == CHIP_RS400) ||
876 		   (rdev->family == CHIP_RS480)) {
877 		DRM_INFO("Loading R300 Microcode\n");
878 		fw_name = FIRMWARE_R300;
879 	} else if ((rdev->family == CHIP_R420) ||
880 		   (rdev->family == CHIP_R423) ||
881 		   (rdev->family == CHIP_RV410)) {
882 		DRM_INFO("Loading R400 Microcode\n");
883 		fw_name = FIRMWARE_R420;
884 	} else if ((rdev->family == CHIP_RS690) ||
885 		   (rdev->family == CHIP_RS740)) {
886 		DRM_INFO("Loading RS690/RS740 Microcode\n");
887 		fw_name = FIRMWARE_RS690;
888 	} else if (rdev->family == CHIP_RS600) {
889 		DRM_INFO("Loading RS600 Microcode\n");
890 		fw_name = FIRMWARE_RS600;
891 	} else if ((rdev->family == CHIP_RV515) ||
892 		   (rdev->family == CHIP_R520) ||
893 		   (rdev->family == CHIP_RV530) ||
894 		   (rdev->family == CHIP_R580) ||
895 		   (rdev->family == CHIP_RV560) ||
896 		   (rdev->family == CHIP_RV570)) {
897 		DRM_INFO("Loading R500 Microcode\n");
898 		fw_name = FIRMWARE_R520;
899 	}
900 
901 	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
902 	platform_device_unregister(pdev);
903 	if (err) {
904 		printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
905 		       fw_name);
906 	} else if (rdev->me_fw->size % 8) {
907 		printk(KERN_ERR
908 		       "radeon_cp: Bogus length %zu in firmware \"%s\"\n",
909 		       rdev->me_fw->size, fw_name);
910 		err = -EINVAL;
911 		release_firmware(rdev->me_fw);
912 		rdev->me_fw = NULL;
913 	}
914 	return err;
915 }
916 
917 static void r100_cp_load_microcode(struct radeon_device *rdev)
918 {
919 	const __be32 *fw_data;
920 	int i, size;
921 
922 	if (r100_gui_wait_for_idle(rdev)) {
923 		printk(KERN_WARNING "Failed to wait for GUI idle while "
924 		       "programming pipes. Bad things might happen.\n");
925 	}
926 
927 	if (rdev->me_fw) {
928 		size = rdev->me_fw->size / 4;
929 		fw_data = (const __be32 *)&rdev->me_fw->data[0];
930 		WREG32(RADEON_CP_ME_RAM_ADDR, 0);
931 		for (i = 0; i < size; i += 2) {
932 			WREG32(RADEON_CP_ME_RAM_DATAH,
933 			       be32_to_cpup(&fw_data[i]));
934 			WREG32(RADEON_CP_ME_RAM_DATAL,
935 			       be32_to_cpup(&fw_data[i + 1]));
936 		}
937 	}
938 }
939 
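/* Bring up the CP: load the microcode if it is not already loaded, allocate
 * the ring, program the ring buffer size, block size, fetch size and the
 * indirect-buffer cache split, then enable the CP in bus-master mode and run
 * a ring test.
 */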
940 int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
941 {
942 	unsigned rb_bufsz;
943 	unsigned rb_blksz;
944 	unsigned max_fetch;
945 	unsigned pre_write_timer;
946 	unsigned pre_write_limit;
947 	unsigned indirect2_start;
948 	unsigned indirect1_start;
949 	uint32_t tmp;
950 	int r;
951 
952 	if (r100_debugfs_cp_init(rdev)) {
953 		DRM_ERROR("Failed to register debugfs file for CP !\n");
954 	}
955 	if (!rdev->me_fw) {
956 		r = r100_cp_init_microcode(rdev);
957 		if (r) {
958 			DRM_ERROR("Failed to load firmware!\n");
959 			return r;
960 		}
961 	}
962 
963 	/* Align ring size */
964 	rb_bufsz = drm_order(ring_size / 8);
965 	ring_size = (1 << (rb_bufsz + 1)) * 4;
966 	r100_cp_load_microcode(rdev);
967 	r = radeon_ring_init(rdev, ring_size);
968 	if (r) {
969 		return r;
970 	}
971 	/* Each time the cp reads 1024 bytes (16 dword/quadword), update
972 	 * the rptr copy in system ram */
973 	rb_blksz = 9;
974 	/* cp will read 128 bytes at a time (4 dwords) */
975 	max_fetch = 1;
976 	rdev->cp.align_mask = 16 - 1;
977 	/* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
978 	pre_write_timer = 64;
979 	/* Force CP_RB_WPTR write if written more than one time before the
980 	 * delay expire
981 	 */
982 	pre_write_limit = 0;
983 	/* Set up the cp cache like this (cache size is 96 dwords):
984 	 *	RING		0  to 15
985 	 *	INDIRECT1	16 to 79
986 	 *	INDIRECT2	80 to 95
987 	 * So the ring cache size is 16 dwords (> (2 * max_fetch = 2 * 4 dwords)),
988 	 *    the indirect1 cache size is 64 dwords (> (2 * max_fetch = 2 * 4 dwords)),
989 	 *    the indirect2 cache size is 16 dwords (> (2 * max_fetch = 2 * 4 dwords)).
990 	 * The idea is that most GPU commands go through the indirect1 buffer,
991 	 * so it gets the bigger cache.
992 	 */
993 	indirect2_start = 80;
994 	indirect1_start = 16;
995 	/* cp setup */
996 	WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
997 	tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
998 	       REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
999 	       REG_SET(RADEON_MAX_FETCH, max_fetch) |
1000 	       RADEON_RB_NO_UPDATE);
1001 #ifdef __BIG_ENDIAN
1002 	tmp |= RADEON_BUF_SWAP_32BIT;
1003 #endif
1004 	WREG32(RADEON_CP_RB_CNTL, tmp);
1005 
1006 	/* Set ring address */
1007 	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
1008 	WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
1009 	/* Force read & write ptr to 0 */
1010 	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
1011 	WREG32(RADEON_CP_RB_RPTR_WR, 0);
1012 	WREG32(RADEON_CP_RB_WPTR, 0);
1013 	WREG32(RADEON_CP_RB_CNTL, tmp);
1014 	udelay(10);
1015 	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
1016 	rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
1017 	/* protect against crazy HW on resume */
1018 	rdev->cp.wptr &= rdev->cp.ptr_mask;
1019 	/* Set cp mode to bus mastering & enable cp */
1020 	WREG32(RADEON_CP_CSQ_MODE,
1021 	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
1022 	       REG_SET(RADEON_INDIRECT1_START, indirect1_start));
1023 	WREG32(0x718, 0);
1024 	WREG32(0x744, 0x00004D4D);
1025 	WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
1026 	radeon_ring_start(rdev);
1027 	r = radeon_ring_test(rdev);
1028 	if (r) {
1029 		DRM_ERROR("radeon: cp isn't working (%d).\n", r);
1030 		return r;
1031 	}
1032 	rdev->cp.ready = true;
1033 	rdev->mc.active_vram_size = rdev->mc.real_vram_size;
1034 	return 0;
1035 }
1036 
1037 void r100_cp_fini(struct radeon_device *rdev)
1038 {
1039 	if (r100_cp_wait_for_idle(rdev)) {
1040 		DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n");
1041 	}
1042 	/* Disable ring */
1043 	r100_cp_disable(rdev);
1044 	radeon_ring_fini(rdev);
1045 	DRM_INFO("radeon: cp finalized\n");
1046 }
1047 
1048 void r100_cp_disable(struct radeon_device *rdev)
1049 {
1050 	/* Disable ring */
1051 	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
1052 	rdev->cp.ready = false;
1053 	WREG32(RADEON_CP_CSQ_MODE, 0);
1054 	WREG32(RADEON_CP_CSQ_CNTL, 0);
1055 	if (r100_gui_wait_for_idle(rdev)) {
1056 		printk(KERN_WARNING "Failed to wait GUI idle while "
1057 		printk(KERN_WARNING "Failed to wait for GUI idle while "
1058 		       "programming pipes. Bad things might happen.\n");
1059 }
1060 
1061 void r100_cp_commit(struct radeon_device *rdev)
1062 {
1063 	WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
1064 	(void)RREG32(RADEON_CP_RB_WPTR);
1065 }
1066 
1067 
1068 /*
1069  * CS functions
1070  */
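/* Walk the registers written by a type-0 packet: reject packets that reach
 * past the safe-register bitmap and run the check callback on every register
 * whose bit is set in it.
 */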
1071 int r100_cs_parse_packet0(struct radeon_cs_parser *p,
1072 			  struct radeon_cs_packet *pkt,
1073 			  const unsigned *auth, unsigned n,
1074 			  radeon_packet0_check_t check)
1075 {
1076 	unsigned reg;
1077 	unsigned i, j, m;
1078 	unsigned idx;
1079 	int r;
1080 
1081 	idx = pkt->idx + 1;
1082 	reg = pkt->reg;
1083 	/* Check that the register falls into the register range
1084 	 * determined by the number of entries (n) in the
1085 	 * safe register bitmap.
1086 	 */
1087 	if (pkt->one_reg_wr) {
1088 		if ((reg >> 7) > n) {
1089 			return -EINVAL;
1090 		}
1091 	} else {
1092 		if (((reg + (pkt->count << 2)) >> 7) > n) {
1093 			return -EINVAL;
1094 		}
1095 	}
1096 	for (i = 0; i <= pkt->count; i++, idx++) {
1097 		j = (reg >> 7);
1098 		m = 1 << ((reg >> 2) & 31);
1099 		if (auth[j] & m) {
1100 			r = check(p, pkt, idx, reg);
1101 			if (r) {
1102 				return r;
1103 			}
1104 		}
1105 		if (pkt->one_reg_wr) {
1106 			if (!(auth[j] & m)) {
1107 				break;
1108 			}
1109 		} else {
1110 			reg += 4;
1111 		}
1112 	}
1113 	return 0;
1114 }
1115 
1116 void r100_cs_dump_packet(struct radeon_cs_parser *p,
1117 			 struct radeon_cs_packet *pkt)
1118 {
1119 	volatile uint32_t *ib;
1120 	unsigned i;
1121 	unsigned idx;
1122 
1123 	ib = p->ib->ptr;
1124 	idx = pkt->idx;
1125 	for (i = 0; i <= (pkt->count + 1); i++, idx++) {
1126 		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
1127 	}
1128 }
1129 
1130 /**
1131  * r100_cs_packet_parse() - parse a cp packet and point the ib index to the next packet
1132  * @p:		parser structure holding parsing context.
1133  * @pkt:	where to store packet information
1134  *
1135  * Assumes that chunk_ib_index is properly set. Returns -EINVAL
1136  * if the packet is bigger than the remaining ib size or if the packet type is unknown.
1137  **/
1138 int r100_cs_packet_parse(struct radeon_cs_parser *p,
1139 			 struct radeon_cs_packet *pkt,
1140 			 unsigned idx)
1141 {
1142 	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
1143 	uint32_t header;
1144 
1145 	if (idx >= ib_chunk->length_dw) {
1146 		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
1147 			  idx, ib_chunk->length_dw);
1148 		return -EINVAL;
1149 	}
1150 	header = radeon_get_ib_value(p, idx);
1151 	pkt->idx = idx;
1152 	pkt->type = CP_PACKET_GET_TYPE(header);
1153 	pkt->count = CP_PACKET_GET_COUNT(header);
1154 	switch (pkt->type) {
1155 	case PACKET_TYPE0:
1156 		pkt->reg = CP_PACKET0_GET_REG(header);
1157 		pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
1158 		break;
1159 	case PACKET_TYPE3:
1160 		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
1161 		break;
1162 	case PACKET_TYPE2:
1163 		pkt->count = -1;
1164 		break;
1165 	default:
1166 		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
1167 		return -EINVAL;
1168 	}
1169 	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
1170 		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
1171 			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
1172 		return -EINVAL;
1173 	}
1174 	return 0;
1175 }
1176 
1177 /**
1178  * r100_cs_packet_parse_vline() - parse userspace VLINE packet
1179  * @p:		parser structure holding parsing context.
1180  *
1181  * Userspace sends a special sequence for VLINE waits:
1182  * PACKET0 - VLINE_START_END + value
1183  * PACKET0 - WAIT_UNTIL + value
1184  * RELOC (P3) - crtc_id in reloc.
1185  *
1186  * This function parses this sequence and relocates the VLINE START END
1187  * and WAIT UNTIL packets to the correct crtc.
1188  * It also detects a switched-off crtc and nulls out the
1189  * wait in that case.
1190  */
1191 int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
1192 {
1193 	struct drm_mode_object *obj;
1194 	struct drm_crtc *crtc;
1195 	struct radeon_crtc *radeon_crtc;
1196 	struct radeon_cs_packet p3reloc, waitreloc;
1197 	int crtc_id;
1198 	int r;
1199 	uint32_t header, h_idx, reg;
1200 	volatile uint32_t *ib;
1201 
1202 	ib = p->ib->ptr;
1203 
1204 	/* parse the wait until */
1205 	r = r100_cs_packet_parse(p, &waitreloc, p->idx);
1206 	if (r)
1207 		return r;
1208 
1209 	/* check that it's a wait until and has only 1 count */
1210 	if (waitreloc.reg != RADEON_WAIT_UNTIL ||
1211 	    waitreloc.count != 0) {
1212 		DRM_ERROR("vline wait had illegal wait until segment\n");
1213 		r = -EINVAL;
1214 		return r;
1215 	}
1216 
1217 	if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
1218 		DRM_ERROR("vline wait had illegal wait until\n");
1219 		r = -EINVAL;
1220 		return r;
1221 	}
1222 
1223 	/* jump over the NOP */
1224 	r = r100_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
1225 	if (r)
1226 		return r;
1227 
1228 	h_idx = p->idx - 2;
1229 	p->idx += waitreloc.count + 2;
1230 	p->idx += p3reloc.count + 2;
1231 
1232 	header = radeon_get_ib_value(p, h_idx);
1233 	crtc_id = radeon_get_ib_value(p, h_idx + 5);
1234 	reg = CP_PACKET0_GET_REG(header);
1235 	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
1236 	if (!obj) {
1237 		DRM_ERROR("cannot find crtc %d\n", crtc_id);
1238 		r = -EINVAL;
1239 		goto out;
1240 	}
1241 	crtc = obj_to_crtc(obj);
1242 	radeon_crtc = to_radeon_crtc(crtc);
1243 	crtc_id = radeon_crtc->crtc_id;
1244 
1245 	if (!crtc->enabled) {
1246 		/* if the CRTC isn't enabled - we need to nop out the wait until */
1247 		ib[h_idx + 2] = PACKET2(0);
1248 		ib[h_idx + 3] = PACKET2(0);
1249 	} else if (crtc_id == 1) {
1250 		switch (reg) {
1251 		case AVIVO_D1MODE_VLINE_START_END:
1252 			header &= ~R300_CP_PACKET0_REG_MASK;
1253 			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
1254 			break;
1255 		case RADEON_CRTC_GUI_TRIG_VLINE:
1256 			header &= ~R300_CP_PACKET0_REG_MASK;
1257 			header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
1258 			break;
1259 		default:
1260 			DRM_ERROR("unknown crtc reloc\n");
1261 			r = -EINVAL;
1262 			goto out;
1263 		}
1264 		ib[h_idx] = header;
1265 		ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
1266 	}
1267 out:
1268 	return r;
1269 }
1270 
1271 /**
1272  * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3
1273  * @p:			parser structure holding parsing context.
1274  * @data:		pointer to relocation data
1275  * @offset_start:	starting offset
1276  * @offset_mask:	offset mask (to align start offset on)
1277  * @reloc:		reloc information
1278  *
1279  * Check that the next packet is a relocation packet3, do bo validation and
1280  * compute the GPU offset using the provided start.
1281  **/
1282 int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
1283 			      struct radeon_cs_reloc **cs_reloc)
1284 {
1285 	struct radeon_cs_chunk *relocs_chunk;
1286 	struct radeon_cs_packet p3reloc;
1287 	unsigned idx;
1288 	int r;
1289 
1290 	if (p->chunk_relocs_idx == -1) {
1291 		DRM_ERROR("No relocation chunk !\n");
1292 		return -EINVAL;
1293 	}
1294 	*cs_reloc = NULL;
1295 	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
1296 	r = r100_cs_packet_parse(p, &p3reloc, p->idx);
1297 	if (r) {
1298 		return r;
1299 	}
1300 	p->idx += p3reloc.count + 2;
1301 	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
1302 		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
1303 			  p3reloc.idx);
1304 		r100_cs_dump_packet(p, &p3reloc);
1305 		return -EINVAL;
1306 	}
1307 	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
1308 	if (idx >= relocs_chunk->length_dw) {
1309 		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
1310 			  idx, relocs_chunk->length_dw);
1311 		r100_cs_dump_packet(p, &p3reloc);
1312 		return -EINVAL;
1313 	}
1314 	/* FIXME: we assume reloc size is 4 dwords */
1315 	*cs_reloc = p->relocs_ptr[(idx / 4)];
1316 	return 0;
1317 }
1318 
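/* Compute the per-vertex size in dwords implied by an SE_VTX_FMT value,
 * starting from a base of 2 dwords and adding the size of every enabled
 * component.
 */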
1319 static int r100_get_vtx_size(uint32_t vtx_fmt)
1320 {
1321 	int vtx_size;
1322 	vtx_size = 2;
1323 	/* ordered according to bits in spec */
1324 	if (vtx_fmt & RADEON_SE_VTX_FMT_W0)
1325 		vtx_size++;
1326 	if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR)
1327 		vtx_size += 3;
1328 	if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA)
1329 		vtx_size++;
1330 	if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR)
1331 		vtx_size++;
1332 	if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC)
1333 		vtx_size += 3;
1334 	if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG)
1335 		vtx_size++;
1336 	if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC)
1337 		vtx_size++;
1338 	if (vtx_fmt & RADEON_SE_VTX_FMT_ST0)
1339 		vtx_size += 2;
1340 	if (vtx_fmt & RADEON_SE_VTX_FMT_ST1)
1341 		vtx_size += 2;
1342 	if (vtx_fmt & RADEON_SE_VTX_FMT_Q1)
1343 		vtx_size++;
1344 	if (vtx_fmt & RADEON_SE_VTX_FMT_ST2)
1345 		vtx_size += 2;
1346 	if (vtx_fmt & RADEON_SE_VTX_FMT_Q2)
1347 		vtx_size++;
1348 	if (vtx_fmt & RADEON_SE_VTX_FMT_ST3)
1349 		vtx_size += 2;
1350 	if (vtx_fmt & RADEON_SE_VTX_FMT_Q3)
1351 		vtx_size++;
1352 	if (vtx_fmt & RADEON_SE_VTX_FMT_Q0)
1353 		vtx_size++;
1354 	/* blend weight */
1355 	if (vtx_fmt & (0x7 << 15))
1356 		vtx_size += (vtx_fmt >> 15) & 0x7;
1357 	if (vtx_fmt & RADEON_SE_VTX_FMT_N0)
1358 		vtx_size += 3;
1359 	if (vtx_fmt & RADEON_SE_VTX_FMT_XY1)
1360 		vtx_size += 2;
1361 	if (vtx_fmt & RADEON_SE_VTX_FMT_Z1)
1362 		vtx_size++;
1363 	if (vtx_fmt & RADEON_SE_VTX_FMT_W1)
1364 		vtx_size++;
1365 	if (vtx_fmt & RADEON_SE_VTX_FMT_N1)
1366 		vtx_size++;
1367 	if (vtx_fmt & RADEON_SE_VTX_FMT_Z)
1368 		vtx_size++;
1369 	return vtx_size;
1370 }
1371 
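/* Validate a single register write from a type-0 packet: patch relocation
 * offsets into registers that carry buffer addresses and record the
 * color/depth buffer, texture and vertex-format state needed for the later
 * CS track check.
 */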
1372 static int r100_packet0_check(struct radeon_cs_parser *p,
1373 			      struct radeon_cs_packet *pkt,
1374 			      unsigned idx, unsigned reg)
1375 {
1376 	struct radeon_cs_reloc *reloc;
1377 	struct r100_cs_track *track;
1378 	volatile uint32_t *ib;
1379 	uint32_t tmp;
1380 	int r;
1381 	int i, face;
1382 	u32 tile_flags = 0;
1383 	u32 idx_value;
1384 
1385 	ib = p->ib->ptr;
1386 	track = (struct r100_cs_track *)p->track;
1387 
1388 	idx_value = radeon_get_ib_value(p, idx);
1389 
1390 	switch (reg) {
1391 	case RADEON_CRTC_GUI_TRIG_VLINE:
1392 		r = r100_cs_packet_parse_vline(p);
1393 		if (r) {
1394 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1395 				  idx, reg);
1396 			r100_cs_dump_packet(p, pkt);
1397 			return r;
1398 		}
1399 		break;
1400 		/* FIXME: only allow PACKET3 blit? easier to check for out of
1401 		 * range access */
1402 	case RADEON_DST_PITCH_OFFSET:
1403 	case RADEON_SRC_PITCH_OFFSET:
1404 		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
1405 		if (r)
1406 			return r;
1407 		break;
1408 	case RADEON_RB3D_DEPTHOFFSET:
1409 		r = r100_cs_packet_next_reloc(p, &reloc);
1410 		if (r) {
1411 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1412 				  idx, reg);
1413 			r100_cs_dump_packet(p, pkt);
1414 			return r;
1415 		}
1416 		track->zb.robj = reloc->robj;
1417 		track->zb.offset = idx_value;
1418 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1419 		break;
1420 	case RADEON_RB3D_COLOROFFSET:
1421 		r = r100_cs_packet_next_reloc(p, &reloc);
1422 		if (r) {
1423 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1424 				  idx, reg);
1425 			r100_cs_dump_packet(p, pkt);
1426 			return r;
1427 		}
1428 		track->cb[0].robj = reloc->robj;
1429 		track->cb[0].offset = idx_value;
1430 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1431 		break;
1432 	case RADEON_PP_TXOFFSET_0:
1433 	case RADEON_PP_TXOFFSET_1:
1434 	case RADEON_PP_TXOFFSET_2:
1435 		i = (reg - RADEON_PP_TXOFFSET_0) / 24;
1436 		r = r100_cs_packet_next_reloc(p, &reloc);
1437 		if (r) {
1438 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1439 				  idx, reg);
1440 			r100_cs_dump_packet(p, pkt);
1441 			return r;
1442 		}
1443 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1444 		track->textures[i].robj = reloc->robj;
1445 		break;
1446 	case RADEON_PP_CUBIC_OFFSET_T0_0:
1447 	case RADEON_PP_CUBIC_OFFSET_T0_1:
1448 	case RADEON_PP_CUBIC_OFFSET_T0_2:
1449 	case RADEON_PP_CUBIC_OFFSET_T0_3:
1450 	case RADEON_PP_CUBIC_OFFSET_T0_4:
1451 		i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
1452 		r = r100_cs_packet_next_reloc(p, &reloc);
1453 		if (r) {
1454 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1455 				  idx, reg);
1456 			r100_cs_dump_packet(p, pkt);
1457 			return r;
1458 		}
1459 		track->textures[0].cube_info[i].offset = idx_value;
1460 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1461 		track->textures[0].cube_info[i].robj = reloc->robj;
1462 		break;
1463 	case RADEON_PP_CUBIC_OFFSET_T1_0:
1464 	case RADEON_PP_CUBIC_OFFSET_T1_1:
1465 	case RADEON_PP_CUBIC_OFFSET_T1_2:
1466 	case RADEON_PP_CUBIC_OFFSET_T1_3:
1467 	case RADEON_PP_CUBIC_OFFSET_T1_4:
1468 		i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
1469 		r = r100_cs_packet_next_reloc(p, &reloc);
1470 		if (r) {
1471 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1472 				  idx, reg);
1473 			r100_cs_dump_packet(p, pkt);
1474 			return r;
1475 		}
1476 		track->textures[1].cube_info[i].offset = idx_value;
1477 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1478 		track->textures[1].cube_info[i].robj = reloc->robj;
1479 		break;
1480 	case RADEON_PP_CUBIC_OFFSET_T2_0:
1481 	case RADEON_PP_CUBIC_OFFSET_T2_1:
1482 	case RADEON_PP_CUBIC_OFFSET_T2_2:
1483 	case RADEON_PP_CUBIC_OFFSET_T2_3:
1484 	case RADEON_PP_CUBIC_OFFSET_T2_4:
1485 		i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
1486 		r = r100_cs_packet_next_reloc(p, &reloc);
1487 		if (r) {
1488 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1489 				  idx, reg);
1490 			r100_cs_dump_packet(p, pkt);
1491 			return r;
1492 		}
1493 		track->textures[2].cube_info[i].offset = idx_value;
1494 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1495 		track->textures[2].cube_info[i].robj = reloc->robj;
1496 		break;
1497 	case RADEON_RE_WIDTH_HEIGHT:
1498 		track->maxy = ((idx_value >> 16) & 0x7FF);
1499 		break;
1500 	case RADEON_RB3D_COLORPITCH:
1501 		r = r100_cs_packet_next_reloc(p, &reloc);
1502 		if (r) {
1503 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1504 				  idx, reg);
1505 			r100_cs_dump_packet(p, pkt);
1506 			return r;
1507 		}
1508 
1509 		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
1510 			tile_flags |= RADEON_COLOR_TILE_ENABLE;
1511 		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
1512 			tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
1513 
1514 		tmp = idx_value & ~(0x7 << 16);
1515 		tmp |= tile_flags;
1516 		ib[idx] = tmp;
1517 
1518 		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
1519 		break;
1520 	case RADEON_RB3D_DEPTHPITCH:
1521 		track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
1522 		break;
1523 	case RADEON_RB3D_CNTL:
1524 		switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
1525 		case 7:
1526 		case 8:
1527 		case 9:
1528 		case 11:
1529 		case 12:
1530 			track->cb[0].cpp = 1;
1531 			break;
1532 		case 3:
1533 		case 4:
1534 		case 15:
1535 			track->cb[0].cpp = 2;
1536 			break;
1537 		case 6:
1538 			track->cb[0].cpp = 4;
1539 			break;
1540 		default:
1541 			DRM_ERROR("Invalid color buffer format (%d) !\n",
1542 				  ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
1543 			return -EINVAL;
1544 		}
1545 		track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
1546 		break;
1547 	case RADEON_RB3D_ZSTENCILCNTL:
1548 		switch (idx_value & 0xf) {
1549 		case 0:
1550 			track->zb.cpp = 2;
1551 			break;
1552 		case 2:
1553 		case 3:
1554 		case 4:
1555 		case 5:
1556 		case 9:
1557 		case 11:
1558 			track->zb.cpp = 4;
1559 			break;
1560 		default:
1561 			break;
1562 		}
1563 		break;
1564 	case RADEON_RB3D_ZPASS_ADDR:
1565 		r = r100_cs_packet_next_reloc(p, &reloc);
1566 		if (r) {
1567 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1568 				  idx, reg);
1569 			r100_cs_dump_packet(p, pkt);
1570 			return r;
1571 		}
1572 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1573 		break;
1574 	case RADEON_PP_CNTL:
1575 		{
1576 			uint32_t temp = idx_value >> 4;
1577 			for (i = 0; i < track->num_texture; i++)
1578 				track->textures[i].enabled = !!(temp & (1 << i));
1579 		}
1580 		break;
1581 	case RADEON_SE_VF_CNTL:
1582 		track->vap_vf_cntl = idx_value;
1583 		break;
1584 	case RADEON_SE_VTX_FMT:
1585 		track->vtx_size = r100_get_vtx_size(idx_value);
1586 		break;
1587 	case RADEON_PP_TEX_SIZE_0:
1588 	case RADEON_PP_TEX_SIZE_1:
1589 	case RADEON_PP_TEX_SIZE_2:
1590 		i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
1591 		track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
1592 		track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
1593 		break;
1594 	case RADEON_PP_TEX_PITCH_0:
1595 	case RADEON_PP_TEX_PITCH_1:
1596 	case RADEON_PP_TEX_PITCH_2:
1597 		i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
1598 		track->textures[i].pitch = idx_value + 32;
1599 		break;
1600 	case RADEON_PP_TXFILTER_0:
1601 	case RADEON_PP_TXFILTER_1:
1602 	case RADEON_PP_TXFILTER_2:
1603 		i = (reg - RADEON_PP_TXFILTER_0) / 24;
1604 		track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK)
1605 						 >> RADEON_MAX_MIP_LEVEL_SHIFT);
1606 		tmp = (idx_value >> 23) & 0x7;
1607 		if (tmp == 2 || tmp == 6)
1608 			track->textures[i].roundup_w = false;
1609 		tmp = (idx_value >> 27) & 0x7;
1610 		if (tmp == 2 || tmp == 6)
1611 			track->textures[i].roundup_h = false;
1612 		break;
1613 	case RADEON_PP_TXFORMAT_0:
1614 	case RADEON_PP_TXFORMAT_1:
1615 	case RADEON_PP_TXFORMAT_2:
1616 		i = (reg - RADEON_PP_TXFORMAT_0) / 24;
1617 		if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
1618 			track->textures[i].use_pitch = 1;
1619 		} else {
1620 			track->textures[i].use_pitch = 0;
1621 			track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
1622 			track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
1623 		}
1624 		if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
1625 			track->textures[i].tex_coord_type = 2;
1626 		switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
1627 		case RADEON_TXFORMAT_I8:
1628 		case RADEON_TXFORMAT_RGB332:
1629 		case RADEON_TXFORMAT_Y8:
1630 			track->textures[i].cpp = 1;
1631 			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
1632 			break;
1633 		case RADEON_TXFORMAT_AI88:
1634 		case RADEON_TXFORMAT_ARGB1555:
1635 		case RADEON_TXFORMAT_RGB565:
1636 		case RADEON_TXFORMAT_ARGB4444:
1637 		case RADEON_TXFORMAT_VYUY422:
1638 		case RADEON_TXFORMAT_YVYU422:
1639 		case RADEON_TXFORMAT_SHADOW16:
1640 		case RADEON_TXFORMAT_LDUDV655:
1641 		case RADEON_TXFORMAT_DUDV88:
1642 			track->textures[i].cpp = 2;
1643 			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
1644 			break;
1645 		case RADEON_TXFORMAT_ARGB8888:
1646 		case RADEON_TXFORMAT_RGBA8888:
1647 		case RADEON_TXFORMAT_SHADOW32:
1648 		case RADEON_TXFORMAT_LDUDUV8888:
1649 			track->textures[i].cpp = 4;
1650 			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
1651 			break;
1652 		case RADEON_TXFORMAT_DXT1:
1653 			track->textures[i].cpp = 1;
1654 			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
1655 			break;
1656 		case RADEON_TXFORMAT_DXT23:
1657 		case RADEON_TXFORMAT_DXT45:
1658 			track->textures[i].cpp = 1;
1659 			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
1660 			break;
1661 		}
1662 		track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
1663 		track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
1664 		break;
1665 	case RADEON_PP_CUBIC_FACES_0:
1666 	case RADEON_PP_CUBIC_FACES_1:
1667 	case RADEON_PP_CUBIC_FACES_2:
1668 		tmp = idx_value;
1669 		i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
1670 		for (face = 0; face < 4; face++) {
1671 			track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
1672 			track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
1673 		}
1674 		break;
1675 	default:
1676 		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
1677 		       reg, idx);
1678 		return -EINVAL;
1679 	}
1680 	return 0;
1681 }
1682 
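/* Verify that the range referenced by a PACKET3 INDX_BUFFER command (taken
 * from the packet body) fits within the relocated buffer object.
 */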
1683 int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
1684 					 struct radeon_cs_packet *pkt,
1685 					 struct radeon_bo *robj)
1686 {
1687 	unsigned idx;
1688 	u32 value;
1689 	idx = pkt->idx + 1;
1690 	value = radeon_get_ib_value(p, idx + 2);
1691 	if ((value + 1) > radeon_bo_size(robj)) {
1692 		DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
1693 			  "(need %u have %lu) !\n",
1694 			  value + 1,
1695 			  radeon_bo_size(robj));
1696 		return -EINVAL;
1697 	}
1698 	return 0;
1699 }
1700 
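/* Validate a type-3 packet: resolve relocations for vertex and index
 * buffers, record draw state for the CS tracker and run the track check for
 * draw commands; unsupported opcodes are rejected.
 */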
1701 static int r100_packet3_check(struct radeon_cs_parser *p,
1702 			      struct radeon_cs_packet *pkt)
1703 {
1704 	struct radeon_cs_reloc *reloc;
1705 	struct r100_cs_track *track;
1706 	unsigned idx;
1707 	volatile uint32_t *ib;
1708 	int r;
1709 
1710 	ib = p->ib->ptr;
1711 	idx = pkt->idx + 1;
1712 	track = (struct r100_cs_track *)p->track;
1713 	switch (pkt->opcode) {
1714 	case PACKET3_3D_LOAD_VBPNTR:
1715 		r = r100_packet3_load_vbpntr(p, pkt, idx);
1716 		if (r)
1717 			return r;
1718 		break;
1719 	case PACKET3_INDX_BUFFER:
1720 		r = r100_cs_packet_next_reloc(p, &reloc);
1721 		if (r) {
1722 			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
1723 			r100_cs_dump_packet(p, pkt);
1724 			return r;
1725 		}
1726 		ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
1727 		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
1728 		if (r) {
1729 			return r;
1730 		}
1731 		break;
1732 	case 0x23:
1733 		/* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
1734 		r = r100_cs_packet_next_reloc(p, &reloc);
1735 		if (r) {
1736 			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
1737 			r100_cs_dump_packet(p, pkt);
1738 			return r;
1739 		}
1740 		ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
1741 		track->num_arrays = 1;
1742 		track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));
1743 
1744 		track->arrays[0].robj = reloc->robj;
1745 		track->arrays[0].esize = track->vtx_size;
1746 
1747 		track->max_indx = radeon_get_ib_value(p, idx+1);
1748 
1749 		track->vap_vf_cntl = radeon_get_ib_value(p, idx+3);
1750 		track->immd_dwords = pkt->count - 1;
1751 		r = r100_cs_track_check(p->rdev, track);
1752 		if (r)
1753 			return r;
1754 		break;
1755 	case PACKET3_3D_DRAW_IMMD:
1756 		if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
1757 			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1758 			return -EINVAL;
1759 		}
1760 		track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
1761 		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1762 		track->immd_dwords = pkt->count - 1;
1763 		r = r100_cs_track_check(p->rdev, track);
1764 		if (r)
1765 			return r;
1766 		break;
1767 		/* triggers drawing using in-packet vertex data */
1768 	case PACKET3_3D_DRAW_IMMD_2:
1769 		if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
1770 			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1771 			return -EINVAL;
1772 		}
1773 		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1774 		track->immd_dwords = pkt->count;
1775 		r = r100_cs_track_check(p->rdev, track);
1776 		if (r)
1777 			return r;
1778 		break;
1779 		/* triggers drawing using in-packet vertex data */
1780 	case PACKET3_3D_DRAW_VBUF_2:
1781 		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1782 		r = r100_cs_track_check(p->rdev, track);
1783 		if (r)
1784 			return r;
1785 		break;
1786 		/* triggers drawing of vertex buffers setup elsewhere */
1787 	case PACKET3_3D_DRAW_INDX_2:
1788 		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1789 		r = r100_cs_track_check(p->rdev, track);
1790 		if (r)
1791 			return r;
1792 		break;
1793 		/* triggers drawing using indices to vertex buffer */
1794 	case PACKET3_3D_DRAW_VBUF:
1795 		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1796 		r = r100_cs_track_check(p->rdev, track);
1797 		if (r)
1798 			return r;
1799 		break;
1800 		/* triggers drawing of vertex buffers setup elsewhere */
1801 	case PACKET3_3D_DRAW_INDX:
1802 		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1803 		r = r100_cs_track_check(p->rdev, track);
1804 		if (r)
1805 			return r;
1806 		break;
1807 		/* triggers drawing using indices to vertex buffer */
1808 	case PACKET3_3D_CLEAR_HIZ:
1809 	case PACKET3_3D_CLEAR_ZMASK:
1810 		if (p->rdev->hyperz_filp != p->filp)
1811 			return -EINVAL;
1812 		break;
1813 	case PACKET3_NOP:
1814 		break;
1815 	default:
1816 		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
1817 		return -EINVAL;
1818 	}
1819 	return 0;
1820 }
1821 
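/*
 * Top level command stream parser for r100 class chips. A rough sketch of the
 * loop below, assuming the usual radeon CS packet layout of one header dword
 * followed by pkt.count + 1 payload dwords (hence the pkt.count + 2 stride):
 *
 *	do {
 *		parse the packet header at p->idx;
 *		p->idx += pkt.count + 2;
 *		type 0: check register writes against the reg_safe bitmap,
 *		        using the r200 checker for unsafe regs on CHIP_R200+;
 *		type 2: padding, nothing to do;
 *		type 3: validate draw/state packets in r100_packet3_check();
 *	} while (dwords remain in the IB chunk);
 */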
1822 int r100_cs_parse(struct radeon_cs_parser *p)
1823 {
1824 	struct radeon_cs_packet pkt;
1825 	struct r100_cs_track *track;
1826 	int r;
1827 
1828 	track = kzalloc(sizeof(*track), GFP_KERNEL);
1829 	r100_cs_track_clear(p->rdev, track);
1830 	p->track = track;
1831 	do {
1832 		r = r100_cs_packet_parse(p, &pkt, p->idx);
1833 		if (r) {
1834 			return r;
1835 		}
1836 		p->idx += pkt.count + 2;
1837 		switch (pkt.type) {
1838 			case PACKET_TYPE0:
1839 				if (p->rdev->family >= CHIP_R200)
1840 					r = r100_cs_parse_packet0(p, &pkt,
1841 								  p->rdev->config.r100.reg_safe_bm,
1842 								  p->rdev->config.r100.reg_safe_bm_size,
1843 								  &r200_packet0_check);
1844 				else
1845 					r = r100_cs_parse_packet0(p, &pkt,
1846 								  p->rdev->config.r100.reg_safe_bm,
1847 								  p->rdev->config.r100.reg_safe_bm_size,
1848 								  &r100_packet0_check);
1849 				break;
1850 			case PACKET_TYPE2:
1851 				break;
1852 			case PACKET_TYPE3:
1853 				r = r100_packet3_check(p, &pkt);
1854 				break;
1855 			default:
1856 				DRM_ERROR("Unknown packet type %d !\n",
1857 					  pkt.type);
1858 				return -EINVAL;
1859 		}
1860 		if (r) {
1861 			return r;
1862 		}
1863 	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
1864 	return 0;
1865 }
1866 
1867 
1868 /*
1869  * Global GPU functions
1870  */
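/*
 * Record per-family PLL errata. The flags set here are consumed by the
 * indirect PLL accessors further down: CHIP_ERRATA_PLL_DUMMYREADS makes
 * r100_pll_errata_after_index() issue dummy reads after the index write, and
 * CHIP_ERRATA_PLL_DELAY makes r100_pll_errata_after_data() insert a delay
 * after each data access.
 */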
1871 void r100_errata(struct radeon_device *rdev)
1872 {
1873 	rdev->pll_errata = 0;
1874 
1875 	if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
1876 		rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
1877 	}
1878 
1879 	if (rdev->family == CHIP_RV100 ||
1880 	    rdev->family == CHIP_RS100 ||
1881 	    rdev->family == CHIP_RS200) {
1882 		rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
1883 	}
1884 }
1885 
1886 /* Wait for vertical sync on primary CRTC */
1887 void r100_gpu_wait_for_vsync(struct radeon_device *rdev)
1888 {
1889 	uint32_t crtc_gen_cntl, tmp;
1890 	int i;
1891 
1892 	crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
1893 	if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) ||
1894 	    !(crtc_gen_cntl & RADEON_CRTC_EN)) {
1895 		return;
1896 	}
1897 	/* Clear the CRTC_VBLANK_SAVE bit */
1898 	WREG32(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR);
1899 	for (i = 0; i < rdev->usec_timeout; i++) {
1900 		tmp = RREG32(RADEON_CRTC_STATUS);
1901 		if (tmp & RADEON_CRTC_VBLANK_SAVE) {
1902 			return;
1903 		}
1904 		DRM_UDELAY(1);
1905 	}
1906 }
1907 
1908 /* Wait for vertical sync on secondary CRTC */
1909 void r100_gpu_wait_for_vsync2(struct radeon_device *rdev)
1910 {
1911 	uint32_t crtc2_gen_cntl, tmp;
1912 	int i;
1913 
1914 	crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
1915 	if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) ||
1916 	    !(crtc2_gen_cntl & RADEON_CRTC2_EN))
1917 		return;
1918 
1919 	/* Clear the CRTC_VBLANK_SAVE bit */
1920 	WREG32(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR);
1921 	for (i = 0; i < rdev->usec_timeout; i++) {
1922 		tmp = RREG32(RADEON_CRTC2_STATUS);
1923 		if (tmp & RADEON_CRTC2_VBLANK_SAVE) {
1924 			return;
1925 		}
1926 		DRM_UDELAY(1);
1927 	}
1928 }
1929 
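/*
 * Poll RBBM_STATUS until the command FIFO reports at least @n available
 * entries (the caller below treats 64 as an empty FIFO). Returns 0 on
 * success, -1 if the FIFO does not drain within rdev->usec_timeout
 * microseconds.
 */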
1930 int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
1931 {
1932 	unsigned i;
1933 	uint32_t tmp;
1934 
1935 	for (i = 0; i < rdev->usec_timeout; i++) {
1936 		tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
1937 		if (tmp >= n) {
1938 			return 0;
1939 		}
1940 		DRM_UDELAY(1);
1941 	}
1942 	return -1;
1943 }
1944 
1945 int r100_gui_wait_for_idle(struct radeon_device *rdev)
1946 {
1947 	unsigned i;
1948 	uint32_t tmp;
1949 
1950 	if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
1951 		printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !"
1952 		       " Bad things might happen.\n");
1953 	}
1954 	for (i = 0; i < rdev->usec_timeout; i++) {
1955 		tmp = RREG32(RADEON_RBBM_STATUS);
1956 		if (!(tmp & RADEON_RBBM_ACTIVE)) {
1957 			return 0;
1958 		}
1959 		DRM_UDELAY(1);
1960 	}
1961 	return -1;
1962 }
1963 
1964 int r100_mc_wait_for_idle(struct radeon_device *rdev)
1965 {
1966 	unsigned i;
1967 	uint32_t tmp;
1968 
1969 	for (i = 0; i < rdev->usec_timeout; i++) {
1970 		/* read MC_STATUS */
1971 		tmp = RREG32(RADEON_MC_STATUS);
1972 		if (tmp & RADEON_MC_IDLE) {
1973 			return 0;
1974 		}
1975 		DRM_UDELAY(1);
1976 	}
1977 	return -1;
1978 }
1979 
1980 void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
1981 {
1982 	lockup->last_cp_rptr = cp->rptr;
1983 	lockup->last_jiffies = jiffies;
1984 }
1985 
1986 /**
1987  * r100_gpu_cp_is_lockup() - check whether the CP is locked up
1988  * @rdev:	radeon device structure
1989  * @lockup:	r100_gpu_lockup structure holding CP lockup tracking information
1990  * @cp:		radeon_cp structure holding CP information
1991  *
1992  * The lockup tracking information does not need explicit initialization:
1993  * either the CP rptr differs from the recorded value or a jiffies wrap
1994  * around is detected, and both paths refresh the tracking information.
1995  *
1996  * A false positive is possible if we are called again after a long pause
1997  * and last_cp_rptr happens to equal the current CP rptr. To limit that, a
1998  * lockup is only reported once the rptr has not moved for at least 10
1999  * seconds (the threshold checked below); shorter stalls return false. The
2000  * caller therefore has to poll this function repeatedly for a lockup to be
2001  * reported, and the fencing code should be cautious about that.
2002  *
2003  * The caller should also write to the ring to force the CP to do something,
2004  * so that an idle CP is not mistaken for a locked up one.
2005  *
2006  **/
2007 bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
2008 {
2009 	unsigned long cjiffies, elapsed;
2010 
2011 	cjiffies = jiffies;
2012 	if (!time_after(cjiffies, lockup->last_jiffies)) {
2013 		/* likely a wrap around */
2014 		lockup->last_cp_rptr = cp->rptr;
2015 		lockup->last_jiffies = jiffies;
2016 		return false;
2017 	}
2018 	if (cp->rptr != lockup->last_cp_rptr) {
2019 		/* CP is still working no lockup */
2020 		lockup->last_cp_rptr = cp->rptr;
2021 		lockup->last_jiffies = jiffies;
2022 		return false;
2023 	}
2024 	elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
2025 	if (elapsed >= 10000) {
2026 		dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
2027 		return true;
2028 	}
2029 	/* give a chance to the GPU ... */
2030 	return false;
2031 }
2032 
2033 bool r100_gpu_is_lockup(struct radeon_device *rdev)
2034 {
2035 	u32 rbbm_status;
2036 	int r;
2037 
2038 	rbbm_status = RREG32(R_000E40_RBBM_STATUS);
2039 	if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
2040 		r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp);
2041 		return false;
2042 	}
2043 	/* force CP activities */
2044 	r = radeon_ring_lock(rdev, 2);
2045 	if (!r) {
2046 		/* PACKET2 NOP */
2047 		radeon_ring_write(rdev, 0x80000000);
2048 		radeon_ring_write(rdev, 0x80000000);
2049 		radeon_ring_unlock_commit(rdev);
2050 	}
2051 	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
2052 	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp);
2053 }
2054 
2055 void r100_bm_disable(struct radeon_device *rdev)
2056 {
2057 	u32 tmp;
2058 
2059 	/* disable bus mastering */
2060 	tmp = RREG32(R_000030_BUS_CNTL);
2061 	WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
2062 	mdelay(1);
2063 	WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
2064 	mdelay(1);
2065 	WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
2066 	tmp = RREG32(RADEON_BUS_CNTL);
2067 	mdelay(1);
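	/* also clear the Bus Master Enable bit (bit 2) of the PCI COMMAND
	 * register at config offset 0x4; 0xFFFB masks that bit off */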
2068 	pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
2069 	pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
2070 	mdelay(1);
2071 }
2072 
2073 int r100_asic_reset(struct radeon_device *rdev)
2074 {
2075 	struct r100_mc_save save;
2076 	u32 status, tmp;
2077 
2078 	r100_mc_stop(rdev, &save);
2079 	status = RREG32(R_000E40_RBBM_STATUS);
2080 	if (!G_000E40_GUI_ACTIVE(status)) {
2081 		return 0;
2082 	}
2083 	status = RREG32(R_000E40_RBBM_STATUS);
2084 	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2085 	/* stop CP */
2086 	WREG32(RADEON_CP_CSQ_CNTL, 0);
2087 	tmp = RREG32(RADEON_CP_RB_CNTL);
2088 	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
2089 	WREG32(RADEON_CP_RB_RPTR_WR, 0);
2090 	WREG32(RADEON_CP_RB_WPTR, 0);
2091 	WREG32(RADEON_CP_RB_CNTL, tmp);
2092 	/* save PCI state */
2093 	pci_save_state(rdev->pdev);
2094 	/* disable bus mastering */
2095 	r100_bm_disable(rdev);
2096 	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
2097 					S_0000F0_SOFT_RESET_RE(1) |
2098 					S_0000F0_SOFT_RESET_PP(1) |
2099 					S_0000F0_SOFT_RESET_RB(1));
2100 	RREG32(R_0000F0_RBBM_SOFT_RESET);
2101 	mdelay(500);
2102 	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
2103 	mdelay(1);
2104 	status = RREG32(R_000E40_RBBM_STATUS);
2105 	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2106 	/* reset CP */
2107 	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
2108 	RREG32(R_0000F0_RBBM_SOFT_RESET);
2109 	mdelay(500);
2110 	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
2111 	mdelay(1);
2112 	status = RREG32(R_000E40_RBBM_STATUS);
2113 	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2114 	/* restore PCI & busmastering */
2115 	pci_restore_state(rdev->pdev);
2116 	r100_enable_bm(rdev);
2117 	/* Check if GPU is idle */
2118 	if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
2119 		G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
2120 		dev_err(rdev->dev, "failed to reset GPU\n");
2121 		rdev->gpu_lockup = true;
2122 		return -1;
2123 	}
2124 	r100_mc_resume(rdev, &save);
2125 	dev_info(rdev->dev, "GPU reset succeed\n");
2126 	return 0;
2127 }
2128 
2129 void r100_set_common_regs(struct radeon_device *rdev)
2130 {
2131 	struct drm_device *dev = rdev->ddev;
2132 	bool force_dac2 = false;
2133 	u32 tmp;
2134 
2135 	/* set these so they don't interfere with anything */
2136 	WREG32(RADEON_OV0_SCALE_CNTL, 0);
2137 	WREG32(RADEON_SUBPIC_CNTL, 0);
2138 	WREG32(RADEON_VIPH_CONTROL, 0);
2139 	WREG32(RADEON_I2C_CNTL_1, 0);
2140 	WREG32(RADEON_DVI_I2C_CNTL_1, 0);
2141 	WREG32(RADEON_CAP0_TRIG_CNTL, 0);
2142 	WREG32(RADEON_CAP1_TRIG_CNTL, 0);
2143 
2144 	/* always set up dac2 on rn50 and some rv100 as lots
2145 	 * of servers seem to wire it up to a VGA port but
2146 	 * don't report it in the bios connector
2147 	 * table.
2148 	 */
2149 	switch (dev->pdev->device) {
2150 		/* RN50 */
2151 	case 0x515e:
2152 	case 0x5969:
2153 		force_dac2 = true;
2154 		break;
2155 		/* RV100*/
2156 	case 0x5159:
2157 	case 0x515a:
2158 		/* DELL triple head servers */
2159 		if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
2160 		    ((dev->pdev->subsystem_device == 0x016c) ||
2161 		     (dev->pdev->subsystem_device == 0x016d) ||
2162 		     (dev->pdev->subsystem_device == 0x016e) ||
2163 		     (dev->pdev->subsystem_device == 0x016f) ||
2164 		     (dev->pdev->subsystem_device == 0x0170) ||
2165 		     (dev->pdev->subsystem_device == 0x017d) ||
2166 		     (dev->pdev->subsystem_device == 0x017e) ||
2167 		     (dev->pdev->subsystem_device == 0x0183) ||
2168 		     (dev->pdev->subsystem_device == 0x018a) ||
2169 		     (dev->pdev->subsystem_device == 0x019a)))
2170 			force_dac2 = true;
2171 		break;
2172 	}
2173 
2174 	if (force_dac2) {
2175 		u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
2176 		u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
2177 		u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2);
2178 
2179 		/* For CRT on DAC2, don't turn it on if the BIOS didn't
2180 		   enable it, even if it's detected.
2181 		*/
2182 
2183 		/* force it to crtc0 */
2184 		dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
2185 		dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
2186 		disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
2187 
2188 		/* set up the TV DAC */
2189 		tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL |
2190 				 RADEON_TV_DAC_STD_MASK |
2191 				 RADEON_TV_DAC_RDACPD |
2192 				 RADEON_TV_DAC_GDACPD |
2193 				 RADEON_TV_DAC_BDACPD |
2194 				 RADEON_TV_DAC_BGADJ_MASK |
2195 				 RADEON_TV_DAC_DACADJ_MASK);
2196 		tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
2197 				RADEON_TV_DAC_NHOLD |
2198 				RADEON_TV_DAC_STD_PS2 |
2199 				(0x58 << 16));
2200 
2201 		WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
2202 		WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
2203 		WREG32(RADEON_DAC_CNTL2, dac2_cntl);
2204 	}
2205 
2206 	/* switch PM block to ACPI mode */
2207 	tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
2208 	tmp &= ~RADEON_PM_MODE_SEL;
2209 	WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
2210 
2211 }
2212 
2213 /*
2214  * VRAM info
2215  */
2216 static void r100_vram_get_type(struct radeon_device *rdev)
2217 {
2218 	uint32_t tmp;
2219 
2220 	rdev->mc.vram_is_ddr = false;
2221 	if (rdev->flags & RADEON_IS_IGP)
2222 		rdev->mc.vram_is_ddr = true;
2223 	else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
2224 		rdev->mc.vram_is_ddr = true;
2225 	if ((rdev->family == CHIP_RV100) ||
2226 	    (rdev->family == CHIP_RS100) ||
2227 	    (rdev->family == CHIP_RS200)) {
2228 		tmp = RREG32(RADEON_MEM_CNTL);
2229 		if (tmp & RV100_HALF_MODE) {
2230 			rdev->mc.vram_width = 32;
2231 		} else {
2232 			rdev->mc.vram_width = 64;
2233 		}
2234 		if (rdev->flags & RADEON_SINGLE_CRTC) {
2235 			rdev->mc.vram_width /= 4;
2236 			rdev->mc.vram_is_ddr = true;
2237 		}
2238 	} else if (rdev->family <= CHIP_RV280) {
2239 		tmp = RREG32(RADEON_MEM_CNTL);
2240 		if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
2241 			rdev->mc.vram_width = 128;
2242 		} else {
2243 			rdev->mc.vram_width = 64;
2244 		}
2245 	} else {
2246 		/* newer IGPs */
2247 		rdev->mc.vram_width = 128;
2248 	}
2249 }
2250 
2251 static u32 r100_get_accessible_vram(struct radeon_device *rdev)
2252 {
2253 	u32 aper_size;
2254 	u8 byte;
2255 
2256 	aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
2257 
2258 	/* Set HDP_APER_CNTL only on cards that are known not to be broken,
2259 	 * that is, those with the 2nd generation multifunction PCI interface
2260 	 */
2261 	if (rdev->family == CHIP_RV280 ||
2262 	    rdev->family >= CHIP_RV350) {
2263 		WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL,
2264 		       ~RADEON_HDP_APER_CNTL);
2265 		DRM_INFO("Generation 2 PCI interface, using max accessible memory\n");
2266 		return aper_size * 2;
2267 	}
2268 
2269 	/* Older cards have all sorts of funny issues to deal with. First
2270 	 * check if it's a multifunction card by reading the PCI config
2271 	 * header type... Limit those to one aperture size
2272 	 */
2273 	pci_read_config_byte(rdev->pdev, 0xe, &byte);
2274 	if (byte & 0x80) {
2275 		DRM_INFO("Generation 1 PCI interface in multifunction mode\n");
2276 		DRM_INFO("Limiting VRAM to one aperture\n");
2277 		return aper_size;
2278 	}
2279 
2280 	/* Single function older card. We read HDP_APER_CNTL to see how the BIOS
2281 	 * has set it up. We don't write this as it's broken on some ASICs but
2282 	 * we expect the BIOS to have done the right thing (might be too optimistic...)
2283 	 */
2284 	if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL)
2285 		return aper_size * 2;
2286 	return aper_size;
2287 }
2288 
2289 void r100_vram_init_sizes(struct radeon_device *rdev)
2290 {
2291 	u64 config_aper_size;
2292 
2293 	/* work out accessible VRAM */
2294 	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
2295 	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
2296 	rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
2297 	/* FIXME we don't use the second aperture yet when we could use it */
2298 	if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
2299 		rdev->mc.visible_vram_size = rdev->mc.aper_size;
2300 	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
2301 	config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
2302 	if (rdev->flags & RADEON_IS_IGP) {
2303 		uint32_t tom;
2304 		/* read NB_TOM to get the amount of ram stolen for the GPU */
2305 		tom = RREG32(RADEON_NB_TOM);
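		/* NB_TOM describes the stolen range in 64KB units: bits 31:16
		 * hold the top and bits 15:0 the bottom of the range, hence
		 * the (top - bottom + 1) << 16 size computed below */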
2306 		rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
2307 		WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
2308 		rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
2309 	} else {
2310 		rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
2311 		/* Some production M6 boards will report 0
2312 		 * if 8 MB is fitted
2313 		 */
2314 		if (rdev->mc.real_vram_size == 0) {
2315 			rdev->mc.real_vram_size = 8192 * 1024;
2316 			WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
2317 		}
2318 		/* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
2319 		 * Novell bug 204882, along with lots of Ubuntu ones
2320 		 */
2321 		if (config_aper_size > rdev->mc.real_vram_size)
2322 			rdev->mc.mc_vram_size = config_aper_size;
2323 		else
2324 			rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
2325 	}
2326 }
2327 
2328 void r100_vga_set_state(struct radeon_device *rdev, bool state)
2329 {
2330 	uint32_t temp;
2331 
2332 	temp = RREG32(RADEON_CONFIG_CNTL);
2333 	if (state == false) {
2334 		temp &= ~(1<<8);
2335 		temp |= (1<<9);
2336 	} else {
2337 		temp &= ~(1<<9);
2338 	}
2339 	WREG32(RADEON_CONFIG_CNTL, temp);
2340 }
2341 
2342 void r100_mc_init(struct radeon_device *rdev)
2343 {
2344 	u64 base;
2345 
2346 	r100_vram_get_type(rdev);
2347 	r100_vram_init_sizes(rdev);
2348 	base = rdev->mc.aper_base;
2349 	if (rdev->flags & RADEON_IS_IGP)
2350 		base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
2351 	radeon_vram_location(rdev, &rdev->mc, base);
2352 	rdev->mc.gtt_base_align = 0;
2353 	if (!(rdev->flags & RADEON_IS_AGP))
2354 		radeon_gtt_location(rdev, &rdev->mc);
2355 	radeon_update_bandwidth_info(rdev);
2356 }
2357 
2358 
2359 /*
2360  * Indirect register accessors
2361  */
2362 void r100_pll_errata_after_index(struct radeon_device *rdev)
2363 {
2364 	if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) {
2365 		(void)RREG32(RADEON_CLOCK_CNTL_DATA);
2366 		(void)RREG32(RADEON_CRTC_GEN_CNTL);
2367 	}
2368 }
2369 
2370 static void r100_pll_errata_after_data(struct radeon_device *rdev)
2371 {
2372 	/* This workaround is necessary on RV100, RS100 and RS200 chips
2373 	 * or the chip could hang on a subsequent access
2374 	 */
2375 	if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
2376 		udelay(5000);
2377 	}
2378 
2379 	/* This function is required to work around a hardware bug in some (all?)
2380 	 * revisions of the R300.  This workaround should be called after every
2381 	 * CLOCK_CNTL_INDEX register access.  If not, register reads afterward
2382 	 * may not be correct.
2383 	 */
2384 	if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
2385 		uint32_t save, tmp;
2386 
2387 		save = RREG32(RADEON_CLOCK_CNTL_INDEX);
2388 		tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
2389 		WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
2390 		tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
2391 		WREG32(RADEON_CLOCK_CNTL_INDEX, save);
2392 	}
2393 }
2394 
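/*
 * PLL registers are reached indirectly: the register index goes into the low
 * 6 bits of CLOCK_CNTL_INDEX (with RADEON_PLL_WR_EN set for writes) and the
 * payload is then moved through CLOCK_CNTL_DATA, with the errata helpers
 * above applied around each step. Illustrative read-modify-write, register
 * and bit chosen only as an example:
 *
 *	tmp = r100_pll_rreg(rdev, RADEON_PPLL_CNTL);
 *	r100_pll_wreg(rdev, RADEON_PPLL_CNTL, tmp | RADEON_PPLL_RESET);
 */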
2395 uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
2396 {
2397 	uint32_t data;
2398 
2399 	WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
2400 	r100_pll_errata_after_index(rdev);
2401 	data = RREG32(RADEON_CLOCK_CNTL_DATA);
2402 	r100_pll_errata_after_data(rdev);
2403 	return data;
2404 }
2405 
2406 void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
2407 {
2408 	WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
2409 	r100_pll_errata_after_index(rdev);
2410 	WREG32(RADEON_CLOCK_CNTL_DATA, v);
2411 	r100_pll_errata_after_data(rdev);
2412 }
2413 
2414 void r100_set_safe_registers(struct radeon_device *rdev)
2415 {
2416 	if (ASIC_IS_RN50(rdev)) {
2417 		rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
2418 		rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm);
2419 	} else if (rdev->family < CHIP_R200) {
2420 		rdev->config.r100.reg_safe_bm = r100_reg_safe_bm;
2421 		rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm);
2422 	} else {
2423 		r200_set_safe_registers(rdev);
2424 	}
2425 }
2426 
2427 /*
2428  * Debugfs info
2429  */
2430 #if defined(CONFIG_DEBUG_FS)
2431 static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
2432 {
2433 	struct drm_info_node *node = (struct drm_info_node *) m->private;
2434 	struct drm_device *dev = node->minor->dev;
2435 	struct radeon_device *rdev = dev->dev_private;
2436 	uint32_t reg, value;
2437 	unsigned i;
2438 
2439 	seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS));
2440 	seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C));
2441 	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2442 	for (i = 0; i < 64; i++) {
2443 		WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100);
2444 		reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2;
2445 		WREG32(RADEON_RBBM_CMDFIFO_ADDR, i);
2446 		value = RREG32(RADEON_RBBM_CMDFIFO_DATA);
2447 		seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
2448 	}
2449 	return 0;
2450 }
2451 
2452 static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
2453 {
2454 	struct drm_info_node *node = (struct drm_info_node *) m->private;
2455 	struct drm_device *dev = node->minor->dev;
2456 	struct radeon_device *rdev = dev->dev_private;
2457 	uint32_t rdp, wdp;
2458 	unsigned count, i, j;
2459 
2460 	radeon_ring_free_size(rdev);
2461 	rdp = RREG32(RADEON_CP_RB_RPTR);
2462 	wdp = RREG32(RADEON_CP_RB_WPTR);
2463 	count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
2464 	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2465 	seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
2466 	seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
2467 	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
2468 	seq_printf(m, "%u dwords in ring\n", count);
2469 	for (j = 0; j <= count; j++) {
2470 		i = (rdp + j) & rdev->cp.ptr_mask;
2471 		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
2472 	}
2473 	return 0;
2474 }
2475 
2476 
2477 static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data)
2478 {
2479 	struct drm_info_node *node = (struct drm_info_node *) m->private;
2480 	struct drm_device *dev = node->minor->dev;
2481 	struct radeon_device *rdev = dev->dev_private;
2482 	uint32_t csq_stat, csq2_stat, tmp;
2483 	unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr;
2484 	unsigned i;
2485 
2486 	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2487 	seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE));
2488 	csq_stat = RREG32(RADEON_CP_CSQ_STAT);
2489 	csq2_stat = RREG32(RADEON_CP_CSQ2_STAT);
2490 	r_rptr = (csq_stat >> 0) & 0x3ff;
2491 	r_wptr = (csq_stat >> 10) & 0x3ff;
2492 	ib1_rptr = (csq_stat >> 20) & 0x3ff;
2493 	ib1_wptr = (csq2_stat >> 0) & 0x3ff;
2494 	ib2_rptr = (csq2_stat >> 10) & 0x3ff;
2495 	ib2_wptr = (csq2_stat >> 20) & 0x3ff;
2496 	seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat);
2497 	seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat);
2498 	seq_printf(m, "Ring rptr %u\n", r_rptr);
2499 	seq_printf(m, "Ring wptr %u\n", r_wptr);
2500 	seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr);
2501 	seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr);
2502 	seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr);
2503 	seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr);
2504 	/* FIXME: 0, 128, 640 depends on fifo setup see cp_init_kms
2505 	 * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */
2506 	seq_printf(m, "Ring fifo:\n");
2507 	for (i = 0; i < 256; i++) {
2508 		WREG32(RADEON_CP_CSQ_ADDR, i << 2);
2509 		tmp = RREG32(RADEON_CP_CSQ_DATA);
2510 		seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp);
2511 	}
2512 	seq_printf(m, "Indirect1 fifo:\n");
2513 	for (i = 256; i <= 512; i++) {
2514 		WREG32(RADEON_CP_CSQ_ADDR, i << 2);
2515 		tmp = RREG32(RADEON_CP_CSQ_DATA);
2516 		seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp);
2517 	}
2518 	seq_printf(m, "Indirect2 fifo:\n");
2519 	for (i = 640; i < ib1_wptr; i++) {
2520 		WREG32(RADEON_CP_CSQ_ADDR, i << 2);
2521 		tmp = RREG32(RADEON_CP_CSQ_DATA);
2522 		seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp);
2523 	}
2524 	return 0;
2525 }
2526 
2527 static int r100_debugfs_mc_info(struct seq_file *m, void *data)
2528 {
2529 	struct drm_info_node *node = (struct drm_info_node *) m->private;
2530 	struct drm_device *dev = node->minor->dev;
2531 	struct radeon_device *rdev = dev->dev_private;
2532 	uint32_t tmp;
2533 
2534 	tmp = RREG32(RADEON_CONFIG_MEMSIZE);
2535 	seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
2536 	tmp = RREG32(RADEON_MC_FB_LOCATION);
2537 	seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
2538 	tmp = RREG32(RADEON_BUS_CNTL);
2539 	seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
2540 	tmp = RREG32(RADEON_MC_AGP_LOCATION);
2541 	seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
2542 	tmp = RREG32(RADEON_AGP_BASE);
2543 	seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
2544 	tmp = RREG32(RADEON_HOST_PATH_CNTL);
2545 	seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
2546 	tmp = RREG32(0x01D0);
2547 	seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
2548 	tmp = RREG32(RADEON_AIC_LO_ADDR);
2549 	seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
2550 	tmp = RREG32(RADEON_AIC_HI_ADDR);
2551 	seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
2552 	tmp = RREG32(0x01E4);
2553 	seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
2554 	return 0;
2555 }
2556 
2557 static struct drm_info_list r100_debugfs_rbbm_list[] = {
2558 	{"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL},
2559 };
2560 
2561 static struct drm_info_list r100_debugfs_cp_list[] = {
2562 	{"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL},
2563 	{"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL},
2564 };
2565 
2566 static struct drm_info_list r100_debugfs_mc_info_list[] = {
2567 	{"r100_mc_info", r100_debugfs_mc_info, 0, NULL},
2568 };
2569 #endif
2570 
2571 int r100_debugfs_rbbm_init(struct radeon_device *rdev)
2572 {
2573 #if defined(CONFIG_DEBUG_FS)
2574 	return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1);
2575 #else
2576 	return 0;
2577 #endif
2578 }
2579 
2580 int r100_debugfs_cp_init(struct radeon_device *rdev)
2581 {
2582 #if defined(CONFIG_DEBUG_FS)
2583 	return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2);
2584 #else
2585 	return 0;
2586 #endif
2587 }
2588 
2589 int r100_debugfs_mc_info_init(struct radeon_device *rdev)
2590 {
2591 #if defined(CONFIG_DEBUG_FS)
2592 	return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1);
2593 #else
2594 	return 0;
2595 #endif
2596 }
2597 
2598 int r100_set_surface_reg(struct radeon_device *rdev, int reg,
2599 			 uint32_t tiling_flags, uint32_t pitch,
2600 			 uint32_t offset, uint32_t obj_size)
2601 {
2602 	int surf_index = reg * 16;
2603 	int flags = 0;
2604 
2605 	if (rdev->family <= CHIP_RS200) {
2606 		if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
2607 				 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
2608 			flags |= RADEON_SURF_TILE_COLOR_BOTH;
2609 		if (tiling_flags & RADEON_TILING_MACRO)
2610 			flags |= RADEON_SURF_TILE_COLOR_MACRO;
2611 	} else if (rdev->family <= CHIP_RV280) {
2612 		if (tiling_flags & (RADEON_TILING_MACRO))
2613 			flags |= R200_SURF_TILE_COLOR_MACRO;
2614 		if (tiling_flags & RADEON_TILING_MICRO)
2615 			flags |= R200_SURF_TILE_COLOR_MICRO;
2616 	} else {
2617 		if (tiling_flags & RADEON_TILING_MACRO)
2618 			flags |= R300_SURF_TILE_MACRO;
2619 		if (tiling_flags & RADEON_TILING_MICRO)
2620 			flags |= R300_SURF_TILE_MICRO;
2621 	}
2622 
2623 	if (tiling_flags & RADEON_TILING_SWAP_16BIT)
2624 		flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP;
2625 	if (tiling_flags & RADEON_TILING_SWAP_32BIT)
2626 		flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;
2627 
2628 	/* when we aren't tiling, the pitch seems to need to be further divided down - tested on power5 + rn50 server */
2629 	if (tiling_flags & (RADEON_TILING_SWAP_16BIT | RADEON_TILING_SWAP_32BIT)) {
2630 		if (!(tiling_flags & (RADEON_TILING_MACRO | RADEON_TILING_MICRO)))
2631 			if (ASIC_IS_RN50(rdev))
2632 				pitch /= 16;
2633 	}
2634 
2635 	/* r100/r200 divide by 16 */
2636 	if (rdev->family < CHIP_R300)
2637 		flags |= pitch / 16;
2638 	else
2639 		flags |= pitch / 8;
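	/* e.g. a 4096 byte pitch becomes 4096 / 16 = 256 in the surface flags
	 * on r100/r200 and 4096 / 8 = 512 on r300 and newer (illustrative
	 * arithmetic only, assuming the pitch is given in bytes) */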
2640 
2641 
2642 	DRM_DEBUG_KMS("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
2643 	WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
2644 	WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
2645 	WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
2646 	return 0;
2647 }
2648 
2649 void r100_clear_surface_reg(struct radeon_device *rdev, int reg)
2650 {
2651 	int surf_index = reg * 16;
2652 	WREG32(RADEON_SURFACE0_INFO + surf_index, 0);
2653 }
2654 
2655 void r100_bandwidth_update(struct radeon_device *rdev)
2656 {
2657 	fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
2658 	fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
2659 	fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
2660 	uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
2661 	fixed20_12 memtcas_ff[8] = {
2662 		dfixed_init(1),
2663 		dfixed_init(2),
2664 		dfixed_init(3),
2665 		dfixed_init(0),
2666 		dfixed_init_half(1),
2667 		dfixed_init_half(2),
2668 		dfixed_init(0),
2669 	};
2670 	fixed20_12 memtcas_rs480_ff[8] = {
2671 		dfixed_init(0),
2672 		dfixed_init(1),
2673 		dfixed_init(2),
2674 		dfixed_init(3),
2675 		dfixed_init(0),
2676 		dfixed_init_half(1),
2677 		dfixed_init_half(2),
2678 		dfixed_init_half(3),
2679 	};
2680 	fixed20_12 memtcas2_ff[8] = {
2681 		dfixed_init(0),
2682 		dfixed_init(1),
2683 		dfixed_init(2),
2684 		dfixed_init(3),
2685 		dfixed_init(4),
2686 		dfixed_init(5),
2687 		dfixed_init(6),
2688 		dfixed_init(7),
2689 	};
2690 	fixed20_12 memtrbs[8] = {
2691 		dfixed_init(1),
2692 		dfixed_init_half(1),
2693 		dfixed_init(2),
2694 		dfixed_init_half(2),
2695 		dfixed_init(3),
2696 		dfixed_init_half(3),
2697 		dfixed_init(4),
2698 		dfixed_init_half(4)
2699 	};
2700 	fixed20_12 memtrbs_r4xx[8] = {
2701 		dfixed_init(4),
2702 		dfixed_init(5),
2703 		dfixed_init(6),
2704 		dfixed_init(7),
2705 		dfixed_init(8),
2706 		dfixed_init(9),
2707 		dfixed_init(10),
2708 		dfixed_init(11)
2709 	};
2710 	fixed20_12 min_mem_eff;
2711 	fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
2712 	fixed20_12 cur_latency_mclk, cur_latency_sclk;
2713 	fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
2714 		disp_drain_rate2, read_return_rate;
2715 	fixed20_12 time_disp1_drop_priority;
2716 	int c;
2717 	int cur_size = 16;       /* in octawords */
2718 	int critical_point = 0, critical_point2;
2719 /* 	uint32_t read_return_rate, time_disp1_drop_priority; */
2720 	int stop_req, max_stop_req;
2721 	struct drm_display_mode *mode1 = NULL;
2722 	struct drm_display_mode *mode2 = NULL;
2723 	uint32_t pixel_bytes1 = 0;
2724 	uint32_t pixel_bytes2 = 0;
2725 
2726 	radeon_update_display_priority(rdev);
2727 
2728 	if (rdev->mode_info.crtcs[0]->base.enabled) {
2729 		mode1 = &rdev->mode_info.crtcs[0]->base.mode;
2730 		pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
2731 	}
2732 	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
2733 		if (rdev->mode_info.crtcs[1]->base.enabled) {
2734 			mode2 = &rdev->mode_info.crtcs[1]->base.mode;
2735 			pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
2736 		}
2737 	}
2738 
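	/* dfixed_const_8(0) is 0.8 in 20.12 fixed point: assume only about 80%
	 * of the raw memory bandwidth is actually usable by the displays */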
2739 	min_mem_eff.full = dfixed_const_8(0);
2740 	/* get modes */
2741 	if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
2742 		uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
2743 		mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
2744 		mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
2745 		/* check crtc enables */
2746 		if (mode2)
2747 			mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
2748 		if (mode1)
2749 			mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
2750 		WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
2751 	}
2752 
2753 	/*
2754 	 * determine if there is enough bandwidth for the current mode
2755 	 */
2756 	sclk_ff = rdev->pm.sclk;
2757 	mclk_ff = rdev->pm.mclk;
2758 
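	/* raw memory bandwidth: memory clock times the bus width in bytes,
	 * doubled for DDR; e.g. a 128-bit DDR bus moves 128 / 8 * 2 = 32 bytes
	 * per memory clock */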
2759 	temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
2760 	temp_ff.full = dfixed_const(temp);
2761 	mem_bw.full = dfixed_mul(mclk_ff, temp_ff);
2762 
2763 	pix_clk.full = 0;
2764 	pix_clk2.full = 0;
2765 	peak_disp_bw.full = 0;
2766 	if (mode1) {
2767 		temp_ff.full = dfixed_const(1000);
2768 		pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */
2769 		pix_clk.full = dfixed_div(pix_clk, temp_ff);
2770 		temp_ff.full = dfixed_const(pixel_bytes1);
2771 		peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff);
2772 	}
2773 	if (mode2) {
2774 		temp_ff.full = dfixed_const(1000);
2775 		pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */
2776 		pix_clk2.full = dfixed_div(pix_clk2, temp_ff);
2777 		temp_ff.full = dfixed_const(pixel_bytes2);
2778 		peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff);
2779 	}
2780 
2781 	mem_bw.full = dfixed_mul(mem_bw, min_mem_eff);
2782 	if (peak_disp_bw.full >= mem_bw.full) {
2783 		DRM_ERROR("You may not have enough display bandwidth for current mode\n"
2784 			  "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
2785 	}
2786 
2787 	/*  Get values from the EXT_MEM_CNTL register...converting its contents. */
2788 	temp = RREG32(RADEON_MEM_TIMING_CNTL);
2789 	if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
2790 		mem_trcd = ((temp >> 2) & 0x3) + 1;
2791 		mem_trp  = ((temp & 0x3)) + 1;
2792 		mem_tras = ((temp & 0x70) >> 4) + 1;
2793 	} else if (rdev->family == CHIP_R300 ||
2794 		   rdev->family == CHIP_R350) { /* r300, r350 */
2795 		mem_trcd = (temp & 0x7) + 1;
2796 		mem_trp = ((temp >> 8) & 0x7) + 1;
2797 		mem_tras = ((temp >> 11) & 0xf) + 4;
2798 	} else if (rdev->family == CHIP_RV350 ||
2799 		   rdev->family <= CHIP_RV380) {
2800 		/* rv3x0 */
2801 		mem_trcd = (temp & 0x7) + 3;
2802 		mem_trp = ((temp >> 8) & 0x7) + 3;
2803 		mem_tras = ((temp >> 11) & 0xf) + 6;
2804 	} else if (rdev->family == CHIP_R420 ||
2805 		   rdev->family == CHIP_R423 ||
2806 		   rdev->family == CHIP_RV410) {
2807 		/* r4xx */
2808 		mem_trcd = (temp & 0xf) + 3;
2809 		if (mem_trcd > 15)
2810 			mem_trcd = 15;
2811 		mem_trp = ((temp >> 8) & 0xf) + 3;
2812 		if (mem_trp > 15)
2813 			mem_trp = 15;
2814 		mem_tras = ((temp >> 12) & 0x1f) + 6;
2815 		if (mem_tras > 31)
2816 			mem_tras = 31;
2817 	} else { /* RV200, R200 */
2818 		mem_trcd = (temp & 0x7) + 1;
2819 		mem_trp = ((temp >> 8) & 0x7) + 1;
2820 		mem_tras = ((temp >> 12) & 0xf) + 4;
2821 	}
2822 	/* convert to FF */
2823 	trcd_ff.full = dfixed_const(mem_trcd);
2824 	trp_ff.full = dfixed_const(mem_trp);
2825 	tras_ff.full = dfixed_const(mem_tras);
2826 
2827 	/* Get values from the MEM_SDRAM_MODE_REG register...converting its contents. */
2828 	temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
2829 	data = (temp & (7 << 20)) >> 20;
2830 	if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
2831 		if (rdev->family == CHIP_RS480) /* don't think rs400 */
2832 			tcas_ff = memtcas_rs480_ff[data];
2833 		else
2834 			tcas_ff = memtcas_ff[data];
2835 	} else
2836 		tcas_ff = memtcas2_ff[data];
2837 
2838 	if (rdev->family == CHIP_RS400 ||
2839 	    rdev->family == CHIP_RS480) {
2840 		/* extra cas latency stored in bits 23-25 0-4 clocks */
2841 		data = (temp >> 23) & 0x7;
2842 		if (data < 5)
2843 			tcas_ff.full += dfixed_const(data);
2844 	}
2845 
2846 	if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
2847 		/* on the R300, Tcas is included in Trbs.
2848 		 */
2849 		temp = RREG32(RADEON_MEM_CNTL);
2850 		data = (R300_MEM_NUM_CHANNELS_MASK & temp);
2851 		if (data == 1) {
2852 			if (R300_MEM_USE_CD_CH_ONLY & temp) {
2853 				temp = RREG32(R300_MC_IND_INDEX);
2854 				temp &= ~R300_MC_IND_ADDR_MASK;
2855 				temp |= R300_MC_READ_CNTL_CD_mcind;
2856 				WREG32(R300_MC_IND_INDEX, temp);
2857 				temp = RREG32(R300_MC_IND_DATA);
2858 				data = (R300_MEM_RBS_POSITION_C_MASK & temp);
2859 			} else {
2860 				temp = RREG32(R300_MC_READ_CNTL_AB);
2861 				data = (R300_MEM_RBS_POSITION_A_MASK & temp);
2862 			}
2863 		} else {
2864 			temp = RREG32(R300_MC_READ_CNTL_AB);
2865 			data = (R300_MEM_RBS_POSITION_A_MASK & temp);
2866 		}
2867 		if (rdev->family == CHIP_RV410 ||
2868 		    rdev->family == CHIP_R420 ||
2869 		    rdev->family == CHIP_R423)
2870 			trbs_ff = memtrbs_r4xx[data];
2871 		else
2872 			trbs_ff = memtrbs[data];
2873 		tcas_ff.full += trbs_ff.full;
2874 	}
2875 
2876 	sclk_eff_ff.full = sclk_ff.full;
2877 
2878 	if (rdev->flags & RADEON_IS_AGP) {
2879 		fixed20_12 agpmode_ff;
2880 		agpmode_ff.full = dfixed_const(radeon_agpmode);
2881 		temp_ff.full = dfixed_const_666(16);
2882 		sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff);
2883 	}
2884 	/* TODO PCIE lanes may affect this - agpmode == 16?? */
2885 
2886 	if (ASIC_IS_R300(rdev)) {
2887 		sclk_delay_ff.full = dfixed_const(250);
2888 	} else {
2889 		if ((rdev->family == CHIP_RV100) ||
2890 		    rdev->flags & RADEON_IS_IGP) {
2891 			if (rdev->mc.vram_is_ddr)
2892 				sclk_delay_ff.full = dfixed_const(41);
2893 			else
2894 				sclk_delay_ff.full = dfixed_const(33);
2895 		} else {
2896 			if (rdev->mc.vram_width == 128)
2897 				sclk_delay_ff.full = dfixed_const(57);
2898 			else
2899 				sclk_delay_ff.full = dfixed_const(41);
2900 		}
2901 	}
2902 
2903 	mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff);
2904 
2905 	if (rdev->mc.vram_is_ddr) {
2906 		if (rdev->mc.vram_width == 32) {
2907 			k1.full = dfixed_const(40);
2908 			c  = 3;
2909 		} else {
2910 			k1.full = dfixed_const(20);
2911 			c  = 1;
2912 		}
2913 	} else {
2914 		k1.full = dfixed_const(40);
2915 		c  = 3;
2916 	}
2917 
2918 	temp_ff.full = dfixed_const(2);
2919 	mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff);
2920 	temp_ff.full = dfixed_const(c);
2921 	mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff);
2922 	temp_ff.full = dfixed_const(4);
2923 	mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff);
2924 	mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff);
2925 	mc_latency_mclk.full += k1.full;
2926 
2927 	mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff);
2928 	mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff);
2929 
2930 	/*
2931 	  HW cursor time assuming worst case of full size colour cursor.
2932 	*/
2933 	temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
2934 	temp_ff.full += trcd_ff.full;
2935 	if (temp_ff.full < tras_ff.full)
2936 		temp_ff.full = tras_ff.full;
2937 	cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff);
2938 
2939 	temp_ff.full = dfixed_const(cur_size);
2940 	cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff);
2941 	/*
2942 	  Find the total latency for the display data.
2943 	*/
2944 	disp_latency_overhead.full = dfixed_const(8);
2945 	disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff);
2946 	mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
2947 	mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
2948 
2949 	if (mc_latency_mclk.full > mc_latency_sclk.full)
2950 		disp_latency.full = mc_latency_mclk.full;
2951 	else
2952 		disp_latency.full = mc_latency_sclk.full;
2953 
2954 	/* setup Max GRPH_STOP_REQ default value */
2955 	if (ASIC_IS_RV100(rdev))
2956 		max_stop_req = 0x5c;
2957 	else
2958 		max_stop_req = 0x7c;
2959 
2960 	if (mode1) {
2961 		/*  CRTC1
2962 		    Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
2963 		    GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
2964 		*/
2965 		stop_req = mode1->hdisplay * pixel_bytes1 / 16;
2966 
2967 		if (stop_req > max_stop_req)
2968 			stop_req = max_stop_req;
2969 
2970 		/*
2971 		  Find the drain rate of the display buffer.
2972 		*/
2973 		temp_ff.full = dfixed_const((16/pixel_bytes1));
2974 		disp_drain_rate.full = dfixed_div(pix_clk, temp_ff);
2975 
2976 		/*
2977 		  Find the critical point of the display buffer.
2978 		*/
2979 		crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency);
2980 		crit_point_ff.full += dfixed_const_half(0);
2981 
2982 		critical_point = dfixed_trunc(crit_point_ff);
2983 
2984 		if (rdev->disp_priority == 2) {
2985 			critical_point = 0;
2986 		}
2987 
2988 		/*
2989 		  The critical point should never be above max_stop_req-4.  Setting
2990 		  GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
2991 		*/
2992 		if (max_stop_req - critical_point < 4)
2993 			critical_point = 0;
2994 
2995 		if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
2996 			/* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/
2997 			critical_point = 0x10;
2998 		}
2999 
3000 		temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
3001 		temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
3002 		temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
3003 		temp &= ~(RADEON_GRPH_START_REQ_MASK);
3004 		if ((rdev->family == CHIP_R350) &&
3005 		    (stop_req > 0x15)) {
3006 			stop_req -= 0x10;
3007 		}
3008 		temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
3009 		temp |= RADEON_GRPH_BUFFER_SIZE;
3010 		temp &= ~(RADEON_GRPH_CRITICAL_CNTL   |
3011 			  RADEON_GRPH_CRITICAL_AT_SOF |
3012 			  RADEON_GRPH_STOP_CNTL);
3013 		/*
3014 		  Write the result into the register.
3015 		*/
3016 		WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
3017 						       (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
3018 
3019 #if 0
3020 		if ((rdev->family == CHIP_RS400) ||
3021 		    (rdev->family == CHIP_RS480)) {
3022 			/* attempt to program RS400 disp regs correctly ??? */
3023 			temp = RREG32(RS400_DISP1_REG_CNTL);
3024 			temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
3025 				  RS400_DISP1_STOP_REQ_LEVEL_MASK);
3026 			WREG32(RS400_DISP1_REQ_CNTL1, (temp |
3027 						       (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
3028 						       (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
3029 			temp = RREG32(RS400_DMIF_MEM_CNTL1);
3030 			temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
3031 				  RS400_DISP1_CRITICAL_POINT_STOP_MASK);
3032 			WREG32(RS400_DMIF_MEM_CNTL1, (temp |
3033 						      (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
3034 						      (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
3035 		}
3036 #endif
3037 
3038 		DRM_DEBUG_KMS("GRPH_BUFFER_CNTL set to %x\n",
3039 			  /* 	  (unsigned int)info->SavedReg->grph_buffer_cntl, */
3040 			  (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
3041 	}
3042 
3043 	if (mode2) {
3044 		u32 grph2_cntl;
3045 		stop_req = mode2->hdisplay * pixel_bytes2 / 16;
3046 
3047 		if (stop_req > max_stop_req)
3048 			stop_req = max_stop_req;
3049 
3050 		/*
3051 		  Find the drain rate of the display buffer.
3052 		*/
3053 		temp_ff.full = dfixed_const((16/pixel_bytes2));
3054 		disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff);
3055 
3056 		grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
3057 		grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
3058 		grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
3059 		grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
3060 		if ((rdev->family == CHIP_R350) &&
3061 		    (stop_req > 0x15)) {
3062 			stop_req -= 0x10;
3063 		}
3064 		grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
3065 		grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
3066 		grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL   |
3067 			  RADEON_GRPH_CRITICAL_AT_SOF |
3068 			  RADEON_GRPH_STOP_CNTL);
3069 
3070 		if ((rdev->family == CHIP_RS100) ||
3071 		    (rdev->family == CHIP_RS200))
3072 			critical_point2 = 0;
3073 		else {
3074 			temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
3075 			temp_ff.full = dfixed_const(temp);
3076 			temp_ff.full = dfixed_mul(mclk_ff, temp_ff);
3077 			if (sclk_ff.full < temp_ff.full)
3078 				temp_ff.full = sclk_ff.full;
3079 
3080 			read_return_rate.full = temp_ff.full;
3081 
3082 			if (mode1) {
3083 				temp_ff.full = read_return_rate.full - disp_drain_rate.full;
3084 				time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff);
3085 			} else {
3086 				time_disp1_drop_priority.full = 0;
3087 			}
3088 			crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
3089 			crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2);
3090 			crit_point_ff.full += dfixed_const_half(0);
3091 
3092 			critical_point2 = dfixed_trunc(crit_point_ff);
3093 
3094 			if (rdev->disp_priority == 2) {
3095 				critical_point2 = 0;
3096 			}
3097 
3098 			if (max_stop_req - critical_point2 < 4)
3099 				critical_point2 = 0;
3100 
3101 		}
3102 
3103 		if (critical_point2 == 0 && rdev->family == CHIP_R300) {
3104 			/* some R300 cards have problem with this set to 0 */
3105 			critical_point2 = 0x10;
3106 		}
3107 
3108 		WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
3109 						  (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
3110 
3111 		if ((rdev->family == CHIP_RS400) ||
3112 		    (rdev->family == CHIP_RS480)) {
3113 #if 0
3114 			/* attempt to program RS400 disp2 regs correctly ??? */
3115 			temp = RREG32(RS400_DISP2_REQ_CNTL1);
3116 			temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
3117 				  RS400_DISP2_STOP_REQ_LEVEL_MASK);
3118 			WREG32(RS400_DISP2_REQ_CNTL1, (temp |
3119 						       (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
3120 						       (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
3121 			temp = RREG32(RS400_DISP2_REQ_CNTL2);
3122 			temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
3123 				  RS400_DISP2_CRITICAL_POINT_STOP_MASK);
3124 			WREG32(RS400_DISP2_REQ_CNTL2, (temp |
3125 						       (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
3126 						       (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
3127 #endif
3128 			WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
3129 			WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
3130 			WREG32(RS400_DMIF_MEM_CNTL1,  0x29CA71DC);
3131 			WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
3132 		}
3133 
3134 		DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL set to %x\n",
3135 			  (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
3136 	}
3137 }
3138 
3139 static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
3140 {
3141 	DRM_ERROR("pitch                      %d\n", t->pitch);
3142 	DRM_ERROR("use_pitch                  %d\n", t->use_pitch);
3143 	DRM_ERROR("width                      %d\n", t->width);
3144 	DRM_ERROR("width_11                   %d\n", t->width_11);
3145 	DRM_ERROR("height                     %d\n", t->height);
3146 	DRM_ERROR("height_11                  %d\n", t->height_11);
3147 	DRM_ERROR("num levels                 %d\n", t->num_levels);
3148 	DRM_ERROR("depth                      %d\n", t->txdepth);
3149 	DRM_ERROR("bpp                        %d\n", t->cpp);
3150 	DRM_ERROR("coordinate type            %d\n", t->tex_coord_type);
3151 	DRM_ERROR("width round to power of 2  %d\n", t->roundup_w);
3152 	DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
3153 	DRM_ERROR("compress format            %d\n", t->compress_format);
3154 }
3155 
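/*
 * Worst case size of a DXT compressed mip level: both formats use 4x4 texel
 * blocks, DXT1 at 8 bytes per block and DXT3/DXT5 at 16 bytes per block, with
 * a minimum number of block columns enforced below. For example a 64x64 DXT1
 * level is (64 / 4) * (64 / 4) * 8 = 2048 bytes.
 */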
3156 static int r100_track_compress_size(int compress_format, int w, int h)
3157 {
3158 	int block_width, block_height, block_bytes;
3159 	int wblocks, hblocks;
3160 	int min_wblocks;
3161 	int sz;
3162 
3163 	block_width = 4;
3164 	block_height = 4;
3165 
3166 	switch (compress_format) {
3167 	case R100_TRACK_COMP_DXT1:
3168 		block_bytes = 8;
3169 		min_wblocks = 4;
3170 		break;
3171 	default:
3172 	case R100_TRACK_COMP_DXT35:
3173 		block_bytes = 16;
3174 		min_wblocks = 2;
3175 		break;
3176 	}
3177 
3178 	hblocks = (h + block_height - 1) / block_height;
3179 	wblocks = (w + block_width - 1) / block_width;
3180 	if (wblocks < min_wblocks)
3181 		wblocks = min_wblocks;
3182 	sz = wblocks * hblocks * block_bytes;
3183 	return sz;
3184 }
3185 
3186 static int r100_cs_track_cube(struct radeon_device *rdev,
3187 			      struct r100_cs_track *track, unsigned idx)
3188 {
3189 	unsigned face, w, h;
3190 	struct radeon_bo *cube_robj;
3191 	unsigned long size;
3192 	unsigned compress_format = track->textures[idx].compress_format;
3193 
3194 	for (face = 0; face < 5; face++) {
3195 		cube_robj = track->textures[idx].cube_info[face].robj;
3196 		w = track->textures[idx].cube_info[face].width;
3197 		h = track->textures[idx].cube_info[face].height;
3198 
3199 		if (compress_format) {
3200 			size = r100_track_compress_size(compress_format, w, h);
3201 		} else
3202 			size = w * h;
3203 		size *= track->textures[idx].cpp;
3204 
3205 		size += track->textures[idx].cube_info[face].offset;
3206 
3207 		if (size > radeon_bo_size(cube_robj)) {
3208 			DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
3209 				  size, radeon_bo_size(cube_robj));
3210 			r100_cs_track_texture_print(&track->textures[idx]);
3211 			return -1;
3212 		}
3213 	}
3214 	return 0;
3215 }
3216 
3217 static int r100_cs_track_texture_check(struct radeon_device *rdev,
3218 				       struct r100_cs_track *track)
3219 {
3220 	struct radeon_bo *robj;
3221 	unsigned long size;
3222 	unsigned u, i, w, h, d;
3223 	int ret;
3224 
3225 	for (u = 0; u < track->num_texture; u++) {
3226 		if (!track->textures[u].enabled)
3227 			continue;
3228 		robj = track->textures[u].robj;
3229 		if (robj == NULL) {
3230 			DRM_ERROR("No texture bound to unit %u\n", u);
3231 			return -EINVAL;
3232 		}
3233 		size = 0;
3234 		for (i = 0; i <= track->textures[u].num_levels; i++) {
3235 			if (track->textures[u].use_pitch) {
3236 				if (rdev->family < CHIP_R300)
3237 					w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
3238 				else
3239 					w = track->textures[u].pitch / (1 << i);
3240 			} else {
3241 				w = track->textures[u].width;
3242 				if (rdev->family >= CHIP_RV515)
3243 					w |= track->textures[u].width_11;
3244 				w = w / (1 << i);
3245 				if (track->textures[u].roundup_w)
3246 					w = roundup_pow_of_two(w);
3247 			}
3248 			h = track->textures[u].height;
3249 			if (rdev->family >= CHIP_RV515)
3250 				h |= track->textures[u].height_11;
3251 			h = h / (1 << i);
3252 			if (track->textures[u].roundup_h)
3253 				h = roundup_pow_of_two(h);
3254 			if (track->textures[u].tex_coord_type == 1) {
3255 				d = (1 << track->textures[u].txdepth) / (1 << i);
3256 				if (!d)
3257 					d = 1;
3258 			} else {
3259 				d = 1;
3260 			}
3261 			if (track->textures[u].compress_format) {
3262 
3263 				size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
3264 				/* compressed textures are block based */
3265 			} else
3266 				size += w * h * d;
3267 		}
3268 		size *= track->textures[u].cpp;
3269 
3270 		switch (track->textures[u].tex_coord_type) {
3271 		case 0:
3272 		case 1:
3273 			break;
3274 		case 2:
3275 			if (track->separate_cube) {
3276 				ret = r100_cs_track_cube(rdev, track, u);
3277 				if (ret)
3278 					return ret;
3279 			} else
3280 				size *= 6;
3281 			break;
3282 		default:
3283 			DRM_ERROR("Invalid texture coordinate type %u for unit "
3284 				  "%u\n", track->textures[u].tex_coord_type, u);
3285 			return -EINVAL;
3286 		}
3287 		if (size > radeon_bo_size(robj)) {
3288 			DRM_ERROR("Texture of unit %u needs %lu bytes but is "
3289 				  "%lu\n", u, size, radeon_bo_size(robj));
3290 			r100_cs_track_texture_print(&track->textures[u]);
3291 			return -EINVAL;
3292 		}
3293 	}
3294 	return 0;
3295 }
3296 
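/*
 * Validate the whole tracked CS state (color buffers, z buffer, vertex arrays
 * and textures) against the sizes of the bound buffer objects. PRIM_WALK in
 * VAP_VF_CNTL (bits 4-5) selects how the draw fetches vertices, which appears
 * to be why the three cases below size the arrays differently: indexed draws
 * (1) must cover max_indx elements, vertex-list draws (2) must cover nverts
 * elements, and immediate draws (3) must carry vtx_size * nverts dwords in
 * the packet itself.
 */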
3297 int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
3298 {
3299 	unsigned i;
3300 	unsigned long size;
3301 	unsigned prim_walk;
3302 	unsigned nverts;
3303 	unsigned num_cb = track->num_cb;
3304 
3305 	if (!track->zb_cb_clear && !track->color_channel_mask &&
3306 	    !track->blend_read_enable)
3307 		num_cb = 0;
3308 
3309 	for (i = 0; i < num_cb; i++) {
3310 		if (track->cb[i].robj == NULL) {
3311 			DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
3312 			return -EINVAL;
3313 		}
3314 		size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
3315 		size += track->cb[i].offset;
3316 		if (size > radeon_bo_size(track->cb[i].robj)) {
3317 			DRM_ERROR("[drm] Buffer too small for color buffer %d "
3318 				  "(need %lu have %lu) !\n", i, size,
3319 				  radeon_bo_size(track->cb[i].robj));
3320 			DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
3321 				  i, track->cb[i].pitch, track->cb[i].cpp,
3322 				  track->cb[i].offset, track->maxy);
3323 			return -EINVAL;
3324 		}
3325 	}
3326 	if (track->z_enabled) {
3327 		if (track->zb.robj == NULL) {
3328 			DRM_ERROR("[drm] No buffer for z buffer !\n");
3329 			return -EINVAL;
3330 		}
3331 		size = track->zb.pitch * track->zb.cpp * track->maxy;
3332 		size += track->zb.offset;
3333 		if (size > radeon_bo_size(track->zb.robj)) {
3334 			DRM_ERROR("[drm] Buffer too small for z buffer "
3335 				  "(need %lu have %lu) !\n", size,
3336 				  radeon_bo_size(track->zb.robj));
3337 			DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
3338 				  track->zb.pitch, track->zb.cpp,
3339 				  track->zb.offset, track->maxy);
3340 			return -EINVAL;
3341 		}
3342 	}
3343 	prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
3344 	if (track->vap_vf_cntl & (1 << 14)) {
3345 		nverts = track->vap_alt_nverts;
3346 	} else {
3347 		nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
3348 	}
3349 	switch (prim_walk) {
3350 	case 1:
3351 		for (i = 0; i < track->num_arrays; i++) {
3352 			size = track->arrays[i].esize * track->max_indx * 4;
3353 			if (track->arrays[i].robj == NULL) {
3354 				DRM_ERROR("(PW %u) Vertex array %u no buffer "
3355 					  "bound\n", prim_walk, i);
3356 				return -EINVAL;
3357 			}
3358 			if (size > radeon_bo_size(track->arrays[i].robj)) {
3359 				dev_err(rdev->dev, "(PW %u) Vertex array %u "
3360 					"need %lu dwords have %lu dwords\n",
3361 					prim_walk, i, size >> 2,
3362 					radeon_bo_size(track->arrays[i].robj)
3363 					>> 2);
3364 				DRM_ERROR("Max indices %u\n", track->max_indx);
3365 				return -EINVAL;
3366 			}
3367 		}
3368 		break;
3369 	case 2:
3370 		for (i = 0; i < track->num_arrays; i++) {
3371 			size = track->arrays[i].esize * (nverts - 1) * 4;
3372 			if (track->arrays[i].robj == NULL) {
3373 				DRM_ERROR("(PW %u) Vertex array %u no buffer "
3374 					  "bound\n", prim_walk, i);
3375 				return -EINVAL;
3376 			}
3377 			if (size > radeon_bo_size(track->arrays[i].robj)) {
3378 				dev_err(rdev->dev, "(PW %u) Vertex array %u "
3379 					"need %lu dwords have %lu dwords\n",
3380 					prim_walk, i, size >> 2,
3381 					radeon_bo_size(track->arrays[i].robj)
3382 					>> 2);
3383 				return -EINVAL;
3384 			}
3385 		}
3386 		break;
3387 	case 3:
3388 		size = track->vtx_size * nverts;
3389 		if (size != track->immd_dwords) {
3390 			DRM_ERROR("IMMD draw %u dwords but needs %lu dwords\n",
3391 				  track->immd_dwords, size);
3392 			DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
3393 				  nverts, track->vtx_size);
3394 			return -EINVAL;
3395 		}
3396 		break;
3397 	default:
3398 		DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
3399 			  prim_walk);
3400 		return -EINVAL;
3401 	}
3402 	return r100_cs_track_texture_check(rdev, track);
3403 }
3404 
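/*
 * Reset the CS tracker to per-family defaults before parsing a command
 * stream.  Sizes, pitches and counts are initialised to worst-case maxima
 * so that state the CS does not explicitly program is still validated
 * conservatively.
 */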
3405 void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
3406 {
3407 	unsigned i, face;
3408 
3409 	if (rdev->family < CHIP_R300) {
3410 		track->num_cb = 1;
3411 		if (rdev->family <= CHIP_RS200)
3412 			track->num_texture = 3;
3413 		else
3414 			track->num_texture = 6;
3415 		track->maxy = 2048;
3416 		track->separate_cube = 1;
3417 	} else {
3418 		track->num_cb = 4;
3419 		track->num_texture = 16;
3420 		track->maxy = 4096;
3421 		track->separate_cube = 0;
3422 	}
3423 
3424 	for (i = 0; i < track->num_cb; i++) {
3425 		track->cb[i].robj = NULL;
3426 		track->cb[i].pitch = 8192;
3427 		track->cb[i].cpp = 16;
3428 		track->cb[i].offset = 0;
3429 	}
3430 	track->z_enabled = true;
3431 	track->zb.robj = NULL;
3432 	track->zb.pitch = 8192;
3433 	track->zb.cpp = 4;
3434 	track->zb.offset = 0;
3435 	track->vtx_size = 0x7F;
3436 	track->immd_dwords = 0xFFFFFFFFUL;
3437 	track->num_arrays = 11;
3438 	track->max_indx = 0x00FFFFFFUL;
3439 	for (i = 0; i < track->num_arrays; i++) {
3440 		track->arrays[i].robj = NULL;
3441 		track->arrays[i].esize = 0x7F;
3442 	}
3443 	for (i = 0; i < track->num_texture; i++) {
3444 		track->textures[i].compress_format = R100_TRACK_COMP_NONE;
3445 		track->textures[i].pitch = 16536;
3446 		track->textures[i].width = 16536;
3447 		track->textures[i].height = 16536;
3448 		track->textures[i].width_11 = 1 << 11;
3449 		track->textures[i].height_11 = 1 << 11;
3450 		track->textures[i].num_levels = 12;
3451 		if (rdev->family <= CHIP_RS200) {
3452 			track->textures[i].tex_coord_type = 0;
3453 			track->textures[i].txdepth = 0;
3454 		} else {
3455 			track->textures[i].txdepth = 16;
3456 			track->textures[i].tex_coord_type = 1;
3457 		}
3458 		track->textures[i].cpp = 64;
3459 		track->textures[i].robj = NULL;
3460 		/* CS IB emission code makes sure texture units are disabled */
3461 		track->textures[i].enabled = false;
3462 		track->textures[i].roundup_w = true;
3463 		track->textures[i].roundup_h = true;
3464 		if (track->separate_cube)
3465 			for (face = 0; face < 5; face++) {
3466 				track->textures[i].cube_info[face].robj = NULL;
3467 				track->textures[i].cube_info[face].width = 16536;
3468 				track->textures[i].cube_info[face].height = 16536;
3469 				track->textures[i].cube_info[face].offset = 0;
3470 			}
3471 	}
3472 }
3473 
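/*
 * Basic CP ring test: seed a scratch register with 0xCAFEDEAD, emit a
 * single packet through the ring that writes 0xDEADBEEF to it, then poll
 * the register until the value appears or rdev->usec_timeout expires.
 */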
3474 int r100_ring_test(struct radeon_device *rdev)
3475 {
3476 	uint32_t scratch;
3477 	uint32_t tmp = 0;
3478 	unsigned i;
3479 	int r;
3480 
3481 	r = radeon_scratch_get(rdev, &scratch);
3482 	if (r) {
3483 		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
3484 		return r;
3485 	}
3486 	WREG32(scratch, 0xCAFEDEAD);
3487 	r = radeon_ring_lock(rdev, 2);
3488 	if (r) {
3489 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3490 		radeon_scratch_free(rdev, scratch);
3491 		return r;
3492 	}
3493 	radeon_ring_write(rdev, PACKET0(scratch, 0));
3494 	radeon_ring_write(rdev, 0xDEADBEEF);
3495 	radeon_ring_unlock_commit(rdev);
3496 	for (i = 0; i < rdev->usec_timeout; i++) {
3497 		tmp = RREG32(scratch);
3498 		if (tmp == 0xDEADBEEF) {
3499 			break;
3500 		}
3501 		DRM_UDELAY(1);
3502 	}
3503 	if (i < rdev->usec_timeout) {
3504 		DRM_INFO("ring test succeeded in %d usecs\n", i);
3505 	} else {
3506 		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
3507 			  scratch, tmp);
3508 		r = -EINVAL;
3509 	}
3510 	radeon_scratch_free(rdev, scratch);
3511 	return r;
3512 }
3513 
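/* Emit the ring packets that make the CP fetch and run an indirect buffer. */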
3514 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3515 {
3516 	radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
3517 	radeon_ring_write(rdev, ib->gpu_addr);
3518 	radeon_ring_write(rdev, ib->length_dw);
3519 }
3520 
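/*
 * IB test: build a small indirect buffer that writes 0xDEADBEEF to a
 * scratch register, padded with type-2 NOP packets, schedule it, wait on
 * its fence and poll the scratch register to confirm the CP executed it.
 */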
3521 int r100_ib_test(struct radeon_device *rdev)
3522 {
3523 	struct radeon_ib *ib;
3524 	uint32_t scratch;
3525 	uint32_t tmp = 0;
3526 	unsigned i;
3527 	int r;
3528 
3529 	r = radeon_scratch_get(rdev, &scratch);
3530 	if (r) {
3531 		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
3532 		return r;
3533 	}
3534 	WREG32(scratch, 0xCAFEDEAD);
3535 	r = radeon_ib_get(rdev, &ib);
3536 	if (r) {
		radeon_scratch_free(rdev, scratch);
3537 		return r;
3538 	}
3539 	ib->ptr[0] = PACKET0(scratch, 0);
3540 	ib->ptr[1] = 0xDEADBEEF;
3541 	ib->ptr[2] = PACKET2(0);
3542 	ib->ptr[3] = PACKET2(0);
3543 	ib->ptr[4] = PACKET2(0);
3544 	ib->ptr[5] = PACKET2(0);
3545 	ib->ptr[6] = PACKET2(0);
3546 	ib->ptr[7] = PACKET2(0);
3547 	ib->length_dw = 8;
3548 	r = radeon_ib_schedule(rdev, ib);
3549 	if (r) {
3550 		radeon_scratch_free(rdev, scratch);
3551 		radeon_ib_free(rdev, &ib);
3552 		return r;
3553 	}
3554 	r = radeon_fence_wait(ib->fence, false);
3555 	if (r) {
		radeon_scratch_free(rdev, scratch);
		radeon_ib_free(rdev, &ib);
3556 		return r;
3557 	}
3558 	for (i = 0; i < rdev->usec_timeout; i++) {
3559 		tmp = RREG32(scratch);
3560 		if (tmp == 0xDEADBEEF) {
3561 			break;
3562 		}
3563 		DRM_UDELAY(1);
3564 	}
3565 	if (i < rdev->usec_timeout) {
3566 		DRM_INFO("ib test succeeded in %u usecs\n", i);
3567 	} else {
3568 		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
3569 			  scratch, tmp);
3570 		r = -EINVAL;
3571 	}
3572 	radeon_scratch_free(rdev, scratch);
3573 	radeon_ib_free(rdev, &ib);
3574 	return r;
3575 }
3576 
3577 void r100_ib_fini(struct radeon_device *rdev)
3578 {
3579 	radeon_ib_pool_fini(rdev);
3580 }
3581 
3582 int r100_ib_init(struct radeon_device *rdev)
3583 {
3584 	int r;
3585 
3586 	r = radeon_ib_pool_init(rdev);
3587 	if (r) {
3588 		dev_err(rdev->dev, "failed initializing IB pool (%d).\n", r);
3589 		r100_ib_fini(rdev);
3590 		return r;
3591 	}
3592 	r = r100_ib_test(rdev);
3593 	if (r) {
3594 		dev_err(rdev->dev, "failed testing IB (%d).\n", r);
3595 		r100_ib_fini(rdev);
3596 		return r;
3597 	}
3598 	return 0;
3599 }
3600 
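/*
 * Quiesce MC clients before the memory controller is reprogrammed: stop
 * the CP, save the CRTC/cursor/overlay state that r100_mc_resume() will
 * restore, then disable VGA aperture access, cursors and display requests.
 */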
3601 void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
3602 {
3603 	/* Shutdown CP; we shouldn't need to do that, but better safe than
3604 	 * sorry.
3605 	 */
3606 	rdev->cp.ready = false;
3607 	WREG32(R_000740_CP_CSQ_CNTL, 0);
3608 
3609 	/* Save a few CRTC registers */
3610 	save->GENMO_WT = RREG8(R_0003C2_GENMO_WT);
3611 	save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL);
3612 	save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL);
3613 	save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET);
3614 	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3615 		save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL);
3616 		save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET);
3617 	}
3618 
3619 	/* Disable VGA aperture access */
3620 	WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT);
3621 	/* Disable cursor, overlay, crtc */
3622 	WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1));
3623 	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |
3624 					S_000054_CRTC_DISPLAY_DIS(1));
3625 	WREG32(R_000050_CRTC_GEN_CNTL,
3626 			(C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) |
3627 			S_000050_CRTC_DISP_REQ_EN_B(1));
3628 	WREG32(R_000420_OV0_SCALE_CNTL,
3629 		C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL));
3630 	WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET);
3631 	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3632 		WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET |
3633 						S_000360_CUR2_LOCK(1));
3634 		WREG32(R_0003F8_CRTC2_GEN_CNTL,
3635 			(C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) |
3636 			S_0003F8_CRTC2_DISPLAY_DIS(1) |
3637 			S_0003F8_CRTC2_DISP_REQ_EN_B(1));
3638 		WREG32(R_000360_CUR2_OFFSET,
3639 			C_000360_CUR2_LOCK & save->CUR2_OFFSET);
3640 	}
3641 }
3642 
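/*
 * Point the CRTC(s) at the new VRAM base and restore the display state
 * saved by r100_mc_stop().
 */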
3643 void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
3644 {
3645 	/* Update base address for crtc */
3646 	WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
3647 	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3648 		WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
3649 	}
3650 	/* Restore CRTC registers */
3651 	WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
3652 	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
3653 	WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
3654 	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3655 		WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
3656 	}
3657 }
3658 
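/* Disable VGA rendering by clearing VGA_RAM_EN in GENMO_WT. */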
3659 void r100_vga_render_disable(struct radeon_device *rdev)
3660 {
3661 	u32 tmp;
3662 
3663 	tmp = RREG8(R_0003C2_GENMO_WT);
3664 	WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp);
3665 }
3666 
3667 static void r100_debugfs(struct radeon_device *rdev)
3668 {
3669 	int r;
3670 
3671 	r = r100_debugfs_mc_info_init(rdev);
3672 	if (r)
3673 		dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n");
3674 }
3675 
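/*
 * Program the MC apertures: stop MC clients, set up (or clear) the AGP
 * window, wait for the MC to go idle, program the VRAM window and resume
 * the saved display state.
 */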
3676 static void r100_mc_program(struct radeon_device *rdev)
3677 {
3678 	struct r100_mc_save save;
3679 
3680 	/* Stop all MC clients */
3681 	r100_mc_stop(rdev, &save);
3682 	if (rdev->flags & RADEON_IS_AGP) {
3683 		WREG32(R_00014C_MC_AGP_LOCATION,
3684 			S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
3685 			S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
3686 		WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
3687 		if (rdev->family > CHIP_RV200)
3688 			WREG32(R_00015C_AGP_BASE_2,
3689 				upper_32_bits(rdev->mc.agp_base) & 0xff);
3690 	} else {
3691 		WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
3692 		WREG32(R_000170_AGP_BASE, 0);
3693 		if (rdev->family > CHIP_RV200)
3694 			WREG32(R_00015C_AGP_BASE_2, 0);
3695 	}
3696 	/* Wait for mc idle */
3697 	if (r100_mc_wait_for_idle(rdev))
3698 		dev_warn(rdev->dev, "Wait for MC idle timeout.\n");
3699 	/* Program the MC; the FB address space should be limited to 32 bits */
3700 	WREG32(R_000148_MC_FB_LOCATION,
3701 		S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
3702 		S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
3703 	r100_mc_resume(rdev, &save);
3704 }
3705 
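/*
 * Enable dynamic clock gating when requested through the radeon_dynclks
 * module parameter, and force on the clocks of blocks that must stay
 * running (CP, VIP, plus the display blocks on RV250/RV280).
 */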
3706 void r100_clock_startup(struct radeon_device *rdev)
3707 {
3708 	u32 tmp;
3709 
3710 	if (radeon_dynclks != -1 && radeon_dynclks)
3711 		radeon_legacy_set_clock_gating(rdev, 1);
3712 	/* We need to force some of the blocks on */
3713 	tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
3714 	tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
3715 	if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280))
3716 		tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1);
3717 	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
3718 }
3719 
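/*
 * Bring acceleration up: program common registers and the MC, start the
 * clocks, enable bus mastering and the PCI GART where applicable, set up
 * interrupts, then initialise the CP ring, writeback and the IB pool.
 */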
3720 static int r100_startup(struct radeon_device *rdev)
3721 {
3722 	int r;
3723 
3724 	/* set common regs */
3725 	r100_set_common_regs(rdev);
3726 	/* program mc */
3727 	r100_mc_program(rdev);
3728 	/* Resume clock */
3729 	r100_clock_startup(rdev);
3730 	/* Initialize GPU configuration (# pipes, ...) */
3731 //	r100_gpu_init(rdev);
3732 	/* Initialize GART (initialize after TTM so we can allocate
3733 	 * memory through TTM but finalize after TTM) */
3734 	r100_enable_bm(rdev);
3735 	if (rdev->flags & RADEON_IS_PCI) {
3736 		r = r100_pci_gart_enable(rdev);
3737 		if (r)
3738 			return r;
3739 	}
3740 	/* Enable IRQ */
3741 	r100_irq_set(rdev);
3742 	rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
3743 	/* 1M ring buffer */
3744 	r = r100_cp_init(rdev, 1024 * 1024);
3745 	if (r) {
3746 		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
3747 		return r;
3748 	}
3749 	r = r100_wb_init(rdev);
3750 	if (r)
3751 		dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
3752 	r = r100_ib_init(rdev);
3753 	if (r) {
3754 		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
3755 		return r;
3756 	}
3757 	return 0;
3758 }
3759 
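/*
 * Resume: make sure the PCI GART is off, reset the GPU, re-post the card
 * from the combios tables and restart acceleration through r100_startup().
 */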
3760 int r100_resume(struct radeon_device *rdev)
3761 {
3762 	/* Make sure the GART is not active */
3763 	if (rdev->flags & RADEON_IS_PCI)
3764 		r100_pci_gart_disable(rdev);
3765 	/* Resume clock before doing reset */
3766 	r100_clock_startup(rdev);
3767 	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
3768 	if (radeon_asic_reset(rdev)) {
3769 		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
3770 			RREG32(R_000E40_RBBM_STATUS),
3771 			RREG32(R_0007C0_CP_STAT));
3772 	}
3773 	/* post */
3774 	radeon_combios_asic_init(rdev->ddev);
3775 	/* Resume clock after posting */
3776 	r100_clock_startup(rdev);
3777 	/* Initialize surface registers */
3778 	radeon_surface_init(rdev);
3779 	return r100_startup(rdev);
3780 }
3781 
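/* Suspend: stop the CP, writeback and interrupts, and disable the PCI GART. */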
3782 int r100_suspend(struct radeon_device *rdev)
3783 {
3784 	r100_cp_disable(rdev);
3785 	r100_wb_disable(rdev);
3786 	r100_irq_disable(rdev);
3787 	if (rdev->flags & RADEON_IS_PCI)
3788 		r100_pci_gart_disable(rdev);
3789 	return 0;
3790 }
3791 
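/* Tear down everything brought up by r100_init(), roughly in reverse order. */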
3792 void r100_fini(struct radeon_device *rdev)
3793 {
3794 	r100_cp_fini(rdev);
3795 	r100_wb_fini(rdev);
3796 	r100_ib_fini(rdev);
3797 	radeon_gem_fini(rdev);
3798 	if (rdev->flags & RADEON_IS_PCI)
3799 		r100_pci_gart_fini(rdev);
3800 	radeon_agp_fini(rdev);
3801 	radeon_irq_kms_fini(rdev);
3802 	radeon_fence_driver_fini(rdev);
3803 	radeon_bo_fini(rdev);
3804 	radeon_atombios_fini(rdev);
3805 	kfree(rdev->bios);
3806 	rdev->bios = NULL;
3807 }
3808 
3809 /*
3810  * Due to how kexec works, it can leave the hw fully initialised when it
3811  * boots the new kernel. However doing our init sequence with the CP and
3812  * WB stuff setup causes GPU hangs on the RN50 at least. So at startup
3813  * do some quick sanity checks and restore sane values to avoid this
3814  * problem.
3815  */
3816 void r100_restore_sanity(struct radeon_device *rdev)
3817 {
3818 	u32 tmp;
3819 
3820 	tmp = RREG32(RADEON_CP_CSQ_CNTL);
3821 	if (tmp) {
3822 		WREG32(RADEON_CP_CSQ_CNTL, 0);
3823 	}
3824 	tmp = RREG32(RADEON_CP_RB_CNTL);
3825 	if (tmp) {
3826 		WREG32(RADEON_CP_RB_CNTL, 0);
3827 	}
3828 	tmp = RREG32(RADEON_SCRATCH_UMSK);
3829 	if (tmp) {
3830 		WREG32(RADEON_SCRATCH_UMSK, 0);
3831 	}
3832 }
3833 
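/*
 * One-time ASIC init: register debugfs files, fetch and parse the combios,
 * reset and post the card if needed, then set up clocks, AGP, VRAM, the
 * fence driver, interrupts, the memory manager and the GART before starting
 * acceleration.  If acceleration fails to start, the device stays usable
 * with accel_working set to false.
 */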
3834 int r100_init(struct radeon_device *rdev)
3835 {
3836 	int r;
3837 
3838 	/* Register debugfs file specific to this group of asics */
3839 	r100_debugfs(rdev);
3840 	/* Disable VGA */
3841 	r100_vga_render_disable(rdev);
3842 	/* Initialize scratch registers */
3843 	radeon_scratch_init(rdev);
3844 	/* Initialize surface registers */
3845 	radeon_surface_init(rdev);
3846 	/* sanity check some registers to avoid hangs like after kexec */
3847 	r100_restore_sanity(rdev);
3848 	/* TODO: disabling VGA needs to use VGA requests */
3849 	/* BIOS */
3850 	if (!radeon_get_bios(rdev)) {
3851 		if (ASIC_IS_AVIVO(rdev))
3852 			return -EINVAL;
3853 	}
3854 	if (rdev->is_atom_bios) {
3855 		dev_err(rdev->dev, "Expecting combios for R100-family GPU\n");
3856 		return -EINVAL;
3857 	} else {
3858 		r = radeon_combios_init(rdev);
3859 		if (r)
3860 			return r;
3861 	}
3862 	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
3863 	if (radeon_asic_reset(rdev)) {
3864 		dev_warn(rdev->dev,
3865 			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
3866 			RREG32(R_000E40_RBBM_STATUS),
3867 			RREG32(R_0007C0_CP_STAT));
3868 	}
3869 	/* check if the card is posted or not */
3870 	if (radeon_boot_test_post_card(rdev) == false)
3871 		return -EINVAL;
3872 	/* Set asic errata */
3873 	r100_errata(rdev);
3874 	/* Initialize clocks */
3875 	radeon_get_clock_info(rdev->ddev);
3876 	/* initialize AGP */
3877 	if (rdev->flags & RADEON_IS_AGP) {
3878 		r = radeon_agp_init(rdev);
3879 		if (r) {
3880 			radeon_agp_disable(rdev);
3881 		}
3882 	}
3883 	/* initialize VRAM */
3884 	r100_mc_init(rdev);
3885 	/* Fence driver */
3886 	r = radeon_fence_driver_init(rdev);
3887 	if (r)
3888 		return r;
3889 	r = radeon_irq_kms_init(rdev);
3890 	if (r)
3891 		return r;
3892 	/* Memory manager */
3893 	r = radeon_bo_init(rdev);
3894 	if (r)
3895 		return r;
3896 	if (rdev->flags & RADEON_IS_PCI) {
3897 		r = r100_pci_gart_init(rdev);
3898 		if (r)
3899 			return r;
3900 	}
3901 	r100_set_safe_registers(rdev);
3902 	rdev->accel_working = true;
3903 	r = r100_startup(rdev);
3904 	if (r) {
3905 		/* Something went wrong with the accel init; stop accel */
3906 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
3907 		r100_cp_fini(rdev);
3908 		r100_wb_fini(rdev);
3909 		r100_ib_fini(rdev);
3910 		radeon_irq_kms_fini(rdev);
3911 		if (rdev->flags & RADEON_IS_PCI)
3912 			r100_pci_gart_fini(rdev);
3913 		rdev->accel_working = false;
3914 	}
3915 	return 0;
3916 }
3917