/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "r100d.h"
#include "rs100d.h"
#include "rv200d.h"
#include "rv250d.h"
#include "atom.h"

#include <linux/firmware.h>
#include <linux/platform_device.h>

#include "r100_reg_safe.h"
#include "rn50_reg_safe.h"

/* Firmware Names */
#define FIRMWARE_R100		"radeon/R100_cp.bin"
#define FIRMWARE_R200		"radeon/R200_cp.bin"
#define FIRMWARE_R300		"radeon/R300_cp.bin"
#define FIRMWARE_R420		"radeon/R420_cp.bin"
#define FIRMWARE_RS690		"radeon/RS690_cp.bin"
#define FIRMWARE_RS600		"radeon/RS600_cp.bin"
#define FIRMWARE_R520		"radeon/R520_cp.bin"

MODULE_FIRMWARE(FIRMWARE_R100);
MODULE_FIRMWARE(FIRMWARE_R200);
MODULE_FIRMWARE(FIRMWARE_R300);
MODULE_FIRMWARE(FIRMWARE_R420);
MODULE_FIRMWARE(FIRMWARE_RS690);
MODULE_FIRMWARE(FIRMWARE_RS600);
MODULE_FIRMWARE(FIRMWARE_R520);

#include "r100_track.h"

/* This file gathers functions specific to:
 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
 */

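/* Pick the power state for the planned dynpm action: walk the power
 * state table, skip SINGLE_DISPLAY_ONLY states when more than one crtc
 * is active, and clamp at the table ends so we never up-/downclock past
 * what the table provides.
 */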
void r100_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;
	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	switch (rdev->pm.dynpm_planned_action) {
	case DYNPM_ACTION_MINIMUM:
		rdev->pm.requested_power_state_index = 0;
		rdev->pm.dynpm_can_downclock = false;
		break;
	case DYNPM_ACTION_DOWNCLOCK:
		if (rdev->pm.current_power_state_index == 0) {
			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
			rdev->pm.dynpm_can_downclock = false;
		} else {
			if (rdev->pm.active_crtc_count > 1) {
				for (i = 0; i < rdev->pm.num_power_states; i++) {
					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
						continue;
					else if (i >= rdev->pm.current_power_state_index) {
						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
						break;
					} else {
						rdev->pm.requested_power_state_index = i;
						break;
					}
				}
			} else
				rdev->pm.requested_power_state_index =
					rdev->pm.current_power_state_index - 1;
		}
		/* don't use the power state if crtcs are active and no display flag is set */
		if ((rdev->pm.active_crtc_count > 0) &&
		    (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
		     RADEON_PM_MODE_NO_DISPLAY)) {
			rdev->pm.requested_power_state_index++;
		}
		break;
	case DYNPM_ACTION_UPCLOCK:
		if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
			rdev->pm.dynpm_can_upclock = false;
		} else {
			if (rdev->pm.active_crtc_count > 1) {
				for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
						continue;
					else if (i <= rdev->pm.current_power_state_index) {
						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
						break;
					} else {
						rdev->pm.requested_power_state_index = i;
						break;
					}
				}
			} else
				rdev->pm.requested_power_state_index =
					rdev->pm.current_power_state_index + 1;
		}
		break;
	case DYNPM_ACTION_DEFAULT:
		rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
		rdev->pm.dynpm_can_upclock = false;
		break;
	case DYNPM_ACTION_NONE:
	default:
		DRM_ERROR("Requested mode for undefined dynpm action\n");
		return;
	}
	/* only one clock mode per power state */
	rdev->pm.requested_clock_mode_index = 0;

	DRM_DEBUG("Requested: e: %d m: %d p: %d\n",
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].sclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].mclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  pcie_lanes);
}

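/* Fill the pm profile table.  Only two power states are ever referenced
 * here: the lowest state (index 0) and the default state; each uses a
 * single clock mode (index 0).
 */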
void r100_pm_init_profile(struct radeon_device *rdev)
{
	/* default */
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
	/* low sh */
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
	/* mid sh */
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
	/* high sh */
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
	/* low mh */
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
	/* mid mh */
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
	/* high mh */
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
}

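/* Apply the side effects of the requested power state: toggle the
 * voltage GPIO when the state supports voltage drop, program the
 * reduced-speed sclk and dynamic-voltage bits in the PLL registers, and
 * update the pcie lane count when it differs from the current state.
 */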
void r100_pm_misc(struct radeon_device *rdev)
{
	int requested_index = rdev->pm.requested_power_state_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
	u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;

	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp |= voltage->gpio.mask;
			else
				tmp &= ~(voltage->gpio.mask);
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		} else {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp &= ~voltage->gpio.mask;
			else
				tmp |= voltage->gpio.mask;
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		}
	}

	sclk_cntl = RREG32_PLL(SCLK_CNTL);
	sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
	sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
	sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
	sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
		sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
		if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
		else
			sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
		else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
	} else
		sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;

	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
		sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
		if (voltage->delay) {
			sclk_more_cntl |= VOLTAGE_DROP_SYNC;
			switch (voltage->delay) {
			case 33:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
				break;
			case 66:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
				break;
			case 99:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
				break;
			case 132:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
				break;
			}
		} else
			sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
	} else
		sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;

	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
		sclk_cntl &= ~FORCE_HDP;
	else
		sclk_cntl |= FORCE_HDP;

	WREG32_PLL(SCLK_CNTL, sclk_cntl);
	WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
	WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);

	/* set pcie lanes */
	if ((rdev->flags & RADEON_IS_PCIE) &&
	    !(rdev->flags & RADEON_IS_IGP) &&
	    rdev->asic->set_pcie_lanes &&
	    (ps->pcie_lanes !=
	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
		radeon_set_pcie_lanes(rdev,
				      ps->pcie_lanes);
		DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
	}
}

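/* Called before a power state change.  Setting the DISP_REQ_EN_B bit on
 * each enabled crtc presumably stalls its display memory requests so the
 * reclock happens while memory clients are quiet; r100_pm_finish()
 * clears the bit again.
 */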
void r100_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			if (radeon_crtc->crtc_id) {
				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
				tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
			} else {
				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
				tmp |= RADEON_CRTC_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
			}
		}
	}
}

void r100_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			if (radeon_crtc->crtc_id) {
				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
				tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
			} else {
				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
				tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
			}
		}
	}
}

bool r100_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
		return false;
	else
		return true;
}

/* hpd for digital panel detect/disconnect */
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)
			connected = true;
		break;
	case RADEON_HPD_2:
		if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)
			connected = true;
		break;
	default:
		break;
	}
	return connected;
}

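/* Program the hpd interrupt polarity from the current sense state so the
 * next interrupt fires on a change (connect when disconnected,
 * disconnect when connected).
 */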
void r100_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r100_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(RADEON_FP_GEN_CNTL);
		if (connected)
			tmp &= ~RADEON_FP_DETECT_INT_POL;
		else
			tmp |= RADEON_FP_DETECT_INT_POL;
		WREG32(RADEON_FP_GEN_CNTL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(RADEON_FP2_GEN_CNTL);
		if (connected)
			tmp &= ~RADEON_FP2_DETECT_INT_POL;
		else
			tmp |= RADEON_FP2_DETECT_INT_POL;
		WREG32(RADEON_FP2_GEN_CNTL, tmp);
		break;
	default:
		break;
	}
}

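/* Enable the hpd interrupt source for each connector that has a hpd pin
 * and, if the irq handler is already installed, push the new mask to the
 * hardware; r100_hpd_fini() does the reverse.
 */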
void r100_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			rdev->irq.hpd[0] = true;
			break;
		case RADEON_HPD_2:
			rdev->irq.hpd[1] = true;
			break;
		default:
			break;
		}
	}
	if (rdev->irq.installed)
		r100_irq_set(rdev);
}

void r100_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			rdev->irq.hpd[0] = false;
			break;
		case RADEON_HPD_2:
			rdev->irq.hpd[1] = false;
			break;
		default:
			break;
		}
	}
}

/*
 * PCI GART
 */
void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
{
	/* TODO: can we do something here ? */
	/* The hw seems to cache only one entry, so we should discard
	 * that entry; otherwise, if the first GPU GART read hits it,
	 * it could end up at a wrong address. */
}

int r100_pci_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.ram.ptr) {
		WARN(1, "R100 PCI GART already initialized.\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
	rdev->asic->gart_set_page = &r100_pci_gart_set_page;
	return radeon_gart_table_ram_alloc(rdev);
}

/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
void r100_enable_bm(struct radeon_device *rdev)
{
	uint32_t tmp;
	/* Enable bus mastering */
	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
	WREG32(RADEON_BUS_CNTL, tmp);
}

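/* Point the on-chip PCI GART at the page table and the GTT aperture and
 * turn translation on; accesses outside the configured range are
 * discarded instead of being forwarded to the bus.
 */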
int r100_pci_gart_enable(struct radeon_device *rdev)
{
	uint32_t tmp;

	radeon_gart_restore(rdev);
	/* discard memory request outside of configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp);
	/* set address range for PCI address translate */
	WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start);
	WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end);
	/* set PCI GART page-table base address */
	WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
	WREG32(RADEON_AIC_CNTL, tmp);
	r100_pci_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}

void r100_pci_gart_disable(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* discard memory request outside of configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
	WREG32(RADEON_AIC_LO_ADDR, 0);
	WREG32(RADEON_AIC_HI_ADDR, 0);
}

int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	/* valid page indices are 0 .. num_gpu_pages - 1 */
	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
	rdev->gart.table.ram.ptr[i] = cpu_to_le32(lower_32_bits(addr));
	return 0;
}

void r100_pci_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r100_pci_gart_disable(rdev);
	radeon_gart_table_ram_free(rdev);
}

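/* Build the GEN_INT_CNTL enable mask from the currently requested
 * interrupt sources (sw/fence, gui idle, vblank, hpd) and write it out.
 */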
int r100_irq_set(struct radeon_device *rdev)
{
	uint32_t tmp = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
		WREG32(R_000040_GEN_INT_CNTL, 0);
		return -EINVAL;
	}
	if (rdev->irq.sw_int) {
		tmp |= RADEON_SW_INT_ENABLE;
	}
	if (rdev->irq.gui_idle) {
		tmp |= RADEON_GUI_IDLE_MASK;
	}
	if (rdev->irq.crtc_vblank_int[0]) {
		tmp |= RADEON_CRTC_VBLANK_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1]) {
		tmp |= RADEON_CRTC2_VBLANK_MASK;
	}
	if (rdev->irq.hpd[0]) {
		tmp |= RADEON_FP_DETECT_MASK;
	}
	if (rdev->irq.hpd[1]) {
		tmp |= RADEON_FP2_DETECT_MASK;
	}
	WREG32(RADEON_GEN_INT_CNTL, tmp);
	return 0;
}

void r100_irq_disable(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(R_000040_GEN_INT_CNTL, 0);
	/* Wait and acknowledge irq */
	mdelay(1);
	tmp = RREG32(R_000044_GEN_INT_STATUS);
	WREG32(R_000044_GEN_INT_STATUS, tmp);
}

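/* Read and clear the pending interrupt bits and return the sources we
 * handle.  GUI_IDLE_STAT is only folded in by hand because the hardware
 * status bit stays asserted once the interrupt has fired.
 */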
static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
{
	uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
	uint32_t irq_mask = RADEON_SW_INT_TEST |
		RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
		RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;

	/* the interrupt works, but the status bit is permanently asserted */
	if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
		if (!rdev->irq.gui_idle_acked)
			irq_mask |= RADEON_GUI_IDLE_STAT;
	}

	if (irqs) {
		WREG32(RADEON_GEN_INT_STATUS, irqs);
	}
	return irqs & irq_mask;
}

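/* Main interrupt dispatch loop: keep acking and handling sources until
 * the status register reads back clean, then queue hotplug work and
 * re-arm MSI on chips that need it.
 */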
int r100_irq_process(struct radeon_device *rdev)
{
	uint32_t status, msi_rearm;
	bool queue_hotplug = false;

	/* reset gui idle ack.  the status bit is broken */
	rdev->irq.gui_idle_acked = false;

	status = r100_irq_ack(rdev);
	if (!status) {
		return IRQ_NONE;
	}
	if (rdev->shutdown) {
		return IRQ_NONE;
	}
	while (status) {
		/* SW interrupt */
		if (status & RADEON_SW_INT_TEST) {
			radeon_fence_process(rdev);
		}
		/* gui idle interrupt */
		if (status & RADEON_GUI_IDLE_STAT) {
			rdev->irq.gui_idle_acked = true;
			rdev->pm.gui_idle = true;
			wake_up(&rdev->irq.idle_queue);
		}
		/* Vertical blank interrupts */
		if (status & RADEON_CRTC_VBLANK_STAT) {
			drm_handle_vblank(rdev->ddev, 0);
			rdev->pm.vblank_sync = true;
			wake_up(&rdev->irq.vblank_queue);
		}
		if (status & RADEON_CRTC2_VBLANK_STAT) {
			drm_handle_vblank(rdev->ddev, 1);
			rdev->pm.vblank_sync = true;
			wake_up(&rdev->irq.vblank_queue);
		}
		if (status & RADEON_FP_DETECT_STAT) {
			queue_hotplug = true;
			DRM_DEBUG("HPD1\n");
		}
		if (status & RADEON_FP2_DETECT_STAT) {
			queue_hotplug = true;
			DRM_DEBUG("HPD2\n");
		}
		status = r100_irq_ack(rdev);
	}
	/* reset gui idle ack.  the status bit is broken */
	rdev->irq.gui_idle_acked = false;
	if (queue_hotplug)
		queue_work(rdev->wq, &rdev->hotplug_work);
	if (rdev->msi_enabled) {
		switch (rdev->family) {
		case CHIP_RS400:
		case CHIP_RS480:
			msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM;
			WREG32(RADEON_AIC_CNTL, msi_rearm);
			WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
			break;
		default:
			msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
			WREG32(RADEON_MSI_REARM_EN, msi_rearm);
			WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
			break;
		}
	}
	return IRQ_HANDLED;
}

u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	if (crtc == 0)
		return RREG32(RADEON_CRTC_CRNT_FRAME);
	else
		return RREG32(RADEON_CRTC2_CRNT_FRAME);
}

/* Whoever calls radeon_fence_emit should call ring_lock and ask
 * for enough space (today the callers are ib schedule and buffer move) */
void r100_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* We have to make sure that caches are flushed before
	 * the CPU might read something from VRAM. */
	radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
	radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
				RADEON_HDP_READ_BUFFER_INVALIDATE);
	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(rdev, rdev->config.r100.hdp_cntl);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
	radeon_ring_write(rdev, fence->seq);
	radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}

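/* Allocate and pin one GTT page used for scratch register writeback and
 * the ring rptr copy (at offset 1024), then enable writeback for all
 * scratch registers.
 */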
int r100_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
					RADEON_GEM_DOMAIN_GTT,
					&rdev->wb.wb_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) create WB buffer failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
					&rdev->wb.gpu_addr);
		if (r) {
			dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r);
			radeon_bo_unreserve(rdev->wb.wb_obj);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) map WB buffer failed\n", r);
			return r;
		}
	}
	WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr);
	WREG32(R_00070C_CP_RB_RPTR_ADDR,
		S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + 1024) >> 2));
	WREG32(R_000770_SCRATCH_UMSK, 0xff);
	return 0;
}

void r100_wb_disable(struct radeon_device *rdev)
{
	WREG32(R_000770_SCRATCH_UMSK, 0);
}

void r100_wb_fini(struct radeon_device *rdev)
{
	int r;

	r100_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			dev_err(rdev->dev, "(%d) can't finish WB\n", r);
			return;
		}
		radeon_bo_kunmap(rdev->wb.wb_obj);
		radeon_bo_unpin(rdev->wb.wb_obj);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

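/* Copy num_pages pages from src_offset to dst_offset with the 2D
 * blitter, at most 8191 lines per BITBLT_MULTI packet, then flush the
 * 2D cache, wait for idle and optionally emit a fence.
 */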
int r100_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset,
		   uint64_t dst_offset,
		   unsigned num_pages,
		   struct radeon_fence *fence)
{
	uint32_t cur_pages;
	uint32_t stride_bytes = PAGE_SIZE;
	uint32_t pitch;
	uint32_t stride_pixels;
	unsigned ndw;
	int num_loops;
	int r = 0;

	/* radeon limited to 16k stride */
	stride_bytes &= 0x3fff;
	/* radeon pitch is /64 */
	pitch = stride_bytes / 64;
	stride_pixels = stride_bytes / 4;
	num_loops = DIV_ROUND_UP(num_pages, 8191);

	/* Ask for enough room for blit + flush + fence */
	ndw = 64 + (10 * num_loops);
	r = radeon_ring_lock(rdev, ndw);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
		return -EINVAL;
	}
	while (num_pages > 0) {
		cur_pages = num_pages;
		if (cur_pages > 8191) {
			cur_pages = 8191;
		}
		num_pages -= cur_pages;

		/* pages are in Y direction - height;
		 * page width in X direction - width */
		radeon_ring_write(rdev, PACKET3(PACKET3_BITBLT_MULTI, 8));
		radeon_ring_write(rdev,
				  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
				  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
				  RADEON_GMC_SRC_CLIPPING |
				  RADEON_GMC_DST_CLIPPING |
				  RADEON_GMC_BRUSH_NONE |
				  (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
				  RADEON_GMC_SRC_DATATYPE_COLOR |
				  RADEON_ROP3_S |
				  RADEON_DP_SRC_SOURCE_MEMORY |
				  RADEON_GMC_CLR_CMP_CNTL_DIS |
				  RADEON_GMC_WR_MSK_DIS);
		radeon_ring_write(rdev, (pitch << 22) | (src_offset >> 10));
		radeon_ring_write(rdev, (pitch << 22) | (dst_offset >> 10));
		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(rdev, 0);
		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(rdev, num_pages);
		radeon_ring_write(rdev, num_pages);
		radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
	}
	radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, RADEON_RB2D_DC_FLUSH_ALL);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_HOST_IDLECLEAN |
			  RADEON_WAIT_DMA_GUI_IDLE);
	if (fence) {
		r = radeon_fence_emit(rdev, fence);
	}
	radeon_ring_unlock_commit(rdev);
	return r;
}

static int r100_cp_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(R_000E40_RBBM_STATUS);
		if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) {
			return 0;
		}
		udelay(1);
	}
	return -1;
}

void r100_ring_start(struct radeon_device *rdev)
{
	int r;

	r = radeon_ring_lock(rdev, 2);
	if (r) {
		return;
	}
	radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(rdev,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_unlock_commit(rdev);
}


/* Load the microcode for the CP */
static int r100_cp_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *fw_name = NULL;
	int err;

	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}
	if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
	    (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
	    (rdev->family == CHIP_RS200)) {
		DRM_INFO("Loading R100 Microcode\n");
		fw_name = FIRMWARE_R100;
	} else if ((rdev->family == CHIP_R200) ||
		   (rdev->family == CHIP_RV250) ||
		   (rdev->family == CHIP_RV280) ||
		   (rdev->family == CHIP_RS300)) {
		DRM_INFO("Loading R200 Microcode\n");
		fw_name = FIRMWARE_R200;
	} else if ((rdev->family == CHIP_R300) ||
		   (rdev->family == CHIP_R350) ||
		   (rdev->family == CHIP_RV350) ||
		   (rdev->family == CHIP_RV380) ||
		   (rdev->family == CHIP_RS400) ||
		   (rdev->family == CHIP_RS480)) {
		DRM_INFO("Loading R300 Microcode\n");
		fw_name = FIRMWARE_R300;
	} else if ((rdev->family == CHIP_R420) ||
		   (rdev->family == CHIP_R423) ||
		   (rdev->family == CHIP_RV410)) {
		DRM_INFO("Loading R400 Microcode\n");
		fw_name = FIRMWARE_R420;
	} else if ((rdev->family == CHIP_RS690) ||
		   (rdev->family == CHIP_RS740)) {
		DRM_INFO("Loading RS690/RS740 Microcode\n");
		fw_name = FIRMWARE_RS690;
	} else if (rdev->family == CHIP_RS600) {
		DRM_INFO("Loading RS600 Microcode\n");
		fw_name = FIRMWARE_RS600;
	} else if ((rdev->family == CHIP_RV515) ||
		   (rdev->family == CHIP_R520) ||
		   (rdev->family == CHIP_RV530) ||
		   (rdev->family == CHIP_R580) ||
		   (rdev->family == CHIP_RV560) ||
		   (rdev->family == CHIP_RV570)) {
		DRM_INFO("Loading R500 Microcode\n");
		fw_name = FIRMWARE_R520;
	}

	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	platform_device_unregister(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
		       fw_name);
	} else if (rdev->me_fw->size % 8) {
		printk(KERN_ERR
		       "radeon_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
	}
	return err;
}

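/* Upload the CP microcode into the ME RAM, two dwords (high then low)
 * per instruction slot, starting at address 0.
 */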
static void r100_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i, size;

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	if (rdev->me_fw) {
		size = rdev->me_fw->size / 4;
		fw_data = (const __be32 *)&rdev->me_fw->data[0];
		WREG32(RADEON_CP_ME_RAM_ADDR, 0);
		for (i = 0; i < size; i += 2) {
			WREG32(RADEON_CP_ME_RAM_DATAH,
			       be32_to_cpup(&fw_data[i]));
			WREG32(RADEON_CP_ME_RAM_DATAL,
			       be32_to_cpup(&fw_data[i + 1]));
		}
	}
}

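/* Bring up the command processor: load the microcode, size and program
 * the ring buffer, force the read/write pointers to a known state,
 * enable bus-master command fetching and verify it all with a ring test.
 */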
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
{
	unsigned rb_bufsz;
	unsigned rb_blksz;
	unsigned max_fetch;
	unsigned pre_write_timer;
	unsigned pre_write_limit;
	unsigned indirect2_start;
	unsigned indirect1_start;
	uint32_t tmp;
	int r;

	if (r100_debugfs_cp_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for CP !\n");
	}
	if (!rdev->me_fw) {
		r = r100_cp_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	r100_cp_load_microcode(rdev);
	r = radeon_ring_init(rdev, ring_size);
	if (r) {
		return r;
	}
	/* Each time the CP reads 1024 bytes (16 dword/quadword), update
	 * the rptr copy in system ram */
	rb_blksz = 9;
	/* cp will read 128bytes at a time (4 dwords) */
	max_fetch = 1;
	rdev->cp.align_mask = 16 - 1;
	/* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
	pre_write_timer = 64;
	/* Force CP_RB_WPTR write if written more than one time before the
	 * delay expires
	 */
	pre_write_limit = 0;
	/* Setup the cp cache like this (cache size is 96 dwords) :
	 *	RING		0  to 15
	 *	INDIRECT1	16 to 79
	 *	INDIRECT2	80 to 95
	 * So ring cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
	 *    indirect1 cache size is 64dwords (> (2 * max_fetch = 2 * 4dwords))
	 *    indirect2 cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
	 * Idea being that most of the gpu cmd will be through indirect1 buffer
	 * so it gets the bigger cache.
	 */
	indirect2_start = 80;
	indirect1_start = 16;
	/* cp setup */
	WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
	tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
	       REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
	       REG_SET(RADEON_MAX_FETCH, max_fetch) |
	       RADEON_RB_NO_UPDATE);
#ifdef __BIG_ENDIAN
	tmp |= RADEON_BUF_SWAP_32BIT;
#endif
	WREG32(RADEON_CP_RB_CNTL, tmp);

	/* Set ring address */
	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
	WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
	/* Force read & write ptr to 0 */
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, 0);
	WREG32(RADEON_CP_RB_CNTL, tmp);
	udelay(10);
	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
	rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
	/* protect against crazy HW on resume */
	rdev->cp.wptr &= rdev->cp.ptr_mask;
	/* Set cp mode to bus mastering & enable cp */
	WREG32(RADEON_CP_CSQ_MODE,
	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
	       REG_SET(RADEON_INDIRECT1_START, indirect1_start));
	WREG32(0x718, 0);
	WREG32(0x744, 0x00004D4D);
	WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
	radeon_ring_start(rdev);
	r = radeon_ring_test(rdev);
	if (r) {
		DRM_ERROR("radeon: cp isn't working (%d).\n", r);
		return r;
	}
	rdev->cp.ready = true;
	return 0;
}

void r100_cp_fini(struct radeon_device *rdev)
{
	if (r100_cp_wait_for_idle(rdev)) {
		DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n");
	}
	/* Disable ring */
	r100_cp_disable(rdev);
	radeon_ring_fini(rdev);
	DRM_INFO("radeon: cp finalized\n");
}

void r100_cp_disable(struct radeon_device *rdev)
{
	/* Disable ring */
	rdev->cp.ready = false;
	WREG32(RADEON_CP_CSQ_MODE, 0);
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
}

void r100_cp_commit(struct radeon_device *rdev)
{
	WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
	(void)RREG32(RADEON_CP_RB_WPTR);
}


/*
 * CS functions
 */
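/* Walk the registers written by a type-0 packet.  Writes landing outside
 * the range covered by the bitmap are rejected; registers whose bit is
 * set in @auth are handed to the @check callback for relocation and
 * state tracking.
 */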
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
			  struct radeon_cs_packet *pkt,
			  const unsigned *auth, unsigned n,
			  radeon_packet0_check_t check)
{
	unsigned reg;
	unsigned i, j, m;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	/* Check that the register falls into the register range
	 * determined by the number of entries (n) in the
	 * safe register bitmap.
	 */
	if (pkt->one_reg_wr) {
		if ((reg >> 7) > n) {
			return -EINVAL;
		}
	} else {
		if (((reg + (pkt->count << 2)) >> 7) > n) {
			return -EINVAL;
		}
	}
	for (i = 0; i <= pkt->count; i++, idx++) {
		j = (reg >> 7);
		m = 1 << ((reg >> 2) & 31);
		if (auth[j] & m) {
			r = check(p, pkt, idx, reg);
			if (r) {
				return r;
			}
		}
		if (pkt->one_reg_wr) {
			if (!(auth[j] & m)) {
				break;
			}
		} else {
			reg += 4;
		}
	}
	return 0;
}

void r100_cs_dump_packet(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt)
{
	volatile uint32_t *ib;
	unsigned i;
	unsigned idx;

	ib = p->ib->ptr;
	idx = pkt->idx;
	for (i = 0; i <= (pkt->count + 1); i++, idx++) {
		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
	}
}

/**
 * r100_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @parser:	parser structure holding parsing context.
 * @pkt:	where to store packet information
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size, or if the packet
 * is unknown.
 **/
int r100_cs_packet_parse(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt,
			 unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	switch (pkt->type) {
	case PACKET_TYPE0:
		pkt->reg = CP_PACKET0_GET_REG(header);
		pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
		break;
	case PACKET_TYPE3:
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		break;
	case PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}

/**
 * r100_cs_packet_next_vline() - parse userspace VLINE packet
 * @parser:		parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET0 - WAIT_UNTIL + value
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT UNTIL packets to the correct crtc.
 * It also detects a switched off crtc and nulls out the
 * wait in that case.
 */
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, waitreloc;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg;
	volatile uint32_t *ib;

	ib = p->ib->ptr;

	/* parse the wait until */
	r = r100_cs_packet_parse(p, &waitreloc, p->idx);
	if (r)
		return r;

	/* check it's a wait until and only 1 count */
	if (waitreloc.reg != RADEON_WAIT_UNTIL ||
	    waitreloc.count != 0) {
		DRM_ERROR("vline wait had illegal wait until segment\n");
		r = -EINVAL;
		return r;
	}

	if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
		DRM_ERROR("vline wait had illegal wait until\n");
		r = -EINVAL;
		return r;
	}

	/* jump over the NOP */
	r = r100_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
	if (r)
		return r;

	h_idx = p->idx - 2;
	p->idx += waitreloc.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 5);
	reg = CP_PACKET0_GET_REG(header);
	mutex_lock(&p->rdev->ddev->mode_config.mutex);
	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		r = -EINVAL;
		goto out;
	}
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the wait until */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
	} else if (crtc_id == 1) {
		switch (reg) {
		case AVIVO_D1MODE_VLINE_START_END:
			header &= ~R300_CP_PACKET0_REG_MASK;
			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
			break;
		case RADEON_CRTC_GUI_TRIG_VLINE:
			header &= ~R300_CP_PACKET0_REG_MASK;
			header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
			break;
		default:
			DRM_ERROR("unknown crtc reloc\n");
			r = -EINVAL;
			goto out;
		}
		ib[h_idx] = header;
		ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
	}
out:
	mutex_unlock(&p->rdev->ddev->mode_config.mutex);
	return r;
}

/**
 * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3
 * @parser:		parser structure holding parsing context.
 * @data:		pointer to relocation data
 * @offset_start:	starting offset
 * @offset_mask:	offset mask (to align start offset on)
 * @reloc:		reloc information
 *
 * Check that the next packet is a relocation packet3, do bo validation
 * and compute the GPU offset using the provided start.
 **/
int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
			      struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r100_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		r100_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		r100_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}

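/* Compute the vertex size in dwords for a SE_VTX_FMT value by summing
 * the sizes of all enabled components (the base size of 2 presumably
 * covers the always-present X/Y coordinates).
 */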
static int r100_get_vtx_size(uint32_t vtx_fmt)
{
	int vtx_size;
	vtx_size = 2;
	/* ordered according to bits in spec */
	if (vtx_fmt & RADEON_SE_VTX_FMT_W0)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST0)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST1)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST2)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q2)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST3)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q3)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q0)
		vtx_size++;
	/* blend weight */
	if (vtx_fmt & (0x7 << 15))
		vtx_size += (vtx_fmt >> 15) & 0x7;
	if (vtx_fmt & RADEON_SE_VTX_FMT_N0)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_XY1)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Z1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_W1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_N1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Z)
		vtx_size++;
	return vtx_size;
}

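/* Validate and relocate one register write from the command stream:
 * patch buffer offsets with the bo's GPU address and mirror whatever the
 * checker needs (pitches, formats, texture state) into the track
 * structure for the later r100_cs_track_check().
 */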
static int r100_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	uint32_t tmp;
	int r;
	int i, face;
	u32 tile_flags = 0;
	u32 idx_value;

	ib = p->ib->ptr;
	track = (struct r100_cs_track *)p->track;

	idx_value = radeon_get_ib_value(p, idx);

	switch (reg) {
	case RADEON_CRTC_GUI_TRIG_VLINE:
		r = r100_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		break;
		/* FIXME: only allow PACKET3 blit? easier to check for out of
		 * range access */
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
		if (r)
			return r;
		break;
	case RADEON_RB3D_DEPTHOFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->zb.robj = reloc->robj;
		track->zb.offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case RADEON_RB3D_COLOROFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->cb[0].robj = reloc->robj;
		track->cb[0].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case RADEON_PP_TXOFFSET_0:
	case RADEON_PP_TXOFFSET_1:
	case RADEON_PP_TXOFFSET_2:
		i = (reg - RADEON_PP_TXOFFSET_0) / 24;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[i].robj = reloc->robj;
		break;
	case RADEON_PP_CUBIC_OFFSET_T0_0:
	case RADEON_PP_CUBIC_OFFSET_T0_1:
	case RADEON_PP_CUBIC_OFFSET_T0_2:
	case RADEON_PP_CUBIC_OFFSET_T0_3:
	case RADEON_PP_CUBIC_OFFSET_T0_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[0].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[0].cube_info[i].robj = reloc->robj;
		break;
	case RADEON_PP_CUBIC_OFFSET_T1_0:
	case RADEON_PP_CUBIC_OFFSET_T1_1:
	case RADEON_PP_CUBIC_OFFSET_T1_2:
	case RADEON_PP_CUBIC_OFFSET_T1_3:
	case RADEON_PP_CUBIC_OFFSET_T1_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[1].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[1].cube_info[i].robj = reloc->robj;
		break;
	case RADEON_PP_CUBIC_OFFSET_T2_0:
	case RADEON_PP_CUBIC_OFFSET_T2_1:
	case RADEON_PP_CUBIC_OFFSET_T2_2:
	case RADEON_PP_CUBIC_OFFSET_T2_3:
	case RADEON_PP_CUBIC_OFFSET_T2_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[2].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[2].cube_info[i].robj = reloc->robj;
		break;
	case RADEON_RE_WIDTH_HEIGHT:
		track->maxy = ((idx_value >> 16) & 0x7FF);
		break;
	case RADEON_RB3D_COLORPITCH:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}

		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= RADEON_COLOR_TILE_ENABLE;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
			tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;

		tmp = idx_value & ~(0x7 << 16);
		tmp |= tile_flags;
		ib[idx] = tmp;

		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
		break;
	case RADEON_RB3D_DEPTHPITCH:
		track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
		break;
	case RADEON_RB3D_CNTL:
		switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
		case 7:
		case 8:
		case 9:
		case 11:
		case 12:
			track->cb[0].cpp = 1;
			break;
		case 3:
		case 4:
		case 15:
			track->cb[0].cpp = 2;
			break;
		case 6:
			track->cb[0].cpp = 4;
			break;
		default:
			DRM_ERROR("Invalid color buffer format (%d) !\n",
				  ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
			return -EINVAL;
		}
		track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
		break;
	case RADEON_RB3D_ZSTENCILCNTL:
		switch (idx_value & 0xf) {
		case 0:
			track->zb.cpp = 2;
			break;
		case 2:
		case 3:
		case 4:
		case 5:
		case 9:
		case 11:
			track->zb.cpp = 4;
			break;
		default:
			break;
		}
		break;
	case RADEON_RB3D_ZPASS_ADDR:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case RADEON_PP_CNTL:
		{
			uint32_t temp = idx_value >> 4;
			for (i = 0; i < track->num_texture; i++)
				track->textures[i].enabled = !!(temp & (1 << i));
		}
		break;
	case RADEON_SE_VF_CNTL:
		track->vap_vf_cntl = idx_value;
		break;
	case RADEON_SE_VTX_FMT:
		track->vtx_size = r100_get_vtx_size(idx_value);
		break;
	case RADEON_PP_TEX_SIZE_0:
	case RADEON_PP_TEX_SIZE_1:
	case RADEON_PP_TEX_SIZE_2:
		i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
		track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
		track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
		break;
	case RADEON_PP_TEX_PITCH_0:
	case RADEON_PP_TEX_PITCH_1:
	case RADEON_PP_TEX_PITCH_2:
		i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
		track->textures[i].pitch = idx_value + 32;
		break;
	case RADEON_PP_TXFILTER_0:
	case RADEON_PP_TXFILTER_1:
	case RADEON_PP_TXFILTER_2:
		i = (reg - RADEON_PP_TXFILTER_0) / 24;
		track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK)
						 >> RADEON_MAX_MIP_LEVEL_SHIFT);
		tmp = (idx_value >> 23) & 0x7;
		if (tmp == 2 || tmp == 6)
			track->textures[i].roundup_w = false;
		tmp = (idx_value >> 27) & 0x7;
		if (tmp == 2 || tmp == 6)
			track->textures[i].roundup_h = false;
		break;
	case RADEON_PP_TXFORMAT_0:
	case RADEON_PP_TXFORMAT_1:
	case RADEON_PP_TXFORMAT_2:
		i = (reg - RADEON_PP_TXFORMAT_0) / 24;
		if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
			track->textures[i].use_pitch = 1;
		} else {
			track->textures[i].use_pitch = 0;
			track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
			track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
		}
		if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
			track->textures[i].tex_coord_type = 2;
		switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
		case RADEON_TXFORMAT_I8:
		case RADEON_TXFORMAT_RGB332:
		case RADEON_TXFORMAT_Y8:
			track->textures[i].cpp = 1;
			break;
		case RADEON_TXFORMAT_AI88:
		case RADEON_TXFORMAT_ARGB1555:
		case RADEON_TXFORMAT_RGB565:
		case RADEON_TXFORMAT_ARGB4444:
		case RADEON_TXFORMAT_VYUY422:
		case RADEON_TXFORMAT_YVYU422:
		case RADEON_TXFORMAT_SHADOW16:
		case RADEON_TXFORMAT_LDUDV655:
		case RADEON_TXFORMAT_DUDV88:
			track->textures[i].cpp = 2;
			break;
		case RADEON_TXFORMAT_ARGB8888:
		case RADEON_TXFORMAT_RGBA8888:
		case RADEON_TXFORMAT_SHADOW32:
		case RADEON_TXFORMAT_LDUDUV8888:
			track->textures[i].cpp = 4;
			break;
		case RADEON_TXFORMAT_DXT1:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
			break;
		case RADEON_TXFORMAT_DXT23:
		case RADEON_TXFORMAT_DXT45:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
			break;
		}
		track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
		track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
		break;
	case RADEON_PP_CUBIC_FACES_0:
	case RADEON_PP_CUBIC_FACES_1:
	case RADEON_PP_CUBIC_FACES_2:
		tmp = idx_value;
		i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
		for (face = 0; face < 4; face++) {
			track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
			track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
		}
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}

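/* Check that the end offset stored in a PACKET3 INDX_BUFFER packet fits
 * inside the relocated bo.
 */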
int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
					 struct radeon_cs_packet *pkt,
					 struct radeon_bo *robj)
{
	unsigned idx;
	u32 value;
	idx = pkt->idx + 1;
	value = radeon_get_ib_value(p, idx + 2);
	if ((value + 1) > radeon_bo_size(robj)) {
		DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
			  "(need %u have %lu) !\n",
			  value + 1,
			  radeon_bo_size(robj));
		return -EINVAL;
	}
	return 0;
}

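/* Validate a type-3 packet: relocate vertex/index buffer addresses and
 * run the cs tracker on every draw command so undersized buffers are
 * caught before the ib is scheduled.
 */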
1698 static int r100_packet3_check(struct radeon_cs_parser *p,
1699 			      struct radeon_cs_packet *pkt)
1700 {
1701 	struct radeon_cs_reloc *reloc;
1702 	struct r100_cs_track *track;
1703 	unsigned idx;
1704 	volatile uint32_t *ib;
1705 	int r;
1706 
1707 	ib = p->ib->ptr;
1708 	idx = pkt->idx + 1;
1709 	track = (struct r100_cs_track *)p->track;
1710 	switch (pkt->opcode) {
1711 	case PACKET3_3D_LOAD_VBPNTR:
1712 		r = r100_packet3_load_vbpntr(p, pkt, idx);
1713 		if (r)
1714 			return r;
1715 		break;
1716 	case PACKET3_INDX_BUFFER:
1717 		r = r100_cs_packet_next_reloc(p, &reloc);
1718 		if (r) {
1719 			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
1720 			r100_cs_dump_packet(p, pkt);
1721 			return r;
1722 		}
1723 		ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
1724 		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
1725 		if (r) {
1726 			return r;
1727 		}
1728 		break;
1729 	case 0x23:
1730 		/* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
1731 		r = r100_cs_packet_next_reloc(p, &reloc);
1732 		if (r) {
1733 			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
1734 			r100_cs_dump_packet(p, pkt);
1735 			return r;
1736 		}
1737 		ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
1738 		track->num_arrays = 1;
1739 		track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));
1740 
1741 		track->arrays[0].robj = reloc->robj;
1742 		track->arrays[0].esize = track->vtx_size;
1743 
1744 		track->max_indx = radeon_get_ib_value(p, idx+1);
1745 
1746 		track->vap_vf_cntl = radeon_get_ib_value(p, idx+3);
1747 		track->immd_dwords = pkt->count - 1;
1748 		r = r100_cs_track_check(p->rdev, track);
1749 		if (r)
1750 			return r;
1751 		break;
1752 	/* triggers drawing using in-packet vertex data */
1753 	case PACKET3_3D_DRAW_IMMD:
1754 		if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
1755 			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1756 			return -EINVAL;
1757 		}
1758 		track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
1759 		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1760 		track->immd_dwords = pkt->count - 1;
1761 		r = r100_cs_track_check(p->rdev, track);
1762 		if (r)
1763 			return r;
1764 		break;
1765 	/* triggers drawing using in-packet vertex data */
1766 	case PACKET3_3D_DRAW_IMMD_2:
1767 		if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
1768 			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1769 			return -EINVAL;
1770 		}
1771 		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1772 		track->immd_dwords = pkt->count;
1773 		r = r100_cs_track_check(p->rdev, track);
1774 		if (r)
1775 			return r;
1776 		break;
1777 	/* triggers drawing of vertex buffers setup elsewhere */
1778 	case PACKET3_3D_DRAW_VBUF_2:
1779 		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1780 		r = r100_cs_track_check(p->rdev, track);
1781 		if (r)
1782 			return r;
1783 		break;
1784 	/* triggers drawing using indices to vertex buffer */
1785 	case PACKET3_3D_DRAW_INDX_2:
1786 		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1787 		r = r100_cs_track_check(p->rdev, track);
1788 		if (r)
1789 			return r;
1790 		break;
1791 	/* triggers drawing of vertex buffers setup elsewhere */
1792 	case PACKET3_3D_DRAW_VBUF:
1793 		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1794 		r = r100_cs_track_check(p->rdev, track);
1795 		if (r)
1796 			return r;
1797 		break;
1798 	/* triggers drawing using indices to vertex buffer */
1799 	case PACKET3_3D_DRAW_INDX:
1800 		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1801 		r = r100_cs_track_check(p->rdev, track);
1802 		if (r)
1803 			return r;
1804 		break;
1805 	case PACKET3_NOP:
1806 		break;
1807 	default:
1808 		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
1809 		return -EINVAL;
1810 	}
1811 	return 0;
1812 }
1813 
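/* Top-level command stream parser: walk the IB chunk packet by packet and
 * dispatch type-0 packets (register writes) and type-3 packets (commands)
 * to their respective checkers; type-2 packets are padding NOPs.
 */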
1814 int r100_cs_parse(struct radeon_cs_parser *p)
1815 {
1816 	struct radeon_cs_packet pkt;
1817 	struct r100_cs_track *track;
1818 	int r;
1819 
1820 	track = kzalloc(sizeof(*track), GFP_KERNEL);
	if (track == NULL)
		return -ENOMEM;
1821 	r100_cs_track_clear(p->rdev, track);
1822 	p->track = track;
1823 	do {
1824 		r = r100_cs_packet_parse(p, &pkt, p->idx);
1825 		if (r) {
1826 			return r;
1827 		}
1828 		p->idx += pkt.count + 2;
1829 		switch (pkt.type) {
1830 		case PACKET_TYPE0:
1831 			if (p->rdev->family >= CHIP_R200)
1832 				r = r100_cs_parse_packet0(p, &pkt,
1833 							  p->rdev->config.r100.reg_safe_bm,
1834 							  p->rdev->config.r100.reg_safe_bm_size,
1835 							  &r200_packet0_check);
1836 			else
1837 				r = r100_cs_parse_packet0(p, &pkt,
1838 							  p->rdev->config.r100.reg_safe_bm,
1839 							  p->rdev->config.r100.reg_safe_bm_size,
1840 							  &r100_packet0_check);
1841 			break;
1842 		case PACKET_TYPE2:
1843 			break;
1844 		case PACKET_TYPE3:
1845 			r = r100_packet3_check(p, &pkt);
1846 			break;
1847 		default:
1848 			DRM_ERROR("Unknown packet type %d !\n",
1849 				  pkt.type);
1850 			return -EINVAL;
1851 		}
1852 		if (r) {
1853 			return r;
1854 		}
1855 	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
1856 	return 0;
1857 }
1858 
1859 
1860 /*
1861  * Global GPU functions
1862  */
1863 void r100_errata(struct radeon_device *rdev)
1864 {
1865 	rdev->pll_errata = 0;
1866 
1867 	if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
1868 		rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
1869 	}
1870 
1871 	if (rdev->family == CHIP_RV100 ||
1872 	    rdev->family == CHIP_RS100 ||
1873 	    rdev->family == CHIP_RS200) {
1874 		rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
1875 	}
1876 }
1877 
1878 /* Wait for vertical sync on primary CRTC */
1879 void r100_gpu_wait_for_vsync(struct radeon_device *rdev)
1880 {
1881 	uint32_t crtc_gen_cntl, tmp;
1882 	int i;
1883 
1884 	crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
1885 	if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) ||
1886 	    !(crtc_gen_cntl & RADEON_CRTC_EN)) {
1887 		return;
1888 	}
1889 	/* Clear the CRTC_VBLANK_SAVE bit */
1890 	WREG32(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR);
1891 	for (i = 0; i < rdev->usec_timeout; i++) {
1892 		tmp = RREG32(RADEON_CRTC_STATUS);
1893 		if (tmp & RADEON_CRTC_VBLANK_SAVE) {
1894 			return;
1895 		}
1896 		DRM_UDELAY(1);
1897 	}
1898 }
1899 
1900 /* Wait for vertical sync on secondary CRTC */
1901 void r100_gpu_wait_for_vsync2(struct radeon_device *rdev)
1902 {
1903 	uint32_t crtc2_gen_cntl, tmp;
1904 	int i;
1905 
1906 	crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
1907 	if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) ||
1908 	    !(crtc2_gen_cntl & RADEON_CRTC2_EN))
1909 		return;
1910 
1911 	/* Clear the CRTC_VBLANK_SAVE bit */
1912 	WREG32(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR);
1913 	for (i = 0; i < rdev->usec_timeout; i++) {
1914 		tmp = RREG32(RADEON_CRTC2_STATUS);
1915 		if (tmp & RADEON_CRTC2_VBLANK_SAVE) {
1916 			return;
1917 		}
1918 		DRM_UDELAY(1);
1919 	}
1920 }
1921 
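/* Poll until the RBBM command FIFO reports room for at least n entries.
 * Returns 0 on success, -1 on timeout.
 */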
1922 int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
1923 {
1924 	unsigned i;
1925 	uint32_t tmp;
1926 
1927 	for (i = 0; i < rdev->usec_timeout; i++) {
1928 		tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
1929 		if (tmp >= n) {
1930 			return 0;
1931 		}
1932 		DRM_UDELAY(1);
1933 	}
1934 	return -1;
1935 }
1936 
1937 int r100_gui_wait_for_idle(struct radeon_device *rdev)
1938 {
1939 	unsigned i;
1940 	uint32_t tmp;
1941 
1942 	if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
1943 		printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !"
1944 		       " Bad things might happen.\n");
1945 	}
1946 	for (i = 0; i < rdev->usec_timeout; i++) {
1947 		tmp = RREG32(RADEON_RBBM_STATUS);
1948 		if (!(tmp & RADEON_RBBM_ACTIVE)) {
1949 			return 0;
1950 		}
1951 		DRM_UDELAY(1);
1952 	}
1953 	return -1;
1954 }
1955 
1956 int r100_mc_wait_for_idle(struct radeon_device *rdev)
1957 {
1958 	unsigned i;
1959 	uint32_t tmp;
1960 
1961 	for (i = 0; i < rdev->usec_timeout; i++) {
1962 		/* read MC_STATUS */
1963 		tmp = RREG32(RADEON_MC_STATUS);
1964 		if (tmp & RADEON_MC_IDLE) {
1965 			return 0;
1966 		}
1967 		DRM_UDELAY(1);
1968 	}
1969 	return -1;
1970 }
1971 
1972 void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
1973 {
1974 	lockup->last_cp_rptr = cp->rptr;
1975 	lockup->last_jiffies = jiffies;
1976 }
1977 
1978 /**
1979  * r100_gpu_cp_is_lockup() - check whether the CP is locked up
1980  * @rdev:	radeon device structure
1981  * @lockup:	r100_gpu_lockup structure holding CP lockup tracking information
1982  * @cp:		radeon_cp structure holding CP information
1983  *
1984  * The lockup tracking information does not need explicit initialization:
1985  * either the CP rptr will differ from the recorded value or jiffies will
1986  * have wrapped around; both cases force it to be (re)initialized.
1987  *
1988  * A false positive is possible if we are called after a long pause and
1989  * last_cp_rptr happens to equal the current CP rptr, however unlikely that
1990  * may be. To avoid this, if more than 3 seconds have elapsed since the last
1991  * call we return false and update the tracking information. Consequently,
1992  * the caller must call r100_gpu_cp_is_lockup() several times within 3
1993  * seconds for a lockup to be reported; the fencing code should be cautious
1994  * about that.
1995  *
1996  * The caller should also write to the ring to force the CP to do something,
1997  * so we don't get a false positive when the CP simply has nothing to do.
1998  **/
1999 bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
2000 {
2001 	unsigned long cjiffies, elapsed;
2002 
2003 	cjiffies = jiffies;
2004 	if (!time_after(cjiffies, lockup->last_jiffies)) {
2005 		/* likely a wrap around */
2006 		lockup->last_cp_rptr = cp->rptr;
2007 		lockup->last_jiffies = jiffies;
2008 		return false;
2009 	}
2010 	if (cp->rptr != lockup->last_cp_rptr) {
2011 		/* CP is still working no lockup */
2012 		lockup->last_cp_rptr = cp->rptr;
2013 		lockup->last_jiffies = jiffies;
2014 		return false;
2015 	}
2016 	elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
2017 	if (elapsed >= 3000) {
2018 		/* Most likely the improbable case where the current rptr
2019 		 * happens to equal an rptr that was recorded a long while
2020 		 * ago. Treat it as a false positive: update the tracking
2021 		 * information so that we get called again and can decide
2022 		 * at a later point.
2023 		 */
2024 		lockup->last_cp_rptr = cp->rptr;
2025 		lockup->last_jiffies = jiffies;
2026 		return false;
2027 	}
2028 	if (elapsed >= 1000) {
2029 		dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
2030 		return true;
2031 	}
2032 	/* give a chance to the GPU ... */
2033 	return false;
2034 }
2035 
2036 bool r100_gpu_is_lockup(struct radeon_device *rdev)
2037 {
2038 	u32 rbbm_status;
2039 	int r;
2040 
2041 	rbbm_status = RREG32(R_000E40_RBBM_STATUS);
2042 	if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
2043 		r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp);
2044 		return false;
2045 	}
2046 	/* force CP activities */
2047 	r = radeon_ring_lock(rdev, 2);
2048 	if (!r) {
2049 		/* PACKET2 NOP */
2050 		radeon_ring_write(rdev, 0x80000000);
2051 		radeon_ring_write(rdev, 0x80000000);
2052 		radeon_ring_unlock_commit(rdev);
2053 	}
2054 	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
2055 	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp);
2056 }
2057 
2058 void r100_bm_disable(struct radeon_device *rdev)
2059 {
2060 	u32 tmp;
2061 
2062 	/* disable bus mastering */
2063 	tmp = RREG32(R_000030_BUS_CNTL);
2064 	WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
2065 	mdelay(1);
2066 	WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
2067 	mdelay(1);
2068 	WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
2069 	tmp = RREG32(RADEON_BUS_CNTL);
2070 	mdelay(1);
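	/* clear the Bus Master Enable bit (bit 2) of the PCI command register */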
2071 	pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
2072 	pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
2073 	mdelay(1);
2074 }
2075 
2076 int r100_asic_reset(struct radeon_device *rdev)
2077 {
2078 	struct r100_mc_save save;
2079 	u32 status, tmp;
2080 
2081 	status = RREG32(R_000E40_RBBM_STATUS);
2082 	if (!G_000E40_GUI_ACTIVE(status)) {
2083 		return 0;
2084 	}
2085 	r100_mc_stop(rdev, &save);
2086 	status = RREG32(R_000E40_RBBM_STATUS);
2087 	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2088 	/* stop CP */
2089 	WREG32(RADEON_CP_CSQ_CNTL, 0);
2090 	tmp = RREG32(RADEON_CP_RB_CNTL);
2091 	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
2092 	WREG32(RADEON_CP_RB_RPTR_WR, 0);
2093 	WREG32(RADEON_CP_RB_WPTR, 0);
2094 	WREG32(RADEON_CP_RB_CNTL, tmp);
2095 	/* save PCI state */
2096 	pci_save_state(rdev->pdev);
2097 	/* disable bus mastering */
2098 	r100_bm_disable(rdev);
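	/* soft reset the GUI engine blocks (SE, RE, PP, RB); the CP is reset
	 * separately below */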
2099 	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
2100 					S_0000F0_SOFT_RESET_RE(1) |
2101 					S_0000F0_SOFT_RESET_PP(1) |
2102 					S_0000F0_SOFT_RESET_RB(1));
2103 	RREG32(R_0000F0_RBBM_SOFT_RESET);
2104 	mdelay(500);
2105 	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
2106 	mdelay(1);
2107 	status = RREG32(R_000E40_RBBM_STATUS);
2108 	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2109 	/* reset CP */
2110 	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
2111 	RREG32(R_0000F0_RBBM_SOFT_RESET);
2112 	mdelay(500);
2113 	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
2114 	mdelay(1);
2115 	status = RREG32(R_000E40_RBBM_STATUS);
2116 	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2117 	/* restore PCI & busmastering */
2118 	pci_restore_state(rdev->pdev);
2119 	r100_enable_bm(rdev);
2120 	/* Check if GPU is idle */
2121 	if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
2122 		G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
2123 		dev_err(rdev->dev, "failed to reset GPU\n");
2124 		rdev->gpu_lockup = true;
2125 		return -1;
2126 	}
2127 	r100_mc_resume(rdev, &save);
2128 	dev_info(rdev->dev, "GPU reset succeed\n");
2129 	return 0;
2130 }
2131 
2132 void r100_set_common_regs(struct radeon_device *rdev)
2133 {
2134 	struct drm_device *dev = rdev->ddev;
2135 	bool force_dac2 = false;
2136 	u32 tmp;
2137 
2138 	/* set these so they don't interfere with anything */
2139 	WREG32(RADEON_OV0_SCALE_CNTL, 0);
2140 	WREG32(RADEON_SUBPIC_CNTL, 0);
2141 	WREG32(RADEON_VIPH_CONTROL, 0);
2142 	WREG32(RADEON_I2C_CNTL_1, 0);
2143 	WREG32(RADEON_DVI_I2C_CNTL_1, 0);
2144 	WREG32(RADEON_CAP0_TRIG_CNTL, 0);
2145 	WREG32(RADEON_CAP1_TRIG_CNTL, 0);
2146 
2147 	/* always set up dac2 on rn50 and some rv100 as lots
2148 	 * of servers seem to wire it up to a VGA port but
2149 	 * don't report it in the bios connector
2150 	 * table.
2151 	 */
2152 	switch (dev->pdev->device) {
2153 		/* RN50 */
2154 	case 0x515e:
2155 	case 0x5969:
2156 		force_dac2 = true;
2157 		break;
2158 		/* RV100*/
2159 	case 0x5159:
2160 	case 0x515a:
2161 		/* DELL triple head servers */
2162 		if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
2163 		    ((dev->pdev->subsystem_device == 0x016c) ||
2164 		     (dev->pdev->subsystem_device == 0x016d) ||
2165 		     (dev->pdev->subsystem_device == 0x016e) ||
2166 		     (dev->pdev->subsystem_device == 0x016f) ||
2167 		     (dev->pdev->subsystem_device == 0x0170) ||
2168 		     (dev->pdev->subsystem_device == 0x017d) ||
2169 		     (dev->pdev->subsystem_device == 0x017e) ||
2170 		     (dev->pdev->subsystem_device == 0x0183) ||
2171 		     (dev->pdev->subsystem_device == 0x018a) ||
2172 		     (dev->pdev->subsystem_device == 0x019a)))
2173 			force_dac2 = true;
2174 		break;
2175 	}
2176 
2177 	if (force_dac2) {
2178 		u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
2179 		u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
2180 		u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2);
2181 
2182 		/* For CRT on DAC2, don't turn it on if BIOS didn't
2183 		   enable it, even it's detected.
2184 		*/
2185 
2186 		/* force it to crtc0 */
2187 		dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
2188 		dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
2189 		disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
2190 
2191 		/* set up the TV DAC */
2192 		tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL |
2193 				 RADEON_TV_DAC_STD_MASK |
2194 				 RADEON_TV_DAC_RDACPD |
2195 				 RADEON_TV_DAC_GDACPD |
2196 				 RADEON_TV_DAC_BDACPD |
2197 				 RADEON_TV_DAC_BGADJ_MASK |
2198 				 RADEON_TV_DAC_DACADJ_MASK);
2199 		tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
2200 				RADEON_TV_DAC_NHOLD |
2201 				RADEON_TV_DAC_STD_PS2 |
2202 				(0x58 << 16));
2203 
2204 		WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
2205 		WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
2206 		WREG32(RADEON_DAC_CNTL2, dac2_cntl);
2207 	}
2208 
2209 	/* switch PM block to ACPI mode */
2210 	tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
2211 	tmp &= ~RADEON_PM_MODE_SEL;
2212 	WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
2213 
2214 }
2215 
2216 /*
2217  * VRAM info
2218  */
2219 static void r100_vram_get_type(struct radeon_device *rdev)
2220 {
2221 	uint32_t tmp;
2222 
2223 	rdev->mc.vram_is_ddr = false;
2224 	if (rdev->flags & RADEON_IS_IGP)
2225 		rdev->mc.vram_is_ddr = true;
2226 	else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
2227 		rdev->mc.vram_is_ddr = true;
2228 	if ((rdev->family == CHIP_RV100) ||
2229 	    (rdev->family == CHIP_RS100) ||
2230 	    (rdev->family == CHIP_RS200)) {
2231 		tmp = RREG32(RADEON_MEM_CNTL);
2232 		if (tmp & RV100_HALF_MODE) {
2233 			rdev->mc.vram_width = 32;
2234 		} else {
2235 			rdev->mc.vram_width = 64;
2236 		}
2237 		if (rdev->flags & RADEON_SINGLE_CRTC) {
2238 			rdev->mc.vram_width /= 4;
2239 			rdev->mc.vram_is_ddr = true;
2240 		}
2241 	} else if (rdev->family <= CHIP_RV280) {
2242 		tmp = RREG32(RADEON_MEM_CNTL);
2243 		if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
2244 			rdev->mc.vram_width = 128;
2245 		} else {
2246 			rdev->mc.vram_width = 64;
2247 		}
2248 	} else {
2249 		/* newer IGPs */
2250 		rdev->mc.vram_width = 128;
2251 	}
2252 }
2253 
2254 static u32 r100_get_accessible_vram(struct radeon_device *rdev)
2255 {
2256 	u32 aper_size;
2257 	u8 byte;
2258 
2259 	aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
2260 
2261 	/* Set HDP_APER_CNTL only on cards that are known not to be broken,
2262 	 * that is, those that have the 2nd generation multifunction PCI interface
2263 	 */
2264 	if (rdev->family == CHIP_RV280 ||
2265 	    rdev->family >= CHIP_RV350) {
2266 		WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL,
2267 		       ~RADEON_HDP_APER_CNTL);
2268 		DRM_INFO("Generation 2 PCI interface, using max accessible memory\n");
2269 		return aper_size * 2;
2270 	}
2271 
2272 	/* Older cards have all sorts of funny issues to deal with. First
2273 	 * check if it's a multifunction card by reading the PCI config
2274 	 * header type... Limit those to one aperture size
2275 	 */
2276 	pci_read_config_byte(rdev->pdev, 0xe, &byte);
2277 	if (byte & 0x80) {
2278 		DRM_INFO("Generation 1 PCI interface in multifunction mode\n");
2279 		DRM_INFO("Limiting VRAM to one aperture\n");
2280 		return aper_size;
2281 	}
2282 
2283 	/* Single function older card. We read HDP_APER_CNTL to see how the BIOS
2284 	 * has set it up. We don't write this as it's broken on some ASICs but
2285 	 * we expect the BIOS to have done the right thing (might be too optimistic...)
2286 	 */
2287 	if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL)
2288 		return aper_size * 2;
2289 	return aper_size;
2290 }
2291 
2292 void r100_vram_init_sizes(struct radeon_device *rdev)
2293 {
2294 	u64 config_aper_size;
2295 
2296 	/* work out accessible VRAM */
2297 	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
2298 	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
2299 	rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
2300 	/* FIXME we don't use the second aperture yet when we could use it */
2301 	if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
2302 		rdev->mc.visible_vram_size = rdev->mc.aper_size;
2303 	config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
2304 	if (rdev->flags & RADEON_IS_IGP) {
2305 		uint32_t tom;
2306 		/* read NB_TOM to get the amount of ram stolen for the GPU */
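		/* NB_TOM: bits 31:16 = last 64KB block of stolen memory,
		 * bits 15:0 = first 64KB block */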
2307 		tom = RREG32(RADEON_NB_TOM);
2308 		rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
2309 		WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
2310 		rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
2311 	} else {
2312 		rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
2313 		/* Some production M6 boards report 0
2314 		 * when 8 MB is installed
2315 		 */
2316 		if (rdev->mc.real_vram_size == 0) {
2317 			rdev->mc.real_vram_size = 8192 * 1024;
2318 			WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
2319 		}
2320 		/* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
2321 		 * Novell bug 204882, along with lots of Ubuntu ones
2322 		 */
2323 		if (config_aper_size > rdev->mc.real_vram_size)
2324 			rdev->mc.mc_vram_size = config_aper_size;
2325 		else
2326 			rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
2327 	}
2328 }
2329 
2330 void r100_vga_set_state(struct radeon_device *rdev, bool state)
2331 {
2332 	uint32_t temp;
2333 
2334 	temp = RREG32(RADEON_CONFIG_CNTL);
2335 	if (state == false) {
2336 		temp &= ~(1<<8);
2337 		temp |= (1<<9);
2338 	} else {
2339 		temp &= ~(1<<9);
2340 	}
2341 	WREG32(RADEON_CONFIG_CNTL, temp);
2342 }
2343 
2344 void r100_mc_init(struct radeon_device *rdev)
2345 {
2346 	u64 base;
2347 
2348 	r100_vram_get_type(rdev);
2349 	r100_vram_init_sizes(rdev);
2350 	base = rdev->mc.aper_base;
2351 	if (rdev->flags & RADEON_IS_IGP)
2352 		base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
2353 	radeon_vram_location(rdev, &rdev->mc, base);
2354 	if (!(rdev->flags & RADEON_IS_AGP))
2355 		radeon_gtt_location(rdev, &rdev->mc);
2356 	radeon_update_bandwidth_info(rdev);
2357 }
2358 
2359 
2360 /*
2361  * Indirect registers accessor
2362  */
2363 void r100_pll_errata_after_index(struct radeon_device *rdev)
2364 {
2365 	if (!(rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS)) {
2366 		return;
2367 	}
2368 	(void)RREG32(RADEON_CLOCK_CNTL_DATA);
2369 	(void)RREG32(RADEON_CRTC_GEN_CNTL);
2370 }
2371 
2372 static void r100_pll_errata_after_data(struct radeon_device *rdev)
2373 {
2374 	/* This workaround is necessary on RV100, RS100 and RS200 chips
2375 	 * or the chip could hang on a subsequent access
2376 	 */
2377 	if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
2378 		udelay(5000);
2379 	}
2380 
2381 	/* This workaround is required on some (all?) revisions of the
2382 	 * R300 and should be applied after every CLOCK_CNTL_INDEX
2383 	 * register access.  If not, register reads afterward may not be
2384 	 * correct.
2385 	 */
2386 	if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
2387 		uint32_t save, tmp;
2388 
2389 		save = RREG32(RADEON_CLOCK_CNTL_INDEX);
2390 		tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
2391 		WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
2392 		tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
2393 		WREG32(RADEON_CLOCK_CNTL_INDEX, save);
2394 	}
2395 }
2396 
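/* PLL registers are accessed indirectly: write the register index to
 * CLOCK_CNTL_INDEX (with PLL_WR_EN set for writes), then transfer the data
 * through CLOCK_CNTL_DATA, applying the PLL errata workarounds on the way.
 */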
2397 uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
2398 {
2399 	uint32_t data;
2400 
2401 	WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
2402 	r100_pll_errata_after_index(rdev);
2403 	data = RREG32(RADEON_CLOCK_CNTL_DATA);
2404 	r100_pll_errata_after_data(rdev);
2405 	return data;
2406 }
2407 
2408 void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
2409 {
2410 	WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
2411 	r100_pll_errata_after_index(rdev);
2412 	WREG32(RADEON_CLOCK_CNTL_DATA, v);
2413 	r100_pll_errata_after_data(rdev);
2414 }
2415 
2416 void r100_set_safe_registers(struct radeon_device *rdev)
2417 {
2418 	if (ASIC_IS_RN50(rdev)) {
2419 		rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
2420 		rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm);
2421 	} else if (rdev->family < CHIP_R200) {
2422 		rdev->config.r100.reg_safe_bm = r100_reg_safe_bm;
2423 		rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm);
2424 	} else {
2425 		r200_set_safe_registers(rdev);
2426 	}
2427 }
2428 
2429 /*
2430  * Debugfs info
2431  */
2432 #if defined(CONFIG_DEBUG_FS)
2433 static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
2434 {
2435 	struct drm_info_node *node = (struct drm_info_node *) m->private;
2436 	struct drm_device *dev = node->minor->dev;
2437 	struct radeon_device *rdev = dev->dev_private;
2438 	uint32_t reg, value;
2439 	unsigned i;
2440 
2441 	seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS));
2442 	seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C));
2443 	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2444 	for (i = 0; i < 64; i++) {
2445 		WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100);
2446 		reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2;
2447 		WREG32(RADEON_RBBM_CMDFIFO_ADDR, i);
2448 		value = RREG32(RADEON_RBBM_CMDFIFO_DATA);
2449 		seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
2450 	}
2451 	return 0;
2452 }
2453 
2454 static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
2455 {
2456 	struct drm_info_node *node = (struct drm_info_node *) m->private;
2457 	struct drm_device *dev = node->minor->dev;
2458 	struct radeon_device *rdev = dev->dev_private;
2459 	uint32_t rdp, wdp;
2460 	unsigned count, i, j;
2461 
2462 	radeon_ring_free_size(rdev);
2463 	rdp = RREG32(RADEON_CP_RB_RPTR);
2464 	wdp = RREG32(RADEON_CP_RB_WPTR);
2465 	count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
2466 	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2467 	seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
2468 	seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
2469 	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
2470 	seq_printf(m, "%u dwords in ring\n", count);
2471 	for (j = 0; j <= count; j++) {
2472 		i = (rdp + j) & rdev->cp.ptr_mask;
2473 		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
2474 	}
2475 	return 0;
2476 }
2477 
2478 
2479 static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data)
2480 {
2481 	struct drm_info_node *node = (struct drm_info_node *) m->private;
2482 	struct drm_device *dev = node->minor->dev;
2483 	struct radeon_device *rdev = dev->dev_private;
2484 	uint32_t csq_stat, csq2_stat, tmp;
2485 	unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr;
2486 	unsigned i;
2487 
2488 	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
2489 	seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE));
2490 	csq_stat = RREG32(RADEON_CP_CSQ_STAT);
2491 	csq2_stat = RREG32(RADEON_CP_CSQ2_STAT);
2492 	r_rptr = (csq_stat >> 0) & 0x3ff;
2493 	r_wptr = (csq_stat >> 10) & 0x3ff;
2494 	ib1_rptr = (csq_stat >> 20) & 0x3ff;
2495 	ib1_wptr = (csq2_stat >> 0) & 0x3ff;
2496 	ib2_rptr = (csq2_stat >> 10) & 0x3ff;
2497 	ib2_wptr = (csq2_stat >> 20) & 0x3ff;
2498 	seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat);
2499 	seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat);
2500 	seq_printf(m, "Ring rptr %u\n", r_rptr);
2501 	seq_printf(m, "Ring wptr %u\n", r_wptr);
2502 	seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr);
2503 	seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr);
2504 	seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr);
2505 	seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr);
2506 	/* FIXME: 0, 128, 640 depends on fifo setup see cp_init_kms
2507 	 * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */
2508 	seq_printf(m, "Ring fifo:\n");
2509 	for (i = 0; i < 256; i++) {
2510 		WREG32(RADEON_CP_CSQ_ADDR, i << 2);
2511 		tmp = RREG32(RADEON_CP_CSQ_DATA);
2512 		seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp);
2513 	}
2514 	seq_printf(m, "Indirect1 fifo:\n");
2515 	for (i = 256; i <= 512; i++) {
2516 		WREG32(RADEON_CP_CSQ_ADDR, i << 2);
2517 		tmp = RREG32(RADEON_CP_CSQ_DATA);
2518 		seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp);
2519 	}
2520 	seq_printf(m, "Indirect2 fifo:\n");
2521 	for (i = 640; i < ib2_wptr; i++) {
2522 		WREG32(RADEON_CP_CSQ_ADDR, i << 2);
2523 		tmp = RREG32(RADEON_CP_CSQ_DATA);
2524 		seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp);
2525 	}
2526 	return 0;
2527 }
2528 
2529 static int r100_debugfs_mc_info(struct seq_file *m, void *data)
2530 {
2531 	struct drm_info_node *node = (struct drm_info_node *) m->private;
2532 	struct drm_device *dev = node->minor->dev;
2533 	struct radeon_device *rdev = dev->dev_private;
2534 	uint32_t tmp;
2535 
2536 	tmp = RREG32(RADEON_CONFIG_MEMSIZE);
2537 	seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
2538 	tmp = RREG32(RADEON_MC_FB_LOCATION);
2539 	seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
2540 	tmp = RREG32(RADEON_BUS_CNTL);
2541 	seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
2542 	tmp = RREG32(RADEON_MC_AGP_LOCATION);
2543 	seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
2544 	tmp = RREG32(RADEON_AGP_BASE);
2545 	seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
2546 	tmp = RREG32(RADEON_HOST_PATH_CNTL);
2547 	seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
2548 	tmp = RREG32(0x01D0);
2549 	seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
2550 	tmp = RREG32(RADEON_AIC_LO_ADDR);
2551 	seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
2552 	tmp = RREG32(RADEON_AIC_HI_ADDR);
2553 	seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
2554 	tmp = RREG32(0x01E4);
2555 	seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
2556 	return 0;
2557 }
2558 
2559 static struct drm_info_list r100_debugfs_rbbm_list[] = {
2560 	{"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL},
2561 };
2562 
2563 static struct drm_info_list r100_debugfs_cp_list[] = {
2564 	{"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL},
2565 	{"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL},
2566 };
2567 
2568 static struct drm_info_list r100_debugfs_mc_info_list[] = {
2569 	{"r100_mc_info", r100_debugfs_mc_info, 0, NULL},
2570 };
2571 #endif
2572 
2573 int r100_debugfs_rbbm_init(struct radeon_device *rdev)
2574 {
2575 #if defined(CONFIG_DEBUG_FS)
2576 	return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1);
2577 #else
2578 	return 0;
2579 #endif
2580 }
2581 
2582 int r100_debugfs_cp_init(struct radeon_device *rdev)
2583 {
2584 #if defined(CONFIG_DEBUG_FS)
2585 	return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2);
2586 #else
2587 	return 0;
2588 #endif
2589 }
2590 
2591 int r100_debugfs_mc_info_init(struct radeon_device *rdev)
2592 {
2593 #if defined(CONFIG_DEBUG_FS)
2594 	return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1);
2595 #else
2596 	return 0;
2597 #endif
2598 }
2599 
2600 int r100_set_surface_reg(struct radeon_device *rdev, int reg,
2601 			 uint32_t tiling_flags, uint32_t pitch,
2602 			 uint32_t offset, uint32_t obj_size)
2603 {
2604 	int surf_index = reg * 16;
2605 	int flags = 0;
2606 
2607 	/* surface pitch is programmed in units of 16 bytes on r100/r200, 8 bytes on r300+ */
2608 	if (rdev->family < CHIP_R300)
2609 		flags = pitch / 16;
2610 	else
2611 		flags = pitch / 8;
2612 
2613 	if (rdev->family <= CHIP_RS200) {
2614 		if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
2615 				 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
2616 			flags |= RADEON_SURF_TILE_COLOR_BOTH;
2617 		if (tiling_flags & RADEON_TILING_MACRO)
2618 			flags |= RADEON_SURF_TILE_COLOR_MACRO;
2619 	} else if (rdev->family <= CHIP_RV280) {
2620 		if (tiling_flags & (RADEON_TILING_MACRO))
2621 			flags |= R200_SURF_TILE_COLOR_MACRO;
2622 		if (tiling_flags & RADEON_TILING_MICRO)
2623 			flags |= R200_SURF_TILE_COLOR_MICRO;
2624 	} else {
2625 		if (tiling_flags & RADEON_TILING_MACRO)
2626 			flags |= R300_SURF_TILE_MACRO;
2627 		if (tiling_flags & RADEON_TILING_MICRO)
2628 			flags |= R300_SURF_TILE_MICRO;
2629 	}
2630 
2631 	if (tiling_flags & RADEON_TILING_SWAP_16BIT)
2632 		flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP;
2633 	if (tiling_flags & RADEON_TILING_SWAP_32BIT)
2634 		flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;
2635 
2636 	DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
2637 	WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
2638 	WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
2639 	WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
2640 	return 0;
2641 }
2642 
2643 void r100_clear_surface_reg(struct radeon_device *rdev, int reg)
2644 {
2645 	int surf_index = reg * 16;
2646 	WREG32(RADEON_SURFACE0_INFO + surf_index, 0);
2647 }
2648 
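/* Recompute the display watermarks (GRPH_BUFFER_CNTL critical points and
 * stop requests) from the current memory timings and the active display
 * modes, so that the display FIFOs refill before they underflow.
 */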
2649 void r100_bandwidth_update(struct radeon_device *rdev)
2650 {
2651 	fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
2652 	fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
2653 	fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
2654 	uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
2655 	fixed20_12 memtcas_ff[8] = {
2656 		dfixed_init(1),
2657 		dfixed_init(2),
2658 		dfixed_init(3),
2659 		dfixed_init(0),
2660 		dfixed_init_half(1),
2661 		dfixed_init_half(2),
2662 		dfixed_init(0),
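		/* the eighth entry is implicitly zero-initialized */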
2663 	};
2664 	fixed20_12 memtcas_rs480_ff[8] = {
2665 		dfixed_init(0),
2666 		dfixed_init(1),
2667 		dfixed_init(2),
2668 		dfixed_init(3),
2669 		dfixed_init(0),
2670 		dfixed_init_half(1),
2671 		dfixed_init_half(2),
2672 		dfixed_init_half(3),
2673 	};
2674 	fixed20_12 memtcas2_ff[8] = {
2675 		dfixed_init(0),
2676 		dfixed_init(1),
2677 		dfixed_init(2),
2678 		dfixed_init(3),
2679 		dfixed_init(4),
2680 		dfixed_init(5),
2681 		dfixed_init(6),
2682 		dfixed_init(7),
2683 	};
2684 	fixed20_12 memtrbs[8] = {
2685 		dfixed_init(1),
2686 		dfixed_init_half(1),
2687 		dfixed_init(2),
2688 		dfixed_init_half(2),
2689 		dfixed_init(3),
2690 		dfixed_init_half(3),
2691 		dfixed_init(4),
2692 		dfixed_init_half(4)
2693 	};
2694 	fixed20_12 memtrbs_r4xx[8] = {
2695 		dfixed_init(4),
2696 		dfixed_init(5),
2697 		dfixed_init(6),
2698 		dfixed_init(7),
2699 		dfixed_init(8),
2700 		dfixed_init(9),
2701 		dfixed_init(10),
2702 		dfixed_init(11)
2703 	};
2704 	fixed20_12 min_mem_eff;
2705 	fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
2706 	fixed20_12 cur_latency_mclk, cur_latency_sclk;
2707 	fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
2708 		disp_drain_rate2, read_return_rate;
2709 	fixed20_12 time_disp1_drop_priority;
2710 	int c;
2711 	int cur_size = 16;       /* in octawords */
2712 	int critical_point = 0, critical_point2;
2714 	int stop_req, max_stop_req;
2715 	struct drm_display_mode *mode1 = NULL;
2716 	struct drm_display_mode *mode2 = NULL;
2717 	uint32_t pixel_bytes1 = 0;
2718 	uint32_t pixel_bytes2 = 0;
2719 
2720 	radeon_update_display_priority(rdev);
2721 
2722 	if (rdev->mode_info.crtcs[0]->base.enabled) {
2723 		mode1 = &rdev->mode_info.crtcs[0]->base.mode;
2724 		pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
2725 	}
2726 	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
2727 		if (rdev->mode_info.crtcs[1]->base.enabled) {
2728 			mode2 = &rdev->mode_info.crtcs[1]->base.mode;
2729 			pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
2730 		}
2731 	}
2732 
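	/* minimum memory efficiency: assume only 0.8 of the raw bandwidth is usable */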
2733 	min_mem_eff.full = dfixed_const_8(0);
2734 	/* raise MC display latency timers for enabled CRTCs when display priority is forced high */
2735 	if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
2736 		uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
2737 		mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
2738 		mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
2739 		/* check crtc enables */
2740 		if (mode2)
2741 			mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
2742 		if (mode1)
2743 			mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
2744 		WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
2745 	}
2746 
2747 	/*
2748 	 * determine if there is enough bandwidth for the current mode
2749 	 */
2750 	sclk_ff = rdev->pm.sclk;
2751 	mclk_ff = rdev->pm.mclk;
2752 
2753 	temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
2754 	temp_ff.full = dfixed_const(temp);
2755 	mem_bw.full = dfixed_mul(mclk_ff, temp_ff);
2756 
2757 	pix_clk.full = 0;
2758 	pix_clk2.full = 0;
2759 	peak_disp_bw.full = 0;
2760 	if (mode1) {
2761 		temp_ff.full = dfixed_const(1000);
2762 		pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */
2763 		pix_clk.full = dfixed_div(pix_clk, temp_ff);
2764 		temp_ff.full = dfixed_const(pixel_bytes1);
2765 		peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff);
2766 	}
2767 	if (mode2) {
2768 		temp_ff.full = dfixed_const(1000);
2769 		pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */
2770 		pix_clk2.full = dfixed_div(pix_clk2, temp_ff);
2771 		temp_ff.full = dfixed_const(pixel_bytes2);
2772 		peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff);
2773 	}
2774 
2775 	mem_bw.full = dfixed_mul(mem_bw, min_mem_eff);
2776 	if (peak_disp_bw.full >= mem_bw.full) {
2777 		DRM_ERROR("You may not have enough display bandwidth for current mode\n"
2778 			  "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
2779 	}
2780 
2781 	/* Get the memory timing values (Trcd, Trp, Tras) from the MEM_TIMING_CNTL register */
2782 	temp = RREG32(RADEON_MEM_TIMING_CNTL);
2783 	if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
2784 		mem_trcd = ((temp >> 2) & 0x3) + 1;
2785 		mem_trp  = ((temp & 0x3)) + 1;
2786 		mem_tras = ((temp & 0x70) >> 4) + 1;
2787 	} else if (rdev->family == CHIP_R300 ||
2788 		   rdev->family == CHIP_R350) { /* r300, r350 */
2789 		mem_trcd = (temp & 0x7) + 1;
2790 		mem_trp = ((temp >> 8) & 0x7) + 1;
2791 		mem_tras = ((temp >> 11) & 0xf) + 4;
2792 	} else if (rdev->family == CHIP_RV350 ||
2793 		   rdev->family <= CHIP_RV380) {
2794 		/* rv3x0 */
2795 		mem_trcd = (temp & 0x7) + 3;
2796 		mem_trp = ((temp >> 8) & 0x7) + 3;
2797 		mem_tras = ((temp >> 11) & 0xf) + 6;
2798 	} else if (rdev->family == CHIP_R420 ||
2799 		   rdev->family == CHIP_R423 ||
2800 		   rdev->family == CHIP_RV410) {
2801 		/* r4xx */
2802 		mem_trcd = (temp & 0xf) + 3;
2803 		if (mem_trcd > 15)
2804 			mem_trcd = 15;
2805 		mem_trp = ((temp >> 8) & 0xf) + 3;
2806 		if (mem_trp > 15)
2807 			mem_trp = 15;
2808 		mem_tras = ((temp >> 12) & 0x1f) + 6;
2809 		if (mem_tras > 31)
2810 			mem_tras = 31;
2811 	} else { /* RV200, R200 */
2812 		mem_trcd = (temp & 0x7) + 1;
2813 		mem_trp = ((temp >> 8) & 0x7) + 1;
2814 		mem_tras = ((temp >> 12) & 0xf) + 4;
2815 	}
2816 	/* convert to FF */
2817 	trcd_ff.full = dfixed_const(mem_trcd);
2818 	trp_ff.full = dfixed_const(mem_trp);
2819 	tras_ff.full = dfixed_const(mem_tras);
2820 
2821 	/* Get the CAS latency (Tcas) from the MEM_SDRAM_MODE_REG register */
2822 	temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
2823 	data = (temp & (7 << 20)) >> 20;
2824 	if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
2825 		if (rdev->family == CHIP_RS480) /* rs400 doesn't seem to need this */
2826 			tcas_ff = memtcas_rs480_ff[data];
2827 		else
2828 			tcas_ff = memtcas_ff[data];
2829 	} else
2830 		tcas_ff = memtcas2_ff[data];
2831 
2832 	if (rdev->family == CHIP_RS400 ||
2833 	    rdev->family == CHIP_RS480) {
2834 		/* extra cas latency stored in bits 23-25 0-4 clocks */
2835 		data = (temp >> 23) & 0x7;
2836 		if (data < 5)
2837 			tcas_ff.full += dfixed_const(data);
2838 	}
2839 
2840 	if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
2841 		/* on the R300, Tcas is included in Trbs.
2842 		 */
2843 		temp = RREG32(RADEON_MEM_CNTL);
2844 		data = (R300_MEM_NUM_CHANNELS_MASK & temp);
2845 		if (data == 1) {
2846 			if (R300_MEM_USE_CD_CH_ONLY & temp) {
2847 				temp = RREG32(R300_MC_IND_INDEX);
2848 				temp &= ~R300_MC_IND_ADDR_MASK;
2849 				temp |= R300_MC_READ_CNTL_CD_mcind;
2850 				WREG32(R300_MC_IND_INDEX, temp);
2851 				temp = RREG32(R300_MC_IND_DATA);
2852 				data = (R300_MEM_RBS_POSITION_C_MASK & temp);
2853 			} else {
2854 				temp = RREG32(R300_MC_READ_CNTL_AB);
2855 				data = (R300_MEM_RBS_POSITION_A_MASK & temp);
2856 			}
2857 		} else {
2858 			temp = RREG32(R300_MC_READ_CNTL_AB);
2859 			data = (R300_MEM_RBS_POSITION_A_MASK & temp);
2860 		}
2861 		if (rdev->family == CHIP_RV410 ||
2862 		    rdev->family == CHIP_R420 ||
2863 		    rdev->family == CHIP_R423)
2864 			trbs_ff = memtrbs_r4xx[data];
2865 		else
2866 			trbs_ff = memtrbs[data];
2867 		tcas_ff.full += trbs_ff.full;
2868 	}
2869 
2870 	sclk_eff_ff.full = sclk_ff.full;
2871 
2872 	if (rdev->flags & RADEON_IS_AGP) {
2873 		fixed20_12 agpmode_ff;
2874 		agpmode_ff.full = dfixed_const(radeon_agpmode);
2875 		temp_ff.full = dfixed_const_666(16);
2876 		sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff);
2877 	}
2878 	/* TODO PCIE lanes may affect this - agpmode == 16?? */
2879 
2880 	if (ASIC_IS_R300(rdev)) {
2881 		sclk_delay_ff.full = dfixed_const(250);
2882 	} else {
2883 		if ((rdev->family == CHIP_RV100) ||
2884 		    rdev->flags & RADEON_IS_IGP) {
2885 			if (rdev->mc.vram_is_ddr)
2886 				sclk_delay_ff.full = dfixed_const(41);
2887 			else
2888 				sclk_delay_ff.full = dfixed_const(33);
2889 		} else {
2890 			if (rdev->mc.vram_width == 128)
2891 				sclk_delay_ff.full = dfixed_const(57);
2892 			else
2893 				sclk_delay_ff.full = dfixed_const(41);
2894 		}
2895 	}
2896 
2897 	mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff);
2898 
2899 	if (rdev->mc.vram_is_ddr) {
2900 		if (rdev->mc.vram_width == 32) {
2901 			k1.full = dfixed_const(40);
2902 			c  = 3;
2903 		} else {
2904 			k1.full = dfixed_const(20);
2905 			c  = 1;
2906 		}
2907 	} else {
2908 		k1.full = dfixed_const(40);
2909 		c  = 3;
2910 	}
2911 
2912 	temp_ff.full = dfixed_const(2);
2913 	mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff);
2914 	temp_ff.full = dfixed_const(c);
2915 	mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff);
2916 	temp_ff.full = dfixed_const(4);
2917 	mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff);
2918 	mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff);
2919 	mc_latency_mclk.full += k1.full;
2920 
2921 	mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff);
2922 	mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff);
2923 
2924 	/*
2925 	  HW cursor time assuming worst case of full size colour cursor.
2926 	*/
2927 	temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
2928 	temp_ff.full += trcd_ff.full;
2929 	if (temp_ff.full < tras_ff.full)
2930 		temp_ff.full = tras_ff.full;
2931 	cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff);
2932 
2933 	temp_ff.full = dfixed_const(cur_size);
2934 	cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff);
2935 	/*
2936 	  Find the total latency for the display data.
2937 	*/
2938 	disp_latency_overhead.full = dfixed_const(8);
2939 	disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff);
2940 	mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
2941 	mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
2942 
2943 	if (mc_latency_mclk.full > mc_latency_sclk.full)
2944 		disp_latency.full = mc_latency_mclk.full;
2945 	else
2946 		disp_latency.full = mc_latency_sclk.full;
2947 
2948 	/* setup Max GRPH_STOP_REQ default value */
2949 	if (ASIC_IS_RV100(rdev))
2950 		max_stop_req = 0x5c;
2951 	else
2952 		max_stop_req = 0x7c;
2953 
2954 	if (mode1) {
2955 		/*  CRTC1
2956 		    Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
2957 		    GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
2958 		*/
2959 		stop_req = mode1->hdisplay * pixel_bytes1 / 16;
2960 
2961 		if (stop_req > max_stop_req)
2962 			stop_req = max_stop_req;
2963 
2964 		/*
2965 		  Find the drain rate of the display buffer.
2966 		*/
2967 		temp_ff.full = dfixed_const((16/pixel_bytes1));
2968 		disp_drain_rate.full = dfixed_div(pix_clk, temp_ff);
2969 
2970 		/*
2971 		  Find the critical point of the display buffer.
2972 		*/
2973 		crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency);
2974 		crit_point_ff.full += dfixed_const_half(0);
2975 
2976 		critical_point = dfixed_trunc(crit_point_ff);
2977 
2978 		if (rdev->disp_priority == 2) {
2979 			critical_point = 0;
2980 		}
2981 
2982 		/*
2983 		  The critical point should never be above max_stop_req-4.  Setting
2984 		  GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
2985 		*/
2986 		if (max_stop_req - critical_point < 4)
2987 			critical_point = 0;
2988 
2989 		if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
2990 			/* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/
2991 			critical_point = 0x10;
2992 		}
2993 
2994 		temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
2995 		temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
2996 		temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
2997 		temp &= ~(RADEON_GRPH_START_REQ_MASK);
2998 		if ((rdev->family == CHIP_R350) &&
2999 		    (stop_req > 0x15)) {
3000 			stop_req -= 0x10;
3001 		}
3002 		temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
3003 		temp |= RADEON_GRPH_BUFFER_SIZE;
3004 		temp &= ~(RADEON_GRPH_CRITICAL_CNTL   |
3005 			  RADEON_GRPH_CRITICAL_AT_SOF |
3006 			  RADEON_GRPH_STOP_CNTL);
3007 		/*
3008 		  Write the result into the register.
3009 		*/
3010 		WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
3011 						       (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
3012 
3013 #if 0
3014 		if ((rdev->family == CHIP_RS400) ||
3015 		    (rdev->family == CHIP_RS480)) {
3016 			/* attempt to program RS400 disp regs correctly ??? */
3017 			temp = RREG32(RS400_DISP1_REG_CNTL);
3018 			temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
3019 				  RS400_DISP1_STOP_REQ_LEVEL_MASK);
3020 			WREG32(RS400_DISP1_REQ_CNTL1, (temp |
3021 						       (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
3022 						       (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
3023 			temp = RREG32(RS400_DMIF_MEM_CNTL1);
3024 			temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
3025 				  RS400_DISP1_CRITICAL_POINT_STOP_MASK);
3026 			WREG32(RS400_DMIF_MEM_CNTL1, (temp |
3027 						      (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
3028 						      (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
3029 		}
3030 #endif
3031 
3032 		DRM_DEBUG("GRPH_BUFFER_CNTL from to %x\n",
3033 			  /* 	  (unsigned int)info->SavedReg->grph_buffer_cntl, */
3034 			  (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
3035 	}
3036 
3037 	if (mode2) {
3038 		u32 grph2_cntl;
3039 		stop_req = mode2->hdisplay * pixel_bytes2 / 16;
3040 
3041 		if (stop_req > max_stop_req)
3042 			stop_req = max_stop_req;
3043 
3044 		/*
3045 		  Find the drain rate of the display buffer.
3046 		*/
3047 		temp_ff.full = dfixed_const((16/pixel_bytes2));
3048 		disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff);
3049 
3050 		grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
3051 		grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
3052 		grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
3053 		grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
3054 		if ((rdev->family == CHIP_R350) &&
3055 		    (stop_req > 0x15)) {
3056 			stop_req -= 0x10;
3057 		}
3058 		grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
3059 		grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
3060 		grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL   |
3061 			  RADEON_GRPH_CRITICAL_AT_SOF |
3062 			  RADEON_GRPH_STOP_CNTL);
3063 
3064 		if ((rdev->family == CHIP_RS100) ||
3065 		    (rdev->family == CHIP_RS200))
3066 			critical_point2 = 0;
3067 		else {
3068 			temp = (rdev->mc.vram_width * (rdev->mc.vram_is_ddr + 1)) / 128;
3069 			temp_ff.full = dfixed_const(temp);
3070 			temp_ff.full = dfixed_mul(mclk_ff, temp_ff);
3071 			if (sclk_ff.full < temp_ff.full)
3072 				temp_ff.full = sclk_ff.full;
3073 
3074 			read_return_rate.full = temp_ff.full;
3075 
3076 			if (mode1) {
3077 				temp_ff.full = read_return_rate.full - disp_drain_rate.full;
3078 				time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff);
3079 			} else {
3080 				time_disp1_drop_priority.full = 0;
3081 			}
3082 			crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
3083 			crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2);
3084 			crit_point_ff.full += dfixed_const_half(0);
3085 
3086 			critical_point2 = dfixed_trunc(crit_point_ff);
3087 
3088 			if (rdev->disp_priority == 2) {
3089 				critical_point2 = 0;
3090 			}
3091 
3092 			if (max_stop_req - critical_point2 < 4)
3093 				critical_point2 = 0;
3094 
3095 		}
3096 
3097 		if (critical_point2 == 0 && rdev->family == CHIP_R300) {
3098 			/* some R300 cards have problem with this set to 0 */
3099 			critical_point2 = 0x10;
3100 		}
3101 
3102 		WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
3103 						  (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
3104 
3105 		if ((rdev->family == CHIP_RS400) ||
3106 		    (rdev->family == CHIP_RS480)) {
3107 #if 0
3108 			/* attempt to program RS400 disp2 regs correctly ??? */
3109 			temp = RREG32(RS400_DISP2_REQ_CNTL1);
3110 			temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
3111 				  RS400_DISP2_STOP_REQ_LEVEL_MASK);
3112 			WREG32(RS400_DISP2_REQ_CNTL1, (temp |
3113 						       (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
3114 						       (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
3115 			temp = RREG32(RS400_DISP2_REQ_CNTL2);
3116 			temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
3117 				  RS400_DISP2_CRITICAL_POINT_STOP_MASK);
3118 			WREG32(RS400_DISP2_REQ_CNTL2, (temp |
3119 						       (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
3120 						       (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
3121 #endif
3122 			WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
3123 			WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
3124 			WREG32(RS400_DMIF_MEM_CNTL1,  0x29CA71DC);
3125 			WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
3126 		}
3127 
3128 		DRM_DEBUG("GRPH2_BUFFER_CNTL from to %x\n",
3129 			  (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
3130 	}
3131 }
3132 
3133 static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
3134 {
3135 	DRM_ERROR("pitch                      %d\n", t->pitch);
3136 	DRM_ERROR("use_pitch                  %d\n", t->use_pitch);
3137 	DRM_ERROR("width                      %d\n", t->width);
3138 	DRM_ERROR("width_11                   %d\n", t->width_11);
3139 	DRM_ERROR("height                     %d\n", t->height);
3140 	DRM_ERROR("height_11                  %d\n", t->height_11);
3141 	DRM_ERROR("num levels                 %d\n", t->num_levels);
3142 	DRM_ERROR("depth                      %d\n", t->txdepth);
3143 	DRM_ERROR("bpp                        %d\n", t->cpp);
3144 	DRM_ERROR("coordinate type            %d\n", t->tex_coord_type);
3145 	DRM_ERROR("width round to power of 2  %d\n", t->roundup_w);
3146 	DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
3147 	DRM_ERROR("compress format            %d\n", t->compress_format);
3148 }
3149 
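/* Check that each of the five cube map faces tracked in cube_info fits in
 * its backing BO at the recorded offset (the base face is checked through
 * the regular texture size path).
 */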
3150 static int r100_cs_track_cube(struct radeon_device *rdev,
3151 			      struct r100_cs_track *track, unsigned idx)
3152 {
3153 	unsigned face, w, h;
3154 	struct radeon_bo *cube_robj;
3155 	unsigned long size;
3156 
3157 	for (face = 0; face < 5; face++) {
3158 		cube_robj = track->textures[idx].cube_info[face].robj;
3159 		w = track->textures[idx].cube_info[face].width;
3160 		h = track->textures[idx].cube_info[face].height;
3161 
3162 		size = w * h;
3163 		size *= track->textures[idx].cpp;
3164 
3165 		size += track->textures[idx].cube_info[face].offset;
3166 
3167 		if (size > radeon_bo_size(cube_robj)) {
3168 			DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
3169 				  size, radeon_bo_size(cube_robj));
3170 			r100_cs_track_texture_print(&track->textures[idx]);
3171 			return -1;
3172 		}
3173 	}
3174 	return 0;
3175 }
3176 
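/* Bytes needed for a w x h compressed mip level: DXT1 packs each 4x4 texel
 * block into 8 bytes, DXT3/5 into 16 bytes, with a minimum number of blocks
 * per row.
 */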
3177 static int r100_track_compress_size(int compress_format, int w, int h)
3178 {
3179 	int block_width, block_height, block_bytes;
3180 	int wblocks, hblocks;
3181 	int min_wblocks;
3182 	int sz;
3183 
3184 	block_width = 4;
3185 	block_height = 4;
3186 
3187 	switch (compress_format) {
3188 	case R100_TRACK_COMP_DXT1:
3189 		block_bytes = 8;
3190 		min_wblocks = 4;
3191 		break;
3192 	default:
3193 	case R100_TRACK_COMP_DXT35:
3194 		block_bytes = 16;
3195 		min_wblocks = 2;
3196 		break;
3197 	}
3198 
3199 	hblocks = (h + block_height - 1) / block_height;
3200 	wblocks = (w + block_width - 1) / block_width;
3201 	if (wblocks < min_wblocks)
3202 		wblocks = min_wblocks;
3203 	sz = wblocks * hblocks * block_bytes;
3204 	return sz;
3205 }
3206 
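/* Walk every enabled texture unit, accumulate the size of all mip levels
 * (and cube faces) and make sure the bound BO is large enough to hold them.
 */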
3207 static int r100_cs_track_texture_check(struct radeon_device *rdev,
3208 				       struct r100_cs_track *track)
3209 {
3210 	struct radeon_bo *robj;
3211 	unsigned long size;
3212 	unsigned u, i, w, h, d;
3213 	int ret;
3214 
3215 	for (u = 0; u < track->num_texture; u++) {
3216 		if (!track->textures[u].enabled)
3217 			continue;
3218 		robj = track->textures[u].robj;
3219 		if (robj == NULL) {
3220 			DRM_ERROR("No texture bound to unit %u\n", u);
3221 			return -EINVAL;
3222 		}
3223 		size = 0;
3224 		for (i = 0; i <= track->textures[u].num_levels; i++) {
3225 			if (track->textures[u].use_pitch) {
3226 				if (rdev->family < CHIP_R300)
3227 					w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
3228 				else
3229 					w = track->textures[u].pitch / (1 << i);
3230 			} else {
3231 				w = track->textures[u].width;
3232 				if (rdev->family >= CHIP_RV515)
3233 					w |= track->textures[u].width_11;
3234 				w = w / (1 << i);
3235 				if (track->textures[u].roundup_w)
3236 					w = roundup_pow_of_two(w);
3237 			}
3238 			h = track->textures[u].height;
3239 			if (rdev->family >= CHIP_RV515)
3240 				h |= track->textures[u].height_11;
3241 			h = h / (1 << i);
3242 			if (track->textures[u].roundup_h)
3243 				h = roundup_pow_of_two(h);
3244 			if (track->textures[u].tex_coord_type == 1) {
3245 				d = (1 << track->textures[u].txdepth) / (1 << i);
3246 				if (!d)
3247 					d = 1;
3248 			} else {
3249 				d = 1;
3250 			}
3251 			if (track->textures[u].compress_format) {
3252 
3253 				size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
3254 				/* compressed textures are block based */
3255 			} else
3256 				size += w * h * d;
3257 		}
3258 		size *= track->textures[u].cpp;
3259 
3260 		switch (track->textures[u].tex_coord_type) {
3261 		case 0:
3262 		case 1:
3263 			break;
3264 		case 2:
3265 			if (track->separate_cube) {
3266 				ret = r100_cs_track_cube(rdev, track, u);
3267 				if (ret)
3268 					return ret;
3269 			} else
3270 				size *= 6;
3271 			break;
3272 		default:
3273 			DRM_ERROR("Invalid texture coordinate type %u for unit "
3274 				  "%u\n", track->textures[u].tex_coord_type, u);
3275 			return -EINVAL;
3276 		}
3277 		if (size > radeon_bo_size(robj)) {
3278 			DRM_ERROR("Texture of unit %u needs %lu bytes but is "
3279 				  "%lu\n", u, size, radeon_bo_size(robj));
3280 			r100_cs_track_texture_print(&track->textures[u]);
3281 			return -EINVAL;
3282 		}
3283 	}
3284 	return 0;
3285 }
3286 
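/* Cross-check the draw state recorded while parsing: color buffers, the z
 * buffer, vertex arrays and textures must all fit within their backing BOs
 * for the pending draw to be safe.
 */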
3287 int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
3288 {
3289 	unsigned i;
3290 	unsigned long size;
3291 	unsigned prim_walk;
3292 	unsigned nverts;
3293 
3294 	for (i = 0; i < track->num_cb; i++) {
3295 		if (track->cb[i].robj == NULL) {
3296 			if (!(track->zb_cb_clear || track->color_channel_mask ||
3297 			      track->blend_read_enable)) {
3298 				continue;
3299 			}
3300 			DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
3301 			return -EINVAL;
3302 		}
3303 		size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
3304 		size += track->cb[i].offset;
3305 		if (size > radeon_bo_size(track->cb[i].robj)) {
3306 			DRM_ERROR("[drm] Buffer too small for color buffer %d "
3307 				  "(need %lu have %lu) !\n", i, size,
3308 				  radeon_bo_size(track->cb[i].robj));
3309 			DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
3310 				  i, track->cb[i].pitch, track->cb[i].cpp,
3311 				  track->cb[i].offset, track->maxy);
3312 			return -EINVAL;
3313 		}
3314 	}
3315 	if (track->z_enabled) {
3316 		if (track->zb.robj == NULL) {
3317 			DRM_ERROR("[drm] No buffer for z buffer !\n");
3318 			return -EINVAL;
3319 		}
3320 		size = track->zb.pitch * track->zb.cpp * track->maxy;
3321 		size += track->zb.offset;
3322 		if (size > radeon_bo_size(track->zb.robj)) {
3323 			DRM_ERROR("[drm] Buffer too small for z buffer "
3324 				  "(need %lu have %lu) !\n", size,
3325 				  radeon_bo_size(track->zb.robj));
3326 			DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
3327 				  track->zb.pitch, track->zb.cpp,
3328 				  track->zb.offset, track->maxy);
3329 			return -EINVAL;
3330 		}
3331 	}
3332 	prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
3333 	if (track->vap_vf_cntl & (1 << 14)) {
3334 		nverts = track->vap_alt_nverts;
3335 	} else {
3336 		nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
3337 	}
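	/* prim_walk: 1 = indexed draw, 2 = sequential walk of the vertex
	 * arrays, 3 = vertices embedded in the packet (immediate mode) */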
3338 	switch (prim_walk) {
3339 	case 1:
3340 		for (i = 0; i < track->num_arrays; i++) {
3341 			size = track->arrays[i].esize * track->max_indx * 4;
3342 			if (track->arrays[i].robj == NULL) {
3343 				DRM_ERROR("(PW %u) Vertex array %u no buffer "
3344 					  "bound\n", prim_walk, i);
3345 				return -EINVAL;
3346 			}
3347 			if (size > radeon_bo_size(track->arrays[i].robj)) {
3348 				dev_err(rdev->dev, "(PW %u) Vertex array %u "
3349 					"need %lu dwords have %lu dwords\n",
3350 					prim_walk, i, size >> 2,
3351 					radeon_bo_size(track->arrays[i].robj)
3352 					>> 2);
3353 				DRM_ERROR("Max indices %u\n", track->max_indx);
3354 				return -EINVAL;
3355 			}
3356 		}
3357 		break;
3358 	case 2:
3359 		for (i = 0; i < track->num_arrays; i++) {
3360 			size = track->arrays[i].esize * (nverts - 1) * 4;
3361 			if (track->arrays[i].robj == NULL) {
3362 				DRM_ERROR("(PW %u) Vertex array %u has no "
3363 					  "buffer bound\n", prim_walk, i);
3364 				return -EINVAL;
3365 			}
3366 			if (size > radeon_bo_size(track->arrays[i].robj)) {
3367 				dev_err(rdev->dev, "(PW %u) Vertex array %u "
3368 					"need %lu dwords have %lu dwords\n",
3369 					prim_walk, i, size >> 2,
3370 					radeon_bo_size(track->arrays[i].robj)
3371 					>> 2);
3372 				return -EINVAL;
3373 			}
3374 		}
3375 		break;
3376 	case 3:
3377 		size = track->vtx_size * nverts;
3378 		if (size != track->immd_dwords) {
3379 			DRM_ERROR("IMMD draw %u dwords but needs %lu dwords\n",
3380 				  track->immd_dwords, size);
3381 			DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
3382 				  nverts, track->vtx_size);
3383 			return -EINVAL;
3384 		}
3385 		break;
3386 	default:
3387 		DRM_ERROR("[drm] Invalid primitive walk %u for VAP_VF_CNTL\n",
3388 			  prim_walk);
3389 		return -EINVAL;
3390 	}
3391 	return r100_cs_track_texture_check(rdev, track);
3392 }
3393 
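/* Reset the tracker to pessimistic defaults between command streams: the
 * worst-case pitches, sizes and counts set here make any state the stream
 * forgets to program trip the checks in r100_cs_track_check instead of
 * slipping through unvalidated. */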
3394 void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
3395 {
3396 	unsigned i, face;
3397 
3398 	if (rdev->family < CHIP_R300) {
3399 		track->num_cb = 1;
3400 		if (rdev->family <= CHIP_RS200)
3401 			track->num_texture = 3;
3402 		else
3403 			track->num_texture = 6;
3404 		track->maxy = 2048;
3405 		track->separate_cube = 1;
3406 	} else {
3407 		track->num_cb = 4;
3408 		track->num_texture = 16;
3409 		track->maxy = 4096;
3410 		track->separate_cube = 0;
3411 	}
3412 
3413 	for (i = 0; i < track->num_cb; i++) {
3414 		track->cb[i].robj = NULL;
3415 		track->cb[i].pitch = 8192;
3416 		track->cb[i].cpp = 16;
3417 		track->cb[i].offset = 0;
3418 	}
3419 	track->z_enabled = true;
3420 	track->zb.robj = NULL;
3421 	track->zb.pitch = 8192;
3422 	track->zb.cpp = 4;
3423 	track->zb.offset = 0;
3424 	track->vtx_size = 0x7F;
3425 	track->immd_dwords = 0xFFFFFFFFUL;
3426 	track->num_arrays = 11;
3427 	track->max_indx = 0x00FFFFFFUL;
3428 	for (i = 0; i < track->num_arrays; i++) {
3429 		track->arrays[i].robj = NULL;
3430 		track->arrays[i].esize = 0x7F;
3431 	}
3432 	for (i = 0; i < track->num_texture; i++) {
3433 		track->textures[i].compress_format = R100_TRACK_COMP_NONE;
3434 		track->textures[i].pitch = 16536;
3435 		track->textures[i].width = 16536;
3436 		track->textures[i].height = 16536;
3437 		track->textures[i].width_11 = 1 << 11;
3438 		track->textures[i].height_11 = 1 << 11;
3439 		track->textures[i].num_levels = 12;
3440 		if (rdev->family <= CHIP_RS200) {
3441 			track->textures[i].tex_coord_type = 0;
3442 			track->textures[i].txdepth = 0;
3443 		} else {
3444 			track->textures[i].txdepth = 16;
3445 			track->textures[i].tex_coord_type = 1;
3446 		}
3447 		track->textures[i].cpp = 64;
3448 		track->textures[i].robj = NULL;
3449 		/* CS IB emission code makes sure texture units are disabled */
3450 		track->textures[i].enabled = false;
3451 		track->textures[i].roundup_w = true;
3452 		track->textures[i].roundup_h = true;
3453 		if (track->separate_cube)
3454 			for (face = 0; face < 5; face++) {
3455 				track->textures[i].cube_info[face].robj = NULL;
3456 				track->textures[i].cube_info[face].width = 16536;
3457 				track->textures[i].cube_info[face].height = 16536;
3458 				track->textures[i].cube_info[face].offset = 0;
3459 			}
3460 	}
3461 }
3462 
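/* Scratch-register smoke test for the ring: seed a scratch register with
 * 0xCAFEDEAD, emit a PACKET0 through the ring asking the CP to rewrite it
 * to 0xDEADBEEF, then poll up to usec_timeout for the value to land. */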
3463 int r100_ring_test(struct radeon_device *rdev)
3464 {
3465 	uint32_t scratch;
3466 	uint32_t tmp = 0;
3467 	unsigned i;
3468 	int r;
3469 
3470 	r = radeon_scratch_get(rdev, &scratch);
3471 	if (r) {
3472 		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
3473 		return r;
3474 	}
3475 	WREG32(scratch, 0xCAFEDEAD);
3476 	r = radeon_ring_lock(rdev, 2);
3477 	if (r) {
3478 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3479 		radeon_scratch_free(rdev, scratch);
3480 		return r;
3481 	}
3482 	radeon_ring_write(rdev, PACKET0(scratch, 0));
3483 	radeon_ring_write(rdev, 0xDEADBEEF);
3484 	radeon_ring_unlock_commit(rdev);
3485 	for (i = 0; i < rdev->usec_timeout; i++) {
3486 		tmp = RREG32(scratch);
3487 		if (tmp == 0xDEADBEEF) {
3488 			break;
3489 		}
3490 		DRM_UDELAY(1);
3491 	}
3492 	if (i < rdev->usec_timeout) {
3493 		DRM_INFO("ring test succeeded in %u usecs\n", i);
3494 	} else {
3495 		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
3496 			  scratch, tmp);
3497 		r = -EINVAL;
3498 	}
3499 	radeon_scratch_free(rdev, scratch);
3500 	return r;
3501 }
3502 
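/* Queue an indirect buffer for execution: a two-dword PACKET0 starting at
 * RADEON_CP_IB_BASE hands the CP the IB's GPU address and, in the adjacent
 * register, its length in dwords. */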
3503 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3504 {
3505 	radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
3506 	radeon_ring_write(rdev, ib->gpu_addr);
3507 	radeon_ring_write(rdev, ib->length_dw);
3508 }
3509 
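/* Same sentinel scheme as r100_ring_test, but routed through an indirect
 * buffer: the scratch write is padded with PACKET2 no-ops to an 8-dword
 * IB, and the IB's fence is waited on before polling the register. */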
3510 int r100_ib_test(struct radeon_device *rdev)
3511 {
3512 	struct radeon_ib *ib;
3513 	uint32_t scratch;
3514 	uint32_t tmp = 0;
3515 	unsigned i;
3516 	int r;
3517 
3518 	r = radeon_scratch_get(rdev, &scratch);
3519 	if (r) {
3520 		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
3521 		return r;
3522 	}
3523 	WREG32(scratch, 0xCAFEDEAD);
3524 	r = radeon_ib_get(rdev, &ib);
3525 	if (r) {
		/* don't leak the scratch register if IB allocation fails */
		radeon_scratch_free(rdev, scratch);
3526 		return r;
3527 	}
3528 	ib->ptr[0] = PACKET0(scratch, 0);
3529 	ib->ptr[1] = 0xDEADBEEF;
3530 	ib->ptr[2] = PACKET2(0);
3531 	ib->ptr[3] = PACKET2(0);
3532 	ib->ptr[4] = PACKET2(0);
3533 	ib->ptr[5] = PACKET2(0);
3534 	ib->ptr[6] = PACKET2(0);
3535 	ib->ptr[7] = PACKET2(0);
3536 	ib->length_dw = 8;
3537 	r = radeon_ib_schedule(rdev, ib);
3538 	if (r) {
3539 		radeon_scratch_free(rdev, scratch);
3540 		radeon_ib_free(rdev, &ib);
3541 		return r;
3542 	}
3543 	r = radeon_fence_wait(ib->fence, false);
3544 	if (r) {
		/* release the scratch register and IB before bailing out */
		radeon_scratch_free(rdev, scratch);
		radeon_ib_free(rdev, &ib);
3545 		return r;
3546 	}
3547 	for (i = 0; i < rdev->usec_timeout; i++) {
3548 		tmp = RREG32(scratch);
3549 		if (tmp == 0xDEADBEEF) {
3550 			break;
3551 		}
3552 		DRM_UDELAY(1);
3553 	}
3554 	if (i < rdev->usec_timeout) {
3555 		DRM_INFO("ib test succeeded in %u usecs\n", i);
3556 	} else {
3557 		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
3558 			  scratch, tmp);
3559 		r = -EINVAL;
3560 	}
3561 	radeon_scratch_free(rdev, scratch);
3562 	radeon_ib_free(rdev, &ib);
3563 	return r;
3564 }
3565 
3566 void r100_ib_fini(struct radeon_device *rdev)
3567 {
3568 	radeon_ib_pool_fini(rdev);
3569 }
3570 
3571 int r100_ib_init(struct radeon_device *rdev)
3572 {
3573 	int r;
3574 
3575 	r = radeon_ib_pool_init(rdev);
3576 	if (r) {
3577 		dev_err(rdev->dev, "failed initializing IB pool (%d).\n", r);
3578 		r100_ib_fini(rdev);
3579 		return r;
3580 	}
3581 	r = r100_ib_test(rdev);
3582 	if (r) {
3583 		dev_err(rdev->dev, "failed testing IB (%d).\n", r);
3584 		r100_ib_fini(rdev);
3585 		return r;
3586 	}
3587 	return 0;
3588 }
3589 
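/* Quiesce all memory-controller clients before the apertures move: stop
 * the CP, save the CRTC state about to be clobbered, then shut off VGA RAM
 * decode, cursors, overlay and CRTC display requests so nothing fetches
 * from VRAM while r100_mc_program rewrites the memory layout. */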
3590 void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
3591 {
3592 	/* Shut down the CP; we shouldn't need to do this, but better safe
3593 	 * than sorry.
3594 	 */
3595 	rdev->cp.ready = false;
3596 	WREG32(R_000740_CP_CSQ_CNTL, 0);
3597 
3598 	/* Save a few CRTC registers */
3599 	save->GENMO_WT = RREG8(R_0003C2_GENMO_WT);
3600 	save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL);
3601 	save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL);
3602 	save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET);
3603 	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3604 		save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL);
3605 		save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET);
3606 	}
3607 
3608 	/* Disable VGA aperture access */
3609 	WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT);
3610 	/* Disable cursor, overlay, crtc */
3611 	WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1));
3612 	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |
3613 					S_000054_CRTC_DISPLAY_DIS(1));
3614 	WREG32(R_000050_CRTC_GEN_CNTL,
3615 			(C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) |
3616 			S_000050_CRTC_DISP_REQ_EN_B(1));
3617 	WREG32(R_000420_OV0_SCALE_CNTL,
3618 		C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL));
3619 	WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET);
3620 	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3621 		WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET |
3622 						S_000360_CUR2_LOCK(1));
3623 		WREG32(R_0003F8_CRTC2_GEN_CNTL,
3624 			(C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) |
3625 			S_0003F8_CRTC2_DISPLAY_DIS(1) |
3626 			S_0003F8_CRTC2_DISP_REQ_EN_B(1));
3627 		WREG32(R_000360_CUR2_OFFSET,
3628 			C_000360_CUR2_LOCK & save->CUR2_OFFSET);
3629 	}
3630 }
3631 
3632 void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
3633 {
3634 	/* Update base address for crtc */
3635 	WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
3636 	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3637 		WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
3638 	}
3639 	/* Restore CRTC registers */
3640 	WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
3641 	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
3642 	WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
3643 	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3644 		WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
3645 	}
3646 }
3647 
3648 void r100_vga_render_disable(struct radeon_device *rdev)
3649 {
3650 	u32 tmp;
3651 
3652 	tmp = RREG8(R_0003C2_GENMO_WT);
3653 	WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp);
3654 }
3655 
3656 static void r100_debugfs(struct radeon_device *rdev)
3657 {
3658 	int r;
3659 
3660 	r = r100_debugfs_mc_info_init(rdev);
3661 	if (r)
3662 		dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n");
3663 }
3664 
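/* Program the MC's address space layout: with its clients stopped, point
 * the AGP aperture at the GART range (or at what appears to be an invalid
 * parked range on non-AGP boards), wait for the MC to idle, then place the
 * framebuffer via MC_FB_LOCATION and restore the saved display state. */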
3665 static void r100_mc_program(struct radeon_device *rdev)
3666 {
3667 	struct r100_mc_save save;
3668 
3669 	/* Stop all MC clients */
3670 	r100_mc_stop(rdev, &save);
3671 	if (rdev->flags & RADEON_IS_AGP) {
3672 		WREG32(R_00014C_MC_AGP_LOCATION,
3673 			S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
3674 			S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
3675 		WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
3676 		if (rdev->family > CHIP_RV200)
3677 			WREG32(R_00015C_AGP_BASE_2,
3678 				upper_32_bits(rdev->mc.agp_base) & 0xff);
3679 	} else {
3680 		WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
3681 		WREG32(R_000170_AGP_BASE, 0);
3682 		if (rdev->family > CHIP_RV200)
3683 			WREG32(R_00015C_AGP_BASE_2, 0);
3684 	}
3685 	/* Wait for mc idle */
3686 	if (r100_mc_wait_for_idle(rdev))
3687 		dev_warn(rdev->dev, "Wait for MC idle timeout.\n");
3688 	/* Program the MC; the FB address space is limited to 32 bits */
3689 	WREG32(R_000148_MC_FB_LOCATION,
3690 		S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
3691 		S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
3692 	r100_mc_resume(rdev, &save);
3693 }
3694 
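/* Bring clocks up for acceleration: optionally enable legacy dynamic
 * clock gating (radeon_dynclks parameter), then force the CP and VIP
 * blocks, plus both display blocks on RV250/RV280, on via SCLK_CNTL. */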
3695 void r100_clock_startup(struct radeon_device *rdev)
3696 {
3697 	u32 tmp;
3698 
3699 	if (radeon_dynclks != -1 && radeon_dynclks)
3700 		radeon_legacy_set_clock_gating(rdev, 1);
3701 	/* We need to force some of the blocks on */
3702 	tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
3703 	tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
3704 	if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280))
3705 		tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1);
3706 	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
3707 }
3708 
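/* Hardware bring-up shared by init and resume: common registers, MC
 * layout, clocks, bus mastering and (on PCI) the GART come first, then
 * IRQs, the 1MB CP ring, writeback and the IB pool with its self-test.
 * CP or IB failures are fatal; a writeback failure only warns. */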
3709 static int r100_startup(struct radeon_device *rdev)
3710 {
3711 	int r;
3712 
3713 	/* set common regs */
3714 	r100_set_common_regs(rdev);
3715 	/* program mc */
3716 	r100_mc_program(rdev);
3717 	/* Resume clock */
3718 	r100_clock_startup(rdev);
3719 	/* Initialize GPU configuration (# pipes, ...) */
3720 //	r100_gpu_init(rdev);
3721 	/* Initialize GART (initialize after TTM so we can allocate
3722 	 * memory through TTM but finalize after TTM) */
3723 	r100_enable_bm(rdev);
3724 	if (rdev->flags & RADEON_IS_PCI) {
3725 		r = r100_pci_gart_enable(rdev);
3726 		if (r)
3727 			return r;
3728 	}
3729 	/* Enable IRQ */
3730 	r100_irq_set(rdev);
3731 	rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
3732 	/* 1M ring buffer */
3733 	r = r100_cp_init(rdev, 1024 * 1024);
3734 	if (r) {
3735 		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
3736 		return r;
3737 	}
3738 	r = r100_wb_init(rdev);
3739 	if (r)
3740 		dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
3741 	r = r100_ib_init(rdev);
3742 	if (r) {
3743 		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
3744 		return r;
3745 	}
3746 	return 0;
3747 }
3748 
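/* Resume ordering matters here: the GART is taken down and the GPU reset
 * before the BIOS is re-posted, with clocks restarted both before the
 * reset and again after the post, and the surface registers reinitialized
 * before the common startup path runs. */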
3749 int r100_resume(struct radeon_device *rdev)
3750 {
3751 	/* Make sure the GART is disabled */
3752 	if (rdev->flags & RADEON_IS_PCI)
3753 		r100_pci_gart_disable(rdev);
3754 	/* Resume clock before doing reset */
3755 	r100_clock_startup(rdev);
3756 	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
3757 	if (radeon_asic_reset(rdev)) {
3758 		dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
3759 			RREG32(R_000E40_RBBM_STATUS),
3760 			RREG32(R_0007C0_CP_STAT));
3761 	}
3762 	/* post */
3763 	radeon_combios_asic_init(rdev->ddev);
3764 	/* Resume clock after posting */
3765 	r100_clock_startup(rdev);
3766 	/* Initialize surface registers */
3767 	radeon_surface_init(rdev);
3768 	return r100_startup(rdev);
3769 }
3770 
3771 int r100_suspend(struct radeon_device *rdev)
3772 {
3773 	r100_cp_disable(rdev);
3774 	r100_wb_disable(rdev);
3775 	r100_irq_disable(rdev);
3776 	if (rdev->flags & RADEON_IS_PCI)
3777 		r100_pci_gart_disable(rdev);
3778 	return 0;
3779 }
3780 
3781 void r100_fini(struct radeon_device *rdev)
3782 {
3783 	r100_cp_fini(rdev);
3784 	r100_wb_fini(rdev);
3785 	r100_ib_fini(rdev);
3786 	radeon_gem_fini(rdev);
3787 	if (rdev->flags & RADEON_IS_PCI)
3788 		r100_pci_gart_fini(rdev);
3789 	radeon_agp_fini(rdev);
3790 	radeon_irq_kms_fini(rdev);
3791 	radeon_fence_driver_fini(rdev);
3792 	radeon_bo_fini(rdev);
3793 	radeon_atombios_fini(rdev);
3794 	kfree(rdev->bios);
3795 	rdev->bios = NULL;
3796 }
3797 
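/* One-time init for this ASIC family.  The sequence mirrors the comments
 * below: VGA off, BIOS fetch (these chips must carry a combios), GPU
 * reset, POST check, errata, clocks, AGP, VRAM sizing, then the fence,
 * IRQ, buffer-object and GART infrastructure.  Note that a failed
 * r100_startup only disables acceleration; init still returns 0 so
 * modesetting keeps working. */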
3798 int r100_init(struct radeon_device *rdev)
3799 {
3800 	int r;
3801 
3802 	/* Register debugfs file specific to this group of asics */
3803 	r100_debugfs(rdev);
3804 	/* Disable VGA */
3805 	r100_vga_render_disable(rdev);
3806 	/* Initialize scratch registers */
3807 	radeon_scratch_init(rdev);
3808 	/* Initialize surface registers */
3809 	radeon_surface_init(rdev);
3810 	/* TODO: disabling VGA should go through the VGA request interface */
3811 	/* BIOS*/
3812 	if (!radeon_get_bios(rdev)) {
3813 		if (ASIC_IS_AVIVO(rdev))
3814 			return -EINVAL;
3815 	}
3816 	if (rdev->is_atom_bios) {
3817 		dev_err(rdev->dev, "Expecting combios for R100-family GPU\n");
3818 		return -EINVAL;
3819 	} else {
3820 		r = radeon_combios_init(rdev);
3821 		if (r)
3822 			return r;
3823 	}
3824 	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
3825 	if (radeon_asic_reset(rdev)) {
3826 		dev_warn(rdev->dev,
3827 			"GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
3828 			RREG32(R_000E40_RBBM_STATUS),
3829 			RREG32(R_0007C0_CP_STAT));
3830 	}
3831 	/* check whether the card is posted */
3832 	if (!radeon_boot_test_post_card(rdev))
3833 		return -EINVAL;
3834 	/* Set asic errata */
3835 	r100_errata(rdev);
3836 	/* Initialize clocks */
3837 	radeon_get_clock_info(rdev->ddev);
3838 	/* initialize AGP */
3839 	if (rdev->flags & RADEON_IS_AGP) {
3840 		r = radeon_agp_init(rdev);
3841 		if (r) {
3842 			radeon_agp_disable(rdev);
3843 		}
3844 	}
3845 	/* initialize VRAM */
3846 	r100_mc_init(rdev);
3847 	/* Fence driver */
3848 	r = radeon_fence_driver_init(rdev);
3849 	if (r)
3850 		return r;
3851 	r = radeon_irq_kms_init(rdev);
3852 	if (r)
3853 		return r;
3854 	/* Memory manager */
3855 	r = radeon_bo_init(rdev);
3856 	if (r)
3857 		return r;
3858 	if (rdev->flags & RADEON_IS_PCI) {
3859 		r = r100_pci_gart_init(rdev);
3860 		if (r)
3861 			return r;
3862 	}
3863 	r100_set_safe_registers(rdev);
3864 	rdev->accel_working = true;
3865 	r = r100_startup(rdev);
3866 	if (r) {
3867 		/* Something went wrong with accel init; stop acceleration */
3868 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
3869 		r100_cp_fini(rdev);
3870 		r100_wb_fini(rdev);
3871 		r100_ib_fini(rdev);
3872 		radeon_irq_kms_fini(rdev);
3873 		if (rdev->flags & RADEON_IS_PCI)
3874 			r100_pci_gart_fini(rdev);
3875 		rdev->accel_working = false;
3876 	}
3877 	return 0;
3878 }
3879