/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "r100d.h"
#include "rs100d.h"
#include "rv200d.h"
#include "rv250d.h"
#include "atom.h"

#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/module.h>

#include "r100_reg_safe.h"
#include "rn50_reg_safe.h"

/* Firmware Names */
#define FIRMWARE_R100		"radeon/R100_cp.bin"
#define FIRMWARE_R200		"radeon/R200_cp.bin"
#define FIRMWARE_R300		"radeon/R300_cp.bin"
#define FIRMWARE_R420		"radeon/R420_cp.bin"
#define FIRMWARE_RS690		"radeon/RS690_cp.bin"
#define FIRMWARE_RS600		"radeon/RS600_cp.bin"
#define FIRMWARE_R520		"radeon/R520_cp.bin"

MODULE_FIRMWARE(FIRMWARE_R100);
MODULE_FIRMWARE(FIRMWARE_R200);
MODULE_FIRMWARE(FIRMWARE_R300);
MODULE_FIRMWARE(FIRMWARE_R420);
MODULE_FIRMWARE(FIRMWARE_RS690);
MODULE_FIRMWARE(FIRMWARE_RS600);
MODULE_FIRMWARE(FIRMWARE_R520);

#include "r100_track.h"

/* This file gathers functions specific to:
 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
 * and, in some cases, others.
 */

/**
 * r100_wait_for_vblank - vblank wait asic callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (r1xx-r4xx).
 */
void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
	int i;

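	/*
	 * Two-phase wait: first let any in-progress vblank period end,
	 * then wait for the next one to begin, so the caller always sees
	 * the start of a fresh vblank (each phase bounded by usec_timeout).
	 */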
	if (radeon_crtc->crtc_id == 0) {
		if (RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN) {
			for (i = 0; i < rdev->usec_timeout; i++) {
				if (!(RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR))
					break;
				udelay(1);
			}
			for (i = 0; i < rdev->usec_timeout; i++) {
				if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
					break;
				udelay(1);
			}
		}
	} else {
		if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN) {
			for (i = 0; i < rdev->usec_timeout; i++) {
				if (!(RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR))
					break;
				udelay(1);
			}
			for (i = 0; i < rdev->usec_timeout; i++) {
				if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
					break;
				udelay(1);
			}
		}
	}
}

/**
 * r100_pre_page_flip - pre-pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to prepare for pageflip on
 *
 * Pre-pageflip callback (r1xx-r4xx).
 * Enables the pageflip irq (vblank irq).
 */
void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
{
	/* enable the pflip int */
	radeon_irq_kms_pflip_irq_get(rdev, crtc);
}

/**
 * r100_post_page_flip - post-pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to cleanup pageflip on
 *
 * Post-pageflip callback (r1xx-r4xx).
 * Disables the pageflip irq (vblank irq).
 */
void r100_post_page_flip(struct radeon_device *rdev, int crtc)
{
	/* disable the pflip int */
	radeon_irq_kms_pflip_irq_put(rdev, crtc);
}

/**
 * r100_page_flip - pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Does the actual pageflip (r1xx-r4xx).
 * During vblank we take the crtc lock and wait for the update_pending
 * bit to go high; when it does, we release the lock and allow the
 * double-buffered update to take place.
 * Returns the current update pending status.
 */
u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
	int i;

	/* Lock the graphics update lock */
	/* update the scanout addresses */
	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);

	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
			break;
		udelay(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);

	/* Return current update_pending status: */
	return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET;
}
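
/*
 * Sketch of how the pageflip callbacks above fit together (assumed
 * driver flow, simplified from the generic radeon pageflip path):
 *
 *	r100_pre_page_flip(rdev, crtc);       // arm the vblank/pflip irq
 *	r100_page_flip(rdev, crtc, new_base); // latch the new scanout base
 *	// vblank handler completes the flip once update_pending clears
 *	r100_post_page_flip(rdev, crtc);      // disarm the irq again
 */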

/**
 * r100_pm_get_dynpm_state - look up dynpm power state callback.
 *
 * @rdev: radeon_device pointer
 *
 * Look up the optimal power state based on the
 * current state of the GPU (r1xx-r5xx).
 * Used for dynpm only.
 */
void r100_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;
	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	switch (rdev->pm.dynpm_planned_action) {
	case DYNPM_ACTION_MINIMUM:
		rdev->pm.requested_power_state_index = 0;
		rdev->pm.dynpm_can_downclock = false;
		break;
	case DYNPM_ACTION_DOWNCLOCK:
		if (rdev->pm.current_power_state_index == 0) {
			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
			rdev->pm.dynpm_can_downclock = false;
		} else {
			if (rdev->pm.active_crtc_count > 1) {
				for (i = 0; i < rdev->pm.num_power_states; i++) {
					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
						continue;
					else if (i >= rdev->pm.current_power_state_index) {
						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
						break;
					} else {
						rdev->pm.requested_power_state_index = i;
						break;
					}
				}
			} else
				rdev->pm.requested_power_state_index =
					rdev->pm.current_power_state_index - 1;
		}
		/* don't use the power state if crtcs are active and no display flag is set */
		if ((rdev->pm.active_crtc_count > 0) &&
		    (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
		     RADEON_PM_MODE_NO_DISPLAY)) {
			rdev->pm.requested_power_state_index++;
		}
		break;
	case DYNPM_ACTION_UPCLOCK:
		if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
			rdev->pm.dynpm_can_upclock = false;
		} else {
			if (rdev->pm.active_crtc_count > 1) {
				for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
						continue;
					else if (i <= rdev->pm.current_power_state_index) {
						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
						break;
					} else {
						rdev->pm.requested_power_state_index = i;
						break;
					}
				}
			} else
				rdev->pm.requested_power_state_index =
					rdev->pm.current_power_state_index + 1;
		}
		break;
	case DYNPM_ACTION_DEFAULT:
		rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
		rdev->pm.dynpm_can_upclock = false;
		break;
	case DYNPM_ACTION_NONE:
	default:
		DRM_ERROR("Requested mode for not defined action\n");
		return;
	}
	/* only one clock mode per power state */
	rdev->pm.requested_clock_mode_index = 0;

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].sclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  clock_info[rdev->pm.requested_clock_mode_index].mclk,
		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
		  pcie_lanes);
}

/**
 * r100_pm_init_profile - Initialize power profiles callback.
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the power states used in profile mode
 * (r1xx-r3xx).
 * Used for profile mode only.
 */
void r100_pm_init_profile(struct radeon_device *rdev)
{
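	/* The profiles below come in single-head (sh) and multi-head (mh)
	 * variants.  Apart from the default profile, they all select the
	 * lowest power state (index 0) while displays are off, and every
	 * profile uses clock mode 0. */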
	/* default */
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
	/* low sh */
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
	/* mid sh */
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
	/* high sh */
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
	/* low mh */
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
	/* mid mh */
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
	/* high mh */
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
}

/**
 * r100_pm_misc - set additional pm hw parameters callback.
 *
 * @rdev: radeon_device pointer
 *
 * Set non-clock parameters associated with a power state
 * (voltage, pcie lanes, etc.) (r1xx-r4xx).
 */
void r100_pm_misc(struct radeon_device *rdev)
{
	int requested_index = rdev->pm.requested_power_state_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
	u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;

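	/* Drive the voltage-drop GPIO: assert it when the requested state
	 * supports voltage drop, de-assert it otherwise; active_high
	 * selects which electrical level means "asserted". */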
	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp |= voltage->gpio.mask;
			else
				tmp &= ~(voltage->gpio.mask);
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		} else {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp &= ~voltage->gpio.mask;
			else
				tmp |= voltage->gpio.mask;
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		}
	}

	sclk_cntl = RREG32_PLL(SCLK_CNTL);
	sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
	sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
	sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
	sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
		sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
		if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
		else
			sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
		else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
	} else
		sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;

	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
		sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
		if (voltage->delay) {
			sclk_more_cntl |= VOLTAGE_DROP_SYNC;
			switch (voltage->delay) {
			case 33:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
				break;
			case 66:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
				break;
			case 99:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
				break;
			case 132:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
				break;
			}
		} else
			sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
	} else
		sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;

	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
		sclk_cntl &= ~FORCE_HDP;
	else
		sclk_cntl |= FORCE_HDP;

	WREG32_PLL(SCLK_CNTL, sclk_cntl);
	WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
	WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);

	/* set pcie lanes */
	if ((rdev->flags & RADEON_IS_PCIE) &&
	    !(rdev->flags & RADEON_IS_IGP) &&
	    rdev->asic->pm.set_pcie_lanes &&
	    (ps->pcie_lanes !=
	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
		radeon_set_pcie_lanes(rdev,
				      ps->pcie_lanes);
		DRM_DEBUG_DRIVER("Setting: p: %d\n", ps->pcie_lanes);
	}
}

/**
 * r100_pm_prepare - pre-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Prepare for a power state change (r1xx-r4xx).
 */
void r100_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			if (radeon_crtc->crtc_id) {
				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
				tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
			} else {
				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
				tmp |= RADEON_CRTC_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
			}
		}
	}
}

/**
 * r100_pm_finish - post-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Clean up after a power state change (r1xx-r4xx).
 */
void r100_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			if (radeon_crtc->crtc_id) {
				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
				tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
			} else {
				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
				tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
			}
		}
	}
}

/**
 * r100_gui_idle - gui idle callback.
 *
 * @rdev: radeon_device pointer
 *
 * Check if the GUI (2D/3D engines) is idle (r1xx-r5xx).
 * Returns true if idle, false if not.
 */
bool r100_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
		return false;
	else
		return true;
}

/* hpd for digital panel detect/disconnect */
/**
 * r100_hpd_sense - hpd sense callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (r1xx-r4xx).
 * Returns true if connected, false if not connected.
 */
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)
			connected = true;
		break;
	case RADEON_HPD_2:
		if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)
			connected = true;
		break;
	default:
		break;
	}
	return connected;
}

/**
 * r100_hpd_set_polarity - hpd set polarity callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (r1xx-r4xx).
 */
void r100_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r100_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(RADEON_FP_GEN_CNTL);
		if (connected)
			tmp &= ~RADEON_FP_DETECT_INT_POL;
		else
			tmp |= RADEON_FP_DETECT_INT_POL;
		WREG32(RADEON_FP_GEN_CNTL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(RADEON_FP2_GEN_CNTL);
		if (connected)
			tmp &= ~RADEON_FP2_DETECT_INT_POL;
		else
			tmp |= RADEON_FP2_DETECT_INT_POL;
		WREG32(RADEON_FP2_GEN_CNTL, tmp);
		break;
	default:
		break;
	}
}

/**
 * r100_hpd_init - hpd setup callback.
 *
 * @rdev: radeon_device pointer
 *
 * Setup the hpd pins used by the card (r1xx-r4xx).
 * Set the polarity, and enable the hpd interrupts.
 */
void r100_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned enable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		enable |= 1 << radeon_connector->hpd.hpd;
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	radeon_irq_kms_enable_hpd(rdev, enable);
}

/**
 * r100_hpd_fini - hpd tear down callback.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the hpd pins used by the card (r1xx-r4xx).
 * Disable the hpd interrupts.
 */
void r100_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		disable |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_disable_hpd(rdev, disable);
}

/*
 * PCI GART
 */
void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
{
	/* TODO: can we do something here? */
	/* The hw seems to cache only one entry, so we should discard
	 * that entry; otherwise, if the first GPU GART read hits it,
	 * the read could end up at the wrong address. */
}

int r100_pci_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.ptr) {
		WARN(1, "R100 PCI GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
	rdev->asic->gart.set_page = &r100_pci_gart_set_page;
	return radeon_gart_table_ram_alloc(rdev);
}

int r100_pci_gart_enable(struct radeon_device *rdev)
{
	uint32_t tmp;

	radeon_gart_restore(rdev);
	/* discard memory request outside of configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp);
	/* set address range for PCI address translate */
	WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start);
	WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end);
	/* set PCI GART page-table base address */
	WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
	WREG32(RADEON_AIC_CNTL, tmp);
	r100_pci_gart_tlb_flush(rdev);
	DRM_INFO("PCI GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

void r100_pci_gart_disable(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* discard memory request outside of configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
	WREG32(RADEON_AIC_LO_ADDR, 0);
	WREG32(RADEON_AIC_HI_ADDR, 0);
}

int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	u32 *gtt = rdev->gart.ptr;

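	/* Each GART entry is a single 32-bit little-endian dword holding
	 * the page's bus address; only the low 32 bits of addr are
	 * representable on this asic. */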
	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
	gtt[i] = cpu_to_le32(lower_32_bits(addr));
	return 0;
}

void r100_pci_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r100_pci_gart_disable(rdev);
	radeon_gart_table_ram_free(rdev);
}

int r100_irq_set(struct radeon_device *rdev)
{
	uint32_t tmp = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		WREG32(R_000040_GEN_INT_CNTL, 0);
		return -EINVAL;
	}
	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
		tmp |= RADEON_SW_INT_ENABLE;
	}
	if (rdev->irq.gui_idle) {
		tmp |= RADEON_GUI_IDLE_MASK;
	}
	if (rdev->irq.crtc_vblank_int[0] ||
	    atomic_read(&rdev->irq.pflip[0])) {
		tmp |= RADEON_CRTC_VBLANK_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    atomic_read(&rdev->irq.pflip[1])) {
		tmp |= RADEON_CRTC2_VBLANK_MASK;
	}
	if (rdev->irq.hpd[0]) {
		tmp |= RADEON_FP_DETECT_MASK;
	}
	if (rdev->irq.hpd[1]) {
		tmp |= RADEON_FP2_DETECT_MASK;
	}
	WREG32(RADEON_GEN_INT_CNTL, tmp);
	return 0;
}

void r100_irq_disable(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(R_000040_GEN_INT_CNTL, 0);
	/* Wait and acknowledge irq */
	mdelay(1);
	tmp = RREG32(R_000044_GEN_INT_STATUS);
	WREG32(R_000044_GEN_INT_STATUS, tmp);
}

static uint32_t r100_irq_ack(struct radeon_device *rdev)
{
	uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
	uint32_t irq_mask = RADEON_SW_INT_TEST |
		RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
		RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;

	/* the interrupt works, but the status bit is permanently asserted */
	if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
		if (!rdev->irq.gui_idle_acked)
			irq_mask |= RADEON_GUI_IDLE_STAT;
	}

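	/* Writing the asserted bits back to GEN_INT_STATUS acknowledges
	 * them in hardware; only the bits in irq_mask are reported back
	 * to the caller. */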
	if (irqs) {
		WREG32(RADEON_GEN_INT_STATUS, irqs);
	}
	return irqs & irq_mask;
}

int r100_irq_process(struct radeon_device *rdev)
{
	uint32_t status, msi_rearm;
	bool queue_hotplug = false;

	/* reset gui idle ack.  the status bit is broken */
	rdev->irq.gui_idle_acked = false;

	status = r100_irq_ack(rdev);
	if (!status) {
		return IRQ_NONE;
	}
	if (rdev->shutdown) {
		return IRQ_NONE;
	}
	while (status) {
		/* SW interrupt */
		if (status & RADEON_SW_INT_TEST) {
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
		}
		/* gui idle interrupt */
		if (status & RADEON_GUI_IDLE_STAT) {
			rdev->irq.gui_idle_acked = true;
			wake_up(&rdev->irq.idle_queue);
		}
		/* Vertical blank interrupts */
		if (status & RADEON_CRTC_VBLANK_STAT) {
			if (rdev->irq.crtc_vblank_int[0]) {
				drm_handle_vblank(rdev->ddev, 0);
				rdev->pm.vblank_sync = true;
				wake_up(&rdev->irq.vblank_queue);
			}
			if (atomic_read(&rdev->irq.pflip[0]))
				radeon_crtc_handle_flip(rdev, 0);
		}
		if (status & RADEON_CRTC2_VBLANK_STAT) {
			if (rdev->irq.crtc_vblank_int[1]) {
				drm_handle_vblank(rdev->ddev, 1);
				rdev->pm.vblank_sync = true;
				wake_up(&rdev->irq.vblank_queue);
			}
			if (atomic_read(&rdev->irq.pflip[1]))
				radeon_crtc_handle_flip(rdev, 1);
		}
		if (status & RADEON_FP_DETECT_STAT) {
			queue_hotplug = true;
			DRM_DEBUG("HPD1\n");
		}
		if (status & RADEON_FP2_DETECT_STAT) {
			queue_hotplug = true;
			DRM_DEBUG("HPD2\n");
		}
		status = r100_irq_ack(rdev);
	}
	/* reset gui idle ack.  the status bit is broken */
	rdev->irq.gui_idle_acked = false;
	if (queue_hotplug)
		schedule_work(&rdev->hotplug_work);
	if (rdev->msi_enabled) {
		switch (rdev->family) {
		case CHIP_RS400:
		case CHIP_RS480:
			msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM;
			WREG32(RADEON_AIC_CNTL, msi_rearm);
			WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
			break;
		default:
			WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
			break;
		}
	}
	return IRQ_HANDLED;
}

u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	if (crtc == 0)
		return RREG32(RADEON_CRTC_CRNT_FRAME);
	else
		return RREG32(RADEON_CRTC2_CRNT_FRAME);
}

/* Whoever calls radeon_fence_emit should call ring_lock and ask
 * for enough space (today the callers are ib schedule and buffer move) */
void r100_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];

	/* We have to make sure that caches are flushed before the
	 * CPU might read something from VRAM. */
	radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL);
	radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
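	/* Pulse HDP_READ_BUFFER_INVALIDATE in HOST_PATH_CNTL, then
	 * restore the saved value so CPU reads through HDP observe the
	 * freshly flushed data. */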
	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
				RADEON_HDP_READ_BUFFER_INVALIDATE);
	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(ring, RADEON_SW_INT_FIRE);
}

void r100_semaphore_ring_emit(struct radeon_device *rdev,
			      struct radeon_ring *ring,
			      struct radeon_semaphore *semaphore,
			      bool emit_wait)
{
	/* Unused on older asics, since we don't have semaphores or multiple rings */
	BUG();
}

int r100_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset,
		   uint64_t dst_offset,
		   unsigned num_gpu_pages,
		   struct radeon_fence **fence)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	uint32_t cur_pages;
	uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
	uint32_t pitch;
	uint32_t stride_pixels;
	unsigned ndw;
	int num_loops;
	int r = 0;

	/* radeon limited to 16k stride */
	stride_bytes &= 0x3fff;
	/* radeon pitch is /64 */
	pitch = stride_bytes / 64;
	stride_pixels = stride_bytes / 4;
	num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);
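	/* Each BITBLT_MULTI packet can move at most 8191 GPU pages
	 * (presumably matching the 13-bit blit height field), hence the
	 * split into num_loops passes below. */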

	/* Ask for enough room for blit + flush + fence */
	ndw = 64 + (10 * num_loops);
	r = radeon_ring_lock(rdev, ring, ndw);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
		return -EINVAL;
	}
	while (num_gpu_pages > 0) {
		cur_pages = num_gpu_pages;
		if (cur_pages > 8191) {
			cur_pages = 8191;
		}
		num_gpu_pages -= cur_pages;

		/* pages run in the Y direction (height); the page width
		 * runs in the X direction (width) */
		radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8));
		radeon_ring_write(ring,
				  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
				  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
				  RADEON_GMC_SRC_CLIPPING |
				  RADEON_GMC_DST_CLIPPING |
				  RADEON_GMC_BRUSH_NONE |
				  (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
				  RADEON_GMC_SRC_DATATYPE_COLOR |
				  RADEON_ROP3_S |
				  RADEON_DP_SRC_SOURCE_MEMORY |
				  RADEON_GMC_CLR_CMP_CNTL_DIS |
				  RADEON_GMC_WR_MSK_DIS);
		radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10));
		radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10));
		radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(ring, num_gpu_pages);
		radeon_ring_write(ring, num_gpu_pages);
		radeon_ring_write(ring, cur_pages | (stride_pixels << 16));
	}
	radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL);
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_HOST_IDLECLEAN |
			  RADEON_WAIT_DMA_GUI_IDLE);
	if (fence) {
		r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
	}
	radeon_ring_unlock_commit(rdev, ring);
	return r;
}

static int r100_cp_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(R_000E40_RBBM_STATUS);
		if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) {
			return 0;
		}
		udelay(1);
	}
	return -1;
}

void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;

	r = radeon_ring_lock(rdev, ring, 2);
	if (r) {
		return;
	}
	radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(ring,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_unlock_commit(rdev, ring);
}


/* Load the microcode for the CP */
static int r100_cp_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *fw_name = NULL;
	int err;

	DRM_DEBUG_KMS("\n");

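	/* Register a throwaway platform device purely to give
	 * request_firmware() a struct device to load against; it is
	 * unregistered again right after the firmware load below. */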
	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}
	if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
	    (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
	    (rdev->family == CHIP_RS200)) {
		DRM_INFO("Loading R100 Microcode\n");
		fw_name = FIRMWARE_R100;
	} else if ((rdev->family == CHIP_R200) ||
		   (rdev->family == CHIP_RV250) ||
		   (rdev->family == CHIP_RV280) ||
		   (rdev->family == CHIP_RS300)) {
		DRM_INFO("Loading R200 Microcode\n");
		fw_name = FIRMWARE_R200;
	} else if ((rdev->family == CHIP_R300) ||
		   (rdev->family == CHIP_R350) ||
		   (rdev->family == CHIP_RV350) ||
		   (rdev->family == CHIP_RV380) ||
		   (rdev->family == CHIP_RS400) ||
		   (rdev->family == CHIP_RS480)) {
		DRM_INFO("Loading R300 Microcode\n");
		fw_name = FIRMWARE_R300;
	} else if ((rdev->family == CHIP_R420) ||
		   (rdev->family == CHIP_R423) ||
		   (rdev->family == CHIP_RV410)) {
		DRM_INFO("Loading R400 Microcode\n");
		fw_name = FIRMWARE_R420;
	} else if ((rdev->family == CHIP_RS690) ||
		   (rdev->family == CHIP_RS740)) {
		DRM_INFO("Loading RS690/RS740 Microcode\n");
		fw_name = FIRMWARE_RS690;
	} else if (rdev->family == CHIP_RS600) {
		DRM_INFO("Loading RS600 Microcode\n");
		fw_name = FIRMWARE_RS600;
	} else if ((rdev->family == CHIP_RV515) ||
		   (rdev->family == CHIP_R520) ||
		   (rdev->family == CHIP_RV530) ||
		   (rdev->family == CHIP_R580) ||
		   (rdev->family == CHIP_RV560) ||
		   (rdev->family == CHIP_RV570)) {
		DRM_INFO("Loading R500 Microcode\n");
		fw_name = FIRMWARE_R520;
	}

	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	platform_device_unregister(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
		       fw_name);
	} else if (rdev->me_fw->size % 8) {
		printk(KERN_ERR
		       "radeon_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
	}
	return err;
}

static void r100_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i, size;

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	if (rdev->me_fw) {
		size = rdev->me_fw->size / 4;
		fw_data = (const __be32 *)&rdev->me_fw->data[0];
		WREG32(RADEON_CP_ME_RAM_ADDR, 0);
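		/* The microcode is streamed as big-endian dword pairs:
		 * high word to DATAH, low word to DATAL.  The ME RAM
		 * address appears to auto-increment per pair, which is
		 * why it is programmed only once above. */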
		for (i = 0; i < size; i += 2) {
			WREG32(RADEON_CP_ME_RAM_DATAH,
			       be32_to_cpup(&fw_data[i]));
			WREG32(RADEON_CP_ME_RAM_DATAL,
			       be32_to_cpup(&fw_data[i + 1]));
		}
	}
}

int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	unsigned rb_bufsz;
	unsigned rb_blksz;
	unsigned max_fetch;
	unsigned pre_write_timer;
	unsigned pre_write_limit;
	unsigned indirect2_start;
	unsigned indirect1_start;
	uint32_t tmp;
	int r;

	if (r100_debugfs_cp_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for CP !\n");
	}
	if (!rdev->me_fw) {
		r = r100_cp_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	r100_cp_load_microcode(rdev);
	r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
			     0, 0x7fffff, RADEON_CP_PACKET2);
	if (r) {
		return r;
	}
	/* Each time the cp reads 1024 bytes (16 dword/quadword), update
	 * the rptr copy in system ram */
	rb_blksz = 9;
	/* the cp will read 128 bytes at a time (4 dwords) */
	max_fetch = 1;
	ring->align_mask = 16 - 1;
	/* Writes to CP_RB_WPTR will be delayed for pre_write_timer clocks */
	pre_write_timer = 64;
	/* Force a CP_RB_WPTR write if it is written more than once before
	 * the delay expires
	 */
	pre_write_limit = 0;
	/* Setup the cp cache like this (cache size is 96 dwords) :
	 *	RING		0  to 15
	 *	INDIRECT1	16 to 79
	 *	INDIRECT2	80 to 95
	 * So ring cache size is 16 dwords (> (2 * max_fetch = 2 * 4 dwords))
	 *    indirect1 cache size is 64 dwords (> (2 * max_fetch = 2 * 4 dwords))
	 *    indirect2 cache size is 16 dwords (> (2 * max_fetch = 2 * 4 dwords))
	 * The idea being that most of the gpu cmds will go through the
	 * indirect1 buffer, so it gets the bigger cache.
	 */
	indirect2_start = 80;
	indirect1_start = 16;
	/* cp setup */
	WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
	tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
	       REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
	       REG_SET(RADEON_MAX_FETCH, max_fetch));
#ifdef __BIG_ENDIAN
	tmp |= RADEON_BUF_SWAP_32BIT;
#endif
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);

	/* Set ring address */
	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr);
	WREG32(RADEON_CP_RB_BASE, ring->gpu_addr);
	/* Force read & write ptr to 0 */
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	ring->wptr = 0;
	WREG32(RADEON_CP_RB_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(R_00070C_CP_RB_RPTR_ADDR,
		S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2));
	WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET);

	if (rdev->wb.enabled)
		WREG32(R_000770_SCRATCH_UMSK, 0xff);
	else {
		tmp |= RADEON_RB_NO_UPDATE;
		WREG32(R_000770_SCRATCH_UMSK, 0);
	}

	WREG32(RADEON_CP_RB_CNTL, tmp);
	udelay(10);
	ring->rptr = RREG32(RADEON_CP_RB_RPTR);
	/* Set cp mode to bus mastering & enable the cp */
	WREG32(RADEON_CP_CSQ_MODE,
	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
	       REG_SET(RADEON_INDIRECT1_START, indirect1_start));
	WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
	WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
	WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);

	/* at this point everything should be set up correctly to enable master */
	pci_set_master(rdev->pdev);

	radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
	if (r) {
		DRM_ERROR("radeon: cp isn't working (%d).\n", r);
		return r;
	}
	ring->ready = true;
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	if (radeon_ring_supports_scratch_reg(rdev, ring)) {
		r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
		if (r) {
			DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
			ring->rptr_save_reg = 0;
		}
	}
	return 0;
}

void r100_cp_fini(struct radeon_device *rdev)
{
	if (r100_cp_wait_for_idle(rdev)) {
		DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n");
	}
	/* Disable ring */
	r100_cp_disable(rdev);
	radeon_scratch_free(rdev, rdev->ring[RADEON_RING_TYPE_GFX_INDEX].rptr_save_reg);
	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	DRM_INFO("radeon: cp finalized\n");
}

void r100_cp_disable(struct radeon_device *rdev)
{
	/* Disable ring */
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	WREG32(RADEON_CP_CSQ_MODE, 0);
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	WREG32(R_000770_SCRATCH_UMSK, 0);
	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
}

/*
 * CS functions
 */
int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
			    struct radeon_cs_packet *pkt,
			    unsigned idx,
			    unsigned reg)
{
	int r;
	u32 tile_flags = 0;
	u32 tmp;
	struct radeon_cs_reloc *reloc;
	u32 value;

	r = r100_cs_packet_next_reloc(p, &reloc);
	if (r) {
		DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
			  idx, reg);
		r100_cs_dump_packet(p, pkt);
		return r;
	}

	value = radeon_get_ib_value(p, idx);
	tmp = value & 0x003fffff;
	tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
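	/* The low 22 bits of a pitch/offset register hold the surface
	 * offset in 1KB units, so the relocated BO's GPU offset is folded
	 * in at the same >> 10 scale. */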

	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= RADEON_DST_TILE_MACRO;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
			if (reg == RADEON_SRC_PITCH_OFFSET) {
				DRM_ERROR("Cannot src blit from microtiled surface\n");
				r100_cs_dump_packet(p, pkt);
				return -EINVAL;
			}
			tile_flags |= RADEON_DST_TILE_MICRO;
		}

		tmp |= tile_flags;
		p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
	} else
		p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
	return 0;
}

int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
			     struct radeon_cs_packet *pkt,
			     int idx)
{
	unsigned c, i;
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	int r = 0;
	volatile uint32_t *ib;
	u32 idx_value;

	ib = p->ib.ptr;
	track = (struct r100_cs_track *)p->track;
	c = radeon_get_ib_value(p, idx++) & 0x1F;
	if (c > 16) {
		DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
			  pkt->opcode);
		r100_cs_dump_packet(p, pkt);
		return -EINVAL;
	}
	track->num_arrays = c;
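	/* Arrays are described in pairs: one packed dword carries the
	 * element sizes of two arrays (bits 8-14 and 24-30), followed by
	 * one relocated address dword per array; a trailing odd array
	 * reuses the same layout with its second slot unused. */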
	for (i = 0; i < (c - 1); i += 2, idx += 3) {
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n",
				  pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		idx_value = radeon_get_ib_value(p, idx);
		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);

		track->arrays[i + 0].esize = idx_value >> 8;
		track->arrays[i + 0].robj = reloc->robj;
		track->arrays[i + 0].esize &= 0x7F;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n",
				  pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
		track->arrays[i + 1].robj = reloc->robj;
		track->arrays[i + 1].esize = idx_value >> 24;
		track->arrays[i + 1].esize &= 0x7F;
	}
	if (c & 1) {
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n",
				  pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		idx_value = radeon_get_ib_value(p, idx);
		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
		track->arrays[i + 0].robj = reloc->robj;
		track->arrays[i + 0].esize = idx_value >> 8;
		track->arrays[i + 0].esize &= 0x7F;
	}
	return r;
}

int r100_cs_parse_packet0(struct radeon_cs_parser *p,
			  struct radeon_cs_packet *pkt,
			  const unsigned *auth, unsigned n,
			  radeon_packet0_check_t check)
{
	unsigned reg;
	unsigned i, j, m;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	/* Check that the register falls into the register range
	 * determined by the number of entries (n) in the
	 * safe register bitmap.
	 */
	if (pkt->one_reg_wr) {
		if ((reg >> 7) > n) {
			return -EINVAL;
		}
	} else {
		if (((reg + (pkt->count << 2)) >> 7) > n) {
			return -EINVAL;
		}
	}
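	/* auth is a bitmap with one bit per register: bit
	 * ((reg >> 2) & 31) of word (reg >> 7) is set when the register
	 * needs the per-register check callback. */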
	for (i = 0; i <= pkt->count; i++, idx++) {
		j = (reg >> 7);
		m = 1 << ((reg >> 2) & 31);
		if (auth[j] & m) {
			r = check(p, pkt, idx, reg);
			if (r) {
				return r;
			}
		}
		if (pkt->one_reg_wr) {
			if (!(auth[j] & m)) {
				break;
			}
		} else {
			reg += 4;
		}
	}
	return 0;
}

void r100_cs_dump_packet(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt)
{
	volatile uint32_t *ib;
	unsigned i;
	unsigned idx;

	ib = p->ib.ptr;
	idx = pkt->idx;
	for (i = 0; i <= (pkt->count + 1); i++, idx++) {
		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
	}
}

/**
 * r100_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @parser:	parser structure holding parsing context.
 * @pkt:	where to store packet information
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size, or if the packet
 * type is unknown.
 **/
int r100_cs_packet_parse(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt,
			 unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	switch (pkt->type) {
	case PACKET_TYPE0:
		pkt->reg = CP_PACKET0_GET_REG(header);
		pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
		break;
	case PACKET_TYPE3:
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		break;
	case PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}

/**
 * r100_cs_packet_parse_vline() - parse userspace VLINE packet
 * @parser:		parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET0 - WAIT_UNTIL + value
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT UNTIL packets to the correct crtc.
 * It also detects a switched-off crtc and nulls out the
 * wait in that case.
 */
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, waitreloc;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg;
	volatile uint32_t *ib;

	ib = p->ib.ptr;

	/* parse the wait until */
	r = r100_cs_packet_parse(p, &waitreloc, p->idx);
	if (r)
		return r;

	/* check it's a wait until with only 1 count */
	if (waitreloc.reg != RADEON_WAIT_UNTIL ||
	    waitreloc.count != 0) {
		DRM_ERROR("vline wait had illegal wait until segment\n");
		return -EINVAL;
	}

	if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
		DRM_ERROR("vline wait had illegal wait until\n");
		return -EINVAL;
	}

	/* jump over the NOP */
	r = r100_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
	if (r)
		return r;

	h_idx = p->idx - 2;
	p->idx += waitreloc.count + 2;
	p->idx += p3reloc.count + 2;

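	/* h_idx points back at the VLINE_START_END PACKET0 header that
	 * precedes the wait sequence; five dwords on from there is where
	 * userspace stashed the crtc_id in the NOP reloc. */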
	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 5);
	reg = CP_PACKET0_GET_REG(header);
	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		return -EINVAL;
	}
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the wait until */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
	} else if (crtc_id == 1) {
		switch (reg) {
		case AVIVO_D1MODE_VLINE_START_END:
			header &= ~R300_CP_PACKET0_REG_MASK;
			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
			break;
		case RADEON_CRTC_GUI_TRIG_VLINE:
			header &= ~R300_CP_PACKET0_REG_MASK;
			header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
			break;
		default:
			DRM_ERROR("unknown crtc reloc\n");
			return -EINVAL;
		}
		ib[h_idx] = header;
		ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
	}

	return 0;
}

/**
 * r100_cs_packet_next_reloc() - parse next packet which should be a reloc packet3
 * @parser:		parser structure holding parsing context.
 * @data:		pointer to relocation data
 * @offset_start:	starting offset
 * @offset_mask:	offset mask (to align start offset on)
 * @reloc:		reloc information
 *
 * Check that the next packet is a relocation packet3, do bo validation
 * and compute the GPU offset using the provided start.
 **/
int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
			      struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r100_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		r100_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		r100_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}

static int r100_get_vtx_size(uint32_t vtx_fmt)
{
	int vtx_size;
	vtx_size = 2;
	/* ordered according to bits in spec */
	if (vtx_fmt & RADEON_SE_VTX_FMT_W0)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST0)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST1)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST2)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q2)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_ST3)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q3)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Q0)
		vtx_size++;
	/* blend weight */
	if (vtx_fmt & (0x7 << 15))
		vtx_size += (vtx_fmt >> 15) & 0x7;
	if (vtx_fmt & RADEON_SE_VTX_FMT_N0)
		vtx_size += 3;
	if (vtx_fmt & RADEON_SE_VTX_FMT_XY1)
		vtx_size += 2;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Z1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_W1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_N1)
		vtx_size++;
	if (vtx_fmt & RADEON_SE_VTX_FMT_Z)
		vtx_size++;
	return vtx_size;
}

static int r100_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	uint32_t tmp;
	int r;
	int i, face;
	u32 tile_flags = 0;
	u32 idx_value;

	ib = p->ib.ptr;
	track = (struct r100_cs_track *)p->track;

	idx_value = radeon_get_ib_value(p, idx);

	switch (reg) {
	case RADEON_CRTC_GUI_TRIG_VLINE:
		r = r100_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		break;
		/* FIXME: only allow PACKET3 blit? easier to check for out of
		 * range access */
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
		if (r)
			return r;
		break;
	case RADEON_RB3D_DEPTHOFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->zb.robj = reloc->robj;
		track->zb.offset = idx_value;
		track->zb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case RADEON_RB3D_COLOROFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->cb[0].robj = reloc->robj;
		track->cb[0].offset = idx_value;
		track->cb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		break;
	case RADEON_PP_TXOFFSET_0:
	case RADEON_PP_TXOFFSET_1:
	case RADEON_PP_TXOFFSET_2:
		i = (reg - RADEON_PP_TXOFFSET_0) / 24;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= RADEON_TXO_MACRO_TILE;
			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= RADEON_TXO_MICRO_TILE_X2;

			tmp = idx_value & ~(0x7 << 2);
			tmp |= tile_flags;
			ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
		} else
			ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	case RADEON_PP_CUBIC_OFFSET_T0_0:
	case RADEON_PP_CUBIC_OFFSET_T0_1:
	case RADEON_PP_CUBIC_OFFSET_T0_2:
	case RADEON_PP_CUBIC_OFFSET_T0_3:
	case RADEON_PP_CUBIC_OFFSET_T0_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[0].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[0].cube_info[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	case RADEON_PP_CUBIC_OFFSET_T1_0:
	case RADEON_PP_CUBIC_OFFSET_T1_1:
	case RADEON_PP_CUBIC_OFFSET_T1_2:
	case RADEON_PP_CUBIC_OFFSET_T1_3:
	case RADEON_PP_CUBIC_OFFSET_T1_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[1].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[1].cube_info[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	case RADEON_PP_CUBIC_OFFSET_T2_0:
	case RADEON_PP_CUBIC_OFFSET_T2_1:
	case RADEON_PP_CUBIC_OFFSET_T2_2:
	case RADEON_PP_CUBIC_OFFSET_T2_3:
	case RADEON_PP_CUBIC_OFFSET_T2_4:
		i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[2].cube_info[i].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
		track->textures[2].cube_info[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	case RADEON_RE_WIDTH_HEIGHT:
		track->maxy = ((idx_value >> 16) & 0x7FF);
		track->cb_dirty = true;
		track->zb_dirty = true;
		break;
	case RADEON_RB3D_COLORPITCH:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= RADEON_COLOR_TILE_ENABLE;
			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;

			tmp = idx_value & ~(0x7 << 16);
			tmp |= tile_flags;
			ib[idx] = tmp;
		} else
			ib[idx] = idx_value;

		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
		track->cb_dirty = true;
		break;
	case RADEON_RB3D_DEPTHPITCH:
		track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
		track->zb_dirty = true;
		break;
	case RADEON_RB3D_CNTL:
		switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
		case 7:
		case 8:
		case 9:
		case 11:
		case 12:
			track->cb[0].cpp = 1;
			break;
		case 3:
		case 4:
		case 15:
			track->cb[0].cpp = 2;
1813 			break;
1814 		case 6:
1815 			track->cb[0].cpp = 4;
1816 			break;
1817 		default:
1818 			DRM_ERROR("Invalid color buffer format (%d) !\n",
1819 				  ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
1820 			return -EINVAL;
1821 		}
1822 		track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
1823 		track->cb_dirty = true;
1824 		track->zb_dirty = true;
1825 		break;
1826 	case RADEON_RB3D_ZSTENCILCNTL:
1827 		switch (idx_value & 0xf) {
1828 		case 0:
1829 			track->zb.cpp = 2;
1830 			break;
1831 		case 2:
1832 		case 3:
1833 		case 4:
1834 		case 5:
1835 		case 9:
1836 		case 11:
1837 			track->zb.cpp = 4;
1838 			break;
1839 		default:
1840 			break;
1841 		}
1842 		track->zb_dirty = true;
1843 		break;
1844 	case RADEON_RB3D_ZPASS_ADDR:
1845 		r = r100_cs_packet_next_reloc(p, &reloc);
1846 		if (r) {
1847 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1848 				  idx, reg);
1849 			r100_cs_dump_packet(p, pkt);
1850 			return r;
1851 		}
1852 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1853 		break;
1854 	case RADEON_PP_CNTL:
1855 		{
1856 			uint32_t temp = idx_value >> 4;
1857 			for (i = 0; i < track->num_texture; i++)
1858 				track->textures[i].enabled = !!(temp & (1 << i));
1859 			track->tex_dirty = true;
1860 		}
1861 		break;
1862 	case RADEON_SE_VF_CNTL:
1863 		track->vap_vf_cntl = idx_value;
1864 		break;
1865 	case RADEON_SE_VTX_FMT:
1866 		track->vtx_size = r100_get_vtx_size(idx_value);
1867 		break;
1868 	case RADEON_PP_TEX_SIZE_0:
1869 	case RADEON_PP_TEX_SIZE_1:
1870 	case RADEON_PP_TEX_SIZE_2:
1871 		i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
1872 		track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
1873 		track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
1874 		track->tex_dirty = true;
1875 		break;
1876 	case RADEON_PP_TEX_PITCH_0:
1877 	case RADEON_PP_TEX_PITCH_1:
1878 	case RADEON_PP_TEX_PITCH_2:
1879 		i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
1880 		track->textures[i].pitch = idx_value + 32;
1881 		track->tex_dirty = true;
1882 		break;
1883 	case RADEON_PP_TXFILTER_0:
1884 	case RADEON_PP_TXFILTER_1:
1885 	case RADEON_PP_TXFILTER_2:
1886 		i = (reg - RADEON_PP_TXFILTER_0) / 24;
1887 		track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK)
1888 						 >> RADEON_MAX_MIP_LEVEL_SHIFT);
1889 		tmp = (idx_value >> 23) & 0x7;
1890 		if (tmp == 2 || tmp == 6)
1891 			track->textures[i].roundup_w = false;
1892 		tmp = (idx_value >> 27) & 0x7;
1893 		if (tmp == 2 || tmp == 6)
1894 			track->textures[i].roundup_h = false;
1895 		track->tex_dirty = true;
1896 		break;
1897 	case RADEON_PP_TXFORMAT_0:
1898 	case RADEON_PP_TXFORMAT_1:
1899 	case RADEON_PP_TXFORMAT_2:
1900 		i = (reg - RADEON_PP_TXFORMAT_0) / 24;
1901 		if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
1902 			track->textures[i].use_pitch = 1;
1903 		} else {
1904 			track->textures[i].use_pitch = 0;
1905 			track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
1906 			track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
1907 		}
1908 		if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
1909 			track->textures[i].tex_coord_type = 2;
1910 		switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
1911 		case RADEON_TXFORMAT_I8:
1912 		case RADEON_TXFORMAT_RGB332:
1913 		case RADEON_TXFORMAT_Y8:
1914 			track->textures[i].cpp = 1;
1915 			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
1916 			break;
1917 		case RADEON_TXFORMAT_AI88:
1918 		case RADEON_TXFORMAT_ARGB1555:
1919 		case RADEON_TXFORMAT_RGB565:
1920 		case RADEON_TXFORMAT_ARGB4444:
1921 		case RADEON_TXFORMAT_VYUY422:
1922 		case RADEON_TXFORMAT_YVYU422:
1923 		case RADEON_TXFORMAT_SHADOW16:
1924 		case RADEON_TXFORMAT_LDUDV655:
1925 		case RADEON_TXFORMAT_DUDV88:
1926 			track->textures[i].cpp = 2;
1927 			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
1928 			break;
1929 		case RADEON_TXFORMAT_ARGB8888:
1930 		case RADEON_TXFORMAT_RGBA8888:
1931 		case RADEON_TXFORMAT_SHADOW32:
1932 		case RADEON_TXFORMAT_LDUDUV8888:
1933 			track->textures[i].cpp = 4;
1934 			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
1935 			break;
1936 		case RADEON_TXFORMAT_DXT1:
1937 			track->textures[i].cpp = 1;
1938 			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
1939 			break;
1940 		case RADEON_TXFORMAT_DXT23:
1941 		case RADEON_TXFORMAT_DXT45:
1942 			track->textures[i].cpp = 1;
1943 			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
1944 			break;
1945 		}
1946 		track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
1947 		track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
1948 		track->tex_dirty = true;
1949 		break;
1950 	case RADEON_PP_CUBIC_FACES_0:
1951 	case RADEON_PP_CUBIC_FACES_1:
1952 	case RADEON_PP_CUBIC_FACES_2:
1953 		tmp = idx_value;
1954 		i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
1955 		for (face = 0; face < 4; face++) {
1956 			track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
1957 			track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
1958 		}
1959 		track->tex_dirty = true;
1960 		break;
1961 	default:
1962 		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
1963 		       reg, idx);
1964 		return -EINVAL;
1965 	}
1966 	return 0;
1967 }
1968 
1969 int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
1970 					 struct radeon_cs_packet *pkt,
1971 					 struct radeon_bo *robj)
1972 {
1973 	unsigned idx;
1974 	u32 value;
1975 	idx = pkt->idx + 1;
1976 	value = radeon_get_ib_value(p, idx + 2);
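	/* value appears to hold the offset of the last byte the draw may
	 * index, hence the bound object must span at least value + 1 bytes */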
1977 	if ((value + 1) > radeon_bo_size(robj)) {
1978 		DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
1979 			  "(need %u have %lu) !\n",
1980 			  value + 1,
1981 			  radeon_bo_size(robj));
1982 		return -EINVAL;
1983 	}
1984 	return 0;
1985 }
1986 
1987 static int r100_packet3_check(struct radeon_cs_parser *p,
1988 			      struct radeon_cs_packet *pkt)
1989 {
1990 	struct radeon_cs_reloc *reloc;
1991 	struct r100_cs_track *track;
1992 	unsigned idx;
1993 	volatile uint32_t *ib;
1994 	int r;
1995 
1996 	ib = p->ib.ptr;
1997 	idx = pkt->idx + 1;
1998 	track = (struct r100_cs_track *)p->track;
1999 	switch (pkt->opcode) {
2000 	case PACKET3_3D_LOAD_VBPNTR:
2001 		r = r100_packet3_load_vbpntr(p, pkt, idx);
2002 		if (r)
2003 			return r;
2004 		break;
2005 	case PACKET3_INDX_BUFFER:
2006 		r = r100_cs_packet_next_reloc(p, &reloc);
2007 		if (r) {
2008 			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
2009 			r100_cs_dump_packet(p, pkt);
2010 			return r;
2011 		}
2012 		ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
2013 		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
2014 		if (r) {
2015 			return r;
2016 		}
2017 		break;
2018 	case 0x23:
2019 		/* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
2020 		r = r100_cs_packet_next_reloc(p, &reloc);
2021 		if (r) {
2022 			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
2023 			r100_cs_dump_packet(p, pkt);
2024 			return r;
2025 		}
2026 		ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
2027 		track->num_arrays = 1;
2028 		track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));
2029 
2030 		track->arrays[0].robj = reloc->robj;
2031 		track->arrays[0].esize = track->vtx_size;
2032 
2033 		track->max_indx = radeon_get_ib_value(p, idx+1);
2034 
2035 		track->vap_vf_cntl = radeon_get_ib_value(p, idx+3);
2036 		track->immd_dwords = pkt->count - 1;
2037 		r = r100_cs_track_check(p->rdev, track);
2038 		if (r)
2039 			return r;
2040 		break;
2041 	case PACKET3_3D_DRAW_IMMD:
2042 		if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
2043 			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
2044 			return -EINVAL;
2045 		}
2046 		track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
2047 		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
2048 		track->immd_dwords = pkt->count - 1;
2049 		r = r100_cs_track_check(p->rdev, track);
2050 		if (r)
2051 			return r;
2052 		break;
2053 		/* triggers drawing using in-packet vertex data */
2054 	case PACKET3_3D_DRAW_IMMD_2:
2055 		if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
2056 			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
2057 			return -EINVAL;
2058 		}
2059 		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
2060 		track->immd_dwords = pkt->count;
2061 		r = r100_cs_track_check(p->rdev, track);
2062 		if (r)
2063 			return r;
2064 		break;
2065 		/* triggers drawing using in-packet vertex data */
2066 	case PACKET3_3D_DRAW_VBUF_2:
2067 		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
2068 		r = r100_cs_track_check(p->rdev, track);
2069 		if (r)
2070 			return r;
2071 		break;
2072 		/* triggers drawing of vertex buffers setup elsewhere */
2073 	case PACKET3_3D_DRAW_INDX_2:
2074 		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
2075 		r = r100_cs_track_check(p->rdev, track);
2076 		if (r)
2077 			return r;
2078 		break;
2079 		/* triggers drawing using indices to vertex buffer */
2080 	case PACKET3_3D_DRAW_VBUF:
2081 		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
2082 		r = r100_cs_track_check(p->rdev, track);
2083 		if (r)
2084 			return r;
2085 		break;
2086 		/* triggers drawing of vertex buffers setup elsewhere */
2087 	case PACKET3_3D_DRAW_INDX:
2088 		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
2089 		r = r100_cs_track_check(p->rdev, track);
2090 		if (r)
2091 			return r;
2092 		break;
2093 		/* triggers drawing using indices to vertex buffer */
2094 	case PACKET3_3D_CLEAR_HIZ:
2095 	case PACKET3_3D_CLEAR_ZMASK:
2096 		if (p->rdev->hyperz_filp != p->filp)
2097 			return -EINVAL;
2098 		break;
2099 	case PACKET3_NOP:
2100 		break;
2101 	default:
2102 		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
2103 		return -EINVAL;
2104 	}
2105 	return 0;
2106 }
2107 
2108 int r100_cs_parse(struct radeon_cs_parser *p)
2109 {
2110 	struct radeon_cs_packet pkt;
2111 	struct r100_cs_track *track;
2112 	int r;
2113 
2114 	track = kzalloc(sizeof(*track), GFP_KERNEL);
2115 	if (!track)
2116 		return -ENOMEM;
2117 	r100_cs_track_clear(p->rdev, track);
2118 	p->track = track;
2119 	do {
2120 		r = r100_cs_packet_parse(p, &pkt, p->idx);
2121 		if (r) {
2122 			return r;
2123 		}
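		/* advance past this packet: one header dword plus
		 * pkt.count + 1 payload dwords */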
2124 		p->idx += pkt.count + 2;
2125 		switch (pkt.type) {
2126 		case PACKET_TYPE0:
2127 			if (p->rdev->family >= CHIP_R200)
2128 				r = r100_cs_parse_packet0(p, &pkt,
2129 							  p->rdev->config.r100.reg_safe_bm,
2130 							  p->rdev->config.r100.reg_safe_bm_size,
2131 							  &r200_packet0_check);
2132 			else
2133 				r = r100_cs_parse_packet0(p, &pkt,
2134 							  p->rdev->config.r100.reg_safe_bm,
2135 							  p->rdev->config.r100.reg_safe_bm_size,
2136 							  &r100_packet0_check);
2137 			break;
2138 		case PACKET_TYPE2:
2139 			break;
2140 		case PACKET_TYPE3:
2141 			r = r100_packet3_check(p, &pkt);
2142 			break;
2143 		default:
2144 			DRM_ERROR("Unknown packet type %d !\n",
2145 				  pkt.type);
2146 			return -EINVAL;
2147 		}
2148 		if (r) {
2149 			return r;
2150 		}
2151 	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
2152 	return 0;
2153 }
2154 
2155 static void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
2156 {
2157 	DRM_ERROR("pitch                      %d\n", t->pitch);
2158 	DRM_ERROR("use_pitch                  %d\n", t->use_pitch);
2159 	DRM_ERROR("width                      %d\n", t->width);
2160 	DRM_ERROR("width_11                   %d\n", t->width_11);
2161 	DRM_ERROR("height                     %d\n", t->height);
2162 	DRM_ERROR("height_11                  %d\n", t->height_11);
2163 	DRM_ERROR("num levels                 %d\n", t->num_levels);
2164 	DRM_ERROR("depth                      %d\n", t->txdepth);
2165 	DRM_ERROR("bpp                        %d\n", t->cpp);
2166 	DRM_ERROR("coordinate type            %d\n", t->tex_coord_type);
2167 	DRM_ERROR("width round to power of 2  %d\n", t->roundup_w);
2168 	DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
2169 	DRM_ERROR("compress format            %d\n", t->compress_format);
2170 }
2171 
2172 static int r100_track_compress_size(int compress_format, int w, int h)
2173 {
2174 	int block_width, block_height, block_bytes;
2175 	int wblocks, hblocks;
2176 	int min_wblocks;
2177 	int sz;
2178 
2179 	block_width = 4;
2180 	block_height = 4;
2181 
2182 	switch (compress_format) {
2183 	case R100_TRACK_COMP_DXT1:
2184 		block_bytes = 8;
2185 		min_wblocks = 4;
2186 		break;
2187 	default:
2188 	case R100_TRACK_COMP_DXT35:
2189 		block_bytes = 16;
2190 		min_wblocks = 2;
2191 		break;
2192 	}
2193 
2194 	hblocks = (h + block_height - 1) / block_height;
2195 	wblocks = (w + block_width - 1) / block_width;
2196 	if (wblocks < min_wblocks)
2197 		wblocks = min_wblocks;
2198 	sz = wblocks * hblocks * block_bytes;
2199 	return sz;
2200 }
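/*
 * Worked example for r100_track_compress_size() above: a 64x64 DXT1
 * texture is 16x16 blocks of 8 bytes, i.e. 2048 bytes; a 4x4 DXT3/DXT5
 * texture is a single 16 byte block but is padded to min_wblocks = 2,
 * i.e. 32 bytes.
 */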
2201 
2202 static int r100_cs_track_cube(struct radeon_device *rdev,
2203 			      struct r100_cs_track *track, unsigned idx)
2204 {
2205 	unsigned face, w, h;
2206 	struct radeon_bo *cube_robj;
2207 	unsigned long size;
2208 	unsigned compress_format = track->textures[idx].compress_format;
2209 
2210 	for (face = 0; face < 5; face++) {
2211 		cube_robj = track->textures[idx].cube_info[face].robj;
2212 		w = track->textures[idx].cube_info[face].width;
2213 		h = track->textures[idx].cube_info[face].height;
2214 
2215 		if (compress_format)
2216 			size = r100_track_compress_size(compress_format, w, h);
2217 		else
2218 			size = w * h;
2219 		size *= track->textures[idx].cpp;
2220 
2221 		size += track->textures[idx].cube_info[face].offset;
2222 
2223 		if (size > radeon_bo_size(cube_robj)) {
2224 			DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
2225 				  size, radeon_bo_size(cube_robj));
2226 			r100_cs_track_texture_print(&track->textures[idx]);
2227 			return -1;
2228 		}
2229 	}
2230 	return 0;
2231 }
2232 
2233 static int r100_cs_track_texture_check(struct radeon_device *rdev,
2234 				       struct r100_cs_track *track)
2235 {
2236 	struct radeon_bo *robj;
2237 	unsigned long size;
2238 	unsigned u, i, w, h, d;
2239 	int ret;
2240 
2241 	for (u = 0; u < track->num_texture; u++) {
2242 		if (!track->textures[u].enabled)
2243 			continue;
2244 		if (track->textures[u].lookup_disable)
2245 			continue;
2246 		robj = track->textures[u].robj;
2247 		if (robj == NULL) {
2248 			DRM_ERROR("No texture bound to unit %u\n", u);
2249 			return -EINVAL;
2250 		}
2251 		size = 0;
2252 		for (i = 0; i <= track->textures[u].num_levels; i++) {
2253 			if (track->textures[u].use_pitch) {
2254 				if (rdev->family < CHIP_R300)
2255 					w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
2256 				else
2257 					w = track->textures[u].pitch / (1 << i);
2258 			} else {
2259 				w = track->textures[u].width;
2260 				if (rdev->family >= CHIP_RV515)
2261 					w |= track->textures[u].width_11;
2262 				w = w / (1 << i);
2263 				if (track->textures[u].roundup_w)
2264 					w = roundup_pow_of_two(w);
2265 			}
2266 			h = track->textures[u].height;
2267 			if (rdev->family >= CHIP_RV515)
2268 				h |= track->textures[u].height_11;
2269 			h = h / (1 << i);
2270 			if (track->textures[u].roundup_h)
2271 				h = roundup_pow_of_two(h);
2272 			if (track->textures[u].tex_coord_type == 1) {
2273 				d = (1 << track->textures[u].txdepth) / (1 << i);
2274 				if (!d)
2275 					d = 1;
2276 			} else {
2277 				d = 1;
2278 			}
2279 			if (track->textures[u].compress_format) {
2280 				/* compressed textures are block based; the
2281 				 * size is accumulated per 4x4 block */
2282 				size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
2283 			} else
2284 				size += w * h * d;
2285 		}
2286 		size *= track->textures[u].cpp;
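		/*
		 * Example: an uncompressed 256x256 texture with num_levels = 2
		 * and cpp = 4 sums 256*256 + 128*128 + 64*64 texels in the loop
		 * above, giving 86016 * 4 = 344064 bytes here.
		 */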
2287 
2288 		switch (track->textures[u].tex_coord_type) {
2289 		case 0:
2290 		case 1:
2291 			break;
2292 		case 2:
2293 			if (track->separate_cube) {
2294 				ret = r100_cs_track_cube(rdev, track, u);
2295 				if (ret)
2296 					return ret;
2297 			} else
2298 				size *= 6;
2299 			break;
2300 		default:
2301 			DRM_ERROR("Invalid texture coordinate type %u for unit "
2302 				  "%u\n", track->textures[u].tex_coord_type, u);
2303 			return -EINVAL;
2304 		}
2305 		if (size > radeon_bo_size(robj)) {
2306 			DRM_ERROR("Texture of unit %u needs %lu bytes but is "
2307 				  "%lu\n", u, size, radeon_bo_size(robj));
2308 			r100_cs_track_texture_print(&track->textures[u]);
2309 			return -EINVAL;
2310 		}
2311 	}
2312 	return 0;
2313 }
2314 
2315 int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
2316 {
2317 	unsigned i;
2318 	unsigned long size;
2319 	unsigned prim_walk;
2320 	unsigned nverts;
2321 	unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
2322 
2323 	if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
2324 	    !track->blend_read_enable)
2325 		num_cb = 0;
2326 
2327 	for (i = 0; i < num_cb; i++) {
2328 		if (track->cb[i].robj == NULL) {
2329 			DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
2330 			return -EINVAL;
2331 		}
2332 		size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
2333 		size += track->cb[i].offset;
2334 		if (size > radeon_bo_size(track->cb[i].robj)) {
2335 			DRM_ERROR("[drm] Buffer too small for color buffer %d "
2336 				  "(need %lu have %lu) !\n", i, size,
2337 				  radeon_bo_size(track->cb[i].robj));
2338 			DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
2339 				  i, track->cb[i].pitch, track->cb[i].cpp,
2340 				  track->cb[i].offset, track->maxy);
2341 			return -EINVAL;
2342 		}
2343 	}
2344 	track->cb_dirty = false;
2345 
2346 	if (track->zb_dirty && track->z_enabled) {
2347 		if (track->zb.robj == NULL) {
2348 			DRM_ERROR("[drm] No buffer for z buffer !\n");
2349 			return -EINVAL;
2350 		}
2351 		size = track->zb.pitch * track->zb.cpp * track->maxy;
2352 		size += track->zb.offset;
2353 		if (size > radeon_bo_size(track->zb.robj)) {
2354 			DRM_ERROR("[drm] Buffer too small for z buffer "
2355 				  "(need %lu have %lu) !\n", size,
2356 				  radeon_bo_size(track->zb.robj));
2357 			DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
2358 				  track->zb.pitch, track->zb.cpp,
2359 				  track->zb.offset, track->maxy);
2360 			return -EINVAL;
2361 		}
2362 	}
2363 	track->zb_dirty = false;
2364 
2365 	if (track->aa_dirty && track->aaresolve) {
2366 		if (track->aa.robj == NULL) {
2367 			DRM_ERROR("[drm] No buffer for AA resolve buffer !\n");
2368 			return -EINVAL;
2369 		}
2370 		/* I believe the format comes from colorbuffer0. */
2371 		size = track->aa.pitch * track->cb[0].cpp * track->maxy;
2372 		size += track->aa.offset;
2373 		if (size > radeon_bo_size(track->aa.robj)) {
2374 			DRM_ERROR("[drm] Buffer too small for AA resolve buffer "
2375 				  "(need %lu have %lu) !\n", size,
2376 				  radeon_bo_size(track->aa.robj));
2377 			DRM_ERROR("[drm] AA resolve buffer (%u %u %u %u)\n",
2378 				  track->aa.pitch, track->cb[0].cpp,
2379 				  track->aa.offset, track->maxy);
2380 			return -EINVAL;
2381 		}
2382 	}
2383 	track->aa_dirty = false;
2384 
2385 	prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
2386 	if (track->vap_vf_cntl & (1 << 14)) {
2387 		nverts = track->vap_alt_nverts;
2388 	} else {
2389 		nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
2390 	}
2391 	switch (prim_walk) {
2392 	case 1:
2393 		for (i = 0; i < track->num_arrays; i++) {
2394 			size = track->arrays[i].esize * track->max_indx * 4;
2395 			if (track->arrays[i].robj == NULL) {
2396 				DRM_ERROR("(PW %u) Vertex array %u no buffer "
2397 					  "bound\n", prim_walk, i);
2398 				return -EINVAL;
2399 			}
2400 			if (size > radeon_bo_size(track->arrays[i].robj)) {
2401 				dev_err(rdev->dev, "(PW %u) Vertex array %u "
2402 					"need %lu dwords have %lu dwords\n",
2403 					prim_walk, i, size >> 2,
2404 					radeon_bo_size(track->arrays[i].robj)
2405 					>> 2);
2406 				DRM_ERROR("Max indices %u\n", track->max_indx);
2407 				return -EINVAL;
2408 			}
2409 		}
2410 		break;
2411 	case 2:
2412 		for (i = 0; i < track->num_arrays; i++) {
2413 			size = track->arrays[i].esize * (nverts - 1) * 4;
2414 			if (track->arrays[i].robj == NULL) {
2415 				DRM_ERROR("(PW %u) Vertex array %u no buffer "
2416 					  "bound\n", prim_walk, i);
2417 				return -EINVAL;
2418 			}
2419 			if (size > radeon_bo_size(track->arrays[i].robj)) {
2420 				dev_err(rdev->dev, "(PW %u) Vertex array %u "
2421 					"need %lu dwords have %lu dwords\n",
2422 					prim_walk, i, size >> 2,
2423 					radeon_bo_size(track->arrays[i].robj)
2424 					>> 2);
2425 				return -EINVAL;
2426 			}
2427 		}
2428 		break;
2429 	case 3:
2430 		size = track->vtx_size * nverts;
2431 		if (size != track->immd_dwords) {
2432 			DRM_ERROR("IMMD draw %u dwords but needs %lu dwords\n",
2433 				  track->immd_dwords, size);
2434 			DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
2435 				  nverts, track->vtx_size);
2436 			return -EINVAL;
2437 		}
2438 		break;
2439 	default:
2440 		DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
2441 			  prim_walk);
2442 		return -EINVAL;
2443 	}
2444 
2445 	if (track->tex_dirty) {
2446 		track->tex_dirty = false;
2447 		return r100_cs_track_texture_check(rdev, track);
2448 	}
2449 	return 0;
2450 }
2451 
2452 void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
2453 {
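	/*
	 * The defaults below are worst-case (maximal pitches, sizes and
	 * element counts) so that any state a command stream leaves
	 * unprogrammed trips the bound checks in r100_cs_track_check()
	 * rather than being silently accepted.
	 */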
2454 	unsigned i, face;
2455 
2456 	track->cb_dirty = true;
2457 	track->zb_dirty = true;
2458 	track->tex_dirty = true;
2459 	track->aa_dirty = true;
2460 
2461 	if (rdev->family < CHIP_R300) {
2462 		track->num_cb = 1;
2463 		if (rdev->family <= CHIP_RS200)
2464 			track->num_texture = 3;
2465 		else
2466 			track->num_texture = 6;
2467 		track->maxy = 2048;
2468 		track->separate_cube = 1;
2469 	} else {
2470 		track->num_cb = 4;
2471 		track->num_texture = 16;
2472 		track->maxy = 4096;
2473 		track->separate_cube = 0;
2474 		track->aaresolve = false;
2475 		track->aa.robj = NULL;
2476 	}
2477 
2478 	for (i = 0; i < track->num_cb; i++) {
2479 		track->cb[i].robj = NULL;
2480 		track->cb[i].pitch = 8192;
2481 		track->cb[i].cpp = 16;
2482 		track->cb[i].offset = 0;
2483 	}
2484 	track->z_enabled = true;
2485 	track->zb.robj = NULL;
2486 	track->zb.pitch = 8192;
2487 	track->zb.cpp = 4;
2488 	track->zb.offset = 0;
2489 	track->vtx_size = 0x7F;
2490 	track->immd_dwords = 0xFFFFFFFFUL;
2491 	track->num_arrays = 11;
2492 	track->max_indx = 0x00FFFFFFUL;
2493 	for (i = 0; i < track->num_arrays; i++) {
2494 		track->arrays[i].robj = NULL;
2495 		track->arrays[i].esize = 0x7F;
2496 	}
2497 	for (i = 0; i < track->num_texture; i++) {
2498 		track->textures[i].compress_format = R100_TRACK_COMP_NONE;
2499 		track->textures[i].pitch = 16536;
2500 		track->textures[i].width = 16536;
2501 		track->textures[i].height = 16536;
2502 		track->textures[i].width_11 = 1 << 11;
2503 		track->textures[i].height_11 = 1 << 11;
2504 		track->textures[i].num_levels = 12;
2505 		if (rdev->family <= CHIP_RS200) {
2506 			track->textures[i].tex_coord_type = 0;
2507 			track->textures[i].txdepth = 0;
2508 		} else {
2509 			track->textures[i].txdepth = 16;
2510 			track->textures[i].tex_coord_type = 1;
2511 		}
2512 		track->textures[i].cpp = 64;
2513 		track->textures[i].robj = NULL;
2514 		/* CS IB emission code makes sure texture units are disabled */
2515 		track->textures[i].enabled = false;
2516 		track->textures[i].lookup_disable = false;
2517 		track->textures[i].roundup_w = true;
2518 		track->textures[i].roundup_h = true;
2519 		if (track->separate_cube)
2520 			for (face = 0; face < 5; face++) {
2521 				track->textures[i].cube_info[face].robj = NULL;
2522 				track->textures[i].cube_info[face].width = 16536;
2523 				track->textures[i].cube_info[face].height = 16536;
2524 				track->textures[i].cube_info[face].offset = 0;
2525 			}
2526 	}
2527 }
2528 
2529 /*
2530  * Global GPU functions
2531  */
2532 void r100_errata(struct radeon_device *rdev)
2533 {
2534 	rdev->pll_errata = 0;
2535 
2536 	if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
2537 		rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
2538 	}
2539 
2540 	if (rdev->family == CHIP_RV100 ||
2541 	    rdev->family == CHIP_RS100 ||
2542 	    rdev->family == CHIP_RS200) {
2543 		rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
2544 	}
2545 }
2546 
2547 /* Wait for vertical sync on primary CRTC */
2548 void r100_gpu_wait_for_vsync(struct radeon_device *rdev)
2549 {
2550 	uint32_t crtc_gen_cntl, tmp;
2551 	int i;
2552 
2553 	crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
2554 	if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) ||
2555 	    !(crtc_gen_cntl & RADEON_CRTC_EN)) {
2556 		return;
2557 	}
2558 	/* Clear the CRTC_VBLANK_SAVE bit */
2559 	WREG32(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR);
2560 	for (i = 0; i < rdev->usec_timeout; i++) {
2561 		tmp = RREG32(RADEON_CRTC_STATUS);
2562 		if (tmp & RADEON_CRTC_VBLANK_SAVE) {
2563 			return;
2564 		}
2565 		DRM_UDELAY(1);
2566 	}
2567 }
2568 
2569 /* Wait for vertical sync on secondary CRTC */
2570 void r100_gpu_wait_for_vsync2(struct radeon_device *rdev)
2571 {
2572 	uint32_t crtc2_gen_cntl, tmp;
2573 	int i;
2574 
2575 	crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
2576 	if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) ||
2577 	    !(crtc2_gen_cntl & RADEON_CRTC2_EN))
2578 		return;
2579 
2580 	/* Clear the CRTC_VBLANK_SAVE bit */
2581 	WREG32(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR);
2582 	for (i = 0; i < rdev->usec_timeout; i++) {
2583 		tmp = RREG32(RADEON_CRTC2_STATUS);
2584 		if (tmp & RADEON_CRTC2_VBLANK_SAVE) {
2585 			return;
2586 		}
2587 		DRM_UDELAY(1);
2588 	}
2589 }
2590 
2591 int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
2592 {
2593 	unsigned i;
2594 	uint32_t tmp;
2595 
2596 	for (i = 0; i < rdev->usec_timeout; i++) {
2597 		tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
2598 		if (tmp >= n) {
2599 			return 0;
2600 		}
2601 		DRM_UDELAY(1);
2602 	}
2603 	return -1;
2604 }
2605 
2606 int r100_gui_wait_for_idle(struct radeon_device *rdev)
2607 {
2608 	unsigned i;
2609 	uint32_t tmp;
2610 
2611 	if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
2612 		printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !"
2613 		       " Bad things might happen.\n");
2614 	}
2615 	for (i = 0; i < rdev->usec_timeout; i++) {
2616 		tmp = RREG32(RADEON_RBBM_STATUS);
2617 		if (!(tmp & RADEON_RBBM_ACTIVE)) {
2618 			return 0;
2619 		}
2620 		DRM_UDELAY(1);
2621 	}
2622 	return -1;
2623 }
2624 
2625 int r100_mc_wait_for_idle(struct radeon_device *rdev)
2626 {
2627 	unsigned i;
2628 	uint32_t tmp;
2629 
2630 	for (i = 0; i < rdev->usec_timeout; i++) {
2631 		/* read MC_STATUS */
2632 		tmp = RREG32(RADEON_MC_STATUS);
2633 		if (tmp & RADEON_MC_IDLE) {
2634 			return 0;
2635 		}
2636 		DRM_UDELAY(1);
2637 	}
2638 	return -1;
2639 }
2640 
2641 bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2642 {
2643 	u32 rbbm_status;
2644 
2645 	rbbm_status = RREG32(R_000E40_RBBM_STATUS);
2646 	if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
2647 		radeon_ring_lockup_update(ring);
2648 		return false;
2649 	}
2650 	/* force CP activities */
2651 	radeon_ring_force_activity(rdev, ring);
2652 	return radeon_ring_test_lockup(rdev, ring);
2653 }
2654 
2655 /* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
2656 void r100_enable_bm(struct radeon_device *rdev)
2657 {
2658 	uint32_t tmp;
2659 	/* Enable bus mastering */
2660 	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
2661 	WREG32(RADEON_BUS_CNTL, tmp);
2662 }
2663 
2664 void r100_bm_disable(struct radeon_device *rdev)
2665 {
2666 	u32 tmp;
2667 
2668 	/* disable bus mastering */
2669 	tmp = RREG32(R_000030_BUS_CNTL);
2670 	WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
2671 	mdelay(1);
2672 	WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
2673 	mdelay(1);
2674 	WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
2675 	tmp = RREG32(RADEON_BUS_CNTL);
2676 	mdelay(1);
2677 	pci_clear_master(rdev->pdev);
2678 	mdelay(1);
2679 }
2680 
2681 int r100_asic_reset(struct radeon_device *rdev)
2682 {
2683 	struct r100_mc_save save;
2684 	u32 status, tmp;
2685 	int ret = 0;
2686 
2687 	status = RREG32(R_000E40_RBBM_STATUS);
2688 	if (!G_000E40_GUI_ACTIVE(status)) {
2689 		return 0;
2690 	}
2691 	r100_mc_stop(rdev, &save);
2692 	status = RREG32(R_000E40_RBBM_STATUS);
2693 	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2694 	/* stop CP */
2695 	WREG32(RADEON_CP_CSQ_CNTL, 0);
2696 	tmp = RREG32(RADEON_CP_RB_CNTL);
2697 	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
2698 	WREG32(RADEON_CP_RB_RPTR_WR, 0);
2699 	WREG32(RADEON_CP_RB_WPTR, 0);
2700 	WREG32(RADEON_CP_RB_CNTL, tmp);
2701 	/* save PCI state */
2702 	pci_save_state(rdev->pdev);
2703 	/* disable bus mastering */
2704 	r100_bm_disable(rdev);
2705 	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
2706 					S_0000F0_SOFT_RESET_RE(1) |
2707 					S_0000F0_SOFT_RESET_PP(1) |
2708 					S_0000F0_SOFT_RESET_RB(1));
2709 	RREG32(R_0000F0_RBBM_SOFT_RESET);
2710 	mdelay(500);
2711 	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
2712 	mdelay(1);
2713 	status = RREG32(R_000E40_RBBM_STATUS);
2714 	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2715 	/* reset CP */
2716 	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
2717 	RREG32(R_0000F0_RBBM_SOFT_RESET);
2718 	mdelay(500);
2719 	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
2720 	mdelay(1);
2721 	status = RREG32(R_000E40_RBBM_STATUS);
2722 	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2723 	/* restore PCI & busmastering */
2724 	pci_restore_state(rdev->pdev);
2725 	r100_enable_bm(rdev);
2726 	/* Check if GPU is idle */
2727 	if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
2728 		G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
2729 		dev_err(rdev->dev, "failed to reset GPU\n");
2730 		ret = -1;
2731 	} else
2732 		dev_info(rdev->dev, "GPU reset succeeded\n");
2733 	r100_mc_resume(rdev, &save);
2734 	return ret;
2735 }
2736 
2737 void r100_set_common_regs(struct radeon_device *rdev)
2738 {
2739 	struct drm_device *dev = rdev->ddev;
2740 	bool force_dac2 = false;
2741 	u32 tmp;
2742 
2743 	/* set these so they don't interfere with anything */
2744 	WREG32(RADEON_OV0_SCALE_CNTL, 0);
2745 	WREG32(RADEON_SUBPIC_CNTL, 0);
2746 	WREG32(RADEON_VIPH_CONTROL, 0);
2747 	WREG32(RADEON_I2C_CNTL_1, 0);
2748 	WREG32(RADEON_DVI_I2C_CNTL_1, 0);
2749 	WREG32(RADEON_CAP0_TRIG_CNTL, 0);
2750 	WREG32(RADEON_CAP1_TRIG_CNTL, 0);
2751 
2752 	/* always set up dac2 on rn50 and some rv100 as lots
2753 	 * of servers seem to wire it up to a VGA port but
2754 	 * don't report it in the bios connector
2755 	 * table.
2756 	 */
2757 	switch (dev->pdev->device) {
2758 		/* RN50 */
2759 	case 0x515e:
2760 	case 0x5969:
2761 		force_dac2 = true;
2762 		break;
2763 		/* RV100*/
2764 	case 0x5159:
2765 	case 0x515a:
2766 		/* DELL triple head servers */
2767 		if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
2768 		    ((dev->pdev->subsystem_device == 0x016c) ||
2769 		     (dev->pdev->subsystem_device == 0x016d) ||
2770 		     (dev->pdev->subsystem_device == 0x016e) ||
2771 		     (dev->pdev->subsystem_device == 0x016f) ||
2772 		     (dev->pdev->subsystem_device == 0x0170) ||
2773 		     (dev->pdev->subsystem_device == 0x017d) ||
2774 		     (dev->pdev->subsystem_device == 0x017e) ||
2775 		     (dev->pdev->subsystem_device == 0x0183) ||
2776 		     (dev->pdev->subsystem_device == 0x018a) ||
2777 		     (dev->pdev->subsystem_device == 0x019a)))
2778 			force_dac2 = true;
2779 		break;
2780 	}
2781 
2782 	if (force_dac2) {
2783 		u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
2784 		u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
2785 		u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2);
2786 
2787 		/* For CRT on DAC2, don't turn it on if the BIOS didn't
2788 		 * enable it, even if it's detected.
2789 		 */
2790 
2791 		/* force it to crtc0 */
2792 		dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
2793 		dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
2794 		disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
2795 
2796 		/* set up the TV DAC */
2797 		tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL |
2798 				 RADEON_TV_DAC_STD_MASK |
2799 				 RADEON_TV_DAC_RDACPD |
2800 				 RADEON_TV_DAC_GDACPD |
2801 				 RADEON_TV_DAC_BDACPD |
2802 				 RADEON_TV_DAC_BGADJ_MASK |
2803 				 RADEON_TV_DAC_DACADJ_MASK);
2804 		tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
2805 				RADEON_TV_DAC_NHOLD |
2806 				RADEON_TV_DAC_STD_PS2 |
2807 				(0x58 << 16));
2808 
2809 		WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
2810 		WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
2811 		WREG32(RADEON_DAC_CNTL2, dac2_cntl);
2812 	}
2813 
2814 	/* switch PM block to ACPI mode */
2815 	tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
2816 	tmp &= ~RADEON_PM_MODE_SEL;
2817 	WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
2818 
2819 }
2820 
2821 /*
2822  * VRAM info
2823  */
2824 static void r100_vram_get_type(struct radeon_device *rdev)
2825 {
2826 	uint32_t tmp;
2827 
2828 	rdev->mc.vram_is_ddr = false;
2829 	if (rdev->flags & RADEON_IS_IGP)
2830 		rdev->mc.vram_is_ddr = true;
2831 	else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
2832 		rdev->mc.vram_is_ddr = true;
2833 	if ((rdev->family == CHIP_RV100) ||
2834 	    (rdev->family == CHIP_RS100) ||
2835 	    (rdev->family == CHIP_RS200)) {
2836 		tmp = RREG32(RADEON_MEM_CNTL);
2837 		if (tmp & RV100_HALF_MODE) {
2838 			rdev->mc.vram_width = 32;
2839 		} else {
2840 			rdev->mc.vram_width = 64;
2841 		}
2842 		if (rdev->flags & RADEON_SINGLE_CRTC) {
2843 			rdev->mc.vram_width /= 4;
2844 			rdev->mc.vram_is_ddr = true;
2845 		}
2846 	} else if (rdev->family <= CHIP_RV280) {
2847 		tmp = RREG32(RADEON_MEM_CNTL);
2848 		if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
2849 			rdev->mc.vram_width = 128;
2850 		} else {
2851 			rdev->mc.vram_width = 64;
2852 		}
2853 	} else {
2854 		/* newer IGPs */
2855 		rdev->mc.vram_width = 128;
2856 	}
2857 }
2858 
2859 static u32 r100_get_accessible_vram(struct radeon_device *rdev)
2860 {
2861 	u32 aper_size;
2862 	u8 byte;
2863 
2864 	aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
2865 
2866 	/* Set HDP_APER_CNTL only on cards that are known not to be broken,
2867 	 * that is, those with the 2nd generation multifunction PCI interface
2868 	 */
2869 	if (rdev->family == CHIP_RV280 ||
2870 	    rdev->family >= CHIP_RV350) {
2871 		WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL,
2872 		       ~RADEON_HDP_APER_CNTL);
2873 		DRM_INFO("Generation 2 PCI interface, using max accessible memory\n");
2874 		return aper_size * 2;
2875 	}
2876 
2877 	/* Older cards have all sorts of funny issues to deal with. First
2878 	 * check if it's a multifunction card by reading the PCI config
2879 	 * header type... Limit those to one aperture size
2880 	 */
2881 	pci_read_config_byte(rdev->pdev, 0xe, &byte);
2882 	if (byte & 0x80) {
2883 		DRM_INFO("Generation 1 PCI interface in multifunction mode\n");
2884 		DRM_INFO("Limiting VRAM to one aperture\n");
2885 		return aper_size;
2886 	}
2887 
2888 	/* Single function older card. We read HDP_APER_CNTL to see how the BIOS
2889 	 * has set it up. We don't write this as it's broken on some ASICs but
2890 	 * we expect the BIOS to have done the right thing (might be too optimistic...)
2891 	 */
2892 	if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL)
2893 		return aper_size * 2;
2894 	return aper_size;
2895 }
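/*
 * Example: with CONFIG_APER_SIZE reporting 128MB, a generation 2 part
 * gets 256MB of accessible VRAM (both apertures) from the function
 * above, while a generation 1 multifunction card is limited to the
 * single 128MB aperture.
 */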
2896 
2897 void r100_vram_init_sizes(struct radeon_device *rdev)
2898 {
2899 	u64 config_aper_size;
2900 
2901 	/* work out accessible VRAM */
2902 	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
2903 	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
2904 	rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
2905 	/* FIXME we don't use the second aperture yet when we could use it */
2906 	if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
2907 		rdev->mc.visible_vram_size = rdev->mc.aper_size;
2908 	config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
2909 	if (rdev->flags & RADEON_IS_IGP) {
2910 		uint32_t tom;
2911 		/* read NB_TOM to get the amount of ram stolen for the GPU */
2912 		tom = RREG32(RADEON_NB_TOM);
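		/*
		 * NB_TOM packs the top and bottom of the stolen range in 64KB
		 * units (bits 31:16 and 15:0); e.g. tom = 0x0fff0000 decodes
		 * to ((0x0fff - 0x0000) + 1) << 16 = 256MB below.
		 */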
2913 		rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
2914 		WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
2915 		rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
2916 	} else {
2917 		rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
2918 		/* Some production boards of m6 will report 0
2919 		 * if it's 8 MB
2920 		 */
2921 		if (rdev->mc.real_vram_size == 0) {
2922 			rdev->mc.real_vram_size = 8192 * 1024;
2923 			WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
2924 		}
2925 		/* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
2926 		 * Novell bug 204882 + along with lots of ubuntu ones
2927 		 */
2928 		if (rdev->mc.aper_size > config_aper_size)
2929 			config_aper_size = rdev->mc.aper_size;
2930 
2931 		if (config_aper_size > rdev->mc.real_vram_size)
2932 			rdev->mc.mc_vram_size = config_aper_size;
2933 		else
2934 			rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
2935 	}
2936 }
2937 
2938 void r100_vga_set_state(struct radeon_device *rdev, bool state)
2939 {
2940 	uint32_t temp;
2941 
2942 	temp = RREG32(RADEON_CONFIG_CNTL);
2943 	if (state == false) {
2944 		temp &= ~RADEON_CFG_VGA_RAM_EN;
2945 		temp |= RADEON_CFG_VGA_IO_DIS;
2946 	} else {
2947 		temp &= ~RADEON_CFG_VGA_IO_DIS;
2948 	}
2949 	WREG32(RADEON_CONFIG_CNTL, temp);
2950 }
2951 
2952 void r100_mc_init(struct radeon_device *rdev)
2953 {
2954 	u64 base;
2955 
2956 	r100_vram_get_type(rdev);
2957 	r100_vram_init_sizes(rdev);
2958 	base = rdev->mc.aper_base;
2959 	if (rdev->flags & RADEON_IS_IGP)
2960 		base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
2961 	radeon_vram_location(rdev, &rdev->mc, base);
2962 	rdev->mc.gtt_base_align = 0;
2963 	if (!(rdev->flags & RADEON_IS_AGP))
2964 		radeon_gtt_location(rdev, &rdev->mc);
2965 	radeon_update_bandwidth_info(rdev);
2966 }
2967 
2968 
2969 /*
2970  * Indirect registers accessor
2971  */
2972 void r100_pll_errata_after_index(struct radeon_device *rdev)
2973 {
2974 	if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) {
2975 		(void)RREG32(RADEON_CLOCK_CNTL_DATA);
2976 		(void)RREG32(RADEON_CRTC_GEN_CNTL);
2977 	}
2978 }
2979 
2980 static void r100_pll_errata_after_data(struct radeon_device *rdev)
2981 {
2982 	/* This workaround is necessary on RV100, RS100 and RS200 chips,
2983 	 * or the chip could hang on a subsequent access
2984 	 */
2985 	if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
2986 		mdelay(5);
2987 	}
2988 
2989 	/* This function is required to workaround a hardware bug in some (all?)
2990 	 * revisions of the R300.  This workaround should be called after every
2991 	 * CLOCK_CNTL_INDEX register access.  If not, register reads afterward
2992 	 * may not be correct.
2993 	 */
2994 	if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
2995 		uint32_t save, tmp;
2996 
2997 		save = RREG32(RADEON_CLOCK_CNTL_INDEX);
2998 		tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
2999 		WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
3000 		tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
3001 		WREG32(RADEON_CLOCK_CNTL_INDEX, save);
3002 	}
3003 }
3004 
3005 uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
3006 {
3007 	uint32_t data;
3008 
3009 	WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
3010 	r100_pll_errata_after_index(rdev);
3011 	data = RREG32(RADEON_CLOCK_CNTL_DATA);
3012 	r100_pll_errata_after_data(rdev);
3013 	return data;
3014 }
3015 
3016 void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
3017 {
3018 	WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
3019 	r100_pll_errata_after_index(rdev);
3020 	WREG32(RADEON_CLOCK_CNTL_DATA, v);
3021 	r100_pll_errata_after_data(rdev);
3022 }
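/*
 * Sketch of how these accessors are typically used (on these ASICs the
 * RREG32_PLL/WREG32_PLL wrappers resolve to the two functions above):
 *
 *	tmp = r100_pll_rreg(rdev, RADEON_PLL_PWRMGT_CNTL);
 *	tmp &= ~RADEON_PM_MODE_SEL;
 *	r100_pll_wreg(rdev, RADEON_PLL_PWRMGT_CNTL, tmp);
 */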
3023 
3024 void r100_set_safe_registers(struct radeon_device *rdev)
3025 {
3026 	if (ASIC_IS_RN50(rdev)) {
3027 		rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
3028 		rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm);
3029 	} else if (rdev->family < CHIP_R200) {
3030 		rdev->config.r100.reg_safe_bm = r100_reg_safe_bm;
3031 		rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm);
3032 	} else {
3033 		r200_set_safe_registers(rdev);
3034 	}
3035 }
3036 
3037 /*
3038  * Debugfs info
3039  */
3040 #if defined(CONFIG_DEBUG_FS)
3041 static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
3042 {
3043 	struct drm_info_node *node = (struct drm_info_node *) m->private;
3044 	struct drm_device *dev = node->minor->dev;
3045 	struct radeon_device *rdev = dev->dev_private;
3046 	uint32_t reg, value;
3047 	unsigned i;
3048 
3049 	seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS));
3050 	seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C));
3051 	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
3052 	for (i = 0; i < 64; i++) {
3053 		WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100);
3054 		reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2;
3055 		WREG32(RADEON_RBBM_CMDFIFO_ADDR, i);
3056 		value = RREG32(RADEON_RBBM_CMDFIFO_DATA);
3057 		seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
3058 	}
3059 	return 0;
3060 }
3061 
3062 static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
3063 {
3064 	struct drm_info_node *node = (struct drm_info_node *) m->private;
3065 	struct drm_device *dev = node->minor->dev;
3066 	struct radeon_device *rdev = dev->dev_private;
3067 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3068 	uint32_t rdp, wdp;
3069 	unsigned count, i, j;
3070 
3071 	radeon_ring_free_size(rdev, ring);
3072 	rdp = RREG32(RADEON_CP_RB_RPTR);
3073 	wdp = RREG32(RADEON_CP_RB_WPTR);
3074 	count = (rdp + ring->ring_size - wdp) & ring->ptr_mask;
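	/* modular distance between the two pointers: biasing by ring_size
	 * before masking keeps the difference non-negative across a wrap */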
3075 	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
3076 	seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
3077 	seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
3078 	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
3079 	seq_printf(m, "%u dwords in ring\n", count);
3080 	for (j = 0; j <= count; j++) {
3081 		i = (rdp + j) & ring->ptr_mask;
3082 		seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
3083 	}
3084 	return 0;
3085 }
3086 
3087 
3088 static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data)
3089 {
3090 	struct drm_info_node *node = (struct drm_info_node *) m->private;
3091 	struct drm_device *dev = node->minor->dev;
3092 	struct radeon_device *rdev = dev->dev_private;
3093 	uint32_t csq_stat, csq2_stat, tmp;
3094 	unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr;
3095 	unsigned i;
3096 
3097 	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
3098 	seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE));
3099 	csq_stat = RREG32(RADEON_CP_CSQ_STAT);
3100 	csq2_stat = RREG32(RADEON_CP_CSQ2_STAT);
3101 	r_rptr = (csq_stat >> 0) & 0x3ff;
3102 	r_wptr = (csq_stat >> 10) & 0x3ff;
3103 	ib1_rptr = (csq_stat >> 20) & 0x3ff;
3104 	ib1_wptr = (csq2_stat >> 0) & 0x3ff;
3105 	ib2_rptr = (csq2_stat >> 10) & 0x3ff;
3106 	ib2_wptr = (csq2_stat >> 20) & 0x3ff;
3107 	seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat);
3108 	seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat);
3109 	seq_printf(m, "Ring rptr %u\n", r_rptr);
3110 	seq_printf(m, "Ring wptr %u\n", r_wptr);
3111 	seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr);
3112 	seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr);
3113 	seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr);
3114 	seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr);
3115 	/* FIXME: 0, 128, 640 depends on fifo setup see cp_init_kms
3116 	 * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */
3117 	seq_printf(m, "Ring fifo:\n");
3118 	for (i = 0; i < 256; i++) {
3119 		WREG32(RADEON_CP_CSQ_ADDR, i << 2);
3120 		tmp = RREG32(RADEON_CP_CSQ_DATA);
3121 		seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp);
3122 	}
3123 	seq_printf(m, "Indirect1 fifo:\n");
3124 	for (i = 256; i <= 512; i++) {
3125 		WREG32(RADEON_CP_CSQ_ADDR, i << 2);
3126 		tmp = RREG32(RADEON_CP_CSQ_DATA);
3127 		seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp);
3128 	}
3129 	seq_printf(m, "Indirect2 fifo:\n");
3130 	for (i = 640; i < ib1_wptr; i++) {
3131 		WREG32(RADEON_CP_CSQ_ADDR, i << 2);
3132 		tmp = RREG32(RADEON_CP_CSQ_DATA);
3133 		seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp);
3134 	}
3135 	return 0;
3136 }
3137 
3138 static int r100_debugfs_mc_info(struct seq_file *m, void *data)
3139 {
3140 	struct drm_info_node *node = (struct drm_info_node *) m->private;
3141 	struct drm_device *dev = node->minor->dev;
3142 	struct radeon_device *rdev = dev->dev_private;
3143 	uint32_t tmp;
3144 
3145 	tmp = RREG32(RADEON_CONFIG_MEMSIZE);
3146 	seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
3147 	tmp = RREG32(RADEON_MC_FB_LOCATION);
3148 	seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
3149 	tmp = RREG32(RADEON_BUS_CNTL);
3150 	seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
3151 	tmp = RREG32(RADEON_MC_AGP_LOCATION);
3152 	seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
3153 	tmp = RREG32(RADEON_AGP_BASE);
3154 	seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
3155 	tmp = RREG32(RADEON_HOST_PATH_CNTL);
3156 	seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
3157 	tmp = RREG32(0x01D0);
3158 	seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
3159 	tmp = RREG32(RADEON_AIC_LO_ADDR);
3160 	seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
3161 	tmp = RREG32(RADEON_AIC_HI_ADDR);
3162 	seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
3163 	tmp = RREG32(0x01E4);
3164 	seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
3165 	return 0;
3166 }
3167 
3168 static struct drm_info_list r100_debugfs_rbbm_list[] = {
3169 	{"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL},
3170 };
3171 
3172 static struct drm_info_list r100_debugfs_cp_list[] = {
3173 	{"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL},
3174 	{"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL},
3175 };
3176 
3177 static struct drm_info_list r100_debugfs_mc_info_list[] = {
3178 	{"r100_mc_info", r100_debugfs_mc_info, 0, NULL},
3179 };
3180 #endif
3181 
3182 int r100_debugfs_rbbm_init(struct radeon_device *rdev)
3183 {
3184 #if defined(CONFIG_DEBUG_FS)
3185 	return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1);
3186 #else
3187 	return 0;
3188 #endif
3189 }
3190 
3191 int r100_debugfs_cp_init(struct radeon_device *rdev)
3192 {
3193 #if defined(CONFIG_DEBUG_FS)
3194 	return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2);
3195 #else
3196 	return 0;
3197 #endif
3198 }
3199 
3200 int r100_debugfs_mc_info_init(struct radeon_device *rdev)
3201 {
3202 #if defined(CONFIG_DEBUG_FS)
3203 	return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1);
3204 #else
3205 	return 0;
3206 #endif
3207 }
3208 
3209 int r100_set_surface_reg(struct radeon_device *rdev, int reg,
3210 			 uint32_t tiling_flags, uint32_t pitch,
3211 			 uint32_t offset, uint32_t obj_size)
3212 {
3213 	int surf_index = reg * 16;
3214 	int flags = 0;
3215 
3216 	if (rdev->family <= CHIP_RS200) {
3217 		if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
3218 				 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
3219 			flags |= RADEON_SURF_TILE_COLOR_BOTH;
3220 		if (tiling_flags & RADEON_TILING_MACRO)
3221 			flags |= RADEON_SURF_TILE_COLOR_MACRO;
3222 	} else if (rdev->family <= CHIP_RV280) {
3223 		if (tiling_flags & (RADEON_TILING_MACRO))
3224 			flags |= R200_SURF_TILE_COLOR_MACRO;
3225 		if (tiling_flags & RADEON_TILING_MICRO)
3226 			flags |= R200_SURF_TILE_COLOR_MICRO;
3227 	} else {
3228 		if (tiling_flags & RADEON_TILING_MACRO)
3229 			flags |= R300_SURF_TILE_MACRO;
3230 		if (tiling_flags & RADEON_TILING_MICRO)
3231 			flags |= R300_SURF_TILE_MICRO;
3232 	}
3233 
3234 	if (tiling_flags & RADEON_TILING_SWAP_16BIT)
3235 		flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP;
3236 	if (tiling_flags & RADEON_TILING_SWAP_32BIT)
3237 		flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;
3238 
3239 	/* when we aren't tiling, the pitch seems to need to be further divided down - tested on power5 + rn50 server */
3240 	if (tiling_flags & (RADEON_TILING_SWAP_16BIT | RADEON_TILING_SWAP_32BIT)) {
3241 		if (!(tiling_flags & (RADEON_TILING_MACRO | RADEON_TILING_MICRO)))
3242 			if (ASIC_IS_RN50(rdev))
3243 				pitch /= 16;
3244 	}
3245 
3246 	/* r100/r200 divide by 16 */
3247 	if (rdev->family < CHIP_R300)
3248 		flags |= pitch / 16;
3249 	else
3250 		flags |= pitch / 8;
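	/* e.g. a 1024 pixel wide 32bpp surface has a byte pitch of 4096,
	 * encoded as 4096 / 16 = 256 on r100/r200 and 4096 / 8 = 512 on
	 * r300 and newer */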
3251 
3252 
3253 	DRM_DEBUG_KMS("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
3254 	WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
3255 	WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
3256 	WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
3257 	return 0;
3258 }
3259 
3260 void r100_clear_surface_reg(struct radeon_device *rdev, int reg)
3261 {
3262 	int surf_index = reg * 16;
3263 	WREG32(RADEON_SURFACE0_INFO + surf_index, 0);
3264 }
3265 
3266 void r100_bandwidth_update(struct radeon_device *rdev)
3267 {
3268 	fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
3269 	fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
3270 	fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
3271 	uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
3272 	fixed20_12 memtcas_ff[8] = {
3273 		dfixed_init(1),
3274 		dfixed_init(2),
3275 		dfixed_init(3),
3276 		dfixed_init(0),
3277 		dfixed_init_half(1),
3278 		dfixed_init_half(2),
3279 		dfixed_init(0),
3280 	};
3281 	fixed20_12 memtcas_rs480_ff[8] = {
3282 		dfixed_init(0),
3283 		dfixed_init(1),
3284 		dfixed_init(2),
3285 		dfixed_init(3),
3286 		dfixed_init(0),
3287 		dfixed_init_half(1),
3288 		dfixed_init_half(2),
3289 		dfixed_init_half(3),
3290 	};
3291 	fixed20_12 memtcas2_ff[8] = {
3292 		dfixed_init(0),
3293 		dfixed_init(1),
3294 		dfixed_init(2),
3295 		dfixed_init(3),
3296 		dfixed_init(4),
3297 		dfixed_init(5),
3298 		dfixed_init(6),
3299 		dfixed_init(7),
3300 	};
3301 	fixed20_12 memtrbs[8] = {
3302 		dfixed_init(1),
3303 		dfixed_init_half(1),
3304 		dfixed_init(2),
3305 		dfixed_init_half(2),
3306 		dfixed_init(3),
3307 		dfixed_init_half(3),
3308 		dfixed_init(4),
3309 		dfixed_init_half(4)
3310 	};
3311 	fixed20_12 memtrbs_r4xx[8] = {
3312 		dfixed_init(4),
3313 		dfixed_init(5),
3314 		dfixed_init(6),
3315 		dfixed_init(7),
3316 		dfixed_init(8),
3317 		dfixed_init(9),
3318 		dfixed_init(10),
3319 		dfixed_init(11)
3320 	};
3321 	fixed20_12 min_mem_eff;
3322 	fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
3323 	fixed20_12 cur_latency_mclk, cur_latency_sclk;
3324 	fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
3325 		disp_drain_rate2, read_return_rate;
3326 	fixed20_12 time_disp1_drop_priority;
3327 	int c;
3328 	int cur_size = 16;       /* in octawords */
3329 	int critical_point = 0, critical_point2;
3330 /* 	uint32_t read_return_rate, time_disp1_drop_priority; */
3331 	int stop_req, max_stop_req;
3332 	struct drm_display_mode *mode1 = NULL;
3333 	struct drm_display_mode *mode2 = NULL;
3334 	uint32_t pixel_bytes1 = 0;
3335 	uint32_t pixel_bytes2 = 0;
3336 
3337 	radeon_update_display_priority(rdev);
3338 
3339 	if (rdev->mode_info.crtcs[0]->base.enabled) {
3340 		mode1 = &rdev->mode_info.crtcs[0]->base.mode;
3341 		pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
3342 	}
3343 	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3344 		if (rdev->mode_info.crtcs[1]->base.enabled) {
3345 			mode2 = &rdev->mode_info.crtcs[1]->base.mode;
3346 			pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
3347 		}
3348 	}
3349 
3350 	min_mem_eff.full = dfixed_const_8(0);
3351 	/* get modes */
3352 	if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
3353 		uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
3354 		mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
3355 		mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
3356 		/* check crtc enables */
3357 		if (mode2)
3358 			mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
3359 		if (mode1)
3360 			mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
3361 		WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
3362 	}
3363 
3364 	/*
3365 	 * determine if there is enough bandwidth for the current mode
3366 	 */
3367 	sclk_ff = rdev->pm.sclk;
3368 	mclk_ff = rdev->pm.mclk;
3369 
3370 	temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
3371 	temp_ff.full = dfixed_const(temp);
3372 	mem_bw.full = dfixed_mul(mclk_ff, temp_ff);
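	/*
	 * e.g. a 128-bit DDR interface moves (128 / 8) * 2 = 32 bytes per
	 * memory clock, so mem_bw here is 32 * mclk before the memory
	 * efficiency factor is applied further down.
	 */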
3373 
3374 	pix_clk.full = 0;
3375 	pix_clk2.full = 0;
3376 	peak_disp_bw.full = 0;
3377 	if (mode1) {
3378 		temp_ff.full = dfixed_const(1000);
3379 		pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */
3380 		pix_clk.full = dfixed_div(pix_clk, temp_ff);
3381 		temp_ff.full = dfixed_const(pixel_bytes1);
3382 		peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff);
3383 	}
3384 	if (mode2) {
3385 		temp_ff.full = dfixed_const(1000);
3386 		pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */
3387 		pix_clk2.full = dfixed_div(pix_clk2, temp_ff);
3388 		temp_ff.full = dfixed_const(pixel_bytes2);
3389 		peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff);
3390 	}
3391 
3392 	mem_bw.full = dfixed_mul(mem_bw, min_mem_eff);
3393 	if (peak_disp_bw.full >= mem_bw.full) {
		DRM_ERROR("You may not have enough display bandwidth for the current mode\n"
			  "If you have a flickering problem, try to lower the resolution, refresh rate, or color depth\n");
3396 	}
3397 
	/* Get memory timing values from the MEM_TIMING_CNTL (a.k.a. EXT_MEM_CNTL) register and convert them. */
3399 	temp = RREG32(RADEON_MEM_TIMING_CNTL);
3400 	if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
3401 		mem_trcd = ((temp >> 2) & 0x3) + 1;
3402 		mem_trp  = ((temp & 0x3)) + 1;
3403 		mem_tras = ((temp & 0x70) >> 4) + 1;
3404 	} else if (rdev->family == CHIP_R300 ||
3405 		   rdev->family == CHIP_R350) { /* r300, r350 */
3406 		mem_trcd = (temp & 0x7) + 1;
3407 		mem_trp = ((temp >> 8) & 0x7) + 1;
3408 		mem_tras = ((temp >> 11) & 0xf) + 4;
3409 	} else if (rdev->family == CHIP_RV350 ||
3410 		   rdev->family <= CHIP_RV380) {
3411 		/* rv3x0 */
3412 		mem_trcd = (temp & 0x7) + 3;
3413 		mem_trp = ((temp >> 8) & 0x7) + 3;
3414 		mem_tras = ((temp >> 11) & 0xf) + 6;
3415 	} else if (rdev->family == CHIP_R420 ||
3416 		   rdev->family == CHIP_R423 ||
3417 		   rdev->family == CHIP_RV410) {
3418 		/* r4xx */
3419 		mem_trcd = (temp & 0xf) + 3;
3420 		if (mem_trcd > 15)
3421 			mem_trcd = 15;
3422 		mem_trp = ((temp >> 8) & 0xf) + 3;
3423 		if (mem_trp > 15)
3424 			mem_trp = 15;
3425 		mem_tras = ((temp >> 12) & 0x1f) + 6;
3426 		if (mem_tras > 31)
3427 			mem_tras = 31;
3428 	} else { /* RV200, R200 */
3429 		mem_trcd = (temp & 0x7) + 1;
3430 		mem_trp = ((temp >> 8) & 0x7) + 1;
3431 		mem_tras = ((temp >> 12) & 0xf) + 4;
3432 	}
3433 	/* convert to FF */
3434 	trcd_ff.full = dfixed_const(mem_trcd);
3435 	trp_ff.full = dfixed_const(mem_trp);
3436 	tras_ff.full = dfixed_const(mem_tras);
3437 
	/* Get the CAS latency field from the MEM_SDRAM_MODE_REG register and convert it. */
3439 	temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
3440 	data = (temp & (7 << 20)) >> 20;
3441 	if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
		if (rdev->family == CHIP_RS480) /* apparently not needed for rs400 */
3443 			tcas_ff = memtcas_rs480_ff[data];
3444 		else
3445 			tcas_ff = memtcas_ff[data];
3446 	} else
3447 		tcas_ff = memtcas2_ff[data];
3448 
3449 	if (rdev->family == CHIP_RS400 ||
3450 	    rdev->family == CHIP_RS480) {
		/* extra cas latency stored in bits 23-25, 0-4 clocks */
3452 		data = (temp >> 23) & 0x7;
3453 		if (data < 5)
3454 			tcas_ff.full += dfixed_const(data);
3455 	}
3456 
3457 	if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
3458 		/* on the R300, Tcas is included in Trbs.
3459 		 */
3460 		temp = RREG32(RADEON_MEM_CNTL);
3461 		data = (R300_MEM_NUM_CHANNELS_MASK & temp);
3462 		if (data == 1) {
3463 			if (R300_MEM_USE_CD_CH_ONLY & temp) {
3464 				temp = RREG32(R300_MC_IND_INDEX);
3465 				temp &= ~R300_MC_IND_ADDR_MASK;
3466 				temp |= R300_MC_READ_CNTL_CD_mcind;
3467 				WREG32(R300_MC_IND_INDEX, temp);
3468 				temp = RREG32(R300_MC_IND_DATA);
3469 				data = (R300_MEM_RBS_POSITION_C_MASK & temp);
3470 			} else {
3471 				temp = RREG32(R300_MC_READ_CNTL_AB);
3472 				data = (R300_MEM_RBS_POSITION_A_MASK & temp);
3473 			}
3474 		} else {
3475 			temp = RREG32(R300_MC_READ_CNTL_AB);
3476 			data = (R300_MEM_RBS_POSITION_A_MASK & temp);
3477 		}
3478 		if (rdev->family == CHIP_RV410 ||
3479 		    rdev->family == CHIP_R420 ||
3480 		    rdev->family == CHIP_R423)
3481 			trbs_ff = memtrbs_r4xx[data];
3482 		else
3483 			trbs_ff = memtrbs[data];
3484 		tcas_ff.full += trbs_ff.full;
3485 	}
3486 
3487 	sclk_eff_ff.full = sclk_ff.full;
3488 
3489 	if (rdev->flags & RADEON_IS_AGP) {
3490 		fixed20_12 agpmode_ff;
3491 		agpmode_ff.full = dfixed_const(radeon_agpmode);
3492 		temp_ff.full = dfixed_const_666(16);
3493 		sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff);
3494 	}
3495 	/* TODO PCIE lanes may affect this - agpmode == 16?? */
3496 
3497 	if (ASIC_IS_R300(rdev)) {
3498 		sclk_delay_ff.full = dfixed_const(250);
3499 	} else {
3500 		if ((rdev->family == CHIP_RV100) ||
3501 		    rdev->flags & RADEON_IS_IGP) {
3502 			if (rdev->mc.vram_is_ddr)
3503 				sclk_delay_ff.full = dfixed_const(41);
3504 			else
3505 				sclk_delay_ff.full = dfixed_const(33);
3506 		} else {
3507 			if (rdev->mc.vram_width == 128)
3508 				sclk_delay_ff.full = dfixed_const(57);
3509 			else
3510 				sclk_delay_ff.full = dfixed_const(41);
3511 		}
3512 	}
3513 
3514 	mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff);
3515 
3516 	if (rdev->mc.vram_is_ddr) {
3517 		if (rdev->mc.vram_width == 32) {
3518 			k1.full = dfixed_const(40);
3519 			c  = 3;
3520 		} else {
3521 			k1.full = dfixed_const(20);
3522 			c  = 1;
3523 		}
3524 	} else {
3525 		k1.full = dfixed_const(40);
3526 		c  = 3;
3527 	}
3528 
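	/*
	 * Memory controller latency:
	 *   mc_latency_mclk = (2*tRCD + c*tCAS + 4*(tRAS + tRP) + k1) / mclk
	 *                     + 4 / sclk_eff
	 */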
3529 	temp_ff.full = dfixed_const(2);
3530 	mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff);
3531 	temp_ff.full = dfixed_const(c);
3532 	mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff);
3533 	temp_ff.full = dfixed_const(4);
3534 	mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff);
3535 	mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff);
3536 	mc_latency_mclk.full += k1.full;
3537 
3538 	mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff);
3539 	mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff);
3540 
3541 	/*
3542 	  HW cursor time assuming worst case of full size colour cursor.
3543 	*/
3544 	temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
3545 	temp_ff.full += trcd_ff.full;
3546 	if (temp_ff.full < tras_ff.full)
3547 		temp_ff.full = tras_ff.full;
3548 	cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff);
3549 
3550 	temp_ff.full = dfixed_const(cur_size);
3551 	cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff);
3552 	/*
3553 	  Find the total latency for the display data.
3554 	*/
3555 	disp_latency_overhead.full = dfixed_const(8);
3556 	disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff);
3557 	mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
3558 	mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
3559 
3560 	if (mc_latency_mclk.full > mc_latency_sclk.full)
3561 		disp_latency.full = mc_latency_mclk.full;
3562 	else
3563 		disp_latency.full = mc_latency_sclk.full;
3564 
3565 	/* setup Max GRPH_STOP_REQ default value */
3566 	if (ASIC_IS_RV100(rdev))
3567 		max_stop_req = 0x5c;
3568 	else
3569 		max_stop_req = 0x7c;
3570 
3571 	if (mode1) {
3572 		/*  CRTC1
3573 		    Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
3574 		    GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
3575 		*/
3576 		stop_req = mode1->hdisplay * pixel_bytes1 / 16;
3577 
3578 		if (stop_req > max_stop_req)
3579 			stop_req = max_stop_req;
3580 
3581 		/*
3582 		  Find the drain rate of the display buffer.
3583 		*/
3584 		temp_ff.full = dfixed_const((16/pixel_bytes1));
3585 		disp_drain_rate.full = dfixed_div(pix_clk, temp_ff);
3586 
3587 		/*
3588 		  Find the critical point of the display buffer.
3589 		*/
3590 		crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency);
3591 		crit_point_ff.full += dfixed_const_half(0);
3592 
3593 		critical_point = dfixed_trunc(crit_point_ff);
3594 
3595 		if (rdev->disp_priority == 2) {
3596 			critical_point = 0;
3597 		}
3598 
3599 		/*
3600 		  The critical point should never be above max_stop_req-4.  Setting
3601 		  GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
3602 		*/
3603 		if (max_stop_req - critical_point < 4)
3604 			critical_point = 0;
3605 
3606 		if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
			/* some R300 cards have a problem with this set to 0, when CRTC2 is enabled. */
3608 			critical_point = 0x10;
3609 		}
3610 
3611 		temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
3612 		temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
3613 		temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
3614 		temp &= ~(RADEON_GRPH_START_REQ_MASK);
3615 		if ((rdev->family == CHIP_R350) &&
3616 		    (stop_req > 0x15)) {
3617 			stop_req -= 0x10;
3618 		}
3619 		temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
3620 		temp |= RADEON_GRPH_BUFFER_SIZE;
3621 		temp &= ~(RADEON_GRPH_CRITICAL_CNTL   |
3622 			  RADEON_GRPH_CRITICAL_AT_SOF |
3623 			  RADEON_GRPH_STOP_CNTL);
3624 		/*
3625 		  Write the result into the register.
3626 		*/
3627 		WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
3628 						       (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
3629 
3630 #if 0
3631 		if ((rdev->family == CHIP_RS400) ||
3632 		    (rdev->family == CHIP_RS480)) {
3633 			/* attempt to program RS400 disp regs correctly ??? */
3634 			temp = RREG32(RS400_DISP1_REG_CNTL);
3635 			temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
3636 				  RS400_DISP1_STOP_REQ_LEVEL_MASK);
3637 			WREG32(RS400_DISP1_REQ_CNTL1, (temp |
3638 						       (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
3639 						       (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
3640 			temp = RREG32(RS400_DMIF_MEM_CNTL1);
3641 			temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
3642 				  RS400_DISP1_CRITICAL_POINT_STOP_MASK);
3643 			WREG32(RS400_DMIF_MEM_CNTL1, (temp |
3644 						      (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
3645 						      (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
3646 		}
3647 #endif
3648 
		DRM_DEBUG_KMS("GRPH_BUFFER_CNTL now %x\n",
			  (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
3652 	}
3653 
3654 	if (mode2) {
3655 		u32 grph2_cntl;
3656 		stop_req = mode2->hdisplay * pixel_bytes2 / 16;
3657 
3658 		if (stop_req > max_stop_req)
3659 			stop_req = max_stop_req;
3660 
3661 		/*
3662 		  Find the drain rate of the display buffer.
3663 		*/
3664 		temp_ff.full = dfixed_const((16/pixel_bytes2));
3665 		disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff);
3666 
3667 		grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
3668 		grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
3669 		grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
3670 		grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
3671 		if ((rdev->family == CHIP_R350) &&
3672 		    (stop_req > 0x15)) {
3673 			stop_req -= 0x10;
3674 		}
3675 		grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
3676 		grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
3677 		grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL   |
3678 			  RADEON_GRPH_CRITICAL_AT_SOF |
3679 			  RADEON_GRPH_STOP_CNTL);
3680 
3681 		if ((rdev->family == CHIP_RS100) ||
3682 		    (rdev->family == CHIP_RS200))
3683 			critical_point2 = 0;
3684 		else {
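			/*
			 * Effective read return rate is the smaller of the
			 * scaled memory clock and the engine clock.  Note the
			 * expression below evaluates as (width * is_ddr + 1) / 128;
			 * it may have been intended as width * (is_ddr + 1) / 128.
			 */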
3685 			temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
3686 			temp_ff.full = dfixed_const(temp);
3687 			temp_ff.full = dfixed_mul(mclk_ff, temp_ff);
3688 			if (sclk_ff.full < temp_ff.full)
3689 				temp_ff.full = sclk_ff.full;
3690 
3691 			read_return_rate.full = temp_ff.full;
3692 
3693 			if (mode1) {
3694 				temp_ff.full = read_return_rate.full - disp_drain_rate.full;
3695 				time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff);
3696 			} else {
3697 				time_disp1_drop_priority.full = 0;
3698 			}
3699 			crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
3700 			crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2);
3701 			crit_point_ff.full += dfixed_const_half(0);
3702 
3703 			critical_point2 = dfixed_trunc(crit_point_ff);
3704 
3705 			if (rdev->disp_priority == 2) {
3706 				critical_point2 = 0;
3707 			}
3708 
3709 			if (max_stop_req - critical_point2 < 4)
3710 				critical_point2 = 0;
3711 
3712 		}
3713 
3714 		if (critical_point2 == 0 && rdev->family == CHIP_R300) {
			/* some R300 cards have a problem with this set to 0 */
3716 			critical_point2 = 0x10;
3717 		}
3718 
3719 		WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
3720 						  (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
3721 
3722 		if ((rdev->family == CHIP_RS400) ||
3723 		    (rdev->family == CHIP_RS480)) {
3724 #if 0
3725 			/* attempt to program RS400 disp2 regs correctly ??? */
3726 			temp = RREG32(RS400_DISP2_REQ_CNTL1);
3727 			temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
3728 				  RS400_DISP2_STOP_REQ_LEVEL_MASK);
3729 			WREG32(RS400_DISP2_REQ_CNTL1, (temp |
3730 						       (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
3731 						       (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
3732 			temp = RREG32(RS400_DISP2_REQ_CNTL2);
3733 			temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
3734 				  RS400_DISP2_CRITICAL_POINT_STOP_MASK);
3735 			WREG32(RS400_DISP2_REQ_CNTL2, (temp |
3736 						       (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
3737 						       (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
3738 #endif
3739 			WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
3740 			WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
3741 			WREG32(RS400_DMIF_MEM_CNTL1,  0x29CA71DC);
3742 			WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
3743 		}
3744 
		DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL now %x\n",
			  (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
3747 	}
3748 }
3749 
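/**
 * r100_ring_test - basic gfx ring test.
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Write a known value to a scratch register through the gfx ring and
 * poll until it lands, to verify that the ring works (r1xx-r4xx).
 * Returns 0 on success, error on failure.
 */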
3750 int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
3751 {
3752 	uint32_t scratch;
3753 	uint32_t tmp = 0;
3754 	unsigned i;
3755 	int r;
3756 
3757 	r = radeon_scratch_get(rdev, &scratch);
3758 	if (r) {
3759 		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
3760 		return r;
3761 	}
3762 	WREG32(scratch, 0xCAFEDEAD);
3763 	r = radeon_ring_lock(rdev, ring, 2);
3764 	if (r) {
3765 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3766 		radeon_scratch_free(rdev, scratch);
3767 		return r;
3768 	}
3769 	radeon_ring_write(ring, PACKET0(scratch, 0));
3770 	radeon_ring_write(ring, 0xDEADBEEF);
3771 	radeon_ring_unlock_commit(rdev, ring);
3772 	for (i = 0; i < rdev->usec_timeout; i++) {
3773 		tmp = RREG32(scratch);
3774 		if (tmp == 0xDEADBEEF) {
3775 			break;
3776 		}
3777 		DRM_UDELAY(1);
3778 	}
3779 	if (i < rdev->usec_timeout) {
3780 		DRM_INFO("ring test succeeded in %d usecs\n", i);
3781 	} else {
3782 		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
3783 			  scratch, tmp);
3784 		r = -EINVAL;
3785 	}
3786 	radeon_scratch_free(rdev, scratch);
3787 	return r;
3788 }
3789 
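/**
 * r100_ring_ib_execute - emit an IB on the gfx ring.
 *
 * @rdev: radeon_device pointer
 * @ib: IB to emit
 *
 * Emit the IB base address and length on the gfx ring (r1xx-r4xx),
 * saving the next read pointer first when a save register is set up.
 */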
3790 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3791 {
3792 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3793 
3794 	if (ring->rptr_save_reg) {
3795 		u32 next_rptr = ring->wptr + 2 + 3;
3796 		radeon_ring_write(ring, PACKET0(ring->rptr_save_reg, 0));
3797 		radeon_ring_write(ring, next_rptr);
3798 	}
3799 
3800 	radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1));
3801 	radeon_ring_write(ring, ib->gpu_addr);
3802 	radeon_ring_write(ring, ib->length_dw);
3803 }
3804 
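/**
 * r100_ib_test - basic IB test.
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Build a small IB that writes a known value to a scratch register,
 * schedule it, and poll the register to verify that IB execution
 * works (r1xx-r4xx).  Returns 0 on success, error on failure.
 */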
3805 int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3806 {
3807 	struct radeon_ib ib;
3808 	uint32_t scratch;
3809 	uint32_t tmp = 0;
3810 	unsigned i;
3811 	int r;
3812 
3813 	r = radeon_scratch_get(rdev, &scratch);
3814 	if (r) {
3815 		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
3816 		return r;
3817 	}
3818 	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, 256);
	if (r) {
		radeon_scratch_free(rdev, scratch);
		return r;
	}
3823 	ib.ptr[0] = PACKET0(scratch, 0);
3824 	ib.ptr[1] = 0xDEADBEEF;
3825 	ib.ptr[2] = PACKET2(0);
3826 	ib.ptr[3] = PACKET2(0);
3827 	ib.ptr[4] = PACKET2(0);
3828 	ib.ptr[5] = PACKET2(0);
3829 	ib.ptr[6] = PACKET2(0);
3830 	ib.ptr[7] = PACKET2(0);
3831 	ib.length_dw = 8;
3832 	r = radeon_ib_schedule(rdev, &ib, NULL);
3833 	if (r) {
3834 		radeon_scratch_free(rdev, scratch);
3835 		radeon_ib_free(rdev, &ib);
3836 		return r;
3837 	}
	r = radeon_fence_wait(ib.fence, false);
	if (r) {
		radeon_scratch_free(rdev, scratch);
		radeon_ib_free(rdev, &ib);
		return r;
	}
3842 	for (i = 0; i < rdev->usec_timeout; i++) {
3843 		tmp = RREG32(scratch);
3844 		if (tmp == 0xDEADBEEF) {
3845 			break;
3846 		}
3847 		DRM_UDELAY(1);
3848 	}
3849 	if (i < rdev->usec_timeout) {
3850 		DRM_INFO("ib test succeeded in %u usecs\n", i);
3851 	} else {
3852 		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
3853 			  scratch, tmp);
3854 		r = -EINVAL;
3855 	}
3856 	radeon_scratch_free(rdev, scratch);
3857 	radeon_ib_free(rdev, &ib);
3858 	return r;
3859 }
3860 
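/**
 * r100_mc_stop - stop the MC clients.
 *
 * @rdev: radeon_device pointer
 * @save: pointer to save area for the CRTC state
 *
 * Shut the CP down, save a few CRTC registers, and disable VGA RAM
 * access, cursor, overlay and display requests so the memory
 * controller can be reprogrammed safely (r1xx-r4xx).
 */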
3861 void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
3862 {
	/* Shutdown CP. We shouldn't need to do that, but better safe
	 * than sorry.
	 */
3866 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
3867 	WREG32(R_000740_CP_CSQ_CNTL, 0);
3868 
	/* Save a few CRTC registers */
3870 	save->GENMO_WT = RREG8(R_0003C2_GENMO_WT);
3871 	save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL);
3872 	save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL);
3873 	save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET);
3874 	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3875 		save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL);
3876 		save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET);
3877 	}
3878 
3879 	/* Disable VGA aperture access */
3880 	WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT);
3881 	/* Disable cursor, overlay, crtc */
3882 	WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1));
3883 	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |
3884 					S_000054_CRTC_DISPLAY_DIS(1));
3885 	WREG32(R_000050_CRTC_GEN_CNTL,
3886 			(C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) |
3887 			S_000050_CRTC_DISP_REQ_EN_B(1));
3888 	WREG32(R_000420_OV0_SCALE_CNTL,
3889 		C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL));
3890 	WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET);
3891 	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3892 		WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET |
3893 						S_000360_CUR2_LOCK(1));
3894 		WREG32(R_0003F8_CRTC2_GEN_CNTL,
3895 			(C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) |
3896 			S_0003F8_CRTC2_DISPLAY_DIS(1) |
3897 			S_0003F8_CRTC2_DISP_REQ_EN_B(1));
3898 		WREG32(R_000360_CUR2_OFFSET,
3899 			C_000360_CUR2_LOCK & save->CUR2_OFFSET);
3900 	}
3901 }
3902 
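/**
 * r100_mc_resume - restart the MC clients.
 *
 * @rdev: radeon_device pointer
 * @save: CRTC state saved by r100_mc_stop()
 *
 * Point the CRTCs at the new VRAM location and restore the CRTC
 * registers saved by r100_mc_stop() (r1xx-r4xx).
 */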
3903 void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
3904 {
3905 	/* Update base address for crtc */
3906 	WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
3907 	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3908 		WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
3909 	}
3910 	/* Restore CRTC registers */
3911 	WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
3912 	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
3913 	WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
3914 	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3915 		WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
3916 	}
3917 }
3918 
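/**
 * r100_vga_render_disable - disable VGA rendering.
 *
 * @rdev: radeon_device pointer
 *
 * Clear the VGA RAM enable bit so the VGA engine stops rendering
 * (r1xx-r4xx).
 */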
3919 void r100_vga_render_disable(struct radeon_device *rdev)
3920 {
3921 	u32 tmp;
3922 
3923 	tmp = RREG8(R_0003C2_GENMO_WT);
3924 	WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp);
3925 }
3926 
3927 static void r100_debugfs(struct radeon_device *rdev)
3928 {
3929 	int r;
3930 
3931 	r = r100_debugfs_mc_info_init(rdev);
3932 	if (r)
3933 		dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n");
3934 }
3935 
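/**
 * r100_mc_program - program the memory controller.
 *
 * @rdev: radeon_device pointer
 *
 * Stop the MC clients, program the AGP location, wait for the MC to
 * go idle, program the FB location, then restart the MC clients
 * (r1xx-r4xx).
 */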
3936 static void r100_mc_program(struct radeon_device *rdev)
3937 {
3938 	struct r100_mc_save save;
3939 
	/* Stop all MC clients */
3941 	r100_mc_stop(rdev, &save);
3942 	if (rdev->flags & RADEON_IS_AGP) {
3943 		WREG32(R_00014C_MC_AGP_LOCATION,
3944 			S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
3945 			S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
3946 		WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
3947 		if (rdev->family > CHIP_RV200)
3948 			WREG32(R_00015C_AGP_BASE_2,
3949 				upper_32_bits(rdev->mc.agp_base) & 0xff);
3950 	} else {
3951 		WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
3952 		WREG32(R_000170_AGP_BASE, 0);
3953 		if (rdev->family > CHIP_RV200)
3954 			WREG32(R_00015C_AGP_BASE_2, 0);
3955 	}
3956 	/* Wait for mc idle */
3957 	if (r100_mc_wait_for_idle(rdev))
3958 		dev_warn(rdev->dev, "Wait for MC idle timeout.\n");
	/* Program MC; this should be a 32-bit limited address space */
3960 	WREG32(R_000148_MC_FB_LOCATION,
3961 		S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
3962 		S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
3963 	r100_mc_resume(rdev, &save);
3964 }
3965 
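/**
 * r100_clock_startup - set up the clocks.
 *
 * @rdev: radeon_device pointer
 *
 * Optionally enable dynamic clock gating and force the CP and VIP
 * clocks on (plus the display clocks on RV250/RV280) (r1xx-r4xx).
 */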
3966 void r100_clock_startup(struct radeon_device *rdev)
3967 {
3968 	u32 tmp;
3969 
3970 	if (radeon_dynclks != -1 && radeon_dynclks)
3971 		radeon_legacy_set_clock_gating(rdev, 1);
	/* We need to force on some of the blocks */
3973 	tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
3974 	tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
3975 	if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280))
3976 		tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1);
3977 	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
3978 }
3979 
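/**
 * r100_startup - bring the hw to a working state.
 *
 * @rdev: radeon_device pointer
 *
 * Program the common registers, MC, clocks and GART, then bring up
 * writeback, fences, IRQs, the CP ring and the IB pool (r1xx-r4xx).
 * Returns 0 on success, error on failure.
 */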
3980 static int r100_startup(struct radeon_device *rdev)
3981 {
3982 	int r;
3983 
3984 	/* set common regs */
3985 	r100_set_common_regs(rdev);
3986 	/* program mc */
3987 	r100_mc_program(rdev);
3988 	/* Resume clock */
3989 	r100_clock_startup(rdev);
3990 	/* Initialize GART (initialize after TTM so we can allocate
3991 	 * memory through TTM but finalize after TTM) */
3992 	r100_enable_bm(rdev);
3993 	if (rdev->flags & RADEON_IS_PCI) {
3994 		r = r100_pci_gart_enable(rdev);
3995 		if (r)
3996 			return r;
3997 	}
3998 
3999 	/* allocate wb buffer */
4000 	r = radeon_wb_init(rdev);
4001 	if (r)
4002 		return r;
4003 
4004 	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
4005 	if (r) {
4006 		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
4007 		return r;
4008 	}
4009 
4010 	/* Enable IRQ */
4011 	r100_irq_set(rdev);
4012 	rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
4013 	/* 1M ring buffer */
4014 	r = r100_cp_init(rdev, 1024 * 1024);
4015 	if (r) {
4016 		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
4017 		return r;
4018 	}
4019 
4020 	r = radeon_ib_pool_init(rdev);
4021 	if (r) {
4022 		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
4023 		return r;
4024 	}
4025 
4026 	return 0;
4027 }
4028 
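/**
 * r100_resume - resume the asic.
 *
 * @rdev: radeon_device pointer
 *
 * Disable GART, reset and re-post the GPU, reinitialize the surface
 * registers, and run the startup sequence again (r1xx-r4xx).
 * Returns 0 on success, error on failure.
 */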
4029 int r100_resume(struct radeon_device *rdev)
4030 {
4031 	int r;
4032 
	/* Make sure GART is not working */
4034 	if (rdev->flags & RADEON_IS_PCI)
4035 		r100_pci_gart_disable(rdev);
4036 	/* Resume clock before doing reset */
4037 	r100_clock_startup(rdev);
	/* Reset gpu before posting, otherwise ATOM will enter infinite loop */
4039 	if (radeon_asic_reset(rdev)) {
4040 		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
4041 			RREG32(R_000E40_RBBM_STATUS),
4042 			RREG32(R_0007C0_CP_STAT));
4043 	}
4044 	/* post */
4045 	radeon_combios_asic_init(rdev->ddev);
4046 	/* Resume clock after posting */
4047 	r100_clock_startup(rdev);
4048 	/* Initialize surface registers */
4049 	radeon_surface_init(rdev);
4050 
4051 	rdev->accel_working = true;
4052 	r = r100_startup(rdev);
4053 	if (r) {
4054 		rdev->accel_working = false;
4055 	}
4056 	return r;
4057 }
4058 
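/**
 * r100_suspend - suspend the asic.
 *
 * @rdev: radeon_device pointer
 *
 * Disable the CP, writeback, IRQs and GART (r1xx-r4xx).
 * Returns 0.
 */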
4059 int r100_suspend(struct radeon_device *rdev)
4060 {
4061 	r100_cp_disable(rdev);
4062 	radeon_wb_disable(rdev);
4063 	r100_irq_disable(rdev);
4064 	if (rdev->flags & RADEON_IS_PCI)
4065 		r100_pci_gart_disable(rdev);
4066 	return 0;
4067 }
4068 
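/**
 * r100_fini - tear down the asic.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the CP, writeback, IB pool, GEM, GART, AGP, IRQs, fence
 * driver, memory manager and BIOS state (r1xx-r4xx).
 */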
4069 void r100_fini(struct radeon_device *rdev)
4070 {
4071 	r100_cp_fini(rdev);
4072 	radeon_wb_fini(rdev);
4073 	radeon_ib_pool_fini(rdev);
4074 	radeon_gem_fini(rdev);
4075 	if (rdev->flags & RADEON_IS_PCI)
4076 		r100_pci_gart_fini(rdev);
4077 	radeon_agp_fini(rdev);
4078 	radeon_irq_kms_fini(rdev);
4079 	radeon_fence_driver_fini(rdev);
4080 	radeon_bo_fini(rdev);
4081 	radeon_atombios_fini(rdev);
4082 	kfree(rdev->bios);
4083 	rdev->bios = NULL;
4084 }
4085 
4086 /*
4087  * Due to how kexec works, it can leave the hw fully initialised when it
4088  * boots the new kernel. However doing our init sequence with the CP and
4089  * WB stuff setup causes GPU hangs on the RN50 at least. So at startup
4090  * do some quick sanity checks and restore sane values to avoid this
4091  * problem.
4092  */
4093 void r100_restore_sanity(struct radeon_device *rdev)
4094 {
4095 	u32 tmp;
4096 
4097 	tmp = RREG32(RADEON_CP_CSQ_CNTL);
4098 	if (tmp) {
4099 		WREG32(RADEON_CP_CSQ_CNTL, 0);
4100 	}
4101 	tmp = RREG32(RADEON_CP_RB_CNTL);
4102 	if (tmp) {
4103 		WREG32(RADEON_CP_RB_CNTL, 0);
4104 	}
4105 	tmp = RREG32(RADEON_SCRATCH_UMSK);
4106 	if (tmp) {
4107 		WREG32(RADEON_SCRATCH_UMSK, 0);
4108 	}
4109 }
4110 
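/**
 * r100_init - asic specific driver and hw init.
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the asic: BIOS, reset/post, errata, clocks, AGP, VRAM,
 * fences, IRQs, the memory manager and GART, then start acceleration
 * (r1xx-r4xx).  Returns 0 on success, error on failure.
 */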
4111 int r100_init(struct radeon_device *rdev)
4112 {
4113 	int r;
4114 
4115 	/* Register debugfs file specific to this group of asics */
4116 	r100_debugfs(rdev);
4117 	/* Disable VGA */
4118 	r100_vga_render_disable(rdev);
4119 	/* Initialize scratch registers */
4120 	radeon_scratch_init(rdev);
4121 	/* Initialize surface registers */
4122 	radeon_surface_init(rdev);
	/* sanity check some registers to avoid hangs like after kexec */
4124 	r100_restore_sanity(rdev);
4125 	/* TODO: disable VGA need to use VGA request */
	/* BIOS */
4127 	if (!radeon_get_bios(rdev)) {
4128 		if (ASIC_IS_AVIVO(rdev))
4129 			return -EINVAL;
4130 	}
4131 	if (rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting combios for R100-family GPU\n");
4133 		return -EINVAL;
4134 	} else {
4135 		r = radeon_combios_init(rdev);
4136 		if (r)
4137 			return r;
4138 	}
	/* Reset gpu before posting, otherwise ATOM will enter infinite loop */
4140 	if (radeon_asic_reset(rdev)) {
4141 		dev_warn(rdev->dev,
4142 			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
4143 			RREG32(R_000E40_RBBM_STATUS),
4144 			RREG32(R_0007C0_CP_STAT));
4145 	}
	/* check if the card is posted or not */
4147 	if (radeon_boot_test_post_card(rdev) == false)
4148 		return -EINVAL;
4149 	/* Set asic errata */
4150 	r100_errata(rdev);
4151 	/* Initialize clocks */
4152 	radeon_get_clock_info(rdev->ddev);
4153 	/* initialize AGP */
4154 	if (rdev->flags & RADEON_IS_AGP) {
4155 		r = radeon_agp_init(rdev);
4156 		if (r) {
4157 			radeon_agp_disable(rdev);
4158 		}
4159 	}
4160 	/* initialize VRAM */
4161 	r100_mc_init(rdev);
4162 	/* Fence driver */
4163 	r = radeon_fence_driver_init(rdev);
4164 	if (r)
4165 		return r;
4166 	r = radeon_irq_kms_init(rdev);
4167 	if (r)
4168 		return r;
4169 	/* Memory manager */
4170 	r = radeon_bo_init(rdev);
4171 	if (r)
4172 		return r;
4173 	if (rdev->flags & RADEON_IS_PCI) {
4174 		r = r100_pci_gart_init(rdev);
4175 		if (r)
4176 			return r;
4177 	}
4178 	r100_set_safe_registers(rdev);
4179 
4180 	rdev->accel_working = true;
4181 	r = r100_startup(rdev);
4182 	if (r) {
		/* Something went wrong with the accel init, so stop accel */
4184 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
4185 		r100_cp_fini(rdev);
4186 		radeon_wb_fini(rdev);
4187 		radeon_ib_pool_fini(rdev);
4188 		radeon_irq_kms_fini(rdev);
4189 		if (rdev->flags & RADEON_IS_PCI)
4190 			r100_pci_gart_fini(rdev);
4191 		rdev->accel_working = false;
4192 	}
4193 	return 0;
4194 }
4195 
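/**
 * r100_mm_rreg - read an MMIO register.
 *
 * @rdev: radeon_device pointer
 * @reg: register offset
 *
 * Read a register directly through the MMIO aperture, or indirectly
 * via MM_INDEX/MM_DATA when the offset lies beyond it.
 */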
4196 uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
4197 {
4198 	if (reg < rdev->rmmio_size)
4199 		return readl(((void __iomem *)rdev->rmmio) + reg);
4200 	else {
4201 		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
4202 		return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
4203 	}
4204 }
4205 
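/**
 * r100_mm_wreg - write an MMIO register.
 *
 * @rdev: radeon_device pointer
 * @reg: register offset
 * @v: value to write
 *
 * Write a register directly through the MMIO aperture, or indirectly
 * via MM_INDEX/MM_DATA when the offset lies beyond it.
 */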
4206 void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
4207 {
4208 	if (reg < rdev->rmmio_size)
4209 		writel(v, ((void __iomem *)rdev->rmmio) + reg);
4210 	else {
4211 		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
4212 		writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
4213 	}
4214 }
4215 
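/**
 * r100_io_rreg - read a register through the IO BAR.
 *
 * @rdev: radeon_device pointer
 * @reg: register offset
 *
 * Read a register through the PCI IO aperture, or indirectly via
 * MM_INDEX/MM_DATA when the offset lies beyond it.
 */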
4216 u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
4217 {
4218 	if (reg < rdev->rio_mem_size)
4219 		return ioread32(rdev->rio_mem + reg);
4220 	else {
4221 		iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
4222 		return ioread32(rdev->rio_mem + RADEON_MM_DATA);
4223 	}
4224 }
4225 
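/**
 * r100_io_wreg - write a register through the IO BAR.
 *
 * @rdev: radeon_device pointer
 * @reg: register offset
 * @v: value to write
 *
 * Write a register through the PCI IO aperture, or indirectly via
 * MM_INDEX/MM_DATA when the offset lies beyond it.
 */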
4226 void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
4227 {
4228 	if (reg < rdev->rio_mem_size)
4229 		iowrite32(v, rdev->rio_mem + reg);
4230 	else {
4231 		iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
4232 		iowrite32(v, rdev->rio_mem + RADEON_MM_DATA);
4233 	}
4234 }
4235