xref: /linux/drivers/gpu/drm/radeon/r600.c (revision 5d4a2e29fba5b2bef95b96a46b338ec4d76fa4fd)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/slab.h>
29 #include <linux/seq_file.h>
30 #include <linux/firmware.h>
31 #include <linux/platform_device.h>
32 #include "drmP.h"
33 #include "radeon_drm.h"
34 #include "radeon.h"
35 #include "radeon_asic.h"
36 #include "radeon_mode.h"
37 #include "r600d.h"
38 #include "atom.h"
39 #include "avivod.h"
40 
41 #define PFP_UCODE_SIZE 576
42 #define PM4_UCODE_SIZE 1792
43 #define RLC_UCODE_SIZE 768
44 #define R700_PFP_UCODE_SIZE 848
45 #define R700_PM4_UCODE_SIZE 1360
46 #define R700_RLC_UCODE_SIZE 1024
47 #define EVERGREEN_PFP_UCODE_SIZE 1120
48 #define EVERGREEN_PM4_UCODE_SIZE 1376
49 #define EVERGREEN_RLC_UCODE_SIZE 768
50 
51 /* Firmware Names */
52 MODULE_FIRMWARE("radeon/R600_pfp.bin");
53 MODULE_FIRMWARE("radeon/R600_me.bin");
54 MODULE_FIRMWARE("radeon/RV610_pfp.bin");
55 MODULE_FIRMWARE("radeon/RV610_me.bin");
56 MODULE_FIRMWARE("radeon/RV630_pfp.bin");
57 MODULE_FIRMWARE("radeon/RV630_me.bin");
58 MODULE_FIRMWARE("radeon/RV620_pfp.bin");
59 MODULE_FIRMWARE("radeon/RV620_me.bin");
60 MODULE_FIRMWARE("radeon/RV635_pfp.bin");
61 MODULE_FIRMWARE("radeon/RV635_me.bin");
62 MODULE_FIRMWARE("radeon/RV670_pfp.bin");
63 MODULE_FIRMWARE("radeon/RV670_me.bin");
64 MODULE_FIRMWARE("radeon/RS780_pfp.bin");
65 MODULE_FIRMWARE("radeon/RS780_me.bin");
66 MODULE_FIRMWARE("radeon/RV770_pfp.bin");
67 MODULE_FIRMWARE("radeon/RV770_me.bin");
68 MODULE_FIRMWARE("radeon/RV730_pfp.bin");
69 MODULE_FIRMWARE("radeon/RV730_me.bin");
70 MODULE_FIRMWARE("radeon/RV710_pfp.bin");
71 MODULE_FIRMWARE("radeon/RV710_me.bin");
72 MODULE_FIRMWARE("radeon/R600_rlc.bin");
73 MODULE_FIRMWARE("radeon/R700_rlc.bin");
74 MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
75 MODULE_FIRMWARE("radeon/CEDAR_me.bin");
76 MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
77 MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
78 MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
79 MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
80 MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
81 MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
82 MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
83 MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
84 MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
85 MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
86 
87 int r600_debugfs_mc_info_init(struct radeon_device *rdev);
88 
89 /* r600,rv610,rv630,rv620,rv635,rv670 */
90 int r600_mc_wait_for_idle(struct radeon_device *rdev);
91 void r600_gpu_init(struct radeon_device *rdev);
92 void r600_fini(struct radeon_device *rdev);
93 void r600_irq_disable(struct radeon_device *rdev);
94 
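/*
 * Pick the power state and clock mode to request for the planned dynamic PM
 * action (minimum, downclock, upclock or default), and record whether any
 * further up- or downclocking is possible.
 */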
95 void r600_pm_get_dynpm_state(struct radeon_device *rdev)
96 {
97 	int i;
98 
99 	rdev->pm.dynpm_can_upclock = true;
100 	rdev->pm.dynpm_can_downclock = true;
101 
102 	/* power state array is low to high, default is first */
103 	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
104 		int min_power_state_index = 0;
105 
106 		if (rdev->pm.num_power_states > 2)
107 			min_power_state_index = 1;
108 
109 		switch (rdev->pm.dynpm_planned_action) {
110 		case DYNPM_ACTION_MINIMUM:
111 			rdev->pm.requested_power_state_index = min_power_state_index;
112 			rdev->pm.requested_clock_mode_index = 0;
113 			rdev->pm.dynpm_can_downclock = false;
114 			break;
115 		case DYNPM_ACTION_DOWNCLOCK:
116 			if (rdev->pm.current_power_state_index == min_power_state_index) {
117 				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
118 				rdev->pm.dynpm_can_downclock = false;
119 			} else {
120 				if (rdev->pm.active_crtc_count > 1) {
121 					for (i = 0; i < rdev->pm.num_power_states; i++) {
122 						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
123 							continue;
124 						else if (i >= rdev->pm.current_power_state_index) {
125 							rdev->pm.requested_power_state_index =
126 								rdev->pm.current_power_state_index;
127 							break;
128 						} else {
129 							rdev->pm.requested_power_state_index = i;
130 							break;
131 						}
132 					}
133 				} else
134 					rdev->pm.requested_power_state_index =
135 						rdev->pm.current_power_state_index - 1;
136 			}
137 			rdev->pm.requested_clock_mode_index = 0;
138 			/* don't use the power state if CRTCs are active and the no-display flag is set */
139 			if ((rdev->pm.active_crtc_count > 0) &&
140 			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
141 			     clock_info[rdev->pm.requested_clock_mode_index].flags &
142 			     RADEON_PM_MODE_NO_DISPLAY)) {
143 				rdev->pm.requested_power_state_index++;
144 			}
145 			break;
146 		case DYNPM_ACTION_UPCLOCK:
147 			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
148 				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
149 				rdev->pm.dynpm_can_upclock = false;
150 			} else {
151 				if (rdev->pm.active_crtc_count > 1) {
152 					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
153 						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
154 							continue;
155 						else if (i <= rdev->pm.current_power_state_index) {
156 							rdev->pm.requested_power_state_index =
157 								rdev->pm.current_power_state_index;
158 							break;
159 						} else {
160 							rdev->pm.requested_power_state_index = i;
161 							break;
162 						}
163 					}
164 				} else
165 					rdev->pm.requested_power_state_index =
166 						rdev->pm.current_power_state_index + 1;
167 			}
168 			rdev->pm.requested_clock_mode_index = 0;
169 			break;
170 		case DYNPM_ACTION_DEFAULT:
171 			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
172 			rdev->pm.requested_clock_mode_index = 0;
173 			rdev->pm.dynpm_can_upclock = false;
174 			break;
175 		case DYNPM_ACTION_NONE:
176 		default:
177 			DRM_ERROR("Requested mode for undefined action\n");
178 			return;
179 		}
180 	} else {
181 		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
182 		/* for now just select the first power state and switch between clock modes */
183 		/* power state array is low to high, default is first (0) */
184 		if (rdev->pm.active_crtc_count > 1) {
185 			rdev->pm.requested_power_state_index = -1;
186 			/* start at 1 as we don't want the default mode */
187 			for (i = 1; i < rdev->pm.num_power_states; i++) {
188 				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
189 					continue;
190 				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
191 					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
192 					rdev->pm.requested_power_state_index = i;
193 					break;
194 				}
195 			}
196 			/* if nothing selected, grab the default state. */
197 			if (rdev->pm.requested_power_state_index == -1)
198 				rdev->pm.requested_power_state_index = 0;
199 		} else
200 			rdev->pm.requested_power_state_index = 1;
201 
202 		switch (rdev->pm.dynpm_planned_action) {
203 		case DYNPM_ACTION_MINIMUM:
204 			rdev->pm.requested_clock_mode_index = 0;
205 			rdev->pm.dynpm_can_downclock = false;
206 			break;
207 		case DYNPM_ACTION_DOWNCLOCK:
208 			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
209 				if (rdev->pm.current_clock_mode_index == 0) {
210 					rdev->pm.requested_clock_mode_index = 0;
211 					rdev->pm.dynpm_can_downclock = false;
212 				} else
213 					rdev->pm.requested_clock_mode_index =
214 						rdev->pm.current_clock_mode_index - 1;
215 			} else {
216 				rdev->pm.requested_clock_mode_index = 0;
217 				rdev->pm.dynpm_can_downclock = false;
218 			}
219 			/* don't use the power state if CRTCs are active and the no-display flag is set */
220 			if ((rdev->pm.active_crtc_count > 0) &&
221 			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
222 			     clock_info[rdev->pm.requested_clock_mode_index].flags &
223 			     RADEON_PM_MODE_NO_DISPLAY)) {
224 				rdev->pm.requested_clock_mode_index++;
225 			}
226 			break;
227 		case DYNPM_ACTION_UPCLOCK:
228 			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
229 				if (rdev->pm.current_clock_mode_index ==
230 				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
231 					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
232 					rdev->pm.dynpm_can_upclock = false;
233 				} else
234 					rdev->pm.requested_clock_mode_index =
235 						rdev->pm.current_clock_mode_index + 1;
236 			} else {
237 				rdev->pm.requested_clock_mode_index =
238 					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
239 				rdev->pm.dynpm_can_upclock = false;
240 			}
241 			break;
242 		case DYNPM_ACTION_DEFAULT:
243 			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
244 			rdev->pm.requested_clock_mode_index = 0;
245 			rdev->pm.dynpm_can_upclock = false;
246 			break;
247 		case DYNPM_ACTION_NONE:
248 		default:
249 			DRM_ERROR("Requested mode for undefined action\n");
250 			return;
251 		}
252 	}
253 
254 	DRM_DEBUG("Requested: e: %d m: %d p: %d\n",
255 		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
256 		  clock_info[rdev->pm.requested_clock_mode_index].sclk,
257 		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
258 		  clock_info[rdev->pm.requested_clock_mode_index].mclk,
259 		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
260 		  pcie_lanes);
261 }
262 
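/* Return the index of the 'instance'-th power state of the given type,
 * or the default power state index if no match is found.
 */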
263 static int r600_pm_get_type_index(struct radeon_device *rdev,
264 				  enum radeon_pm_state_type ps_type,
265 				  int instance)
266 {
267 	int i;
268 	int found_instance = -1;
269 
270 	for (i = 0; i < rdev->pm.num_power_states; i++) {
271 		if (rdev->pm.power_state[i].type == ps_type) {
272 			found_instance++;
273 			if (found_instance == instance)
274 				return i;
275 		}
276 	}
277 	/* return default if no match */
278 	return rdev->pm.default_power_state_index;
279 }
280 
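/* Fill the PM profile table (default/low/mid/high, single- and multi-head)
 * for RS780-family IGPs, based on how many power states the BIOS exposes.
 */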
281 void rs780_pm_init_profile(struct radeon_device *rdev)
282 {
283 	if (rdev->pm.num_power_states == 2) {
284 		/* default */
285 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
286 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
287 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
288 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
289 		/* low sh */
290 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
291 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
292 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
293 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
294 		/* mid sh */
295 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
296 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
297 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
298 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
299 		/* high sh */
300 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
301 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
302 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
303 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
304 		/* low mh */
305 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
306 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
307 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
308 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
309 		/* mid mh */
310 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
311 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
312 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
313 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
314 		/* high mh */
315 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
316 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
317 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
318 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
319 	} else if (rdev->pm.num_power_states == 3) {
320 		/* default */
321 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
322 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
323 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
324 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
325 		/* low sh */
326 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
327 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
328 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
329 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
330 		/* mid sh */
331 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
332 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
333 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
334 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
335 		/* high sh */
336 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
337 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
338 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
339 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
340 		/* low mh */
341 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
342 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
343 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
344 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
345 		/* mid mh */
346 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
347 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
348 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
349 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
350 		/* high mh */
351 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
352 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
353 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
354 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
355 	} else {
356 		/* default */
357 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
358 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
359 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
360 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
361 		/* low sh */
362 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
363 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
364 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
365 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
366 		/* mid sh */
367 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
368 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
369 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
370 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
371 		/* high sh */
372 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
373 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
374 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
375 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
376 		/* low mh */
377 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
378 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
379 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
380 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
381 		/* mid mh */
382 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
383 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
384 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
385 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
386 		/* high mh */
387 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
388 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
389 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
390 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
391 	}
392 }
393 
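/* Fill the PM profile table for R600-family discrete parts: plain R600 maps
 * every profile to the default state, other chips map battery or performance
 * states onto the low/mid/high profiles.
 */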
394 void r600_pm_init_profile(struct radeon_device *rdev)
395 {
396 	if (rdev->family == CHIP_R600) {
397 		/* XXX */
398 		/* default */
399 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
400 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
401 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
402 		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
403 		/* low sh */
404 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
405 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
406 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
407 		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
408 		/* mid sh */
409 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
410 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
411 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
412 		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
413 		/* high sh */
414 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
415 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
416 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
417 		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
418 		/* low mh */
419 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
420 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
421 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
422 		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
423 		/* mid mh */
424 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
425 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
426 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
427 		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
428 		/* high mh */
429 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
430 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
431 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
432 		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
433 	} else {
434 		if (rdev->pm.num_power_states < 4) {
435 			/* default */
436 			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
437 			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
438 			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
439 			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
440 			/* low sh */
441 			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
442 			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
443 			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
444 			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
445 			/* mid sh */
446 			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
447 			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
448 			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
449 			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
450 			/* high sh */
451 			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
452 			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
453 			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
454 			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
455 			/* low mh */
456 			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
457 			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
458 			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
459 			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
460 			/* mid mh */
461 			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
462 			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
463 			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
464 			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
465 			/* high mh */
466 			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
467 			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
468 			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
469 			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
470 		} else {
471 			/* default */
472 			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
473 			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
474 			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
475 			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
476 			/* low sh */
477 			if (rdev->flags & RADEON_IS_MOBILITY) {
478 				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
479 					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
480 				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
481 					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
482 				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
483 				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
484 			} else {
485 				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
486 					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
487 				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
488 					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
489 				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
490 				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
491 			}
492 			/* mid sh */
493 			if (rdev->flags & RADEON_IS_MOBILITY) {
494 				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
495 					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
496 				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
497 					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
498 				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
499 				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
500 			} else {
501 				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
502 					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
503 				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
504 					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
505 				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
506 				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
507 			}
508 			/* high sh */
509 			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
510 				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
511 			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx =
512 				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
513 			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
514 			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
515 			/* low mh */
516 			if (rdev->flags & RADEON_IS_MOBILITY) {
517 				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
518 					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
519 				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
520 					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
521 				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
522 				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
523 			} else {
524 				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
525 					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
526 				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
527 					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
528 				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
529 				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
530 			}
531 			/* mid mh */
532 			if (rdev->flags & RADEON_IS_MOBILITY) {
533 				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
534 					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
535 				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
536 					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
537 				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
538 				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
539 			} else {
540 				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
541 					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
542 				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
543 					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
544 				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
545 				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
546 			}
547 			/* high mh */
548 			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
549 				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
550 			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx =
551 				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
552 			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
553 			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
554 		}
555 	}
556 }
557 
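/* Apply any software-controlled (VOLTAGE_SW) voltage change required by the
 * requested power state.
 */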
558 void r600_pm_misc(struct radeon_device *rdev)
559 {
560 	int req_ps_idx = rdev->pm.requested_power_state_index;
561 	int req_cm_idx = rdev->pm.requested_clock_mode_index;
562 	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
563 	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
564 
565 	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
566 		if (voltage->voltage != rdev->pm.current_vddc) {
567 			radeon_atom_set_voltage(rdev, voltage->voltage);
568 			rdev->pm.current_vddc = voltage->voltage;
569 			DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
570 		}
571 	}
572 }
573 
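/* Report whether the graphics engine is idle (GUI_ACTIVE clear in GRBM_STATUS). */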
574 bool r600_gui_idle(struct radeon_device *rdev)
575 {
576 	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
577 		return false;
578 	else
579 		return true;
580 }
581 
582 /* hpd for digital panel detect/disconnect */
583 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
584 {
585 	bool connected = false;
586 
587 	if (ASIC_IS_DCE3(rdev)) {
588 		switch (hpd) {
589 		case RADEON_HPD_1:
590 			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
591 				connected = true;
592 			break;
593 		case RADEON_HPD_2:
594 			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
595 				connected = true;
596 			break;
597 		case RADEON_HPD_3:
598 			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
599 				connected = true;
600 			break;
601 		case RADEON_HPD_4:
602 			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
603 				connected = true;
604 			break;
605 			/* DCE 3.2 */
606 		case RADEON_HPD_5:
607 			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
608 				connected = true;
609 			break;
610 		case RADEON_HPD_6:
611 			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
612 				connected = true;
613 			break;
614 		default:
615 			break;
616 		}
617 	} else {
618 		switch (hpd) {
619 		case RADEON_HPD_1:
620 			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
621 				connected = true;
622 			break;
623 		case RADEON_HPD_2:
624 			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
625 				connected = true;
626 			break;
627 		case RADEON_HPD_3:
628 			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
629 				connected = true;
630 			break;
631 		default:
632 			break;
633 		}
634 	}
635 	return connected;
636 }
637 
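/* Program the HPD interrupt polarity so that the next interrupt fires when
 * the connection state changes from its current value.
 */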
638 void r600_hpd_set_polarity(struct radeon_device *rdev,
639 			   enum radeon_hpd_id hpd)
640 {
641 	u32 tmp;
642 	bool connected = r600_hpd_sense(rdev, hpd);
643 
644 	if (ASIC_IS_DCE3(rdev)) {
645 		switch (hpd) {
646 		case RADEON_HPD_1:
647 			tmp = RREG32(DC_HPD1_INT_CONTROL);
648 			if (connected)
649 				tmp &= ~DC_HPDx_INT_POLARITY;
650 			else
651 				tmp |= DC_HPDx_INT_POLARITY;
652 			WREG32(DC_HPD1_INT_CONTROL, tmp);
653 			break;
654 		case RADEON_HPD_2:
655 			tmp = RREG32(DC_HPD2_INT_CONTROL);
656 			if (connected)
657 				tmp &= ~DC_HPDx_INT_POLARITY;
658 			else
659 				tmp |= DC_HPDx_INT_POLARITY;
660 			WREG32(DC_HPD2_INT_CONTROL, tmp);
661 			break;
662 		case RADEON_HPD_3:
663 			tmp = RREG32(DC_HPD3_INT_CONTROL);
664 			if (connected)
665 				tmp &= ~DC_HPDx_INT_POLARITY;
666 			else
667 				tmp |= DC_HPDx_INT_POLARITY;
668 			WREG32(DC_HPD3_INT_CONTROL, tmp);
669 			break;
670 		case RADEON_HPD_4:
671 			tmp = RREG32(DC_HPD4_INT_CONTROL);
672 			if (connected)
673 				tmp &= ~DC_HPDx_INT_POLARITY;
674 			else
675 				tmp |= DC_HPDx_INT_POLARITY;
676 			WREG32(DC_HPD4_INT_CONTROL, tmp);
677 			break;
678 		case RADEON_HPD_5:
679 			tmp = RREG32(DC_HPD5_INT_CONTROL);
680 			if (connected)
681 				tmp &= ~DC_HPDx_INT_POLARITY;
682 			else
683 				tmp |= DC_HPDx_INT_POLARITY;
684 			WREG32(DC_HPD5_INT_CONTROL, tmp);
685 			break;
686 			/* DCE 3.2 */
687 		case RADEON_HPD_6:
688 			tmp = RREG32(DC_HPD6_INT_CONTROL);
689 			if (connected)
690 				tmp &= ~DC_HPDx_INT_POLARITY;
691 			else
692 				tmp |= DC_HPDx_INT_POLARITY;
693 			WREG32(DC_HPD6_INT_CONTROL, tmp);
694 			break;
695 		default:
696 			break;
697 		}
698 	} else {
699 		switch (hpd) {
700 		case RADEON_HPD_1:
701 			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
702 			if (connected)
703 				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
704 			else
705 				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
706 			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
707 			break;
708 		case RADEON_HPD_2:
709 			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
710 			if (connected)
711 				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
712 			else
713 				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
714 			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
715 			break;
716 		case RADEON_HPD_3:
717 			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
718 			if (connected)
719 				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
720 			else
721 				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
722 			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
723 			break;
724 		default:
725 			break;
726 		}
727 	}
728 }
729 
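/* Enable the HPD pins used by the connectors and turn on their hotplug interrupts. */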
730 void r600_hpd_init(struct radeon_device *rdev)
731 {
732 	struct drm_device *dev = rdev->ddev;
733 	struct drm_connector *connector;
734 
735 	if (ASIC_IS_DCE3(rdev)) {
736 		u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
737 		if (ASIC_IS_DCE32(rdev))
738 			tmp |= DC_HPDx_EN;
739 
740 		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
741 			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
742 			switch (radeon_connector->hpd.hpd) {
743 			case RADEON_HPD_1:
744 				WREG32(DC_HPD1_CONTROL, tmp);
745 				rdev->irq.hpd[0] = true;
746 				break;
747 			case RADEON_HPD_2:
748 				WREG32(DC_HPD2_CONTROL, tmp);
749 				rdev->irq.hpd[1] = true;
750 				break;
751 			case RADEON_HPD_3:
752 				WREG32(DC_HPD3_CONTROL, tmp);
753 				rdev->irq.hpd[2] = true;
754 				break;
755 			case RADEON_HPD_4:
756 				WREG32(DC_HPD4_CONTROL, tmp);
757 				rdev->irq.hpd[3] = true;
758 				break;
759 				/* DCE 3.2 */
760 			case RADEON_HPD_5:
761 				WREG32(DC_HPD5_CONTROL, tmp);
762 				rdev->irq.hpd[4] = true;
763 				break;
764 			case RADEON_HPD_6:
765 				WREG32(DC_HPD6_CONTROL, tmp);
766 				rdev->irq.hpd[5] = true;
767 				break;
768 			default:
769 				break;
770 			}
771 		}
772 	} else {
773 		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
774 			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
775 			switch (radeon_connector->hpd.hpd) {
776 			case RADEON_HPD_1:
777 				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
778 				rdev->irq.hpd[0] = true;
779 				break;
780 			case RADEON_HPD_2:
781 				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
782 				rdev->irq.hpd[1] = true;
783 				break;
784 			case RADEON_HPD_3:
785 				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
786 				rdev->irq.hpd[2] = true;
787 				break;
788 			default:
789 				break;
790 			}
791 		}
792 	}
793 	if (rdev->irq.installed)
794 		r600_irq_set(rdev);
795 }
796 
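/* Disable the HPD pins and their hotplug interrupts. */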
797 void r600_hpd_fini(struct radeon_device *rdev)
798 {
799 	struct drm_device *dev = rdev->ddev;
800 	struct drm_connector *connector;
801 
802 	if (ASIC_IS_DCE3(rdev)) {
803 		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
804 			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
805 			switch (radeon_connector->hpd.hpd) {
806 			case RADEON_HPD_1:
807 				WREG32(DC_HPD1_CONTROL, 0);
808 				rdev->irq.hpd[0] = false;
809 				break;
810 			case RADEON_HPD_2:
811 				WREG32(DC_HPD2_CONTROL, 0);
812 				rdev->irq.hpd[1] = false;
813 				break;
814 			case RADEON_HPD_3:
815 				WREG32(DC_HPD3_CONTROL, 0);
816 				rdev->irq.hpd[2] = false;
817 				break;
818 			case RADEON_HPD_4:
819 				WREG32(DC_HPD4_CONTROL, 0);
820 				rdev->irq.hpd[3] = false;
821 				break;
822 				/* DCE 3.2 */
823 			case RADEON_HPD_5:
824 				WREG32(DC_HPD5_CONTROL, 0);
825 				rdev->irq.hpd[4] = false;
826 				break;
827 			case RADEON_HPD_6:
828 				WREG32(DC_HPD6_CONTROL, 0);
829 				rdev->irq.hpd[5] = false;
830 				break;
831 			default:
832 				break;
833 			}
834 		}
835 	} else {
836 		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
837 			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
838 			switch (radeon_connector->hpd.hpd) {
839 			case RADEON_HPD_1:
840 				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
841 				rdev->irq.hpd[0] = false;
842 				break;
843 			case RADEON_HPD_2:
844 				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
845 				rdev->irq.hpd[1] = false;
846 				break;
847 			case RADEON_HPD_3:
848 				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
849 				rdev->irq.hpd[2] = false;
850 				break;
851 			default:
852 				break;
853 			}
854 		}
855 	}
856 }
857 
858 /*
859  * R600 PCIE GART
860  */
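/* Flush the HDP cache and invalidate the VM context 0 TLB entries covering
 * the GART range, polling until the invalidation request completes.
 */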
861 void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
862 {
863 	unsigned i;
864 	u32 tmp;
865 
866 	/* flush hdp cache so updates hit vram */
867 	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
868 
869 	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
870 	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
871 	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
872 	for (i = 0; i < rdev->usec_timeout; i++) {
873 		/* poll the VM invalidation request response */
874 		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
875 		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
876 		if (tmp == 2) {
877 			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
878 			return;
879 		}
880 		if (tmp) {
881 			return;
882 		}
883 		udelay(1);
884 	}
885 }
886 
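/* Initialize the common GART structure and allocate the page table in VRAM
 * (8 bytes per GPU page).
 */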
887 int r600_pcie_gart_init(struct radeon_device *rdev)
888 {
889 	int r;
890 
891 	if (rdev->gart.table.vram.robj) {
892 		WARN(1, "R600 PCIE GART already initialized.\n");
893 		return 0;
894 	}
895 	/* Initialize common gart structure */
896 	r = radeon_gart_init(rdev);
897 	if (r)
898 		return r;
899 	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
900 	return radeon_gart_table_vram_alloc(rdev);
901 }
902 
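/* Pin the GART page table, program the L2 cache, L1 TLBs and VM context 0
 * for the GART aperture, then flush the TLB.
 */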
903 int r600_pcie_gart_enable(struct radeon_device *rdev)
904 {
905 	u32 tmp;
906 	int r, i;
907 
908 	if (rdev->gart.table.vram.robj == NULL) {
909 		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
910 		return -EINVAL;
911 	}
912 	r = radeon_gart_table_vram_pin(rdev);
913 	if (r)
914 		return r;
915 	radeon_gart_restore(rdev);
916 
917 	/* Setup L2 cache */
918 	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
919 				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
920 				EFFECTIVE_L2_QUEUE_SIZE(7));
921 	WREG32(VM_L2_CNTL2, 0);
922 	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
923 	/* Setup TLB control */
924 	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
925 		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
926 		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
927 		ENABLE_WAIT_L2_QUERY;
928 	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
929 	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
930 	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
931 	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
932 	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
933 	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
934 	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
935 	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
936 	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
937 	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
938 	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
939 	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
940 	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
941 	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
942 	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
943 	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
944 	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
945 	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
946 				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
947 	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
948 			(u32)(rdev->dummy_page.addr >> 12));
949 	for (i = 1; i < 7; i++)
950 		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
951 
952 	r600_pcie_gart_tlb_flush(rdev);
953 	rdev->gart.ready = true;
954 	return 0;
955 }
956 
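/* Disable the VM contexts, L2 cache and L1 TLBs, and unpin the GART page table. */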
957 void r600_pcie_gart_disable(struct radeon_device *rdev)
958 {
959 	u32 tmp;
960 	int i, r;
961 
962 	/* Disable all tables */
963 	for (i = 0; i < 7; i++)
964 		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
965 
966 	/* Disable L2 cache */
967 	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
968 				EFFECTIVE_L2_QUEUE_SIZE(7));
969 	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
970 	/* Setup L1 TLB control */
971 	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
972 		ENABLE_WAIT_L2_QUERY;
973 	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
974 	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
975 	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
976 	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
977 	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
978 	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
979 	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
980 	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
981 	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
982 	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
983 	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
984 	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
985 	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
986 	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
987 	if (rdev->gart.table.vram.robj) {
988 		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
989 		if (likely(r == 0)) {
990 			radeon_bo_kunmap(rdev->gart.table.vram.robj);
991 			radeon_bo_unpin(rdev->gart.table.vram.robj);
992 			radeon_bo_unreserve(rdev->gart.table.vram.robj);
993 		}
994 	}
995 }
996 
997 void r600_pcie_gart_fini(struct radeon_device *rdev)
998 {
999 	radeon_gart_fini(rdev);
1000 	r600_pcie_gart_disable(rdev);
1001 	radeon_gart_table_vram_free(rdev);
1002 }
1003 
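/* Program the L2 cache and L1 TLB controls for AGP operation and disable all
 * VM contexts.
 */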
1004 void r600_agp_enable(struct radeon_device *rdev)
1005 {
1006 	u32 tmp;
1007 	int i;
1008 
1009 	/* Setup L2 cache */
1010 	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
1011 				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
1012 				EFFECTIVE_L2_QUEUE_SIZE(7));
1013 	WREG32(VM_L2_CNTL2, 0);
1014 	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
1015 	/* Setup TLB control */
1016 	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
1017 		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
1018 		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
1019 		ENABLE_WAIT_L2_QUERY;
1020 	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
1021 	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
1022 	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
1023 	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
1024 	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
1025 	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
1026 	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
1027 	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
1028 	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
1029 	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
1030 	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
1031 	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
1032 	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
1033 	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
1034 	for (i = 0; i < 7; i++)
1035 		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
1036 }
1037 
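/* Poll SRBM_STATUS until the memory controller busy bits clear; returns 0
 * when idle, -1 on timeout.
 */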
1038 int r600_mc_wait_for_idle(struct radeon_device *rdev)
1039 {
1040 	unsigned i;
1041 	u32 tmp;
1042 
1043 	for (i = 0; i < rdev->usec_timeout; i++) {
1044 		/* read SRBM_STATUS and check the MC busy bits */
1045 		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
1046 		if (!tmp)
1047 			return 0;
1048 		udelay(1);
1049 	}
1050 	return -1;
1051 }
1052 
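/* Reprogram the memory controller apertures (system aperture, FB location and
 * AGP window) with the MC stopped, then disable the VGA renderer.
 */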
1053 static void r600_mc_program(struct radeon_device *rdev)
1054 {
1055 	struct rv515_mc_save save;
1056 	u32 tmp;
1057 	int i, j;
1058 
1059 	/* Initialize HDP */
1060 	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1061 		WREG32((0x2c14 + j), 0x00000000);
1062 		WREG32((0x2c18 + j), 0x00000000);
1063 		WREG32((0x2c1c + j), 0x00000000);
1064 		WREG32((0x2c20 + j), 0x00000000);
1065 		WREG32((0x2c24 + j), 0x00000000);
1066 	}
1067 	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
1068 
1069 	rv515_mc_stop(rdev, &save);
1070 	if (r600_mc_wait_for_idle(rdev)) {
1071 		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1072 	}
1073 	/* Lockout access through VGA aperture (doesn't exist before R600) */
1074 	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
1075 	/* Update configuration */
1076 	if (rdev->flags & RADEON_IS_AGP) {
1077 		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
1078 			/* VRAM before AGP */
1079 			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1080 				rdev->mc.vram_start >> 12);
1081 			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1082 				rdev->mc.gtt_end >> 12);
1083 		} else {
1084 			/* VRAM after AGP */
1085 			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1086 				rdev->mc.gtt_start >> 12);
1087 			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1088 				rdev->mc.vram_end >> 12);
1089 		}
1090 	} else {
1091 		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
1092 		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
1093 	}
1094 	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
1095 	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
1096 	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
1097 	WREG32(MC_VM_FB_LOCATION, tmp);
1098 	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
1099 	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
1100 	WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF);
1101 	if (rdev->flags & RADEON_IS_AGP) {
1102 		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
1103 		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
1104 		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
1105 	} else {
1106 		WREG32(MC_VM_AGP_BASE, 0);
1107 		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
1108 		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
1109 	}
1110 	if (r600_mc_wait_for_idle(rdev)) {
1111 		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1112 	}
1113 	rv515_mc_resume(rdev, &save);
1114 	/* we need to own VRAM, so turn off the VGA renderer here
1115 	 * to stop it overwriting our objects */
1116 	rv515_vga_render_disable(rdev);
1117 }
1118 
1119 /**
1120  * r600_vram_gtt_location - try to find VRAM & GTT location
1121  * @rdev: radeon device structure holding all necessary information
1122  * @mc: memory controller structure holding memory information
1123  *
1124  * The function tries to place VRAM at the same address as in the CPU (PCI)
1125  * address space, as some GPUs seem to have issues when it is reprogrammed
1126  * to a different address space.
1127  *
1128  * If there is not enough space to fit the invisible VRAM after the
1129  * aperture, then we limit the VRAM size to the aperture.
1130  *
1131  * If we are using AGP, then place VRAM adjacent to the AGP aperture, as we
1132  * need them to be contiguous from the GPU's point of view so that we can
1133  * program the GPU to catch accesses outside of them (weird GPU policy, see ??).
1134  *
1135  * This function never fails; the worst case is limiting VRAM or GTT.
1136  *
1137  * Note: GTT start, end and size should be initialized before calling this
1138  * function on AGP platforms.
1139  */
1140 void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
1141 {
1142 	u64 size_bf, size_af;
1143 
1144 	if (mc->mc_vram_size > 0xE0000000) {
1145 		/* leave room for at least 512M GTT */
1146 		dev_warn(rdev->dev, "limiting VRAM\n");
1147 		mc->real_vram_size = 0xE0000000;
1148 		mc->mc_vram_size = 0xE0000000;
1149 	}
1150 	if (rdev->flags & RADEON_IS_AGP) {
1151 		size_bf = mc->gtt_start;
1152 		size_af = 0xFFFFFFFF - mc->gtt_end + 1;
1153 		if (size_bf > size_af) {
1154 			if (mc->mc_vram_size > size_bf) {
1155 				dev_warn(rdev->dev, "limiting VRAM\n");
1156 				mc->real_vram_size = size_bf;
1157 				mc->mc_vram_size = size_bf;
1158 			}
1159 			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
1160 		} else {
1161 			if (mc->mc_vram_size > size_af) {
1162 				dev_warn(rdev->dev, "limiting VRAM\n");
1163 				mc->real_vram_size = size_af;
1164 				mc->mc_vram_size = size_af;
1165 			}
1166 			mc->vram_start = mc->gtt_end;
1167 		}
1168 		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
1169 		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
1170 				mc->mc_vram_size >> 20, mc->vram_start,
1171 				mc->vram_end, mc->real_vram_size >> 20);
1172 	} else {
1173 		u64 base = 0;
1174 		if (rdev->flags & RADEON_IS_IGP)
1175 			base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
1176 		radeon_vram_location(rdev, &rdev->mc, base);
1177 		radeon_gtt_location(rdev, mc);
1178 	}
1179 }
1180 
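/* Determine the VRAM width and size from RAMCFG/CHMAP/CONFIG_MEMSIZE and set
 * up the VRAM and GTT placement.
 */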
1181 int r600_mc_init(struct radeon_device *rdev)
1182 {
1183 	u32 tmp;
1184 	int chansize, numchan;
1185 
1186 	/* Get VRAM information */
1187 	rdev->mc.vram_is_ddr = true;
1188 	tmp = RREG32(RAMCFG);
1189 	if (tmp & CHANSIZE_OVERRIDE) {
1190 		chansize = 16;
1191 	} else if (tmp & CHANSIZE_MASK) {
1192 		chansize = 64;
1193 	} else {
1194 		chansize = 32;
1195 	}
1196 	tmp = RREG32(CHMAP);
1197 	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
1198 	case 0:
1199 	default:
1200 		numchan = 1;
1201 		break;
1202 	case 1:
1203 		numchan = 2;
1204 		break;
1205 	case 2:
1206 		numchan = 4;
1207 		break;
1208 	case 3:
1209 		numchan = 8;
1210 		break;
1211 	}
1212 	rdev->mc.vram_width = numchan * chansize;
1213 	/* Could the aperture size report 0? */
1214 	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
1215 	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
1216 	/* Setup GPU memory space */
1217 	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
1218 	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
1219 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
1220 	r600_vram_gtt_location(rdev, &rdev->mc);
1221 
1222 	if (rdev->flags & RADEON_IS_IGP)
1223 		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
1224 	radeon_update_bandwidth_info(rdev);
1225 	return 0;
1226 }
1227 
1228 /* We don't check whether the GPU really needs a reset; we simply do the
1229  * reset, and it's up to the caller to determine if the GPU needs one. We
1230  * might add a helper function to check that.
1231  */
1232 int r600_gpu_soft_reset(struct radeon_device *rdev)
1233 {
1234 	struct rv515_mc_save save;
1235 	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
1236 				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
1237 				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
1238 				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
1239 				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
1240 				S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
1241 				S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
1242 				S_008010_GUI_ACTIVE(1);
1243 	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
1244 			S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
1245 			S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
1246 			S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
1247 			S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
1248 			S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
1249 			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
1250 			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
1251 	u32 tmp;
1252 
1253 	dev_info(rdev->dev, "GPU softreset\n");
1254 	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
1255 		RREG32(R_008010_GRBM_STATUS));
1256 	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
1257 		RREG32(R_008014_GRBM_STATUS2));
1258 	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
1259 		RREG32(R_000E50_SRBM_STATUS));
1260 	rv515_mc_stop(rdev, &save);
1261 	if (r600_mc_wait_for_idle(rdev)) {
1262 		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1263 	}
1264 	/* Disable CP parsing/prefetching */
1265 	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1266 	/* Check if any of the rendering blocks are busy and reset them */
1267 	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
1268 	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
1269 		tmp = S_008020_SOFT_RESET_CR(1) |
1270 			S_008020_SOFT_RESET_DB(1) |
1271 			S_008020_SOFT_RESET_CB(1) |
1272 			S_008020_SOFT_RESET_PA(1) |
1273 			S_008020_SOFT_RESET_SC(1) |
1274 			S_008020_SOFT_RESET_SMX(1) |
1275 			S_008020_SOFT_RESET_SPI(1) |
1276 			S_008020_SOFT_RESET_SX(1) |
1277 			S_008020_SOFT_RESET_SH(1) |
1278 			S_008020_SOFT_RESET_TC(1) |
1279 			S_008020_SOFT_RESET_TA(1) |
1280 			S_008020_SOFT_RESET_VC(1) |
1281 			S_008020_SOFT_RESET_VGT(1);
1282 		dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
1283 		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
1284 		RREG32(R_008020_GRBM_SOFT_RESET);
1285 		mdelay(15);
1286 		WREG32(R_008020_GRBM_SOFT_RESET, 0);
1287 	}
1288 	/* Reset CP (we always reset CP) */
1289 	tmp = S_008020_SOFT_RESET_CP(1);
1290 	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
1291 	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
1292 	RREG32(R_008020_GRBM_SOFT_RESET);
1293 	mdelay(15);
1294 	WREG32(R_008020_GRBM_SOFT_RESET, 0);
1295 	/* Wait a little for things to settle down */
1296 	mdelay(1);
1297 	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
1298 		RREG32(R_008010_GRBM_STATUS));
1299 	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
1300 		RREG32(R_008014_GRBM_STATUS2));
1301 	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
1302 		RREG32(R_000E50_SRBM_STATUS));
1303 	rv515_mc_resume(rdev, &save);
1304 	return 0;
1305 }
1306 
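/* Check for a GPU lockup: if the GUI is idle, refresh the lockup tracker and
 * report no lockup; otherwise submit two NOPs and let the CP progress check decide.
 */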
1307 bool r600_gpu_is_lockup(struct radeon_device *rdev)
1308 {
1309 	u32 srbm_status;
1310 	u32 grbm_status;
1311 	u32 grbm_status2;
1312 	int r;
1313 
1314 	srbm_status = RREG32(R_000E50_SRBM_STATUS);
1315 	grbm_status = RREG32(R_008010_GRBM_STATUS);
1316 	grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
1317 	if (!G_008010_GUI_ACTIVE(grbm_status)) {
1318 		r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
1319 		return false;
1320 	}
1321 	/* force CP activities */
1322 	r = radeon_ring_lock(rdev, 2);
1323 	if (!r) {
1324 		/* PACKET2 NOP */
1325 		radeon_ring_write(rdev, 0x80000000);
1326 		radeon_ring_write(rdev, 0x80000000);
1327 		radeon_ring_unlock_commit(rdev);
1328 	}
1329 	rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
1330 	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
1331 }
1332 
1333 int r600_asic_reset(struct radeon_device *rdev)
1334 {
1335 	return r600_gpu_soft_reset(rdev);
1336 }
1337 
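/* Build the tile-pipe-to-backend map: distribute the enabled render backends
 * across the tile pipes using a per-pipe swizzle table, two bits per pipe.
 */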
1338 static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
1339 					     u32 num_backends,
1340 					     u32 backend_disable_mask)
1341 {
1342 	u32 backend_map = 0;
1343 	u32 enabled_backends_mask;
1344 	u32 enabled_backends_count;
1345 	u32 cur_pipe;
1346 	u32 swizzle_pipe[R6XX_MAX_PIPES];
1347 	u32 cur_backend;
1348 	u32 i;
1349 
1350 	if (num_tile_pipes > R6XX_MAX_PIPES)
1351 		num_tile_pipes = R6XX_MAX_PIPES;
1352 	if (num_tile_pipes < 1)
1353 		num_tile_pipes = 1;
1354 	if (num_backends > R6XX_MAX_BACKENDS)
1355 		num_backends = R6XX_MAX_BACKENDS;
1356 	if (num_backends < 1)
1357 		num_backends = 1;
1358 
1359 	enabled_backends_mask = 0;
1360 	enabled_backends_count = 0;
1361 	for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
1362 		if (((backend_disable_mask >> i) & 1) == 0) {
1363 			enabled_backends_mask |= (1 << i);
1364 			++enabled_backends_count;
1365 		}
1366 		if (enabled_backends_count == num_backends)
1367 			break;
1368 	}
1369 
1370 	if (enabled_backends_count == 0) {
1371 		enabled_backends_mask = 1;
1372 		enabled_backends_count = 1;
1373 	}
1374 
1375 	if (enabled_backends_count != num_backends)
1376 		num_backends = enabled_backends_count;
1377 
1378 	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
1379 	switch (num_tile_pipes) {
1380 	case 1:
1381 		swizzle_pipe[0] = 0;
1382 		break;
1383 	case 2:
1384 		swizzle_pipe[0] = 0;
1385 		swizzle_pipe[1] = 1;
1386 		break;
1387 	case 3:
1388 		swizzle_pipe[0] = 0;
1389 		swizzle_pipe[1] = 1;
1390 		swizzle_pipe[2] = 2;
1391 		break;
1392 	case 4:
1393 		swizzle_pipe[0] = 0;
1394 		swizzle_pipe[1] = 1;
1395 		swizzle_pipe[2] = 2;
1396 		swizzle_pipe[3] = 3;
1397 		break;
1398 	case 5:
1399 		swizzle_pipe[0] = 0;
1400 		swizzle_pipe[1] = 1;
1401 		swizzle_pipe[2] = 2;
1402 		swizzle_pipe[3] = 3;
1403 		swizzle_pipe[4] = 4;
1404 		break;
1405 	case 6:
1406 		swizzle_pipe[0] = 0;
1407 		swizzle_pipe[1] = 2;
1408 		swizzle_pipe[2] = 4;
1409 		swizzle_pipe[3] = 5;
1410 		swizzle_pipe[4] = 1;
1411 		swizzle_pipe[5] = 3;
1412 		break;
1413 	case 7:
1414 		swizzle_pipe[0] = 0;
1415 		swizzle_pipe[1] = 2;
1416 		swizzle_pipe[2] = 4;
1417 		swizzle_pipe[3] = 6;
1418 		swizzle_pipe[4] = 1;
1419 		swizzle_pipe[5] = 3;
1420 		swizzle_pipe[6] = 5;
1421 		break;
1422 	case 8:
1423 		swizzle_pipe[0] = 0;
1424 		swizzle_pipe[1] = 2;
1425 		swizzle_pipe[2] = 4;
1426 		swizzle_pipe[3] = 6;
1427 		swizzle_pipe[4] = 1;
1428 		swizzle_pipe[5] = 3;
1429 		swizzle_pipe[6] = 5;
1430 		swizzle_pipe[7] = 7;
1431 		break;
1432 	}
1433 
1434 	cur_backend = 0;
1435 	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
1436 		while (((1 << cur_backend) & enabled_backends_mask) == 0)
1437 			cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
1438 
1439 		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
1440 
1441 		cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
1442 	}
1443 
1444 	return backend_map;
1445 }
1446 
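/* Count the number of set bits in val. */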
1447 int r600_count_pipe_bits(uint32_t val)
1448 {
1449 	int i, ret = 0;
1450 
1451 	for (i = 0; i < 32; i++) {
1452 		ret += val & 1;
1453 		val >>= 1;
1454 	}
1455 	return ret;
1456 }
1457 
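/* Program per-family shader engine limits, tiling, the backend map and
 * default SQ/VGT state for r6xx parts.
 */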
1458 void r600_gpu_init(struct radeon_device *rdev)
1459 {
1460 	u32 tiling_config;
1461 	u32 ramcfg;
1462 	u32 backend_map;
1463 	u32 cc_rb_backend_disable;
1464 	u32 cc_gc_shader_pipe_config;
1465 	u32 tmp;
1466 	int i, j;
1467 	u32 sq_config;
1468 	u32 sq_gpr_resource_mgmt_1 = 0;
1469 	u32 sq_gpr_resource_mgmt_2 = 0;
1470 	u32 sq_thread_resource_mgmt = 0;
1471 	u32 sq_stack_resource_mgmt_1 = 0;
1472 	u32 sq_stack_resource_mgmt_2 = 0;
1473 
1474 	/* FIXME: implement */
1475 	switch (rdev->family) {
1476 	case CHIP_R600:
1477 		rdev->config.r600.max_pipes = 4;
1478 		rdev->config.r600.max_tile_pipes = 8;
1479 		rdev->config.r600.max_simds = 4;
1480 		rdev->config.r600.max_backends = 4;
1481 		rdev->config.r600.max_gprs = 256;
1482 		rdev->config.r600.max_threads = 192;
1483 		rdev->config.r600.max_stack_entries = 256;
1484 		rdev->config.r600.max_hw_contexts = 8;
1485 		rdev->config.r600.max_gs_threads = 16;
1486 		rdev->config.r600.sx_max_export_size = 128;
1487 		rdev->config.r600.sx_max_export_pos_size = 16;
1488 		rdev->config.r600.sx_max_export_smx_size = 128;
1489 		rdev->config.r600.sq_num_cf_insts = 2;
1490 		break;
1491 	case CHIP_RV630:
1492 	case CHIP_RV635:
1493 		rdev->config.r600.max_pipes = 2;
1494 		rdev->config.r600.max_tile_pipes = 2;
1495 		rdev->config.r600.max_simds = 3;
1496 		rdev->config.r600.max_backends = 1;
1497 		rdev->config.r600.max_gprs = 128;
1498 		rdev->config.r600.max_threads = 192;
1499 		rdev->config.r600.max_stack_entries = 128;
1500 		rdev->config.r600.max_hw_contexts = 8;
1501 		rdev->config.r600.max_gs_threads = 4;
1502 		rdev->config.r600.sx_max_export_size = 128;
1503 		rdev->config.r600.sx_max_export_pos_size = 16;
1504 		rdev->config.r600.sx_max_export_smx_size = 128;
1505 		rdev->config.r600.sq_num_cf_insts = 2;
1506 		break;
1507 	case CHIP_RV610:
1508 	case CHIP_RV620:
1509 	case CHIP_RS780:
1510 	case CHIP_RS880:
1511 		rdev->config.r600.max_pipes = 1;
1512 		rdev->config.r600.max_tile_pipes = 1;
1513 		rdev->config.r600.max_simds = 2;
1514 		rdev->config.r600.max_backends = 1;
1515 		rdev->config.r600.max_gprs = 128;
1516 		rdev->config.r600.max_threads = 192;
1517 		rdev->config.r600.max_stack_entries = 128;
1518 		rdev->config.r600.max_hw_contexts = 4;
1519 		rdev->config.r600.max_gs_threads = 4;
1520 		rdev->config.r600.sx_max_export_size = 128;
1521 		rdev->config.r600.sx_max_export_pos_size = 16;
1522 		rdev->config.r600.sx_max_export_smx_size = 128;
1523 		rdev->config.r600.sq_num_cf_insts = 1;
1524 		break;
1525 	case CHIP_RV670:
1526 		rdev->config.r600.max_pipes = 4;
1527 		rdev->config.r600.max_tile_pipes = 4;
1528 		rdev->config.r600.max_simds = 4;
1529 		rdev->config.r600.max_backends = 4;
1530 		rdev->config.r600.max_gprs = 192;
1531 		rdev->config.r600.max_threads = 192;
1532 		rdev->config.r600.max_stack_entries = 256;
1533 		rdev->config.r600.max_hw_contexts = 8;
1534 		rdev->config.r600.max_gs_threads = 16;
1535 		rdev->config.r600.sx_max_export_size = 128;
1536 		rdev->config.r600.sx_max_export_pos_size = 16;
1537 		rdev->config.r600.sx_max_export_smx_size = 128;
1538 		rdev->config.r600.sq_num_cf_insts = 2;
1539 		break;
1540 	default:
1541 		break;
1542 	}
1543 
1544 	/* Initialize HDP */
1545 	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1546 		WREG32((0x2c14 + j), 0x00000000);
1547 		WREG32((0x2c18 + j), 0x00000000);
1548 		WREG32((0x2c1c + j), 0x00000000);
1549 		WREG32((0x2c20 + j), 0x00000000);
1550 		WREG32((0x2c24 + j), 0x00000000);
1551 	}
1552 
1553 	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
1554 
1555 	/* Setup tiling */
1556 	tiling_config = 0;
1557 	ramcfg = RREG32(RAMCFG);
1558 	switch (rdev->config.r600.max_tile_pipes) {
1559 	case 1:
1560 		tiling_config |= PIPE_TILING(0);
1561 		break;
1562 	case 2:
1563 		tiling_config |= PIPE_TILING(1);
1564 		break;
1565 	case 4:
1566 		tiling_config |= PIPE_TILING(2);
1567 		break;
1568 	case 8:
1569 		tiling_config |= PIPE_TILING(3);
1570 		break;
1571 	default:
1572 		break;
1573 	}
1574 	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
1575 	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1576 	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1577 	tiling_config |= GROUP_SIZE(0);
1578 	rdev->config.r600.tiling_group_size = 256;
1579 	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
1580 	if (tmp > 3) {
1581 		tiling_config |= ROW_TILING(3);
1582 		tiling_config |= SAMPLE_SPLIT(3);
1583 	} else {
1584 		tiling_config |= ROW_TILING(tmp);
1585 		tiling_config |= SAMPLE_SPLIT(tmp);
1586 	}
1587 	tiling_config |= BANK_SWAPS(1);
1588 
1589 	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
1590 	cc_rb_backend_disable |=
1591 		BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
1592 
1593 	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
1594 	cc_gc_shader_pipe_config |=
1595 		INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
1596 	cc_gc_shader_pipe_config |=
1597 		INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
1598 
1599 	backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
1600 							(R6XX_MAX_BACKENDS -
1601 							 r600_count_pipe_bits((cc_rb_backend_disable &
1602 									       R6XX_MAX_BACKENDS_MASK) >> 16)),
1603 							(cc_rb_backend_disable >> 16));
1604 
1605 	tiling_config |= BACKEND_MAP(backend_map);
1606 	WREG32(GB_TILING_CONFIG, tiling_config);
1607 	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
1608 	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
1609 
1610 	/* Setup pipes */
1611 	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1612 	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1613 	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1614 
1615 	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
1616 	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
1617 	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
1618 
1619 	/* Setup some CP states */
1620 	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
1621 	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));
1622 
1623 	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
1624 			     SYNC_WALKER | SYNC_ALIGNER));
1625 	/* Setup various GPU states */
1626 	if (rdev->family == CHIP_RV670)
1627 		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);
1628 
1629 	tmp = RREG32(SX_DEBUG_1);
1630 	tmp |= SMX_EVENT_RELEASE;
1631 	if ((rdev->family > CHIP_R600))
1632 		tmp |= ENABLE_NEW_SMX_ADDRESS;
1633 	WREG32(SX_DEBUG_1, tmp);
1634 
1635 	if (((rdev->family) == CHIP_R600) ||
1636 	    ((rdev->family) == CHIP_RV630) ||
1637 	    ((rdev->family) == CHIP_RV610) ||
1638 	    ((rdev->family) == CHIP_RV620) ||
1639 	    ((rdev->family) == CHIP_RS780) ||
1640 	    ((rdev->family) == CHIP_RS880)) {
1641 		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
1642 	} else {
1643 		WREG32(DB_DEBUG, 0);
1644 	}
1645 	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
1646 			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
1647 
1648 	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
1649 	WREG32(VGT_NUM_INSTANCES, 0);
1650 
1651 	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
1652 	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));
1653 
1654 	tmp = RREG32(SQ_MS_FIFO_SIZES);
1655 	if (((rdev->family) == CHIP_RV610) ||
1656 	    ((rdev->family) == CHIP_RV620) ||
1657 	    ((rdev->family) == CHIP_RS780) ||
1658 	    ((rdev->family) == CHIP_RS880)) {
1659 		tmp = (CACHE_FIFO_SIZE(0xa) |
1660 		       FETCH_FIFO_HIWATER(0xa) |
1661 		       DONE_FIFO_HIWATER(0xe0) |
1662 		       ALU_UPDATE_FIFO_HIWATER(0x8));
1663 	} else if (((rdev->family) == CHIP_R600) ||
1664 		   ((rdev->family) == CHIP_RV630)) {
1665 		tmp &= ~DONE_FIFO_HIWATER(0xff);
1666 		tmp |= DONE_FIFO_HIWATER(0x4);
1667 	}
1668 	WREG32(SQ_MS_FIFO_SIZES, tmp);
1669 
1670 	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
1671 	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
1672 	 */
1673 	sq_config = RREG32(SQ_CONFIG);
1674 	sq_config &= ~(PS_PRIO(3) |
1675 		       VS_PRIO(3) |
1676 		       GS_PRIO(3) |
1677 		       ES_PRIO(3));
1678 	sq_config |= (DX9_CONSTS |
1679 		      VC_ENABLE |
1680 		      PS_PRIO(0) |
1681 		      VS_PRIO(1) |
1682 		      GS_PRIO(2) |
1683 		      ES_PRIO(3));
1684 
1685 	if ((rdev->family) == CHIP_R600) {
1686 		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
1687 					  NUM_VS_GPRS(124) |
1688 					  NUM_CLAUSE_TEMP_GPRS(4));
1689 		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
1690 					  NUM_ES_GPRS(0));
1691 		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
1692 					   NUM_VS_THREADS(48) |
1693 					   NUM_GS_THREADS(4) |
1694 					   NUM_ES_THREADS(4));
1695 		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
1696 					    NUM_VS_STACK_ENTRIES(128));
1697 		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
1698 					    NUM_ES_STACK_ENTRIES(0));
1699 	} else if (((rdev->family) == CHIP_RV610) ||
1700 		   ((rdev->family) == CHIP_RV620) ||
1701 		   ((rdev->family) == CHIP_RS780) ||
1702 		   ((rdev->family) == CHIP_RS880)) {
1703 		/* no vertex cache */
1704 		sq_config &= ~VC_ENABLE;
1705 
1706 		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1707 					  NUM_VS_GPRS(44) |
1708 					  NUM_CLAUSE_TEMP_GPRS(2));
1709 		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
1710 					  NUM_ES_GPRS(17));
1711 		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1712 					   NUM_VS_THREADS(78) |
1713 					   NUM_GS_THREADS(4) |
1714 					   NUM_ES_THREADS(31));
1715 		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
1716 					    NUM_VS_STACK_ENTRIES(40));
1717 		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
1718 					    NUM_ES_STACK_ENTRIES(16));
1719 	} else if (((rdev->family) == CHIP_RV630) ||
1720 		   ((rdev->family) == CHIP_RV635)) {
1721 		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1722 					  NUM_VS_GPRS(44) |
1723 					  NUM_CLAUSE_TEMP_GPRS(2));
1724 		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
1725 					  NUM_ES_GPRS(18));
1726 		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1727 					   NUM_VS_THREADS(78) |
1728 					   NUM_GS_THREADS(4) |
1729 					   NUM_ES_THREADS(31));
1730 		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
1731 					    NUM_VS_STACK_ENTRIES(40));
1732 		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
1733 					    NUM_ES_STACK_ENTRIES(16));
1734 	} else if ((rdev->family) == CHIP_RV670) {
1735 		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1736 					  NUM_VS_GPRS(44) |
1737 					  NUM_CLAUSE_TEMP_GPRS(2));
1738 		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
1739 					  NUM_ES_GPRS(17));
1740 		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1741 					   NUM_VS_THREADS(78) |
1742 					   NUM_GS_THREADS(4) |
1743 					   NUM_ES_THREADS(31));
1744 		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
1745 					    NUM_VS_STACK_ENTRIES(64));
1746 		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
1747 					    NUM_ES_STACK_ENTRIES(64));
1748 	}
1749 
1750 	WREG32(SQ_CONFIG, sq_config);
1751 	WREG32(SQ_GPR_RESOURCE_MGMT_1,  sq_gpr_resource_mgmt_1);
1752 	WREG32(SQ_GPR_RESOURCE_MGMT_2,  sq_gpr_resource_mgmt_2);
1753 	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
1754 	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
1755 	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
1756 
1757 	if (((rdev->family) == CHIP_RV610) ||
1758 	    ((rdev->family) == CHIP_RV620) ||
1759 	    ((rdev->family) == CHIP_RS780) ||
1760 	    ((rdev->family) == CHIP_RS880)) {
1761 		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
1762 	} else {
1763 		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
1764 	}
1765 
1766 	/* More default values. 2D/3D driver should adjust as needed */
1767 	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
1768 					 S1_X(0x4) | S1_Y(0xc)));
1769 	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
1770 					 S1_X(0x2) | S1_Y(0x2) |
1771 					 S2_X(0xa) | S2_Y(0x6) |
1772 					 S3_X(0x6) | S3_Y(0xa)));
1773 	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
1774 					     S1_X(0x4) | S1_Y(0xc) |
1775 					     S2_X(0x1) | S2_Y(0x6) |
1776 					     S3_X(0xa) | S3_Y(0xe)));
1777 	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
1778 					     S5_X(0x0) | S5_Y(0x0) |
1779 					     S6_X(0xb) | S6_Y(0x4) |
1780 					     S7_X(0x7) | S7_Y(0x8)));
1781 
1782 	WREG32(VGT_STRMOUT_EN, 0);
1783 	tmp = rdev->config.r600.max_pipes * 16;
1784 	switch (rdev->family) {
1785 	case CHIP_RV610:
1786 	case CHIP_RV620:
1787 	case CHIP_RS780:
1788 	case CHIP_RS880:
1789 		tmp += 32;
1790 		break;
1791 	case CHIP_RV670:
1792 		tmp += 128;
1793 		break;
1794 	default:
1795 		break;
1796 	}
1797 	if (tmp > 256) {
1798 		tmp = 256;
1799 	}
1800 	WREG32(VGT_ES_PER_GS, 128);
1801 	WREG32(VGT_GS_PER_ES, tmp);
1802 	WREG32(VGT_GS_PER_VS, 2);
1803 	WREG32(VGT_GS_VERTEX_REUSE, 16);
1804 
1805 	/* more default values. 2D/3D driver should adjust as needed */
1806 	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
1807 	WREG32(VGT_STRMOUT_EN, 0);
1808 	WREG32(SX_MISC, 0);
1809 	WREG32(PA_SC_MODE_CNTL, 0);
1810 	WREG32(PA_SC_AA_CONFIG, 0);
1811 	WREG32(PA_SC_LINE_STIPPLE, 0);
1812 	WREG32(SPI_INPUT_Z, 0);
1813 	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
1814 	WREG32(CB_COLOR7_FRAG, 0);
1815 
1816 	/* Clear render buffer base addresses */
1817 	WREG32(CB_COLOR0_BASE, 0);
1818 	WREG32(CB_COLOR1_BASE, 0);
1819 	WREG32(CB_COLOR2_BASE, 0);
1820 	WREG32(CB_COLOR3_BASE, 0);
1821 	WREG32(CB_COLOR4_BASE, 0);
1822 	WREG32(CB_COLOR5_BASE, 0);
1823 	WREG32(CB_COLOR6_BASE, 0);
1824 	WREG32(CB_COLOR7_BASE, 0);
1825 	WREG32(CB_COLOR7_FRAG, 0);
1826 
1827 	switch (rdev->family) {
1828 	case CHIP_RV610:
1829 	case CHIP_RV620:
1830 	case CHIP_RS780:
1831 	case CHIP_RS880:
1832 		tmp = TC_L2_SIZE(8);
1833 		break;
1834 	case CHIP_RV630:
1835 	case CHIP_RV635:
1836 		tmp = TC_L2_SIZE(4);
1837 		break;
1838 	case CHIP_R600:
1839 		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
1840 		break;
1841 	default:
1842 		tmp = TC_L2_SIZE(0);
1843 		break;
1844 	}
1845 	WREG32(TC_CNTL, tmp);
1846 
1847 	tmp = RREG32(HDP_HOST_PATH_CNTL);
1848 	WREG32(HDP_HOST_PATH_CNTL, tmp);
1849 
1850 	tmp = RREG32(ARB_POP);
1851 	tmp |= ENABLE_TC128;
1852 	WREG32(ARB_POP, tmp);
1853 
1854 	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
1855 	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
1856 			       NUM_CLIP_SEQ(3)));
1857 	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
1858 }
1859 
1860 
1861 /*
1862  * Indirect registers accessor
1863  */
1864 u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
1865 {
1866 	u32 r;
1867 
1868 	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1869 	(void)RREG32(PCIE_PORT_INDEX);
1870 	r = RREG32(PCIE_PORT_DATA);
1871 	return r;
1872 }
1873 
1874 void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
1875 {
1876 	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1877 	(void)RREG32(PCIE_PORT_INDEX);
1878 	WREG32(PCIE_PORT_DATA, (v));
1879 	(void)RREG32(PCIE_PORT_DATA);
1880 }
1881 
1882 /*
1883  * CP & Ring
1884  */
1885 void r600_cp_stop(struct radeon_device *rdev)
1886 {
1887 	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1888 }
1889 
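/* Fetch the PFP, ME and RLC microcode via request_firmware() and check
 * that each image has the expected size for this family.
 */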
1890 int r600_init_microcode(struct radeon_device *rdev)
1891 {
1892 	struct platform_device *pdev;
1893 	const char *chip_name;
1894 	const char *rlc_chip_name;
1895 	size_t pfp_req_size, me_req_size, rlc_req_size;
1896 	char fw_name[30];
1897 	int err;
1898 
1899 	DRM_DEBUG("\n");
1900 
1901 	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
1902 	err = IS_ERR(pdev);
1903 	if (err) {
1904 		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
1905 		return -EINVAL;
1906 	}
1907 
1908 	switch (rdev->family) {
1909 	case CHIP_R600:
1910 		chip_name = "R600";
1911 		rlc_chip_name = "R600";
1912 		break;
1913 	case CHIP_RV610:
1914 		chip_name = "RV610";
1915 		rlc_chip_name = "R600";
1916 		break;
1917 	case CHIP_RV630:
1918 		chip_name = "RV630";
1919 		rlc_chip_name = "R600";
1920 		break;
1921 	case CHIP_RV620:
1922 		chip_name = "RV620";
1923 		rlc_chip_name = "R600";
1924 		break;
1925 	case CHIP_RV635:
1926 		chip_name = "RV635";
1927 		rlc_chip_name = "R600";
1928 		break;
1929 	case CHIP_RV670:
1930 		chip_name = "RV670";
1931 		rlc_chip_name = "R600";
1932 		break;
1933 	case CHIP_RS780:
1934 	case CHIP_RS880:
1935 		chip_name = "RS780";
1936 		rlc_chip_name = "R600";
1937 		break;
1938 	case CHIP_RV770:
1939 		chip_name = "RV770";
1940 		rlc_chip_name = "R700";
1941 		break;
1942 	case CHIP_RV730:
1943 	case CHIP_RV740:
1944 		chip_name = "RV730";
1945 		rlc_chip_name = "R700";
1946 		break;
1947 	case CHIP_RV710:
1948 		chip_name = "RV710";
1949 		rlc_chip_name = "R700";
1950 		break;
1951 	case CHIP_CEDAR:
1952 		chip_name = "CEDAR";
1953 		rlc_chip_name = "CEDAR";
1954 		break;
1955 	case CHIP_REDWOOD:
1956 		chip_name = "REDWOOD";
1957 		rlc_chip_name = "REDWOOD";
1958 		break;
1959 	case CHIP_JUNIPER:
1960 		chip_name = "JUNIPER";
1961 		rlc_chip_name = "JUNIPER";
1962 		break;
1963 	case CHIP_CYPRESS:
1964 	case CHIP_HEMLOCK:
1965 		chip_name = "CYPRESS";
1966 		rlc_chip_name = "CYPRESS";
1967 		break;
1968 	default: BUG();
1969 	}
1970 
1971 	if (rdev->family >= CHIP_CEDAR) {
1972 		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
1973 		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
1974 		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
1975 	} else if (rdev->family >= CHIP_RV770) {
1976 		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
1977 		me_req_size = R700_PM4_UCODE_SIZE * 4;
1978 		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
1979 	} else {
1980 		pfp_req_size = PFP_UCODE_SIZE * 4;
1981 		me_req_size = PM4_UCODE_SIZE * 12;
1982 		rlc_req_size = RLC_UCODE_SIZE * 4;
1983 	}
1984 
1985 	DRM_INFO("Loading %s Microcode\n", chip_name);
1986 
1987 	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
1988 	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
1989 	if (err)
1990 		goto out;
1991 	if (rdev->pfp_fw->size != pfp_req_size) {
1992 		printk(KERN_ERR
1993 		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
1994 		       rdev->pfp_fw->size, fw_name);
1995 		err = -EINVAL;
1996 		goto out;
1997 	}
1998 
1999 	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
2000 	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
2001 	if (err)
2002 		goto out;
2003 	if (rdev->me_fw->size != me_req_size) {
2004 		printk(KERN_ERR
2005 		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2006 		       rdev->me_fw->size, fw_name);
2007 		err = -EINVAL;
2008 	}
2009 
2010 	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
2011 	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
2012 	if (err)
2013 		goto out;
2014 	if (rdev->rlc_fw->size != rlc_req_size) {
2015 		printk(KERN_ERR
2016 		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
2017 		       rdev->rlc_fw->size, fw_name);
2018 		err = -EINVAL;
2019 	}
2020 
2021 out:
2022 	platform_device_unregister(pdev);
2023 
2024 	if (err) {
2025 		if (err != -EINVAL)
2026 			printk(KERN_ERR
2027 			       "r600_cp: Failed to load firmware \"%s\"\n",
2028 			       fw_name);
2029 		release_firmware(rdev->pfp_fw);
2030 		rdev->pfp_fw = NULL;
2031 		release_firmware(rdev->me_fw);
2032 		rdev->me_fw = NULL;
2033 		release_firmware(rdev->rlc_fw);
2034 		rdev->rlc_fw = NULL;
2035 	}
2036 	return err;
2037 }
2038 
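/* Halt and soft-reset the CP, then load the ME and PFP microcode into the
 * CP ucode RAM.
 */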
2039 static int r600_cp_load_microcode(struct radeon_device *rdev)
2040 {
2041 	const __be32 *fw_data;
2042 	int i;
2043 
2044 	if (!rdev->me_fw || !rdev->pfp_fw)
2045 		return -EINVAL;
2046 
2047 	r600_cp_stop(rdev);
2048 
2049 	WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
2050 
2051 	/* Reset cp */
2052 	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2053 	RREG32(GRBM_SOFT_RESET);
2054 	mdelay(15);
2055 	WREG32(GRBM_SOFT_RESET, 0);
2056 
2057 	WREG32(CP_ME_RAM_WADDR, 0);
2058 
2059 	fw_data = (const __be32 *)rdev->me_fw->data;
2060 	WREG32(CP_ME_RAM_WADDR, 0);
2061 	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
2062 		WREG32(CP_ME_RAM_DATA,
2063 		       be32_to_cpup(fw_data++));
2064 
2065 	fw_data = (const __be32 *)rdev->pfp_fw->data;
2066 	WREG32(CP_PFP_UCODE_ADDR, 0);
2067 	for (i = 0; i < PFP_UCODE_SIZE; i++)
2068 		WREG32(CP_PFP_UCODE_DATA,
2069 		       be32_to_cpup(fw_data++));
2070 
2071 	WREG32(CP_PFP_UCODE_ADDR, 0);
2072 	WREG32(CP_ME_RAM_WADDR, 0);
2073 	WREG32(CP_ME_RAM_RADDR, 0);
2074 	return 0;
2075 }
2076 
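/* Emit the ME_INITIALIZE packet on the ring and un-halt the micro engine. */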
2077 int r600_cp_start(struct radeon_device *rdev)
2078 {
2079 	int r;
2080 	uint32_t cp_me;
2081 
2082 	r = radeon_ring_lock(rdev, 7);
2083 	if (r) {
2084 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2085 		return r;
2086 	}
2087 	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
2088 	radeon_ring_write(rdev, 0x1);
2089 	if (rdev->family >= CHIP_CEDAR) {
2090 		radeon_ring_write(rdev, 0x0);
2091 		radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
2092 	} else if (rdev->family >= CHIP_RV770) {
2093 		radeon_ring_write(rdev, 0x0);
2094 		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
2095 	} else {
2096 		radeon_ring_write(rdev, 0x3);
2097 		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
2098 	}
2099 	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
2100 	radeon_ring_write(rdev, 0);
2101 	radeon_ring_write(rdev, 0);
2102 	radeon_ring_unlock_commit(rdev);
2103 
2104 	cp_me = 0xff;
2105 	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
2106 	return 0;
2107 }
2108 
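/* Reset the CP, program the ring buffer size, pointers and base address,
 * then start the CP and run a ring test.
 */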
2109 int r600_cp_resume(struct radeon_device *rdev)
2110 {
2111 	u32 tmp;
2112 	u32 rb_bufsz;
2113 	int r;
2114 
2115 	/* Reset cp */
2116 	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2117 	RREG32(GRBM_SOFT_RESET);
2118 	mdelay(15);
2119 	WREG32(GRBM_SOFT_RESET, 0);
2120 
2121 	/* Set ring buffer size */
2122 	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
2123 	tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2124 #ifdef __BIG_ENDIAN
2125 	tmp |= BUF_SWAP_32BIT;
2126 #endif
2127 	WREG32(CP_RB_CNTL, tmp);
2128 	WREG32(CP_SEM_WAIT_TIMER, 0x4);
2129 
2130 	/* Set the write pointer delay */
2131 	WREG32(CP_RB_WPTR_DELAY, 0);
2132 
2133 	/* Initialize the ring buffer's read and write pointers */
2134 	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
2135 	WREG32(CP_RB_RPTR_WR, 0);
2136 	WREG32(CP_RB_WPTR, 0);
2137 	WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
2138 	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
2139 	mdelay(1);
2140 	WREG32(CP_RB_CNTL, tmp);
2141 
2142 	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
2143 	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
2144 
2145 	rdev->cp.rptr = RREG32(CP_RB_RPTR);
2146 	rdev->cp.wptr = RREG32(CP_RB_WPTR);
2147 
2148 	r600_cp_start(rdev);
2149 	rdev->cp.ready = true;
2150 	r = radeon_ring_test(rdev);
2151 	if (r) {
2152 		rdev->cp.ready = false;
2153 		return r;
2154 	}
2155 	return 0;
2156 }
2157 
2158 void r600_cp_commit(struct radeon_device *rdev)
2159 {
2160 	WREG32(CP_RB_WPTR, rdev->cp.wptr);
2161 	(void)RREG32(CP_RB_WPTR);
2162 }
2163 
2164 void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
2165 {
2166 	u32 rb_bufsz;
2167 
2168 	/* Align ring size */
2169 	rb_bufsz = drm_order(ring_size / 8);
2170 	ring_size = (1 << (rb_bufsz + 1)) * 4;
2171 	rdev->cp.ring_size = ring_size;
2172 	rdev->cp.align_mask = 16 - 1;
2173 }
2174 
2175 void r600_cp_fini(struct radeon_device *rdev)
2176 {
2177 	r600_cp_stop(rdev);
2178 	radeon_ring_fini(rdev);
2179 }
2180 
2181 
2182 /*
2183  * GPU scratch registers helper functions.
2184  */
2185 void r600_scratch_init(struct radeon_device *rdev)
2186 {
2187 	int i;
2188 
2189 	rdev->scratch.num_reg = 7;
2190 	for (i = 0; i < rdev->scratch.num_reg; i++) {
2191 		rdev->scratch.free[i] = true;
2192 		rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
2193 	}
2194 }
2195 
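/* Write a magic value to a scratch register through the CP and poll for it
 * to verify that the ring is working.
 */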
2196 int r600_ring_test(struct radeon_device *rdev)
2197 {
2198 	uint32_t scratch;
2199 	uint32_t tmp = 0;
2200 	unsigned i;
2201 	int r;
2202 
2203 	r = radeon_scratch_get(rdev, &scratch);
2204 	if (r) {
2205 		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
2206 		return r;
2207 	}
2208 	WREG32(scratch, 0xCAFEDEAD);
2209 	r = radeon_ring_lock(rdev, 3);
2210 	if (r) {
2211 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2212 		radeon_scratch_free(rdev, scratch);
2213 		return r;
2214 	}
2215 	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2216 	radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2217 	radeon_ring_write(rdev, 0xDEADBEEF);
2218 	radeon_ring_unlock_commit(rdev);
2219 	for (i = 0; i < rdev->usec_timeout; i++) {
2220 		tmp = RREG32(scratch);
2221 		if (tmp == 0xDEADBEEF)
2222 			break;
2223 		DRM_UDELAY(1);
2224 	}
2225 	if (i < rdev->usec_timeout) {
2226 		DRM_INFO("ring test succeeded in %d usecs\n", i);
2227 	} else {
2228 		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
2229 			  scratch, tmp);
2230 		r = -EINVAL;
2231 	}
2232 	radeon_scratch_free(rdev, scratch);
2233 	return r;
2234 }
2235 
2236 void r600_wb_disable(struct radeon_device *rdev)
2237 {
2238 	int r;
2239 
2240 	WREG32(SCRATCH_UMSK, 0);
2241 	if (rdev->wb.wb_obj) {
2242 		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
2243 		if (unlikely(r != 0))
2244 			return;
2245 		radeon_bo_kunmap(rdev->wb.wb_obj);
2246 		radeon_bo_unpin(rdev->wb.wb_obj);
2247 		radeon_bo_unreserve(rdev->wb.wb_obj);
2248 	}
2249 }
2250 
2251 void r600_wb_fini(struct radeon_device *rdev)
2252 {
2253 	r600_wb_disable(rdev);
2254 	if (rdev->wb.wb_obj) {
2255 		radeon_bo_unref(&rdev->wb.wb_obj);
2256 		rdev->wb.wb = NULL;
2257 		rdev->wb.wb_obj = NULL;
2258 	}
2259 }
2260 
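/* Allocate, pin and map the writeback buffer in GTT if needed and point
 * the scratch and CP read-pointer writeback addresses at it.
 */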
2261 int r600_wb_enable(struct radeon_device *rdev)
2262 {
2263 	int r;
2264 
2265 	if (rdev->wb.wb_obj == NULL) {
2266 		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
2267 				RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
2268 		if (r) {
2269 			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
2270 			return r;
2271 		}
2272 		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
2273 		if (unlikely(r != 0)) {
2274 			r600_wb_fini(rdev);
2275 			return r;
2276 		}
2277 		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
2278 				&rdev->wb.gpu_addr);
2279 		if (r) {
2280 			radeon_bo_unreserve(rdev->wb.wb_obj);
2281 			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
2282 			r600_wb_fini(rdev);
2283 			return r;
2284 		}
2285 		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
2286 		radeon_bo_unreserve(rdev->wb.wb_obj);
2287 		if (r) {
2288 			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
2289 			r600_wb_fini(rdev);
2290 			return r;
2291 		}
2292 	}
2293 	WREG32(SCRATCH_ADDR, (rdev->wb.gpu_addr >> 8) & 0xFFFFFFFF);
2294 	WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC);
2295 	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + 1024) & 0xFF);
2296 	WREG32(SCRATCH_UMSK, 0xff);
2297 	return 0;
2298 }
2299 
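/* Emit a cache flush, wait for 3D idle/clean, write the fence sequence to
 * the fence scratch register and raise the CP interrupt.
 */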
2300 void r600_fence_ring_emit(struct radeon_device *rdev,
2301 			  struct radeon_fence *fence)
2302 {
2303 	/* Also consider EVENT_WRITE_EOP; it handles the interrupts + timestamps + events. */
2304 
2305 	radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
2306 	radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
2307 	/* wait for 3D idle clean */
2308 	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2309 	radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2310 	radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
2311 	/* Emit fence sequence & fire IRQ */
2312 	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2313 	radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2314 	radeon_ring_write(rdev, fence->seq);
2315 	/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
2316 	radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
2317 	radeon_ring_write(rdev, RB_INT_STAT);
2318 }
2319 
2320 int r600_copy_blit(struct radeon_device *rdev,
2321 		   uint64_t src_offset, uint64_t dst_offset,
2322 		   unsigned num_pages, struct radeon_fence *fence)
2323 {
2324 	int r;
2325 
2326 	mutex_lock(&rdev->r600_blit.mutex);
2327 	rdev->r600_blit.vb_ib = NULL;
2328 	r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
2329 	if (r) {
2330 		if (rdev->r600_blit.vb_ib)
2331 			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
2332 		mutex_unlock(&rdev->r600_blit.mutex);
2333 		return r;
2334 	}
2335 	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
2336 	r600_blit_done_copy(rdev, fence);
2337 	mutex_unlock(&rdev->r600_blit.mutex);
2338 	return 0;
2339 }
2340 
2341 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
2342 			 uint32_t tiling_flags, uint32_t pitch,
2343 			 uint32_t offset, uint32_t obj_size)
2344 {
2345 	/* FIXME: implement */
2346 	return 0;
2347 }
2348 
2349 void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
2350 {
2351 	/* FIXME: implement */
2352 }
2353 
2354 
2355 bool r600_card_posted(struct radeon_device *rdev)
2356 {
2357 	uint32_t reg;
2358 
2359 	/* first check CRTCs */
2360 	reg = RREG32(D1CRTC_CONTROL) |
2361 		RREG32(D2CRTC_CONTROL);
2362 	if (reg & CRTC_EN)
2363 		return true;
2364 
2365 	/* then check MEM_SIZE, in case the crtcs are off */
2366 	if (RREG32(CONFIG_MEMSIZE))
2367 		return true;
2368 
2369 	return false;
2370 }
2371 
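/* Bring the ASIC up: load microcode if needed, program the MC, enable
 * GART or AGP, init the GPU, blitter, interrupts, CP and writeback.
 */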
2372 int r600_startup(struct radeon_device *rdev)
2373 {
2374 	int r;
2375 
2376 	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
2377 		r = r600_init_microcode(rdev);
2378 		if (r) {
2379 			DRM_ERROR("Failed to load firmware!\n");
2380 			return r;
2381 		}
2382 	}
2383 
2384 	r600_mc_program(rdev);
2385 	if (rdev->flags & RADEON_IS_AGP) {
2386 		r600_agp_enable(rdev);
2387 	} else {
2388 		r = r600_pcie_gart_enable(rdev);
2389 		if (r)
2390 			return r;
2391 	}
2392 	r600_gpu_init(rdev);
2393 	r = r600_blit_init(rdev);
2394 	if (r) {
2395 		r600_blit_fini(rdev);
2396 		rdev->asic->copy = NULL;
2397 		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
2398 	}
2399 	/* pin copy shader into vram */
2400 	if (rdev->r600_blit.shader_obj) {
2401 		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
2402 		if (unlikely(r != 0))
2403 			return r;
2404 		r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
2405 				&rdev->r600_blit.shader_gpu_addr);
2406 		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
2407 		if (r) {
2408 			dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
2409 			return r;
2410 		}
2411 	}
2412 	/* Enable IRQ */
2413 	r = r600_irq_init(rdev);
2414 	if (r) {
2415 		DRM_ERROR("radeon: IH init failed (%d).\n", r);
2416 		radeon_irq_kms_fini(rdev);
2417 		return r;
2418 	}
2419 	r600_irq_set(rdev);
2420 
2421 	r = radeon_ring_init(rdev, rdev->cp.ring_size);
2422 	if (r)
2423 		return r;
2424 	r = r600_cp_load_microcode(rdev);
2425 	if (r)
2426 		return r;
2427 	r = r600_cp_resume(rdev);
2428 	if (r)
2429 		return r;
2430 	/* write back buffers are not vital so don't worry about failure */
2431 	r600_wb_enable(rdev);
2432 	return 0;
2433 }
2434 
2435 void r600_vga_set_state(struct radeon_device *rdev, bool state)
2436 {
2437 	uint32_t temp;
2438 
2439 	temp = RREG32(CONFIG_CNTL);
2440 	if (state == false) {
2441 		temp &= ~(1<<0);
2442 		temp |= (1<<1);
2443 	} else {
2444 		temp &= ~(1<<1);
2445 	}
2446 	WREG32(CONFIG_CNTL, temp);
2447 }
2448 
2449 int r600_resume(struct radeon_device *rdev)
2450 {
2451 	int r;
2452 
2453 	/* Do not reset the GPU before posting; on r600 hw, unlike r500 hw,
2454 	 * posting performs the necessary tasks to bring the GPU back into
2455 	 * good shape.
2456 	 */
2457 	/* post card */
2458 	atom_asic_init(rdev->mode_info.atom_context);
2459 	/* Initialize clocks */
2460 	r = radeon_clocks_init(rdev);
2461 	if (r) {
2462 		return r;
2463 	}
2464 
2465 	r = r600_startup(rdev);
2466 	if (r) {
2467 		DRM_ERROR("r600 startup failed on resume\n");
2468 		return r;
2469 	}
2470 
2471 	r = r600_ib_test(rdev);
2472 	if (r) {
2473 		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
2474 		return r;
2475 	}
2476 
2477 	r = r600_audio_init(rdev);
2478 	if (r) {
2479 		DRM_ERROR("radeon: audio resume failed\n");
2480 		return r;
2481 	}
2482 
2483 	return r;
2484 }
2485 
2486 int r600_suspend(struct radeon_device *rdev)
2487 {
2488 	int r;
2489 
2490 	r600_audio_fini(rdev);
2491 	/* FIXME: we should wait for ring to be empty */
2492 	r600_cp_stop(rdev);
2493 	rdev->cp.ready = false;
2494 	r600_irq_suspend(rdev);
2495 	r600_wb_disable(rdev);
2496 	r600_pcie_gart_disable(rdev);
2497 	/* unpin shaders bo */
2498 	if (rdev->r600_blit.shader_obj) {
2499 		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
2500 		if (!r) {
2501 			radeon_bo_unpin(rdev->r600_blit.shader_obj);
2502 			radeon_bo_unreserve(rdev->r600_blit.shader_obj);
2503 		}
2504 	}
2505 	return 0;
2506 }
2507 
2508 /* The plan is to move initialization into this function and use
2509  * helper functions so that radeon_device_init does pretty much
2510  * nothing more than call ASIC-specific functions. This
2511  * should also allow removing a bunch of callbacks
2512  * like vram_info.
2513  */
2514 int r600_init(struct radeon_device *rdev)
2515 {
2516 	int r;
2517 
2518 	r = radeon_dummy_page_init(rdev);
2519 	if (r)
2520 		return r;
2521 	if (r600_debugfs_mc_info_init(rdev)) {
2522 		DRM_ERROR("Failed to register debugfs file for mc!\n");
2523 	}
2524 	/* This doesn't do much */
2525 	r = radeon_gem_init(rdev);
2526 	if (r)
2527 		return r;
2528 	/* Read BIOS */
2529 	if (!radeon_get_bios(rdev)) {
2530 		if (ASIC_IS_AVIVO(rdev))
2531 			return -EINVAL;
2532 	}
2533 	/* Must be an ATOMBIOS */
2534 	if (!rdev->is_atom_bios) {
2535 		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
2536 		return -EINVAL;
2537 	}
2538 	r = radeon_atombios_init(rdev);
2539 	if (r)
2540 		return r;
2541 	/* Post card if necessary */
2542 	if (!r600_card_posted(rdev)) {
2543 		if (!rdev->bios) {
2544 			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
2545 			return -EINVAL;
2546 		}
2547 		DRM_INFO("GPU not posted. posting now...\n");
2548 		atom_asic_init(rdev->mode_info.atom_context);
2549 	}
2550 	/* Initialize scratch registers */
2551 	r600_scratch_init(rdev);
2552 	/* Initialize surface registers */
2553 	radeon_surface_init(rdev);
2554 	/* Initialize clocks */
2555 	radeon_get_clock_info(rdev->ddev);
2556 	r = radeon_clocks_init(rdev);
2557 	if (r)
2558 		return r;
2559 	/* Fence driver */
2560 	r = radeon_fence_driver_init(rdev);
2561 	if (r)
2562 		return r;
2563 	if (rdev->flags & RADEON_IS_AGP) {
2564 		r = radeon_agp_init(rdev);
2565 		if (r)
2566 			radeon_agp_disable(rdev);
2567 	}
2568 	r = r600_mc_init(rdev);
2569 	if (r)
2570 		return r;
2571 	/* Memory manager */
2572 	r = radeon_bo_init(rdev);
2573 	if (r)
2574 		return r;
2575 
2576 	r = radeon_irq_kms_init(rdev);
2577 	if (r)
2578 		return r;
2579 
2580 	rdev->cp.ring_obj = NULL;
2581 	r600_ring_init(rdev, 1024 * 1024);
2582 
2583 	rdev->ih.ring_obj = NULL;
2584 	r600_ih_ring_init(rdev, 64 * 1024);
2585 
2586 	r = r600_pcie_gart_init(rdev);
2587 	if (r)
2588 		return r;
2589 
2590 	rdev->accel_working = true;
2591 	r = r600_startup(rdev);
2592 	if (r) {
2593 		dev_err(rdev->dev, "disabling GPU acceleration\n");
2594 		r600_cp_fini(rdev);
2595 		r600_wb_fini(rdev);
2596 		r600_irq_fini(rdev);
2597 		radeon_irq_kms_fini(rdev);
2598 		r600_pcie_gart_fini(rdev);
2599 		rdev->accel_working = false;
2600 	}
2601 	if (rdev->accel_working) {
2602 		r = radeon_ib_pool_init(rdev);
2603 		if (r) {
2604 			dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
2605 			rdev->accel_working = false;
2606 		} else {
2607 			r = r600_ib_test(rdev);
2608 			if (r) {
2609 				dev_err(rdev->dev, "IB test failed (%d).\n", r);
2610 				rdev->accel_working = false;
2611 			}
2612 		}
2613 	}
2614 
2615 	r = r600_audio_init(rdev);
2616 	if (r)
2617 		return r; /* TODO error handling */
2618 	return 0;
2619 }
2620 
2621 void r600_fini(struct radeon_device *rdev)
2622 {
2623 	r600_audio_fini(rdev);
2624 	r600_blit_fini(rdev);
2625 	r600_cp_fini(rdev);
2626 	r600_wb_fini(rdev);
2627 	r600_irq_fini(rdev);
2628 	radeon_irq_kms_fini(rdev);
2629 	r600_pcie_gart_fini(rdev);
2630 	radeon_agp_fini(rdev);
2631 	radeon_gem_fini(rdev);
2632 	radeon_fence_driver_fini(rdev);
2633 	radeon_clocks_fini(rdev);
2634 	radeon_bo_fini(rdev);
2635 	radeon_atombios_fini(rdev);
2636 	kfree(rdev->bios);
2637 	rdev->bios = NULL;
2638 	radeon_dummy_page_fini(rdev);
2639 }
2640 
2641 
2642 /*
2643  * CS stuff
2644  */
2645 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
2646 {
2647 	/* FIXME: implement */
2648 	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2649 	radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
2650 	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
2651 	radeon_ring_write(rdev, ib->length_dw);
2652 }
2653 
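/* Schedule a small IB that writes a magic value to a scratch register and
 * poll for it to verify that IB execution works.
 */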
2654 int r600_ib_test(struct radeon_device *rdev)
2655 {
2656 	struct radeon_ib *ib;
2657 	uint32_t scratch;
2658 	uint32_t tmp = 0;
2659 	unsigned i;
2660 	int r;
2661 
2662 	r = radeon_scratch_get(rdev, &scratch);
2663 	if (r) {
2664 		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
2665 		return r;
2666 	}
2667 	WREG32(scratch, 0xCAFEDEAD);
2668 	r = radeon_ib_get(rdev, &ib);
2669 	if (r) {
2670 		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
2671 		return r;
2672 	}
2673 	ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
2674 	ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2675 	ib->ptr[2] = 0xDEADBEEF;
2676 	ib->ptr[3] = PACKET2(0);
2677 	ib->ptr[4] = PACKET2(0);
2678 	ib->ptr[5] = PACKET2(0);
2679 	ib->ptr[6] = PACKET2(0);
2680 	ib->ptr[7] = PACKET2(0);
2681 	ib->ptr[8] = PACKET2(0);
2682 	ib->ptr[9] = PACKET2(0);
2683 	ib->ptr[10] = PACKET2(0);
2684 	ib->ptr[11] = PACKET2(0);
2685 	ib->ptr[12] = PACKET2(0);
2686 	ib->ptr[13] = PACKET2(0);
2687 	ib->ptr[14] = PACKET2(0);
2688 	ib->ptr[15] = PACKET2(0);
2689 	ib->length_dw = 16;
2690 	r = radeon_ib_schedule(rdev, ib);
2691 	if (r) {
2692 		radeon_scratch_free(rdev, scratch);
2693 		radeon_ib_free(rdev, &ib);
2694 		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
2695 		return r;
2696 	}
2697 	r = radeon_fence_wait(ib->fence, false);
2698 	if (r) {
2699 		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
2700 		return r;
2701 	}
2702 	for (i = 0; i < rdev->usec_timeout; i++) {
2703 		tmp = RREG32(scratch);
2704 		if (tmp == 0xDEADBEEF)
2705 			break;
2706 		DRM_UDELAY(1);
2707 	}
2708 	if (i < rdev->usec_timeout) {
2709 		DRM_INFO("ib test succeeded in %u usecs\n", i);
2710 	} else {
2711 		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
2712 			  scratch, tmp);
2713 		r = -EINVAL;
2714 	}
2715 	radeon_scratch_free(rdev, scratch);
2716 	radeon_ib_free(rdev, &ib);
2717 	return r;
2718 }
2719 
2720 /*
2721  * Interrupts
2722  *
2723  * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works pretty
2724  * the same as the CP ring buffer, but in reverse.  Rather than the CPU
2725  * writing to the ring and the GPU consuming, the GPU writes to the ring
2726  * and host consumes.  As the host irq handler processes interrupts, it
2727  * increments the rptr.  When the rptr catches up with the wptr, all the
2728  * current interrupts have been processed.
2729  */
2730 
2731 void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
2732 {
2733 	u32 rb_bufsz;
2734 
2735 	/* Align ring size */
2736 	rb_bufsz = drm_order(ring_size / 4);
2737 	ring_size = (1 << rb_bufsz) * 4;
2738 	rdev->ih.ring_size = ring_size;
2739 	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
2740 	rdev->ih.rptr = 0;
2741 }
2742 
2743 static int r600_ih_ring_alloc(struct radeon_device *rdev)
2744 {
2745 	int r;
2746 
2747 	/* Allocate ring buffer */
2748 	if (rdev->ih.ring_obj == NULL) {
2749 		r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
2750 				     true,
2751 				     RADEON_GEM_DOMAIN_GTT,
2752 				     &rdev->ih.ring_obj);
2753 		if (r) {
2754 			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
2755 			return r;
2756 		}
2757 		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2758 		if (unlikely(r != 0))
2759 			return r;
2760 		r = radeon_bo_pin(rdev->ih.ring_obj,
2761 				  RADEON_GEM_DOMAIN_GTT,
2762 				  &rdev->ih.gpu_addr);
2763 		if (r) {
2764 			radeon_bo_unreserve(rdev->ih.ring_obj);
2765 			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
2766 			return r;
2767 		}
2768 		r = radeon_bo_kmap(rdev->ih.ring_obj,
2769 				   (void **)&rdev->ih.ring);
2770 		radeon_bo_unreserve(rdev->ih.ring_obj);
2771 		if (r) {
2772 			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
2773 			return r;
2774 		}
2775 	}
2776 	return 0;
2777 }
2778 
2779 static void r600_ih_ring_fini(struct radeon_device *rdev)
2780 {
2781 	int r;
2782 	if (rdev->ih.ring_obj) {
2783 		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2784 		if (likely(r == 0)) {
2785 			radeon_bo_kunmap(rdev->ih.ring_obj);
2786 			radeon_bo_unpin(rdev->ih.ring_obj);
2787 			radeon_bo_unreserve(rdev->ih.ring_obj);
2788 		}
2789 		radeon_bo_unref(&rdev->ih.ring_obj);
2790 		rdev->ih.ring = NULL;
2791 		rdev->ih.ring_obj = NULL;
2792 	}
2793 }
2794 
2795 void r600_rlc_stop(struct radeon_device *rdev)
2796 {
2797 
2798 	if ((rdev->family >= CHIP_RV770) &&
2799 	    (rdev->family <= CHIP_RV740)) {
2800 		/* r7xx asics need to soft reset RLC before halting */
2801 		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
2802 		RREG32(SRBM_SOFT_RESET);
2803 		udelay(15000);
2804 		WREG32(SRBM_SOFT_RESET, 0);
2805 		RREG32(SRBM_SOFT_RESET);
2806 	}
2807 
2808 	WREG32(RLC_CNTL, 0);
2809 }
2810 
2811 static void r600_rlc_start(struct radeon_device *rdev)
2812 {
2813 	WREG32(RLC_CNTL, RLC_ENABLE);
2814 }
2815 
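/* Halt the RLC, clear its state registers, load the RLC microcode and
 * re-enable it.
 */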
2816 static int r600_rlc_init(struct radeon_device *rdev)
2817 {
2818 	u32 i;
2819 	const __be32 *fw_data;
2820 
2821 	if (!rdev->rlc_fw)
2822 		return -EINVAL;
2823 
2824 	r600_rlc_stop(rdev);
2825 
2826 	WREG32(RLC_HB_BASE, 0);
2827 	WREG32(RLC_HB_CNTL, 0);
2828 	WREG32(RLC_HB_RPTR, 0);
2829 	WREG32(RLC_HB_WPTR, 0);
2830 	WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
2831 	WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
2832 	WREG32(RLC_MC_CNTL, 0);
2833 	WREG32(RLC_UCODE_CNTL, 0);
2834 
2835 	fw_data = (const __be32 *)rdev->rlc_fw->data;
2836 	if (rdev->family >= CHIP_CEDAR) {
2837 		for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
2838 			WREG32(RLC_UCODE_ADDR, i);
2839 			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2840 		}
2841 	} else if (rdev->family >= CHIP_RV770) {
2842 		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
2843 			WREG32(RLC_UCODE_ADDR, i);
2844 			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2845 		}
2846 	} else {
2847 		for (i = 0; i < RLC_UCODE_SIZE; i++) {
2848 			WREG32(RLC_UCODE_ADDR, i);
2849 			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2850 		}
2851 	}
2852 	WREG32(RLC_UCODE_ADDR, 0);
2853 
2854 	r600_rlc_start(rdev);
2855 
2856 	return 0;
2857 }
2858 
2859 static void r600_enable_interrupts(struct radeon_device *rdev)
2860 {
2861 	u32 ih_cntl = RREG32(IH_CNTL);
2862 	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2863 
2864 	ih_cntl |= ENABLE_INTR;
2865 	ih_rb_cntl |= IH_RB_ENABLE;
2866 	WREG32(IH_CNTL, ih_cntl);
2867 	WREG32(IH_RB_CNTL, ih_rb_cntl);
2868 	rdev->ih.enabled = true;
2869 }
2870 
2871 void r600_disable_interrupts(struct radeon_device *rdev)
2872 {
2873 	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2874 	u32 ih_cntl = RREG32(IH_CNTL);
2875 
2876 	ih_rb_cntl &= ~IH_RB_ENABLE;
2877 	ih_cntl &= ~ENABLE_INTR;
2878 	WREG32(IH_RB_CNTL, ih_rb_cntl);
2879 	WREG32(IH_CNTL, ih_cntl);
2880 	/* set rptr, wptr to 0 */
2881 	WREG32(IH_RB_RPTR, 0);
2882 	WREG32(IH_RB_WPTR, 0);
2883 	rdev->ih.enabled = false;
2884 	rdev->ih.wptr = 0;
2885 	rdev->ih.rptr = 0;
2886 }
2887 
2888 static void r600_disable_interrupt_state(struct radeon_device *rdev)
2889 {
2890 	u32 tmp;
2891 
2892 	WREG32(CP_INT_CNTL, 0);
2893 	WREG32(GRBM_INT_CNTL, 0);
2894 	WREG32(DxMODE_INT_MASK, 0);
2895 	if (ASIC_IS_DCE3(rdev)) {
2896 		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
2897 		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
2898 		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2899 		WREG32(DC_HPD1_INT_CONTROL, tmp);
2900 		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2901 		WREG32(DC_HPD2_INT_CONTROL, tmp);
2902 		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2903 		WREG32(DC_HPD3_INT_CONTROL, tmp);
2904 		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2905 		WREG32(DC_HPD4_INT_CONTROL, tmp);
2906 		if (ASIC_IS_DCE32(rdev)) {
2907 			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2908 			WREG32(DC_HPD5_INT_CONTROL, tmp);
2909 			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2910 			WREG32(DC_HPD6_INT_CONTROL, tmp);
2911 		}
2912 	} else {
2913 		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
2914 		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
2915 		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2916 		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
2917 		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2918 		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
2919 		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2920 		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
2921 	}
2922 }
2923 
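/* Set up the IH: allocate the ring, init the RLC, program the IH ring and
 * control registers, mask all sources, then enable the IH.
 */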
2924 int r600_irq_init(struct radeon_device *rdev)
2925 {
2926 	int ret = 0;
2927 	int rb_bufsz;
2928 	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
2929 
2930 	/* allocate ring */
2931 	ret = r600_ih_ring_alloc(rdev);
2932 	if (ret)
2933 		return ret;
2934 
2935 	/* disable irqs */
2936 	r600_disable_interrupts(rdev);
2937 
2938 	/* init rlc */
2939 	ret = r600_rlc_init(rdev);
2940 	if (ret) {
2941 		r600_ih_ring_fini(rdev);
2942 		return ret;
2943 	}
2944 
2945 	/* setup interrupt control */
2946 	/* set dummy read address to ring address */
2947 	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
2948 	interrupt_cntl = RREG32(INTERRUPT_CNTL);
2949 	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
2950 	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
2951 	 */
2952 	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
2953 	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
2954 	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
2955 	WREG32(INTERRUPT_CNTL, interrupt_cntl);
2956 
2957 	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
2958 	rb_bufsz = drm_order(rdev->ih.ring_size / 4);
2959 
2960 	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
2961 		      IH_WPTR_OVERFLOW_CLEAR |
2962 		      (rb_bufsz << 1));
2963 	/* WPTR writeback, not yet */
2964 	/*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/
2965 	WREG32(IH_RB_WPTR_ADDR_LO, 0);
2966 	WREG32(IH_RB_WPTR_ADDR_HI, 0);
2967 
2968 	WREG32(IH_RB_CNTL, ih_rb_cntl);
2969 
2970 	/* set rptr, wptr to 0 */
2971 	WREG32(IH_RB_RPTR, 0);
2972 	WREG32(IH_RB_WPTR, 0);
2973 
2974 	/* Default settings for IH_CNTL (disabled at first) */
2975 	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
2976 	/* RPTR_REARM only works if msi's are enabled */
2977 	if (rdev->msi_enabled)
2978 		ih_cntl |= RPTR_REARM;
2979 
2980 #ifdef __BIG_ENDIAN
2981 	ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
2982 #endif
2983 	WREG32(IH_CNTL, ih_cntl);
2984 
2985 	/* force the active interrupt state to all disabled */
2986 	if (rdev->family >= CHIP_CEDAR)
2987 		evergreen_disable_interrupt_state(rdev);
2988 	else
2989 		r600_disable_interrupt_state(rdev);
2990 
2991 	/* enable irqs */
2992 	r600_enable_interrupts(rdev);
2993 
2994 	return ret;
2995 }
2996 
2997 void r600_irq_suspend(struct radeon_device *rdev)
2998 {
2999 	r600_irq_disable(rdev);
3000 	r600_rlc_stop(rdev);
3001 }
3002 
3003 void r600_irq_fini(struct radeon_device *rdev)
3004 {
3005 	r600_irq_suspend(rdev);
3006 	r600_ih_ring_fini(rdev);
3007 }
3008 
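/* Program the interrupt enable bits (CP, vblank, HPD, HDMI, GUI idle)
 * according to the currently requested rdev->irq state.
 */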
3009 int r600_irq_set(struct radeon_device *rdev)
3010 {
3011 	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
3012 	u32 mode_int = 0;
3013 	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
3014 	u32 grbm_int_cntl = 0;
3015 	u32 hdmi1, hdmi2;
3016 
3017 	if (!rdev->irq.installed) {
3018 		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
3019 		return -EINVAL;
3020 	}
3021 	/* don't enable anything if the ih is disabled */
3022 	if (!rdev->ih.enabled) {
3023 		r600_disable_interrupts(rdev);
3024 		/* force the active interrupt state to all disabled */
3025 		r600_disable_interrupt_state(rdev);
3026 		return 0;
3027 	}
3028 
3029 	hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
3030 	if (ASIC_IS_DCE3(rdev)) {
3031 		hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
3032 		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3033 		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3034 		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3035 		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
3036 		if (ASIC_IS_DCE32(rdev)) {
3037 			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
3038 			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
3039 		}
3040 	} else {
3041 		hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
3042 		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3043 		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3044 		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3045 	}
3046 
3047 	if (rdev->irq.sw_int) {
3048 		DRM_DEBUG("r600_irq_set: sw int\n");
3049 		cp_int_cntl |= RB_INT_ENABLE;
3050 	}
3051 	if (rdev->irq.crtc_vblank_int[0]) {
3052 		DRM_DEBUG("r600_irq_set: vblank 0\n");
3053 		mode_int |= D1MODE_VBLANK_INT_MASK;
3054 	}
3055 	if (rdev->irq.crtc_vblank_int[1]) {
3056 		DRM_DEBUG("r600_irq_set: vblank 1\n");
3057 		mode_int |= D2MODE_VBLANK_INT_MASK;
3058 	}
3059 	if (rdev->irq.hpd[0]) {
3060 		DRM_DEBUG("r600_irq_set: hpd 1\n");
3061 		hpd1 |= DC_HPDx_INT_EN;
3062 	}
3063 	if (rdev->irq.hpd[1]) {
3064 		DRM_DEBUG("r600_irq_set: hpd 2\n");
3065 		hpd2 |= DC_HPDx_INT_EN;
3066 	}
3067 	if (rdev->irq.hpd[2]) {
3068 		DRM_DEBUG("r600_irq_set: hpd 3\n");
3069 		hpd3 |= DC_HPDx_INT_EN;
3070 	}
3071 	if (rdev->irq.hpd[3]) {
3072 		DRM_DEBUG("r600_irq_set: hpd 4\n");
3073 		hpd4 |= DC_HPDx_INT_EN;
3074 	}
3075 	if (rdev->irq.hpd[4]) {
3076 		DRM_DEBUG("r600_irq_set: hpd 5\n");
3077 		hpd5 |= DC_HPDx_INT_EN;
3078 	}
3079 	if (rdev->irq.hpd[5]) {
3080 		DRM_DEBUG("r600_irq_set: hpd 6\n");
3081 		hpd6 |= DC_HPDx_INT_EN;
3082 	}
3083 	if (rdev->irq.hdmi[0]) {
3084 		DRM_DEBUG("r600_irq_set: hdmi 1\n");
3085 		hdmi1 |= R600_HDMI_INT_EN;
3086 	}
3087 	if (rdev->irq.hdmi[1]) {
3088 		DRM_DEBUG("r600_irq_set: hdmi 2\n");
3089 		hdmi2 |= R600_HDMI_INT_EN;
3090 	}
3091 	if (rdev->irq.gui_idle) {
3092 		DRM_DEBUG("gui idle\n");
3093 		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
3094 	}
3095 
3096 	WREG32(CP_INT_CNTL, cp_int_cntl);
3097 	WREG32(DxMODE_INT_MASK, mode_int);
3098 	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
3099 	WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
3100 	if (ASIC_IS_DCE3(rdev)) {
3101 		WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
3102 		WREG32(DC_HPD1_INT_CONTROL, hpd1);
3103 		WREG32(DC_HPD2_INT_CONTROL, hpd2);
3104 		WREG32(DC_HPD3_INT_CONTROL, hpd3);
3105 		WREG32(DC_HPD4_INT_CONTROL, hpd4);
3106 		if (ASIC_IS_DCE32(rdev)) {
3107 			WREG32(DC_HPD5_INT_CONTROL, hpd5);
3108 			WREG32(DC_HPD6_INT_CONTROL, hpd6);
3109 		}
3110 	} else {
3111 		WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
3112 		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
3113 		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
3114 		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
3115 	}
3116 
3117 	return 0;
3118 }
3119 
3120 static inline void r600_irq_ack(struct radeon_device *rdev,
3121 				u32 *disp_int,
3122 				u32 *disp_int_cont,
3123 				u32 *disp_int_cont2)
3124 {
3125 	u32 tmp;
3126 
3127 	if (ASIC_IS_DCE3(rdev)) {
3128 		*disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
3129 		*disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
3130 		*disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
3131 	} else {
3132 		*disp_int = RREG32(DISP_INTERRUPT_STATUS);
3133 		*disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
3134 		*disp_int_cont2 = 0;
3135 	}
3136 
3137 	if (*disp_int & LB_D1_VBLANK_INTERRUPT)
3138 		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3139 	if (*disp_int & LB_D1_VLINE_INTERRUPT)
3140 		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3141 	if (*disp_int & LB_D2_VBLANK_INTERRUPT)
3142 		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3143 	if (*disp_int & LB_D2_VLINE_INTERRUPT)
3144 		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3145 	if (*disp_int & DC_HPD1_INTERRUPT) {
3146 		if (ASIC_IS_DCE3(rdev)) {
3147 			tmp = RREG32(DC_HPD1_INT_CONTROL);
3148 			tmp |= DC_HPDx_INT_ACK;
3149 			WREG32(DC_HPD1_INT_CONTROL, tmp);
3150 		} else {
3151 			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
3152 			tmp |= DC_HPDx_INT_ACK;
3153 			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
3154 		}
3155 	}
3156 	if (*disp_int & DC_HPD2_INTERRUPT) {
3157 		if (ASIC_IS_DCE3(rdev)) {
3158 			tmp = RREG32(DC_HPD2_INT_CONTROL);
3159 			tmp |= DC_HPDx_INT_ACK;
3160 			WREG32(DC_HPD2_INT_CONTROL, tmp);
3161 		} else {
3162 			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
3163 			tmp |= DC_HPDx_INT_ACK;
3164 			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
3165 		}
3166 	}
3167 	if (*disp_int_cont & DC_HPD3_INTERRUPT) {
3168 		if (ASIC_IS_DCE3(rdev)) {
3169 			tmp = RREG32(DC_HPD3_INT_CONTROL);
3170 			tmp |= DC_HPDx_INT_ACK;
3171 			WREG32(DC_HPD3_INT_CONTROL, tmp);
3172 		} else {
3173 			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
3174 			tmp |= DC_HPDx_INT_ACK;
3175 			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
3176 		}
3177 	}
3178 	if (*disp_int_cont & DC_HPD4_INTERRUPT) {
3179 		tmp = RREG32(DC_HPD4_INT_CONTROL);
3180 		tmp |= DC_HPDx_INT_ACK;
3181 		WREG32(DC_HPD4_INT_CONTROL, tmp);
3182 	}
3183 	if (ASIC_IS_DCE32(rdev)) {
3184 		if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
3185 			tmp = RREG32(DC_HPD5_INT_CONTROL);
3186 			tmp |= DC_HPDx_INT_ACK;
3187 			WREG32(DC_HPD5_INT_CONTROL, tmp);
3188 		}
3189 		if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
3190 			tmp = RREG32(DC_HPD6_INT_CONTROL);
3191 			tmp |= DC_HPDx_INT_ACK;
3192 			WREG32(DC_HPD6_INT_CONTROL, tmp);
3193 		}
3194 	}
3195 	if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
3196 		WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
3197 	}
3198 	if (ASIC_IS_DCE3(rdev)) {
3199 		if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
3200 			WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
3201 		}
3202 	} else {
3203 		if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
3204 			WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
3205 		}
3206 	}
3207 }
3208 
3209 void r600_irq_disable(struct radeon_device *rdev)
3210 {
3211 	u32 disp_int, disp_int_cont, disp_int_cont2;
3212 
3213 	r600_disable_interrupts(rdev);
3214 	/* Wait and acknowledge irq */
3215 	mdelay(1);
3216 	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
3217 	r600_disable_interrupt_state(rdev);
3218 }
3219 
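/* Read the IH write pointer and, on ring overflow, advance the read
 * pointer past the overwritten entries.
 */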
3220 static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
3221 {
3222 	u32 wptr, tmp;
3223 
3224 	/* XXX use writeback */
3225 	wptr = RREG32(IH_RB_WPTR);
3226 
3227 	if (wptr & RB_OVERFLOW) {
3228 		/* When a ring buffer overflow happens, start parsing interrupts
3229 		 * from the last non-overwritten vector (wptr + 16). Hopefully
3230 		 * this allows us to catch up.
3231 		 */
3232 		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
3233 			wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
3234 		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
3235 		tmp = RREG32(IH_RB_CNTL);
3236 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
3237 		WREG32(IH_RB_CNTL, tmp);
3238 	}
3239 	return (wptr & rdev->ih.ptr_mask);
3240 }
3241 
3242 /*        r600 IV Ring
3243  * Each IV ring entry is 128 bits:
3244  * [7:0]    - interrupt source id
3245  * [31:8]   - reserved
3246  * [59:32]  - interrupt source data
3247  * [127:60]  - reserved
3248  *
3249  * The basic interrupt vector entries
3250  * are decoded as follows:
3251  * src_id  src_data  description
3252  *      1         0  D1 Vblank
3253  *      1         1  D1 Vline
3254  *      5         0  D2 Vblank
3255  *      5         1  D2 Vline
3256  *     19         0  FP Hot plug detection A
3257  *     19         1  FP Hot plug detection B
3258  *     19         2  DAC A auto-detection
3259  *     19         3  DAC B auto-detection
3260  *     21         4  HDMI block A
3261  *     21         5  HDMI block B
3262  *    176         -  CP_INT RB
3263  *    177         -  CP_INT IB1
3264  *    178         -  CP_INT IB2
3265  *    181         -  EOP Interrupt
3266  *    233         -  GUI Idle
3267  *
3268  * Note: these are based on r600 and may need to be
3269  * adjusted or extended on newer ASICs
3270  */
3271 
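/*
 * Walk the IH ring and dispatch every pending interrupt vector: acknowledge
 * the display sources, decode each 16-byte entry between rptr and wptr, and
 * re-check the write pointer before returning so vectors posted while we
 * were processing are not lost.  Hotplug events are deferred to a work queue.
 */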
3272 int r600_irq_process(struct radeon_device *rdev)
3273 {
3274 	u32 wptr = r600_get_ih_wptr(rdev);
3275 	u32 rptr = rdev->ih.rptr;
3276 	u32 src_id, src_data;
3277 	u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
3278 	unsigned long flags;
3279 	bool queue_hotplug = false;
3280 
3281 	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
3282 	if (!rdev->ih.enabled)
3283 		return IRQ_NONE;
3284 
3285 	spin_lock_irqsave(&rdev->ih.lock, flags);
3286 
3287 	if (rptr == wptr) {
3288 		spin_unlock_irqrestore(&rdev->ih.lock, flags);
3289 		return IRQ_NONE;
3290 	}
3291 	if (rdev->shutdown) {
3292 		spin_unlock_irqrestore(&rdev->ih.lock, flags);
3293 		return IRQ_NONE;
3294 	}
3295 
3296 restart_ih:
3297 	/* display interrupts */
3298 	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
3299 
3300 	rdev->ih.wptr = wptr;
3301 	while (rptr != wptr) {
3302 		/* wptr/rptr are in bytes! */
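		/* each 128-bit vector occupies four consecutive dwords of ih.ring */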
3303 		ring_index = rptr / 4;
3304 		src_id =  rdev->ih.ring[ring_index] & 0xff;
3305 		src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
3306 
3307 		switch (src_id) {
3308 		case 1: /* D1 vblank/vline */
3309 			switch (src_data) {
3310 			case 0: /* D1 vblank */
3311 				if (disp_int & LB_D1_VBLANK_INTERRUPT) {
3312 					drm_handle_vblank(rdev->ddev, 0);
3313 					rdev->pm.vblank_sync = true;
3314 					wake_up(&rdev->irq.vblank_queue);
3315 					disp_int &= ~LB_D1_VBLANK_INTERRUPT;
3316 					DRM_DEBUG("IH: D1 vblank\n");
3317 				}
3318 				break;
3319 			case 1: /* D1 vline */
3320 				if (disp_int & LB_D1_VLINE_INTERRUPT) {
3321 					disp_int &= ~LB_D1_VLINE_INTERRUPT;
3322 					DRM_DEBUG("IH: D1 vline\n");
3323 				}
3324 				break;
3325 			default:
3326 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3327 				break;
3328 			}
3329 			break;
3330 		case 5: /* D2 vblank/vline */
3331 			switch (src_data) {
3332 			case 0: /* D2 vblank */
3333 				if (disp_int & LB_D2_VBLANK_INTERRUPT) {
3334 					drm_handle_vblank(rdev->ddev, 1);
3335 					rdev->pm.vblank_sync = true;
3336 					wake_up(&rdev->irq.vblank_queue);
3337 					disp_int &= ~LB_D2_VBLANK_INTERRUPT;
3338 					DRM_DEBUG("IH: D2 vblank\n");
3339 				}
3340 				break;
3341 			case 1: /* D2 vline */
3342 				if (disp_int & LB_D2_VLINE_INTERRUPT) {
3343 					disp_int &= ~LB_D2_VLINE_INTERRUPT;
3344 					DRM_DEBUG("IH: D2 vline\n");
3345 				}
3346 				break;
3347 			default:
3348 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3349 				break;
3350 			}
3351 			break;
3352 		case 19: /* HPD/DAC hotplug */
3353 			switch (src_data) {
3354 			case 0:
3355 				if (disp_int & DC_HPD1_INTERRUPT) {
3356 					disp_int &= ~DC_HPD1_INTERRUPT;
3357 					queue_hotplug = true;
3358 					DRM_DEBUG("IH: HPD1\n");
3359 				}
3360 				break;
3361 			case 1:
3362 				if (disp_int & DC_HPD2_INTERRUPT) {
3363 					disp_int &= ~DC_HPD2_INTERRUPT;
3364 					queue_hotplug = true;
3365 					DRM_DEBUG("IH: HPD2\n");
3366 				}
3367 				break;
3368 			case 4:
3369 				if (disp_int_cont & DC_HPD3_INTERRUPT) {
3370 					disp_int_cont &= ~DC_HPD3_INTERRUPT;
3371 					queue_hotplug = true;
3372 					DRM_DEBUG("IH: HPD3\n");
3373 				}
3374 				break;
3375 			case 5:
3376 				if (disp_int_cont & DC_HPD4_INTERRUPT) {
3377 					disp_int_cont &= ~DC_HPD4_INTERRUPT;
3378 					queue_hotplug = true;
3379 					DRM_DEBUG("IH: HPD4\n");
3380 				}
3381 				break;
3382 			case 10:
3383 				if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
3384 					disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
3385 					queue_hotplug = true;
3386 					DRM_DEBUG("IH: HPD5\n");
3387 				}
3388 				break;
3389 			case 12:
3390 				if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
3391 					disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
3392 					queue_hotplug = true;
3393 					DRM_DEBUG("IH: HPD6\n");
3394 				}
3395 				break;
3396 			default:
3397 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3398 				break;
3399 			}
3400 			break;
3401 		case 21: /* HDMI */
3402 			DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
3403 			r600_audio_schedule_polling(rdev);
3404 			break;
3405 		case 176: /* CP_INT in ring buffer */
3406 		case 177: /* CP_INT in IB1 */
3407 		case 178: /* CP_INT in IB2 */
3408 			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
3409 			radeon_fence_process(rdev);
3410 			break;
3411 		case 181: /* CP EOP event */
3412 			DRM_DEBUG("IH: CP EOP\n");
3413 			break;
3414 		case 233: /* GUI IDLE */
3415 			DRM_DEBUG("IH: GUI idle\n");
3416 			rdev->pm.gui_idle = true;
3417 			wake_up(&rdev->irq.idle_queue);
3418 			break;
3419 		default:
3420 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3421 			break;
3422 		}
3423 
3424 		/* wptr/rptr are in bytes! */
3425 		rptr += 16;
3426 		rptr &= rdev->ih.ptr_mask;
3427 	}
3428 	/* make sure wptr hasn't changed while processing */
3429 	wptr = r600_get_ih_wptr(rdev);
3430 	if (wptr != rdev->ih.wptr)
3431 		goto restart_ih;
3432 	if (queue_hotplug)
3433 		queue_work(rdev->wq, &rdev->hotplug_work);
3434 	rdev->ih.rptr = rptr;
3435 	WREG32(IH_RB_RPTR, rdev->ih.rptr);
3436 	spin_unlock_irqrestore(&rdev->ih.lock, flags);
3437 	return IRQ_HANDLED;
3438 }
3439 
3440 /*
3441  * Debugfs info
3442  */
3443 #if defined(CONFIG_DEBUG_FS)
3444 
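/* Dump CP status, ring pointers and the not-yet-consumed ring contents. */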
3445 static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
3446 {
3447 	struct drm_info_node *node = (struct drm_info_node *) m->private;
3448 	struct drm_device *dev = node->minor->dev;
3449 	struct radeon_device *rdev = dev->dev_private;
3450 	unsigned count, i, j;
3451 
3452 	radeon_ring_free_size(rdev);
3453 	count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
3454 	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
3455 	seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
3456 	seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
3457 	seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
3458 	seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
3459 	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
3460 	seq_printf(m, "%u dwords in ring\n", count);
3461 	i = rdev->cp.rptr;
3462 	for (j = 0; j <= count; j++) {
3463 		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
3464 		i = (i + 1) & rdev->cp.ptr_mask;
3465 	}
3466 	return 0;
3467 }
3468 
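/* Dump the SRBM and VM L2 status registers. */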
3469 static int r600_debugfs_mc_info(struct seq_file *m, void *data)
3470 {
3471 	struct drm_info_node *node = (struct drm_info_node *) m->private;
3472 	struct drm_device *dev = node->minor->dev;
3473 	struct radeon_device *rdev = dev->dev_private;
3474 
3475 	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
3476 	DREG32_SYS(m, rdev, VM_L2_STATUS);
3477 	return 0;
3478 }
3479 
3480 static struct drm_info_list r600_mc_info_list[] = {
3481 	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
3482 	{"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
3483 };
3484 #endif
3485 
3486 int r600_debugfs_mc_info_init(struct radeon_device *rdev)
3487 {
3488 #if defined(CONFIG_DEBUG_FS)
3489 	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
3490 #else
3491 	return 0;
3492 #endif
3493 }
3494 
3495 /**
3496  * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
3497  * @rdev: radeon device structure
3498  * @bo: buffer object struct which userspace is waiting for idle
3499  *
3500  * Some R6XX/R7XX hardware doesn't seem to take into account HDP flushes
3501  * performed through the ring buffer; this leads to corruption in rendering,
3502  * see http://bugzilla.kernel.org/show_bug.cgi?id=15186.  To avoid this we
3503  * directly perform the HDP flush by writing the register through MMIO.
3504  */
3505 void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
3506 {
3507 	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
3508 }
3509