/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_audio.h"
#include <drm/radeon_drm.h>
#include "sid.h"
#include "atom.h"
#include "si_blit_shaders.h"
#include "clearstate_si.h"
#include "radeon_ucode.h"

MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
MODULE_FIRMWARE("radeon/TAHITI_me.bin");
MODULE_FIRMWARE("radeon/TAHITI_ce.bin");
MODULE_FIRMWARE("radeon/TAHITI_mc.bin");
MODULE_FIRMWARE("radeon/TAHITI_mc2.bin");
MODULE_FIRMWARE("radeon/TAHITI_rlc.bin");
MODULE_FIRMWARE("radeon/TAHITI_smc.bin");

MODULE_FIRMWARE("radeon/tahiti_pfp.bin");
MODULE_FIRMWARE("radeon/tahiti_me.bin");
MODULE_FIRMWARE("radeon/tahiti_ce.bin");
MODULE_FIRMWARE("radeon/tahiti_mc.bin");
MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
MODULE_FIRMWARE("radeon/tahiti_smc.bin");

MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_mc2.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_smc.bin");

MODULE_FIRMWARE("radeon/pitcairn_pfp.bin");
MODULE_FIRMWARE("radeon/pitcairn_me.bin");
MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");

MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
MODULE_FIRMWARE("radeon/VERDE_me.bin");
MODULE_FIRMWARE("radeon/VERDE_ce.bin");
MODULE_FIRMWARE("radeon/VERDE_mc.bin");
MODULE_FIRMWARE("radeon/VERDE_mc2.bin");
MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
MODULE_FIRMWARE("radeon/VERDE_smc.bin");

MODULE_FIRMWARE("radeon/verde_pfp.bin");
MODULE_FIRMWARE("radeon/verde_me.bin");
MODULE_FIRMWARE("radeon/verde_ce.bin");
MODULE_FIRMWARE("radeon/verde_mc.bin");
MODULE_FIRMWARE("radeon/verde_rlc.bin");
MODULE_FIRMWARE("radeon/verde_smc.bin");
MODULE_FIRMWARE("radeon/verde_k_smc.bin");

MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
MODULE_FIRMWARE("radeon/OLAND_me.bin");
MODULE_FIRMWARE("radeon/OLAND_ce.bin");
MODULE_FIRMWARE("radeon/OLAND_mc.bin");
MODULE_FIRMWARE("radeon/OLAND_mc2.bin");
MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
MODULE_FIRMWARE("radeon/OLAND_smc.bin");

MODULE_FIRMWARE("radeon/oland_pfp.bin");
MODULE_FIRMWARE("radeon/oland_me.bin");
MODULE_FIRMWARE("radeon/oland_ce.bin");
MODULE_FIRMWARE("radeon/oland_mc.bin");
MODULE_FIRMWARE("radeon/oland_rlc.bin");
MODULE_FIRMWARE("radeon/oland_smc.bin");
MODULE_FIRMWARE("radeon/oland_k_smc.bin");

MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
MODULE_FIRMWARE("radeon/HAINAN_me.bin");
MODULE_FIRMWARE("radeon/HAINAN_ce.bin");
MODULE_FIRMWARE("radeon/HAINAN_mc.bin");
MODULE_FIRMWARE("radeon/HAINAN_mc2.bin");
MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
MODULE_FIRMWARE("radeon/HAINAN_smc.bin");

MODULE_FIRMWARE("radeon/hainan_pfp.bin");
MODULE_FIRMWARE("radeon/hainan_me.bin");
MODULE_FIRMWARE("radeon/hainan_ce.bin");
MODULE_FIRMWARE("radeon/hainan_mc.bin");
MODULE_FIRMWARE("radeon/hainan_rlc.bin");
MODULE_FIRMWARE("radeon/hainan_smc.bin");
MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");

MODULE_FIRMWARE("radeon/si58_mc.bin");

static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
static void si_pcie_gen3_enable(struct radeon_device *rdev);
static void si_program_aspm(struct radeon_device *rdev);
extern void sumo_rlc_fini(struct radeon_device *rdev);
extern int sumo_rlc_init(struct radeon_device *rdev);
extern int r600_ih_ring_alloc(struct radeon_device *rdev);
extern void r600_ih_ring_fini(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
					 bool enable);
static void si_init_pg(struct radeon_device *rdev);
static void si_init_cg(struct radeon_device *rdev);
static void si_fini_pg(struct radeon_device *rdev);
static void si_fini_cg(struct radeon_device *rdev);
static void si_rlc_stop(struct radeon_device *rdev);

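/*
 * RLC save/restore register list for Verde.  Entries come in pairs: the
 * first word appears to encode a GRBM_GFX_INDEX selector in its high 16
 * bits (0x8000/0x8040 targeting individual shader engines, 0x9c00
 * broadcast) with the dword register offset in the low 16 bits; the
 * second word is the restore value.
 */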
static const u32 verde_rlc_save_restore_register_list[] =
{
	(0x8000 << 16) | (0x98f4 >> 2),
	0x00000000,
	(0x8040 << 16) | (0x98f4 >> 2),
	0x00000000,
	(0x8000 << 16) | (0xe80 >> 2),
	0x00000000,
	(0x8040 << 16) | (0xe80 >> 2),
	0x00000000,
	(0x8000 << 16) | (0x89bc >> 2),
	0x00000000,
	(0x8040 << 16) | (0x89bc >> 2),
	0x00000000,
	(0x8000 << 16) | (0x8c1c >> 2),
	0x00000000,
	(0x8040 << 16) | (0x8c1c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x98f0 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0xe7c >> 2),
	0x00000000,
	(0x8000 << 16) | (0x9148 >> 2),
	0x00000000,
	(0x8040 << 16) | (0x9148 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9150 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x897c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8d8c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0xac54 >> 2),
	0x00000000,
	0x3,
	(0x9c00 << 16) | (0x98f8 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9910 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9914 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9918 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x991c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9920 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9924 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9928 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x992c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9930 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9934 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9938 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x993c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9940 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9944 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9948 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x994c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9950 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9954 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9958 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x995c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9960 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9964 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9968 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x996c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9970 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9974 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9978 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x997c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9980 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9984 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9988 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x998c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8c00 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8c14 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8c04 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8c08 >> 2),
	0x00000000,
	(0x8000 << 16) | (0x9b7c >> 2),
	0x00000000,
	(0x8040 << 16) | (0x9b7c >> 2),
	0x00000000,
	(0x8000 << 16) | (0xe84 >> 2),
	0x00000000,
	(0x8040 << 16) | (0xe84 >> 2),
	0x00000000,
	(0x8000 << 16) | (0x89c0 >> 2),
	0x00000000,
	(0x8040 << 16) | (0x89c0 >> 2),
	0x00000000,
	(0x8000 << 16) | (0x914c >> 2),
	0x00000000,
	(0x8040 << 16) | (0x914c >> 2),
	0x00000000,
	(0x8000 << 16) | (0x8c20 >> 2),
	0x00000000,
	(0x8040 << 16) | (0x8c20 >> 2),
	0x00000000,
	(0x8000 << 16) | (0x9354 >> 2),
	0x00000000,
	(0x8040 << 16) | (0x9354 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9060 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9364 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9100 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x913c >> 2),
	0x00000000,
	(0x8000 << 16) | (0x90e0 >> 2),
	0x00000000,
	(0x8000 << 16) | (0x90e4 >> 2),
	0x00000000,
	(0x8000 << 16) | (0x90e8 >> 2),
	0x00000000,
	(0x8040 << 16) | (0x90e0 >> 2),
	0x00000000,
	(0x8040 << 16) | (0x90e4 >> 2),
	0x00000000,
	(0x8040 << 16) | (0x90e8 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8bcc >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8b24 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x88c4 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8e50 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8c0c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8e58 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8e5c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9508 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x950c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9494 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0xac0c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0xac10 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0xac14 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0xae00 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0xac08 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x88d4 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x88c8 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x88cc >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x89b0 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8b10 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8a14 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9830 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9834 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9838 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9a10 >> 2),
	0x00000000,
	(0x8000 << 16) | (0x9870 >> 2),
	0x00000000,
	(0x8000 << 16) | (0x9874 >> 2),
	0x00000000,
	(0x8001 << 16) | (0x9870 >> 2),
	0x00000000,
	(0x8001 << 16) | (0x9874 >> 2),
	0x00000000,
	(0x8040 << 16) | (0x9870 >> 2),
	0x00000000,
	(0x8040 << 16) | (0x9874 >> 2),
	0x00000000,
	(0x8041 << 16) | (0x9870 >> 2),
	0x00000000,
	(0x8041 << 16) | (0x9874 >> 2),
	0x00000000,
	0x00000000
};

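/*
 * "Golden" register tables.  Each entry is an {offset, mask, value}
 * triple consumed by radeon_program_register_sequence(): the masked
 * bits of the register at the given offset are replaced with the new
 * value, leaving the remaining bits untouched.
 */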
static const u32 tahiti_golden_rlc_registers[] =
{
	0xc424, 0xffffffff, 0x00601005,
	0xc47c, 0xffffffff, 0x10104040,
	0xc488, 0xffffffff, 0x0100000a,
	0xc314, 0xffffffff, 0x00000800,
	0xc30c, 0xffffffff, 0x800000f4,
	0xf4a8, 0xffffffff, 0x00000000
};

static const u32 tahiti_golden_registers[] =
{
	0x9a10, 0x00010000, 0x00018208,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xc78, 0x00000080, 0x00000000,
	0xd030, 0x000300c0, 0x00800040,
	0xd830, 0x000300c0, 0x00800040,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0x00200000, 0x50100000,
	0x7030, 0x31000311, 0x00000011,
	0x277c, 0x00000003, 0x000007ff,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x4e000000,
	0x28350, 0x3f3f3fff, 0x2a00126a,
	0x30, 0x000000ff, 0x0040,
	0x34, 0x00000040, 0x00004040,
	0x9100, 0x07ffffff, 0x03000000,
	0x8e88, 0x01ff1f3f, 0x00000000,
	0x8e84, 0x01ff1f3f, 0x00000000,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xac14, 0x00000200, 0x000002fb,
	0xac10, 0xffffffff, 0x0000543b,
	0xac0c, 0xffffffff, 0xa9210876,
	0x88d0, 0xffffffff, 0x000fff40,
	0x88d4, 0x0000001f, 0x00000010,
	0x1410, 0x20000000, 0x20fffed8,
	0x15c0, 0x000c0fc0, 0x000c0400
};

static const u32 tahiti_golden_registers2[] =
{
	0xc64, 0x00000001, 0x00000001
};

static const u32 pitcairn_golden_rlc_registers[] =
{
	0xc424, 0xffffffff, 0x00601004,
	0xc47c, 0xffffffff, 0x10102020,
	0xc488, 0xffffffff, 0x01000020,
	0xc314, 0xffffffff, 0x00000800,
	0xc30c, 0xffffffff, 0x800000a4
};

static const u32 pitcairn_golden_registers[] =
{
	0x9a10, 0x00010000, 0x00018208,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xc78, 0x00000080, 0x00000000,
	0xd030, 0x000300c0, 0x00800040,
	0xd830, 0x000300c0, 0x00800040,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0x00200000, 0x50100000,
	0x7030, 0x31000311, 0x00000011,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x4e000000,
	0x28350, 0x3f3f3fff, 0x2a00126a,
	0x30, 0x000000ff, 0x0040,
	0x34, 0x00000040, 0x00004040,
	0x9100, 0x07ffffff, 0x03000000,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xac14, 0x000003ff, 0x000000f7,
	0xac10, 0xffffffff, 0x00000000,
	0xac0c, 0xffffffff, 0x32761054,
	0x88d4, 0x0000001f, 0x00000010,
	0x15c0, 0x000c0fc0, 0x000c0400
};

static const u32 verde_golden_rlc_registers[] =
{
	0xc424, 0xffffffff, 0x033f1005,
	0xc47c, 0xffffffff, 0x10808020,
	0xc488, 0xffffffff, 0x00800008,
	0xc314, 0xffffffff, 0x00001000,
	0xc30c, 0xffffffff, 0x80010014
};

static const u32 verde_golden_registers[] =
{
	0x9a10, 0x00010000, 0x00018208,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xc78, 0x00000080, 0x00000000,
	0xd030, 0x000300c0, 0x00800040,
	0xd030, 0x000300c0, 0x00800040,
	0xd830, 0x000300c0, 0x00800040,
	0xd830, 0x000300c0, 0x00800040,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0x00200000, 0x50100000,
	0x7030, 0x31000311, 0x00000011,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x240c, 0x000007ff, 0x00000000,
	0x240c, 0x000007ff, 0x00000000,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8a14, 0xf000001f, 0x00000007,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x4e000000,
	0x28350, 0x3f3f3fff, 0x0000124a,
	0x28350, 0x3f3f3fff, 0x0000124a,
	0x28350, 0x3f3f3fff, 0x0000124a,
	0x30, 0x000000ff, 0x0040,
	0x34, 0x00000040, 0x00004040,
	0x9100, 0x07ffffff, 0x03000000,
	0x9100, 0x07ffffff, 0x03000000,
	0x8e88, 0x01ff1f3f, 0x00000000,
	0x8e88, 0x01ff1f3f, 0x00000000,
	0x8e88, 0x01ff1f3f, 0x00000000,
	0x8e84, 0x01ff1f3f, 0x00000000,
	0x8e84, 0x01ff1f3f, 0x00000000,
	0x8e84, 0x01ff1f3f, 0x00000000,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xac14, 0x000003ff, 0x00000003,
	0xac14, 0x000003ff, 0x00000003,
	0xac14, 0x000003ff, 0x00000003,
	0xac10, 0xffffffff, 0x00000000,
	0xac10, 0xffffffff, 0x00000000,
	0xac10, 0xffffffff, 0x00000000,
	0xac0c, 0xffffffff, 0x00001032,
	0xac0c, 0xffffffff, 0x00001032,
	0xac0c, 0xffffffff, 0x00001032,
	0x88d4, 0x0000001f, 0x00000010,
	0x88d4, 0x0000001f, 0x00000010,
	0x88d4, 0x0000001f, 0x00000010,
	0x15c0, 0x000c0fc0, 0x000c0400
};

static const u32 oland_golden_rlc_registers[] =
{
	0xc424, 0xffffffff, 0x00601005,
	0xc47c, 0xffffffff, 0x10104040,
	0xc488, 0xffffffff, 0x0100000a,
	0xc314, 0xffffffff, 0x00000800,
	0xc30c, 0xffffffff, 0x800000f4
};

static const u32 oland_golden_registers[] =
{
	0x9a10, 0x00010000, 0x00018208,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xc78, 0x00000080, 0x00000000,
	0xd030, 0x000300c0, 0x00800040,
	0xd830, 0x000300c0, 0x00800040,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0x00200000, 0x50100000,
	0x7030, 0x31000311, 0x00000011,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x4e000000,
	0x28350, 0x3f3f3fff, 0x00000082,
	0x30, 0x000000ff, 0x0040,
	0x34, 0x00000040, 0x00004040,
	0x9100, 0x07ffffff, 0x03000000,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xac14, 0x000003ff, 0x000000f3,
	0xac10, 0xffffffff, 0x00000000,
	0xac0c, 0xffffffff, 0x00003210,
	0x88d4, 0x0000001f, 0x00000010,
	0x15c0, 0x000c0fc0, 0x000c0400
};

static const u32 hainan_golden_registers[] =
{
	0x9a10, 0x00010000, 0x00018208,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xd0c0, 0xff000fff, 0x00000100,
	0xd030, 0x000300c0, 0x00800040,
	0xd8c0, 0xff000fff, 0x00000100,
	0xd830, 0x000300c0, 0x00800040,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x4e000000,
	0x28350, 0x3f3f3fff, 0x00000000,
	0x30, 0x000000ff, 0x0040,
	0x34, 0x00000040, 0x00004040,
	0x9100, 0x03e00000, 0x03600000,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xac14, 0x000003ff, 0x000000f1,
	0xac10, 0xffffffff, 0x00000000,
	0xac0c, 0xffffffff, 0x00003210,
	0x88d4, 0x0000001f, 0x00000010,
	0x15c0, 0x000c0fc0, 0x000c0400
};

static const u32 hainan_golden_registers2[] =
{
	0x98f8, 0xffffffff, 0x02010001
};

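/*
 * Medium grain / coarse grain clock gating (MGCG/CGCG) init sequences,
 * in the same {offset, mask, value} triple format as the golden
 * register tables above.
 */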
static const u32 tahiti_mgcg_cgcg_init[] =
{
	0xc400, 0xffffffff, 0xfffffffc,
	0x802c, 0xffffffff, 0xe0000000,
	0x9a60, 0xffffffff, 0x00000100,
	0x92a4, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x9774, 0xffffffff, 0x00000100,
	0x8984, 0xffffffff, 0x06000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x92a0, 0xffffffff, 0x00000100,
	0xc380, 0xffffffff, 0x00000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x8d88, 0xffffffff, 0x00000100,
	0x8d8c, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0xad80, 0xffffffff, 0x00000100,
	0xac54, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x9868, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0xaf04, 0xffffffff, 0x00000100,
	0xae04, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x802c, 0xffffffff, 0xe0000000,
	0x9160, 0xffffffff, 0x00010000,
	0x9164, 0xffffffff, 0x00030002,
	0x9168, 0xffffffff, 0x00040007,
	0x916c, 0xffffffff, 0x00060005,
	0x9170, 0xffffffff, 0x00090008,
	0x9174, 0xffffffff, 0x00020001,
	0x9178, 0xffffffff, 0x00040003,
	0x917c, 0xffffffff, 0x00000007,
	0x9180, 0xffffffff, 0x00060005,
	0x9184, 0xffffffff, 0x00090008,
	0x9188, 0xffffffff, 0x00030002,
	0x918c, 0xffffffff, 0x00050004,
	0x9190, 0xffffffff, 0x00000008,
	0x9194, 0xffffffff, 0x00070006,
	0x9198, 0xffffffff, 0x000a0009,
	0x919c, 0xffffffff, 0x00040003,
	0x91a0, 0xffffffff, 0x00060005,
	0x91a4, 0xffffffff, 0x00000009,
	0x91a8, 0xffffffff, 0x00080007,
	0x91ac, 0xffffffff, 0x000b000a,
	0x91b0, 0xffffffff, 0x00050004,
	0x91b4, 0xffffffff, 0x00070006,
	0x91b8, 0xffffffff, 0x0008000b,
	0x91bc, 0xffffffff, 0x000a0009,
	0x91c0, 0xffffffff, 0x000d000c,
	0x91c4, 0xffffffff, 0x00060005,
	0x91c8, 0xffffffff, 0x00080007,
	0x91cc, 0xffffffff, 0x0000000b,
	0x91d0, 0xffffffff, 0x000a0009,
	0x91d4, 0xffffffff, 0x000d000c,
	0x91d8, 0xffffffff, 0x00070006,
	0x91dc, 0xffffffff, 0x00090008,
	0x91e0, 0xffffffff, 0x0000000c,
	0x91e4, 0xffffffff, 0x000b000a,
	0x91e8, 0xffffffff, 0x000e000d,
	0x91ec, 0xffffffff, 0x00080007,
	0x91f0, 0xffffffff, 0x000a0009,
	0x91f4, 0xffffffff, 0x0000000d,
	0x91f8, 0xffffffff, 0x000c000b,
	0x91fc, 0xffffffff, 0x000f000e,
	0x9200, 0xffffffff, 0x00090008,
	0x9204, 0xffffffff, 0x000b000a,
	0x9208, 0xffffffff, 0x000c000f,
	0x920c, 0xffffffff, 0x000e000d,
	0x9210, 0xffffffff, 0x00110010,
	0x9214, 0xffffffff, 0x000a0009,
	0x9218, 0xffffffff, 0x000c000b,
	0x921c, 0xffffffff, 0x0000000f,
	0x9220, 0xffffffff, 0x000e000d,
	0x9224, 0xffffffff, 0x00110010,
	0x9228, 0xffffffff, 0x000b000a,
	0x922c, 0xffffffff, 0x000d000c,
	0x9230, 0xffffffff, 0x00000010,
	0x9234, 0xffffffff, 0x000f000e,
	0x9238, 0xffffffff, 0x00120011,
	0x923c, 0xffffffff, 0x000c000b,
	0x9240, 0xffffffff, 0x000e000d,
	0x9244, 0xffffffff, 0x00000011,
	0x9248, 0xffffffff, 0x0010000f,
	0x924c, 0xffffffff, 0x00130012,
	0x9250, 0xffffffff, 0x000d000c,
	0x9254, 0xffffffff, 0x000f000e,
	0x9258, 0xffffffff, 0x00100013,
	0x925c, 0xffffffff, 0x00120011,
	0x9260, 0xffffffff, 0x00150014,
	0x9264, 0xffffffff, 0x000e000d,
	0x9268, 0xffffffff, 0x0010000f,
	0x926c, 0xffffffff, 0x00000013,
	0x9270, 0xffffffff, 0x00120011,
	0x9274, 0xffffffff, 0x00150014,
	0x9278, 0xffffffff, 0x000f000e,
	0x927c, 0xffffffff, 0x00110010,
	0x9280, 0xffffffff, 0x00000014,
	0x9284, 0xffffffff, 0x00130012,
	0x9288, 0xffffffff, 0x00160015,
	0x928c, 0xffffffff, 0x0010000f,
	0x9290, 0xffffffff, 0x00120011,
	0x9294, 0xffffffff, 0x00000015,
	0x9298, 0xffffffff, 0x00140013,
	0x929c, 0xffffffff, 0x00170016,
	0x9150, 0xffffffff, 0x96940200,
	0x8708, 0xffffffff, 0x00900100,
	0xc478, 0xffffffff, 0x00000080,
	0xc404, 0xffffffff, 0x0020003f,
	0x30, 0xffffffff, 0x0000001c,
	0x34, 0x000f0000, 0x000f0000,
	0x160c, 0xffffffff, 0x00000100,
	0x1024, 0xffffffff, 0x00000100,
	0x102c, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x264c, 0x000c0000, 0x000c0000,
	0x2648, 0x000c0000, 0x000c0000,
	0x55e4, 0xff000fff, 0x00000100,
	0x55e8, 0x00000001, 0x00000001,
	0x2f50, 0x00000001, 0x00000001,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd0c0, 0xfffffff0, 0x00000100,
	0xd8c0, 0xfffffff0, 0x00000100
};

static const u32 pitcairn_mgcg_cgcg_init[] =
{
	0xc400, 0xffffffff, 0xfffffffc,
	0x802c, 0xffffffff, 0xe0000000,
	0x9a60, 0xffffffff, 0x00000100,
	0x92a4, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x9774, 0xffffffff, 0x00000100,
	0x8984, 0xffffffff, 0x06000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x92a0, 0xffffffff, 0x00000100,
	0xc380, 0xffffffff, 0x00000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x8d88, 0xffffffff, 0x00000100,
	0x8d8c, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0xad80, 0xffffffff, 0x00000100,
	0xac54, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x9868, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0xaf04, 0xffffffff, 0x00000100,
	0xae04, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x802c, 0xffffffff, 0xe0000000,
	0x9160, 0xffffffff, 0x00010000,
	0x9164, 0xffffffff, 0x00030002,
	0x9168, 0xffffffff, 0x00040007,
	0x916c, 0xffffffff, 0x00060005,
	0x9170, 0xffffffff, 0x00090008,
	0x9174, 0xffffffff, 0x00020001,
	0x9178, 0xffffffff, 0x00040003,
	0x917c, 0xffffffff, 0x00000007,
	0x9180, 0xffffffff, 0x00060005,
	0x9184, 0xffffffff, 0x00090008,
	0x9188, 0xffffffff, 0x00030002,
	0x918c, 0xffffffff, 0x00050004,
	0x9190, 0xffffffff, 0x00000008,
	0x9194, 0xffffffff, 0x00070006,
	0x9198, 0xffffffff, 0x000a0009,
	0x919c, 0xffffffff, 0x00040003,
	0x91a0, 0xffffffff, 0x00060005,
	0x91a4, 0xffffffff, 0x00000009,
	0x91a8, 0xffffffff, 0x00080007,
	0x91ac, 0xffffffff, 0x000b000a,
	0x91b0, 0xffffffff, 0x00050004,
	0x91b4, 0xffffffff, 0x00070006,
	0x91b8, 0xffffffff, 0x0008000b,
	0x91bc, 0xffffffff, 0x000a0009,
	0x91c0, 0xffffffff, 0x000d000c,
	0x9200, 0xffffffff, 0x00090008,
	0x9204, 0xffffffff, 0x000b000a,
	0x9208, 0xffffffff, 0x000c000f,
	0x920c, 0xffffffff, 0x000e000d,
	0x9210, 0xffffffff, 0x00110010,
	0x9214, 0xffffffff, 0x000a0009,
	0x9218, 0xffffffff, 0x000c000b,
	0x921c, 0xffffffff, 0x0000000f,
	0x9220, 0xffffffff, 0x000e000d,
	0x9224, 0xffffffff, 0x00110010,
	0x9228, 0xffffffff, 0x000b000a,
	0x922c, 0xffffffff, 0x000d000c,
	0x9230, 0xffffffff, 0x00000010,
	0x9234, 0xffffffff, 0x000f000e,
	0x9238, 0xffffffff, 0x00120011,
	0x923c, 0xffffffff, 0x000c000b,
	0x9240, 0xffffffff, 0x000e000d,
	0x9244, 0xffffffff, 0x00000011,
	0x9248, 0xffffffff, 0x0010000f,
	0x924c, 0xffffffff, 0x00130012,
	0x9250, 0xffffffff, 0x000d000c,
	0x9254, 0xffffffff, 0x000f000e,
	0x9258, 0xffffffff, 0x00100013,
	0x925c, 0xffffffff, 0x00120011,
	0x9260, 0xffffffff, 0x00150014,
	0x9150, 0xffffffff, 0x96940200,
	0x8708, 0xffffffff, 0x00900100,
	0xc478, 0xffffffff, 0x00000080,
	0xc404, 0xffffffff, 0x0020003f,
	0x30, 0xffffffff, 0x0000001c,
	0x34, 0x000f0000, 0x000f0000,
	0x160c, 0xffffffff, 0x00000100,
	0x1024, 0xffffffff, 0x00000100,
	0x102c, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x55e4, 0xff000fff, 0x00000100,
	0x55e8, 0x00000001, 0x00000001,
	0x2f50, 0x00000001, 0x00000001,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd0c0, 0xfffffff0, 0x00000100,
	0xd8c0, 0xfffffff0, 0x00000100
};

static const u32 verde_mgcg_cgcg_init[] =
{
	0xc400, 0xffffffff, 0xfffffffc,
	0x802c, 0xffffffff, 0xe0000000,
	0x9a60, 0xffffffff, 0x00000100,
	0x92a4, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x9774, 0xffffffff, 0x00000100,
	0x8984, 0xffffffff, 0x06000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x92a0, 0xffffffff, 0x00000100,
	0xc380, 0xffffffff, 0x00000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x8d88, 0xffffffff, 0x00000100,
	0x8d8c, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0xad80, 0xffffffff, 0x00000100,
	0xac54, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x9868, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0xaf04, 0xffffffff, 0x00000100,
	0xae04, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x802c, 0xffffffff, 0xe0000000,
	0x9160, 0xffffffff, 0x00010000,
	0x9164, 0xffffffff, 0x00030002,
	0x9168, 0xffffffff, 0x00040007,
	0x916c, 0xffffffff, 0x00060005,
	0x9170, 0xffffffff, 0x00090008,
	0x9174, 0xffffffff, 0x00020001,
	0x9178, 0xffffffff, 0x00040003,
	0x917c, 0xffffffff, 0x00000007,
	0x9180, 0xffffffff, 0x00060005,
	0x9184, 0xffffffff, 0x00090008,
	0x9188, 0xffffffff, 0x00030002,
	0x918c, 0xffffffff, 0x00050004,
	0x9190, 0xffffffff, 0x00000008,
	0x9194, 0xffffffff, 0x00070006,
	0x9198, 0xffffffff, 0x000a0009,
	0x919c, 0xffffffff, 0x00040003,
	0x91a0, 0xffffffff, 0x00060005,
	0x91a4, 0xffffffff, 0x00000009,
	0x91a8, 0xffffffff, 0x00080007,
	0x91ac, 0xffffffff, 0x000b000a,
	0x91b0, 0xffffffff, 0x00050004,
	0x91b4, 0xffffffff, 0x00070006,
	0x91b8, 0xffffffff, 0x0008000b,
	0x91bc, 0xffffffff, 0x000a0009,
	0x91c0, 0xffffffff, 0x000d000c,
	0x9200, 0xffffffff, 0x00090008,
	0x9204, 0xffffffff, 0x000b000a,
	0x9208, 0xffffffff, 0x000c000f,
	0x920c, 0xffffffff, 0x000e000d,
	0x9210, 0xffffffff, 0x00110010,
	0x9214, 0xffffffff, 0x000a0009,
	0x9218, 0xffffffff, 0x000c000b,
	0x921c, 0xffffffff, 0x0000000f,
	0x9220, 0xffffffff, 0x000e000d,
	0x9224, 0xffffffff, 0x00110010,
	0x9228, 0xffffffff, 0x000b000a,
	0x922c, 0xffffffff, 0x000d000c,
	0x9230, 0xffffffff, 0x00000010,
	0x9234, 0xffffffff, 0x000f000e,
	0x9238, 0xffffffff, 0x00120011,
	0x923c, 0xffffffff, 0x000c000b,
	0x9240, 0xffffffff, 0x000e000d,
	0x9244, 0xffffffff, 0x00000011,
	0x9248, 0xffffffff, 0x0010000f,
	0x924c, 0xffffffff, 0x00130012,
	0x9250, 0xffffffff, 0x000d000c,
	0x9254, 0xffffffff, 0x000f000e,
	0x9258, 0xffffffff, 0x00100013,
	0x925c, 0xffffffff, 0x00120011,
	0x9260, 0xffffffff, 0x00150014,
	0x9150, 0xffffffff, 0x96940200,
	0x8708, 0xffffffff, 0x00900100,
	0xc478, 0xffffffff, 0x00000080,
	0xc404, 0xffffffff, 0x0020003f,
	0x30, 0xffffffff, 0x0000001c,
	0x34, 0x000f0000, 0x000f0000,
	0x160c, 0xffffffff, 0x00000100,
	0x1024, 0xffffffff, 0x00000100,
	0x102c, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x264c, 0x000c0000, 0x000c0000,
	0x2648, 0x000c0000, 0x000c0000,
	0x55e4, 0xff000fff, 0x00000100,
	0x55e8, 0x00000001, 0x00000001,
	0x2f50, 0x00000001, 0x00000001,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd0c0, 0xfffffff0, 0x00000100,
	0xd8c0, 0xfffffff0, 0x00000100
};

static const u32 oland_mgcg_cgcg_init[] =
{
	0xc400, 0xffffffff, 0xfffffffc,
	0x802c, 0xffffffff, 0xe0000000,
	0x9a60, 0xffffffff, 0x00000100,
	0x92a4, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x9774, 0xffffffff, 0x00000100,
	0x8984, 0xffffffff, 0x06000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x92a0, 0xffffffff, 0x00000100,
	0xc380, 0xffffffff, 0x00000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x8d88, 0xffffffff, 0x00000100,
	0x8d8c, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0xad80, 0xffffffff, 0x00000100,
	0xac54, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x9868, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0xaf04, 0xffffffff, 0x00000100,
	0xae04, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x802c, 0xffffffff, 0xe0000000,
	0x9160, 0xffffffff, 0x00010000,
	0x9164, 0xffffffff, 0x00030002,
	0x9168, 0xffffffff, 0x00040007,
	0x916c, 0xffffffff, 0x00060005,
	0x9170, 0xffffffff, 0x00090008,
	0x9174, 0xffffffff, 0x00020001,
	0x9178, 0xffffffff, 0x00040003,
	0x917c, 0xffffffff, 0x00000007,
	0x9180, 0xffffffff, 0x00060005,
	0x9184, 0xffffffff, 0x00090008,
	0x9188, 0xffffffff, 0x00030002,
	0x918c, 0xffffffff, 0x00050004,
	0x9190, 0xffffffff, 0x00000008,
	0x9194, 0xffffffff, 0x00070006,
	0x9198, 0xffffffff, 0x000a0009,
	0x919c, 0xffffffff, 0x00040003,
	0x91a0, 0xffffffff, 0x00060005,
	0x91a4, 0xffffffff, 0x00000009,
	0x91a8, 0xffffffff, 0x00080007,
	0x91ac, 0xffffffff, 0x000b000a,
	0x91b0, 0xffffffff, 0x00050004,
	0x91b4, 0xffffffff, 0x00070006,
	0x91b8, 0xffffffff, 0x0008000b,
	0x91bc, 0xffffffff, 0x000a0009,
	0x91c0, 0xffffffff, 0x000d000c,
	0x91c4, 0xffffffff, 0x00060005,
	0x91c8, 0xffffffff, 0x00080007,
	0x91cc, 0xffffffff, 0x0000000b,
	0x91d0, 0xffffffff, 0x000a0009,
	0x91d4, 0xffffffff, 0x000d000c,
	0x9150, 0xffffffff, 0x96940200,
	0x8708, 0xffffffff, 0x00900100,
	0xc478, 0xffffffff, 0x00000080,
	0xc404, 0xffffffff, 0x0020003f,
	0x30, 0xffffffff, 0x0000001c,
	0x34, 0x000f0000, 0x000f0000,
	0x160c, 0xffffffff, 0x00000100,
	0x1024, 0xffffffff, 0x00000100,
	0x102c, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x264c, 0x000c0000, 0x000c0000,
	0x2648, 0x000c0000, 0x000c0000,
	0x55e4, 0xff000fff, 0x00000100,
	0x55e8, 0x00000001, 0x00000001,
	0x2f50, 0x00000001, 0x00000001,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd0c0, 0xfffffff0, 0x00000100,
	0xd8c0, 0xfffffff0, 0x00000100
};

static const u32 hainan_mgcg_cgcg_init[] =
{
	0xc400, 0xffffffff, 0xfffffffc,
	0x802c, 0xffffffff, 0xe0000000,
	0x9a60, 0xffffffff, 0x00000100,
	0x92a4, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x9774, 0xffffffff, 0x00000100,
	0x8984, 0xffffffff, 0x06000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x92a0, 0xffffffff, 0x00000100,
	0xc380, 0xffffffff, 0x00000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x8d88, 0xffffffff, 0x00000100,
	0x8d8c, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0xad80, 0xffffffff, 0x00000100,
	0xac54, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x9868, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0xaf04, 0xffffffff, 0x00000100,
	0xae04, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x802c, 0xffffffff, 0xe0000000,
	0x9160, 0xffffffff, 0x00010000,
	0x9164, 0xffffffff, 0x00030002,
	0x9168, 0xffffffff, 0x00040007,
	0x916c, 0xffffffff, 0x00060005,
	0x9170, 0xffffffff, 0x00090008,
	0x9174, 0xffffffff, 0x00020001,
	0x9178, 0xffffffff, 0x00040003,
	0x917c, 0xffffffff, 0x00000007,
	0x9180, 0xffffffff, 0x00060005,
	0x9184, 0xffffffff, 0x00090008,
	0x9188, 0xffffffff, 0x00030002,
	0x918c, 0xffffffff, 0x00050004,
	0x9190, 0xffffffff, 0x00000008,
	0x9194, 0xffffffff, 0x00070006,
	0x9198, 0xffffffff, 0x000a0009,
	0x919c, 0xffffffff, 0x00040003,
	0x91a0, 0xffffffff, 0x00060005,
	0x91a4, 0xffffffff, 0x00000009,
	0x91a8, 0xffffffff, 0x00080007,
	0x91ac, 0xffffffff, 0x000b000a,
	0x91b0, 0xffffffff, 0x00050004,
	0x91b4, 0xffffffff, 0x00070006,
	0x91b8, 0xffffffff, 0x0008000b,
	0x91bc, 0xffffffff, 0x000a0009,
	0x91c0, 0xffffffff, 0x000d000c,
	0x91c4, 0xffffffff, 0x00060005,
	0x91c8, 0xffffffff, 0x00080007,
	0x91cc, 0xffffffff, 0x0000000b,
	0x91d0, 0xffffffff, 0x000a0009,
	0x91d4, 0xffffffff, 0x000d000c,
	0x9150, 0xffffffff, 0x96940200,
	0x8708, 0xffffffff, 0x00900100,
	0xc478, 0xffffffff, 0x00000080,
	0xc404, 0xffffffff, 0x0020003f,
	0x30, 0xffffffff, 0x0000001c,
	0x34, 0x000f0000, 0x000f0000,
	0x160c, 0xffffffff, 0x00000100,
	0x1024, 0xffffffff, 0x00000100,
	0x20a8, 0xffffffff, 0x00000104,
	0x264c, 0x000c0000, 0x000c0000,
	0x2648, 0x000c0000, 0x000c0000,
	0x2f50, 0x00000001, 0x00000001,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd0c0, 0xfffffff0, 0x00000100,
	0xd8c0, 0xfffffff0, 0x00000100
};

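/*
 * Power gating init sequence for Verde, programmed through
 * radeon_program_register_sequence() in the same {offset, mask, value}
 * triple format as the tables above.
 */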
static const u32 verde_pg_init[] =
{
	0x353c, 0xffffffff, 0x40000,
	0x3538, 0xffffffff, 0x200010ff,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x7007,
	0x3538, 0xffffffff, 0x300010ff,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x400000,
	0x3538, 0xffffffff, 0x100010ff,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x120200,
	0x3538, 0xffffffff, 0x500010ff,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x1e1e16,
	0x3538, 0xffffffff, 0x600010ff,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x171f1e,
	0x3538, 0xffffffff, 0x700010ff,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x3538, 0xffffffff, 0x9ff,
	0x3500, 0xffffffff, 0x0,
	0x3504, 0xffffffff, 0x10000800,
	0x3504, 0xffffffff, 0xf,
	0x3504, 0xffffffff, 0xf,
	0x3500, 0xffffffff, 0x4,
	0x3504, 0xffffffff, 0x1000051e,
	0x3504, 0xffffffff, 0xffff,
	0x3504, 0xffffffff, 0xffff,
	0x3500, 0xffffffff, 0x8,
	0x3504, 0xffffffff, 0x80500,
	0x3500, 0xffffffff, 0x12,
	0x3504, 0xffffffff, 0x9050c,
	0x3500, 0xffffffff, 0x1d,
	0x3504, 0xffffffff, 0xb052c,
	0x3500, 0xffffffff, 0x2a,
	0x3504, 0xffffffff, 0x1053e,
	0x3500, 0xffffffff, 0x2d,
	0x3504, 0xffffffff, 0x10546,
	0x3500, 0xffffffff, 0x30,
	0x3504, 0xffffffff, 0xa054e,
	0x3500, 0xffffffff, 0x3c,
	0x3504, 0xffffffff, 0x1055f,
	0x3500, 0xffffffff, 0x3f,
	0x3504, 0xffffffff, 0x10567,
	0x3500, 0xffffffff, 0x42,
	0x3504, 0xffffffff, 0x1056f,
	0x3500, 0xffffffff, 0x45,
	0x3504, 0xffffffff, 0x10572,
	0x3500, 0xffffffff, 0x48,
	0x3504, 0xffffffff, 0x20575,
	0x3500, 0xffffffff, 0x4c,
	0x3504, 0xffffffff, 0x190801,
	0x3500, 0xffffffff, 0x67,
	0x3504, 0xffffffff, 0x1082a,
	0x3500, 0xffffffff, 0x6a,
	0x3504, 0xffffffff, 0x1b082d,
	0x3500, 0xffffffff, 0x87,
	0x3504, 0xffffffff, 0x310851,
	0x3500, 0xffffffff, 0xba,
	0x3504, 0xffffffff, 0x891,
	0x3500, 0xffffffff, 0xbc,
	0x3504, 0xffffffff, 0x893,
	0x3500, 0xffffffff, 0xbe,
	0x3504, 0xffffffff, 0x20895,
	0x3500, 0xffffffff, 0xc2,
	0x3504, 0xffffffff, 0x20899,
	0x3500, 0xffffffff, 0xc6,
	0x3504, 0xffffffff, 0x2089d,
	0x3500, 0xffffffff, 0xca,
	0x3504, 0xffffffff, 0x8a1,
	0x3500, 0xffffffff, 0xcc,
	0x3504, 0xffffffff, 0x8a3,
	0x3500, 0xffffffff, 0xce,
	0x3504, 0xffffffff, 0x308a5,
	0x3500, 0xffffffff, 0xd3,
	0x3504, 0xffffffff, 0x6d08cd,
	0x3500, 0xffffffff, 0x142,
	0x3504, 0xffffffff, 0x2000095a,
	0x3504, 0xffffffff, 0x1,
	0x3500, 0xffffffff, 0x144,
	0x3504, 0xffffffff, 0x301f095b,
	0x3500, 0xffffffff, 0x165,
	0x3504, 0xffffffff, 0xc094d,
	0x3500, 0xffffffff, 0x173,
	0x3504, 0xffffffff, 0xf096d,
	0x3500, 0xffffffff, 0x184,
	0x3504, 0xffffffff, 0x15097f,
	0x3500, 0xffffffff, 0x19b,
	0x3504, 0xffffffff, 0xc0998,
	0x3500, 0xffffffff, 0x1a9,
	0x3504, 0xffffffff, 0x409a7,
	0x3500, 0xffffffff, 0x1af,
	0x3504, 0xffffffff, 0xcdc,
	0x3500, 0xffffffff, 0x1b1,
	0x3504, 0xffffffff, 0x800,
	0x3508, 0xffffffff, 0x6c9b2000,
	0x3510, 0xfc00, 0x2000,
	0x3544, 0xffffffff, 0xfc0,
	0x28d4, 0x00000100, 0x100
};

static void si_init_golden_registers(struct radeon_device *rdev)
{
	switch (rdev->family) {
	case CHIP_TAHITI:
		radeon_program_register_sequence(rdev,
						 tahiti_golden_registers,
						 (const u32)ARRAY_SIZE(tahiti_golden_registers));
		radeon_program_register_sequence(rdev,
						 tahiti_golden_rlc_registers,
						 (const u32)ARRAY_SIZE(tahiti_golden_rlc_registers));
		radeon_program_register_sequence(rdev,
						 tahiti_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tahiti_mgcg_cgcg_init));
		radeon_program_register_sequence(rdev,
						 tahiti_golden_registers2,
						 (const u32)ARRAY_SIZE(tahiti_golden_registers2));
		break;
	case CHIP_PITCAIRN:
		radeon_program_register_sequence(rdev,
						 pitcairn_golden_registers,
						 (const u32)ARRAY_SIZE(pitcairn_golden_registers));
		radeon_program_register_sequence(rdev,
						 pitcairn_golden_rlc_registers,
						 (const u32)ARRAY_SIZE(pitcairn_golden_rlc_registers));
		radeon_program_register_sequence(rdev,
						 pitcairn_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
		break;
	case CHIP_VERDE:
		radeon_program_register_sequence(rdev,
						 verde_golden_registers,
						 (const u32)ARRAY_SIZE(verde_golden_registers));
		radeon_program_register_sequence(rdev,
						 verde_golden_rlc_registers,
						 (const u32)ARRAY_SIZE(verde_golden_rlc_registers));
		radeon_program_register_sequence(rdev,
						 verde_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(verde_mgcg_cgcg_init));
		radeon_program_register_sequence(rdev,
						 verde_pg_init,
						 (const u32)ARRAY_SIZE(verde_pg_init));
		break;
	case CHIP_OLAND:
		radeon_program_register_sequence(rdev,
						 oland_golden_registers,
						 (const u32)ARRAY_SIZE(oland_golden_registers));
		radeon_program_register_sequence(rdev,
						 oland_golden_rlc_registers,
						 (const u32)ARRAY_SIZE(oland_golden_rlc_registers));
		radeon_program_register_sequence(rdev,
						 oland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
		break;
	case CHIP_HAINAN:
		radeon_program_register_sequence(rdev,
						 hainan_golden_registers,
						 (const u32)ARRAY_SIZE(hainan_golden_registers));
		radeon_program_register_sequence(rdev,
						 hainan_golden_registers2,
						 (const u32)ARRAY_SIZE(hainan_golden_registers2));
		radeon_program_register_sequence(rdev,
						 hainan_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(hainan_mgcg_cgcg_init));
		break;
	default:
		break;
	}
}

/**
 * si_get_allowed_info_register - fetch the register for the info ioctl
 *
 * @rdev: radeon_device pointer
 * @reg: register offset in bytes
 * @val: register value
 *
 * Returns 0 for success or -EINVAL for an invalid register
 */
int si_get_allowed_info_register(struct radeon_device *rdev,
				 u32 reg, u32 *val)
{
	switch (reg) {
	case GRBM_STATUS:
	case GRBM_STATUS2:
	case GRBM_STATUS_SE0:
	case GRBM_STATUS_SE1:
	case SRBM_STATUS:
	case SRBM_STATUS2:
	case (DMA_STATUS_REG + DMA0_REGISTER_OFFSET):
	case (DMA_STATUS_REG + DMA1_REGISTER_OFFSET):
	case UVD_STATUS:
		*val = RREG32(reg);
		return 0;
	default:
		return -EINVAL;
	}
}

#define PCIE_BUS_CLK                10000
#define TCLK                        (PCIE_BUS_CLK / 10)

/**
 * si_get_xclk - get the xclk
 *
 * @rdev: radeon_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (SI).
 */
u32 si_get_xclk(struct radeon_device *rdev)
{
	u32 reference_clock = rdev->clock.spll.reference_freq;
	u32 tmp;

	tmp = RREG32(CG_CLKPIN_CNTL_2);
	if (tmp & MUX_TCLK_TO_XCLK)
		return TCLK;

	tmp = RREG32(CG_CLKPIN_CNTL);
	if (tmp & XTALIN_DIVIDE)
		return reference_clock / 4;

	return reference_clock;
}

/* get temperature in millidegrees */
int si_get_temp(struct radeon_device *rdev)
{
	u32 temp;
	int actual_temp = 0;

	temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
		CTF_TEMP_SHIFT;

	if (temp & 0x200)
		actual_temp = 255;
	else
		actual_temp = temp & 0x1ff;

	actual_temp = actual_temp * 1000;

	return actual_temp;
}

#define TAHITI_IO_MC_REGS_SIZE 36

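/*
 * MC (memory controller) IO register tables.  Each row is an
 * {MC_SEQ_IO_DEBUG_INDEX, MC_SEQ_IO_DEBUG_DATA} pair written while the
 * MC ucode engine is held in reset in si_mc_load_microcode(); the
 * tables differ per ASIC only in the final (0x9f) entry.
 */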
1352 static const u32 tahiti_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
1353 	{0x0000006f, 0x03044000},
1354 	{0x00000070, 0x0480c018},
1355 	{0x00000071, 0x00000040},
1356 	{0x00000072, 0x01000000},
1357 	{0x00000074, 0x000000ff},
1358 	{0x00000075, 0x00143400},
1359 	{0x00000076, 0x08ec0800},
1360 	{0x00000077, 0x040000cc},
1361 	{0x00000079, 0x00000000},
1362 	{0x0000007a, 0x21000409},
1363 	{0x0000007c, 0x00000000},
1364 	{0x0000007d, 0xe8000000},
1365 	{0x0000007e, 0x044408a8},
1366 	{0x0000007f, 0x00000003},
1367 	{0x00000080, 0x00000000},
1368 	{0x00000081, 0x01000000},
1369 	{0x00000082, 0x02000000},
1370 	{0x00000083, 0x00000000},
1371 	{0x00000084, 0xe3f3e4f4},
1372 	{0x00000085, 0x00052024},
1373 	{0x00000087, 0x00000000},
1374 	{0x00000088, 0x66036603},
1375 	{0x00000089, 0x01000000},
1376 	{0x0000008b, 0x1c0a0000},
1377 	{0x0000008c, 0xff010000},
1378 	{0x0000008e, 0xffffefff},
1379 	{0x0000008f, 0xfff3efff},
1380 	{0x00000090, 0xfff3efbf},
1381 	{0x00000094, 0x00101101},
1382 	{0x00000095, 0x00000fff},
1383 	{0x00000096, 0x00116fff},
1384 	{0x00000097, 0x60010000},
1385 	{0x00000098, 0x10010000},
1386 	{0x00000099, 0x00006000},
1387 	{0x0000009a, 0x00001000},
1388 	{0x0000009f, 0x00a77400}
1389 };
1390 
1391 static const u32 pitcairn_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
1392 	{0x0000006f, 0x03044000},
1393 	{0x00000070, 0x0480c018},
1394 	{0x00000071, 0x00000040},
1395 	{0x00000072, 0x01000000},
1396 	{0x00000074, 0x000000ff},
1397 	{0x00000075, 0x00143400},
1398 	{0x00000076, 0x08ec0800},
1399 	{0x00000077, 0x040000cc},
1400 	{0x00000079, 0x00000000},
1401 	{0x0000007a, 0x21000409},
1402 	{0x0000007c, 0x00000000},
1403 	{0x0000007d, 0xe8000000},
1404 	{0x0000007e, 0x044408a8},
1405 	{0x0000007f, 0x00000003},
1406 	{0x00000080, 0x00000000},
1407 	{0x00000081, 0x01000000},
1408 	{0x00000082, 0x02000000},
1409 	{0x00000083, 0x00000000},
1410 	{0x00000084, 0xe3f3e4f4},
1411 	{0x00000085, 0x00052024},
1412 	{0x00000087, 0x00000000},
1413 	{0x00000088, 0x66036603},
1414 	{0x00000089, 0x01000000},
1415 	{0x0000008b, 0x1c0a0000},
1416 	{0x0000008c, 0xff010000},
1417 	{0x0000008e, 0xffffefff},
1418 	{0x0000008f, 0xfff3efff},
1419 	{0x00000090, 0xfff3efbf},
1420 	{0x00000094, 0x00101101},
1421 	{0x00000095, 0x00000fff},
1422 	{0x00000096, 0x00116fff},
1423 	{0x00000097, 0x60010000},
1424 	{0x00000098, 0x10010000},
1425 	{0x00000099, 0x00006000},
1426 	{0x0000009a, 0x00001000},
1427 	{0x0000009f, 0x00a47400}
1428 };
1429 
1430 static const u32 verde_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
1431 	{0x0000006f, 0x03044000},
1432 	{0x00000070, 0x0480c018},
1433 	{0x00000071, 0x00000040},
1434 	{0x00000072, 0x01000000},
1435 	{0x00000074, 0x000000ff},
1436 	{0x00000075, 0x00143400},
1437 	{0x00000076, 0x08ec0800},
1438 	{0x00000077, 0x040000cc},
1439 	{0x00000079, 0x00000000},
1440 	{0x0000007a, 0x21000409},
1441 	{0x0000007c, 0x00000000},
1442 	{0x0000007d, 0xe8000000},
1443 	{0x0000007e, 0x044408a8},
1444 	{0x0000007f, 0x00000003},
1445 	{0x00000080, 0x00000000},
1446 	{0x00000081, 0x01000000},
1447 	{0x00000082, 0x02000000},
1448 	{0x00000083, 0x00000000},
1449 	{0x00000084, 0xe3f3e4f4},
1450 	{0x00000085, 0x00052024},
1451 	{0x00000087, 0x00000000},
1452 	{0x00000088, 0x66036603},
1453 	{0x00000089, 0x01000000},
1454 	{0x0000008b, 0x1c0a0000},
1455 	{0x0000008c, 0xff010000},
1456 	{0x0000008e, 0xffffefff},
1457 	{0x0000008f, 0xfff3efff},
1458 	{0x00000090, 0xfff3efbf},
1459 	{0x00000094, 0x00101101},
1460 	{0x00000095, 0x00000fff},
1461 	{0x00000096, 0x00116fff},
1462 	{0x00000097, 0x60010000},
1463 	{0x00000098, 0x10010000},
1464 	{0x00000099, 0x00006000},
1465 	{0x0000009a, 0x00001000},
1466 	{0x0000009f, 0x00a37400}
1467 };
1468 
1469 static const u32 oland_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
1470 	{0x0000006f, 0x03044000},
1471 	{0x00000070, 0x0480c018},
1472 	{0x00000071, 0x00000040},
1473 	{0x00000072, 0x01000000},
1474 	{0x00000074, 0x000000ff},
1475 	{0x00000075, 0x00143400},
1476 	{0x00000076, 0x08ec0800},
1477 	{0x00000077, 0x040000cc},
1478 	{0x00000079, 0x00000000},
1479 	{0x0000007a, 0x21000409},
1480 	{0x0000007c, 0x00000000},
1481 	{0x0000007d, 0xe8000000},
1482 	{0x0000007e, 0x044408a8},
1483 	{0x0000007f, 0x00000003},
1484 	{0x00000080, 0x00000000},
1485 	{0x00000081, 0x01000000},
1486 	{0x00000082, 0x02000000},
1487 	{0x00000083, 0x00000000},
1488 	{0x00000084, 0xe3f3e4f4},
1489 	{0x00000085, 0x00052024},
1490 	{0x00000087, 0x00000000},
1491 	{0x00000088, 0x66036603},
1492 	{0x00000089, 0x01000000},
1493 	{0x0000008b, 0x1c0a0000},
1494 	{0x0000008c, 0xff010000},
1495 	{0x0000008e, 0xffffefff},
1496 	{0x0000008f, 0xfff3efff},
1497 	{0x00000090, 0xfff3efbf},
1498 	{0x00000094, 0x00101101},
1499 	{0x00000095, 0x00000fff},
1500 	{0x00000096, 0x00116fff},
1501 	{0x00000097, 0x60010000},
1502 	{0x00000098, 0x10010000},
1503 	{0x00000099, 0x00006000},
1504 	{0x0000009a, 0x00001000},
1505 	{0x0000009f, 0x00a17730}
1506 };
1507 
1508 static const u32 hainan_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
1509 	{0x0000006f, 0x03044000},
1510 	{0x00000070, 0x0480c018},
1511 	{0x00000071, 0x00000040},
1512 	{0x00000072, 0x01000000},
1513 	{0x00000074, 0x000000ff},
1514 	{0x00000075, 0x00143400},
1515 	{0x00000076, 0x08ec0800},
1516 	{0x00000077, 0x040000cc},
1517 	{0x00000079, 0x00000000},
1518 	{0x0000007a, 0x21000409},
1519 	{0x0000007c, 0x00000000},
1520 	{0x0000007d, 0xe8000000},
1521 	{0x0000007e, 0x044408a8},
1522 	{0x0000007f, 0x00000003},
1523 	{0x00000080, 0x00000000},
1524 	{0x00000081, 0x01000000},
1525 	{0x00000082, 0x02000000},
1526 	{0x00000083, 0x00000000},
1527 	{0x00000084, 0xe3f3e4f4},
1528 	{0x00000085, 0x00052024},
1529 	{0x00000087, 0x00000000},
1530 	{0x00000088, 0x66036603},
1531 	{0x00000089, 0x01000000},
1532 	{0x0000008b, 0x1c0a0000},
1533 	{0x0000008c, 0xff010000},
1534 	{0x0000008e, 0xffffefff},
1535 	{0x0000008f, 0xfff3efff},
1536 	{0x00000090, 0xfff3efbf},
1537 	{0x00000094, 0x00101101},
1538 	{0x00000095, 0x00000fff},
1539 	{0x00000096, 0x00116fff},
1540 	{0x00000097, 0x60010000},
1541 	{0x00000098, 0x10010000},
1542 	{0x00000099, 0x00006000},
1543 	{0x0000009a, 0x00001000},
1544 	{0x0000009f, 0x00a07730}
1545 };
1546 
1547 /* ucode loading */
1548 int si_mc_load_microcode(struct radeon_device *rdev)
1549 {
1550 	const __be32 *fw_data = NULL;
1551 	const __le32 *new_fw_data = NULL;
1552 	u32 running;
1553 	u32 *io_mc_regs = NULL;
1554 	const __le32 *new_io_mc_regs = NULL;
1555 	int i, regs_size, ucode_size;
1556 
1557 	if (!rdev->mc_fw)
1558 		return -EINVAL;
1559 
1560 	if (rdev->new_fw) {
1561 		const struct mc_firmware_header_v1_0 *hdr =
1562 			(const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data;
1563 
1564 		radeon_ucode_print_mc_hdr(&hdr->header);
1565 		regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
1566 		new_io_mc_regs = (const __le32 *)
1567 			(rdev->mc_fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
1568 		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1569 		new_fw_data = (const __le32 *)
1570 			(rdev->mc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1571 	} else {
1572 		ucode_size = rdev->mc_fw->size / 4;
1573 
1574 		switch (rdev->family) {
1575 		case CHIP_TAHITI:
1576 			io_mc_regs = (u32 *)&tahiti_io_mc_regs;
1577 			regs_size = TAHITI_IO_MC_REGS_SIZE;
1578 			break;
1579 		case CHIP_PITCAIRN:
1580 			io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
1581 			regs_size = TAHITI_IO_MC_REGS_SIZE;
1582 			break;
1583 		case CHIP_VERDE:
1584 		default:
1585 			io_mc_regs = (u32 *)&verde_io_mc_regs;
1586 			regs_size = TAHITI_IO_MC_REGS_SIZE;
1587 			break;
1588 		case CHIP_OLAND:
1589 			io_mc_regs = (u32 *)&oland_io_mc_regs;
1590 			regs_size = TAHITI_IO_MC_REGS_SIZE;
1591 			break;
1592 		case CHIP_HAINAN:
1593 			io_mc_regs = (u32 *)&hainan_io_mc_regs;
1594 			regs_size = TAHITI_IO_MC_REGS_SIZE;
1595 			break;
1596 		}
1597 		fw_data = (const __be32 *)rdev->mc_fw->data;
1598 	}
1599 
1600 	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
1601 
1602 	if (running == 0) {
1603 		/* reset the engine and set to writable */
1604 		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
1605 		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
1606 
1607 		/* load mc io regs */
1608 		for (i = 0; i < regs_size; i++) {
1609 			if (rdev->new_fw) {
1610 				WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
1611 				WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
1612 			} else {
1613 				WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
1614 				WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
1615 			}
1616 		}
1617 		/* load the MC ucode */
1618 		for (i = 0; i < ucode_size; i++) {
1619 			if (rdev->new_fw)
1620 				WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
1621 			else
1622 				WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
1623 		}
1624 
1625 		/* put the engine back into the active state */
1626 		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
1627 		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
1628 		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
1629 
1630 		/* wait for training to complete */
1631 		for (i = 0; i < rdev->usec_timeout; i++) {
1632 			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
1633 				break;
1634 			udelay(1);
1635 		}
1636 		for (i = 0; i < rdev->usec_timeout; i++) {
1637 			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
1638 				break;
1639 			udelay(1);
1640 		}
1641 	}
1642 
1643 	return 0;
1644 }
1645 
1646 static int si_init_microcode(struct radeon_device *rdev)
1647 {
1648 	const char *chip_name;
1649 	const char *new_chip_name;
1650 	size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
1651 	size_t smc_req_size, mc2_req_size;
1652 	char fw_name[30];
1653 	int err;
1654 	int new_fw = 0;
1655 	bool new_smc = false;
1656 	bool si58_fw = false;
1657 	bool banks2_fw = false;
1658 
1659 	DRM_DEBUG("\n");
1660 
1661 	switch (rdev->family) {
1662 	case CHIP_TAHITI:
1663 		chip_name = "TAHITI";
1664 		new_chip_name = "tahiti";
1665 		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1666 		me_req_size = SI_PM4_UCODE_SIZE * 4;
1667 		ce_req_size = SI_CE_UCODE_SIZE * 4;
1668 		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1669 		mc_req_size = SI_MC_UCODE_SIZE * 4;
1670 		mc2_req_size = TAHITI_MC_UCODE_SIZE * 4;
1671 		smc_req_size = ALIGN(TAHITI_SMC_UCODE_SIZE, 4);
1672 		break;
1673 	case CHIP_PITCAIRN:
1674 		chip_name = "PITCAIRN";
1675 		if ((rdev->pdev->revision == 0x81) &&
1676 		    ((rdev->pdev->device == 0x6810) ||
1677 		     (rdev->pdev->device == 0x6811)))
1678 			new_smc = true;
1679 		new_chip_name = "pitcairn";
1680 		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1681 		me_req_size = SI_PM4_UCODE_SIZE * 4;
1682 		ce_req_size = SI_CE_UCODE_SIZE * 4;
1683 		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1684 		mc_req_size = SI_MC_UCODE_SIZE * 4;
1685 		mc2_req_size = PITCAIRN_MC_UCODE_SIZE * 4;
1686 		smc_req_size = ALIGN(PITCAIRN_SMC_UCODE_SIZE, 4);
1687 		break;
1688 	case CHIP_VERDE:
1689 		chip_name = "VERDE";
1690 		if (((rdev->pdev->device == 0x6820) &&
1691 		     ((rdev->pdev->revision == 0x81) ||
1692 		      (rdev->pdev->revision == 0x83))) ||
1693 		    ((rdev->pdev->device == 0x6821) &&
1694 		     ((rdev->pdev->revision == 0x83) ||
1695 		      (rdev->pdev->revision == 0x87))) ||
1696 		    ((rdev->pdev->revision == 0x87) &&
1697 		     ((rdev->pdev->device == 0x6823) ||
1698 		      (rdev->pdev->device == 0x682b))))
1699 			new_smc = true;
1700 		new_chip_name = "verde";
1701 		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1702 		me_req_size = SI_PM4_UCODE_SIZE * 4;
1703 		ce_req_size = SI_CE_UCODE_SIZE * 4;
1704 		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1705 		mc_req_size = SI_MC_UCODE_SIZE * 4;
1706 		mc2_req_size = VERDE_MC_UCODE_SIZE * 4;
1707 		smc_req_size = ALIGN(VERDE_SMC_UCODE_SIZE, 4);
1708 		break;
1709 	case CHIP_OLAND:
1710 		chip_name = "OLAND";
1711 		if (((rdev->pdev->revision == 0x81) &&
1712 		     ((rdev->pdev->device == 0x6600) ||
1713 		      (rdev->pdev->device == 0x6604) ||
1714 		      (rdev->pdev->device == 0x6605) ||
1715 		      (rdev->pdev->device == 0x6610))) ||
1716 		    ((rdev->pdev->revision == 0x83) &&
1717 		     (rdev->pdev->device == 0x6610)))
1718 			new_smc = true;
1719 		new_chip_name = "oland";
1720 		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1721 		me_req_size = SI_PM4_UCODE_SIZE * 4;
1722 		ce_req_size = SI_CE_UCODE_SIZE * 4;
1723 		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1724 		mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4;
1725 		smc_req_size = ALIGN(OLAND_SMC_UCODE_SIZE, 4);
1726 		break;
1727 	case CHIP_HAINAN:
1728 		chip_name = "HAINAN";
1729 		if (((rdev->pdev->revision == 0x81) &&
1730 		     (rdev->pdev->device == 0x6660)) ||
1731 		    ((rdev->pdev->revision == 0x83) &&
1732 		     ((rdev->pdev->device == 0x6660) ||
1733 		      (rdev->pdev->device == 0x6663) ||
1734 		      (rdev->pdev->device == 0x6665) ||
1735 		      (rdev->pdev->device == 0x6667))))
1736 			new_smc = true;
1737 		else if ((rdev->pdev->revision == 0xc3) &&
1738 			 (rdev->pdev->device == 0x6665))
1739 			banks2_fw = true;
1740 		new_chip_name = "hainan";
1741 		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1742 		me_req_size = SI_PM4_UCODE_SIZE * 4;
1743 		ce_req_size = SI_CE_UCODE_SIZE * 4;
1744 		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1745 		mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4;
1746 		smc_req_size = ALIGN(HAINAN_SMC_UCODE_SIZE, 4);
1747 		break;
	default:
		BUG();
1749 	}
1750 
1751 	/* this memory configuration requires special firmware */
1752 	if (((RREG32(MC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
1753 		si58_fw = true;
1754 
1755 	DRM_INFO("Loading %s Microcode\n", new_chip_name);
1756 
1757 	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
1758 	err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
1759 	if (err) {
1760 		snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
1761 		err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
1762 		if (err)
1763 			goto out;
1764 		if (rdev->pfp_fw->size != pfp_req_size) {
1765 			printk(KERN_ERR
1766 			       "si_cp: Bogus length %zu in firmware \"%s\"\n",
1767 			       rdev->pfp_fw->size, fw_name);
1768 			err = -EINVAL;
1769 			goto out;
1770 		}
1771 	} else {
1772 		err = radeon_ucode_validate(rdev->pfp_fw);
1773 		if (err) {
1774 			printk(KERN_ERR
1775 			       "si_cp: validation failed for firmware \"%s\"\n",
1776 			       fw_name);
1777 			goto out;
1778 		} else {
1779 			new_fw++;
1780 		}
1781 	}
1782 
1783 	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", new_chip_name);
1784 	err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
1785 	if (err) {
1786 		snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
1787 		err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
1788 		if (err)
1789 			goto out;
1790 		if (rdev->me_fw->size != me_req_size) {
1791 			printk(KERN_ERR
1792 			       "si_cp: Bogus length %zu in firmware \"%s\"\n",
1793 			       rdev->me_fw->size, fw_name);
			err = -EINVAL;
			goto out;
		}
1796 	} else {
1797 		err = radeon_ucode_validate(rdev->me_fw);
1798 		if (err) {
1799 			printk(KERN_ERR
1800 			       "si_cp: validation failed for firmware \"%s\"\n",
1801 			       fw_name);
1802 			goto out;
1803 		} else {
1804 			new_fw++;
1805 		}
1806 	}
1807 
1808 	snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", new_chip_name);
1809 	err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
1810 	if (err) {
1811 		snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
1812 		err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
1813 		if (err)
1814 			goto out;
1815 		if (rdev->ce_fw->size != ce_req_size) {
1816 			printk(KERN_ERR
1817 			       "si_cp: Bogus length %zu in firmware \"%s\"\n",
1818 			       rdev->ce_fw->size, fw_name);
			err = -EINVAL;
			goto out;
		}
1821 	} else {
1822 		err = radeon_ucode_validate(rdev->ce_fw);
1823 		if (err) {
1824 			printk(KERN_ERR
1825 			       "si_cp: validation failed for firmware \"%s\"\n",
1826 			       fw_name);
1827 			goto out;
1828 		} else {
1829 			new_fw++;
1830 		}
1831 	}
1832 
1833 	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", new_chip_name);
1834 	err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
1835 	if (err) {
1836 		snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
1837 		err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
1838 		if (err)
1839 			goto out;
1840 		if (rdev->rlc_fw->size != rlc_req_size) {
1841 			printk(KERN_ERR
1842 			       "si_rlc: Bogus length %zu in firmware \"%s\"\n",
1843 			       rdev->rlc_fw->size, fw_name);
			err = -EINVAL;
			goto out;
		}
1846 	} else {
1847 		err = radeon_ucode_validate(rdev->rlc_fw);
1848 		if (err) {
1849 			printk(KERN_ERR
1850 			       "si_cp: validation failed for firmware \"%s\"\n",
1851 			       fw_name);
1852 			goto out;
1853 		} else {
1854 			new_fw++;
1855 		}
1856 	}
1857 
1858 	if (si58_fw)
1859 		snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
1860 	else
1861 		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
1862 	err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1863 	if (err) {
1864 		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
1865 		err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1866 		if (err) {
1867 			snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
1868 			err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1869 			if (err)
1870 				goto out;
1871 		}
1872 		if ((rdev->mc_fw->size != mc_req_size) &&
1873 		    (rdev->mc_fw->size != mc2_req_size)) {
1874 			printk(KERN_ERR
1875 			       "si_mc: Bogus length %zu in firmware \"%s\"\n",
1876 			       rdev->mc_fw->size, fw_name);
			err = -EINVAL;
			goto out;
		}
1879 		DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
1880 	} else {
1881 		err = radeon_ucode_validate(rdev->mc_fw);
1882 		if (err) {
1883 			printk(KERN_ERR
1884 			       "si_cp: validation failed for firmware \"%s\"\n",
1885 			       fw_name);
1886 			goto out;
1887 		} else {
1888 			new_fw++;
1889 		}
1890 	}
1891 
1892 	if (banks2_fw)
1893 		snprintf(fw_name, sizeof(fw_name), "radeon/banks_k_2_smc.bin");
1894 	else if (new_smc)
1895 		snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name);
1896 	else
1897 		snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
1898 	err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
1899 	if (err) {
1900 		snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
1901 		err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
1902 		if (err) {
1903 			printk(KERN_ERR
1904 			       "smc: error loading firmware \"%s\"\n",
1905 			       fw_name);
1906 			release_firmware(rdev->smc_fw);
1907 			rdev->smc_fw = NULL;
1908 			err = 0;
1909 		} else if (rdev->smc_fw->size != smc_req_size) {
1910 			printk(KERN_ERR
1911 			       "si_smc: Bogus length %zu in firmware \"%s\"\n",
1912 			       rdev->smc_fw->size, fw_name);
1913 			err = -EINVAL;
1914 		}
1915 	} else {
1916 		err = radeon_ucode_validate(rdev->smc_fw);
1917 		if (err) {
1918 			printk(KERN_ERR
1919 			       "si_cp: validation failed for firmware \"%s\"\n",
1920 			       fw_name);
1921 			goto out;
1922 		} else {
1923 			new_fw++;
1924 		}
1925 	}
1926 
1927 	if (new_fw == 0) {
1928 		rdev->new_fw = false;
1929 	} else if (new_fw < 6) {
1930 		printk(KERN_ERR "si_fw: mixing new and old firmware!\n");
1931 		err = -EINVAL;
1932 	} else {
1933 		rdev->new_fw = true;
1934 	}
1935 out:
1936 	if (err) {
1937 		if (err != -EINVAL)
1938 			printk(KERN_ERR
1939 			       "si_cp: Failed to load firmware \"%s\"\n",
1940 			       fw_name);
1941 		release_firmware(rdev->pfp_fw);
1942 		rdev->pfp_fw = NULL;
1943 		release_firmware(rdev->me_fw);
1944 		rdev->me_fw = NULL;
1945 		release_firmware(rdev->ce_fw);
1946 		rdev->ce_fw = NULL;
1947 		release_firmware(rdev->rlc_fw);
1948 		rdev->rlc_fw = NULL;
1949 		release_firmware(rdev->mc_fw);
1950 		rdev->mc_fw = NULL;
1951 		release_firmware(rdev->smc_fw);
1952 		rdev->smc_fw = NULL;
1953 	}
1954 	return err;
1955 }
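
/*
 * Naming sketch (illustrative): each block above first requests the
 * new-style lowercase image and only falls back to the legacy uppercase
 * one, e.g. for the PFP on Tahiti:
 *
 *	radeon/tahiti_pfp.bin	(new style, radeon_ucode_validate()d)
 *	radeon/TAHITI_pfp.bin	(legacy, size-checked against pfp_req_size)
 *
 * The new_fw counter then enforces that all six images (pfp, me, ce,
 * rlc, mc, smc) come from the same set; a partial mix fails with -EINVAL.
 */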
1956 
1957 /* watermark setup */
1958 static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
1959 				   struct radeon_crtc *radeon_crtc,
1960 				   struct drm_display_mode *mode,
1961 				   struct drm_display_mode *other_mode)
1962 {
1963 	u32 tmp, buffer_alloc, i;
1964 	u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
1965 	/*
1966 	 * Line Buffer Setup
1967 	 * There are 3 line buffers, each one shared by 2 display controllers.
1968 	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers.  The partitioning is done via one of four
	 * preset allocations specified in bits 21:20 (only two are used here):
1971 	 *  0 - half lb
1972 	 *  2 - whole lb, other crtc must be disabled
1973 	 */
1974 	/* this can get tricky if we have two large displays on a paired group
1975 	 * of crtcs.  Ideally for multiple large displays we'd assign them to
1976 	 * non-linked crtcs for maximum line buffer allocation.
1977 	 */
1978 	if (radeon_crtc->base.enabled && mode) {
1979 		if (other_mode) {
1980 			tmp = 0; /* 1/2 */
1981 			buffer_alloc = 1;
1982 		} else {
1983 			tmp = 2; /* whole */
1984 			buffer_alloc = 2;
1985 		}
1986 	} else {
1987 		tmp = 0;
1988 		buffer_alloc = 0;
1989 	}
1990 
1991 	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset,
1992 	       DC_LB_MEMORY_CONFIG(tmp));
1993 
1994 	WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
1995 	       DMIF_BUFFERS_ALLOCATED(buffer_alloc));
1996 	for (i = 0; i < rdev->usec_timeout; i++) {
1997 		if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
1998 		    DMIF_BUFFERS_ALLOCATED_COMPLETED)
1999 			break;
2000 		udelay(1);
2001 	}
2002 
2003 	if (radeon_crtc->base.enabled && mode) {
2004 		switch (tmp) {
2005 		case 0:
2006 		default:
2007 			return 4096 * 2;
2008 		case 2:
2009 			return 8192 * 2;
2010 		}
2011 	}
2012 
2013 	/* controller not enabled, so no lb used */
2014 	return 0;
2015 }
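
/*
 * Sizing sketch (illustrative): when both crtcs of a pair are active,
 * each gets tmp = 0 (half the buffer) and this reports 4096 * 2; with
 * the partner disabled, tmp = 2 (whole buffer) reports 8192 * 2.  The
 * returned lb_size feeds the watermark math below.
 */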
2016 
2017 static u32 si_get_number_of_dram_channels(struct radeon_device *rdev)
2018 {
2019 	u32 tmp = RREG32(MC_SHARED_CHMAP);
2020 
2021 	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
2022 	case 0:
2023 	default:
2024 		return 1;
2025 	case 1:
2026 		return 2;
2027 	case 2:
2028 		return 4;
2029 	case 3:
2030 		return 8;
2031 	case 4:
2032 		return 3;
2033 	case 5:
2034 		return 6;
2035 	case 6:
2036 		return 10;
2037 	case 7:
2038 		return 12;
2039 	case 8:
2040 		return 16;
2041 	}
2042 }
2043 
2044 struct dce6_wm_params {
2045 	u32 dram_channels; /* number of dram channels */
2046 	u32 yclk;          /* bandwidth per dram data pin in kHz */
2047 	u32 sclk;          /* engine clock in kHz */
2048 	u32 disp_clk;      /* display clock in kHz */
2049 	u32 src_width;     /* viewport width */
2050 	u32 active_time;   /* active display time in ns */
2051 	u32 blank_time;    /* blank time in ns */
2052 	bool interlaced;    /* mode is interlaced */
2053 	fixed20_12 vsc;    /* vertical scale ratio */
2054 	u32 num_heads;     /* number of active crtcs */
2055 	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
2056 	u32 lb_size;       /* line buffer allocated to pipe */
2057 	u32 vtaps;         /* vertical scaler taps */
2058 };
2059 
2060 static u32 dce6_dram_bandwidth(struct dce6_wm_params *wm)
2061 {
2062 	/* Calculate raw DRAM Bandwidth */
2063 	fixed20_12 dram_efficiency; /* 0.7 */
2064 	fixed20_12 yclk, dram_channels, bandwidth;
2065 	fixed20_12 a;
2066 
2067 	a.full = dfixed_const(1000);
2068 	yclk.full = dfixed_const(wm->yclk);
2069 	yclk.full = dfixed_div(yclk, a);
2070 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
2071 	a.full = dfixed_const(10);
2072 	dram_efficiency.full = dfixed_const(7);
2073 	dram_efficiency.full = dfixed_div(dram_efficiency, a);
2074 	bandwidth.full = dfixed_mul(dram_channels, yclk);
2075 	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
2076 
2077 	return dfixed_trunc(bandwidth);
2078 }
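
/*
 * Worked example (illustrative numbers): with yclk = 1250000 kHz and
 * dram_channels = 12 (a 384-bit bus), the fixed-point chain above gives
 *
 *	bandwidth = (12 * 4 bytes) * (1250000 / 1000) * 0.7
 *		  = 48 * 1250 * 0.7 ~= 42000 MB/s
 */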
2079 
2080 static u32 dce6_dram_bandwidth_for_display(struct dce6_wm_params *wm)
2081 {
2082 	/* Calculate DRAM Bandwidth and the part allocated to display. */
2083 	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
2084 	fixed20_12 yclk, dram_channels, bandwidth;
2085 	fixed20_12 a;
2086 
2087 	a.full = dfixed_const(1000);
2088 	yclk.full = dfixed_const(wm->yclk);
2089 	yclk.full = dfixed_div(yclk, a);
2090 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
2091 	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
2093 	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
2094 	bandwidth.full = dfixed_mul(dram_channels, yclk);
2095 	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
2096 
2097 	return dfixed_trunc(bandwidth);
2098 }
2099 
2100 static u32 dce6_data_return_bandwidth(struct dce6_wm_params *wm)
2101 {
2102 	/* Calculate the display Data return Bandwidth */
2103 	fixed20_12 return_efficiency; /* 0.8 */
2104 	fixed20_12 sclk, bandwidth;
2105 	fixed20_12 a;
2106 
2107 	a.full = dfixed_const(1000);
2108 	sclk.full = dfixed_const(wm->sclk);
2109 	sclk.full = dfixed_div(sclk, a);
2110 	a.full = dfixed_const(10);
2111 	return_efficiency.full = dfixed_const(8);
2112 	return_efficiency.full = dfixed_div(return_efficiency, a);
2113 	a.full = dfixed_const(32);
2114 	bandwidth.full = dfixed_mul(a, sclk);
2115 	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
2116 
2117 	return dfixed_trunc(bandwidth);
2118 }
2119 
2120 static u32 dce6_get_dmif_bytes_per_request(struct dce6_wm_params *wm)
2121 {
2122 	return 32;
2123 }
2124 
2125 static u32 dce6_dmif_request_bandwidth(struct dce6_wm_params *wm)
2126 {
2127 	/* Calculate the DMIF Request Bandwidth */
2128 	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
2129 	fixed20_12 disp_clk, sclk, bandwidth;
2130 	fixed20_12 a, b1, b2;
2131 	u32 min_bandwidth;
2132 
2133 	a.full = dfixed_const(1000);
2134 	disp_clk.full = dfixed_const(wm->disp_clk);
2135 	disp_clk.full = dfixed_div(disp_clk, a);
2136 	a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm) / 2);
2137 	b1.full = dfixed_mul(a, disp_clk);
2138 
2139 	a.full = dfixed_const(1000);
2140 	sclk.full = dfixed_const(wm->sclk);
2141 	sclk.full = dfixed_div(sclk, a);
2142 	a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm));
2143 	b2.full = dfixed_mul(a, sclk);
2144 
2145 	a.full = dfixed_const(10);
2146 	disp_clk_request_efficiency.full = dfixed_const(8);
2147 	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
2148 
2149 	min_bandwidth = min(dfixed_trunc(b1), dfixed_trunc(b2));
2150 
2151 	a.full = dfixed_const(min_bandwidth);
2152 	bandwidth.full = dfixed_mul(a, disp_clk_request_efficiency);
2153 
2154 	return dfixed_trunc(bandwidth);
2155 }
2156 
2157 static u32 dce6_available_bandwidth(struct dce6_wm_params *wm)
2158 {
	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
2160 	u32 dram_bandwidth = dce6_dram_bandwidth(wm);
2161 	u32 data_return_bandwidth = dce6_data_return_bandwidth(wm);
2162 	u32 dmif_req_bandwidth = dce6_dmif_request_bandwidth(wm);
2163 
2164 	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
2165 }
2166 
2167 static u32 dce6_average_bandwidth(struct dce6_wm_params *wm)
2168 {
2169 	/* Calculate the display mode Average Bandwidth
2170 	 * DisplayMode should contain the source and destination dimensions,
2171 	 * timing, etc.
2172 	 */
2173 	fixed20_12 bpp;
2174 	fixed20_12 line_time;
2175 	fixed20_12 src_width;
2176 	fixed20_12 bandwidth;
2177 	fixed20_12 a;
2178 
2179 	a.full = dfixed_const(1000);
2180 	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
2181 	line_time.full = dfixed_div(line_time, a);
2182 	bpp.full = dfixed_const(wm->bytes_per_pixel);
2183 	src_width.full = dfixed_const(wm->src_width);
2184 	bandwidth.full = dfixed_mul(src_width, bpp);
2185 	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
2186 	bandwidth.full = dfixed_div(bandwidth, line_time);
2187 
2188 	return dfixed_trunc(bandwidth);
2189 }
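
/*
 * Worked example (illustrative numbers): a 1080p-like mode with
 * src_width = 1920, bytes_per_pixel = 4, vsc = 1 and a line time
 * (active + blank) of ~14800 ns averages to
 *
 *	bandwidth = 1920 * 4 * 1 / (14800 / 1000) ~= 519 MB/s
 */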
2190 
2191 static u32 dce6_latency_watermark(struct dce6_wm_params *wm)
2192 {
	/* First calculate the latency in ns */
2194 	u32 mc_latency = 2000; /* 2000 ns. */
2195 	u32 available_bandwidth = dce6_available_bandwidth(wm);
2196 	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
2197 	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
2198 	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
2199 	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
2200 		(wm->num_heads * cursor_line_pair_return_time);
2201 	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
2202 	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
2203 	u32 tmp, dmif_size = 12288;
2204 	fixed20_12 a, b, c;
2205 
2206 	if (wm->num_heads == 0)
2207 		return 0;
2208 
2209 	a.full = dfixed_const(2);
2210 	b.full = dfixed_const(1);
2211 	if ((wm->vsc.full > a.full) ||
2212 	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
2213 	    (wm->vtaps >= 5) ||
2214 	    ((wm->vsc.full >= a.full) && wm->interlaced))
2215 		max_src_lines_per_dst_line = 4;
2216 	else
2217 		max_src_lines_per_dst_line = 2;
2218 
2219 	a.full = dfixed_const(available_bandwidth);
2220 	b.full = dfixed_const(wm->num_heads);
2221 	a.full = dfixed_div(a, b);
2222 
2223 	b.full = dfixed_const(mc_latency + 512);
2224 	c.full = dfixed_const(wm->disp_clk);
2225 	b.full = dfixed_div(b, c);
2226 
2227 	c.full = dfixed_const(dmif_size);
2228 	b.full = dfixed_div(c, b);
2229 
2230 	tmp = min(dfixed_trunc(a), dfixed_trunc(b));
2231 
2232 	b.full = dfixed_const(1000);
2233 	c.full = dfixed_const(wm->disp_clk);
2234 	b.full = dfixed_div(c, b);
2235 	c.full = dfixed_const(wm->bytes_per_pixel);
2236 	b.full = dfixed_mul(b, c);
2237 
2238 	lb_fill_bw = min(tmp, dfixed_trunc(b));
2239 
2240 	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
2241 	b.full = dfixed_const(1000);
2242 	c.full = dfixed_const(lb_fill_bw);
2243 	b.full = dfixed_div(c, b);
2244 	a.full = dfixed_div(a, b);
2245 	line_fill_time = dfixed_trunc(a);
2246 
	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
2252 }
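
/*
 * Summary sketch (illustrative): the watermark above is the sum of the
 * MC, other-head and display-pipe latencies in ns, padded by however
 * long a line-buffer refill overruns one active line; e.g. a 3000 ns
 * latency with line_fill_time 500 ns over active_time yields 3500 ns.
 */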
2253 
static bool dce6_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
	return dce6_average_bandwidth(wm) <=
	       (dce6_dram_bandwidth_for_display(wm) / wm->num_heads);
}
2262 
static bool dce6_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
{
	return dce6_average_bandwidth(wm) <=
	       (dce6_available_bandwidth(wm) / wm->num_heads);
}
2271 
2272 static bool dce6_check_latency_hiding(struct dce6_wm_params *wm)
2273 {
2274 	u32 lb_partitions = wm->lb_size / wm->src_width;
2275 	u32 line_time = wm->active_time + wm->blank_time;
2276 	u32 latency_tolerant_lines;
2277 	u32 latency_hiding;
2278 	fixed20_12 a;
2279 
2280 	a.full = dfixed_const(1);
2281 	if (wm->vsc.full > a.full)
2282 		latency_tolerant_lines = 1;
2283 	else {
2284 		if (lb_partitions <= (wm->vtaps + 1))
2285 			latency_tolerant_lines = 1;
2286 		else
2287 			latency_tolerant_lines = 2;
2288 	}
2289 
2290 	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
2291 
	return dce6_latency_watermark(wm) <= latency_hiding;
2296 }
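
/*
 * Example (illustrative numbers): with lb_size = 8192, src_width = 1920
 * and vtaps = 1, lb_partitions = 4 > vtaps + 1, so two lines of latency
 * tolerance are assumed and the watermark must fit within two line
 * times plus the blank time.
 */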
2297 
2298 static void dce6_program_watermarks(struct radeon_device *rdev,
2299 					 struct radeon_crtc *radeon_crtc,
2300 					 u32 lb_size, u32 num_heads)
2301 {
2302 	struct drm_display_mode *mode = &radeon_crtc->base.mode;
2303 	struct dce6_wm_params wm_low, wm_high;
2304 	u32 dram_channels;
2305 	u32 pixel_period;
2306 	u32 line_time = 0;
2307 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
2308 	u32 priority_a_mark = 0, priority_b_mark = 0;
2309 	u32 priority_a_cnt = PRIORITY_OFF;
2310 	u32 priority_b_cnt = PRIORITY_OFF;
2311 	u32 tmp, arb_control3;
2312 	fixed20_12 a, b, c;
2313 
2314 	if (radeon_crtc->base.enabled && num_heads && mode) {
2315 		pixel_period = 1000000 / (u32)mode->clock;
2316 		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
2317 		priority_a_cnt = 0;
2318 		priority_b_cnt = 0;
2319 
2320 		if (rdev->family == CHIP_ARUBA)
2321 			dram_channels = evergreen_get_number_of_dram_channels(rdev);
2322 		else
2323 			dram_channels = si_get_number_of_dram_channels(rdev);
2324 
2325 		/* watermark for high clocks */
2326 		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
2327 			wm_high.yclk =
2328 				radeon_dpm_get_mclk(rdev, false) * 10;
2329 			wm_high.sclk =
2330 				radeon_dpm_get_sclk(rdev, false) * 10;
2331 		} else {
2332 			wm_high.yclk = rdev->pm.current_mclk * 10;
2333 			wm_high.sclk = rdev->pm.current_sclk * 10;
2334 		}
2335 
2336 		wm_high.disp_clk = mode->clock;
2337 		wm_high.src_width = mode->crtc_hdisplay;
2338 		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
2339 		wm_high.blank_time = line_time - wm_high.active_time;
2340 		wm_high.interlaced = false;
2341 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2342 			wm_high.interlaced = true;
2343 		wm_high.vsc = radeon_crtc->vsc;
2344 		wm_high.vtaps = 1;
2345 		if (radeon_crtc->rmx_type != RMX_OFF)
2346 			wm_high.vtaps = 2;
2347 		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
2348 		wm_high.lb_size = lb_size;
2349 		wm_high.dram_channels = dram_channels;
2350 		wm_high.num_heads = num_heads;
2351 
2352 		/* watermark for low clocks */
2353 		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
2354 			wm_low.yclk =
2355 				radeon_dpm_get_mclk(rdev, true) * 10;
2356 			wm_low.sclk =
2357 				radeon_dpm_get_sclk(rdev, true) * 10;
2358 		} else {
2359 			wm_low.yclk = rdev->pm.current_mclk * 10;
2360 			wm_low.sclk = rdev->pm.current_sclk * 10;
2361 		}
2362 
2363 		wm_low.disp_clk = mode->clock;
2364 		wm_low.src_width = mode->crtc_hdisplay;
2365 		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
2366 		wm_low.blank_time = line_time - wm_low.active_time;
2367 		wm_low.interlaced = false;
2368 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2369 			wm_low.interlaced = true;
2370 		wm_low.vsc = radeon_crtc->vsc;
2371 		wm_low.vtaps = 1;
2372 		if (radeon_crtc->rmx_type != RMX_OFF)
2373 			wm_low.vtaps = 2;
2374 		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
2375 		wm_low.lb_size = lb_size;
2376 		wm_low.dram_channels = dram_channels;
2377 		wm_low.num_heads = num_heads;
2378 
2379 		/* set for high clocks */
2380 		latency_watermark_a = min(dce6_latency_watermark(&wm_high), (u32)65535);
2381 		/* set for low clocks */
2382 		latency_watermark_b = min(dce6_latency_watermark(&wm_low), (u32)65535);
2383 
2384 		/* possibly force display priority to high */
2385 		/* should really do this at mode validation time... */
2386 		if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
2387 		    !dce6_average_bandwidth_vs_available_bandwidth(&wm_high) ||
2388 		    !dce6_check_latency_hiding(&wm_high) ||
2389 		    (rdev->disp_priority == 2)) {
2390 			DRM_DEBUG_KMS("force priority to high\n");
2391 			priority_a_cnt |= PRIORITY_ALWAYS_ON;
2392 			priority_b_cnt |= PRIORITY_ALWAYS_ON;
2393 		}
2394 		if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
2395 		    !dce6_average_bandwidth_vs_available_bandwidth(&wm_low) ||
2396 		    !dce6_check_latency_hiding(&wm_low) ||
2397 		    (rdev->disp_priority == 2)) {
2398 			DRM_DEBUG_KMS("force priority to high\n");
2399 			priority_a_cnt |= PRIORITY_ALWAYS_ON;
2400 			priority_b_cnt |= PRIORITY_ALWAYS_ON;
2401 		}
2402 
2403 		a.full = dfixed_const(1000);
2404 		b.full = dfixed_const(mode->clock);
2405 		b.full = dfixed_div(b, a);
2406 		c.full = dfixed_const(latency_watermark_a);
2407 		c.full = dfixed_mul(c, b);
2408 		c.full = dfixed_mul(c, radeon_crtc->hsc);
2409 		c.full = dfixed_div(c, a);
2410 		a.full = dfixed_const(16);
2411 		c.full = dfixed_div(c, a);
2412 		priority_a_mark = dfixed_trunc(c);
2413 		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
2414 
2415 		a.full = dfixed_const(1000);
2416 		b.full = dfixed_const(mode->clock);
2417 		b.full = dfixed_div(b, a);
2418 		c.full = dfixed_const(latency_watermark_b);
2419 		c.full = dfixed_mul(c, b);
2420 		c.full = dfixed_mul(c, radeon_crtc->hsc);
2421 		c.full = dfixed_div(c, a);
2422 		a.full = dfixed_const(16);
2423 		c.full = dfixed_div(c, a);
2424 		priority_b_mark = dfixed_trunc(c);
2425 		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
2426 
		/* Save the number of lines by which the line buffer leads the scanout */
2428 		radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
2429 	}
2430 
2431 	/* select wm A */
2432 	arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
2433 	tmp = arb_control3;
2434 	tmp &= ~LATENCY_WATERMARK_MASK(3);
2435 	tmp |= LATENCY_WATERMARK_MASK(1);
2436 	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
2437 	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
2438 	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
2439 		LATENCY_HIGH_WATERMARK(line_time)));
2440 	/* select wm B */
2441 	tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
2442 	tmp &= ~LATENCY_WATERMARK_MASK(3);
2443 	tmp |= LATENCY_WATERMARK_MASK(2);
2444 	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
2445 	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
2446 	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
2447 		LATENCY_HIGH_WATERMARK(line_time)));
2448 	/* restore original selection */
2449 	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, arb_control3);
2450 
2451 	/* write the priority marks */
2452 	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
2453 	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
2454 
2455 	/* save values for DPM */
2456 	radeon_crtc->line_time = line_time;
2457 	radeon_crtc->wm_high = latency_watermark_a;
2458 	radeon_crtc->wm_low = latency_watermark_b;
2459 }
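
/*
 * Units sketch (illustrative numbers): the fixed-point chain above
 * converts a watermark in ns into pixels and scales it down by 16, e.g.
 * for a 148500 kHz pixel clock, hsc = 1 and latency_watermark_a = 3000:
 *
 *	3000 * (148500 / 1000) * 1 / 1000 / 16 ~= 27
 *
 * so PRIORITY_A_CNT gets a mark of 27 (in units of 16 pixels).
 */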
2460 
2461 void dce6_bandwidth_update(struct radeon_device *rdev)
2462 {
2463 	struct drm_display_mode *mode0 = NULL;
2464 	struct drm_display_mode *mode1 = NULL;
2465 	u32 num_heads = 0, lb_size;
2466 	int i;
2467 
2468 	if (!rdev->mode_info.mode_config_initialized)
2469 		return;
2470 
2471 	radeon_update_display_priority(rdev);
2472 
2473 	for (i = 0; i < rdev->num_crtc; i++) {
2474 		if (rdev->mode_info.crtcs[i]->base.enabled)
2475 			num_heads++;
2476 	}
2477 	for (i = 0; i < rdev->num_crtc; i += 2) {
2478 		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
2479 		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
2480 		lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
2481 		dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
2482 		lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
2483 		dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
2484 	}
2485 }
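
/*
 * Pairing sketch: crtcs are walked two at a time because (0,1), (2,3)
 * and (4,5) each share one line buffer; passing the partner's mode into
 * dce6_line_buffer_adjust() lets an idle partner donate its half of the
 * buffer to the active crtc.
 */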
2486 
2487 /*
2488  * Core functions
2489  */
2490 static void si_tiling_mode_table_init(struct radeon_device *rdev)
2491 {
2492 	u32 *tile = rdev->config.si.tile_mode_array;
2493 	const u32 num_tile_mode_states =
2494 			ARRAY_SIZE(rdev->config.si.tile_mode_array);
2495 	u32 reg_offset, split_equal_to_row_size;
2496 
2497 	switch (rdev->config.si.mem_row_size_in_kb) {
2498 	case 1:
2499 		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
2500 		break;
2501 	case 2:
2502 	default:
2503 		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
2504 		break;
2505 	case 4:
2506 		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
2507 		break;
2508 	}
2509 
2510 	for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2511 		tile[reg_offset] = 0;
2512 
	switch (rdev->family) {
2514 	case CHIP_TAHITI:
2515 	case CHIP_PITCAIRN:
2516 		/* non-AA compressed depth or any compressed stencil */
2517 		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2518 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2519 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2520 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2521 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2522 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2523 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2524 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2525 		/* 2xAA/4xAA compressed depth only */
2526 		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2527 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2528 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2529 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2530 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2531 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2532 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2533 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2534 		/* 8xAA compressed depth only */
2535 		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2536 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2537 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2538 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2539 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2540 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2541 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2542 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2543 		/* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
2544 		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2545 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2546 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2547 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2548 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2549 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2550 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2551 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2552 		/* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
2553 		tile[4] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2554 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2555 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2556 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2557 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2558 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2559 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2560 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2561 		/* Uncompressed 16bpp depth - and stencil buffer allocated with it */
2562 		tile[5] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2563 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2564 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2565 			   TILE_SPLIT(split_equal_to_row_size) |
2566 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2567 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2568 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2569 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2570 		/* Uncompressed 32bpp depth - and stencil buffer allocated with it */
2571 		tile[6] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2572 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2573 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2574 			   TILE_SPLIT(split_equal_to_row_size) |
2575 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2576 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2577 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2578 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2579 		/* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
2580 		tile[7] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2581 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2582 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2583 			   TILE_SPLIT(split_equal_to_row_size) |
2584 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2585 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2586 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2587 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2588 		/* 1D and 1D Array Surfaces */
2589 		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2590 			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2591 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2592 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2593 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2594 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2595 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2596 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2597 		/* Displayable maps. */
2598 		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2599 			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2600 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2601 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2602 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2603 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2604 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2605 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2606 		/* Display 8bpp. */
2607 		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2608 			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2609 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2610 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2611 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2612 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2613 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2614 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2615 		/* Display 16bpp. */
2616 		tile[11] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2617 			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2618 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2619 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2620 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2621 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2622 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2623 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2624 		/* Display 32bpp. */
2625 		tile[12] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2626 			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2627 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2628 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2629 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2630 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2631 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2632 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2633 		/* Thin. */
2634 		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2635 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2636 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2637 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2638 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2639 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2640 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2641 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2642 		/* Thin 8 bpp. */
2643 		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2644 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2645 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2646 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2647 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2648 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2649 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2650 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2651 		/* Thin 16 bpp. */
2652 		tile[15] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2653 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2654 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2655 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2656 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2657 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2658 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2659 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2660 		/* Thin 32 bpp. */
2661 		tile[16] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2662 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2663 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2664 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2665 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2666 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2667 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2668 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2669 		/* Thin 64 bpp. */
2670 		tile[17] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2671 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2672 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2673 			   TILE_SPLIT(split_equal_to_row_size) |
2674 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2675 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2676 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2677 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2678 		/* 8 bpp PRT. */
2679 		tile[21] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2680 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2681 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2682 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2683 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2684 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2685 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2686 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2687 		/* 16 bpp PRT */
2688 		tile[22] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2689 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2690 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2691 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2692 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2693 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2694 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2695 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2696 		/* 32 bpp PRT */
2697 		tile[23] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2698 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2699 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2700 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2701 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2702 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2703 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2704 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2705 		/* 64 bpp PRT */
2706 		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2707 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2708 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2709 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2710 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2711 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2712 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2713 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2714 		/* 128 bpp PRT */
2715 		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2716 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2717 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2718 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
2719 			   NUM_BANKS(ADDR_SURF_8_BANK) |
2720 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2721 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2722 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2723 
2724 		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2725 			WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
2726 		break;
2727 
2728 	case CHIP_VERDE:
2729 	case CHIP_OLAND:
2730 	case CHIP_HAINAN:
2731 		/* non-AA compressed depth or any compressed stencil */
2732 		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2733 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2734 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2735 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2736 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2737 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2738 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2739 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2740 		/* 2xAA/4xAA compressed depth only */
2741 		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2742 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2743 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2744 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2745 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2746 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2747 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2748 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2749 		/* 8xAA compressed depth only */
2750 		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2751 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2752 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2753 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2754 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2755 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2756 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2757 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2758 		/* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
2759 		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2760 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2761 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2762 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2763 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2764 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2765 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2766 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2767 		/* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
2768 		tile[4] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2769 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2770 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2771 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2772 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2773 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2774 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2775 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2776 		/* Uncompressed 16bpp depth - and stencil buffer allocated with it */
2777 		tile[5] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2778 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2779 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2780 			   TILE_SPLIT(split_equal_to_row_size) |
2781 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2782 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2783 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2784 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2785 		/* Uncompressed 32bpp depth - and stencil buffer allocated with it */
2786 		tile[6] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2787 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2788 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2789 			   TILE_SPLIT(split_equal_to_row_size) |
2790 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2791 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2792 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2793 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2794 		/* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
2795 		tile[7] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2796 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2797 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2798 			   TILE_SPLIT(split_equal_to_row_size) |
2799 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2800 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2801 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2802 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2803 		/* 1D and 1D Array Surfaces */
2804 		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2805 			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2806 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2807 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2808 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2809 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2810 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2811 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2812 		/* Displayable maps. */
2813 		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2814 			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2815 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2816 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2817 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2818 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2819 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2820 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2821 		/* Display 8bpp. */
2822 		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2823 			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2824 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2825 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2826 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2827 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2828 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2829 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2830 		/* Display 16bpp. */
2831 		tile[11] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2832 			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2833 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2834 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2835 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2836 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2837 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2838 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2839 		/* Display 32bpp. */
2840 		tile[12] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2841 			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2842 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2843 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2844 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2845 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2846 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2847 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2848 		/* Thin. */
2849 		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2850 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2851 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2852 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2853 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2854 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2855 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2856 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2857 		/* Thin 8 bpp. */
2858 		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2859 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2860 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2861 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2862 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2863 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2864 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2865 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2866 		/* Thin 16 bpp. */
2867 		tile[15] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2868 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2869 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2870 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2871 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2872 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2873 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2874 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2875 		/* Thin 32 bpp. */
2876 		tile[16] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2877 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2878 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2879 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2880 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2881 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2882 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2883 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2884 		/* Thin 64 bpp. */
2885 		tile[17] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2886 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2887 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2888 			   TILE_SPLIT(split_equal_to_row_size) |
2889 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2890 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2891 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2892 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2893 		/* 8 bpp PRT. */
2894 		tile[21] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2895 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2896 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2897 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2898 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2899 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2900 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2901 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2902 		/* 16 bpp PRT */
2903 		tile[22] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2904 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2905 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2906 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2907 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2908 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2909 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2910 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2911 		/* 32 bpp PRT */
2912 		tile[23] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2913 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2914 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2915 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2916 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2917 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2918 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2919 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2920 		/* 64 bpp PRT */
2921 		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2922 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2923 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2924 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2925 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2926 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2927 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2928 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2929 		/* 128 bpp PRT */
2930 		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2931 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2932 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2933 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
2934 			   NUM_BANKS(ADDR_SURF_8_BANK) |
2935 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2936 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2937 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2938 
2939 		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2940 			WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
2941 		break;
2942 
2943 	default:
2944 		DRM_ERROR("unknown asic: 0x%x\n", rdev->family);
2945 	}
2946 }
2947 
2948 static void si_select_se_sh(struct radeon_device *rdev,
2949 			    u32 se_num, u32 sh_num)
2950 {
2951 	u32 data = INSTANCE_BROADCAST_WRITES;
2952 
2953 	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
2954 		data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
2955 	else if (se_num == 0xffffffff)
2956 		data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
2957 	else if (sh_num == 0xffffffff)
2958 		data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
2959 	else
2960 		data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
2961 	WREG32(GRBM_GFX_INDEX, data);
2962 }
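
/*
 * Usage sketch: si_select_se_sh(rdev, 0xffffffff, 0xffffffff) restores
 * broadcast writes to all shader engines/arrays, which is why the
 * per-SE/SH loops below always end with that call; concrete indices
 * instead steer the following register accesses to a single SE/SH.
 */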
2963 
2964 static u32 si_create_bitmask(u32 bit_width)
2965 {
2966 	u32 i, mask = 0;
2967 
2968 	for (i = 0; i < bit_width; i++) {
2969 		mask <<= 1;
2970 		mask |= 1;
2971 	}
2972 	return mask;
2973 }
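
/*
 * Equivalent closed form (illustrative): for 0 < bit_width <= 32 the
 * loop computes GENMASK(bit_width - 1, 0), i.e.
 * (bit_width == 32) ? 0xffffffff : ((1u << bit_width) - 1); the loop
 * form simply avoids the undefined 1 << 32 shift.
 */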
2974 
2975 static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh)
2976 {
2977 	u32 data, mask;
2978 
2979 	data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
2980 	if (data & 1)
2981 		data &= INACTIVE_CUS_MASK;
2982 	else
2983 		data = 0;
2984 	data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);
2985 
2986 	data >>= INACTIVE_CUS_SHIFT;
2987 
2988 	mask = si_create_bitmask(cu_per_sh);
2989 
2990 	return ~data & mask;
2991 }
2992 
2993 static void si_setup_spi(struct radeon_device *rdev,
2994 			 u32 se_num, u32 sh_per_se,
2995 			 u32 cu_per_sh)
2996 {
2997 	int i, j, k;
2998 	u32 data, mask, active_cu;
2999 
3000 	for (i = 0; i < se_num; i++) {
3001 		for (j = 0; j < sh_per_se; j++) {
3002 			si_select_se_sh(rdev, i, j);
3003 			data = RREG32(SPI_STATIC_THREAD_MGMT_3);
3004 			active_cu = si_get_cu_enabled(rdev, cu_per_sh);
3005 
			for (k = 0; k < 16; k++) {
				mask = 1 << k;
3009 				if (active_cu & mask) {
3010 					data &= ~mask;
3011 					WREG32(SPI_STATIC_THREAD_MGMT_3, data);
3012 					break;
3013 				}
3014 			}
3015 		}
3016 	}
3017 	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
3018 }
3019 
3020 static u32 si_get_rb_disabled(struct radeon_device *rdev,
3021 			      u32 max_rb_num_per_se,
3022 			      u32 sh_per_se)
3023 {
3024 	u32 data, mask;
3025 
3026 	data = RREG32(CC_RB_BACKEND_DISABLE);
3027 	if (data & 1)
3028 		data &= BACKEND_DISABLE_MASK;
3029 	else
3030 		data = 0;
3031 	data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
3032 
3033 	data >>= BACKEND_DISABLE_SHIFT;
3034 
3035 	mask = si_create_bitmask(max_rb_num_per_se / sh_per_se);
3036 
3037 	return data & mask;
3038 }
3039 
3040 static void si_setup_rb(struct radeon_device *rdev,
3041 			u32 se_num, u32 sh_per_se,
3042 			u32 max_rb_num_per_se)
3043 {
3044 	int i, j;
3045 	u32 data, mask;
3046 	u32 disabled_rbs = 0;
3047 	u32 enabled_rbs = 0;
3048 
3049 	for (i = 0; i < se_num; i++) {
3050 		for (j = 0; j < sh_per_se; j++) {
3051 			si_select_se_sh(rdev, i, j);
3052 			data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
3053 			disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
3054 		}
3055 	}
3056 	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
3057 
3058 	mask = 1;
3059 	for (i = 0; i < max_rb_num_per_se * se_num; i++) {
3060 		if (!(disabled_rbs & mask))
3061 			enabled_rbs |= mask;
3062 		mask <<= 1;
3063 	}
3064 
3065 	rdev->config.si.backend_enable_mask = enabled_rbs;
3066 
3067 	for (i = 0; i < se_num; i++) {
3068 		si_select_se_sh(rdev, i, 0xffffffff);
3069 		data = 0;
3070 		for (j = 0; j < sh_per_se; j++) {
3071 			switch (enabled_rbs & 3) {
3072 			case 1:
3073 				data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
3074 				break;
3075 			case 2:
3076 				data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
3077 				break;
3078 			case 3:
3079 			default:
3080 				data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
3081 				break;
3082 			}
3083 			enabled_rbs >>= 2;
3084 		}
3085 		WREG32(PA_SC_RASTER_CONFIG, data);
3086 	}
3087 	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
3088 }
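
/*
 * Packing sketch (illustrative): disabled_rbs/enabled_rbs hold
 * TAHITI_RB_BITMAP_WIDTH_PER_SH (2) bits per shader array, so a fully
 * enabled 2 SE x 2 SH part yields enabled_rbs = 0xff and every
 * (enabled_rbs & 3) == 3 selects RASTER_CONFIG_RB_MAP_2 above.
 */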
3089 
3090 static void si_gpu_init(struct radeon_device *rdev)
3091 {
3092 	u32 gb_addr_config = 0;
3093 	u32 mc_shared_chmap, mc_arb_ramcfg;
3094 	u32 sx_debug_1;
3095 	u32 hdp_host_path_cntl;
3096 	u32 tmp;
3097 	int i, j;
3098 
3099 	switch (rdev->family) {
3100 	case CHIP_TAHITI:
3101 		rdev->config.si.max_shader_engines = 2;
3102 		rdev->config.si.max_tile_pipes = 12;
3103 		rdev->config.si.max_cu_per_sh = 8;
3104 		rdev->config.si.max_sh_per_se = 2;
3105 		rdev->config.si.max_backends_per_se = 4;
3106 		rdev->config.si.max_texture_channel_caches = 12;
3107 		rdev->config.si.max_gprs = 256;
3108 		rdev->config.si.max_gs_threads = 32;
3109 		rdev->config.si.max_hw_contexts = 8;
3110 
3111 		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
3112 		rdev->config.si.sc_prim_fifo_size_backend = 0x100;
3113 		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
3114 		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
3115 		gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
3116 		break;
3117 	case CHIP_PITCAIRN:
3118 		rdev->config.si.max_shader_engines = 2;
3119 		rdev->config.si.max_tile_pipes = 8;
3120 		rdev->config.si.max_cu_per_sh = 5;
3121 		rdev->config.si.max_sh_per_se = 2;
3122 		rdev->config.si.max_backends_per_se = 4;
3123 		rdev->config.si.max_texture_channel_caches = 8;
3124 		rdev->config.si.max_gprs = 256;
3125 		rdev->config.si.max_gs_threads = 32;
3126 		rdev->config.si.max_hw_contexts = 8;
3127 
3128 		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
3129 		rdev->config.si.sc_prim_fifo_size_backend = 0x100;
3130 		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
3131 		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
3132 		gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
3133 		break;
3134 	case CHIP_VERDE:
3135 	default:
3136 		rdev->config.si.max_shader_engines = 1;
3137 		rdev->config.si.max_tile_pipes = 4;
3138 		rdev->config.si.max_cu_per_sh = 5;
3139 		rdev->config.si.max_sh_per_se = 2;
3140 		rdev->config.si.max_backends_per_se = 4;
3141 		rdev->config.si.max_texture_channel_caches = 4;
3142 		rdev->config.si.max_gprs = 256;
3143 		rdev->config.si.max_gs_threads = 32;
3144 		rdev->config.si.max_hw_contexts = 8;
3145 
3146 		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
3147 		rdev->config.si.sc_prim_fifo_size_backend = 0x40;
3148 		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
3149 		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
3150 		gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
3151 		break;
3152 	case CHIP_OLAND:
3153 		rdev->config.si.max_shader_engines = 1;
3154 		rdev->config.si.max_tile_pipes = 4;
3155 		rdev->config.si.max_cu_per_sh = 6;
3156 		rdev->config.si.max_sh_per_se = 1;
3157 		rdev->config.si.max_backends_per_se = 2;
3158 		rdev->config.si.max_texture_channel_caches = 4;
3159 		rdev->config.si.max_gprs = 256;
3160 		rdev->config.si.max_gs_threads = 16;
3161 		rdev->config.si.max_hw_contexts = 8;
3162 
3163 		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
3164 		rdev->config.si.sc_prim_fifo_size_backend = 0x40;
3165 		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
3166 		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
3167 		gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
3168 		break;
3169 	case CHIP_HAINAN:
3170 		rdev->config.si.max_shader_engines = 1;
3171 		rdev->config.si.max_tile_pipes = 4;
3172 		rdev->config.si.max_cu_per_sh = 5;
3173 		rdev->config.si.max_sh_per_se = 1;
3174 		rdev->config.si.max_backends_per_se = 1;
3175 		rdev->config.si.max_texture_channel_caches = 2;
3176 		rdev->config.si.max_gprs = 256;
3177 		rdev->config.si.max_gs_threads = 16;
3178 		rdev->config.si.max_hw_contexts = 8;
3179 
3180 		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
3181 		rdev->config.si.sc_prim_fifo_size_backend = 0x40;
3182 		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
3183 		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
3184 		gb_addr_config = HAINAN_GB_ADDR_CONFIG_GOLDEN;
3185 		break;
3186 	}
3187 
3188 	/* Initialize HDP */
3189 	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
3190 		WREG32((0x2c14 + j), 0x00000000);
3191 		WREG32((0x2c18 + j), 0x00000000);
3192 		WREG32((0x2c1c + j), 0x00000000);
3193 		WREG32((0x2c20 + j), 0x00000000);
3194 		WREG32((0x2c24 + j), 0x00000000);
3195 	}
3196 
3197 	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
3198 	WREG32(SRBM_INT_CNTL, 1);
3199 	WREG32(SRBM_INT_ACK, 1);
3200 
3201 	evergreen_fix_pci_max_read_req_size(rdev);
3202 
3203 	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
3204 
3205 	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
3206 	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
3207 
3208 	rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes;
3209 	rdev->config.si.mem_max_burst_length_bytes = 256;
3210 	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
3211 	rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
3212 	if (rdev->config.si.mem_row_size_in_kb > 4)
3213 		rdev->config.si.mem_row_size_in_kb = 4;
3214 	/* XXX use MC settings? */
3215 	rdev->config.si.shader_engine_tile_size = 32;
3216 	rdev->config.si.num_gpus = 1;
3217 	rdev->config.si.multi_gpu_tile_size = 64;
3218 
3219 	/* fix up row size */
3220 	gb_addr_config &= ~ROW_SIZE_MASK;
3221 	switch (rdev->config.si.mem_row_size_in_kb) {
3222 	case 1:
3223 	default:
3224 		gb_addr_config |= ROW_SIZE(0);
3225 		break;
3226 	case 2:
3227 		gb_addr_config |= ROW_SIZE(1);
3228 		break;
3229 	case 4:
3230 		gb_addr_config |= ROW_SIZE(2);
3231 		break;
3232 	}
3233 
3234 	/* setup tiling info dword.  gb_addr_config is not adequate since it does
3235 	 * not have bank info, so create a custom tiling dword.
3236 	 * bits 3:0   num_pipes
3237 	 * bits 7:4   num_banks
3238 	 * bits 11:8  group_size
3239 	 * bits 15:12 row_size
3240 	 */
3241 	rdev->config.si.tile_config = 0;
3242 	switch (rdev->config.si.num_tile_pipes) {
3243 	case 1:
3244 		rdev->config.si.tile_config |= (0 << 0);
3245 		break;
3246 	case 2:
3247 		rdev->config.si.tile_config |= (1 << 0);
3248 		break;
3249 	case 4:
3250 		rdev->config.si.tile_config |= (2 << 0);
3251 		break;
3252 	case 8:
3253 	default:
3254 		/* XXX what about 12? */
3255 		rdev->config.si.tile_config |= (3 << 0);
3256 		break;
3257 	}
3258 	switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
3259 	case 0: /* four banks */
3260 		rdev->config.si.tile_config |= 0 << 4;
3261 		break;
3262 	case 1: /* eight banks */
3263 		rdev->config.si.tile_config |= 1 << 4;
3264 		break;
3265 	case 2: /* sixteen banks */
3266 	default:
3267 		rdev->config.si.tile_config |= 2 << 4;
3268 		break;
3269 	}
3270 	rdev->config.si.tile_config |=
3271 		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
3272 	rdev->config.si.tile_config |=
3273 		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
3274 
3275 	WREG32(GB_ADDR_CONFIG, gb_addr_config);
3276 	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
3277 	WREG32(DMIF_ADDR_CALC, gb_addr_config);
3278 	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
3279 	WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
3280 	WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
3281 	if (rdev->has_uvd) {
3282 		WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
3283 		WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
3284 		WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
3285 	}
3286 
3287 	si_tiling_mode_table_init(rdev);
3288 
3289 	si_setup_rb(rdev, rdev->config.si.max_shader_engines,
3290 		    rdev->config.si.max_sh_per_se,
3291 		    rdev->config.si.max_backends_per_se);
3292 
3293 	si_setup_spi(rdev, rdev->config.si.max_shader_engines,
3294 		     rdev->config.si.max_sh_per_se,
3295 		     rdev->config.si.max_cu_per_sh);
3296 
3297 	rdev->config.si.active_cus = 0;
3298 	for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
3299 		for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
3300 			rdev->config.si.active_cus +=
3301 				hweight32(si_get_cu_active_bitmap(rdev, i, j));
3302 		}
3303 	}
3304 
3305 	/* set HW defaults for 3D engine */
3306 	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
3307 				     ROQ_IB2_START(0x2b)));
3308 	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
3309 
3310 	sx_debug_1 = RREG32(SX_DEBUG_1);
3311 	WREG32(SX_DEBUG_1, sx_debug_1);
3312 
3313 	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
3314 
3315 	WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_frontend) |
3316 				 SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_backend) |
3317 				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.si.sc_hiz_tile_fifo_size) |
3318 				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.si.sc_earlyz_tile_fifo_size)));
3319 
3320 	WREG32(VGT_NUM_INSTANCES, 1);
3321 
3322 	WREG32(CP_PERFMON_CNTL, 0);
3323 
3324 	WREG32(SQ_CONFIG, 0);
3325 
3326 	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
3327 					  FORCE_EOV_MAX_REZ_CNT(255)));
3328 
3329 	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
3330 	       AUTO_INVLD_EN(ES_AND_GS_AUTO));
3331 
3332 	WREG32(VGT_GS_VERTEX_REUSE, 16);
3333 	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
3334 
3335 	WREG32(CB_PERFCOUNTER0_SELECT0, 0);
3336 	WREG32(CB_PERFCOUNTER0_SELECT1, 0);
3337 	WREG32(CB_PERFCOUNTER1_SELECT0, 0);
3338 	WREG32(CB_PERFCOUNTER1_SELECT1, 0);
3339 	WREG32(CB_PERFCOUNTER2_SELECT0, 0);
3340 	WREG32(CB_PERFCOUNTER2_SELECT1, 0);
3341 	WREG32(CB_PERFCOUNTER3_SELECT0, 0);
3342 	WREG32(CB_PERFCOUNTER3_SELECT1, 0);
3343 
3344 	tmp = RREG32(HDP_MISC_CNTL);
3345 	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
3346 	WREG32(HDP_MISC_CNTL, tmp);
3347 
3348 	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
3349 	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
3350 
3351 	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
3352 
3353 	udelay(50);
3354 }
3355 
3356 /*
3357  * GPU scratch registers helper functions.
3358  */
3359 static void si_scratch_init(struct radeon_device *rdev)
3360 {
3361 	int i;
3362 
3363 	rdev->scratch.num_reg = 7;
3364 	rdev->scratch.reg_base = SCRATCH_REG0;
3365 	for (i = 0; i < rdev->scratch.num_reg; i++) {
3366 		rdev->scratch.free[i] = true;
3367 		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
3368 	}
3369 }
3370 
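/**
 * si_fence_ring_emit - emit a fence on the gfx ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Flushes the read caches over the gart, then emits an
 * EVENT_WRITE_EOP packet that writes the fence sequence number
 * and triggers an interrupt (SI).
 */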
3371 void si_fence_ring_emit(struct radeon_device *rdev,
3372 			struct radeon_fence *fence)
3373 {
3374 	struct radeon_ring *ring = &rdev->ring[fence->ring];
3375 	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
3376 
3377 	/* flush read cache over gart */
3378 	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
3379 	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
3380 	radeon_ring_write(ring, 0);
3381 	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
3382 	radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
3383 			  PACKET3_TC_ACTION_ENA |
3384 			  PACKET3_SH_KCACHE_ACTION_ENA |
3385 			  PACKET3_SH_ICACHE_ACTION_ENA);
3386 	radeon_ring_write(ring, 0xFFFFFFFF);
3387 	radeon_ring_write(ring, 0);
3388 	radeon_ring_write(ring, 10); /* poll interval */
3389 	/* EVENT_WRITE_EOP - flush caches, send int */
3390 	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
3391 	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
3392 	radeon_ring_write(ring, lower_32_bits(addr));
3393 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
3394 	radeon_ring_write(ring, fence->seq);
3395 	radeon_ring_write(ring, 0);
3396 }
3397 
3398 /*
3399  * Indirect buffers (IBs)
3400  */
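/**
 * si_ring_ib_execute - emit an IB on the gfx/compute rings
 *
 * @rdev: radeon_device pointer
 * @ib: radeon indirect buffer object
 *
 * Emits an INDIRECT_BUFFER packet for the given IB. Const IBs are
 * preceded by a SWITCH_BUFFER packet; normal IBs also update the
 * saved rptr and flush the read caches for the IB's vm id (SI).
 */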
3401 void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3402 {
3403 	struct radeon_ring *ring = &rdev->ring[ib->ring];
3404 	unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
3405 	u32 header;
3406 
3407 	if (ib->is_const_ib) {
3408 		/* set switch buffer packet before const IB */
3409 		radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3410 		radeon_ring_write(ring, 0);
3411 
3412 		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
3413 	} else {
3414 		u32 next_rptr;
3415 		if (ring->rptr_save_reg) {
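			/* 3 dwords for the SET_CONFIG_REG below, 4 for the
			 * INDIRECT_BUFFER packet and 8 for the trailing
			 * cache flush for this vm id
			 */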
3416 			next_rptr = ring->wptr + 3 + 4 + 8;
3417 			radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
3418 			radeon_ring_write(ring, ((ring->rptr_save_reg -
3419 						  PACKET3_SET_CONFIG_REG_START) >> 2));
3420 			radeon_ring_write(ring, next_rptr);
3421 		} else if (rdev->wb.enabled) {
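			/* 5 dwords for the WRITE_DATA packet below, 4 for the
			 * INDIRECT_BUFFER packet and 8 for the trailing
			 * cache flush for this vm id
			 */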
3422 			next_rptr = ring->wptr + 5 + 4 + 8;
3423 			radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3424 			radeon_ring_write(ring, (1 << 8)); /* WRITE_DATA_DST_SEL(1): memory */
3425 			radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3426 			radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
3427 			radeon_ring_write(ring, next_rptr);
3428 		}
3429 
3430 		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
3431 	}
3432 
3433 	radeon_ring_write(ring, header);
3434 	radeon_ring_write(ring,
3435 #ifdef __BIG_ENDIAN
3436 			  (2 << 0) |
3437 #endif
3438 			  (ib->gpu_addr & 0xFFFFFFFC));
3439 	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
3440 	radeon_ring_write(ring, ib->length_dw | (vm_id << 24));
3441 
3442 	if (!ib->is_const_ib) {
3443 		/* flush read cache over gart for this vmid */
3444 		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
3445 		radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
3446 		radeon_ring_write(ring, vm_id);
3447 		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
3448 		radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
3449 				  PACKET3_TC_ACTION_ENA |
3450 				  PACKET3_SH_KCACHE_ACTION_ENA |
3451 				  PACKET3_SH_ICACHE_ACTION_ENA);
3452 		radeon_ring_write(ring, 0xFFFFFFFF);
3453 		radeon_ring_write(ring, 0);
3454 		radeon_ring_write(ring, 10); /* poll interval */
3455 	}
3456 }
3457 
3458 /*
3459  * CP.
3460  */
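/**
 * si_cp_enable - enable/disable the CP micro engines
 *
 * @rdev: radeon_device pointer
 * @enable: enable or disable the MEs
 *
 * Halts or unhalts the PFP, ME and CE, and marks the gfx and
 * both compute rings not ready when disabling (SI).
 */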
3461 static void si_cp_enable(struct radeon_device *rdev, bool enable)
3462 {
3463 	if (enable) {
3464 		WREG32(CP_ME_CNTL, 0);
3465 	} else {
3466 		if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
3467 			radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
3468 		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
3469 		WREG32(SCRATCH_UMSK, 0);
3470 		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
3471 		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
3472 		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
3473 	}
3474 	udelay(50);
3475 }
3476 
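/**
 * si_cp_load_microcode - load the CP microcode
 *
 * @rdev: radeon_device pointer
 *
 * Halts the CP and writes the PFP, CE and ME firmware images into
 * the CP ucode RAMs. Handles both the new and the legacy firmware
 * layouts (SI).
 * Returns 0 on success, -EINVAL if the firmware is not loaded.
 */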
3477 static int si_cp_load_microcode(struct radeon_device *rdev)
3478 {
3479 	int i;
3480 
3481 	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
3482 		return -EINVAL;
3483 
3484 	si_cp_enable(rdev, false);
3485 
3486 	if (rdev->new_fw) {
3487 		const struct gfx_firmware_header_v1_0 *pfp_hdr =
3488 			(const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
3489 		const struct gfx_firmware_header_v1_0 *ce_hdr =
3490 			(const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
3491 		const struct gfx_firmware_header_v1_0 *me_hdr =
3492 			(const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
3493 		const __le32 *fw_data;
3494 		u32 fw_size;
3495 
3496 		radeon_ucode_print_gfx_hdr(&pfp_hdr->header);
3497 		radeon_ucode_print_gfx_hdr(&ce_hdr->header);
3498 		radeon_ucode_print_gfx_hdr(&me_hdr->header);
3499 
3500 		/* PFP */
3501 		fw_data = (const __le32 *)
3502 			(rdev->pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
3503 		fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
3504 		WREG32(CP_PFP_UCODE_ADDR, 0);
3505 		for (i = 0; i < fw_size; i++)
3506 			WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
3507 		WREG32(CP_PFP_UCODE_ADDR, 0);
3508 
3509 		/* CE */
3510 		fw_data = (const __le32 *)
3511 			(rdev->ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
3512 		fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
3513 		WREG32(CP_CE_UCODE_ADDR, 0);
3514 		for (i = 0; i < fw_size; i++)
3515 			WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
3516 		WREG32(CP_CE_UCODE_ADDR, 0);
3517 
3518 		/* ME */
3519 		fw_data = (const __le32 *)
3520 			(rdev->me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
3521 		fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
3522 		WREG32(CP_ME_RAM_WADDR, 0);
3523 		for (i = 0; i < fw_size; i++)
3524 			WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
3525 		WREG32(CP_ME_RAM_WADDR, 0);
3526 	} else {
3527 		const __be32 *fw_data;
3528 
3529 		/* PFP */
3530 		fw_data = (const __be32 *)rdev->pfp_fw->data;
3531 		WREG32(CP_PFP_UCODE_ADDR, 0);
3532 		for (i = 0; i < SI_PFP_UCODE_SIZE; i++)
3533 			WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
3534 		WREG32(CP_PFP_UCODE_ADDR, 0);
3535 
3536 		/* CE */
3537 		fw_data = (const __be32 *)rdev->ce_fw->data;
3538 		WREG32(CP_CE_UCODE_ADDR, 0);
3539 		for (i = 0; i < SI_CE_UCODE_SIZE; i++)
3540 			WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
3541 		WREG32(CP_CE_UCODE_ADDR, 0);
3542 
3543 		/* ME */
3544 		fw_data = (const __be32 *)rdev->me_fw->data;
3545 		WREG32(CP_ME_RAM_WADDR, 0);
3546 		for (i = 0; i < SI_PM4_UCODE_SIZE; i++)
3547 			WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
3548 		WREG32(CP_ME_RAM_WADDR, 0);
3549 	}
3550 
3551 	WREG32(CP_PFP_UCODE_ADDR, 0);
3552 	WREG32(CP_CE_UCODE_ADDR, 0);
3553 	WREG32(CP_ME_RAM_WADDR, 0);
3554 	WREG32(CP_ME_RAM_RADDR, 0);
3555 	return 0;
3556 }
3557 
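/**
 * si_cp_start - initialize the CP rings
 *
 * @rdev: radeon_device pointer
 *
 * Emits the ME initialization and CE partition setup on the gfx
 * ring, loads the clear state, and clears the compute context
 * state on the two compute rings (SI).
 * Returns 0 on success, error on failure.
 */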
3558 static int si_cp_start(struct radeon_device *rdev)
3559 {
3560 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3561 	int r, i;
3562 
3563 	r = radeon_ring_lock(rdev, ring, 7 + 4);
3564 	if (r) {
3565 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3566 		return r;
3567 	}
3568 	/* init the CP */
3569 	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
3570 	radeon_ring_write(ring, 0x1);
3571 	radeon_ring_write(ring, 0x0);
3572 	radeon_ring_write(ring, rdev->config.si.max_hw_contexts - 1);
3573 	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
3574 	radeon_ring_write(ring, 0);
3575 	radeon_ring_write(ring, 0);
3576 
3577 	/* init the CE partitions */
3578 	radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
3579 	radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
3580 	radeon_ring_write(ring, 0xc000);
3581 	radeon_ring_write(ring, 0xe000);
3582 	radeon_ring_unlock_commit(rdev, ring, false);
3583 
3584 	si_cp_enable(rdev, true);
3585 
3586 	r = radeon_ring_lock(rdev, ring, si_default_size + 10);
3587 	if (r) {
3588 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3589 		return r;
3590 	}
3591 
3592 	/* setup clear context state */
3593 	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3594 	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3595 
3596 	for (i = 0; i < si_default_size; i++)
3597 		radeon_ring_write(ring, si_default_state[i]);
3598 
3599 	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3600 	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3601 
3602 	/* set clear context state */
3603 	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3604 	radeon_ring_write(ring, 0);
3605 
3606 	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
3607 	radeon_ring_write(ring, 0x00000316);
3608 	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
3609 	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
3610 
3611 	radeon_ring_unlock_commit(rdev, ring, false);
3612 
3613 	for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
3614 		ring = &rdev->ring[i];
3615 		r = radeon_ring_lock(rdev, ring, 2);
		if (r) {
			DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
			return r;
		}
3616 
3617 		/* clear the compute context state */
3618 		radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
3619 		radeon_ring_write(ring, 0);
3620 
3621 		radeon_ring_unlock_commit(rdev, ring, false);
3622 	}
3623 
3624 	return 0;
3625 }
3626 
3627 static void si_cp_fini(struct radeon_device *rdev)
3628 {
3629 	struct radeon_ring *ring;
3630 	si_cp_enable(rdev, false);
3631 
3632 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3633 	radeon_ring_fini(rdev, ring);
3634 	radeon_scratch_free(rdev, ring->rptr_save_reg);
3635 
3636 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
3637 	radeon_ring_fini(rdev, ring);
3638 	radeon_scratch_free(rdev, ring->rptr_save_reg);
3639 
3640 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
3641 	radeon_ring_fini(rdev, ring);
3642 	radeon_scratch_free(rdev, ring->rptr_save_reg);
3643 }
3644 
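/**
 * si_cp_resume - set up the CP ring buffers and start the rings
 *
 * @rdev: radeon_device pointer
 *
 * Programs the ring buffer size, read/write pointers and writeback
 * addresses for the gfx ring and the two compute rings, then starts
 * and tests them (SI).
 * Returns 0 on success, error on failure.
 */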
3645 static int si_cp_resume(struct radeon_device *rdev)
3646 {
3647 	struct radeon_ring *ring;
3648 	u32 tmp;
3649 	u32 rb_bufsz;
3650 	int r;
3651 
3652 	si_enable_gui_idle_interrupt(rdev, false);
3653 
3654 	WREG32(CP_SEM_WAIT_TIMER, 0x0);
3655 	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
3656 
3657 	/* Set the write pointer delay */
3658 	WREG32(CP_RB_WPTR_DELAY, 0);
3659 
3660 	WREG32(CP_DEBUG, 0);
3661 	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
3662 
3663 	/* ring 0 - compute and gfx */
3664 	/* Set ring buffer size */
3665 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3666 	rb_bufsz = order_base_2(ring->ring_size / 8);
3667 	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
3668 #ifdef __BIG_ENDIAN
3669 	tmp |= BUF_SWAP_32BIT;
3670 #endif
3671 	WREG32(CP_RB0_CNTL, tmp);
3672 
3673 	/* Initialize the ring buffer's read and write pointers */
3674 	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
3675 	ring->wptr = 0;
3676 	WREG32(CP_RB0_WPTR, ring->wptr);
3677 
3678 	/* set the wb address whether it's enabled or not */
3679 	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
3680 	WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
3681 
3682 	if (rdev->wb.enabled) {
3683 		WREG32(SCRATCH_UMSK, 0xff);
3684 	} else {
3685 		tmp |= RB_NO_UPDATE;
3686 		WREG32(SCRATCH_UMSK, 0);
3687 	}
3688 
3689 	mdelay(1);
3690 	WREG32(CP_RB0_CNTL, tmp);
3691 
3692 	WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
3693 
3694 	/* ring 1 - compute only */
3695 	/* Set ring buffer size */
3696 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
3697 	rb_bufsz = order_base_2(ring->ring_size / 8);
3698 	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
3699 #ifdef __BIG_ENDIAN
3700 	tmp |= BUF_SWAP_32BIT;
3701 #endif
3702 	WREG32(CP_RB1_CNTL, tmp);
3703 
3704 	/* Initialize the ring buffer's read and write pointers */
3705 	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
3706 	ring->wptr = 0;
3707 	WREG32(CP_RB1_WPTR, ring->wptr);
3708 
3709 	/* set the wb address whether it's enabled or not */
3710 	WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
3711 	WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);
3712 
3713 	mdelay(1);
3714 	WREG32(CP_RB1_CNTL, tmp);
3715 
3716 	WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
3717 
3718 	/* ring 2 - compute only */
3719 	/* Set ring buffer size */
3720 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
3721 	rb_bufsz = order_base_2(ring->ring_size / 8);
3722 	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
3723 #ifdef __BIG_ENDIAN
3724 	tmp |= BUF_SWAP_32BIT;
3725 #endif
3726 	WREG32(CP_RB2_CNTL, tmp);
3727 
3728 	/* Initialize the ring buffer's read and write pointers */
3729 	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
3730 	ring->wptr = 0;
3731 	WREG32(CP_RB2_WPTR, ring->wptr);
3732 
3733 	/* set the wb address whether it's enabled or not */
3734 	WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
3735 	WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);
3736 
3737 	mdelay(1);
3738 	WREG32(CP_RB2_CNTL, tmp);
3739 
3740 	WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
3741 
3742 	/* start the rings */
3743 	si_cp_start(rdev);
3744 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
3745 	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = true;
3746 	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = true;
3747 	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
3748 	if (r) {
3749 		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
3750 		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
3751 		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
3752 		return r;
3753 	}
3754 	r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP1_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
3755 	if (r) {
3756 		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
3757 	}
3758 	r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP2_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
3759 	if (r) {
3760 		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
3761 	}
3762 
3763 	si_enable_gui_idle_interrupt(rdev, true);
3764 
3765 	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
3766 		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
3767 
3768 	return 0;
3769 }
3770 
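/**
 * si_gpu_check_soft_reset - check which blocks are busy
 *
 * @rdev: radeon_device pointer
 *
 * Reads the GRBM, SRBM, DMA and VM status registers and builds a
 * mask of the blocks that need to be soft reset (SI).
 * Returns the reset mask, 0 if no reset is required.
 */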
3771 u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
3772 {
3773 	u32 reset_mask = 0;
3774 	u32 tmp;
3775 
3776 	/* GRBM_STATUS */
3777 	tmp = RREG32(GRBM_STATUS);
3778 	if (tmp & (PA_BUSY | SC_BUSY |
3779 		   BCI_BUSY | SX_BUSY |
3780 		   TA_BUSY | VGT_BUSY |
3781 		   DB_BUSY | CB_BUSY |
3782 		   GDS_BUSY | SPI_BUSY |
3783 		   IA_BUSY | IA_BUSY_NO_DMA))
3784 		reset_mask |= RADEON_RESET_GFX;
3785 
3786 	if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
3787 		   CP_BUSY | CP_COHERENCY_BUSY))
3788 		reset_mask |= RADEON_RESET_CP;
3789 
3790 	if (tmp & GRBM_EE_BUSY)
3791 		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
3792 
3793 	/* GRBM_STATUS2 */
3794 	tmp = RREG32(GRBM_STATUS2);
3795 	if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
3796 		reset_mask |= RADEON_RESET_RLC;
3797 
3798 	/* DMA_STATUS_REG 0 */
3799 	tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
3800 	if (!(tmp & DMA_IDLE))
3801 		reset_mask |= RADEON_RESET_DMA;
3802 
3803 	/* DMA_STATUS_REG 1 */
3804 	tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
3805 	if (!(tmp & DMA_IDLE))
3806 		reset_mask |= RADEON_RESET_DMA1;
3807 
3808 	/* SRBM_STATUS2 */
3809 	tmp = RREG32(SRBM_STATUS2);
3810 	if (tmp & DMA_BUSY)
3811 		reset_mask |= RADEON_RESET_DMA;
3812 
3813 	if (tmp & DMA1_BUSY)
3814 		reset_mask |= RADEON_RESET_DMA1;
3815 
3816 	/* SRBM_STATUS */
3817 	tmp = RREG32(SRBM_STATUS);
3818 
3819 	if (tmp & IH_BUSY)
3820 		reset_mask |= RADEON_RESET_IH;
3821 
3822 	if (tmp & SEM_BUSY)
3823 		reset_mask |= RADEON_RESET_SEM;
3824 
3825 	if (tmp & GRBM_RQ_PENDING)
3826 		reset_mask |= RADEON_RESET_GRBM;
3827 
3828 	if (tmp & VMC_BUSY)
3829 		reset_mask |= RADEON_RESET_VMC;
3830 
3831 	if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
3832 		   MCC_BUSY | MCD_BUSY))
3833 		reset_mask |= RADEON_RESET_MC;
3834 
3835 	if (evergreen_is_display_hung(rdev))
3836 		reset_mask |= RADEON_RESET_DISPLAY;
3837 
3838 	/* VM_L2_STATUS */
3839 	tmp = RREG32(VM_L2_STATUS);
3840 	if (tmp & L2_BUSY)
3841 		reset_mask |= RADEON_RESET_VMC;
3842 
3843 	/* Skip MC reset as it's most likely not hung, just busy */
3844 	if (reset_mask & RADEON_RESET_MC) {
3845 		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
3846 		reset_mask &= ~RADEON_RESET_MC;
3847 	}
3848 
3849 	return reset_mask;
3850 }
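/**
 * si_gpu_soft_reset - soft reset the requested blocks
 *
 * @rdev: radeon_device pointer
 * @reset_mask: mask of blocks to reset (RADEON_RESET_*)
 *
 * Stops the rlc, CP and DMA engines, then pulses the GRBM and
 * SRBM soft reset bits for the requested blocks while memory
 * access is stopped (SI).
 */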
3851 
3852 static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
3853 {
3854 	struct evergreen_mc_save save;
3855 	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
3856 	u32 tmp;
3857 
3858 	if (reset_mask == 0)
3859 		return;
3860 
3861 	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
3862 
3863 	evergreen_print_gpu_status_regs(rdev);
3864 	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
3865 		 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
3866 	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
3867 		 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
3868 
3869 	/* disable PG/CG */
3870 	si_fini_pg(rdev);
3871 	si_fini_cg(rdev);
3872 
3873 	/* stop the rlc */
3874 	si_rlc_stop(rdev);
3875 
3876 	/* Disable CP parsing/prefetching */
3877 	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
3878 
3879 	if (reset_mask & RADEON_RESET_DMA) {
3880 		/* dma0 */
3881 		tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
3882 		tmp &= ~DMA_RB_ENABLE;
3883 		WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
3884 	}
3885 	if (reset_mask & RADEON_RESET_DMA1) {
3886 		/* dma1 */
3887 		tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
3888 		tmp &= ~DMA_RB_ENABLE;
3889 		WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
3890 	}
3891 
3892 	udelay(50);
3893 
3894 	evergreen_mc_stop(rdev, &save);
3895 	if (evergreen_mc_wait_for_idle(rdev)) {
3896 		dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
3897 	}
3898 
3899 	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP)) {
3900 		grbm_soft_reset = SOFT_RESET_CB |
3901 			SOFT_RESET_DB |
3902 			SOFT_RESET_GDS |
3903 			SOFT_RESET_PA |
3904 			SOFT_RESET_SC |
3905 			SOFT_RESET_BCI |
3906 			SOFT_RESET_SPI |
3907 			SOFT_RESET_SX |
3908 			SOFT_RESET_TC |
3909 			SOFT_RESET_TA |
3910 			SOFT_RESET_VGT |
3911 			SOFT_RESET_IA;
3912 	}
3913 
3914 	if (reset_mask & RADEON_RESET_CP) {
3915 		grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;
3916 
3917 		srbm_soft_reset |= SOFT_RESET_GRBM;
3918 	}
3919 
3920 	if (reset_mask & RADEON_RESET_DMA)
3921 		srbm_soft_reset |= SOFT_RESET_DMA;
3922 
3923 	if (reset_mask & RADEON_RESET_DMA1)
3924 		srbm_soft_reset |= SOFT_RESET_DMA1;
3925 
3926 	if (reset_mask & RADEON_RESET_DISPLAY)
3927 		srbm_soft_reset |= SOFT_RESET_DC;
3928 
3929 	if (reset_mask & RADEON_RESET_RLC)
3930 		grbm_soft_reset |= SOFT_RESET_RLC;
3931 
3932 	if (reset_mask & RADEON_RESET_SEM)
3933 		srbm_soft_reset |= SOFT_RESET_SEM;
3934 
3935 	if (reset_mask & RADEON_RESET_IH)
3936 		srbm_soft_reset |= SOFT_RESET_IH;
3937 
3938 	if (reset_mask & RADEON_RESET_GRBM)
3939 		srbm_soft_reset |= SOFT_RESET_GRBM;
3940 
3941 	if (reset_mask & RADEON_RESET_VMC)
3942 		srbm_soft_reset |= SOFT_RESET_VMC;
3943 
3944 	if (reset_mask & RADEON_RESET_MC)
3945 		srbm_soft_reset |= SOFT_RESET_MC;
3946 
3947 	if (grbm_soft_reset) {
3948 		tmp = RREG32(GRBM_SOFT_RESET);
3949 		tmp |= grbm_soft_reset;
3950 		dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
3951 		WREG32(GRBM_SOFT_RESET, tmp);
3952 		tmp = RREG32(GRBM_SOFT_RESET);
3953 
3954 		udelay(50);
3955 
3956 		tmp &= ~grbm_soft_reset;
3957 		WREG32(GRBM_SOFT_RESET, tmp);
3958 		tmp = RREG32(GRBM_SOFT_RESET);
3959 	}
3960 
3961 	if (srbm_soft_reset) {
3962 		tmp = RREG32(SRBM_SOFT_RESET);
3963 		tmp |= srbm_soft_reset;
3964 		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
3965 		WREG32(SRBM_SOFT_RESET, tmp);
3966 		tmp = RREG32(SRBM_SOFT_RESET);
3967 
3968 		udelay(50);
3969 
3970 		tmp &= ~srbm_soft_reset;
3971 		WREG32(SRBM_SOFT_RESET, tmp);
3972 		tmp = RREG32(SRBM_SOFT_RESET);
3973 	}
3974 
3975 	/* Wait a little for things to settle down */
3976 	udelay(50);
3977 
3978 	evergreen_mc_resume(rdev, &save);
3979 	udelay(50);
3980 
3981 	evergreen_print_gpu_status_regs(rdev);
3982 }
3983 
3984 static void si_set_clk_bypass_mode(struct radeon_device *rdev)
3985 {
3986 	u32 tmp, i;
3987 
3988 	tmp = RREG32(CG_SPLL_FUNC_CNTL);
3989 	tmp |= SPLL_BYPASS_EN;
3990 	WREG32(CG_SPLL_FUNC_CNTL, tmp);
3991 
3992 	tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
3993 	tmp |= SPLL_CTLREQ_CHG;
3994 	WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
3995 
3996 	for (i = 0; i < rdev->usec_timeout; i++) {
3997 		if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS)
3998 			break;
3999 		udelay(1);
4000 	}
4001 
4002 	tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
4003 	tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE);
4004 	WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
4005 
4006 	tmp = RREG32(MPLL_CNTL_MODE);
4007 	tmp &= ~MPLL_MCLK_SEL;
4008 	WREG32(MPLL_CNTL_MODE, tmp);
4009 }
4010 
4011 static void si_spll_powerdown(struct radeon_device *rdev)
4012 {
4013 	u32 tmp;
4014 
4015 	tmp = RREG32(SPLL_CNTL_MODE);
4016 	tmp |= SPLL_SW_DIR_CONTROL;
4017 	WREG32(SPLL_CNTL_MODE, tmp);
4018 
4019 	tmp = RREG32(CG_SPLL_FUNC_CNTL);
4020 	tmp |= SPLL_RESET;
4021 	WREG32(CG_SPLL_FUNC_CNTL, tmp);
4022 
4023 	tmp = RREG32(CG_SPLL_FUNC_CNTL);
4024 	tmp |= SPLL_SLEEP;
4025 	WREG32(CG_SPLL_FUNC_CNTL, tmp);
4026 
4027 	tmp = RREG32(SPLL_CNTL_MODE);
4028 	tmp &= ~SPLL_SW_DIR_CONTROL;
4029 	WREG32(SPLL_CNTL_MODE, tmp);
4030 }
4031 
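/**
 * si_gpu_pci_config_reset - reset the asic via the PCI config space
 *
 * @rdev: radeon_device pointer
 *
 * Quiesces the CP, DMA and rlc engines, switches the clocks to
 * bypass mode, powers down the SPLL, and then triggers a reset
 * through the PCI config space (SI).
 */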
4032 static void si_gpu_pci_config_reset(struct radeon_device *rdev)
4033 {
4034 	struct evergreen_mc_save save;
4035 	u32 tmp, i;
4036 
4037 	dev_info(rdev->dev, "GPU pci config reset\n");
4038 
4039 	/* disable dpm? */
4040 
4041 	/* disable cg/pg */
4042 	si_fini_pg(rdev);
4043 	si_fini_cg(rdev);
4044 
4045 	/* Disable CP parsing/prefetching */
4046 	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
4047 	/* dma0 */
4048 	tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
4049 	tmp &= ~DMA_RB_ENABLE;
4050 	WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
4051 	/* dma1 */
4052 	tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
4053 	tmp &= ~DMA_RB_ENABLE;
4054 	WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
4055 	/* XXX other engines? */
4056 
4057 	/* halt the rlc, disable cp internal ints */
4058 	si_rlc_stop(rdev);
4059 
4060 	udelay(50);
4061 
4062 	/* disable mem access */
4063 	evergreen_mc_stop(rdev, &save);
4064 	if (evergreen_mc_wait_for_idle(rdev)) {
4065 		dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
4066 	}
4067 
4068 	/* set mclk/sclk to bypass */
4069 	si_set_clk_bypass_mode(rdev);
4070 	/* powerdown spll */
4071 	si_spll_powerdown(rdev);
4072 	/* disable BM */
4073 	pci_clear_master(rdev->pdev);
4074 	/* reset */
4075 	radeon_pci_config_reset(rdev);
4076 	/* wait for asic to come out of reset */
4077 	for (i = 0; i < rdev->usec_timeout; i++) {
4078 		if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
4079 			break;
4080 		udelay(1);
4081 	}
4082 }
4083 
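/**
 * si_asic_reset - attempt an ASIC reset
 *
 * @rdev: radeon_device pointer
 * @hard: force a PCI config reset rather than a soft reset
 *
 * Tries a soft reset of the blocks that report busy; if that is
 * not sufficient and radeon_hard_reset is set, falls back to a
 * PCI config reset (SI).
 * Returns 0.
 */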
4084 int si_asic_reset(struct radeon_device *rdev, bool hard)
4085 {
4086 	u32 reset_mask;
4087 
4088 	if (hard) {
4089 		si_gpu_pci_config_reset(rdev);
4090 		return 0;
4091 	}
4092 
4093 	reset_mask = si_gpu_check_soft_reset(rdev);
4094 
4095 	if (reset_mask)
4096 		r600_set_bios_scratch_engine_hung(rdev, true);
4097 
4098 	/* try soft reset */
4099 	si_gpu_soft_reset(rdev, reset_mask);
4100 
4101 	reset_mask = si_gpu_check_soft_reset(rdev);
4102 
4103 	/* try pci config reset */
4104 	if (reset_mask && radeon_hard_reset)
4105 		si_gpu_pci_config_reset(rdev);
4106 
4107 	reset_mask = si_gpu_check_soft_reset(rdev);
4108 
4109 	if (!reset_mask)
4110 		r600_set_bios_scratch_engine_hung(rdev, false);
4111 
4112 	return 0;
4113 }
4114 
4115 /**
4116  * si_gfx_is_lockup - Check if the GFX engine is locked up
4117  *
4118  * @rdev: radeon_device pointer
4119  * @ring: radeon_ring structure holding ring information
4120  *
4121  * Check if the GFX engine is locked up.
4122  * Returns true if the engine appears to be locked up, false if not.
4123  */
4124 bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
4125 {
4126 	u32 reset_mask = si_gpu_check_soft_reset(rdev);
4127 
4128 	if (!(reset_mask & (RADEON_RESET_GFX |
4129 			    RADEON_RESET_COMPUTE |
4130 			    RADEON_RESET_CP))) {
4131 		radeon_ring_lockup_update(rdev, ring);
4132 		return false;
4133 	}
4134 	return radeon_ring_test_lockup(rdev, ring);
4135 }
4136 
4137 /* MC */
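/**
 * si_mc_program - program the memory controller apertures
 *
 * @rdev: radeon_device pointer
 *
 * Programs the system and frame buffer apertures while display and
 * memory clients are stopped, and disables VGA rendering so it
 * cannot scribble over VRAM (SI).
 */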
4138 static void si_mc_program(struct radeon_device *rdev)
4139 {
4140 	struct evergreen_mc_save save;
4141 	u32 tmp;
4142 	int i, j;
4143 
4144 	/* Initialize HDP */
4145 	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
4146 		WREG32((0x2c14 + j), 0x00000000);
4147 		WREG32((0x2c18 + j), 0x00000000);
4148 		WREG32((0x2c1c + j), 0x00000000);
4149 		WREG32((0x2c20 + j), 0x00000000);
4150 		WREG32((0x2c24 + j), 0x00000000);
4151 	}
4152 	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
4153 
4154 	evergreen_mc_stop(rdev, &save);
4155 	if (radeon_mc_wait_for_idle(rdev)) {
4156 		dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
4157 	}
4158 	if (!ASIC_IS_NODCE(rdev))
4159 		/* Lockout access through VGA aperture */
4160 		WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
4161 	/* Update configuration */
4162 	WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
4163 	       rdev->mc.vram_start >> 12);
4164 	WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
4165 	       rdev->mc.vram_end >> 12);
4166 	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
4167 	       rdev->vram_scratch.gpu_addr >> 12);
4168 	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
4169 	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
4170 	WREG32(MC_VM_FB_LOCATION, tmp);
4171 	/* XXX double check these! */
4172 	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
4173 	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
4174 	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
4175 	WREG32(MC_VM_AGP_BASE, 0);
4176 	WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
4177 	WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
4178 	if (radeon_mc_wait_for_idle(rdev)) {
4179 		dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
4180 	}
4181 	evergreen_mc_resume(rdev, &save);
4182 	if (!ASIC_IS_NODCE(rdev)) {
4183 		/* we need to own VRAM, so turn off the VGA renderer here
4184 		 * to stop it overwriting our objects */
4185 		rv515_vga_render_disable(rdev);
4186 	}
4187 }
4188 
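/**
 * si_vram_gtt_location - place VRAM and GTT in the GPU address space
 *
 * @rdev: radeon_device pointer
 * @mc: radeon_mc pointer
 *
 * Clamps the VRAM size to leave room for at least 1024M of GTT,
 * then lets the common code pick the VRAM and GTT locations (SI).
 */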
4189 void si_vram_gtt_location(struct radeon_device *rdev,
4190 			  struct radeon_mc *mc)
4191 {
4192 	if (mc->mc_vram_size > 0xFFC0000000ULL) {
4193 		/* leave room for at least 1024M GTT */
4194 		dev_warn(rdev->dev, "limiting VRAM\n");
4195 		mc->real_vram_size = 0xFFC0000000ULL;
4196 		mc->mc_vram_size = 0xFFC0000000ULL;
4197 	}
4198 	radeon_vram_location(rdev, &rdev->mc, 0);
4199 	rdev->mc.gtt_base_align = 0;
4200 	radeon_gtt_location(rdev, mc);
4201 }
4202 
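/**
 * si_mc_init - read out the VRAM configuration
 *
 * @rdev: radeon_device pointer
 *
 * Determines the VRAM width from the memory channel size and count,
 * reads the VRAM size (in MB) from CONFIG_MEMSIZE, and sets up the
 * VRAM/GTT locations (SI).
 * Returns 0.
 */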
4203 static int si_mc_init(struct radeon_device *rdev)
4204 {
4205 	u32 tmp;
4206 	int chansize, numchan;
4207 
4208 	/* Get VRAM information */
4209 	rdev->mc.vram_is_ddr = true;
4210 	tmp = RREG32(MC_ARB_RAMCFG);
4211 	if (tmp & CHANSIZE_OVERRIDE) {
4212 		chansize = 16;
4213 	} else if (tmp & CHANSIZE_MASK) {
4214 		chansize = 64;
4215 	} else {
4216 		chansize = 32;
4217 	}
4218 	tmp = RREG32(MC_SHARED_CHMAP);
4219 	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
4220 	case 0:
4221 	default:
4222 		numchan = 1;
4223 		break;
4224 	case 1:
4225 		numchan = 2;
4226 		break;
4227 	case 2:
4228 		numchan = 4;
4229 		break;
4230 	case 3:
4231 		numchan = 8;
4232 		break;
4233 	case 4:
4234 		numchan = 3;
4235 		break;
4236 	case 5:
4237 		numchan = 6;
4238 		break;
4239 	case 6:
4240 		numchan = 10;
4241 		break;
4242 	case 7:
4243 		numchan = 12;
4244 		break;
4245 	case 8:
4246 		numchan = 16;
4247 		break;
4248 	}
4249 	rdev->mc.vram_width = numchan * chansize;
4250 	/* Could the aperture size report 0? */
4251 	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
4252 	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
4253 	/* size in MB on si */
4254 	tmp = RREG32(CONFIG_MEMSIZE);
4255 	/* some boards may have garbage in the upper 16 bits */
4256 	if (tmp & 0xffff0000) {
4257 		DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
4258 		if (tmp & 0xffff)
4259 			tmp &= 0xffff;
4260 	}
4261 	rdev->mc.mc_vram_size = tmp * 1024ULL * 1024ULL;
4262 	rdev->mc.real_vram_size = rdev->mc.mc_vram_size;
4263 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
4264 	si_vram_gtt_location(rdev, &rdev->mc);
4265 	radeon_update_bandwidth_info(rdev);
4266 
4267 	return 0;
4268 }
4269 
4270 /*
4271  * GART
4272  */
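/**
 * si_pcie_gart_tlb_flush - flush the GART TLB
 *
 * @rdev: radeon_device pointer
 *
 * Flushes the HDP cache and requests a TLB invalidate for VM
 * context 0, which holds the GART mappings (SI).
 */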
4273 void si_pcie_gart_tlb_flush(struct radeon_device *rdev)
4274 {
4275 	/* flush hdp cache */
4276 	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
4277 
4278 	/* bits 0-15 are the VM contexts0-15 */
4279 	WREG32(VM_INVALIDATE_REQUEST, 1);
4280 }
4281 
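/**
 * si_pcie_gart_enable - set up the PCIE GART
 *
 * @rdev: radeon_device pointer
 *
 * Pins the GART table in VRAM, programs the L1 TLB and L2 cache
 * control, sets up VM context 0 for GART and contexts 1-15 for
 * per-process VMs, and flushes the TLB (SI).
 * Returns 0 on success, error on failure.
 */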
4282 static int si_pcie_gart_enable(struct radeon_device *rdev)
4283 {
4284 	int r, i;
4285 
4286 	if (rdev->gart.robj == NULL) {
4287 		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
4288 		return -EINVAL;
4289 	}
4290 	r = radeon_gart_table_vram_pin(rdev);
4291 	if (r)
4292 		return r;
4293 	/* Setup TLB control */
4294 	WREG32(MC_VM_MX_L1_TLB_CNTL,
4295 	       (0xA << 7) |
4296 	       ENABLE_L1_TLB |
4297 	       ENABLE_L1_FRAGMENT_PROCESSING |
4298 	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
4299 	       ENABLE_ADVANCED_DRIVER_MODEL |
4300 	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
4301 	/* Setup L2 cache */
4302 	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
4303 	       ENABLE_L2_FRAGMENT_PROCESSING |
4304 	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
4305 	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
4306 	       EFFECTIVE_L2_QUEUE_SIZE(7) |
4307 	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
4308 	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
4309 	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
4310 	       BANK_SELECT(4) |
4311 	       L2_CACHE_BIGK_FRAGMENT_SIZE(4));
4312 	/* setup context0 */
4313 	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
4314 	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
4315 	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
4316 	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
4317 			(u32)(rdev->dummy_page.addr >> 12));
4318 	WREG32(VM_CONTEXT0_CNTL2, 0);
4319 	WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
4320 				  RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));
4321 
4322 	WREG32(0x15D4, 0);
4323 	WREG32(0x15D8, 0);
4324 	WREG32(0x15DC, 0);
4325 
4326 	/* empty context1-15 */
4327 	/* set vm size, must be a multiple of 4 */
4328 	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
4329 	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1);
4330 	/* Assign the pt base to something valid for now; the pts used for
4331 	 * the VMs are determined by the application and setup and assigned
4332 	 * on the fly in the vm part of radeon_gart.c
4333 	 */
4334 	for (i = 1; i < 16; i++) {
4335 		if (i < 8)
4336 			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
4337 			       rdev->vm_manager.saved_table_addr[i]);
4338 		else
4339 			WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
4340 			       rdev->vm_manager.saved_table_addr[i]);
4341 	}
4342 
4343 	/* enable context1-15 */
4344 	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
4345 	       (u32)(rdev->dummy_page.addr >> 12));
4346 	WREG32(VM_CONTEXT1_CNTL2, 4);
4347 	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
4348 				PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
4349 				RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
4350 				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
4351 				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
4352 				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
4353 				PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
4354 				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
4355 				VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
4356 				VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
4357 				READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
4358 				READ_PROTECTION_FAULT_ENABLE_DEFAULT |
4359 				WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
4360 				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
4361 
4362 	si_pcie_gart_tlb_flush(rdev);
4363 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
4364 		 (unsigned)(rdev->mc.gtt_size >> 20),
4365 		 (unsigned long long)rdev->gart.table_addr);
4366 	rdev->gart.ready = true;
4367 	return 0;
4368 }
4369 
4370 static void si_pcie_gart_disable(struct radeon_device *rdev)
4371 {
4372 	unsigned i;
4373 
4374 	for (i = 1; i < 16; ++i) {
4375 		uint32_t reg;
4376 		if (i < 8)
4377 			reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2);
4378 		else
4379 			reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2);
4380 		rdev->vm_manager.saved_table_addr[i] = RREG32(reg);
4381 	}
4382 
4383 	/* Disable all tables */
4384 	WREG32(VM_CONTEXT0_CNTL, 0);
4385 	WREG32(VM_CONTEXT1_CNTL, 0);
4386 	/* Setup TLB control */
4387 	WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
4388 	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
4389 	/* Setup L2 cache */
4390 	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
4391 	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
4392 	       EFFECTIVE_L2_QUEUE_SIZE(7) |
4393 	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
4394 	WREG32(VM_L2_CNTL2, 0);
4395 	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
4396 	       L2_CACHE_BIGK_FRAGMENT_SIZE(0));
4397 	radeon_gart_table_vram_unpin(rdev);
4398 }
4399 
4400 static void si_pcie_gart_fini(struct radeon_device *rdev)
4401 {
4402 	si_pcie_gart_disable(rdev);
4403 	radeon_gart_table_vram_free(rdev);
4404 	radeon_gart_fini(rdev);
4405 }
4406 
4407 /* vm parser */
4408 static bool si_vm_reg_valid(u32 reg)
4409 {
4410 	/* context regs are fine */
4411 	if (reg >= 0x28000)
4412 		return true;
4413 
4414 	/* shader regs are also fine */
4415 	if (reg >= 0xB000 && reg < 0xC000)
4416 		return true;
4417 
4418 	/* check config regs */
4419 	switch (reg) {
4420 	case GRBM_GFX_INDEX:
4421 	case CP_STRMOUT_CNTL:
4422 	case VGT_VTX_VECT_EJECT_REG:
4423 	case VGT_CACHE_INVALIDATION:
4424 	case VGT_ESGS_RING_SIZE:
4425 	case VGT_GSVS_RING_SIZE:
4426 	case VGT_GS_VERTEX_REUSE:
4427 	case VGT_PRIMITIVE_TYPE:
4428 	case VGT_INDEX_TYPE:
4429 	case VGT_NUM_INDICES:
4430 	case VGT_NUM_INSTANCES:
4431 	case VGT_TF_RING_SIZE:
4432 	case VGT_HS_OFFCHIP_PARAM:
4433 	case VGT_TF_MEMORY_BASE:
4434 	case PA_CL_ENHANCE:
4435 	case PA_SU_LINE_STIPPLE_VALUE:
4436 	case PA_SC_LINE_STIPPLE_STATE:
4437 	case PA_SC_ENHANCE:
4438 	case SQC_CACHES:
4439 	case SPI_STATIC_THREAD_MGMT_1:
4440 	case SPI_STATIC_THREAD_MGMT_2:
4441 	case SPI_STATIC_THREAD_MGMT_3:
4442 	case SPI_PS_MAX_WAVE_ID:
4443 	case SPI_CONFIG_CNTL:
4444 	case SPI_CONFIG_CNTL_1:
4445 	case TA_CNTL_AUX:
4446 	case TA_CS_BC_BASE_ADDR:
4447 		return true;
4448 	default:
4449 		DRM_ERROR("Invalid register 0x%x in CS\n", reg);
4450 		return false;
4451 	}
4452 }
4453 
4454 static int si_vm_packet3_ce_check(struct radeon_device *rdev,
4455 				  u32 *ib, struct radeon_cs_packet *pkt)
4456 {
4457 	switch (pkt->opcode) {
4458 	case PACKET3_NOP:
4459 	case PACKET3_SET_BASE:
4460 	case PACKET3_SET_CE_DE_COUNTERS:
4461 	case PACKET3_LOAD_CONST_RAM:
4462 	case PACKET3_WRITE_CONST_RAM:
4463 	case PACKET3_WRITE_CONST_RAM_OFFSET:
4464 	case PACKET3_DUMP_CONST_RAM:
4465 	case PACKET3_INCREMENT_CE_COUNTER:
4466 	case PACKET3_WAIT_ON_DE_COUNTER:
4467 	case PACKET3_CE_WRITE:
4468 		break;
4469 	default:
4470 		DRM_ERROR("Invalid CE packet3: 0x%x\n", pkt->opcode);
4471 		return -EINVAL;
4472 	}
4473 	return 0;
4474 }
4475 
4476 static int si_vm_packet3_cp_dma_check(u32 *ib, u32 idx)
4477 {
4478 	u32 start_reg, reg, i;
4479 	u32 command = ib[idx + 4];
4480 	u32 info = ib[idx + 1];
4481 	u32 idx_value = ib[idx];
4482 	if (command & PACKET3_CP_DMA_CMD_SAS) {
4483 		/* src address space is register */
4484 		if (((info & 0x60000000) >> 29) == 0) {
4485 			start_reg = idx_value << 2;
4486 			if (command & PACKET3_CP_DMA_CMD_SAIC) {
4487 				reg = start_reg;
4488 				if (!si_vm_reg_valid(reg)) {
4489 					DRM_ERROR("CP DMA Bad SRC register\n");
4490 					return -EINVAL;
4491 				}
4492 			} else {
4493 				for (i = 0; i < (command & 0x1fffff); i++) {
4494 					reg = start_reg + (4 * i);
4495 					if (!si_vm_reg_valid(reg)) {
4496 						DRM_ERROR("CP DMA Bad SRC register\n");
4497 						return -EINVAL;
4498 					}
4499 				}
4500 			}
4501 		}
4502 	}
4503 	if (command & PACKET3_CP_DMA_CMD_DAS) {
4504 		/* dst address space is register */
4505 		if (((info & 0x00300000) >> 20) == 0) {
4506 			start_reg = ib[idx + 2];
4507 			if (command & PACKET3_CP_DMA_CMD_DAIC) {
4508 				reg = start_reg;
4509 				if (!si_vm_reg_valid(reg)) {
4510 					DRM_ERROR("CP DMA Bad DST register\n");
4511 					return -EINVAL;
4512 				}
4513 			} else {
4514 				for (i = 0; i < (command & 0x1fffff); i++) {
4515 					reg = start_reg + (4 * i);
4516 					if (!si_vm_reg_valid(reg)) {
4517 						DRM_ERROR("CP DMA Bad DST register\n");
4518 						return -EINVAL;
4519 					}
4520 				}
4521 			}
4522 		}
4523 	}
4524 	return 0;
4525 }
4526 
4527 static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
4528 				   u32 *ib, struct radeon_cs_packet *pkt)
4529 {
4530 	int r;
4531 	u32 idx = pkt->idx + 1;
4532 	u32 idx_value = ib[idx];
4533 	u32 start_reg, end_reg, reg, i;
4534 
4535 	switch (pkt->opcode) {
4536 	case PACKET3_NOP:
4537 	case PACKET3_SET_BASE:
4538 	case PACKET3_CLEAR_STATE:
4539 	case PACKET3_INDEX_BUFFER_SIZE:
4540 	case PACKET3_DISPATCH_DIRECT:
4541 	case PACKET3_DISPATCH_INDIRECT:
4542 	case PACKET3_ALLOC_GDS:
4543 	case PACKET3_WRITE_GDS_RAM:
4544 	case PACKET3_ATOMIC_GDS:
4545 	case PACKET3_ATOMIC:
4546 	case PACKET3_OCCLUSION_QUERY:
4547 	case PACKET3_SET_PREDICATION:
4548 	case PACKET3_COND_EXEC:
4549 	case PACKET3_PRED_EXEC:
4550 	case PACKET3_DRAW_INDIRECT:
4551 	case PACKET3_DRAW_INDEX_INDIRECT:
4552 	case PACKET3_INDEX_BASE:
4553 	case PACKET3_DRAW_INDEX_2:
4554 	case PACKET3_CONTEXT_CONTROL:
4555 	case PACKET3_INDEX_TYPE:
4556 	case PACKET3_DRAW_INDIRECT_MULTI:
4557 	case PACKET3_DRAW_INDEX_AUTO:
4558 	case PACKET3_DRAW_INDEX_IMMD:
4559 	case PACKET3_NUM_INSTANCES:
4560 	case PACKET3_DRAW_INDEX_MULTI_AUTO:
4561 	case PACKET3_STRMOUT_BUFFER_UPDATE:
4562 	case PACKET3_DRAW_INDEX_OFFSET_2:
4563 	case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
4564 	case PACKET3_DRAW_INDEX_INDIRECT_MULTI:
4565 	case PACKET3_MPEG_INDEX:
4566 	case PACKET3_WAIT_REG_MEM:
4567 	case PACKET3_MEM_WRITE:
4568 	case PACKET3_PFP_SYNC_ME:
4569 	case PACKET3_SURFACE_SYNC:
4570 	case PACKET3_EVENT_WRITE:
4571 	case PACKET3_EVENT_WRITE_EOP:
4572 	case PACKET3_EVENT_WRITE_EOS:
4573 	case PACKET3_SET_CONTEXT_REG:
4574 	case PACKET3_SET_CONTEXT_REG_INDIRECT:
4575 	case PACKET3_SET_SH_REG:
4576 	case PACKET3_SET_SH_REG_OFFSET:
4577 	case PACKET3_INCREMENT_DE_COUNTER:
4578 	case PACKET3_WAIT_ON_CE_COUNTER:
4579 	case PACKET3_WAIT_ON_AVAIL_BUFFER:
4580 	case PACKET3_ME_WRITE:
4581 		break;
4582 	case PACKET3_COPY_DATA:
4583 		if ((idx_value & 0xf00) == 0) {
4584 			reg = ib[idx + 3] * 4;
4585 			if (!si_vm_reg_valid(reg))
4586 				return -EINVAL;
4587 		}
4588 		break;
4589 	case PACKET3_WRITE_DATA:
4590 		if ((idx_value & 0xf00) == 0) {
4591 			start_reg = ib[idx + 1] * 4;
4592 			if (idx_value & 0x10000) {
4593 				if (!si_vm_reg_valid(start_reg))
4594 					return -EINVAL;
4595 			} else {
4596 				for (i = 0; i < (pkt->count - 2); i++) {
4597 					reg = start_reg + (4 * i);
4598 					if (!si_vm_reg_valid(reg))
4599 						return -EINVAL;
4600 				}
4601 			}
4602 		}
4603 		break;
4604 	case PACKET3_COND_WRITE:
4605 		if (idx_value & 0x100) {
4606 			reg = ib[idx + 5] * 4;
4607 			if (!si_vm_reg_valid(reg))
4608 				return -EINVAL;
4609 		}
4610 		break;
4611 	case PACKET3_COPY_DW:
4612 		if (idx_value & 0x2) {
4613 			reg = ib[idx + 3] * 4;
4614 			if (!si_vm_reg_valid(reg))
4615 				return -EINVAL;
4616 		}
4617 		break;
4618 	case PACKET3_SET_CONFIG_REG:
4619 		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
4620 		end_reg = 4 * pkt->count + start_reg - 4;
4621 		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
4622 		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
4623 		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
4624 			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
4625 			return -EINVAL;
4626 		}
4627 		for (i = 0; i < pkt->count; i++) {
4628 			reg = start_reg + (4 * i);
4629 			if (!si_vm_reg_valid(reg))
4630 				return -EINVAL;
4631 		}
4632 		break;
4633 	case PACKET3_CP_DMA:
4634 		r = si_vm_packet3_cp_dma_check(ib, idx);
4635 		if (r)
4636 			return r;
4637 		break;
4638 	default:
4639 		DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
4640 		return -EINVAL;
4641 	}
4642 	return 0;
4643 }
4644 
4645 static int si_vm_packet3_compute_check(struct radeon_device *rdev,
4646 				       u32 *ib, struct radeon_cs_packet *pkt)
4647 {
4648 	int r;
4649 	u32 idx = pkt->idx + 1;
4650 	u32 idx_value = ib[idx];
4651 	u32 start_reg, reg, i;
4652 
4653 	switch (pkt->opcode) {
4654 	case PACKET3_NOP:
4655 	case PACKET3_SET_BASE:
4656 	case PACKET3_CLEAR_STATE:
4657 	case PACKET3_DISPATCH_DIRECT:
4658 	case PACKET3_DISPATCH_INDIRECT:
4659 	case PACKET3_ALLOC_GDS:
4660 	case PACKET3_WRITE_GDS_RAM:
4661 	case PACKET3_ATOMIC_GDS:
4662 	case PACKET3_ATOMIC:
4663 	case PACKET3_OCCLUSION_QUERY:
4664 	case PACKET3_SET_PREDICATION:
4665 	case PACKET3_COND_EXEC:
4666 	case PACKET3_PRED_EXEC:
4667 	case PACKET3_CONTEXT_CONTROL:
4668 	case PACKET3_STRMOUT_BUFFER_UPDATE:
4669 	case PACKET3_WAIT_REG_MEM:
4670 	case PACKET3_MEM_WRITE:
4671 	case PACKET3_PFP_SYNC_ME:
4672 	case PACKET3_SURFACE_SYNC:
4673 	case PACKET3_EVENT_WRITE:
4674 	case PACKET3_EVENT_WRITE_EOP:
4675 	case PACKET3_EVENT_WRITE_EOS:
4676 	case PACKET3_SET_CONTEXT_REG:
4677 	case PACKET3_SET_CONTEXT_REG_INDIRECT:
4678 	case PACKET3_SET_SH_REG:
4679 	case PACKET3_SET_SH_REG_OFFSET:
4680 	case PACKET3_INCREMENT_DE_COUNTER:
4681 	case PACKET3_WAIT_ON_CE_COUNTER:
4682 	case PACKET3_WAIT_ON_AVAIL_BUFFER:
4683 	case PACKET3_ME_WRITE:
4684 		break;
4685 	case PACKET3_COPY_DATA:
4686 		if ((idx_value & 0xf00) == 0) {
4687 			reg = ib[idx + 3] * 4;
4688 			if (!si_vm_reg_valid(reg))
4689 				return -EINVAL;
4690 		}
4691 		break;
4692 	case PACKET3_WRITE_DATA:
4693 		if ((idx_value & 0xf00) == 0) {
4694 			start_reg = ib[idx + 1] * 4;
4695 			if (idx_value & 0x10000) {
4696 				if (!si_vm_reg_valid(start_reg))
4697 					return -EINVAL;
4698 			} else {
4699 				for (i = 0; i < (pkt->count - 2); i++) {
4700 					reg = start_reg + (4 * i);
4701 					if (!si_vm_reg_valid(reg))
4702 						return -EINVAL;
4703 				}
4704 			}
4705 		}
4706 		break;
4707 	case PACKET3_COND_WRITE:
4708 		if (idx_value & 0x100) {
4709 			reg = ib[idx + 5] * 4;
4710 			if (!si_vm_reg_valid(reg))
4711 				return -EINVAL;
4712 		}
4713 		break;
4714 	case PACKET3_COPY_DW:
4715 		if (idx_value & 0x2) {
4716 			reg = ib[idx + 3] * 4;
4717 			if (!si_vm_reg_valid(reg))
4718 				return -EINVAL;
4719 		}
4720 		break;
4721 	case PACKET3_CP_DMA:
4722 		r = si_vm_packet3_cp_dma_check(ib, idx);
4723 		if (r)
4724 			return r;
4725 		break;
4726 	default:
4727 		DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode);
4728 		return -EINVAL;
4729 	}
4730 	return 0;
4731 }
4732 
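/**
 * si_ib_parse - validate an indirect buffer for a VM submission
 *
 * @rdev: radeon_device pointer
 * @ib: radeon_ib pointer
 *
 * Walks the IB and checks every PM4 packet against the CE, gfx or
 * compute packet checkers; type 0 packets are rejected outright.
 * Dumps the IB when an invalid packet is found (SI).
 * Returns 0 on success, -EINVAL on an invalid packet.
 */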
4733 int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
4734 {
4735 	int ret = 0;
4736 	u32 idx = 0, i;
4737 	struct radeon_cs_packet pkt;
4738 
4739 	do {
4740 		pkt.idx = idx;
4741 		pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
4742 		pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
4743 		pkt.one_reg_wr = 0;
4744 		switch (pkt.type) {
4745 		case RADEON_PACKET_TYPE0:
4746 			dev_err(rdev->dev, "Packet0 not allowed!\n");
4747 			ret = -EINVAL;
4748 			break;
4749 		case RADEON_PACKET_TYPE2:
4750 			idx += 1;
4751 			break;
4752 		case RADEON_PACKET_TYPE3:
4753 			pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
4754 			if (ib->is_const_ib)
4755 				ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
4756 			else {
4757 				switch (ib->ring) {
4758 				case RADEON_RING_TYPE_GFX_INDEX:
4759 					ret = si_vm_packet3_gfx_check(rdev, ib->ptr, &pkt);
4760 					break;
4761 				case CAYMAN_RING_TYPE_CP1_INDEX:
4762 				case CAYMAN_RING_TYPE_CP2_INDEX:
4763 					ret = si_vm_packet3_compute_check(rdev, ib->ptr, &pkt);
4764 					break;
4765 				default:
4766 					dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->ring);
4767 					ret = -EINVAL;
4768 					break;
4769 				}
4770 			}
4771 			idx += pkt.count + 2;
4772 			break;
4773 		default:
4774 			dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
4775 			ret = -EINVAL;
4776 			break;
4777 		}
4778 		if (ret) {
4779 			for (i = 0; i < ib->length_dw; i++) {
4780 				if (i == idx)
4781 					printk("\t0x%08x <---\n", ib->ptr[i]);
4782 				else
4783 					printk("\t0x%08x\n", ib->ptr[i]);
4784 			}
4785 			break;
4786 		}
4787 	} while (idx < ib->length_dw);
4788 
4789 	return ret;
4790 }
4791 
4792 /*
4793  * vm
4794  */
4795 int si_vm_init(struct radeon_device *rdev)
4796 {
4797 	/* number of VMs */
4798 	rdev->vm_manager.nvm = 16;
4799 	/* base offset of vram pages */
4800 	rdev->vm_manager.vram_base_offset = 0;
4801 
4802 	return 0;
4803 }
4804 
4805 void si_vm_fini(struct radeon_device *rdev)
4806 {
4807 }
4808 
4809 /**
4810  * si_vm_decode_fault - print human readable fault info
4811  *
4812  * @rdev: radeon_device pointer
4813  * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
4814  * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
4815  *
4816  * Print human readable fault information (SI).
4817  */
4818 static void si_vm_decode_fault(struct radeon_device *rdev,
4819 			       u32 status, u32 addr)
4820 {
4821 	u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
4822 	u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
4823 	u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
4824 	char *block;
4825 
4826 	if (rdev->family == CHIP_TAHITI) {
4827 		switch (mc_id) {
4828 		case 160:
4829 		case 144:
4830 		case 96:
4831 		case 80:
4832 		case 224:
4833 		case 208:
4834 		case 32:
4835 		case 16:
4836 			block = "CB";
4837 			break;
4838 		case 161:
4839 		case 145:
4840 		case 97:
4841 		case 81:
4842 		case 225:
4843 		case 209:
4844 		case 33:
4845 		case 17:
4846 			block = "CB_FMASK";
4847 			break;
4848 		case 162:
4849 		case 146:
4850 		case 98:
4851 		case 82:
4852 		case 226:
4853 		case 210:
4854 		case 34:
4855 		case 18:
4856 			block = "CB_CMASK";
4857 			break;
4858 		case 163:
4859 		case 147:
4860 		case 99:
4861 		case 83:
4862 		case 227:
4863 		case 211:
4864 		case 35:
4865 		case 19:
4866 			block = "CB_IMMED";
4867 			break;
4868 		case 164:
4869 		case 148:
4870 		case 100:
4871 		case 84:
4872 		case 228:
4873 		case 212:
4874 		case 36:
4875 		case 20:
4876 			block = "DB";
4877 			break;
4878 		case 165:
4879 		case 149:
4880 		case 101:
4881 		case 85:
4882 		case 229:
4883 		case 213:
4884 		case 37:
4885 		case 21:
4886 			block = "DB_HTILE";
4887 			break;
4888 		case 167:
4889 		case 151:
4890 		case 103:
4891 		case 87:
4892 		case 231:
4893 		case 215:
4894 		case 39:
4895 		case 23:
4896 			block = "DB_STEN";
4897 			break;
4898 		case 72:
4899 		case 68:
4900 		case 64:
4901 		case 8:
4902 		case 4:
4903 		case 0:
4904 		case 136:
4905 		case 132:
4906 		case 128:
4907 		case 200:
4908 		case 196:
4909 		case 192:
4910 			block = "TC";
4911 			break;
4912 		case 112:
4913 		case 48:
4914 			block = "CP";
4915 			break;
4916 		case 49:
4917 		case 177:
4918 		case 50:
4919 		case 178:
4920 			block = "SH";
4921 			break;
4922 		case 53:
4923 		case 190:
4924 			block = "VGT";
4925 			break;
4926 		case 117:
4927 			block = "IH";
4928 			break;
4929 		case 51:
4930 		case 115:
4931 			block = "RLC";
4932 			break;
4933 		case 119:
4934 		case 183:
4935 			block = "DMA0";
4936 			break;
4937 		case 61:
4938 			block = "DMA1";
4939 			break;
4940 		case 248:
4941 		case 120:
4942 			block = "HDP";
4943 			break;
4944 		default:
4945 			block = "unknown";
4946 			break;
4947 		}
4948 	} else {
4949 		switch (mc_id) {
4950 		case 32:
4951 		case 16:
4952 		case 96:
4953 		case 80:
4954 		case 160:
4955 		case 144:
4956 		case 224:
4957 		case 208:
4958 			block = "CB";
4959 			break;
4960 		case 33:
4961 		case 17:
4962 		case 97:
4963 		case 81:
4964 		case 161:
4965 		case 145:
4966 		case 225:
4967 		case 209:
4968 			block = "CB_FMASK";
4969 			break;
4970 		case 34:
4971 		case 18:
4972 		case 98:
4973 		case 82:
4974 		case 162:
4975 		case 146:
4976 		case 226:
4977 		case 210:
4978 			block = "CB_CMASK";
4979 			break;
4980 		case 35:
4981 		case 19:
4982 		case 99:
4983 		case 83:
4984 		case 163:
4985 		case 147:
4986 		case 227:
4987 		case 211:
4988 			block = "CB_IMMED";
4989 			break;
4990 		case 36:
4991 		case 20:
4992 		case 100:
4993 		case 84:
4994 		case 164:
4995 		case 148:
4996 		case 228:
4997 		case 212:
4998 			block = "DB";
4999 			break;
5000 		case 37:
5001 		case 21:
5002 		case 101:
5003 		case 85:
5004 		case 165:
5005 		case 149:
5006 		case 229:
5007 		case 213:
5008 			block = "DB_HTILE";
5009 			break;
5010 		case 39:
5011 		case 23:
5012 		case 103:
5013 		case 87:
5014 		case 167:
5015 		case 151:
5016 		case 231:
5017 		case 215:
5018 			block = "DB_STEN";
5019 			break;
5020 		case 72:
5021 		case 68:
5022 		case 8:
5023 		case 4:
5024 		case 136:
5025 		case 132:
5026 		case 200:
5027 		case 196:
5028 			block = "TC";
5029 			break;
5030 		case 112:
5031 		case 48:
5032 			block = "CP";
5033 			break;
5034 		case 49:
5035 		case 177:
5036 		case 50:
5037 		case 178:
5038 			block = "SH";
5039 			break;
5040 		case 53:
5041 			block = "VGT";
5042 			break;
5043 		case 117:
5044 			block = "IH";
5045 			break;
5046 		case 51:
5047 		case 115:
5048 			block = "RLC";
5049 			break;
5050 		case 119:
5051 		case 183:
5052 			block = "DMA0";
5053 			break;
5054 		case 61:
5055 			block = "DMA1";
5056 			break;
5057 		case 248:
5058 		case 120:
5059 			block = "HDP";
5060 			break;
5061 		default:
5062 			block = "unknown";
5063 			break;
5064 		}
5065 	}
5066 
5067 	printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
5068 	       protections, vmid, addr,
5069 	       (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
5070 	       block, mc_id);
5071 }
5072 
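/**
 * si_vm_flush - flush the TLB for a VM context via the GFX ring (SI)
 *
 * @rdev: radeon_device pointer
 * @ring: ring buffer to emit the flush packets on
 * @vm_id: VM context to flush
 * @pd_addr: new page directory base address
 *
 * Write the new page table base address for @vm_id, flush the HDP
 * cache, request a TLB invalidate for this context and wait for it to
 * complete, then sync the PFP with the ME to avoid stale PFP reads.
 */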
5073 void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
5074 		 unsigned vm_id, uint64_t pd_addr)
5075 {
5076 	/* write new base address */
5077 	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5078 	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
5079 				 WRITE_DATA_DST_SEL(0)));
5080 
5081 	if (vm_id < 8) {
5082 		radeon_ring_write(ring,
5083 				  (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
5084 	} else {
5085 		radeon_ring_write(ring,
5086 				  (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
5087 	}
5088 	radeon_ring_write(ring, 0);
5089 	radeon_ring_write(ring, pd_addr >> 12);
5090 
5091 	/* flush hdp cache */
5092 	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5093 	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
5094 				 WRITE_DATA_DST_SEL(0)));
5095 	radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
5096 	radeon_ring_write(ring, 0);
5097 	radeon_ring_write(ring, 0x1);
5098 
5099 	/* bits 0-15 are the VM contexts 0-15 */
5100 	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5101 	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
5102 				 WRITE_DATA_DST_SEL(0)));
5103 	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
5104 	radeon_ring_write(ring, 0);
5105 	radeon_ring_write(ring, 1 << vm_id);
5106 
5107 	/* wait for the invalidate to complete */
5108 	radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
5109 	radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |  /* always */
5110 				 WAIT_REG_MEM_ENGINE(0))); /* me */
5111 	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
5112 	radeon_ring_write(ring, 0);
5113 	radeon_ring_write(ring, 0); /* ref */
5114 	radeon_ring_write(ring, 0); /* mask */
5115 	radeon_ring_write(ring, 0x20); /* poll interval */
5116 
5117 	/* sync PFP to ME, otherwise we might get invalid PFP reads */
5118 	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
5119 	radeon_ring_write(ring, 0x0);
5120 }
5121 
5122 /*
5123  * Power and clock gating
5124  */
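/* Poll the two RLC serdes master busy registers until both report
 * idle, bounded by the usual usec timeout.
 */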
5125 static void si_wait_for_rlc_serdes(struct radeon_device *rdev)
5126 {
5127 	int i;
5128 
5129 	for (i = 0; i < rdev->usec_timeout; i++) {
5130 		if (RREG32(RLC_SERDES_MASTER_BUSY_0) == 0)
5131 			break;
5132 		udelay(1);
5133 	}
5134 
5135 	for (i = 0; i < rdev->usec_timeout; i++) {
5136 		if (RREG32(RLC_SERDES_MASTER_BUSY_1) == 0)
5137 			break;
5138 		udelay(1);
5139 	}
5140 }
5141 
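/* Enable or disable the CP context busy/empty (GUI idle) interrupts.
 * When disabling, wait for the graphics clock and power status to
 * settle before returning.
 */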
5142 static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
5143 					 bool enable)
5144 {
5145 	u32 tmp = RREG32(CP_INT_CNTL_RING0);
5146 	u32 mask;
5147 	int i;
5148 
5149 	if (enable)
5150 		tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
5151 	else
5152 		tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
5153 	WREG32(CP_INT_CNTL_RING0, tmp);
5154 
5155 	if (!enable) {
5156 		/* read a gfx register */
5157 		tmp = RREG32(DB_DEPTH_INFO);
5158 
5159 		mask = RLC_BUSY_STATUS | GFX_POWER_STATUS | GFX_CLOCK_STATUS | GFX_LS_STATUS;
5160 		for (i = 0; i < rdev->usec_timeout; i++) {
5161 			if ((RREG32(RLC_STAT) & mask) == (GFX_CLOCK_STATUS | GFX_POWER_STATUS))
5162 				break;
5163 			udelay(1);
5164 		}
5165 	}
5166 }
5167 
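/* Configure the UVD dynamic clock mode.  In software mode the divider
 * is taken from UVD_CGC_CTRL2; otherwise the hardware controls the
 * dynamic clocking itself.
 */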
5168 static void si_set_uvd_dcm(struct radeon_device *rdev,
5169 			   bool sw_mode)
5170 {
5171 	u32 tmp, tmp2;
5172 
5173 	tmp = RREG32(UVD_CGC_CTRL);
5174 	tmp &= ~(CLK_OD_MASK | CG_DT_MASK);
5175 	tmp |= DCM | CG_DT(1) | CLK_OD(4);
5176 
5177 	if (sw_mode) {
5178 		tmp &= ~0x7ffff800;
5179 		tmp2 = DYN_OR_EN | DYN_RR_EN | G_DIV_ID(7);
5180 	} else {
5181 		tmp |= 0x7ffff800;
5182 		tmp2 = 0;
5183 	}
5184 
5185 	WREG32(UVD_CGC_CTRL, tmp);
5186 	WREG32_UVD_CTX(UVD_CGC_CTRL2, tmp2);
5187 }
5188 
5189 void si_init_uvd_internal_cg(struct radeon_device *rdev)
5190 {
5191 	bool hw_mode = true;
5192 
5193 	if (hw_mode) {
5194 		si_set_uvd_dcm(rdev, false);
5195 	} else {
5196 		u32 tmp = RREG32(UVD_CGC_CTRL);
5197 		tmp &= ~DCM;
5198 		WREG32(UVD_CGC_CTRL, tmp);
5199 	}
5200 }
5201 
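/* Disable the RLC and wait for its serdes to go idle.  Returns the
 * original RLC_CNTL value so the caller can restore it later with
 * si_update_rlc().
 */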
5202 static u32 si_halt_rlc(struct radeon_device *rdev)
5203 {
5204 	u32 data, orig;
5205 
5206 	orig = data = RREG32(RLC_CNTL);
5207 
5208 	if (data & RLC_ENABLE) {
5209 		data &= ~RLC_ENABLE;
5210 		WREG32(RLC_CNTL, data);
5211 
5212 		si_wait_for_rlc_serdes(rdev);
5213 	}
5214 
5215 	return orig;
5216 }
5217 
5218 static void si_update_rlc(struct radeon_device *rdev, u32 rlc)
5219 {
5220 	u32 tmp;
5221 
5222 	tmp = RREG32(RLC_CNTL);
5223 	if (tmp != rlc)
5224 		WREG32(RLC_CNTL, rlc);
5225 }
5226 
5227 static void si_enable_dma_pg(struct radeon_device *rdev, bool enable)
5228 {
5229 	u32 data, orig;
5230 
5231 	orig = data = RREG32(DMA_PG);
5232 	if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA))
5233 		data |= PG_CNTL_ENABLE;
5234 	else
5235 		data &= ~PG_CNTL_ENABLE;
5236 	if (orig != data)
5237 		WREG32(DMA_PG, data);
5238 }
5239 
5240 static void si_init_dma_pg(struct radeon_device *rdev)
5241 {
5242 	u32 tmp;
5243 
5244 	WREG32(DMA_PGFSM_WRITE,  0x00002000);
5245 	WREG32(DMA_PGFSM_CONFIG, 0x100010ff);
5246 
5247 	for (tmp = 0; tmp < 5; tmp++)
5248 		WREG32(DMA_PGFSM_WRITE, 0);
5249 }
5250 
5251 static void si_enable_gfx_cgpg(struct radeon_device *rdev,
5252 			       bool enable)
5253 {
5254 	u32 tmp;
5255 
5256 	if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
5257 		tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
5258 		WREG32(RLC_TTOP_D, tmp);
5259 
5260 		tmp = RREG32(RLC_PG_CNTL);
5261 		tmp |= GFX_PG_ENABLE;
5262 		WREG32(RLC_PG_CNTL, tmp);
5263 
5264 		tmp = RREG32(RLC_AUTO_PG_CTRL);
5265 		tmp |= AUTO_PG_EN;
5266 		WREG32(RLC_AUTO_PG_CTRL, tmp);
5267 	} else {
5268 		tmp = RREG32(RLC_AUTO_PG_CTRL);
5269 		tmp &= ~AUTO_PG_EN;
5270 		WREG32(RLC_AUTO_PG_CTRL, tmp);
5271 
5272 		tmp = RREG32(DB_RENDER_CONTROL);
5273 	}
5274 }
5275 
5276 static void si_init_gfx_cgpg(struct radeon_device *rdev)
5277 {
5278 	u32 tmp;
5279 
5280 	WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
5281 
5282 	tmp = RREG32(RLC_PG_CNTL);
5283 	tmp |= GFX_PG_SRC;
5284 	WREG32(RLC_PG_CNTL, tmp);
5285 
5286 	WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
5287 
5288 	tmp = RREG32(RLC_AUTO_PG_CTRL);
5289 
5290 	tmp &= ~GRBM_REG_SGIT_MASK;
5291 	tmp |= GRBM_REG_SGIT(0x700);
5292 	tmp &= ~PG_AFTER_GRBM_REG_ST_MASK;
5293 	WREG32(RLC_AUTO_PG_CTRL, tmp);
5294 }
5295 
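/* Return a bitmap of the active (not disabled) CUs for the given
 * shader engine/array, derived from the fixed and user shader array
 * config registers.
 */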
5296 static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
5297 {
5298 	u32 mask = 0, tmp, tmp1;
5299 	int i;
5300 
5301 	si_select_se_sh(rdev, se, sh);
5302 	tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
5303 	tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
5304 	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5305 
5306 	tmp &= 0xffff0000;
5307 
5308 	tmp |= tmp1;
5309 	tmp >>= 16;
5310 
5311 	for (i = 0; i < rdev->config.si.max_cu_per_sh; i++) {
5312 		mask <<= 1;
5313 		mask |= 1;
5314 	}
5315 
5316 	return (~tmp) & mask;
5317 }
5318 
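/* Mark up to two active CUs per shader array as always-on in
 * RLC_PG_AO_CU_MASK and program the total number of active CUs into
 * RLC_MAX_PG_CU.
 */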
5319 static void si_init_ao_cu_mask(struct radeon_device *rdev)
5320 {
5321 	u32 i, j, k, active_cu_number = 0;
5322 	u32 mask, counter, cu_bitmap;
5323 	u32 tmp = 0;
5324 
5325 	for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
5326 		for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
5327 			mask = 1;
5328 			cu_bitmap = 0;
5329 			counter = 0;
5330 			for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) {
5331 				if (si_get_cu_active_bitmap(rdev, i, j) & mask) {
5332 					if (counter < 2)
5333 						cu_bitmap |= mask;
5334 					counter++;
5335 				}
5336 				mask <<= 1;
5337 			}
5338 
5339 			active_cu_number += counter;
5340 			tmp |= (cu_bitmap << (i * 16 + j * 8));
5341 		}
5342 	}
5343 
5344 	WREG32(RLC_PG_AO_CU_MASK, tmp);
5345 
5346 	tmp = RREG32(RLC_MAX_PG_CU);
5347 	tmp &= ~MAX_PU_CU_MASK;
5348 	tmp |= MAX_PU_CU(active_cu_number);
5349 	WREG32(RLC_MAX_PG_CU, tmp);
5350 }
5351 
5352 static void si_enable_cgcg(struct radeon_device *rdev,
5353 			   bool enable)
5354 {
5355 	u32 data, orig, tmp;
5356 
5357 	orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
5358 
5359 	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) {
5360 		si_enable_gui_idle_interrupt(rdev, true);
5361 
5362 		WREG32(RLC_GCPM_GENERAL_3, 0x00000080);
5363 
5364 		tmp = si_halt_rlc(rdev);
5365 
5366 		WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
5367 		WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
5368 		WREG32(RLC_SERDES_WR_CTRL, 0x00b000ff);
5369 
5370 		si_wait_for_rlc_serdes(rdev);
5371 
5372 		si_update_rlc(rdev, tmp);
5373 
5374 		WREG32(RLC_SERDES_WR_CTRL, 0x007000ff);
5375 
5376 		data |= CGCG_EN | CGLS_EN;
5377 	} else {
5378 		si_enable_gui_idle_interrupt(rdev, false);
5379 
5380 		RREG32(CB_CGTT_SCLK_CTRL);
5381 		RREG32(CB_CGTT_SCLK_CTRL);
5382 		RREG32(CB_CGTT_SCLK_CTRL);
5383 		RREG32(CB_CGTT_SCLK_CTRL);
5384 
5385 		data &= ~(CGCG_EN | CGLS_EN);
5386 	}
5387 
5388 	if (orig != data)
5389 		WREG32(RLC_CGCG_CGLS_CTRL, data);
5390 }
5391 
5392 static void si_enable_mgcg(struct radeon_device *rdev,
5393 			   bool enable)
5394 {
5395 	u32 data, orig, tmp = 0;
5396 
5397 	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) {
5398 		orig = data = RREG32(CGTS_SM_CTRL_REG);
5399 		data = 0x96940200;
5400 		if (orig != data)
5401 			WREG32(CGTS_SM_CTRL_REG, data);
5402 
5403 		if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) {
5404 			orig = data = RREG32(CP_MEM_SLP_CNTL);
5405 			data |= CP_MEM_LS_EN;
5406 			if (orig != data)
5407 				WREG32(CP_MEM_SLP_CNTL, data);
5408 		}
5409 
5410 		orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
5411 		data &= 0xffffffc0;
5412 		if (orig != data)
5413 			WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
5414 
5415 		tmp = si_halt_rlc(rdev);
5416 
5417 		WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
5418 		WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
5419 		WREG32(RLC_SERDES_WR_CTRL, 0x00d000ff);
5420 
5421 		si_update_rlc(rdev, tmp);
5422 	} else {
5423 		orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
5424 		data |= 0x00000003;
5425 		if (orig != data)
5426 			WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
5427 
5428 		data = RREG32(CP_MEM_SLP_CNTL);
5429 		if (data & CP_MEM_LS_EN) {
5430 			data &= ~CP_MEM_LS_EN;
5431 			WREG32(CP_MEM_SLP_CNTL, data);
5432 		}
5433 		orig = data = RREG32(CGTS_SM_CTRL_REG);
5434 		data |= LS_OVERRIDE | OVERRIDE;
5435 		if (orig != data)
5436 			WREG32(CGTS_SM_CTRL_REG, data);
5437 
5438 		tmp = si_halt_rlc(rdev);
5439 
5440 		WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
5441 		WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
5442 		WREG32(RLC_SERDES_WR_CTRL, 0x00e000ff);
5443 
5444 		si_update_rlc(rdev, tmp);
5445 	}
5446 }
5447 
5448 static void si_enable_uvd_mgcg(struct radeon_device *rdev,
5449 			       bool enable)
5450 {
5451 	u32 orig, data, tmp;
5452 
5453 	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) {
5454 		tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
5455 		tmp |= 0x3fff;
5456 		WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp);
5457 
5458 		orig = data = RREG32(UVD_CGC_CTRL);
5459 		data |= DCM;
5460 		if (orig != data)
5461 			WREG32(UVD_CGC_CTRL, data);
5462 
5463 		WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_0, 0);
5464 		WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_1, 0);
5465 	} else {
5466 		tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
5467 		tmp &= ~0x3fff;
5468 		WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp);
5469 
5470 		orig = data = RREG32(UVD_CGC_CTRL);
5471 		data &= ~DCM;
5472 		if (orig != data)
5473 			WREG32(UVD_CGC_CTRL, data);
5474 
5475 		WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_0, 0xffffffff);
5476 		WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_1, 0xffffffff);
5477 	}
5478 }
5479 
5480 static const u32 mc_cg_registers[] =
5481 {
5482 	MC_HUB_MISC_HUB_CG,
5483 	MC_HUB_MISC_SIP_CG,
5484 	MC_HUB_MISC_VM_CG,
5485 	MC_XPB_CLK_GAT,
5486 	ATC_MISC_CG,
5487 	MC_CITF_MISC_WR_CG,
5488 	MC_CITF_MISC_RD_CG,
5489 	MC_CITF_MISC_VM_CG,
5490 	VM_L2_CG,
5491 };
5492 
5493 static void si_enable_mc_ls(struct radeon_device *rdev,
5494 			    bool enable)
5495 {
5496 	int i;
5497 	u32 orig, data;
5498 
5499 	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
5500 		orig = data = RREG32(mc_cg_registers[i]);
5501 		if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS))
5502 			data |= MC_LS_ENABLE;
5503 		else
5504 			data &= ~MC_LS_ENABLE;
5505 		if (data != orig)
5506 			WREG32(mc_cg_registers[i], data);
5507 	}
5508 }
5509 
5510 static void si_enable_mc_mgcg(struct radeon_device *rdev,
5511 			       bool enable)
5512 {
5513 	int i;
5514 	u32 orig, data;
5515 
5516 	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
5517 		orig = data = RREG32(mc_cg_registers[i]);
5518 		if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG))
5519 			data |= MC_CG_ENABLE;
5520 		else
5521 			data &= ~MC_CG_ENABLE;
5522 		if (data != orig)
5523 			WREG32(mc_cg_registers[i], data);
5524 	}
5525 }
5526 
5527 static void si_enable_dma_mgcg(struct radeon_device *rdev,
5528 			       bool enable)
5529 {
5530 	u32 orig, data, offset;
5531 	int i;
5532 
5533 	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) {
5534 		for (i = 0; i < 2; i++) {
5535 			if (i == 0)
5536 				offset = DMA0_REGISTER_OFFSET;
5537 			else
5538 				offset = DMA1_REGISTER_OFFSET;
5539 			orig = data = RREG32(DMA_POWER_CNTL + offset);
5540 			data &= ~MEM_POWER_OVERRIDE;
5541 			if (data != orig)
5542 				WREG32(DMA_POWER_CNTL + offset, data);
5543 			WREG32(DMA_CLK_CTRL + offset, 0x00000100);
5544 		}
5545 	} else {
5546 		for (i = 0; i < 2; i++) {
5547 			if (i == 0)
5548 				offset = DMA0_REGISTER_OFFSET;
5549 			else
5550 				offset = DMA1_REGISTER_OFFSET;
5551 			orig = data = RREG32(DMA_POWER_CNTL + offset);
5552 			data |= MEM_POWER_OVERRIDE;
5553 			if (data != orig)
5554 				WREG32(DMA_POWER_CNTL + offset, data);
5555 
5556 			orig = data = RREG32(DMA_CLK_CTRL + offset);
5557 			data = 0xff000000;
5558 			if (data != orig)
5559 				WREG32(DMA_CLK_CTRL + offset, data);
5560 		}
5561 	}
5562 }
5563 
5564 static void si_enable_bif_mgls(struct radeon_device *rdev,
5565 			       bool enable)
5566 {
5567 	u32 orig, data;
5568 
5569 	orig = data = RREG32_PCIE(PCIE_CNTL2);
5570 
5571 	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS))
5572 		data |= SLV_MEM_LS_EN | MST_MEM_LS_EN |
5573 			REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN;
5574 	else
5575 		data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN |
5576 			  REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN);
5577 
5578 	if (orig != data)
5579 		WREG32_PCIE(PCIE_CNTL2, data);
5580 }
5581 
5582 static void si_enable_hdp_mgcg(struct radeon_device *rdev,
5583 			       bool enable)
5584 {
5585 	u32 orig, data;
5586 
5587 	orig = data = RREG32(HDP_HOST_PATH_CNTL);
5588 
5589 	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG))
5590 		data &= ~CLOCK_GATING_DIS;
5591 	else
5592 		data |= CLOCK_GATING_DIS;
5593 
5594 	if (orig != data)
5595 		WREG32(HDP_HOST_PATH_CNTL, data);
5596 }
5597 
5598 static void si_enable_hdp_ls(struct radeon_device *rdev,
5599 			     bool enable)
5600 {
5601 	u32 orig, data;
5602 
5603 	orig = data = RREG32(HDP_MEM_POWER_LS);
5604 
5605 	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS))
5606 		data |= HDP_LS_ENABLE;
5607 	else
5608 		data &= ~HDP_LS_ENABLE;
5609 
5610 	if (orig != data)
5611 		WREG32(HDP_MEM_POWER_LS, data);
5612 }
5613 
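/* Enable or disable clock gating for the selected IP blocks.  For GFX
 * the GUI idle interrupt is masked while the state changes, and MGCG
 * and CGCG must be toggled in opposite order on enable vs. disable.
 */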
5614 static void si_update_cg(struct radeon_device *rdev,
5615 			 u32 block, bool enable)
5616 {
5617 	if (block & RADEON_CG_BLOCK_GFX) {
5618 		si_enable_gui_idle_interrupt(rdev, false);
5619 		/* order matters! */
5620 		if (enable) {
5621 			si_enable_mgcg(rdev, true);
5622 			si_enable_cgcg(rdev, true);
5623 		} else {
5624 			si_enable_cgcg(rdev, false);
5625 			si_enable_mgcg(rdev, false);
5626 		}
5627 		si_enable_gui_idle_interrupt(rdev, true);
5628 	}
5629 
5630 	if (block & RADEON_CG_BLOCK_MC) {
5631 		si_enable_mc_mgcg(rdev, enable);
5632 		si_enable_mc_ls(rdev, enable);
5633 	}
5634 
5635 	if (block & RADEON_CG_BLOCK_SDMA) {
5636 		si_enable_dma_mgcg(rdev, enable);
5637 	}
5638 
5639 	if (block & RADEON_CG_BLOCK_BIF) {
5640 		si_enable_bif_mgls(rdev, enable);
5641 	}
5642 
5643 	if (block & RADEON_CG_BLOCK_UVD) {
5644 		if (rdev->has_uvd) {
5645 			si_enable_uvd_mgcg(rdev, enable);
5646 		}
5647 	}
5648 
5649 	if (block & RADEON_CG_BLOCK_HDP) {
5650 		si_enable_hdp_mgcg(rdev, enable);
5651 		si_enable_hdp_ls(rdev, enable);
5652 	}
5653 }
5654 
5655 static void si_init_cg(struct radeon_device *rdev)
5656 {
5657 	si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
5658 			    RADEON_CG_BLOCK_MC |
5659 			    RADEON_CG_BLOCK_SDMA |
5660 			    RADEON_CG_BLOCK_BIF |
5661 			    RADEON_CG_BLOCK_HDP), true);
5662 	if (rdev->has_uvd) {
5663 		si_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
5664 		si_init_uvd_internal_cg(rdev);
5665 	}
5666 }
5667 
5668 static void si_fini_cg(struct radeon_device *rdev)
5669 {
5670 	if (rdev->has_uvd) {
5671 		si_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
5672 	}
5673 	si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
5674 			    RADEON_CG_BLOCK_MC |
5675 			    RADEON_CG_BLOCK_SDMA |
5676 			    RADEON_CG_BLOCK_BIF |
5677 			    RADEON_CG_BLOCK_HDP), false);
5678 }
5679 
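/**
 * si_get_csb_size - get the size of the clear state buffer (SI)
 *
 * @rdev: radeon_device pointer
 *
 * Count the dwords needed for the preamble, context control, the
 * SET_CONTEXT_REG extents, PA_SC_RASTER_CONFIG and the trailing
 * clear state packets.
 */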
5680 u32 si_get_csb_size(struct radeon_device *rdev)
5681 {
5682 	u32 count = 0;
5683 	const struct cs_section_def *sect = NULL;
5684 	const struct cs_extent_def *ext = NULL;
5685 
5686 	if (rdev->rlc.cs_data == NULL)
5687 		return 0;
5688 
5689 	/* begin clear state */
5690 	count += 2;
5691 	/* context control state */
5692 	count += 3;
5693 
5694 	for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
5695 		for (ext = sect->section; ext->extent != NULL; ++ext) {
5696 			if (sect->id == SECT_CONTEXT)
5697 				count += 2 + ext->reg_count;
5698 			else
5699 				return 0;
5700 		}
5701 	}
5702 	/* pa_sc_raster_config */
5703 	count += 3;
5704 	/* end clear state */
5705 	count += 2;
5706 	/* clear state */
5707 	count += 2;
5708 
5709 	return count;
5710 }
5711 
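/**
 * si_get_csb_buffer - fill the clear state buffer (SI)
 *
 * @rdev: radeon_device pointer
 * @buffer: destination for the little endian packet stream
 *
 * Emit the packet stream that si_get_csb_size() accounts for, using a
 * per-family PA_SC_RASTER_CONFIG value.
 */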
5712 void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
5713 {
5714 	u32 count = 0, i;
5715 	const struct cs_section_def *sect = NULL;
5716 	const struct cs_extent_def *ext = NULL;
5717 
5718 	if (rdev->rlc.cs_data == NULL)
5719 		return;
5720 	if (buffer == NULL)
5721 		return;
5722 
5723 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
5724 	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
5725 
5726 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
5727 	buffer[count++] = cpu_to_le32(0x80000000);
5728 	buffer[count++] = cpu_to_le32(0x80000000);
5729 
5730 	for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
5731 		for (ext = sect->section; ext->extent != NULL; ++ext) {
5732 			if (sect->id == SECT_CONTEXT) {
5733 				buffer[count++] =
5734 					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
5735 				buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
5736 				for (i = 0; i < ext->reg_count; i++)
5737 					buffer[count++] = cpu_to_le32(ext->extent[i]);
5738 			} else {
5739 				return;
5740 			}
5741 		}
5742 	}
5743 
5744 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
5745 	buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
5746 	switch (rdev->family) {
5747 	case CHIP_TAHITI:
5748 	case CHIP_PITCAIRN:
5749 		buffer[count++] = cpu_to_le32(0x2a00126a);
5750 		break;
5751 	case CHIP_VERDE:
5752 		buffer[count++] = cpu_to_le32(0x0000124a);
5753 		break;
5754 	case CHIP_OLAND:
5755 		buffer[count++] = cpu_to_le32(0x00000082);
5756 		break;
5757 	case CHIP_HAINAN:
5758 		buffer[count++] = cpu_to_le32(0x00000000);
5759 		break;
5760 	default:
5761 		buffer[count++] = cpu_to_le32(0x00000000);
5762 		break;
5763 	}
5764 
5765 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
5766 	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
5767 
5768 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
5769 	buffer[count++] = cpu_to_le32(0);
5770 }
5771 
5772 static void si_init_pg(struct radeon_device *rdev)
5773 {
5774 	if (rdev->pg_flags) {
5775 		if (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA) {
5776 			si_init_dma_pg(rdev);
5777 		}
5778 		si_init_ao_cu_mask(rdev);
5779 		if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
5780 			si_init_gfx_cgpg(rdev);
5781 		} else {
5782 			WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
5783 			WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
5784 		}
5785 		si_enable_dma_pg(rdev, true);
5786 		si_enable_gfx_cgpg(rdev, true);
5787 	} else {
5788 		WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
5789 		WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
5790 	}
5791 }
5792 
5793 static void si_fini_pg(struct radeon_device *rdev)
5794 {
5795 	if (rdev->pg_flags) {
5796 		si_enable_dma_pg(rdev, false);
5797 		si_enable_gfx_cgpg(rdev, false);
5798 	}
5799 }
5800 
5801 /*
5802  * RLC
5803  */
5804 void si_rlc_reset(struct radeon_device *rdev)
5805 {
5806 	u32 tmp = RREG32(GRBM_SOFT_RESET);
5807 
5808 	tmp |= SOFT_RESET_RLC;
5809 	WREG32(GRBM_SOFT_RESET, tmp);
5810 	udelay(50);
5811 	tmp &= ~SOFT_RESET_RLC;
5812 	WREG32(GRBM_SOFT_RESET, tmp);
5813 	udelay(50);
5814 }
5815 
5816 static void si_rlc_stop(struct radeon_device *rdev)
5817 {
5818 	WREG32(RLC_CNTL, 0);
5819 
5820 	si_enable_gui_idle_interrupt(rdev, false);
5821 
5822 	si_wait_for_rlc_serdes(rdev);
5823 }
5824 
5825 static void si_rlc_start(struct radeon_device *rdev)
5826 {
5827 	WREG32(RLC_CNTL, RLC_ENABLE);
5828 
5829 	si_enable_gui_idle_interrupt(rdev, true);
5830 
5831 	udelay(50);
5832 }
5833 
5834 static bool si_lbpw_supported(struct radeon_device *rdev)
5835 {
5836 	u32 tmp;
5837 
5838 	/* Enable LBPW only for DDR3 */
5839 	tmp = RREG32(MC_SEQ_MISC0);
5840 	if ((tmp & 0xF0000000) == 0xB0000000)
5841 		return true;
5842 	return false;
5843 }
5844 
5845 static void si_enable_lbpw(struct radeon_device *rdev, bool enable)
5846 {
5847 	u32 tmp;
5848 
5849 	tmp = RREG32(RLC_LB_CNTL);
5850 	if (enable)
5851 		tmp |= LOAD_BALANCE_ENABLE;
5852 	else
5853 		tmp &= ~LOAD_BALANCE_ENABLE;
5854 	WREG32(RLC_LB_CNTL, tmp);
5855 
5856 	if (!enable) {
5857 		si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5858 		WREG32(SPI_LB_CU_MASK, 0x00ff);
5859 	}
5860 }
5861 
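/* Load the RLC microcode and start the RLC.  Power and clock gating
 * are initialized here since both are managed by the RLC.  Handles
 * both the new firmware header format and the legacy big endian
 * blobs.
 */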
5862 static int si_rlc_resume(struct radeon_device *rdev)
5863 {
5864 	u32 i;
5865 
5866 	if (!rdev->rlc_fw)
5867 		return -EINVAL;
5868 
5869 	si_rlc_stop(rdev);
5870 
5871 	si_rlc_reset(rdev);
5872 
5873 	si_init_pg(rdev);
5874 
5875 	si_init_cg(rdev);
5876 
5877 	WREG32(RLC_RL_BASE, 0);
5878 	WREG32(RLC_RL_SIZE, 0);
5879 	WREG32(RLC_LB_CNTL, 0);
5880 	WREG32(RLC_LB_CNTR_MAX, 0xffffffff);
5881 	WREG32(RLC_LB_CNTR_INIT, 0);
5882 	WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
5883 
5884 	WREG32(RLC_MC_CNTL, 0);
5885 	WREG32(RLC_UCODE_CNTL, 0);
5886 
5887 	if (rdev->new_fw) {
5888 		const struct rlc_firmware_header_v1_0 *hdr =
5889 			(const struct rlc_firmware_header_v1_0 *)rdev->rlc_fw->data;
5890 		u32 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
5891 		const __le32 *fw_data = (const __le32 *)
5892 			(rdev->rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
5893 
5894 		radeon_ucode_print_rlc_hdr(&hdr->header);
5895 
5896 		for (i = 0; i < fw_size; i++) {
5897 			WREG32(RLC_UCODE_ADDR, i);
5898 			WREG32(RLC_UCODE_DATA, le32_to_cpup(fw_data++));
5899 		}
5900 	} else {
5901 		const __be32 *fw_data =
5902 			(const __be32 *)rdev->rlc_fw->data;
5903 		for (i = 0; i < SI_RLC_UCODE_SIZE; i++) {
5904 			WREG32(RLC_UCODE_ADDR, i);
5905 			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
5906 		}
5907 	}
5908 	WREG32(RLC_UCODE_ADDR, 0);
5909 
5910 	si_enable_lbpw(rdev, si_lbpw_supported(rdev));
5911 
5912 	si_rlc_start(rdev);
5913 
5914 	return 0;
5915 }
5916 
5917 static void si_enable_interrupts(struct radeon_device *rdev)
5918 {
5919 	u32 ih_cntl = RREG32(IH_CNTL);
5920 	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
5921 
5922 	ih_cntl |= ENABLE_INTR;
5923 	ih_rb_cntl |= IH_RB_ENABLE;
5924 	WREG32(IH_CNTL, ih_cntl);
5925 	WREG32(IH_RB_CNTL, ih_rb_cntl);
5926 	rdev->ih.enabled = true;
5927 }
5928 
5929 static void si_disable_interrupts(struct radeon_device *rdev)
5930 {
5931 	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
5932 	u32 ih_cntl = RREG32(IH_CNTL);
5933 
5934 	ih_rb_cntl &= ~IH_RB_ENABLE;
5935 	ih_cntl &= ~ENABLE_INTR;
5936 	WREG32(IH_RB_CNTL, ih_rb_cntl);
5937 	WREG32(IH_CNTL, ih_cntl);
5938 	/* set rptr, wptr to 0 */
5939 	WREG32(IH_RB_RPTR, 0);
5940 	WREG32(IH_RB_WPTR, 0);
5941 	rdev->ih.enabled = false;
5942 	rdev->ih.rptr = 0;
5943 }
5944 
5945 static void si_disable_interrupt_state(struct radeon_device *rdev)
5946 {
5947 	u32 tmp;
5948 
5949 	tmp = RREG32(CP_INT_CNTL_RING0) &
5950 		(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
5951 	WREG32(CP_INT_CNTL_RING0, tmp);
5952 	WREG32(CP_INT_CNTL_RING1, 0);
5953 	WREG32(CP_INT_CNTL_RING2, 0);
5954 	tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
5955 	WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, tmp);
5956 	tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
5957 	WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
5958 	WREG32(GRBM_INT_CNTL, 0);
5959 	WREG32(SRBM_INT_CNTL, 0);
5960 	if (rdev->num_crtc >= 2) {
5961 		WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
5962 		WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
5963 	}
5964 	if (rdev->num_crtc >= 4) {
5965 		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
5966 		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
5967 	}
5968 	if (rdev->num_crtc >= 6) {
5969 		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
5970 		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
5971 	}
5972 
5973 	if (rdev->num_crtc >= 2) {
5974 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
5975 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
5976 	}
5977 	if (rdev->num_crtc >= 4) {
5978 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
5979 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
5980 	}
5981 	if (rdev->num_crtc >= 6) {
5982 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
5983 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
5984 	}
5985 
5986 	if (!ASIC_IS_NODCE(rdev)) {
5987 		WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
5988 
5989 		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
5990 		WREG32(DC_HPD1_INT_CONTROL, tmp);
5991 		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
5992 		WREG32(DC_HPD2_INT_CONTROL, tmp);
5993 		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
5994 		WREG32(DC_HPD3_INT_CONTROL, tmp);
5995 		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
5996 		WREG32(DC_HPD4_INT_CONTROL, tmp);
5997 		tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
5998 		WREG32(DC_HPD5_INT_CONTROL, tmp);
5999 		tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
6000 		WREG32(DC_HPD6_INT_CONTROL, tmp);
6001 	}
6002 }
6003 
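/**
 * si_irq_init - initialize interrupt handling (SI)
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the IH ring, bring up the RLC, program the IH ring buffer
 * (write-back, overflow handling, rptr rearm for MSIs) and enable
 * interrupts with all sources still masked.
 */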
6004 static int si_irq_init(struct radeon_device *rdev)
6005 {
6006 	int ret = 0;
6007 	int rb_bufsz;
6008 	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
6009 
6010 	/* allocate ring */
6011 	ret = r600_ih_ring_alloc(rdev);
6012 	if (ret)
6013 		return ret;
6014 
6015 	/* disable irqs */
6016 	si_disable_interrupts(rdev);
6017 
6018 	/* init rlc */
6019 	ret = si_rlc_resume(rdev);
6020 	if (ret) {
6021 		r600_ih_ring_fini(rdev);
6022 		return ret;
6023 	}
6024 
6025 	/* setup interrupt control */
6026 	/* set dummy read address to ring address */
6027 	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
6028 	interrupt_cntl = RREG32(INTERRUPT_CNTL);
6029 	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
6030 	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
6031 	 */
6032 	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
6033 	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
6034 	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
6035 	WREG32(INTERRUPT_CNTL, interrupt_cntl);
6036 
6037 	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
6038 	rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
6039 
6040 	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
6041 		      IH_WPTR_OVERFLOW_CLEAR |
6042 		      (rb_bufsz << 1));
6043 
6044 	if (rdev->wb.enabled)
6045 		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
6046 
6047 	/* set the writeback address whether it's enabled or not */
6048 	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
6049 	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
6050 
6051 	WREG32(IH_RB_CNTL, ih_rb_cntl);
6052 
6053 	/* set rptr, wptr to 0 */
6054 	WREG32(IH_RB_RPTR, 0);
6055 	WREG32(IH_RB_WPTR, 0);
6056 
6057 	/* Default settings for IH_CNTL (disabled at first) */
6058 	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
6059 	/* RPTR_REARM only works if msi's are enabled */
6060 	if (rdev->msi_enabled)
6061 		ih_cntl |= RPTR_REARM;
6062 	WREG32(IH_CNTL, ih_cntl);
6063 
6064 	/* force the active interrupt state to all disabled */
6065 	si_disable_interrupt_state(rdev);
6066 
6067 	pci_set_master(rdev->pdev);
6068 
6069 	/* enable irqs */
6070 	si_enable_interrupts(rdev);
6071 
6072 	return ret;
6073 }
6074 
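/**
 * si_irq_set - program the interrupt mask registers (SI)
 *
 * @rdev: radeon_device pointer
 *
 * Build mask values for the sources requested in rdev->irq (CP rings,
 * DMA engines, vblank, hotplug, thermal) and write them to the
 * hardware, finishing with a posting read.
 */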
6075 int si_irq_set(struct radeon_device *rdev)
6076 {
6077 	u32 cp_int_cntl;
6078 	u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
6079 	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
6080 	u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0;
6081 	u32 grbm_int_cntl = 0;
6082 	u32 dma_cntl, dma_cntl1;
6083 	u32 thermal_int = 0;
6084 
6085 	if (!rdev->irq.installed) {
6086 		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
6087 		return -EINVAL;
6088 	}
6089 	/* don't enable anything if the ih is disabled */
6090 	if (!rdev->ih.enabled) {
6091 		si_disable_interrupts(rdev);
6092 		/* force the active interrupt state to all disabled */
6093 		si_disable_interrupt_state(rdev);
6094 		return 0;
6095 	}
6096 
6097 	cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
6098 		(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
6099 
6100 	if (!ASIC_IS_NODCE(rdev)) {
6101 		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
6102 		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
6103 		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
6104 		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
6105 		hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
6106 		hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
6107 	}
6108 
6109 	dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
6110 	dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
6111 
6112 	thermal_int = RREG32(CG_THERMAL_INT) &
6113 		~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
6114 
6115 	/* enable CP interrupts on all rings */
6116 	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
6117 		DRM_DEBUG("si_irq_set: sw int gfx\n");
6118 		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
6119 	}
6120 	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
6121 		DRM_DEBUG("si_irq_set: sw int cp1\n");
6122 		cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
6123 	}
6124 	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
6125 		DRM_DEBUG("si_irq_set: sw int cp2\n");
6126 		cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
6127 	}
6128 	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
6129 		DRM_DEBUG("si_irq_set: sw int dma\n");
6130 		dma_cntl |= TRAP_ENABLE;
6131 	}
6132 
6133 	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
6134 		DRM_DEBUG("si_irq_set: sw int dma1\n");
6135 		dma_cntl1 |= TRAP_ENABLE;
6136 	}
6137 	if (rdev->irq.crtc_vblank_int[0] ||
6138 	    atomic_read(&rdev->irq.pflip[0])) {
6139 		DRM_DEBUG("si_irq_set: vblank 0\n");
6140 		crtc1 |= VBLANK_INT_MASK;
6141 	}
6142 	if (rdev->irq.crtc_vblank_int[1] ||
6143 	    atomic_read(&rdev->irq.pflip[1])) {
6144 		DRM_DEBUG("si_irq_set: vblank 1\n");
6145 		crtc2 |= VBLANK_INT_MASK;
6146 	}
6147 	if (rdev->irq.crtc_vblank_int[2] ||
6148 	    atomic_read(&rdev->irq.pflip[2])) {
6149 		DRM_DEBUG("si_irq_set: vblank 2\n");
6150 		crtc3 |= VBLANK_INT_MASK;
6151 	}
6152 	if (rdev->irq.crtc_vblank_int[3] ||
6153 	    atomic_read(&rdev->irq.pflip[3])) {
6154 		DRM_DEBUG("si_irq_set: vblank 3\n");
6155 		crtc4 |= VBLANK_INT_MASK;
6156 	}
6157 	if (rdev->irq.crtc_vblank_int[4] ||
6158 	    atomic_read(&rdev->irq.pflip[4])) {
6159 		DRM_DEBUG("si_irq_set: vblank 4\n");
6160 		crtc5 |= VBLANK_INT_MASK;
6161 	}
6162 	if (rdev->irq.crtc_vblank_int[5] ||
6163 	    atomic_read(&rdev->irq.pflip[5])) {
6164 		DRM_DEBUG("si_irq_set: vblank 5\n");
6165 		crtc6 |= VBLANK_INT_MASK;
6166 	}
6167 	if (rdev->irq.hpd[0]) {
6168 		DRM_DEBUG("si_irq_set: hpd 1\n");
6169 		hpd1 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
6170 	}
6171 	if (rdev->irq.hpd[1]) {
6172 		DRM_DEBUG("si_irq_set: hpd 2\n");
6173 		hpd2 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
6174 	}
6175 	if (rdev->irq.hpd[2]) {
6176 		DRM_DEBUG("si_irq_set: hpd 3\n");
6177 		hpd3 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
6178 	}
6179 	if (rdev->irq.hpd[3]) {
6180 		DRM_DEBUG("si_irq_set: hpd 4\n");
6181 		hpd4 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
6182 	}
6183 	if (rdev->irq.hpd[4]) {
6184 		DRM_DEBUG("si_irq_set: hpd 5\n");
6185 		hpd5 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
6186 	}
6187 	if (rdev->irq.hpd[5]) {
6188 		DRM_DEBUG("si_irq_set: hpd 6\n");
6189 		hpd6 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
6190 	}
6191 
6192 	WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
6193 	WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
6194 	WREG32(CP_INT_CNTL_RING2, cp_int_cntl2);
6195 
6196 	WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, dma_cntl);
6197 	WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, dma_cntl1);
6198 
6199 	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
6200 
6201 	if (rdev->irq.dpm_thermal) {
6202 		DRM_DEBUG("dpm thermal\n");
6203 		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
6204 	}
6205 
6206 	if (rdev->num_crtc >= 2) {
6207 		WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
6208 		WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
6209 	}
6210 	if (rdev->num_crtc >= 4) {
6211 		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
6212 		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
6213 	}
6214 	if (rdev->num_crtc >= 6) {
6215 		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
6216 		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
6217 	}
6218 
6219 	if (rdev->num_crtc >= 2) {
6220 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
6221 		       GRPH_PFLIP_INT_MASK);
6222 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
6223 		       GRPH_PFLIP_INT_MASK);
6224 	}
6225 	if (rdev->num_crtc >= 4) {
6226 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
6227 		       GRPH_PFLIP_INT_MASK);
6228 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
6229 		       GRPH_PFLIP_INT_MASK);
6230 	}
6231 	if (rdev->num_crtc >= 6) {
6232 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
6233 		       GRPH_PFLIP_INT_MASK);
6234 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
6235 		       GRPH_PFLIP_INT_MASK);
6236 	}
6237 
6238 	if (!ASIC_IS_NODCE(rdev)) {
6239 		WREG32(DC_HPD1_INT_CONTROL, hpd1);
6240 		WREG32(DC_HPD2_INT_CONTROL, hpd2);
6241 		WREG32(DC_HPD3_INT_CONTROL, hpd3);
6242 		WREG32(DC_HPD4_INT_CONTROL, hpd4);
6243 		WREG32(DC_HPD5_INT_CONTROL, hpd5);
6244 		WREG32(DC_HPD6_INT_CONTROL, hpd6);
6245 	}
6246 
6247 	WREG32(CG_THERMAL_INT, thermal_int);
6248 
6249 	/* posting read */
6250 	RREG32(SRBM_STATUS);
6251 
6252 	return 0;
6253 }
6254 
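/* Latch the display interrupt status registers into
 * rdev->irq.stat_regs and acknowledge any pending pageflip, vblank,
 * vline and HPD sources.
 */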
6255 static inline void si_irq_ack(struct radeon_device *rdev)
6256 {
6257 	u32 tmp;
6258 
6259 	if (ASIC_IS_NODCE(rdev))
6260 		return;
6261 
6262 	rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
6263 	rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
6264 	rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
6265 	rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
6266 	rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
6267 	rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
6268 	rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
6269 	rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
6270 	if (rdev->num_crtc >= 4) {
6271 		rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
6272 		rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
6273 	}
6274 	if (rdev->num_crtc >= 6) {
6275 		rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
6276 		rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
6277 	}
6278 
6279 	if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
6280 		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
6281 	if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
6282 		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
6283 	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
6284 		WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
6285 	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
6286 		WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
6287 	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
6288 		WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
6289 	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
6290 		WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
6291 
6292 	if (rdev->num_crtc >= 4) {
6293 		if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
6294 			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
6295 		if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
6296 			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
6297 		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
6298 			WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
6299 		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
6300 			WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
6301 		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
6302 			WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
6303 		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
6304 			WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
6305 	}
6306 
6307 	if (rdev->num_crtc >= 6) {
6308 		if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
6309 			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
6310 		if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
6311 			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
6312 		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
6313 			WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
6314 		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
6315 			WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
6316 		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
6317 			WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
6318 		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
6319 			WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
6320 	}
6321 
6322 	if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
6323 		tmp = RREG32(DC_HPD1_INT_CONTROL);
6324 		tmp |= DC_HPDx_INT_ACK;
6325 		WREG32(DC_HPD1_INT_CONTROL, tmp);
6326 	}
6327 	if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
6328 		tmp = RREG32(DC_HPD2_INT_CONTROL);
6329 		tmp |= DC_HPDx_INT_ACK;
6330 		WREG32(DC_HPD2_INT_CONTROL, tmp);
6331 	}
6332 	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
6333 		tmp = RREG32(DC_HPD3_INT_CONTROL);
6334 		tmp |= DC_HPDx_INT_ACK;
6335 		WREG32(DC_HPD3_INT_CONTROL, tmp);
6336 	}
6337 	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
6338 		tmp = RREG32(DC_HPD4_INT_CONTROL);
6339 		tmp |= DC_HPDx_INT_ACK;
6340 		WREG32(DC_HPD4_INT_CONTROL, tmp);
6341 	}
6342 	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
6343 		tmp = RREG32(DC_HPD5_INT_CONTROL);
6344 		tmp |= DC_HPDx_INT_ACK;
6345 		WREG32(DC_HPD5_INT_CONTROL, tmp);
6346 	}
6347 	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
6348 		tmp = RREG32(DC_HPD6_INT_CONTROL);
6349 		tmp |= DC_HPDx_INT_ACK;
6350 		WREG32(DC_HPD6_INT_CONTROL, tmp);
6351 	}
6352 
6353 	if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
6354 		tmp = RREG32(DC_HPD1_INT_CONTROL);
6355 		tmp |= DC_HPDx_RX_INT_ACK;
6356 		WREG32(DC_HPD1_INT_CONTROL, tmp);
6357 	}
6358 	if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
6359 		tmp = RREG32(DC_HPD2_INT_CONTROL);
6360 		tmp |= DC_HPDx_RX_INT_ACK;
6361 		WREG32(DC_HPD2_INT_CONTROL, tmp);
6362 	}
6363 	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
6364 		tmp = RREG32(DC_HPD3_INT_CONTROL);
6365 		tmp |= DC_HPDx_RX_INT_ACK;
6366 		WREG32(DC_HPD3_INT_CONTROL, tmp);
6367 	}
6368 	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
6369 		tmp = RREG32(DC_HPD4_INT_CONTROL);
6370 		tmp |= DC_HPDx_RX_INT_ACK;
6371 		WREG32(DC_HPD4_INT_CONTROL, tmp);
6372 	}
6373 	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
6374 		tmp = RREG32(DC_HPD5_INT_CONTROL);
6375 		tmp |= DC_HPDx_RX_INT_ACK;
6376 		WREG32(DC_HPD5_INT_CONTROL, tmp);
6377 	}
6378 	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
6379 		tmp = RREG32(DC_HPD6_INT_CONTROL);
6380 		tmp |= DC_HPDx_RX_INT_ACK;
6381 		WREG32(DC_HPD6_INT_CONTROL, tmp);
6382 	}
6383 }
6384 
6385 static void si_irq_disable(struct radeon_device *rdev)
6386 {
6387 	si_disable_interrupts(rdev);
6388 	/* Wait and acknowledge irq */
6389 	mdelay(1);
6390 	si_irq_ack(rdev);
6391 	si_disable_interrupt_state(rdev);
6392 }
6393 
6394 static void si_irq_suspend(struct radeon_device *rdev)
6395 {
6396 	si_irq_disable(rdev);
6397 	si_rlc_stop(rdev);
6398 }
6399 
6400 static void si_irq_fini(struct radeon_device *rdev)
6401 {
6402 	si_irq_suspend(rdev);
6403 	r600_ih_ring_fini(rdev);
6404 }
6405 
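/* Return the current IH ring write pointer, preferring the write-back
 * copy when enabled.  On a ring overflow, warn, clear the overflow
 * bit and move the read pointer to the oldest vector that was not
 * overwritten.
 */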
6406 static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
6407 {
6408 	u32 wptr, tmp;
6409 
6410 	if (rdev->wb.enabled)
6411 		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
6412 	else
6413 		wptr = RREG32(IH_RB_WPTR);
6414 
6415 	if (wptr & RB_OVERFLOW) {
6416 		wptr &= ~RB_OVERFLOW;
6417 		/* When a ring buffer overflow happens, start parsing interrupts
6418 		 * from the last vector that was not overwritten (wptr + 16).
6419 		 * This should allow us to catch up.
6420 		 */
6421 		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
6422 			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
6423 		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
6424 		tmp = RREG32(IH_RB_CNTL);
6425 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
6426 		WREG32(IH_RB_CNTL, tmp);
6427 	}
6428 	return (wptr & rdev->ih.ptr_mask);
6429 }
6430 
6431 /* SI IV Ring
6432  * Each IV ring entry is 128 bits:
6433  * [7:0]    - interrupt source id
6434  * [31:8]   - reserved
6435  * [59:32]  - interrupt source data
6436  * [63:60]  - reserved
6437  * [71:64]  - RINGID
6438  * [79:72]  - VMID
6439  * [127:80] - reserved
6440  */
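/* Walk the IH ring from rptr to wptr, decode each IV entry and
 * dispatch it to the fence, vblank, pageflip, hotplug, VM fault and
 * thermal handlers.
 */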
6441 int si_irq_process(struct radeon_device *rdev)
6442 {
6443 	u32 wptr;
6444 	u32 rptr;
6445 	u32 src_id, src_data, ring_id;
6446 	u32 ring_index;
6447 	bool queue_hotplug = false;
6448 	bool queue_dp = false;
6449 	bool queue_thermal = false;
6450 	u32 status, addr;
6451 
6452 	if (!rdev->ih.enabled || rdev->shutdown)
6453 		return IRQ_NONE;
6454 
6455 	wptr = si_get_ih_wptr(rdev);
6456 
6457 restart_ih:
6458 	/* is somebody else already processing irqs? */
6459 	if (atomic_xchg(&rdev->ih.lock, 1))
6460 		return IRQ_NONE;
6461 
6462 	rptr = rdev->ih.rptr;
6463 	DRM_DEBUG("si_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
6464 
6465 	/* Order reading of wptr vs. reading of IH ring data */
6466 	rmb();
6467 
6468 	/* display interrupts */
6469 	si_irq_ack(rdev);
6470 
6471 	while (rptr != wptr) {
6472 		/* wptr/rptr are in bytes! */
6473 		ring_index = rptr / 4;
6474 		src_id =  le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
6475 		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
6476 		ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
6477 
6478 		switch (src_id) {
6479 		case 1: /* D1 vblank/vline */
6480 			switch (src_data) {
6481 			case 0: /* D1 vblank */
6482 				if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
6483 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6484 
6485 				if (rdev->irq.crtc_vblank_int[0]) {
6486 					drm_handle_vblank(rdev->ddev, 0);
6487 					rdev->pm.vblank_sync = true;
6488 					wake_up(&rdev->irq.vblank_queue);
6489 				}
6490 				if (atomic_read(&rdev->irq.pflip[0]))
6491 					radeon_crtc_handle_vblank(rdev, 0);
6492 				rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
6493 				DRM_DEBUG("IH: D1 vblank\n");
6494 
6495 				break;
6496 			case 1: /* D1 vline */
6497 				if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
6498 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6499 
6500 				rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
6501 				DRM_DEBUG("IH: D1 vline\n");
6502 
6503 				break;
6504 			default:
6505 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6506 				break;
6507 			}
6508 			break;
6509 		case 2: /* D2 vblank/vline */
6510 			switch (src_data) {
6511 			case 0: /* D2 vblank */
6512 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
6513 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6514 
6515 				if (rdev->irq.crtc_vblank_int[1]) {
6516 					drm_handle_vblank(rdev->ddev, 1);
6517 					rdev->pm.vblank_sync = true;
6518 					wake_up(&rdev->irq.vblank_queue);
6519 				}
6520 				if (atomic_read(&rdev->irq.pflip[1]))
6521 					radeon_crtc_handle_vblank(rdev, 1);
6522 				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
6523 				DRM_DEBUG("IH: D2 vblank\n");
6524 
6525 				break;
6526 			case 1: /* D2 vline */
6527 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
6528 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6529 
6530 				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
6531 				DRM_DEBUG("IH: D2 vline\n");
6532 
6533 				break;
6534 			default:
6535 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6536 				break;
6537 			}
6538 			break;
6539 		case 3: /* D3 vblank/vline */
6540 			switch (src_data) {
6541 			case 0: /* D3 vblank */
6542 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
6543 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6544 
6545 				if (rdev->irq.crtc_vblank_int[2]) {
6546 					drm_handle_vblank(rdev->ddev, 2);
6547 					rdev->pm.vblank_sync = true;
6548 					wake_up(&rdev->irq.vblank_queue);
6549 				}
6550 				if (atomic_read(&rdev->irq.pflip[2]))
6551 					radeon_crtc_handle_vblank(rdev, 2);
6552 				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
6553 				DRM_DEBUG("IH: D3 vblank\n");
6554 
6555 				break;
6556 			case 1: /* D3 vline */
6557 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
6558 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6559 
6560 				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
6561 				DRM_DEBUG("IH: D3 vline\n");
6562 
6563 				break;
6564 			default:
6565 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6566 				break;
6567 			}
6568 			break;
6569 		case 4: /* D4 vblank/vline */
6570 			switch (src_data) {
6571 			case 0: /* D4 vblank */
6572 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
6573 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6574 
6575 				if (rdev->irq.crtc_vblank_int[3]) {
6576 					drm_handle_vblank(rdev->ddev, 3);
6577 					rdev->pm.vblank_sync = true;
6578 					wake_up(&rdev->irq.vblank_queue);
6579 				}
6580 				if (atomic_read(&rdev->irq.pflip[3]))
6581 					radeon_crtc_handle_vblank(rdev, 3);
6582 				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
6583 				DRM_DEBUG("IH: D4 vblank\n");
6584 
6585 				break;
6586 			case 1: /* D4 vline */
6587 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
6588 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6589 
6590 				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
6591 				DRM_DEBUG("IH: D4 vline\n");
6592 
6593 				break;
6594 			default:
6595 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6596 				break;
6597 			}
6598 			break;
6599 		case 5: /* D5 vblank/vline */
6600 			switch (src_data) {
6601 			case 0: /* D5 vblank */
6602 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
6603 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6604 
6605 				if (rdev->irq.crtc_vblank_int[4]) {
6606 					drm_handle_vblank(rdev->ddev, 4);
6607 					rdev->pm.vblank_sync = true;
6608 					wake_up(&rdev->irq.vblank_queue);
6609 				}
6610 				if (atomic_read(&rdev->irq.pflip[4]))
6611 					radeon_crtc_handle_vblank(rdev, 4);
6612 				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
6613 				DRM_DEBUG("IH: D5 vblank\n");
6614 
6615 				break;
6616 			case 1: /* D5 vline */
6617 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
6618 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6619 
6620 				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
6621 				DRM_DEBUG("IH: D5 vline\n");
6622 
6623 				break;
6624 			default:
6625 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6626 				break;
6627 			}
6628 			break;
6629 		case 6: /* D6 vblank/vline */
6630 			switch (src_data) {
6631 			case 0: /* D6 vblank */
6632 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
6633 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6634 
6635 				if (rdev->irq.crtc_vblank_int[5]) {
6636 					drm_handle_vblank(rdev->ddev, 5);
6637 					rdev->pm.vblank_sync = true;
6638 					wake_up(&rdev->irq.vblank_queue);
6639 				}
6640 				if (atomic_read(&rdev->irq.pflip[5]))
6641 					radeon_crtc_handle_vblank(rdev, 5);
6642 				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
6643 				DRM_DEBUG("IH: D6 vblank\n");
6644 
6645 				break;
6646 			case 1: /* D6 vline */
6647 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
6648 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6649 
6650 				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
6651 				DRM_DEBUG("IH: D6 vline\n");
6652 
6653 				break;
6654 			default:
6655 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6656 				break;
6657 			}
6658 			break;
6659 		case 8: /* D1 page flip */
6660 		case 10: /* D2 page flip */
6661 		case 12: /* D3 page flip */
6662 		case 14: /* D4 page flip */
6663 		case 16: /* D5 page flip */
6664 		case 18: /* D6 page flip */
6665 			DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
6666 			if (radeon_use_pflipirq > 0)
6667 				radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
6668 			break;
6669 		case 42: /* HPD hotplug */
6670 			switch (src_data) {
6671 			case 0:
6672 				if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
6673 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6674 
6675 				rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
6676 				queue_hotplug = true;
6677 				DRM_DEBUG("IH: HPD1\n");
6678 
6679 				break;
6680 			case 1:
6681 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
6682 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6683 
6684 				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
6685 				queue_hotplug = true;
6686 				DRM_DEBUG("IH: HPD2\n");
6687 
6688 				break;
6689 			case 2:
6690 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
6691 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6692 
6693 				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
6694 				queue_hotplug = true;
6695 				DRM_DEBUG("IH: HPD3\n");
6696 
6697 				break;
6698 			case 3:
6699 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
6700 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6701 
6702 				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
6703 				queue_hotplug = true;
6704 				DRM_DEBUG("IH: HPD4\n");
6705 
6706 				break;
6707 			case 4:
6708 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
6709 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6710 
6711 				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
6712 				queue_hotplug = true;
6713 				DRM_DEBUG("IH: HPD5\n");
6714 
6715 				break;
6716 			case 5:
6717 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
6718 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6719 
6720 				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
6721 				queue_hotplug = true;
6722 				DRM_DEBUG("IH: HPD6\n");
6723 
6724 				break;
6725 			case 6:
6726 				if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
6727 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6728 
6729 				rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
6730 				queue_dp = true;
6731 				DRM_DEBUG("IH: HPD_RX 1\n");
6732 
6733 				break;
6734 			case 7:
6735 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
6736 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6737 
6738 				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
6739 				queue_dp = true;
6740 				DRM_DEBUG("IH: HPD_RX 2\n");
6741 
6742 				break;
6743 			case 8:
6744 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
6745 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6746 
6747 				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
6748 				queue_dp = true;
6749 				DRM_DEBUG("IH: HPD_RX 3\n");
6750 
6751 				break;
6752 			case 9:
6753 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
6754 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6755 
6756 				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
6757 				queue_dp = true;
6758 				DRM_DEBUG("IH: HPD_RX 4\n");
6759 
6760 				break;
6761 			case 10:
6762 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
6763 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6764 
6765 				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
6766 				queue_dp = true;
6767 				DRM_DEBUG("IH: HPD_RX 5\n");
6768 
6769 				break;
6770 			case 11:
6771 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
6772 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6773 
6774 				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
6775 				queue_dp = true;
6776 				DRM_DEBUG("IH: HPD_RX 6\n");
6777 
6778 				break;
6779 			default:
6780 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6781 				break;
6782 			}
6783 			break;
6784 		case 96: /* SRBM read error */
6785 			DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
6786 			WREG32(SRBM_INT_ACK, 0x1);
6787 			break;
6788 		case 124: /* UVD */
6789 			DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
6790 			radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
6791 			break;
6792 		case 146: /* VM protection fault */
6793 		case 147:
6794 			addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
6795 			status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
6796 			/* reset addr and status */
6797 			WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
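			/* both regs read back zero if the fault was already cleared */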
6798 			if (addr == 0x0 && status == 0x0)
6799 				break;
6800 			dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
6801 			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
6802 				addr);
6803 			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
6804 				status);
6805 			si_vm_decode_fault(rdev, status, addr);
6806 			break;
6807 		case 176: /* RINGID0 CP_INT */
6808 			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
6809 			break;
6810 		case 177: /* RINGID1 CP_INT */
6811 			radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
6812 			break;
6813 		case 178: /* RINGID2 CP_INT */
6814 			radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
6815 			break;
6816 		case 181: /* CP EOP event */
6817 			DRM_DEBUG("IH: CP EOP\n");
6818 			switch (ring_id) {
6819 			case 0:
6820 				radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
6821 				break;
6822 			case 1:
6823 				radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
6824 				break;
6825 			case 2:
6826 				radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
6827 				break;
6828 			}
6829 			break;
6830 		case 224: /* DMA trap event */
6831 			DRM_DEBUG("IH: DMA trap\n");
6832 			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
6833 			break;
6834 		case 230: /* thermal low to high */
6835 			DRM_DEBUG("IH: thermal low to high\n");
6836 			rdev->pm.dpm.thermal.high_to_low = false;
6837 			queue_thermal = true;
6838 			break;
6839 		case 231: /* thermal high to low */
6840 			DRM_DEBUG("IH: thermal high to low\n");
6841 			rdev->pm.dpm.thermal.high_to_low = true;
6842 			queue_thermal = true;
6843 			break;
6844 		case 233: /* GUI IDLE */
6845 			DRM_DEBUG("IH: GUI idle\n");
6846 			break;
6847 		case 244: /* DMA1 trap event */
6848 			DRM_DEBUG("IH: DMA1 trap\n");
6849 			radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
6850 			break;
6851 		default:
6852 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6853 			break;
6854 		}
6855 
6856 		/* wptr/rptr are in bytes; each IH ring entry is 16 bytes (four dwords) */
6857 		rptr += 16;
6858 		rptr &= rdev->ih.ptr_mask;
6859 		WREG32(IH_RB_RPTR, rptr);
6860 	}
6861 	if (queue_dp)
6862 		schedule_work(&rdev->dp_work);
6863 	if (queue_hotplug)
6864 		schedule_delayed_work(&rdev->hotplug_work, 0);
6865 	if (queue_thermal && rdev->pm.dpm_enabled)
6866 		schedule_work(&rdev->pm.dpm.thermal.work);
6867 	rdev->ih.rptr = rptr;
6868 	atomic_set(&rdev->ih.lock, 0);
6869 
6870 	/* make sure wptr hasn't changed while processing */
6871 	wptr = si_get_ih_wptr(rdev);
6872 	if (wptr != rptr)
6873 		goto restart_ih;
6874 
6875 	return IRQ_HANDLED;
6876 }
6877 
6878 /*
6879  * startup/shutdown callbacks
6880  */
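/**
 * si_uvd_init - UVD block software init (SI)
 *
 * @rdev: radeon_device pointer
 *
 * Loads the UVD firmware, allocates the UVD BOs and sets up the
 * UVD ring descriptor. On failure UVD is disabled for this device
 * so the later start/resume paths become no-ops.
 */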
6881 static void si_uvd_init(struct radeon_device *rdev)
6882 {
6883 	int r;
6884 
6885 	if (!rdev->has_uvd)
6886 		return;
6887 
6888 	r = radeon_uvd_init(rdev);
6889 	if (r) {
6890 		dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
6891 		/*
6892 		 * At this point rdev->uvd.vcpu_bo is NULL, which makes
6893 		 * uvd_v2_2_resume() fail early so nothing happens there.
6894 		 * Going through that code path is pointless, hence we
6895 		 * disable UVD here.
6896 		 */
6897 		rdev->has_uvd = 0;
6898 		return;
6899 	}
6900 	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
6901 	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
6902 }
6903 
6904 static void si_uvd_start(struct radeon_device *rdev)
6905 {
6906 	int r;
6907 
6908 	if (!rdev->has_uvd)
6909 		return;
6910 
6911 	r = uvd_v2_2_resume(rdev);
6912 	if (r) {
6913 		dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
6914 		goto error;
6915 	}
6916 	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
6917 	if (r) {
6918 		dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
6919 		goto error;
6920 	}
6921 	return;
6922 
6923 error:
6924 	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
6925 }
6926 
6927 static void si_uvd_resume(struct radeon_device *rdev)
6928 {
6929 	struct radeon_ring *ring;
6930 	int r;
6931 
6932 	if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
6933 		return;
6934 
6935 	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
6936 	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
6937 	if (r) {
6938 		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
6939 		return;
6940 	}
6941 	r = uvd_v1_0_init(rdev);
6942 	if (r) {
6943 		dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
6944 		return;
6945 	}
6946 }
6947 
6948 static void si_vce_init(struct radeon_device *rdev)
6949 {
6950 	int r;
6951 
6952 	if (!rdev->has_vce)
6953 		return;
6954 
6955 	r = radeon_vce_init(rdev);
6956 	if (r) {
6957 		dev_err(rdev->dev, "failed VCE (%d) init.\n", r);
6958 		/*
6959 		 * At this point rdev->vce.vcpu_bo is NULL, which makes
6960 		 * si_vce_start() fail early so nothing happens there.
6961 		 * Going through that code path is pointless, hence we
6962 		 * disable VCE here.
6963 		 */
6964 		rdev->has_vce = 0;
6965 		return;
6966 	}
6967 	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
6968 	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE1_INDEX], 4096);
6969 	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_obj = NULL;
6970 	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE2_INDEX], 4096);
6971 }
6972 
6973 static void si_vce_start(struct radeon_device *rdev)
6974 {
6975 	int r;
6976 
6977 	if (!rdev->has_vce)
6978 		return;
6979 
6980 	r = radeon_vce_resume(rdev);
6981 	if (r) {
6982 		dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
6983 		goto error;
6984 	}
6985 	r = vce_v1_0_resume(rdev);
6986 	if (r) {
6987 		dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
6988 		goto error;
6989 	}
6990 	r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE1_INDEX);
6991 	if (r) {
6992 		dev_err(rdev->dev, "failed initializing VCE1 fences (%d).\n", r);
6993 		goto error;
6994 	}
6995 	r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE2_INDEX);
6996 	if (r) {
6997 		dev_err(rdev->dev, "failed initializing VCE2 fences (%d).\n", r);
6998 		goto error;
6999 	}
7000 	return;
7001 
7002 error:
7003 	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
7004 	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
7005 }
7006 
7007 static void si_vce_resume(struct radeon_device *rdev)
7008 {
7009 	struct radeon_ring *ring;
7010 	int r;
7011 
7012 	if (!rdev->has_vce || !rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size)
7013 		return;
7014 
7015 	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
7016 	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP);
7017 	if (r) {
7018 		dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r);
7019 		return;
7020 	}
7021 	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
7022 	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP);
7023 	if (r) {
7024 		dev_err(rdev->dev, "failed initializing VCE2 ring (%d).\n", r);
7025 		return;
7026 	}
7027 	r = vce_v1_0_init(rdev);
7028 	if (r) {
7029 		dev_err(rdev->dev, "failed initializing VCE (%d).\n", r);
7030 		return;
7031 	}
7032 }
7033 
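/**
 * si_startup - program the asic to a functional state (SI)
 *
 * @rdev: radeon_device pointer
 *
 * Brings the hw up for init/resume: PCIe link and ASPM setup, MC and
 * GART programming, RLC and writeback buffers, fence drivers, UVD/VCE
 * start, interrupts, ring buffers, IB pool, VM manager and audio.
 * Returns 0 on success, error code on failure.
 */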
7034 static int si_startup(struct radeon_device *rdev)
7035 {
7036 	struct radeon_ring *ring;
7037 	int r;
7038 
7039 	/* enable pcie gen2/3 link */
7040 	si_pcie_gen3_enable(rdev);
7041 	/* enable aspm */
7042 	si_program_aspm(rdev);
7043 
7044 	/* scratch needs to be initialized before MC */
7045 	r = r600_vram_scratch_init(rdev);
7046 	if (r)
7047 		return r;
7048 
7049 	si_mc_program(rdev);
7050 
7051 	if (!rdev->pm.dpm_enabled) {
7052 		r = si_mc_load_microcode(rdev);
7053 		if (r) {
7054 			DRM_ERROR("Failed to load MC firmware!\n");
7055 			return r;
7056 		}
7057 	}
7058 
7059 	r = si_pcie_gart_enable(rdev);
7060 	if (r)
7061 		return r;
7062 	si_gpu_init(rdev);
7063 
7064 	/* allocate rlc buffers */
7065 	if (rdev->family == CHIP_VERDE) {
7066 		rdev->rlc.reg_list = verde_rlc_save_restore_register_list;
7067 		rdev->rlc.reg_list_size =
7068 			(u32)ARRAY_SIZE(verde_rlc_save_restore_register_list);
7069 	}
7070 	rdev->rlc.cs_data = si_cs_data;
7071 	r = sumo_rlc_init(rdev);
7072 	if (r) {
7073 		DRM_ERROR("Failed to init rlc BOs!\n");
7074 		return r;
7075 	}
7076 
7077 	/* allocate wb buffer */
7078 	r = radeon_wb_init(rdev);
7079 	if (r)
7080 		return r;
7081 
7082 	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
7083 	if (r) {
7084 		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
7085 		return r;
7086 	}
7087 
7088 	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
7089 	if (r) {
7090 		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
7091 		return r;
7092 	}
7093 
7094 	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
7095 	if (r) {
7096 		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
7097 		return r;
7098 	}
7099 
7100 	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
7101 	if (r) {
7102 		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
7103 		return r;
7104 	}
7105 
7106 	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
7107 	if (r) {
7108 		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
7109 		return r;
7110 	}
7111 
7112 	si_uvd_start(rdev);
7113 	si_vce_start(rdev);
7114 
7115 	/* Enable IRQ */
7116 	if (!rdev->irq.installed) {
7117 		r = radeon_irq_kms_init(rdev);
7118 		if (r)
7119 			return r;
7120 	}
7121 
7122 	r = si_irq_init(rdev);
7123 	if (r) {
7124 		DRM_ERROR("radeon: IH init failed (%d).\n", r);
7125 		radeon_irq_kms_fini(rdev);
7126 		return r;
7127 	}
7128 	si_irq_set(rdev);
7129 
7130 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
7131 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
7132 			     RADEON_CP_PACKET2);
7133 	if (r)
7134 		return r;
7135 
7136 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
7137 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
7138 			     RADEON_CP_PACKET2);
7139 	if (r)
7140 		return r;
7141 
7142 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
7143 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
7144 			     RADEON_CP_PACKET2);
7145 	if (r)
7146 		return r;
7147 
7148 	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
7149 	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
7150 			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
7151 	if (r)
7152 		return r;
7153 
7154 	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
7155 	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
7156 			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
7157 	if (r)
7158 		return r;
7159 
7160 	r = si_cp_load_microcode(rdev);
7161 	if (r)
7162 		return r;
7163 	r = si_cp_resume(rdev);
7164 	if (r)
7165 		return r;
7166 
7167 	r = cayman_dma_resume(rdev);
7168 	if (r)
7169 		return r;
7170 
7171 	si_uvd_resume(rdev);
7172 	si_vce_resume(rdev);
7173 
7174 	r = radeon_ib_pool_init(rdev);
7175 	if (r) {
7176 		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
7177 		return r;
7178 	}
7179 
7180 	r = radeon_vm_manager_init(rdev);
7181 	if (r) {
7182 		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
7183 		return r;
7184 	}
7185 
7186 	r = radeon_audio_init(rdev);
7187 	if (r)
7188 		return r;
7189 
7190 	return 0;
7191 }
7192 
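/**
 * si_resume - resume the asic from suspend (SI)
 *
 * @rdev: radeon_device pointer
 *
 * Reposts the card via the ATOM BIOS init tables, reapplies the
 * golden register settings, resumes power management and restarts
 * the hw through si_startup().
 * Returns 0 on success, error code on failure.
 */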
7193 int si_resume(struct radeon_device *rdev)
7194 {
7195 	int r;
7196 
7197 	/* Do not reset the GPU before posting: on rv770 and later hw,
7198 	 * unlike on r500 hw, posting performs the tasks necessary to
7199 	 * bring the GPU back into good shape.
7200 	 */
7201 	/* post card */
7202 	atom_asic_init(rdev->mode_info.atom_context);
7203 
7204 	/* init golden registers */
7205 	si_init_golden_registers(rdev);
7206 
7207 	if (rdev->pm.pm_method == PM_METHOD_DPM)
7208 		radeon_pm_resume(rdev);
7209 
7210 	rdev->accel_working = true;
7211 	r = si_startup(rdev);
7212 	if (r) {
7213 		DRM_ERROR("si startup failed on resume\n");
7214 		rdev->accel_working = false;
7215 		return r;
7216 	}
7217 
7218 	return r;
7220 }
7221 
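/**
 * si_suspend - quiesce the asic for suspend (SI)
 *
 * @rdev: radeon_device pointer
 *
 * Stops the CP and DMA engines, suspends UVD/VCE, tears down
 * powergating and clockgating, then disables interrupts, writeback
 * and the GART before the device loses power.
 */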
7222 int si_suspend(struct radeon_device *rdev)
7223 {
7224 	radeon_pm_suspend(rdev);
7225 	radeon_audio_fini(rdev);
7226 	radeon_vm_manager_fini(rdev);
7227 	si_cp_enable(rdev, false);
7228 	cayman_dma_stop(rdev);
7229 	if (rdev->has_uvd) {
7230 		uvd_v1_0_fini(rdev);
7231 		radeon_uvd_suspend(rdev);
7232 	}
7233 	if (rdev->has_vce)
7234 		radeon_vce_suspend(rdev);
7235 	si_fini_pg(rdev);
7236 	si_fini_cg(rdev);
7237 	si_irq_suspend(rdev);
7238 	radeon_wb_disable(rdev);
7239 	si_pcie_gart_disable(rdev);
7240 	return 0;
7241 }
7242 
7243 /* The plan is to move initialization into this function and use
7244  * helper functions so that radeon_device_init does pretty much
7245  * nothing more than call ASIC-specific functions. This should
7246  * also allow us to remove a bunch of callback functions
7247  * like vram_info.
7248  */
7249 int si_init(struct radeon_device *rdev)
7250 {
7251 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
7252 	int r;
7253 
7254 	/* Read BIOS */
7255 	if (!radeon_get_bios(rdev)) {
7256 		if (ASIC_IS_AVIVO(rdev))
7257 			return -EINVAL;
7258 	}
7259 	/* Must be an ATOMBIOS */
7260 	if (!rdev->is_atom_bios) {
7261 		dev_err(rdev->dev, "Expecting atombios for SI GPU\n");
7262 		return -EINVAL;
7263 	}
7264 	r = radeon_atombios_init(rdev);
7265 	if (r)
7266 		return r;
7267 
7268 	/* Post card if necessary */
7269 	if (!radeon_card_posted(rdev)) {
7270 		if (!rdev->bios) {
7271 			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
7272 			return -EINVAL;
7273 		}
7274 		DRM_INFO("GPU not posted. posting now...\n");
7275 		atom_asic_init(rdev->mode_info.atom_context);
7276 	}
7277 	/* init golden registers */
7278 	si_init_golden_registers(rdev);
7279 	/* Initialize scratch registers */
7280 	si_scratch_init(rdev);
7281 	/* Initialize surface registers */
7282 	radeon_surface_init(rdev);
7283 	/* Initialize clocks */
7284 	radeon_get_clock_info(rdev->ddev);
7285 
7286 	/* Fence driver */
7287 	r = radeon_fence_driver_init(rdev);
7288 	if (r)
7289 		return r;
7290 
7291 	/* initialize memory controller */
7292 	r = si_mc_init(rdev);
7293 	if (r)
7294 		return r;
7295 	/* Memory manager */
7296 	r = radeon_bo_init(rdev);
7297 	if (r)
7298 		return r;
7299 
7300 	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
7301 	    !rdev->rlc_fw || !rdev->mc_fw) {
7302 		r = si_init_microcode(rdev);
7303 		if (r) {
7304 			DRM_ERROR("Failed to load firmware!\n");
7305 			return r;
7306 		}
7307 	}
7308 
7309 	/* Initialize power management */
7310 	radeon_pm_init(rdev);
7311 
7312 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
7313 	ring->ring_obj = NULL;
7314 	r600_ring_init(rdev, ring, 1024 * 1024);
7315 
7316 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
7317 	ring->ring_obj = NULL;
7318 	r600_ring_init(rdev, ring, 1024 * 1024);
7319 
7320 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
7321 	ring->ring_obj = NULL;
7322 	r600_ring_init(rdev, ring, 1024 * 1024);
7323 
7324 	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
7325 	ring->ring_obj = NULL;
7326 	r600_ring_init(rdev, ring, 64 * 1024);
7327 
7328 	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
7329 	ring->ring_obj = NULL;
7330 	r600_ring_init(rdev, ring, 64 * 1024);
7331 
7332 	si_uvd_init(rdev);
7333 	si_vce_init(rdev);
7334 
7335 	rdev->ih.ring_obj = NULL;
7336 	r600_ih_ring_init(rdev, 64 * 1024);
7337 
7338 	r = r600_pcie_gart_init(rdev);
7339 	if (r)
7340 		return r;
7341 
7342 	rdev->accel_working = true;
7343 	r = si_startup(rdev);
7344 	if (r) {
7345 		dev_err(rdev->dev, "disabling GPU acceleration\n");
7346 		si_cp_fini(rdev);
7347 		cayman_dma_fini(rdev);
7348 		si_irq_fini(rdev);
7349 		sumo_rlc_fini(rdev);
7350 		radeon_wb_fini(rdev);
7351 		radeon_ib_pool_fini(rdev);
7352 		radeon_vm_manager_fini(rdev);
7353 		radeon_irq_kms_fini(rdev);
7354 		si_pcie_gart_fini(rdev);
7355 		rdev->accel_working = false;
7356 	}
7357 
7358 	/* Don't start up if the MC ucode is missing.
7359 	 * The default clocks and voltages before the MC ucode
7360 	 * is loaded are not sufficient for advanced operations.
7361 	 */
7362 	if (!rdev->mc_fw) {
7363 		DRM_ERROR("radeon: MC ucode required for NI+.\n");
7364 		return -EINVAL;
7365 	}
7366 
7367 	return 0;
7368 }
7369 
7370 void si_fini(struct radeon_device *rdev)
7371 {
7372 	radeon_pm_fini(rdev);
7373 	si_cp_fini(rdev);
7374 	cayman_dma_fini(rdev);
7375 	si_fini_pg(rdev);
7376 	si_fini_cg(rdev);
7377 	si_irq_fini(rdev);
7378 	sumo_rlc_fini(rdev);
7379 	radeon_wb_fini(rdev);
7380 	radeon_vm_manager_fini(rdev);
7381 	radeon_ib_pool_fini(rdev);
7382 	radeon_irq_kms_fini(rdev);
7383 	if (rdev->has_uvd) {
7384 		uvd_v1_0_fini(rdev);
7385 		radeon_uvd_fini(rdev);
7386 	}
7387 	if (rdev->has_vce)
7388 		radeon_vce_fini(rdev);
7389 	si_pcie_gart_fini(rdev);
7390 	r600_vram_scratch_fini(rdev);
7391 	radeon_gem_fini(rdev);
7392 	radeon_fence_driver_fini(rdev);
7393 	radeon_bo_fini(rdev);
7394 	radeon_atombios_fini(rdev);
7395 	kfree(rdev->bios);
7396 	rdev->bios = NULL;
7397 }
7398 
7399 /**
7400  * si_get_gpu_clock_counter - return GPU clock counter snapshot
7401  *
7402  * @rdev: radeon_device pointer
7403  *
7404  * Fetches a GPU clock counter snapshot (SI).
7405  * Returns the 64 bit clock counter snapshot.
7406  */
7407 uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev)
7408 {
7409 	uint64_t clock;
7410 
7411 	mutex_lock(&rdev->gpu_clock_mutex);
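	/* writing the capture bit latches both halves of the 64-bit counter */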
7412 	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
7413 	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
7414 		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
7415 	mutex_unlock(&rdev->gpu_clock_mutex);
7416 	return clock;
7417 }
7418 
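/**
 * si_set_uvd_clocks - set the UVD VCLK and DCLK frequencies (SI)
 *
 * @rdev: radeon_device pointer
 * @vclk: requested VCLK, 0 leaves the PLL in bypass
 * @dclk: requested DCLK, 0 leaves the PLL in bypass
 *
 * Switches VCLK/DCLK to the bclk bypass source, reprograms the UPLL
 * feedback and post dividers, then switches the clocks back to the
 * PLL output.
 * Returns 0 on success, error code on failure.
 */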
7419 int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
7420 {
7421 	unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
7422 	int r;
7423 
7424 	/* bypass vclk and dclk with bclk */
7425 	WREG32_P(CG_UPLL_FUNC_CNTL_2,
7426 		VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
7427 		~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
7428 
7429 	/* put PLL in bypass mode */
7430 	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
7431 
7432 	if (!vclk || !dclk) {
7433 		/* keep the Bypass mode */
7434 		return 0;
7435 	}
7436 
7437 	r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
7438 					  16384, 0x03FFFFFF, 0, 128, 5,
7439 					  &fb_div, &vclk_div, &dclk_div);
7440 	if (r)
7441 		return r;
7442 
7443 	/* set RESET_ANTI_MUX to 0 */
7444 	WREG32_P(CG_UPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);
7445 
7446 	/* set VCO_MODE to 1 */
7447 	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
7448 
7449 	/* disable sleep mode */
7450 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
7451 
7452 	/* deassert UPLL_RESET */
7453 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
7454 
7455 	mdelay(1);
7456 
7457 	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
7458 	if (r)
7459 		return r;
7460 
7461 	/* assert UPLL_RESET again */
7462 	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
7463 
7464 	/* disable spread spectrum. */
7465 	WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
7466 
7467 	/* set feedback divider */
7468 	WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);
7469 
7470 	/* set ref divider to 0 */
7471 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);
7472 
7473 	if (fb_div < 307200)
7474 		WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
7475 	else
7476 		WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);
7477 
7478 	/* set PDIV_A and PDIV_B */
7479 	WREG32_P(CG_UPLL_FUNC_CNTL_2,
7480 		UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
7481 		~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));
7482 
7483 	/* give the PLL some time to settle */
7484 	mdelay(15);
7485 
7486 	/* deassert PLL_RESET */
7487 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
7488 
7489 	mdelay(15);
7490 
7491 	/* switch from bypass mode to normal mode */
7492 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
7493 
7494 	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
7495 	if (r)
7496 		return r;
7497 
7498 	/* switch VCLK and DCLK selection */
7499 	WREG32_P(CG_UPLL_FUNC_CNTL_2,
7500 		VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
7501 		~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
7502 
7503 	mdelay(100);
7504 
7505 	return 0;
7506 }
7507 
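/**
 * si_pcie_gen3_enable - raise the PCIe link to gen2/gen3 speeds
 *
 * @rdev: radeon_device pointer
 *
 * Checks the speeds supported by the root port, re-runs the gen3
 * equalization sequence if needed, then requests the highest
 * supported target link speed. Skipped for IGPs, non-PCIe parts
 * and when radeon.pcie_gen2=0.
 */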
7508 static void si_pcie_gen3_enable(struct radeon_device *rdev)
7509 {
7510 	struct pci_dev *root = rdev->pdev->bus->self;
7511 	int bridge_pos, gpu_pos;
7512 	u32 speed_cntl, mask, current_data_rate;
7513 	int ret, i;
7514 	u16 tmp16;
7515 
7516 	if (pci_is_root_bus(rdev->pdev->bus))
7517 		return;
7518 
7519 	if (radeon_pcie_gen2 == 0)
7520 		return;
7521 
7522 	if (rdev->flags & RADEON_IS_IGP)
7523 		return;
7524 
7525 	if (!(rdev->flags & RADEON_IS_PCIE))
7526 		return;
7527 
7528 	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
7529 	if (ret != 0)
7530 		return;
7531 
7532 	if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
7533 		return;
7534 
7535 	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
7536 	current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
7537 		LC_CURRENT_DATA_RATE_SHIFT;
7538 	if (mask & DRM_PCIE_SPEED_80) {
7539 		if (current_data_rate == 2) {
7540 			DRM_INFO("PCIE gen 3 link speeds already enabled\n");
7541 			return;
7542 		}
7543 		DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
7544 	} else if (mask & DRM_PCIE_SPEED_50) {
7545 		if (current_data_rate == 1) {
7546 			DRM_INFO("PCIE gen 2 link speeds already enabled\n");
7547 			return;
7548 		}
7549 		DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
7550 	}
7551 
7552 	bridge_pos = pci_pcie_cap(root);
7553 	if (!bridge_pos)
7554 		return;
7555 
7556 	gpu_pos = pci_pcie_cap(rdev->pdev);
7557 	if (!gpu_pos)
7558 		return;
7559 
7560 	if (mask & DRM_PCIE_SPEED_80) {
7561 		/* re-try equalization if gen3 is not already enabled */
7562 		if (current_data_rate != 2) {
7563 			u16 bridge_cfg, gpu_cfg;
7564 			u16 bridge_cfg2, gpu_cfg2;
7565 			u32 max_lw, current_lw, tmp;
7566 
7567 			pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
7568 			pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
7569 
7570 			tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
7571 			pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
7572 
7573 			tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
7574 			pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
7575 
7576 			tmp = RREG32_PCIE(PCIE_LC_STATUS1);
7577 			max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
7578 			current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;
7579 
7580 			if (current_lw < max_lw) {
7581 				tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
7582 				if (tmp & LC_RENEGOTIATION_SUPPORT) {
7583 					tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
7584 					tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
7585 					tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
7586 					WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
7587 				}
7588 			}
7589 
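			/* redo link equalization up to 10 times, quiescing the LC
			 * and restoring the saved link control state on each pass
			 */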
7590 			for (i = 0; i < 10; i++) {
7591 				/* check status */
7592 				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
7593 				if (tmp16 & PCI_EXP_DEVSTA_TRPND)
7594 					break;
7595 
7596 				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
7597 				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
7598 
7599 				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
7600 				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
7601 
7602 				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
7603 				tmp |= LC_SET_QUIESCE;
7604 				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
7605 
7606 				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
7607 				tmp |= LC_REDO_EQ;
7608 				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
7609 
7610 				mdelay(100);
7611 
7612 				/* linkctl */
7613 				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
7614 				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
7615 				tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
7616 				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
7617 
7618 				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
7619 				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
7620 				tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
7621 				pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
7622 
7623 				/* linkctl2 */
7624 				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
7625 				tmp16 &= ~((1 << 4) | (7 << 9));
7626 				tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
7627 				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
7628 
7629 				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
7630 				tmp16 &= ~((1 << 4) | (7 << 9));
7631 				tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
7632 				pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
7633 
7634 				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
7635 				tmp &= ~LC_SET_QUIESCE;
7636 				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
7637 			}
7638 		}
7639 	}
7640 
7641 	/* set the link speed */
7642 	speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
7643 	speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
7644 	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
7645 
7646 	pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
7647 	tmp16 &= ~0xf;
7648 	if (mask & DRM_PCIE_SPEED_80)
7649 		tmp16 |= 3; /* gen3 */
7650 	else if (mask & DRM_PCIE_SPEED_50)
7651 		tmp16 |= 2; /* gen2 */
7652 	else
7653 		tmp16 |= 1; /* gen1 */
7654 	pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
7655 
7656 	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
7657 	speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
7658 	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
7659 
7660 	for (i = 0; i < rdev->usec_timeout; i++) {
7661 		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
7662 		if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
7663 			break;
7664 		udelay(1);
7665 	}
7666 }
7667 
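/**
 * si_program_aspm - program ASPM related power savings
 *
 * @rdev: radeon_device pointer
 *
 * Programs the L0s/L1 inactivity timers and PLL powerdown in L1 and,
 * when the upstream bridge advertises clock power management, allows
 * lane/PLL powerdown and reprograms several clock selects so the
 * reference clock can be gated. Skipped when radeon.aspm=0 or the
 * device is not PCIe.
 */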
7668 static void si_program_aspm(struct radeon_device *rdev)
7669 {
7670 	u32 data, orig;
7671 	bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
7672 	bool disable_clkreq = false;
7673 
7674 	if (radeon_aspm == 0)
7675 		return;
7676 
7677 	if (!(rdev->flags & RADEON_IS_PCIE))
7678 		return;
7679 
7680 	orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
7681 	data &= ~LC_XMIT_N_FTS_MASK;
7682 	data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
7683 	if (orig != data)
7684 		WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
7685 
7686 	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
7687 	data |= LC_GO_TO_RECOVERY;
7688 	if (orig != data)
7689 		WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
7690 
7691 	orig = data = RREG32_PCIE(PCIE_P_CNTL);
7692 	data |= P_IGNORE_EDB_ERR;
7693 	if (orig != data)
7694 		WREG32_PCIE(PCIE_P_CNTL, data);
7695 
7696 	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
7697 	data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
7698 	data |= LC_PMI_TO_L1_DIS;
7699 	if (!disable_l0s)
7700 		data |= LC_L0S_INACTIVITY(7);
7701 
7702 	if (!disable_l1) {
7703 		data |= LC_L1_INACTIVITY(7);
7704 		data &= ~LC_PMI_TO_L1_DIS;
7705 		if (orig != data)
7706 			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
7707 
7708 		if (!disable_plloff_in_l1) {
7709 			bool clk_req_support;
7710 
7711 			orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
7712 			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
7713 			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
7714 			if (orig != data)
7715 				WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
7716 
7717 			orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
7718 			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
7719 			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
7720 			if (orig != data)
7721 				WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
7722 
7723 			orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
7724 			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
7725 			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
7726 			if (orig != data)
7727 				WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
7728 
7729 			orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
7730 			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
7731 			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
7732 			if (orig != data)
7733 				WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
7734 
7735 			if ((rdev->family != CHIP_OLAND) && (rdev->family != CHIP_HAINAN)) {
7736 				orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
7737 				data &= ~PLL_RAMP_UP_TIME_0_MASK;
7738 				if (orig != data)
7739 					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
7740 
7741 				orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
7742 				data &= ~PLL_RAMP_UP_TIME_1_MASK;
7743 				if (orig != data)
7744 					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
7745 
7746 				orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_2);
7747 				data &= ~PLL_RAMP_UP_TIME_2_MASK;
7748 				if (orig != data)
7749 					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_2, data);
7750 
7751 				orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_3);
7752 				data &= ~PLL_RAMP_UP_TIME_3_MASK;
7753 				if (orig != data)
7754 					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_3, data);
7755 
7756 				orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
7757 				data &= ~PLL_RAMP_UP_TIME_0_MASK;
7758 				if (orig != data)
7759 					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
7760 
7761 				orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
7762 				data &= ~PLL_RAMP_UP_TIME_1_MASK;
7763 				if (orig != data)
7764 					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
7765 
7766 				orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_2);
7767 				data &= ~PLL_RAMP_UP_TIME_2_MASK;
7768 				if (orig != data)
7769 					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_2, data);
7770 
7771 				orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_3);
7772 				data &= ~PLL_RAMP_UP_TIME_3_MASK;
7773 				if (orig != data)
7774 					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_3, data);
7775 			}
7776 			orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
7777 			data &= ~LC_DYN_LANES_PWR_STATE_MASK;
7778 			data |= LC_DYN_LANES_PWR_STATE(3);
7779 			if (orig != data)
7780 				WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
7781 
7782 			orig = data = RREG32_PIF_PHY0(PB0_PIF_CNTL);
7783 			data &= ~LS2_EXIT_TIME_MASK;
7784 			if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
7785 				data |= LS2_EXIT_TIME(5);
7786 			if (orig != data)
7787 				WREG32_PIF_PHY0(PB0_PIF_CNTL, data);
7788 
7789 			orig = data = RREG32_PIF_PHY1(PB1_PIF_CNTL);
7790 			data &= ~LS2_EXIT_TIME_MASK;
7791 			if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
7792 				data |= LS2_EXIT_TIME(5);
7793 			if (orig != data)
7794 				WREG32_PIF_PHY1(PB1_PIF_CNTL, data);
7795 
7796 			if (!disable_clkreq &&
7797 			    !pci_is_root_bus(rdev->pdev->bus)) {
7798 				struct pci_dev *root = rdev->pdev->bus->self;
7799 				u32 lnkcap;
7800 
7801 				clk_req_support = false;
7802 				pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
7803 				if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
7804 					clk_req_support = true;
7805 			} else {
7806 				clk_req_support = false;
7807 			}
7808 
7809 			if (clk_req_support) {
7810 				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
7811 				data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
7812 				if (orig != data)
7813 					WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
7814 
7815 				orig = data = RREG32(THM_CLK_CNTL);
7816 				data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
7817 				data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
7818 				if (orig != data)
7819 					WREG32(THM_CLK_CNTL, data);
7820 
7821 				orig = data = RREG32(MISC_CLK_CNTL);
7822 				data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
7823 				data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
7824 				if (orig != data)
7825 					WREG32(MISC_CLK_CNTL, data);
7826 
7827 				orig = data = RREG32(CG_CLKPIN_CNTL);
7828 				data &= ~BCLK_AS_XCLK;
7829 				if (orig != data)
7830 					WREG32(CG_CLKPIN_CNTL, data);
7831 
7832 				orig = data = RREG32(CG_CLKPIN_CNTL_2);
7833 				data &= ~FORCE_BIF_REFCLK_EN;
7834 				if (orig != data)
7835 					WREG32(CG_CLKPIN_CNTL_2, data);
7836 
7837 				orig = data = RREG32(MPLL_BYPASSCLK_SEL);
7838 				data &= ~MPLL_CLKOUT_SEL_MASK;
7839 				data |= MPLL_CLKOUT_SEL(4);
7840 				if (orig != data)
7841 					WREG32(MPLL_BYPASSCLK_SEL, data);
7842 
7843 				orig = data = RREG32(SPLL_CNTL_MODE);
7844 				data &= ~SPLL_REFCLK_SEL_MASK;
7845 				if (orig != data)
7846 					WREG32(SPLL_CNTL_MODE, data);
7847 			}
7848 		}
7849 	} else {
7850 		if (orig != data)
7851 			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
7852 	}
7853 
7854 	orig = data = RREG32_PCIE(PCIE_CNTL2);
7855 	data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
7856 	if (orig != data)
7857 		WREG32_PCIE(PCIE_CNTL2, data);
7858 
7859 	if (!disable_l0s) {
7860 		data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
7861 		if ((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
7862 			data = RREG32_PCIE(PCIE_LC_STATUS1);
7863 			if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
7864 				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
7865 				data &= ~LC_L0S_INACTIVITY_MASK;
7866 				if (orig != data)
7867 					WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
7868 			}
7869 		}
7870 	}
7871 }
7872 
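/**
 * si_vce_send_vcepll_ctlreq - issue a VCE PLL control request
 *
 * @rdev: radeon_device pointer
 *
 * Asserts CTLREQ in CG_VCEPLL_FUNC_CNTL and polls for both CTLACK
 * bits before releasing the request.
 * Returns 0 on success, -ETIMEDOUT if the PLL never acks.
 */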
7873 static int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev)
7874 {
7875 	unsigned i;
7876 
7877 	/* make sure VCEPLL_CTLREQ is deasserted */
7878 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
7879 
7880 	mdelay(10);
7881 
7882 	/* assert UPLL_CTLREQ */
7883 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
7884 
7885 	/* wait for CTLACK and CTLACK2 to get asserted */
7886 	for (i = 0; i < 100; ++i) {
7887 		uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
7888 		if ((RREG32_SMC(CG_VCEPLL_FUNC_CNTL) & mask) == mask)
7889 			break;
7890 		mdelay(10);
7891 	}
7892 
7893 	/* deassert UPLL_CTLREQ */
7894 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
7895 
7896 	if (i == 100) {
7897 		DRM_ERROR("Timeout setting VCE clocks!\n");
7898 		return -ETIMEDOUT;
7899 	}
7900 
7901 	return 0;
7902 }
7903 
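/**
 * si_set_vce_clocks - set the VCE EVCLK and ECCLK frequencies (SI)
 *
 * @rdev: radeon_device pointer
 * @evclk: requested EVCLK, 0 puts the PLL to sleep in bypass mode
 * @ecclk: requested ECCLK, 0 puts the PLL to sleep in bypass mode
 *
 * Mirrors si_set_uvd_clocks() for the VCE PLL: bypass the clocks,
 * reprogram the dividers, then switch back to the PLL output.
 * Returns 0 on success, error code on failure.
 */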
7904 int si_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
7905 {
7906 	unsigned fb_div = 0, evclk_div = 0, ecclk_div = 0;
7907 	int r;
7908 
7909 	/* bypass evclk and ecclk with bclk */
7910 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
7911 		     EVCLK_SRC_SEL(1) | ECCLK_SRC_SEL(1),
7912 		     ~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK));
7913 
7914 	/* put PLL in bypass mode */
7915 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_BYPASS_EN_MASK,
7916 		     ~VCEPLL_BYPASS_EN_MASK);
7917 
7918 	if (!evclk || !ecclk) {
7919 		/* keep the Bypass mode, put PLL to sleep */
7920 		WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
7921 			     ~VCEPLL_SLEEP_MASK);
7922 		return 0;
7923 	}
7924 
7925 	r = radeon_uvd_calc_upll_dividers(rdev, evclk, ecclk, 125000, 250000,
7926 					  16384, 0x03FFFFFF, 0, 128, 5,
7927 					  &fb_div, &evclk_div, &ecclk_div);
7928 	if (r)
7929 		return r;
7930 
7931 	/* set RESET_ANTI_MUX to 0 */
7932 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);
7933 
7934 	/* set VCO_MODE to 1 */
7935 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_VCO_MODE_MASK,
7936 		     ~VCEPLL_VCO_MODE_MASK);
7937 
7938 	/* toggle VCEPLL_SLEEP to 1 then back to 0 */
7939 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
7940 		     ~VCEPLL_SLEEP_MASK);
7941 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_SLEEP_MASK);
7942 
7943 	/* deassert VCEPLL_RESET */
7944 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK);
7945 
7946 	mdelay(1);
7947 
7948 	r = si_vce_send_vcepll_ctlreq(rdev);
7949 	if (r)
7950 		return r;
7951 
7952 	/* assert VCEPLL_RESET again */
7953 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_RESET_MASK, ~VCEPLL_RESET_MASK);
7954 
7955 	/* disable spread spectrum. */
7956 	WREG32_SMC_P(CG_VCEPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
7957 
7958 	/* set feedback divider */
7959 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_3, VCEPLL_FB_DIV(fb_div), ~VCEPLL_FB_DIV_MASK);
7960 
7961 	/* set ref divider to 0 */
7962 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_REF_DIV_MASK);
7963 
7964 	/* set PDIV_A and PDIV_B */
7965 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
7966 		     VCEPLL_PDIV_A(evclk_div) | VCEPLL_PDIV_B(ecclk_div),
7967 		     ~(VCEPLL_PDIV_A_MASK | VCEPLL_PDIV_B_MASK));
7968 
7969 	/* give the PLL some time to settle */
7970 	mdelay(15);
7971 
7972 	/* deassert PLL_RESET */
7973 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK);
7974 
7975 	mdelay(15);
7976 
7977 	/* switch from bypass mode to normal mode */
7978 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_BYPASS_EN_MASK);
7979 
7980 	r = si_vce_send_vcepll_ctlreq(rdev);
7981 	if (r)
7982 		return r;
7983 
7984 	/* switch EVCLK and ECCLK selection */
7985 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
7986 		     EVCLK_SRC_SEL(16) | ECCLK_SRC_SEL(16),
7987 		     ~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK));
7988 
7989 	mdelay(100);
7990 
7991 	return 0;
7992 }
7993