xref: /linux/drivers/gpu/drm/amd/amdgpu/vi.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/firmware.h>
24 #include <linux/slab.h>
25 #include <linux/module.h>
26 #include "drmP.h"
27 #include "amdgpu.h"
28 #include "amdgpu_atombios.h"
29 #include "amdgpu_ih.h"
30 #include "amdgpu_uvd.h"
31 #include "amdgpu_vce.h"
32 #include "amdgpu_ucode.h"
33 #include "atom.h"
34 
35 #include "gmc/gmc_8_1_d.h"
36 #include "gmc/gmc_8_1_sh_mask.h"
37 
38 #include "oss/oss_3_0_d.h"
39 #include "oss/oss_3_0_sh_mask.h"
40 
41 #include "bif/bif_5_0_d.h"
42 #include "bif/bif_5_0_sh_mask.h"
43 
44 #include "gca/gfx_8_0_d.h"
45 #include "gca/gfx_8_0_sh_mask.h"
46 
47 #include "smu/smu_7_1_1_d.h"
48 #include "smu/smu_7_1_1_sh_mask.h"
49 
50 #include "uvd/uvd_5_0_d.h"
51 #include "uvd/uvd_5_0_sh_mask.h"
52 
53 #include "vce/vce_3_0_d.h"
54 #include "vce/vce_3_0_sh_mask.h"
55 
56 #include "dce/dce_10_0_d.h"
57 #include "dce/dce_10_0_sh_mask.h"
58 
59 #include "vid.h"
60 #include "vi.h"
61 #include "vi_dpm.h"
62 #include "gmc_v8_0.h"
63 #include "gfx_v8_0.h"
64 #include "sdma_v2_4.h"
65 #include "sdma_v3_0.h"
66 #include "dce_v10_0.h"
67 #include "dce_v11_0.h"
68 #include "iceland_ih.h"
69 #include "tonga_ih.h"
70 #include "cz_ih.h"
71 #include "uvd_v5_0.h"
72 #include "uvd_v6_0.h"
73 #include "vce_v3_0.h"
74 
75 /*
76  * Indirect registers accessor
77  */
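/*
 * Several register spaces on VI are only reachable through an index/data
 * pair: write the target offset to the INDEX register, then read or write
 * the DATA register.  Each pair is protected by its own spinlock so that
 * concurrent users cannot interleave index and data accesses; the PCIE
 * accessors additionally read the index register back to post the write
 * before touching the data register.
 */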
78 static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
79 {
80 	unsigned long flags;
81 	u32 r;
82 
83 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
84 	WREG32(mmPCIE_INDEX, reg);
85 	(void)RREG32(mmPCIE_INDEX);
86 	r = RREG32(mmPCIE_DATA);
87 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
88 	return r;
89 }
90 
91 static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
92 {
93 	unsigned long flags;
94 
95 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
96 	WREG32(mmPCIE_INDEX, reg);
97 	(void)RREG32(mmPCIE_INDEX);
98 	WREG32(mmPCIE_DATA, v);
99 	(void)RREG32(mmPCIE_DATA);
100 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
101 }
102 
103 static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
104 {
105 	unsigned long flags;
106 	u32 r;
107 
108 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
109 	WREG32(mmSMC_IND_INDEX_0, (reg));
110 	r = RREG32(mmSMC_IND_DATA_0);
111 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
112 	return r;
113 }
114 
115 static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
116 {
117 	unsigned long flags;
118 
119 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
120 	WREG32(mmSMC_IND_INDEX_0, (reg));
121 	WREG32(mmSMC_IND_DATA_0, (v));
122 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
123 }
124 
125 /* smu_8_0_d.h */
126 #define mmMP0PUB_IND_INDEX                                                      0x180
127 #define mmMP0PUB_IND_DATA                                                       0x181
128 
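/*
 * Carrizo (the APU variant handled in this file) exposes its SMC registers
 * through the MP0PUB index/data pair defined above rather than through
 * SMC_IND_INDEX_0/SMC_IND_DATA_0, so it gets its own accessors.
 */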
129 static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
130 {
131 	unsigned long flags;
132 	u32 r;
133 
134 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
135 	WREG32(mmMP0PUB_IND_INDEX, (reg));
136 	r = RREG32(mmMP0PUB_IND_DATA);
137 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
138 	return r;
139 }
140 
141 static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
142 {
143 	unsigned long flags;
144 
145 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
146 	WREG32(mmMP0PUB_IND_INDEX, (reg));
147 	WREG32(mmMP0PUB_IND_DATA, (v));
148 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
149 }
150 
151 static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
152 {
153 	unsigned long flags;
154 	u32 r;
155 
156 	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
157 	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
158 	r = RREG32(mmUVD_CTX_DATA);
159 	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
160 	return r;
161 }
162 
163 static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
164 {
165 	unsigned long flags;
166 
167 	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
168 	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
169 	WREG32(mmUVD_CTX_DATA, (v));
170 	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
171 }
172 
173 static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
174 {
175 	unsigned long flags;
176 	u32 r;
177 
178 	spin_lock_irqsave(&adev->didt_idx_lock, flags);
179 	WREG32(mmDIDT_IND_INDEX, (reg));
180 	r = RREG32(mmDIDT_IND_DATA);
181 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
182 	return r;
183 }
184 
185 static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
186 {
187 	unsigned long flags;
188 
189 	spin_lock_irqsave(&adev->didt_idx_lock, flags);
190 	WREG32(mmDIDT_IND_INDEX, (reg));
191 	WREG32(mmDIDT_IND_DATA, (v));
192 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
193 }
194 
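/*
 * Golden register settings for clock gating (MGCG/CGCG).  Each table entry
 * is a {register, mask, value} triplet consumed by
 * amdgpu_program_register_sequence() from vi_init_golden_registers(): the
 * masked bits are cleared and the value ORed in (or the value is written
 * directly when the mask is 0xffffffff).
 */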
195 static const u32 tonga_mgcg_cgcg_init[] =
196 {
197 	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
198 	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
199 	mmPCIE_DATA, 0x000f0000, 0x00000000,
200 	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
201 	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
202 	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
203 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
204 };
205 
206 static const u32 fiji_mgcg_cgcg_init[] =
207 {
208 	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
209 	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
210 	mmPCIE_DATA, 0x000f0000, 0x00000000,
211 	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
212 	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
213 	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
214 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
215 };
216 
217 static const u32 iceland_mgcg_cgcg_init[] =
218 {
219 	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
220 	mmPCIE_DATA, 0x000f0000, 0x00000000,
221 	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
222 	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
223 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
224 };
225 
226 static const u32 cz_mgcg_cgcg_init[] =
227 {
228 	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
229 	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
230 	mmPCIE_DATA, 0x000f0000, 0x00000000,
231 	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
232 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
233 };
234 
235 static void vi_init_golden_registers(struct amdgpu_device *adev)
236 {
237 	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
238 	mutex_lock(&adev->grbm_idx_mutex);
239 
240 	switch (adev->asic_type) {
241 	case CHIP_TOPAZ:
242 		amdgpu_program_register_sequence(adev,
243 						 iceland_mgcg_cgcg_init,
244 						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
245 		break;
246 	case CHIP_FIJI:
247 		amdgpu_program_register_sequence(adev,
248 						 fiji_mgcg_cgcg_init,
249 						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
250 		break;
251 	case CHIP_TONGA:
252 		amdgpu_program_register_sequence(adev,
253 						 tonga_mgcg_cgcg_init,
254 						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
255 		break;
256 	case CHIP_CARRIZO:
257 		amdgpu_program_register_sequence(adev,
258 						 cz_mgcg_cgcg_init,
259 						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
260 		break;
261 	default:
262 		break;
263 	}
264 	mutex_unlock(&adev->grbm_idx_mutex);
265 }
266 
267 /**
268  * vi_get_xclk - get the xclk
269  *
270  * @adev: amdgpu_device pointer
271  *
272  * Returns the reference clock used by the gfx engine
273  * (VI).
274  */
275 static u32 vi_get_xclk(struct amdgpu_device *adev)
276 {
277 	u32 reference_clock = adev->clock.spll.reference_freq;
278 	u32 tmp;
279 
280 	if (adev->flags & AMD_IS_APU)
281 		return reference_clock;
282 
283 	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
284 	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
285 		return 1000;
286 
287 	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
288 	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
289 		return reference_clock / 4;
290 
291 	return reference_clock;
292 }
293 
294 /**
295  * vi_srbm_select - select specific register instances
296  *
297  * @adev: amdgpu_device pointer
298  * @me: selected ME (micro engine)
299  * @pipe: pipe
300  * @queue: queue
301  * @vmid: VMID
302  *
303  * Switches the currently active register instances.  Some
304  * registers are instanced per VMID, others are instanced per
305  * me/pipe/queue combination.
306  */
307 void vi_srbm_select(struct amdgpu_device *adev,
308 		     u32 me, u32 pipe, u32 queue, u32 vmid)
309 {
310 	u32 srbm_gfx_cntl = 0;
311 	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
312 	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
313 	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
314 	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
315 	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
316 }
317 
318 static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
319 {
320 	/* todo */
321 }
322 
323 static bool vi_read_disabled_bios(struct amdgpu_device *adev)
324 {
325 	u32 bus_cntl;
326 	u32 d1vga_control = 0;
327 	u32 d2vga_control = 0;
328 	u32 vga_render_control = 0;
329 	u32 rom_cntl;
330 	bool r;
331 
332 	bus_cntl = RREG32(mmBUS_CNTL);
333 	if (adev->mode_info.num_crtc) {
334 		d1vga_control = RREG32(mmD1VGA_CONTROL);
335 		d2vga_control = RREG32(mmD2VGA_CONTROL);
336 		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
337 	}
338 	rom_cntl = RREG32_SMC(ixROM_CNTL);
339 
340 	/* enable the rom */
341 	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
342 	if (adev->mode_info.num_crtc) {
343 		/* Disable VGA mode */
344 		WREG32(mmD1VGA_CONTROL,
345 		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
346 					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
347 		WREG32(mmD2VGA_CONTROL,
348 		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
349 					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
350 		WREG32(mmVGA_RENDER_CONTROL,
351 		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
352 	}
353 	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);
354 
355 	r = amdgpu_read_bios(adev);
356 
357 	/* restore regs */
358 	WREG32(mmBUS_CNTL, bus_cntl);
359 	if (adev->mode_info.num_crtc) {
360 		WREG32(mmD1VGA_CONTROL, d1vga_control);
361 		WREG32(mmD2VGA_CONTROL, d2vga_control);
362 		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
363 	}
364 	WREG32_SMC(ixROM_CNTL, rom_cntl);
365 	return r;
366 }
367 static struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
368 	{mmGB_MACROTILE_MODE7, true},
369 };
370 
371 static struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
372 	{mmGB_TILE_MODE7, true},
373 	{mmGB_TILE_MODE12, true},
374 	{mmGB_TILE_MODE17, true},
375 	{mmGB_TILE_MODE23, true},
376 	{mmGB_MACROTILE_MODE7, true},
377 };
378 
379 static struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
380 	{mmGRBM_STATUS, false},
381 	{mmGRBM_STATUS2, false},
382 	{mmGRBM_STATUS_SE0, false},
383 	{mmGRBM_STATUS_SE1, false},
384 	{mmGRBM_STATUS_SE2, false},
385 	{mmGRBM_STATUS_SE3, false},
386 	{mmSRBM_STATUS, false},
387 	{mmSRBM_STATUS2, false},
388 	{mmSRBM_STATUS3, false},
389 	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET, false},
390 	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET, false},
391 	{mmCP_STAT, false},
392 	{mmCP_STALLED_STAT1, false},
393 	{mmCP_STALLED_STAT2, false},
394 	{mmCP_STALLED_STAT3, false},
395 	{mmCP_CPF_BUSY_STAT, false},
396 	{mmCP_CPF_STALLED_STAT1, false},
397 	{mmCP_CPF_STATUS, false},
398 	{mmCP_CPC_BUSY_STAT, false},
399 	{mmCP_CPC_STALLED_STAT1, false},
400 	{mmCP_CPC_STATUS, false},
401 	{mmGB_ADDR_CONFIG, false},
402 	{mmMC_ARB_RAMCFG, false},
403 	{mmGB_TILE_MODE0, false},
404 	{mmGB_TILE_MODE1, false},
405 	{mmGB_TILE_MODE2, false},
406 	{mmGB_TILE_MODE3, false},
407 	{mmGB_TILE_MODE4, false},
408 	{mmGB_TILE_MODE5, false},
409 	{mmGB_TILE_MODE6, false},
410 	{mmGB_TILE_MODE7, false},
411 	{mmGB_TILE_MODE8, false},
412 	{mmGB_TILE_MODE9, false},
413 	{mmGB_TILE_MODE10, false},
414 	{mmGB_TILE_MODE11, false},
415 	{mmGB_TILE_MODE12, false},
416 	{mmGB_TILE_MODE13, false},
417 	{mmGB_TILE_MODE14, false},
418 	{mmGB_TILE_MODE15, false},
419 	{mmGB_TILE_MODE16, false},
420 	{mmGB_TILE_MODE17, false},
421 	{mmGB_TILE_MODE18, false},
422 	{mmGB_TILE_MODE19, false},
423 	{mmGB_TILE_MODE20, false},
424 	{mmGB_TILE_MODE21, false},
425 	{mmGB_TILE_MODE22, false},
426 	{mmGB_TILE_MODE23, false},
427 	{mmGB_TILE_MODE24, false},
428 	{mmGB_TILE_MODE25, false},
429 	{mmGB_TILE_MODE26, false},
430 	{mmGB_TILE_MODE27, false},
431 	{mmGB_TILE_MODE28, false},
432 	{mmGB_TILE_MODE29, false},
433 	{mmGB_TILE_MODE30, false},
434 	{mmGB_TILE_MODE31, false},
435 	{mmGB_MACROTILE_MODE0, false},
436 	{mmGB_MACROTILE_MODE1, false},
437 	{mmGB_MACROTILE_MODE2, false},
438 	{mmGB_MACROTILE_MODE3, false},
439 	{mmGB_MACROTILE_MODE4, false},
440 	{mmGB_MACROTILE_MODE5, false},
441 	{mmGB_MACROTILE_MODE6, false},
442 	{mmGB_MACROTILE_MODE7, false},
443 	{mmGB_MACROTILE_MODE8, false},
444 	{mmGB_MACROTILE_MODE9, false},
445 	{mmGB_MACROTILE_MODE10, false},
446 	{mmGB_MACROTILE_MODE11, false},
447 	{mmGB_MACROTILE_MODE12, false},
448 	{mmGB_MACROTILE_MODE13, false},
449 	{mmGB_MACROTILE_MODE14, false},
450 	{mmGB_MACROTILE_MODE15, false},
451 	{mmCC_RB_BACKEND_DISABLE, false, true},
452 	{mmGC_USER_RB_BACKEND_DISABLE, false, true},
453 	{mmGB_BACKEND_MAP, false, false},
454 	{mmPA_SC_RASTER_CONFIG, false, true},
455 	{mmPA_SC_RASTER_CONFIG_1, false, true},
456 };
457 
458 static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
459 					 u32 sh_num, u32 reg_offset)
460 {
461 	uint32_t val;
462 
463 	mutex_lock(&adev->grbm_idx_mutex);
464 	if (se_num != 0xffffffff || sh_num != 0xffffffff)
465 		gfx_v8_0_select_se_sh(adev, se_num, sh_num);
466 
467 	val = RREG32(reg_offset);
468 
469 	if (se_num != 0xffffffff || sh_num != 0xffffffff)
470 		gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
471 	mutex_unlock(&adev->grbm_idx_mutex);
472 	return val;
473 }
474 
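/*
 * vi_read_register() backs the .read_register ASIC callback.  Reads are
 * only honoured for offsets present in the per-ASIC table or in
 * vi_allowed_read_registers above; entries flagged as grbm_indexed go
 * through vi_read_indexed_register() so the requested SE/SH instance is
 * selected first, and "untouched" entries simply report 0.
 */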
475 static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
476 			    u32 sh_num, u32 reg_offset, u32 *value)
477 {
478 	struct amdgpu_allowed_register_entry *asic_register_table = NULL;
479 	struct amdgpu_allowed_register_entry *asic_register_entry;
480 	uint32_t size, i;
481 
482 	*value = 0;
483 	switch (adev->asic_type) {
484 	case CHIP_TOPAZ:
485 		asic_register_table = tonga_allowed_read_registers;
486 		size = ARRAY_SIZE(tonga_allowed_read_registers);
487 		break;
488 	case CHIP_FIJI:
489 	case CHIP_TONGA:
490 	case CHIP_CARRIZO:
491 		asic_register_table = cz_allowed_read_registers;
492 		size = ARRAY_SIZE(cz_allowed_read_registers);
493 		break;
494 	default:
495 		return -EINVAL;
496 	}
497 
498 	if (asic_register_table) {
499 		for (i = 0; i < size; i++) {
500 			asic_register_entry = asic_register_table + i;
501 			if (reg_offset != asic_register_entry->reg_offset)
502 				continue;
503 			if (!asic_register_entry->untouched)
504 				*value = asic_register_entry->grbm_indexed ?
505 					vi_read_indexed_register(adev, se_num,
506 								 sh_num, reg_offset) :
507 					RREG32(reg_offset);
508 			return 0;
509 		}
510 	}
511 
512 	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
513 		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
514 			continue;
515 
516 		if (!vi_allowed_read_registers[i].untouched)
517 			*value = vi_allowed_read_registers[i].grbm_indexed ?
518 				vi_read_indexed_register(adev, se_num,
519 							 sh_num, reg_offset) :
520 				RREG32(reg_offset);
521 		return 0;
522 	}
523 	return -EINVAL;
524 }
525 
526 static void vi_print_gpu_status_regs(struct amdgpu_device *adev)
527 {
528 	dev_info(adev->dev, "  GRBM_STATUS=0x%08X\n",
529 		RREG32(mmGRBM_STATUS));
530 	dev_info(adev->dev, "  GRBM_STATUS2=0x%08X\n",
531 		RREG32(mmGRBM_STATUS2));
532 	dev_info(adev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
533 		RREG32(mmGRBM_STATUS_SE0));
534 	dev_info(adev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
535 		RREG32(mmGRBM_STATUS_SE1));
536 	dev_info(adev->dev, "  GRBM_STATUS_SE2=0x%08X\n",
537 		RREG32(mmGRBM_STATUS_SE2));
538 	dev_info(adev->dev, "  GRBM_STATUS_SE3=0x%08X\n",
539 		RREG32(mmGRBM_STATUS_SE3));
540 	dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
541 		RREG32(mmSRBM_STATUS));
542 	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
543 		RREG32(mmSRBM_STATUS2));
544 	dev_info(adev->dev, "  SDMA0_STATUS_REG   = 0x%08X\n",
545 		RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
546 	dev_info(adev->dev, "  SDMA1_STATUS_REG   = 0x%08X\n",
547 		 RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
548 	dev_info(adev->dev, "  CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
549 	dev_info(adev->dev, "  CP_STALLED_STAT1 = 0x%08x\n",
550 		 RREG32(mmCP_STALLED_STAT1));
551 	dev_info(adev->dev, "  CP_STALLED_STAT2 = 0x%08x\n",
552 		 RREG32(mmCP_STALLED_STAT2));
553 	dev_info(adev->dev, "  CP_STALLED_STAT3 = 0x%08x\n",
554 		 RREG32(mmCP_STALLED_STAT3));
555 	dev_info(adev->dev, "  CP_CPF_BUSY_STAT = 0x%08x\n",
556 		 RREG32(mmCP_CPF_BUSY_STAT));
557 	dev_info(adev->dev, "  CP_CPF_STALLED_STAT1 = 0x%08x\n",
558 		 RREG32(mmCP_CPF_STALLED_STAT1));
559 	dev_info(adev->dev, "  CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS));
560 	dev_info(adev->dev, "  CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT));
561 	dev_info(adev->dev, "  CP_CPC_STALLED_STAT1 = 0x%08x\n",
562 		 RREG32(mmCP_CPC_STALLED_STAT1));
563 	dev_info(adev->dev, "  CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS));
564 }
565 
566 /**
567  * vi_gpu_check_soft_reset - check which blocks are busy
568  *
569  * @adev: amdgpu_device pointer
570  *
571  * Check which blocks are busy and return the relevant reset
572  * mask to be used by vi_gpu_soft_reset().
573  * Returns a mask of the blocks to be reset.
574  */
575 u32 vi_gpu_check_soft_reset(struct amdgpu_device *adev)
576 {
577 	u32 reset_mask = 0;
578 	u32 tmp;
579 
580 	/* GRBM_STATUS */
581 	tmp = RREG32(mmGRBM_STATUS);
582 	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
583 		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
584 		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
585 		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
586 		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
587 		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
588 		reset_mask |= AMDGPU_RESET_GFX;
589 
590 	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK))
591 		reset_mask |= AMDGPU_RESET_CP;
592 
593 	/* GRBM_STATUS2 */
594 	tmp = RREG32(mmGRBM_STATUS2);
595 	if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
596 		reset_mask |= AMDGPU_RESET_RLC;
597 
598 	if (tmp & (GRBM_STATUS2__CPF_BUSY_MASK |
599 		   GRBM_STATUS2__CPC_BUSY_MASK |
600 		   GRBM_STATUS2__CPG_BUSY_MASK))
601 		reset_mask |= AMDGPU_RESET_CP;
602 
603 	/* SRBM_STATUS2 */
604 	tmp = RREG32(mmSRBM_STATUS2);
605 	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK)
606 		reset_mask |= AMDGPU_RESET_DMA;
607 
608 	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)
609 		reset_mask |= AMDGPU_RESET_DMA1;
610 
611 	/* SRBM_STATUS */
612 	tmp = RREG32(mmSRBM_STATUS);
613 
614 	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
615 		reset_mask |= AMDGPU_RESET_IH;
616 
617 	if (tmp & SRBM_STATUS__SEM_BUSY_MASK)
618 		reset_mask |= AMDGPU_RESET_SEM;
619 
620 	if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
621 		reset_mask |= AMDGPU_RESET_GRBM;
622 
623 	if (adev->asic_type != CHIP_TOPAZ) {
624 		if (tmp & (SRBM_STATUS__UVD_RQ_PENDING_MASK |
625 			   SRBM_STATUS__UVD_BUSY_MASK))
626 			reset_mask |= AMDGPU_RESET_UVD;
627 	}
628 
629 	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
630 		reset_mask |= AMDGPU_RESET_VMC;
631 
632 	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
633 		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK))
634 		reset_mask |= AMDGPU_RESET_MC;
635 
636 	/* SDMA0_STATUS_REG */
637 	tmp = RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET);
638 	if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
639 		reset_mask |= AMDGPU_RESET_DMA;
640 
641 	/* SDMA1_STATUS_REG */
642 	tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
643 	if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
644 		reset_mask |= AMDGPU_RESET_DMA1;
645 #if 0
646 	/* VCE_STATUS */
647 	if (adev->asic_type != CHIP_TOPAZ) {
648 		tmp = RREG32(mmVCE_STATUS);
649 		if (tmp & VCE_STATUS__VCPU_REPORT_RB0_BUSY_MASK)
650 			reset_mask |= AMDGPU_RESET_VCE;
651 		if (tmp & VCE_STATUS__VCPU_REPORT_RB1_BUSY_MASK)
652 			reset_mask |= AMDGPU_RESET_VCE1;
653 
654 	}
655 
656 	if (adev->asic_type != CHIP_TOPAZ) {
657 		if (amdgpu_display_is_display_hung(adev))
658 			reset_mask |= AMDGPU_RESET_DISPLAY;
659 	}
660 #endif
661 
662 	/* Skip MC reset as it's most likely not hung, just busy */
663 	if (reset_mask & AMDGPU_RESET_MC) {
664 		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
665 		reset_mask &= ~AMDGPU_RESET_MC;
666 	}
667 
668 	return reset_mask;
669 }
670 
671 /**
672  * vi_gpu_soft_reset - soft reset GPU
673  *
674  * @adev: amdgpu_device pointer
675  * @reset_mask: mask of which blocks to reset
676  *
677  * Soft reset the blocks specified in @reset_mask.
678  */
679 static void vi_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask)
680 {
681 	struct amdgpu_mode_mc_save save;
682 	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
683 	u32 tmp;
684 
685 	if (reset_mask == 0)
686 		return;
687 
688 	dev_info(adev->dev, "GPU softreset: 0x%08X\n", reset_mask);
689 
690 	vi_print_gpu_status_regs(adev);
691 	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
692 		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
693 	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
694 		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
695 
696 	/* disable CG/PG */
697 
698 	/* stop the rlc */
699 	//XXX
700 	//gfx_v8_0_rlc_stop(adev);
701 
702 	/* Disable GFX parsing/prefetching */
703 	tmp = RREG32(mmCP_ME_CNTL);
704 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
705 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
706 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
707 	WREG32(mmCP_ME_CNTL, tmp);
708 
709 	/* Disable MEC parsing/prefetching */
710 	tmp = RREG32(mmCP_MEC_CNTL);
711 	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
712 	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
713 	WREG32(mmCP_MEC_CNTL, tmp);
714 
715 	if (reset_mask & AMDGPU_RESET_DMA) {
716 		/* sdma0 */
717 		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
718 		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
719 		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
720 	}
721 	if (reset_mask & AMDGPU_RESET_DMA1) {
722 		/* sdma1 */
723 		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
724 		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
725 		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
726 	}
727 
728 	gmc_v8_0_mc_stop(adev, &save);
729 	if (amdgpu_asic_wait_for_mc_idle(adev)) {
730 		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
731 	}
732 
733 	if (reset_mask & (AMDGPU_RESET_GFX | AMDGPU_RESET_COMPUTE | AMDGPU_RESET_CP)) {
734 		grbm_soft_reset =
735 			REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
736 		grbm_soft_reset =
737 			REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
738 	}
739 
740 	if (reset_mask & AMDGPU_RESET_CP) {
741 		grbm_soft_reset =
742 			REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
743 		srbm_soft_reset =
744 			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
745 	}
746 
747 	if (reset_mask & AMDGPU_RESET_DMA)
748 		srbm_soft_reset =
749 			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA, 1);
750 
751 	if (reset_mask & AMDGPU_RESET_DMA1)
752 		srbm_soft_reset =
753 			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1, 1);
754 
755 	if (reset_mask & AMDGPU_RESET_DISPLAY)
756 		srbm_soft_reset =
757 			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_DC, 1);
758 
759 	if (reset_mask & AMDGPU_RESET_RLC)
760 		grbm_soft_reset =
761 			REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
762 
763 	if (reset_mask & AMDGPU_RESET_SEM)
764 		srbm_soft_reset =
765 			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);
766 
767 	if (reset_mask & AMDGPU_RESET_IH)
768 		srbm_soft_reset =
769 			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_IH, 1);
770 
771 	if (reset_mask & AMDGPU_RESET_GRBM)
772 		srbm_soft_reset =
773 			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
774 
775 	if (reset_mask & AMDGPU_RESET_VMC)
776 		srbm_soft_reset =
777 			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);
778 
779 	if (reset_mask & AMDGPU_RESET_UVD)
780 		srbm_soft_reset =
781 			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
782 
783 	if (reset_mask & AMDGPU_RESET_VCE)
784 		srbm_soft_reset =
785 			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
786 
787 	if (reset_mask & AMDGPU_RESET_VCE1)
788 		srbm_soft_reset =
789 			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
790 
791 	if (!(adev->flags & AMD_IS_APU)) {
792 		if (reset_mask & AMDGPU_RESET_MC)
793 			srbm_soft_reset =
794 				REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
795 	}
796 
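	/*
	 * The soft reset is pulsed: set the reset bits, read the register
	 * back to post the write, wait ~50us, then clear the bits and read
	 * back again before letting the blocks settle.
	 */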
797 	if (grbm_soft_reset) {
798 		tmp = RREG32(mmGRBM_SOFT_RESET);
799 		tmp |= grbm_soft_reset;
800 		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
801 		WREG32(mmGRBM_SOFT_RESET, tmp);
802 		tmp = RREG32(mmGRBM_SOFT_RESET);
803 
804 		udelay(50);
805 
806 		tmp &= ~grbm_soft_reset;
807 		WREG32(mmGRBM_SOFT_RESET, tmp);
808 		tmp = RREG32(mmGRBM_SOFT_RESET);
809 	}
810 
811 	if (srbm_soft_reset) {
812 		tmp = RREG32(mmSRBM_SOFT_RESET);
813 		tmp |= srbm_soft_reset;
814 		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
815 		WREG32(mmSRBM_SOFT_RESET, tmp);
816 		tmp = RREG32(mmSRBM_SOFT_RESET);
817 
818 		udelay(50);
819 
820 		tmp &= ~srbm_soft_reset;
821 		WREG32(mmSRBM_SOFT_RESET, tmp);
822 		tmp = RREG32(mmSRBM_SOFT_RESET);
823 	}
824 
825 	/* Wait a little for things to settle down */
826 	udelay(50);
827 
828 	gmc_v8_0_mc_resume(adev, &save);
829 	udelay(50);
830 
831 	vi_print_gpu_status_regs(adev);
832 }
833 
834 static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)
835 {
836 	struct amdgpu_mode_mc_save save;
837 	u32 tmp, i;
838 
839 	dev_info(adev->dev, "GPU pci config reset\n");
840 
841 	/* disable dpm? */
842 
843 	/* disable cg/pg */
844 
845 	/* Disable GFX parsing/prefetching */
846 	tmp = RREG32(mmCP_ME_CNTL);
847 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
848 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
849 	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
850 	WREG32(mmCP_ME_CNTL, tmp);
851 
852 	/* Disable MEC parsing/prefetching */
853 	tmp = RREG32(mmCP_MEC_CNTL);
854 	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
855 	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
856 	WREG32(mmCP_MEC_CNTL, tmp);
857 
858 	/* Disable GFX parsing/prefetching */
859 	WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK |
860 		CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);
861 
862 	/* Disable MEC parsing/prefetching */
863 	WREG32(mmCP_MEC_CNTL,
864 			CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);
865 
866 	/* sdma0 */
867 	tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
868 	tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
869 	WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
870 
871 	/* sdma1 */
872 	tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
873 	tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
874 	WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
875 
876 	/* XXX other engines? */
877 
878 	/* halt the rlc, disable cp internal ints */
879 	//XXX
880 	//gfx_v8_0_rlc_stop(adev);
881 
882 	udelay(50);
883 
884 	/* disable mem access */
885 	gmc_v8_0_mc_stop(adev, &save);
886 	if (amdgpu_asic_wait_for_mc_idle(adev)) {
887 		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
888 	}
889 
890 	/* disable BM */
891 	pci_clear_master(adev->pdev);
892 	/* reset */
893 	amdgpu_pci_config_reset(adev);
894 
895 	udelay(100);
896 
897 	/* wait for asic to come out of reset */
898 	for (i = 0; i < adev->usec_timeout; i++) {
899 		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
900 			break;
901 		udelay(1);
902 	}
903 
904 }
905 
906 static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
907 {
908 	u32 tmp = RREG32(mmBIOS_SCRATCH_3);
909 
910 	if (hung)
911 		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
912 	else
913 		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
914 
915 	WREG32(mmBIOS_SCRATCH_3, tmp);
916 }
917 
918 /**
919  * vi_asic_reset - soft reset GPU
920  *
921  * @adev: amdgpu_device pointer
922  *
923  * Look up which blocks are hung and attempt
924  * to reset them.
925  * Returns 0 for success.
926  */
927 static int vi_asic_reset(struct amdgpu_device *adev)
928 {
929 	u32 reset_mask;
930 
931 	reset_mask = vi_gpu_check_soft_reset(adev);
932 
933 	if (reset_mask)
934 		vi_set_bios_scratch_engine_hung(adev, true);
935 
936 	/* try soft reset */
937 	vi_gpu_soft_reset(adev, reset_mask);
938 
939 	reset_mask = vi_gpu_check_soft_reset(adev);
940 
941 	/* try pci config reset */
942 	if (reset_mask && amdgpu_hard_reset)
943 		vi_gpu_pci_config_reset(adev);
944 
945 	reset_mask = vi_gpu_check_soft_reset(adev);
946 
947 	if (!reset_mask)
948 		vi_set_bios_scratch_engine_hung(adev, false);
949 
950 	return 0;
951 }
952 
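/*
 * Program one UVD clock (VCLK or DCLK): query the divider setup from the
 * VBIOS via atombios, write the post divider into the CNTL register and
 * poll the matching STATUS register (up to ~1s) for the divider change to
 * take effect.
 */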
953 static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
954 			u32 cntl_reg, u32 status_reg)
955 {
956 	int r, i;
957 	struct atom_clock_dividers dividers;
958 	uint32_t tmp;
959 
960 	r = amdgpu_atombios_get_clock_dividers(adev,
961 					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
962 					       clock, false, &dividers);
963 	if (r)
964 		return r;
965 
966 	tmp = RREG32_SMC(cntl_reg);
967 	tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
968 		CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
969 	tmp |= dividers.post_divider;
970 	WREG32_SMC(cntl_reg, tmp);
971 
972 	for (i = 0; i < 100; i++) {
973 		if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
974 			break;
975 		mdelay(10);
976 	}
977 	if (i == 100)
978 		return -ETIMEDOUT;
979 
980 	return 0;
981 }
982 
983 static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
984 {
985 	int r;
986 
987 	r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
988 	if (r)
989 		return r;
990 
991 	r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
992 
993 	return r;
994 }
995 
996 static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
997 {
998 	/* todo */
999 
1000 	return 0;
1001 }
1002 
1003 static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
1004 {
1005 	u32 mask;
1006 	int ret;
1007 
1008 	if (pci_is_root_bus(adev->pdev->bus))
1009 		return;
1010 
1011 	if (amdgpu_pcie_gen2 == 0)
1012 		return;
1013 
1014 	if (adev->flags & AMD_IS_APU)
1015 		return;
1016 
1017 	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
1018 	if (ret != 0)
1019 		return;
1020 
1021 	if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
1022 		return;
1023 
1024 	/* todo */
1025 }
1026 
1027 static void vi_program_aspm(struct amdgpu_device *adev)
1028 {
1029 
1030 	if (amdgpu_aspm == 0)
1031 		return;
1032 
1033 	/* todo */
1034 }
1035 
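/*
 * The doorbell aperture backs the doorbell BAR used to ring hardware
 * queues (compute, SDMA).  On dGPUs it has to be enabled explicitly in
 * the BIF; the APU (Carrizo) check below skips this as it is not needed
 * there.
 */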
1036 static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
1037 					bool enable)
1038 {
1039 	u32 tmp;
1040 
1041 	/* not necessary on CZ */
1042 	if (adev->flags & AMD_IS_APU)
1043 		return;
1044 
1045 	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
1046 	if (enable)
1047 		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
1048 	else
1049 		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);
1050 
1051 	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
1052 }
1053 
1054 /* topaz has no DCE, UVD, VCE */
1055 static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
1056 {
1057 	/* ORDER MATTERS! */
1058 	{
1059 		.type = AMD_IP_BLOCK_TYPE_COMMON,
1060 		.major = 2,
1061 		.minor = 0,
1062 		.rev = 0,
1063 		.funcs = &vi_common_ip_funcs,
1064 	},
1065 	{
1066 		.type = AMD_IP_BLOCK_TYPE_GMC,
1067 		.major = 8,
1068 		.minor = 0,
1069 		.rev = 0,
1070 		.funcs = &gmc_v8_0_ip_funcs,
1071 	},
1072 	{
1073 		.type = AMD_IP_BLOCK_TYPE_IH,
1074 		.major = 2,
1075 		.minor = 4,
1076 		.rev = 0,
1077 		.funcs = &iceland_ih_ip_funcs,
1078 	},
1079 	{
1080 		.type = AMD_IP_BLOCK_TYPE_SMC,
1081 		.major = 7,
1082 		.minor = 1,
1083 		.rev = 0,
1084 		.funcs = &iceland_dpm_ip_funcs,
1085 	},
1086 	{
1087 		.type = AMD_IP_BLOCK_TYPE_GFX,
1088 		.major = 8,
1089 		.minor = 0,
1090 		.rev = 0,
1091 		.funcs = &gfx_v8_0_ip_funcs,
1092 	},
1093 	{
1094 		.type = AMD_IP_BLOCK_TYPE_SDMA,
1095 		.major = 2,
1096 		.minor = 4,
1097 		.rev = 0,
1098 		.funcs = &sdma_v2_4_ip_funcs,
1099 	},
1100 };
1101 
1102 static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
1103 {
1104 	/* ORDER MATTERS! */
1105 	{
1106 		.type = AMD_IP_BLOCK_TYPE_COMMON,
1107 		.major = 2,
1108 		.minor = 0,
1109 		.rev = 0,
1110 		.funcs = &vi_common_ip_funcs,
1111 	},
1112 	{
1113 		.type = AMD_IP_BLOCK_TYPE_GMC,
1114 		.major = 8,
1115 		.minor = 0,
1116 		.rev = 0,
1117 		.funcs = &gmc_v8_0_ip_funcs,
1118 	},
1119 	{
1120 		.type = AMD_IP_BLOCK_TYPE_IH,
1121 		.major = 3,
1122 		.minor = 0,
1123 		.rev = 0,
1124 		.funcs = &tonga_ih_ip_funcs,
1125 	},
1126 	{
1127 		.type = AMD_IP_BLOCK_TYPE_SMC,
1128 		.major = 7,
1129 		.minor = 1,
1130 		.rev = 0,
1131 		.funcs = &tonga_dpm_ip_funcs,
1132 	},
1133 	{
1134 		.type = AMD_IP_BLOCK_TYPE_DCE,
1135 		.major = 10,
1136 		.minor = 0,
1137 		.rev = 0,
1138 		.funcs = &dce_v10_0_ip_funcs,
1139 	},
1140 	{
1141 		.type = AMD_IP_BLOCK_TYPE_GFX,
1142 		.major = 8,
1143 		.minor = 0,
1144 		.rev = 0,
1145 		.funcs = &gfx_v8_0_ip_funcs,
1146 	},
1147 	{
1148 		.type = AMD_IP_BLOCK_TYPE_SDMA,
1149 		.major = 3,
1150 		.minor = 0,
1151 		.rev = 0,
1152 		.funcs = &sdma_v3_0_ip_funcs,
1153 	},
1154 	{
1155 		.type = AMD_IP_BLOCK_TYPE_UVD,
1156 		.major = 5,
1157 		.minor = 0,
1158 		.rev = 0,
1159 		.funcs = &uvd_v5_0_ip_funcs,
1160 	},
1161 	{
1162 		.type = AMD_IP_BLOCK_TYPE_VCE,
1163 		.major = 3,
1164 		.minor = 0,
1165 		.rev = 0,
1166 		.funcs = &vce_v3_0_ip_funcs,
1167 	},
1168 };
1169 
1170 static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
1171 {
1172 	/* ORDER MATTERS! */
1173 	{
1174 		.type = AMD_IP_BLOCK_TYPE_COMMON,
1175 		.major = 2,
1176 		.minor = 0,
1177 		.rev = 0,
1178 		.funcs = &vi_common_ip_funcs,
1179 	},
1180 	{
1181 		.type = AMD_IP_BLOCK_TYPE_GMC,
1182 		.major = 8,
1183 		.minor = 5,
1184 		.rev = 0,
1185 		.funcs = &gmc_v8_0_ip_funcs,
1186 	},
1187 	{
1188 		.type = AMD_IP_BLOCK_TYPE_IH,
1189 		.major = 3,
1190 		.minor = 0,
1191 		.rev = 0,
1192 		.funcs = &tonga_ih_ip_funcs,
1193 	},
1194 	{
1195 		.type = AMD_IP_BLOCK_TYPE_SMC,
1196 		.major = 7,
1197 		.minor = 1,
1198 		.rev = 0,
1199 		.funcs = &fiji_dpm_ip_funcs,
1200 	},
1201 	{
1202 		.type = AMD_IP_BLOCK_TYPE_DCE,
1203 		.major = 10,
1204 		.minor = 1,
1205 		.rev = 0,
1206 		.funcs = &dce_v10_0_ip_funcs,
1207 	},
1208 	{
1209 		.type = AMD_IP_BLOCK_TYPE_GFX,
1210 		.major = 8,
1211 		.minor = 0,
1212 		.rev = 0,
1213 		.funcs = &gfx_v8_0_ip_funcs,
1214 	},
1215 	{
1216 		.type = AMD_IP_BLOCK_TYPE_SDMA,
1217 		.major = 3,
1218 		.minor = 0,
1219 		.rev = 0,
1220 		.funcs = &sdma_v3_0_ip_funcs,
1221 	},
1222 	{
1223 		.type = AMD_IP_BLOCK_TYPE_UVD,
1224 		.major = 6,
1225 		.minor = 0,
1226 		.rev = 0,
1227 		.funcs = &uvd_v6_0_ip_funcs,
1228 	},
1229 	{
1230 		.type = AMD_IP_BLOCK_TYPE_VCE,
1231 		.major = 3,
1232 		.minor = 0,
1233 		.rev = 0,
1234 		.funcs = &vce_v3_0_ip_funcs,
1235 	},
1236 };
1237 
1238 static const struct amdgpu_ip_block_version cz_ip_blocks[] =
1239 {
1240 	/* ORDER MATTERS! */
1241 	{
1242 		.type = AMD_IP_BLOCK_TYPE_COMMON,
1243 		.major = 2,
1244 		.minor = 0,
1245 		.rev = 0,
1246 		.funcs = &vi_common_ip_funcs,
1247 	},
1248 	{
1249 		.type = AMD_IP_BLOCK_TYPE_GMC,
1250 		.major = 8,
1251 		.minor = 0,
1252 		.rev = 0,
1253 		.funcs = &gmc_v8_0_ip_funcs,
1254 	},
1255 	{
1256 		.type = AMD_IP_BLOCK_TYPE_IH,
1257 		.major = 3,
1258 		.minor = 0,
1259 		.rev = 0,
1260 		.funcs = &cz_ih_ip_funcs,
1261 	},
1262 	{
1263 		.type = AMD_IP_BLOCK_TYPE_SMC,
1264 		.major = 8,
1265 		.minor = 0,
1266 		.rev = 0,
1267 		.funcs = &cz_dpm_ip_funcs,
1268 	},
1269 	{
1270 		.type = AMD_IP_BLOCK_TYPE_DCE,
1271 		.major = 11,
1272 		.minor = 0,
1273 		.rev = 0,
1274 		.funcs = &dce_v11_0_ip_funcs,
1275 	},
1276 	{
1277 		.type = AMD_IP_BLOCK_TYPE_GFX,
1278 		.major = 8,
1279 		.minor = 0,
1280 		.rev = 0,
1281 		.funcs = &gfx_v8_0_ip_funcs,
1282 	},
1283 	{
1284 		.type = AMD_IP_BLOCK_TYPE_SDMA,
1285 		.major = 3,
1286 		.minor = 0,
1287 		.rev = 0,
1288 		.funcs = &sdma_v3_0_ip_funcs,
1289 	},
1290 	{
1291 		.type = AMD_IP_BLOCK_TYPE_UVD,
1292 		.major = 6,
1293 		.minor = 0,
1294 		.rev = 0,
1295 		.funcs = &uvd_v6_0_ip_funcs,
1296 	},
1297 	{
1298 		.type = AMD_IP_BLOCK_TYPE_VCE,
1299 		.major = 3,
1300 		.minor = 0,
1301 		.rev = 0,
1302 		.funcs = &vce_v3_0_ip_funcs,
1303 	},
1304 };
1305 
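/*
 * Hook up the per-ASIC IP block table.  The core driver walks these
 * arrays in order for init/fini, which is why the ordering in the tables
 * above matters.
 */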
1306 int vi_set_ip_blocks(struct amdgpu_device *adev)
1307 {
1308 	switch (adev->asic_type) {
1309 	case CHIP_TOPAZ:
1310 		adev->ip_blocks = topaz_ip_blocks;
1311 		adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
1312 		break;
1313 	case CHIP_FIJI:
1314 		adev->ip_blocks = fiji_ip_blocks;
1315 		adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
1316 		break;
1317 	case CHIP_TONGA:
1318 		adev->ip_blocks = tonga_ip_blocks;
1319 		adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
1320 		break;
1321 	case CHIP_CARRIZO:
1322 		adev->ip_blocks = cz_ip_blocks;
1323 		adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
1324 		break;
1325 	default:
1326 		/* FIXME: not supported yet */
1327 		return -EINVAL;
1328 	}
1329 
1330 	return 0;
1331 }
1332 
1333 static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
1334 {
1335 	if (adev->asic_type == CHIP_TOPAZ)
1336 		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
1337 			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
1338 	else
1339 		return (RREG32(mmCC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
1340 			>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
1341 }
1342 
1343 static const struct amdgpu_asic_funcs vi_asic_funcs =
1344 {
1345 	.read_disabled_bios = &vi_read_disabled_bios,
1346 	.read_register = &vi_read_register,
1347 	.reset = &vi_asic_reset,
1348 	.set_vga_state = &vi_vga_set_state,
1349 	.get_xclk = &vi_get_xclk,
1350 	.set_uvd_clocks = &vi_set_uvd_clocks,
1351 	.set_vce_clocks = &vi_set_vce_clocks,
1352 	.get_cu_info = &gfx_v8_0_get_cu_info,
1353 	/* these should be moved to their own ip modules */
1354 	.get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
1355 	.wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle,
1356 };
1357 
1358 static int vi_common_early_init(void *handle)
1359 {
1360 	bool smc_enabled = false;
1361 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1362 
1363 	if (adev->flags & AMD_IS_APU) {
1364 		adev->smc_rreg = &cz_smc_rreg;
1365 		adev->smc_wreg = &cz_smc_wreg;
1366 	} else {
1367 		adev->smc_rreg = &vi_smc_rreg;
1368 		adev->smc_wreg = &vi_smc_wreg;
1369 	}
1370 	adev->pcie_rreg = &vi_pcie_rreg;
1371 	adev->pcie_wreg = &vi_pcie_wreg;
1372 	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
1373 	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
1374 	adev->didt_rreg = &vi_didt_rreg;
1375 	adev->didt_wreg = &vi_didt_wreg;
1376 
1377 	adev->asic_funcs = &vi_asic_funcs;
1378 
1379 	if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) &&
1380 		(amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
1381 		smc_enabled = true;
1382 
1383 	adev->rev_id = vi_get_rev_id(adev);
1384 	adev->external_rev_id = 0xFF;
1385 	switch (adev->asic_type) {
1386 	case CHIP_TOPAZ:
1387 		adev->has_uvd = false;
1388 		adev->cg_flags = 0;
1389 		adev->pg_flags = 0;
1390 		adev->external_rev_id = 0x1;
1391 		if (amdgpu_smc_load_fw && smc_enabled)
1392 			adev->firmware.smu_load = true;
1393 		break;
1394 	case CHIP_FIJI:
1395 	case CHIP_TONGA:
1396 		adev->has_uvd = true;
1397 		adev->cg_flags = 0;
1398 		adev->pg_flags = 0;
1399 		adev->external_rev_id = adev->rev_id + 0x14;
1400 		if (amdgpu_smc_load_fw && smc_enabled)
1401 			adev->firmware.smu_load = true;
1402 		break;
1403 	case CHIP_CARRIZO:
1404 		adev->has_uvd = true;
1405 		adev->cg_flags = 0;
1406 		/* Disable UVD pg */
1407 		adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE;
1408 		adev->external_rev_id = adev->rev_id + 0x1;
1409 		if (amdgpu_smc_load_fw && smc_enabled)
1410 			adev->firmware.smu_load = true;
1411 		break;
1412 	default:
1413 		/* FIXME: not supported yet */
1414 		return -EINVAL;
1415 	}
1416 
1417 	return 0;
1418 }
1419 
1420 static int vi_common_sw_init(void *handle)
1421 {
1422 	return 0;
1423 }
1424 
1425 static int vi_common_sw_fini(void *handle)
1426 {
1427 	return 0;
1428 }
1429 
1430 static int vi_common_hw_init(void *handle)
1431 {
1432 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1433 
1434 	/* move the golden regs per IP block */
1435 	vi_init_golden_registers(adev);
1436 	/* enable pcie gen2/3 link */
1437 	vi_pcie_gen3_enable(adev);
1438 	/* enable aspm */
1439 	vi_program_aspm(adev);
1440 	/* enable the doorbell aperture */
1441 	vi_enable_doorbell_aperture(adev, true);
1442 
1443 	return 0;
1444 }
1445 
1446 static int vi_common_hw_fini(void *handle)
1447 {
1448 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1449 
1450 	/* enable the doorbell aperture */
1451 	vi_enable_doorbell_aperture(adev, false);
1452 
1453 	return 0;
1454 }
1455 
1456 static int vi_common_suspend(void *handle)
1457 {
1458 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1459 
1460 	return vi_common_hw_fini(adev);
1461 }
1462 
1463 static int vi_common_resume(void *handle)
1464 {
1465 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1466 
1467 	return vi_common_hw_init(adev);
1468 }
1469 
1470 static bool vi_common_is_idle(void *handle)
1471 {
1472 	return true;
1473 }
1474 
1475 static int vi_common_wait_for_idle(void *handle)
1476 {
1477 	return 0;
1478 }
1479 
1480 static void vi_common_print_status(void *handle)
1481 {
1482 	return;
1483 }
1484 
1485 static int vi_common_soft_reset(void *handle)
1486 {
1487 	return 0;
1488 }
1489 
1490 static int vi_common_set_clockgating_state(void *handle,
1491 					    enum amd_clockgating_state state)
1492 {
1493 	return 0;
1494 }
1495 
1496 static int vi_common_set_powergating_state(void *handle,
1497 					    enum amd_powergating_state state)
1498 {
1499 	return 0;
1500 }
1501 
1502 const struct amd_ip_funcs vi_common_ip_funcs = {
1503 	.early_init = vi_common_early_init,
1504 	.late_init = NULL,
1505 	.sw_init = vi_common_sw_init,
1506 	.sw_fini = vi_common_sw_fini,
1507 	.hw_init = vi_common_hw_init,
1508 	.hw_fini = vi_common_hw_fini,
1509 	.suspend = vi_common_suspend,
1510 	.resume = vi_common_resume,
1511 	.is_idle = vi_common_is_idle,
1512 	.wait_for_idle = vi_common_wait_for_idle,
1513 	.soft_reset = vi_common_soft_reset,
1514 	.print_status = vi_common_print_status,
1515 	.set_clockgating_state = vi_common_set_clockgating_state,
1516 	.set_powergating_state = vi_common_set_powergating_state,
1517 };
1518 
1519