xref: /linux/drivers/gpu/drm/amd/amdgpu/vi.c (revision 8c27f5c1fda5e06405d9c3092735fbc5ec2b1dfa)
1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/slab.h>
24 #include "drmP.h"
25 #include "amdgpu.h"
26 #include "amdgpu_atombios.h"
27 #include "amdgpu_ih.h"
28 #include "amdgpu_uvd.h"
29 #include "amdgpu_vce.h"
30 #include "amdgpu_ucode.h"
31 #include "atom.h"
32 #include "amd_pcie.h"
33 
34 #include "gmc/gmc_8_1_d.h"
35 #include "gmc/gmc_8_1_sh_mask.h"
36 
37 #include "oss/oss_3_0_d.h"
38 #include "oss/oss_3_0_sh_mask.h"
39 
40 #include "bif/bif_5_0_d.h"
41 #include "bif/bif_5_0_sh_mask.h"
42 
43 #include "gca/gfx_8_0_d.h"
44 #include "gca/gfx_8_0_sh_mask.h"
45 
46 #include "smu/smu_7_1_1_d.h"
47 #include "smu/smu_7_1_1_sh_mask.h"
48 
49 #include "uvd/uvd_5_0_d.h"
50 #include "uvd/uvd_5_0_sh_mask.h"
51 
52 #include "vce/vce_3_0_d.h"
53 #include "vce/vce_3_0_sh_mask.h"
54 
55 #include "dce/dce_10_0_d.h"
56 #include "dce/dce_10_0_sh_mask.h"
57 
58 #include "vid.h"
59 #include "vi.h"
60 #include "vi_dpm.h"
61 #include "gmc_v8_0.h"
62 #include "gmc_v7_0.h"
63 #include "gfx_v8_0.h"
64 #include "sdma_v2_4.h"
65 #include "sdma_v3_0.h"
66 #include "dce_v10_0.h"
67 #include "dce_v11_0.h"
68 #include "iceland_ih.h"
69 #include "tonga_ih.h"
70 #include "cz_ih.h"
71 #include "uvd_v5_0.h"
72 #include "uvd_v6_0.h"
73 #include "vce_v3_0.h"
74 #include "amdgpu_powerplay.h"
75 #if defined(CONFIG_DRM_AMD_ACP)
76 #include "amdgpu_acp.h"
77 #endif
78 #include "dce_virtual.h"
79 
80 /*
81  * Indirect registers accessor
82  */
83 static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
84 {
85 	unsigned long flags;
86 	u32 r;
87 
88 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
89 	WREG32(mmPCIE_INDEX, reg);
90 	(void)RREG32(mmPCIE_INDEX);
91 	r = RREG32(mmPCIE_DATA);
92 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
93 	return r;
94 }
95 
96 static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
97 {
98 	unsigned long flags;
99 
100 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
101 	WREG32(mmPCIE_INDEX, reg);
102 	(void)RREG32(mmPCIE_INDEX);
103 	WREG32(mmPCIE_DATA, v);
104 	(void)RREG32(mmPCIE_DATA);
105 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
106 }
107 
108 static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
109 {
110 	unsigned long flags;
111 	u32 r;
112 
113 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
114 	WREG32(mmSMC_IND_INDEX_11, (reg));
115 	r = RREG32(mmSMC_IND_DATA_11);
116 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
117 	return r;
118 }
119 
120 static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
121 {
122 	unsigned long flags;
123 
124 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
125 	WREG32(mmSMC_IND_INDEX_11, (reg));
126 	WREG32(mmSMC_IND_DATA_11, (v));
127 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
128 }
129 
130 /* smu_8_0_d.h */
131 #define mmMP0PUB_IND_INDEX                                                      0x180
132 #define mmMP0PUB_IND_DATA                                                       0x181
133 
134 static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
135 {
136 	unsigned long flags;
137 	u32 r;
138 
139 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
140 	WREG32(mmMP0PUB_IND_INDEX, (reg));
141 	r = RREG32(mmMP0PUB_IND_DATA);
142 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
143 	return r;
144 }
145 
146 static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
147 {
148 	unsigned long flags;
149 
150 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
151 	WREG32(mmMP0PUB_IND_INDEX, (reg));
152 	WREG32(mmMP0PUB_IND_DATA, (v));
153 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
154 }
155 
156 static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
157 {
158 	unsigned long flags;
159 	u32 r;
160 
161 	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
162 	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
163 	r = RREG32(mmUVD_CTX_DATA);
164 	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
165 	return r;
166 }
167 
168 static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
169 {
170 	unsigned long flags;
171 
172 	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
173 	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
174 	WREG32(mmUVD_CTX_DATA, (v));
175 	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
176 }
177 
178 static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
179 {
180 	unsigned long flags;
181 	u32 r;
182 
183 	spin_lock_irqsave(&adev->didt_idx_lock, flags);
184 	WREG32(mmDIDT_IND_INDEX, (reg));
185 	r = RREG32(mmDIDT_IND_DATA);
186 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
187 	return r;
188 }
189 
190 static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
191 {
192 	unsigned long flags;
193 
194 	spin_lock_irqsave(&adev->didt_idx_lock, flags);
195 	WREG32(mmDIDT_IND_INDEX, (reg));
196 	WREG32(mmDIDT_IND_DATA, (v));
197 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
198 }
199 
200 static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
201 {
202 	unsigned long flags;
203 	u32 r;
204 
205 	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
206 	WREG32(mmGC_CAC_IND_INDEX, (reg));
207 	r = RREG32(mmGC_CAC_IND_DATA);
208 	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
209 	return r;
210 }
211 
212 static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
213 {
214 	unsigned long flags;
215 
216 	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
217 	WREG32(mmGC_CAC_IND_INDEX, (reg));
218 	WREG32(mmGC_CAC_IND_DATA, (v));
219 	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
220 }
221 
222 
/*
 * Tonga MGCG/CGCG golden settings, consumed by
 * amdgpu_program_register_sequence() as {register, mask, value} triplets.
 * NOTE(review): the exact mask/value semantics (read-modify-write rules)
 * are defined by amdgpu_program_register_sequence() — confirm there.
 */
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};
233 
/* Fiji MGCG/CGCG golden settings ({register, mask, value} triplets). */
static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};
244 
/* Iceland/Topaz MGCG/CGCG golden settings ({register, mask, value} triplets). */
static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};
253 
/* Carrizo MGCG/CGCG golden settings ({register, mask, value} triplets). */
static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};
262 
/* Stoney MGCG/CGCG golden settings ({register, mask, value} triplets). */
static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};
269 
270 static void vi_init_golden_registers(struct amdgpu_device *adev)
271 {
272 	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
273 	mutex_lock(&adev->grbm_idx_mutex);
274 
275 	switch (adev->asic_type) {
276 	case CHIP_TOPAZ:
277 		amdgpu_program_register_sequence(adev,
278 						 iceland_mgcg_cgcg_init,
279 						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
280 		break;
281 	case CHIP_FIJI:
282 		amdgpu_program_register_sequence(adev,
283 						 fiji_mgcg_cgcg_init,
284 						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
285 		break;
286 	case CHIP_TONGA:
287 		amdgpu_program_register_sequence(adev,
288 						 tonga_mgcg_cgcg_init,
289 						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
290 		break;
291 	case CHIP_CARRIZO:
292 		amdgpu_program_register_sequence(adev,
293 						 cz_mgcg_cgcg_init,
294 						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
295 		break;
296 	case CHIP_STONEY:
297 		amdgpu_program_register_sequence(adev,
298 						 stoney_mgcg_cgcg_init,
299 						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
300 		break;
301 	case CHIP_POLARIS11:
302 	case CHIP_POLARIS10:
303 	case CHIP_POLARIS12:
304 	default:
305 		break;
306 	}
307 	mutex_unlock(&adev->grbm_idx_mutex);
308 }
309 
310 /**
311  * vi_get_xclk - get the xclk
312  *
313  * @adev: amdgpu_device pointer
314  *
315  * Returns the reference clock used by the gfx engine
316  * (VI).
317  */
318 static u32 vi_get_xclk(struct amdgpu_device *adev)
319 {
320 	u32 reference_clock = adev->clock.spll.reference_freq;
321 	u32 tmp;
322 
323 	if (adev->flags & AMD_IS_APU)
324 		return reference_clock;
325 
326 	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
327 	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
328 		return 1000;
329 
330 	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
331 	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
332 		return reference_clock / 4;
333 
334 	return reference_clock;
335 }
336 
337 /**
338  * vi_srbm_select - select specific register instances
339  *
340  * @adev: amdgpu_device pointer
341  * @me: selected ME (micro engine)
342  * @pipe: pipe
343  * @queue: queue
344  * @vmid: VMID
345  *
346  * Switches the currently active registers instances.  Some
347  * registers are instanced per VMID, others are instanced per
348  * me/pipe/queue combination.
349  */
350 void vi_srbm_select(struct amdgpu_device *adev,
351 		     u32 me, u32 pipe, u32 queue, u32 vmid)
352 {
353 	u32 srbm_gfx_cntl = 0;
354 	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
355 	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
356 	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
357 	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
358 	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
359 }
360 
/* Enable/disable VGA access (asic_funcs->set_vga_state); not implemented on VI. */
static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}
365 
/*
 * Read the VBIOS while the GPU is posted but the ROM path is disabled.
 * Saves the BUS/VGA/ROM control registers, temporarily enables the BIOS
 * ROM and disables VGA mode, reads the BIOS, then restores everything.
 * The save -> program -> read -> restore ordering must not be changed.
 * Returns the result of amdgpu_read_bios() (true on success).
 */
static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	/* save the current register state */
	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	/* force the ROM serial clock override while reading */
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}
410 
411 static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
412 				  u8 *bios, u32 length_bytes)
413 {
414 	u32 *dw_ptr;
415 	unsigned long flags;
416 	u32 i, length_dw;
417 
418 	if (bios == NULL)
419 		return false;
420 	if (length_bytes == 0)
421 		return false;
422 	/* APU vbios image is part of sbios image */
423 	if (adev->flags & AMD_IS_APU)
424 		return false;
425 
426 	dw_ptr = (u32 *)bios;
427 	length_dw = ALIGN(length_bytes, 4) / 4;
428 	/* take the smc lock since we are using the smc index */
429 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
430 	/* set rom index to 0 */
431 	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
432 	WREG32(mmSMC_IND_DATA_11, 0);
433 	/* set index to data for continous read */
434 	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
435 	for (i = 0; i < length_dw; i++)
436 		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
437 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
438 
439 	return true;
440 }
441 
442 static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
443 {
444 	uint32_t reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
445 	/* bit0: 0 means pf and 1 means vf */
446 	/* bit31: 0 means disable IOV and 1 means enable */
447 	if (reg & 1)
448 		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_IS_VF;
449 
450 	if (reg & 0x80000000)
451 		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
452 
453 	if (reg == 0) {
454 		if (is_virtual_machine()) /* passthrough mode exclus sr-iov mode */
455 			adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
456 	}
457 }
458 
/*
 * Extra userspace-readable registers for Topaz.
 * NOTE(review): the second initializer appears to be the 'untouched' field
 * (per vi_read_register's field accesses) — confirm order against the
 * struct amdgpu_allowed_register_entry definition.
 */
static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
	{mmGB_MACROTILE_MODE7, true},
};
462 
/*
 * Extra userspace-readable registers for Carrizo and the other VI parts
 * that share its table (see vi_read_register).
 */
static const struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
	{mmGB_TILE_MODE7, true},
	{mmGB_TILE_MODE12, true},
	{mmGB_TILE_MODE17, true},
	{mmGB_TILE_MODE23, true},
	{mmGB_MACROTILE_MODE7, true},
};
470 
/*
 * Registers userspace is allowed to read on all VI parts via
 * vi_read_register().  Entries are {reg_offset, untouched[, grbm_indexed]};
 * 'untouched' entries report 0, 'grbm_indexed' entries are read through the
 * GRBM_GFX_INDEX-aware path in vi_get_register_value().
 */
static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS, false},
	{mmGRBM_STATUS2, false},
	{mmGRBM_STATUS_SE0, false},
	{mmGRBM_STATUS_SE1, false},
	{mmGRBM_STATUS_SE2, false},
	{mmGRBM_STATUS_SE3, false},
	{mmSRBM_STATUS, false},
	{mmSRBM_STATUS2, false},
	{mmSRBM_STATUS3, false},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET, false},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET, false},
	{mmCP_STAT, false},
	{mmCP_STALLED_STAT1, false},
	{mmCP_STALLED_STAT2, false},
	{mmCP_STALLED_STAT3, false},
	{mmCP_CPF_BUSY_STAT, false},
	{mmCP_CPF_STALLED_STAT1, false},
	{mmCP_CPF_STATUS, false},
	{mmCP_CPC_BUSY_STAT, false},
	{mmCP_CPC_STALLED_STAT1, false},
	{mmCP_CPC_STATUS, false},
	{mmGB_ADDR_CONFIG, false},
	{mmMC_ARB_RAMCFG, false},
	{mmGB_TILE_MODE0, false},
	{mmGB_TILE_MODE1, false},
	{mmGB_TILE_MODE2, false},
	{mmGB_TILE_MODE3, false},
	{mmGB_TILE_MODE4, false},
	{mmGB_TILE_MODE5, false},
	{mmGB_TILE_MODE6, false},
	{mmGB_TILE_MODE7, false},
	{mmGB_TILE_MODE8, false},
	{mmGB_TILE_MODE9, false},
	{mmGB_TILE_MODE10, false},
	{mmGB_TILE_MODE11, false},
	{mmGB_TILE_MODE12, false},
	{mmGB_TILE_MODE13, false},
	{mmGB_TILE_MODE14, false},
	{mmGB_TILE_MODE15, false},
	{mmGB_TILE_MODE16, false},
	{mmGB_TILE_MODE17, false},
	{mmGB_TILE_MODE18, false},
	{mmGB_TILE_MODE19, false},
	{mmGB_TILE_MODE20, false},
	{mmGB_TILE_MODE21, false},
	{mmGB_TILE_MODE22, false},
	{mmGB_TILE_MODE23, false},
	{mmGB_TILE_MODE24, false},
	{mmGB_TILE_MODE25, false},
	{mmGB_TILE_MODE26, false},
	{mmGB_TILE_MODE27, false},
	{mmGB_TILE_MODE28, false},
	{mmGB_TILE_MODE29, false},
	{mmGB_TILE_MODE30, false},
	{mmGB_TILE_MODE31, false},
	{mmGB_MACROTILE_MODE0, false},
	{mmGB_MACROTILE_MODE1, false},
	{mmGB_MACROTILE_MODE2, false},
	{mmGB_MACROTILE_MODE3, false},
	{mmGB_MACROTILE_MODE4, false},
	{mmGB_MACROTILE_MODE5, false},
	{mmGB_MACROTILE_MODE6, false},
	{mmGB_MACROTILE_MODE7, false},
	{mmGB_MACROTILE_MODE8, false},
	{mmGB_MACROTILE_MODE9, false},
	{mmGB_MACROTILE_MODE10, false},
	{mmGB_MACROTILE_MODE11, false},
	{mmGB_MACROTILE_MODE12, false},
	{mmGB_MACROTILE_MODE13, false},
	{mmGB_MACROTILE_MODE14, false},
	{mmGB_MACROTILE_MODE15, false},
	{mmCC_RB_BACKEND_DISABLE, false, true},
	{mmGC_USER_RB_BACKEND_DISABLE, false, true},
	{mmGB_BACKEND_MAP, false, false},
	{mmPA_SC_RASTER_CONFIG, false, true},
	{mmPA_SC_RASTER_CONFIG_1, false, true},
};
549 
/*
 * Return the value of a register for the userspace read-register interface.
 *
 * @indexed:    true if the register is instanced per SE/SH (GRBM indexed)
 * @se_num:     shader engine index, 0xffffffff for "broadcast/don't care"
 * @sh_num:     shader array index, 0xffffffff for "broadcast/don't care"
 * @reg_offset: register offset to read
 *
 * For several config registers the driver returns values cached in
 * adev->gfx.config instead of touching hardware; everything else is read
 * live, with GRBM_GFX_INDEX selected under grbm_idx_mutex for indexed regs.
 */
static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		uint32_t val;
		/* 0xffffffff means broadcast; use instance 0 of the cache */
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;

		/* these four are served from the cached rb_config */
		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		/* live read: select the requested SE/SH, read, then restore broadcast */
		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			/* tile modes are cached as a contiguous array */
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			/* macrotile modes likewise come from the cache */
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			return RREG32(reg_offset);
		}
	}
}
645 
646 static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
647 			    u32 sh_num, u32 reg_offset, u32 *value)
648 {
649 	const struct amdgpu_allowed_register_entry *asic_register_table = NULL;
650 	const struct amdgpu_allowed_register_entry *asic_register_entry;
651 	uint32_t size, i;
652 
653 	*value = 0;
654 	switch (adev->asic_type) {
655 	case CHIP_TOPAZ:
656 		asic_register_table = tonga_allowed_read_registers;
657 		size = ARRAY_SIZE(tonga_allowed_read_registers);
658 		break;
659 	case CHIP_FIJI:
660 	case CHIP_TONGA:
661 	case CHIP_POLARIS11:
662 	case CHIP_POLARIS10:
663 	case CHIP_POLARIS12:
664 	case CHIP_CARRIZO:
665 	case CHIP_STONEY:
666 		asic_register_table = cz_allowed_read_registers;
667 		size = ARRAY_SIZE(cz_allowed_read_registers);
668 		break;
669 	default:
670 		return -EINVAL;
671 	}
672 
673 	if (asic_register_table) {
674 		for (i = 0; i < size; i++) {
675 			asic_register_entry = asic_register_table + i;
676 			if (reg_offset != asic_register_entry->reg_offset)
677 				continue;
678 			if (!asic_register_entry->untouched)
679 				*value = vi_get_register_value(adev,
680 							       asic_register_entry->grbm_indexed,
681 							       se_num, sh_num, reg_offset);
682 			return 0;
683 		}
684 	}
685 
686 	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
687 		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
688 			continue;
689 
690 		if (!vi_allowed_read_registers[i].untouched)
691 			*value = vi_get_register_value(adev,
692 						       vi_allowed_read_registers[i].grbm_indexed,
693 						       se_num, sh_num, reg_offset);
694 		return 0;
695 	}
696 	return -EINVAL;
697 }
698 
699 static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
700 {
701 	u32 i;
702 
703 	dev_info(adev->dev, "GPU pci config reset\n");
704 
705 	/* disable BM */
706 	pci_clear_master(adev->pdev);
707 	/* reset */
708 	amdgpu_pci_config_reset(adev);
709 
710 	udelay(100);
711 
712 	/* wait for asic to come out of reset */
713 	for (i = 0; i < adev->usec_timeout; i++) {
714 		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
715 			/* enable BM */
716 			pci_set_master(adev->pdev);
717 			return 0;
718 		}
719 		udelay(1);
720 	}
721 	return -EINVAL;
722 }
723 
724 /**
725  * vi_asic_reset - soft reset GPU
726  *
727  * @adev: amdgpu_device pointer
728  *
729  * Look up which blocks are hung and attempt
730  * to reset them.
731  * Returns 0 for success.
732  */
733 static int vi_asic_reset(struct amdgpu_device *adev)
734 {
735 	int r;
736 
737 	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
738 
739 	r = vi_gpu_pci_config_reset(adev);
740 
741 	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
742 
743 	return r;
744 }
745 
/*
 * Program one UVD clock (VCLK or DCLK) to @clock (in the units expected by
 * amdgpu_atombios_get_clock_dividers) and wait for the status register to
 * report the clock as settled.
 *
 * @cntl_reg:   SMC control register (ixCG_VCLK_CNTL or ixCG_DCLK_CNTL)
 * @status_reg: matching status register
 *
 * Returns 0 on success, atombios error, or -ETIMEDOUT if the status bit
 * never asserts within ~1s (100 x 10ms polls).
 */
static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	/* let the vbios compute the divider for the requested clock */
	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	/* NOTE(review): CG_DCLK_CNTL field masks are used for both the VCLK
	 * and DCLK control registers — presumably the bit layout is shared;
	 * confirm against the smu register headers. */
	tmp = RREG32_SMC(cntl_reg);
	tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
		CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	/* poll until the status bit reports the new clock is active */
	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}
775 
776 static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
777 {
778 	int r;
779 
780 	r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
781 	if (r)
782 		return r;
783 
784 	r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
785 
786 	return 0;
787 }
788 
/* Set VCE clocks (asic_funcs->set_vce_clocks); not implemented on VI yet. */
static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}
795 
796 static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
797 {
798 	if (pci_is_root_bus(adev->pdev->bus))
799 		return;
800 
801 	if (amdgpu_pcie_gen2 == 0)
802 		return;
803 
804 	if (adev->flags & AMD_IS_APU)
805 		return;
806 
807 	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
808 					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
809 		return;
810 
811 	/* todo */
812 }
813 
/* Program PCIe ASPM; no-op when disabled via module param, otherwise still unimplemented. */
static void vi_program_aspm(struct amdgpu_device *adev)
{

	if (amdgpu_aspm == 0)
		return;

	/* todo */
}
822 
823 static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
824 					bool enable)
825 {
826 	u32 tmp;
827 
828 	/* not necessary on CZ */
829 	if (adev->flags & AMD_IS_APU)
830 		return;
831 
832 	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
833 	if (enable)
834 		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
835 	else
836 		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);
837 
838 	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
839 }
840 
841 #define ATI_REV_ID_FUSE_MACRO__ADDRESS      0xC0014044
842 #define ATI_REV_ID_FUSE_MACRO__SHIFT        9
843 #define ATI_REV_ID_FUSE_MACRO__MASK         0x00001E00
844 
845 static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
846 {
847 	if (adev->flags & AMD_IS_APU)
848 		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
849 			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
850 	else
851 		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
852 			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
853 }
854 
/* ASIC-level callbacks for the VI family, installed in vi_common_early_init(). */
static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.detect_hw_virtualization = vi_detect_hw_virtualization,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
};
867 
868 static int vi_common_early_init(void *handle)
869 {
870 	bool smc_enabled = false;
871 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
872 
873 	if (adev->flags & AMD_IS_APU) {
874 		adev->smc_rreg = &cz_smc_rreg;
875 		adev->smc_wreg = &cz_smc_wreg;
876 	} else {
877 		adev->smc_rreg = &vi_smc_rreg;
878 		adev->smc_wreg = &vi_smc_wreg;
879 	}
880 	adev->pcie_rreg = &vi_pcie_rreg;
881 	adev->pcie_wreg = &vi_pcie_wreg;
882 	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
883 	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
884 	adev->didt_rreg = &vi_didt_rreg;
885 	adev->didt_wreg = &vi_didt_wreg;
886 	adev->gc_cac_rreg = &vi_gc_cac_rreg;
887 	adev->gc_cac_wreg = &vi_gc_cac_wreg;
888 
889 	adev->asic_funcs = &vi_asic_funcs;
890 
891 	if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) &&
892 		(amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
893 		smc_enabled = true;
894 
895 	adev->rev_id = vi_get_rev_id(adev);
896 	adev->external_rev_id = 0xFF;
897 	switch (adev->asic_type) {
898 	case CHIP_TOPAZ:
899 		adev->cg_flags = 0;
900 		adev->pg_flags = 0;
901 		adev->external_rev_id = 0x1;
902 		break;
903 	case CHIP_FIJI:
904 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
905 			AMD_CG_SUPPORT_GFX_MGLS |
906 			AMD_CG_SUPPORT_GFX_RLC_LS |
907 			AMD_CG_SUPPORT_GFX_CP_LS |
908 			AMD_CG_SUPPORT_GFX_CGTS |
909 			AMD_CG_SUPPORT_GFX_CGTS_LS |
910 			AMD_CG_SUPPORT_GFX_CGCG |
911 			AMD_CG_SUPPORT_GFX_CGLS |
912 			AMD_CG_SUPPORT_SDMA_MGCG |
913 			AMD_CG_SUPPORT_SDMA_LS |
914 			AMD_CG_SUPPORT_BIF_LS |
915 			AMD_CG_SUPPORT_HDP_MGCG |
916 			AMD_CG_SUPPORT_HDP_LS |
917 			AMD_CG_SUPPORT_ROM_MGCG |
918 			AMD_CG_SUPPORT_MC_MGCG |
919 			AMD_CG_SUPPORT_MC_LS |
920 			AMD_CG_SUPPORT_UVD_MGCG;
921 		adev->pg_flags = 0;
922 		adev->external_rev_id = adev->rev_id + 0x3c;
923 		break;
924 	case CHIP_TONGA:
925 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
926 			AMD_CG_SUPPORT_GFX_CGCG |
927 			AMD_CG_SUPPORT_GFX_CGLS |
928 			AMD_CG_SUPPORT_SDMA_MGCG |
929 			AMD_CG_SUPPORT_SDMA_LS |
930 			AMD_CG_SUPPORT_BIF_LS |
931 			AMD_CG_SUPPORT_HDP_MGCG |
932 			AMD_CG_SUPPORT_HDP_LS |
933 			AMD_CG_SUPPORT_ROM_MGCG |
934 			AMD_CG_SUPPORT_MC_MGCG |
935 			AMD_CG_SUPPORT_MC_LS |
936 			AMD_CG_SUPPORT_DRM_LS |
937 			AMD_CG_SUPPORT_UVD_MGCG;
938 		adev->pg_flags = 0;
939 		adev->external_rev_id = adev->rev_id + 0x14;
940 		break;
941 	case CHIP_POLARIS11:
942 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
943 			AMD_CG_SUPPORT_GFX_RLC_LS |
944 			AMD_CG_SUPPORT_GFX_CP_LS |
945 			AMD_CG_SUPPORT_GFX_CGCG |
946 			AMD_CG_SUPPORT_GFX_CGLS |
947 			AMD_CG_SUPPORT_GFX_3D_CGCG |
948 			AMD_CG_SUPPORT_GFX_3D_CGLS |
949 			AMD_CG_SUPPORT_SDMA_MGCG |
950 			AMD_CG_SUPPORT_SDMA_LS |
951 			AMD_CG_SUPPORT_BIF_MGCG |
952 			AMD_CG_SUPPORT_BIF_LS |
953 			AMD_CG_SUPPORT_HDP_MGCG |
954 			AMD_CG_SUPPORT_HDP_LS |
955 			AMD_CG_SUPPORT_ROM_MGCG |
956 			AMD_CG_SUPPORT_MC_MGCG |
957 			AMD_CG_SUPPORT_MC_LS |
958 			AMD_CG_SUPPORT_DRM_LS |
959 			AMD_CG_SUPPORT_UVD_MGCG |
960 			AMD_CG_SUPPORT_VCE_MGCG;
961 		adev->pg_flags = 0;
962 		adev->external_rev_id = adev->rev_id + 0x5A;
963 		break;
964 	case CHIP_POLARIS10:
965 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
966 			AMD_CG_SUPPORT_GFX_RLC_LS |
967 			AMD_CG_SUPPORT_GFX_CP_LS |
968 			AMD_CG_SUPPORT_GFX_CGCG |
969 			AMD_CG_SUPPORT_GFX_CGLS |
970 			AMD_CG_SUPPORT_GFX_3D_CGCG |
971 			AMD_CG_SUPPORT_GFX_3D_CGLS |
972 			AMD_CG_SUPPORT_SDMA_MGCG |
973 			AMD_CG_SUPPORT_SDMA_LS |
974 			AMD_CG_SUPPORT_BIF_MGCG |
975 			AMD_CG_SUPPORT_BIF_LS |
976 			AMD_CG_SUPPORT_HDP_MGCG |
977 			AMD_CG_SUPPORT_HDP_LS |
978 			AMD_CG_SUPPORT_ROM_MGCG |
979 			AMD_CG_SUPPORT_MC_MGCG |
980 			AMD_CG_SUPPORT_MC_LS |
981 			AMD_CG_SUPPORT_DRM_LS |
982 			AMD_CG_SUPPORT_UVD_MGCG |
983 			AMD_CG_SUPPORT_VCE_MGCG;
984 		adev->pg_flags = 0;
985 		adev->external_rev_id = adev->rev_id + 0x50;
986 		break;
987 	case CHIP_POLARIS12:
988 		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
989 		adev->pg_flags = 0;
990 		adev->external_rev_id = adev->rev_id + 0x64;
991 		break;
992 	case CHIP_CARRIZO:
993 		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
994 			AMD_CG_SUPPORT_GFX_MGCG |
995 			AMD_CG_SUPPORT_GFX_MGLS |
996 			AMD_CG_SUPPORT_GFX_RLC_LS |
997 			AMD_CG_SUPPORT_GFX_CP_LS |
998 			AMD_CG_SUPPORT_GFX_CGTS |
999 			AMD_CG_SUPPORT_GFX_MGLS |
1000 			AMD_CG_SUPPORT_GFX_CGTS_LS |
1001 			AMD_CG_SUPPORT_GFX_CGCG |
1002 			AMD_CG_SUPPORT_GFX_CGLS |
1003 			AMD_CG_SUPPORT_BIF_LS |
1004 			AMD_CG_SUPPORT_HDP_MGCG |
1005 			AMD_CG_SUPPORT_HDP_LS |
1006 			AMD_CG_SUPPORT_SDMA_MGCG |
1007 			AMD_CG_SUPPORT_SDMA_LS |
1008 			AMD_CG_SUPPORT_VCE_MGCG;
1009 		/* rev0 hardware requires workarounds to support PG */
1010 		adev->pg_flags = 0;
1011 		if (adev->rev_id != 0x00) {
1012 			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
1013 				AMD_PG_SUPPORT_GFX_SMG |
1014 				AMD_PG_SUPPORT_GFX_PIPELINE |
1015 				AMD_PG_SUPPORT_CP |
1016 				AMD_PG_SUPPORT_UVD |
1017 				AMD_PG_SUPPORT_VCE;
1018 		}
1019 		adev->external_rev_id = adev->rev_id + 0x1;
1020 		break;
1021 	case CHIP_STONEY:
1022 		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
1023 			AMD_CG_SUPPORT_GFX_MGCG |
1024 			AMD_CG_SUPPORT_GFX_MGLS |
1025 			AMD_CG_SUPPORT_GFX_RLC_LS |
1026 			AMD_CG_SUPPORT_GFX_CP_LS |
1027 			AMD_CG_SUPPORT_GFX_CGTS |
1028 			AMD_CG_SUPPORT_GFX_MGLS |
1029 			AMD_CG_SUPPORT_GFX_CGTS_LS |
1030 			AMD_CG_SUPPORT_GFX_CGCG |
1031 			AMD_CG_SUPPORT_GFX_CGLS |
1032 			AMD_CG_SUPPORT_BIF_LS |
1033 			AMD_CG_SUPPORT_HDP_MGCG |
1034 			AMD_CG_SUPPORT_HDP_LS |
1035 			AMD_CG_SUPPORT_SDMA_MGCG |
1036 			AMD_CG_SUPPORT_SDMA_LS |
1037 			AMD_CG_SUPPORT_VCE_MGCG;
1038 		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
1039 			AMD_PG_SUPPORT_GFX_SMG |
1040 			AMD_PG_SUPPORT_GFX_PIPELINE |
1041 			AMD_PG_SUPPORT_CP |
1042 			AMD_PG_SUPPORT_UVD |
1043 			AMD_PG_SUPPORT_VCE;
1044 		adev->external_rev_id = adev->rev_id + 0x61;
1045 		break;
1046 	default:
1047 		/* FIXME: not supported yet */
1048 		return -EINVAL;
1049 	}
1050 
1051 	/* in early init stage, vbios code won't work */
1052 	if (adev->asic_funcs->detect_hw_virtualization)
1053 		amdgpu_asic_detect_hw_virtualization(adev);
1054 
1055 	if (amdgpu_smc_load_fw && smc_enabled)
1056 		adev->firmware.smu_load = true;
1057 
1058 	amdgpu_get_pcie_info(adev);
1059 
1060 	return 0;
1061 }
1062 
/* No software state to construct for the VI common block. */
static int vi_common_sw_init(void *handle)
{
	return 0;
}
1067 
/* Nothing was allocated in sw_init, so there is nothing to tear down. */
static int vi_common_sw_fini(void *handle)
{
	return 0;
}
1072 
/* vi_common_hw_init - hardware bring-up for the VI common IP block.
 *
 * Programs the per-ASIC golden register settings, enables the PCIe
 * gen2/3 link and ASPM, then opens the doorbell aperture.  The call
 * order matters: registers are programmed before the link/doorbells
 * are enabled.  Always returns 0.
 */
static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}
1088 
/* vi_common_hw_fini - hardware teardown for the VI common IP block.
 *
 * Closes the doorbell aperture that vi_common_hw_init() opened.
 * Always returns 0.
 */
static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	return 0;
}
1098 
/* Suspend is just the normal hw teardown path for this block. */
static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;

	return vi_common_hw_fini(adev);
}
1105 
/* Resume is just the normal hw bring-up path for this block. */
static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = handle;

	return vi_common_hw_init(adev);
}
1112 
/* The common block has no busy state to report; it is always idle. */
static bool vi_common_is_idle(void *handle)
{
	return true;
}
1117 
/* Nothing to wait for; the common block is always idle. */
static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}
1122 
/* No soft-reset support needed for the common block; report success. */
static int vi_common_soft_reset(void *handle)
{
	return 0;
}
1127 
1128 static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
1129 						   bool enable)
1130 {
1131 	uint32_t temp, data;
1132 
1133 	temp = data = RREG32_PCIE(ixPCIE_CNTL2);
1134 
1135 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
1136 		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
1137 				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
1138 				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
1139 	else
1140 		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
1141 				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
1142 				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
1143 
1144 	if (temp != data)
1145 		WREG32_PCIE(ixPCIE_CNTL2, data);
1146 }
1147 
1148 static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
1149 						    bool enable)
1150 {
1151 	uint32_t temp, data;
1152 
1153 	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);
1154 
1155 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
1156 		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
1157 	else
1158 		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
1159 
1160 	if (temp != data)
1161 		WREG32(mmHDP_HOST_PATH_CNTL, data);
1162 }
1163 
1164 static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
1165 				      bool enable)
1166 {
1167 	uint32_t temp, data;
1168 
1169 	temp = data = RREG32(mmHDP_MEM_POWER_LS);
1170 
1171 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
1172 		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1173 	else
1174 		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1175 
1176 	if (temp != data)
1177 		WREG32(mmHDP_MEM_POWER_LS, data);
1178 }
1179 
1180 static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
1181 						    bool enable)
1182 {
1183 	uint32_t temp, data;
1184 
1185 	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
1186 
1187 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
1188 		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1189 				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
1190 	else
1191 		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1192 				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
1193 
1194 	if (temp != data)
1195 		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
1196 }
1197 
1198 static int vi_common_set_clockgating_state_by_smu(void *handle,
1199 					   enum amd_clockgating_state state)
1200 {
1201 	uint32_t msg_id, pp_state = 0;
1202 	uint32_t pp_support_state = 0;
1203 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1204 	void *pp_handle = adev->powerplay.pp_handle;
1205 
1206 	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
1207 		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
1208 			pp_support_state = AMD_CG_SUPPORT_MC_LS;
1209 			pp_state = PP_STATE_LS;
1210 		}
1211 		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
1212 			pp_support_state |= AMD_CG_SUPPORT_MC_MGCG;
1213 			pp_state |= PP_STATE_CG;
1214 		}
1215 		if (state == AMD_CG_STATE_UNGATE)
1216 			pp_state = 0;
1217 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1218 			       PP_BLOCK_SYS_MC,
1219 			       pp_support_state,
1220 			       pp_state);
1221 		amd_set_clockgating_by_smu(pp_handle, msg_id);
1222 	}
1223 
1224 	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
1225 		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
1226 			pp_support_state = AMD_CG_SUPPORT_SDMA_LS;
1227 			pp_state = PP_STATE_LS;
1228 		}
1229 		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
1230 			pp_support_state |= AMD_CG_SUPPORT_SDMA_MGCG;
1231 			pp_state |= PP_STATE_CG;
1232 		}
1233 		if (state == AMD_CG_STATE_UNGATE)
1234 			pp_state = 0;
1235 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1236 			       PP_BLOCK_SYS_SDMA,
1237 			       pp_support_state,
1238 			       pp_state);
1239 		amd_set_clockgating_by_smu(pp_handle, msg_id);
1240 	}
1241 
1242 	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
1243 		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
1244 			pp_support_state = AMD_CG_SUPPORT_HDP_LS;
1245 			pp_state = PP_STATE_LS;
1246 		}
1247 		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
1248 			pp_support_state |= AMD_CG_SUPPORT_HDP_MGCG;
1249 			pp_state |= PP_STATE_CG;
1250 		}
1251 		if (state == AMD_CG_STATE_UNGATE)
1252 			pp_state = 0;
1253 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1254 			       PP_BLOCK_SYS_HDP,
1255 			       pp_support_state,
1256 			       pp_state);
1257 		amd_set_clockgating_by_smu(pp_handle, msg_id);
1258 	}
1259 
1260 
1261 	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
1262 		if (state == AMD_CG_STATE_UNGATE)
1263 			pp_state = 0;
1264 		else
1265 			pp_state = PP_STATE_LS;
1266 
1267 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1268 			       PP_BLOCK_SYS_BIF,
1269 			       PP_STATE_SUPPORT_LS,
1270 			        pp_state);
1271 		amd_set_clockgating_by_smu(pp_handle, msg_id);
1272 	}
1273 	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
1274 		if (state == AMD_CG_STATE_UNGATE)
1275 			pp_state = 0;
1276 		else
1277 			pp_state = PP_STATE_CG;
1278 
1279 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1280 			       PP_BLOCK_SYS_BIF,
1281 			       PP_STATE_SUPPORT_CG,
1282 			       pp_state);
1283 		amd_set_clockgating_by_smu(pp_handle, msg_id);
1284 	}
1285 
1286 	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
1287 
1288 		if (state == AMD_CG_STATE_UNGATE)
1289 			pp_state = 0;
1290 		else
1291 			pp_state = PP_STATE_LS;
1292 
1293 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1294 			       PP_BLOCK_SYS_DRM,
1295 			       PP_STATE_SUPPORT_LS,
1296 			       pp_state);
1297 		amd_set_clockgating_by_smu(pp_handle, msg_id);
1298 	}
1299 
1300 	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
1301 
1302 		if (state == AMD_CG_STATE_UNGATE)
1303 			pp_state = 0;
1304 		else
1305 			pp_state = PP_STATE_CG;
1306 
1307 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1308 			       PP_BLOCK_SYS_ROM,
1309 			       PP_STATE_SUPPORT_CG,
1310 			       pp_state);
1311 		amd_set_clockgating_by_smu(pp_handle, msg_id);
1312 	}
1313 	return 0;
1314 }
1315 
1316 static int vi_common_set_clockgating_state(void *handle,
1317 					   enum amd_clockgating_state state)
1318 {
1319 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1320 
1321 	switch (adev->asic_type) {
1322 	case CHIP_FIJI:
1323 		vi_update_bif_medium_grain_light_sleep(adev,
1324 				state == AMD_CG_STATE_GATE ? true : false);
1325 		vi_update_hdp_medium_grain_clock_gating(adev,
1326 				state == AMD_CG_STATE_GATE ? true : false);
1327 		vi_update_hdp_light_sleep(adev,
1328 				state == AMD_CG_STATE_GATE ? true : false);
1329 		vi_update_rom_medium_grain_clock_gating(adev,
1330 				state == AMD_CG_STATE_GATE ? true : false);
1331 		break;
1332 	case CHIP_CARRIZO:
1333 	case CHIP_STONEY:
1334 		vi_update_bif_medium_grain_light_sleep(adev,
1335 				state == AMD_CG_STATE_GATE ? true : false);
1336 		vi_update_hdp_medium_grain_clock_gating(adev,
1337 				state == AMD_CG_STATE_GATE ? true : false);
1338 		vi_update_hdp_light_sleep(adev,
1339 				state == AMD_CG_STATE_GATE ? true : false);
1340 		break;
1341 	case CHIP_TONGA:
1342 	case CHIP_POLARIS10:
1343 	case CHIP_POLARIS11:
1344 	case CHIP_POLARIS12:
1345 		vi_common_set_clockgating_state_by_smu(adev, state);
1346 	default:
1347 		break;
1348 	}
1349 	return 0;
1350 }
1351 
1352 static int vi_common_set_powergating_state(void *handle,
1353 					    enum amd_powergating_state state)
1354 {
1355 	return 0;
1356 }
1357 
/* amd_ip_funcs vtable wiring the vi_common_* handlers into the amdgpu
 * IP block framework.  late_init is intentionally unimplemented. */
static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = NULL,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
};
1374 
/* IP block version descriptor (COMMON v1.0.0) registered first for
 * every VI-family ASIC in vi_set_ip_blocks() below. */
static const struct amdgpu_ip_block_version vi_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vi_common_ip_funcs,
};
1383 
/**
 * vi_set_ip_blocks - register the IP blocks for a VI-family ASIC
 * @adev: amdgpu device
 *
 * Adds the common, GMC, IH, powerplay, display (virtual when
 * enable_virtual_display is set, otherwise the real DCE block),
 * GFX, SDMA and — where the ASIC has them — UVD/VCE/ACP blocks,
 * in the order they must be initialized, based on adev->asic_type.
 *
 * Returns 0 on success, -EINVAL for an unsupported ASIC type.
 */
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_ip_block_add(adev, &iceland_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v2_4_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v10_1_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
		break;
	case CHIP_TONGA:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v10_0_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v5_0_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v11_2_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
	case CHIP_CARRIZO:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	case CHIP_STONEY:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}
1483