xref: /linux/drivers/gpu/drm/amd/amdgpu/vi.c (revision e08a1d97d33e2ac05cd368b955f9fdc2823f15fd)
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "vi_dpm.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#include "amdgpu_powerplay.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
#include "dce_virtual.h"

MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");

/*
 * Indirect register accessors
 */
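/*
 * Each accessor below implements the usual index/data pair: the target
 * offset is written to an INDEX register and the payload moves through
 * the matching DATA register, with a per-pair spinlock keeping
 * concurrent users from interleaving their index/data sequences.  The
 * PCIe accessors additionally read the INDEX register back to post the
 * index write before touching DATA.
 */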
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	r = RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	WREG32(mmPCIE_DATA, v);
	(void)RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_11, (reg));
	r = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_11, (reg));
	WREG32(mmSMC_IND_DATA_11, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX                                                      0x180
#define mmMP0PUB_IND_DATA                                                       0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}

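/*
 * The golden register tables below are consumed by
 * amdgpu_program_register_sequence() as {offset, AND mask, OR value}
 * triplets: each register is read, the bits in the AND mask are
 * cleared, the OR value is applied and the result is written back.
 */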
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_program_register_sequence(adev,
						 iceland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}

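/*
 * A typical caller wraps the select/program/restore sequence in
 * adev->srbm_mutex, e.g.:
 *
 *	mutex_lock(&adev->srbm_mutex);
 *	vi_srbm_select(adev, me, pipe, queue, vmid);
 *	... program the instanced registers ...
 *	vi_srbm_select(adev, 0, 0, 0, 0);
 *	mutex_unlock(&adev->srbm_mutex);
 */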
/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances.  Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		     u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}

static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}

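/*
 * The SMC ROM block behaves like the other index/data pairs except
 * that ROM_DATA auto-increments the index on every read; once
 * ROM_INDEX points at the start of the image, the read loop below can
 * stream the whole vbios out dword by dword without rewriting the
 * index.
 */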
static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* the APU vbios image is part of the system bios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_11, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}

static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
	/* bit0: 0 means pf and 1 means vf */
	/* bit31: 0 means disable IOV and 1 means enable */
	if (reg & 1)
		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (reg == 0) {
		if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
			adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}

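/*
 * Allowed-register entries are {reg_offset, untouched, grbm_indexed}:
 * "untouched" registers are reported as 0 without touching the
 * hardware, and "grbm_indexed" registers are read under GRBM_GFX_INDEX
 * (or served from the cached gfx config, see vi_get_register_value()).
 */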
static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
	{mmGB_MACROTILE_MODE7, true},
};

static const struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
	{mmGB_TILE_MODE7, true},
	{mmGB_TILE_MODE12, true},
	{mmGB_TILE_MODE17, true},
	{mmGB_TILE_MODE23, true},
	{mmGB_MACROTILE_MODE7, true},
};

static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS, false},
	{mmGRBM_STATUS2, false},
	{mmGRBM_STATUS_SE0, false},
	{mmGRBM_STATUS_SE1, false},
	{mmGRBM_STATUS_SE2, false},
	{mmGRBM_STATUS_SE3, false},
	{mmSRBM_STATUS, false},
	{mmSRBM_STATUS2, false},
	{mmSRBM_STATUS3, false},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET, false},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET, false},
	{mmCP_STAT, false},
	{mmCP_STALLED_STAT1, false},
	{mmCP_STALLED_STAT2, false},
	{mmCP_STALLED_STAT3, false},
	{mmCP_CPF_BUSY_STAT, false},
	{mmCP_CPF_STALLED_STAT1, false},
	{mmCP_CPF_STATUS, false},
	{mmCP_CPC_BUSY_STAT, false},
	{mmCP_CPC_STALLED_STAT1, false},
	{mmCP_CPC_STATUS, false},
	{mmGB_ADDR_CONFIG, false},
	{mmMC_ARB_RAMCFG, false},
	{mmGB_TILE_MODE0, false},
	{mmGB_TILE_MODE1, false},
	{mmGB_TILE_MODE2, false},
	{mmGB_TILE_MODE3, false},
	{mmGB_TILE_MODE4, false},
	{mmGB_TILE_MODE5, false},
	{mmGB_TILE_MODE6, false},
	{mmGB_TILE_MODE7, false},
	{mmGB_TILE_MODE8, false},
	{mmGB_TILE_MODE9, false},
	{mmGB_TILE_MODE10, false},
	{mmGB_TILE_MODE11, false},
	{mmGB_TILE_MODE12, false},
	{mmGB_TILE_MODE13, false},
	{mmGB_TILE_MODE14, false},
	{mmGB_TILE_MODE15, false},
	{mmGB_TILE_MODE16, false},
	{mmGB_TILE_MODE17, false},
	{mmGB_TILE_MODE18, false},
	{mmGB_TILE_MODE19, false},
	{mmGB_TILE_MODE20, false},
	{mmGB_TILE_MODE21, false},
	{mmGB_TILE_MODE22, false},
	{mmGB_TILE_MODE23, false},
	{mmGB_TILE_MODE24, false},
	{mmGB_TILE_MODE25, false},
	{mmGB_TILE_MODE26, false},
	{mmGB_TILE_MODE27, false},
	{mmGB_TILE_MODE28, false},
	{mmGB_TILE_MODE29, false},
	{mmGB_TILE_MODE30, false},
	{mmGB_TILE_MODE31, false},
	{mmGB_MACROTILE_MODE0, false},
	{mmGB_MACROTILE_MODE1, false},
	{mmGB_MACROTILE_MODE2, false},
	{mmGB_MACROTILE_MODE3, false},
	{mmGB_MACROTILE_MODE4, false},
	{mmGB_MACROTILE_MODE5, false},
	{mmGB_MACROTILE_MODE6, false},
	{mmGB_MACROTILE_MODE7, false},
	{mmGB_MACROTILE_MODE8, false},
	{mmGB_MACROTILE_MODE9, false},
	{mmGB_MACROTILE_MODE10, false},
	{mmGB_MACROTILE_MODE11, false},
	{mmGB_MACROTILE_MODE12, false},
	{mmGB_MACROTILE_MODE13, false},
	{mmGB_MACROTILE_MODE14, false},
	{mmGB_MACROTILE_MODE15, false},
	{mmCC_RB_BACKEND_DISABLE, false, true},
	{mmGC_USER_RB_BACKEND_DISABLE, false, true},
	{mmGB_BACKEND_MAP, false, false},
	{mmPA_SC_RASTER_CONFIG, false, true},
	{mmPA_SC_RASTER_CONFIG_1, false, true},
};

static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		uint32_t val;
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;

		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			return RREG32(reg_offset);
		}
	}
}

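/*
 * vi_read_register() backs the AMDGPU_INFO_READ_MMR_REG userspace
 * query: only registers present on one of the allow-lists above may
 * be read.
 */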
static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	const struct amdgpu_allowed_register_entry *asic_register_table = NULL;
	const struct amdgpu_allowed_register_entry *asic_register_entry;
	uint32_t size, i;

	*value = 0;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		asic_register_table = tonga_allowed_read_registers;
		size = ARRAY_SIZE(tonga_allowed_read_registers);
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		asic_register_table = cz_allowed_read_registers;
		size = ARRAY_SIZE(cz_allowed_read_registers);
		break;
	default:
		return -EINVAL;
	}

	if (asic_register_table) {
		for (i = 0; i < size; i++) {
			asic_register_entry = asic_register_table + i;
			if (reg_offset != asic_register_entry->reg_offset)
				continue;
			if (!asic_register_entry->untouched)
				*value = vi_get_register_value(adev,
							       asic_register_entry->grbm_indexed,
							       se_num, sh_num, reg_offset);
			return 0;
		}
	}

	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		if (!vi_allowed_read_registers[i].untouched)
			*value = vi_get_register_value(adev,
						       vi_allowed_read_registers[i].grbm_indexed,
						       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			return 0;
		}
		udelay(1);
	}
	return -EINVAL;
}

/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	r = vi_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return r;
}

static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);
	tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
		CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);

	return r;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}

static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void vi_program_aspm(struct amdgpu_device *adev)
{

	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}

#define ATI_REV_ID_FUSE_MACRO__ADDRESS      0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT        9
#define ATI_REV_ID_FUSE_MACRO__MASK         0x00001E00

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}

static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.detect_hw_virtualization = vi_detect_hw_virtualization,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
};

static int vi_common_early_init(void *handle)
{
	bool smc_enabled = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
	adev->gc_cac_rreg = &vi_gc_cac_rreg;
	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) &&
		(amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
		smc_enabled = true;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_UVD;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		if (adev->rev_id != 0x00) {
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
				AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_UVD |
				AMD_PG_SUPPORT_VCE;
		}
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_GFX_SMG |
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	/* in early init stage, vbios code won't work */
	if (adev->asic_funcs->detect_hw_virtualization)
		amdgpu_asic_detect_hw_virtualization(adev);

	if (amdgpu_smc_load_fw && smc_enabled)
		adev->firmware.smu_load = true;

	amdgpu_get_pcie_info(adev);

	return 0;
}

static int vi_common_sw_init(void *handle)
{
	return 0;
}

static int vi_common_sw_fini(void *handle)
{
	return 0;
}

static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}

static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	return 0;
}

static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}

static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
						   bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}

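/*
 * PP_CG_MSG_ID() packs the target group, block, supported feature set
 * and the requested CG/LS state into a single message word which the
 * powerplay layer decodes in amd_set_clockgating_by_smu().
 */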
static int vi_common_set_clockgating_state_by_smu(void *handle,
					   enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	void *pp_handle = adev->powerplay.pp_handle;

	if (state == AMD_CG_STATE_UNGATE)
		pp_state = 0;
	else
		pp_state = PP_STATE_CG | PP_STATE_LS;

	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
		       PP_BLOCK_SYS_MC,
		       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
		       pp_state);
	amd_set_clockgating_by_smu(pp_handle, msg_id);

	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
		       PP_BLOCK_SYS_SDMA,
		       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
		       pp_state);
	amd_set_clockgating_by_smu(pp_handle, msg_id);

	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
		       PP_BLOCK_SYS_HDP,
		       PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
		       pp_state);
	amd_set_clockgating_by_smu(pp_handle, msg_id);

	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
		       PP_BLOCK_SYS_BIF,
		       PP_STATE_SUPPORT_LS,
		       pp_state);
	amd_set_clockgating_by_smu(pp_handle, msg_id);

	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
		       PP_BLOCK_SYS_BIF,
		       PP_STATE_SUPPORT_CG,
		       pp_state);
	amd_set_clockgating_by_smu(pp_handle, msg_id);

	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
		       PP_BLOCK_SYS_DRM,
		       PP_STATE_SUPPORT_LS,
		       pp_state);
	amd_set_clockgating_by_smu(pp_handle, msg_id);

	msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
		       PP_BLOCK_SYS_ROM,
		       PP_STATE_SUPPORT_CG,
		       pp_state);
	amd_set_clockgating_by_smu(pp_handle, msg_id);

	return 0;
}

static int vi_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		vi_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
		vi_common_set_clockgating_state_by_smu(adev, state);
		break;
	default:
		break;
	}
	return 0;
}

static int vi_common_set_powergating_state(void *handle,
					    enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = NULL,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
};

static const struct amdgpu_ip_block_version vi_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vi_common_ip_funcs,
};

int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_ip_block_add(adev, &iceland_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v2_4_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v10_1_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
		break;
	case CHIP_TONGA:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v10_0_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v5_0_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v11_2_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
	case CHIP_CARRIZO:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	case CHIP_STONEY:
		amdgpu_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
		else
			amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block);
		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}