xref: /linux/drivers/gpu/drm/amd/amdgpu/vi.c (revision 3eb514866f20c5eb74637279774b6d73b855480a)
1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/pci.h>
25 #include <linux/slab.h>
26 
27 #include "amdgpu.h"
28 #include "amdgpu_atombios.h"
29 #include "amdgpu_ih.h"
30 #include "amdgpu_uvd.h"
31 #include "amdgpu_vce.h"
32 #include "amdgpu_ucode.h"
33 #include "atom.h"
34 #include "amd_pcie.h"
35 
36 #include "gmc/gmc_8_1_d.h"
37 #include "gmc/gmc_8_1_sh_mask.h"
38 
39 #include "oss/oss_3_0_d.h"
40 #include "oss/oss_3_0_sh_mask.h"
41 
42 #include "bif/bif_5_0_d.h"
43 #include "bif/bif_5_0_sh_mask.h"
44 
45 #include "gca/gfx_8_0_d.h"
46 #include "gca/gfx_8_0_sh_mask.h"
47 
48 #include "smu/smu_7_1_1_d.h"
49 #include "smu/smu_7_1_1_sh_mask.h"
50 
51 #include "uvd/uvd_5_0_d.h"
52 #include "uvd/uvd_5_0_sh_mask.h"
53 
54 #include "vce/vce_3_0_d.h"
55 #include "vce/vce_3_0_sh_mask.h"
56 
57 #include "dce/dce_10_0_d.h"
58 #include "dce/dce_10_0_sh_mask.h"
59 
60 #include "vid.h"
61 #include "vi.h"
62 #include "vi_dpm.h"
63 #include "gmc_v8_0.h"
64 #include "gmc_v7_0.h"
65 #include "gfx_v8_0.h"
66 #include "sdma_v2_4.h"
67 #include "sdma_v3_0.h"
68 #include "dce_v10_0.h"
69 #include "dce_v11_0.h"
70 #include "iceland_ih.h"
71 #include "tonga_ih.h"
72 #include "cz_ih.h"
73 #include "uvd_v5_0.h"
74 #include "uvd_v6_0.h"
75 #include "vce_v3_0.h"
76 #if defined(CONFIG_DRM_AMD_ACP)
77 #include "amdgpu_acp.h"
78 #endif
79 #include "dce_virtual.h"
80 #include "mxgpu_vi.h"
81 #include "amdgpu_dm.h"
82 
83 /*
84  * Indirect register accessors
85  */
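/*
 * The PCIE, SMC, UVD_CTX, DIDT and GC_CAC register spaces are not part of
 * the regular MMIO aperture; they are reached through an index/data
 * register pair.  Each accessor below takes the matching spinlock, writes
 * the register offset to the INDEX register and then reads or writes the
 * DATA register.
 */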
86 static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
87 {
88 	unsigned long flags;
89 	u32 r;
90 
91 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
92 	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
93 	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
94 	r = RREG32_NO_KIQ(mmPCIE_DATA);
95 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
96 	return r;
97 }
98 
99 static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
100 {
101 	unsigned long flags;
102 
103 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
104 	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
105 	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
106 	WREG32_NO_KIQ(mmPCIE_DATA, v);
107 	(void)RREG32_NO_KIQ(mmPCIE_DATA);
108 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
109 }
110 
111 static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
112 {
113 	unsigned long flags;
114 	u32 r;
115 
116 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
117 	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
118 	r = RREG32_NO_KIQ(mmSMC_IND_DATA_11);
119 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
120 	return r;
121 }
122 
123 static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
124 {
125 	unsigned long flags;
126 
127 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
128 	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
129 	WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v));
130 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
131 }
132 
133 /* smu_8_0_d.h */
134 #define mmMP0PUB_IND_INDEX                                                      0x180
135 #define mmMP0PUB_IND_DATA                                                       0x181
136 
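/*
 * On APUs (Carrizo/Stoney) the SMC registers are reached through the
 * MP0PUB index/data pair defined above rather than SMC_IND_INDEX_11,
 * so separate accessors are installed in vi_common_early_init().
 */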
137 static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
138 {
139 	unsigned long flags;
140 	u32 r;
141 
142 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
143 	WREG32(mmMP0PUB_IND_INDEX, (reg));
144 	r = RREG32(mmMP0PUB_IND_DATA);
145 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
146 	return r;
147 }
148 
149 static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
150 {
151 	unsigned long flags;
152 
153 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
154 	WREG32(mmMP0PUB_IND_INDEX, (reg));
155 	WREG32(mmMP0PUB_IND_DATA, (v));
156 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
157 }
158 
159 static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
160 {
161 	unsigned long flags;
162 	u32 r;
163 
164 	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
165 	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
166 	r = RREG32(mmUVD_CTX_DATA);
167 	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
168 	return r;
169 }
170 
171 static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
172 {
173 	unsigned long flags;
174 
175 	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
176 	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
177 	WREG32(mmUVD_CTX_DATA, (v));
178 	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
179 }
180 
181 static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
182 {
183 	unsigned long flags;
184 	u32 r;
185 
186 	spin_lock_irqsave(&adev->didt_idx_lock, flags);
187 	WREG32(mmDIDT_IND_INDEX, (reg));
188 	r = RREG32(mmDIDT_IND_DATA);
189 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
190 	return r;
191 }
192 
193 static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
194 {
195 	unsigned long flags;
196 
197 	spin_lock_irqsave(&adev->didt_idx_lock, flags);
198 	WREG32(mmDIDT_IND_INDEX, (reg));
199 	WREG32(mmDIDT_IND_DATA, (v));
200 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
201 }
202 
203 static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
204 {
205 	unsigned long flags;
206 	u32 r;
207 
208 	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
209 	WREG32(mmGC_CAC_IND_INDEX, (reg));
210 	r = RREG32(mmGC_CAC_IND_DATA);
211 	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
212 	return r;
213 }
214 
215 static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
216 {
217 	unsigned long flags;
218 
219 	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
220 	WREG32(mmGC_CAC_IND_INDEX, (reg));
221 	WREG32(mmGC_CAC_IND_DATA, (v));
222 	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
223 }
224 
225 
226 static const u32 tonga_mgcg_cgcg_init[] =
227 {
228 	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
229 	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
230 	mmPCIE_DATA, 0x000f0000, 0x00000000,
231 	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
232 	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
233 	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
234 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
235 };
236 
237 static const u32 fiji_mgcg_cgcg_init[] =
238 {
239 	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
240 	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
241 	mmPCIE_DATA, 0x000f0000, 0x00000000,
242 	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
243 	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
244 	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
245 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
246 };
247 
248 static const u32 iceland_mgcg_cgcg_init[] =
249 {
250 	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
251 	mmPCIE_DATA, 0x000f0000, 0x00000000,
252 	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
253 	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
254 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
255 };
256 
257 static const u32 cz_mgcg_cgcg_init[] =
258 {
259 	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
260 	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
261 	mmPCIE_DATA, 0x000f0000, 0x00000000,
262 	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
263 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
264 };
265 
266 static const u32 stoney_mgcg_cgcg_init[] =
267 {
268 	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
269 	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
270 	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
271 };
272 
273 static void vi_init_golden_registers(struct amdgpu_device *adev)
274 {
275 	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
276 	mutex_lock(&adev->grbm_idx_mutex);
277 
278 	if (amdgpu_sriov_vf(adev)) {
279 		xgpu_vi_init_golden_registers(adev);
280 		mutex_unlock(&adev->grbm_idx_mutex);
281 		return;
282 	}
283 
284 	switch (adev->asic_type) {
285 	case CHIP_TOPAZ:
286 		amdgpu_device_program_register_sequence(adev,
287 							iceland_mgcg_cgcg_init,
288 							ARRAY_SIZE(iceland_mgcg_cgcg_init));
289 		break;
290 	case CHIP_FIJI:
291 		amdgpu_device_program_register_sequence(adev,
292 							fiji_mgcg_cgcg_init,
293 							ARRAY_SIZE(fiji_mgcg_cgcg_init));
294 		break;
295 	case CHIP_TONGA:
296 		amdgpu_device_program_register_sequence(adev,
297 							tonga_mgcg_cgcg_init,
298 							ARRAY_SIZE(tonga_mgcg_cgcg_init));
299 		break;
300 	case CHIP_CARRIZO:
301 		amdgpu_device_program_register_sequence(adev,
302 							cz_mgcg_cgcg_init,
303 							ARRAY_SIZE(cz_mgcg_cgcg_init));
304 		break;
305 	case CHIP_STONEY:
306 		amdgpu_device_program_register_sequence(adev,
307 							stoney_mgcg_cgcg_init,
308 							ARRAY_SIZE(stoney_mgcg_cgcg_init));
309 		break;
310 	case CHIP_POLARIS10:
311 	case CHIP_POLARIS11:
312 	case CHIP_POLARIS12:
313 	case CHIP_VEGAM:
314 	default:
315 		break;
316 	}
317 	mutex_unlock(&adev->grbm_idx_mutex);
318 }
319 
320 /**
321  * vi_get_xclk - get the xclk
322  *
323  * @adev: amdgpu_device pointer
324  *
325  * Returns the reference clock used by the gfx engine
326  * (VI).
327  */
328 static u32 vi_get_xclk(struct amdgpu_device *adev)
329 {
330 	u32 reference_clock = adev->clock.spll.reference_freq;
331 	u32 tmp;
332 
333 	if (adev->flags & AMD_IS_APU)
334 		return reference_clock;
335 
336 	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
337 	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
338 		return 1000;
339 
340 	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
341 	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
342 		return reference_clock / 4;
343 
344 	return reference_clock;
345 }
346 
347 /**
348  * vi_srbm_select - select specific register instances
349  *
350  * @adev: amdgpu_device pointer
351  * @me: selected ME (micro engine)
352  * @pipe: pipe
353  * @queue: queue
354  * @vmid: VMID
355  *
356  * Switches the currently active register instances.  Some
357  * registers are instanced per VMID, others are instanced per
358  * me/pipe/queue combination.
359  */
360 void vi_srbm_select(struct amdgpu_device *adev,
361 		     u32 me, u32 pipe, u32 queue, u32 vmid)
362 {
363 	u32 srbm_gfx_cntl = 0;
364 	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
365 	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
366 	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
367 	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
368 	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
369 }
370 
371 static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
372 {
373 	/* todo */
374 }
375 
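/**
 * vi_read_disabled_bios - read the vbios with ROM access re-enabled
 *
 * @adev: amdgpu_device pointer
 *
 * Temporarily enables the BIOS ROM in BUS_CNTL, disables VGA mode on the
 * CRTCs and sets the ROM_CNTL SCK overwrite bit so amdgpu_read_bios()
 * can fetch the image, then restores the original register state.
 * Returns true if the read succeeded.
 */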
376 static bool vi_read_disabled_bios(struct amdgpu_device *adev)
377 {
378 	u32 bus_cntl;
379 	u32 d1vga_control = 0;
380 	u32 d2vga_control = 0;
381 	u32 vga_render_control = 0;
382 	u32 rom_cntl;
383 	bool r;
384 
385 	bus_cntl = RREG32(mmBUS_CNTL);
386 	if (adev->mode_info.num_crtc) {
387 		d1vga_control = RREG32(mmD1VGA_CONTROL);
388 		d2vga_control = RREG32(mmD2VGA_CONTROL);
389 		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
390 	}
391 	rom_cntl = RREG32_SMC(ixROM_CNTL);
392 
393 	/* enable the rom */
394 	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
395 	if (adev->mode_info.num_crtc) {
396 		/* Disable VGA mode */
397 		WREG32(mmD1VGA_CONTROL,
398 		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
399 					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
400 		WREG32(mmD2VGA_CONTROL,
401 		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
402 					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
403 		WREG32(mmVGA_RENDER_CONTROL,
404 		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
405 	}
406 	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);
407 
408 	r = amdgpu_read_bios(adev);
409 
410 	/* restore regs */
411 	WREG32(mmBUS_CNTL, bus_cntl);
412 	if (adev->mode_info.num_crtc) {
413 		WREG32(mmD1VGA_CONTROL, d1vga_control);
414 		WREG32(mmD2VGA_CONTROL, d2vga_control);
415 		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
416 	}
417 	WREG32_SMC(ixROM_CNTL, rom_cntl);
418 	return r;
419 }
420 
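/**
 * vi_read_bios_from_rom - read the vbios image directly from the ROM
 *
 * @adev: amdgpu_device pointer
 * @bios: destination buffer
 * @length_bytes: number of bytes to read
 *
 * Streams the image dword by dword through the SMC ROM_INDEX/ROM_DATA
 * indirect registers.  Not usable on APUs, where the vbios is part of
 * the system BIOS image.  Returns true on success.
 */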
421 static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
422 				  u8 *bios, u32 length_bytes)
423 {
424 	u32 *dw_ptr;
425 	unsigned long flags;
426 	u32 i, length_dw;
427 
428 	if (bios == NULL)
429 		return false;
430 	if (length_bytes == 0)
431 		return false;
432 	/* APU vbios image is part of sbios image */
433 	if (adev->flags & AMD_IS_APU)
434 		return false;
435 
436 	dw_ptr = (u32 *)bios;
437 	length_dw = ALIGN(length_bytes, 4) / 4;
438 	/* take the smc lock since we are using the smc index */
439 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
440 	/* set rom index to 0 */
441 	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
442 	WREG32(mmSMC_IND_DATA_11, 0);
443 	/* set index to data for continuous read */
444 	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
445 	for (i = 0; i < length_dw; i++)
446 		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
447 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
448 
449 	return true;
450 }
451 
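/**
 * vi_detect_hw_virtualization - detect SR-IOV or passthrough operation
 *
 * @adev: amdgpu_device pointer
 *
 * On Tonga and Fiji, reads BIF_IOV_FUNC_IDENTIFIER to determine whether
 * we are running as a virtual function and whether IOV is enabled.  If
 * neither applies but we are inside a VM, assume passthrough mode.
 */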
452 static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
453 {
454 	uint32_t reg = 0;
455 
456 	if (adev->asic_type == CHIP_TONGA ||
457 	    adev->asic_type == CHIP_FIJI) {
458 	       reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
459 	       /* bit0: 0 means pf and 1 means vf */
460 	       if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
461 		       adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
462 	       /* bit31: 0 means disable IOV and 1 means enable */
463 	       if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
464 		       adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
465 	}
466 
467 	if (reg == 0) {
468 		if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
469 			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
470 	}
471 }
472 
473 static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
474 	{mmGRBM_STATUS},
475 	{mmGRBM_STATUS2},
476 	{mmGRBM_STATUS_SE0},
477 	{mmGRBM_STATUS_SE1},
478 	{mmGRBM_STATUS_SE2},
479 	{mmGRBM_STATUS_SE3},
480 	{mmSRBM_STATUS},
481 	{mmSRBM_STATUS2},
482 	{mmSRBM_STATUS3},
483 	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
484 	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
485 	{mmCP_STAT},
486 	{mmCP_STALLED_STAT1},
487 	{mmCP_STALLED_STAT2},
488 	{mmCP_STALLED_STAT3},
489 	{mmCP_CPF_BUSY_STAT},
490 	{mmCP_CPF_STALLED_STAT1},
491 	{mmCP_CPF_STATUS},
492 	{mmCP_CPC_BUSY_STAT},
493 	{mmCP_CPC_STALLED_STAT1},
494 	{mmCP_CPC_STATUS},
495 	{mmGB_ADDR_CONFIG},
496 	{mmMC_ARB_RAMCFG},
497 	{mmGB_TILE_MODE0},
498 	{mmGB_TILE_MODE1},
499 	{mmGB_TILE_MODE2},
500 	{mmGB_TILE_MODE3},
501 	{mmGB_TILE_MODE4},
502 	{mmGB_TILE_MODE5},
503 	{mmGB_TILE_MODE6},
504 	{mmGB_TILE_MODE7},
505 	{mmGB_TILE_MODE8},
506 	{mmGB_TILE_MODE9},
507 	{mmGB_TILE_MODE10},
508 	{mmGB_TILE_MODE11},
509 	{mmGB_TILE_MODE12},
510 	{mmGB_TILE_MODE13},
511 	{mmGB_TILE_MODE14},
512 	{mmGB_TILE_MODE15},
513 	{mmGB_TILE_MODE16},
514 	{mmGB_TILE_MODE17},
515 	{mmGB_TILE_MODE18},
516 	{mmGB_TILE_MODE19},
517 	{mmGB_TILE_MODE20},
518 	{mmGB_TILE_MODE21},
519 	{mmGB_TILE_MODE22},
520 	{mmGB_TILE_MODE23},
521 	{mmGB_TILE_MODE24},
522 	{mmGB_TILE_MODE25},
523 	{mmGB_TILE_MODE26},
524 	{mmGB_TILE_MODE27},
525 	{mmGB_TILE_MODE28},
526 	{mmGB_TILE_MODE29},
527 	{mmGB_TILE_MODE30},
528 	{mmGB_TILE_MODE31},
529 	{mmGB_MACROTILE_MODE0},
530 	{mmGB_MACROTILE_MODE1},
531 	{mmGB_MACROTILE_MODE2},
532 	{mmGB_MACROTILE_MODE3},
533 	{mmGB_MACROTILE_MODE4},
534 	{mmGB_MACROTILE_MODE5},
535 	{mmGB_MACROTILE_MODE6},
536 	{mmGB_MACROTILE_MODE7},
537 	{mmGB_MACROTILE_MODE8},
538 	{mmGB_MACROTILE_MODE9},
539 	{mmGB_MACROTILE_MODE10},
540 	{mmGB_MACROTILE_MODE11},
541 	{mmGB_MACROTILE_MODE12},
542 	{mmGB_MACROTILE_MODE13},
543 	{mmGB_MACROTILE_MODE14},
544 	{mmGB_MACROTILE_MODE15},
545 	{mmCC_RB_BACKEND_DISABLE, true},
546 	{mmGC_USER_RB_BACKEND_DISABLE, true},
547 	{mmGB_BACKEND_MAP, false},
548 	{mmPA_SC_RASTER_CONFIG, true},
549 	{mmPA_SC_RASTER_CONFIG_1, true},
550 };
551 
552 static uint32_t vi_get_register_value(struct amdgpu_device *adev,
553 				      bool indexed, u32 se_num,
554 				      u32 sh_num, u32 reg_offset)
555 {
556 	if (indexed) {
557 		uint32_t val;
558 		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
559 		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
560 
561 		switch (reg_offset) {
562 		case mmCC_RB_BACKEND_DISABLE:
563 			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
564 		case mmGC_USER_RB_BACKEND_DISABLE:
565 			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
566 		case mmPA_SC_RASTER_CONFIG:
567 			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
568 		case mmPA_SC_RASTER_CONFIG_1:
569 			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
570 		}
571 
572 		mutex_lock(&adev->grbm_idx_mutex);
573 		if (se_num != 0xffffffff || sh_num != 0xffffffff)
574 			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
575 
576 		val = RREG32(reg_offset);
577 
578 		if (se_num != 0xffffffff || sh_num != 0xffffffff)
579 			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
580 		mutex_unlock(&adev->grbm_idx_mutex);
581 		return val;
582 	} else {
583 		unsigned idx;
584 
585 		switch (reg_offset) {
586 		case mmGB_ADDR_CONFIG:
587 			return adev->gfx.config.gb_addr_config;
588 		case mmMC_ARB_RAMCFG:
589 			return adev->gfx.config.mc_arb_ramcfg;
590 		case mmGB_TILE_MODE0:
591 		case mmGB_TILE_MODE1:
592 		case mmGB_TILE_MODE2:
593 		case mmGB_TILE_MODE3:
594 		case mmGB_TILE_MODE4:
595 		case mmGB_TILE_MODE5:
596 		case mmGB_TILE_MODE6:
597 		case mmGB_TILE_MODE7:
598 		case mmGB_TILE_MODE8:
599 		case mmGB_TILE_MODE9:
600 		case mmGB_TILE_MODE10:
601 		case mmGB_TILE_MODE11:
602 		case mmGB_TILE_MODE12:
603 		case mmGB_TILE_MODE13:
604 		case mmGB_TILE_MODE14:
605 		case mmGB_TILE_MODE15:
606 		case mmGB_TILE_MODE16:
607 		case mmGB_TILE_MODE17:
608 		case mmGB_TILE_MODE18:
609 		case mmGB_TILE_MODE19:
610 		case mmGB_TILE_MODE20:
611 		case mmGB_TILE_MODE21:
612 		case mmGB_TILE_MODE22:
613 		case mmGB_TILE_MODE23:
614 		case mmGB_TILE_MODE24:
615 		case mmGB_TILE_MODE25:
616 		case mmGB_TILE_MODE26:
617 		case mmGB_TILE_MODE27:
618 		case mmGB_TILE_MODE28:
619 		case mmGB_TILE_MODE29:
620 		case mmGB_TILE_MODE30:
621 		case mmGB_TILE_MODE31:
622 			idx = (reg_offset - mmGB_TILE_MODE0);
623 			return adev->gfx.config.tile_mode_array[idx];
624 		case mmGB_MACROTILE_MODE0:
625 		case mmGB_MACROTILE_MODE1:
626 		case mmGB_MACROTILE_MODE2:
627 		case mmGB_MACROTILE_MODE3:
628 		case mmGB_MACROTILE_MODE4:
629 		case mmGB_MACROTILE_MODE5:
630 		case mmGB_MACROTILE_MODE6:
631 		case mmGB_MACROTILE_MODE7:
632 		case mmGB_MACROTILE_MODE8:
633 		case mmGB_MACROTILE_MODE9:
634 		case mmGB_MACROTILE_MODE10:
635 		case mmGB_MACROTILE_MODE11:
636 		case mmGB_MACROTILE_MODE12:
637 		case mmGB_MACROTILE_MODE13:
638 		case mmGB_MACROTILE_MODE14:
639 		case mmGB_MACROTILE_MODE15:
640 			idx = (reg_offset - mmGB_MACROTILE_MODE0);
641 			return adev->gfx.config.macrotile_mode_array[idx];
642 		default:
643 			return RREG32(reg_offset);
644 		}
645 	}
646 }
647 
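/**
 * vi_read_register - read a register on behalf of userspace
 *
 * @adev: amdgpu_device pointer
 * @se_num: shader engine selector (0xffffffff for all)
 * @sh_num: shader array selector (0xffffffff for all)
 * @reg_offset: register dword offset
 * @value: returned register value
 *
 * Only registers listed in vi_allowed_read_registers may be read;
 * anything else returns -EINVAL.  Cached gfx config values are returned
 * where available instead of touching the hardware.
 */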
648 static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
649 			    u32 sh_num, u32 reg_offset, u32 *value)
650 {
651 	uint32_t i;
652 
653 	*value = 0;
654 	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
655 		bool indexed = vi_allowed_read_registers[i].grbm_indexed;
656 
657 		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
658 			continue;
659 
660 		*value = vi_get_register_value(adev, indexed, se_num, sh_num,
661 					       reg_offset);
662 		return 0;
663 	}
664 	return -EINVAL;
665 }
666 
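/**
 * vi_gpu_pci_config_reset - reset the asic via PCI config space
 *
 * @adev: amdgpu_device pointer
 *
 * Disables bus mastering, triggers a PCI config reset and then polls
 * CONFIG_MEMSIZE until the asic comes back (reads something other than
 * 0xffffffff).  Returns 0 on success, -EINVAL on timeout.
 */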
667 static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
668 {
669 	u32 i;
670 
671 	dev_info(adev->dev, "GPU pci config reset\n");
672 
673 	/* disable BM */
674 	pci_clear_master(adev->pdev);
675 	/* reset */
676 	amdgpu_device_pci_config_reset(adev);
677 
678 	udelay(100);
679 
680 	/* wait for asic to come out of reset */
681 	for (i = 0; i < adev->usec_timeout; i++) {
682 		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
683 			/* enable BM */
684 			pci_set_master(adev->pdev);
685 			adev->has_hw_reset = true;
686 			return 0;
687 		}
688 		udelay(1);
689 	}
690 	return -EINVAL;
691 }
692 
693 /**
694  * vi_asic_reset - reset the GPU
695  *
696  * @adev: amdgpu_device pointer
697  *
698  * Resets the GPU via a PCI config reset, marking the engine as hung
699  * in the atombios scratch registers for the duration of the reset.
700  * Returns 0 for success.
701  */
702 static int vi_asic_reset(struct amdgpu_device *adev)
703 {
704 	int r;
705 
706 	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
707 
708 	r = vi_gpu_pci_config_reset(adev);
709 
710 	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
711 
712 	return r;
713 }
714 
715 static u32 vi_get_config_memsize(struct amdgpu_device *adev)
716 {
717 	return RREG32(mmCONFIG_MEMSIZE);
718 }
719 
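/**
 * vi_set_uvd_clock - program one UVD clock (vclk or dclk)
 *
 * @adev: amdgpu_device pointer
 * @clock: requested clock frequency
 * @cntl_reg: SMC indirect control register for the clock
 * @status_reg: SMC indirect status register to poll
 *
 * Looks up the dividers via the atombios tables, programs the post
 * divider and waits for the clock status bit to assert.
 * Returns 0 on success, -ETIMEDOUT if the status never asserts.
 */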
720 static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
721 			u32 cntl_reg, u32 status_reg)
722 {
723 	int r, i;
724 	struct atom_clock_dividers dividers;
725 	uint32_t tmp;
726 
727 	r = amdgpu_atombios_get_clock_dividers(adev,
728 					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
729 					       clock, false, &dividers);
730 	if (r)
731 		return r;
732 
733 	tmp = RREG32_SMC(cntl_reg);
734 
735 	if (adev->flags & AMD_IS_APU)
736 		tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
737 	else
738 		tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
739 				CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
740 	tmp |= dividers.post_divider;
741 	WREG32_SMC(cntl_reg, tmp);
742 
743 	for (i = 0; i < 100; i++) {
744 		tmp = RREG32_SMC(status_reg);
745 		if (adev->flags & AMD_IS_APU) {
746 			if (tmp & 0x10000)
747 				break;
748 		} else {
749 			if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
750 				break;
751 		}
752 		mdelay(10);
753 	}
754 	if (i == 100)
755 		return -ETIMEDOUT;
756 	return 0;
757 }
758 
759 #define ixGNB_CLK1_DFS_CNTL 0xD82200F0
760 #define ixGNB_CLK1_STATUS   0xD822010C
761 #define ixGNB_CLK2_DFS_CNTL 0xD8220110
762 #define ixGNB_CLK2_STATUS   0xD822012C
763 #define ixGNB_CLK3_DFS_CNTL 0xD8220130
764 #define ixGNB_CLK3_STATUS   0xD822014C
765 
766 static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
767 {
768 	int r;
769 
770 	if (adev->flags & AMD_IS_APU) {
771 		r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
772 		if (r)
773 			return r;
774 
775 		r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
776 		if (r)
777 			return r;
778 	} else {
779 		r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
780 		if (r)
781 			return r;
782 
783 		r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
784 		if (r)
785 			return r;
786 	}
787 
788 	return 0;
789 }
790 
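/**
 * vi_set_vce_clocks - program the VCE clocks
 *
 * @adev: amdgpu_device pointer
 * @evclk: requested VCE clock (currently unused here)
 * @ecclk: requested VCE core clock
 *
 * Waits for the current clock to report a stable status, programs the
 * divider obtained from the atombios tables (GNB registers on APUs,
 * CG_ECLK on dGPUs) and waits for the new setting to take effect.
 * Returns 0 on success, -ETIMEDOUT on timeout.
 */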
791 static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
792 {
793 	int r, i;
794 	struct atom_clock_dividers dividers;
795 	u32 tmp;
796 	u32 reg_ctrl;
797 	u32 reg_status;
798 	u32 status_mask;
799 	u32 reg_mask;
800 
801 	if (adev->flags & AMD_IS_APU) {
802 		reg_ctrl = ixGNB_CLK3_DFS_CNTL;
803 		reg_status = ixGNB_CLK3_STATUS;
804 		status_mask = 0x00010000;
805 		reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
806 	} else {
807 		reg_ctrl = ixCG_ECLK_CNTL;
808 		reg_status = ixCG_ECLK_STATUS;
809 		status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
810 		reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
811 	}
812 
813 	r = amdgpu_atombios_get_clock_dividers(adev,
814 					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
815 					       ecclk, false, &dividers);
816 	if (r)
817 		return r;
818 
819 	for (i = 0; i < 100; i++) {
820 		if (RREG32_SMC(reg_status) & status_mask)
821 			break;
822 		mdelay(10);
823 	}
824 
825 	if (i == 100)
826 		return -ETIMEDOUT;
827 
828 	tmp = RREG32_SMC(reg_ctrl);
829 	tmp &= ~reg_mask;
830 	tmp |= dividers.post_divider;
831 	WREG32_SMC(reg_ctrl, tmp);
832 
833 	for (i = 0; i < 100; i++) {
834 		if (RREG32_SMC(reg_status) & status_mask)
835 			break;
836 		mdelay(10);
837 	}
838 
839 	if (i == 100)
840 		return -ETIMEDOUT;
841 
842 	return 0;
843 }
844 
845 static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
846 {
847 	if (pci_is_root_bus(adev->pdev->bus))
848 		return;
849 
850 	if (amdgpu_pcie_gen2 == 0)
851 		return;
852 
853 	if (adev->flags & AMD_IS_APU)
854 		return;
855 
856 	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
857 					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
858 		return;
859 
860 	/* todo */
861 }
862 
863 static void vi_program_aspm(struct amdgpu_device *adev)
864 {
865 
866 	if (amdgpu_aspm == 0)
867 		return;
868 
869 	/* todo */
870 }
871 
872 static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
873 					bool enable)
874 {
875 	u32 tmp;
876 
877 	/* not necessary on CZ */
878 	if (adev->flags & AMD_IS_APU)
879 		return;
880 
881 	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
882 	if (enable)
883 		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
884 	else
885 		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);
886 
887 	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
888 }
889 
890 #define ATI_REV_ID_FUSE_MACRO__ADDRESS      0xC0014044
891 #define ATI_REV_ID_FUSE_MACRO__SHIFT        9
892 #define ATI_REV_ID_FUSE_MACRO__MASK         0x00001E00
893 
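/**
 * vi_get_rev_id - read the silicon revision id
 *
 * @adev: amdgpu_device pointer
 *
 * Reads the ATI rev id fuse through the SMC on APUs, or the
 * BIF ATI_REV_ID strap in PCIE_EFUSE4 on dGPUs, and returns the
 * extracted field.
 */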
894 static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
895 {
896 	if (adev->flags & AMD_IS_APU)
897 		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
898 			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
899 	else
900 		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
901 			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
902 }
903 
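/**
 * vi_flush_hdp - flush the HDP write path
 *
 * @adev: amdgpu_device pointer
 * @ring: ring to emit the flush on, or NULL to do it via MMIO
 *
 * Triggers HDP_MEM_COHERENCY_FLUSH_CNTL either with a direct MMIO write
 * (followed by a readback) or by emitting the register write on @ring.
 */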
904 static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
905 {
906 	if (!ring || !ring->funcs->emit_wreg) {
907 		WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
908 		RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
909 	} else {
910 		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
911 	}
912 }
913 
914 static void vi_invalidate_hdp(struct amdgpu_device *adev,
915 			      struct amdgpu_ring *ring)
916 {
917 	if (!ring || !ring->funcs->emit_wreg) {
918 		WREG32(mmHDP_DEBUG0, 1);
919 		RREG32(mmHDP_DEBUG0);
920 	} else {
921 		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
922 	}
923 }
924 
925 static bool vi_need_full_reset(struct amdgpu_device *adev)
926 {
927 	switch (adev->asic_type) {
928 	case CHIP_CARRIZO:
929 	case CHIP_STONEY:
930 		/* CZ has hang issues with full reset at the moment */
931 		return false;
932 	case CHIP_FIJI:
933 	case CHIP_TONGA:
934 		/* XXX: soft reset should work on fiji and tonga */
935 		return true;
936 	case CHIP_POLARIS10:
937 	case CHIP_POLARIS11:
938 	case CHIP_POLARIS12:
939 	case CHIP_TOPAZ:
940 	default:
941 		/* change this when we support soft reset */
942 		return true;
943 	}
944 }
945 
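/**
 * vi_get_pcie_usage - report PCIe traffic counters
 *
 * @adev: amdgpu_device pointer
 * @count0: returns the number of received messages (perf event 40)
 * @count1: returns the number of posted requests sent (perf event 104)
 *
 * Programs the PCIE TXCLK perf counters, lets them run for one second
 * and reads back the 32-bit counts plus their upper overflow bits.
 * Does nothing on APUs, where these counters report 0.
 */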
946 static void vi_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
947 			      uint64_t *count1)
948 {
949 	uint32_t perfctr = 0;
950 	uint64_t cnt0_of, cnt1_of;
951 	int tmp;
952 
953 	/* This reports 0 on APUs, so return to avoid writing/reading registers
954 	 * that may or may not be different from their GPU counterparts
955 	 */
956 	if (adev->flags & AMD_IS_APU)
957 		return;
958 
959 	/* Set the 2 events that we wish to watch, defined above */
960 	/* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
961 	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
962 	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
963 
964 	/* Write to enable desired perf counters */
965 	WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
966 	/* Zero out and enable the perf counters
967 	 * Write 0x5:
968 	 * Bit 0 = Start all counters(1)
969 	 * Bit 2 = Global counter reset enable(1)
970 	 */
971 	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);
972 
973 	msleep(1000);
974 
975 	/* Load the shadow and disable the perf counters
976 	 * Write 0x2:
977 	 * Bit 0 = Stop counters(0)
978 	 * Bit 1 = Load the shadow counters(1)
979 	 */
980 	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);
981 
982 	/* Read register values to get any >32bit overflow */
983 	tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
984 	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
985 	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
986 
987 	/* Get the values and add the overflow */
988 	*count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
989 	*count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
990 }
991 
992 static uint64_t vi_get_pcie_replay_count(struct amdgpu_device *adev)
993 {
994 	uint64_t nak_r, nak_g;
995 
996 	/* Get the number of NAKs received and generated */
997 	nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
998 	nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);
999 
1000 	/* Add the total number of NAKs, i.e the number of replays */
1001 	return (nak_r + nak_g);
1002 }
1003 
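/**
 * vi_need_reset_on_init - check whether the asic needs a reset at init
 *
 * @adev: amdgpu_device pointer
 *
 * Returns true on dGPUs when the SMC is found already running (its clock
 * is not disabled and its program counter is at or above 0x20100), which
 * indicates the asic was left initialized and should be reset first.
 */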
1004 static bool vi_need_reset_on_init(struct amdgpu_device *adev)
1005 {
1006 	u32 clock_cntl, pc;
1007 
1008 	if (adev->flags & AMD_IS_APU)
1009 		return false;
1010 
1011 	/* check if the SMC is already running */
1012 	clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
1013 	pc = RREG32_SMC(ixSMC_PC_C);
1014 	if ((0 == REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) &&
1015 	    (0x20100 <= pc))
1016 		return true;
1017 
1018 	return false;
1019 }
1020 
1021 static const struct amdgpu_asic_funcs vi_asic_funcs =
1022 {
1023 	.read_disabled_bios = &vi_read_disabled_bios,
1024 	.read_bios_from_rom = &vi_read_bios_from_rom,
1025 	.read_register = &vi_read_register,
1026 	.reset = &vi_asic_reset,
1027 	.set_vga_state = &vi_vga_set_state,
1028 	.get_xclk = &vi_get_xclk,
1029 	.set_uvd_clocks = &vi_set_uvd_clocks,
1030 	.set_vce_clocks = &vi_set_vce_clocks,
1031 	.get_config_memsize = &vi_get_config_memsize,
1032 	.flush_hdp = &vi_flush_hdp,
1033 	.invalidate_hdp = &vi_invalidate_hdp,
1034 	.need_full_reset = &vi_need_full_reset,
1035 	.init_doorbell_index = &legacy_doorbell_index_init,
1036 	.get_pcie_usage = &vi_get_pcie_usage,
1037 	.need_reset_on_init = &vi_need_reset_on_init,
1038 	.get_pcie_replay_count = &vi_get_pcie_replay_count,
1039 };
1040 
1041 #define CZ_REV_BRISTOL(rev)	 \
1042 	((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))
1043 
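/**
 * vi_common_early_init - early common IP init
 *
 * @handle: amdgpu_device pointer
 *
 * Hooks up the indirect register accessors and asic callbacks, reads the
 * revision id and fills in the clock/power gating support flags and the
 * external revision id for each supported asic.  Also applies the
 * virtualization settings when running as an SR-IOV VF.
 */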
1044 static int vi_common_early_init(void *handle)
1045 {
1046 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1047 
1048 	if (adev->flags & AMD_IS_APU) {
1049 		adev->smc_rreg = &cz_smc_rreg;
1050 		adev->smc_wreg = &cz_smc_wreg;
1051 	} else {
1052 		adev->smc_rreg = &vi_smc_rreg;
1053 		adev->smc_wreg = &vi_smc_wreg;
1054 	}
1055 	adev->pcie_rreg = &vi_pcie_rreg;
1056 	adev->pcie_wreg = &vi_pcie_wreg;
1057 	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
1058 	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
1059 	adev->didt_rreg = &vi_didt_rreg;
1060 	adev->didt_wreg = &vi_didt_wreg;
1061 	adev->gc_cac_rreg = &vi_gc_cac_rreg;
1062 	adev->gc_cac_wreg = &vi_gc_cac_wreg;
1063 
1064 	adev->asic_funcs = &vi_asic_funcs;
1065 
1066 	adev->rev_id = vi_get_rev_id(adev);
1067 	adev->external_rev_id = 0xFF;
1068 	switch (adev->asic_type) {
1069 	case CHIP_TOPAZ:
1070 		adev->cg_flags = 0;
1071 		adev->pg_flags = 0;
1072 		adev->external_rev_id = 0x1;
1073 		break;
1074 	case CHIP_FIJI:
1075 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1076 			AMD_CG_SUPPORT_GFX_MGLS |
1077 			AMD_CG_SUPPORT_GFX_RLC_LS |
1078 			AMD_CG_SUPPORT_GFX_CP_LS |
1079 			AMD_CG_SUPPORT_GFX_CGTS |
1080 			AMD_CG_SUPPORT_GFX_CGTS_LS |
1081 			AMD_CG_SUPPORT_GFX_CGCG |
1082 			AMD_CG_SUPPORT_GFX_CGLS |
1083 			AMD_CG_SUPPORT_SDMA_MGCG |
1084 			AMD_CG_SUPPORT_SDMA_LS |
1085 			AMD_CG_SUPPORT_BIF_LS |
1086 			AMD_CG_SUPPORT_HDP_MGCG |
1087 			AMD_CG_SUPPORT_HDP_LS |
1088 			AMD_CG_SUPPORT_ROM_MGCG |
1089 			AMD_CG_SUPPORT_MC_MGCG |
1090 			AMD_CG_SUPPORT_MC_LS |
1091 			AMD_CG_SUPPORT_UVD_MGCG;
1092 		adev->pg_flags = 0;
1093 		adev->external_rev_id = adev->rev_id + 0x3c;
1094 		break;
1095 	case CHIP_TONGA:
1096 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1097 			AMD_CG_SUPPORT_GFX_CGCG |
1098 			AMD_CG_SUPPORT_GFX_CGLS |
1099 			AMD_CG_SUPPORT_SDMA_MGCG |
1100 			AMD_CG_SUPPORT_SDMA_LS |
1101 			AMD_CG_SUPPORT_BIF_LS |
1102 			AMD_CG_SUPPORT_HDP_MGCG |
1103 			AMD_CG_SUPPORT_HDP_LS |
1104 			AMD_CG_SUPPORT_ROM_MGCG |
1105 			AMD_CG_SUPPORT_MC_MGCG |
1106 			AMD_CG_SUPPORT_MC_LS |
1107 			AMD_CG_SUPPORT_DRM_LS |
1108 			AMD_CG_SUPPORT_UVD_MGCG;
1109 		adev->pg_flags = 0;
1110 		adev->external_rev_id = adev->rev_id + 0x14;
1111 		break;
1112 	case CHIP_POLARIS11:
1113 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1114 			AMD_CG_SUPPORT_GFX_RLC_LS |
1115 			AMD_CG_SUPPORT_GFX_CP_LS |
1116 			AMD_CG_SUPPORT_GFX_CGCG |
1117 			AMD_CG_SUPPORT_GFX_CGLS |
1118 			AMD_CG_SUPPORT_GFX_3D_CGCG |
1119 			AMD_CG_SUPPORT_GFX_3D_CGLS |
1120 			AMD_CG_SUPPORT_SDMA_MGCG |
1121 			AMD_CG_SUPPORT_SDMA_LS |
1122 			AMD_CG_SUPPORT_BIF_MGCG |
1123 			AMD_CG_SUPPORT_BIF_LS |
1124 			AMD_CG_SUPPORT_HDP_MGCG |
1125 			AMD_CG_SUPPORT_HDP_LS |
1126 			AMD_CG_SUPPORT_ROM_MGCG |
1127 			AMD_CG_SUPPORT_MC_MGCG |
1128 			AMD_CG_SUPPORT_MC_LS |
1129 			AMD_CG_SUPPORT_DRM_LS |
1130 			AMD_CG_SUPPORT_UVD_MGCG |
1131 			AMD_CG_SUPPORT_VCE_MGCG;
1132 		adev->pg_flags = 0;
1133 		adev->external_rev_id = adev->rev_id + 0x5A;
1134 		break;
1135 	case CHIP_POLARIS10:
1136 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1137 			AMD_CG_SUPPORT_GFX_RLC_LS |
1138 			AMD_CG_SUPPORT_GFX_CP_LS |
1139 			AMD_CG_SUPPORT_GFX_CGCG |
1140 			AMD_CG_SUPPORT_GFX_CGLS |
1141 			AMD_CG_SUPPORT_GFX_3D_CGCG |
1142 			AMD_CG_SUPPORT_GFX_3D_CGLS |
1143 			AMD_CG_SUPPORT_SDMA_MGCG |
1144 			AMD_CG_SUPPORT_SDMA_LS |
1145 			AMD_CG_SUPPORT_BIF_MGCG |
1146 			AMD_CG_SUPPORT_BIF_LS |
1147 			AMD_CG_SUPPORT_HDP_MGCG |
1148 			AMD_CG_SUPPORT_HDP_LS |
1149 			AMD_CG_SUPPORT_ROM_MGCG |
1150 			AMD_CG_SUPPORT_MC_MGCG |
1151 			AMD_CG_SUPPORT_MC_LS |
1152 			AMD_CG_SUPPORT_DRM_LS |
1153 			AMD_CG_SUPPORT_UVD_MGCG |
1154 			AMD_CG_SUPPORT_VCE_MGCG;
1155 		adev->pg_flags = 0;
1156 		adev->external_rev_id = adev->rev_id + 0x50;
1157 		break;
1158 	case CHIP_POLARIS12:
1159 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1160 			AMD_CG_SUPPORT_GFX_RLC_LS |
1161 			AMD_CG_SUPPORT_GFX_CP_LS |
1162 			AMD_CG_SUPPORT_GFX_CGCG |
1163 			AMD_CG_SUPPORT_GFX_CGLS |
1164 			AMD_CG_SUPPORT_GFX_3D_CGCG |
1165 			AMD_CG_SUPPORT_GFX_3D_CGLS |
1166 			AMD_CG_SUPPORT_SDMA_MGCG |
1167 			AMD_CG_SUPPORT_SDMA_LS |
1168 			AMD_CG_SUPPORT_BIF_MGCG |
1169 			AMD_CG_SUPPORT_BIF_LS |
1170 			AMD_CG_SUPPORT_HDP_MGCG |
1171 			AMD_CG_SUPPORT_HDP_LS |
1172 			AMD_CG_SUPPORT_ROM_MGCG |
1173 			AMD_CG_SUPPORT_MC_MGCG |
1174 			AMD_CG_SUPPORT_MC_LS |
1175 			AMD_CG_SUPPORT_DRM_LS |
1176 			AMD_CG_SUPPORT_UVD_MGCG |
1177 			AMD_CG_SUPPORT_VCE_MGCG;
1178 		adev->pg_flags = 0;
1179 		adev->external_rev_id = adev->rev_id + 0x64;
1180 		break;
1181 	case CHIP_VEGAM:
1182 		adev->cg_flags = 0;
1183 			/*AMD_CG_SUPPORT_GFX_MGCG |
1184 			AMD_CG_SUPPORT_GFX_RLC_LS |
1185 			AMD_CG_SUPPORT_GFX_CP_LS |
1186 			AMD_CG_SUPPORT_GFX_CGCG |
1187 			AMD_CG_SUPPORT_GFX_CGLS |
1188 			AMD_CG_SUPPORT_GFX_3D_CGCG |
1189 			AMD_CG_SUPPORT_GFX_3D_CGLS |
1190 			AMD_CG_SUPPORT_SDMA_MGCG |
1191 			AMD_CG_SUPPORT_SDMA_LS |
1192 			AMD_CG_SUPPORT_BIF_MGCG |
1193 			AMD_CG_SUPPORT_BIF_LS |
1194 			AMD_CG_SUPPORT_HDP_MGCG |
1195 			AMD_CG_SUPPORT_HDP_LS |
1196 			AMD_CG_SUPPORT_ROM_MGCG |
1197 			AMD_CG_SUPPORT_MC_MGCG |
1198 			AMD_CG_SUPPORT_MC_LS |
1199 			AMD_CG_SUPPORT_DRM_LS |
1200 			AMD_CG_SUPPORT_UVD_MGCG |
1201 			AMD_CG_SUPPORT_VCE_MGCG;*/
1202 		adev->pg_flags = 0;
1203 		adev->external_rev_id = adev->rev_id + 0x6E;
1204 		break;
1205 	case CHIP_CARRIZO:
1206 		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
1207 			AMD_CG_SUPPORT_GFX_MGCG |
1208 			AMD_CG_SUPPORT_GFX_MGLS |
1209 			AMD_CG_SUPPORT_GFX_RLC_LS |
1210 			AMD_CG_SUPPORT_GFX_CP_LS |
1211 			AMD_CG_SUPPORT_GFX_CGTS |
1212 			AMD_CG_SUPPORT_GFX_CGTS_LS |
1213 			AMD_CG_SUPPORT_GFX_CGCG |
1214 			AMD_CG_SUPPORT_GFX_CGLS |
1215 			AMD_CG_SUPPORT_BIF_LS |
1216 			AMD_CG_SUPPORT_HDP_MGCG |
1217 			AMD_CG_SUPPORT_HDP_LS |
1218 			AMD_CG_SUPPORT_SDMA_MGCG |
1219 			AMD_CG_SUPPORT_SDMA_LS |
1220 			AMD_CG_SUPPORT_VCE_MGCG;
1221 		/* rev0 hardware requires workarounds to support PG */
1222 		adev->pg_flags = 0;
1223 		if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
1224 			adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
1225 				AMD_PG_SUPPORT_GFX_PIPELINE |
1226 				AMD_PG_SUPPORT_CP |
1227 				AMD_PG_SUPPORT_UVD |
1228 				AMD_PG_SUPPORT_VCE;
1229 		}
1230 		adev->external_rev_id = adev->rev_id + 0x1;
1231 		break;
1232 	case CHIP_STONEY:
1233 		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
1234 			AMD_CG_SUPPORT_GFX_MGCG |
1235 			AMD_CG_SUPPORT_GFX_MGLS |
1236 			AMD_CG_SUPPORT_GFX_RLC_LS |
1237 			AMD_CG_SUPPORT_GFX_CP_LS |
1238 			AMD_CG_SUPPORT_GFX_CGTS |
1239 			AMD_CG_SUPPORT_GFX_CGTS_LS |
1240 			AMD_CG_SUPPORT_GFX_CGLS |
1241 			AMD_CG_SUPPORT_BIF_LS |
1242 			AMD_CG_SUPPORT_HDP_MGCG |
1243 			AMD_CG_SUPPORT_HDP_LS |
1244 			AMD_CG_SUPPORT_SDMA_MGCG |
1245 			AMD_CG_SUPPORT_SDMA_LS |
1246 			AMD_CG_SUPPORT_VCE_MGCG;
1247 		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
1248 			AMD_PG_SUPPORT_GFX_SMG |
1249 			AMD_PG_SUPPORT_GFX_PIPELINE |
1250 			AMD_PG_SUPPORT_CP |
1251 			AMD_PG_SUPPORT_UVD |
1252 			AMD_PG_SUPPORT_VCE;
1253 		adev->external_rev_id = adev->rev_id + 0x61;
1254 		break;
1255 	default:
1256 		/* FIXME: not supported yet */
1257 		return -EINVAL;
1258 	}
1259 
1260 	if (amdgpu_sriov_vf(adev)) {
1261 		amdgpu_virt_init_setting(adev);
1262 		xgpu_vi_mailbox_set_irq_funcs(adev);
1263 	}
1264 
1265 	return 0;
1266 }
1267 
1268 static int vi_common_late_init(void *handle)
1269 {
1270 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1271 
1272 	if (amdgpu_sriov_vf(adev))
1273 		xgpu_vi_mailbox_get_irq(adev);
1274 
1275 	return 0;
1276 }
1277 
1278 static int vi_common_sw_init(void *handle)
1279 {
1280 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1281 
1282 	if (amdgpu_sriov_vf(adev))
1283 		xgpu_vi_mailbox_add_irq_id(adev);
1284 
1285 	return 0;
1286 }
1287 
1288 static int vi_common_sw_fini(void *handle)
1289 {
1290 	return 0;
1291 }
1292 
1293 static int vi_common_hw_init(void *handle)
1294 {
1295 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1296 
1297 	/* move the golden regs per IP block */
1298 	vi_init_golden_registers(adev);
1299 	/* enable pcie gen2/3 link */
1300 	vi_pcie_gen3_enable(adev);
1301 	/* enable aspm */
1302 	vi_program_aspm(adev);
1303 	/* enable the doorbell aperture */
1304 	vi_enable_doorbell_aperture(adev, true);
1305 
1306 	return 0;
1307 }
1308 
1309 static int vi_common_hw_fini(void *handle)
1310 {
1311 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1312 
1313 	/* disable the doorbell aperture */
1314 	vi_enable_doorbell_aperture(adev, false);
1315 
1316 	if (amdgpu_sriov_vf(adev))
1317 		xgpu_vi_mailbox_put_irq(adev);
1318 
1319 	return 0;
1320 }
1321 
1322 static int vi_common_suspend(void *handle)
1323 {
1324 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1325 
1326 	return vi_common_hw_fini(adev);
1327 }
1328 
1329 static int vi_common_resume(void *handle)
1330 {
1331 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1332 
1333 	return vi_common_hw_init(adev);
1334 }
1335 
1336 static bool vi_common_is_idle(void *handle)
1337 {
1338 	return true;
1339 }
1340 
1341 static int vi_common_wait_for_idle(void *handle)
1342 {
1343 	return 0;
1344 }
1345 
1346 static int vi_common_soft_reset(void *handle)
1347 {
1348 	return 0;
1349 }
1350 
1351 static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
1352 						   bool enable)
1353 {
1354 	uint32_t temp, data;
1355 
1356 	temp = data = RREG32_PCIE(ixPCIE_CNTL2);
1357 
1358 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
1359 		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
1360 				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
1361 				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
1362 	else
1363 		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
1364 				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
1365 				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
1366 
1367 	if (temp != data)
1368 		WREG32_PCIE(ixPCIE_CNTL2, data);
1369 }
1370 
1371 static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
1372 						    bool enable)
1373 {
1374 	uint32_t temp, data;
1375 
1376 	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);
1377 
1378 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
1379 		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
1380 	else
1381 		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
1382 
1383 	if (temp != data)
1384 		WREG32(mmHDP_HOST_PATH_CNTL, data);
1385 }
1386 
1387 static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
1388 				      bool enable)
1389 {
1390 	uint32_t temp, data;
1391 
1392 	temp = data = RREG32(mmHDP_MEM_POWER_LS);
1393 
1394 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
1395 		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1396 	else
1397 		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1398 
1399 	if (temp != data)
1400 		WREG32(mmHDP_MEM_POWER_LS, data);
1401 }
1402 
1403 static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
1404 				      bool enable)
1405 {
1406 	uint32_t temp, data;
1407 
1408 	temp = data = RREG32(0x157a);
1409 
1410 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
1411 		data |= 1;
1412 	else
1413 		data &= ~1;
1414 
1415 	if (temp != data)
1416 		WREG32(0x157a, data);
1417 }
1418 
1419 
1420 static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
1421 						    bool enable)
1422 {
1423 	uint32_t temp, data;
1424 
1425 	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
1426 
1427 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
1428 		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1429 				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
1430 	else
1431 		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1432 				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
1433 
1434 	if (temp != data)
1435 		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
1436 }
1437 
1438 static int vi_common_set_clockgating_state_by_smu(void *handle,
1439 					   enum amd_clockgating_state state)
1440 {
1441 	uint32_t msg_id, pp_state = 0;
1442 	uint32_t pp_support_state = 0;
1443 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1444 
1445 	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
1446 		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
1447 			pp_support_state = PP_STATE_SUPPORT_LS;
1448 			pp_state = PP_STATE_LS;
1449 		}
1450 		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
1451 			pp_support_state |= PP_STATE_SUPPORT_CG;
1452 			pp_state |= PP_STATE_CG;
1453 		}
1454 		if (state == AMD_CG_STATE_UNGATE)
1455 			pp_state = 0;
1456 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1457 			       PP_BLOCK_SYS_MC,
1458 			       pp_support_state,
1459 			       pp_state);
1460 		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
1461 			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1462 	}
1463 
1464 	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
1465 		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
1466 			pp_support_state = PP_STATE_SUPPORT_LS;
1467 			pp_state = PP_STATE_LS;
1468 		}
1469 		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
1470 			pp_support_state |= PP_STATE_SUPPORT_CG;
1471 			pp_state |= PP_STATE_CG;
1472 		}
1473 		if (state == AMD_CG_STATE_UNGATE)
1474 			pp_state = 0;
1475 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1476 			       PP_BLOCK_SYS_SDMA,
1477 			       pp_support_state,
1478 			       pp_state);
1479 		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
1480 			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1481 	}
1482 
1483 	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
1484 		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
1485 			pp_support_state = PP_STATE_SUPPORT_LS;
1486 			pp_state = PP_STATE_LS;
1487 		}
1488 		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
1489 			pp_support_state |= PP_STATE_SUPPORT_CG;
1490 			pp_state |= PP_STATE_CG;
1491 		}
1492 		if (state == AMD_CG_STATE_UNGATE)
1493 			pp_state = 0;
1494 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1495 			       PP_BLOCK_SYS_HDP,
1496 			       pp_support_state,
1497 			       pp_state);
1498 		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
1499 			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1500 	}
1501 
1502 
1503 	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
1504 		if (state == AMD_CG_STATE_UNGATE)
1505 			pp_state = 0;
1506 		else
1507 			pp_state = PP_STATE_LS;
1508 
1509 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1510 			       PP_BLOCK_SYS_BIF,
1511 			       PP_STATE_SUPPORT_LS,
1512 			        pp_state);
1513 		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
1514 			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1515 	}
1516 	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
1517 		if (state == AMD_CG_STATE_UNGATE)
1518 			pp_state = 0;
1519 		else
1520 			pp_state = PP_STATE_CG;
1521 
1522 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1523 			       PP_BLOCK_SYS_BIF,
1524 			       PP_STATE_SUPPORT_CG,
1525 			       pp_state);
1526 		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
1527 			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1528 	}
1529 
1530 	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
1531 
1532 		if (state == AMD_CG_STATE_UNGATE)
1533 			pp_state = 0;
1534 		else
1535 			pp_state = PP_STATE_LS;
1536 
1537 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1538 			       PP_BLOCK_SYS_DRM,
1539 			       PP_STATE_SUPPORT_LS,
1540 			       pp_state);
1541 		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
1542 			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1543 	}
1544 
1545 	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
1546 
1547 		if (state == AMD_CG_STATE_UNGATE)
1548 			pp_state = 0;
1549 		else
1550 			pp_state = PP_STATE_CG;
1551 
1552 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1553 			       PP_BLOCK_SYS_ROM,
1554 			       PP_STATE_SUPPORT_CG,
1555 			       pp_state);
1556 		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
1557 			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1558 	}
1559 	return 0;
1560 }
1561 
1562 static int vi_common_set_clockgating_state(void *handle,
1563 					   enum amd_clockgating_state state)
1564 {
1565 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1566 
1567 	if (amdgpu_sriov_vf(adev))
1568 		return 0;
1569 
1570 	switch (adev->asic_type) {
1571 	case CHIP_FIJI:
1572 		vi_update_bif_medium_grain_light_sleep(adev,
1573 				state == AMD_CG_STATE_GATE);
1574 		vi_update_hdp_medium_grain_clock_gating(adev,
1575 				state == AMD_CG_STATE_GATE);
1576 		vi_update_hdp_light_sleep(adev,
1577 				state == AMD_CG_STATE_GATE);
1578 		vi_update_rom_medium_grain_clock_gating(adev,
1579 				state == AMD_CG_STATE_GATE);
1580 		break;
1581 	case CHIP_CARRIZO:
1582 	case CHIP_STONEY:
1583 		vi_update_bif_medium_grain_light_sleep(adev,
1584 				state == AMD_CG_STATE_GATE);
1585 		vi_update_hdp_medium_grain_clock_gating(adev,
1586 				state == AMD_CG_STATE_GATE);
1587 		vi_update_hdp_light_sleep(adev,
1588 				state == AMD_CG_STATE_GATE);
1589 		vi_update_drm_light_sleep(adev,
1590 				state == AMD_CG_STATE_GATE);
1591 		break;
1592 	case CHIP_TONGA:
1593 	case CHIP_POLARIS10:
1594 	case CHIP_POLARIS11:
1595 	case CHIP_POLARIS12:
1596 	case CHIP_VEGAM:
1597 		vi_common_set_clockgating_state_by_smu(adev, state);
1598 	default:
1599 		break;
1600 	}
1601 	return 0;
1602 }
1603 
1604 static int vi_common_set_powergating_state(void *handle,
1605 					    enum amd_powergating_state state)
1606 {
1607 	return 0;
1608 }
1609 
1610 static void vi_common_get_clockgating_state(void *handle, u32 *flags)
1611 {
1612 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1613 	int data;
1614 
1615 	if (amdgpu_sriov_vf(adev))
1616 		*flags = 0;
1617 
1618 	/* AMD_CG_SUPPORT_BIF_LS */
1619 	data = RREG32_PCIE(ixPCIE_CNTL2);
1620 	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
1621 		*flags |= AMD_CG_SUPPORT_BIF_LS;
1622 
1623 	/* AMD_CG_SUPPORT_HDP_LS */
1624 	data = RREG32(mmHDP_MEM_POWER_LS);
1625 	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
1626 		*flags |= AMD_CG_SUPPORT_HDP_LS;
1627 
1628 	/* AMD_CG_SUPPORT_HDP_MGCG */
1629 	data = RREG32(mmHDP_HOST_PATH_CNTL);
1630 	if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
1631 		*flags |= AMD_CG_SUPPORT_HDP_MGCG;
1632 
1633 	/* AMD_CG_SUPPORT_ROM_MGCG */
1634 	data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
1635 	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
1636 		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
1637 }
1638 
1639 static const struct amd_ip_funcs vi_common_ip_funcs = {
1640 	.name = "vi_common",
1641 	.early_init = vi_common_early_init,
1642 	.late_init = vi_common_late_init,
1643 	.sw_init = vi_common_sw_init,
1644 	.sw_fini = vi_common_sw_fini,
1645 	.hw_init = vi_common_hw_init,
1646 	.hw_fini = vi_common_hw_fini,
1647 	.suspend = vi_common_suspend,
1648 	.resume = vi_common_resume,
1649 	.is_idle = vi_common_is_idle,
1650 	.wait_for_idle = vi_common_wait_for_idle,
1651 	.soft_reset = vi_common_soft_reset,
1652 	.set_clockgating_state = vi_common_set_clockgating_state,
1653 	.set_powergating_state = vi_common_set_powergating_state,
1654 	.get_clockgating_state = vi_common_get_clockgating_state,
1655 };
1656 
1657 static const struct amdgpu_ip_block_version vi_common_ip_block =
1658 {
1659 	.type = AMD_IP_BLOCK_TYPE_COMMON,
1660 	.major = 1,
1661 	.minor = 0,
1662 	.rev = 0,
1663 	.funcs = &vi_common_ip_funcs,
1664 };
1665 
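/**
 * vi_set_ip_blocks - register the IP blocks for a VI family asic
 *
 * @adev: amdgpu_device pointer
 *
 * Detects virtualization first (the vbios is not usable this early),
 * then adds the common, GMC, IH, GFX, SDMA, powerplay, display (virtual,
 * DC or DCE) and UVD/VCE blocks (plus ACP on APUs) appropriate for the
 * asic type.  Returns 0 on success, -EINVAL for unsupported asics.
 */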
1666 int vi_set_ip_blocks(struct amdgpu_device *adev)
1667 {
1668 	/* in the early init stage, the vbios code won't work yet */
1669 	vi_detect_hw_virtualization(adev);
1670 
1671 	if (amdgpu_sriov_vf(adev))
1672 		adev->virt.ops = &xgpu_vi_virt_ops;
1673 
1674 	switch (adev->asic_type) {
1675 	case CHIP_TOPAZ:
1676 		/* topaz has no DCE, UVD, VCE */
1677 		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1678 		amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
1679 		amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
1680 		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
1681 		amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
1682 		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1683 		if (adev->enable_virtual_display)
1684 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1685 		break;
1686 	case CHIP_FIJI:
1687 		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1688 		amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
1689 		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
1690 		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
1691 		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
1692 		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1693 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
1694 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1695 #if defined(CONFIG_DRM_AMD_DC)
1696 		else if (amdgpu_device_has_dc_support(adev))
1697 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
1698 #endif
1699 		else
1700 			amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
1701 		if (!amdgpu_sriov_vf(adev)) {
1702 			amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
1703 			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
1704 		}
1705 		break;
1706 	case CHIP_TONGA:
1707 		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1708 		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
1709 		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
1710 		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
1711 		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
1712 		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1713 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
1714 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1715 #if defined(CONFIG_DRM_AMD_DC)
1716 		else if (amdgpu_device_has_dc_support(adev))
1717 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
1718 #endif
1719 		else
1720 			amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
1721 		if (!amdgpu_sriov_vf(adev)) {
1722 			amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
1723 			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
1724 		}
1725 		break;
1726 	case CHIP_POLARIS10:
1727 	case CHIP_POLARIS11:
1728 	case CHIP_POLARIS12:
1729 	case CHIP_VEGAM:
1730 		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1731 		amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
1732 		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
1733 		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
1734 		amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
1735 		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1736 		if (adev->enable_virtual_display)
1737 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1738 #if defined(CONFIG_DRM_AMD_DC)
1739 		else if (amdgpu_device_has_dc_support(adev))
1740 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
1741 #endif
1742 		else
1743 			amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
1744 		amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
1745 		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
1746 		break;
1747 	case CHIP_CARRIZO:
1748 		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1749 		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
1750 		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
1751 		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
1752 		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
1753 		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1754 		if (adev->enable_virtual_display)
1755 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1756 #if defined(CONFIG_DRM_AMD_DC)
1757 		else if (amdgpu_device_has_dc_support(adev))
1758 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
1759 #endif
1760 		else
1761 			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
1762 		amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
1763 		amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
1764 #if defined(CONFIG_DRM_AMD_ACP)
1765 		amdgpu_device_ip_block_add(adev, &acp_ip_block);
1766 #endif
1767 		break;
1768 	case CHIP_STONEY:
1769 		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1770 		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
1771 		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
1772 		amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
1773 		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
1774 		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1775 		if (adev->enable_virtual_display)
1776 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1777 #if defined(CONFIG_DRM_AMD_DC)
1778 		else if (amdgpu_device_has_dc_support(adev))
1779 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
1780 #endif
1781 		else
1782 			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
1783 		amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
1784 		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
1785 #if defined(CONFIG_DRM_AMD_ACP)
1786 		amdgpu_device_ip_block_add(adev, &acp_ip_block);
1787 #endif
1788 		break;
1789 	default:
1790 		/* FIXME: not supported yet */
1791 		return -EINVAL;
1792 	}
1793 
1794 	return 0;
1795 }
1796 
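/**
 * legacy_doorbell_index_init - set up the legacy doorbell index layout
 *
 * @adev: amdgpu_device pointer
 *
 * Assigns the fixed legacy doorbell indices for KIQ, the compute (MEC)
 * rings, the gfx ring, the SDMA engines and the IH ring, and records the
 * maximum assignment.
 */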
1797 void legacy_doorbell_index_init(struct amdgpu_device *adev)
1798 {
1799 	adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ;
1800 	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0;
1801 	adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1;
1802 	adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2;
1803 	adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3;
1804 	adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4;
1805 	adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5;
1806 	adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
1807 	adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
1808 	adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
1809 	adev->doorbell_index.sdma_engine[0] = AMDGPU_DOORBELL_sDMA_ENGINE0;
1810 	adev->doorbell_index.sdma_engine[1] = AMDGPU_DOORBELL_sDMA_ENGINE1;
1811 	adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
1812 	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;
1813 }
1814