/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/slab.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "vi_dpm.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#include "amdgpu_powerplay.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
#include "dce_virtual.h"
#include "mxgpu_vi.h"
#include "amdgpu_dm.h"

/*
 * Indirect registers accessors
 */
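/*
 * Each accessor below follows the same index/data pattern: the register
 * offset is written to an INDEX register, then the paired DATA register is
 * read or written, with a spinlock serializing the two-step sequence.
 * Illustrative use only (the pointers are hooked up in early_init):
 *
 *	val = adev->pcie_rreg(adev, ixPCIE_CNTL2);
 *	adev->pcie_wreg(adev, ixPCIE_CNTL2, val | PCIE_CNTL2__SLV_MEM_LS_EN_MASK);
 */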
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	r = RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	WREG32(mmPCIE_DATA, v);
	(void)RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_11, (reg));
	r = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_11, (reg));
	WREG32(mmSMC_IND_DATA_11, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX                                                      0x180
#define mmMP0PUB_IND_DATA                                                       0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}
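/*
 * Golden register init: each table entry below is an { offset, and_mask,
 * or_mask } triplet applied by amdgpu_device_program_register_sequence();
 * an and_mask of 0xffffffff overwrites the register outright.
 */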
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	if (amdgpu_sriov_vf(adev)) {
		xgpu_vi_init_golden_registers(adev);
		mutex_unlock(&adev->grbm_idx_mutex);
		return;
	}

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances.  Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		     u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;

	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}

static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

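/*
 * vi_read_disabled_bios - read the VBIOS while the ROM is normally disabled.
 *
 * Temporarily enables the BIOS ROM in BUS_CNTL, disables VGA mode on the
 * CRTCs, overrides the ROM clock, pulls the image in via amdgpu_read_bios(),
 * then restores every register it touched.
 */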
static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}

static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* the APU VBIOS image is part of the system BIOS image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_11, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}

static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg = 0;

	if (adev->asic_type == CHIP_TONGA ||
	    adev->asic_type == CHIP_FIJI) {
		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
		/* bit0: 0 means pf and 1 means vf */
		if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
		/* bit31: 0 means disable IOV and 1 means enable */
		if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
	}

	if (reg == 0) {
		if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}

static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS},
	{mmGRBM_STATUS2},
	{mmGRBM_STATUS_SE0},
	{mmGRBM_STATUS_SE1},
	{mmGRBM_STATUS_SE2},
	{mmGRBM_STATUS_SE3},
	{mmSRBM_STATUS},
	{mmSRBM_STATUS2},
	{mmSRBM_STATUS3},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
	{mmCP_STAT},
	{mmCP_STALLED_STAT1},
	{mmCP_STALLED_STAT2},
	{mmCP_STALLED_STAT3},
	{mmCP_CPF_BUSY_STAT},
	{mmCP_CPF_STALLED_STAT1},
	{mmCP_CPF_STATUS},
	{mmCP_CPC_BUSY_STAT},
	{mmCP_CPC_STALLED_STAT1},
	{mmCP_CPC_STATUS},
	{mmGB_ADDR_CONFIG},
	{mmMC_ARB_RAMCFG},
	{mmGB_TILE_MODE0},
	{mmGB_TILE_MODE1},
	{mmGB_TILE_MODE2},
	{mmGB_TILE_MODE3},
	{mmGB_TILE_MODE4},
	{mmGB_TILE_MODE5},
	{mmGB_TILE_MODE6},
	{mmGB_TILE_MODE7},
	{mmGB_TILE_MODE8},
	{mmGB_TILE_MODE9},
	{mmGB_TILE_MODE10},
	{mmGB_TILE_MODE11},
	{mmGB_TILE_MODE12},
	{mmGB_TILE_MODE13},
	{mmGB_TILE_MODE14},
	{mmGB_TILE_MODE15},
	{mmGB_TILE_MODE16},
	{mmGB_TILE_MODE17},
	{mmGB_TILE_MODE18},
	{mmGB_TILE_MODE19},
	{mmGB_TILE_MODE20},
	{mmGB_TILE_MODE21},
	{mmGB_TILE_MODE22},
	{mmGB_TILE_MODE23},
	{mmGB_TILE_MODE24},
	{mmGB_TILE_MODE25},
	{mmGB_TILE_MODE26},
	{mmGB_TILE_MODE27},
	{mmGB_TILE_MODE28},
	{mmGB_TILE_MODE29},
	{mmGB_TILE_MODE30},
	{mmGB_TILE_MODE31},
	{mmGB_MACROTILE_MODE0},
	{mmGB_MACROTILE_MODE1},
	{mmGB_MACROTILE_MODE2},
	{mmGB_MACROTILE_MODE3},
	{mmGB_MACROTILE_MODE4},
	{mmGB_MACROTILE_MODE5},
	{mmGB_MACROTILE_MODE6},
	{mmGB_MACROTILE_MODE7},
	{mmGB_MACROTILE_MODE8},
	{mmGB_MACROTILE_MODE9},
	{mmGB_MACROTILE_MODE10},
	{mmGB_MACROTILE_MODE11},
	{mmGB_MACROTILE_MODE12},
	{mmGB_MACROTILE_MODE13},
	{mmGB_MACROTILE_MODE14},
	{mmGB_MACROTILE_MODE15},
	{mmCC_RB_BACKEND_DISABLE, true},
	{mmGC_USER_RB_BACKEND_DISABLE, true},
	{mmGB_BACKEND_MAP, false},
	{mmPA_SC_RASTER_CONFIG, true},
	{mmPA_SC_RASTER_CONFIG_1, true},
};

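/*
 * vi_get_register_value - back the "allowed registers" read path.
 *
 * GRBM-indexed registers are served from the cached per-SE/SH RB
 * configuration where possible; otherwise the requested SE/SH is selected
 * under grbm_idx_mutex and the register is read live.  Non-indexed
 * registers come from the cached gfx config, falling back to a plain RREG32.
 */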
static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		uint32_t val;
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;

		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			return RREG32(reg_offset);
		}
	}
}

static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		bool indexed = vi_allowed_read_registers[i].grbm_indexed;

		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		*value = vi_get_register_value(adev, indexed, se_num, sh_num,
					       reg_offset);
		return 0;
	}
	return -EINVAL;
}

static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_device_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			adev->has_hw_reset = true;
			return 0;
		}
		udelay(1);
	}
	return -EINVAL;
}

/**
 * vi_asic_reset - reset the ASIC
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the ASIC via the PCI config reset sequence
 * (VI).
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	r = vi_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return r;
}

static u32 vi_get_config_memsize(struct amdgpu_device *adev)
{
	return RREG32(mmCONFIG_MEMSIZE);
}

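/*
 * vi_set_uvd_clock - program one UVD clock (VCLK or DCLK)
 *
 * Asks atombios for the divider setup for the requested frequency, programs
 * the post divider through the given CNTL register, then polls the matching
 * STATUS register (up to ~1s) until the clock reports stable.
 */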
static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);
	tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
		CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	if (r)
		return r;

	return 0;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       ecclk, false, &dividers);
	if (r)
		return r;

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	tmp = RREG32_SMC(ixCG_ECLK_CNTL);
	tmp &= ~(CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK |
		CG_ECLK_CNTL__ECLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(ixCG_ECLK_CNTL, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void vi_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}

#define ATI_REV_ID_FUSE_MACRO__ADDRESS      0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT        9
#define ATI_REV_ID_FUSE_MACRO__MASK         0x00001E00

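/*
 * On APUs the ATI rev id is fused and read through the SMC index space;
 * on dGPUs it comes from the PCIE_EFUSE4 BIF strap.
 */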
static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}

static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
	.get_config_memsize = &vi_get_config_memsize,
};

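/*
 * Bristol Ridge is a Carrizo derivative; these PCI revision id ranges
 * identify it so powergating can be enabled below even on rev_id 0 parts.
 */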
#define CZ_REV_BRISTOL(rev)	 \
	((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))

static int vi_common_early_init(void *handle)
{
	bool smc_enabled = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
	adev->gc_cac_rreg = &vi_gc_cac_rreg;
	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) &&
	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
		smc_enabled = true;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_POLARIS12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x64;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_UVD |
				AMD_PG_SUPPORT_VCE;
		}
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_GFX_SMG |
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_CP |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_vi_mailbox_set_irq_funcs(adev);
	}

	/* VI uses SMC-based firmware loading by default */
	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

	amdgpu_device_get_pcie_info(adev);

	return 0;
}

static int vi_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_get_irq(adev);

	return 0;
}

static int vi_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_add_irq_id(adev);

	return 0;
}

static int vi_common_sw_fini(void *handle)
{
	return 0;
}

static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}

static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_put_irq(adev);

	return 0;
}

static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}

static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
						   bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

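	/* 0x157a is the DRM block's light-sleep control; it has no symbolic
	 * name in the register headers, and bit 0 appears to be the LS
	 * enable (inferred from how it is toggled below).
	 */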
	temp = data = RREG32(0x157a);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (temp != data)
		WREG32(0x157a, data);
}

static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}

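/*
 * On SMU-managed ASICs (Tonga, Polaris) clock gating is not programmed
 * directly; each PP_CG_MSG_ID() packs a system group, block, supported-state
 * mask and requested state into a message that powerplay hands to the SMU.
 */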
static int vi_common_set_clockgating_state_by_smu(void *handle,
					   enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state = 0;
	uint32_t pp_support_state = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
			pp_support_state = AMD_CG_SUPPORT_MC_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
			pp_support_state |= AMD_CG_SUPPORT_MC_MGCG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_MC,
			       pp_support_state,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
			pp_support_state = AMD_CG_SUPPORT_SDMA_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
			pp_support_state |= AMD_CG_SUPPORT_SDMA_MGCG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_SDMA,
			       pp_support_state,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			pp_support_state = AMD_CG_SUPPORT_HDP_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
			pp_support_state |= AMD_CG_SUPPORT_HDP_MGCG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_HDP,
			       pp_support_state,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_BIF,
			       PP_STATE_SUPPORT_LS,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_BIF,
			       PP_STATE_SUPPORT_CG,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_DRM,
			       PP_STATE_SUPPORT_LS,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_ROM,
			       PP_STATE_SUPPORT_CG,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	return 0;
}

static int vi_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		vi_common_set_clockgating_state_by_smu(adev, state);
		break;
	default:
		break;
	}
	return 0;
}

static int vi_common_set_powergating_state(void *handle,
					    enum amd_powergating_state state)
{
	return 0;
}

static void vi_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(ixPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(mmHDP_MEM_POWER_LS);
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_HDP_MGCG */
	data = RREG32(mmHDP_HOST_PATH_CNTL);
	if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
}

static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = vi_common_late_init,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
	.get_clockgating_state = vi_common_get_clockgating_state,
};

static const struct amdgpu_ip_block_version vi_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vi_common_ip_funcs,
};

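/*
 * Register the per-ASIC IP blocks.  Order matters: amdgpu initializes
 * blocks in the order they are added (common, GMC, IH, powerplay, display,
 * GFX, SDMA, then the multimedia engines).
 */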
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	/* in the early init stage, the vbios code won't work yet */
	vi_detect_hw_virtualization(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_vi_virt_ops;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_TONGA:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
	case CHIP_CARRIZO:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	case CHIP_STONEY:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}