1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/firmware.h>
24 #include <linux/slab.h>
25 #include <linux/module.h>
26 #include <linux/pci.h>
27 
28 #include <drm/amdgpu_drm.h>
29 
30 #include "amdgpu.h"
31 #include "amdgpu_ih.h"
32 #include "amdgpu_uvd.h"
33 #include "amdgpu_vce.h"
34 #include "amdgpu_ucode.h"
35 #include "amdgpu_psp.h"
36 #include "atom.h"
37 #include "amd_pcie.h"
38 
39 #include "uvd/uvd_7_0_offset.h"
40 #include "gc/gc_9_0_offset.h"
41 #include "gc/gc_9_0_sh_mask.h"
42 #include "sdma0/sdma0_4_0_offset.h"
43 #include "sdma1/sdma1_4_0_offset.h"
44 #include "nbio/nbio_7_0_default.h"
45 #include "nbio/nbio_7_0_offset.h"
46 #include "nbio/nbio_7_0_sh_mask.h"
47 #include "nbio/nbio_7_0_smn.h"
48 #include "mp/mp_9_0_offset.h"
49 
50 #include "soc15.h"
51 #include "soc15_common.h"
52 #include "gfx_v9_0.h"
53 #include "gmc_v9_0.h"
54 #include "gfxhub_v1_0.h"
55 #include "mmhub_v1_0.h"
56 #include "df_v1_7.h"
57 #include "df_v3_6.h"
58 #include "nbio_v6_1.h"
59 #include "nbio_v7_0.h"
60 #include "nbio_v7_4.h"
61 #include "hdp_v4_0.h"
62 #include "vega10_ih.h"
63 #include "vega20_ih.h"
64 #include "navi10_ih.h"
65 #include "sdma_v4_0.h"
66 #include "uvd_v7_0.h"
67 #include "vce_v4_0.h"
68 #include "vcn_v1_0.h"
69 #include "vcn_v2_0.h"
70 #include "jpeg_v2_0.h"
71 #include "vcn_v2_5.h"
72 #include "jpeg_v2_5.h"
73 #include "smuio_v9_0.h"
74 #include "smuio_v11_0.h"
75 #include "smuio_v13_0.h"
76 #include "amdgpu_vkms.h"
77 #include "mxgpu_ai.h"
78 #include "amdgpu_ras.h"
79 #include "amdgpu_xgmi.h"
80 #include <uapi/linux/kfd_ioctl.h>
81 
82 #define mmMP0_MISC_CGTT_CTRL0                                                                   0x01b9
83 #define mmMP0_MISC_CGTT_CTRL0_BASE_IDX                                                          0
84 #define mmMP0_MISC_LIGHT_SLEEP_CTRL                                                             0x01ba
85 #define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX                                                    0
86 
87 static const struct amd_ip_funcs soc15_common_ip_funcs;
88 
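/* Video codec capability tables.  Each entry is built with
 * codec_info_build(codec, max width, max height, max level); the last
 * argument is the codec-specific maximum level supported (0 where it
 * is left unspecified, e.g. JPEG).
 */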
89 /* Vega, Raven, Arcturus */
90 static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] =
91 {
92 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
93 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 0)},
94 };
95 
96 static const struct amdgpu_video_codecs vega_video_codecs_encode =
97 {
98 	.codec_count = ARRAY_SIZE(vega_video_codecs_encode_array),
99 	.codec_array = vega_video_codecs_encode_array,
100 };
101 
102 /* Vega */
103 static const struct amdgpu_video_codec_info vega_video_codecs_decode_array[] =
104 {
105 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
106 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
107 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
108 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
109 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
110 };
111 
112 static const struct amdgpu_video_codecs vega_video_codecs_decode =
113 {
114 	.codec_count = ARRAY_SIZE(vega_video_codecs_decode_array),
115 	.codec_array = vega_video_codecs_decode_array,
116 };
117 
118 /* Raven */
119 static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] =
120 {
121 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
122 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
123 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
124 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
125 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
126 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 8192, 8192, 0)},
127 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 4096, 4096, 0)},
128 };
129 
130 static const struct amdgpu_video_codecs rv_video_codecs_decode =
131 {
132 	.codec_count = ARRAY_SIZE(rv_video_codecs_decode_array),
133 	.codec_array = rv_video_codecs_decode_array,
134 };
135 
136 /* Renoir, Arcturus */
137 static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] =
138 {
139 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
140 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
141 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
142 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
143 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
144 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
145 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
146 };
147 
148 static const struct amdgpu_video_codecs rn_video_codecs_decode =
149 {
150 	.codec_count = ARRAY_SIZE(rn_video_codecs_decode_array),
151 	.codec_array = rn_video_codecs_decode_array,
152 };
153 
154 static const struct amdgpu_video_codec_info vcn_4_0_3_video_codecs_decode_array[] = {
155 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
156 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
157 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
158 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
159 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
160 };
161 
162 static const struct amdgpu_video_codecs vcn_4_0_3_video_codecs_decode = {
163 	.codec_count = ARRAY_SIZE(vcn_4_0_3_video_codecs_decode_array),
164 	.codec_array = vcn_4_0_3_video_codecs_decode_array,
165 };
166 
167 static const struct amdgpu_video_codecs vcn_4_0_3_video_codecs_encode = {
168 	.codec_count = 0,
169 	.codec_array = NULL,
170 };
171 
172 static const struct amdgpu_video_codecs vcn_5_0_1_video_codecs_encode_vcn0 = {
173 	.codec_count = 0,
174 	.codec_array = NULL,
175 };
176 
177 static const struct amdgpu_video_codec_info vcn_5_0_1_video_codecs_decode_array_vcn0[] = {
178 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
179 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
180 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
181 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
182 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
183 };
184 
185 static const struct amdgpu_video_codecs vcn_5_0_1_video_codecs_decode_vcn0 = {
186 	.codec_count = ARRAY_SIZE(vcn_5_0_1_video_codecs_decode_array_vcn0),
187 	.codec_array = vcn_5_0_1_video_codecs_decode_array_vcn0,
188 };
189 
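/*
 * soc15_query_video_codecs - return the supported video codec list
 *
 * Picks the encode or decode codec table that matches the device's
 * VCE (Vega) or UVD/VCN IP version.  Returns 0 on success and -EINVAL
 * when no table exists for that IP version.
 */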
190 static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,
191 				    const struct amdgpu_video_codecs **codecs)
192 {
193 	if (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
194 		switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
195 		case IP_VERSION(4, 0, 0):
196 		case IP_VERSION(4, 1, 0):
197 			if (encode)
198 				*codecs = &vega_video_codecs_encode;
199 			else
200 				*codecs = &vega_video_codecs_decode;
201 			return 0;
202 		default:
203 			return -EINVAL;
204 		}
205 	} else {
206 		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
207 		case IP_VERSION(1, 0, 0):
208 		case IP_VERSION(1, 0, 1):
209 			if (encode)
210 				*codecs = &vega_video_codecs_encode;
211 			else
212 				*codecs = &rv_video_codecs_decode;
213 			return 0;
214 		case IP_VERSION(2, 5, 0):
215 		case IP_VERSION(2, 6, 0):
216 		case IP_VERSION(2, 2, 0):
217 			if (encode)
218 				*codecs = &vega_video_codecs_encode;
219 			else
220 				*codecs = &rn_video_codecs_decode;
221 			return 0;
222 		case IP_VERSION(4, 0, 3):
223 			if (encode)
224 				*codecs = &vcn_4_0_3_video_codecs_encode;
225 			else
226 				*codecs = &vcn_4_0_3_video_codecs_decode;
227 			return 0;
228 		case IP_VERSION(5, 0, 1):
229 			if (encode)
230 				*codecs = &vcn_5_0_1_video_codecs_encode_vcn0;
231 			else
232 				*codecs = &vcn_5_0_1_video_codecs_decode_vcn0;
233 			return 0;
234 		default:
235 			return -EINVAL;
236 		}
237 	}
238 }
239 
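/* The register helpers below access indirect register spaces through an
 * INDEX/DATA pair: the target offset is written to the INDEX register and
 * the value is then read from or written to the DATA register, with a
 * spinlock keeping the two-step sequence atomic.
 */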
240 static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
241 {
242 	unsigned long flags, address, data;
243 	u32 r;
244 
245 	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
246 	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);
247 
248 	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
249 	WREG32(address, ((reg) & 0x1ff));
250 	r = RREG32(data);
251 	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
252 	return r;
253 }
254 
255 static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
256 {
257 	unsigned long flags, address, data;
258 
259 	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
260 	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);
261 
262 	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
263 	WREG32(address, ((reg) & 0x1ff));
264 	WREG32(data, (v));
265 	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
266 }
267 
268 static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
269 {
270 	unsigned long flags, address, data;
271 	u32 r;
272 
273 	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
274 	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
275 
276 	spin_lock_irqsave(&adev->didt_idx_lock, flags);
277 	WREG32(address, (reg));
278 	r = RREG32(data);
279 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
280 	return r;
281 }
282 
283 static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
284 {
285 	unsigned long flags, address, data;
286 
287 	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
288 	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
289 
290 	spin_lock_irqsave(&adev->didt_idx_lock, flags);
291 	WREG32(address, (reg));
292 	WREG32(data, (v));
293 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
294 }
295 
296 static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
297 {
298 	unsigned long flags;
299 	u32 r;
300 
301 	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
302 	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
303 	r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
304 	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
305 	return r;
306 }
307 
308 static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
309 {
310 	unsigned long flags;
311 
312 	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
313 	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
314 	WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
315 	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
316 }
317 
318 static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
319 {
320 	unsigned long flags;
321 	u32 r;
322 
323 	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
324 	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
325 	r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
326 	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
327 	return r;
328 }
329 
330 static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
331 {
332 	unsigned long flags;
333 
334 	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
335 	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
336 	WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
337 	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
338 }
339 
340 static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
341 {
342 	return adev->nbio.funcs->get_memsize(adev);
343 }
344 
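/* Return the reference clock (xclk) used as the timing base.  The value
 * is in the same units as adev->clock.spll.reference_freq (10 kHz units
 * in amdgpu, so the hard-coded 10000 below corresponds to a 100 MHz
 * reference); some MP1 versions fix the rate or divide it by 4.
 */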
345 static u32 soc15_get_xclk(struct amdgpu_device *adev)
346 {
347 	u32 reference_clock = adev->clock.spll.reference_freq;
348 
349 	if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(12, 0, 0) ||
350 	    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(12, 0, 1) ||
351 	    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6) ||
352 	    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) ||
353 	    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 14))
354 		return 10000;
355 	if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(10, 0, 0) ||
356 	    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(10, 0, 1))
357 		return reference_clock / 4;
358 
359 	return reference_clock;
360 }
361 
362 
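/**
 * soc15_grbm_select - select the GRBM target for subsequent accesses
 *
 * @adev: amdgpu_device pointer
 * @me: micro engine id
 * @pipe: pipe id
 * @queue: queue id
 * @vmid: VMID
 * @xcc_id: XCC (GC instance) to program
 *
 * Programs GRBM_GFX_CNTL so that later GC register accesses are routed
 * to the selected me/pipe/queue/vmid combination.
 */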
363 void soc15_grbm_select(struct amdgpu_device *adev,
364 		     u32 me, u32 pipe, u32 queue, u32 vmid, int xcc_id)
365 {
366 	u32 grbm_gfx_cntl = 0;
367 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
368 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
369 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
370 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
371 
372 	WREG32_SOC15_RLC_SHADOW(GC, xcc_id, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
373 }
374 
375 static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
376 {
377 	/* todo */
378 	return false;
379 }
380 
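/* Whitelist of registers that may be read through the read_register
 * ASIC callback; soc15_read_register() below rejects anything that is
 * not in this list with -EINVAL.
 */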
381 static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
382 	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
383 	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
384 	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
385 	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
386 	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
387 	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
388 	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
389 	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
390 	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
391 	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
392 	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
393 	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
394 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
395 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
396 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
397 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
398 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
399 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
400 	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
401 	{ SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
402 };
403 
404 static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
405 					 u32 sh_num, u32 reg_offset)
406 {
407 	uint32_t val;
408 
409 	mutex_lock(&adev->grbm_idx_mutex);
410 	if (se_num != 0xffffffff || sh_num != 0xffffffff)
411 		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);
412 
413 	val = RREG32(reg_offset);
414 
415 	if (se_num != 0xffffffff || sh_num != 0xffffffff)
416 		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
417 	mutex_unlock(&adev->grbm_idx_mutex);
418 	return val;
419 }
420 
421 static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
422 					 bool indexed, u32 se_num,
423 					 u32 sh_num, u32 reg_offset)
424 {
425 	if (indexed) {
426 		return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
427 	} else {
428 		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
429 			return adev->gfx.config.gb_addr_config;
430 		else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
431 			return adev->gfx.config.db_debug2;
432 		return RREG32(reg_offset);
433 	}
434 }
435 
436 static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
437 			    u32 sh_num, u32 reg_offset, u32 *value)
438 {
439 	uint32_t i;
440 	struct soc15_allowed_register_entry *en;
441 
442 	*value = 0;
443 	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
444 		en = &soc15_allowed_read_registers[i];
445 		if (!adev->reg_offset[en->hwip][en->inst])
446 			continue;
447 		else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
448 					+ en->reg_offset))
449 			continue;
450 
451 		*value = soc15_get_register_value(adev,
452 						  soc15_allowed_read_registers[i].grbm_indexed,
453 						  se_num, sh_num, reg_offset);
454 		return 0;
455 	}
456 	return -EINVAL;
457 }
458 
459 
460 /**
461  * soc15_program_register_sequence - program an array of registers.
462  *
463  * @adev: amdgpu_device pointer
464  * @regs: pointer to the register array
465  * @array_size: size of the register array
466  *
467  * Programs an array of registers with AND and OR masks.
468  * This is a helper for setting golden registers.
469  */
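 * Illustrative entry only (not taken from this file): golden register
 * tables are normally built with the SOC15_REG_GOLDEN_VALUE() helper,
 * e.g.
 *
 *   SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400)
 *
 * which clears the bits in and_mask and ORs in (or_mask & and_mask),
 * matching the read-modify-write performed below.
 */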
470 
471 void soc15_program_register_sequence(struct amdgpu_device *adev,
472 					     const struct soc15_reg_golden *regs,
473 					     const u32 array_size)
474 {
475 	const struct soc15_reg_golden *entry;
476 	u32 tmp, reg;
477 	int i;
478 
479 	for (i = 0; i < array_size; ++i) {
480 		entry = &regs[i];
481 		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
482 
483 		if (entry->and_mask == 0xffffffff) {
484 			tmp = entry->or_mask;
485 		} else {
486 			tmp = (entry->hwip == GC_HWIP) ?
487 				RREG32_SOC15_IP(GC, reg) : RREG32(reg);
488 
489 			tmp &= ~(entry->and_mask);
490 			tmp |= (entry->or_mask & entry->and_mask);
491 		}
492 
493 		if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) ||
494 			reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE) ||
495 			reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1) ||
496 			reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
497 			WREG32_RLC(reg, tmp);
498 		else
499 			(entry->hwip == GC_HWIP) ?
500 				WREG32_SOC15_IP(GC, reg, tmp) : WREG32(reg, tmp);
501 
502 	}
503 
504 }
505 
506 static int soc15_asic_baco_reset(struct amdgpu_device *adev)
507 {
508 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
509 	int ret = 0;
510 
511 	/* avoid NBIF getting stuck when doing RAS recovery in BACO reset */
512 	if (ras && adev->ras_enabled)
513 		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
514 
515 	ret = amdgpu_dpm_baco_reset(adev);
516 	if (ret)
517 		return ret;
518 
519 	/* re-enable doorbell interrupt after BACO exit */
520 	if (ras && adev->ras_enabled)
521 		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
522 
523 	return 0;
524 }
525 
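/* Pick the reset type: an explicitly requested method is honored
 * (except that CPU-connected XGMI parts are limited to mode2); otherwise
 * the method is derived from the MP1 IP version, preferring BACO where
 * the firmware supports it and falling back to mode1.
 */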
526 static enum amd_reset_method
527 soc15_asic_reset_method(struct amdgpu_device *adev)
528 {
529 	int baco_reset = 0;
530 	bool connected_to_cpu = false;
531 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
532 
533 	if (adev->gmc.xgmi.supported && adev->gmc.xgmi.connected_to_cpu)
534 		connected_to_cpu = true;
535 
536 	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
537 	    amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
538 	    amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
539 	    amdgpu_reset_method == AMD_RESET_METHOD_PCI) {
540 		/* If connected to a CPU, the driver only supports mode2 reset */
541 		if (connected_to_cpu)
542 			return AMD_RESET_METHOD_MODE2;
543 		return amdgpu_reset_method;
544 	}
545 
546 	if (amdgpu_reset_method != -1)
547 		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
548 				  amdgpu_reset_method);
549 
550 	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
551 	case IP_VERSION(10, 0, 0):
552 	case IP_VERSION(10, 0, 1):
553 	case IP_VERSION(12, 0, 0):
554 	case IP_VERSION(12, 0, 1):
555 		return AMD_RESET_METHOD_MODE2;
556 	case IP_VERSION(9, 0, 0):
557 	case IP_VERSION(11, 0, 2):
558 		if (adev->asic_type == CHIP_VEGA20) {
559 			if (adev->psp.sos.fw_version >= 0x80067)
560 				baco_reset = amdgpu_dpm_is_baco_supported(adev);
561 			/*
562 			 * 1. PMFW version > 0x284300: all cases use baco
563 			 * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco
564 			 */
565 			if (ras && adev->ras_enabled &&
566 			    adev->pm.fw_version <= 0x283400)
567 				baco_reset = 0;
568 		} else {
569 			baco_reset = amdgpu_dpm_is_baco_supported(adev);
570 		}
571 		break;
572 	case IP_VERSION(13, 0, 2):
573 		/*
574 		 * 1. Connected to a CPU: the driver issues a mode2 reset.
575 		 * 2. Discrete GPU: the driver issues a mode1 reset.
576 		 */
577 		if (connected_to_cpu)
578 			return AMD_RESET_METHOD_MODE2;
579 		break;
580 	case IP_VERSION(13, 0, 6):
581 	case IP_VERSION(13, 0, 14):
582 	case IP_VERSION(13, 0, 12):
583 		/* Use gpu_recovery param to target a reset method.
584 		 * Enable triggering of GPU reset only if specified
585 		 * by module parameter.
586 		 */
587 		if (amdgpu_gpu_recovery == 4 || amdgpu_gpu_recovery == 5)
588 			return AMD_RESET_METHOD_MODE2;
589 		else if (!(adev->flags & AMD_IS_APU))
590 			return AMD_RESET_METHOD_MODE1;
591 		else
592 			return AMD_RESET_METHOD_MODE2;
593 	default:
594 		break;
595 	}
596 
597 	if (baco_reset)
598 		return AMD_RESET_METHOD_BACO;
599 	else
600 		return AMD_RESET_METHOD_MODE1;
601 }
602 
603 static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
604 {
605 	/* A reset is needed for the following suspend abort cases:
606 	 * 1) S3 suspend aborted during a normal S3 suspend.
607 	 * 2) S3 suspend aborted while performing the pm core test.
608 	 */
609 	if (adev->in_s3 && !pm_resume_via_firmware())
610 		return true;
611 	else
612 		return false;
613 }
614 
615 static int soc15_asic_reset(struct amdgpu_device *adev)
616 {
617 	/* The original Raven doesn't have a full ASIC reset.
618 	 * On the latest Raven, the GPU reset can be performed
619 	 * successfully, so temporarily enable it for the
620 	 * S3 suspend abort case.
621 	 */
622 
623 	if ((adev->apu_flags & AMD_APU_IS_PICASSO ||
624 			!(adev->apu_flags & AMD_APU_IS_RAVEN)) &&
625 			soc15_need_reset_on_resume(adev))
626 		goto asic_reset;
627 
628 	if ((adev->apu_flags & AMD_APU_IS_RAVEN) ||
629 			(adev->apu_flags & AMD_APU_IS_RAVEN2))
630 		return 0;
631 
632 asic_reset:
633 	switch (soc15_asic_reset_method(adev)) {
634 	case AMD_RESET_METHOD_PCI:
635 		dev_info(adev->dev, "PCI reset\n");
636 		return amdgpu_device_pci_reset(adev);
637 	case AMD_RESET_METHOD_BACO:
638 		dev_info(adev->dev, "BACO reset\n");
639 		return soc15_asic_baco_reset(adev);
640 	case AMD_RESET_METHOD_MODE2:
641 		dev_info(adev->dev, "MODE2 reset\n");
642 		return amdgpu_dpm_mode2_reset(adev);
643 	default:
644 		dev_info(adev->dev, "MODE1 reset\n");
645 		return amdgpu_device_mode1_reset(adev);
646 	}
647 }
648 
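/* BACO (Bus Active, Chip Off) support depends on the MP1 IP version;
 * on Vega20 it additionally requires a recent enough sOS firmware
 * before the SMU is asked whether BACO is usable.
 */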
649 static int soc15_supports_baco(struct amdgpu_device *adev)
650 {
651 	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
652 	case IP_VERSION(9, 0, 0):
653 	case IP_VERSION(11, 0, 2):
654 		if (adev->asic_type == CHIP_VEGA20) {
655 			if (adev->psp.sos.fw_version >= 0x80067)
656 				return amdgpu_dpm_is_baco_supported(adev);
657 			return 0;
658 		} else {
659 			return amdgpu_dpm_is_baco_supported(adev);
660 		}
661 		break;
662 	default:
663 		return 0;
664 	}
665 }
666 
667 /*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
668 			u32 cntl_reg, u32 status_reg)
669 {
670 	return 0;
671 }*/
672 
673 static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
674 {
675 	/*int r;
676 
677 	r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
678 	if (r)
679 		return r;
680 
681 	r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
682 	*/
683 	return 0;
684 }
685 
686 static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
687 {
688 	/* todo */
689 
690 	return 0;
691 }
692 
693 static void soc15_program_aspm(struct amdgpu_device *adev)
694 {
695 	if (!amdgpu_device_should_use_aspm(adev))
696 		return;
697 
698 	if (adev->nbio.funcs->program_aspm)
699 		adev->nbio.funcs->program_aspm(adev);
700 }
701 
702 const struct amdgpu_ip_block_version vega10_common_ip_block =
703 {
704 	.type = AMD_IP_BLOCK_TYPE_COMMON,
705 	.major = 2,
706 	.minor = 0,
707 	.rev = 0,
708 	.funcs = &soc15_common_ip_funcs,
709 };
710 
711 static void soc15_reg_base_init(struct amdgpu_device *adev)
712 {
713 	/* Set IP register base before any HW register access */
714 	switch (adev->asic_type) {
715 	case CHIP_VEGA10:
716 	case CHIP_VEGA12:
717 	case CHIP_RAVEN:
718 	case CHIP_RENOIR:
719 		vega10_reg_base_init(adev);
720 		break;
721 	case CHIP_VEGA20:
722 		vega20_reg_base_init(adev);
723 		break;
724 	case CHIP_ARCTURUS:
725 		arct_reg_base_init(adev);
726 		break;
727 	case CHIP_ALDEBARAN:
728 		aldebaran_reg_base_init(adev);
729 		break;
730 	default:
731 		DRM_ERROR("Unsupported asic type: %d!\n", adev->asic_type);
732 		break;
733 	}
734 }
735 
736 void soc15_set_virt_ops(struct amdgpu_device *adev)
737 {
738 	adev->virt.ops = &xgpu_ai_virt_ops;
739 
740 	/* init soc15 reg base early enough so we can
741 	 * request full access for sriov before
742 	 * set_ip_blocks. */
743 	soc15_reg_base_init(adev);
744 }
745 
746 static bool soc15_need_full_reset(struct amdgpu_device *adev)
747 {
748 	/* change this when we implement soft reset */
749 	return true;
750 }
751 
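/* Sample two PCIe TXCLK perf counters (received messages and posted
 * requests sent) over a one second window and return the 64-bit totals,
 * folding in the upper overflow bits read back from the control register.
 */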
752 static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
753 				 uint64_t *count1)
754 {
755 	uint32_t perfctr = 0;
756 	uint64_t cnt0_of, cnt1_of;
757 	int tmp;
758 
759 	/* This reports 0 on APUs, so return to avoid writing/reading registers
760 	 * that may or may not be different from their GPU counterparts
761 	 */
762 	if (adev->flags & AMD_IS_APU)
763 		return;
764 
765 	/* Set the 2 events that we wish to watch, defined above */
766 	/* Reg 40 is # received msgs */
767 	/* Reg 104 is # of posted requests sent */
768 	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
769 	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
770 
771 	/* Write to enable desired perf counters */
772 	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
773 	/* Zero out and enable the perf counters
774 	 * Write 0x5:
775 	 * Bit 0 = Start all counters(1)
776 	 * Bit 2 = Global counter reset enable(1)
777 	 */
778 	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);
779 
780 	msleep(1000);
781 
782 	/* Load the shadow and disable the perf counters
783 	 * Write 0x2:
784 	 * Bit 0 = Stop counters(0)
785 	 * Bit 1 = Load the shadow counters(1)
786 	 */
787 	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);
788 
789 	/* Read register values to get any >32bit overflow */
790 	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK);
791 	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
792 	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
793 
794 	/* Get the values and add the overflow */
795 	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
796 	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
797 }
798 
799 static void vega20_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
800 				 uint64_t *count1)
801 {
802 	uint32_t perfctr = 0;
803 	uint64_t cnt0_of, cnt1_of;
804 	int tmp;
805 
806 	/* This reports 0 on APUs, so return to avoid writing/reading registers
807 	 * that may or may not be different from their GPU counterparts
808 	 */
809 	if (adev->flags & AMD_IS_APU)
810 		return;
811 
812 	/* Set the 2 events that we wish to watch, defined above */
813 	/* Reg 40 is # received msgs */
814 	/* Reg 108 is # of posted requests sent on VG20 */
815 	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
816 				EVENT0_SEL, 40);
817 	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
818 				EVENT1_SEL, 108);
819 
820 	/* Write to enable desired perf counters */
821 	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctr);
822 	/* Zero out and enable the perf counters
823 	 * Write 0x5:
824 	 * Bit 0 = Start all counters(1)
825 	 * Bit 2 = Global counter reset enable(1)
826 	 */
827 	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);
828 
829 	msleep(1000);
830 
831 	/* Load the shadow and disable the perf counters
832 	 * Write 0x2:
833 	 * Bit 0 = Stop counters(0)
834 	 * Bit 1 = Load the shadow counters(1)
835 	 */
836 	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);
837 
838 	/* Read register values to get any >32bit overflow */
839 	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3);
840 	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER0_UPPER);
841 	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER1_UPPER);
842 
843 	/* Get the values and add the overflow */
844 	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) | (cnt0_of << 32);
845 	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK3) | (cnt1_of << 32);
846 }
847 
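/* Decide whether the ASIC needs a reset during driver init: Renoir is
 * always reset as a CP hang workaround, GMC/PSP may request one, and in
 * passthrough on dGPUs the sOS sign-of-life scratch register is checked
 * to detect already-loaded PSP firmware.
 */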
848 static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
849 {
850 	u32 sol_reg;
851 
852 	/* CP hangs in the IGT reloading test on RN, reset as a workaround */
853 	if (adev->asic_type == CHIP_RENOIR)
854 		return true;
855 
856 	if (amdgpu_gmc_need_reset_on_init(adev))
857 		return true;
858 	if (amdgpu_psp_tos_reload_needed(adev))
859 		return true;
860 	/* Just return false for soc15 GPUs.  Reset does not seem to
861 	 * be necessary.
862 	 */
863 	if (!amdgpu_passthrough(adev))
864 		return false;
865 
866 	if (adev->flags & AMD_IS_APU)
867 		return false;
868 
869 	/* Check sOS sign of life register to confirm sys driver and sOS
870 	 * have already been loaded.
871 	 */
872 	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
873 	if (sol_reg)
874 		return true;
875 
876 	return false;
877 }
878 
879 static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
880 {
881 	uint64_t nak_r, nak_g;
882 
883 	/* Get the number of NAKs received and generated */
884 	nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK);
885 	nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED);
886 
887 	/* Add the total number of NAKs, i.e. the number of replays */
888 	return (nak_r + nak_g);
889 }
890 
891 static void soc15_pre_asic_init(struct amdgpu_device *adev)
892 {
893 	gmc_v9_0_restore_registers(adev);
894 }
895 
896 static const struct amdgpu_asic_funcs soc15_asic_funcs =
897 {
898 	.read_disabled_bios = &soc15_read_disabled_bios,
899 	.read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
900 	.read_register = &soc15_read_register,
901 	.reset = &soc15_asic_reset,
902 	.reset_method = &soc15_asic_reset_method,
903 	.get_xclk = &soc15_get_xclk,
904 	.set_uvd_clocks = &soc15_set_uvd_clocks,
905 	.set_vce_clocks = &soc15_set_vce_clocks,
906 	.get_config_memsize = &soc15_get_config_memsize,
907 	.need_full_reset = &soc15_need_full_reset,
908 	.init_doorbell_index = &vega10_doorbell_index_init,
909 	.get_pcie_usage = &soc15_get_pcie_usage,
910 	.need_reset_on_init = &soc15_need_reset_on_init,
911 	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
912 	.supports_baco = &soc15_supports_baco,
913 	.pre_asic_init = &soc15_pre_asic_init,
914 	.query_video_codecs = &soc15_query_video_codecs,
915 };
916 
917 static const struct amdgpu_asic_funcs vega20_asic_funcs =
918 {
919 	.read_disabled_bios = &soc15_read_disabled_bios,
920 	.read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
921 	.read_register = &soc15_read_register,
922 	.reset = &soc15_asic_reset,
923 	.reset_method = &soc15_asic_reset_method,
924 	.get_xclk = &soc15_get_xclk,
925 	.set_uvd_clocks = &soc15_set_uvd_clocks,
926 	.set_vce_clocks = &soc15_set_vce_clocks,
927 	.get_config_memsize = &soc15_get_config_memsize,
928 	.need_full_reset = &soc15_need_full_reset,
929 	.init_doorbell_index = &vega20_doorbell_index_init,
930 	.get_pcie_usage = &vega20_get_pcie_usage,
931 	.need_reset_on_init = &soc15_need_reset_on_init,
932 	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
933 	.supports_baco = &soc15_supports_baco,
934 	.pre_asic_init = &soc15_pre_asic_init,
935 	.query_video_codecs = &soc15_query_video_codecs,
936 };
937 
938 static const struct amdgpu_asic_funcs aqua_vanjaram_asic_funcs =
939 {
940 	.read_disabled_bios = &soc15_read_disabled_bios,
941 	.read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
942 	.read_register = &soc15_read_register,
943 	.reset = &soc15_asic_reset,
944 	.reset_method = &soc15_asic_reset_method,
945 	.get_xclk = &soc15_get_xclk,
946 	.set_uvd_clocks = &soc15_set_uvd_clocks,
947 	.set_vce_clocks = &soc15_set_vce_clocks,
948 	.get_config_memsize = &soc15_get_config_memsize,
949 	.need_full_reset = &soc15_need_full_reset,
950 	.init_doorbell_index = &aqua_vanjaram_doorbell_index_init,
951 	.need_reset_on_init = &soc15_need_reset_on_init,
952 	.get_pcie_replay_count = &amdgpu_nbio_get_pcie_replay_count,
953 	.supports_baco = &soc15_supports_baco,
954 	.pre_asic_init = &soc15_pre_asic_init,
955 	.query_video_codecs = &soc15_query_video_codecs,
956 	.encode_ext_smn_addressing = &aqua_vanjaram_encode_ext_smn_addressing,
957 	.get_reg_state = &aqua_vanjaram_get_reg_state,
958 };
959 
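/* Early init for the SOC15 common IP block: install the indirect
 * register accessors defined above, then select the asic_funcs table,
 * clock/power gating feature flags and external revision id that match
 * the GC IP version.
 */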
960 static int soc15_common_early_init(struct amdgpu_ip_block *ip_block)
961 {
962 	struct amdgpu_device *adev = ip_block->adev;
963 
964 	adev->nbio.funcs->set_reg_remap(adev);
965 	adev->smc_rreg = NULL;
966 	adev->smc_wreg = NULL;
967 	adev->pcie_rreg = &amdgpu_device_indirect_rreg;
968 	adev->pcie_wreg = &amdgpu_device_indirect_wreg;
969 	adev->pcie_rreg_ext = &amdgpu_device_indirect_rreg_ext;
970 	adev->pcie_wreg_ext = &amdgpu_device_indirect_wreg_ext;
971 	adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64;
972 	adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64;
973 	adev->pcie_rreg64_ext = &amdgpu_device_indirect_rreg64_ext;
974 	adev->pcie_wreg64_ext = &amdgpu_device_indirect_wreg64_ext;
975 	adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
976 	adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
977 	adev->didt_rreg = &soc15_didt_rreg;
978 	adev->didt_wreg = &soc15_didt_wreg;
979 	adev->gc_cac_rreg = &soc15_gc_cac_rreg;
980 	adev->gc_cac_wreg = &soc15_gc_cac_wreg;
981 	adev->se_cac_rreg = &soc15_se_cac_rreg;
982 	adev->se_cac_wreg = &soc15_se_cac_wreg;
983 
984 	adev->rev_id = amdgpu_device_get_rev_id(adev);
985 	adev->external_rev_id = 0xFF;
986 	/* TODO: split the CG and PG flags based on the IP version
987 	 * to which they apply.
988 	 */
989 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
990 	case IP_VERSION(9, 0, 1):
991 		adev->asic_funcs = &soc15_asic_funcs;
992 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
993 			AMD_CG_SUPPORT_GFX_MGLS |
994 			AMD_CG_SUPPORT_GFX_RLC_LS |
995 			AMD_CG_SUPPORT_GFX_CP_LS |
996 			AMD_CG_SUPPORT_GFX_3D_CGCG |
997 			AMD_CG_SUPPORT_GFX_3D_CGLS |
998 			AMD_CG_SUPPORT_GFX_CGCG |
999 			AMD_CG_SUPPORT_GFX_CGLS |
1000 			AMD_CG_SUPPORT_BIF_MGCG |
1001 			AMD_CG_SUPPORT_BIF_LS |
1002 			AMD_CG_SUPPORT_HDP_LS |
1003 			AMD_CG_SUPPORT_DRM_MGCG |
1004 			AMD_CG_SUPPORT_DRM_LS |
1005 			AMD_CG_SUPPORT_ROM_MGCG |
1006 			AMD_CG_SUPPORT_DF_MGCG |
1007 			AMD_CG_SUPPORT_SDMA_MGCG |
1008 			AMD_CG_SUPPORT_SDMA_LS |
1009 			AMD_CG_SUPPORT_MC_MGCG |
1010 			AMD_CG_SUPPORT_MC_LS;
1011 		adev->pg_flags = 0;
1012 		adev->external_rev_id = 0x1;
1013 		break;
1014 	case IP_VERSION(9, 2, 1):
1015 		adev->asic_funcs = &soc15_asic_funcs;
1016 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1017 			AMD_CG_SUPPORT_GFX_MGLS |
1018 			AMD_CG_SUPPORT_GFX_CGCG |
1019 			AMD_CG_SUPPORT_GFX_CGLS |
1020 			AMD_CG_SUPPORT_GFX_3D_CGCG |
1021 			AMD_CG_SUPPORT_GFX_3D_CGLS |
1022 			AMD_CG_SUPPORT_GFX_CP_LS |
1023 			AMD_CG_SUPPORT_MC_LS |
1024 			AMD_CG_SUPPORT_MC_MGCG |
1025 			AMD_CG_SUPPORT_SDMA_MGCG |
1026 			AMD_CG_SUPPORT_SDMA_LS |
1027 			AMD_CG_SUPPORT_BIF_MGCG |
1028 			AMD_CG_SUPPORT_BIF_LS |
1029 			AMD_CG_SUPPORT_HDP_MGCG |
1030 			AMD_CG_SUPPORT_HDP_LS |
1031 			AMD_CG_SUPPORT_ROM_MGCG |
1032 			AMD_CG_SUPPORT_VCE_MGCG |
1033 			AMD_CG_SUPPORT_UVD_MGCG;
1034 		adev->pg_flags = 0;
1035 		adev->external_rev_id = adev->rev_id + 0x14;
1036 		break;
1037 	case IP_VERSION(9, 4, 0):
1038 		adev->asic_funcs = &vega20_asic_funcs;
1039 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1040 			AMD_CG_SUPPORT_GFX_MGLS |
1041 			AMD_CG_SUPPORT_GFX_CGCG |
1042 			AMD_CG_SUPPORT_GFX_CGLS |
1043 			AMD_CG_SUPPORT_GFX_3D_CGCG |
1044 			AMD_CG_SUPPORT_GFX_3D_CGLS |
1045 			AMD_CG_SUPPORT_GFX_CP_LS |
1046 			AMD_CG_SUPPORT_MC_LS |
1047 			AMD_CG_SUPPORT_MC_MGCG |
1048 			AMD_CG_SUPPORT_SDMA_MGCG |
1049 			AMD_CG_SUPPORT_SDMA_LS |
1050 			AMD_CG_SUPPORT_BIF_MGCG |
1051 			AMD_CG_SUPPORT_BIF_LS |
1052 			AMD_CG_SUPPORT_HDP_MGCG |
1053 			AMD_CG_SUPPORT_HDP_LS |
1054 			AMD_CG_SUPPORT_ROM_MGCG |
1055 			AMD_CG_SUPPORT_VCE_MGCG |
1056 			AMD_CG_SUPPORT_UVD_MGCG;
1057 		adev->pg_flags = 0;
1058 		adev->external_rev_id = adev->rev_id + 0x28;
1059 		break;
1060 	case IP_VERSION(9, 1, 0):
1061 	case IP_VERSION(9, 2, 2):
1062 		adev->asic_funcs = &soc15_asic_funcs;
1063 
1064 		if (adev->rev_id >= 0x8)
1065 			adev->apu_flags |= AMD_APU_IS_RAVEN2;
1066 
1067 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1068 			adev->external_rev_id = adev->rev_id + 0x79;
1069 		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1070 			adev->external_rev_id = adev->rev_id + 0x41;
1071 		else if (adev->rev_id == 1)
1072 			adev->external_rev_id = adev->rev_id + 0x20;
1073 		else
1074 			adev->external_rev_id = adev->rev_id + 0x01;
1075 
1076 		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
1077 			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1078 				AMD_CG_SUPPORT_GFX_MGLS |
1079 				AMD_CG_SUPPORT_GFX_CP_LS |
1080 				AMD_CG_SUPPORT_GFX_3D_CGCG |
1081 				AMD_CG_SUPPORT_GFX_3D_CGLS |
1082 				AMD_CG_SUPPORT_GFX_CGCG |
1083 				AMD_CG_SUPPORT_GFX_CGLS |
1084 				AMD_CG_SUPPORT_BIF_LS |
1085 				AMD_CG_SUPPORT_HDP_LS |
1086 				AMD_CG_SUPPORT_MC_MGCG |
1087 				AMD_CG_SUPPORT_MC_LS |
1088 				AMD_CG_SUPPORT_SDMA_MGCG |
1089 				AMD_CG_SUPPORT_SDMA_LS |
1090 				AMD_CG_SUPPORT_VCN_MGCG;
1091 
1092 			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
1093 		} else if (adev->apu_flags & AMD_APU_IS_PICASSO) {
1094 			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1095 				AMD_CG_SUPPORT_GFX_MGLS |
1096 				AMD_CG_SUPPORT_GFX_CP_LS |
1097 				AMD_CG_SUPPORT_GFX_3D_CGLS |
1098 				AMD_CG_SUPPORT_GFX_CGCG |
1099 				AMD_CG_SUPPORT_GFX_CGLS |
1100 				AMD_CG_SUPPORT_BIF_LS |
1101 				AMD_CG_SUPPORT_HDP_LS |
1102 				AMD_CG_SUPPORT_MC_MGCG |
1103 				AMD_CG_SUPPORT_MC_LS |
1104 				AMD_CG_SUPPORT_SDMA_MGCG |
1105 				AMD_CG_SUPPORT_SDMA_LS |
1106 				AMD_CG_SUPPORT_VCN_MGCG;
1107 
1108 			/*
1109 			 * MMHUB PG needs to be disabled for Picasso for
1110 			 * stability reasons.
1111 			 */
1112 			adev->pg_flags = AMD_PG_SUPPORT_SDMA |
1113 				AMD_PG_SUPPORT_VCN;
1114 		} else {
1115 			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1116 				AMD_CG_SUPPORT_GFX_MGLS |
1117 				AMD_CG_SUPPORT_GFX_RLC_LS |
1118 				AMD_CG_SUPPORT_GFX_CP_LS |
1119 				AMD_CG_SUPPORT_GFX_3D_CGLS |
1120 				AMD_CG_SUPPORT_GFX_CGCG |
1121 				AMD_CG_SUPPORT_GFX_CGLS |
1122 				AMD_CG_SUPPORT_BIF_MGCG |
1123 				AMD_CG_SUPPORT_BIF_LS |
1124 				AMD_CG_SUPPORT_HDP_MGCG |
1125 				AMD_CG_SUPPORT_HDP_LS |
1126 				AMD_CG_SUPPORT_DRM_MGCG |
1127 				AMD_CG_SUPPORT_DRM_LS |
1128 				AMD_CG_SUPPORT_MC_MGCG |
1129 				AMD_CG_SUPPORT_MC_LS |
1130 				AMD_CG_SUPPORT_SDMA_MGCG |
1131 				AMD_CG_SUPPORT_SDMA_LS |
1132 				AMD_CG_SUPPORT_VCN_MGCG;
1133 
1134 			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
1135 		}
1136 		break;
1137 	case IP_VERSION(9, 4, 1):
1138 		adev->asic_funcs = &vega20_asic_funcs;
1139 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1140 			AMD_CG_SUPPORT_GFX_MGLS |
1141 			AMD_CG_SUPPORT_GFX_CGCG |
1142 			AMD_CG_SUPPORT_GFX_CGLS |
1143 			AMD_CG_SUPPORT_GFX_CP_LS |
1144 			AMD_CG_SUPPORT_HDP_MGCG |
1145 			AMD_CG_SUPPORT_HDP_LS |
1146 			AMD_CG_SUPPORT_SDMA_MGCG |
1147 			AMD_CG_SUPPORT_SDMA_LS |
1148 			AMD_CG_SUPPORT_MC_MGCG |
1149 			AMD_CG_SUPPORT_MC_LS |
1150 			AMD_CG_SUPPORT_IH_CG |
1151 			AMD_CG_SUPPORT_VCN_MGCG |
1152 			AMD_CG_SUPPORT_JPEG_MGCG;
1153 		adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG;
1154 		adev->external_rev_id = adev->rev_id + 0x32;
1155 		break;
1156 	case IP_VERSION(9, 3, 0):
1157 		adev->asic_funcs = &soc15_asic_funcs;
1158 
1159 		if (adev->apu_flags & AMD_APU_IS_RENOIR)
1160 			adev->external_rev_id = adev->rev_id + 0x91;
1161 		else
1162 			adev->external_rev_id = adev->rev_id + 0xa1;
1163 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1164 				 AMD_CG_SUPPORT_GFX_MGLS |
1165 				 AMD_CG_SUPPORT_GFX_3D_CGCG |
1166 				 AMD_CG_SUPPORT_GFX_3D_CGLS |
1167 				 AMD_CG_SUPPORT_GFX_CGCG |
1168 				 AMD_CG_SUPPORT_GFX_CGLS |
1169 				 AMD_CG_SUPPORT_GFX_CP_LS |
1170 				 AMD_CG_SUPPORT_MC_MGCG |
1171 				 AMD_CG_SUPPORT_MC_LS |
1172 				 AMD_CG_SUPPORT_SDMA_MGCG |
1173 				 AMD_CG_SUPPORT_SDMA_LS |
1174 				 AMD_CG_SUPPORT_BIF_LS |
1175 				 AMD_CG_SUPPORT_HDP_LS |
1176 				 AMD_CG_SUPPORT_VCN_MGCG |
1177 				 AMD_CG_SUPPORT_JPEG_MGCG |
1178 				 AMD_CG_SUPPORT_IH_CG |
1179 				 AMD_CG_SUPPORT_ATHUB_LS |
1180 				 AMD_CG_SUPPORT_ATHUB_MGCG |
1181 				 AMD_CG_SUPPORT_DF_MGCG;
1182 		adev->pg_flags = AMD_PG_SUPPORT_SDMA |
1183 				 AMD_PG_SUPPORT_VCN |
1184 				 AMD_PG_SUPPORT_JPEG |
1185 				 AMD_PG_SUPPORT_VCN_DPG;
1186 		break;
1187 	case IP_VERSION(9, 4, 2):
1188 		adev->asic_funcs = &vega20_asic_funcs;
1189 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1190 			AMD_CG_SUPPORT_GFX_MGLS |
1191 			AMD_CG_SUPPORT_GFX_CP_LS |
1192 			AMD_CG_SUPPORT_HDP_LS |
1193 			AMD_CG_SUPPORT_SDMA_MGCG |
1194 			AMD_CG_SUPPORT_SDMA_LS |
1195 			AMD_CG_SUPPORT_IH_CG |
1196 			AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG;
1197 		adev->pg_flags = AMD_PG_SUPPORT_VCN_DPG;
1198 		adev->external_rev_id = adev->rev_id + 0x3c;
1199 		break;
1200 	case IP_VERSION(9, 4, 3):
1201 	case IP_VERSION(9, 4, 4):
1202 	case IP_VERSION(9, 5, 0):
1203 		adev->asic_funcs = &aqua_vanjaram_asic_funcs;
1204 		adev->cg_flags =
1205 			AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_CGCG |
1206 			AMD_CG_SUPPORT_GFX_CGLS | AMD_CG_SUPPORT_SDMA_MGCG |
1207 			AMD_CG_SUPPORT_GFX_FGCG | AMD_CG_SUPPORT_REPEATER_FGCG |
1208 			AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG |
1209 			AMD_CG_SUPPORT_IH_CG;
1210 		adev->pg_flags =
1211 			AMD_PG_SUPPORT_VCN |
1212 			AMD_PG_SUPPORT_VCN_DPG |
1213 			AMD_PG_SUPPORT_JPEG;
1214 		/* TODO: need a new external_rev_id for GC 9.4.4? */
1215 		adev->external_rev_id = adev->rev_id + 0x46;
1216 		break;
1217 	default:
1218 		/* FIXME: not supported yet */
1219 		return -EINVAL;
1220 	}
1221 
1222 	if (amdgpu_sriov_vf(adev)) {
1223 		amdgpu_virt_init_setting(adev);
1224 		xgpu_ai_mailbox_set_irq_funcs(adev);
1225 	}
1226 
1227 	return 0;
1228 }
1229 
1230 static int soc15_common_late_init(struct amdgpu_ip_block *ip_block)
1231 {
1232 	struct amdgpu_device *adev = ip_block->adev;
1233 
1234 	if (amdgpu_sriov_vf(adev))
1235 		xgpu_ai_mailbox_get_irq(adev);
1236 
1237 	/* Enable selfring doorbell aperture late because doorbell BAR
1238 	 * aperture will change if BAR resizing succeeds in gmc sw_init.
1239 	 */
1240 	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true);
1241 
1242 	return 0;
1243 }
1244 
1245 static int soc15_common_sw_init(struct amdgpu_ip_block *ip_block)
1246 {
1247 	struct amdgpu_device *adev = ip_block->adev;
1248 
1249 	if (amdgpu_sriov_vf(adev))
1250 		xgpu_ai_mailbox_add_irq_id(adev);
1251 
1252 	if (adev->df.funcs &&
1253 	    adev->df.funcs->sw_init)
1254 		adev->df.funcs->sw_init(adev);
1255 
1256 	return 0;
1257 }
1258 
1259 static int soc15_common_sw_fini(struct amdgpu_ip_block *ip_block)
1260 {
1261 	struct amdgpu_device *adev = ip_block->adev;
1262 
1263 	if (adev->df.funcs &&
1264 	    adev->df.funcs->sw_fini)
1265 		adev->df.funcs->sw_fini(adev);
1266 	return 0;
1267 }
1268 
1269 static void soc15_sdma_doorbell_range_init(struct amdgpu_device *adev)
1270 {
1271 	int i;
1272 
1273 	/* sdma doorbell range is programmed by the hypervisor */
1274 	if (!amdgpu_sriov_vf(adev)) {
1275 		for (i = 0; i < adev->sdma.num_instances; i++) {
1276 			adev->nbio.funcs->sdma_doorbell_range(adev, i,
1277 				true, adev->doorbell_index.sdma_engine[i] << 1,
1278 				adev->doorbell_index.sdma_doorbell_range);
1279 		}
1280 	}
1281 }
1282 
1283 static int soc15_common_hw_init(struct amdgpu_ip_block *ip_block)
1284 {
1285 	struct amdgpu_device *adev = ip_block->adev;
1286 
1287 	/* enable aspm */
1288 	soc15_program_aspm(adev);
1289 	/* setup nbio registers */
1290 	adev->nbio.funcs->init_registers(adev);
1291 	/* remap HDP registers to a hole in mmio space,
1292 	 * in order to expose those registers
1293 	 * to process space
1294 	 */
1295 	if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
1296 		adev->nbio.funcs->remap_hdp_registers(adev);
1297 
1298 	/* enable the doorbell aperture */
1299 	adev->nbio.funcs->enable_doorbell_aperture(adev, true);
1300 
1301 	/* HW doorbell routing policy: doorbell writes that are not
1302 	 * in the SDMA/IH/MM/ACV range are routed to CP, so
1303 	 * we need to init the SDMA doorbell range prior
1304 	 * to CP IP block init and ring test.  IH init already
1305 	 * happens before CP.
1306 	 */
1307 	soc15_sdma_doorbell_range_init(adev);
1308 
1309 	return 0;
1310 }
1311 
1312 static int soc15_common_hw_fini(struct amdgpu_ip_block *ip_block)
1313 {
1314 	struct amdgpu_device *adev = ip_block->adev;
1315 
1316 	/* Disable the doorbell aperture and selfring doorbell aperture
1317 	 * separately in hw_fini because soc15_enable_doorbell_aperture
1318 	 * has been removed and there is no need to delay disabling
1319 	 * selfring doorbell.
1320 	 */
1321 	adev->nbio.funcs->enable_doorbell_aperture(adev, false);
1322 	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false);
1323 
1324 	if (amdgpu_sriov_vf(adev))
1325 		xgpu_ai_mailbox_put_irq(adev);
1326 
1327 	/*
1328 	 * For minimal init, late_init is not called, hence RAS irqs are not
1329 	 * enabled.
1330 	 */
1331 	if ((!amdgpu_sriov_vf(adev)) &&
1332 	    (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) &&
1333 	    adev->nbio.ras_if &&
1334 	    amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
1335 		if (adev->nbio.ras &&
1336 		    adev->nbio.ras->init_ras_controller_interrupt)
1337 			amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
1338 		if (adev->nbio.ras &&
1339 		    adev->nbio.ras->init_ras_err_event_athub_interrupt)
1340 			amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
1341 	}
1342 
1343 	return 0;
1344 }
1345 
1346 static int soc15_common_suspend(struct amdgpu_ip_block *ip_block)
1347 {
1348 	return soc15_common_hw_fini(ip_block);
1349 }
1350 
1351 static int soc15_common_resume(struct amdgpu_ip_block *ip_block)
1352 {
1353 	struct amdgpu_device *adev = ip_block->adev;
1354 
1355 	if (soc15_need_reset_on_resume(adev)) {
1356 		dev_info(adev->dev, "S3 suspend abort case, let's reset ASIC.\n");
1357 		soc15_asic_reset(adev);
1358 	}
1359 	return soc15_common_hw_init(ip_block);
1360 }
1361 
1362 static bool soc15_common_is_idle(struct amdgpu_ip_block *ip_block)
1363 {
1364 	return true;
1365 }
1366 
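/* DRM clock gating is controlled through the top eight bits of
 * MP0_MISC_CGTT_CTRL0: clearing bits 0x01000000..0x80000000 enables
 * medium grain clock gating, setting them disables it.
 */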
1367 static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
1368 {
1369 	uint32_t def, data;
1370 
1371 	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
1372 
1373 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
1374 		data &= ~(0x01000000 |
1375 			  0x02000000 |
1376 			  0x04000000 |
1377 			  0x08000000 |
1378 			  0x10000000 |
1379 			  0x20000000 |
1380 			  0x40000000 |
1381 			  0x80000000);
1382 	else
1383 		data |= (0x01000000 |
1384 			 0x02000000 |
1385 			 0x04000000 |
1386 			 0x08000000 |
1387 			 0x10000000 |
1388 			 0x20000000 |
1389 			 0x40000000 |
1390 			 0x80000000);
1391 
1392 	if (def != data)
1393 		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
1394 }
1395 
1396 static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
1397 {
1398 	uint32_t def, data;
1399 
1400 	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
1401 
1402 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
1403 		data |= 1;
1404 	else
1405 		data &= ~1;
1406 
1407 	if (def != data)
1408 		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
1409 }
1410 
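/* Apply clock gating per NBIO IP version: the set of blocks that gets
 * toggled (NBIO, HDP, DRM, ROM/smuio, DF) depends on the NBIO version;
 * SR-IOV guests skip this entirely.
 */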
1411 static int soc15_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
1412 					    enum amd_clockgating_state state)
1413 {
1414 	struct amdgpu_device *adev = ip_block->adev;
1415 
1416 	if (amdgpu_sriov_vf(adev))
1417 		return 0;
1418 
1419 	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
1420 	case IP_VERSION(6, 1, 0):
1421 	case IP_VERSION(6, 2, 0):
1422 	case IP_VERSION(7, 4, 0):
1423 		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
1424 				state == AMD_CG_STATE_GATE);
1425 		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
1426 				state == AMD_CG_STATE_GATE);
1427 		adev->hdp.funcs->update_clock_gating(adev,
1428 				state == AMD_CG_STATE_GATE);
1429 		soc15_update_drm_clock_gating(adev,
1430 				state == AMD_CG_STATE_GATE);
1431 		soc15_update_drm_light_sleep(adev,
1432 				state == AMD_CG_STATE_GATE);
1433 		adev->smuio.funcs->update_rom_clock_gating(adev,
1434 				state == AMD_CG_STATE_GATE);
1435 		adev->df.funcs->update_medium_grain_clock_gating(adev,
1436 				state == AMD_CG_STATE_GATE);
1437 		break;
1438 	case IP_VERSION(7, 0, 0):
1439 	case IP_VERSION(7, 0, 1):
1440 	case IP_VERSION(2, 5, 0):
1441 		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
1442 				state == AMD_CG_STATE_GATE);
1443 		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
1444 				state == AMD_CG_STATE_GATE);
1445 		adev->hdp.funcs->update_clock_gating(adev,
1446 				state == AMD_CG_STATE_GATE);
1447 		soc15_update_drm_clock_gating(adev,
1448 				state == AMD_CG_STATE_GATE);
1449 		soc15_update_drm_light_sleep(adev,
1450 				state == AMD_CG_STATE_GATE);
1451 		break;
1452 	case IP_VERSION(7, 4, 1):
1453 	case IP_VERSION(7, 4, 4):
1454 		adev->hdp.funcs->update_clock_gating(adev,
1455 				state == AMD_CG_STATE_GATE);
1456 		break;
1457 	default:
1458 		break;
1459 	}
1460 	return 0;
1461 }
1462 
1463 static void soc15_common_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
1464 {
1465 	struct amdgpu_device *adev = ip_block->adev;
1466 	int data;
1467 
1468 	if (amdgpu_sriov_vf(adev))
1469 		*flags = 0;
1470 
1471 	if (adev->nbio.funcs && adev->nbio.funcs->get_clockgating_state)
1472 		adev->nbio.funcs->get_clockgating_state(adev, flags);
1473 
1474 	if (adev->hdp.funcs && adev->hdp.funcs->get_clock_gating_state)
1475 		adev->hdp.funcs->get_clock_gating_state(adev, flags);
1476 
1477 	if ((amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2)) &&
1478 	    (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) &&
1479 	    (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 12)) &&
1480 	    (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 14))) {
1481 		/* AMD_CG_SUPPORT_DRM_MGCG */
1482 		data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
1483 		if (!(data & 0x01000000))
1484 			*flags |= AMD_CG_SUPPORT_DRM_MGCG;
1485 
1486 		/* AMD_CG_SUPPORT_DRM_LS */
1487 		data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
1488 		if (data & 0x1)
1489 			*flags |= AMD_CG_SUPPORT_DRM_LS;
1490 	}
1491 
1492 	/* AMD_CG_SUPPORT_ROM_MGCG */
1493 	if (adev->smuio.funcs && adev->smuio.funcs->get_clock_gating_state)
1494 		adev->smuio.funcs->get_clock_gating_state(adev, flags);
1495 
1496 	if (adev->df.funcs && adev->df.funcs->get_clockgating_state)
1497 		adev->df.funcs->get_clockgating_state(adev, flags);
1498 }
1499 
1500 static int soc15_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
1501 					    enum amd_powergating_state state)
1502 {
1503 	/* todo */
1504 	return 0;
1505 }
1506 
1507 static const struct amd_ip_funcs soc15_common_ip_funcs = {
1508 	.name = "soc15_common",
1509 	.early_init = soc15_common_early_init,
1510 	.late_init = soc15_common_late_init,
1511 	.sw_init = soc15_common_sw_init,
1512 	.sw_fini = soc15_common_sw_fini,
1513 	.hw_init = soc15_common_hw_init,
1514 	.hw_fini = soc15_common_hw_fini,
1515 	.suspend = soc15_common_suspend,
1516 	.resume = soc15_common_resume,
1517 	.is_idle = soc15_common_is_idle,
1518 	.set_clockgating_state = soc15_common_set_clockgating_state,
1519 	.set_powergating_state = soc15_common_set_powergating_state,
1520 	.get_clockgating_state = soc15_common_get_clockgating_state,
1521 };
1522