1 /*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23 #include <linux/firmware.h>
24 #include <linux/slab.h>
25 #include <linux/module.h>
26 #include <linux/pci.h>
27
28 #include <drm/amdgpu_drm.h>
29
30 #include "amdgpu.h"
31 #include "amdgpu_atombios.h"
32 #include "amdgpu_ih.h"
33 #include "amdgpu_uvd.h"
34 #include "amdgpu_vce.h"
35 #include "amdgpu_ucode.h"
36 #include "amdgpu_psp.h"
37 #include "atom.h"
38 #include "amd_pcie.h"
39
40 #include "uvd/uvd_7_0_offset.h"
41 #include "gc/gc_9_0_offset.h"
42 #include "gc/gc_9_0_sh_mask.h"
43 #include "sdma0/sdma0_4_0_offset.h"
44 #include "sdma1/sdma1_4_0_offset.h"
45 #include "nbio/nbio_7_0_default.h"
46 #include "nbio/nbio_7_0_offset.h"
47 #include "nbio/nbio_7_0_sh_mask.h"
48 #include "nbio/nbio_7_0_smn.h"
49 #include "mp/mp_9_0_offset.h"
50
51 #include "soc15.h"
52 #include "soc15_common.h"
53 #include "gfx_v9_0.h"
54 #include "gmc_v9_0.h"
55 #include "gfxhub_v1_0.h"
56 #include "mmhub_v1_0.h"
57 #include "df_v1_7.h"
58 #include "df_v3_6.h"
59 #include "nbio_v6_1.h"
60 #include "nbio_v7_0.h"
61 #include "nbio_v7_4.h"
62 #include "hdp_v4_0.h"
63 #include "vega10_ih.h"
64 #include "vega20_ih.h"
65 #include "navi10_ih.h"
66 #include "sdma_v4_0.h"
67 #include "uvd_v7_0.h"
68 #include "vce_v4_0.h"
69 #include "vcn_v1_0.h"
70 #include "vcn_v2_0.h"
71 #include "jpeg_v2_0.h"
72 #include "vcn_v2_5.h"
73 #include "jpeg_v2_5.h"
74 #include "smuio_v9_0.h"
75 #include "smuio_v11_0.h"
76 #include "smuio_v13_0.h"
77 #include "amdgpu_vkms.h"
78 #include "mxgpu_ai.h"
79 #include "amdgpu_ras.h"
80 #include "amdgpu_xgmi.h"
81 #include <uapi/linux/kfd_ioctl.h>
82
83 #define mmMP0_MISC_CGTT_CTRL0 0x01b9
84 #define mmMP0_MISC_CGTT_CTRL0_BASE_IDX 0
85 #define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba
86 #define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0
87
88 static const struct amd_ip_funcs soc15_common_ip_funcs;
89
90 /* Vega, Raven, Arcturus */
91 static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] =
92 {
93 {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
94 {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 0)},
95 };
96
97 static const struct amdgpu_video_codecs vega_video_codecs_encode =
98 {
99 .codec_count = ARRAY_SIZE(vega_video_codecs_encode_array),
100 .codec_array = vega_video_codecs_encode_array,
101 };
102
103 /* Vega */
104 static const struct amdgpu_video_codec_info vega_video_codecs_decode_array[] =
105 {
106 {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
107 {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
108 {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
109 {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
110 {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
111 {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
112 };
113
114 static const struct amdgpu_video_codecs vega_video_codecs_decode =
115 {
116 .codec_count = ARRAY_SIZE(vega_video_codecs_decode_array),
117 .codec_array = vega_video_codecs_decode_array,
118 };
119
120 /* Raven */
121 static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] =
122 {
123 {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
124 {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
125 {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
126 {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
127 {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
128 {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
129 {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 4096, 4096, 0)},
130 };
131
132 static const struct amdgpu_video_codecs rv_video_codecs_decode =
133 {
134 .codec_count = ARRAY_SIZE(rv_video_codecs_decode_array),
135 .codec_array = rv_video_codecs_decode_array,
136 };
137
138 /* Renoir, Arcturus */
139 static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] =
140 {
141 {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
142 {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
143 {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
144 {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
145 {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
146 {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
147 {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
148 };
149
150 static const struct amdgpu_video_codecs rn_video_codecs_decode =
151 {
152 .codec_count = ARRAY_SIZE(rn_video_codecs_decode_array),
153 .codec_array = rn_video_codecs_decode_array,
154 };
155
156 static const struct amdgpu_video_codec_info vcn_4_0_3_video_codecs_decode_array[] = {
157 {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
158 {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
159 {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
160 {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
161 {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
162 };
163
164 static const struct amdgpu_video_codecs vcn_4_0_3_video_codecs_decode = {
165 .codec_count = ARRAY_SIZE(vcn_4_0_3_video_codecs_decode_array),
166 .codec_array = vcn_4_0_3_video_codecs_decode_array,
167 };
168
169 static const struct amdgpu_video_codecs vcn_4_0_3_video_codecs_encode = {
170 .codec_count = 0,
171 .codec_array = NULL,
172 };
173
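/* Return the encode/decode codec capability tables based on the VCE/UVD
 * IP version detected on this ASIC.
 */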
174 static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,
175 const struct amdgpu_video_codecs **codecs)
176 {
177 if (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
178 switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
179 case IP_VERSION(4, 0, 0):
180 case IP_VERSION(4, 1, 0):
181 if (encode)
182 *codecs = &vega_video_codecs_encode;
183 else
184 *codecs = &vega_video_codecs_decode;
185 return 0;
186 default:
187 return -EINVAL;
188 }
189 } else {
190 switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
191 case IP_VERSION(1, 0, 0):
192 case IP_VERSION(1, 0, 1):
193 if (encode)
194 *codecs = &vega_video_codecs_encode;
195 else
196 *codecs = &rv_video_codecs_decode;
197 return 0;
198 case IP_VERSION(2, 5, 0):
199 case IP_VERSION(2, 6, 0):
200 case IP_VERSION(2, 2, 0):
201 if (encode)
202 *codecs = &vega_video_codecs_encode;
203 else
204 *codecs = &rn_video_codecs_decode;
205 return 0;
206 case IP_VERSION(4, 0, 3):
207 if (encode)
208 *codecs = &vcn_4_0_3_video_codecs_encode;
209 else
210 *codecs = &vcn_4_0_3_video_codecs_decode;
211 return 0;
212 default:
213 return -EINVAL;
214 }
215 }
216 }
217
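/* Indirect UVD context register read through the
 * UVD_CTX_INDEX/UVD_CTX_DATA pair, serialized by uvd_ctx_idx_lock.
 */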
218 static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
219 {
220 unsigned long flags, address, data;
221 u32 r;
222
223 address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
224 data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);
225
226 spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
227 WREG32(address, ((reg) & 0x1ff));
228 r = RREG32(data);
229 spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
230 return r;
231 }
232
233 static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
234 {
235 unsigned long flags, address, data;
236
237 address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
238 data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);
239
240 spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
241 WREG32(address, ((reg) & 0x1ff));
242 WREG32(data, (v));
243 spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
244 }
245
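/* Indirect DIDT register read through the DIDT_IND_INDEX/DIDT_IND_DATA
 * pair, serialized by didt_idx_lock.
 */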
246 static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
247 {
248 unsigned long flags, address, data;
249 u32 r;
250
251 address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
252 data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
253
254 spin_lock_irqsave(&adev->didt_idx_lock, flags);
255 WREG32(address, (reg));
256 r = RREG32(data);
257 spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
258 return r;
259 }
260
261 static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
262 {
263 unsigned long flags, address, data;
264
265 address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
266 data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
267
268 spin_lock_irqsave(&adev->didt_idx_lock, flags);
269 WREG32(address, (reg));
270 WREG32(data, (v));
271 spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
272 }
273
274 static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
275 {
276 unsigned long flags;
277 u32 r;
278
279 spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
280 WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
281 r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
282 spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
283 return r;
284 }
285
286 static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
287 {
288 unsigned long flags;
289
290 spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
291 WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
292 WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
293 spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
294 }
295
296 static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
297 {
298 unsigned long flags;
299 u32 r;
300
301 spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
302 WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
303 r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
304 spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
305 return r;
306 }
307
308 static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
309 {
310 unsigned long flags;
311
312 spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
313 WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
314 WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
315 spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
316 }
317
318 static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
319 {
320 return adev->nbio.funcs->get_memsize(adev);
321 }
322
323 static u32 soc15_get_xclk(struct amdgpu_device *adev)
324 {
325 u32 reference_clock = adev->clock.spll.reference_freq;
326
327 if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(12, 0, 0) ||
328 amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(12, 0, 1) ||
329 amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6) ||
330 amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 14))
331 return 10000;
332 if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(10, 0, 0) ||
333 amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(10, 0, 1))
334 return reference_clock / 4;
335
336 return reference_clock;
337 }
338
339
340 void soc15_grbm_select(struct amdgpu_device *adev,
341 u32 me, u32 pipe, u32 queue, u32 vmid, int xcc_id)
342 {
343 u32 grbm_gfx_cntl = 0;
344 grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
345 grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
346 grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
347 grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
348
349 WREG32_SOC15_RLC_SHADOW(GC, xcc_id, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
350 }
351
352 static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
353 {
354 /* todo */
355 return false;
356 }
357
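/* Registers that userspace is allowed to read through soc15_read_register() */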
358 static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
359 { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
360 { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
361 { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
362 { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
363 { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
364 { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
365 { SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
366 { SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
367 { SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
368 { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
369 { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
370 { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
371 { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
372 { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
373 { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
374 { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
375 { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
376 { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
377 { SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
378 { SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
379 };
380
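/* Read a GRBM-indexed register for a specific SE/SH and restore
 * broadcast mode afterwards.
 */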
381 static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
382 u32 sh_num, u32 reg_offset)
383 {
384 uint32_t val;
385
386 mutex_lock(&adev->grbm_idx_mutex);
387 if (se_num != 0xffffffff || sh_num != 0xffffffff)
388 amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);
389
390 val = RREG32(reg_offset);
391
392 if (se_num != 0xffffffff || sh_num != 0xffffffff)
393 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
394 mutex_unlock(&adev->grbm_idx_mutex);
395 return val;
396 }
397
398 static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
399 bool indexed, u32 se_num,
400 u32 sh_num, u32 reg_offset)
401 {
402 if (indexed) {
403 return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
404 } else {
405 if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
406 return adev->gfx.config.gb_addr_config;
407 else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
408 return adev->gfx.config.db_debug2;
409 return RREG32(reg_offset);
410 }
411 }
412
413 static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
414 u32 sh_num, u32 reg_offset, u32 *value)
415 {
416 uint32_t i;
417 struct soc15_allowed_register_entry *en;
418
419 *value = 0;
420 for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
421 en = &soc15_allowed_read_registers[i];
422 if (!adev->reg_offset[en->hwip][en->inst])
423 continue;
424 else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
425 + en->reg_offset))
426 continue;
427
428 *value = soc15_get_register_value(adev,
429 soc15_allowed_read_registers[i].grbm_indexed,
430 se_num, sh_num, reg_offset);
431 return 0;
432 }
433 return -EINVAL;
434 }
435
436
437 /**
438 * soc15_program_register_sequence - program an array of registers.
439 *
440 * @adev: amdgpu_device pointer
441 * @regs: pointer to the register array
442 * @array_size: size of the register array
443 *
444 * Programs an array of registers with AND and OR masks.
445 * This is a helper for setting golden registers.
446 */
447
448 void soc15_program_register_sequence(struct amdgpu_device *adev,
449 const struct soc15_reg_golden *regs,
450 const u32 array_size)
451 {
452 const struct soc15_reg_golden *entry;
453 u32 tmp, reg;
454 int i;
455
456 for (i = 0; i < array_size; ++i) {
457 entry = &regs[i];
458 reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
459
460 if (entry->and_mask == 0xffffffff) {
461 tmp = entry->or_mask;
462 } else {
463 tmp = (entry->hwip == GC_HWIP) ?
464 RREG32_SOC15_IP(GC, reg) : RREG32(reg);
465
466 tmp &= ~(entry->and_mask);
467 tmp |= (entry->or_mask & entry->and_mask);
468 }
469
470 if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) ||
471 reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE) ||
472 reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1) ||
473 reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
474 WREG32_RLC(reg, tmp);
475 else
476 (entry->hwip == GC_HWIP) ?
477 WREG32_SOC15_IP(GC, reg, tmp) : WREG32(reg, tmp);
478
479 }
480
481 }
482
483 static int soc15_asic_baco_reset(struct amdgpu_device *adev)
484 {
485 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
486 int ret = 0;
487
488 /* avoid NBIF got stuck when do RAS recovery in BACO reset */
489 if (ras && adev->ras_enabled)
490 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
491
492 ret = amdgpu_dpm_baco_reset(adev);
493 if (ret)
494 return ret;
495
496 /* re-enable doorbell interrupt after BACO exit */
497 if (ras && adev->ras_enabled)
498 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
499
500 return 0;
501 }
502
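/* Pick a reset method (PCI, BACO, MODE1 or MODE2) based on the module
 * parameter, the MP1 IP version and the RAS/XGMI configuration.
 */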
503 static enum amd_reset_method
504 soc15_asic_reset_method(struct amdgpu_device *adev)
505 {
506 int baco_reset = 0;
507 bool connected_to_cpu = false;
508 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
509
510 if (adev->gmc.xgmi.supported && adev->gmc.xgmi.connected_to_cpu)
511 connected_to_cpu = true;
512
513 if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
514 amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
515 amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
516 amdgpu_reset_method == AMD_RESET_METHOD_PCI) {
517 /* If connected to cpu, the driver only supports mode2 */
518 if (connected_to_cpu)
519 return AMD_RESET_METHOD_MODE2;
520 return amdgpu_reset_method;
521 }
522
523 if (amdgpu_reset_method != -1)
524 dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
525 amdgpu_reset_method);
526
527 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
528 case IP_VERSION(10, 0, 0):
529 case IP_VERSION(10, 0, 1):
530 case IP_VERSION(12, 0, 0):
531 case IP_VERSION(12, 0, 1):
532 return AMD_RESET_METHOD_MODE2;
533 case IP_VERSION(9, 0, 0):
534 case IP_VERSION(11, 0, 2):
535 if (adev->asic_type == CHIP_VEGA20) {
536 if (adev->psp.sos.fw_version >= 0x80067)
537 baco_reset = amdgpu_dpm_is_baco_supported(adev);
538 /*
539 * 1. PMFW version > 0x284300: all cases use baco
540 * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco
541 */
542 if (ras && adev->ras_enabled &&
543 adev->pm.fw_version <= 0x283400)
544 baco_reset = 0;
545 } else {
546 baco_reset = amdgpu_dpm_is_baco_supported(adev);
547 }
548 break;
549 case IP_VERSION(13, 0, 2):
550 /*
551 * 1. connected to cpu: the driver issues a mode2 reset
552 * 2. discrete gpu: the driver issues a mode1 reset
553 */
554 if (connected_to_cpu)
555 return AMD_RESET_METHOD_MODE2;
556 break;
557 case IP_VERSION(13, 0, 6):
558 case IP_VERSION(13, 0, 14):
559 /* Use gpu_recovery param to target a reset method.
560 * Enable triggering of GPU reset only if specified
561 * by module parameter.
562 */
563 if (amdgpu_gpu_recovery == 4 || amdgpu_gpu_recovery == 5)
564 return AMD_RESET_METHOD_MODE2;
565 else if (!(adev->flags & AMD_IS_APU))
566 return AMD_RESET_METHOD_MODE1;
567 else
568 return AMD_RESET_METHOD_MODE2;
569 default:
570 break;
571 }
572
573 if (baco_reset)
574 return AMD_RESET_METHOD_BACO;
575 else
576 return AMD_RESET_METHOD_MODE1;
577 }
578
579 static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
580 {
581 u32 sol_reg;
582
583 sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
584
585 /* Will reset for the following suspend abort cases.
586 * 1) For now only the APU side is reset; the dGPU case hasn't been checked yet.
587 * 2) S3 suspend abort and TOS already launched.
588 */
589 if (adev->flags & AMD_IS_APU && adev->in_s3 &&
590 !adev->suspend_complete &&
591 sol_reg)
592 return true;
593
594 return false;
595 }
596
597 static int soc15_asic_reset(struct amdgpu_device *adev)
598 {
599 /* original raven doesn't have full asic reset */
600 /* On the latest Raven, the GPU reset can be performed
601 * successfully. So now, temporarily enable it for the
602 * S3 suspend abort case.
603 */
604 if (((adev->apu_flags & AMD_APU_IS_RAVEN) ||
605 (adev->apu_flags & AMD_APU_IS_RAVEN2)) &&
606 !soc15_need_reset_on_resume(adev))
607 return 0;
608
609 switch (soc15_asic_reset_method(adev)) {
610 case AMD_RESET_METHOD_PCI:
611 dev_info(adev->dev, "PCI reset\n");
612 return amdgpu_device_pci_reset(adev);
613 case AMD_RESET_METHOD_BACO:
614 dev_info(adev->dev, "BACO reset\n");
615 return soc15_asic_baco_reset(adev);
616 case AMD_RESET_METHOD_MODE2:
617 dev_info(adev->dev, "MODE2 reset\n");
618 return amdgpu_dpm_mode2_reset(adev);
619 default:
620 dev_info(adev->dev, "MODE1 reset\n");
621 return amdgpu_device_mode1_reset(adev);
622 }
623 }
624
625 static int soc15_supports_baco(struct amdgpu_device *adev)
626 {
627 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
628 case IP_VERSION(9, 0, 0):
629 case IP_VERSION(11, 0, 2):
630 if (adev->asic_type == CHIP_VEGA20) {
631 if (adev->psp.sos.fw_version >= 0x80067)
632 return amdgpu_dpm_is_baco_supported(adev);
633 return 0;
634 } else {
635 return amdgpu_dpm_is_baco_supported(adev);
636 }
637 break;
638 default:
639 return 0;
640 }
641 }
642
643 /*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
644 u32 cntl_reg, u32 status_reg)
645 {
646 return 0;
647 }*/
648
649 static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
650 {
651 /*int r;
652
653 r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
654 if (r)
655 return r;
656
657 r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
658 */
659 return 0;
660 }
661
662 static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
663 {
664 /* todo */
665
666 return 0;
667 }
668
669 static void soc15_program_aspm(struct amdgpu_device *adev)
670 {
671 if (!amdgpu_device_should_use_aspm(adev))
672 return;
673
674 if (adev->nbio.funcs->program_aspm)
675 adev->nbio.funcs->program_aspm(adev);
676 }
677
678 const struct amdgpu_ip_block_version vega10_common_ip_block =
679 {
680 .type = AMD_IP_BLOCK_TYPE_COMMON,
681 .major = 2,
682 .minor = 0,
683 .rev = 0,
684 .funcs = &soc15_common_ip_funcs,
685 };
686
687 static void soc15_reg_base_init(struct amdgpu_device *adev)
688 {
689 /* Set IP register base before any HW register access */
690 switch (adev->asic_type) {
691 case CHIP_VEGA10:
692 case CHIP_VEGA12:
693 case CHIP_RAVEN:
694 case CHIP_RENOIR:
695 vega10_reg_base_init(adev);
696 break;
697 case CHIP_VEGA20:
698 vega20_reg_base_init(adev);
699 break;
700 case CHIP_ARCTURUS:
701 arct_reg_base_init(adev);
702 break;
703 case CHIP_ALDEBARAN:
704 aldebaran_reg_base_init(adev);
705 break;
706 default:
707 DRM_ERROR("Unsupported asic type: %d!\n", adev->asic_type);
708 break;
709 }
710 }
711
712 void soc15_set_virt_ops(struct amdgpu_device *adev)
713 {
714 adev->virt.ops = &xgpu_ai_virt_ops;
715
716 /* init soc15 reg base early enough so we can
717 * request full access for sriov before
718 * set_ip_blocks. */
719 soc15_reg_base_init(adev);
720 }
721
722 static bool soc15_need_full_reset(struct amdgpu_device *adev)
723 {
724 /* change this when we implement soft reset */
725 return true;
726 }
727
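/* Sample the PCIe received-message and posted-request counters over a
 * one second window using the NBIO perf counters.
 */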
728 static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
729 uint64_t *count1)
730 {
731 uint32_t perfctr = 0;
732 uint64_t cnt0_of, cnt1_of;
733 int tmp;
734
735 /* This reports 0 on APUs, so return to avoid writing/reading registers
736 * that may or may not be different from their GPU counterparts
737 */
738 if (adev->flags & AMD_IS_APU)
739 return;
740
741 /* Set the 2 events that we wish to watch, defined above */
742 /* Reg 40 is # received msgs */
743 /* Reg 104 is # of posted requests sent */
744 perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
745 perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
746
747 /* Write to enable desired perf counters */
748 WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
749 /* Zero out and enable the perf counters
750 * Write 0x5:
751 * Bit 0 = Start all counters(1)
752 * Bit 2 = Global counter reset enable(1)
753 */
754 WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);
755
756 msleep(1000);
757
758 /* Load the shadow and disable the perf counters
759 * Write 0x2:
760 * Bit 0 = Stop counters(0)
761 * Bit 1 = Load the shadow counters(1)
762 */
763 WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);
764
765 /* Read register values to get any >32bit overflow */
766 tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK);
767 cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
768 cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
769
770 /* Get the values and add the overflow */
771 *count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
772 *count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
773 }
774
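/* Vega20 variant of the PCIe usage measurement; it uses the TXCLK3
 * counter block and event 108 for posted requests.
 */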
775 static void vega20_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
776 uint64_t *count1)
777 {
778 uint32_t perfctr = 0;
779 uint64_t cnt0_of, cnt1_of;
780 int tmp;
781
782 /* This reports 0 on APUs, so return to avoid writing/reading registers
783 * that may or may not be different from their GPU counterparts
784 */
785 if (adev->flags & AMD_IS_APU)
786 return;
787
788 /* Set the 2 events that we wish to watch, defined above */
789 /* Reg 40 is # received msgs */
790 /* Reg 108 is # of posted requests sent on VG20 */
791 perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
792 EVENT0_SEL, 40);
793 perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
794 EVENT1_SEL, 108);
795
796 /* Write to enable desired perf counters */
797 WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctr);
798 /* Zero out and enable the perf counters
799 * Write 0x5:
800 * Bit 0 = Start all counters(1)
801 * Bit 2 = Global counter reset enable(1)
802 */
803 WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);
804
805 msleep(1000);
806
807 /* Load the shadow and disable the perf counters
808 * Write 0x2:
809 * Bit 0 = Stop counters(0)
810 * Bit 1 = Load the shadow counters(1)
811 */
812 WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);
813
814 /* Read register values to get any >32bit overflow */
815 tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3);
816 cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER0_UPPER);
817 cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER1_UPPER);
818
819 /* Get the values and add the overflow */
820 *count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) | (cnt0_of << 32);
821 *count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK3) | (cnt1_of << 32);
822 }
823
824 static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
825 {
826 u32 sol_reg;
827
828 /* CP hangs in the IGT reloading test on RN, reset as a workaround */
829 if (adev->asic_type == CHIP_RENOIR)
830 return true;
831
832 /* Just return false for soc15 GPUs. Reset does not seem to
833 * be necessary.
834 */
835 if (!amdgpu_passthrough(adev))
836 return false;
837
838 if (adev->flags & AMD_IS_APU)
839 return false;
840
841 /* Check sOS sign of life register to confirm sys driver and sOS
842 * have already been loaded.
843 */
844 sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
845 if (sol_reg)
846 return true;
847
848 return false;
849 }
850
851 static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
852 {
853 uint64_t nak_r, nak_g;
854
855 /* Get the number of NAKs received and generated */
856 nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK);
857 nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED);
858
859 /* Add the total number of NAKs, i.e the number of replays */
860 return (nak_r + nak_g);
861 }
862
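/* Called before ASIC init; restore registers saved earlier by the GMC v9 code. */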
863 static void soc15_pre_asic_init(struct amdgpu_device *adev)
864 {
865 gmc_v9_0_restore_registers(adev);
866 }
867
868 static const struct amdgpu_asic_funcs soc15_asic_funcs =
869 {
870 .read_disabled_bios = &soc15_read_disabled_bios,
871 .read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
872 .read_register = &soc15_read_register,
873 .reset = &soc15_asic_reset,
874 .reset_method = &soc15_asic_reset_method,
875 .get_xclk = &soc15_get_xclk,
876 .set_uvd_clocks = &soc15_set_uvd_clocks,
877 .set_vce_clocks = &soc15_set_vce_clocks,
878 .get_config_memsize = &soc15_get_config_memsize,
879 .need_full_reset = &soc15_need_full_reset,
880 .init_doorbell_index = &vega10_doorbell_index_init,
881 .get_pcie_usage = &soc15_get_pcie_usage,
882 .need_reset_on_init = &soc15_need_reset_on_init,
883 .get_pcie_replay_count = &soc15_get_pcie_replay_count,
884 .supports_baco = &soc15_supports_baco,
885 .pre_asic_init = &soc15_pre_asic_init,
886 .query_video_codecs = &soc15_query_video_codecs,
887 };
888
889 static const struct amdgpu_asic_funcs vega20_asic_funcs =
890 {
891 .read_disabled_bios = &soc15_read_disabled_bios,
892 .read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
893 .read_register = &soc15_read_register,
894 .reset = &soc15_asic_reset,
895 .reset_method = &soc15_asic_reset_method,
896 .get_xclk = &soc15_get_xclk,
897 .set_uvd_clocks = &soc15_set_uvd_clocks,
898 .set_vce_clocks = &soc15_set_vce_clocks,
899 .get_config_memsize = &soc15_get_config_memsize,
900 .need_full_reset = &soc15_need_full_reset,
901 .init_doorbell_index = &vega20_doorbell_index_init,
902 .get_pcie_usage = &vega20_get_pcie_usage,
903 .need_reset_on_init = &soc15_need_reset_on_init,
904 .get_pcie_replay_count = &soc15_get_pcie_replay_count,
905 .supports_baco = &soc15_supports_baco,
906 .pre_asic_init = &soc15_pre_asic_init,
907 .query_video_codecs = &soc15_query_video_codecs,
908 };
909
910 static const struct amdgpu_asic_funcs aqua_vanjaram_asic_funcs =
911 {
912 .read_disabled_bios = &soc15_read_disabled_bios,
913 .read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
914 .read_register = &soc15_read_register,
915 .reset = &soc15_asic_reset,
916 .reset_method = &soc15_asic_reset_method,
917 .get_xclk = &soc15_get_xclk,
918 .set_uvd_clocks = &soc15_set_uvd_clocks,
919 .set_vce_clocks = &soc15_set_vce_clocks,
920 .get_config_memsize = &soc15_get_config_memsize,
921 .need_full_reset = &soc15_need_full_reset,
922 .init_doorbell_index = &aqua_vanjaram_doorbell_index_init,
923 .need_reset_on_init = &soc15_need_reset_on_init,
924 .get_pcie_replay_count = &amdgpu_nbio_get_pcie_replay_count,
925 .supports_baco = &soc15_supports_baco,
926 .pre_asic_init = &soc15_pre_asic_init,
927 .query_video_codecs = &soc15_query_video_codecs,
928 .encode_ext_smn_addressing = &aqua_vanjaram_encode_ext_smn_addressing,
929 .get_reg_state = &aqua_vanjaram_get_reg_state,
930 };
931
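/* Set up register access callbacks, asic_funcs and the per-IP clock/power
 * gating flags based on the GC IP version.
 */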
932 static int soc15_common_early_init(void *handle)
933 {
934 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
935
936 adev->nbio.funcs->set_reg_remap(adev);
937 adev->smc_rreg = NULL;
938 adev->smc_wreg = NULL;
939 adev->pcie_rreg = &amdgpu_device_indirect_rreg;
940 adev->pcie_wreg = &amdgpu_device_indirect_wreg;
941 adev->pcie_rreg_ext = &amdgpu_device_indirect_rreg_ext;
942 adev->pcie_wreg_ext = &amdgpu_device_indirect_wreg_ext;
943 adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64;
944 adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64;
945 adev->pcie_rreg64_ext = &amdgpu_device_indirect_rreg64_ext;
946 adev->pcie_wreg64_ext = &amdgpu_device_indirect_wreg64_ext;
947 adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
948 adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
949 adev->didt_rreg = &soc15_didt_rreg;
950 adev->didt_wreg = &soc15_didt_wreg;
951 adev->gc_cac_rreg = &soc15_gc_cac_rreg;
952 adev->gc_cac_wreg = &soc15_gc_cac_wreg;
953 adev->se_cac_rreg = &soc15_se_cac_rreg;
954 adev->se_cac_wreg = &soc15_se_cac_wreg;
955
956 adev->rev_id = amdgpu_device_get_rev_id(adev);
957 adev->external_rev_id = 0xFF;
958 /* TODO: split the CG and PG flags based on the IP version to which
959 * they apply.
960 */
961 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
962 case IP_VERSION(9, 0, 1):
963 adev->asic_funcs = &soc15_asic_funcs;
964 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
965 AMD_CG_SUPPORT_GFX_MGLS |
966 AMD_CG_SUPPORT_GFX_RLC_LS |
967 AMD_CG_SUPPORT_GFX_CP_LS |
968 AMD_CG_SUPPORT_GFX_3D_CGCG |
969 AMD_CG_SUPPORT_GFX_3D_CGLS |
970 AMD_CG_SUPPORT_GFX_CGCG |
971 AMD_CG_SUPPORT_GFX_CGLS |
972 AMD_CG_SUPPORT_BIF_MGCG |
973 AMD_CG_SUPPORT_BIF_LS |
974 AMD_CG_SUPPORT_HDP_LS |
975 AMD_CG_SUPPORT_DRM_MGCG |
976 AMD_CG_SUPPORT_DRM_LS |
977 AMD_CG_SUPPORT_ROM_MGCG |
978 AMD_CG_SUPPORT_DF_MGCG |
979 AMD_CG_SUPPORT_SDMA_MGCG |
980 AMD_CG_SUPPORT_SDMA_LS |
981 AMD_CG_SUPPORT_MC_MGCG |
982 AMD_CG_SUPPORT_MC_LS;
983 adev->pg_flags = 0;
984 adev->external_rev_id = 0x1;
985 break;
986 case IP_VERSION(9, 2, 1):
987 adev->asic_funcs = &soc15_asic_funcs;
988 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
989 AMD_CG_SUPPORT_GFX_MGLS |
990 AMD_CG_SUPPORT_GFX_CGCG |
991 AMD_CG_SUPPORT_GFX_CGLS |
992 AMD_CG_SUPPORT_GFX_3D_CGCG |
993 AMD_CG_SUPPORT_GFX_3D_CGLS |
994 AMD_CG_SUPPORT_GFX_CP_LS |
995 AMD_CG_SUPPORT_MC_LS |
996 AMD_CG_SUPPORT_MC_MGCG |
997 AMD_CG_SUPPORT_SDMA_MGCG |
998 AMD_CG_SUPPORT_SDMA_LS |
999 AMD_CG_SUPPORT_BIF_MGCG |
1000 AMD_CG_SUPPORT_BIF_LS |
1001 AMD_CG_SUPPORT_HDP_MGCG |
1002 AMD_CG_SUPPORT_HDP_LS |
1003 AMD_CG_SUPPORT_ROM_MGCG |
1004 AMD_CG_SUPPORT_VCE_MGCG |
1005 AMD_CG_SUPPORT_UVD_MGCG;
1006 adev->pg_flags = 0;
1007 adev->external_rev_id = adev->rev_id + 0x14;
1008 break;
1009 case IP_VERSION(9, 4, 0):
1010 adev->asic_funcs = &vega20_asic_funcs;
1011 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1012 AMD_CG_SUPPORT_GFX_MGLS |
1013 AMD_CG_SUPPORT_GFX_CGCG |
1014 AMD_CG_SUPPORT_GFX_CGLS |
1015 AMD_CG_SUPPORT_GFX_3D_CGCG |
1016 AMD_CG_SUPPORT_GFX_3D_CGLS |
1017 AMD_CG_SUPPORT_GFX_CP_LS |
1018 AMD_CG_SUPPORT_MC_LS |
1019 AMD_CG_SUPPORT_MC_MGCG |
1020 AMD_CG_SUPPORT_SDMA_MGCG |
1021 AMD_CG_SUPPORT_SDMA_LS |
1022 AMD_CG_SUPPORT_BIF_MGCG |
1023 AMD_CG_SUPPORT_BIF_LS |
1024 AMD_CG_SUPPORT_HDP_MGCG |
1025 AMD_CG_SUPPORT_HDP_LS |
1026 AMD_CG_SUPPORT_ROM_MGCG |
1027 AMD_CG_SUPPORT_VCE_MGCG |
1028 AMD_CG_SUPPORT_UVD_MGCG;
1029 adev->pg_flags = 0;
1030 adev->external_rev_id = adev->rev_id + 0x28;
1031 break;
1032 case IP_VERSION(9, 1, 0):
1033 case IP_VERSION(9, 2, 2):
1034 adev->asic_funcs = &soc15_asic_funcs;
1035
1036 if (adev->rev_id >= 0x8)
1037 adev->apu_flags |= AMD_APU_IS_RAVEN2;
1038
1039 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1040 adev->external_rev_id = adev->rev_id + 0x79;
1041 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1042 adev->external_rev_id = adev->rev_id + 0x41;
1043 else if (adev->rev_id == 1)
1044 adev->external_rev_id = adev->rev_id + 0x20;
1045 else
1046 adev->external_rev_id = adev->rev_id + 0x01;
1047
1048 if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
1049 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1050 AMD_CG_SUPPORT_GFX_MGLS |
1051 AMD_CG_SUPPORT_GFX_CP_LS |
1052 AMD_CG_SUPPORT_GFX_3D_CGCG |
1053 AMD_CG_SUPPORT_GFX_3D_CGLS |
1054 AMD_CG_SUPPORT_GFX_CGCG |
1055 AMD_CG_SUPPORT_GFX_CGLS |
1056 AMD_CG_SUPPORT_BIF_LS |
1057 AMD_CG_SUPPORT_HDP_LS |
1058 AMD_CG_SUPPORT_MC_MGCG |
1059 AMD_CG_SUPPORT_MC_LS |
1060 AMD_CG_SUPPORT_SDMA_MGCG |
1061 AMD_CG_SUPPORT_SDMA_LS |
1062 AMD_CG_SUPPORT_VCN_MGCG;
1063
1064 adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
1065 } else if (adev->apu_flags & AMD_APU_IS_PICASSO) {
1066 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1067 AMD_CG_SUPPORT_GFX_MGLS |
1068 AMD_CG_SUPPORT_GFX_CP_LS |
1069 AMD_CG_SUPPORT_GFX_3D_CGLS |
1070 AMD_CG_SUPPORT_GFX_CGCG |
1071 AMD_CG_SUPPORT_GFX_CGLS |
1072 AMD_CG_SUPPORT_BIF_LS |
1073 AMD_CG_SUPPORT_HDP_LS |
1074 AMD_CG_SUPPORT_MC_MGCG |
1075 AMD_CG_SUPPORT_MC_LS |
1076 AMD_CG_SUPPORT_SDMA_MGCG |
1077 AMD_CG_SUPPORT_SDMA_LS |
1078 AMD_CG_SUPPORT_VCN_MGCG;
1079
1080 /*
1081 * MMHUB PG needs to be disabled for Picasso for
1082 * stability reasons.
1083 */
1084 adev->pg_flags = AMD_PG_SUPPORT_SDMA |
1085 AMD_PG_SUPPORT_VCN;
1086 } else {
1087 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1088 AMD_CG_SUPPORT_GFX_MGLS |
1089 AMD_CG_SUPPORT_GFX_RLC_LS |
1090 AMD_CG_SUPPORT_GFX_CP_LS |
1091 AMD_CG_SUPPORT_GFX_3D_CGLS |
1092 AMD_CG_SUPPORT_GFX_CGCG |
1093 AMD_CG_SUPPORT_GFX_CGLS |
1094 AMD_CG_SUPPORT_BIF_MGCG |
1095 AMD_CG_SUPPORT_BIF_LS |
1096 AMD_CG_SUPPORT_HDP_MGCG |
1097 AMD_CG_SUPPORT_HDP_LS |
1098 AMD_CG_SUPPORT_DRM_MGCG |
1099 AMD_CG_SUPPORT_DRM_LS |
1100 AMD_CG_SUPPORT_MC_MGCG |
1101 AMD_CG_SUPPORT_MC_LS |
1102 AMD_CG_SUPPORT_SDMA_MGCG |
1103 AMD_CG_SUPPORT_SDMA_LS |
1104 AMD_CG_SUPPORT_VCN_MGCG;
1105
1106 adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
1107 }
1108 break;
1109 case IP_VERSION(9, 4, 1):
1110 adev->asic_funcs = &vega20_asic_funcs;
1111 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1112 AMD_CG_SUPPORT_GFX_MGLS |
1113 AMD_CG_SUPPORT_GFX_CGCG |
1114 AMD_CG_SUPPORT_GFX_CGLS |
1115 AMD_CG_SUPPORT_GFX_CP_LS |
1116 AMD_CG_SUPPORT_HDP_MGCG |
1117 AMD_CG_SUPPORT_HDP_LS |
1118 AMD_CG_SUPPORT_SDMA_MGCG |
1119 AMD_CG_SUPPORT_SDMA_LS |
1120 AMD_CG_SUPPORT_MC_MGCG |
1121 AMD_CG_SUPPORT_MC_LS |
1122 AMD_CG_SUPPORT_IH_CG |
1123 AMD_CG_SUPPORT_VCN_MGCG |
1124 AMD_CG_SUPPORT_JPEG_MGCG;
1125 adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG;
1126 adev->external_rev_id = adev->rev_id + 0x32;
1127 break;
1128 case IP_VERSION(9, 3, 0):
1129 adev->asic_funcs = &soc15_asic_funcs;
1130
1131 if (adev->apu_flags & AMD_APU_IS_RENOIR)
1132 adev->external_rev_id = adev->rev_id + 0x91;
1133 else
1134 adev->external_rev_id = adev->rev_id + 0xa1;
1135 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1136 AMD_CG_SUPPORT_GFX_MGLS |
1137 AMD_CG_SUPPORT_GFX_3D_CGCG |
1138 AMD_CG_SUPPORT_GFX_3D_CGLS |
1139 AMD_CG_SUPPORT_GFX_CGCG |
1140 AMD_CG_SUPPORT_GFX_CGLS |
1141 AMD_CG_SUPPORT_GFX_CP_LS |
1142 AMD_CG_SUPPORT_MC_MGCG |
1143 AMD_CG_SUPPORT_MC_LS |
1144 AMD_CG_SUPPORT_SDMA_MGCG |
1145 AMD_CG_SUPPORT_SDMA_LS |
1146 AMD_CG_SUPPORT_BIF_LS |
1147 AMD_CG_SUPPORT_HDP_LS |
1148 AMD_CG_SUPPORT_VCN_MGCG |
1149 AMD_CG_SUPPORT_JPEG_MGCG |
1150 AMD_CG_SUPPORT_IH_CG |
1151 AMD_CG_SUPPORT_ATHUB_LS |
1152 AMD_CG_SUPPORT_ATHUB_MGCG |
1153 AMD_CG_SUPPORT_DF_MGCG;
1154 adev->pg_flags = AMD_PG_SUPPORT_SDMA |
1155 AMD_PG_SUPPORT_VCN |
1156 AMD_PG_SUPPORT_JPEG |
1157 AMD_PG_SUPPORT_VCN_DPG;
1158 break;
1159 case IP_VERSION(9, 4, 2):
1160 adev->asic_funcs = &vega20_asic_funcs;
1161 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1162 AMD_CG_SUPPORT_GFX_MGLS |
1163 AMD_CG_SUPPORT_GFX_CP_LS |
1164 AMD_CG_SUPPORT_HDP_LS |
1165 AMD_CG_SUPPORT_SDMA_MGCG |
1166 AMD_CG_SUPPORT_SDMA_LS |
1167 AMD_CG_SUPPORT_IH_CG |
1168 AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG;
1169 adev->pg_flags = AMD_PG_SUPPORT_VCN_DPG;
1170 adev->external_rev_id = adev->rev_id + 0x3c;
1171 break;
1172 case IP_VERSION(9, 4, 3):
1173 case IP_VERSION(9, 4, 4):
1174 adev->asic_funcs = &aqua_vanjaram_asic_funcs;
1175 adev->cg_flags =
1176 AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_CGCG |
1177 AMD_CG_SUPPORT_GFX_CGLS | AMD_CG_SUPPORT_SDMA_MGCG |
1178 AMD_CG_SUPPORT_GFX_FGCG | AMD_CG_SUPPORT_REPEATER_FGCG |
1179 AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG |
1180 AMD_CG_SUPPORT_IH_CG;
1181 adev->pg_flags =
1182 AMD_PG_SUPPORT_VCN |
1183 AMD_PG_SUPPORT_VCN_DPG |
1184 AMD_PG_SUPPORT_JPEG;
1185 /* TODO: need a new external_rev_id for GC 9.4.4? */
1186 adev->external_rev_id = adev->rev_id + 0x46;
1187 break;
1188 default:
1189 /* FIXME: not supported yet */
1190 return -EINVAL;
1191 }
1192
1193 if (amdgpu_sriov_vf(adev)) {
1194 amdgpu_virt_init_setting(adev);
1195 xgpu_ai_mailbox_set_irq_funcs(adev);
1196 }
1197
1198 return 0;
1199 }
1200
1201 static int soc15_common_late_init(void *handle)
1202 {
1203 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1204
1205 if (amdgpu_sriov_vf(adev))
1206 xgpu_ai_mailbox_get_irq(adev);
1207
1208 /* Enable selfring doorbell aperture late because doorbell BAR
1209 * aperture will change if BAR resize succeeds in gmc sw_init.
1210 */
1211 adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true);
1212
1213 return 0;
1214 }
1215
1216 static int soc15_common_sw_init(void *handle)
1217 {
1218 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1219
1220 if (amdgpu_sriov_vf(adev))
1221 xgpu_ai_mailbox_add_irq_id(adev);
1222
1223 if (adev->df.funcs &&
1224 adev->df.funcs->sw_init)
1225 adev->df.funcs->sw_init(adev);
1226
1227 return 0;
1228 }
1229
1230 static int soc15_common_sw_fini(void *handle)
1231 {
1232 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1233
1234 if (adev->df.funcs &&
1235 adev->df.funcs->sw_fini)
1236 adev->df.funcs->sw_fini(adev);
1237 return 0;
1238 }
1239
1240 static void soc15_sdma_doorbell_range_init(struct amdgpu_device *adev)
1241 {
1242 int i;
1243
1244 /* under SR-IOV the sdma doorbell range is programmed by the hypervisor */
1245 if (!amdgpu_sriov_vf(adev)) {
1246 for (i = 0; i < adev->sdma.num_instances; i++) {
1247 adev->nbio.funcs->sdma_doorbell_range(adev, i,
1248 true, adev->doorbell_index.sdma_engine[i] << 1,
1249 adev->doorbell_index.sdma_doorbell_range);
1250 }
1251 }
1252 }
1253
1254 static int soc15_common_hw_init(void *handle)
1255 {
1256 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1257
1258 /* enable aspm */
1259 soc15_program_aspm(adev);
1260 /* setup nbio registers */
1261 adev->nbio.funcs->init_registers(adev);
1262 /* remap HDP registers to a hole in mmio space,
1263 * so that those registers can be exposed
1264 * to process space
1265 */
1266 if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
1267 adev->nbio.funcs->remap_hdp_registers(adev);
1268
1269 /* enable the doorbell aperture */
1270 adev->nbio.funcs->enable_doorbell_aperture(adev, true);
1271
1272 /* HW doorbell routing policy: doorbell writes not
1273 * in the SDMA/IH/MM/ACV range are routed to CP. So
1274 * we need to init the SDMA doorbell range prior
1275 * to CP ip block init and ring test. IH init already
1276 * happens before CP.
1277 */
1278 soc15_sdma_doorbell_range_init(adev);
1279
1280 return 0;
1281 }
1282
1283 static int soc15_common_hw_fini(void *handle)
1284 {
1285 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1286
1287 /* Disable the doorbell aperture and selfring doorbell aperture
1288 * separately in hw_fini because soc15_enable_doorbell_aperture
1289 * has been removed and there is no need to delay disabling
1290 * selfring doorbell.
1291 */
1292 adev->nbio.funcs->enable_doorbell_aperture(adev, false);
1293 adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false);
1294
1295 if (amdgpu_sriov_vf(adev))
1296 xgpu_ai_mailbox_put_irq(adev);
1297
1298 if ((!amdgpu_sriov_vf(adev)) &&
1299 adev->nbio.ras_if &&
1300 amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
1301 if (adev->nbio.ras &&
1302 adev->nbio.ras->init_ras_controller_interrupt)
1303 amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
1304 if (adev->nbio.ras &&
1305 adev->nbio.ras->init_ras_err_event_athub_interrupt)
1306 amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
1307 }
1308
1309 return 0;
1310 }
1311
1312 static int soc15_common_suspend(void *handle)
1313 {
1314 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1315
1316 return soc15_common_hw_fini(adev);
1317 }
1318
1319 static int soc15_common_resume(void *handle)
1320 {
1321 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1322
1323 if (soc15_need_reset_on_resume(adev)) {
1324 dev_info(adev->dev, "S3 suspend abort case, let's reset ASIC.\n");
1325 soc15_asic_reset(adev);
1326 }
1327 return soc15_common_hw_init(adev);
1328 }
1329
1330 static bool soc15_common_is_idle(void *handle)
1331 {
1332 return true;
1333 }
1334
1335 static int soc15_common_wait_for_idle(void *handle)
1336 {
1337 return 0;
1338 }
1339
1340 static int soc15_common_soft_reset(void *handle)
1341 {
1342 return 0;
1343 }
1344
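/* Toggle DRM medium grain clock gating by flipping bits 24-31 of
 * MP0_MISC_CGTT_CTRL0.
 */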
1345 static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
1346 {
1347 uint32_t def, data;
1348
1349 def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
1350
1351 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
1352 data &= ~(0x01000000 |
1353 0x02000000 |
1354 0x04000000 |
1355 0x08000000 |
1356 0x10000000 |
1357 0x20000000 |
1358 0x40000000 |
1359 0x80000000);
1360 else
1361 data |= (0x01000000 |
1362 0x02000000 |
1363 0x04000000 |
1364 0x08000000 |
1365 0x10000000 |
1366 0x20000000 |
1367 0x40000000 |
1368 0x80000000);
1369
1370 if (def != data)
1371 WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
1372 }
1373
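/* Toggle DRM light sleep via bit 0 of MP0_MISC_LIGHT_SLEEP_CTRL. */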
1374 static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
1375 {
1376 uint32_t def, data;
1377
1378 def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
1379
1380 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
1381 data |= 1;
1382 else
1383 data &= ~1;
1384
1385 if (def != data)
1386 WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
1387 }
1388
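/* Route clockgating requests to the NBIO, HDP, DRM, ROM and DF blocks
 * according to the NBIO IP version.
 */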
1389 static int soc15_common_set_clockgating_state(void *handle,
1390 enum amd_clockgating_state state)
1391 {
1392 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1393
1394 if (amdgpu_sriov_vf(adev))
1395 return 0;
1396
1397 switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
1398 case IP_VERSION(6, 1, 0):
1399 case IP_VERSION(6, 2, 0):
1400 case IP_VERSION(7, 4, 0):
1401 adev->nbio.funcs->update_medium_grain_clock_gating(adev,
1402 state == AMD_CG_STATE_GATE);
1403 adev->nbio.funcs->update_medium_grain_light_sleep(adev,
1404 state == AMD_CG_STATE_GATE);
1405 adev->hdp.funcs->update_clock_gating(adev,
1406 state == AMD_CG_STATE_GATE);
1407 soc15_update_drm_clock_gating(adev,
1408 state == AMD_CG_STATE_GATE);
1409 soc15_update_drm_light_sleep(adev,
1410 state == AMD_CG_STATE_GATE);
1411 adev->smuio.funcs->update_rom_clock_gating(adev,
1412 state == AMD_CG_STATE_GATE);
1413 adev->df.funcs->update_medium_grain_clock_gating(adev,
1414 state == AMD_CG_STATE_GATE);
1415 break;
1416 case IP_VERSION(7, 0, 0):
1417 case IP_VERSION(7, 0, 1):
1418 case IP_VERSION(2, 5, 0):
1419 adev->nbio.funcs->update_medium_grain_clock_gating(adev,
1420 state == AMD_CG_STATE_GATE);
1421 adev->nbio.funcs->update_medium_grain_light_sleep(adev,
1422 state == AMD_CG_STATE_GATE);
1423 adev->hdp.funcs->update_clock_gating(adev,
1424 state == AMD_CG_STATE_GATE);
1425 soc15_update_drm_clock_gating(adev,
1426 state == AMD_CG_STATE_GATE);
1427 soc15_update_drm_light_sleep(adev,
1428 state == AMD_CG_STATE_GATE);
1429 break;
1430 case IP_VERSION(7, 4, 1):
1431 case IP_VERSION(7, 4, 4):
1432 adev->hdp.funcs->update_clock_gating(adev,
1433 state == AMD_CG_STATE_GATE);
1434 break;
1435 default:
1436 break;
1437 }
1438 return 0;
1439 }
1440
1441 static void soc15_common_get_clockgating_state(void *handle, u64 *flags)
1442 {
1443 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1444 int data;
1445
1446 if (amdgpu_sriov_vf(adev))
1447 *flags = 0;
1448
1449 if (adev->nbio.funcs && adev->nbio.funcs->get_clockgating_state)
1450 adev->nbio.funcs->get_clockgating_state(adev, flags);
1451
1452 if (adev->hdp.funcs && adev->hdp.funcs->get_clock_gating_state)
1453 adev->hdp.funcs->get_clock_gating_state(adev, flags);
1454
1455 if ((amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2)) &&
1456 (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) &&
1457 (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 14))) {
1458 /* AMD_CG_SUPPORT_DRM_MGCG */
1459 data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
1460 if (!(data & 0x01000000))
1461 *flags |= AMD_CG_SUPPORT_DRM_MGCG;
1462
1463 /* AMD_CG_SUPPORT_DRM_LS */
1464 data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
1465 if (data & 0x1)
1466 *flags |= AMD_CG_SUPPORT_DRM_LS;
1467 }
1468
1469 /* AMD_CG_SUPPORT_ROM_MGCG */
1470 if (adev->smuio.funcs && adev->smuio.funcs->get_clock_gating_state)
1471 adev->smuio.funcs->get_clock_gating_state(adev, flags);
1472
1473 if (adev->df.funcs && adev->df.funcs->get_clockgating_state)
1474 adev->df.funcs->get_clockgating_state(adev, flags);
1475 }
1476
1477 static int soc15_common_set_powergating_state(void *handle,
1478 enum amd_powergating_state state)
1479 {
1480 /* todo */
1481 return 0;
1482 }
1483
1484 static const struct amd_ip_funcs soc15_common_ip_funcs = {
1485 .name = "soc15_common",
1486 .early_init = soc15_common_early_init,
1487 .late_init = soc15_common_late_init,
1488 .sw_init = soc15_common_sw_init,
1489 .sw_fini = soc15_common_sw_fini,
1490 .hw_init = soc15_common_hw_init,
1491 .hw_fini = soc15_common_hw_fini,
1492 .suspend = soc15_common_suspend,
1493 .resume = soc15_common_resume,
1494 .is_idle = soc15_common_is_idle,
1495 .wait_for_idle = soc15_common_wait_for_idle,
1496 .soft_reset = soc15_common_soft_reset,
1497 .set_clockgating_state = soc15_common_set_clockgating_state,
1498 .set_powergating_state = soc15_common_set_powergating_state,
1499 .get_clockgating_state = soc15_common_get_clockgating_state,
1500 .dump_ip_state = NULL,
1501 .print_ip_state = NULL,
1502 };
1503