xref: /linux/drivers/gpu/drm/amd/amdgpu/soc15.c (revision ea518afc992032f7570c0a89ac9240b387dc0faf)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/firmware.h>
24 #include <linux/slab.h>
25 #include <linux/module.h>
26 #include <linux/pci.h>
27 
28 #include <drm/amdgpu_drm.h>
29 
30 #include "amdgpu.h"
31 #include "amdgpu_atombios.h"
32 #include "amdgpu_ih.h"
33 #include "amdgpu_uvd.h"
34 #include "amdgpu_vce.h"
35 #include "amdgpu_ucode.h"
36 #include "amdgpu_psp.h"
37 #include "atom.h"
38 #include "amd_pcie.h"
39 
40 #include "uvd/uvd_7_0_offset.h"
41 #include "gc/gc_9_0_offset.h"
42 #include "gc/gc_9_0_sh_mask.h"
43 #include "sdma0/sdma0_4_0_offset.h"
44 #include "sdma1/sdma1_4_0_offset.h"
45 #include "nbio/nbio_7_0_default.h"
46 #include "nbio/nbio_7_0_offset.h"
47 #include "nbio/nbio_7_0_sh_mask.h"
48 #include "nbio/nbio_7_0_smn.h"
49 #include "mp/mp_9_0_offset.h"
50 
51 #include "soc15.h"
52 #include "soc15_common.h"
53 #include "gfx_v9_0.h"
54 #include "gmc_v9_0.h"
55 #include "gfxhub_v1_0.h"
56 #include "mmhub_v1_0.h"
57 #include "df_v1_7.h"
58 #include "df_v3_6.h"
59 #include "nbio_v6_1.h"
60 #include "nbio_v7_0.h"
61 #include "nbio_v7_4.h"
62 #include "hdp_v4_0.h"
63 #include "vega10_ih.h"
64 #include "vega20_ih.h"
65 #include "navi10_ih.h"
66 #include "sdma_v4_0.h"
67 #include "uvd_v7_0.h"
68 #include "vce_v4_0.h"
69 #include "vcn_v1_0.h"
70 #include "vcn_v2_0.h"
71 #include "jpeg_v2_0.h"
72 #include "vcn_v2_5.h"
73 #include "jpeg_v2_5.h"
74 #include "smuio_v9_0.h"
75 #include "smuio_v11_0.h"
76 #include "smuio_v13_0.h"
77 #include "amdgpu_vkms.h"
78 #include "mxgpu_ai.h"
79 #include "amdgpu_ras.h"
80 #include "amdgpu_xgmi.h"
81 #include <uapi/linux/kfd_ioctl.h>
82 
83 #define mmMP0_MISC_CGTT_CTRL0                                                                   0x01b9
84 #define mmMP0_MISC_CGTT_CTRL0_BASE_IDX                                                          0
85 #define mmMP0_MISC_LIGHT_SLEEP_CTRL                                                             0x01ba
86 #define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX                                                    0
87 
88 static const struct amd_ip_funcs soc15_common_ip_funcs;
89 
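/* Each table entry below is codec_info_build(codec type, max width,
 * max height, max level), describing one supported codec and its limits.
 */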
90 /* Vega, Raven, Arcturus */
91 static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] =
92 {
93 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
94 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
95 };
96 
97 static const struct amdgpu_video_codecs vega_video_codecs_encode =
98 {
99 	.codec_count = ARRAY_SIZE(vega_video_codecs_encode_array),
100 	.codec_array = vega_video_codecs_encode_array,
101 };
102 
103 /* Vega */
104 static const struct amdgpu_video_codec_info vega_video_codecs_decode_array[] =
105 {
106 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
107 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
108 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
109 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
110 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
111 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
112 };
113 
114 static const struct amdgpu_video_codecs vega_video_codecs_decode =
115 {
116 	.codec_count = ARRAY_SIZE(vega_video_codecs_decode_array),
117 	.codec_array = vega_video_codecs_decode_array,
118 };
119 
120 /* Raven */
121 static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] =
122 {
123 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
124 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
125 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
126 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
127 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
128 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
129 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 4096, 4096, 0)},
130 };
131 
132 static const struct amdgpu_video_codecs rv_video_codecs_decode =
133 {
134 	.codec_count = ARRAY_SIZE(rv_video_codecs_decode_array),
135 	.codec_array = rv_video_codecs_decode_array,
136 };
137 
138 /* Renoir, Arcturus */
139 static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] =
140 {
141 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
142 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
143 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
144 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
145 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
146 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
147 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
148 };
149 
150 static const struct amdgpu_video_codecs rn_video_codecs_decode =
151 {
152 	.codec_count = ARRAY_SIZE(rn_video_codecs_decode_array),
153 	.codec_array = rn_video_codecs_decode_array,
154 };
155 
156 static const struct amdgpu_video_codec_info vcn_4_0_3_video_codecs_decode_array[] = {
157 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
158 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
159 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
160 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
161 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
162 };
163 
164 static const struct amdgpu_video_codecs vcn_4_0_3_video_codecs_decode = {
165 	.codec_count = ARRAY_SIZE(vcn_4_0_3_video_codecs_decode_array),
166 	.codec_array = vcn_4_0_3_video_codecs_decode_array,
167 };
168 
169 static const struct amdgpu_video_codecs vcn_4_0_3_video_codecs_encode = {
170 	.codec_count = 0,
171 	.codec_array = NULL,
172 };
173 
174 static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,
175 				    const struct amdgpu_video_codecs **codecs)
176 {
177 	if (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
178 		switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
179 		case IP_VERSION(4, 0, 0):
180 		case IP_VERSION(4, 1, 0):
181 			if (encode)
182 				*codecs = &vega_video_codecs_encode;
183 			else
184 				*codecs = &vega_video_codecs_decode;
185 			return 0;
186 		default:
187 			return -EINVAL;
188 		}
189 	} else {
190 		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
191 		case IP_VERSION(1, 0, 0):
192 		case IP_VERSION(1, 0, 1):
193 			if (encode)
194 				*codecs = &vega_video_codecs_encode;
195 			else
196 				*codecs = &rv_video_codecs_decode;
197 			return 0;
198 		case IP_VERSION(2, 5, 0):
199 		case IP_VERSION(2, 6, 0):
200 		case IP_VERSION(2, 2, 0):
201 			if (encode)
202 				*codecs = &vega_video_codecs_encode;
203 			else
204 				*codecs = &rn_video_codecs_decode;
205 			return 0;
206 		case IP_VERSION(4, 0, 3):
207 			if (encode)
208 				*codecs = &vcn_4_0_3_video_codecs_encode;
209 			else
210 				*codecs = &vcn_4_0_3_video_codecs_decode;
211 			return 0;
212 		default:
213 			return -EINVAL;
214 		}
215 	}
216 }
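/* Minimal usage sketch (hypothetical caller, normally reached through
 * adev->asic_funcs->query_video_codecs): query the decode table and walk
 * the entries.  Error handling is omitted for brevity.
 *
 *	const struct amdgpu_video_codecs *codecs;
 *	unsigned int i;
 *
 *	if (!soc15_query_video_codecs(adev, false, &codecs)) {
 *		for (i = 0; i < codecs->codec_count; i++)
 *			dev_dbg(adev->dev, "decode codec %u: %ux%u\n",
 *				codecs->codec_array[i].codec_type,
 *				codecs->codec_array[i].max_width,
 *				codecs->codec_array[i].max_height);
 *	}
 */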
217 
218 static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
219 {
220 	unsigned long flags, address, data;
221 	u32 r;
222 
223 	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
224 	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);
225 
226 	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
227 	WREG32(address, ((reg) & 0x1ff));
228 	r = RREG32(data);
229 	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
230 	return r;
231 }
232 
233 static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
234 {
235 	unsigned long flags, address, data;
236 
237 	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
238 	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);
239 
240 	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
241 	WREG32(address, ((reg) & 0x1ff));
242 	WREG32(data, (v));
243 	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
244 }
245 
246 static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
247 {
248 	unsigned long flags, address, data;
249 	u32 r;
250 
251 	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
252 	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
253 
254 	spin_lock_irqsave(&adev->didt_idx_lock, flags);
255 	WREG32(address, (reg));
256 	r = RREG32(data);
257 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
258 	return r;
259 }
260 
261 static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
262 {
263 	unsigned long flags, address, data;
264 
265 	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
266 	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
267 
268 	spin_lock_irqsave(&adev->didt_idx_lock, flags);
269 	WREG32(address, (reg));
270 	WREG32(data, (v));
271 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
272 }
273 
274 static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
275 {
276 	unsigned long flags;
277 	u32 r;
278 
279 	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
280 	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
281 	r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
282 	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
283 	return r;
284 }
285 
286 static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
287 {
288 	unsigned long flags;
289 
290 	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
291 	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
292 	WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
293 	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
294 }
295 
296 static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
297 {
298 	unsigned long flags;
299 	u32 r;
300 
301 	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
302 	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
303 	r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
304 	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
305 	return r;
306 }
307 
308 static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
309 {
310 	unsigned long flags;
311 
312 	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
313 	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
314 	WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
315 	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
316 }
317 
318 static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
319 {
320 	return adev->nbio.funcs->get_memsize(adev);
321 }
322 
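/* Return the external clock (xclk) in 10 kHz units.  Some MP1 (SMU)
 * versions report a reference clock that does not match the real xclk,
 * so a fixed 100 MHz (10000 * 10 kHz) or a derived value is returned
 * for those.
 */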
323 static u32 soc15_get_xclk(struct amdgpu_device *adev)
324 {
325 	u32 reference_clock = adev->clock.spll.reference_freq;
326 
327 	if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(12, 0, 0) ||
328 	    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(12, 0, 1) ||
329 	    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6))
330 		return 10000;
331 	if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(10, 0, 0) ||
332 	    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(10, 0, 1))
333 		return reference_clock / 4;
334 
335 	return reference_clock;
336 }
337 
338 
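/* Program GRBM_GFX_CNTL to select which ME/pipe/queue/VMID (and XCC
 * instance) subsequent GRBM register accesses are routed to.  Callers
 * typically hold adev->srbm_mutex around the select/access/deselect
 * sequence.
 */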
339 void soc15_grbm_select(struct amdgpu_device *adev,
340 		     u32 me, u32 pipe, u32 queue, u32 vmid, int xcc_id)
341 {
342 	u32 grbm_gfx_cntl = 0;
343 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
344 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
345 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
346 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
347 
348 	WREG32_SOC15_RLC_SHADOW(GC, xcc_id, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
349 }
350 
351 static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
352 {
353 	/* todo */
354 	return false;
355 }
356 
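/* Whitelist of registers that userspace may read via the
 * AMDGPU_INFO_READ_MMR_REG query; soc15_read_register() below rejects
 * any offset not found in this table.
 */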
357 static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
358 	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
359 	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
360 	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
361 	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
362 	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
363 	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
364 	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
365 	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
366 	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
367 	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
368 	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
369 	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
370 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
371 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
372 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
373 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
374 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
375 	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
376 	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
377 	{ SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
378 };
379 
380 static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
381 					 u32 sh_num, u32 reg_offset)
382 {
383 	uint32_t val;
384 
385 	mutex_lock(&adev->grbm_idx_mutex);
386 	if (se_num != 0xffffffff || sh_num != 0xffffffff)
387 		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);
388 
389 	val = RREG32(reg_offset);
390 
391 	if (se_num != 0xffffffff || sh_num != 0xffffffff)
392 		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
393 	mutex_unlock(&adev->grbm_idx_mutex);
394 	return val;
395 }
396 
397 static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
398 					 bool indexed, u32 se_num,
399 					 u32 sh_num, u32 reg_offset)
400 {
401 	if (indexed) {
402 		return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
403 	} else {
404 		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
405 			return adev->gfx.config.gb_addr_config;
406 		else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
407 			return adev->gfx.config.db_debug2;
408 		return RREG32(reg_offset);
409 	}
410 }
411 
412 static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
413 			    u32 sh_num, u32 reg_offset, u32 *value)
414 {
415 	uint32_t i;
416 	struct soc15_allowed_register_entry  *en;
417 
418 	*value = 0;
419 	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
420 		en = &soc15_allowed_read_registers[i];
421 		if (!adev->reg_offset[en->hwip][en->inst])
422 			continue;
423 		else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
424 					+ en->reg_offset))
425 			continue;
426 
427 		*value = soc15_get_register_value(adev,
428 						  soc15_allowed_read_registers[i].grbm_indexed,
429 						  se_num, sh_num, reg_offset);
430 		return 0;
431 	}
432 	return -EINVAL;
433 }
434 
435 
436 /**
437  * soc15_program_register_sequence - program an array of registers.
438  *
439  * @adev: amdgpu_device pointer
440  * @regs: pointer to the register array
441  * @array_size: size of the register array
442  *
443  * Programs an array of registers with AND and OR masks.
444  * This is a helper for setting golden registers.
445  */
446 
447 void soc15_program_register_sequence(struct amdgpu_device *adev,
448 					     const struct soc15_reg_golden *regs,
449 					     const u32 array_size)
450 {
451 	const struct soc15_reg_golden *entry;
452 	u32 tmp, reg;
453 	int i;
454 
455 	for (i = 0; i < array_size; ++i) {
456 		entry = &regs[i];
457 		reg =  adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
458 
459 		if (entry->and_mask == 0xffffffff) {
460 			tmp = entry->or_mask;
461 		} else {
462 			tmp = (entry->hwip == GC_HWIP) ?
463 				RREG32_SOC15_IP(GC, reg) : RREG32(reg);
464 
465 			tmp &= ~(entry->and_mask);
466 			tmp |= (entry->or_mask & entry->and_mask);
467 		}
468 
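		/* These registers may be RLC-protected (e.g. under SR-IOV),
		 * so write them through the RLC-safe helper.
		 */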
469 		if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) ||
470 			reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE) ||
471 			reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1) ||
472 			reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
473 			WREG32_RLC(reg, tmp);
474 		else
475 			(entry->hwip == GC_HWIP) ?
476 				WREG32_SOC15_IP(GC, reg, tmp) : WREG32(reg, tmp);
477 
478 	}
479 
480 }
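/* Illustrative golden-register table (values are made up for the example):
 * each SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_mask) entry clears
 * the and_mask bits and ORs in or_mask, as applied by the helper above.
 *
 *	static const struct soc15_reg_golden golden_settings_example[] = {
 *		SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
 *	};
 *
 *	soc15_program_register_sequence(adev, golden_settings_example,
 *					ARRAY_SIZE(golden_settings_example));
 */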
481 
482 static int soc15_asic_baco_reset(struct amdgpu_device *adev)
483 {
484 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
485 	int ret = 0;
486 
487 	/* avoid NBIF got stuck when do RAS recovery in BACO reset */
488 	if (ras && adev->ras_enabled)
489 		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
490 
491 	ret = amdgpu_dpm_baco_reset(adev);
492 	if (ret)
493 		return ret;
494 
495 	/* re-enable doorbell interrupt after BACO exit */
496 	if (ras && adev->ras_enabled)
497 		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
498 
499 	return 0;
500 }
501 
502 static enum amd_reset_method
503 soc15_asic_reset_method(struct amdgpu_device *adev)
504 {
505 	bool baco_reset = false;
506 	bool connected_to_cpu = false;
507 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
508 
509 	if (adev->gmc.xgmi.supported && adev->gmc.xgmi.connected_to_cpu)
510 		connected_to_cpu = true;
511 
512 	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
513 	    amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
514 	    amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
515 	    amdgpu_reset_method == AMD_RESET_METHOD_PCI) {
516 		/* If connected to cpu, the driver only supports mode2 reset */
517 		if (connected_to_cpu)
518 			return AMD_RESET_METHOD_MODE2;
519 		return amdgpu_reset_method;
520 	}
521 
522 	if (amdgpu_reset_method != -1)
523 		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
524 				  amdgpu_reset_method);
525 
526 	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
527 	case IP_VERSION(10, 0, 0):
528 	case IP_VERSION(10, 0, 1):
529 	case IP_VERSION(12, 0, 0):
530 	case IP_VERSION(12, 0, 1):
531 		return AMD_RESET_METHOD_MODE2;
532 	case IP_VERSION(9, 0, 0):
533 	case IP_VERSION(11, 0, 2):
534 		if (adev->asic_type == CHIP_VEGA20) {
535 			if (adev->psp.sos.fw_version >= 0x80067)
536 				baco_reset = amdgpu_dpm_is_baco_supported(adev);
537 			/*
538 			 * 1. PMFW version > 0x284300: all cases use baco
539 			 * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco
540 			 */
541 			if (ras && adev->ras_enabled &&
542 			    adev->pm.fw_version <= 0x283400)
543 				baco_reset = false;
544 		} else {
545 			baco_reset = amdgpu_dpm_is_baco_supported(adev);
546 		}
547 		break;
548 	case IP_VERSION(13, 0, 2):
549 		/*
550 		 * 1. Connected to cpu: the driver issues a mode2 reset.
551 		 * 2. Discrete gpu: the driver issues a mode1 reset.
552 		 */
553 		if (connected_to_cpu)
554 			return AMD_RESET_METHOD_MODE2;
555 		break;
556 	case IP_VERSION(13, 0, 6):
557 		/* Use gpu_recovery param to target a reset method.
558 		 * Enable triggering of GPU reset only if specified
559 		 * by module parameter.
560 		 */
561 		if (amdgpu_gpu_recovery == 4 || amdgpu_gpu_recovery == 5)
562 			return AMD_RESET_METHOD_MODE2;
563 		else if (!(adev->flags & AMD_IS_APU))
564 			return AMD_RESET_METHOD_MODE1;
565 		else
566 			return AMD_RESET_METHOD_MODE2;
567 	default:
568 		break;
569 	}
570 
571 	if (baco_reset)
572 		return AMD_RESET_METHOD_BACO;
573 	else
574 		return AMD_RESET_METHOD_MODE1;
575 }
576 
577 static int soc15_asic_reset(struct amdgpu_device *adev)
578 {
579 	/* original raven doesn't have full asic reset */
580 	if ((adev->apu_flags & AMD_APU_IS_RAVEN) ||
581 	    (adev->apu_flags & AMD_APU_IS_RAVEN2))
582 		return 0;
583 
584 	switch (soc15_asic_reset_method(adev)) {
585 	case AMD_RESET_METHOD_PCI:
586 		dev_info(adev->dev, "PCI reset\n");
587 		return amdgpu_device_pci_reset(adev);
588 	case AMD_RESET_METHOD_BACO:
589 		dev_info(adev->dev, "BACO reset\n");
590 		return soc15_asic_baco_reset(adev);
591 	case AMD_RESET_METHOD_MODE2:
592 		dev_info(adev->dev, "MODE2 reset\n");
593 		return amdgpu_dpm_mode2_reset(adev);
594 	default:
595 		dev_info(adev->dev, "MODE1 reset\n");
596 		return amdgpu_device_mode1_reset(adev);
597 	}
598 }
599 
600 static bool soc15_supports_baco(struct amdgpu_device *adev)
601 {
602 	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
603 	case IP_VERSION(9, 0, 0):
604 	case IP_VERSION(11, 0, 2):
605 		if (adev->asic_type == CHIP_VEGA20) {
606 			if (adev->psp.sos.fw_version >= 0x80067)
607 				return amdgpu_dpm_is_baco_supported(adev);
608 			return false;
609 		} else {
610 			return amdgpu_dpm_is_baco_supported(adev);
611 		}
612 		break;
613 	default:
614 		return false;
615 	}
616 }
617 
618 /*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
619 			u32 cntl_reg, u32 status_reg)
620 {
621 	return 0;
622 }*/
623 
624 static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
625 {
626 	/*int r;
627 
628 	r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
629 	if (r)
630 		return r;
631 
632 	r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
633 	*/
634 	return 0;
635 }
636 
637 static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
638 {
639 	/* todo */
640 
641 	return 0;
642 }
643 
644 static void soc15_program_aspm(struct amdgpu_device *adev)
645 {
646 	if (!amdgpu_device_should_use_aspm(adev))
647 		return;
648 
649 	if (adev->nbio.funcs->program_aspm)
650 		adev->nbio.funcs->program_aspm(adev);
651 }
652 
653 const struct amdgpu_ip_block_version vega10_common_ip_block =
654 {
655 	.type = AMD_IP_BLOCK_TYPE_COMMON,
656 	.major = 2,
657 	.minor = 0,
658 	.rev = 0,
659 	.funcs = &soc15_common_ip_funcs,
660 };
661 
662 static void soc15_reg_base_init(struct amdgpu_device *adev)
663 {
664 	/* Set IP register base before any HW register access */
665 	switch (adev->asic_type) {
666 	case CHIP_VEGA10:
667 	case CHIP_VEGA12:
668 	case CHIP_RAVEN:
669 	case CHIP_RENOIR:
670 		vega10_reg_base_init(adev);
671 		break;
672 	case CHIP_VEGA20:
673 		vega20_reg_base_init(adev);
674 		break;
675 	case CHIP_ARCTURUS:
676 		arct_reg_base_init(adev);
677 		break;
678 	case CHIP_ALDEBARAN:
679 		aldebaran_reg_base_init(adev);
680 		break;
681 	default:
682 		DRM_ERROR("Unsupported asic type: %d!\n", adev->asic_type);
683 		break;
684 	}
685 }
686 
687 void soc15_set_virt_ops(struct amdgpu_device *adev)
688 {
689 	adev->virt.ops = &xgpu_ai_virt_ops;
690 
691 	/* init soc15 reg base early enough so we can
692 	 * request full access for sriov before
693 	 * set_ip_blocks. */
694 	soc15_reg_base_init(adev);
695 }
696 
697 static bool soc15_need_full_reset(struct amdgpu_device *adev)
698 {
699 	/* change this when we implement soft reset */
700 	return true;
701 }
702 
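/* Sample the PCIe "received messages" and "posted requests sent" events
 * over a fixed one-second window using the NBIO perf counters; the
 * results back the pcie_bw sysfs interface.
 */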
703 static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
704 				 uint64_t *count1)
705 {
706 	uint32_t perfctr = 0;
707 	uint64_t cnt0_of, cnt1_of;
708 	int tmp;
709 
710 	/* This reports 0 on APUs, so return to avoid writing/reading registers
711 	 * that may or may not be different from their GPU counterparts
712 	 */
713 	if (adev->flags & AMD_IS_APU)
714 		return;
715 
716 	/* Set the 2 events that we wish to watch, defined above */
717 	/* Reg 40 is # received msgs */
718 	/* Reg 104 is # of posted requests sent */
719 	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
720 	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
721 
722 	/* Write to enable desired perf counters */
723 	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
724 	/* Zero out and enable the perf counters
725 	 * Write 0x5:
726 	 * Bit 0 = Start all counters(1)
727 	 * Bit 2 = Global counter reset enable(1)
728 	 */
729 	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);
730 
731 	msleep(1000);
732 
733 	/* Load the shadow and disable the perf counters
734 	 * Write 0x2:
735 	 * Bit 0 = Stop counters(0)
736 	 * Bit 1 = Load the shadow counters(1)
737 	 */
738 	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);
739 
740 	/* Read register values to get any >32bit overflow */
741 	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK);
742 	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
743 	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
744 
745 	/* Get the values and add the overflow */
746 	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
747 	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
748 }
749 
750 static void vega20_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
751 				 uint64_t *count1)
752 {
753 	uint32_t perfctr = 0;
754 	uint64_t cnt0_of, cnt1_of;
755 	int tmp;
756 
757 	/* This reports 0 on APUs, so return to avoid writing/reading registers
758 	 * that may or may not be different from their GPU counterparts
759 	 */
760 	if (adev->flags & AMD_IS_APU)
761 		return;
762 
763 	/* Set the 2 events that we wish to watch, defined above */
764 	/* Reg 40 is # received msgs */
765 	/* Reg 108 is # of posted requests sent on VG20 */
766 	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
767 				EVENT0_SEL, 40);
768 	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
769 				EVENT1_SEL, 108);
770 
771 	/* Write to enable desired perf counters */
772 	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctr);
773 	/* Zero out and enable the perf counters
774 	 * Write 0x5:
775 	 * Bit 0 = Start all counters(1)
776 	 * Bit 2 = Global counter reset enable(1)
777 	 */
778 	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);
779 
780 	msleep(1000);
781 
782 	/* Load the shadow and disable the perf counters
783 	 * Write 0x2:
784 	 * Bit 0 = Stop counters(0)
785 	 * Bit 1 = Load the shadow counters(1)
786 	 */
787 	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);
788 
789 	/* Read register values to get any >32bit overflow */
790 	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3);
791 	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER0_UPPER);
792 	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER1_UPPER);
793 
794 	/* Get the values and add the overflow */
795 	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) | (cnt0_of << 32);
796 	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK3) | (cnt1_of << 32);
797 }
798 
799 static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
800 {
801 	u32 sol_reg;
802 
803 	/* CP hangs in the IGT reloading test on RN; reset as a workaround (WA) */
804 	if (adev->asic_type == CHIP_RENOIR)
805 		return true;
806 
807 	/* Just return false for soc15 GPUs.  Reset does not seem to
808 	 * be necessary.
809 	 */
810 	if (!amdgpu_passthrough(adev))
811 		return false;
812 
813 	if (adev->flags & AMD_IS_APU)
814 		return false;
815 
816 	/* Check sOS sign of life register to confirm sys driver and sOS
817 	 * have already been loaded.
818 	 */
819 	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
820 	if (sol_reg)
821 		return true;
822 
823 	return false;
824 }
825 
826 static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
827 {
828 	uint64_t nak_r, nak_g;
829 
830 	/* Get the number of NAKs received and generated */
831 	nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK);
832 	nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED);
833 
834 	/* Add the total number of NAKs, i.e. the number of replays */
835 	return (nak_r + nak_g);
836 }
837 
838 static void soc15_pre_asic_init(struct amdgpu_device *adev)
839 {
840 	gmc_v9_0_restore_registers(adev);
841 }
842 
843 static const struct amdgpu_asic_funcs soc15_asic_funcs =
844 {
845 	.read_disabled_bios = &soc15_read_disabled_bios,
846 	.read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
847 	.read_register = &soc15_read_register,
848 	.reset = &soc15_asic_reset,
849 	.reset_method = &soc15_asic_reset_method,
850 	.get_xclk = &soc15_get_xclk,
851 	.set_uvd_clocks = &soc15_set_uvd_clocks,
852 	.set_vce_clocks = &soc15_set_vce_clocks,
853 	.get_config_memsize = &soc15_get_config_memsize,
854 	.need_full_reset = &soc15_need_full_reset,
855 	.init_doorbell_index = &vega10_doorbell_index_init,
856 	.get_pcie_usage = &soc15_get_pcie_usage,
857 	.need_reset_on_init = &soc15_need_reset_on_init,
858 	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
859 	.supports_baco = &soc15_supports_baco,
860 	.pre_asic_init = &soc15_pre_asic_init,
861 	.query_video_codecs = &soc15_query_video_codecs,
862 };
863 
864 static const struct amdgpu_asic_funcs vega20_asic_funcs =
865 {
866 	.read_disabled_bios = &soc15_read_disabled_bios,
867 	.read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
868 	.read_register = &soc15_read_register,
869 	.reset = &soc15_asic_reset,
870 	.reset_method = &soc15_asic_reset_method,
871 	.get_xclk = &soc15_get_xclk,
872 	.set_uvd_clocks = &soc15_set_uvd_clocks,
873 	.set_vce_clocks = &soc15_set_vce_clocks,
874 	.get_config_memsize = &soc15_get_config_memsize,
875 	.need_full_reset = &soc15_need_full_reset,
876 	.init_doorbell_index = &vega20_doorbell_index_init,
877 	.get_pcie_usage = &vega20_get_pcie_usage,
878 	.need_reset_on_init = &soc15_need_reset_on_init,
879 	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
880 	.supports_baco = &soc15_supports_baco,
881 	.pre_asic_init = &soc15_pre_asic_init,
882 	.query_video_codecs = &soc15_query_video_codecs,
883 };
884 
885 static const struct amdgpu_asic_funcs aqua_vanjaram_asic_funcs =
886 {
887 	.read_disabled_bios = &soc15_read_disabled_bios,
888 	.read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
889 	.read_register = &soc15_read_register,
890 	.reset = &soc15_asic_reset,
891 	.reset_method = &soc15_asic_reset_method,
892 	.get_xclk = &soc15_get_xclk,
893 	.set_uvd_clocks = &soc15_set_uvd_clocks,
894 	.set_vce_clocks = &soc15_set_vce_clocks,
895 	.get_config_memsize = &soc15_get_config_memsize,
896 	.need_full_reset = &soc15_need_full_reset,
897 	.init_doorbell_index = &aqua_vanjaram_doorbell_index_init,
898 	.get_pcie_usage = &amdgpu_nbio_get_pcie_usage,
899 	.need_reset_on_init = &soc15_need_reset_on_init,
900 	.get_pcie_replay_count = &amdgpu_nbio_get_pcie_replay_count,
901 	.supports_baco = &soc15_supports_baco,
902 	.pre_asic_init = &soc15_pre_asic_init,
903 	.query_video_codecs = &soc15_query_video_codecs,
904 	.encode_ext_smn_addressing = &aqua_vanjaram_encode_ext_smn_addressing,
905 	.get_reg_state = &aqua_vanjaram_get_reg_state,
906 };
907 
908 static int soc15_common_early_init(void *handle)
909 {
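/* Last page of the 512 KB register BAR; HDP flush registers are remapped
 * into this hole (see soc15_common_hw_init()) so they can be exposed to
 * process space, e.g. via the KFD mmio mapping.
 */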
910 #define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
911 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
912 
913 	if (!amdgpu_sriov_vf(adev)) {
914 		adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
915 		adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
916 	}
917 	adev->smc_rreg = NULL;
918 	adev->smc_wreg = NULL;
919 	adev->pcie_rreg = &amdgpu_device_indirect_rreg;
920 	adev->pcie_wreg = &amdgpu_device_indirect_wreg;
921 	adev->pcie_rreg_ext = &amdgpu_device_indirect_rreg_ext;
922 	adev->pcie_wreg_ext = &amdgpu_device_indirect_wreg_ext;
923 	adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64;
924 	adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64;
925 	adev->pcie_rreg64_ext = &amdgpu_device_indirect_rreg64_ext;
926 	adev->pcie_wreg64_ext = &amdgpu_device_indirect_wreg64_ext;
927 	adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
928 	adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
929 	adev->didt_rreg = &soc15_didt_rreg;
930 	adev->didt_wreg = &soc15_didt_wreg;
931 	adev->gc_cac_rreg = &soc15_gc_cac_rreg;
932 	adev->gc_cac_wreg = &soc15_gc_cac_wreg;
933 	adev->se_cac_rreg = &soc15_se_cac_rreg;
934 	adev->se_cac_wreg = &soc15_se_cac_wreg;
935 
936 	adev->rev_id = amdgpu_device_get_rev_id(adev);
937 	adev->external_rev_id = 0xFF;
938 	/* TODO: split the CG and PG flags based on the IP version to
939 	 * which they apply.
940 	 */
941 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
942 	case IP_VERSION(9, 0, 1):
943 		adev->asic_funcs = &soc15_asic_funcs;
944 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
945 			AMD_CG_SUPPORT_GFX_MGLS |
946 			AMD_CG_SUPPORT_GFX_RLC_LS |
947 			AMD_CG_SUPPORT_GFX_CP_LS |
948 			AMD_CG_SUPPORT_GFX_3D_CGCG |
949 			AMD_CG_SUPPORT_GFX_3D_CGLS |
950 			AMD_CG_SUPPORT_GFX_CGCG |
951 			AMD_CG_SUPPORT_GFX_CGLS |
952 			AMD_CG_SUPPORT_BIF_MGCG |
953 			AMD_CG_SUPPORT_BIF_LS |
954 			AMD_CG_SUPPORT_HDP_LS |
955 			AMD_CG_SUPPORT_DRM_MGCG |
956 			AMD_CG_SUPPORT_DRM_LS |
957 			AMD_CG_SUPPORT_ROM_MGCG |
958 			AMD_CG_SUPPORT_DF_MGCG |
959 			AMD_CG_SUPPORT_SDMA_MGCG |
960 			AMD_CG_SUPPORT_SDMA_LS |
961 			AMD_CG_SUPPORT_MC_MGCG |
962 			AMD_CG_SUPPORT_MC_LS;
963 		adev->pg_flags = 0;
964 		adev->external_rev_id = 0x1;
965 		break;
966 	case IP_VERSION(9, 2, 1):
967 		adev->asic_funcs = &soc15_asic_funcs;
968 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
969 			AMD_CG_SUPPORT_GFX_MGLS |
970 			AMD_CG_SUPPORT_GFX_CGCG |
971 			AMD_CG_SUPPORT_GFX_CGLS |
972 			AMD_CG_SUPPORT_GFX_3D_CGCG |
973 			AMD_CG_SUPPORT_GFX_3D_CGLS |
974 			AMD_CG_SUPPORT_GFX_CP_LS |
975 			AMD_CG_SUPPORT_MC_LS |
976 			AMD_CG_SUPPORT_MC_MGCG |
977 			AMD_CG_SUPPORT_SDMA_MGCG |
978 			AMD_CG_SUPPORT_SDMA_LS |
979 			AMD_CG_SUPPORT_BIF_MGCG |
980 			AMD_CG_SUPPORT_BIF_LS |
981 			AMD_CG_SUPPORT_HDP_MGCG |
982 			AMD_CG_SUPPORT_HDP_LS |
983 			AMD_CG_SUPPORT_ROM_MGCG |
984 			AMD_CG_SUPPORT_VCE_MGCG |
985 			AMD_CG_SUPPORT_UVD_MGCG;
986 		adev->pg_flags = 0;
987 		adev->external_rev_id = adev->rev_id + 0x14;
988 		break;
989 	case IP_VERSION(9, 4, 0):
990 		adev->asic_funcs = &vega20_asic_funcs;
991 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
992 			AMD_CG_SUPPORT_GFX_MGLS |
993 			AMD_CG_SUPPORT_GFX_CGCG |
994 			AMD_CG_SUPPORT_GFX_CGLS |
995 			AMD_CG_SUPPORT_GFX_3D_CGCG |
996 			AMD_CG_SUPPORT_GFX_3D_CGLS |
997 			AMD_CG_SUPPORT_GFX_CP_LS |
998 			AMD_CG_SUPPORT_MC_LS |
999 			AMD_CG_SUPPORT_MC_MGCG |
1000 			AMD_CG_SUPPORT_SDMA_MGCG |
1001 			AMD_CG_SUPPORT_SDMA_LS |
1002 			AMD_CG_SUPPORT_BIF_MGCG |
1003 			AMD_CG_SUPPORT_BIF_LS |
1004 			AMD_CG_SUPPORT_HDP_MGCG |
1005 			AMD_CG_SUPPORT_HDP_LS |
1006 			AMD_CG_SUPPORT_ROM_MGCG |
1007 			AMD_CG_SUPPORT_VCE_MGCG |
1008 			AMD_CG_SUPPORT_UVD_MGCG;
1009 		adev->pg_flags = 0;
1010 		adev->external_rev_id = adev->rev_id + 0x28;
1011 		break;
1012 	case IP_VERSION(9, 1, 0):
1013 	case IP_VERSION(9, 2, 2):
1014 		adev->asic_funcs = &soc15_asic_funcs;
1015 
1016 		if (adev->rev_id >= 0x8)
1017 			adev->apu_flags |= AMD_APU_IS_RAVEN2;
1018 
1019 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1020 			adev->external_rev_id = adev->rev_id + 0x79;
1021 		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1022 			adev->external_rev_id = adev->rev_id + 0x41;
1023 		else if (adev->rev_id == 1)
1024 			adev->external_rev_id = adev->rev_id + 0x20;
1025 		else
1026 			adev->external_rev_id = adev->rev_id + 0x01;
1027 
1028 		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
1029 			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1030 				AMD_CG_SUPPORT_GFX_MGLS |
1031 				AMD_CG_SUPPORT_GFX_CP_LS |
1032 				AMD_CG_SUPPORT_GFX_3D_CGCG |
1033 				AMD_CG_SUPPORT_GFX_3D_CGLS |
1034 				AMD_CG_SUPPORT_GFX_CGCG |
1035 				AMD_CG_SUPPORT_GFX_CGLS |
1036 				AMD_CG_SUPPORT_BIF_LS |
1037 				AMD_CG_SUPPORT_HDP_LS |
1038 				AMD_CG_SUPPORT_MC_MGCG |
1039 				AMD_CG_SUPPORT_MC_LS |
1040 				AMD_CG_SUPPORT_SDMA_MGCG |
1041 				AMD_CG_SUPPORT_SDMA_LS |
1042 				AMD_CG_SUPPORT_VCN_MGCG;
1043 
1044 			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
1045 		} else if (adev->apu_flags & AMD_APU_IS_PICASSO) {
1046 			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1047 				AMD_CG_SUPPORT_GFX_MGLS |
1048 				AMD_CG_SUPPORT_GFX_CP_LS |
1049 				AMD_CG_SUPPORT_GFX_3D_CGLS |
1050 				AMD_CG_SUPPORT_GFX_CGCG |
1051 				AMD_CG_SUPPORT_GFX_CGLS |
1052 				AMD_CG_SUPPORT_BIF_LS |
1053 				AMD_CG_SUPPORT_HDP_LS |
1054 				AMD_CG_SUPPORT_MC_MGCG |
1055 				AMD_CG_SUPPORT_MC_LS |
1056 				AMD_CG_SUPPORT_SDMA_MGCG |
1057 				AMD_CG_SUPPORT_SDMA_LS |
1058 				AMD_CG_SUPPORT_VCN_MGCG;
1059 
1060 			/*
1061 			 * MMHUB PG needs to be disabled for Picasso for
1062 			 * stability reasons.
1063 			 */
1064 			adev->pg_flags = AMD_PG_SUPPORT_SDMA |
1065 				AMD_PG_SUPPORT_VCN;
1066 		} else {
1067 			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1068 				AMD_CG_SUPPORT_GFX_MGLS |
1069 				AMD_CG_SUPPORT_GFX_RLC_LS |
1070 				AMD_CG_SUPPORT_GFX_CP_LS |
1071 				AMD_CG_SUPPORT_GFX_3D_CGLS |
1072 				AMD_CG_SUPPORT_GFX_CGCG |
1073 				AMD_CG_SUPPORT_GFX_CGLS |
1074 				AMD_CG_SUPPORT_BIF_MGCG |
1075 				AMD_CG_SUPPORT_BIF_LS |
1076 				AMD_CG_SUPPORT_HDP_MGCG |
1077 				AMD_CG_SUPPORT_HDP_LS |
1078 				AMD_CG_SUPPORT_DRM_MGCG |
1079 				AMD_CG_SUPPORT_DRM_LS |
1080 				AMD_CG_SUPPORT_MC_MGCG |
1081 				AMD_CG_SUPPORT_MC_LS |
1082 				AMD_CG_SUPPORT_SDMA_MGCG |
1083 				AMD_CG_SUPPORT_SDMA_LS |
1084 				AMD_CG_SUPPORT_VCN_MGCG;
1085 
1086 			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
1087 		}
1088 		break;
1089 	case IP_VERSION(9, 4, 1):
1090 		adev->asic_funcs = &vega20_asic_funcs;
1091 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1092 			AMD_CG_SUPPORT_GFX_MGLS |
1093 			AMD_CG_SUPPORT_GFX_CGCG |
1094 			AMD_CG_SUPPORT_GFX_CGLS |
1095 			AMD_CG_SUPPORT_GFX_CP_LS |
1096 			AMD_CG_SUPPORT_HDP_MGCG |
1097 			AMD_CG_SUPPORT_HDP_LS |
1098 			AMD_CG_SUPPORT_SDMA_MGCG |
1099 			AMD_CG_SUPPORT_SDMA_LS |
1100 			AMD_CG_SUPPORT_MC_MGCG |
1101 			AMD_CG_SUPPORT_MC_LS |
1102 			AMD_CG_SUPPORT_IH_CG |
1103 			AMD_CG_SUPPORT_VCN_MGCG |
1104 			AMD_CG_SUPPORT_JPEG_MGCG;
1105 		adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG;
1106 		adev->external_rev_id = adev->rev_id + 0x32;
1107 		break;
1108 	case IP_VERSION(9, 3, 0):
1109 		adev->asic_funcs = &soc15_asic_funcs;
1110 
1111 		if (adev->apu_flags & AMD_APU_IS_RENOIR)
1112 			adev->external_rev_id = adev->rev_id + 0x91;
1113 		else
1114 			adev->external_rev_id = adev->rev_id + 0xa1;
1115 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1116 				 AMD_CG_SUPPORT_GFX_MGLS |
1117 				 AMD_CG_SUPPORT_GFX_3D_CGCG |
1118 				 AMD_CG_SUPPORT_GFX_3D_CGLS |
1119 				 AMD_CG_SUPPORT_GFX_CGCG |
1120 				 AMD_CG_SUPPORT_GFX_CGLS |
1121 				 AMD_CG_SUPPORT_GFX_CP_LS |
1122 				 AMD_CG_SUPPORT_MC_MGCG |
1123 				 AMD_CG_SUPPORT_MC_LS |
1124 				 AMD_CG_SUPPORT_SDMA_MGCG |
1125 				 AMD_CG_SUPPORT_SDMA_LS |
1126 				 AMD_CG_SUPPORT_BIF_LS |
1127 				 AMD_CG_SUPPORT_HDP_LS |
1128 				 AMD_CG_SUPPORT_VCN_MGCG |
1129 				 AMD_CG_SUPPORT_JPEG_MGCG |
1130 				 AMD_CG_SUPPORT_IH_CG |
1131 				 AMD_CG_SUPPORT_ATHUB_LS |
1132 				 AMD_CG_SUPPORT_ATHUB_MGCG |
1133 				 AMD_CG_SUPPORT_DF_MGCG;
1134 		adev->pg_flags = AMD_PG_SUPPORT_SDMA |
1135 				 AMD_PG_SUPPORT_VCN |
1136 				 AMD_PG_SUPPORT_JPEG |
1137 				 AMD_PG_SUPPORT_VCN_DPG;
1138 		break;
1139 	case IP_VERSION(9, 4, 2):
1140 		adev->asic_funcs = &vega20_asic_funcs;
1141 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1142 			AMD_CG_SUPPORT_GFX_MGLS |
1143 			AMD_CG_SUPPORT_GFX_CP_LS |
1144 			AMD_CG_SUPPORT_HDP_LS |
1145 			AMD_CG_SUPPORT_SDMA_MGCG |
1146 			AMD_CG_SUPPORT_SDMA_LS |
1147 			AMD_CG_SUPPORT_IH_CG |
1148 			AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG;
1149 		adev->pg_flags = AMD_PG_SUPPORT_VCN_DPG;
1150 		adev->external_rev_id = adev->rev_id + 0x3c;
1151 		break;
1152 	case IP_VERSION(9, 4, 3):
1153 		adev->asic_funcs = &aqua_vanjaram_asic_funcs;
1154 		adev->cg_flags =
1155 			AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_CGCG |
1156 			AMD_CG_SUPPORT_GFX_CGLS | AMD_CG_SUPPORT_SDMA_MGCG |
1157 			AMD_CG_SUPPORT_GFX_FGCG | AMD_CG_SUPPORT_REPEATER_FGCG |
1158 			AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG |
1159 			AMD_CG_SUPPORT_IH_CG;
1160 		adev->pg_flags =
1161 			AMD_PG_SUPPORT_VCN |
1162 			AMD_PG_SUPPORT_VCN_DPG |
1163 			AMD_PG_SUPPORT_JPEG;
1164 		adev->external_rev_id = adev->rev_id + 0x46;
1165 		/* GC 9.4.3 uses MMIO register region hole at a different offset */
1166 		if (!amdgpu_sriov_vf(adev)) {
1167 			adev->rmmio_remap.reg_offset = 0x1A000;
1168 			adev->rmmio_remap.bus_addr = adev->rmmio_base + 0x1A000;
1169 		}
1170 		break;
1171 	default:
1172 		/* FIXME: not supported yet */
1173 		return -EINVAL;
1174 	}
1175 
1176 	if (amdgpu_sriov_vf(adev)) {
1177 		amdgpu_virt_init_setting(adev);
1178 		xgpu_ai_mailbox_set_irq_funcs(adev);
1179 	}
1180 
1181 	return 0;
1182 }
1183 
1184 static int soc15_common_late_init(void *handle)
1185 {
1186 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1187 
1188 	if (amdgpu_sriov_vf(adev))
1189 		xgpu_ai_mailbox_get_irq(adev);
1190 
1191 	/* Enable selfring doorbell aperture late because doorbell BAR
1192 	 * aperture will change if BAR resizing succeeds in gmc sw_init.
1193 	 */
1194 	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true);
1195 
1196 	return 0;
1197 }
1198 
1199 static int soc15_common_sw_init(void *handle)
1200 {
1201 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1202 
1203 	if (amdgpu_sriov_vf(adev))
1204 		xgpu_ai_mailbox_add_irq_id(adev);
1205 
1206 	if (adev->df.funcs &&
1207 	    adev->df.funcs->sw_init)
1208 		adev->df.funcs->sw_init(adev);
1209 
1210 	return 0;
1211 }
1212 
1213 static int soc15_common_sw_fini(void *handle)
1214 {
1215 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1216 
1217 	if (adev->df.funcs &&
1218 	    adev->df.funcs->sw_fini)
1219 		adev->df.funcs->sw_fini(adev);
1220 	return 0;
1221 }
1222 
1223 static void soc15_sdma_doorbell_range_init(struct amdgpu_device *adev)
1224 {
1225 	int i;
1226 
1227 	/* sdma doorbell range is programmed by the hypervisor */
1228 	if (!amdgpu_sriov_vf(adev)) {
1229 		for (i = 0; i < adev->sdma.num_instances; i++) {
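			/* sdma_engine[] holds a 64-bit doorbell index; the
			 * shift converts it to the 32-bit doorbell units the
			 * NBIO range registers expect.
			 */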
1230 			adev->nbio.funcs->sdma_doorbell_range(adev, i,
1231 				true, adev->doorbell_index.sdma_engine[i] << 1,
1232 				adev->doorbell_index.sdma_doorbell_range);
1233 		}
1234 	}
1235 }
1236 
1237 static int soc15_common_hw_init(void *handle)
1238 {
1239 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1240 
1241 	/* enable aspm */
1242 	soc15_program_aspm(adev);
1243 	/* setup nbio registers */
1244 	adev->nbio.funcs->init_registers(adev);
1245 	/* remap HDP registers to a hole in mmio space,
1246 	 * in order to expose those registers
1247 	 * to process space
1248 	 */
1249 	if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
1250 		adev->nbio.funcs->remap_hdp_registers(adev);
1251 
1252 	/* enable the doorbell aperture */
1253 	adev->nbio.funcs->enable_doorbell_aperture(adev, true);
1254 
1255 	/* HW doorbell routing policy: doorbell writes not
1256 	 * in the SDMA/IH/MM/ACV range are routed to CP, so
1257 	 * we need to init the SDMA doorbell range prior
1258 	 * to CP IP block init and ring test.  IH init already
1259 	 * happens before CP.
1260 	 */
1261 	soc15_sdma_doorbell_range_init(adev);
1262 
1263 	return 0;
1264 }
1265 
1266 static int soc15_common_hw_fini(void *handle)
1267 {
1268 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1269 
1270 	/* Disable the doorbell aperture and selfring doorbell aperture
1271 	 * separately in hw_fini because soc15_enable_doorbell_aperture
1272 	 * has been removed and there is no need to delay disabling
1273 	 * selfring doorbell.
1274 	 */
1275 	adev->nbio.funcs->enable_doorbell_aperture(adev, false);
1276 	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false);
1277 
1278 	if (amdgpu_sriov_vf(adev))
1279 		xgpu_ai_mailbox_put_irq(adev);
1280 
1281 	if (adev->nbio.ras_if &&
1282 	    amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
1283 		if (adev->nbio.ras &&
1284 		    adev->nbio.ras->init_ras_controller_interrupt)
1285 			amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
1286 		if (adev->nbio.ras &&
1287 		    adev->nbio.ras->init_ras_err_event_athub_interrupt)
1288 			amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
1289 	}
1290 
1291 	return 0;
1292 }
1293 
1294 static int soc15_common_suspend(void *handle)
1295 {
1296 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1297 
1298 	return soc15_common_hw_fini(adev);
1299 }
1300 
1301 static int soc15_common_resume(void *handle)
1302 {
1303 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1304 
1305 	return soc15_common_hw_init(adev);
1306 }
1307 
1308 static bool soc15_common_is_idle(void *handle)
1309 {
1310 	return true;
1311 }
1312 
1313 static int soc15_common_wait_for_idle(void *handle)
1314 {
1315 	return 0;
1316 }
1317 
1318 static int soc15_common_soft_reset(void *handle)
1319 {
1320 	return 0;
1321 }
1322 
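/* MP0_MISC_CGTT_CTRL0 bits 24-31 act as clock-gating override bits for
 * the DRM block: clearing them lets medium-grain clock gating engage,
 * setting them forces the clocks on.
 */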
1323 static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
1324 {
1325 	uint32_t def, data;
1326 
1327 	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
1328 
1329 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
1330 		data &= ~(0x01000000 |
1331 			  0x02000000 |
1332 			  0x04000000 |
1333 			  0x08000000 |
1334 			  0x10000000 |
1335 			  0x20000000 |
1336 			  0x40000000 |
1337 			  0x80000000);
1338 	else
1339 		data |= (0x01000000 |
1340 			 0x02000000 |
1341 			 0x04000000 |
1342 			 0x08000000 |
1343 			 0x10000000 |
1344 			 0x20000000 |
1345 			 0x40000000 |
1346 			 0x80000000);
1347 
1348 	if (def != data)
1349 		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
1350 }
1351 
1352 static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
1353 {
1354 	uint32_t def, data;
1355 
1356 	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
1357 
1358 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
1359 		data |= 1;
1360 	else
1361 		data &= ~1;
1362 
1363 	if (def != data)
1364 		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
1365 }
1366 
1367 static int soc15_common_set_clockgating_state(void *handle,
1368 					    enum amd_clockgating_state state)
1369 {
1370 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1371 
1372 	if (amdgpu_sriov_vf(adev))
1373 		return 0;
1374 
1375 	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
1376 	case IP_VERSION(6, 1, 0):
1377 	case IP_VERSION(6, 2, 0):
1378 	case IP_VERSION(7, 4, 0):
1379 		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
1380 				state == AMD_CG_STATE_GATE);
1381 		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
1382 				state == AMD_CG_STATE_GATE);
1383 		adev->hdp.funcs->update_clock_gating(adev,
1384 				state == AMD_CG_STATE_GATE);
1385 		soc15_update_drm_clock_gating(adev,
1386 				state == AMD_CG_STATE_GATE);
1387 		soc15_update_drm_light_sleep(adev,
1388 				state == AMD_CG_STATE_GATE);
1389 		adev->smuio.funcs->update_rom_clock_gating(adev,
1390 				state == AMD_CG_STATE_GATE);
1391 		adev->df.funcs->update_medium_grain_clock_gating(adev,
1392 				state == AMD_CG_STATE_GATE);
1393 		break;
1394 	case IP_VERSION(7, 0, 0):
1395 	case IP_VERSION(7, 0, 1):
1396 	case IP_VERSION(2, 5, 0):
1397 		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
1398 				state == AMD_CG_STATE_GATE);
1399 		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
1400 				state == AMD_CG_STATE_GATE);
1401 		adev->hdp.funcs->update_clock_gating(adev,
1402 				state == AMD_CG_STATE_GATE);
1403 		soc15_update_drm_clock_gating(adev,
1404 				state == AMD_CG_STATE_GATE);
1405 		soc15_update_drm_light_sleep(adev,
1406 				state == AMD_CG_STATE_GATE);
1407 		break;
1408 	case IP_VERSION(7, 4, 1):
1409 	case IP_VERSION(7, 4, 4):
1410 		adev->hdp.funcs->update_clock_gating(adev,
1411 				state == AMD_CG_STATE_GATE);
1412 		break;
1413 	default:
1414 		break;
1415 	}
1416 	return 0;
1417 }
1418 
1419 static void soc15_common_get_clockgating_state(void *handle, u64 *flags)
1420 {
1421 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1422 	int data;
1423 
1424 	if (amdgpu_sriov_vf(adev))
1425 		*flags = 0;
1426 
1427 	if (adev->nbio.funcs && adev->nbio.funcs->get_clockgating_state)
1428 		adev->nbio.funcs->get_clockgating_state(adev, flags);
1429 
1430 	if (adev->hdp.funcs && adev->hdp.funcs->get_clock_gating_state)
1431 		adev->hdp.funcs->get_clock_gating_state(adev, flags);
1432 
1433 	if ((amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2)) &&
1434 	    (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6))) {
1435 		/* AMD_CG_SUPPORT_DRM_MGCG */
1436 		data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
1437 		if (!(data & 0x01000000))
1438 			*flags |= AMD_CG_SUPPORT_DRM_MGCG;
1439 
1440 		/* AMD_CG_SUPPORT_DRM_LS */
1441 		data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
1442 		if (data & 0x1)
1443 			*flags |= AMD_CG_SUPPORT_DRM_LS;
1444 	}
1445 
1446 	/* AMD_CG_SUPPORT_ROM_MGCG */
1447 	if (adev->smuio.funcs && adev->smuio.funcs->get_clock_gating_state)
1448 		adev->smuio.funcs->get_clock_gating_state(adev, flags);
1449 
1450 	if (adev->df.funcs && adev->df.funcs->get_clockgating_state)
1451 		adev->df.funcs->get_clockgating_state(adev, flags);
1452 }
1453 
1454 static int soc15_common_set_powergating_state(void *handle,
1455 					    enum amd_powergating_state state)
1456 {
1457 	/* todo */
1458 	return 0;
1459 }
1460 
1461 static const struct amd_ip_funcs soc15_common_ip_funcs = {
1462 	.name = "soc15_common",
1463 	.early_init = soc15_common_early_init,
1464 	.late_init = soc15_common_late_init,
1465 	.sw_init = soc15_common_sw_init,
1466 	.sw_fini = soc15_common_sw_fini,
1467 	.hw_init = soc15_common_hw_init,
1468 	.hw_fini = soc15_common_hw_fini,
1469 	.suspend = soc15_common_suspend,
1470 	.resume = soc15_common_resume,
1471 	.is_idle = soc15_common_is_idle,
1472 	.wait_for_idle = soc15_common_wait_for_idle,
1473 	.soft_reset = soc15_common_soft_reset,
1474 	.set_clockgating_state = soc15_common_set_clockgating_state,
1475 	.set_powergating_state = soc15_common_set_powergating_state,
1476 	.get_clockgating_state = soc15_common_get_clockgating_state,
1477 };
1478