xref: /linux/drivers/gpu/drm/amd/amdgpu/nv.c (revision 49c70ece54b0d1c51bc31b2b0c1070777c992c26)
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/firmware.h>
24 #include <linux/slab.h>
25 #include <linux/module.h>
26 #include <linux/pci.h>
27 
28 #include <drm/amdgpu_drm.h>
29 
30 #include "amdgpu.h"
31 #include "amdgpu_atombios.h"
32 #include "amdgpu_ih.h"
33 #include "amdgpu_uvd.h"
34 #include "amdgpu_vce.h"
35 #include "amdgpu_ucode.h"
36 #include "amdgpu_psp.h"
37 #include "amdgpu_smu.h"
38 #include "atom.h"
39 #include "amd_pcie.h"
40 
41 #include "gc/gc_10_1_0_offset.h"
42 #include "gc/gc_10_1_0_sh_mask.h"
43 #include "mp/mp_11_0_offset.h"
44 
45 #include "soc15.h"
46 #include "soc15_common.h"
47 #include "gmc_v10_0.h"
48 #include "gfxhub_v2_0.h"
49 #include "mmhub_v2_0.h"
50 #include "nbio_v2_3.h"
51 #include "nbio_v7_2.h"
52 #include "hdp_v5_0.h"
53 #include "nv.h"
54 #include "navi10_ih.h"
55 #include "gfx_v10_0.h"
56 #include "sdma_v5_0.h"
57 #include "sdma_v5_2.h"
58 #include "vcn_v2_0.h"
59 #include "jpeg_v2_0.h"
60 #include "vcn_v3_0.h"
61 #include "jpeg_v3_0.h"
62 #include "dce_virtual.h"
63 #include "mes_v10_1.h"
64 #include "mxgpu_nv.h"
65 #include "smuio_v11_0.h"
66 #include "smuio_v11_0_6.h"
67 
/* Forward declaration; the table is defined later in this file. */
static const struct amd_ip_funcs nv_common_ip_funcs;
69 
/* Navi */
/* Encode capabilities shared by all Navi-family parts handled here. */
static const struct amdgpu_video_codec_info nv_video_codecs_encode_array[] =
{
	{
		/* H.264/AVC encode */
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 2304,
		.max_pixels_per_frame = 4096 * 2304,
		.max_level = 0,
	},
	{
		/* HEVC encode */
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
		.max_width = 4096,
		.max_height = 2304,
		.max_pixels_per_frame = 4096 * 2304,
		.max_level = 0,
	},
};

static const struct amdgpu_video_codecs nv_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(nv_video_codecs_encode_array),
	.codec_array = nv_video_codecs_encode_array,
};
94 
/* Navi1x */
/* Decode capabilities for Navi10/12/14 (no AV1 support). */
static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 3,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 5,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 52,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 4,
	},
	{
		/* HEVC and VP9 support larger 8K-class surfaces. */
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
		.max_width = 8192,
		.max_height = 4352,
		.max_pixels_per_frame = 8192 * 4352,
		.max_level = 186,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 0,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
		.max_width = 8192,
		.max_height = 4352,
		.max_pixels_per_frame = 8192 * 4352,
		.max_level = 0,
	},
};

static const struct amdgpu_video_codecs nv_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(nv_video_codecs_decode_array),
	.codec_array = nv_video_codecs_decode_array,
};
154 
/* Sienna Cichlid */
/* Decode capabilities for the Sienna Cichlid family; same as Navi1x plus AV1. */
static const struct amdgpu_video_codec_info sc_video_codecs_decode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 3,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 5,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 52,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 4,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
		.max_width = 8192,
		.max_height = 4352,
		.max_pixels_per_frame = 8192 * 4352,
		.max_level = 186,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 0,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
		.max_width = 8192,
		.max_height = 4352,
		.max_pixels_per_frame = 8192 * 4352,
		.max_level = 0,
	},
	{
		/* AV1 decode is the addition relative to nv_video_codecs_decode_array. */
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1,
		.max_width = 8192,
		.max_height = 4352,
		.max_pixels_per_frame = 8192 * 4352,
		.max_level = 0,
	},
};

static const struct amdgpu_video_codecs sc_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(sc_video_codecs_decode_array),
	.codec_array = sc_video_codecs_decode_array,
};
221 
222 static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
223 				 const struct amdgpu_video_codecs **codecs)
224 {
225 	switch (adev->asic_type) {
226 	case CHIP_SIENNA_CICHLID:
227 	case CHIP_NAVY_FLOUNDER:
228 	case CHIP_DIMGREY_CAVEFISH:
229 	case CHIP_VANGOGH:
230 		if (encode)
231 			*codecs = &nv_video_codecs_encode;
232 		else
233 			*codecs = &sc_video_codecs_decode;
234 		return 0;
235 	case CHIP_NAVI10:
236 	case CHIP_NAVI14:
237 	case CHIP_NAVI12:
238 		if (encode)
239 			*codecs = &nv_video_codecs_encode;
240 		else
241 			*codecs = &nv_video_codecs_decode;
242 		return 0;
243 	default:
244 		return -EINVAL;
245 	}
246 }
247 
248 /*
249  * Indirect registers accessor
250  */
251 static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
252 {
253 	unsigned long address, data;
254 	address = adev->nbio.funcs->get_pcie_index_offset(adev);
255 	data = adev->nbio.funcs->get_pcie_data_offset(adev);
256 
257 	return amdgpu_device_indirect_rreg(adev, address, data, reg);
258 }
259 
260 static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
261 {
262 	unsigned long address, data;
263 
264 	address = adev->nbio.funcs->get_pcie_index_offset(adev);
265 	data = adev->nbio.funcs->get_pcie_data_offset(adev);
266 
267 	amdgpu_device_indirect_wreg(adev, address, data, reg, v);
268 }
269 
270 static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
271 {
272 	unsigned long address, data;
273 	address = adev->nbio.funcs->get_pcie_index_offset(adev);
274 	data = adev->nbio.funcs->get_pcie_data_offset(adev);
275 
276 	return amdgpu_device_indirect_rreg64(adev, address, data, reg);
277 }
278 
279 static u32 nv_pcie_port_rreg(struct amdgpu_device *adev, u32 reg)
280 {
281 	unsigned long flags, address, data;
282 	u32 r;
283 	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
284 	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
285 
286 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
287 	WREG32(address, reg * 4);
288 	(void)RREG32(address);
289 	r = RREG32(data);
290 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
291 	return r;
292 }
293 
294 static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
295 {
296 	unsigned long address, data;
297 
298 	address = adev->nbio.funcs->get_pcie_index_offset(adev);
299 	data = adev->nbio.funcs->get_pcie_data_offset(adev);
300 
301 	amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
302 }
303 
304 static void nv_pcie_port_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
305 {
306 	unsigned long flags, address, data;
307 
308 	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
309 	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
310 
311 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
312 	WREG32(address, reg * 4);
313 	(void)RREG32(address);
314 	WREG32(data, v);
315 	(void)RREG32(data);
316 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
317 }
318 
319 static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
320 {
321 	unsigned long flags, address, data;
322 	u32 r;
323 
324 	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
325 	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
326 
327 	spin_lock_irqsave(&adev->didt_idx_lock, flags);
328 	WREG32(address, (reg));
329 	r = RREG32(data);
330 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
331 	return r;
332 }
333 
334 static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
335 {
336 	unsigned long flags, address, data;
337 
338 	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
339 	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
340 
341 	spin_lock_irqsave(&adev->didt_idx_lock, flags);
342 	WREG32(address, (reg));
343 	WREG32(data, (v));
344 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
345 }
346 
/* Return the configured memory size as reported by the NBIO block. */
static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}
351 
/* Return the reference clock (xclk) frequency from the cached SPLL info. */
static u32 nv_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}
356 
357 
358 void nv_grbm_select(struct amdgpu_device *adev,
359 		     u32 me, u32 pipe, u32 queue, u32 vmid)
360 {
361 	u32 grbm_gfx_cntl = 0;
362 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
363 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
364 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
365 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
366 
367 	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
368 }
369 
/* Enable/disable VGA access; intentionally unimplemented on Navi. */
static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}
374 
/* Read the vbios while the ASIC is disabled; not implemented, always fails. */
static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}
380 
381 static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
382 				  u8 *bios, u32 length_bytes)
383 {
384 	u32 *dw_ptr;
385 	u32 i, length_dw;
386 	u32 rom_index_offset, rom_data_offset;
387 
388 	if (bios == NULL)
389 		return false;
390 	if (length_bytes == 0)
391 		return false;
392 	/* APU vbios image is part of sbios image */
393 	if (adev->flags & AMD_IS_APU)
394 		return false;
395 
396 	dw_ptr = (u32 *)bios;
397 	length_dw = ALIGN(length_bytes, 4) / 4;
398 
399 	rom_index_offset =
400 		adev->smuio.funcs->get_rom_index_offset(adev);
401 	rom_data_offset =
402 		adev->smuio.funcs->get_rom_data_offset(adev);
403 
404 	/* set rom index to 0 */
405 	WREG32(rom_index_offset, 0);
406 	/* read out the rom data */
407 	for (i = 0; i < length_dw; i++)
408 		dw_ptr[i] = RREG32(rom_data_offset);
409 
410 	return true;
411 }
412 
/* Registers userspace is allowed to read via nv_read_register().
 * NOTE: entry order matters — nv_read_register() skips index 7
 * (mmSDMA1_STATUS_REG) by position on parts with a single SDMA instance,
 * so keep the two in sync. */
static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};
434 
435 static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
436 					 u32 sh_num, u32 reg_offset)
437 {
438 	uint32_t val;
439 
440 	mutex_lock(&adev->grbm_idx_mutex);
441 	if (se_num != 0xffffffff || sh_num != 0xffffffff)
442 		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
443 
444 	val = RREG32(reg_offset);
445 
446 	if (se_num != 0xffffffff || sh_num != 0xffffffff)
447 		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
448 	mutex_unlock(&adev->grbm_idx_mutex);
449 	return val;
450 }
451 
452 static uint32_t nv_get_register_value(struct amdgpu_device *adev,
453 				      bool indexed, u32 se_num,
454 				      u32 sh_num, u32 reg_offset)
455 {
456 	if (indexed) {
457 		return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
458 	} else {
459 		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
460 			return adev->gfx.config.gb_addr_config;
461 		return RREG32(reg_offset);
462 	}
463 }
464 
/*
 * Look up @reg_offset in the nv_allowed_read_registers allow-list and,
 * if present, read it into *value.
 *
 * Returns 0 on success, -EINVAL when the register is not allowed.
 */
static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry  *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
		en = &nv_allowed_read_registers[i];
		/* NOTE(review): i == 7 positionally matches mmSDMA1_STATUS_REG
		 * in nv_allowed_read_registers — fragile coupling; keep the
		 * table order in sync with this check. */
		if ((i == 7 && (adev->sdma.num_instances == 1)) || /* some asics don't have SDMA1 */
		    reg_offset !=
		    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
			continue;

		*value = nv_get_register_value(adev,
					       nv_allowed_read_registers[i].grbm_indexed,
					       se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}
486 
/*
 * Perform a mode2 (SMU-driven) ASIC reset.  Bus mastering is disabled and
 * the PCI config space cached across the reset; afterwards the memsize
 * register is polled until the ASIC responds again.
 *
 * Returns the result of amdgpu_dpm_mode2_reset() (0 on success).
 */
static int nv_asic_mode2_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	/* disable BM */
	pci_clear_master(adev->pdev);

	amdgpu_device_cache_pci_state(adev->pdev);

	ret = amdgpu_dpm_mode2_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode2 reset failed\n");

	amdgpu_device_load_pci_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		/* memsize reads back all-ones while the ASIC is still in reset */
		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}
518 
519 static bool nv_asic_supports_baco(struct amdgpu_device *adev)
520 {
521 	struct smu_context *smu = &adev->smu;
522 
523 	if (smu_baco_is_support(smu))
524 		return true;
525 	else
526 		return false;
527 }
528 
529 static enum amd_reset_method
530 nv_asic_reset_method(struct amdgpu_device *adev)
531 {
532 	struct smu_context *smu = &adev->smu;
533 
534 	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
535 	    amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
536 	    amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
537 	    amdgpu_reset_method == AMD_RESET_METHOD_PCI)
538 		return amdgpu_reset_method;
539 
540 	if (amdgpu_reset_method != -1)
541 		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
542 				  amdgpu_reset_method);
543 
544 	switch (adev->asic_type) {
545 	case CHIP_VANGOGH:
546 		return AMD_RESET_METHOD_MODE2;
547 	case CHIP_SIENNA_CICHLID:
548 	case CHIP_NAVY_FLOUNDER:
549 	case CHIP_DIMGREY_CAVEFISH:
550 		return AMD_RESET_METHOD_MODE1;
551 	default:
552 		if (smu_baco_is_support(smu))
553 			return AMD_RESET_METHOD_BACO;
554 		else
555 			return AMD_RESET_METHOD_MODE1;
556 	}
557 }
558 
/*
 * Reset the ASIC using the method chosen by nv_asic_reset_method().
 * BACO "reset" is an enter/exit round trip through the SMU.
 *
 * Returns 0 on success or a negative error code.
 */
static int nv_asic_reset(struct amdgpu_device *adev)
{
	int ret = 0;
	struct smu_context *smu = &adev->smu;

	switch (nv_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_PCI:
		dev_info(adev->dev, "PCI reset\n");
		ret = amdgpu_device_pci_reset(adev);
		break;
	case AMD_RESET_METHOD_BACO:
		dev_info(adev->dev, "BACO reset\n");

		/* Enter must succeed before exit is attempted. */
		ret = smu_baco_enter(smu);
		if (ret)
			return ret;
		ret = smu_baco_exit(smu);
		if (ret)
			return ret;
		break;
	case AMD_RESET_METHOD_MODE2:
		dev_info(adev->dev, "MODE2 reset\n");
		ret = nv_asic_mode2_reset(adev);
		break;
	default:
		dev_info(adev->dev, "MODE1 reset\n");
		ret = amdgpu_device_mode1_reset(adev);
		break;
	}

	return ret;
}
591 
/* Set UVD vclk/dclk; not implemented on Navi, reports success. */
static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}
597 
/* Set VCE evclk/ecclk; not implemented on Navi, reports success. */
static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}
603 
/*
 * Switch the PCIe link to gen2/gen3 speeds.  Only the guard conditions are
 * implemented so far; the actual retraining is still a todo.
 */
static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
{
	/* Nothing to do when we sit directly on the root bus. */
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	/* Respect the amdgpu_pcie_gen2 module parameter (0 = disabled). */
	if (amdgpu_pcie_gen2 == 0)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}
618 
619 static void nv_program_aspm(struct amdgpu_device *adev)
620 {
621 	if (amdgpu_aspm != 1)
622 		return;
623 
624 	if ((adev->asic_type >= CHIP_SIENNA_CICHLID) &&
625 	    !(adev->flags & AMD_IS_APU) &&
626 	    (adev->nbio.funcs->program_aspm))
627 		adev->nbio.funcs->program_aspm(adev);
628 
629 }
630 
/* Enable/disable the doorbell aperture and its self-ring aperture via NBIO. */
static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}
637 
/* Version descriptor for the common (SoC glue) IP block. */
static const struct amdgpu_ip_block_version nv_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &nv_common_ip_funcs,
};
646 
647 static int nv_reg_base_init(struct amdgpu_device *adev)
648 {
649 	int r;
650 
651 	if (amdgpu_discovery) {
652 		r = amdgpu_discovery_reg_base_init(adev);
653 		if (r) {
654 			DRM_WARN("failed to init reg base from ip discovery table, "
655 					"fallback to legacy init method\n");
656 			goto legacy_init;
657 		}
658 
659 		return 0;
660 	}
661 
662 legacy_init:
663 	switch (adev->asic_type) {
664 	case CHIP_NAVI10:
665 		navi10_reg_base_init(adev);
666 		break;
667 	case CHIP_NAVI14:
668 		navi14_reg_base_init(adev);
669 		break;
670 	case CHIP_NAVI12:
671 		navi12_reg_base_init(adev);
672 		break;
673 	case CHIP_SIENNA_CICHLID:
674 	case CHIP_NAVY_FLOUNDER:
675 		sienna_cichlid_reg_base_init(adev);
676 		break;
677 	case CHIP_VANGOGH:
678 		vangogh_reg_base_init(adev);
679 		break;
680 	case CHIP_DIMGREY_CAVEFISH:
681 		dimgrey_cavefish_reg_base_init(adev);
682 		break;
683 	default:
684 		return -EINVAL;
685 	}
686 
687 	return 0;
688 }
689 
/* Install the SR-IOV virtualization ops for Navi parts. */
void nv_set_virt_ops(struct amdgpu_device *adev)
{
	adev->virt.ops = &xgpu_nv_virt_ops;
}
694 
695 static bool nv_is_headless_sku(struct pci_dev *pdev)
696 {
697 	if ((pdev->device == 0x731E &&
698 	    (pdev->revision == 0xC6 || pdev->revision == 0xC7)) ||
699 	    (pdev->device == 0x7340 && pdev->revision == 0xC9)  ||
700 	    (pdev->device == 0x7360 && pdev->revision == 0xC7))
701 		return true;
702 	return false;
703 }
704 
705 int nv_set_ip_blocks(struct amdgpu_device *adev)
706 {
707 	int r;
708 
709 	if (adev->flags & AMD_IS_APU) {
710 		adev->nbio.funcs = &nbio_v7_2_funcs;
711 		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
712 	} else {
713 		adev->nbio.funcs = &nbio_v2_3_funcs;
714 		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
715 	}
716 	adev->hdp.funcs = &hdp_v5_0_funcs;
717 
718 	if (adev->asic_type >= CHIP_SIENNA_CICHLID)
719 		adev->smuio.funcs = &smuio_v11_0_6_funcs;
720 	else
721 		adev->smuio.funcs = &smuio_v11_0_funcs;
722 
723 	if (adev->asic_type == CHIP_SIENNA_CICHLID)
724 		adev->gmc.xgmi.supported = true;
725 
726 	/* Set IP register base before any HW register access */
727 	r = nv_reg_base_init(adev);
728 	if (r)
729 		return r;
730 
731 	switch (adev->asic_type) {
732 	case CHIP_NAVI10:
733 	case CHIP_NAVI14:
734 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
735 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
736 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
737 		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
738 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
739 		    !amdgpu_sriov_vf(adev))
740 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
741 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
742 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
743 #if defined(CONFIG_DRM_AMD_DC)
744 		else if (amdgpu_device_has_dc_support(adev))
745 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
746 #endif
747 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
748 		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
749 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
750 		    !amdgpu_sriov_vf(adev))
751 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
752 		if (!nv_is_headless_sku(adev->pdev))
753 			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
754 		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
755 		if (adev->enable_mes)
756 			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
757 		break;
758 	case CHIP_NAVI12:
759 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
760 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
761 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
762 		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
763 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
764 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
765 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
766 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
767 #if defined(CONFIG_DRM_AMD_DC)
768 		else if (amdgpu_device_has_dc_support(adev))
769 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
770 #endif
771 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
772 		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
773 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
774 		    !amdgpu_sriov_vf(adev))
775 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
776 		if (!nv_is_headless_sku(adev->pdev))
777 		        amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
778 		if (!amdgpu_sriov_vf(adev))
779 			amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
780 		break;
781 	case CHIP_SIENNA_CICHLID:
782 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
783 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
784 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
785 		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
786 			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
787 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
788 		    is_support_sw_smu(adev))
789 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
790 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
791 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
792 #if defined(CONFIG_DRM_AMD_DC)
793 		else if (amdgpu_device_has_dc_support(adev))
794 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
795 #endif
796 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
797 		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
798 		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
799 		if (!amdgpu_sriov_vf(adev))
800 			amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
801 
802 		if (adev->enable_mes)
803 			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
804 		break;
805 	case CHIP_NAVY_FLOUNDER:
806 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
807 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
808 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
809 		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
810 			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
811 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
812 		    is_support_sw_smu(adev))
813 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
814 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
815 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
816 #if defined(CONFIG_DRM_AMD_DC)
817 		else if (amdgpu_device_has_dc_support(adev))
818 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
819 #endif
820 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
821 		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
822 		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
823 		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
824 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
825 		    is_support_sw_smu(adev))
826 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
827 		break;
828 	case CHIP_VANGOGH:
829 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
830 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
831 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
832 		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
833 			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
834 		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
835 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
836 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
837 #if defined(CONFIG_DRM_AMD_DC)
838 		else if (amdgpu_device_has_dc_support(adev))
839 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
840 #endif
841 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
842 		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
843 		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
844 		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
845 		break;
846 	case CHIP_DIMGREY_CAVEFISH:
847 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
848 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
849 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
850 		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
851 			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
852 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
853 		    is_support_sw_smu(adev))
854 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
855 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
856 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
857 #if defined(CONFIG_DRM_AMD_DC)
858                 else if (amdgpu_device_has_dc_support(adev))
859                         amdgpu_device_ip_block_add(adev, &dm_ip_block);
860 #endif
861 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
862 		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
863 		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
864 		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
865 		break;
866 	default:
867 		return -EINVAL;
868 	}
869 
870 	return 0;
871 }
872 
/* Return the silicon revision id as reported by the NBIO block. */
static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}
877 
/* Navi always requires a full (whole-ASIC) reset rather than a soft reset. */
static bool nv_need_full_reset(struct amdgpu_device *adev)
{
	return true;
}
882 
883 static bool nv_need_reset_on_init(struct amdgpu_device *adev)
884 {
885 	u32 sol_reg;
886 
887 	if (adev->flags & AMD_IS_APU)
888 		return false;
889 
890 	/* Check sOS sign of life register to confirm sys driver and sOS
891 	 * are already been loaded.
892 	 */
893 	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
894 	if (sol_reg)
895 		return true;
896 
897 	return false;
898 }
899 
/* PCIe replay counter; stub that always reports 0 (see TODO below). */
static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
{

	/* TODO
	 * dummy implement for pcie_replay_count sysfs interface
	 * */

	return 0;
}
909 
/* Populate the Navi10 doorbell index layout used by the ring setup code. */
static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.mes_ring = AMDGPU_NAVI10_DOORBELL_MES_RING;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.sdma_engine[2] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE2;
	adev->doorbell_index.sdma_engine[3] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE3;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	/* Doorbell entries are 64-bit on this family, hence the << 1. */
	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}
941 
/* Hook run before ASIC init; nothing needed on Navi. */
static void nv_pre_asic_init(struct amdgpu_device *adev)
{
}
945 
/*
 * Enter/leave the UMD stable-pstate profiling mode: toggle RLC safe mode,
 * disable/re-enable perfmon MGCG, and (where supported) toggle ASPM.
 *
 * Always returns 0.
 */
static int nv_update_umd_stable_pstate(struct amdgpu_device *adev,
				       bool enter)
{
	if (enter)
		amdgpu_gfx_rlc_enter_safe_mode(adev);
	else
		amdgpu_gfx_rlc_exit_safe_mode(adev);

	/* Perfmon MGCG is disabled while in stable pstate. */
	if (adev->gfx.funcs->update_perfmon_mgcg)
		adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);

	/*
	 * The ASPM function is not fully enabled and verified on
	 * Navi yet. Temporarily skip this until ASPM enabled.
	 */
	if ((adev->asic_type >= CHIP_SIENNA_CICHLID) &&
	    !(adev->flags & AMD_IS_APU) &&
	    (adev->nbio.funcs->enable_aspm))
		adev->nbio.funcs->enable_aspm(adev, !enter);

	return 0;
}
968 
/* ASIC-level callback table installed in nv_common_early_init(). */
static const struct amdgpu_asic_funcs nv_asic_funcs =
{
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &nv_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.reset_method = &nv_asic_reset_method,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.need_reset_on_init = &nv_need_reset_on_init,
	.get_pcie_replay_count = &nv_get_pcie_replay_count,
	.supports_baco = &nv_asic_supports_baco,
	.pre_asic_init = &nv_pre_asic_init,
	.update_umd_stable_pstate = &nv_update_umd_stable_pstate,
	.query_video_codecs = &nv_query_video_codecs,
};
990 
/* Early-init hook for the "nv common" IP block: installs the register
 * accessors and ASIC callbacks, then sets the per-chip clock-gating
 * (cg_flags), power-gating (pg_flags) and external revision id.
 * Returns -EINVAL for ASIC types this file does not support.
 */
static int nv_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Remap target for HDP registers exposed to user space
	 * (see nv_common_hw_init's remap_hdp_registers call). */
	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	/* No SMC indirect access on this generation. */
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &nv_pcie_rreg;
	adev->pcie_wreg = &nv_pcie_wreg;
	adev->pcie_rreg64 = &nv_pcie_rreg64;
	adev->pcie_wreg64 = &nv_pcie_wreg64;
	adev->pciep_rreg = &nv_pcie_port_rreg;
	adev->pciep_wreg = &nv_pcie_port_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &nv_didt_rreg;
	adev->didt_wreg = &nv_didt_wreg;

	adev->asic_funcs = &nv_asic_funcs;

	adev->rev_id = nv_get_rev_id(adev);
	/* 0xff stays in place if no case below assigns a real value. */
	adev->external_rev_id = 0xff;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_NAVI14:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 20;
		break;
	case CHIP_NAVI12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		/* guest vm gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0,
		 * as a consequence, the rev_id and external_rev_id are wrong.
		 * workaround it by hardcoding rev_id to 0 (default value).
		 */
		if (amdgpu_sriov_vf(adev))
			adev->rev_id = 0;
		adev->external_rev_id = adev->rev_id + 0xa;
		break;
	case CHIP_SIENNA_CICHLID:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		if (amdgpu_sriov_vf(adev)) {
			/* hypervisor control CG and PG enablement */
			adev->cg_flags = 0;
			adev->pg_flags = 0;
		}
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case CHIP_NAVY_FLOUNDER:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x32;
		break;

	case CHIP_VANGOGH:
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_GFX_FGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG;
		/* NOTE(review): AMD_APU_IS_VANGOGH was unconditionally OR'ed
		 * into apu_flags just above, so this check is always true
		 * here — looks redundant, confirm before simplifying. */
		if (adev->apu_flags & AMD_APU_IS_VANGOGH)
			adev->external_rev_id = adev->rev_id + 0x01;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_nv_mailbox_set_irq_funcs(adev);
	}

	return 0;
}
1185 
/* Late-init hook: on SR-IOV virtual functions, acquire the mailbox
 * interrupt used to talk to the hypervisor; bare-metal needs nothing.
 */
static int nv_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_sriov_vf(adev))
		return 0;

	xgpu_nv_mailbox_get_irq(adev);

	return 0;
}
1195 
/* Software-init hook: on SR-IOV virtual functions, register the
 * mailbox interrupt id; bare-metal needs nothing.
 */
static int nv_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_sriov_vf(adev))
		return 0;

	xgpu_nv_mailbox_add_irq_id(adev);

	return 0;
}
1205 
/* Software-teardown hook: nothing allocated in sw_init needs freeing. */
static int nv_common_sw_fini(void *handle)
{
	return 0;
}
1210 
/* Hardware-init hook for the common IP block.  The steps below run in
 * a fixed order: PCIe link setup first, then NBIO register init, then
 * HDP remap, and finally the doorbell aperture.  Always returns 0.
 */
static int nv_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	nv_pcie_gen3_enable(adev);
	/* enable aspm */
	nv_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * for the purpose of expose those registers
	 * to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);
	/* enable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, true);

	return 0;
}
1232 
/* Hardware-teardown hook: mirrors hw_init by disabling the doorbell
 * aperture.  Always returns 0.
 */
static int nv_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, false);

	return 0;
}
1242 
/* Suspend is identical to hardware teardown for this IP block. */
static int nv_common_suspend(void *handle)
{
	return nv_common_hw_fini(handle);
}
1249 
/* Resume is identical to hardware init for this IP block. */
static int nv_common_resume(void *handle)
{
	return nv_common_hw_init(handle);
}
1256 
/* Idle query: the common block has no busy state, report always idle. */
static bool nv_common_is_idle(void *handle)
{
	return true;
}
1261 
/* Wait-for-idle: nothing to wait on (see nv_common_is_idle). */
static int nv_common_wait_for_idle(void *handle)
{
	return 0;
}
1266 
/* Soft reset: no per-block reset is implemented; report success. */
static int nv_common_soft_reset(void *handle)
{
	return 0;
}
1271 
1272 static int nv_common_set_clockgating_state(void *handle,
1273 					   enum amd_clockgating_state state)
1274 {
1275 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1276 
1277 	if (amdgpu_sriov_vf(adev))
1278 		return 0;
1279 
1280 	switch (adev->asic_type) {
1281 	case CHIP_NAVI10:
1282 	case CHIP_NAVI14:
1283 	case CHIP_NAVI12:
1284 	case CHIP_SIENNA_CICHLID:
1285 	case CHIP_NAVY_FLOUNDER:
1286 	case CHIP_DIMGREY_CAVEFISH:
1287 		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
1288 				state == AMD_CG_STATE_GATE);
1289 		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
1290 				state == AMD_CG_STATE_GATE);
1291 		adev->hdp.funcs->update_clock_gating(adev,
1292 				state == AMD_CG_STATE_GATE);
1293 		adev->smuio.funcs->update_rom_clock_gating(adev,
1294 				state == AMD_CG_STATE_GATE);
1295 		break;
1296 	default:
1297 		break;
1298 	}
1299 	return 0;
1300 }
1301 
/* Power gating for the common block is not implemented yet; stub
 * succeeds unconditionally.
 */
static int nv_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	/* TODO */
	return 0;
}
1308 
1309 static void nv_common_get_clockgating_state(void *handle, u32 *flags)
1310 {
1311 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1312 
1313 	if (amdgpu_sriov_vf(adev))
1314 		*flags = 0;
1315 
1316 	adev->nbio.funcs->get_clockgating_state(adev, flags);
1317 
1318 	adev->hdp.funcs->get_clock_gating_state(adev, flags);
1319 
1320 	adev->smuio.funcs->get_clock_gating_state(adev, flags);
1321 
1322 	return;
1323 }
1324 
/* IP-block callback table tying the nv_common_* hooks above into the
 * amdgpu IP framework.
 */
static const struct amd_ip_funcs nv_common_ip_funcs = {
	.name = "nv_common",
	.early_init = nv_common_early_init,
	.late_init = nv_common_late_init,
	.sw_init = nv_common_sw_init,
	.sw_fini = nv_common_sw_fini,
	.hw_init = nv_common_hw_init,
	.hw_fini = nv_common_hw_fini,
	.suspend = nv_common_suspend,
	.resume = nv_common_resume,
	.is_idle = nv_common_is_idle,
	.wait_for_idle = nv_common_wait_for_idle,
	.soft_reset = nv_common_soft_reset,
	.set_clockgating_state = nv_common_set_clockgating_state,
	.set_powergating_state = nv_common_set_powergating_state,
	.get_clockgating_state = nv_common_get_clockgating_state,
};
1342