xref: /linux/drivers/gpu/drm/amd/amdgpu/nv.c (revision 8c69d0298fb56f603e694cf0188e25b58dfe8b7e)
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/firmware.h>
24 #include <linux/slab.h>
25 #include <linux/module.h>
26 #include <linux/pci.h>
27 
28 #include <drm/amdgpu_drm.h>
29 
30 #include "amdgpu.h"
31 #include "amdgpu_atombios.h"
32 #include "amdgpu_ih.h"
33 #include "amdgpu_uvd.h"
34 #include "amdgpu_vce.h"
35 #include "amdgpu_ucode.h"
36 #include "amdgpu_psp.h"
37 #include "atom.h"
38 #include "amd_pcie.h"
39 
40 #include "gc/gc_10_1_0_offset.h"
41 #include "gc/gc_10_1_0_sh_mask.h"
42 #include "mp/mp_11_0_offset.h"
43 
44 #include "soc15.h"
45 #include "soc15_common.h"
46 #include "gmc_v10_0.h"
47 #include "gfxhub_v2_0.h"
48 #include "mmhub_v2_0.h"
49 #include "nbio_v2_3.h"
50 #include "nbio_v7_2.h"
51 #include "hdp_v5_0.h"
52 #include "nv.h"
53 #include "navi10_ih.h"
54 #include "gfx_v10_0.h"
55 #include "sdma_v5_0.h"
56 #include "sdma_v5_2.h"
57 #include "vcn_v2_0.h"
58 #include "jpeg_v2_0.h"
59 #include "vcn_v3_0.h"
60 #include "jpeg_v3_0.h"
61 #include "dce_virtual.h"
62 #include "mes_v10_1.h"
63 #include "mxgpu_nv.h"
64 #include "smuio_v11_0.h"
65 #include "smuio_v11_0_6.h"
66 
67 static const struct amd_ip_funcs nv_common_ip_funcs;
68 
69 /* Navi */
/* Encode capabilities common to Navi parts: H.264 (AVC) and HEVC,
 * both capped at 4096x2304.  max_level == 0 here appears to mean
 * "no level restriction reported" — NOTE(review): confirm against
 * the VCN firmware capability documentation.
 */
static const struct amdgpu_video_codec_info nv_video_codecs_encode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 2304,
		.max_pixels_per_frame = 4096 * 2304,
		.max_level = 0,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
		.max_width = 4096,
		.max_height = 2304,
		.max_pixels_per_frame = 4096 * 2304,
		.max_level = 0,
	},
};
87 
/* Wrapper exposing the Navi encode table through the common codecs struct. */
static const struct amdgpu_video_codecs nv_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(nv_video_codecs_encode_array),
	.codec_array = nv_video_codecs_encode_array,
};
93 
94 /* Navi1x */
/* Decode capabilities for Navi1x (Navi10/12/14): MPEG2/4, AVC, VC1 up to
 * 4Kx4K, HEVC/VP9 up to 8192x4352, JPEG up to 4Kx4K.  No AV1 decode on
 * this generation (contrast with the Sienna Cichlid table below).
 * max_level values look like codec-specific level encodings (e.g. 52 for
 * H.264 level 5.2, 186 for HEVC) — NOTE(review): confirm the encoding.
 */
static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 3,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 5,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 52,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 4,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
		.max_width = 8192,
		.max_height = 4352,
		.max_pixels_per_frame = 8192 * 4352,
		.max_level = 186,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 0,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
		.max_width = 8192,
		.max_height = 4352,
		.max_pixels_per_frame = 8192 * 4352,
		.max_level = 0,
	},
};
147 
/* Wrapper exposing the Navi1x decode table through the common codecs struct. */
static const struct amdgpu_video_codecs nv_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(nv_video_codecs_decode_array),
	.codec_array = nv_video_codecs_decode_array,
};
153 
154 /* Sienna Cichlid */
/* Sienna Cichlid family decode capabilities.  Identical to the Navi1x
 * table plus AV1 decode (8192x4352), which this generation adds.
 */
static const struct amdgpu_video_codec_info sc_video_codecs_decode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 3,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 5,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 52,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 4,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
		.max_width = 8192,
		.max_height = 4352,
		.max_pixels_per_frame = 8192 * 4352,
		.max_level = 186,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 0,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
		.max_width = 8192,
		.max_height = 4352,
		.max_pixels_per_frame = 8192 * 4352,
		.max_level = 0,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1,
		.max_width = 8192,
		.max_height = 4352,
		.max_pixels_per_frame = 8192 * 4352,
		.max_level = 0,
	},
};
214 
/* Wrapper exposing the Sienna Cichlid decode table. */
static const struct amdgpu_video_codecs sc_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(sc_video_codecs_decode_array),
	.codec_array = sc_video_codecs_decode_array,
};
220 
221 /* SRIOV Sienna Cichlid, not const since data is controlled by host */
/* SRIOV Sienna Cichlid encode table.  Deliberately non-const: when the
 * device runs as an SR-IOV virtual function, the host is allowed to
 * overwrite these entries at runtime.
 */
static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 2304,
		.max_pixels_per_frame = 4096 * 2304,
		.max_level = 0,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
		.max_width = 4096,
		.max_height = 2304,
		.max_pixels_per_frame = 4096 * 2304,
		.max_level = 0,
	},
};
239 
/* SRIOV Sienna Cichlid decode table.  Non-const for the same reason as
 * the encode table above: the SR-IOV host controls the advertised caps.
 * Default contents mirror sc_video_codecs_decode_array (includes AV1).
 */
static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array[] =
{
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 3,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 5,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 52,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 4,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
		.max_width = 8192,
		.max_height = 4352,
		.max_pixels_per_frame = 8192 * 4352,
		.max_level = 186,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
		.max_width = 4096,
		.max_height = 4096,
		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 0,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
		.max_width = 8192,
		.max_height = 4352,
		.max_pixels_per_frame = 8192 * 4352,
		.max_level = 0,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1,
		.max_width = 8192,
		.max_height = 4352,
		.max_pixels_per_frame = 8192 * 4352,
		.max_level = 0,
	},
};
299 
/* Wrapper for the host-controlled SRIOV encode table (non-const). */
static struct amdgpu_video_codecs sriov_sc_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
	.codec_array = sriov_sc_video_codecs_encode_array,
};
305 
/* Wrapper for the host-controlled SRIOV decode table (non-const). */
static struct amdgpu_video_codecs sriov_sc_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(sriov_sc_video_codecs_decode_array),
	.codec_array = sriov_sc_video_codecs_decode_array,
};
311 
312 static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
313 				 const struct amdgpu_video_codecs **codecs)
314 {
315 	switch (adev->asic_type) {
316 	case CHIP_SIENNA_CICHLID:
317 		if (amdgpu_sriov_vf(adev)) {
318 			if (encode)
319 				*codecs = &sriov_sc_video_codecs_encode;
320 			else
321 				*codecs = &sriov_sc_video_codecs_decode;
322 		} else {
323 			if (encode)
324 				*codecs = &nv_video_codecs_encode;
325 			else
326 				*codecs = &sc_video_codecs_decode;
327 		}
328 		return 0;
329 	case CHIP_NAVY_FLOUNDER:
330 	case CHIP_DIMGREY_CAVEFISH:
331 	case CHIP_VANGOGH:
332 		if (encode)
333 			*codecs = &nv_video_codecs_encode;
334 		else
335 			*codecs = &sc_video_codecs_decode;
336 		return 0;
337 	case CHIP_NAVI10:
338 	case CHIP_NAVI14:
339 	case CHIP_NAVI12:
340 		if (encode)
341 			*codecs = &nv_video_codecs_encode;
342 		else
343 			*codecs = &nv_video_codecs_decode;
344 		return 0;
345 	default:
346 		return -EINVAL;
347 	}
348 }
349 
350 /*
351  * Indirect registers accessor
352  */
353 static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
354 {
355 	unsigned long address, data;
356 	address = adev->nbio.funcs->get_pcie_index_offset(adev);
357 	data = adev->nbio.funcs->get_pcie_data_offset(adev);
358 
359 	return amdgpu_device_indirect_rreg(adev, address, data, reg);
360 }
361 
362 static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
363 {
364 	unsigned long address, data;
365 
366 	address = adev->nbio.funcs->get_pcie_index_offset(adev);
367 	data = adev->nbio.funcs->get_pcie_data_offset(adev);
368 
369 	amdgpu_device_indirect_wreg(adev, address, data, reg, v);
370 }
371 
372 static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
373 {
374 	unsigned long address, data;
375 	address = adev->nbio.funcs->get_pcie_index_offset(adev);
376 	data = adev->nbio.funcs->get_pcie_data_offset(adev);
377 
378 	return amdgpu_device_indirect_rreg64(adev, address, data, reg);
379 }
380 
381 static u32 nv_pcie_port_rreg(struct amdgpu_device *adev, u32 reg)
382 {
383 	unsigned long flags, address, data;
384 	u32 r;
385 	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
386 	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
387 
388 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
389 	WREG32(address, reg * 4);
390 	(void)RREG32(address);
391 	r = RREG32(data);
392 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
393 	return r;
394 }
395 
396 static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
397 {
398 	unsigned long address, data;
399 
400 	address = adev->nbio.funcs->get_pcie_index_offset(adev);
401 	data = adev->nbio.funcs->get_pcie_data_offset(adev);
402 
403 	amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
404 }
405 
406 static void nv_pcie_port_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
407 {
408 	unsigned long flags, address, data;
409 
410 	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
411 	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
412 
413 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
414 	WREG32(address, reg * 4);
415 	(void)RREG32(address);
416 	WREG32(data, v);
417 	(void)RREG32(data);
418 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
419 }
420 
421 static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
422 {
423 	unsigned long flags, address, data;
424 	u32 r;
425 
426 	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
427 	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
428 
429 	spin_lock_irqsave(&adev->didt_idx_lock, flags);
430 	WREG32(address, (reg));
431 	r = RREG32(data);
432 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
433 	return r;
434 }
435 
436 static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
437 {
438 	unsigned long flags, address, data;
439 
440 	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
441 	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
442 
443 	spin_lock_irqsave(&adev->didt_idx_lock, flags);
444 	WREG32(address, (reg));
445 	WREG32(data, (v));
446 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
447 }
448 
/* Return the VRAM size reported by the nbio block (in MB per nbio
 * convention — NOTE(review): unit not visible here, confirm in nbio code).
 */
static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}
453 
/* Return the reference (xtal) clock from the cached SPLL settings. */
static u32 nv_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}
458 
459 
460 void nv_grbm_select(struct amdgpu_device *adev,
461 		     u32 me, u32 pipe, u32 queue, u32 vmid)
462 {
463 	u32 grbm_gfx_cntl = 0;
464 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
465 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
466 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
467 	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
468 
469 	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
470 }
471 
/* Stub: VGA state toggling is not implemented for NV yet. */
static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}
476 
/* Stub: reading the VBIOS with the ASIC posted-off is not implemented;
 * always reports failure so callers fall back to other BIOS sources.
 */
static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}
482 
483 static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
484 				  u8 *bios, u32 length_bytes)
485 {
486 	u32 *dw_ptr;
487 	u32 i, length_dw;
488 	u32 rom_index_offset, rom_data_offset;
489 
490 	if (bios == NULL)
491 		return false;
492 	if (length_bytes == 0)
493 		return false;
494 	/* APU vbios image is part of sbios image */
495 	if (adev->flags & AMD_IS_APU)
496 		return false;
497 
498 	dw_ptr = (u32 *)bios;
499 	length_dw = ALIGN(length_bytes, 4) / 4;
500 
501 	rom_index_offset =
502 		adev->smuio.funcs->get_rom_index_offset(adev);
503 	rom_data_offset =
504 		adev->smuio.funcs->get_rom_data_offset(adev);
505 
506 	/* set rom index to 0 */
507 	WREG32(rom_index_offset, 0);
508 	/* read out the rom data */
509 	for (i = 0; i < length_dw; i++)
510 		dw_ptr[i] = RREG32(rom_data_offset);
511 
512 	return true;
513 }
514 
/* Whitelist of registers userspace may read via nv_read_register().
 * Entries flagged grbm_indexed (by SOC15_REG_ENTRY) are read through the
 * GRBM SE/SH selection path in nv_read_indexed_register().
 */
static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	/* SDMA1 entry is skipped at lookup time on single-SDMA parts. */
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};
536 
537 static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
538 					 u32 sh_num, u32 reg_offset)
539 {
540 	uint32_t val;
541 
542 	mutex_lock(&adev->grbm_idx_mutex);
543 	if (se_num != 0xffffffff || sh_num != 0xffffffff)
544 		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
545 
546 	val = RREG32(reg_offset);
547 
548 	if (se_num != 0xffffffff || sh_num != 0xffffffff)
549 		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
550 	mutex_unlock(&adev->grbm_idx_mutex);
551 	return val;
552 }
553 
554 static uint32_t nv_get_register_value(struct amdgpu_device *adev,
555 				      bool indexed, u32 se_num,
556 				      u32 sh_num, u32 reg_offset)
557 {
558 	if (indexed) {
559 		return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
560 	} else {
561 		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
562 			return adev->gfx.config.gb_addr_config;
563 		return RREG32(reg_offset);
564 	}
565 }
566 
567 static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
568 			    u32 sh_num, u32 reg_offset, u32 *value)
569 {
570 	uint32_t i;
571 	struct soc15_allowed_register_entry  *en;
572 
573 	*value = 0;
574 	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
575 		en = &nv_allowed_read_registers[i];
576 		if ((i == 7 && (adev->sdma.num_instances == 1)) || /* some asics don't have SDMA1 */
577 		    reg_offset !=
578 		    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
579 			continue;
580 
581 		*value = nv_get_register_value(adev,
582 					       nv_allowed_read_registers[i].grbm_indexed,
583 					       se_num, sh_num, reg_offset);
584 		return 0;
585 	}
586 	return -EINVAL;
587 }
588 
/* Perform a mode2 (SMU-assisted) ASIC reset.  The sequence is
 * order-sensitive: mark engine hung in the atombios scratch regs,
 * disable bus mastering, save PCI config space, reset via DPM, restore
 * config space, then poll the nbio memsize register until the ASIC
 * responds again.  Returns the DPM reset result (0 on success).
 */
static int nv_asic_mode2_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	/* disable BM */
	pci_clear_master(adev->pdev);

	/* Config space is clobbered by the reset; cache it for restore. */
	amdgpu_device_cache_pci_state(adev->pdev);

	ret = amdgpu_dpm_mode2_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode2 reset failed\n");

	amdgpu_device_load_pci_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		/* memsize reads 0xffffffff while the ASIC is still in reset */
		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}
620 
621 static enum amd_reset_method
622 nv_asic_reset_method(struct amdgpu_device *adev)
623 {
624 	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
625 	    amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
626 	    amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
627 	    amdgpu_reset_method == AMD_RESET_METHOD_PCI)
628 		return amdgpu_reset_method;
629 
630 	if (amdgpu_reset_method != -1)
631 		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
632 				  amdgpu_reset_method);
633 
634 	switch (adev->asic_type) {
635 	case CHIP_VANGOGH:
636 		return AMD_RESET_METHOD_MODE2;
637 	case CHIP_SIENNA_CICHLID:
638 	case CHIP_NAVY_FLOUNDER:
639 	case CHIP_DIMGREY_CAVEFISH:
640 	case CHIP_BEIGE_GOBY:
641 		return AMD_RESET_METHOD_MODE1;
642 	default:
643 		if (amdgpu_dpm_is_baco_supported(adev))
644 			return AMD_RESET_METHOD_BACO;
645 		else
646 			return AMD_RESET_METHOD_MODE1;
647 	}
648 }
649 
650 static int nv_asic_reset(struct amdgpu_device *adev)
651 {
652 	int ret = 0;
653 
654 	switch (nv_asic_reset_method(adev)) {
655 	case AMD_RESET_METHOD_PCI:
656 		dev_info(adev->dev, "PCI reset\n");
657 		ret = amdgpu_device_pci_reset(adev);
658 		break;
659 	case AMD_RESET_METHOD_BACO:
660 		dev_info(adev->dev, "BACO reset\n");
661 		ret = amdgpu_dpm_baco_reset(adev);
662 		break;
663 	case AMD_RESET_METHOD_MODE2:
664 		dev_info(adev->dev, "MODE2 reset\n");
665 		ret = nv_asic_mode2_reset(adev);
666 		break;
667 	default:
668 		dev_info(adev->dev, "MODE1 reset\n");
669 		ret = amdgpu_device_mode1_reset(adev);
670 		break;
671 	}
672 
673 	return ret;
674 }
675 
/* Stub: UVD clock programming not implemented for NV; reports success. */
static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}
681 
/* Stub: VCE clock programming not implemented for NV; reports success. */
static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}
687 
688 static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
689 {
690 	if (pci_is_root_bus(adev->pdev->bus))
691 		return;
692 
693 	if (amdgpu_pcie_gen2 == 0)
694 		return;
695 
696 	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
697 					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
698 		return;
699 
700 	/* todo */
701 }
702 
703 static void nv_program_aspm(struct amdgpu_device *adev)
704 {
705 	if (!amdgpu_aspm)
706 		return;
707 
708 	if (!(adev->flags & AMD_IS_APU) &&
709 	    (adev->nbio.funcs->program_aspm))
710 		adev->nbio.funcs->program_aspm(adev);
711 
712 }
713 
/* Toggle both the main and self-ring doorbell apertures via nbio. */
static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}
720 
/* IP block descriptor for the NV "common" block (v1.0), registered first
 * by nv_set_ip_blocks() for every supported ASIC.
 */
static const struct amdgpu_ip_block_version nv_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &nv_common_ip_funcs,
};
729 
730 static bool nv_is_headless_sku(struct pci_dev *pdev)
731 {
732 	if ((pdev->device == 0x731E &&
733 	    (pdev->revision == 0xC6 || pdev->revision == 0xC7)) ||
734 	    (pdev->device == 0x7340 && pdev->revision == 0xC9)  ||
735 	    (pdev->device == 0x7360 && pdev->revision == 0xC7))
736 		return true;
737 	return false;
738 }
739 
740 static int nv_reg_base_init(struct amdgpu_device *adev)
741 {
742 	int r;
743 
744 	if (amdgpu_discovery) {
745 		r = amdgpu_discovery_reg_base_init(adev);
746 		if (r) {
747 			DRM_WARN("failed to init reg base from ip discovery table, "
748 					"fallback to legacy init method\n");
749 			goto legacy_init;
750 		}
751 
752 		amdgpu_discovery_harvest_ip(adev);
753 		if (nv_is_headless_sku(adev->pdev)) {
754 			adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
755 			adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
756 		}
757 
758 		return 0;
759 	}
760 
761 legacy_init:
762 	switch (adev->asic_type) {
763 	case CHIP_NAVI10:
764 		navi10_reg_base_init(adev);
765 		break;
766 	case CHIP_NAVI14:
767 		navi14_reg_base_init(adev);
768 		break;
769 	case CHIP_NAVI12:
770 		navi12_reg_base_init(adev);
771 		break;
772 	case CHIP_SIENNA_CICHLID:
773 	case CHIP_NAVY_FLOUNDER:
774 		sienna_cichlid_reg_base_init(adev);
775 		break;
776 	case CHIP_VANGOGH:
777 		vangogh_reg_base_init(adev);
778 		break;
779 	case CHIP_DIMGREY_CAVEFISH:
780 		dimgrey_cavefish_reg_base_init(adev);
781 		break;
782 	case CHIP_BEIGE_GOBY:
783 		beige_goby_reg_base_init(adev);
784 		break;
785 	default:
786 		return -EINVAL;
787 	}
788 
789 	return 0;
790 }
791 
/* Install the NV SR-IOV virtualization ops on this device. */
void nv_set_virt_ops(struct amdgpu_device *adev)
{
	adev->virt.ops = &xgpu_nv_virt_ops;
}
796 
797 int nv_set_ip_blocks(struct amdgpu_device *adev)
798 {
799 	int r;
800 
801 	if (adev->flags & AMD_IS_APU) {
802 		adev->nbio.funcs = &nbio_v7_2_funcs;
803 		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
804 	} else {
805 		adev->nbio.funcs = &nbio_v2_3_funcs;
806 		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
807 	}
808 	adev->hdp.funcs = &hdp_v5_0_funcs;
809 
810 	if (adev->asic_type >= CHIP_SIENNA_CICHLID)
811 		adev->smuio.funcs = &smuio_v11_0_6_funcs;
812 	else
813 		adev->smuio.funcs = &smuio_v11_0_funcs;
814 
815 	if (adev->asic_type == CHIP_SIENNA_CICHLID)
816 		adev->gmc.xgmi.supported = true;
817 
818 	/* Set IP register base before any HW register access */
819 	r = nv_reg_base_init(adev);
820 	if (r)
821 		return r;
822 
823 	switch (adev->asic_type) {
824 	case CHIP_NAVI10:
825 	case CHIP_NAVI14:
826 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
827 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
828 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
829 		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
830 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
831 		    !amdgpu_sriov_vf(adev))
832 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
833 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
834 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
835 #if defined(CONFIG_DRM_AMD_DC)
836 		else if (amdgpu_device_has_dc_support(adev))
837 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
838 #endif
839 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
840 		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
841 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
842 		    !amdgpu_sriov_vf(adev))
843 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
844 		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
845 		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
846 		if (adev->enable_mes)
847 			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
848 		break;
849 	case CHIP_NAVI12:
850 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
851 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
852 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
853 		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
854 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
855 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
856 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
857 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
858 #if defined(CONFIG_DRM_AMD_DC)
859 		else if (amdgpu_device_has_dc_support(adev))
860 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
861 #endif
862 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
863 		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
864 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
865 		    !amdgpu_sriov_vf(adev))
866 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
867 		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
868 		if (!amdgpu_sriov_vf(adev))
869 			amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
870 		break;
871 	case CHIP_SIENNA_CICHLID:
872 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
873 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
874 		if (!amdgpu_sriov_vf(adev)) {
875 			amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
876 			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
877 				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
878 		} else {
879 			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
880 				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
881 			amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
882 		}
883 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
884 		    is_support_sw_smu(adev))
885 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
886 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
887 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
888 #if defined(CONFIG_DRM_AMD_DC)
889 		else if (amdgpu_device_has_dc_support(adev))
890 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
891 #endif
892 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
893 		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
894 		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
895 		if (!amdgpu_sriov_vf(adev))
896 			amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
897 		if (adev->enable_mes)
898 			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
899 		break;
900 	case CHIP_NAVY_FLOUNDER:
901 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
902 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
903 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
904 		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
905 			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
906 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
907 		    is_support_sw_smu(adev))
908 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
909 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
910 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
911 #if defined(CONFIG_DRM_AMD_DC)
912 		else if (amdgpu_device_has_dc_support(adev))
913 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
914 #endif
915 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
916 		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
917 		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
918 		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
919 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
920 		    is_support_sw_smu(adev))
921 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
922 		break;
923 	case CHIP_VANGOGH:
924 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
925 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
926 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
927 		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
928 			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
929 		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
930 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
931 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
932 #if defined(CONFIG_DRM_AMD_DC)
933 		else if (amdgpu_device_has_dc_support(adev))
934 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
935 #endif
936 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
937 		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
938 		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
939 		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
940 		break;
941 	case CHIP_DIMGREY_CAVEFISH:
942 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
943 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
944 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
945 		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
946 			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
947 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
948 		    is_support_sw_smu(adev))
949 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
950 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
951 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
952 #if defined(CONFIG_DRM_AMD_DC)
953                 else if (amdgpu_device_has_dc_support(adev))
954                         amdgpu_device_ip_block_add(adev, &dm_ip_block);
955 #endif
956 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
957 		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
958 		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
959 		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
960 		break;
961 	case CHIP_BEIGE_GOBY:
962 		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
963 		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
964 		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
965 		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
966 			amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
967 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
968 		    is_support_sw_smu(adev))
969 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
970 		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
971 		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
972 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
973 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
974 #if defined(CONFIG_DRM_AMD_DC)
975 		else if (amdgpu_device_has_dc_support(adev))
976 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
977 #endif
978 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
979 		    is_support_sw_smu(adev))
980 			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
981 		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
982 		break;
983 	default:
984 		return -EINVAL;
985 	}
986 
987 	return 0;
988 }
989 
990 static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
991 {
992 	return adev->nbio.funcs->get_rev_id(adev);
993 }
994 
/* Tell the GPU recovery code that NV ASICs always require a full ASIC
 * reset — no lighter per-IP reset path is offered by this callback.
 */
static bool nv_need_full_reset(struct amdgpu_device *adev)
{
	return true;
}
999 
1000 static bool nv_need_reset_on_init(struct amdgpu_device *adev)
1001 {
1002 	u32 sol_reg;
1003 
1004 	if (adev->flags & AMD_IS_APU)
1005 		return false;
1006 
1007 	/* Check sOS sign of life register to confirm sys driver and sOS
1008 	 * are already been loaded.
1009 	 */
1010 	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
1011 	if (sol_reg)
1012 		return true;
1013 
1014 	return false;
1015 }
1016 
/* Backend for the pcie_replay_count sysfs attribute.  NV does not read a
 * replay counter yet, so this is a stub that always reports 0.
 */
static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
{

	/* TODO: dummy implementation for the pcie_replay_count sysfs
	 * interface — wire up the real PCIe replay counter register read.
	 */

	return 0;
}
1026 
/* Populate the per-device doorbell index table with the Navi10-family
 * doorbell layout.  Each field maps a logical ring/queue to its slot in
 * the doorbell BAR.
 */
static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
	/* KIQ and compute (MEC) rings */
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	/* user-mode queue doorbell range */
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	/* graphics and MES rings */
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.mes_ring = AMDGPU_NAVI10_DOORBELL_MES_RING;
	/* SDMA engines */
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.sdma_engine[2] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE2;
	adev->doorbell_index.sdma_engine[3] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE3;
	/* interrupt handler and VCN ring pairs */
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	/* << 1 converts the 64-bit doorbell index to 32-bit dword units */
	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}
1058 
/* Hook invoked before ASIC init; NV has no work to do here, but the
 * callback must exist to satisfy the amdgpu_asic_funcs interface.
 */
static void nv_pre_asic_init(struct amdgpu_device *adev)
{
}
1062 
1063 static int nv_update_umd_stable_pstate(struct amdgpu_device *adev,
1064 				       bool enter)
1065 {
1066 	if (enter)
1067 		amdgpu_gfx_rlc_enter_safe_mode(adev);
1068 	else
1069 		amdgpu_gfx_rlc_exit_safe_mode(adev);
1070 
1071 	if (adev->gfx.funcs->update_perfmon_mgcg)
1072 		adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);
1073 
1074 	if (!(adev->flags & AMD_IS_APU) &&
1075 	    (adev->nbio.funcs->enable_aspm))
1076 		adev->nbio.funcs->enable_aspm(adev, !enter);
1077 
1078 	return 0;
1079 }
1080 
/* ASIC-level callback table for the NV (Navi/Sienna Cichlid family)
 * SoC; installed into adev->asic_funcs in nv_common_early_init().
 */
static const struct amdgpu_asic_funcs nv_asic_funcs =
{
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &nv_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.reset_method = &nv_asic_reset_method,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.need_reset_on_init = &nv_need_reset_on_init,
	.get_pcie_replay_count = &nv_get_pcie_replay_count,
	.supports_baco = &amdgpu_dpm_is_baco_supported,
	.pre_asic_init = &nv_pre_asic_init,
	.update_umd_stable_pstate = &nv_update_umd_stable_pstate,
	.query_video_codecs = &nv_query_video_codecs,
};
1102 
/* Early-init for the NV "common" IP block: install register accessors
 * and the ASIC function table, then set the per-ASIC clockgating (CG),
 * powergating (PG) flags and external revision id.
 *
 * Returns 0 on success, -EINVAL for an unknown asic_type.
 */
static int nv_common_early_init(void *handle)
{
/* MMIO hole used to remap HDP registers for userspace access */
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	/* no SMC register access on NV */
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &nv_pcie_rreg;
	adev->pcie_wreg = &nv_pcie_wreg;
	adev->pcie_rreg64 = &nv_pcie_rreg64;
	adev->pcie_wreg64 = &nv_pcie_wreg64;
	adev->pciep_rreg = &nv_pcie_port_rreg;
	adev->pciep_wreg = &nv_pcie_port_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &nv_didt_rreg;
	adev->didt_wreg = &nv_didt_wreg;

	adev->asic_funcs = &nv_asic_funcs;

	adev->rev_id = nv_get_rev_id(adev);
	/* 0xff = "unknown" sentinel until the switch below sets it */
	adev->external_rev_id = 0xff;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_NAVI14:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 20;
		break;
	case CHIP_NAVI12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		/* guest vm gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0,
		 * as a consequence, the rev_id and external_rev_id are wrong.
		 * workaround it by hardcoding rev_id to 0 (default value).
		 */
		if (amdgpu_sriov_vf(adev))
			adev->rev_id = 0;
		adev->external_rev_id = adev->rev_id + 0xa;
		break;
	case CHIP_SIENNA_CICHLID:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		if (amdgpu_sriov_vf(adev)) {
			/* hypervisor control CG and PG enablement */
			adev->cg_flags = 0;
			adev->pg_flags = 0;
		}
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case CHIP_NAVY_FLOUNDER:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x32;
		break;

	case CHIP_VANGOGH:
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_GFX_FGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG;
		/* NOTE(review): AMD_APU_IS_VANGOGH was just OR'd in above, so
		 * this condition always holds in this case branch.
		 */
		if (adev->apu_flags & AMD_APU_IS_VANGOGH)
			adev->external_rev_id = adev->rev_id + 0x01;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_BEIGE_GOBY:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_VCN_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x46;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	/* strip VCN/JPEG powergating when the VCN IP is harvested */
	if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
		adev->pg_flags &= ~(AMD_PG_SUPPORT_VCN |
				    AMD_PG_SUPPORT_VCN_DPG |
				    AMD_PG_SUPPORT_JPEG);

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_nv_mailbox_set_irq_funcs(adev);
	}

	return 0;
}
1324 
1325 static int nv_common_late_init(void *handle)
1326 {
1327 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1328 
1329 	if (amdgpu_sriov_vf(adev)) {
1330 		xgpu_nv_mailbox_get_irq(adev);
1331 		amdgpu_virt_update_sriov_video_codec(adev,
1332 				sriov_sc_video_codecs_encode_array, ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
1333 				sriov_sc_video_codecs_decode_array, ARRAY_SIZE(sriov_sc_video_codecs_decode_array));
1334 	}
1335 
1336 	return 0;
1337 }
1338 
/* SW-init for the common IP block: SR-IOV guests register the mailbox
 * interrupt id so host notifications can be delivered.  Returns 0.
 */
static int nv_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_add_irq_id(adev);

	return 0;
}
1348 
/* SW-fini for the common IP block; nothing to tear down on NV. */
static int nv_common_sw_fini(void *handle)
{
	return 0;
}
1353 
/* HW-init for the common IP block.  The call order below matters:
 * PCIe link setup first, then NBIO register programming, then the HDP
 * remap and doorbell aperture that depend on it.  Always returns 0.
 */
static int nv_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	nv_pcie_gen3_enable(adev);
	/* enable aspm */
	nv_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * for the purpose of expose those registers
	 * to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);
	/* enable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, true);

	return 0;
}
1375 
1376 static int nv_common_hw_fini(void *handle)
1377 {
1378 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1379 
1380 	/* disable the doorbell aperture */
1381 	nv_enable_doorbell_aperture(adev, false);
1382 
1383 	return 0;
1384 }
1385 
/* Suspend is identical to hw teardown for the common block. */
static int nv_common_suspend(void *handle)
{
	return nv_common_hw_fini(handle);
}
1392 
/* Resume is identical to hw bring-up for the common block. */
static int nv_common_resume(void *handle)
{
	return nv_common_hw_init(handle);
}
1399 
/* The common block has no busy state to report; always idle. */
static bool nv_common_is_idle(void *handle)
{
	return true;
}
1404 
/* Nothing to wait for — the common block is always idle. */
static int nv_common_wait_for_idle(void *handle)
{
	return 0;
}
1409 
/* No soft-reset support for the common block; report success. */
static int nv_common_soft_reset(void *handle)
{
	return 0;
}
1414 
1415 static int nv_common_set_clockgating_state(void *handle,
1416 					   enum amd_clockgating_state state)
1417 {
1418 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1419 
1420 	if (amdgpu_sriov_vf(adev))
1421 		return 0;
1422 
1423 	switch (adev->asic_type) {
1424 	case CHIP_NAVI10:
1425 	case CHIP_NAVI14:
1426 	case CHIP_NAVI12:
1427 	case CHIP_SIENNA_CICHLID:
1428 	case CHIP_NAVY_FLOUNDER:
1429 	case CHIP_DIMGREY_CAVEFISH:
1430 	case CHIP_BEIGE_GOBY:
1431 		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
1432 				state == AMD_CG_STATE_GATE);
1433 		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
1434 				state == AMD_CG_STATE_GATE);
1435 		adev->hdp.funcs->update_clock_gating(adev,
1436 				state == AMD_CG_STATE_GATE);
1437 		adev->smuio.funcs->update_rom_clock_gating(adev,
1438 				state == AMD_CG_STATE_GATE);
1439 		break;
1440 	default:
1441 		break;
1442 	}
1443 	return 0;
1444 }
1445 
/* Powergating control for the common block is not implemented yet. */
static int nv_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	/* TODO */
	return 0;
}
1452 
1453 static void nv_common_get_clockgating_state(void *handle, u32 *flags)
1454 {
1455 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1456 
1457 	if (amdgpu_sriov_vf(adev))
1458 		*flags = 0;
1459 
1460 	adev->nbio.funcs->get_clockgating_state(adev, flags);
1461 
1462 	adev->hdp.funcs->get_clock_gating_state(adev, flags);
1463 
1464 	adev->smuio.funcs->get_clock_gating_state(adev, flags);
1465 
1466 	return;
1467 }
1468 
/* amd_ip_funcs table for the NV common IP block — wires the lifecycle
 * and power-management callbacks defined above into the amdgpu IP
 * block framework.
 */
static const struct amd_ip_funcs nv_common_ip_funcs = {
	.name = "nv_common",
	.early_init = nv_common_early_init,
	.late_init = nv_common_late_init,
	.sw_init = nv_common_sw_init,
	.sw_fini = nv_common_sw_fini,
	.hw_init = nv_common_hw_init,
	.hw_fini = nv_common_hw_fini,
	.suspend = nv_common_suspend,
	.resume = nv_common_resume,
	.is_idle = nv_common_is_idle,
	.wait_for_idle = nv_common_wait_for_idle,
	.soft_reset = nv_common_soft_reset,
	.set_clockgating_state = nv_common_set_clockgating_state,
	.set_powergating_state = nv_common_set_powergating_state,
	.get_clockgating_state = nv_common_get_clockgating_state,
};
1486