xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c (revision 624e0d7f39cb5849016c2093e4ea620842e0cf8a)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
33 #include <linux/iommu.h>
34 #include <linux/pci.h>
35 #include <linux/pci-p2pdma.h>
36 #include <linux/apple-gmux.h>
37 
38 #include <drm/drm_aperture.h>
39 #include <drm/drm_atomic_helper.h>
40 #include <drm/drm_crtc_helper.h>
41 #include <drm/drm_fb_helper.h>
42 #include <drm/drm_probe_helper.h>
43 #include <drm/amdgpu_drm.h>
44 #include <linux/device.h>
45 #include <linux/vgaarb.h>
46 #include <linux/vga_switcheroo.h>
47 #include <linux/efi.h>
48 #include "amdgpu.h"
49 #include "amdgpu_trace.h"
50 #include "amdgpu_i2c.h"
51 #include "atom.h"
52 #include "amdgpu_atombios.h"
53 #include "amdgpu_atomfirmware.h"
54 #include "amd_pcie.h"
55 #ifdef CONFIG_DRM_AMDGPU_SI
56 #include "si.h"
57 #endif
58 #ifdef CONFIG_DRM_AMDGPU_CIK
59 #include "cik.h"
60 #endif
61 #include "vi.h"
62 #include "soc15.h"
63 #include "nv.h"
64 #include "bif/bif_4_1_d.h"
65 #include <linux/firmware.h>
66 #include "amdgpu_vf_error.h"
67 
68 #include "amdgpu_amdkfd.h"
69 #include "amdgpu_pm.h"
70 
71 #include "amdgpu_xgmi.h"
72 #include "amdgpu_ras.h"
73 #include "amdgpu_pmu.h"
74 #include "amdgpu_fru_eeprom.h"
75 #include "amdgpu_reset.h"
76 #include "amdgpu_virt.h"
77 
78 #include <linux/suspend.h>
79 #include <drm/task_barrier.h>
80 #include <linux/pm_runtime.h>
81 
82 #include <drm/drm_drv.h>
83 
84 #if IS_ENABLED(CONFIG_X86)
85 #include <asm/intel-family.h>
86 #endif
87 
88 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
89 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
90 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
91 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
92 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
93 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
94 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
95 
96 #define AMDGPU_RESUME_MS		2000
97 #define AMDGPU_MAX_RETRY_LIMIT		2
98 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
99 #define AMDGPU_PCIE_INDEX_FALLBACK (0x38 >> 2)
100 #define AMDGPU_PCIE_INDEX_HI_FALLBACK (0x44 >> 2)
101 #define AMDGPU_PCIE_DATA_FALLBACK (0x3C >> 2)
102 
103 static const struct drm_driver amdgpu_kms_driver;
104 
105 const char *amdgpu_asic_name[] = {
106 	"TAHITI",
107 	"PITCAIRN",
108 	"VERDE",
109 	"OLAND",
110 	"HAINAN",
111 	"BONAIRE",
112 	"KAVERI",
113 	"KABINI",
114 	"HAWAII",
115 	"MULLINS",
116 	"TOPAZ",
117 	"TONGA",
118 	"FIJI",
119 	"CARRIZO",
120 	"STONEY",
121 	"POLARIS10",
122 	"POLARIS11",
123 	"POLARIS12",
124 	"VEGAM",
125 	"VEGA10",
126 	"VEGA12",
127 	"VEGA20",
128 	"RAVEN",
129 	"ARCTURUS",
130 	"RENOIR",
131 	"ALDEBARAN",
132 	"NAVI10",
133 	"CYAN_SKILLFISH",
134 	"NAVI14",
135 	"NAVI12",
136 	"SIENNA_CICHLID",
137 	"NAVY_FLOUNDER",
138 	"VANGOGH",
139 	"DIMGREY_CAVEFISH",
140 	"BEIGE_GOBY",
141 	"YELLOW_CARP",
142 	"IP DISCOVERY",
143 	"LAST",
144 };
145 
146 /**
147  * DOC: pcie_replay_count
148  *
149  * The amdgpu driver provides a sysfs API for reporting the total number
150  * of PCIe replays (NAKs).
151  * The file pcie_replay_count is used for this and returns the total
152  * number of replays as the sum of the NAKs generated and the NAKs received.
153  */
154 
155 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
156 		struct device_attribute *attr, char *buf)
157 {
158 	struct drm_device *ddev = dev_get_drvdata(dev);
159 	struct amdgpu_device *adev = drm_to_adev(ddev);
160 	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
161 
162 	return sysfs_emit(buf, "%llu\n", cnt);
163 }
164 
165 static DEVICE_ATTR(pcie_replay_count, 0444,
166 		amdgpu_device_get_pcie_replay_count, NULL);
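
/*
 * Usage sketch (illustrative, not part of the driver): pcie_replay_count
 * appears under the PCI device's sysfs directory; the card0 path below is
 * an assumption and depends on the system. A minimal userspace reader:
 *
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *           unsigned long long cnt;
 *           FILE *f = fopen("/sys/class/drm/card0/device/pcie_replay_count", "r");
 *
 *           if (!f)
 *                   return 1;
 *           if (fscanf(f, "%llu", &cnt) == 1)
 *                   printf("PCIe replay count: %llu\n", cnt);
 *           fclose(f);
 *           return 0;
 *   }
 */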
167 
168 static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj,
169 					  struct bin_attribute *attr, char *buf,
170 					  loff_t ppos, size_t count)
171 {
172 	struct device *dev = kobj_to_dev(kobj);
173 	struct drm_device *ddev = dev_get_drvdata(dev);
174 	struct amdgpu_device *adev = drm_to_adev(ddev);
175 	ssize_t bytes_read;
176 
177 	switch (ppos) {
178 	case AMDGPU_SYS_REG_STATE_XGMI:
179 		bytes_read = amdgpu_asic_get_reg_state(
180 			adev, AMDGPU_REG_STATE_TYPE_XGMI, buf, count);
181 		break;
182 	case AMDGPU_SYS_REG_STATE_WAFL:
183 		bytes_read = amdgpu_asic_get_reg_state(
184 			adev, AMDGPU_REG_STATE_TYPE_WAFL, buf, count);
185 		break;
186 	case AMDGPU_SYS_REG_STATE_PCIE:
187 		bytes_read = amdgpu_asic_get_reg_state(
188 			adev, AMDGPU_REG_STATE_TYPE_PCIE, buf, count);
189 		break;
190 	case AMDGPU_SYS_REG_STATE_USR:
191 		bytes_read = amdgpu_asic_get_reg_state(
192 			adev, AMDGPU_REG_STATE_TYPE_USR, buf, count);
193 		break;
194 	case AMDGPU_SYS_REG_STATE_USR_1:
195 		bytes_read = amdgpu_asic_get_reg_state(
196 			adev, AMDGPU_REG_STATE_TYPE_USR_1, buf, count);
197 		break;
198 	default:
199 		return -EINVAL;
200 	}
201 
202 	return bytes_read;
203 }
204 
205 BIN_ATTR(reg_state, 0444, amdgpu_sysfs_reg_state_get, NULL,
206 	 AMDGPU_SYS_REG_STATE_END);
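
/*
 * Usage sketch (illustrative, not part of the driver): the reg_state
 * binary attribute dispatches on the read offset, so userspace selects a
 * partition (XGMI/WAFL/PCIE/USR) via the offset passed to pread(). The
 * sysfs path and the offset value 0 (assumed here to correspond to
 * AMDGPU_SYS_REG_STATE_XGMI) are illustrative assumptions:
 *
 *   #include <fcntl.h>
 *   #include <stdio.h>
 *   #include <unistd.h>
 *
 *   int main(void)
 *   {
 *           char buf[4096];
 *           ssize_t n;
 *           int fd = open("/sys/class/drm/card0/device/reg_state", O_RDONLY);
 *
 *           if (fd < 0)
 *                   return 1;
 *           n = pread(fd, buf, sizeof(buf), 0);  // offset selects the state type
 *           if (n > 0)
 *                   fwrite(buf, 1, n, stdout);
 *           close(fd);
 *           return 0;
 *   }
 */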
207 
208 int amdgpu_reg_state_sysfs_init(struct amdgpu_device *adev)
209 {
210 	int ret;
211 
212 	if (!amdgpu_asic_get_reg_state_supported(adev))
213 		return 0;
214 
215 	ret = sysfs_create_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
216 
217 	return ret;
218 }
219 
220 void amdgpu_reg_state_sysfs_fini(struct amdgpu_device *adev)
221 {
222 	if (!amdgpu_asic_get_reg_state_supported(adev))
223 		return;
224 	sysfs_remove_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
225 }
226 
227 /**
228  * DOC: board_info
229  *
230  * The amdgpu driver provides a sysfs API for reporting board-related information.
231  * It provides the form factor information in the format
232  *
233  *   type : form factor
234  *
235  * Possible form factor values
236  *
237  * - "cem"		- PCIE CEM card
238  * - "oam"		- Open Compute Accelerator Module
239  * - "unknown"	- Not known
240  *
241  */
242 
243 static ssize_t amdgpu_device_get_board_info(struct device *dev,
244 					    struct device_attribute *attr,
245 					    char *buf)
246 {
247 	struct drm_device *ddev = dev_get_drvdata(dev);
248 	struct amdgpu_device *adev = drm_to_adev(ddev);
249 	enum amdgpu_pkg_type pkg_type = AMDGPU_PKG_TYPE_CEM;
250 	const char *pkg;
251 
252 	if (adev->smuio.funcs && adev->smuio.funcs->get_pkg_type)
253 		pkg_type = adev->smuio.funcs->get_pkg_type(adev);
254 
255 	switch (pkg_type) {
256 	case AMDGPU_PKG_TYPE_CEM:
257 		pkg = "cem";
258 		break;
259 	case AMDGPU_PKG_TYPE_OAM:
260 		pkg = "oam";
261 		break;
262 	default:
263 		pkg = "unknown";
264 		break;
265 	}
266 
267 	return sysfs_emit(buf, "%s : %s\n", "type", pkg);
268 }
269 
270 static DEVICE_ATTR(board_info, 0444, amdgpu_device_get_board_info, NULL);
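
/*
 * Example output (illustrative; the sysfs path is an assumption, the line
 * format is exactly the "%s : %s\n" emitted above):
 *
 *   $ cat /sys/class/drm/card0/device/board_info
 *   type : oam
 */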
271 
272 static struct attribute *amdgpu_board_attrs[] = {
273 	&dev_attr_board_info.attr,
274 	NULL,
275 };
276 
277 static umode_t amdgpu_board_attrs_is_visible(struct kobject *kobj,
278 					     struct attribute *attr, int n)
279 {
280 	struct device *dev = kobj_to_dev(kobj);
281 	struct drm_device *ddev = dev_get_drvdata(dev);
282 	struct amdgpu_device *adev = drm_to_adev(ddev);
283 
284 	if (adev->flags & AMD_IS_APU)
285 		return 0;
286 
287 	return attr->mode;
288 }
289 
290 static const struct attribute_group amdgpu_board_attrs_group = {
291 	.attrs = amdgpu_board_attrs,
292 	.is_visible = amdgpu_board_attrs_is_visible
293 };
294 
295 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
296 
297 
298 /**
299  * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
300  *
301  * @dev: drm_device pointer
302  *
303  * Returns true if the device is a dGPU with ATPX power control,
304  * otherwise returns false.
305  */
306 bool amdgpu_device_supports_px(struct drm_device *dev)
307 {
308 	struct amdgpu_device *adev = drm_to_adev(dev);
309 
310 	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
311 		return true;
312 	return false;
313 }
314 
315 /**
316  * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
317  *
318  * @dev: drm_device pointer
319  *
320  * Returns true if the device is a dGPU with ACPI power control,
321  * otherwise returns false.
322  */
323 bool amdgpu_device_supports_boco(struct drm_device *dev)
324 {
325 	struct amdgpu_device *adev = drm_to_adev(dev);
326 
327 	if (adev->has_pr3 ||
328 	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
329 		return true;
330 	return false;
331 }
332 
333 /**
334  * amdgpu_device_supports_baco - Does the device support BACO
335  *
336  * @dev: drm_device pointer
337  *
338  * Returns true if the device supports BACO,
339  * otherwise returns false.
340  */
341 bool amdgpu_device_supports_baco(struct drm_device *dev)
342 {
343 	struct amdgpu_device *adev = drm_to_adev(dev);
344 
345 	return amdgpu_asic_supports_baco(adev);
346 }
347 
348 /**
349  * amdgpu_device_supports_smart_shift - Is the device a dGPU with
350  * smart shift support
351  *
352  * @dev: drm_device pointer
353  *
354  * Returns true if the device is a dGPU with Smart Shift support,
355  * otherwise returns false.
356  */
357 bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
358 {
359 	return (amdgpu_device_supports_boco(dev) &&
360 		amdgpu_acpi_is_power_shift_control_supported());
361 }
362 
363 /*
364  * VRAM access helper functions
365  */
366 
367 /**
368  * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
369  *
370  * @adev: amdgpu_device pointer
371  * @pos: offset of the buffer in vram
372  * @buf: virtual address of the buffer in system memory
373  * @size: read/write size in bytes; the buffer at @buf must be at least @size bytes
374  * @write: true - write to vram, otherwise - read from vram
375  */
376 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
377 			     void *buf, size_t size, bool write)
378 {
379 	unsigned long flags;
380 	uint32_t hi = ~0, tmp = 0;
381 	uint32_t *data = buf;
382 	uint64_t last;
383 	int idx;
384 
385 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
386 		return;
387 
388 	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
389 
390 	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
391 	for (last = pos + size; pos < last; pos += 4) {
392 		tmp = pos >> 31;
393 
394 		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
395 		if (tmp != hi) {
396 			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
397 			hi = tmp;
398 		}
399 		if (write)
400 			WREG32_NO_KIQ(mmMM_DATA, *data++);
401 		else
402 			*data++ = RREG32_NO_KIQ(mmMM_DATA);
403 	}
404 
405 	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
406 	drm_dev_exit(idx);
407 }
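
/*
 * Access pattern sketch: MM_INDEX takes the low 31 bits of the VRAM offset
 * with bit 31 forced on to select the extended aperture, and MM_INDEX_HI
 * takes the offset bits from bit 31 upward (pos >> 31). A worked example
 * with an illustrative offset:
 *
 *   pos         = 0x123456784ULL
 *   MM_INDEX    = 0x23456784 | 0x80000000 = 0xA3456784
 *   MM_INDEX_HI = pos >> 31              = 0x2
 */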
408 
409 /**
410  * amdgpu_device_aper_access - access vram by the vram aperture
411  *
412  * @adev: amdgpu_device pointer
413  * @pos: offset of the buffer in vram
414  * @buf: virtual address of the buffer in system memory
415  * @size: read/write size in bytes; the buffer at @buf must be at least @size bytes
416  * @write: true - write to vram, otherwise - read from vram
417  *
418  * Returns the number of bytes transferred.
419  */
420 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
421 				 void *buf, size_t size, bool write)
422 {
423 #ifdef CONFIG_64BIT
424 	void __iomem *addr;
425 	size_t count = 0;
426 	uint64_t last;
427 
428 	if (!adev->mman.aper_base_kaddr)
429 		return 0;
430 
431 	last = min(pos + size, adev->gmc.visible_vram_size);
432 	if (last > pos) {
433 		addr = adev->mman.aper_base_kaddr + pos;
434 		count = last - pos;
435 
436 		if (write) {
437 			memcpy_toio(addr, buf, count);
438 			/* Make sure HDP write cache flush happens without any reordering
439 			 * after the system memory contents are sent over PCIe device
440 			 */
441 			mb();
442 			amdgpu_device_flush_hdp(adev, NULL);
443 		} else {
444 			amdgpu_device_invalidate_hdp(adev, NULL);
445 			/* Make sure HDP read cache is invalidated before issuing a read
446 			 * to the PCIe device
447 			 */
448 			mb();
449 			memcpy_fromio(buf, addr, count);
450 		}
451 
452 	}
453 
454 	return count;
455 #else
456 	return 0;
457 #endif
458 }
459 
460 /**
461  * amdgpu_device_vram_access - read/write a buffer in vram
462  *
463  * @adev: amdgpu_device pointer
464  * @pos: offset of the buffer in vram
465  * @buf: virtual address of the buffer in system memory
466  * @size: read/write size in bytes; the buffer at @buf must be at least @size bytes
467  * @write: true - write to vram, otherwise - read from vram
468  */
469 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
470 			       void *buf, size_t size, bool write)
471 {
472 	size_t count;
473 
474 	/* try using the vram aperture to access vram first */
475 	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
476 	size -= count;
477 	if (size) {
478 		/* use MM_INDEX/MM_DATA to access the rest of vram */
479 		pos += count;
480 		buf += count;
481 		amdgpu_device_mm_access(adev, pos, buf, size, write);
482 	}
483 }
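
/*
 * Usage sketch (assumes a valid adev; @pos and @size must stay 4-byte
 * aligned for the MM_INDEX fallback path):
 *
 *   u32 val;
 *
 *   amdgpu_device_vram_access(adev, 0x1000, &val, sizeof(val), false);
 */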
484 
485 /*
486  * register access helper functions.
487  */
488 
489 /* Check if hw access should be skipped because of hotplug or device error */
490 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
491 {
492 	if (adev->no_hw_access)
493 		return true;
494 
495 #ifdef CONFIG_LOCKDEP
496 	/*
497 	 * This is a bit complicated to understand, so worth a comment. What we assert
498 	 * here is that the GPU reset is not running on another thread in parallel.
499 	 *
500 	 * For this we trylock the read side of the reset semaphore, if that succeeds
501  * we know that the reset is not running in parallel.
502 	 *
503 	 * If the trylock fails we assert that we are either already holding the read
504 	 * side of the lock or are the reset thread itself and hold the write side of
505 	 * the lock.
506 	 */
507 	if (in_task()) {
508 		if (down_read_trylock(&adev->reset_domain->sem))
509 			up_read(&adev->reset_domain->sem);
510 		else
511 			lockdep_assert_held(&adev->reset_domain->sem);
512 	}
513 #endif
514 	return false;
515 }
516 
517 /**
518  * amdgpu_device_rreg - read a memory mapped IO or indirect register
519  *
520  * @adev: amdgpu_device pointer
521  * @reg: dword aligned register offset
522  * @acc_flags: access flags which require special behavior
523  *
524  * Returns the 32 bit value from the offset specified.
525  */
526 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
527 			    uint32_t reg, uint32_t acc_flags)
528 {
529 	uint32_t ret;
530 
531 	if (amdgpu_device_skip_hw_access(adev))
532 		return 0;
533 
534 	if ((reg * 4) < adev->rmmio_size) {
535 		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
536 		    amdgpu_sriov_runtime(adev) &&
537 		    down_read_trylock(&adev->reset_domain->sem)) {
538 			ret = amdgpu_kiq_rreg(adev, reg, 0);
539 			up_read(&adev->reset_domain->sem);
540 		} else {
541 			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
542 		}
543 	} else {
544 		ret = adev->pcie_rreg(adev, reg * 4);
545 	}
546 
547 	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
548 
549 	return ret;
550 }
551 
552 /*
553  * MMIO register read helper functions that take a byte offset
554  * @offset: byte offset from MMIO start
555  */
556 
557 /**
558  * amdgpu_mm_rreg8 - read a memory mapped IO register
559  *
560  * @adev: amdgpu_device pointer
561  * @offset: byte aligned register offset
562  *
563  * Returns the 8 bit value from the offset specified.
564  */
565 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
566 {
567 	if (amdgpu_device_skip_hw_access(adev))
568 		return 0;
569 
570 	if (offset < adev->rmmio_size)
571 		return (readb(adev->rmmio + offset));
572 	BUG();
573 }
574 
575 
576 /**
577  * amdgpu_device_xcc_rreg - read a memory mapped IO or indirect register with specific XCC
578  *
579  * @adev: amdgpu_device pointer
580  * @reg: dword aligned register offset
581  * @acc_flags: access flags which require special behavior
582  * @xcc_id: xcc accelerated compute core id
583  *
584  * Returns the 32 bit value from the offset specified.
585  */
586 uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev,
587 				uint32_t reg, uint32_t acc_flags,
588 				uint32_t xcc_id)
589 {
590 	uint32_t ret, rlcg_flag;
591 
592 	if (amdgpu_device_skip_hw_access(adev))
593 		return 0;
594 
595 	if ((reg * 4) < adev->rmmio_size) {
596 		if (amdgpu_sriov_vf(adev) &&
597 		    !amdgpu_sriov_runtime(adev) &&
598 		    adev->gfx.rlc.rlcg_reg_access_supported &&
599 		    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
600 							 GC_HWIP, false,
601 							 &rlcg_flag)) {
602 			ret = amdgpu_virt_rlcg_reg_rw(adev, reg, 0, rlcg_flag, xcc_id);
603 		} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
604 		    amdgpu_sriov_runtime(adev) &&
605 		    down_read_trylock(&adev->reset_domain->sem)) {
606 			ret = amdgpu_kiq_rreg(adev, reg, xcc_id);
607 			up_read(&adev->reset_domain->sem);
608 		} else {
609 			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
610 		}
611 	} else {
612 		ret = adev->pcie_rreg(adev, reg * 4);
613 	}
614 
615 	return ret;
616 }
617 
618 /*
619  * MMIO register write helper functions that take a byte offset
620  * @offset: byte offset from MMIO start
621  * @value: the value to be written to the register
622  */
623 
624 /**
625  * amdgpu_mm_wreg8 - write a memory mapped IO register
626  *
627  * @adev: amdgpu_device pointer
628  * @offset: byte aligned register offset
629  * @value: 8 bit value to write
630  *
631  * Writes the value specified to the offset specified.
632  */
633 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
634 {
635 	if (amdgpu_device_skip_hw_access(adev))
636 		return;
637 
638 	if (offset < adev->rmmio_size)
639 		writeb(value, adev->rmmio + offset);
640 	else
641 		BUG();
642 }
643 
644 /**
645  * amdgpu_device_wreg - write to a memory mapped IO or indirect register
646  *
647  * @adev: amdgpu_device pointer
648  * @reg: dword aligned register offset
649  * @v: 32 bit value to write to the register
650  * @acc_flags: access flags which require special behavior
651  *
652  * Writes the value specified to the offset specified.
653  */
654 void amdgpu_device_wreg(struct amdgpu_device *adev,
655 			uint32_t reg, uint32_t v,
656 			uint32_t acc_flags)
657 {
658 	if (amdgpu_device_skip_hw_access(adev))
659 		return;
660 
661 	if ((reg * 4) < adev->rmmio_size) {
662 		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
663 		    amdgpu_sriov_runtime(adev) &&
664 		    down_read_trylock(&adev->reset_domain->sem)) {
665 			amdgpu_kiq_wreg(adev, reg, v, 0);
666 			up_read(&adev->reset_domain->sem);
667 		} else {
668 			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
669 		}
670 	} else {
671 		adev->pcie_wreg(adev, reg * 4, v);
672 	}
673 
674 	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
675 }
676 
677 /**
678  * amdgpu_mm_wreg_mmio_rlc -  write register either with direct/indirect mmio or with RLC path if in range
679  *
680  * @adev: amdgpu_device pointer
681  * @reg: mmio/rlc register
682  * @v: value to write
683  * @xcc_id: xcc accelerated compute core id
684  *
685  * This function is invoked only for debugfs register access.
686  */
687 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
688 			     uint32_t reg, uint32_t v,
689 			     uint32_t xcc_id)
690 {
691 	if (amdgpu_device_skip_hw_access(adev))
692 		return;
693 
694 	if (amdgpu_sriov_fullaccess(adev) &&
695 	    adev->gfx.rlc.funcs &&
696 	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
697 		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
698 			return amdgpu_sriov_wreg(adev, reg, v, 0, 0, xcc_id);
699 	} else if ((reg * 4) >= adev->rmmio_size) {
700 		adev->pcie_wreg(adev, reg * 4, v);
701 	} else {
702 		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
703 	}
704 }
705 
706 /**
707  * amdgpu_device_xcc_wreg - write to a memory mapped IO or indirect register with specific XCC
708  *
709  * @adev: amdgpu_device pointer
710  * @reg: dword aligned register offset
711  * @v: 32 bit value to write to the register
712  * @acc_flags: access flags which require special behavior
713  * @xcc_id: xcc accelerated compute core id
714  *
715  * Writes the value specified to the offset specified.
716  */
717 void amdgpu_device_xcc_wreg(struct amdgpu_device *adev,
718 			uint32_t reg, uint32_t v,
719 			uint32_t acc_flags, uint32_t xcc_id)
720 {
721 	uint32_t rlcg_flag;
722 
723 	if (amdgpu_device_skip_hw_access(adev))
724 		return;
725 
726 	if ((reg * 4) < adev->rmmio_size) {
727 		if (amdgpu_sriov_vf(adev) &&
728 		    !amdgpu_sriov_runtime(adev) &&
729 		    adev->gfx.rlc.rlcg_reg_access_supported &&
730 		    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
731 							 GC_HWIP, true,
732 							 &rlcg_flag)) {
733 			amdgpu_virt_rlcg_reg_rw(adev, reg, v, rlcg_flag, xcc_id);
734 		} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
735 		    amdgpu_sriov_runtime(adev) &&
736 		    down_read_trylock(&adev->reset_domain->sem)) {
737 			amdgpu_kiq_wreg(adev, reg, v, xcc_id);
738 			up_read(&adev->reset_domain->sem);
739 		} else {
740 			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
741 		}
742 	} else {
743 		adev->pcie_wreg(adev, reg * 4, v);
744 	}
745 }
746 
747 /**
748  * amdgpu_device_indirect_rreg - read an indirect register
749  *
750  * @adev: amdgpu_device pointer
751  * @reg_addr: indirect register address to read from
752  *
753  * Returns the value of indirect register @reg_addr
754  */
755 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
756 				u32 reg_addr)
757 {
758 	unsigned long flags, pcie_index, pcie_data;
759 	void __iomem *pcie_index_offset;
760 	void __iomem *pcie_data_offset;
761 	u32 r;
762 
763 	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
764 	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
765 
766 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
767 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
768 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
769 
770 	writel(reg_addr, pcie_index_offset);
771 	readl(pcie_index_offset);
772 	r = readl(pcie_data_offset);
773 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
774 
775 	return r;
776 }
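
/*
 * The index/data pair above follows the usual posted-write discipline:
 * every writel() to the index register is followed by a readl() of the
 * same register so the write reaches the device before the data register
 * is touched. The core pattern, with hypothetical index/data pointers:
 *
 *   writel(reg_addr, index);  // select the indirect register
 *   readl(index);             // flush the posted write
 *   val = readl(data);        // read the selected register
 */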
777 
778 u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
779 				    u64 reg_addr)
780 {
781 	unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
782 	u32 r;
783 	void __iomem *pcie_index_offset;
784 	void __iomem *pcie_index_hi_offset;
785 	void __iomem *pcie_data_offset;
786 
787 	if (unlikely(!adev->nbio.funcs)) {
788 		pcie_index = AMDGPU_PCIE_INDEX_FALLBACK;
789 		pcie_data = AMDGPU_PCIE_DATA_FALLBACK;
790 	} else {
791 		pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
792 		pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
793 	}
794 
795 	if (reg_addr >> 32) {
796 		if (unlikely(!adev->nbio.funcs))
797 			pcie_index_hi = AMDGPU_PCIE_INDEX_HI_FALLBACK;
798 		else
799 			pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
800 	} else {
801 		pcie_index_hi = 0;
802 	}
803 
804 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
805 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
806 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
807 	if (pcie_index_hi != 0)
808 		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
809 				pcie_index_hi * 4;
810 
811 	writel(reg_addr, pcie_index_offset);
812 	readl(pcie_index_offset);
813 	if (pcie_index_hi != 0) {
814 		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
815 		readl(pcie_index_hi_offset);
816 	}
817 	r = readl(pcie_data_offset);
818 
819 	/* clear the high bits */
820 	if (pcie_index_hi != 0) {
821 		writel(0, pcie_index_hi_offset);
822 		readl(pcie_index_hi_offset);
823 	}
824 
825 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
826 
827 	return r;
828 }
829 
830 /**
831  * amdgpu_device_indirect_rreg64 - read a 64-bit indirect register
832  *
833  * @adev: amdgpu_device pointer
834  * @reg_addr: indirect register address to read from
835  *
836  * Returns the value of indirect register @reg_addr
837  */
838 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
839 				  u32 reg_addr)
840 {
841 	unsigned long flags, pcie_index, pcie_data;
842 	void __iomem *pcie_index_offset;
843 	void __iomem *pcie_data_offset;
844 	u64 r;
845 
846 	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
847 	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
848 
849 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
850 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
851 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
852 
853 	/* read low 32 bits */
854 	writel(reg_addr, pcie_index_offset);
855 	readl(pcie_index_offset);
856 	r = readl(pcie_data_offset);
857 	/* read high 32 bits */
858 	writel(reg_addr + 4, pcie_index_offset);
859 	readl(pcie_index_offset);
860 	r |= ((u64)readl(pcie_data_offset) << 32);
861 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
862 
863 	return r;
864 }
865 
866 u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev,
867 				  u64 reg_addr)
868 {
869 	unsigned long flags, pcie_index, pcie_data;
870 	unsigned long pcie_index_hi = 0;
871 	void __iomem *pcie_index_offset;
872 	void __iomem *pcie_index_hi_offset;
873 	void __iomem *pcie_data_offset;
874 	u64 r;
875 
876 	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
877 	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
878 	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
879 		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
880 
881 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
882 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
883 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
884 	if (pcie_index_hi != 0)
885 		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
886 			pcie_index_hi * 4;
887 
888 	/* read low 32 bits */
889 	writel(reg_addr, pcie_index_offset);
890 	readl(pcie_index_offset);
891 	if (pcie_index_hi != 0) {
892 		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
893 		readl(pcie_index_hi_offset);
894 	}
895 	r = readl(pcie_data_offset);
896 	/* read high 32 bits */
897 	writel(reg_addr + 4, pcie_index_offset);
898 	readl(pcie_index_offset);
899 	if (pcie_index_hi != 0) {
900 		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
901 		readl(pcie_index_hi_offset);
902 	}
903 	r |= ((u64)readl(pcie_data_offset) << 32);
904 
905 	/* clear the high bits */
906 	if (pcie_index_hi != 0) {
907 		writel(0, pcie_index_hi_offset);
908 		readl(pcie_index_hi_offset);
909 	}
910 
911 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
912 
913 	return r;
914 }
915 
916 /**
917  * amdgpu_device_indirect_wreg - write to an indirect register
918  *
919  * @adev: amdgpu_device pointer
920  * @reg_addr: indirect register offset
921  * @reg_data: indirect register data
922  *
923  */
924 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
925 				 u32 reg_addr, u32 reg_data)
926 {
927 	unsigned long flags, pcie_index, pcie_data;
928 	void __iomem *pcie_index_offset;
929 	void __iomem *pcie_data_offset;
930 
931 	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
932 	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
933 
934 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
935 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
936 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
937 
938 	writel(reg_addr, pcie_index_offset);
939 	readl(pcie_index_offset);
940 	writel(reg_data, pcie_data_offset);
941 	readl(pcie_data_offset);
942 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
943 }
944 
945 void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
946 				     u64 reg_addr, u32 reg_data)
947 {
948 	unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
949 	void __iomem *pcie_index_offset;
950 	void __iomem *pcie_index_hi_offset;
951 	void __iomem *pcie_data_offset;
952 
953 	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
954 	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
955 	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
956 		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
957 	else
958 		pcie_index_hi = 0;
959 
960 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
961 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
962 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
963 	if (pcie_index_hi != 0)
964 		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
965 				pcie_index_hi * 4;
966 
967 	writel(reg_addr, pcie_index_offset);
968 	readl(pcie_index_offset);
969 	if (pcie_index_hi != 0) {
970 		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
971 		readl(pcie_index_hi_offset);
972 	}
973 	writel(reg_data, pcie_data_offset);
974 	readl(pcie_data_offset);
975 
976 	/* clear the high bits */
977 	if (pcie_index_hi != 0) {
978 		writel(0, pcie_index_hi_offset);
979 		readl(pcie_index_hi_offset);
980 	}
981 
982 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
983 }
984 
985 /**
986  * amdgpu_device_indirect_wreg64 - write to a 64-bit indirect register
987  *
988  * @adev: amdgpu_device pointer
989  * @reg_addr: indirect register offset
990  * @reg_data: indirect register data
991  *
992  */
993 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
994 				   u32 reg_addr, u64 reg_data)
995 {
996 	unsigned long flags, pcie_index, pcie_data;
997 	void __iomem *pcie_index_offset;
998 	void __iomem *pcie_data_offset;
999 
1000 	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
1001 	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
1002 
1003 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
1004 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
1005 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
1006 
1007 	/* write low 32 bits */
1008 	writel(reg_addr, pcie_index_offset);
1009 	readl(pcie_index_offset);
1010 	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
1011 	readl(pcie_data_offset);
1012 	/* write high 32 bits */
1013 	writel(reg_addr + 4, pcie_index_offset);
1014 	readl(pcie_index_offset);
1015 	writel((u32)(reg_data >> 32), pcie_data_offset);
1016 	readl(pcie_data_offset);
1017 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
1018 }
1019 
1020 void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
1021 				   u64 reg_addr, u64 reg_data)
1022 {
1023 	unsigned long flags, pcie_index, pcie_data;
1024 	unsigned long pcie_index_hi = 0;
1025 	void __iomem *pcie_index_offset;
1026 	void __iomem *pcie_index_hi_offset;
1027 	void __iomem *pcie_data_offset;
1028 
1029 	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
1030 	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
1031 	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
1032 		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
1033 
1034 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
1035 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
1036 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
1037 	if (pcie_index_hi != 0)
1038 		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
1039 				pcie_index_hi * 4;
1040 
1041 	/* write low 32 bits */
1042 	writel(reg_addr, pcie_index_offset);
1043 	readl(pcie_index_offset);
1044 	if (pcie_index_hi != 0) {
1045 		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
1046 		readl(pcie_index_hi_offset);
1047 	}
1048 	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
1049 	readl(pcie_data_offset);
1050 	/* write high 32 bits */
1051 	writel(reg_addr + 4, pcie_index_offset);
1052 	readl(pcie_index_offset);
1053 	if (pcie_index_hi != 0) {
1054 		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
1055 		readl(pcie_index_hi_offset);
1056 	}
1057 	writel((u32)(reg_data >> 32), pcie_data_offset);
1058 	readl(pcie_data_offset);
1059 
1060 	/* clear the high bits */
1061 	if (pcie_index_hi != 0) {
1062 		writel(0, pcie_index_hi_offset);
1063 		readl(pcie_index_hi_offset);
1064 	}
1065 
1066 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
1067 }
1068 
1069 /**
1070  * amdgpu_device_get_rev_id - query device rev_id
1071  *
1072  * @adev: amdgpu_device pointer
1073  *
1074  * Return device rev_id
1075  */
1076 u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
1077 {
1078 	return adev->nbio.funcs->get_rev_id(adev);
1079 }
1080 
1081 /**
1082  * amdgpu_invalid_rreg - dummy reg read function
1083  *
1084  * @adev: amdgpu_device pointer
1085  * @reg: offset of register
1086  *
1087  * Dummy register read function.  Used for register blocks
1088  * that certain asics don't have (all asics).
1089  * Returns the value in the register.
1090  */
1091 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
1092 {
1093 	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
1094 	BUG();
1095 	return 0;
1096 }
1097 
1098 static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg)
1099 {
1100 	DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
1101 	BUG();
1102 	return 0;
1103 }
1104 
1105 /**
1106  * amdgpu_invalid_wreg - dummy reg write function
1107  *
1108  * @adev: amdgpu_device pointer
1109  * @reg: offset of register
1110  * @v: value to write to the register
1111  *
1112  * Dummy register write function.  Used for register blocks
1113  * that certain asics don't have (all asics).
1114  */
1115 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
1116 {
1117 	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
1118 		  reg, v);
1119 	BUG();
1120 }
1121 
1122 static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v)
1123 {
1124 	DRM_ERROR("Invalid callback to write register 0x%llX with 0x%08X\n",
1125 		  reg, v);
1126 	BUG();
1127 }
1128 
1129 /**
1130  * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
1131  *
1132  * @adev: amdgpu_device pointer
1133  * @reg: offset of register
1134  *
1135  * Dummy register read function.  Used for register blocks
1136  * that certain asics don't have (all asics).
1137  * Returns the value in the register.
1138  */
1139 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
1140 {
1141 	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
1142 	BUG();
1143 	return 0;
1144 }
1145 
1146 static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t reg)
1147 {
1148 	DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
1149 	BUG();
1150 	return 0;
1151 }
1152 
1153 /**
1154  * amdgpu_invalid_wreg64 - dummy reg write function
1155  *
1156  * @adev: amdgpu_device pointer
1157  * @reg: offset of register
1158  * @v: value to write to the register
1159  *
1160  * Dummy register write function.  Used for register blocks
1161  * that certain asics don't have (all asics).
1162  */
1163 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
1164 {
1165 	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
1166 		  reg, v);
1167 	BUG();
1168 }
1169 
1170 static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg, uint64_t v)
1171 {
1172 	DRM_ERROR("Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
1173 		  reg, v);
1174 	BUG();
1175 }
1176 
1177 /**
1178  * amdgpu_block_invalid_rreg - dummy reg read function
1179  *
1180  * @adev: amdgpu_device pointer
1181  * @block: offset of instance
1182  * @reg: offset of register
1183  *
1184  * Dummy register read function.  Used for register blocks
1185  * that certain asics don't have (all asics).
1186  * Returns the value in the register.
1187  */
1188 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
1189 					  uint32_t block, uint32_t reg)
1190 {
1191 	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
1192 		  reg, block);
1193 	BUG();
1194 	return 0;
1195 }
1196 
1197 /**
1198  * amdgpu_block_invalid_wreg - dummy reg write function
1199  *
1200  * @adev: amdgpu_device pointer
1201  * @block: offset of instance
1202  * @reg: offset of register
1203  * @v: value to write to the register
1204  *
1205  * Dummy register write function.  Used for register blocks
1206  * that certain asics don't have (all asics).
1207  */
1208 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
1209 				      uint32_t block,
1210 				      uint32_t reg, uint32_t v)
1211 {
1212 	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
1213 		  reg, block, v);
1214 	BUG();
1215 }
1216 
1217 /**
1218  * amdgpu_device_asic_init - Wrapper for atom asic_init
1219  *
1220  * @adev: amdgpu_device pointer
1221  *
1222  * Does any asic specific work and then calls atom asic init.
1223  */
1224 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
1225 {
1226 	int ret;
1227 
1228 	amdgpu_asic_pre_asic_init(adev);
1229 
1230 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
1231 	    amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
1232 		amdgpu_psp_wait_for_bootloader(adev);
1233 		ret = amdgpu_atomfirmware_asic_init(adev, true);
1234 		return ret;
1235 	} else {
1236 		return amdgpu_atom_asic_init(adev->mode_info.atom_context);
1237 	}
1238 
1239 	return 0;
1240 }
1241 
1242 /**
1243  * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
1244  *
1245  * @adev: amdgpu_device pointer
1246  *
1247  * Allocates a scratch page of VRAM for use by various things in the
1248  * driver.
1249  */
1250 static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
1251 {
1252 	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
1253 				       AMDGPU_GEM_DOMAIN_VRAM |
1254 				       AMDGPU_GEM_DOMAIN_GTT,
1255 				       &adev->mem_scratch.robj,
1256 				       &adev->mem_scratch.gpu_addr,
1257 				       (void **)&adev->mem_scratch.ptr);
1258 }
1259 
1260 /**
1261  * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
1262  *
1263  * @adev: amdgpu_device pointer
1264  *
1265  * Frees the VRAM scratch page.
1266  */
1267 static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
1268 {
1269 	amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
1270 }
1271 
1272 /**
1273  * amdgpu_device_program_register_sequence - program an array of registers.
1274  *
1275  * @adev: amdgpu_device pointer
1276  * @registers: pointer to the register array
1277  * @array_size: size of the register array
1278  *
1279  * Programs an array of registers with AND and OR masks.
1280  * This is a helper for setting golden registers.
1281  */
1282 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
1283 					     const u32 *registers,
1284 					     const u32 array_size)
1285 {
1286 	u32 tmp, reg, and_mask, or_mask;
1287 	int i;
1288 
1289 	if (array_size % 3)
1290 		return;
1291 
1292 	for (i = 0; i < array_size; i += 3) {
1293 		reg = registers[i + 0];
1294 		and_mask = registers[i + 1];
1295 		or_mask = registers[i + 2];
1296 
1297 		if (and_mask == 0xffffffff) {
1298 			tmp = or_mask;
1299 		} else {
1300 			tmp = RREG32(reg);
1301 			tmp &= ~and_mask;
1302 			if (adev->family >= AMDGPU_FAMILY_AI)
1303 				tmp |= (or_mask & and_mask);
1304 			else
1305 				tmp |= or_mask;
1306 		}
1307 		WREG32(reg, tmp);
1308 	}
1309 }
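
/*
 * Usage sketch: golden settings are passed as {offset, and_mask, or_mask}
 * triples. The register names and values below are made up for
 * illustration only:
 *
 *   static const u32 golden_example[] = {
 *           mmFOO_CNTL, 0xffffffff, 0x00000001,  // and_mask all ones: plain overwrite
 *           mmBAR_CNTL, 0x0000ff00, 0x00003400,  // read-modify-write of one field
 *   };
 *
 *   amdgpu_device_program_register_sequence(adev, golden_example,
 *                                           ARRAY_SIZE(golden_example));
 */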
1310 
1311 /**
1312  * amdgpu_device_pci_config_reset - reset the GPU
1313  *
1314  * @adev: amdgpu_device pointer
1315  *
1316  * Resets the GPU using the pci config reset sequence.
1317  * Only applicable to asics prior to vega10.
1318  */
1319 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
1320 {
1321 	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
1322 }
1323 
1324 /**
1325  * amdgpu_device_pci_reset - reset the GPU using generic PCI means
1326  *
1327  * @adev: amdgpu_device pointer
1328  *
1329  * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
1330  */
1331 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
1332 {
1333 	return pci_reset_function(adev->pdev);
1334 }
1335 
1336 /*
1337  * amdgpu_device_wb_*()
1338  * Writeback is the method by which the GPU updates special pages in memory
1339  * with the status of certain GPU events (fences, ring pointers, etc.).
1340  */
1341 
1342 /**
1343  * amdgpu_device_wb_fini - Disable Writeback and free memory
1344  *
1345  * @adev: amdgpu_device pointer
1346  *
1347  * Disables Writeback and frees the Writeback memory (all asics).
1348  * Used at driver shutdown.
1349  */
1350 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1351 {
1352 	if (adev->wb.wb_obj) {
1353 		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1354 				      &adev->wb.gpu_addr,
1355 				      (void **)&adev->wb.wb);
1356 		adev->wb.wb_obj = NULL;
1357 	}
1358 }
1359 
1360 /**
1361  * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1362  *
1363  * @adev: amdgpu_device pointer
1364  *
1365  * Initializes writeback and allocates writeback memory (all asics).
1366  * Used at driver startup.
1367  * Returns 0 on success or a negative error code on failure.
1368  */
1369 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1370 {
1371 	int r;
1372 
1373 	if (adev->wb.wb_obj == NULL) {
1374 		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256-bit slots */
1375 		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1376 					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1377 					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
1378 					    (void **)&adev->wb.wb);
1379 		if (r) {
1380 			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1381 			return r;
1382 		}
1383 
1384 		adev->wb.num_wb = AMDGPU_MAX_WB;
1385 		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1386 
1387 		/* clear wb memory */
1388 		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1389 	}
1390 
1391 	return 0;
1392 }
1393 
1394 /**
1395  * amdgpu_device_wb_get - Allocate a wb entry
1396  *
1397  * @adev: amdgpu_device pointer
1398  * @wb: wb index
1399  *
1400  * Allocate a wb slot for use by the driver (all asics).
1401  * Returns 0 on success or -EINVAL on failure.
1402  */
1403 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1404 {
1405 	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1406 
1407 	if (offset < adev->wb.num_wb) {
1408 		__set_bit(offset, adev->wb.used);
1409 		*wb = offset << 3; /* convert to dw offset */
1410 		return 0;
1411 	} else {
1412 		return -EINVAL;
1413 	}
1414 }
1415 
1416 /**
1417  * amdgpu_device_wb_free - Free a wb entry
1418  *
1419  * @adev: amdgpu_device pointer
1420  * @wb: wb index
1421  *
1422  * Free a wb slot allocated for use by the driver (all asics)
1423  */
1424 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1425 {
1426 	wb >>= 3;
1427 	if (wb < adev->wb.num_wb)
1428 		__clear_bit(wb, adev->wb.used);
1429 }
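
/*
 * Usage sketch: a writeback slot is handed out as a dword offset into
 * adev->wb.wb, and each slot is 8 dwords (256 bits) wide:
 *
 *   u32 wb;
 *
 *   if (!amdgpu_device_wb_get(adev, &wb)) {
 *           u64 gpu_addr = adev->wb.gpu_addr + wb * 4;  // GPU address of the slot
 *           u32 val = adev->wb.wb[wb];                  // CPU-side readback
 *
 *           amdgpu_device_wb_free(adev, wb);
 *   }
 */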
1430 
1431 /**
1432  * amdgpu_device_resize_fb_bar - try to resize FB BAR
1433  *
1434  * @adev: amdgpu_device pointer
1435  *
1436  * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1437  * to fail, but if any of the BARs is not accessible after the resize we abort
1438  * driver loading by returning -ENODEV.
1439  */
1440 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1441 {
1442 	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1443 	struct pci_bus *root;
1444 	struct resource *res;
1445 	unsigned int i;
1446 	u16 cmd;
1447 	int r;
1448 
1449 	if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
1450 		return 0;
1451 
1452 	/* Bypass for VF */
1453 	if (amdgpu_sriov_vf(adev))
1454 		return 0;
1455 
1456 	/* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
1457 	if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
1458 		DRM_WARN("System can't access extended configuration space, please check!!\n");
1459 
1460 	/* skip if the bios has already enabled large BAR */
1461 	if (adev->gmc.real_vram_size &&
1462 	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1463 		return 0;
1464 
1465 	/* Check if the root BUS has 64bit memory resources */
1466 	root = adev->pdev->bus;
1467 	while (root->parent)
1468 		root = root->parent;
1469 
1470 	pci_bus_for_each_resource(root, res, i) {
1471 		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1472 		    res->start > 0x100000000ull)
1473 			break;
1474 	}
1475 
1476 	/* Trying to resize is pointless without a root hub window above 4GB */
1477 	if (!res)
1478 		return 0;
1479 
1480 	/* Limit the BAR size to what is available */
1481 	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1482 			rbar_size);
1483 
1484 	/* Disable memory decoding while we change the BAR addresses and size */
1485 	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1486 	pci_write_config_word(adev->pdev, PCI_COMMAND,
1487 			      cmd & ~PCI_COMMAND_MEMORY);
1488 
1489 	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
1490 	amdgpu_doorbell_fini(adev);
1491 	if (adev->asic_type >= CHIP_BONAIRE)
1492 		pci_release_resource(adev->pdev, 2);
1493 
1494 	pci_release_resource(adev->pdev, 0);
1495 
1496 	r = pci_resize_resource(adev->pdev, 0, rbar_size);
1497 	if (r == -ENOSPC)
1498 		DRM_INFO("Not enough PCI address space for a large BAR.");
1499 	else if (r && r != -ENOTSUPP)
1500 		DRM_ERROR("Problem resizing BAR0 (%d).", r);
1501 
1502 	pci_assign_unassigned_bus_resources(adev->pdev->bus);
1503 
1504 	/* When the doorbell or fb BAR isn't available we have no chance of
1505 	 * using the device.
1506 	 */
1507 	r = amdgpu_doorbell_init(adev);
1508 	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1509 		return -ENODEV;
1510 
1511 	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1512 
1513 	return 0;
1514 }
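
/*
 * Worked example (a sketch of the sizing math, assuming the standard
 * resizable BAR encoding): pci_rebar_bytes_to_size() maps a byte count to
 * the encoding log2(bytes) - 20 (1MB granularity), so an 8GB (2^33 byte)
 * VRAM part requests encoding 13; the request is then clamped to the
 * largest size advertised by pci_rebar_get_possible_sizes().
 */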
1515 
1516 static bool amdgpu_device_read_bios(struct amdgpu_device *adev)
1517 {
1518 	if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
1519 		return false;
1520 
1521 	return true;
1522 }
1523 
1524 /*
1525  * GPU helpers function.
1526  */
1527 /**
1528  * amdgpu_device_need_post - check if the hw need post or not
1529  *
1530  * @adev: amdgpu_device pointer
1531  *
1532  * Check if the asic has been initialized (all asics) at driver startup,
1533  * or if post is needed because a hw reset was performed.
1534  * Returns true if post is needed or false if not.
1535  */
1536 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1537 {
1538 	uint32_t reg;
1539 
1540 	if (amdgpu_sriov_vf(adev))
1541 		return false;
1542 
1543 	if (!amdgpu_device_read_bios(adev))
1544 		return false;
1545 
1546 	if (amdgpu_passthrough(adev)) {
1547 		/* For FIJI: in the whole GPU pass-through virtualization case, after a VM
1548 		 * reboot some old SMC firmware still needs the driver to do a vPost,
1549 		 * otherwise the GPU hangs. SMC firmware versions above 22.15 don't have
1550 		 * this flaw, so force vPost for SMC versions below 22.15.
1551 		 */
1552 		if (adev->asic_type == CHIP_FIJI) {
1553 			int err;
1554 			uint32_t fw_ver;
1555 
1556 			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1557 			/* force vPost if an error occurred */
1558 			if (err)
1559 				return true;
1560 
1561 			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1562 			release_firmware(adev->pm.fw);
1563 			if (fw_ver < 0x00160e00)
1564 				return true;
1565 		}
1566 	}
1567 
1568 	/* Don't post if we need to reset whole hive on init */
1569 	if (adev->gmc.xgmi.pending_reset)
1570 		return false;
1571 
1572 	if (adev->has_hw_reset) {
1573 		adev->has_hw_reset = false;
1574 		return true;
1575 	}
1576 
1577 	/* bios scratch used on CIK+ */
1578 	if (adev->asic_type >= CHIP_BONAIRE)
1579 		return amdgpu_atombios_scratch_need_asic_init(adev);
1580 
1581 	/* check MEM_SIZE for older asics */
1582 	reg = amdgpu_asic_get_config_memsize(adev);
1583 
1584 	if ((reg != 0) && (reg != 0xffffffff))
1585 		return false;
1586 
1587 	return true;
1588 }
1589 
1590 /*
1591  * Check whether seamless boot is supported.
1592  *
1593  * So far we only support seamless boot on DCE 3.0 or later.
1594  * If users report that it works on older ASICs as well, we may
1595  * loosen this.
1596  */
1597 bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
1598 {
1599 	switch (amdgpu_seamless) {
1600 	case -1:
1601 		break;
1602 	case 1:
1603 		return true;
1604 	case 0:
1605 		return false;
1606 	default:
1607 		DRM_ERROR("Invalid value for amdgpu.seamless: %d\n",
1608 			  amdgpu_seamless);
1609 		return false;
1610 	}
1611 
1612 	if (!(adev->flags & AMD_IS_APU))
1613 		return false;
1614 
1615 	if (adev->mman.keep_stolen_vga_memory)
1616 		return false;
1617 
1618 	return amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0);
1619 }
1620 
1621 /*
1622  * Intel hosts such as Rocket Lake, Alder Lake, Raptor Lake and Sapphire Rapids
1623  * don't support dynamic speed switching. Until we have confirmation from Intel
1624  * that a specific host supports it, it's safer that we keep it disabled for all.
1625  *
1626  * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
1627  * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
1628  */
1629 static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device *adev)
1630 {
1631 #if IS_ENABLED(CONFIG_X86)
1632 	struct cpuinfo_x86 *c = &cpu_data(0);
1633 
1634 	/* eGPU change speeds based on USB4 fabric conditions */
1635 	if (dev_is_removable(adev->dev))
1636 		return true;
1637 
1638 	if (c->x86_vendor == X86_VENDOR_INTEL)
1639 		return false;
1640 #endif
1641 	return true;
1642 }
1643 
1644 /**
1645  * amdgpu_device_should_use_aspm - check if the device should program ASPM
1646  *
1647  * @adev: amdgpu_device pointer
1648  *
1649  * Confirm whether the module parameter and pcie bridge agree that ASPM should
1650  * be set for this device.
1651  *
1652  * Returns true if it should be used or false if not.
1653  */
1654 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
1655 {
1656 	switch (amdgpu_aspm) {
1657 	case -1:
1658 		break;
1659 	case 0:
1660 		return false;
1661 	case 1:
1662 		return true;
1663 	default:
1664 		return false;
1665 	}
1666 	if (adev->flags & AMD_IS_APU)
1667 		return false;
1668 	if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK))
1669 		return false;
1670 	return pcie_aspm_enabled(adev->pdev);
1671 }
1672 
1673 /* if we get transitioned to only one device, take VGA back */
1674 /**
1675  * amdgpu_device_vga_set_decode - enable/disable vga decode
1676  *
1677  * @pdev: PCI device pointer
1678  * @state: enable/disable vga decode
1679  *
1680  * Enable/disable vga decode (all asics).
1681  * Returns VGA resource flags.
1682  */
1683 static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1684 		bool state)
1685 {
1686 	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1687 
1688 	amdgpu_asic_set_vga_state(adev, state);
1689 	if (state)
1690 		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1691 		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1692 	else
1693 		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1694 }
1695 
1696 /**
1697  * amdgpu_device_check_block_size - validate the vm block size
1698  *
1699  * @adev: amdgpu_device pointer
1700  *
1701  * Validates the vm block size specified via module parameter.
1702  * The vm block size defines the number of bits split between the page table
1703  * and the page directory: a page is 4KB (12-bit offset), the page table
1704  * covers a minimum of 9 bits, and the remaining bits go to the page directory.
1705  */
1706 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1707 {
1708 	/* defines number of bits in page table versus page directory,
1709 	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1710 	 * page table and the remaining bits are in the page directory
1711 	 */
1712 	if (amdgpu_vm_block_size == -1)
1713 		return;
1714 
1715 	if (amdgpu_vm_block_size < 9) {
1716 		dev_warn(adev->dev, "VM page table size (%d) too small\n",
1717 			 amdgpu_vm_block_size);
1718 		amdgpu_vm_block_size = -1;
1719 	}
1720 }
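
/*
 * Worked example (assuming a 48-bit VM address space for illustration):
 * a 4KB page leaves a 12-bit in-page offset; the minimum 9-bit block size
 * gives a 512-entry page table (2MB per page directory entry), and the
 * remaining 48 - 12 - 9 = 27 bits are resolved through the page directory
 * levels.
 */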
1721 
1722 /**
1723  * amdgpu_device_check_vm_size - validate the vm size
1724  *
1725  * @adev: amdgpu_device pointer
1726  *
1727  * Validates the vm size in GB specified via module parameter.
1728  * The VM size is the size of the GPU virtual memory space in GB.
1729  */
1730 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1731 {
1732 	/* no need to check the default value */
1733 	if (amdgpu_vm_size == -1)
1734 		return;
1735 
1736 	if (amdgpu_vm_size < 1) {
1737 		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1738 			 amdgpu_vm_size);
1739 		amdgpu_vm_size = -1;
1740 	}
1741 }
1742 
1743 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1744 {
1745 	struct sysinfo si;
1746 	bool is_os_64 = (sizeof(void *) == 8);
1747 	uint64_t total_memory;
1748 	uint64_t dram_size_seven_GB = 0x1B8000000;
1749 	uint64_t dram_size_three_GB = 0xB8000000;
1750 
1751 	if (amdgpu_smu_memory_pool_size == 0)
1752 		return;
1753 
1754 	if (!is_os_64) {
1755 		DRM_WARN("Not 64-bit OS, feature not supported\n");
1756 		goto def_value;
1757 	}
1758 	si_meminfo(&si);
1759 	total_memory = (uint64_t)si.totalram * si.mem_unit;
1760 
1761 	if ((amdgpu_smu_memory_pool_size == 1) ||
1762 		(amdgpu_smu_memory_pool_size == 2)) {
1763 		if (total_memory < dram_size_three_GB)
1764 			goto def_value1;
1765 	} else if ((amdgpu_smu_memory_pool_size == 4) ||
1766 		(amdgpu_smu_memory_pool_size == 8)) {
1767 		if (total_memory < dram_size_seven_GB)
1768 			goto def_value1;
1769 	} else {
1770 		DRM_WARN("Smu memory pool size not supported\n");
1771 		goto def_value;
1772 	}
1773 	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1774 
1775 	return;
1776 
1777 def_value1:
1778 	DRM_WARN("Not enough system memory\n");
1779 def_value:
1780 	adev->pm.smu_prv_buffer_size = 0;
1781 }
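
/*
 * Worked example: the pool size parameter is in 256MB (1 << 28 byte)
 * units, so amdgpu_smu_memory_pool_size == 2 reserves 2 << 28 = 512MB,
 * which is only accepted when total system memory is at least
 * 0xB8000000 bytes (roughly 3GB).
 */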
1782 
1783 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1784 {
1785 	if (!(adev->flags & AMD_IS_APU) ||
1786 	    adev->asic_type < CHIP_RAVEN)
1787 		return 0;
1788 
1789 	switch (adev->asic_type) {
1790 	case CHIP_RAVEN:
1791 		if (adev->pdev->device == 0x15dd)
1792 			adev->apu_flags |= AMD_APU_IS_RAVEN;
1793 		if (adev->pdev->device == 0x15d8)
1794 			adev->apu_flags |= AMD_APU_IS_PICASSO;
1795 		break;
1796 	case CHIP_RENOIR:
1797 		if ((adev->pdev->device == 0x1636) ||
1798 		    (adev->pdev->device == 0x164c))
1799 			adev->apu_flags |= AMD_APU_IS_RENOIR;
1800 		else
1801 			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1802 		break;
1803 	case CHIP_VANGOGH:
1804 		adev->apu_flags |= AMD_APU_IS_VANGOGH;
1805 		break;
1806 	case CHIP_YELLOW_CARP:
1807 		break;
1808 	case CHIP_CYAN_SKILLFISH:
1809 		if ((adev->pdev->device == 0x13FE) ||
1810 		    (adev->pdev->device == 0x143F))
1811 			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1812 		break;
1813 	default:
1814 		break;
1815 	}
1816 
1817 	return 0;
1818 }
1819 
1820 /**
1821  * amdgpu_device_check_arguments - validate module params
1822  *
1823  * @adev: amdgpu_device pointer
1824  *
1825  * Validates certain module parameters and updates
1826  * the associated values used by the driver (all asics).
1827  */
1828 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1829 {
1830 	if (amdgpu_sched_jobs < 4) {
1831 		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1832 			 amdgpu_sched_jobs);
1833 		amdgpu_sched_jobs = 4;
1834 	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
1835 		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1836 			 amdgpu_sched_jobs);
1837 		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1838 	}
1839 
1840 	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1841 		/* gart size must be greater or equal to 32M */
1842 		dev_warn(adev->dev, "gart size (%d) too small\n",
1843 			 amdgpu_gart_size);
1844 		amdgpu_gart_size = -1;
1845 	}
1846 
1847 	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1848 		/* gtt size must be greater or equal to 32M */
1849 		dev_warn(adev->dev, "gtt size (%d) too small\n",
1850 				 amdgpu_gtt_size);
1851 		amdgpu_gtt_size = -1;
1852 	}
1853 
1854 	/* valid range is between 4 and 9 inclusive */
1855 	if (amdgpu_vm_fragment_size != -1 &&
1856 	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1857 		dev_warn(adev->dev, "valid range for VM fragment size is between 4 and 9\n");
1858 		amdgpu_vm_fragment_size = -1;
1859 	}
1860 
1861 	if (amdgpu_sched_hw_submission < 2) {
1862 		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1863 			 amdgpu_sched_hw_submission);
1864 		amdgpu_sched_hw_submission = 2;
1865 	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1866 		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1867 			 amdgpu_sched_hw_submission);
1868 		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1869 	}
1870 
1871 	if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
1872 		dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
1873 		amdgpu_reset_method = -1;
1874 	}
1875 
1876 	amdgpu_device_check_smu_prv_buffer_size(adev);
1877 
1878 	amdgpu_device_check_vm_size(adev);
1879 
1880 	amdgpu_device_check_block_size(adev);
1881 
1882 	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1883 
1884 	return 0;
1885 }
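
/*
 * Illustrative effect of the rounding above (not part of the original
 * file): a module parameter such as amdgpu.sched_jobs=6 is rounded up
 * to 8 by roundup_pow_of_two(), while amdgpu.sched_jobs=2 is raised to
 * the minimum of 4.
 */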
1886 
1887 /**
1888  * amdgpu_switcheroo_set_state - set switcheroo state
1889  *
1890  * @pdev: pci dev pointer
1891  * @state: vga_switcheroo state
1892  *
1893  * Callback for the switcheroo driver.  Suspends or resumes
1894  * the asic before or after it is powered up using ACPI methods.
1895  */
1896 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1897 					enum vga_switcheroo_state state)
1898 {
1899 	struct drm_device *dev = pci_get_drvdata(pdev);
1900 	int r;
1901 
1902 	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1903 		return;
1904 
1905 	if (state == VGA_SWITCHEROO_ON) {
1906 		pr_info("switched on\n");
1907 		/* don't suspend or resume card normally */
1908 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1909 
1910 		pci_set_power_state(pdev, PCI_D0);
1911 		amdgpu_device_load_pci_state(pdev);
1912 		r = pci_enable_device(pdev);
1913 		if (r)
1914 			DRM_WARN("pci_enable_device failed (%d)\n", r);
1915 		amdgpu_device_resume(dev, true);
1916 
1917 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
1918 	} else {
1919 		pr_info("switched off\n");
1920 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1921 		amdgpu_device_prepare(dev);
1922 		amdgpu_device_suspend(dev, true);
1923 		amdgpu_device_cache_pci_state(pdev);
1924 		/* Shut down the device */
1925 		pci_disable_device(pdev);
1926 		pci_set_power_state(pdev, PCI_D3cold);
1927 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1928 	}
1929 }
1930 
1931 /**
1932  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1933  *
1934  * @pdev: pci dev pointer
1935  *
1936  * Callback for the switcheroo driver.  Check if the switcheroo
1937  * state can be changed.
1938  * Returns true if the state can be changed, false if not.
1939  */
1940 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1941 {
1942 	struct drm_device *dev = pci_get_drvdata(pdev);
1943 
1944 	/*
1945 	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1946 	 * locking inversion with the driver load path. And the access here is
1947 	 * completely racy anyway. So don't bother with locking for now.
1948 	 */
1949 	return atomic_read(&dev->open_count) == 0;
1950 }
1951 
1952 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1953 	.set_gpu_state = amdgpu_switcheroo_set_state,
1954 	.reprobe = NULL,
1955 	.can_switch = amdgpu_switcheroo_can_switch,
1956 };
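
/*
 * Sketch of how these ops are consumed (the registration happens
 * elsewhere in this driver during device init; px indicates PX/hybrid
 * graphics support):
 *
 *	vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, px);
 */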
1957 
1958 /**
1959  * amdgpu_device_ip_set_clockgating_state - set the CG state
1960  *
1961  * @dev: amdgpu_device pointer
1962  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1963  * @state: clockgating state (gate or ungate)
1964  *
1965  * Sets the requested clockgating state for all instances of
1966  * the hardware IP specified.
1967  * Returns the error code from the last instance.
1968  */
1969 int amdgpu_device_ip_set_clockgating_state(void *dev,
1970 					   enum amd_ip_block_type block_type,
1971 					   enum amd_clockgating_state state)
1972 {
1973 	struct amdgpu_device *adev = dev;
1974 	int i, r = 0;
1975 
1976 	for (i = 0; i < adev->num_ip_blocks; i++) {
1977 		if (!adev->ip_blocks[i].status.valid)
1978 			continue;
1979 		if (adev->ip_blocks[i].version->type != block_type)
1980 			continue;
1981 		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1982 			continue;
1983 		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1984 			(void *)adev, state);
1985 		if (r)
1986 			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1987 				  adev->ip_blocks[i].version->funcs->name, r);
1988 	}
1989 	return r;
1990 }
1991 
1992 /**
1993  * amdgpu_device_ip_set_powergating_state - set the PG state
1994  *
1995  * @dev: amdgpu_device pointer
1996  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1997  * @state: powergating state (gate or ungate)
1998  *
1999  * Sets the requested powergating state for all instances of
2000  * the hardware IP specified.
2001  * Returns the error code from the last instance.
2002  */
2003 int amdgpu_device_ip_set_powergating_state(void *dev,
2004 					   enum amd_ip_block_type block_type,
2005 					   enum amd_powergating_state state)
2006 {
2007 	struct amdgpu_device *adev = dev;
2008 	int i, r = 0;
2009 
2010 	for (i = 0; i < adev->num_ip_blocks; i++) {
2011 		if (!adev->ip_blocks[i].status.valid)
2012 			continue;
2013 		if (adev->ip_blocks[i].version->type != block_type)
2014 			continue;
2015 		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
2016 			continue;
2017 		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
2018 			(void *)adev, state);
2019 		if (r)
2020 			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
2021 				  adev->ip_blocks[i].version->funcs->name, r);
2022 	}
2023 	return r;
2024 }
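
/*
 * Illustrative use of the two setters above (a sketch, not part of the
 * original file): gate clock- and powergating on all GFX instances and
 * warn on failure.
 *
 *	if (amdgpu_device_ip_set_clockgating_state(adev,
 *						   AMD_IP_BLOCK_TYPE_GFX,
 *						   AMD_CG_STATE_GATE) ||
 *	    amdgpu_device_ip_set_powergating_state(adev,
 *						   AMD_IP_BLOCK_TYPE_GFX,
 *						   AMD_PG_STATE_GATE))
 *		dev_warn(adev->dev, "failed to gate GFX CG/PG\n");
 */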
2025 
2026 /**
2027  * amdgpu_device_ip_get_clockgating_state - get the CG state
2028  *
2029  * @adev: amdgpu_device pointer
2030  * @flags: clockgating feature flags
2031  *
2032  * Walks the list of IPs on the device and updates the clockgating
2033  * flags for each IP.
2034  * Updates @flags with the feature flags for each hardware IP where
2035  * clockgating is enabled.
2036  */
2037 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
2038 					    u64 *flags)
2039 {
2040 	int i;
2041 
2042 	for (i = 0; i < adev->num_ip_blocks; i++) {
2043 		if (!adev->ip_blocks[i].status.valid)
2044 			continue;
2045 		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
2046 			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
2047 	}
2048 }
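
/*
 * Illustrative use (a sketch, not part of the original file): dump the
 * active clockgating features, debugfs style.
 *
 *	u64 flags = 0;
 *
 *	amdgpu_device_ip_get_clockgating_state(adev, &flags);
 *	if (flags & AMD_CG_SUPPORT_GFX_MGCG)
 *		DRM_INFO("GFX MGCG enabled\n");
 */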
2049 
2050 /**
2051  * amdgpu_device_ip_wait_for_idle - wait for idle
2052  *
2053  * @adev: amdgpu_device pointer
2054  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
2055  *
2056  * Waits for the requested hardware IP to be idle.
2057  * Returns 0 for success or a negative error code on failure.
2058  */
2059 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
2060 				   enum amd_ip_block_type block_type)
2061 {
2062 	int i, r;
2063 
2064 	for (i = 0; i < adev->num_ip_blocks; i++) {
2065 		if (!adev->ip_blocks[i].status.valid)
2066 			continue;
2067 		if (adev->ip_blocks[i].version->type == block_type) {
2068 			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
2069 			if (r)
2070 				return r;
2071 			break;
2072 		}
2073 	}
2074 	return 0;
2076 }
2077 
2078 /**
2079  * amdgpu_device_ip_is_idle - is the hardware IP idle
2080  *
2081  * @adev: amdgpu_device pointer
2082  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
2083  *
2084  * Check if the hardware IP is idle or not.
2085  * Returns true if the IP is idle, false if not.
2086  */
2087 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
2088 			      enum amd_ip_block_type block_type)
2089 {
2090 	int i;
2091 
2092 	for (i = 0; i < adev->num_ip_blocks; i++) {
2093 		if (!adev->ip_blocks[i].status.valid)
2094 			continue;
2095 		if (adev->ip_blocks[i].version->type == block_type)
2096 			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
2097 	}
2098 	return true;
2100 }
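
/*
 * Illustrative pattern for the two helpers above (a sketch, not part of
 * the original file): check cheaply first, then block until idle before
 * touching an IP's registers.
 *
 *	if (!amdgpu_device_ip_is_idle(adev, AMD_IP_BLOCK_TYPE_GFX) &&
 *	    amdgpu_device_ip_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GFX))
 *		dev_warn(adev->dev, "GFX did not go idle\n");
 */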
2101 
2102 /**
2103  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
2104  *
2105  * @adev: amdgpu_device pointer
2106  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
2107  *
2108  * Returns a pointer to the hardware IP block structure
2109  * if it exists for the asic, otherwise NULL.
2110  */
2111 struct amdgpu_ip_block *
2112 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
2113 			      enum amd_ip_block_type type)
2114 {
2115 	int i;
2116 
2117 	for (i = 0; i < adev->num_ip_blocks; i++)
2118 		if (adev->ip_blocks[i].version->type == type)
2119 			return &adev->ip_blocks[i];
2120 
2121 	return NULL;
2122 }
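
/*
 * Illustrative lookup (a sketch, not part of the original file):
 *
 *	struct amdgpu_ip_block *gmc =
 *		amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GMC);
 *
 *	if (gmc)
 *		DRM_INFO("GMC v%u.%u\n", gmc->version->major,
 *			 gmc->version->minor);
 */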
2123 
2124 /**
2125  * amdgpu_device_ip_block_version_cmp
2126  *
2127  * @adev: amdgpu_device pointer
2128  * @type: enum amd_ip_block_type
2129  * @major: major version
2130  * @minor: minor version
2131  *
2132  * Return 0 if the IP block version is equal to or greater than the given version,
2133  * 1 if it is smaller or the ip_block doesn't exist.
2134  */
2135 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
2136 				       enum amd_ip_block_type type,
2137 				       u32 major, u32 minor)
2138 {
2139 	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
2140 
2141 	if (ip_block && ((ip_block->version->major > major) ||
2142 			((ip_block->version->major == major) &&
2143 			(ip_block->version->minor >= minor))))
2144 		return 0;
2145 
2146 	return 1;
2147 }
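
/*
 * Illustrative check (a sketch; the version numbers and helper are
 * hypothetical): take a code path only when the SMC block is at least
 * v7.1.
 *
 *	if (amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_SMC,
 *					       7, 1) == 0)
 *		use_new_smc_feature();
 */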
2148 
2149 /**
2150  * amdgpu_device_ip_block_add
2151  *
2152  * @adev: amdgpu_device pointer
2153  * @ip_block_version: pointer to the IP to add
2154  *
2155  * Adds the IP block driver information to the collection of IPs
2156  * on the asic.
2157  */
2158 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
2159 			       const struct amdgpu_ip_block_version *ip_block_version)
2160 {
2161 	if (!ip_block_version)
2162 		return -EINVAL;
2163 
2164 	switch (ip_block_version->type) {
2165 	case AMD_IP_BLOCK_TYPE_VCN:
2166 		if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
2167 			return 0;
2168 		break;
2169 	case AMD_IP_BLOCK_TYPE_JPEG:
2170 		if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
2171 			return 0;
2172 		break;
2173 	default:
2174 		break;
2175 	}
2176 
2177 	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
2178 		  ip_block_version->funcs->name);
2179 
2180 	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
2181 
2182 	return 0;
2183 }
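
/*
 * Illustrative registration (a sketch): per-ASIC code such as
 * vi_set_ip_blocks() builds the IP list with calls like
 *
 *	r = amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
 *	if (r)
 *		return r;
 */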
2184 
2185 /**
2186  * amdgpu_device_enable_virtual_display - enable virtual display feature
2187  *
2188  * @adev: amdgpu_device pointer
2189  *
2190  * Enables the virtual display feature if the user has enabled it via
2191  * the module parameter virtual_display.  This feature provides a virtual
2192  * display hardware on headless boards or in virtualized environments.
2193  * This function parses and validates the configuration string specified by
2194  * the user and configures the virtual display configuration (number of
2195  * virtual connectors, crtcs, etc.) specified.
2196  */
2197 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
2198 {
2199 	adev->enable_virtual_display = false;
2200 
2201 	if (amdgpu_virtual_display) {
2202 		const char *pci_address_name = pci_name(adev->pdev);
2203 		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
2204 
2205 		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
2206 		pciaddstr_tmp = pciaddstr;
2207 		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
2208 			pciaddname = strsep(&pciaddname_tmp, ",");
2209 			if (!strcmp("all", pciaddname)
2210 			    || !strcmp(pci_address_name, pciaddname)) {
2211 				long num_crtc;
2212 				int res = -1;
2213 
2214 				adev->enable_virtual_display = true;
2215 
2216 				if (pciaddname_tmp)
2217 					res = kstrtol(pciaddname_tmp, 10,
2218 						      &num_crtc);
2219 
2220 				if (!res) {
2221 					if (num_crtc < 1)
2222 						num_crtc = 1;
2223 					if (num_crtc > 6)
2224 						num_crtc = 6;
2225 					adev->mode_info.num_crtc = num_crtc;
2226 				} else {
2227 					adev->mode_info.num_crtc = 1;
2228 				}
2229 				break;
2230 			}
2231 		}
2232 
2233 		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
2234 			 amdgpu_virtual_display, pci_address_name,
2235 			 adev->enable_virtual_display, adev->mode_info.num_crtc);
2236 
2237 		kfree(pciaddstr);
2238 	}
2239 }
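
/*
 * Example configuration strings accepted by the parser above
 * (illustrative; the PCI address is hypothetical): enable a virtual
 * display with 2 crtcs on one device, or 1 crtc on all devices:
 *
 *	modprobe amdgpu virtual_display=0000:01:00.0,2
 *	modprobe amdgpu virtual_display=all,1
 */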
2240 
2241 void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
2242 {
2243 	if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
2244 		adev->mode_info.num_crtc = 1;
2245 		adev->enable_virtual_display = true;
2246 		DRM_INFO("virtual_display:%d, num_crtc:%d\n",
2247 			 adev->enable_virtual_display, adev->mode_info.num_crtc);
2248 	}
2249 }
2250 
2251 /**
2252  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
2253  *
2254  * @adev: amdgpu_device pointer
2255  *
2256  * Parses the asic configuration parameters specified in the gpu info
2257  * firmware and makes them available to the driver for use in configuring
2258  * the asic.
2259  * Returns 0 on success, -EINVAL on failure.
2260  */
2261 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
2262 {
2263 	const char *chip_name;
2264 	char fw_name[40];
2265 	int err;
2266 	const struct gpu_info_firmware_header_v1_0 *hdr;
2267 
2268 	adev->firmware.gpu_info_fw = NULL;
2269 
2270 	if (adev->mman.discovery_bin)
2271 		return 0;
2272 
2273 	switch (adev->asic_type) {
2274 	default:
2275 		return 0;
2276 	case CHIP_VEGA10:
2277 		chip_name = "vega10";
2278 		break;
2279 	case CHIP_VEGA12:
2280 		chip_name = "vega12";
2281 		break;
2282 	case CHIP_RAVEN:
2283 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
2284 			chip_name = "raven2";
2285 		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
2286 			chip_name = "picasso";
2287 		else
2288 			chip_name = "raven";
2289 		break;
2290 	case CHIP_ARCTURUS:
2291 		chip_name = "arcturus";
2292 		break;
2293 	case CHIP_NAVI12:
2294 		chip_name = "navi12";
2295 		break;
2296 	}
2297 
2298 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
2299 	err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw, fw_name);
2300 	if (err) {
2301 		dev_err(adev->dev,
2302 			"Failed to get gpu_info firmware \"%s\"\n",
2303 			fw_name);
2304 		goto out;
2305 	}
2306 
2307 	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
2308 	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
2309 
2310 	switch (hdr->version_major) {
2311 	case 1:
2312 	{
2313 		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
2314 			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
2315 								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2316 
2317 		/*
2318 		 * Should be dropped when DAL no longer needs it.
2319 		 */
2320 		if (adev->asic_type == CHIP_NAVI12)
2321 			goto parse_soc_bounding_box;
2322 
2323 		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2324 		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2325 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2326 		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2327 		adev->gfx.config.max_texture_channel_caches =
2328 			le32_to_cpu(gpu_info_fw->gc_num_tccs);
2329 		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2330 		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2331 		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2332 		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2333 		adev->gfx.config.double_offchip_lds_buf =
2334 			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2335 		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2336 		adev->gfx.cu_info.max_waves_per_simd =
2337 			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2338 		adev->gfx.cu_info.max_scratch_slots_per_cu =
2339 			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2340 		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2341 		if (hdr->version_minor >= 1) {
2342 			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2343 				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2344 									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2345 			adev->gfx.config.num_sc_per_sh =
2346 				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2347 			adev->gfx.config.num_packer_per_sc =
2348 				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2349 		}
2350 
2351 parse_soc_bounding_box:
2352 		/*
2353 		 * SoC bounding box info is not integrated into the discovery table;
2354 		 * we always need to parse it from the gpu info firmware when needed.
2355 		 */
2356 		if (hdr->version_minor == 2) {
2357 			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2358 				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2359 									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2360 			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2361 		}
2362 		break;
2363 	}
2364 	default:
2365 		dev_err(adev->dev,
2366 			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2367 		err = -EINVAL;
2368 		goto out;
2369 	}
2370 out:
2371 	return err;
2372 }
2373 
2374 /**
2375  * amdgpu_device_ip_early_init - run early init for hardware IPs
2376  *
2377  * @adev: amdgpu_device pointer
2378  *
2379  * Early initialization pass for hardware IPs.  The hardware IPs that make
2380  * up each asic are discovered and each IP's early_init callback is run.  This
2381  * is the first stage in initializing the asic.
2382  * Returns 0 on success, negative error code on failure.
2383  */
2384 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2385 {
2386 	struct pci_dev *parent;
2387 	int i, r;
2388 	bool total;
2389 
2390 	amdgpu_device_enable_virtual_display(adev);
2391 
2392 	if (amdgpu_sriov_vf(adev)) {
2393 		r = amdgpu_virt_request_full_gpu(adev, true);
2394 		if (r)
2395 			return r;
2396 	}
2397 
2398 	switch (adev->asic_type) {
2399 #ifdef CONFIG_DRM_AMDGPU_SI
2400 	case CHIP_VERDE:
2401 	case CHIP_TAHITI:
2402 	case CHIP_PITCAIRN:
2403 	case CHIP_OLAND:
2404 	case CHIP_HAINAN:
2405 		adev->family = AMDGPU_FAMILY_SI;
2406 		r = si_set_ip_blocks(adev);
2407 		if (r)
2408 			return r;
2409 		break;
2410 #endif
2411 #ifdef CONFIG_DRM_AMDGPU_CIK
2412 	case CHIP_BONAIRE:
2413 	case CHIP_HAWAII:
2414 	case CHIP_KAVERI:
2415 	case CHIP_KABINI:
2416 	case CHIP_MULLINS:
2417 		if (adev->flags & AMD_IS_APU)
2418 			adev->family = AMDGPU_FAMILY_KV;
2419 		else
2420 			adev->family = AMDGPU_FAMILY_CI;
2421 
2422 		r = cik_set_ip_blocks(adev);
2423 		if (r)
2424 			return r;
2425 		break;
2426 #endif
2427 	case CHIP_TOPAZ:
2428 	case CHIP_TONGA:
2429 	case CHIP_FIJI:
2430 	case CHIP_POLARIS10:
2431 	case CHIP_POLARIS11:
2432 	case CHIP_POLARIS12:
2433 	case CHIP_VEGAM:
2434 	case CHIP_CARRIZO:
2435 	case CHIP_STONEY:
2436 		if (adev->flags & AMD_IS_APU)
2437 			adev->family = AMDGPU_FAMILY_CZ;
2438 		else
2439 			adev->family = AMDGPU_FAMILY_VI;
2440 
2441 		r = vi_set_ip_blocks(adev);
2442 		if (r)
2443 			return r;
2444 		break;
2445 	default:
2446 		r = amdgpu_discovery_set_ip_blocks(adev);
2447 		if (r)
2448 			return r;
2449 		break;
2450 	}
2451 
2452 	if (amdgpu_has_atpx() &&
2453 	    (amdgpu_is_atpx_hybrid() ||
2454 	     amdgpu_has_atpx_dgpu_power_cntl()) &&
2455 	    ((adev->flags & AMD_IS_APU) == 0) &&
2456 	    !dev_is_removable(&adev->pdev->dev))
2457 		adev->flags |= AMD_IS_PX;
2458 
2459 	if (!(adev->flags & AMD_IS_APU)) {
2460 		parent = pcie_find_root_port(adev->pdev);
2461 		adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2462 	}
2463 
2465 	adev->pm.pp_feature = amdgpu_pp_feature_mask;
2466 	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2467 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2468 	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2469 		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2470 	if (!amdgpu_device_pcie_dynamic_switching_supported(adev))
2471 		adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
2472 
2473 	total = true;
2474 	for (i = 0; i < adev->num_ip_blocks; i++) {
2475 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2476 			DRM_WARN("disabled ip block: %d <%s>\n",
2477 				  i, adev->ip_blocks[i].version->funcs->name);
2478 			adev->ip_blocks[i].status.valid = false;
2479 		} else {
2480 			if (adev->ip_blocks[i].version->funcs->early_init) {
2481 				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2482 				if (r == -ENOENT) {
2483 					adev->ip_blocks[i].status.valid = false;
2484 				} else if (r) {
2485 					DRM_ERROR("early_init of IP block <%s> failed %d\n",
2486 						  adev->ip_blocks[i].version->funcs->name, r);
2487 					total = false;
2488 				} else {
2489 					adev->ip_blocks[i].status.valid = true;
2490 				}
2491 			} else {
2492 				adev->ip_blocks[i].status.valid = true;
2493 			}
2494 		}
2495 		/* get the vbios after the asic_funcs are set up */
2496 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2497 			r = amdgpu_device_parse_gpu_info_fw(adev);
2498 			if (r)
2499 				return r;
2500 
2501 			/* Read BIOS */
2502 			if (amdgpu_device_read_bios(adev)) {
2503 				if (!amdgpu_get_bios(adev))
2504 					return -EINVAL;
2505 
2506 				r = amdgpu_atombios_init(adev);
2507 				if (r) {
2508 					dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2509 					amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2510 					return r;
2511 				}
2512 			}
2513 
2514 			/* get pf2vf msg info at its earliest time */
2515 			if (amdgpu_sriov_vf(adev))
2516 				amdgpu_virt_init_data_exchange(adev);
2517 
2518 		}
2519 	}
2520 	if (!total)
2521 		return -ENODEV;
2522 
2523 	amdgpu_amdkfd_device_probe(adev);
2524 	adev->cg_flags &= amdgpu_cg_mask;
2525 	adev->pg_flags &= amdgpu_pg_mask;
2526 
2527 	return 0;
2528 }
2529 
2530 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2531 {
2532 	int i, r;
2533 
2534 	for (i = 0; i < adev->num_ip_blocks; i++) {
2535 		if (!adev->ip_blocks[i].status.sw)
2536 			continue;
2537 		if (adev->ip_blocks[i].status.hw)
2538 			continue;
2539 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2540 		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2541 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2542 			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2543 			if (r) {
2544 				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2545 					  adev->ip_blocks[i].version->funcs->name, r);
2546 				return r;
2547 			}
2548 			adev->ip_blocks[i].status.hw = true;
2549 		}
2550 	}
2551 
2552 	return 0;
2553 }
2554 
2555 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2556 {
2557 	int i, r;
2558 
2559 	for (i = 0; i < adev->num_ip_blocks; i++) {
2560 		if (!adev->ip_blocks[i].status.sw)
2561 			continue;
2562 		if (adev->ip_blocks[i].status.hw)
2563 			continue;
2564 		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2565 		if (r) {
2566 			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2567 				  adev->ip_blocks[i].version->funcs->name, r);
2568 			return r;
2569 		}
2570 		adev->ip_blocks[i].status.hw = true;
2571 	}
2572 
2573 	return 0;
2574 }
2575 
2576 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2577 {
2578 	int r = 0;
2579 	int i;
2580 	uint32_t smu_version;
2581 
2582 	if (adev->asic_type >= CHIP_VEGA10) {
2583 		for (i = 0; i < adev->num_ip_blocks; i++) {
2584 			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2585 				continue;
2586 
2587 			if (!adev->ip_blocks[i].status.sw)
2588 				continue;
2589 
2590 			/* no need to do the fw loading again if already done */
2591 			if (adev->ip_blocks[i].status.hw)
2592 				break;
2593 
2594 			if (amdgpu_in_reset(adev) || adev->in_suspend) {
2595 				r = adev->ip_blocks[i].version->funcs->resume(adev);
2596 				if (r) {
2597 					DRM_ERROR("resume of IP block <%s> failed %d\n",
2598 							  adev->ip_blocks[i].version->funcs->name, r);
2599 					return r;
2600 				}
2601 			} else {
2602 				r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2603 				if (r) {
2604 					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2605 							  adev->ip_blocks[i].version->funcs->name, r);
2606 					return r;
2607 				}
2608 			}
2609 
2610 			adev->ip_blocks[i].status.hw = true;
2611 			break;
2612 		}
2613 	}
2614 
2615 	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2616 		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2617 
2618 	return r;
2619 }
2620 
2621 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2622 {
2623 	long timeout;
2624 	int r, i;
2625 
2626 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2627 		struct amdgpu_ring *ring = adev->rings[i];
2628 
2629 		/* No need to set up the GPU scheduler for rings that don't need it */
2630 		if (!ring || ring->no_scheduler)
2631 			continue;
2632 
2633 		switch (ring->funcs->type) {
2634 		case AMDGPU_RING_TYPE_GFX:
2635 			timeout = adev->gfx_timeout;
2636 			break;
2637 		case AMDGPU_RING_TYPE_COMPUTE:
2638 			timeout = adev->compute_timeout;
2639 			break;
2640 		case AMDGPU_RING_TYPE_SDMA:
2641 			timeout = adev->sdma_timeout;
2642 			break;
2643 		default:
2644 			timeout = adev->video_timeout;
2645 			break;
2646 		}
2647 
2648 		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, NULL,
2649 				   DRM_SCHED_PRIORITY_COUNT,
2650 				   ring->num_hw_submission, 0,
2651 				   timeout, adev->reset_domain->wq,
2652 				   ring->sched_score, ring->name,
2653 				   adev->dev);
2654 		if (r) {
2655 			DRM_ERROR("Failed to create scheduler on ring %s.\n",
2656 				  ring->name);
2657 			return r;
2658 		}
2659 		r = amdgpu_uvd_entity_init(adev, ring);
2660 		if (r) {
2661 			DRM_ERROR("Failed to create UVD scheduling entity on ring %s.\n",
2662 				  ring->name);
2663 			return r;
2664 		}
2665 		r = amdgpu_vce_entity_init(adev, ring);
2666 		if (r) {
2667 			DRM_ERROR("Failed to create VCE scheduling entity on ring %s.\n",
2668 				  ring->name);
2669 			return r;
2670 		}
2671 	}
2672 
2673 	amdgpu_xcp_update_partition_sched_list(adev);
2674 
2675 	return 0;
2676 }
2677 
2678 
2679 /**
2680  * amdgpu_device_ip_init - run init for hardware IPs
2681  *
2682  * @adev: amdgpu_device pointer
2683  *
2684  * Main initialization pass for hardware IPs.  The list of all the hardware
2685  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2686  * are run.  sw_init initializes the software state associated with each IP
2687  * and hw_init initializes the hardware associated with each IP.
2688  * Returns 0 on success, negative error code on failure.
2689  */
2690 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2691 {
2692 	int i, r;
2693 
2694 	r = amdgpu_ras_init(adev);
2695 	if (r)
2696 		return r;
2697 
2698 	for (i = 0; i < adev->num_ip_blocks; i++) {
2699 		if (!adev->ip_blocks[i].status.valid)
2700 			continue;
2701 		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2702 		if (r) {
2703 			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2704 				  adev->ip_blocks[i].version->funcs->name, r);
2705 			goto init_failed;
2706 		}
2707 		adev->ip_blocks[i].status.sw = true;
2708 
2709 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2710 			/* need to do common hw init early so everything is set up for gmc */
2711 			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2712 			if (r) {
2713 				DRM_ERROR("hw_init %d failed %d\n", i, r);
2714 				goto init_failed;
2715 			}
2716 			adev->ip_blocks[i].status.hw = true;
2717 		} else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2718 			/* need to do gmc hw init early so we can allocate gpu mem */
2719 			/* Try to reserve bad pages early */
2720 			if (amdgpu_sriov_vf(adev))
2721 				amdgpu_virt_exchange_data(adev);
2722 
2723 			r = amdgpu_device_mem_scratch_init(adev);
2724 			if (r) {
2725 				DRM_ERROR("amdgpu_device_mem_scratch_init failed %d\n", r);
2726 				goto init_failed;
2727 			}
2728 			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2729 			if (r) {
2730 				DRM_ERROR("hw_init %d failed %d\n", i, r);
2731 				goto init_failed;
2732 			}
2733 			r = amdgpu_device_wb_init(adev);
2734 			if (r) {
2735 				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2736 				goto init_failed;
2737 			}
2738 			adev->ip_blocks[i].status.hw = true;
2739 
2740 			/* right after GMC hw init, we create CSA */
2741 			if (adev->gfx.mcbp) {
2742 				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2743 							       AMDGPU_GEM_DOMAIN_VRAM |
2744 							       AMDGPU_GEM_DOMAIN_GTT,
2745 							       AMDGPU_CSA_SIZE);
2746 				if (r) {
2747 					DRM_ERROR("allocate CSA failed %d\n", r);
2748 					goto init_failed;
2749 				}
2750 			}
2751 
2752 			r = amdgpu_seq64_init(adev);
2753 			if (r) {
2754 				DRM_ERROR("allocate seq64 failed %d\n", r);
2755 				goto init_failed;
2756 			}
2757 		}
2758 	}
2759 
2760 	if (amdgpu_sriov_vf(adev))
2761 		amdgpu_virt_init_data_exchange(adev);
2762 
2763 	r = amdgpu_ib_pool_init(adev);
2764 	if (r) {
2765 		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2766 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2767 		goto init_failed;
2768 	}
2769 
2770 	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2771 	if (r)
2772 		goto init_failed;
2773 
2774 	r = amdgpu_device_ip_hw_init_phase1(adev);
2775 	if (r)
2776 		goto init_failed;
2777 
2778 	r = amdgpu_device_fw_loading(adev);
2779 	if (r)
2780 		goto init_failed;
2781 
2782 	r = amdgpu_device_ip_hw_init_phase2(adev);
2783 	if (r)
2784 		goto init_failed;
2785 
2786 	/*
2787 	 * retired pages will be loaded from eeprom and reserved here,
2788 	 * it should be called after amdgpu_device_ip_hw_init_phase2 since
2789 	 * for some ASICs the RAS EEPROM code relies on the SMU being fully
2790 	 * functional for I2C communication, which is only true at this point.
2791 	 *
2792 	 * amdgpu_ras_recovery_init may fail, but the upper layer only cares
2793 	 * about failures caused by a bad gpu situation and stops the amdgpu
2794 	 * init process accordingly. For other failures, it will still release
2795 	 * all the resources and print an error message, rather than returning
2796 	 * a negative value to the upper level.
2797 	 *
2798 	 * Note: theoretically, this should be called before all vram allocations
2799 	 * to protect retired pages from being abused
2800 	 */
2801 	r = amdgpu_ras_recovery_init(adev);
2802 	if (r)
2803 		goto init_failed;
2804 
2805 	/*
2806 	 * In case of XGMI, grab an extra reference on the reset domain for this device
2807 	 */
2808 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
2809 		if (amdgpu_xgmi_add_device(adev) == 0) {
2810 			if (!amdgpu_sriov_vf(adev)) {
2811 				struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2812 
2813 				if (WARN_ON(!hive)) {
2814 					r = -ENOENT;
2815 					goto init_failed;
2816 				}
2817 
2818 				if (!hive->reset_domain ||
2819 				    !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2820 					r = -ENOENT;
2821 					amdgpu_put_xgmi_hive(hive);
2822 					goto init_failed;
2823 				}
2824 
2825 				/* Drop the early temporary reset domain we created for device */
2826 				amdgpu_reset_put_reset_domain(adev->reset_domain);
2827 				adev->reset_domain = hive->reset_domain;
2828 				amdgpu_put_xgmi_hive(hive);
2829 			}
2830 		}
2831 	}
2832 
2833 	r = amdgpu_device_init_schedulers(adev);
2834 	if (r)
2835 		goto init_failed;
2836 
2837 	if (adev->mman.buffer_funcs_ring->sched.ready)
2838 		amdgpu_ttm_set_buffer_funcs_status(adev, true);
2839 
2840 	/* Don't init kfd if the whole hive needs to be reset during init */
2841 	if (!adev->gmc.xgmi.pending_reset) {
2842 		kgd2kfd_init_zone_device(adev);
2843 		amdgpu_amdkfd_device_init(adev);
2844 	}
2845 
2846 	amdgpu_fru_get_product_info(adev);
2847 
2848 init_failed:
2849 
2850 	return r;
2851 }
2852 
2853 /**
2854  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2855  *
2856  * @adev: amdgpu_device pointer
2857  *
2858  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2859  * this function before a GPU reset.  If the value is retained after a
2860  * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2861  */
2862 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2863 {
2864 	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2865 }
2866 
2867 /**
2868  * amdgpu_device_check_vram_lost - check if vram is valid
2869  *
2870  * @adev: amdgpu_device pointer
2871  *
2872  * Checks the reset magic value written to the gart pointer in VRAM.
2873  * The driver calls this after a GPU reset to see if the contents of
2874  * VRAM are lost or not.
2875  * returns true if vram is lost, false if not.
2876  */
2877 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2878 {
2879 	if (memcmp(adev->gart.ptr, adev->reset_magic,
2880 			AMDGPU_RESET_MAGIC_NUM))
2881 		return true;
2882 
2883 	if (!amdgpu_in_reset(adev))
2884 		return false;
2885 
2886 	/*
2887 	 * For all ASICs with baco/mode1 reset, the VRAM is
2888 	 * always assumed to be lost.
2889 	 */
2890 	switch (amdgpu_asic_reset_method(adev)) {
2891 	case AMD_RESET_METHOD_BACO:
2892 	case AMD_RESET_METHOD_MODE1:
2893 		return true;
2894 	default:
2895 		return false;
2896 	}
2897 }
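
/*
 * Illustrative flow for the two helpers above (a sketch, not from the
 * original file): the magic is written before an ASIC reset and checked
 * afterwards.
 *
 *	amdgpu_device_fill_reset_magic(adev);
 *	... perform the ASIC reset ...
 *	if (amdgpu_device_check_vram_lost(adev))
 *		DRM_INFO("VRAM contents were lost\n");
 */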
2898 
2899 /**
2900  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2901  *
2902  * @adev: amdgpu_device pointer
2903  * @state: clockgating state (gate or ungate)
2904  *
2905  * The list of all the hardware IPs that make up the asic is walked and the
2906  * set_clockgating_state callbacks are run.
2907  * During late init this pass enables clockgating for the hardware IPs;
2908  * during fini or suspend it disables clockgating.
2909  * Returns 0 on success, negative error code on failure.
2910  */
2912 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2913 			       enum amd_clockgating_state state)
2914 {
2915 	int i, j, r;
2916 
2917 	if (amdgpu_emu_mode == 1)
2918 		return 0;
2919 
2920 	for (j = 0; j < adev->num_ip_blocks; j++) {
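		/* gate in forward IP order, ungate in reverse order */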
2921 		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2922 		if (!adev->ip_blocks[i].status.late_initialized)
2923 			continue;
2924 		/* skip CG for GFX, SDMA on S0ix */
2925 		if (adev->in_s0ix &&
2926 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2927 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2928 			continue;
2929 		/* skip CG for VCE/UVD, it's handled specially */
2930 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2931 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2932 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2933 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2934 		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2935 			/* enable clockgating to save power */
2936 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2937 										     state);
2938 			if (r) {
2939 				DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
2940 					  adev->ip_blocks[i].version->funcs->name, r);
2941 				return r;
2942 			}
2943 		}
2944 	}
2945 
2946 	return 0;
2947 }
2948 
2949 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2950 			       enum amd_powergating_state state)
2951 {
2952 	int i, j, r;
2953 
2954 	if (amdgpu_emu_mode == 1)
2955 		return 0;
2956 
2957 	for (j = 0; j < adev->num_ip_blocks; j++) {
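		/* gate in forward IP order, ungate in reverse order */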
2958 		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2959 		if (!adev->ip_blocks[i].status.late_initialized)
2960 			continue;
2961 		/* skip PG for GFX, SDMA on S0ix */
2962 		if (adev->in_s0ix &&
2963 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2964 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2965 			continue;
2966 		/* skip PG for VCE/UVD, it's handled specially */
2967 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2968 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2969 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2970 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2971 		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
2972 			/* enable powergating to save power */
2973 			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2974 											state);
2975 			if (r) {
2976 				DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
2977 					  adev->ip_blocks[i].version->funcs->name, r);
2978 				return r;
2979 			}
2980 		}
2981 	}
2982 	return 0;
2983 }
2984 
2985 static int amdgpu_device_enable_mgpu_fan_boost(void)
2986 {
2987 	struct amdgpu_gpu_instance *gpu_ins;
2988 	struct amdgpu_device *adev;
2989 	int i, ret = 0;
2990 
2991 	mutex_lock(&mgpu_info.mutex);
2992 
2993 	/*
2994 	 * MGPU fan boost feature should be enabled
2995 	 * only when there are two or more dGPUs in
2996 	 * the system
2997 	 */
2998 	if (mgpu_info.num_dgpu < 2)
2999 		goto out;
3000 
3001 	for (i = 0; i < mgpu_info.num_dgpu; i++) {
3002 		gpu_ins = &(mgpu_info.gpu_ins[i]);
3003 		adev = gpu_ins->adev;
3004 		if (!(adev->flags & AMD_IS_APU) &&
3005 		    !gpu_ins->mgpu_fan_enabled) {
3006 			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
3007 			if (ret)
3008 				break;
3009 
3010 			gpu_ins->mgpu_fan_enabled = 1;
3011 		}
3012 	}
3013 
3014 out:
3015 	mutex_unlock(&mgpu_info.mutex);
3016 
3017 	return ret;
3018 }
3019 
3020 /**
3021  * amdgpu_device_ip_late_init - run late init for hardware IPs
3022  *
3023  * @adev: amdgpu_device pointer
3024  *
3025  * Late initialization pass for hardware IPs.  The list of all the hardware
3026  * IPs that make up the asic is walked and the late_init callbacks are run.
3027  * late_init covers any special initialization that an IP requires
3028  * after all of the IPs have been initialized or something that needs to happen
3029  * late in the init process.
3030  * Returns 0 on success, negative error code on failure.
3031  */
3032 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
3033 {
3034 	struct amdgpu_gpu_instance *gpu_instance;
3035 	int i = 0, r;
3036 
3037 	for (i = 0; i < adev->num_ip_blocks; i++) {
3038 		if (!adev->ip_blocks[i].status.hw)
3039 			continue;
3040 		if (adev->ip_blocks[i].version->funcs->late_init) {
3041 			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
3042 			if (r) {
3043 				DRM_ERROR("late_init of IP block <%s> failed %d\n",
3044 					  adev->ip_blocks[i].version->funcs->name, r);
3045 				return r;
3046 			}
3047 		}
3048 		adev->ip_blocks[i].status.late_initialized = true;
3049 	}
3050 
3051 	r = amdgpu_ras_late_init(adev);
3052 	if (r) {
3053 		DRM_ERROR("amdgpu_ras_late_init failed %d\n", r);
3054 		return r;
3055 	}
3056 
3057 	amdgpu_ras_set_error_query_ready(adev, true);
3058 
3059 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
3060 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
3061 
3062 	amdgpu_device_fill_reset_magic(adev);
3063 
3064 	r = amdgpu_device_enable_mgpu_fan_boost();
3065 	if (r)
3066 		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
3067 
3068 	/* For passthrough configuration on arcturus and aldebaran, enable special SBR handling */
3069 	if (amdgpu_passthrough(adev) &&
3070 	    ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
3071 	     adev->asic_type == CHIP_ALDEBARAN))
3072 		amdgpu_dpm_handle_passthrough_sbr(adev, true);
3073 
3074 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
3075 		mutex_lock(&mgpu_info.mutex);
3076 
3077 		/*
3078 		 * Reset device p-state to low as this was booted with high.
3079 		 *
3080 		 * This should be performed only after all devices from the same
3081 		 * hive get initialized.
3082 		 *
3083 		 * However, the number of devices in a hive is not known in
3084 		 * advance; it is counted one by one as the devices initialize.
3085 		 *
3086 		 * So, we wait until all XGMI interlinked devices are initialized.
3087 		 * This may bring some delay as those devices may come from
3088 		 * different hives. But that should be OK.
3089 		 */
3090 		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
3091 			for (i = 0; i < mgpu_info.num_gpu; i++) {
3092 				gpu_instance = &(mgpu_info.gpu_ins[i]);
3093 				if (gpu_instance->adev->flags & AMD_IS_APU)
3094 					continue;
3095 
3096 				r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
3097 						AMDGPU_XGMI_PSTATE_MIN);
3098 				if (r) {
3099 					DRM_ERROR("pstate setting failed (%d).\n", r);
3100 					break;
3101 				}
3102 			}
3103 		}
3104 
3105 		mutex_unlock(&mgpu_info.mutex);
3106 	}
3107 
3108 	return 0;
3109 }
3110 
3111 /**
3112  * amdgpu_device_smu_fini_early - smu hw_fini wrapper
3113  *
3114  * @adev: amdgpu_device pointer
3115  *
3116  * For ASICs that need to disable the SMC first
3117  */
3118 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
3119 {
3120 	int i, r;
3121 
3122 	if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
3123 		return;
3124 
3125 	for (i = 0; i < adev->num_ip_blocks; i++) {
3126 		if (!adev->ip_blocks[i].status.hw)
3127 			continue;
3128 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3129 			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3130 			/* XXX handle errors */
3131 			if (r) {
3132 				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
3133 					  adev->ip_blocks[i].version->funcs->name, r);
3134 			}
3135 			adev->ip_blocks[i].status.hw = false;
3136 			break;
3137 		}
3138 	}
3139 }
3140 
3141 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
3142 {
3143 	int i, r;
3144 
3145 	for (i = 0; i < adev->num_ip_blocks; i++) {
3146 		if (!adev->ip_blocks[i].version->funcs->early_fini)
3147 			continue;
3148 
3149 		r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
3150 		if (r) {
3151 			DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
3152 				  adev->ip_blocks[i].version->funcs->name, r);
3153 		}
3154 	}
3155 
3156 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
3157 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
3158 
3159 	amdgpu_amdkfd_suspend(adev, false);
3160 
3161 	/* Workaround for ASICs that need to disable SMC first */
3162 	amdgpu_device_smu_fini_early(adev);
3163 
3164 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3165 		if (!adev->ip_blocks[i].status.hw)
3166 			continue;
3167 
3168 		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
3169 		/* XXX handle errors */
3170 		if (r) {
3171 			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
3172 				  adev->ip_blocks[i].version->funcs->name, r);
3173 		}
3174 
3175 		adev->ip_blocks[i].status.hw = false;
3176 	}
3177 
3178 	if (amdgpu_sriov_vf(adev)) {
3179 		if (amdgpu_virt_release_full_gpu(adev, false))
3180 			DRM_ERROR("failed to release exclusive mode on fini\n");
3181 	}
3182 
3183 	return 0;
3184 }
3185 
3186 /**
3187  * amdgpu_device_ip_fini - run fini for hardware IPs
3188  *
3189  * @adev: amdgpu_device pointer
3190  *
3191  * Main teardown pass for hardware IPs.  The list of all the hardware
3192  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
3193  * are run.  hw_fini tears down the hardware associated with each IP
3194  * and sw_fini tears down any software state associated with each IP.
3195  * Returns 0 on success, negative error code on failure.
3196  */
3197 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
3198 {
3199 	int i, r;
3200 
3201 	if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
3202 		amdgpu_virt_release_ras_err_handler_data(adev);
3203 
3204 	if (adev->gmc.xgmi.num_physical_nodes > 1)
3205 		amdgpu_xgmi_remove_device(adev);
3206 
3207 	amdgpu_amdkfd_device_fini_sw(adev);
3208 
3209 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3210 		if (!adev->ip_blocks[i].status.sw)
3211 			continue;
3212 
3213 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
3214 			amdgpu_ucode_free_bo(adev);
3215 			amdgpu_free_static_csa(&adev->virt.csa_obj);
3216 			amdgpu_device_wb_fini(adev);
3217 			amdgpu_device_mem_scratch_fini(adev);
3218 			amdgpu_ib_pool_fini(adev);
3219 			amdgpu_seq64_fini(adev);
3220 		}
3221 
3222 		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
3223 		/* XXX handle errors */
3224 		if (r) {
3225 			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
3226 				  adev->ip_blocks[i].version->funcs->name, r);
3227 		}
3228 		adev->ip_blocks[i].status.sw = false;
3229 		adev->ip_blocks[i].status.valid = false;
3230 	}
3231 
3232 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3233 		if (!adev->ip_blocks[i].status.late_initialized)
3234 			continue;
3235 		if (adev->ip_blocks[i].version->funcs->late_fini)
3236 			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
3237 		adev->ip_blocks[i].status.late_initialized = false;
3238 	}
3239 
3240 	amdgpu_ras_fini(adev);
3241 
3242 	return 0;
3243 }
3244 
3245 /**
3246  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
3247  *
3248  * @work: work_struct.
3249  */
3250 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
3251 {
3252 	struct amdgpu_device *adev =
3253 		container_of(work, struct amdgpu_device, delayed_init_work.work);
3254 	int r;
3255 
3256 	r = amdgpu_ib_ring_tests(adev);
3257 	if (r)
3258 		DRM_ERROR("ib ring test failed (%d).\n", r);
3259 }
3260 
3261 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
3262 {
3263 	struct amdgpu_device *adev =
3264 		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
3265 
3266 	WARN_ON_ONCE(adev->gfx.gfx_off_state);
3267 	WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
3268 
3269 	if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
3270 		adev->gfx.gfx_off_state = true;
3271 }
3272 
3273 /**
3274  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
3275  *
3276  * @adev: amdgpu_device pointer
3277  *
3278  * Main suspend function for hardware IPs.  The list of all the hardware
3279  * IPs that make up the asic is walked, clockgating is disabled and the
3280  * suspend callbacks are run.  suspend puts the hardware and software state
3281  * in each IP into a state suitable for suspend.
3282  * Returns 0 on success, negative error code on failure.
3283  */
3284 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
3285 {
3286 	int i, r;
3287 
3288 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
3289 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
3290 
3291 	/*
3292 	 * Per the PMFW team's suggestion, the driver needs to handle disabling
3293 	 * the gfxoff and df cstate features for the gpu reset (e.g. Mode1Reset)
3294 	 * scenario. Add the missing df cstate disablement here.
3295 	 */
3296 	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
3297 		dev_warn(adev->dev, "Failed to disallow df cstate\n");
3298 
3299 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3300 		if (!adev->ip_blocks[i].status.valid)
3301 			continue;
3302 
3303 		/* displays are handled separately */
3304 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
3305 			continue;
3306 
3308 		r = adev->ip_blocks[i].version->funcs->suspend(adev);
3309 		/* XXX handle errors */
3310 		if (r) {
3311 			DRM_ERROR("suspend of IP block <%s> failed %d\n",
3312 				  adev->ip_blocks[i].version->funcs->name, r);
3313 			return r;
3314 		}
3315 
3316 		adev->ip_blocks[i].status.hw = false;
3317 	}
3318 
3319 	return 0;
3320 }
3321 
3322 /**
3323  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
3324  *
3325  * @adev: amdgpu_device pointer
3326  *
3327  * Main suspend function for hardware IPs.  The list of all the hardware
3328  * IPs that make up the asic is walked, clockgating is disabled and the
3329  * suspend callbacks are run.  suspend puts the hardware and software state
3330  * in each IP into a state suitable for suspend.
3331  * Returns 0 on success, negative error code on failure.
3332  */
3333 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
3334 {
3335 	int i, r;
3336 
3337 	if (adev->in_s0ix)
3338 		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
3339 
3340 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3341 		if (!adev->ip_blocks[i].status.valid)
3342 			continue;
3343 		/* displays are handled in phase1 */
3344 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
3345 			continue;
3346 		/* PSP lost connection when err_event_athub occurs */
3347 		if (amdgpu_ras_intr_triggered() &&
3348 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3349 			adev->ip_blocks[i].status.hw = false;
3350 			continue;
3351 		}
3352 
3353 		/* skip unnecessary suspend if we have not initialized them yet */
3354 		if (adev->gmc.xgmi.pending_reset &&
3355 		    !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3356 		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
3357 		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3358 		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
3359 			adev->ip_blocks[i].status.hw = false;
3360 			continue;
3361 		}
3362 
3363 		/* skip suspend of gfx/mes and psp for S0ix
3364 		 * gfx is in gfxoff state, so on resume it will exit gfxoff just
3365 		 * like at runtime. PSP is also part of the always on hardware
3366 		 * so no need to suspend it.
3367 		 */
3368 		if (adev->in_s0ix &&
3369 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3370 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3371 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
3372 			continue;
3373 
3374 		/* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
3375 		if (adev->in_s0ix &&
3376 		    (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=
3377 		     IP_VERSION(5, 0, 0)) &&
3378 		    (adev->ip_blocks[i].version->type ==
3379 		     AMD_IP_BLOCK_TYPE_SDMA))
3380 			continue;
3381 
3382 		/* During cold boot, swPSP provides the IMU and RLC FW binaries to TOS.
3383 		 * These are in TMR, hence are expected to be reused by PSP-TOS to reload
3384 		 * from this location, and RLC autoload also gets loaded automatically
3385 		 * from here based on the PMFW -> PSP message during the re-init sequence.
3386 		 * Therefore, the psp suspend & resume should be skipped to avoid destroying
3387 		 * the TMR and reloading FWs again for IMU-enabled APU ASICs.
3388 		 */
3389 		if (amdgpu_in_reset(adev) &&
3390 		    (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
3391 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3392 			continue;
3393 
3395 		r = adev->ip_blocks[i].version->funcs->suspend(adev);
3396 		/* XXX handle errors */
3397 		if (r) {
3398 			DRM_ERROR("suspend of IP block <%s> failed %d\n",
3399 				  adev->ip_blocks[i].version->funcs->name, r);
3400 		}
3401 		adev->ip_blocks[i].status.hw = false;
3402 		/* handle putting the SMC in the appropriate state */
3403 		if (!amdgpu_sriov_vf(adev)) {
3404 			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3405 				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3406 				if (r) {
3407 					DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3408 							adev->mp1_state, r);
3409 					return r;
3410 				}
3411 			}
3412 		}
3413 	}
3414 
3415 	return 0;
3416 }
3417 
3418 /**
3419  * amdgpu_device_ip_suspend - run suspend for hardware IPs
3420  *
3421  * @adev: amdgpu_device pointer
3422  *
3423  * Main suspend function for hardware IPs.  The list of all the hardware
3424  * IPs that make up the asic is walked, clockgating is disabled and the
3425  * suspend callbacks are run.  suspend puts the hardware and software state
3426  * in each IP into a state suitable for suspend.
3427  * Returns 0 on success, negative error code on failure.
3428  */
3429 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3430 {
3431 	int r;
3432 
3433 	if (amdgpu_sriov_vf(adev)) {
3434 		amdgpu_virt_fini_data_exchange(adev);
3435 		amdgpu_virt_request_full_gpu(adev, false);
3436 	}
3437 
3438 	amdgpu_ttm_set_buffer_funcs_status(adev, false);
3439 
3440 	r = amdgpu_device_ip_suspend_phase1(adev);
3441 	if (r)
3442 		return r;
3443 	r = amdgpu_device_ip_suspend_phase2(adev);
3444 
3445 	if (amdgpu_sriov_vf(adev))
3446 		amdgpu_virt_release_full_gpu(adev, false);
3447 
3448 	return r;
3449 }
3450 
3451 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3452 {
3453 	int i, r;
3454 
3455 	static enum amd_ip_block_type ip_order[] = {
3456 		AMD_IP_BLOCK_TYPE_COMMON,
3457 		AMD_IP_BLOCK_TYPE_GMC,
3458 		AMD_IP_BLOCK_TYPE_PSP,
3459 		AMD_IP_BLOCK_TYPE_IH,
3460 	};
3461 
3462 	for (i = 0; i < adev->num_ip_blocks; i++) {
3463 		int j;
3464 		struct amdgpu_ip_block *block;
3465 
3466 		block = &adev->ip_blocks[i];
3467 		block->status.hw = false;
3468 
3469 		for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3470 
3471 			if (block->version->type != ip_order[j] ||
3472 				!block->status.valid)
3473 				continue;
3474 
3475 			r = block->version->funcs->hw_init(adev);
3476 			DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3477 			if (r)
3478 				return r;
3479 			block->status.hw = true;
3480 		}
3481 	}
3482 
3483 	return 0;
3484 }
3485 
3486 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3487 {
3488 	int i, r;
3489 
3490 	static enum amd_ip_block_type ip_order[] = {
3491 		AMD_IP_BLOCK_TYPE_SMC,
3492 		AMD_IP_BLOCK_TYPE_DCE,
3493 		AMD_IP_BLOCK_TYPE_GFX,
3494 		AMD_IP_BLOCK_TYPE_SDMA,
3495 		AMD_IP_BLOCK_TYPE_MES,
3496 		AMD_IP_BLOCK_TYPE_UVD,
3497 		AMD_IP_BLOCK_TYPE_VCE,
3498 		AMD_IP_BLOCK_TYPE_VCN,
3499 		AMD_IP_BLOCK_TYPE_JPEG
3500 	};
3501 
3502 	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3503 		int j;
3504 		struct amdgpu_ip_block *block;
3505 
3506 		for (j = 0; j < adev->num_ip_blocks; j++) {
3507 			block = &adev->ip_blocks[j];
3508 
3509 			if (block->version->type != ip_order[i] ||
3510 				!block->status.valid ||
3511 				block->status.hw)
3512 				continue;
3513 
3514 			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3515 				r = block->version->funcs->resume(adev);
3516 			else
3517 				r = block->version->funcs->hw_init(adev);
3518 
3519 			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
3520 			if (r)
3521 				return r;
3522 			block->status.hw = true;
3523 		}
3524 	}
3525 
3526 	return 0;
3527 }
3528 
3529 /**
3530  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3531  *
3532  * @adev: amdgpu_device pointer
3533  *
3534  * First resume function for hardware IPs.  The list of all the hardware
3535  * IPs that make up the asic is walked and the resume callbacks are run for
3536  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3537  * after a suspend and updates the software state as necessary.  This
3538  * function is also used for restoring the GPU after a GPU reset.
3539  * Returns 0 on success, negative error code on failure.
3540  */
3541 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3542 {
3543 	int i, r;
3544 
3545 	for (i = 0; i < adev->num_ip_blocks; i++) {
3546 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3547 			continue;
3548 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3549 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3550 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3551 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3552 
3553 			r = adev->ip_blocks[i].version->funcs->resume(adev);
3554 			if (r) {
3555 				DRM_ERROR("resume of IP block <%s> failed %d\n",
3556 					  adev->ip_blocks[i].version->funcs->name, r);
3557 				return r;
3558 			}
3559 			adev->ip_blocks[i].status.hw = true;
3560 		}
3561 	}
3562 
3563 	return 0;
3564 }
3565 
3566 /**
3567  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3568  *
3569  * @adev: amdgpu_device pointer
3570  *
3571  * Second resume function for hardware IPs.  The list of all the hardware
3572  * IPs that make up the asic is walked and the resume callbacks are run for
3573  * all blocks except COMMON, GMC, IH, and PSP.  resume puts the hardware into a
3574  * functional state after a suspend and updates the software state as
3575  * necessary.  This function is also used for restoring the GPU after a GPU
3576  * reset.
3577  * Returns 0 on success, negative error code on failure.
3578  */
3579 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3580 {
3581 	int i, r;
3582 
3583 	for (i = 0; i < adev->num_ip_blocks; i++) {
3584 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3585 			continue;
3586 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3587 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3588 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3589 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3590 			continue;
3591 		r = adev->ip_blocks[i].version->funcs->resume(adev);
3592 		if (r) {
3593 			DRM_ERROR("resume of IP block <%s> failed %d\n",
3594 				  adev->ip_blocks[i].version->funcs->name, r);
3595 			return r;
3596 		}
3597 		adev->ip_blocks[i].status.hw = true;
3598 	}
3599 
3600 	return 0;
3601 }
3602 
3603 /**
3604  * amdgpu_device_ip_resume - run resume for hardware IPs
3605  *
3606  * @adev: amdgpu_device pointer
3607  *
3608  * Main resume function for hardware IPs.  The hardware IPs
3609  * are split into two resume functions because they are
3610  * also used in recovering from a GPU reset and some additional
3611  * steps need to be taken between them.  In this case (S3/S4) they are
3612  * run sequentially.
3613  * Returns 0 on success, negative error code on failure.
3614  */
3615 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3616 {
3617 	int r;
3618 
3619 	r = amdgpu_device_ip_resume_phase1(adev);
3620 	if (r)
3621 		return r;
3622 
3623 	r = amdgpu_device_fw_loading(adev);
3624 	if (r)
3625 		return r;
3626 
3627 	r = amdgpu_device_ip_resume_phase2(adev);
3628 
3629 	if (adev->mman.buffer_funcs_ring->sched.ready)
3630 		amdgpu_ttm_set_buffer_funcs_status(adev, true);
3631 
3632 	return r;
3633 }
3634 
3635 /**
3636  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3637  *
3638  * @adev: amdgpu_device pointer
3639  *
3640  * Query the VBIOS data tables to determine if the board supports SR-IOV.
3641  */
3642 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3643 {
3644 	if (amdgpu_sriov_vf(adev)) {
3645 		if (adev->is_atom_fw) {
3646 			if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3647 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3648 		} else {
3649 			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3650 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3651 		}
3652 
3653 		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3654 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3655 	}
3656 }
3657 
3658 /**
3659  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3660  *
3661  * @asic_type: AMD asic type
3662  *
3663  * Check if there is DC (new modesetting infrastructure) support for an asic.
3664  * Returns true if DC has support, false if not.
3665  */
3666 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3667 {
3668 	switch (asic_type) {
3669 #ifdef CONFIG_DRM_AMDGPU_SI
3670 	case CHIP_HAINAN:
3671 #endif
3672 	case CHIP_TOPAZ:
3673 		/* chips with no display hardware */
3674 		return false;
3675 #if defined(CONFIG_DRM_AMD_DC)
3676 	case CHIP_TAHITI:
3677 	case CHIP_PITCAIRN:
3678 	case CHIP_VERDE:
3679 	case CHIP_OLAND:
3680 		/*
3681 		 * We have systems in the wild with these ASICs that require
3682 		 * LVDS and VGA support which is not supported with DC.
3683 		 *
3684 		 * Fall back to the non-DC driver here by default so as not to
3685 		 * cause regressions.
3686 		 */
3687 #if defined(CONFIG_DRM_AMD_DC_SI)
3688 		return amdgpu_dc > 0;
3689 #else
3690 		return false;
3691 #endif
3692 	case CHIP_BONAIRE:
3693 	case CHIP_KAVERI:
3694 	case CHIP_KABINI:
3695 	case CHIP_MULLINS:
3696 		/*
3697 		 * We have systems in the wild with these ASICs that require
3698 		 * VGA support which is not supported with DC.
3699 		 *
3700 		 * Fall back to the non-DC driver here by default so as not to
3701 		 * cause regressions.
3702 		 */
3703 		return amdgpu_dc > 0;
3704 	default:
3705 		return amdgpu_dc != 0;
3706 #else
3707 	default:
3708 		if (amdgpu_dc > 0)
3709 			DRM_INFO_ONCE("Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
3710 		return false;
3711 #endif
3712 	}
3713 }
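/*
 * Example (illustrative, assuming the usual tri-state module parameter
 * convention): booting with amdgpu.dc=1 makes amdgpu_dc > 0, which opts
 * the legacy SI/CIK parts above into DC; amdgpu.dc=0 forces the non-DC
 * path everywhere; and the default (-1) keeps the per-ASIC behavior
 * encoded in the switch above, since amdgpu_dc != 0 then evaluates true.
 */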
3714 
3715 /**
3716  * amdgpu_device_has_dc_support - check if dc is supported
3717  *
3718  * @adev: amdgpu_device pointer
3719  *
3720  * Returns true for supported, false for not supported
3721  */
3722 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3723 {
3724 	if (adev->enable_virtual_display ||
3725 	    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3726 		return false;
3727 
3728 	return amdgpu_device_asic_has_dc_support(adev->asic_type);
3729 }
3730 
3731 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3732 {
3733 	struct amdgpu_device *adev =
3734 		container_of(__work, struct amdgpu_device, xgmi_reset_work);
3735 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3736 
3737 	/* It's a bug to not have a hive within this function */
3738 	if (WARN_ON(!hive))
3739 		return;
3740 
3741 	/*
3742 	 * Use task barrier to synchronize all xgmi reset works across the
3743 	 * hive. task_barrier_enter and task_barrier_exit will block
3744 	 * until all the threads running the xgmi reset works reach
3745 	 * those points. task_barrier_full will do both blocks.
3746 	 */
3747 	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3748 
3749 		task_barrier_enter(&hive->tb);
3750 		adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3751 
3752 		if (adev->asic_reset_res)
3753 			goto fail;
3754 
3755 		task_barrier_exit(&hive->tb);
3756 		adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3757 
3758 		if (adev->asic_reset_res)
3759 			goto fail;
3760 
3761 		amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
3762 	} else {
3763 
3764 		task_barrier_full(&hive->tb);
3765 		adev->asic_reset_res = amdgpu_asic_reset(adev);
3766 	}
3767 
3768 fail:
3769 	if (adev->asic_reset_res)
3770 		DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3771 			 adev->asic_reset_res, adev_to_drm(adev)->unique);
3772 	amdgpu_put_xgmi_hive(hive);
3773 }
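/*
 * A minimal sketch of the task barrier pattern used above, assuming the
 * barrier was initialized for every device in the hive; per_device_step()
 * is a hypothetical placeholder:
 *
 *	task_barrier_enter(&hive->tb);	all workers arrive here first
 *	per_device_step(adev);		runs concurrently on each node
 *	task_barrier_exit(&hive->tb);	all workers finish before return
 *
 * task_barrier_full() performs both waits back to back, which is what the
 * non-BACO branch above relies on.
 */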
3774 
3775 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3776 {
3777 	char *input = amdgpu_lockup_timeout;
3778 	char *timeout_setting = NULL;
3779 	int index = 0;
3780 	long timeout;
3781 	int ret = 0;
3782 
3783 	/*
3784 	 * By default the timeout for non-compute jobs is 10000 ms
3785 	 * and 60000 ms for compute jobs.
3786 	 * Under SR-IOV the compute timeout is 60000 ms in one-VF mode
3787 	 * and 10000 ms otherwise.
3788 	 */
3789 	adev->gfx_timeout = msecs_to_jiffies(10000);
3790 	adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3791 	if (amdgpu_sriov_vf(adev))
3792 		adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3793 					msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3794 	else
3795 		adev->compute_timeout = msecs_to_jiffies(60000);
3796 
3797 	if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3798 		while ((timeout_setting = strsep(&input, ",")) &&
3799 				strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3800 			ret = kstrtol(timeout_setting, 0, &timeout);
3801 			if (ret)
3802 				return ret;
3803 
3804 			if (timeout == 0) {
3805 				index++;
3806 				continue;
3807 			} else if (timeout < 0) {
3808 				timeout = MAX_SCHEDULE_TIMEOUT;
3809 				dev_warn(adev->dev, "lockup timeout disabled");
3810 				add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3811 			} else {
3812 				timeout = msecs_to_jiffies(timeout);
3813 			}
3814 
3815 			switch (index++) {
3816 			case 0:
3817 				adev->gfx_timeout = timeout;
3818 				break;
3819 			case 1:
3820 				adev->compute_timeout = timeout;
3821 				break;
3822 			case 2:
3823 				adev->sdma_timeout = timeout;
3824 				break;
3825 			case 3:
3826 				adev->video_timeout = timeout;
3827 				break;
3828 			default:
3829 				break;
3830 			}
3831 		}
3832 		/*
3833 		 * There is only one value specified and
3834 		 * it should apply to all non-compute jobs.
3835 		 */
3836 		if (index == 1) {
3837 			adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3838 			if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3839 				adev->compute_timeout = adev->gfx_timeout;
3840 		}
3841 	}
3842 
3843 	return ret;
3844 }
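/*
 * Example (illustrative) of the lockup_timeout syntax parsed above:
 *
 *	amdgpu.lockup_timeout=10000,60000,-1,10000
 *
 * sets gfx to 10s, compute to 60s, disables the sdma timeout (negative
 * values map to MAX_SCHEDULE_TIMEOUT) and sets video to 10s; a 0 entry
 * keeps the default for that queue.  A single value such as
 * amdgpu.lockup_timeout=5000 applies to all non-compute queues, and to
 * compute as well under SR-IOV or passthrough.
 */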
3845 
3846 /**
3847  * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3848  *
3849  * @adev: amdgpu_device pointer
3850  *
3851  * RAM is direct mapped to the GPU if the IOMMU is not enabled or is in passthrough mode
3852  */
3853 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3854 {
3855 	struct iommu_domain *domain;
3856 
3857 	domain = iommu_get_domain_for_dev(adev->dev);
3858 	if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3859 		adev->ram_is_direct_mapped = true;
3860 }
3861 
3862 static const struct attribute *amdgpu_dev_attributes[] = {
3863 	&dev_attr_pcie_replay_count.attr,
3864 	NULL
3865 };
3866 
3867 static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
3868 {
3869 	if (amdgpu_mcbp == 1)
3870 		adev->gfx.mcbp = true;
3871 	else if (amdgpu_mcbp == 0)
3872 		adev->gfx.mcbp = false;
3873 
3874 	if (amdgpu_sriov_vf(adev))
3875 		adev->gfx.mcbp = true;
3876 
3877 	if (adev->gfx.mcbp)
3878 		DRM_INFO("MCBP is enabled\n");
3879 }
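/*
 * Example (illustrative, assuming the usual tri-state default of -1):
 * amdgpu.mcbp=1 forces mid-command-buffer preemption on, amdgpu.mcbp=0
 * forces it off, and the default leaves adev->gfx.mcbp untouched here;
 * SR-IOV VFs always end up with MCBP enabled regardless.
 */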
3880 
3881 /**
3882  * amdgpu_device_init - initialize the driver
3883  *
3884  * @adev: amdgpu_device pointer
3885  * @flags: driver flags
3886  *
3887  * Initializes the driver info and hw (all asics).
3888  * Returns 0 for success or an error on failure.
3889  * Called at driver startup.
3890  */
3891 int amdgpu_device_init(struct amdgpu_device *adev,
3892 		       uint32_t flags)
3893 {
3894 	struct drm_device *ddev = adev_to_drm(adev);
3895 	struct pci_dev *pdev = adev->pdev;
3896 	int r, i;
3897 	bool px = false;
3898 	u32 max_MBps;
3899 	int tmp;
3900 
3901 	adev->shutdown = false;
3902 	adev->flags = flags;
3903 
3904 	if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3905 		adev->asic_type = amdgpu_force_asic_type;
3906 	else
3907 		adev->asic_type = flags & AMD_ASIC_MASK;
3908 
3909 	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3910 	if (amdgpu_emu_mode == 1)
3911 		adev->usec_timeout *= 10;
3912 	adev->gmc.gart_size = 512 * 1024 * 1024;
3913 	adev->accel_working = false;
3914 	adev->num_rings = 0;
3915 	RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3916 	adev->mman.buffer_funcs = NULL;
3917 	adev->mman.buffer_funcs_ring = NULL;
3918 	adev->vm_manager.vm_pte_funcs = NULL;
3919 	adev->vm_manager.vm_pte_num_scheds = 0;
3920 	adev->gmc.gmc_funcs = NULL;
3921 	adev->harvest_ip_mask = 0x0;
3922 	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3923 	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3924 
3925 	adev->smc_rreg = &amdgpu_invalid_rreg;
3926 	adev->smc_wreg = &amdgpu_invalid_wreg;
3927 	adev->pcie_rreg = &amdgpu_invalid_rreg;
3928 	adev->pcie_wreg = &amdgpu_invalid_wreg;
3929 	adev->pcie_rreg_ext = &amdgpu_invalid_rreg_ext;
3930 	adev->pcie_wreg_ext = &amdgpu_invalid_wreg_ext;
3931 	adev->pciep_rreg = &amdgpu_invalid_rreg;
3932 	adev->pciep_wreg = &amdgpu_invalid_wreg;
3933 	adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3934 	adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3935 	adev->pcie_rreg64_ext = &amdgpu_invalid_rreg64_ext;
3936 	adev->pcie_wreg64_ext = &amdgpu_invalid_wreg64_ext;
3937 	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3938 	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3939 	adev->didt_rreg = &amdgpu_invalid_rreg;
3940 	adev->didt_wreg = &amdgpu_invalid_wreg;
3941 	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3942 	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3943 	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3944 	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3945 
3946 	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3947 		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3948 		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3949 
3950 	/* mutex initialization are all done here so we
3951 	 * can recall function without having locking issues
3952 	 */
3953 	mutex_init(&adev->firmware.mutex);
3954 	mutex_init(&adev->pm.mutex);
3955 	mutex_init(&adev->gfx.gpu_clock_mutex);
3956 	mutex_init(&adev->srbm_mutex);
3957 	mutex_init(&adev->gfx.pipe_reserve_mutex);
3958 	mutex_init(&adev->gfx.gfx_off_mutex);
3959 	mutex_init(&adev->gfx.partition_mutex);
3960 	mutex_init(&adev->grbm_idx_mutex);
3961 	mutex_init(&adev->mn_lock);
3962 	mutex_init(&adev->virt.vf_errors.lock);
3963 	hash_init(adev->mn_hash);
3964 	mutex_init(&adev->psp.mutex);
3965 	mutex_init(&adev->notifier_lock);
3966 	mutex_init(&adev->pm.stable_pstate_ctx_lock);
3967 	mutex_init(&adev->benchmark_mutex);
3968 
3969 	amdgpu_device_init_apu_flags(adev);
3970 
3971 	r = amdgpu_device_check_arguments(adev);
3972 	if (r)
3973 		return r;
3974 
3975 	spin_lock_init(&adev->mmio_idx_lock);
3976 	spin_lock_init(&adev->smc_idx_lock);
3977 	spin_lock_init(&adev->pcie_idx_lock);
3978 	spin_lock_init(&adev->uvd_ctx_idx_lock);
3979 	spin_lock_init(&adev->didt_idx_lock);
3980 	spin_lock_init(&adev->gc_cac_idx_lock);
3981 	spin_lock_init(&adev->se_cac_idx_lock);
3982 	spin_lock_init(&adev->audio_endpt_idx_lock);
3983 	spin_lock_init(&adev->mm_stats.lock);
3984 
3985 	INIT_LIST_HEAD(&adev->shadow_list);
3986 	mutex_init(&adev->shadow_list_lock);
3987 
3988 	INIT_LIST_HEAD(&adev->reset_list);
3989 
3990 	INIT_LIST_HEAD(&adev->ras_list);
3991 
3992 	INIT_LIST_HEAD(&adev->pm.od_kobj_list);
3993 
3994 	INIT_DELAYED_WORK(&adev->delayed_init_work,
3995 			  amdgpu_device_delayed_init_work_handler);
3996 	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3997 			  amdgpu_device_delay_enable_gfx_off);
3998 
3999 	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
4000 
4001 	adev->gfx.gfx_off_req_count = 1;
4002 	adev->gfx.gfx_off_residency = 0;
4003 	adev->gfx.gfx_off_entrycount = 0;
4004 	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
4005 
4006 	atomic_set(&adev->throttling_logging_enabled, 1);
4007 	/*
4008 	 * If throttling continues, logging will be performed every minute
4009 	 * to avoid log flooding. "-1" is subtracted since the thermal
4010 	 * throttling interrupt comes every second. Thus, the total logging
4011 	 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
4012 	 * for throttling interrupt) = 60 seconds.
4013 	 */
4014 	ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
4015 	ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
4016 
4017 	/* Registers mapping */
4018 	/* TODO: block userspace mapping of io register */
4019 	if (adev->asic_type >= CHIP_BONAIRE) {
4020 		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
4021 		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
4022 	} else {
4023 		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
4024 		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
4025 	}
4026 
4027 	for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
4028 		atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
4029 
4030 	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
4031 	if (!adev->rmmio)
4032 		return -ENOMEM;
4033 
4034 	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
4035 	DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size);
4036 
4037 	/*
4038 	 * Reset domain needs to be present early, before the XGMI hive is
4039 	 * discovered (if any) and initialized, to use the reset sem and in_gpu
4040 	 * reset flag early on during init and before calling RREG32.
4041 	 */
4042 	adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
4043 	if (!adev->reset_domain)
4044 		return -ENOMEM;
4045 
4046 	/* detect hw virtualization here */
4047 	amdgpu_detect_virtualization(adev);
4048 
4049 	amdgpu_device_get_pcie_info(adev);
4050 
4051 	r = amdgpu_aca_init(adev);
4052 	if (r)
4053 		return r;
4054 
4055 	r = amdgpu_device_get_job_timeout_settings(adev);
4056 	if (r) {
4057 		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
4058 		return r;
4059 	}
4060 
4061 	/* early init functions */
4062 	r = amdgpu_device_ip_early_init(adev);
4063 	if (r)
4064 		return r;
4065 
4066 	amdgpu_device_set_mcbp(adev);
4067 
4068 	/* Get rid of things like offb */
4069 	r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
4070 	if (r)
4071 		return r;
4072 
4073 	/* Enable TMZ based on IP_VERSION */
4074 	amdgpu_gmc_tmz_set(adev);
4075 
4076 	amdgpu_gmc_noretry_set(adev);
4077 	/* Need to get xgmi info early to decide the reset behavior*/
4078 	if (adev->gmc.xgmi.supported) {
4079 		r = adev->gfxhub.funcs->get_xgmi_info(adev);
4080 		if (r)
4081 			return r;
4082 	}
4083 
4084 	/* enable PCIE atomic ops */
4085 	if (amdgpu_sriov_vf(adev)) {
4086 		if (adev->virt.fw_reserve.p_pf2vf)
4087 			adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
4088 						      adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
4089 				(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
4090 	/* APUs with gfx9 onwards don't rely on PCIe atomics; their internal
4091 	 * path natively supports atomics, so set have_atomics_support to true.
4092 	 */
4093 	} else if ((adev->flags & AMD_IS_APU) &&
4094 		   (amdgpu_ip_version(adev, GC_HWIP, 0) >
4095 		    IP_VERSION(9, 0, 0))) {
4096 		adev->have_atomics_support = true;
4097 	} else {
4098 		adev->have_atomics_support =
4099 			!pci_enable_atomic_ops_to_root(adev->pdev,
4100 					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
4101 					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
4102 	}
4103 
4104 	if (!adev->have_atomics_support)
4105 		dev_info(adev->dev, "PCIE atomic ops are not supported\n");
4106 
4107 	/* doorbell bar mapping and doorbell index init */
4108 	amdgpu_doorbell_init(adev);
4109 
4110 	if (amdgpu_emu_mode == 1) {
4111 		/* post the asic on emulation mode */
4112 		emu_soc_asic_init(adev);
4113 		goto fence_driver_init;
4114 	}
4115 
4116 	amdgpu_reset_init(adev);
4117 
4118 	/* detect if we are with an SRIOV vbios */
4119 	if (adev->bios)
4120 		amdgpu_device_detect_sriov_bios(adev);
4121 
4122 	/* check if we need to reset the asic
4123 	 *  E.g., driver was not cleanly unloaded previously, etc.
4124 	 */
4125 	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
4126 		if (adev->gmc.xgmi.num_physical_nodes) {
4127 			dev_info(adev->dev, "Pending hive reset.\n");
4128 			adev->gmc.xgmi.pending_reset = true;
4129 			/* Only need to init necessary block for SMU to handle the reset */
4130 			for (i = 0; i < adev->num_ip_blocks; i++) {
4131 				if (!adev->ip_blocks[i].status.valid)
4132 					continue;
4133 				if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
4134 				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
4135 				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
4136 				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
4137 					DRM_DEBUG("IP %s disabled for hw_init.\n",
4138 						adev->ip_blocks[i].version->funcs->name);
4139 					adev->ip_blocks[i].status.hw = true;
4140 				}
4141 			}
4142 		} else {
4143 			switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
4144 			case IP_VERSION(13, 0, 0):
4145 			case IP_VERSION(13, 0, 7):
4146 			case IP_VERSION(13, 0, 10):
4147 				r = psp_gpu_reset(adev);
4148 				break;
4149 			default:
4150 				tmp = amdgpu_reset_method;
4151 				/* It should do a default reset when loading or reloading the driver,
4152 				 * regardless of the module parameter reset_method.
4153 				 */
4154 				amdgpu_reset_method = AMD_RESET_METHOD_NONE;
4155 				r = amdgpu_asic_reset(adev);
4156 				amdgpu_reset_method = tmp;
4157 				break;
4158 			}
4159 
4160 			if (r) {
4161 				dev_err(adev->dev, "asic reset on init failed\n");
4162 				goto failed;
4163 			}
4164 		}
4165 	}
4166 
4167 	/* Post card if necessary */
4168 	if (amdgpu_device_need_post(adev)) {
4169 		if (!adev->bios) {
4170 			dev_err(adev->dev, "no vBIOS found\n");
4171 			r = -EINVAL;
4172 			goto failed;
4173 		}
4174 		DRM_INFO("GPU posting now...\n");
4175 		r = amdgpu_device_asic_init(adev);
4176 		if (r) {
4177 			dev_err(adev->dev, "gpu post error!\n");
4178 			goto failed;
4179 		}
4180 	}
4181 
4182 	if (adev->bios) {
4183 		if (adev->is_atom_fw) {
4184 			/* Initialize clocks */
4185 			r = amdgpu_atomfirmware_get_clock_info(adev);
4186 			if (r) {
4187 				dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
4188 				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
4189 				goto failed;
4190 			}
4191 		} else {
4192 			/* Initialize clocks */
4193 			r = amdgpu_atombios_get_clock_info(adev);
4194 			if (r) {
4195 				dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
4196 				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
4197 				goto failed;
4198 			}
4199 			/* init i2c buses */
4200 			if (!amdgpu_device_has_dc_support(adev))
4201 				amdgpu_atombios_i2c_init(adev);
4202 		}
4203 	}
4204 
4205 fence_driver_init:
4206 	/* Fence driver */
4207 	r = amdgpu_fence_driver_sw_init(adev);
4208 	if (r) {
4209 		dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
4210 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
4211 		goto failed;
4212 	}
4213 
4214 	/* init the mode config */
4215 	drm_mode_config_init(adev_to_drm(adev));
4216 
4217 	r = amdgpu_device_ip_init(adev);
4218 	if (r) {
4219 		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
4220 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
4221 		goto release_ras_con;
4222 	}
4223 
4224 	amdgpu_fence_driver_hw_init(adev);
4225 
4226 	dev_info(adev->dev,
4227 		"SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
4228 			adev->gfx.config.max_shader_engines,
4229 			adev->gfx.config.max_sh_per_se,
4230 			adev->gfx.config.max_cu_per_sh,
4231 			adev->gfx.cu_info.number);
4232 
4233 	adev->accel_working = true;
4234 
4235 	amdgpu_vm_check_compute_bug(adev);
4236 
4237 	/* Initialize the buffer migration limit. */
4238 	if (amdgpu_moverate >= 0)
4239 		max_MBps = amdgpu_moverate;
4240 	else
4241 		max_MBps = 8; /* Allow 8 MB/s. */
4242 	/* Get a log2 for easy divisions. */
4243 	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
4244 
4245 	/*
4246 	 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
4247 	 * Otherwise the mgpu fan boost feature will be skipped because the
4248 	 * gpu instance count would be too low.
4249 	 */
4250 	amdgpu_register_gpu_instance(adev);
4251 
4252 	/* enable clockgating, etc. after ib tests, etc. since some blocks require
4253 	 * explicit gating rather than handling it automatically.
4254 	 */
4255 	if (!adev->gmc.xgmi.pending_reset) {
4256 		r = amdgpu_device_ip_late_init(adev);
4257 		if (r) {
4258 			dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
4259 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
4260 			goto release_ras_con;
4261 		}
4262 		/* must succeed. */
4263 		amdgpu_ras_resume(adev);
4264 		queue_delayed_work(system_wq, &adev->delayed_init_work,
4265 				   msecs_to_jiffies(AMDGPU_RESUME_MS));
4266 	}
4267 
4268 	if (amdgpu_sriov_vf(adev)) {
4269 		amdgpu_virt_release_full_gpu(adev, true);
4270 		flush_delayed_work(&adev->delayed_init_work);
4271 	}
4272 
4273 	/*
4274 	 * Register these sysfs interfaces after `late_init`, as some of the
4275 	 * operations performed in `late_init` might affect the creation of
4276 	 * the sysfs interfaces.
4277 	 */
4278 	r = amdgpu_atombios_sysfs_init(adev);
4279 	if (r)
4280 		drm_err(&adev->ddev,
4281 			"registering atombios sysfs failed (%d).\n", r);
4282 
4283 	r = amdgpu_pm_sysfs_init(adev);
4284 	if (r)
4285 		DRM_ERROR("registering pm sysfs failed (%d).\n", r);
4286 
4287 	r = amdgpu_ucode_sysfs_init(adev);
4288 	if (r) {
4289 		adev->ucode_sysfs_en = false;
4290 		DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
4291 	} else
4292 		adev->ucode_sysfs_en = true;
4293 
4294 	r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
4295 	if (r)
4296 		dev_err(adev->dev, "Could not create amdgpu device attr\n");
4297 
4298 	r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group);
4299 	if (r)
4300 		dev_err(adev->dev,
4301 			"Could not create amdgpu board attributes\n");
4302 
4303 	amdgpu_fru_sysfs_init(adev);
4304 	amdgpu_reg_state_sysfs_init(adev);
4305 
4306 	if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
4307 		r = amdgpu_pmu_init(adev);
4308 		if (r)
4309 			dev_err(adev->dev, "amdgpu_pmu_init failed\n");
4310 	}

4311 	/* Keep the stored PCI config space at hand for restore after a sudden PCI error */
4312 	if (amdgpu_device_cache_pci_state(adev->pdev))
4313 		pci_restore_state(pdev);
4314 
4315 	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
4316 	/* this will fail for cards that aren't VGA class devices, just
4317 	 * ignore it
4318 	 */
4319 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4320 		vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
4321 
4322 	px = amdgpu_device_supports_px(ddev);
4323 
4324 	if (px || (!dev_is_removable(&adev->pdev->dev) &&
4325 				apple_gmux_detect(NULL, NULL)))
4326 		vga_switcheroo_register_client(adev->pdev,
4327 					       &amdgpu_switcheroo_ops, px);
4328 
4329 	if (px)
4330 		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
4331 
4332 	if (adev->gmc.xgmi.pending_reset)
4333 		queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
4334 				   msecs_to_jiffies(AMDGPU_RESUME_MS));
4335 
4336 	amdgpu_device_check_iommu_direct_map(adev);
4337 
4338 	return 0;
4339 
4340 release_ras_con:
4341 	if (amdgpu_sriov_vf(adev))
4342 		amdgpu_virt_release_full_gpu(adev, true);
4343 
4344 	/* failed in exclusive mode due to timeout */
4345 	if (amdgpu_sriov_vf(adev) &&
4346 		!amdgpu_sriov_runtime(adev) &&
4347 		amdgpu_virt_mmio_blocked(adev) &&
4348 		!amdgpu_virt_wait_reset(adev)) {
4349 		dev_err(adev->dev, "VF exclusive mode timeout\n");
4350 		/* Don't send request since VF is inactive. */
4351 		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
4352 		adev->virt.ops = NULL;
4353 		r = -EAGAIN;
4354 	}
4355 	amdgpu_release_ras_context(adev);
4356 
4357 failed:
4358 	amdgpu_vf_error_trans_all(adev);
4359 
4360 	return r;
4361 }
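/*
 * A minimal sketch, under the assumption of a simplified probe path, of
 * how the init and teardown entry points in this file pair up; my_probe()
 * and my_remove() are hypothetical wrappers, error handling omitted:
 *
 *	static int my_probe(struct amdgpu_device *adev, u32 flags)
 *	{
 *		return amdgpu_device_init(adev, flags);
 *	}
 *
 *	static void my_remove(struct amdgpu_device *adev)
 *	{
 *		amdgpu_device_fini_hw(adev);	hw teardown at unload/unplug
 *		amdgpu_device_fini_sw(adev);	sw teardown at final release
 *	}
 */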
4362 
4363 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
4364 {
4365 
4366 	/* Clear all CPU mappings pointing to this device */
4367 	unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
4368 
4369 	/* Unmap all mapped bars - Doorbell, registers and VRAM */
4370 	amdgpu_doorbell_fini(adev);
4371 
4372 	iounmap(adev->rmmio);
4373 	adev->rmmio = NULL;
4374 	if (adev->mman.aper_base_kaddr)
4375 		iounmap(adev->mman.aper_base_kaddr);
4376 	adev->mman.aper_base_kaddr = NULL;
4377 
4378 	/* Memory manager related */
4379 	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
4380 		arch_phys_wc_del(adev->gmc.vram_mtrr);
4381 		arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
4382 	}
4383 }
4384 
4385 /**
4386  * amdgpu_device_fini_hw - tear down the driver
4387  *
4388  * @adev: amdgpu_device pointer
4389  *
4390  * Tear down the driver info (all asics).
4391  * Called at driver shutdown.
4392  */
4393 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
4394 {
4395 	dev_info(adev->dev, "amdgpu: finishing device.\n");
4396 	flush_delayed_work(&adev->delayed_init_work);
4397 	adev->shutdown = true;
4398 
4399 	/* make sure IB tests are finished before entering exclusive mode
4400 	 * to avoid preemption during the IB tests
4401 	 */
4402 	if (amdgpu_sriov_vf(adev)) {
4403 		amdgpu_virt_request_full_gpu(adev, false);
4404 		amdgpu_virt_fini_data_exchange(adev);
4405 	}
4406 
4407 	/* disable all interrupts */
4408 	amdgpu_irq_disable_all(adev);
4409 	if (adev->mode_info.mode_config_initialized) {
4410 		if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
4411 			drm_helper_force_disable_all(adev_to_drm(adev));
4412 		else
4413 			drm_atomic_helper_shutdown(adev_to_drm(adev));
4414 	}
4415 	amdgpu_fence_driver_hw_fini(adev);
4416 
4417 	if (adev->mman.initialized)
4418 		drain_workqueue(adev->mman.bdev.wq);
4419 
4420 	if (adev->pm.sysfs_initialized)
4421 		amdgpu_pm_sysfs_fini(adev);
4422 	if (adev->ucode_sysfs_en)
4423 		amdgpu_ucode_sysfs_fini(adev);
4424 	sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
4425 	amdgpu_fru_sysfs_fini(adev);
4426 
4427 	amdgpu_reg_state_sysfs_fini(adev);
4428 
4429 	/* RAS features must be disabled before hw fini */
4430 	amdgpu_ras_pre_fini(adev);
4431 
4432 	amdgpu_ttm_set_buffer_funcs_status(adev, false);
4433 
4434 	amdgpu_device_ip_fini_early(adev);
4435 
4436 	amdgpu_irq_fini_hw(adev);
4437 
4438 	if (adev->mman.initialized)
4439 		ttm_device_clear_dma_mappings(&adev->mman.bdev);
4440 
4441 	amdgpu_gart_dummy_page_fini(adev);
4442 
4443 	if (drm_dev_is_unplugged(adev_to_drm(adev)))
4444 		amdgpu_device_unmap_mmio(adev);
4445 
4446 }
4447 
4448 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4449 {
4450 	int idx;
4451 	bool px;
4452 
4453 	amdgpu_fence_driver_sw_fini(adev);
4454 	amdgpu_device_ip_fini(adev);
4455 	amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
4456 	adev->accel_working = false;
4457 	dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4458 
4459 	amdgpu_reset_fini(adev);
4460 
4461 	amdgpu_aca_fini(adev);
4462 
4463 	/* free i2c buses */
4464 	if (!amdgpu_device_has_dc_support(adev))
4465 		amdgpu_i2c_fini(adev);
4466 
4467 	if (amdgpu_emu_mode != 1)
4468 		amdgpu_atombios_fini(adev);
4469 
4470 	kfree(adev->bios);
4471 	adev->bios = NULL;
4472 
4473 	kfree(adev->fru_info);
4474 	adev->fru_info = NULL;
4475 
4476 	px = amdgpu_device_supports_px(adev_to_drm(adev));
4477 
4478 	if (px || (!dev_is_removable(&adev->pdev->dev) &&
4479 				apple_gmux_detect(NULL, NULL)))
4480 		vga_switcheroo_unregister_client(adev->pdev);
4481 
4482 	if (px)
4483 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
4484 
4485 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4486 		vga_client_unregister(adev->pdev);
4487 
4488 	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4489 
4490 		iounmap(adev->rmmio);
4491 		adev->rmmio = NULL;
4492 		amdgpu_doorbell_fini(adev);
4493 		drm_dev_exit(idx);
4494 	}
4495 
4496 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
4497 		amdgpu_pmu_fini(adev);
4498 	if (adev->mman.discovery_bin)
4499 		amdgpu_discovery_fini(adev);
4500 
4501 	amdgpu_reset_put_reset_domain(adev->reset_domain);
4502 	adev->reset_domain = NULL;
4503 
4504 	kfree(adev->pci_state);
4505 
4506 }
4507 
4508 /**
4509  * amdgpu_device_evict_resources - evict device resources
4510  * @adev: amdgpu device object
4511  *
4512  * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4513  * of the vram memory type. Mainly used for evicting device resources
4514  * at suspend time.
4515  *
4516  */
4517 static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
4518 {
4519 	int ret;
4520 
4521 	/* No need to evict vram on APUs for suspend to ram or s2idle */
4522 	if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4523 		return 0;
4524 
4525 	ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4526 	if (ret)
4527 		DRM_WARN("evicting device resources failed\n");
4528 	return ret;
4529 }
4530 
4531 /*
4532  * Suspend & resume.
4533  */
4534 /**
4535  * amdgpu_device_prepare - prepare for device suspend
4536  *
4537  * @dev: drm dev pointer
4538  *
4539  * Prepare to put the hw in the suspend state (all asics).
4540  * Returns 0 for success or an error on failure.
4541  * Called at driver suspend.
4542  */
4543 int amdgpu_device_prepare(struct drm_device *dev)
4544 {
4545 	struct amdgpu_device *adev = drm_to_adev(dev);
4546 	int i, r;
4547 
4548 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4549 		return 0;
4550 
4551 	/* Evict the majority of BOs before starting suspend sequence */
4552 	r = amdgpu_device_evict_resources(adev);
4553 	if (r)
4554 		return r;
4555 
4556 	for (i = 0; i < adev->num_ip_blocks; i++) {
4557 		if (!adev->ip_blocks[i].status.valid)
4558 			continue;
4559 		if (!adev->ip_blocks[i].version->funcs->prepare_suspend)
4560 			continue;
4561 		r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev);
4562 		if (r)
4563 			return r;
4564 	}
4565 
4566 	return 0;
4567 }
4568 
4569 /**
4570  * amdgpu_device_suspend - initiate device suspend
4571  *
4572  * @dev: drm dev pointer
4573  * @fbcon: notify the fbdev of suspend
4574  *
4575  * Puts the hw in the suspend state (all asics).
4576  * Returns 0 for success or an error on failure.
4577  * Called at driver suspend.
4578  */
4579 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4580 {
4581 	struct amdgpu_device *adev = drm_to_adev(dev);
4582 	int r = 0;
4583 
4584 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4585 		return 0;
4586 
4587 	adev->in_suspend = true;
4588 
4589 	if (amdgpu_sriov_vf(adev)) {
4590 		amdgpu_virt_fini_data_exchange(adev);
4591 		r = amdgpu_virt_request_full_gpu(adev, false);
4592 		if (r)
4593 			return r;
4594 	}
4595 
4596 	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4597 		DRM_WARN("smart shift update failed\n");
4598 
4599 	if (fbcon)
4600 		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4601 
4602 	cancel_delayed_work_sync(&adev->delayed_init_work);
4603 	flush_delayed_work(&adev->gfx.gfx_off_delay_work);
4604 
4605 	amdgpu_ras_suspend(adev);
4606 
4607 	amdgpu_device_ip_suspend_phase1(adev);
4608 
4609 	if (!adev->in_s0ix)
4610 		amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4611 
4612 	r = amdgpu_device_evict_resources(adev);
4613 	if (r)
4614 		return r;
4615 
4616 	amdgpu_ttm_set_buffer_funcs_status(adev, false);
4617 
4618 	amdgpu_fence_driver_hw_fini(adev);
4619 
4620 	amdgpu_device_ip_suspend_phase2(adev);
4621 
4622 	if (amdgpu_sriov_vf(adev))
4623 		amdgpu_virt_release_full_gpu(adev, false);
4624 
4625 	r = amdgpu_dpm_notify_rlc_state(adev, false);
4626 	if (r)
4627 		return r;
4628 
4629 	return 0;
4630 }
4631 
4632 /**
4633  * amdgpu_device_resume - initiate device resume
4634  *
4635  * @dev: drm dev pointer
4636  * @fbcon: notify the fbdev of resume
4637  *
4638  * Bring the hw back to operating state (all asics).
4639  * Returns 0 for success or an error on failure.
4640  * Called at driver resume.
4641  */
4642 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4643 {
4644 	struct amdgpu_device *adev = drm_to_adev(dev);
4645 	int r = 0;
4646 
4647 	if (amdgpu_sriov_vf(adev)) {
4648 		r = amdgpu_virt_request_full_gpu(adev, true);
4649 		if (r)
4650 			return r;
4651 	}
4652 
4653 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4654 		return 0;
4655 
4656 	if (adev->in_s0ix)
4657 		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4658 
4659 	/* post card */
4660 	if (amdgpu_device_need_post(adev)) {
4661 		r = amdgpu_device_asic_init(adev);
4662 		if (r)
4663 			dev_err(adev->dev, "amdgpu asic init failed\n");
4664 	}
4665 
4666 	r = amdgpu_device_ip_resume(adev);
4667 
4668 	if (r) {
4669 		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4670 		goto exit;
4671 	}
4672 	amdgpu_fence_driver_hw_init(adev);
4673 
4674 	if (!adev->in_s0ix) {
4675 		r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4676 		if (r)
4677 			goto exit;
4678 	}
4679 
4680 	r = amdgpu_device_ip_late_init(adev);
4681 	if (r)
4682 		goto exit;
4683 
4684 	queue_delayed_work(system_wq, &adev->delayed_init_work,
4685 			   msecs_to_jiffies(AMDGPU_RESUME_MS));
4686 exit:
4687 	if (amdgpu_sriov_vf(adev)) {
4688 		amdgpu_virt_init_data_exchange(adev);
4689 		amdgpu_virt_release_full_gpu(adev, true);
4690 	}
4691 
4692 	if (r)
4693 		return r;
4694 
4695 	/* Make sure IB tests flushed */
4696 	flush_delayed_work(&adev->delayed_init_work);
4697 
4698 	if (fbcon)
4699 		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4700 
4701 	amdgpu_ras_resume(adev);
4702 
4703 	if (adev->mode_info.num_crtc) {
4704 		/*
4705 		 * Most of the connector probing functions try to acquire runtime pm
4706 		 * refs to ensure that the GPU is powered on when connector polling is
4707 		 * performed. Since we're calling this from a runtime PM callback,
4708 		 * trying to acquire rpm refs will cause us to deadlock.
4709 		 *
4710 		 * Since we're guaranteed to be holding the rpm lock, it's safe to
4711 		 * temporarily disable the rpm helpers so this doesn't deadlock us.
4712 		 */
4713 #ifdef CONFIG_PM
4714 		dev->dev->power.disable_depth++;
4715 #endif
4716 		if (!adev->dc_enabled)
4717 			drm_helper_hpd_irq_event(dev);
4718 		else
4719 			drm_kms_helper_hotplug_event(dev);
4720 #ifdef CONFIG_PM
4721 		dev->dev->power.disable_depth--;
4722 #endif
4723 	}
4724 	adev->in_suspend = false;
4725 
4726 	if (adev->enable_mes)
4727 		amdgpu_mes_self_test(adev);
4728 
4729 	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4730 		DRM_WARN("smart shift update failed\n");
4731 
4732 	return 0;
4733 }
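/*
 * A minimal sketch, assuming a simplified system suspend/resume flow, of
 * how the entry points above are ordered for S3; the names on the left
 * are the generic PM phases, not literal callback names:
 *
 *	prepare: amdgpu_device_prepare(dev);	evict BOs, prep IP blocks
 *	suspend: amdgpu_device_suspend(dev, true);
 *	resume:  amdgpu_device_resume(dev, true);
 */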
4734 
4735 /**
4736  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4737  *
4738  * @adev: amdgpu_device pointer
4739  *
4740  * The list of all the hardware IPs that make up the asic is walked and
4741  * the check_soft_reset callbacks are run.  check_soft_reset determines
4742  * if the asic is still hung or not.
4743  * Returns true if any of the IPs are still in a hung state, false if not.
4744  */
4745 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4746 {
4747 	int i;
4748 	bool asic_hang = false;
4749 
4750 	if (amdgpu_sriov_vf(adev))
4751 		return true;
4752 
4753 	if (amdgpu_asic_need_full_reset(adev))
4754 		return true;
4755 
4756 	for (i = 0; i < adev->num_ip_blocks; i++) {
4757 		if (!adev->ip_blocks[i].status.valid)
4758 			continue;
4759 		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4760 			adev->ip_blocks[i].status.hang =
4761 				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4762 		if (adev->ip_blocks[i].status.hang) {
4763 			dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4764 			asic_hang = true;
4765 		}
4766 	}
4767 	return asic_hang;
4768 }
4769 
4770 /**
4771  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4772  *
4773  * @adev: amdgpu_device pointer
4774  *
4775  * The list of all the hardware IPs that make up the asic is walked and the
4776  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4777  * handles any IP specific hardware or software state changes that are
4778  * necessary for a soft reset to succeed.
4779  * Returns 0 on success, negative error code on failure.
4780  */
4781 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4782 {
4783 	int i, r = 0;
4784 
4785 	for (i = 0; i < adev->num_ip_blocks; i++) {
4786 		if (!adev->ip_blocks[i].status.valid)
4787 			continue;
4788 		if (adev->ip_blocks[i].status.hang &&
4789 		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4790 			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4791 			if (r)
4792 				return r;
4793 		}
4794 	}
4795 
4796 	return 0;
4797 }
4798 
4799 /**
4800  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4801  *
4802  * @adev: amdgpu_device pointer
4803  *
4804  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4805  * reset is necessary to recover.
4806  * Returns true if a full asic reset is required, false if not.
4807  */
4808 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4809 {
4810 	int i;
4811 
4812 	if (amdgpu_asic_need_full_reset(adev))
4813 		return true;
4814 
4815 	for (i = 0; i < adev->num_ip_blocks; i++) {
4816 		if (!adev->ip_blocks[i].status.valid)
4817 			continue;
4818 		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4819 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4820 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4821 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4822 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4823 			if (adev->ip_blocks[i].status.hang) {
4824 				dev_info(adev->dev, "Some blocks need a full reset!\n");
4825 				return true;
4826 			}
4827 		}
4828 	}
4829 	return false;
4830 }
4831 
4832 /**
4833  * amdgpu_device_ip_soft_reset - do a soft reset
4834  *
4835  * @adev: amdgpu_device pointer
4836  *
4837  * The list of all the hardware IPs that make up the asic is walked and the
4838  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4839  * IP specific hardware or software state changes that are necessary to soft
4840  * reset the IP.
4841  * Returns 0 on success, negative error code on failure.
4842  */
4843 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4844 {
4845 	int i, r = 0;
4846 
4847 	for (i = 0; i < adev->num_ip_blocks; i++) {
4848 		if (!adev->ip_blocks[i].status.valid)
4849 			continue;
4850 		if (adev->ip_blocks[i].status.hang &&
4851 		    adev->ip_blocks[i].version->funcs->soft_reset) {
4852 			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4853 			if (r)
4854 				return r;
4855 		}
4856 	}
4857 
4858 	return 0;
4859 }
4860 
4861 /**
4862  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4863  *
4864  * @adev: amdgpu_device pointer
4865  *
4866  * The list of all the hardware IPs that make up the asic is walked and the
4867  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4868  * handles any IP specific hardware or software state changes that are
4869  * necessary after the IP has been soft reset.
4870  * Returns 0 on success, negative error code on failure.
4871  */
4872 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4873 {
4874 	int i, r = 0;
4875 
4876 	for (i = 0; i < adev->num_ip_blocks; i++) {
4877 		if (!adev->ip_blocks[i].status.valid)
4878 			continue;
4879 		if (adev->ip_blocks[i].status.hang &&
4880 		    adev->ip_blocks[i].version->funcs->post_soft_reset)
4881 			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4882 		if (r)
4883 			return r;
4884 	}
4885 
4886 	return 0;
4887 }
4888 
4889 /**
4890  * amdgpu_device_recover_vram - Recover some VRAM contents
4891  *
4892  * @adev: amdgpu_device pointer
4893  *
4894  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4895  * restore things like GPUVM page tables after a GPU reset where
4896  * the contents of VRAM might be lost.
4897  *
4898  * Returns:
4899  * 0 on success, negative error code on failure.
4900  */
4901 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4902 {
4903 	struct dma_fence *fence = NULL, *next = NULL;
4904 	struct amdgpu_bo *shadow;
4905 	struct amdgpu_bo_vm *vmbo;
4906 	long r = 1, tmo;
4907 
4908 	if (amdgpu_sriov_runtime(adev))
4909 		tmo = msecs_to_jiffies(8000);
4910 	else
4911 		tmo = msecs_to_jiffies(100);
4912 
4913 	dev_info(adev->dev, "recover vram bo from shadow start\n");
4914 	mutex_lock(&adev->shadow_list_lock);
4915 	list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4916 		/* If vm is compute context or adev is APU, shadow will be NULL */
4917 		if (!vmbo->shadow)
4918 			continue;
4919 		shadow = vmbo->shadow;
4920 
4921 		/* No need to recover an evicted BO */
4922 		if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4923 		    shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4924 		    shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4925 			continue;
4926 
4927 		r = amdgpu_bo_restore_shadow(shadow, &next);
4928 		if (r)
4929 			break;
4930 
4931 		if (fence) {
4932 			tmo = dma_fence_wait_timeout(fence, false, tmo);
4933 			dma_fence_put(fence);
4934 			fence = next;
4935 			if (tmo == 0) {
4936 				r = -ETIMEDOUT;
4937 				break;
4938 			} else if (tmo < 0) {
4939 				r = tmo;
4940 				break;
4941 			}
4942 		} else {
4943 			fence = next;
4944 		}
4945 	}
4946 	mutex_unlock(&adev->shadow_list_lock);
4947 
4948 	if (fence)
4949 		tmo = dma_fence_wait_timeout(fence, false, tmo);
4950 	dma_fence_put(fence);
4951 
4952 	if (r < 0 || tmo <= 0) {
4953 		dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4954 		return -EIO;
4955 	}
4956 
4957 	dev_info(adev->dev, "recover vram bo from shadow done\n");
4958 	return 0;
4959 }
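/*
 * A minimal sketch of the pipelined fence-wait pattern used above: the
 * restore of shadow i+1 is issued before waiting on shadow i, so copies
 * overlap with waits; start_restore() is a hypothetical placeholder:
 *
 *	struct dma_fence *fence = NULL, *next = NULL;
 *
 *	for each shadow bo {
 *		start_restore(bo, &next);	queue copy, returns a fence
 *		if (fence) {
 *			tmo = dma_fence_wait_timeout(fence, false, tmo);
 *			dma_fence_put(fence);
 *		}
 *		fence = next;	waited on in the next iteration
 *	}
 */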
4960 
4961 
4962 /**
4963  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4964  *
4965  * @adev: amdgpu_device pointer
4966  * @from_hypervisor: request from hypervisor
4967  *
4968  * Do a VF FLR and reinitialize the asic.
4969  * Returns 0 on success, negative error code on failure.
4970  */
4971 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4972 				     bool from_hypervisor)
4973 {
4974 	int r;
4975 	struct amdgpu_hive_info *hive = NULL;
4976 	int retry_limit = 0;
4977 
4978 retry:
4979 	amdgpu_amdkfd_pre_reset(adev);
4980 
4981 	if (from_hypervisor)
4982 		r = amdgpu_virt_request_full_gpu(adev, true);
4983 	else
4984 		r = amdgpu_virt_reset_gpu(adev);
4985 	if (r)
4986 		return r;
4987 	amdgpu_irq_gpu_reset_resume_helper(adev);
4988 
4989 	/* some sw clean up VF needs to do before recover */
4990 	amdgpu_virt_post_reset(adev);
4991 
4992 	/* Resume IP prior to SMC */
4993 	r = amdgpu_device_ip_reinit_early_sriov(adev);
4994 	if (r)
4995 		goto error;
4996 
4997 	amdgpu_virt_init_data_exchange(adev);
4998 
4999 	r = amdgpu_device_fw_loading(adev);
5000 	if (r)
5001 		return r;
5002 
5003 	/* now we are okay to resume SMC/CP/SDMA */
5004 	r = amdgpu_device_ip_reinit_late_sriov(adev);
5005 	if (r)
5006 		goto error;
5007 
5008 	hive = amdgpu_get_xgmi_hive(adev);
5009 	/* Update PSP FW topology after reset */
5010 	if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
5011 		r = amdgpu_xgmi_update_topology(hive, adev);
5012 
5013 	if (hive)
5014 		amdgpu_put_xgmi_hive(hive);
5015 
5016 	if (!r) {
5017 		r = amdgpu_ib_ring_tests(adev);
5018 
5019 		amdgpu_amdkfd_post_reset(adev);
5020 	}
5021 
5022 error:
5023 	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
5024 		amdgpu_inc_vram_lost(adev);
5025 		r = amdgpu_device_recover_vram(adev);
5026 	}
5027 	amdgpu_virt_release_full_gpu(adev, true);
5028 
5029 	if (AMDGPU_RETRY_SRIOV_RESET(r)) {
5030 		if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
5031 			retry_limit++;
5032 			goto retry;
5033 		} else
5034 			DRM_ERROR("GPU reset retry is beyond the retry limit\n");
5035 	}
5036 
5037 	return r;
5038 }
5039 
5040 /**
5041  * amdgpu_device_has_job_running - check if there is any job in the pending list
5042  *
5043  * @adev: amdgpu_device pointer
5044  *
5045  * Check if there is any job in the pending list of any ring.
5046  */
5047 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
5048 {
5049 	int i;
5050 	struct drm_sched_job *job;
5051 
5052 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5053 		struct amdgpu_ring *ring = adev->rings[i];
5054 
5055 		if (!ring || !drm_sched_wqueue_ready(&ring->sched))
5056 			continue;
5057 
5058 		spin_lock(&ring->sched.job_list_lock);
5059 		job = list_first_entry_or_null(&ring->sched.pending_list,
5060 					       struct drm_sched_job, list);
5061 		spin_unlock(&ring->sched.job_list_lock);
5062 		if (job)
5063 			return true;
5064 	}
5065 	return false;
5066 }
5067 
5068 /**
5069  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
5070  *
5071  * @adev: amdgpu_device pointer
5072  *
5073  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
5074  * a hung GPU.
5075  */
5076 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
5077 {
5078 
5079 	if (amdgpu_gpu_recovery == 0)
5080 		goto disabled;
5081 
5082 	/* Skip soft reset check in fatal error mode */
5083 	if (!amdgpu_ras_is_poison_mode_supported(adev))
5084 		return true;
5085 
5086 	if (amdgpu_sriov_vf(adev))
5087 		return true;
5088 
5089 	if (amdgpu_gpu_recovery == -1) {
5090 		switch (adev->asic_type) {
5091 #ifdef CONFIG_DRM_AMDGPU_SI
5092 		case CHIP_VERDE:
5093 		case CHIP_TAHITI:
5094 		case CHIP_PITCAIRN:
5095 		case CHIP_OLAND:
5096 		case CHIP_HAINAN:
5097 #endif
5098 #ifdef CONFIG_DRM_AMDGPU_CIK
5099 		case CHIP_KAVERI:
5100 		case CHIP_KABINI:
5101 		case CHIP_MULLINS:
5102 #endif
5103 		case CHIP_CARRIZO:
5104 		case CHIP_STONEY:
5105 		case CHIP_CYAN_SKILLFISH:
5106 			goto disabled;
5107 		default:
5108 			break;
5109 		}
5110 	}
5111 
5112 	return true;
5113 
5114 disabled:
5115 	dev_info(adev->dev, "GPU recovery disabled.\n");
5116 	return false;
5117 }
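/*
 * Example (illustrative): the amdgpu.gpu_recovery module parameter
 * checked above is a tri-state.  0 disables recovery, 1 enables it
 * unconditionally, and the default (-1, "auto") enables it except on
 * the legacy ASICs listed in the switch statement above.
 */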
5118 
5119 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
5120 {
5121 	u32 i;
5122 	int ret = 0;
5123 
5124 	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
5125 
5126 	dev_info(adev->dev, "GPU mode1 reset\n");
5127 
5128 	/* disable BM */
5129 	pci_clear_master(adev->pdev);
5130 
5131 	amdgpu_device_cache_pci_state(adev->pdev);
5132 
5133 	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
5134 		dev_info(adev->dev, "GPU smu mode1 reset\n");
5135 		ret = amdgpu_dpm_mode1_reset(adev);
5136 	} else {
5137 		dev_info(adev->dev, "GPU psp mode1 reset\n");
5138 		ret = psp_gpu_reset(adev);
5139 	}
5140 
5141 	if (ret)
5142 		goto mode1_reset_failed;
5143 
5144 	amdgpu_device_load_pci_state(adev->pdev);
5145 	ret = amdgpu_psp_wait_for_bootloader(adev);
5146 	if (ret)
5147 		goto mode1_reset_failed;
5148 
5149 	/* wait for asic to come out of reset */
5150 	for (i = 0; i < adev->usec_timeout; i++) {
5151 		u32 memsize = adev->nbio.funcs->get_memsize(adev);
5152 
5153 		if (memsize != 0xffffffff)
5154 			break;
5155 		udelay(1);
5156 	}
5157 
5158 	if (i >= adev->usec_timeout) {
5159 		ret = -ETIMEDOUT;
5160 		goto mode1_reset_failed;
5161 	}
5162 
5163 	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
5164 
5165 	return 0;
5166 
5167 mode1_reset_failed:
5168 	dev_err(adev->dev, "GPU mode1 reset failed\n");
5169 	return ret;
5170 }
5171 
5172 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
5173 				 struct amdgpu_reset_context *reset_context)
5174 {
5175 	int i, r = 0;
5176 	struct amdgpu_job *job = NULL;
5177 	bool need_full_reset =
5178 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5179 
5180 	if (reset_context->reset_req_dev == adev)
5181 		job = reset_context->job;
5182 
5183 	if (amdgpu_sriov_vf(adev)) {
5184 		/* stop the data exchange thread */
5185 		amdgpu_virt_fini_data_exchange(adev);
5186 	}
5187 
5188 	amdgpu_fence_driver_isr_toggle(adev, true);
5189 
5190 	/* block all schedulers and reset given job's ring */
5191 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5192 		struct amdgpu_ring *ring = adev->rings[i];
5193 
5194 		if (!ring || !drm_sched_wqueue_ready(&ring->sched))
5195 			continue;
5196 
5197 		/* Clear job fences from the fence driver so force_completion
5198 		 * doesn't leave NULL and vm flush fences in the fence driver
5199 		 */
5200 		amdgpu_fence_driver_clear_job_fences(ring);
5201 
5202 		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
5203 		amdgpu_fence_driver_force_completion(ring);
5204 	}
5205 
5206 	amdgpu_fence_driver_isr_toggle(adev, false);
5207 
5208 	if (job && job->vm)
5209 		drm_sched_increase_karma(&job->base);
5210 
5211 	r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
5212 	/* If reset handler not implemented, continue; otherwise return */
5213 	if (r == -EOPNOTSUPP)
5214 		r = 0;
5215 	else
5216 		return r;
5217 
5218 	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
5219 	if (!amdgpu_sriov_vf(adev)) {
5220 
5221 		if (!need_full_reset)
5222 			need_full_reset = amdgpu_device_ip_need_full_reset(adev);
5223 
5224 		if (!need_full_reset && amdgpu_gpu_recovery &&
5225 		    amdgpu_device_ip_check_soft_reset(adev)) {
5226 			amdgpu_device_ip_pre_soft_reset(adev);
5227 			r = amdgpu_device_ip_soft_reset(adev);
5228 			amdgpu_device_ip_post_soft_reset(adev);
5229 			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
5230 				dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
5231 				need_full_reset = true;
5232 			}
5233 		}
5234 
5235 		if (need_full_reset)
5236 			r = amdgpu_device_ip_suspend(adev);
5237 		if (need_full_reset)
5238 			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5239 		else
5240 			clear_bit(AMDGPU_NEED_FULL_RESET,
5241 				  &reset_context->flags);
5242 	}
5243 
5244 	return r;
5245 }
5246 
5247 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
5248 {
5249 	int i;
5250 
5251 	lockdep_assert_held(&adev->reset_domain->sem);
5252 
5253 	for (i = 0; i < adev->reset_info.num_regs; i++) {
5254 		adev->reset_info.reset_dump_reg_value[i] =
5255 			RREG32(adev->reset_info.reset_dump_reg_list[i]);
5256 
5257 		trace_amdgpu_reset_reg_dumps(adev->reset_info.reset_dump_reg_list[i],
5258 					     adev->reset_info.reset_dump_reg_value[i]);
5259 	}
5260 
5261 	return 0;
5262 }
5263 
5264 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
5265 			 struct amdgpu_reset_context *reset_context)
5266 {
5267 	struct amdgpu_device *tmp_adev = NULL;
5268 	bool need_full_reset, skip_hw_reset, vram_lost = false;
5269 	int r = 0;
5270 
5271 	/* Try reset handler method first */
5272 	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5273 				    reset_list);
5274 	amdgpu_reset_reg_dumps(tmp_adev);
5275 
5276 	reset_context->reset_device_list = device_list_handle;
5277 	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
5278 	/* If reset handler not implemented, continue; otherwise return */
5279 	if (r == -EOPNOTSUPP)
5280 		r = 0;
5281 	else
5282 		return r;
5283 
5284 	/* Reset handler not implemented, use the default method */
5285 	need_full_reset =
5286 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5287 	skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
5288 
5289 	/*
5290 	 * ASIC reset has to be done on all XGMI hive nodes ASAP
5291 	 * to allow proper links negotiation in FW (within 1 sec)
5292 	 */
5293 	if (!skip_hw_reset && need_full_reset) {
5294 		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5295 			/* For XGMI run all resets in parallel to speed up the process */
5296 			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5297 				tmp_adev->gmc.xgmi.pending_reset = false;
5298 				if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
5299 					r = -EALREADY;
5300 			} else
5301 				r = amdgpu_asic_reset(tmp_adev);
5302 
5303 			if (r) {
5304 				dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
5305 					 r, adev_to_drm(tmp_adev)->unique);
5306 				goto out;
5307 			}
5308 		}
5309 
5310 		/* For XGMI wait for all resets to complete before proceed */
5311 		if (!r) {
5312 			list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5313 				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5314 					flush_work(&tmp_adev->xgmi_reset_work);
5315 					r = tmp_adev->asic_reset_res;
5316 					if (r)
5317 						break;
5318 				}
5319 			}
5320 		}
5321 	}
5322 
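	/*
	 * If a RAS fatal-error interrupt triggered this reset, clear the
	 * latched MMHUB error counts and the interrupt state now that the
	 * ASIC(s) have been reset.
	 */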
5323 	if (!r && amdgpu_ras_intr_triggered()) {
5324 		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5325 			amdgpu_ras_reset_error_count(tmp_adev, AMDGPU_RAS_BLOCK__MMHUB);
5326 		}
5327 
5328 		amdgpu_ras_intr_cleared();
5329 	}
5330 
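	/* Re-init and resume every device in the list after the HW reset */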
5331 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5332 		if (need_full_reset) {
5333 			/* post card */
5334 			r = amdgpu_device_asic_init(tmp_adev);
5335 			if (r) {
5336 				dev_warn(tmp_adev->dev, "asic atom init failed!\n");
5337 			} else {
5338 				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
5339 
5340 				r = amdgpu_device_ip_resume_phase1(tmp_adev);
5341 				if (r)
5342 					goto out;
5343 
5344 				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
5345 
5346 				amdgpu_coredump(tmp_adev, vram_lost, reset_context);
5347 
5348 				if (vram_lost) {
5349 					DRM_INFO("VRAM is lost due to GPU reset!\n");
5350 					amdgpu_inc_vram_lost(tmp_adev);
5351 				}
5352 
5353 				r = amdgpu_device_fw_loading(tmp_adev);
5354 				if (r)
5355 					return r;
5356 
5357 				r = amdgpu_xcp_restore_partition_mode(
5358 					tmp_adev->xcp_mgr);
5359 				if (r)
5360 					goto out;
5361 
5362 				r = amdgpu_device_ip_resume_phase2(tmp_adev);
5363 				if (r)
5364 					goto out;
5365 
5366 				if (tmp_adev->mman.buffer_funcs_ring->sched.ready)
5367 					amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true);
5368 
5369 				if (vram_lost)
5370 					amdgpu_device_fill_reset_magic(tmp_adev);
5371 
5372 				/*
5373 				 * Add this ASIC back as tracked since the reset
5374 				 * has already completed successfully.
5375 				 */
5376 				amdgpu_register_gpu_instance(tmp_adev);
5377 
5378 				if (!reset_context->hive &&
5379 				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5380 					amdgpu_xgmi_add_device(tmp_adev);
5381 
5382 				r = amdgpu_device_ip_late_init(tmp_adev);
5383 				if (r)
5384 					goto out;
5385 
5386 				drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
5387 
5388 				/*
5389 				 * The GPU enters a bad state once the number of
5390 				 * faulty pages retired by ECC reaches the
5391 				 * threshold, and RAS recovery is scheduled next.
5392 				 * So check here whether the bad page threshold
5393 				 * has indeed been exceeded and abort recovery if
5394 				 * so, reminding the user to retire this GPU or
5395 				 * set a bigger bad_page_threshold value the next
5396 				 * time the driver is probed.
5397 				 */
5398 				if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
5399 					/* must succeed. */
5400 					amdgpu_ras_resume(tmp_adev);
5401 				} else {
5402 					r = -EINVAL;
5403 					goto out;
5404 				}
5405 
5406 				/* Update PSP FW topology after reset */
5407 				if (reset_context->hive &&
5408 				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5409 					r = amdgpu_xgmi_update_topology(
5410 						reset_context->hive, tmp_adev);
5411 			}
5412 		}
5413 
5414 out:
5415 		if (!r) {
5416 			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
5417 			r = amdgpu_ib_ring_tests(tmp_adev);
5418 			if (r) {
5419 				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
5420 				need_full_reset = true;
5421 				r = -EAGAIN;
5422 				goto end;
5423 			}
5424 		}
5425 
5426 		if (!r)
5427 			r = amdgpu_device_recover_vram(tmp_adev);
5428 		else
5429 			tmp_adev->asic_reset_res = r;
5430 	}
5431 
5432 end:
5433 	if (need_full_reset)
5434 		set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5435 	else
5436 		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5437 	return r;
5438 }
5439 
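/*
 * Put the MP1 (SMU firmware) into the state matching the reset method
 * that is about to be used: shutdown for mode1, reset for mode2.
 */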
5440 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
5441 {
5443 	switch (amdgpu_asic_reset_method(adev)) {
5444 	case AMD_RESET_METHOD_MODE1:
5445 		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
5446 		break;
5447 	case AMD_RESET_METHOD_MODE2:
5448 		adev->mp1_state = PP_MP1_STATE_RESET;
5449 		break;
5450 	default:
5451 		adev->mp1_state = PP_MP1_STATE_NONE;
5452 		break;
5453 	}
5454 }
5455 
5456 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
5457 {
5458 	amdgpu_vf_error_trans_all(adev);
5459 	adev->mp1_state = PP_MP1_STATE_NONE;
5460 }
5461 
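/*
 * Re-enable and runtime-resume the display audio device (PCI function 1
 * on the GPU's bus) that was suspended before the reset started.
 */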
5462 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5463 {
5464 	struct pci_dev *p = NULL;
5465 
5466 	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5467 			adev->pdev->bus->number, 1);
5468 	if (p) {
5469 		pm_runtime_enable(&(p->dev));
5470 		pm_runtime_resume(&(p->dev));
5471 	}
5472 
5473 	pci_dev_put(p);
5474 }
5475 
5476 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5477 {
5478 	enum amd_reset_method reset_method;
5479 	struct pci_dev *p = NULL;
5480 	u64 expires;
5481 
5482 	/*
5483 	 * For now, only BACO and mode1 reset are confirmed to suffer
5484 	 * the audio issue when the device is not properly suspended.
5485 	 */
5486 	reset_method = amdgpu_asic_reset_method(adev);
5487 	if ((reset_method != AMD_RESET_METHOD_BACO) &&
5488 	     (reset_method != AMD_RESET_METHOD_MODE1))
5489 		return -EINVAL;
5490 
5491 	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5492 			adev->pdev->bus->number, 1);
5493 	if (!p)
5494 		return -ENODEV;
5495 
5496 	expires = pm_runtime_autosuspend_expiration(&(p->dev));
5497 	if (!expires)
5498 		/*
5499 		 * If we cannot get the audio device autosuspend delay,
5500 		 * use a fixed 4s interval instead. Since 3s is the audio
5501 		 * controller's default autosuspend delay, 4s is
5502 		 * guaranteed to cover it.
5503 		 */
5504 		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5505 
5506 	while (!pm_runtime_status_suspended(&(p->dev))) {
5507 		if (!pm_runtime_suspend(&(p->dev)))
5508 			break;
5509 
5510 		if (expires < ktime_get_mono_fast_ns()) {
5511 			dev_warn(adev->dev, "failed to suspend display audio\n");
5512 			pci_dev_put(p);
5513 			/* TODO: abort the subsequent GPU reset? */
5514 			return -ETIMEDOUT;
5515 		}
5516 	}
5517 
5518 	pm_runtime_disable(&(p->dev));
5519 
5520 	pci_dev_put(p);
5521 	return 0;
5522 }
5523 
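/*
 * Cancel reset work queued from other sources (debugfs, KFD, SR-IOV FLR,
 * RAS recovery) so it does not race with the recovery in progress.
 */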
5524 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5525 {
5526 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5527 
5528 #if defined(CONFIG_DEBUG_FS)
5529 	if (!amdgpu_sriov_vf(adev))
5530 		cancel_work(&adev->reset_work);
5531 #endif
5532 
5533 	if (adev->kfd.dev)
5534 		cancel_work(&adev->kfd.reset_work);
5535 
5536 	if (amdgpu_sriov_vf(adev))
5537 		cancel_work(&adev->virt.flr_work);
5538 
5539 	if (con && adev->ras_enabled)
5540 		cancel_work(&con->recovery_work);
5541 
5542 }
5543 
5544 /**
5545  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5546  *
5547  * @adev: amdgpu_device pointer
5548  * @job: the job that triggered the hang (may be NULL)
5549  * @reset_context: amdgpu reset context pointer
5550  *
5551  * Attempt to reset the GPU if it has hung (all ASICs).
5552  * Attempt to do a soft reset or a full reset and reinitialize the ASIC.
5553  * Returns 0 for success or an error on failure.
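 *
 * A typical caller (e.g. a job timeout handler) zeroes a struct
 * amdgpu_reset_context, sets .method = AMD_RESET_METHOD_NONE and
 * .reset_req_dev = adev, clears AMDGPU_NEED_FULL_RESET in .flags and
 * then calls this function.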
5554  */
5556 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5557 			      struct amdgpu_job *job,
5558 			      struct amdgpu_reset_context *reset_context)
5559 {
5560 	struct list_head device_list, *device_list_handle =  NULL;
5561 	bool job_signaled = false;
5562 	struct amdgpu_hive_info *hive = NULL;
5563 	struct amdgpu_device *tmp_adev = NULL;
5564 	int i, r = 0;
5565 	bool need_emergency_restart = false;
5566 	bool audio_suspended = false;
5567 
5568 	/*
5569 	 * Special case: RAS triggered and full reset isn't supported
5570 	 */
5571 	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5572 
5573 	/*
5574 	 * Flush RAM to disk so that after reboot
5575 	 * the user can read the log and see why the system rebooted.
5576 	 */
5577 	if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
5578 		amdgpu_ras_get_context(adev)->reboot) {
5579 		DRM_WARN("Emergency reboot.");
5580 
5581 		ksys_sync_helper();
5582 		emergency_restart();
5583 	}
5584 
5585 	dev_info(adev->dev, "GPU %s begin!\n",
5586 		need_emergency_restart ? "jobs stop" : "reset");
5587 
5588 	if (!amdgpu_sriov_vf(adev))
5589 		hive = amdgpu_get_xgmi_hive(adev);
5590 	if (hive)
5591 		mutex_lock(&hive->hive_lock);
5592 
5593 	reset_context->job = job;
5594 	reset_context->hive = hive;
5595 	/*
5596 	 * Build list of devices to reset.
5597 	 * If we are in XGMI hive mode, re-sort the device list
5598 	 * so that adev is in the first position.
5599 	 */
5600 	INIT_LIST_HEAD(&device_list);
5601 	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5602 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5603 			list_add_tail(&tmp_adev->reset_list, &device_list);
5604 			if (adev->shutdown)
5605 				tmp_adev->shutdown = true;
5606 		}
5607 		if (!list_is_first(&adev->reset_list, &device_list))
5608 			list_rotate_to_front(&adev->reset_list, &device_list);
5609 		device_list_handle = &device_list;
5610 	} else {
5611 		list_add_tail(&adev->reset_list, &device_list);
5612 		device_list_handle = &device_list;
5613 	}
5614 
5615 	/* We need to lock the reset domain only once, for both XGMI and single-device cases */
5616 	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5617 				    reset_list);
5618 	amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5619 
5620 	/* block all schedulers and reset given job's ring */
5621 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5622 
5623 		amdgpu_device_set_mp1_state(tmp_adev);
5624 
5625 		/*
5626 		 * Try to put the audio codec into a suspended state
5627 		 * before the GPU reset starts.
5628 		 *
5629 		 * Because the graphics device shares its power domain
5630 		 * with the AZ (audio) power domain, without this we
5631 		 * may change the audio hardware behind the audio
5632 		 * driver's back, which would trigger audio codec
5633 		 * errors.
5634 		 */
5635 		if (!amdgpu_device_suspend_display_audio(tmp_adev))
5636 			audio_suspended = true;
5637 
5638 		amdgpu_ras_set_error_query_ready(tmp_adev, false);
5639 
5640 		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5641 
5642 		if (!amdgpu_sriov_vf(tmp_adev))
5643 			amdgpu_amdkfd_pre_reset(tmp_adev);
5644 
5645 		/*
5646 		 * Mark the ASICs to be reset as untracked first,
5647 		 * and add them back after the reset has completed.
5648 		 */
5649 		amdgpu_unregister_gpu_instance(tmp_adev);
5650 
5651 		drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5652 
5653 		/* disable ras on ALL IPs */
5654 		if (!need_emergency_restart &&
5655 		      amdgpu_device_ip_need_full_reset(tmp_adev))
5656 			amdgpu_ras_suspend(tmp_adev);
5657 
5658 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5659 			struct amdgpu_ring *ring = tmp_adev->rings[i];
5660 
5661 			if (!ring || !drm_sched_wqueue_ready(&ring->sched))
5662 				continue;
5663 
5664 			drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5665 
5666 			if (need_emergency_restart)
5667 				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5668 		}
5669 		atomic_inc(&tmp_adev->gpu_reset_counter);
5670 	}
5671 
5672 	if (need_emergency_restart)
5673 		goto skip_sched_resume;
5674 
5675 	/*
5676 	 * Must check the guilty job's signal here, since after this
5677 	 * point all old HW fences are force-signaled.
5678 	 *
5679 	 * job->base holds a reference to the parent fence.
5680 	 */
5681 	if (job && dma_fence_is_signaled(&job->hw_fence)) {
5682 		job_signaled = true;
5683 		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset\n");
5684 		goto skip_hw_reset;
5685 	}
5686 
5687 retry:	/* Rest of adevs pre asic reset from XGMI hive. */
5688 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5689 		r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5690 		/* TODO: should we stop here? */
5691 		if (r) {
5692 			dev_err(tmp_adev->dev, "GPU pre asic reset failed with err %d for drm dev %s\n",
5693 				  r, adev_to_drm(tmp_adev)->unique);
5694 			tmp_adev->asic_reset_res = r;
5695 		}
5696 
5697 		/*
5698 		 * Drop all pending non-scheduler resets. Scheduler resets
5699 		 * were already dropped during drm_sched_stop.
5700 		 */
5701 		amdgpu_device_stop_pending_resets(tmp_adev);
5702 	}
5703 
5704 	/* Actual ASIC resets if needed. */
5705 	/* The host driver will handle the XGMI hive reset for SRIOV */
5706 	if (amdgpu_sriov_vf(adev)) {
5707 		r = amdgpu_device_reset_sriov(adev, !job);
5708 		if (r)
5709 			adev->asic_reset_res = r;
5710 
5711 		/* Aldebaran and gfx_11_0_3 support RAS in SRIOV, so we need to resume RAS during reset */
5712 		if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
5713 			    IP_VERSION(9, 4, 2) ||
5714 		    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3))
5715 			amdgpu_ras_resume(adev);
5716 	} else {
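		/*
		 * amdgpu_do_asic_reset() returns -EAGAIN when the IB ring
		 * tests fail after the reset; in that case retry the whole
		 * sequence with a full reset.
		 */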
5717 		r = amdgpu_do_asic_reset(device_list_handle, reset_context);
5718 		if (r == -EAGAIN)
5719 			goto retry;
5720 	}
5721 
5722 skip_hw_reset:
5723 
5724 	/* Post ASIC reset for all devices. */
5725 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5726 
5727 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5728 			struct amdgpu_ring *ring = tmp_adev->rings[i];
5729 
5730 			if (!ring || !drm_sched_wqueue_ready(&ring->sched))
5731 				continue;
5732 
5733 			drm_sched_start(&ring->sched, true);
5734 		}
5735 
5736 		if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
5737 			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5738 
5739 		if (tmp_adev->asic_reset_res)
5740 			r = tmp_adev->asic_reset_res;
5741 
5742 		tmp_adev->asic_reset_res = 0;
5743 
5744 		if (r) {
5745 			/* bad news, how do we tell it to userspace? */
5746 			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5747 			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5748 		} else {
5749 			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5750 			if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5751 				DRM_WARN("smart shift update failed\n");
5752 		}
5753 	}
5754 
5755 skip_sched_resume:
5756 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5757 		/* unlock kfd: SRIOV would do it separately */
5758 		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5759 			amdgpu_amdkfd_post_reset(tmp_adev);
5760 
5761 		/* kfd_post_reset will do nothing if the kfd device is not
5762 		 * initialized, so bring up kfd here if it was not initialized before
5763 		 */
5764 		if (!tmp_adev->kfd.init_complete)
5765 			amdgpu_amdkfd_device_init(tmp_adev);
5766 
5767 		if (audio_suspended)
5768 			amdgpu_device_resume_display_audio(tmp_adev);
5769 
5770 		amdgpu_device_unset_mp1_state(tmp_adev);
5771 
5772 		amdgpu_ras_set_error_query_ready(tmp_adev, true);
5773 	}
5774 
5775 	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5776 					    reset_list);
5777 	amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5778 
5779 	if (hive) {
5780 		mutex_unlock(&hive->hive_lock);
5781 		amdgpu_put_xgmi_hive(hive);
5782 	}
5783 
5784 	if (r)
5785 		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5786 
5787 	atomic_set(&adev->reset_domain->reset_res, r);
5788 	return r;
5789 }
5790 
5791 /**
5792  * amdgpu_device_partner_bandwidth - find the bandwidth of appropriate partner
5793  *
5794  * @adev: amdgpu_device pointer
5795  * @speed: pointer to the speed of the link
5796  * @width: pointer to the width of the link
5797  *
5798  * Evaluate the hierarchy to find the speed and bandwidth capabilities of the
5799  * first physical partner to an AMD dGPU.
5800  * This will exclude any virtual switches and links.
5801  */
5802 static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
5803 					    enum pci_bus_speed *speed,
5804 					    enum pcie_link_width *width)
5805 {
5806 	struct pci_dev *parent = adev->pdev;
5807 
5808 	if (!speed || !width)
5809 		return;
5810 
5811 	*speed = PCI_SPEED_UNKNOWN;
5812 	*width = PCIE_LNK_WIDTH_UNKNOWN;
5813 
5814 	while ((parent = pci_upstream_bridge(parent))) {
5815 		/* skip upstream/downstream switches internal to the dGPU */
5816 		if (parent->vendor == PCI_VENDOR_ID_ATI)
5817 			continue;
5818 		*speed = pcie_get_speed_cap(parent);
5819 		*width = pcie_get_width_cap(parent);
5820 		break;
5821 	}
5822 }
5823 
5824 /**
5825  * amdgpu_device_get_pcie_info - fetch PCIE info about the PCIE slot
5826  *
5827  * @adev: amdgpu_device pointer
5828  *
5829  * Fetches and stores in the driver the PCIE capabilities (gen speed
5830  * and lanes) of the slot the device is in. Handles APUs and
5831  * virtualized environments where PCIE config space may not be available.
5832  */
5833 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5834 {
5835 	struct pci_dev *pdev;
5836 	enum pci_bus_speed speed_cap, platform_speed_cap;
5837 	enum pcie_link_width platform_link_width;
5838 
5839 	if (amdgpu_pcie_gen_cap)
5840 		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5841 
5842 	if (amdgpu_pcie_lane_cap)
5843 		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5844 
5845 	/* covers APUs as well */
5846 	if (pci_is_root_bus(adev->pdev->bus) && !amdgpu_passthrough(adev)) {
5847 		if (adev->pm.pcie_gen_mask == 0)
5848 			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5849 		if (adev->pm.pcie_mlw_mask == 0)
5850 			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5851 		return;
5852 	}
5853 
5854 	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5855 		return;
5856 
5857 	amdgpu_device_partner_bandwidth(adev, &platform_speed_cap,
5858 					&platform_link_width);
5859 
5860 	if (adev->pm.pcie_gen_mask == 0) {
5861 		/* asic caps */
5862 		pdev = adev->pdev;
5863 		speed_cap = pcie_get_speed_cap(pdev);
5864 		if (speed_cap == PCI_SPEED_UNKNOWN) {
5865 			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5866 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5867 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5868 		} else {
5869 			if (speed_cap == PCIE_SPEED_32_0GT)
5870 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5871 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5872 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5873 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5874 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5875 			else if (speed_cap == PCIE_SPEED_16_0GT)
5876 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5877 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5878 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5879 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5880 			else if (speed_cap == PCIE_SPEED_8_0GT)
5881 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5882 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5883 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5884 			else if (speed_cap == PCIE_SPEED_5_0GT)
5885 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5886 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5887 			else
5888 				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5889 		}
5890 		/* platform caps */
5891 		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5892 			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5893 						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5894 		} else {
5895 			if (platform_speed_cap == PCIE_SPEED_32_0GT)
5896 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5897 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5898 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5899 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5900 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5901 			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5902 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5903 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5904 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5905 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5906 			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5907 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5908 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5909 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5910 			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5911 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5912 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5913 			else
5914 				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5915 
5916 		}
5917 	}
5918 	if (adev->pm.pcie_mlw_mask == 0) {
5919 		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5920 			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5921 		} else {
5922 			switch (platform_link_width) {
5923 			case PCIE_LNK_X32:
5924 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5925 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5926 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5927 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5928 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5929 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5930 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5931 				break;
5932 			case PCIE_LNK_X16:
5933 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5934 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5935 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5936 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5937 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5938 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5939 				break;
5940 			case PCIE_LNK_X12:
5941 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5942 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5943 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5944 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5945 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5946 				break;
5947 			case PCIE_LNK_X8:
5948 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5949 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5950 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5951 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5952 				break;
5953 			case PCIE_LNK_X4:
5954 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5955 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5956 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5957 				break;
5958 			case PCIE_LNK_X2:
5959 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5960 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5961 				break;
5962 			case PCIE_LNK_X1:
5963 				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5964 				break;
5965 			default:
5966 				break;
5967 			}
5968 		}
5969 	}
5970 }
5971 
5972 /**
5973  * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5974  *
5975  * @adev: amdgpu_device pointer
5976  * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5977  *
5978  * Return true if @peer_adev can access (DMA) @adev through the PCIe
5979  * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
5980  * @peer_adev.
5981  */
5982 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5983 				      struct amdgpu_device *peer_adev)
5984 {
5985 #ifdef CONFIG_HSA_AMD_P2P
5986 	uint64_t address_mask = peer_adev->dev->dma_mask ?
5987 		~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5988 	resource_size_t aper_limit =
5989 		adev->gmc.aper_base + adev->gmc.aper_size - 1;
5990 	bool p2p_access =
5991 		!adev->gmc.xgmi.connected_to_cpu &&
5992 		!(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
5993 
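	/*
	 * The BAR is only usable by the peer if the device is "large BAR"
	 * (all of VRAM is CPU-visible) and both the aperture base and
	 * limit fall within the peer's DMA addressing range.
	 */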
5994 	return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5995 		adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5996 		!(adev->gmc.aper_base & address_mask ||
5997 		  aper_limit & address_mask));
5998 #else
5999 	return false;
6000 #endif
6001 }
6002 
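/**
 * amdgpu_device_baco_enter - enter BACO (Bus Active, Chip Off)
 *
 * @dev: drm_device pointer
 *
 * Puts the device into the BACO low power state if the platform
 * supports it. Returns 0 on success or a negative error code otherwise.
 */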
6003 int amdgpu_device_baco_enter(struct drm_device *dev)
6004 {
6005 	struct amdgpu_device *adev = drm_to_adev(dev);
6006 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
6007 
6008 	if (!amdgpu_device_supports_baco(dev))
6009 		return -ENOTSUPP;
6010 
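	/* With RAS enabled, doorbell interrupts are disabled while in BACO
	 * and re-enabled in amdgpu_device_baco_exit().
	 */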
6011 	if (ras && adev->ras_enabled &&
6012 	    adev->nbio.funcs->enable_doorbell_interrupt)
6013 		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
6014 
6015 	return amdgpu_dpm_baco_enter(adev);
6016 }
6017 
6018 int amdgpu_device_baco_exit(struct drm_device *dev)
6019 {
6020 	struct amdgpu_device *adev = drm_to_adev(dev);
6021 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
6022 	int ret = 0;
6023 
6024 	if (!amdgpu_device_supports_baco(dev))
6025 		return -ENOTSUPP;
6026 
6027 	ret = amdgpu_dpm_baco_exit(adev);
6028 	if (ret)
6029 		return ret;
6030 
6031 	if (ras && adev->ras_enabled &&
6032 	    adev->nbio.funcs->enable_doorbell_interrupt)
6033 		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
6034 
6035 	if (amdgpu_passthrough(adev) &&
6036 	    adev->nbio.funcs->clear_doorbell_interrupt)
6037 		adev->nbio.funcs->clear_doorbell_interrupt(adev);
6038 
6039 	return 0;
6040 }
6041 
6042 /**
6043  * amdgpu_pci_error_detected - Called when a PCI error is detected.
6044  * @pdev: PCI device struct
6045  * @state: PCI channel state
6046  *
6047  * Description: Called when a PCI error is detected.
6048  *
6049  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
6050  */
6051 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
6052 {
6053 	struct drm_device *dev = pci_get_drvdata(pdev);
6054 	struct amdgpu_device *adev = drm_to_adev(dev);
6055 	int i;
6056 
6057 	DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
6058 
6059 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
6060 		DRM_WARN("No support for XGMI hive yet...");
6061 		return PCI_ERS_RESULT_DISCONNECT;
6062 	}
6063 
6064 	adev->pci_channel_state = state;
6065 
6066 	switch (state) {
6067 	case pci_channel_io_normal:
6068 		return PCI_ERS_RESULT_CAN_RECOVER;
6069 	/* Fatal error, prepare for slot reset */
6070 	case pci_channel_io_frozen:
6071 		/*
6072 		 * Locking adev->reset_domain->sem will prevent any external access
6073 		 * to the GPU during PCI error recovery.
6074 		 */
6075 		amdgpu_device_lock_reset_domain(adev->reset_domain);
6076 		amdgpu_device_set_mp1_state(adev);
6077 
6078 		/*
6079 		 * Block any work scheduling as we do for regular GPU reset
6080 		 * for the duration of the recovery
6081 		 */
6082 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
6083 			struct amdgpu_ring *ring = adev->rings[i];
6084 
6085 			if (!ring || !drm_sched_wqueue_ready(&ring->sched))
6086 				continue;
6087 
6088 			drm_sched_stop(&ring->sched, NULL);
6089 		}
6090 		atomic_inc(&adev->gpu_reset_counter);
6091 		return PCI_ERS_RESULT_NEED_RESET;
6092 	case pci_channel_io_perm_failure:
6093 		/* Permanent error, prepare for device removal */
6094 		return PCI_ERS_RESULT_DISCONNECT;
6095 	}
6096 
6097 	return PCI_ERS_RESULT_NEED_RESET;
6098 }
6099 
6100 /**
6101  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
6102  * @pdev: pointer to PCI device
6103  */
6104 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
6105 {
6107 	DRM_INFO("PCI error: mmio enabled callback!!\n");
6108 
6109 	/* TODO - dump whatever for debugging purposes */
6110 
6111 	/* This is called only if amdgpu_pci_error_detected returns
6112 	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
6113 	 * works, no need to reset slot.
6114 	 */
6115 
6116 	return PCI_ERS_RESULT_RECOVERED;
6117 }
6118 
6119 /**
6120  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
6121  * @pdev: PCI device struct
6122  *
6123  * Description: This routine is called by the pci error recovery
6124  * code after the PCI slot has been reset, just before we
6125  * should resume normal operations.
6126  */
6127 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
6128 {
6129 	struct drm_device *dev = pci_get_drvdata(pdev);
6130 	struct amdgpu_device *adev = drm_to_adev(dev);
6131 	int r, i;
6132 	struct amdgpu_reset_context reset_context;
6133 	u32 memsize;
6134 	struct list_head device_list;
6135 	struct amdgpu_hive_info *hive;
6136 	int hive_ras_recovery = 0;
6137 	struct amdgpu_ras *ras;
6138 
6139 	/* PCI error slot reset should be skipped during RAS recovery */
6140 	hive = amdgpu_get_xgmi_hive(adev);
6141 	if (hive) {
6142 		hive_ras_recovery = atomic_read(&hive->ras_recovery);
6143 		amdgpu_put_xgmi_hive(hive);
6144 	}
6145 	ras = amdgpu_ras_get_context(adev);
6146 	if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) &&
6147 		 ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery))
6148 		return PCI_ERS_RESULT_RECOVERED;
6149 
6150 	DRM_INFO("PCI error: slot reset callback!!\n");
6151 
6152 	memset(&reset_context, 0, sizeof(reset_context));
6153 
6154 	INIT_LIST_HEAD(&device_list);
6155 	list_add_tail(&adev->reset_list, &device_list);
6156 
6157 	/* wait for asic to come out of reset */
6158 	msleep(500);
6159 
6160 	/* Restore PCI config space */
6161 	amdgpu_device_load_pci_state(pdev);
6162 
6163 	/* confirm the ASIC came out of reset */
6164 	for (i = 0; i < adev->usec_timeout; i++) {
6165 		memsize = amdgpu_asic_get_config_memsize(adev);
6166 
6167 		if (memsize != 0xffffffff)
6168 			break;
6169 		udelay(1);
6170 	}
6171 	if (memsize == 0xffffffff) {
6172 		r = -ETIME;
6173 		goto out;
6174 	}
6175 
6176 	reset_context.method = AMD_RESET_METHOD_NONE;
6177 	reset_context.reset_req_dev = adev;
6178 	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
6179 	set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
6180 
6181 	adev->no_hw_access = true;
6182 	r = amdgpu_device_pre_asic_reset(adev, &reset_context);
6183 	adev->no_hw_access = false;
6184 	if (r)
6185 		goto out;
6186 
6187 	r = amdgpu_do_asic_reset(&device_list, &reset_context);
6188 
6189 out:
6190 	if (!r) {
6191 		if (amdgpu_device_cache_pci_state(adev->pdev))
6192 			pci_restore_state(adev->pdev);
6193 
6194 		DRM_INFO("PCIe error recovery succeeded\n");
6195 	} else {
6196 		DRM_ERROR("PCIe error recovery failed, err:%d", r);
6197 		amdgpu_device_unset_mp1_state(adev);
6198 		amdgpu_device_unlock_reset_domain(adev->reset_domain);
6199 	}
6200 
6201 	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
6202 }
6203 
6204 /**
6205  * amdgpu_pci_resume() - resume normal ops after PCI reset
6206  * @pdev: pointer to PCI device
6207  *
6208  * Called when the error recovery driver tells us that it's
6209  * OK to resume normal operation.
6210  */
6211 void amdgpu_pci_resume(struct pci_dev *pdev)
6212 {
6213 	struct drm_device *dev = pci_get_drvdata(pdev);
6214 	struct amdgpu_device *adev = drm_to_adev(dev);
6215 	int i;
6216 
6218 	DRM_INFO("PCI error: resume callback!!\n");
6219 
6220 	/* Only continue execution for the case of pci_channel_io_frozen */
6221 	if (adev->pci_channel_state != pci_channel_io_frozen)
6222 		return;
6223 
6224 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
6225 		struct amdgpu_ring *ring = adev->rings[i];
6226 
6227 		if (!ring || !drm_sched_wqueue_ready(&ring->sched))
6228 			continue;
6229 
6230 		drm_sched_start(&ring->sched, true);
6231 	}
6232 
6233 	amdgpu_device_unset_mp1_state(adev);
6234 	amdgpu_device_unlock_reset_domain(adev->reset_domain);
6235 }
6236 
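/*
 * Save a copy of the PCI config space in adev->pci_state so it can be
 * restored after a GPU or slot reset.
 */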
6237 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
6238 {
6239 	struct drm_device *dev = pci_get_drvdata(pdev);
6240 	struct amdgpu_device *adev = drm_to_adev(dev);
6241 	int r;
6242 
6243 	r = pci_save_state(pdev);
6244 	if (!r) {
6245 		kfree(adev->pci_state);
6246 
6247 		adev->pci_state = pci_store_saved_state(pdev);
6248 
6249 		if (!adev->pci_state) {
6250 			DRM_ERROR("Failed to store PCI saved state");
6251 			return false;
6252 		}
6253 	} else {
6254 		DRM_WARN("Failed to save PCI state, err:%d\n", r);
6255 		return false;
6256 	}
6257 
6258 	return true;
6259 }
6260 
6261 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
6262 {
6263 	struct drm_device *dev = pci_get_drvdata(pdev);
6264 	struct amdgpu_device *adev = drm_to_adev(dev);
6265 	int r;
6266 
6267 	if (!adev->pci_state)
6268 		return false;
6269 
6270 	r = pci_load_saved_state(pdev, adev->pci_state);
6271 
6272 	if (!r) {
6273 		pci_restore_state(pdev);
6274 	} else {
6275 		DRM_WARN("Failed to load PCI state, err:%d\n", r);
6276 		return false;
6277 	}
6278 
6279 	return true;
6280 }
6281 
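/*
 * HDP (Host Data Path) flush/invalidate is only needed when the CPU
 * reaches the GPU over PCIe; bare-metal APUs and parts whose XGMI link
 * is connected to the CPU skip it.
 */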
6282 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
6283 		struct amdgpu_ring *ring)
6284 {
6285 #ifdef CONFIG_X86_64
6286 	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6287 		return;
6288 #endif
6289 	if (adev->gmc.xgmi.connected_to_cpu)
6290 		return;
6291 
6292 	if (ring && ring->funcs->emit_hdp_flush)
6293 		amdgpu_ring_emit_hdp_flush(ring);
6294 	else
6295 		amdgpu_asic_flush_hdp(adev, ring);
6296 }
6297 
6298 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
6299 		struct amdgpu_ring *ring)
6300 {
6301 #ifdef CONFIG_X86_64
6302 	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6303 		return;
6304 #endif
6305 	if (adev->gmc.xgmi.connected_to_cpu)
6306 		return;
6307 
6308 	amdgpu_asic_invalidate_hdp(adev, ring);
6309 }
6310 
6311 int amdgpu_in_reset(struct amdgpu_device *adev)
6312 {
6313 	return atomic_read(&adev->reset_domain->in_gpu_reset);
6314 }
6315 
6316 /**
6317  * amdgpu_device_halt() - bring hardware to some kind of halt state
6318  *
6319  * @adev: amdgpu_device pointer
6320  *
6321  * Bring hardware to some kind of halt state so that no one can touch it
6322  * any more. It helps to maintain the error context when an error occurs.
6323  * Compared to a simple hang, the system stays stable at least for SSH
6324  * access. Then it should be trivial to inspect the hardware state and
6325  * see what's going on. Implemented as follows:
6326  *
6327  * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs, etc.),
6328  *    clears all CPU mappings to device, disallows remappings through page faults
6329  * 2. amdgpu_irq_disable_all() disables all interrupts
6330  * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
6331  * 4. set adev->no_hw_access to avoid potential crashes after step 5
6332  * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
6333  * 6. pci_disable_device() and pci_wait_for_pending_transaction()
6334  *    flush any in flight DMA operations
6335  */
6336 void amdgpu_device_halt(struct amdgpu_device *adev)
6337 {
6338 	struct pci_dev *pdev = adev->pdev;
6339 	struct drm_device *ddev = adev_to_drm(adev);
6340 
6341 	amdgpu_xcp_dev_unplug(adev);
6342 	drm_dev_unplug(ddev);
6343 
6344 	amdgpu_irq_disable_all(adev);
6345 
6346 	amdgpu_fence_driver_hw_fini(adev);
6347 
6348 	adev->no_hw_access = true;
6349 
6350 	amdgpu_device_unmap_mmio(adev);
6351 
6352 	pci_disable_device(pdev);
6353 	pci_wait_for_pending_transaction(pdev);
6354 }
6355 
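/*
 * PCIe port registers are accessed through an index/data register pair
 * provided by the NBIO block. The spinlock keeps the indexed access
 * atomic, and the read-back of the index register makes sure the index
 * write has landed before the data register is touched.
 */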
6356 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
6357 				u32 reg)
6358 {
6359 	unsigned long flags, address, data;
6360 	u32 r;
6361 
6362 	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6363 	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6364 
6365 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6366 	WREG32(address, reg * 4);
6367 	(void)RREG32(address);
6368 	r = RREG32(data);
6369 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
6370 	return r;
6371 }
6372 
6373 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
6374 				u32 reg, u32 v)
6375 {
6376 	unsigned long flags, address, data;
6377 
6378 	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6379 	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6380 
6381 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6382 	WREG32(address, reg * 4);
6383 	(void)RREG32(address);
6384 	WREG32(data, v);
6385 	(void)RREG32(data);
6386 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
6387 }
6388 
6389 /**
6390  * amdgpu_device_switch_gang - switch to a new gang
6391  * @adev: amdgpu_device pointer
6392  * @gang: the gang to switch to
6393  *
6394  * Try to switch to a new gang.
6395  * Returns: NULL if we switched to the new gang or a reference to the current
6396  * gang leader.
6397  */
6398 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
6399 					    struct dma_fence *gang)
6400 {
6401 	struct dma_fence *old = NULL;
6402 
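	/*
	 * Lockless update: re-read the current gang leader and retry the
	 * cmpxchg until it succeeds. Switching is only allowed once the
	 * previous leader fence has signaled; otherwise that fence is
	 * returned to the caller.
	 */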
6403 	do {
6404 		dma_fence_put(old);
6405 		rcu_read_lock();
6406 		old = dma_fence_get_rcu_safe(&adev->gang_submit);
6407 		rcu_read_unlock();
6408 
6409 		if (old == gang)
6410 			break;
6411 
6412 		if (!dma_fence_is_signaled(old))
6413 			return old;
6414 
6415 	} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
6416 			 old, gang) != old);
6417 
6418 	dma_fence_put(old);
6419 	return NULL;
6420 }
6421 
6422 bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
6423 {
6424 	switch (adev->asic_type) {
6425 #ifdef CONFIG_DRM_AMDGPU_SI
6426 	case CHIP_HAINAN:
6427 #endif
6428 	case CHIP_TOPAZ:
6429 		/* chips with no display hardware */
6430 		return false;
6431 #ifdef CONFIG_DRM_AMDGPU_SI
6432 	case CHIP_TAHITI:
6433 	case CHIP_PITCAIRN:
6434 	case CHIP_VERDE:
6435 	case CHIP_OLAND:
6436 #endif
6437 #ifdef CONFIG_DRM_AMDGPU_CIK
6438 	case CHIP_BONAIRE:
6439 	case CHIP_HAWAII:
6440 	case CHIP_KAVERI:
6441 	case CHIP_KABINI:
6442 	case CHIP_MULLINS:
6443 #endif
6444 	case CHIP_TONGA:
6445 	case CHIP_FIJI:
6446 	case CHIP_POLARIS10:
6447 	case CHIP_POLARIS11:
6448 	case CHIP_POLARIS12:
6449 	case CHIP_VEGAM:
6450 	case CHIP_CARRIZO:
6451 	case CHIP_STONEY:
6452 		/* chips with display hardware */
6453 		return true;
6454 	default:
6455 		/* IP discovery */
6456 		if (!amdgpu_ip_version(adev, DCE_HWIP, 0) ||
6457 		    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
6458 			return false;
6459 		return true;
6460 	}
6461 }
6462 
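/*
 * Poll @reg_addr until (value & mask) == expected_value or the timeout
 * expires. The timeout restarts whenever the value changes, so only a
 * register that has stopped changing fails the wait. Returns 0 on
 * success or -ETIMEDOUT (as a u32) on timeout.
 */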
6463 uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
6464 		uint32_t inst, uint32_t reg_addr, char reg_name[],
6465 		uint32_t expected_value, uint32_t mask)
6466 {
6467 	uint32_t ret = 0;
6468 	uint32_t old_ = 0;
6469 	uint32_t tmp_ = RREG32(reg_addr);
6470 	uint32_t loop = adev->usec_timeout;
6471 
6472 	while ((tmp_ & (mask)) != (expected_value)) {
6473 		if (old_ != tmp_) {
6474 			loop = adev->usec_timeout;
6475 			old_ = tmp_;
6476 		} else {
6477 			udelay(1);
		}
6478 		tmp_ = RREG32(reg_addr);
6479 		loop--;
6480 		if (!loop) {
6481 			DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x, got 0x%08x\n",
6482 				  inst, reg_name, (uint32_t)expected_value,
6483 				  (uint32_t)(tmp_ & (mask)));
6484 			ret = -ETIMEDOUT;
6485 			break;
6486 		}
6487 	}
6488 	return ret;
6489 }
6490