xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c (revision db5b5c679e6cad2bb147337af6c378d278231b45)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
33 
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_probe_helper.h>
36 #include <drm/amdgpu_drm.h>
37 #include <linux/vgaarb.h>
38 #include <linux/vga_switcheroo.h>
39 #include <linux/efi.h>
40 #include "amdgpu.h"
41 #include "amdgpu_trace.h"
42 #include "amdgpu_i2c.h"
43 #include "atom.h"
44 #include "amdgpu_atombios.h"
45 #include "amdgpu_atomfirmware.h"
46 #include "amd_pcie.h"
47 #ifdef CONFIG_DRM_AMDGPU_SI
48 #include "si.h"
49 #endif
50 #ifdef CONFIG_DRM_AMDGPU_CIK
51 #include "cik.h"
52 #endif
53 #include "vi.h"
54 #include "soc15.h"
55 #include "nv.h"
56 #include "bif/bif_4_1_d.h"
57 #include <linux/pci.h>
58 #include <linux/firmware.h>
59 #include "amdgpu_vf_error.h"
60 
61 #include "amdgpu_amdkfd.h"
62 #include "amdgpu_pm.h"
63 
64 #include "amdgpu_xgmi.h"
65 #include "amdgpu_ras.h"
66 #include "amdgpu_pmu.h"
67 #include "amdgpu_fru_eeprom.h"
68 #include "amdgpu_reset.h"
69 
70 #include <linux/suspend.h>
71 #include <drm/task_barrier.h>
72 #include <linux/pm_runtime.h>
73 
74 #include <drm/drm_drv.h>
75 
76 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
77 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
78 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
79 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
80 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
81 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
82 MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
83 MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
84 MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
85 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
86 MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
87 MODULE_FIRMWARE("amdgpu/yellow_carp_gpu_info.bin");
88 
89 #define AMDGPU_RESUME_MS		2000
90 
91 const char *amdgpu_asic_name[] = {
92 	"TAHITI",
93 	"PITCAIRN",
94 	"VERDE",
95 	"OLAND",
96 	"HAINAN",
97 	"BONAIRE",
98 	"KAVERI",
99 	"KABINI",
100 	"HAWAII",
101 	"MULLINS",
102 	"TOPAZ",
103 	"TONGA",
104 	"FIJI",
105 	"CARRIZO",
106 	"STONEY",
107 	"POLARIS10",
108 	"POLARIS11",
109 	"POLARIS12",
110 	"VEGAM",
111 	"VEGA10",
112 	"VEGA12",
113 	"VEGA20",
114 	"RAVEN",
115 	"ARCTURUS",
116 	"RENOIR",
117 	"ALDEBARAN",
118 	"NAVI10",
119 	"CYAN_SKILLFISH",
120 	"NAVI14",
121 	"NAVI12",
122 	"SIENNA_CICHLID",
123 	"NAVY_FLOUNDER",
124 	"VANGOGH",
125 	"DIMGREY_CAVEFISH",
126 	"BEIGE_GOBY",
127 	"YELLOW_CARP",
128 	"IP DISCOVERY",
129 	"LAST",
130 };
131 
132 /**
133  * DOC: pcie_replay_count
134  *
135  * The amdgpu driver provides a sysfs API for reporting the total number
136  * of PCIe replays (NAKs).
137  * The file pcie_replay_count is used for this and returns the total
138  * number of replays as a sum of the NAKs generated and NAKs received.
139  */
140 
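/*
 * Usage sketch (illustrative, not part of the driver): the attribute is
 * created on the PCI device, so it typically shows up as
 * /sys/class/drm/card0/device/pcie_replay_count and can simply be read
 * (e.g. with cat) to obtain the current replay count.
 */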
141 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
142 		struct device_attribute *attr, char *buf)
143 {
144 	struct drm_device *ddev = dev_get_drvdata(dev);
145 	struct amdgpu_device *adev = drm_to_adev(ddev);
146 	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
147 
148 	return sysfs_emit(buf, "%llu\n", cnt);
149 }
150 
151 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
152 		amdgpu_device_get_pcie_replay_count, NULL);
153 
154 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
155 
156 /**
157  * DOC: product_name
158  *
159  * The amdgpu driver provides a sysfs API for reporting the product name
160  * for the device.
161  * The file product_name is used for this and returns the product name
162  * as returned from the FRU.
163  * NOTE: This is only available for certain server cards
164  */
165 
166 static ssize_t amdgpu_device_get_product_name(struct device *dev,
167 		struct device_attribute *attr, char *buf)
168 {
169 	struct drm_device *ddev = dev_get_drvdata(dev);
170 	struct amdgpu_device *adev = drm_to_adev(ddev);
171 
172 	return sysfs_emit(buf, "%s\n", adev->product_name);
173 }
174 
175 static DEVICE_ATTR(product_name, S_IRUGO,
176 		amdgpu_device_get_product_name, NULL);
177 
178 /**
179  * DOC: product_number
180  *
181  * The amdgpu driver provides a sysfs API for reporting the part number
182  * for the device.
183  * The file product_number is used for this and returns the part number
184  * as returned from the FRU.
185  * NOTE: This is only available for certain server cards
186  */
187 
188 static ssize_t amdgpu_device_get_product_number(struct device *dev,
189 		struct device_attribute *attr, char *buf)
190 {
191 	struct drm_device *ddev = dev_get_drvdata(dev);
192 	struct amdgpu_device *adev = drm_to_adev(ddev);
193 
194 	return sysfs_emit(buf, "%s\n", adev->product_number);
195 }
196 
197 static DEVICE_ATTR(product_number, S_IRUGO,
198 		amdgpu_device_get_product_number, NULL);
199 
200 /**
201  * DOC: serial_number
202  *
203  * The amdgpu driver provides a sysfs API for reporting the serial number
204  * for the device.
205  * The file serial_number is used for this and returns the serial number
206  * as returned from the FRU.
207  * NOTE: This is only available for certain server cards
208  */
209 
210 static ssize_t amdgpu_device_get_serial_number(struct device *dev,
211 		struct device_attribute *attr, char *buf)
212 {
213 	struct drm_device *ddev = dev_get_drvdata(dev);
214 	struct amdgpu_device *adev = drm_to_adev(ddev);
215 
216 	return sysfs_emit(buf, "%s\n", adev->serial);
217 }
218 
219 static DEVICE_ATTR(serial_number, S_IRUGO,
220 		amdgpu_device_get_serial_number, NULL);
221 
222 /**
223  * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
224  *
225  * @dev: drm_device pointer
226  *
227  * Returns true if the device is a dGPU with ATPX power control,
228  * otherwise return false.
229  */
230 bool amdgpu_device_supports_px(struct drm_device *dev)
231 {
232 	struct amdgpu_device *adev = drm_to_adev(dev);
233 
234 	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
235 		return true;
236 	return false;
237 }
238 
239 /**
240  * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
241  *
242  * @dev: drm_device pointer
243  *
244  * Returns true if the device is a dGPU with ACPI power control,
245  * otherwise return false.
246  */
247 bool amdgpu_device_supports_boco(struct drm_device *dev)
248 {
249 	struct amdgpu_device *adev = drm_to_adev(dev);
250 
251 	if (adev->has_pr3 ||
252 	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
253 		return true;
254 	return false;
255 }
256 
257 /**
258  * amdgpu_device_supports_baco - Does the device support BACO
259  *
260  * @dev: drm_device pointer
261  *
262  * Returns true if the device supports BACO,
263  * otherwise return false.
264  */
265 bool amdgpu_device_supports_baco(struct drm_device *dev)
266 {
267 	struct amdgpu_device *adev = drm_to_adev(dev);
268 
269 	return amdgpu_asic_supports_baco(adev);
270 }
271 
272 /**
273  * amdgpu_device_supports_smart_shift - Is the device a dGPU with
274  * Smart Shift support
275  *
276  * @dev: drm_device pointer
277  *
278  * Returns true if the device is a dGPU with Smart Shift support,
279  * otherwise returns false.
280  */
281 bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
282 {
283 	return (amdgpu_device_supports_boco(dev) &&
284 		amdgpu_acpi_is_power_shift_control_supported());
285 }
286 
287 /*
288  * VRAM access helper functions
289  */
290 
291 /**
292  * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
293  *
294  * @adev: amdgpu_device pointer
295  * @pos: offset of the buffer in vram
296  * @buf: virtual address of the buffer in system memory
297  * @size: read/write size, the buffer pointed to by @buf must be at least @size bytes
298  * @write: true - write to vram, otherwise - read from vram
299  */
300 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
301 			     void *buf, size_t size, bool write)
302 {
303 	unsigned long flags;
304 	uint32_t hi = ~0, tmp = 0;
305 	uint32_t *data = buf;
306 	uint64_t last;
307 	int idx;
308 
309 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
310 		return;
311 
312 	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
313 
314 	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
315 	for (last = pos + size; pos < last; pos += 4) {
316 		tmp = pos >> 31;
317 
318 		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
319 		if (tmp != hi) {
320 			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
321 			hi = tmp;
322 		}
323 		if (write)
324 			WREG32_NO_KIQ(mmMM_DATA, *data++);
325 		else
326 			*data++ = RREG32_NO_KIQ(mmMM_DATA);
327 	}
328 
329 	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
330 	drm_dev_exit(idx);
331 }
332 
333 /**
334  * amdgpu_device_aper_access - access vram via the vram aperture
335  *
336  * @adev: amdgpu_device pointer
337  * @pos: offset of the buffer in vram
338  * @buf: virtual address of the buffer in system memory
339  * @size: read/write size, the buffer pointed to by @buf must be at least @size bytes
340  * @write: true - write to vram, otherwise - read from vram
341  *
342  * Returns the number of bytes transferred.
343  */
344 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
345 				 void *buf, size_t size, bool write)
346 {
347 #ifdef CONFIG_64BIT
348 	void __iomem *addr;
349 	size_t count = 0;
350 	uint64_t last;
351 
352 	if (!adev->mman.aper_base_kaddr)
353 		return 0;
354 
355 	last = min(pos + size, adev->gmc.visible_vram_size);
356 	if (last > pos) {
357 		addr = adev->mman.aper_base_kaddr + pos;
358 		count = last - pos;
359 
360 		if (write) {
361 			memcpy_toio(addr, buf, count);
362 			mb();
363 			amdgpu_device_flush_hdp(adev, NULL);
364 		} else {
365 			amdgpu_device_invalidate_hdp(adev, NULL);
366 			mb();
367 			memcpy_fromio(buf, addr, count);
368 		}
369 
370 	}
371 
372 	return count;
373 #else
374 	return 0;
375 #endif
376 }
377 
378 /**
379  * amdgpu_device_vram_access - read/write a buffer in vram
380  *
381  * @adev: amdgpu_device pointer
382  * @pos: offset of the buffer in vram
383  * @buf: virtual address of the buffer in system memory
384  * @size: read/write size, the buffer pointed to by @buf must be at least @size bytes
385  * @write: true - write to vram, otherwise - read from vram
386  */
387 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
388 			       void *buf, size_t size, bool write)
389 {
390 	size_t count;
391 
392 	/* try using the vram aperture to access vram first */
393 	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
394 	size -= count;
395 	if (size) {
396 		/* use MM access for the rest of the vram */
397 		pos += count;
398 		buf += count;
399 		amdgpu_device_mm_access(adev, pos, buf, size, write);
400 	}
401 }
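
/*
 * Example (illustrative sketch only): read the first 16 bytes of VRAM into a
 * local buffer.  The aperture path is used where possible and the
 * MM_INDEX/MM_DATA path covers whatever the aperture cannot reach:
 *
 *	uint32_t data[4];
 *
 *	amdgpu_device_vram_access(adev, 0, data, sizeof(data), false);
 */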
402 
403 /*
404  * register access helper functions.
405  */
406 
407 /* Check if hw access should be skipped because of hotplug or device error */
408 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
409 {
410 	if (adev->no_hw_access)
411 		return true;
412 
413 #ifdef CONFIG_LOCKDEP
414 	/*
415 	 * This is a bit complicated to understand, so worth a comment. What we assert
416 	 * here is that the GPU reset is not running on another thread in parallel.
417 	 *
418 	 * For this we trylock the read side of the reset semaphore, if that succeeds
419 	 * we know that the reset is not running in parallel.
420 	 *
421 	 * If the trylock fails we assert that we are either already holding the read
422 	 * side of the lock or are the reset thread itself and hold the write side of
423 	 * the lock.
424 	 */
425 	if (in_task()) {
426 		if (down_read_trylock(&adev->reset_sem))
427 			up_read(&adev->reset_sem);
428 		else
429 			lockdep_assert_held(&adev->reset_sem);
430 	}
431 #endif
432 	return false;
433 }
434 
435 /**
436  * amdgpu_device_rreg - read a memory mapped IO or indirect register
437  *
438  * @adev: amdgpu_device pointer
439  * @reg: dword aligned register offset
440  * @acc_flags: access flags which require special behavior
441  *
442  * Returns the 32 bit value from the offset specified.
443  */
444 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
445 			    uint32_t reg, uint32_t acc_flags)
446 {
447 	uint32_t ret;
448 
449 	if (amdgpu_device_skip_hw_access(adev))
450 		return 0;
451 
452 	if ((reg * 4) < adev->rmmio_size) {
453 		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
454 		    amdgpu_sriov_runtime(adev) &&
455 		    down_read_trylock(&adev->reset_sem)) {
456 			ret = amdgpu_kiq_rreg(adev, reg);
457 			up_read(&adev->reset_sem);
458 		} else {
459 			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
460 		}
461 	} else {
462 		ret = adev->pcie_rreg(adev, reg * 4);
463 	}
464 
465 	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
466 
467 	return ret;
468 }
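
/*
 * Note: most callers do not use amdgpu_device_rreg()/amdgpu_device_wreg()
 * directly but go through the RREG32()/WREG32() style macros in amdgpu.h,
 * which supply the appropriate acc_flags (e.g. AMDGPU_REGS_NO_KIQ for the
 * *_NO_KIQ variants).
 */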
469 
470 /*
471  * MMIO register byte read helper function
472  * @offset: byte offset from MMIO start
473  *
474  */
475 
476 /**
477  * amdgpu_mm_rreg8 - read a memory mapped IO register
478  *
479  * @adev: amdgpu_device pointer
480  * @offset: byte aligned register offset
481  *
482  * Returns the 8 bit value from the offset specified.
483  */
484 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
485 {
486 	if (amdgpu_device_skip_hw_access(adev))
487 		return 0;
488 
489 	if (offset < adev->rmmio_size)
490 		return (readb(adev->rmmio + offset));
491 	BUG();
492 }
493 
494 /*
495  * MMIO register byte write helper function
496  * @offset: byte offset from MMIO start
497  * @value: the value to be written to the register
498  *
499  */
500 /**
501  * amdgpu_mm_wreg8 - write a memory mapped IO register
502  *
503  * @adev: amdgpu_device pointer
504  * @offset: byte aligned register offset
505  * @value: 8 bit value to write
506  *
507  * Writes the value specified to the offset specified.
508  */
509 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
510 {
511 	if (amdgpu_device_skip_hw_access(adev))
512 		return;
513 
514 	if (offset < adev->rmmio_size)
515 		writeb(value, adev->rmmio + offset);
516 	else
517 		BUG();
518 }
519 
520 /**
521  * amdgpu_device_wreg - write to a memory mapped IO or indirect register
522  *
523  * @adev: amdgpu_device pointer
524  * @reg: dword aligned register offset
525  * @v: 32 bit value to write to the register
526  * @acc_flags: access flags which require special behavior
527  *
528  * Writes the value specified to the offset specified.
529  */
530 void amdgpu_device_wreg(struct amdgpu_device *adev,
531 			uint32_t reg, uint32_t v,
532 			uint32_t acc_flags)
533 {
534 	if (amdgpu_device_skip_hw_access(adev))
535 		return;
536 
537 	if ((reg * 4) < adev->rmmio_size) {
538 		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
539 		    amdgpu_sriov_runtime(adev) &&
540 		    down_read_trylock(&adev->reset_sem)) {
541 			amdgpu_kiq_wreg(adev, reg, v);
542 			up_read(&adev->reset_sem);
543 		} else {
544 			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
545 		}
546 	} else {
547 		adev->pcie_wreg(adev, reg * 4, v);
548 	}
549 
550 	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
551 }
552 
553 /*
554  * amdgpu_mm_wreg_mmio_rlc - write a register either with mmio or with the RLC path if in range
555  *
556  * This function is invoked only for debugfs register access.
557  */
558 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
559 			     uint32_t reg, uint32_t v)
560 {
561 	if (amdgpu_device_skip_hw_access(adev))
562 		return;
563 
564 	if (amdgpu_sriov_fullaccess(adev) &&
565 	    adev->gfx.rlc.funcs &&
566 	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
567 		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
568 			return adev->gfx.rlc.funcs->sriov_wreg(adev, reg, v, 0, 0);
569 	} else {
570 		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
571 	}
572 }
573 
574 /**
575  * amdgpu_mm_rdoorbell - read a doorbell dword
576  *
577  * @adev: amdgpu_device pointer
578  * @index: doorbell index
579  *
580  * Returns the value in the doorbell aperture at the
581  * requested doorbell index (CIK).
582  */
583 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
584 {
585 	if (amdgpu_device_skip_hw_access(adev))
586 		return 0;
587 
588 	if (index < adev->doorbell.num_doorbells) {
589 		return readl(adev->doorbell.ptr + index);
590 	} else {
591 		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
592 		return 0;
593 	}
594 }
595 
596 /**
597  * amdgpu_mm_wdoorbell - write a doorbell dword
598  *
599  * @adev: amdgpu_device pointer
600  * @index: doorbell index
601  * @v: value to write
602  *
603  * Writes @v to the doorbell aperture at the
604  * requested doorbell index (CIK).
605  */
606 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
607 {
608 	if (amdgpu_device_skip_hw_access(adev))
609 		return;
610 
611 	if (index < adev->doorbell.num_doorbells) {
612 		writel(v, adev->doorbell.ptr + index);
613 	} else {
614 		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
615 	}
616 }
617 
618 /**
619  * amdgpu_mm_rdoorbell64 - read a doorbell Qword
620  *
621  * @adev: amdgpu_device pointer
622  * @index: doorbell index
623  *
624  * Returns the value in the doorbell aperture at the
625  * requested doorbell index (VEGA10+).
626  */
627 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
628 {
629 	if (amdgpu_device_skip_hw_access(adev))
630 		return 0;
631 
632 	if (index < adev->doorbell.num_doorbells) {
633 		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
634 	} else {
635 		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
636 		return 0;
637 	}
638 }
639 
640 /**
641  * amdgpu_mm_wdoorbell64 - write a doorbell Qword
642  *
643  * @adev: amdgpu_device pointer
644  * @index: doorbell index
645  * @v: value to write
646  *
647  * Writes @v to the doorbell aperture at the
648  * requested doorbell index (VEGA10+).
649  */
650 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
651 {
652 	if (amdgpu_device_skip_hw_access(adev))
653 		return;
654 
655 	if (index < adev->doorbell.num_doorbells) {
656 		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
657 	} else {
658 		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
659 	}
660 }
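
/*
 * Note: ring and IP code normally reaches these helpers through the
 * RDOORBELL32()/WDOORBELL32() and RDOORBELL64()/WDOORBELL64() macros in
 * amdgpu.h rather than calling them directly.
 */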
661 
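/*
 * The indirect register helpers below implement the usual index/data pair
 * scheme: the target address is written to the index register and the payload
 * is then transferred through the data register.  The intervening readl() of
 * the index register posts the write before the data register is touched.
 */
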
662 /**
663  * amdgpu_device_indirect_rreg - read an indirect register
664  *
665  * @adev: amdgpu_device pointer
666  * @pcie_index: mmio register offset of the index register
667  * @pcie_data: mmio register offset of the data register
668  * @reg_addr: indirect register address to read from
669  *
670  * Returns the value of indirect register @reg_addr
671  */
672 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
673 				u32 pcie_index, u32 pcie_data,
674 				u32 reg_addr)
675 {
676 	unsigned long flags;
677 	u32 r;
678 	void __iomem *pcie_index_offset;
679 	void __iomem *pcie_data_offset;
680 
681 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
682 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
683 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
684 
685 	writel(reg_addr, pcie_index_offset);
686 	readl(pcie_index_offset);
687 	r = readl(pcie_data_offset);
688 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
689 
690 	return r;
691 }
692 
693 /**
694  * amdgpu_device_indirect_rreg64 - read a 64 bit indirect register
695  *
696  * @adev: amdgpu_device pointer
697  * @pcie_index: mmio register offset of the index register
698  * @pcie_data: mmio register offset of the data register
699  * @reg_addr: indirect register address to read from
700  *
701  * Returns the value of indirect register @reg_addr
702  */
703 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
704 				  u32 pcie_index, u32 pcie_data,
705 				  u32 reg_addr)
706 {
707 	unsigned long flags;
708 	u64 r;
709 	void __iomem *pcie_index_offset;
710 	void __iomem *pcie_data_offset;
711 
712 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
713 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
714 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
715 
716 	/* read low 32 bits */
717 	writel(reg_addr, pcie_index_offset);
718 	readl(pcie_index_offset);
719 	r = readl(pcie_data_offset);
720 	/* read high 32 bits */
721 	writel(reg_addr + 4, pcie_index_offset);
722 	readl(pcie_index_offset);
723 	r |= ((u64)readl(pcie_data_offset) << 32);
724 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
725 
726 	return r;
727 }
728 
729 /**
730  * amdgpu_device_indirect_wreg - write an indirect register
731  *
732  * @adev: amdgpu_device pointer
733  * @pcie_index: mmio register offset of the index register
734  * @pcie_data: mmio register offset of the data register
735  * @reg_addr: indirect register offset
736  * @reg_data: indirect register data
737  *
738  */
739 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
740 				 u32 pcie_index, u32 pcie_data,
741 				 u32 reg_addr, u32 reg_data)
742 {
743 	unsigned long flags;
744 	void __iomem *pcie_index_offset;
745 	void __iomem *pcie_data_offset;
746 
747 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
748 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
749 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
750 
751 	writel(reg_addr, pcie_index_offset);
752 	readl(pcie_index_offset);
753 	writel(reg_data, pcie_data_offset);
754 	readl(pcie_data_offset);
755 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
756 }
757 
758 /**
759  * amdgpu_device_indirect_wreg64 - write a 64 bit indirect register
760  *
761  * @adev: amdgpu_device pointer
762  * @pcie_index: mmio register offset of the index register
763  * @pcie_data: mmio register offset of the data register
764  * @reg_addr: indirect register offset
765  * @reg_data: indirect register data
766  *
767  */
768 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
769 				   u32 pcie_index, u32 pcie_data,
770 				   u32 reg_addr, u64 reg_data)
771 {
772 	unsigned long flags;
773 	void __iomem *pcie_index_offset;
774 	void __iomem *pcie_data_offset;
775 
776 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
777 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
778 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
779 
780 	/* write low 32 bits */
781 	writel(reg_addr, pcie_index_offset);
782 	readl(pcie_index_offset);
783 	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
784 	readl(pcie_data_offset);
785 	/* write high 32 bits */
786 	writel(reg_addr + 4, pcie_index_offset);
787 	readl(pcie_index_offset);
788 	writel((u32)(reg_data >> 32), pcie_data_offset);
789 	readl(pcie_data_offset);
790 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
791 }
792 
793 /**
794  * amdgpu_invalid_rreg - dummy reg read function
795  *
796  * @adev: amdgpu_device pointer
797  * @reg: offset of register
798  *
799  * Dummy register read function.  Used for register blocks
800  * that certain asics don't have (all asics).
801  * Returns the value in the register.
802  */
803 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
804 {
805 	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
806 	BUG();
807 	return 0;
808 }
809 
810 /**
811  * amdgpu_invalid_wreg - dummy reg write function
812  *
813  * @adev: amdgpu_device pointer
814  * @reg: offset of register
815  * @v: value to write to the register
816  *
817  * Dummy register write function.  Used for register blocks
818  * that certain asics don't have (all asics).
819  */
820 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
821 {
822 	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
823 		  reg, v);
824 	BUG();
825 }
826 
827 /**
828  * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
829  *
830  * @adev: amdgpu_device pointer
831  * @reg: offset of register
832  *
833  * Dummy register read function.  Used for register blocks
834  * that certain asics don't have (all asics).
835  * Returns the value in the register.
836  */
837 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
838 {
839 	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
840 	BUG();
841 	return 0;
842 }
843 
844 /**
845  * amdgpu_invalid_wreg64 - dummy 64 bit reg write function
846  *
847  * @adev: amdgpu_device pointer
848  * @reg: offset of register
849  * @v: value to write to the register
850  *
851  * Dummy register write function.  Used for register blocks
852  * that certain asics don't have (all asics).
853  */
854 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
855 {
856 	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
857 		  reg, v);
858 	BUG();
859 }
860 
861 /**
862  * amdgpu_block_invalid_rreg - dummy reg read function
863  *
864  * @adev: amdgpu_device pointer
865  * @block: offset of instance
866  * @reg: offset of register
867  *
868  * Dummy register read function.  Used for register blocks
869  * that certain asics don't have (all asics).
870  * Returns the value in the register.
871  */
872 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
873 					  uint32_t block, uint32_t reg)
874 {
875 	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
876 		  reg, block);
877 	BUG();
878 	return 0;
879 }
880 
881 /**
882  * amdgpu_block_invalid_wreg - dummy reg write function
883  *
884  * @adev: amdgpu_device pointer
885  * @block: offset of instance
886  * @reg: offset of register
887  * @v: value to write to the register
888  *
889  * Dummy register write function.  Used for register blocks
890  * that certain asics don't have (all asics).
891  */
892 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
893 				      uint32_t block,
894 				      uint32_t reg, uint32_t v)
895 {
896 	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
897 		  reg, block, v);
898 	BUG();
899 }
900 
901 /**
902  * amdgpu_device_asic_init - Wrapper for atom asic_init
903  *
904  * @adev: amdgpu_device pointer
905  *
906  * Does any asic specific work and then calls atom asic init.
907  */
908 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
909 {
910 	amdgpu_asic_pre_asic_init(adev);
911 
912 	return amdgpu_atom_asic_init(adev->mode_info.atom_context);
913 }
914 
915 /**
916  * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
917  *
918  * @adev: amdgpu_device pointer
919  *
920  * Allocates a scratch page of VRAM for use by various things in the
921  * driver.
922  */
923 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
924 {
925 	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
926 				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
927 				       &adev->vram_scratch.robj,
928 				       &adev->vram_scratch.gpu_addr,
929 				       (void **)&adev->vram_scratch.ptr);
930 }
931 
932 /**
933  * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
934  *
935  * @adev: amdgpu_device pointer
936  *
937  * Frees the VRAM scratch page.
938  */
939 static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
940 {
941 	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
942 }
943 
944 /**
945  * amdgpu_device_program_register_sequence - program an array of registers.
946  *
947  * @adev: amdgpu_device pointer
948  * @registers: pointer to the register array
949  * @array_size: size of the register array
950  *
951  * Programs an array of registers with AND and OR masks.
952  * This is a helper for setting golden registers.
953  */
954 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
955 					     const u32 *registers,
956 					     const u32 array_size)
957 {
958 	u32 tmp, reg, and_mask, or_mask;
959 	int i;
960 
961 	if (array_size % 3)
962 		return;
963 
964 	for (i = 0; i < array_size; i += 3) {
965 		reg = registers[i + 0];
966 		and_mask = registers[i + 1];
967 		or_mask = registers[i + 2];
968 
969 		if (and_mask == 0xffffffff) {
970 			tmp = or_mask;
971 		} else {
972 			tmp = RREG32(reg);
973 			tmp &= ~and_mask;
974 			if (adev->family >= AMDGPU_FAMILY_AI)
975 				tmp |= (or_mask & and_mask);
976 			else
977 				tmp |= or_mask;
978 		}
979 		WREG32(reg, tmp);
980 	}
981 }
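
/*
 * Golden register arrays are flat { register, AND mask, OR mask } triplets.
 * A minimal sketch (register name and values purely illustrative):
 *
 *	static const u32 golden_settings_example[] = {
 *		mmEXAMPLE_REG, 0xffffffff, 0x00000001,
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, golden_settings_example,
 *						ARRAY_SIZE(golden_settings_example));
 */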
982 
983 /**
984  * amdgpu_device_pci_config_reset - reset the GPU
985  *
986  * @adev: amdgpu_device pointer
987  *
988  * Resets the GPU using the pci config reset sequence.
989  * Only applicable to asics prior to vega10.
990  */
991 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
992 {
993 	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
994 }
995 
996 /**
997  * amdgpu_device_pci_reset - reset the GPU using generic PCI means
998  *
999  * @adev: amdgpu_device pointer
1000  *
1001  * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
1002  */
1003 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
1004 {
1005 	return pci_reset_function(adev->pdev);
1006 }
1007 
1008 /*
1009  * GPU doorbell aperture helpers function.
1010  */
1011 /**
1012  * amdgpu_device_doorbell_init - Init doorbell driver information.
1013  *
1014  * @adev: amdgpu_device pointer
1015  *
1016  * Init doorbell driver information (CIK)
1017  * Returns 0 on success, error on failure.
1018  */
1019 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
1020 {
1021 
1022 	/* No doorbell on SI hardware generation */
1023 	if (adev->asic_type < CHIP_BONAIRE) {
1024 		adev->doorbell.base = 0;
1025 		adev->doorbell.size = 0;
1026 		adev->doorbell.num_doorbells = 0;
1027 		adev->doorbell.ptr = NULL;
1028 		return 0;
1029 	}
1030 
1031 	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
1032 		return -EINVAL;
1033 
1034 	amdgpu_asic_init_doorbell_index(adev);
1035 
1036 	/* doorbell bar mapping */
1037 	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
1038 	adev->doorbell.size = pci_resource_len(adev->pdev, 2);
1039 
1040 	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
1041 					     adev->doorbell_index.max_assignment+1);
1042 	if (adev->doorbell.num_doorbells == 0)
1043 		return -EINVAL;
1044 
1045 	/* For Vega, reserve and map two pages on doorbell BAR since SDMA
1046 	 * paging queue doorbells use the second page. The
1047 	 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
1048 	 * doorbells are in the first page. So with paging queue enabled,
1049 	 * the max num_doorbells should be increased by 1 page (0x400 in dwords)
1050 	 */
1051 	if (adev->asic_type >= CHIP_VEGA10)
1052 		adev->doorbell.num_doorbells += 0x400;
1053 
1054 	adev->doorbell.ptr = ioremap(adev->doorbell.base,
1055 				     adev->doorbell.num_doorbells *
1056 				     sizeof(u32));
1057 	if (adev->doorbell.ptr == NULL)
1058 		return -ENOMEM;
1059 
1060 	return 0;
1061 }
1062 
1063 /**
1064  * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
1065  *
1066  * @adev: amdgpu_device pointer
1067  *
1068  * Tear down doorbell driver information (CIK)
1069  */
1070 static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
1071 {
1072 	iounmap(adev->doorbell.ptr);
1073 	adev->doorbell.ptr = NULL;
1074 }
1075 
1076 
1077 
1078 /*
1079  * amdgpu_device_wb_*()
1080  * Writeback is the method by which the GPU updates special pages in memory
1081  * with the status of certain GPU events (fences, ring pointers, etc.).
1082  */
1083 
1084 /**
1085  * amdgpu_device_wb_fini - Disable Writeback and free memory
1086  *
1087  * @adev: amdgpu_device pointer
1088  *
1089  * Disables Writeback and frees the Writeback memory (all asics).
1090  * Used at driver shutdown.
1091  */
1092 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1093 {
1094 	if (adev->wb.wb_obj) {
1095 		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1096 				      &adev->wb.gpu_addr,
1097 				      (void **)&adev->wb.wb);
1098 		adev->wb.wb_obj = NULL;
1099 	}
1100 }
1101 
1102 /**
1103  * amdgpu_device_wb_init- Init Writeback driver info and allocate memory
1104  *
1105  * @adev: amdgpu_device pointer
1106  *
1107  * Initializes writeback and allocates writeback memory (all asics).
1108  * Used at driver startup.
1109  * Returns 0 on success or a negative error code on failure.
1110  */
1111 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1112 {
1113 	int r;
1114 
1115 	if (adev->wb.wb_obj == NULL) {
1116 		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1117 		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1118 					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1119 					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
1120 					    (void **)&adev->wb.wb);
1121 		if (r) {
1122 			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1123 			return r;
1124 		}
1125 
1126 		adev->wb.num_wb = AMDGPU_MAX_WB;
1127 		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1128 
1129 		/* clear wb memory */
1130 		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1131 	}
1132 
1133 	return 0;
1134 }
1135 
1136 /**
1137  * amdgpu_device_wb_get - Allocate a wb entry
1138  *
1139  * @adev: amdgpu_device pointer
1140  * @wb: wb index
1141  *
1142  * Allocate a wb slot for use by the driver (all asics).
1143  * Returns 0 on success or -EINVAL on failure.
1144  */
1145 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1146 {
1147 	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1148 
1149 	if (offset < adev->wb.num_wb) {
1150 		__set_bit(offset, adev->wb.used);
1151 		*wb = offset << 3; /* convert to dw offset */
1152 		return 0;
1153 	} else {
1154 		return -EINVAL;
1155 	}
1156 }
1157 
1158 /**
1159  * amdgpu_device_wb_free - Free a wb entry
1160  *
1161  * @adev: amdgpu_device pointer
1162  * @wb: wb index
1163  *
1164  * Free a wb slot allocated for use by the driver (all asics)
1165  */
1166 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1167 {
1168 	wb >>= 3;
1169 	if (wb < adev->wb.num_wb)
1170 		__clear_bit(wb, adev->wb.used);
1171 }
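
/*
 * Typical writeback usage sketch (illustrative only): allocate a slot, derive
 * its CPU and GPU addresses from the dword offset returned in wb, and free it
 * again when done:
 *
 *	u32 wb;
 *
 *	if (!amdgpu_device_wb_get(adev, &wb)) {
 *		u64 gpu_addr = adev->wb.gpu_addr + wb * 4;
 *		volatile u32 *cpu_addr = &adev->wb.wb[wb];
 *
 *		...
 *		amdgpu_device_wb_free(adev, wb);
 *	}
 */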
1172 
1173 /**
1174  * amdgpu_device_resize_fb_bar - try to resize FB BAR
1175  *
1176  * @adev: amdgpu_device pointer
1177  *
1178  * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1179  * to fail, but if any of the BARs is not accessible after the resize we abort
1180  * driver loading by returning -ENODEV.
1181  */
1182 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1183 {
1184 	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1185 	struct pci_bus *root;
1186 	struct resource *res;
1187 	unsigned i;
1188 	u16 cmd;
1189 	int r;
1190 
1191 	/* Bypass for VF */
1192 	if (amdgpu_sriov_vf(adev))
1193 		return 0;
1194 
1195 	/* skip if the bios has already enabled large BAR */
1196 	if (adev->gmc.real_vram_size &&
1197 	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1198 		return 0;
1199 
1200 	/* Check if the root BUS has 64bit memory resources */
1201 	root = adev->pdev->bus;
1202 	while (root->parent)
1203 		root = root->parent;
1204 
1205 	pci_bus_for_each_resource(root, res, i) {
1206 		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1207 		    res->start > 0x100000000ull)
1208 			break;
1209 	}
1210 
1211 	/* Trying to resize is pointless without a root hub window above 4GB */
1212 	if (!res)
1213 		return 0;
1214 
1215 	/* Limit the BAR size to what is available */
1216 	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1217 			rbar_size);
1218 
1219 	/* Disable memory decoding while we change the BAR addresses and size */
1220 	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1221 	pci_write_config_word(adev->pdev, PCI_COMMAND,
1222 			      cmd & ~PCI_COMMAND_MEMORY);
1223 
1224 	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
1225 	amdgpu_device_doorbell_fini(adev);
1226 	if (adev->asic_type >= CHIP_BONAIRE)
1227 		pci_release_resource(adev->pdev, 2);
1228 
1229 	pci_release_resource(adev->pdev, 0);
1230 
1231 	r = pci_resize_resource(adev->pdev, 0, rbar_size);
1232 	if (r == -ENOSPC)
1233 		DRM_INFO("Not enough PCI address space for a large BAR.");
1234 	else if (r && r != -ENOTSUPP)
1235 		DRM_ERROR("Problem resizing BAR0 (%d).", r);
1236 
1237 	pci_assign_unassigned_bus_resources(adev->pdev->bus);
1238 
1239 	/* When the doorbell or fb BAR isn't available we have no chance of
1240 	 * using the device.
1241 	 */
1242 	r = amdgpu_device_doorbell_init(adev);
1243 	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1244 		return -ENODEV;
1245 
1246 	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1247 
1248 	return 0;
1249 }
1250 
1251 /*
1252  * GPU helpers function.
1253  */
1254 /**
1255  * amdgpu_device_need_post - check if the hw need post or not
1256  *
1257  * @adev: amdgpu_device pointer
1258  *
1259  * Check if the asic has been initialized (all asics) at driver startup,
1260  * or if post is needed because a hw reset was performed.
1261  * Returns true if need or false if not.
1262  */
1263 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1264 {
1265 	uint32_t reg;
1266 
1267 	if (amdgpu_sriov_vf(adev))
1268 		return false;
1269 
1270 	if (amdgpu_passthrough(adev)) {
1271 		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
1272 		 * some old smc fw still needs the driver to do a vPost, otherwise the gpu
1273 		 * hangs, while smc fw versions above 22.15 don't have this flaw, so we
1274 		 * force a vPost for smc versions below 22.15
1275 		 */
1276 		if (adev->asic_type == CHIP_FIJI) {
1277 			int err;
1278 			uint32_t fw_ver;
1279 			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1280 			/* force vPost if an error occurred */
1281 			if (err)
1282 				return true;
1283 
1284 			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1285 			if (fw_ver < 0x00160e00)
1286 				return true;
1287 		}
1288 	}
1289 
1290 	/* Don't post if we need to reset whole hive on init */
1291 	if (adev->gmc.xgmi.pending_reset)
1292 		return false;
1293 
1294 	if (adev->has_hw_reset) {
1295 		adev->has_hw_reset = false;
1296 		return true;
1297 	}
1298 
1299 	/* bios scratch used on CIK+ */
1300 	if (adev->asic_type >= CHIP_BONAIRE)
1301 		return amdgpu_atombios_scratch_need_asic_init(adev);
1302 
1303 	/* check MEM_SIZE for older asics */
1304 	reg = amdgpu_asic_get_config_memsize(adev);
1305 
1306 	if ((reg != 0) && (reg != 0xffffffff))
1307 		return false;
1308 
1309 	return true;
1310 }
1311 
1312 /* if we get transitioned to only one device, take VGA back */
1313 /**
1314  * amdgpu_device_vga_set_decode - enable/disable vga decode
1315  *
1316  * @pdev: PCI device pointer
1317  * @state: enable/disable vga decode
1318  *
1319  * Enable/disable vga decode (all asics).
1320  * Returns VGA resource flags.
1321  */
1322 static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1323 		bool state)
1324 {
1325 	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1326 	amdgpu_asic_set_vga_state(adev, state);
1327 	if (state)
1328 		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1329 		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1330 	else
1331 		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1332 }
1333 
1334 /**
1335  * amdgpu_device_check_block_size - validate the vm block size
1336  *
1337  * @adev: amdgpu_device pointer
1338  *
1339  * Validates the vm block size specified via module parameter.
1340  * The vm block size defines the number of bits in page table versus page directory,
1341  * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1342  * page table and the remaining bits are in the page directory.
1343  */
1344 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1345 {
1346 	/* defines the number of bits in page table versus page directory,
1347 	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1348 	 * page table and the remaining bits are in the page directory */
1349 	if (amdgpu_vm_block_size == -1)
1350 		return;
1351 
1352 	if (amdgpu_vm_block_size < 9) {
1353 		dev_warn(adev->dev, "VM page table size (%d) too small\n",
1354 			 amdgpu_vm_block_size);
1355 		amdgpu_vm_block_size = -1;
1356 	}
1357 }
1358 
1359 /**
1360  * amdgpu_device_check_vm_size - validate the vm size
1361  *
1362  * @adev: amdgpu_device pointer
1363  *
1364  * Validates the vm size in GB specified via module parameter.
1365  * The VM size is the size of the GPU virtual memory space in GB.
1366  */
1367 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1368 {
1369 	/* no need to check the default value */
1370 	if (amdgpu_vm_size == -1)
1371 		return;
1372 
1373 	if (amdgpu_vm_size < 1) {
1374 		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1375 			 amdgpu_vm_size);
1376 		amdgpu_vm_size = -1;
1377 	}
1378 }
1379 
1380 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1381 {
1382 	struct sysinfo si;
1383 	bool is_os_64 = (sizeof(void *) == 8);
1384 	uint64_t total_memory;
1385 	uint64_t dram_size_seven_GB = 0x1B8000000;
1386 	uint64_t dram_size_three_GB = 0xB8000000;
1387 
1388 	if (amdgpu_smu_memory_pool_size == 0)
1389 		return;
1390 
1391 	if (!is_os_64) {
1392 		DRM_WARN("Not 64-bit OS, feature not supported\n");
1393 		goto def_value;
1394 	}
1395 	si_meminfo(&si);
1396 	total_memory = (uint64_t)si.totalram * si.mem_unit;
1397 
1398 	if ((amdgpu_smu_memory_pool_size == 1) ||
1399 		(amdgpu_smu_memory_pool_size == 2)) {
1400 		if (total_memory < dram_size_three_GB)
1401 			goto def_value1;
1402 	} else if ((amdgpu_smu_memory_pool_size == 4) ||
1403 		(amdgpu_smu_memory_pool_size == 8)) {
1404 		if (total_memory < dram_size_seven_GB)
1405 			goto def_value1;
1406 	} else {
1407 		DRM_WARN("Smu memory pool size not supported\n");
1408 		goto def_value;
1409 	}
1410 	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1411 
1412 	return;
1413 
1414 def_value1:
1415 	DRM_WARN("Not enough system memory\n");
1416 def_value:
1417 	adev->pm.smu_prv_buffer_size = 0;
1418 }
1419 
1420 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1421 {
1422 	if (!(adev->flags & AMD_IS_APU) ||
1423 	    adev->asic_type < CHIP_RAVEN)
1424 		return 0;
1425 
1426 	switch (adev->asic_type) {
1427 	case CHIP_RAVEN:
1428 		if (adev->pdev->device == 0x15dd)
1429 			adev->apu_flags |= AMD_APU_IS_RAVEN;
1430 		if (adev->pdev->device == 0x15d8)
1431 			adev->apu_flags |= AMD_APU_IS_PICASSO;
1432 		break;
1433 	case CHIP_RENOIR:
1434 		if ((adev->pdev->device == 0x1636) ||
1435 		    (adev->pdev->device == 0x164c))
1436 			adev->apu_flags |= AMD_APU_IS_RENOIR;
1437 		else
1438 			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1439 		break;
1440 	case CHIP_VANGOGH:
1441 		adev->apu_flags |= AMD_APU_IS_VANGOGH;
1442 		break;
1443 	case CHIP_YELLOW_CARP:
1444 		break;
1445 	case CHIP_CYAN_SKILLFISH:
1446 		if (adev->pdev->device == 0x13FE)
1447 			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1448 		break;
1449 	default:
1450 		return -EINVAL;
1451 	}
1452 
1453 	return 0;
1454 }
1455 
1456 /**
1457  * amdgpu_device_check_arguments - validate module params
1458  *
1459  * @adev: amdgpu_device pointer
1460  *
1461  * Validates certain module parameters and updates
1462  * the associated values used by the driver (all asics).
1463  */
1464 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1465 {
1466 	if (amdgpu_sched_jobs < 4) {
1467 		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1468 			 amdgpu_sched_jobs);
1469 		amdgpu_sched_jobs = 4;
1470 	} else if (!is_power_of_2(amdgpu_sched_jobs)){
1471 		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1472 			 amdgpu_sched_jobs);
1473 		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1474 	}
1475 
1476 	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1477 		/* gart size must be greater or equal to 32M */
1478 		dev_warn(adev->dev, "gart size (%d) too small\n",
1479 			 amdgpu_gart_size);
1480 		amdgpu_gart_size = -1;
1481 	}
1482 
1483 	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1484 		/* gtt size must be greater or equal to 32M */
1485 		dev_warn(adev->dev, "gtt size (%d) too small\n",
1486 				 amdgpu_gtt_size);
1487 		amdgpu_gtt_size = -1;
1488 	}
1489 
1490 	/* valid range is between 4 and 9 inclusive */
1491 	if (amdgpu_vm_fragment_size != -1 &&
1492 	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1493 		dev_warn(adev->dev, "valid range is between 4 and 9\n");
1494 		amdgpu_vm_fragment_size = -1;
1495 	}
1496 
1497 	if (amdgpu_sched_hw_submission < 2) {
1498 		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1499 			 amdgpu_sched_hw_submission);
1500 		amdgpu_sched_hw_submission = 2;
1501 	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1502 		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1503 			 amdgpu_sched_hw_submission);
1504 		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1505 	}
1506 
1507 	amdgpu_device_check_smu_prv_buffer_size(adev);
1508 
1509 	amdgpu_device_check_vm_size(adev);
1510 
1511 	amdgpu_device_check_block_size(adev);
1512 
1513 	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1514 
1515 	amdgpu_gmc_tmz_set(adev);
1516 
1517 	amdgpu_gmc_noretry_set(adev);
1518 
1519 	return 0;
1520 }
1521 
1522 /**
1523  * amdgpu_switcheroo_set_state - set switcheroo state
1524  *
1525  * @pdev: pci dev pointer
1526  * @state: vga_switcheroo state
1527  *
1528  * Callback for the switcheroo driver.  Suspends or resumes
1529  * the asic before or after it is powered up using ACPI methods.
1530  */
1531 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1532 					enum vga_switcheroo_state state)
1533 {
1534 	struct drm_device *dev = pci_get_drvdata(pdev);
1535 	int r;
1536 
1537 	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1538 		return;
1539 
1540 	if (state == VGA_SWITCHEROO_ON) {
1541 		pr_info("switched on\n");
1542 		/* don't suspend or resume card normally */
1543 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1544 
1545 		pci_set_power_state(pdev, PCI_D0);
1546 		amdgpu_device_load_pci_state(pdev);
1547 		r = pci_enable_device(pdev);
1548 		if (r)
1549 			DRM_WARN("pci_enable_device failed (%d)\n", r);
1550 		amdgpu_device_resume(dev, true);
1551 
1552 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
1553 	} else {
1554 		pr_info("switched off\n");
1555 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1556 		amdgpu_device_suspend(dev, true);
1557 		amdgpu_device_cache_pci_state(pdev);
1558 		/* Shut down the device */
1559 		pci_disable_device(pdev);
1560 		pci_set_power_state(pdev, PCI_D3cold);
1561 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1562 	}
1563 }
1564 
1565 /**
1566  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1567  *
1568  * @pdev: pci dev pointer
1569  *
1570  * Callback for the switcheroo driver.  Check if the switcheroo
1571  * state can be changed.
1572  * Returns true if the state can be changed, false if not.
1573  */
1574 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1575 {
1576 	struct drm_device *dev = pci_get_drvdata(pdev);
1577 
1578 	/*
1579 	* FIXME: open_count is protected by drm_global_mutex but that would lead to
1580 	* locking inversion with the driver load path. And the access here is
1581 	* completely racy anyway. So don't bother with locking for now.
1582 	*/
1583 	return atomic_read(&dev->open_count) == 0;
1584 }
1585 
1586 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1587 	.set_gpu_state = amdgpu_switcheroo_set_state,
1588 	.reprobe = NULL,
1589 	.can_switch = amdgpu_switcheroo_can_switch,
1590 };
1591 
1592 /**
1593  * amdgpu_device_ip_set_clockgating_state - set the CG state
1594  *
1595  * @dev: amdgpu_device pointer
1596  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1597  * @state: clockgating state (gate or ungate)
1598  *
1599  * Sets the requested clockgating state for all instances of
1600  * the hardware IP specified.
1601  * Returns the error code from the last instance.
1602  */
1603 int amdgpu_device_ip_set_clockgating_state(void *dev,
1604 					   enum amd_ip_block_type block_type,
1605 					   enum amd_clockgating_state state)
1606 {
1607 	struct amdgpu_device *adev = dev;
1608 	int i, r = 0;
1609 
1610 	for (i = 0; i < adev->num_ip_blocks; i++) {
1611 		if (!adev->ip_blocks[i].status.valid)
1612 			continue;
1613 		if (adev->ip_blocks[i].version->type != block_type)
1614 			continue;
1615 		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1616 			continue;
1617 		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1618 			(void *)adev, state);
1619 		if (r)
1620 			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1621 				  adev->ip_blocks[i].version->funcs->name, r);
1622 	}
1623 	return r;
1624 }
1625 
1626 /**
1627  * amdgpu_device_ip_set_powergating_state - set the PG state
1628  *
1629  * @dev: amdgpu_device pointer
1630  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1631  * @state: powergating state (gate or ungate)
1632  *
1633  * Sets the requested powergating state for all instances of
1634  * the hardware IP specified.
1635  * Returns the error code from the last instance.
1636  */
1637 int amdgpu_device_ip_set_powergating_state(void *dev,
1638 					   enum amd_ip_block_type block_type,
1639 					   enum amd_powergating_state state)
1640 {
1641 	struct amdgpu_device *adev = dev;
1642 	int i, r = 0;
1643 
1644 	for (i = 0; i < adev->num_ip_blocks; i++) {
1645 		if (!adev->ip_blocks[i].status.valid)
1646 			continue;
1647 		if (adev->ip_blocks[i].version->type != block_type)
1648 			continue;
1649 		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1650 			continue;
1651 		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1652 			(void *)adev, state);
1653 		if (r)
1654 			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1655 				  adev->ip_blocks[i].version->funcs->name, r);
1656 	}
1657 	return r;
1658 }
1659 
1660 /**
1661  * amdgpu_device_ip_get_clockgating_state - get the CG state
1662  *
1663  * @adev: amdgpu_device pointer
1664  * @flags: clockgating feature flags
1665  *
1666  * Walks the list of IPs on the device and updates the clockgating
1667  * flags for each IP.
1668  * Updates @flags with the feature flags for each hardware IP where
1669  * clockgating is enabled.
1670  */
1671 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1672 					    u32 *flags)
1673 {
1674 	int i;
1675 
1676 	for (i = 0; i < adev->num_ip_blocks; i++) {
1677 		if (!adev->ip_blocks[i].status.valid)
1678 			continue;
1679 		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1680 			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1681 	}
1682 }
1683 
1684 /**
1685  * amdgpu_device_ip_wait_for_idle - wait for idle
1686  *
1687  * @adev: amdgpu_device pointer
1688  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1689  *
1690  * Waits for the requested hardware IP to be idle.
1691  * Returns 0 for success or a negative error code on failure.
1692  */
1693 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1694 				   enum amd_ip_block_type block_type)
1695 {
1696 	int i, r;
1697 
1698 	for (i = 0; i < adev->num_ip_blocks; i++) {
1699 		if (!adev->ip_blocks[i].status.valid)
1700 			continue;
1701 		if (adev->ip_blocks[i].version->type == block_type) {
1702 			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1703 			if (r)
1704 				return r;
1705 			break;
1706 		}
1707 	}
1708 	return 0;
1709 
1710 }
1711 
1712 /**
1713  * amdgpu_device_ip_is_idle - is the hardware IP idle
1714  *
1715  * @adev: amdgpu_device pointer
1716  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1717  *
1718  * Check if the hardware IP is idle or not.
1719  * Returns true if the IP is idle, false if not.
1720  */
1721 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1722 			      enum amd_ip_block_type block_type)
1723 {
1724 	int i;
1725 
1726 	for (i = 0; i < adev->num_ip_blocks; i++) {
1727 		if (!adev->ip_blocks[i].status.valid)
1728 			continue;
1729 		if (adev->ip_blocks[i].version->type == block_type)
1730 			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1731 	}
1732 	return true;
1733 
1734 }
1735 
1736 /**
1737  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1738  *
1739  * @adev: amdgpu_device pointer
1740  * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1741  *
1742  * Returns a pointer to the hardware IP block structure
1743  * if it exists for the asic, otherwise NULL.
1744  */
1745 struct amdgpu_ip_block *
1746 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1747 			      enum amd_ip_block_type type)
1748 {
1749 	int i;
1750 
1751 	for (i = 0; i < adev->num_ip_blocks; i++)
1752 		if (adev->ip_blocks[i].version->type == type)
1753 			return &adev->ip_blocks[i];
1754 
1755 	return NULL;
1756 }
1757 
1758 /**
1759  * amdgpu_device_ip_block_version_cmp
1760  *
1761  * @adev: amdgpu_device pointer
1762  * @type: enum amd_ip_block_type
1763  * @major: major version
1764  * @minor: minor version
1765  *
1766  * return 0 if equal or greater
1767  * return 1 if smaller or the ip_block doesn't exist
1768  */
1769 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1770 				       enum amd_ip_block_type type,
1771 				       u32 major, u32 minor)
1772 {
1773 	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1774 
1775 	if (ip_block && ((ip_block->version->major > major) ||
1776 			((ip_block->version->major == major) &&
1777 			(ip_block->version->minor >= minor))))
1778 		return 0;
1779 
1780 	return 1;
1781 }
1782 
1783 /**
1784  * amdgpu_device_ip_block_add
1785  *
1786  * @adev: amdgpu_device pointer
1787  * @ip_block_version: pointer to the IP to add
1788  *
1789  * Adds the IP block driver information to the collection of IPs
1790  * on the asic.
1791  */
1792 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1793 			       const struct amdgpu_ip_block_version *ip_block_version)
1794 {
1795 	if (!ip_block_version)
1796 		return -EINVAL;
1797 
1798 	switch (ip_block_version->type) {
1799 	case AMD_IP_BLOCK_TYPE_VCN:
1800 		if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1801 			return 0;
1802 		break;
1803 	case AMD_IP_BLOCK_TYPE_JPEG:
1804 		if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1805 			return 0;
1806 		break;
1807 	default:
1808 		break;
1809 	}
1810 
1811 	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1812 		  ip_block_version->funcs->name);
1813 
1814 	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1815 
1816 	return 0;
1817 }
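
/*
 * Example (illustrative): SOC specific code registers its IP blocks during
 * early init, roughly like
 *
 *	amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
 */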
1818 
1819 /**
1820  * amdgpu_device_enable_virtual_display - enable virtual display feature
1821  *
1822  * @adev: amdgpu_device pointer
1823  *
1824  * Enables the virtual display feature if the user has enabled it via
1825  * the module parameter virtual_display.  This feature provides virtual
1826  * display hardware on headless boards or in virtualized environments.
1827  * This function parses and validates the configuration string specified by
1828  * the user and configures the virtual display configuration (number of
1829  * virtual connectors, crtcs, etc.) specified.
1830  */
1831 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1832 {
1833 	adev->enable_virtual_display = false;
1834 
1835 	if (amdgpu_virtual_display) {
1836 		const char *pci_address_name = pci_name(adev->pdev);
1837 		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1838 
1839 		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1840 		pciaddstr_tmp = pciaddstr;
1841 		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1842 			pciaddname = strsep(&pciaddname_tmp, ",");
1843 			if (!strcmp("all", pciaddname)
1844 			    || !strcmp(pci_address_name, pciaddname)) {
1845 				long num_crtc;
1846 				int res = -1;
1847 
1848 				adev->enable_virtual_display = true;
1849 
1850 				if (pciaddname_tmp)
1851 					res = kstrtol(pciaddname_tmp, 10,
1852 						      &num_crtc);
1853 
1854 				if (!res) {
1855 					if (num_crtc < 1)
1856 						num_crtc = 1;
1857 					if (num_crtc > 6)
1858 						num_crtc = 6;
1859 					adev->mode_info.num_crtc = num_crtc;
1860 				} else {
1861 					adev->mode_info.num_crtc = 1;
1862 				}
1863 				break;
1864 			}
1865 		}
1866 
1867 		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1868 			 amdgpu_virtual_display, pci_address_name,
1869 			 adev->enable_virtual_display, adev->mode_info.num_crtc);
1870 
1871 		kfree(pciaddstr);
1872 	}
1873 }
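
/*
 * Illustrative note derived from the parsing above (not a normative spec):
 * virtual_display is a ';'-separated list of "<pci address>[,<num_crtc>]"
 * entries, or "all" to match every device; num_crtc is clamped to 1..6.
 * For example:
 *
 *	modprobe amdgpu virtual_display=0000:04:00.0,2
 *
 * enables two virtual crtcs on the device at PCI address 0000:04:00.0.
 */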
1874 
1875 /**
1876  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1877  *
1878  * @adev: amdgpu_device pointer
1879  *
1880  * Parses the asic configuration parameters specified in the gpu info
1881  * firmware and makes them available to the driver for use in configuring
1882  * the asic.
1883  * Returns 0 on success, -EINVAL on failure.
1884  */
1885 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1886 {
1887 	const char *chip_name;
1888 	char fw_name[40];
1889 	int err;
1890 	const struct gpu_info_firmware_header_v1_0 *hdr;
1891 
1892 	adev->firmware.gpu_info_fw = NULL;
1893 
1894 	if (adev->mman.discovery_bin) {
1895 		amdgpu_discovery_get_gfx_info(adev);
1896 
1897 		/*
1898 		 * FIXME: The bounding box is still needed by Navi12, so
1899 		 * temporarily read it from gpu_info firmware. Should be dropped
1900 		 * when DAL no longer needs it.
1901 		 */
1902 		if (adev->asic_type != CHIP_NAVI12)
1903 			return 0;
1904 	}
1905 
1906 	switch (adev->asic_type) {
1907 #ifdef CONFIG_DRM_AMDGPU_SI
1908 	case CHIP_VERDE:
1909 	case CHIP_TAHITI:
1910 	case CHIP_PITCAIRN:
1911 	case CHIP_OLAND:
1912 	case CHIP_HAINAN:
1913 #endif
1914 #ifdef CONFIG_DRM_AMDGPU_CIK
1915 	case CHIP_BONAIRE:
1916 	case CHIP_HAWAII:
1917 	case CHIP_KAVERI:
1918 	case CHIP_KABINI:
1919 	case CHIP_MULLINS:
1920 #endif
1921 	case CHIP_TOPAZ:
1922 	case CHIP_TONGA:
1923 	case CHIP_FIJI:
1924 	case CHIP_POLARIS10:
1925 	case CHIP_POLARIS11:
1926 	case CHIP_POLARIS12:
1927 	case CHIP_VEGAM:
1928 	case CHIP_CARRIZO:
1929 	case CHIP_STONEY:
1930 	case CHIP_VEGA20:
1931 	case CHIP_ALDEBARAN:
1932 	case CHIP_SIENNA_CICHLID:
1933 	case CHIP_NAVY_FLOUNDER:
1934 	case CHIP_DIMGREY_CAVEFISH:
1935 	case CHIP_BEIGE_GOBY:
1936 	default:
1937 		return 0;
1938 	case CHIP_VEGA10:
1939 		chip_name = "vega10";
1940 		break;
1941 	case CHIP_VEGA12:
1942 		chip_name = "vega12";
1943 		break;
1944 	case CHIP_RAVEN:
1945 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1946 			chip_name = "raven2";
1947 		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1948 			chip_name = "picasso";
1949 		else
1950 			chip_name = "raven";
1951 		break;
1952 	case CHIP_ARCTURUS:
1953 		chip_name = "arcturus";
1954 		break;
1955 	case CHIP_RENOIR:
1956 		if (adev->apu_flags & AMD_APU_IS_RENOIR)
1957 			chip_name = "renoir";
1958 		else
1959 			chip_name = "green_sardine";
1960 		break;
1961 	case CHIP_NAVI10:
1962 		chip_name = "navi10";
1963 		break;
1964 	case CHIP_NAVI14:
1965 		chip_name = "navi14";
1966 		break;
1967 	case CHIP_NAVI12:
1968 		chip_name = "navi12";
1969 		break;
1970 	case CHIP_VANGOGH:
1971 		chip_name = "vangogh";
1972 		break;
1973 	case CHIP_YELLOW_CARP:
1974 		chip_name = "yellow_carp";
1975 		break;
1976 	}
1977 
1978 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1979 	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
1980 	if (err) {
1981 		dev_err(adev->dev,
1982 			"Failed to load gpu_info firmware \"%s\"\n",
1983 			fw_name);
1984 		goto out;
1985 	}
1986 	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
1987 	if (err) {
1988 		dev_err(adev->dev,
1989 			"Failed to validate gpu_info firmware \"%s\"\n",
1990 			fw_name);
1991 		goto out;
1992 	}
1993 
1994 	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1995 	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1996 
1997 	switch (hdr->version_major) {
1998 	case 1:
1999 	{
2000 		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
2001 			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
2002 								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2003 
2004 		/*
2005 		 * Should be dropped when DAL no longer needs it.
2006 		 */
2007 		if (adev->asic_type == CHIP_NAVI12)
2008 			goto parse_soc_bounding_box;
2009 
2010 		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2011 		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2012 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2013 		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2014 		adev->gfx.config.max_texture_channel_caches =
2015 			le32_to_cpu(gpu_info_fw->gc_num_tccs);
2016 		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2017 		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2018 		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2019 		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2020 		adev->gfx.config.double_offchip_lds_buf =
2021 			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2022 		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2023 		adev->gfx.cu_info.max_waves_per_simd =
2024 			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2025 		adev->gfx.cu_info.max_scratch_slots_per_cu =
2026 			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2027 		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2028 		if (hdr->version_minor >= 1) {
2029 			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2030 				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2031 									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2032 			adev->gfx.config.num_sc_per_sh =
2033 				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2034 			adev->gfx.config.num_packer_per_sc =
2035 				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2036 		}
2037 
2038 parse_soc_bounding_box:
2039 		/*
2040 		 * soc bounding box info is not integrated in the discovery table,
2041 		 * so we always need to parse it from the gpu_info firmware when needed.
2042 		 */
2043 		if (hdr->version_minor == 2) {
2044 			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2045 				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2046 									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2047 			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2048 		}
2049 		break;
2050 	}
2051 	default:
2052 		dev_err(adev->dev,
2053 			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2054 		err = -EINVAL;
2055 		goto out;
2056 	}
2057 out:
2058 	return err;
2059 }
2060 
2061 /**
2062  * amdgpu_device_ip_early_init - run early init for hardware IPs
2063  *
2064  * @adev: amdgpu_device pointer
2065  *
2066  * Early initialization pass for hardware IPs.  The hardware IPs that make
2067  * up each asic are discovered and each IP's early_init callback is run.  This
2068  * is the first stage in initializing the asic.
2069  * Returns 0 on success, negative error code on failure.
2070  */
2071 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2072 {
2073 	int i, r;
2074 
2075 	amdgpu_device_enable_virtual_display(adev);
2076 
2077 	if (amdgpu_sriov_vf(adev)) {
2078 		r = amdgpu_virt_request_full_gpu(adev, true);
2079 		if (r)
2080 			return r;
2081 	}
2082 
2083 	switch (adev->asic_type) {
2084 #ifdef CONFIG_DRM_AMDGPU_SI
2085 	case CHIP_VERDE:
2086 	case CHIP_TAHITI:
2087 	case CHIP_PITCAIRN:
2088 	case CHIP_OLAND:
2089 	case CHIP_HAINAN:
2090 		adev->family = AMDGPU_FAMILY_SI;
2091 		r = si_set_ip_blocks(adev);
2092 		if (r)
2093 			return r;
2094 		break;
2095 #endif
2096 #ifdef CONFIG_DRM_AMDGPU_CIK
2097 	case CHIP_BONAIRE:
2098 	case CHIP_HAWAII:
2099 	case CHIP_KAVERI:
2100 	case CHIP_KABINI:
2101 	case CHIP_MULLINS:
2102 		if (adev->flags & AMD_IS_APU)
2103 			adev->family = AMDGPU_FAMILY_KV;
2104 		else
2105 			adev->family = AMDGPU_FAMILY_CI;
2106 
2107 		r = cik_set_ip_blocks(adev);
2108 		if (r)
2109 			return r;
2110 		break;
2111 #endif
2112 	case CHIP_TOPAZ:
2113 	case CHIP_TONGA:
2114 	case CHIP_FIJI:
2115 	case CHIP_POLARIS10:
2116 	case CHIP_POLARIS11:
2117 	case CHIP_POLARIS12:
2118 	case CHIP_VEGAM:
2119 	case CHIP_CARRIZO:
2120 	case CHIP_STONEY:
2121 		if (adev->flags & AMD_IS_APU)
2122 			adev->family = AMDGPU_FAMILY_CZ;
2123 		else
2124 			adev->family = AMDGPU_FAMILY_VI;
2125 
2126 		r = vi_set_ip_blocks(adev);
2127 		if (r)
2128 			return r;
2129 		break;
2130 	default:
2131 		r = amdgpu_discovery_set_ip_blocks(adev);
2132 		if (r)
2133 			return r;
2134 		break;
2135 	}
2136 
2137 	amdgpu_amdkfd_device_probe(adev);
2138 
2139 	adev->pm.pp_feature = amdgpu_pp_feature_mask;
2140 	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2141 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2142 	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2143 		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2144 
2145 	for (i = 0; i < adev->num_ip_blocks; i++) {
2146 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2147 			DRM_ERROR("disabled ip block: %d <%s>\n",
2148 				  i, adev->ip_blocks[i].version->funcs->name);
2149 			adev->ip_blocks[i].status.valid = false;
2150 		} else {
2151 			if (adev->ip_blocks[i].version->funcs->early_init) {
2152 				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2153 				if (r == -ENOENT) {
2154 					adev->ip_blocks[i].status.valid = false;
2155 				} else if (r) {
2156 					DRM_ERROR("early_init of IP block <%s> failed %d\n",
2157 						  adev->ip_blocks[i].version->funcs->name, r);
2158 					return r;
2159 				} else {
2160 					adev->ip_blocks[i].status.valid = true;
2161 				}
2162 			} else {
2163 				adev->ip_blocks[i].status.valid = true;
2164 			}
2165 		}
2166 		/* get the vbios after the asic_funcs are set up */
2167 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2168 			r = amdgpu_device_parse_gpu_info_fw(adev);
2169 			if (r)
2170 				return r;
2171 
2172 			/* Read BIOS */
2173 			if (!amdgpu_get_bios(adev))
2174 				return -EINVAL;
2175 
2176 			r = amdgpu_atombios_init(adev);
2177 			if (r) {
2178 				dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2179 				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2180 				return r;
2181 			}
2182 
2183 			/* get pf2vf msg info at its earliest time */
2184 			if (amdgpu_sriov_vf(adev))
2185 				amdgpu_virt_init_data_exchange(adev);
2186 
2187 		}
2188 	}
2189 
2190 	adev->cg_flags &= amdgpu_cg_mask;
2191 	adev->pg_flags &= amdgpu_pg_mask;
2192 
2193 	return 0;
2194 }
2195 
2196 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2197 {
2198 	int i, r;
2199 
2200 	for (i = 0; i < adev->num_ip_blocks; i++) {
2201 		if (!adev->ip_blocks[i].status.sw)
2202 			continue;
2203 		if (adev->ip_blocks[i].status.hw)
2204 			continue;
2205 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2206 		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2207 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2208 			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2209 			if (r) {
2210 				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2211 					  adev->ip_blocks[i].version->funcs->name, r);
2212 				return r;
2213 			}
2214 			adev->ip_blocks[i].status.hw = true;
2215 		}
2216 	}
2217 
2218 	return 0;
2219 }
2220 
2221 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2222 {
2223 	int i, r;
2224 
2225 	for (i = 0; i < adev->num_ip_blocks; i++) {
2226 		if (!adev->ip_blocks[i].status.sw)
2227 			continue;
2228 		if (adev->ip_blocks[i].status.hw)
2229 			continue;
2230 		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2231 		if (r) {
2232 			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2233 				  adev->ip_blocks[i].version->funcs->name, r);
2234 			return r;
2235 		}
2236 		adev->ip_blocks[i].status.hw = true;
2237 	}
2238 
2239 	return 0;
2240 }
2241 
2242 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2243 {
2244 	int r = 0;
2245 	int i;
2246 	uint32_t smu_version;
2247 
2248 	if (adev->asic_type >= CHIP_VEGA10) {
2249 		for (i = 0; i < adev->num_ip_blocks; i++) {
2250 			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2251 				continue;
2252 
2253 			if (!adev->ip_blocks[i].status.sw)
2254 				continue;
2255 
2256 			/* no need to do the fw loading again if already done */
2257 			if (adev->ip_blocks[i].status.hw)
2258 				break;
2259 
2260 			if (amdgpu_in_reset(adev) || adev->in_suspend) {
2261 				r = adev->ip_blocks[i].version->funcs->resume(adev);
2262 				if (r) {
2263 					DRM_ERROR("resume of IP block <%s> failed %d\n",
2264 							  adev->ip_blocks[i].version->funcs->name, r);
2265 					return r;
2266 				}
2267 			} else {
2268 				r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2269 				if (r) {
2270 					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2271 							  adev->ip_blocks[i].version->funcs->name, r);
2272 					return r;
2273 				}
2274 			}
2275 
2276 			adev->ip_blocks[i].status.hw = true;
2277 			break;
2278 		}
2279 	}
2280 
2281 	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2282 		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2283 
2284 	return r;
2285 }
2286 
2287 /**
2288  * amdgpu_device_ip_init - run init for hardware IPs
2289  *
2290  * @adev: amdgpu_device pointer
2291  *
2292  * Main initialization pass for hardware IPs.  The list of all the hardware
2293  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2294  * are run.  sw_init initializes the software state associated with each IP
2295  * and hw_init initializes the hardware associated with each IP.
2296  * Returns 0 on success, negative error code on failure.
2297  */
2298 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2299 {
2300 	int i, r;
2301 
2302 	r = amdgpu_ras_init(adev);
2303 	if (r)
2304 		return r;
2305 
2306 	for (i = 0; i < adev->num_ip_blocks; i++) {
2307 		if (!adev->ip_blocks[i].status.valid)
2308 			continue;
2309 		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2310 		if (r) {
2311 			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2312 				  adev->ip_blocks[i].version->funcs->name, r);
2313 			goto init_failed;
2314 		}
2315 		adev->ip_blocks[i].status.sw = true;
2316 
2317 		/* need to do gmc hw init early so we can allocate gpu mem */
2318 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2319 			r = amdgpu_device_vram_scratch_init(adev);
2320 			if (r) {
2321 				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
2322 				goto init_failed;
2323 			}
2324 			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2325 			if (r) {
2326 				DRM_ERROR("hw_init %d failed %d\n", i, r);
2327 				goto init_failed;
2328 			}
2329 			r = amdgpu_device_wb_init(adev);
2330 			if (r) {
2331 				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2332 				goto init_failed;
2333 			}
2334 			adev->ip_blocks[i].status.hw = true;
2335 
2336 			/* right after GMC hw init, we create CSA */
2337 			if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
2338 				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2339 								AMDGPU_GEM_DOMAIN_VRAM,
2340 								AMDGPU_CSA_SIZE);
2341 				if (r) {
2342 					DRM_ERROR("allocate CSA failed %d\n", r);
2343 					goto init_failed;
2344 				}
2345 			}
2346 		}
2347 	}
2348 
2349 	if (amdgpu_sriov_vf(adev))
2350 		amdgpu_virt_init_data_exchange(adev);
2351 
2352 	r = amdgpu_ib_pool_init(adev);
2353 	if (r) {
2354 		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2355 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2356 		goto init_failed;
2357 	}
2358 
2359 	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete */
2360 	if (r)
2361 		goto init_failed;
2362 
2363 	r = amdgpu_device_ip_hw_init_phase1(adev);
2364 	if (r)
2365 		goto init_failed;
2366 
2367 	r = amdgpu_device_fw_loading(adev);
2368 	if (r)
2369 		goto init_failed;
2370 
2371 	r = amdgpu_device_ip_hw_init_phase2(adev);
2372 	if (r)
2373 		goto init_failed;
2374 
2375 	/*
2376 	 * Retired pages will be loaded from eeprom and reserved here;
2377 	 * this must be called after amdgpu_device_ip_hw_init_phase2() since
2378 	 * for some ASICs the RAS EEPROM code relies on the SMU being fully
2379 	 * functional for I2C communication, which is only true at this point.
2380 	 *
2381 	 * amdgpu_ras_recovery_init may fail, but the caller only cares about
2382 	 * failures caused by a bad gpu situation and stops the amdgpu init
2383 	 * process accordingly. For other failures, it still releases all
2384 	 * the resources and prints an error message, rather than returning a
2385 	 * negative value to the upper level.
2386 	 *
2387 	 * Note: theoretically, this should be called before all vram allocations
2388 	 * to protect retired pages from being abused.
2389 	 */
2390 	r = amdgpu_ras_recovery_init(adev);
2391 	if (r)
2392 		goto init_failed;
2393 
2394 	if (adev->gmc.xgmi.num_physical_nodes > 1)
2395 		amdgpu_xgmi_add_device(adev);
2396 
2397 	/* Don't init kfd if whole hive needs to be reset during init */
2398 	if (!adev->gmc.xgmi.pending_reset)
2399 		amdgpu_amdkfd_device_init(adev);
2400 
2401 	amdgpu_fru_get_product_info(adev);
2402 
2403 init_failed:
2404 	if (amdgpu_sriov_vf(adev))
2405 		amdgpu_virt_release_full_gpu(adev, true);
2406 
2407 	return r;
2408 }
2409 
2410 /**
2411  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2412  *
2413  * @adev: amdgpu_device pointer
2414  *
2415  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2416  * this function before a GPU reset.  If the value is retained after a
2417  * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
2418  */
2419 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2420 {
2421 	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2422 }
2423 
2424 /**
2425  * amdgpu_device_check_vram_lost - check if vram is valid
2426  *
2427  * @adev: amdgpu_device pointer
2428  *
2429  * Checks the reset magic value written to the gart pointer in VRAM.
2430  * The driver calls this after a GPU reset to see if the contents of
2431  * VRAM have been lost or not.
2432  * returns true if vram is lost, false if not.
2433  */
2434 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2435 {
2436 	if (memcmp(adev->gart.ptr, adev->reset_magic,
2437 			AMDGPU_RESET_MAGIC_NUM))
2438 		return true;
2439 
2440 	if (!amdgpu_in_reset(adev))
2441 		return false;
2442 
2443 	/*
2444 	 * For all ASICs with baco/mode1 reset, the VRAM is
2445 	 * always assumed to be lost.
2446 	 */
2447 	switch (amdgpu_asic_reset_method(adev)) {
2448 	case AMD_RESET_METHOD_BACO:
2449 	case AMD_RESET_METHOD_MODE1:
2450 		return true;
2451 	default:
2452 		return false;
2453 	}
2454 }
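
/*
 * Illustrative sketch (not part of the driver logic): the two helpers above
 * work as a pair across a reset, roughly:
 *
 *	amdgpu_device_fill_reset_magic(adev);
 *	// ... GPU reset ...
 *	vram_lost = amdgpu_device_check_vram_lost(adev);
 *
 * where a true vram_lost means VRAM-backed buffers have to be restored or
 * re-created by the recovery path.
 */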
2455 
2456 /**
2457  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2458  *
2459  * @adev: amdgpu_device pointer
2460  * @state: clockgating state (gate or ungate)
2461  *
2462  * The list of all the hardware IPs that make up the asic is walked and the
2463  * set_clockgating_state callbacks are run.  During late initialization this
2464  * enables clockgating for the hardware IPs; during fini or suspend it
2465  * disables clockgating for them.
2466  * Returns 0 on success, negative error code on failure.
2467  */
2468 
2469 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2470 			       enum amd_clockgating_state state)
2471 {
2472 	int i, j, r;
2473 
2474 	if (amdgpu_emu_mode == 1)
2475 		return 0;
2476 
2477 	for (j = 0; j < adev->num_ip_blocks; j++) {
2478 		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2479 		if (!adev->ip_blocks[i].status.late_initialized)
2480 			continue;
2481 		/* skip CG for GFX on S0ix */
2482 		if (adev->in_s0ix &&
2483 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2484 			continue;
2485 		/* skip CG for VCE/UVD, it's handled specially */
2486 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2487 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2488 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2489 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2490 		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2491 			/* enable clockgating to save power */
2492 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2493 										     state);
2494 			if (r) {
2495 				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2496 					  adev->ip_blocks[i].version->funcs->name, r);
2497 				return r;
2498 			}
2499 		}
2500 	}
2501 
2502 	return 0;
2503 }
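
/*
 * Illustrative usage: the actual calls are made from amdgpu_device_ip_late_init()
 * and from the suspend/fini paths below, gating on the way in and ungating on
 * the way out:
 *
 *	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
 *	...
 *	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
 */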
2504 
2505 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2506 			       enum amd_powergating_state state)
2507 {
2508 	int i, j, r;
2509 
2510 	if (amdgpu_emu_mode == 1)
2511 		return 0;
2512 
2513 	for (j = 0; j < adev->num_ip_blocks; j++) {
2514 		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2515 		if (!adev->ip_blocks[i].status.late_initialized)
2516 			continue;
2517 		/* skip PG for GFX on S0ix */
2518 		if (adev->in_s0ix &&
2519 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
2520 			continue;
2521 		/* skip PG for VCE/UVD, it's handled specially */
2522 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2523 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2524 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2525 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2526 		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
2527 			/* enable powergating to save power */
2528 			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2529 											state);
2530 			if (r) {
2531 				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2532 					  adev->ip_blocks[i].version->funcs->name, r);
2533 				return r;
2534 			}
2535 		}
2536 	}
2537 	return 0;
2538 }
2539 
2540 static int amdgpu_device_enable_mgpu_fan_boost(void)
2541 {
2542 	struct amdgpu_gpu_instance *gpu_ins;
2543 	struct amdgpu_device *adev;
2544 	int i, ret = 0;
2545 
2546 	mutex_lock(&mgpu_info.mutex);
2547 
2548 	/*
2549 	 * MGPU fan boost feature should be enabled
2550 	 * only when there are two or more dGPUs in
2551 	 * the system
2552 	 */
2553 	if (mgpu_info.num_dgpu < 2)
2554 		goto out;
2555 
2556 	for (i = 0; i < mgpu_info.num_dgpu; i++) {
2557 		gpu_ins = &(mgpu_info.gpu_ins[i]);
2558 		adev = gpu_ins->adev;
2559 		if (!(adev->flags & AMD_IS_APU) &&
2560 		    !gpu_ins->mgpu_fan_enabled) {
2561 			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2562 			if (ret)
2563 				break;
2564 
2565 			gpu_ins->mgpu_fan_enabled = 1;
2566 		}
2567 	}
2568 
2569 out:
2570 	mutex_unlock(&mgpu_info.mutex);
2571 
2572 	return ret;
2573 }
2574 
2575 /**
2576  * amdgpu_device_ip_late_init - run late init for hardware IPs
2577  *
2578  * @adev: amdgpu_device pointer
2579  *
2580  * Late initialization pass for hardware IPs.  The list of all the hardware
2581  * IPs that make up the asic is walked and the late_init callbacks are run.
2582  * late_init covers any special initialization that an IP requires
2583  * after all of the IPs have been initialized or something that needs to happen
2584  * late in the init process.
2585  * Returns 0 on success, negative error code on failure.
2586  */
2587 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2588 {
2589 	struct amdgpu_gpu_instance *gpu_instance;
2590 	int i = 0, r;
2591 
2592 	for (i = 0; i < adev->num_ip_blocks; i++) {
2593 		if (!adev->ip_blocks[i].status.hw)
2594 			continue;
2595 		if (adev->ip_blocks[i].version->funcs->late_init) {
2596 			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2597 			if (r) {
2598 				DRM_ERROR("late_init of IP block <%s> failed %d\n",
2599 					  adev->ip_blocks[i].version->funcs->name, r);
2600 				return r;
2601 			}
2602 		}
2603 		adev->ip_blocks[i].status.late_initialized = true;
2604 	}
2605 
2606 	amdgpu_ras_set_error_query_ready(adev, true);
2607 
2608 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2609 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2610 
2611 	amdgpu_device_fill_reset_magic(adev);
2612 
2613 	r = amdgpu_device_enable_mgpu_fan_boost();
2614 	if (r)
2615 		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2616 
2617 	/* For XGMI + passthrough configuration on arcturus, enable light SBR */
2618 	if (adev->asic_type == CHIP_ARCTURUS &&
2619 	    amdgpu_passthrough(adev) &&
2620 	    adev->gmc.xgmi.num_physical_nodes > 1)
2621 		smu_set_light_sbr(&adev->smu, true);
2622 
2623 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
2624 		mutex_lock(&mgpu_info.mutex);
2625 
2626 		/*
2627 		 * Reset the device p-state to low, as it was booted with high.
2628 		 *
2629 		 * This should be performed only after all devices from the same
2630 		 * hive get initialized.
2631 		 *
2632 		 * However, it's not known in advance how many devices are in the
2633 		 * hive, as they are counted one by one during device initialization.
2634 		 *
2635 		 * So we wait until all XGMI interlinked devices are initialized.
2636 		 * This may bring some delays as those devices may come from
2637 		 * different hives. But that should be OK.
2638 		 */
2639 		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2640 			for (i = 0; i < mgpu_info.num_gpu; i++) {
2641 				gpu_instance = &(mgpu_info.gpu_ins[i]);
2642 				if (gpu_instance->adev->flags & AMD_IS_APU)
2643 					continue;
2644 
2645 				r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2646 						AMDGPU_XGMI_PSTATE_MIN);
2647 				if (r) {
2648 					DRM_ERROR("pstate setting failed (%d).\n", r);
2649 					break;
2650 				}
2651 			}
2652 		}
2653 
2654 		mutex_unlock(&mgpu_info.mutex);
2655 	}
2656 
2657 	return 0;
2658 }
2659 
2660 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2661 {
2662 	int i, r;
2663 
2664 	for (i = 0; i < adev->num_ip_blocks; i++) {
2665 		if (!adev->ip_blocks[i].version->funcs->early_fini)
2666 			continue;
2667 
2668 		r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2669 		if (r) {
2670 			DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2671 				  adev->ip_blocks[i].version->funcs->name, r);
2672 		}
2673 	}
2674 
2675 	amdgpu_amdkfd_suspend(adev, false);
2676 
2677 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2678 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2679 
2680 	/* need to disable SMC first */
2681 	for (i = 0; i < adev->num_ip_blocks; i++) {
2682 		if (!adev->ip_blocks[i].status.hw)
2683 			continue;
2684 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2685 			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2686 			/* XXX handle errors */
2687 			if (r) {
2688 				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2689 					  adev->ip_blocks[i].version->funcs->name, r);
2690 			}
2691 			adev->ip_blocks[i].status.hw = false;
2692 			break;
2693 		}
2694 	}
2695 
2696 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2697 		if (!adev->ip_blocks[i].status.hw)
2698 			continue;
2699 
2700 		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2701 		/* XXX handle errors */
2702 		if (r) {
2703 			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2704 				  adev->ip_blocks[i].version->funcs->name, r);
2705 		}
2706 
2707 		adev->ip_blocks[i].status.hw = false;
2708 	}
2709 
2710 	if (amdgpu_sriov_vf(adev)) {
2711 		if (amdgpu_virt_release_full_gpu(adev, false))
2712 			DRM_ERROR("failed to release exclusive mode on fini\n");
2713 	}
2714 
2715 	return 0;
2716 }
2717 
2718 /**
2719  * amdgpu_device_ip_fini - run fini for hardware IPs
2720  *
2721  * @adev: amdgpu_device pointer
2722  *
2723  * Main teardown pass for hardware IPs.  The list of all the hardware
2724  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2725  * are run.  hw_fini tears down the hardware associated with each IP
2726  * and sw_fini tears down any software state associated with each IP.
2727  * Returns 0 on success, negative error code on failure.
2728  */
2729 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2730 {
2731 	int i, r;
2732 
2733 	if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2734 		amdgpu_virt_release_ras_err_handler_data(adev);
2735 
2736 	amdgpu_ras_pre_fini(adev);
2737 
2738 	if (adev->gmc.xgmi.num_physical_nodes > 1)
2739 		amdgpu_xgmi_remove_device(adev);
2740 
2741 	amdgpu_amdkfd_device_fini_sw(adev);
2742 
2743 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2744 		if (!adev->ip_blocks[i].status.sw)
2745 			continue;
2746 
2747 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2748 			amdgpu_ucode_free_bo(adev);
2749 			amdgpu_free_static_csa(&adev->virt.csa_obj);
2750 			amdgpu_device_wb_fini(adev);
2751 			amdgpu_device_vram_scratch_fini(adev);
2752 			amdgpu_ib_pool_fini(adev);
2753 		}
2754 
2755 		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2756 		/* XXX handle errors */
2757 		if (r) {
2758 			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2759 				  adev->ip_blocks[i].version->funcs->name, r);
2760 		}
2761 		adev->ip_blocks[i].status.sw = false;
2762 		adev->ip_blocks[i].status.valid = false;
2763 	}
2764 
2765 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2766 		if (!adev->ip_blocks[i].status.late_initialized)
2767 			continue;
2768 		if (adev->ip_blocks[i].version->funcs->late_fini)
2769 			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2770 		adev->ip_blocks[i].status.late_initialized = false;
2771 	}
2772 
2773 	amdgpu_ras_fini(adev);
2774 
2775 	return 0;
2776 }
2777 
2778 /**
2779  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2780  *
2781  * @work: work_struct.
2782  */
2783 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2784 {
2785 	struct amdgpu_device *adev =
2786 		container_of(work, struct amdgpu_device, delayed_init_work.work);
2787 	int r;
2788 
2789 	r = amdgpu_ib_ring_tests(adev);
2790 	if (r)
2791 		DRM_ERROR("ib ring test failed (%d).\n", r);
2792 }
2793 
2794 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2795 {
2796 	struct amdgpu_device *adev =
2797 		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2798 
2799 	WARN_ON_ONCE(adev->gfx.gfx_off_state);
2800 	WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2801 
2802 	if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2803 		adev->gfx.gfx_off_state = true;
2804 }
2805 
2806 /**
2807  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2808  *
2809  * @adev: amdgpu_device pointer
2810  *
2811  * Main suspend function for hardware IPs.  The list of all the hardware
2812  * IPs that make up the asic is walked, clockgating is disabled and the
2813  * suspend callbacks are run.  suspend puts the hardware and software state
2814  * in each IP into a state suitable for suspend.
2815  * Returns 0 on success, negative error code on failure.
2816  */
2817 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2818 {
2819 	int i, r;
2820 
2821 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2822 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2823 
2824 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2825 		if (!adev->ip_blocks[i].status.valid)
2826 			continue;
2827 
2828 		/* displays are handled separately */
2829 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2830 			continue;
2831 
2832 		/* XXX handle errors */
2833 		r = adev->ip_blocks[i].version->funcs->suspend(adev);
2834 		/* XXX handle errors */
2835 		if (r) {
2836 			DRM_ERROR("suspend of IP block <%s> failed %d\n",
2837 				  adev->ip_blocks[i].version->funcs->name, r);
2838 			return r;
2839 		}
2840 
2841 		adev->ip_blocks[i].status.hw = false;
2842 	}
2843 
2844 	return 0;
2845 }
2846 
2847 /**
2848  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2849  *
2850  * @adev: amdgpu_device pointer
2851  *
2852  * Main suspend function for hardware IPs.  The list of all the hardware
2853  * IPs that make up the asic is walked, clockgating is disabled and the
2854  * suspend callbacks are run.  suspend puts the hardware and software state
2855  * in each IP into a state suitable for suspend.
2856  * Returns 0 on success, negative error code on failure.
2857  */
2858 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2859 {
2860 	int i, r;
2861 
2862 	if (adev->in_s0ix)
2863 		amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
2864 
2865 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2866 		if (!adev->ip_blocks[i].status.valid)
2867 			continue;
2868 		/* displays are handled in phase1 */
2869 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2870 			continue;
2871 		/* PSP lost connection when err_event_athub occurs */
2872 		if (amdgpu_ras_intr_triggered() &&
2873 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2874 			adev->ip_blocks[i].status.hw = false;
2875 			continue;
2876 		}
2877 
2878 		/* skip unnecessary suspend if we have not initialized them yet */
2879 		if (adev->gmc.xgmi.pending_reset &&
2880 		    !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2881 		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
2882 		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2883 		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
2884 			adev->ip_blocks[i].status.hw = false;
2885 			continue;
2886 		}
2887 
2888 		/* skip suspend of gfx and psp for S0ix
2889 		 * gfx is in gfxoff state, so on resume it will exit gfxoff just
2890 		 * like at runtime. PSP is also part of the always-on hardware
2891 		 * so there is no need to suspend it.
2892 		 */
2893 		if (adev->in_s0ix &&
2894 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
2895 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
2896 			continue;
2897 
2898 		/* XXX handle errors */
2899 		r = adev->ip_blocks[i].version->funcs->suspend(adev);
2900 		/* XXX handle errors */
2901 		if (r) {
2902 			DRM_ERROR("suspend of IP block <%s> failed %d\n",
2903 				  adev->ip_blocks[i].version->funcs->name, r);
2904 		}
2905 		adev->ip_blocks[i].status.hw = false;
2906 		/* handle putting the SMC in the appropriate state */
2907 		if (!amdgpu_sriov_vf(adev)) {
2908 			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2909 				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
2910 				if (r) {
2911 					DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
2912 							adev->mp1_state, r);
2913 					return r;
2914 				}
2915 			}
2916 		}
2917 	}
2918 
2919 	return 0;
2920 }
2921 
2922 /**
2923  * amdgpu_device_ip_suspend - run suspend for hardware IPs
2924  *
2925  * @adev: amdgpu_device pointer
2926  *
2927  * Main suspend function for hardware IPs.  The list of all the hardware
2928  * IPs that make up the asic is walked, clockgating is disabled and the
2929  * suspend callbacks are run.  suspend puts the hardware and software state
2930  * in each IP into a state suitable for suspend.
2931  * Returns 0 on success, negative error code on failure.
2932  */
2933 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
2934 {
2935 	int r;
2936 
2937 	if (amdgpu_sriov_vf(adev)) {
2938 		amdgpu_virt_fini_data_exchange(adev);
2939 		amdgpu_virt_request_full_gpu(adev, false);
2940 	}
2941 
2942 	r = amdgpu_device_ip_suspend_phase1(adev);
2943 	if (r)
2944 		return r;
2945 	r = amdgpu_device_ip_suspend_phase2(adev);
2946 
2947 	if (amdgpu_sriov_vf(adev))
2948 		amdgpu_virt_release_full_gpu(adev, false);
2949 
2950 	return r;
2951 }
2952 
2953 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2954 {
2955 	int i, r;
2956 
2957 	static enum amd_ip_block_type ip_order[] = {
2958 		AMD_IP_BLOCK_TYPE_GMC,
2959 		AMD_IP_BLOCK_TYPE_COMMON,
2960 		AMD_IP_BLOCK_TYPE_PSP,
2961 		AMD_IP_BLOCK_TYPE_IH,
2962 	};
2963 
2964 	for (i = 0; i < adev->num_ip_blocks; i++) {
2965 		int j;
2966 		struct amdgpu_ip_block *block;
2967 
2968 		block = &adev->ip_blocks[i];
2969 		block->status.hw = false;
2970 
2971 		for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
2972 
2973 			if (block->version->type != ip_order[j] ||
2974 				!block->status.valid)
2975 				continue;
2976 
2977 			r = block->version->funcs->hw_init(adev);
2978 			DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
2979 			if (r)
2980 				return r;
2981 			block->status.hw = true;
2982 		}
2983 	}
2984 
2985 	return 0;
2986 }
2987 
2988 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
2989 {
2990 	int i, r;
2991 
2992 	static enum amd_ip_block_type ip_order[] = {
2993 		AMD_IP_BLOCK_TYPE_SMC,
2994 		AMD_IP_BLOCK_TYPE_DCE,
2995 		AMD_IP_BLOCK_TYPE_GFX,
2996 		AMD_IP_BLOCK_TYPE_SDMA,
2997 		AMD_IP_BLOCK_TYPE_UVD,
2998 		AMD_IP_BLOCK_TYPE_VCE,
2999 		AMD_IP_BLOCK_TYPE_VCN
3000 	};
3001 
3002 	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3003 		int j;
3004 		struct amdgpu_ip_block *block;
3005 
3006 		for (j = 0; j < adev->num_ip_blocks; j++) {
3007 			block = &adev->ip_blocks[j];
3008 
3009 			if (block->version->type != ip_order[i] ||
3010 				!block->status.valid ||
3011 				block->status.hw)
3012 				continue;
3013 
3014 			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3015 				r = block->version->funcs->resume(adev);
3016 			else
3017 				r = block->version->funcs->hw_init(adev);
3018 
3019 			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
3020 			if (r)
3021 				return r;
3022 			block->status.hw = true;
3023 		}
3024 	}
3025 
3026 	return 0;
3027 }
3028 
3029 /**
3030  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3031  *
3032  * @adev: amdgpu_device pointer
3033  *
3034  * First resume function for hardware IPs.  The list of all the hardware
3035  * IPs that make up the asic is walked and the resume callbacks are run for
3036  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3037  * after a suspend and updates the software state as necessary.  This
3038  * function is also used for restoring the GPU after a GPU reset.
3039  * Returns 0 on success, negative error code on failure.
3040  */
3041 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3042 {
3043 	int i, r;
3044 
3045 	for (i = 0; i < adev->num_ip_blocks; i++) {
3046 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3047 			continue;
3048 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3049 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3050 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
3051 
3052 			r = adev->ip_blocks[i].version->funcs->resume(adev);
3053 			if (r) {
3054 				DRM_ERROR("resume of IP block <%s> failed %d\n",
3055 					  adev->ip_blocks[i].version->funcs->name, r);
3056 				return r;
3057 			}
3058 			adev->ip_blocks[i].status.hw = true;
3059 		}
3060 	}
3061 
3062 	return 0;
3063 }
3064 
3065 /**
3066  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3067  *
3068  * @adev: amdgpu_device pointer
3069  *
3070  * Second resume function for hardware IPs.  The list of all the hardware
3071  * IPs that make up the asic is walked and the resume callbacks are run for
3072  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
3073  * functional state after a suspend and updates the software state as
3074  * necessary.  This function is also used for restoring the GPU after a GPU
3075  * reset.
3076  * Returns 0 on success, negative error code on failure.
3077  */
3078 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3079 {
3080 	int i, r;
3081 
3082 	for (i = 0; i < adev->num_ip_blocks; i++) {
3083 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3084 			continue;
3085 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3086 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3087 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3088 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3089 			continue;
3090 		r = adev->ip_blocks[i].version->funcs->resume(adev);
3091 		if (r) {
3092 			DRM_ERROR("resume of IP block <%s> failed %d\n",
3093 				  adev->ip_blocks[i].version->funcs->name, r);
3094 			return r;
3095 		}
3096 		adev->ip_blocks[i].status.hw = true;
3097 	}
3098 
3099 	return 0;
3100 }
3101 
3102 /**
3103  * amdgpu_device_ip_resume - run resume for hardware IPs
3104  *
3105  * @adev: amdgpu_device pointer
3106  *
3107  * Main resume function for hardware IPs.  The hardware IPs
3108  * are split into two resume functions because they are
3109  * also used in recovering from a GPU reset and some additional
3110  * steps need to be taken between them.  In this case (S3/S4) they are
3111  * run sequentially.
3112  * Returns 0 on success, negative error code on failure.
3113  */
3114 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3115 {
3116 	int r;
3117 
3118 	r = amdgpu_amdkfd_resume_iommu(adev);
3119 	if (r)
3120 		return r;
3121 
3122 	r = amdgpu_device_ip_resume_phase1(adev);
3123 	if (r)
3124 		return r;
3125 
3126 	r = amdgpu_device_fw_loading(adev);
3127 	if (r)
3128 		return r;
3129 
3130 	r = amdgpu_device_ip_resume_phase2(adev);
3131 
3132 	return r;
3133 }
3134 
3135 /**
3136  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3137  *
3138  * @adev: amdgpu_device pointer
3139  *
3140  * Query the VBIOS data tables to determine if the board supports SR-IOV.
3141  */
3142 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3143 {
3144 	if (amdgpu_sriov_vf(adev)) {
3145 		if (adev->is_atom_fw) {
3146 			if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3147 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3148 		} else {
3149 			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3150 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3151 		}
3152 
3153 		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3154 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3155 	}
3156 }
3157 
3158 /**
3159  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3160  *
3161  * @asic_type: AMD asic type
3162  *
3163  * Check if there is DC (new modesetting infrastructure) support for an asic.
3164  * returns true if DC has support, false if not.
3165  */
3166 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3167 {
3168 	switch (asic_type) {
3169 #if defined(CONFIG_DRM_AMD_DC)
3170 	case CHIP_TAHITI:
3171 	case CHIP_PITCAIRN:
3172 	case CHIP_VERDE:
3173 	case CHIP_OLAND:
3174 		/*
3175 		 * We have systems in the wild with these ASICs that require
3176 		 * LVDS and VGA support which is not supported with DC.
3177 		 *
3178 		 * Fall back to the non-DC driver here by default so as not to
3179 		 * cause regressions.
3180 		 */
3181 #if defined(CONFIG_DRM_AMD_DC_SI)
3182 		return amdgpu_dc > 0;
3183 #else
3184 		return false;
3185 #endif
3186 	case CHIP_BONAIRE:
3187 	case CHIP_KAVERI:
3188 	case CHIP_KABINI:
3189 	case CHIP_MULLINS:
3190 		/*
3191 		 * We have systems in the wild with these ASICs that require
3192 		 * LVDS and VGA support which is not supported with DC.
3193 		 *
3194 		 * Fall back to the non-DC driver here by default so as not to
3195 		 * cause regressions.
3196 		 */
3197 		return amdgpu_dc > 0;
3198 	case CHIP_HAWAII:
3199 	case CHIP_CARRIZO:
3200 	case CHIP_STONEY:
3201 	case CHIP_POLARIS10:
3202 	case CHIP_POLARIS11:
3203 	case CHIP_POLARIS12:
3204 	case CHIP_VEGAM:
3205 	case CHIP_TONGA:
3206 	case CHIP_FIJI:
3207 	case CHIP_VEGA10:
3208 	case CHIP_VEGA12:
3209 	case CHIP_VEGA20:
3210 #if defined(CONFIG_DRM_AMD_DC_DCN)
3211 	case CHIP_RAVEN:
3212 	case CHIP_NAVI10:
3213 	case CHIP_NAVI14:
3214 	case CHIP_NAVI12:
3215 	case CHIP_RENOIR:
3216 	case CHIP_CYAN_SKILLFISH:
3217 	case CHIP_SIENNA_CICHLID:
3218 	case CHIP_NAVY_FLOUNDER:
3219 	case CHIP_DIMGREY_CAVEFISH:
3220 	case CHIP_BEIGE_GOBY:
3221 	case CHIP_VANGOGH:
3222 	case CHIP_YELLOW_CARP:
3223 #endif
3224 	default:
3225 		return amdgpu_dc != 0;
3226 #else
3227 	default:
3228 		if (amdgpu_dc > 0)
3229 			DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
3230 					 "but isn't supported by ASIC, ignoring\n");
3231 		return false;
3232 #endif
3233 	}
3234 }
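
/*
 * Illustrative note (assuming the usual dc=-1 "auto" module parameter
 * default): amdgpu.dc=1 opts the legacy SI/CIK parts above into DC,
 * amdgpu.dc=0 forces the non-DC path everywhere, and the default keeps the
 * per-ASIC choice above. For example:
 *
 *	modprobe amdgpu dc=1
 */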
3235 
3236 /**
3237  * amdgpu_device_has_dc_support - check if dc is supported
3238  *
3239  * @adev: amdgpu_device pointer
3240  *
3241  * Returns true for supported, false for not supported
3242  */
3243 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3244 {
3245 	if (amdgpu_sriov_vf(adev) ||
3246 	    adev->enable_virtual_display ||
3247 	    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3248 		return false;
3249 
3250 	return amdgpu_device_asic_has_dc_support(adev->asic_type);
3251 }
3252 
3253 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3254 {
3255 	struct amdgpu_device *adev =
3256 		container_of(__work, struct amdgpu_device, xgmi_reset_work);
3257 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3258 
3259 	/* It's a bug to not have a hive within this function */
3260 	if (WARN_ON(!hive))
3261 		return;
3262 
3263 	/*
3264 	 * Use task barrier to synchronize all xgmi reset works across the
3265 	 * hive. task_barrier_enter and task_barrier_exit will block
3266 	 * until all the threads running the xgmi reset works reach
3267 	 * those points. task_barrier_full will do both blocks.
3268 	 */
3269 	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3270 
3271 		task_barrier_enter(&hive->tb);
3272 		adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3273 
3274 		if (adev->asic_reset_res)
3275 			goto fail;
3276 
3277 		task_barrier_exit(&hive->tb);
3278 		adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3279 
3280 		if (adev->asic_reset_res)
3281 			goto fail;
3282 
3283 		if (adev->mmhub.ras_funcs &&
3284 		    adev->mmhub.ras_funcs->reset_ras_error_count)
3285 			adev->mmhub.ras_funcs->reset_ras_error_count(adev);
3286 	} else {
3287 
3288 		task_barrier_full(&hive->tb);
3289 		adev->asic_reset_res =  amdgpu_asic_reset(adev);
3290 	}
3291 
3292 fail:
3293 	if (adev->asic_reset_res)
3294 		DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3295 			 adev->asic_reset_res, adev_to_drm(adev)->unique);
3296 	amdgpu_put_xgmi_hive(hive);
3297 }
3298 
3299 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3300 {
3301 	char *input = amdgpu_lockup_timeout;
3302 	char *timeout_setting = NULL;
3303 	int index = 0;
3304 	long timeout;
3305 	int ret = 0;
3306 
3307 	/*
3308 	 * By default the timeout for non-compute jobs is 10000 ms
3309 	 * and 60000 ms for compute jobs.
3310 	 * In SR-IOV or passthrough mode, the timeout for compute
3311 	 * jobs is 60000 ms by default.
3312 	 */
3313 	adev->gfx_timeout = msecs_to_jiffies(10000);
3314 	adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3315 	if (amdgpu_sriov_vf(adev))
3316 		adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3317 					msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3318 	else
3319 		adev->compute_timeout =  msecs_to_jiffies(60000);
3320 
3321 	if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3322 		while ((timeout_setting = strsep(&input, ",")) &&
3323 				strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3324 			ret = kstrtol(timeout_setting, 0, &timeout);
3325 			if (ret)
3326 				return ret;
3327 
3328 			if (timeout == 0) {
3329 				index++;
3330 				continue;
3331 			} else if (timeout < 0) {
3332 				timeout = MAX_SCHEDULE_TIMEOUT;
3333 				dev_warn(adev->dev, "lockup timeout disabled");
3334 				add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3335 			} else {
3336 				timeout = msecs_to_jiffies(timeout);
3337 			}
3338 
3339 			switch (index++) {
3340 			case 0:
3341 				adev->gfx_timeout = timeout;
3342 				break;
3343 			case 1:
3344 				adev->compute_timeout = timeout;
3345 				break;
3346 			case 2:
3347 				adev->sdma_timeout = timeout;
3348 				break;
3349 			case 3:
3350 				adev->video_timeout = timeout;
3351 				break;
3352 			default:
3353 				break;
3354 			}
3355 		}
3356 		/*
3357 		 * If there is only one value specified, it
3358 		 * should apply to all non-compute jobs.
3359 		 */
3360 		if (index == 1) {
3361 			adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3362 			if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3363 				adev->compute_timeout = adev->gfx_timeout;
3364 		}
3365 	}
3366 
3367 	return ret;
3368 }
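
/*
 * Illustrative note derived from the parsing above (not a normative spec):
 * lockup_timeout takes up to four comma-separated values, in milliseconds, in
 * the order gfx,compute,sdma,video; 0 keeps the default, a negative value
 * disables the timeout, and a single value applies to all non-compute jobs
 * (and to compute as well under SR-IOV/passthrough). For example:
 *
 *	modprobe amdgpu lockup_timeout=10000,60000,10000,10000
 */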
3369 
3370 static const struct attribute *amdgpu_dev_attributes[] = {
3371 	&dev_attr_product_name.attr,
3372 	&dev_attr_product_number.attr,
3373 	&dev_attr_serial_number.attr,
3374 	&dev_attr_pcie_replay_count.attr,
3375 	NULL
3376 };
3377 
3378 /**
3379  * amdgpu_device_init - initialize the driver
3380  *
3381  * @adev: amdgpu_device pointer
3382  * @flags: driver flags
3383  *
3384  * Initializes the driver info and hw (all asics).
3385  * Returns 0 for success or an error on failure.
3386  * Called at driver startup.
3387  */
3388 int amdgpu_device_init(struct amdgpu_device *adev,
3389 		       uint32_t flags)
3390 {
3391 	struct drm_device *ddev = adev_to_drm(adev);
3392 	struct pci_dev *pdev = adev->pdev;
3393 	int r, i;
3394 	bool px = false;
3395 	u32 max_MBps;
3396 
3397 	adev->shutdown = false;
3398 	adev->flags = flags;
3399 
3400 	if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3401 		adev->asic_type = amdgpu_force_asic_type;
3402 	else
3403 		adev->asic_type = flags & AMD_ASIC_MASK;
3404 
3405 	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3406 	if (amdgpu_emu_mode == 1)
3407 		adev->usec_timeout *= 10;
3408 	adev->gmc.gart_size = 512 * 1024 * 1024;
3409 	adev->accel_working = false;
3410 	adev->num_rings = 0;
3411 	adev->mman.buffer_funcs = NULL;
3412 	adev->mman.buffer_funcs_ring = NULL;
3413 	adev->vm_manager.vm_pte_funcs = NULL;
3414 	adev->vm_manager.vm_pte_num_scheds = 0;
3415 	adev->gmc.gmc_funcs = NULL;
3416 	adev->harvest_ip_mask = 0x0;
3417 	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3418 	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3419 
3420 	adev->smc_rreg = &amdgpu_invalid_rreg;
3421 	adev->smc_wreg = &amdgpu_invalid_wreg;
3422 	adev->pcie_rreg = &amdgpu_invalid_rreg;
3423 	adev->pcie_wreg = &amdgpu_invalid_wreg;
3424 	adev->pciep_rreg = &amdgpu_invalid_rreg;
3425 	adev->pciep_wreg = &amdgpu_invalid_wreg;
3426 	adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3427 	adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3428 	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3429 	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3430 	adev->didt_rreg = &amdgpu_invalid_rreg;
3431 	adev->didt_wreg = &amdgpu_invalid_wreg;
3432 	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3433 	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3434 	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3435 	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3436 
3437 	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3438 		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3439 		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3440 
3441 	/* mutex initialization is all done here so we
3442 	 * can recall functions without locking issues */
3443 	mutex_init(&adev->firmware.mutex);
3444 	mutex_init(&adev->pm.mutex);
3445 	mutex_init(&adev->gfx.gpu_clock_mutex);
3446 	mutex_init(&adev->srbm_mutex);
3447 	mutex_init(&adev->gfx.pipe_reserve_mutex);
3448 	mutex_init(&adev->gfx.gfx_off_mutex);
3449 	mutex_init(&adev->grbm_idx_mutex);
3450 	mutex_init(&adev->mn_lock);
3451 	mutex_init(&adev->virt.vf_errors.lock);
3452 	hash_init(adev->mn_hash);
3453 	atomic_set(&adev->in_gpu_reset, 0);
3454 	init_rwsem(&adev->reset_sem);
3455 	mutex_init(&adev->psp.mutex);
3456 	mutex_init(&adev->notifier_lock);
3457 
3458 	r = amdgpu_device_init_apu_flags(adev);
3459 	if (r)
3460 		return r;
3461 
3462 	r = amdgpu_device_check_arguments(adev);
3463 	if (r)
3464 		return r;
3465 
3466 	spin_lock_init(&adev->mmio_idx_lock);
3467 	spin_lock_init(&adev->smc_idx_lock);
3468 	spin_lock_init(&adev->pcie_idx_lock);
3469 	spin_lock_init(&adev->uvd_ctx_idx_lock);
3470 	spin_lock_init(&adev->didt_idx_lock);
3471 	spin_lock_init(&adev->gc_cac_idx_lock);
3472 	spin_lock_init(&adev->se_cac_idx_lock);
3473 	spin_lock_init(&adev->audio_endpt_idx_lock);
3474 	spin_lock_init(&adev->mm_stats.lock);
3475 
3476 	INIT_LIST_HEAD(&adev->shadow_list);
3477 	mutex_init(&adev->shadow_list_lock);
3478 
3479 	INIT_LIST_HEAD(&adev->reset_list);
3480 
3481 	INIT_DELAYED_WORK(&adev->delayed_init_work,
3482 			  amdgpu_device_delayed_init_work_handler);
3483 	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3484 			  amdgpu_device_delay_enable_gfx_off);
3485 
3486 	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3487 
3488 	adev->gfx.gfx_off_req_count = 1;
3489 	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3490 
3491 	atomic_set(&adev->throttling_logging_enabled, 1);
3492 	/*
3493 	 * If throttling continues, logging will be performed every minute
3494 	 * to avoid log flooding. "-1" is subtracted since the thermal
3495 	 * throttling interrupt comes every second. Thus, the total logging
3496 	 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3497 	 * for the throttling interrupt) = 60 seconds.
3498 	 */
3499 	ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3500 	ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3501 
3502 	/* Registers mapping */
3503 	/* TODO: block userspace mapping of io register */
3504 	if (adev->asic_type >= CHIP_BONAIRE) {
3505 		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3506 		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3507 	} else {
3508 		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3509 		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3510 	}
3511 
3512 	for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3513 		atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3514 
3515 	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3516 	if (adev->rmmio == NULL) {
3517 		return -ENOMEM;
3518 	}
3519 	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3520 	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
3521 
3522 	amdgpu_device_get_pcie_info(adev);
3523 
3524 	if (amdgpu_mcbp)
3525 		DRM_INFO("MCBP is enabled\n");
3526 
3527 	if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
3528 		adev->enable_mes = true;
3529 
3530 	/* detect hw virtualization here */
3531 	amdgpu_detect_virtualization(adev);
3532 
3533 	r = amdgpu_device_get_job_timeout_settings(adev);
3534 	if (r) {
3535 		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3536 		return r;
3537 	}
3538 
3539 	/* early init functions */
3540 	r = amdgpu_device_ip_early_init(adev);
3541 	if (r)
3542 		return r;
3543 
3544 	/* enable PCIE atomic ops */
3545 	if (amdgpu_sriov_vf(adev))
3546 		adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3547 			adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_enabled_flags ==
3548 			(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3549 	else
3550 		adev->have_atomics_support =
3551 			!pci_enable_atomic_ops_to_root(adev->pdev,
3552 					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3553 					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3554 	if (!adev->have_atomics_support)
3555 		dev_info(adev->dev, "PCIE atomic ops is not supported\n");
3556 
	/* doorbell bar mapping and doorbell index init */
3558 	amdgpu_device_doorbell_init(adev);
3559 
3560 	if (amdgpu_emu_mode == 1) {
3561 		/* post the asic on emulation mode */
3562 		emu_soc_asic_init(adev);
3563 		goto fence_driver_init;
3564 	}
3565 
3566 	amdgpu_reset_init(adev);
3567 
3568 	/* detect if we are with an SRIOV vbios */
3569 	amdgpu_device_detect_sriov_bios(adev);
3570 
3571 	/* check if we need to reset the asic
3572 	 *  E.g., driver was not cleanly unloaded previously, etc.
3573 	 */
3574 	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3575 		if (adev->gmc.xgmi.num_physical_nodes) {
3576 			dev_info(adev->dev, "Pending hive reset.\n");
3577 			adev->gmc.xgmi.pending_reset = true;
3578 			/* Only need to init necessary block for SMU to handle the reset */
3579 			for (i = 0; i < adev->num_ip_blocks; i++) {
3580 				if (!adev->ip_blocks[i].status.valid)
3581 					continue;
3582 				if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3583 				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3584 				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3585 				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3586 					DRM_DEBUG("IP %s disabled for hw_init.\n",
3587 						adev->ip_blocks[i].version->funcs->name);
3588 					adev->ip_blocks[i].status.hw = true;
3589 				}
3590 			}
3591 		} else {
3592 			r = amdgpu_asic_reset(adev);
3593 			if (r) {
3594 				dev_err(adev->dev, "asic reset on init failed\n");
3595 				goto failed;
3596 			}
3597 		}
3598 	}
3599 
3600 	pci_enable_pcie_error_reporting(adev->pdev);
3601 
3602 	/* Post card if necessary */
3603 	if (amdgpu_device_need_post(adev)) {
3604 		if (!adev->bios) {
3605 			dev_err(adev->dev, "no vBIOS found\n");
3606 			r = -EINVAL;
3607 			goto failed;
3608 		}
3609 		DRM_INFO("GPU posting now...\n");
3610 		r = amdgpu_device_asic_init(adev);
3611 		if (r) {
3612 			dev_err(adev->dev, "gpu post error!\n");
3613 			goto failed;
3614 		}
3615 	}
3616 
3617 	if (adev->is_atom_fw) {
3618 		/* Initialize clocks */
3619 		r = amdgpu_atomfirmware_get_clock_info(adev);
3620 		if (r) {
3621 			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3622 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3623 			goto failed;
3624 		}
3625 	} else {
3626 		/* Initialize clocks */
3627 		r = amdgpu_atombios_get_clock_info(adev);
3628 		if (r) {
3629 			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3630 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3631 			goto failed;
3632 		}
3633 		/* init i2c buses */
3634 		if (!amdgpu_device_has_dc_support(adev))
3635 			amdgpu_atombios_i2c_init(adev);
3636 	}
3637 
3638 fence_driver_init:
3639 	/* Fence driver */
3640 	r = amdgpu_fence_driver_sw_init(adev);
3641 	if (r) {
3642 		dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3643 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3644 		goto failed;
3645 	}
3646 
3647 	/* init the mode config */
3648 	drm_mode_config_init(adev_to_drm(adev));
3649 
3650 	r = amdgpu_device_ip_init(adev);
3651 	if (r) {
3652 		/* failed in exclusive mode due to timeout */
3653 		if (amdgpu_sriov_vf(adev) &&
3654 		    !amdgpu_sriov_runtime(adev) &&
3655 		    amdgpu_virt_mmio_blocked(adev) &&
3656 		    !amdgpu_virt_wait_reset(adev)) {
3657 			dev_err(adev->dev, "VF exclusive mode timeout\n");
3658 			/* Don't send request since VF is inactive. */
3659 			adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3660 			adev->virt.ops = NULL;
3661 			r = -EAGAIN;
3662 			goto release_ras_con;
3663 		}
3664 		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3665 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3666 		goto release_ras_con;
3667 	}
3668 
3669 	amdgpu_fence_driver_hw_init(adev);
3670 
3671 	dev_info(adev->dev,
3672 		"SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3673 			adev->gfx.config.max_shader_engines,
3674 			adev->gfx.config.max_sh_per_se,
3675 			adev->gfx.config.max_cu_per_sh,
3676 			adev->gfx.cu_info.number);
3677 
3678 	adev->accel_working = true;
3679 
3680 	amdgpu_vm_check_compute_bug(adev);
3681 
3682 	/* Initialize the buffer migration limit. */
3683 	if (amdgpu_moverate >= 0)
3684 		max_MBps = amdgpu_moverate;
3685 	else
3686 		max_MBps = 8; /* Allow 8 MB/s. */
3687 	/* Get a log2 for easy divisions. */
3688 	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3689 
3690 	r = amdgpu_pm_sysfs_init(adev);
3691 	if (r) {
3692 		adev->pm_sysfs_en = false;
3693 		DRM_ERROR("registering pm debugfs failed (%d).\n", r);
	} else {
		adev->pm_sysfs_en = true;
	}
3696 
3697 	r = amdgpu_ucode_sysfs_init(adev);
3698 	if (r) {
3699 		adev->ucode_sysfs_en = false;
3700 		DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
	} else {
		adev->ucode_sysfs_en = true;
	}
3703 
	if (amdgpu_testing & 1) {
3705 		if (adev->accel_working)
3706 			amdgpu_test_moves(adev);
3707 		else
3708 			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
3709 	}
3710 	if (amdgpu_benchmarking) {
3711 		if (adev->accel_working)
3712 			amdgpu_benchmark(adev, amdgpu_benchmarking);
3713 		else
3714 			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
3715 	}
3716 
3717 	/*
3718 	 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
	 * Otherwise the mgpu fan boost feature will be skipped because the
	 * gpu instance count would be too low.
3721 	 */
3722 	amdgpu_register_gpu_instance(adev);
3723 
3724 	/* enable clockgating, etc. after ib tests, etc. since some blocks require
3725 	 * explicit gating rather than handling it automatically.
3726 	 */
3727 	if (!adev->gmc.xgmi.pending_reset) {
3728 		r = amdgpu_device_ip_late_init(adev);
3729 		if (r) {
3730 			dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3731 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3732 			goto release_ras_con;
3733 		}
3734 		/* must succeed. */
3735 		amdgpu_ras_resume(adev);
3736 		queue_delayed_work(system_wq, &adev->delayed_init_work,
3737 				   msecs_to_jiffies(AMDGPU_RESUME_MS));
3738 	}
3739 
3740 	if (amdgpu_sriov_vf(adev))
3741 		flush_delayed_work(&adev->delayed_init_work);
3742 
3743 	r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3744 	if (r)
3745 		dev_err(adev->dev, "Could not create amdgpu device attr\n");
3746 
	if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
		r = amdgpu_pmu_init(adev);
		if (r)
			dev_err(adev->dev, "amdgpu_pmu_init failed\n");
	}
3751 
	/* Keep a cached copy of the PCI config space at hand so it can be restored after a sudden PCI error */
3753 	if (amdgpu_device_cache_pci_state(adev->pdev))
3754 		pci_restore_state(pdev);
3755 
3756 	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3757 	/* this will fail for cards that aren't VGA class devices, just
3758 	 * ignore it */
3759 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3760 		vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3761 
3762 	if (amdgpu_device_supports_px(ddev)) {
3763 		px = true;
3764 		vga_switcheroo_register_client(adev->pdev,
3765 					       &amdgpu_switcheroo_ops, px);
3766 		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3767 	}
3768 
3769 	if (adev->gmc.xgmi.pending_reset)
3770 		queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3771 				   msecs_to_jiffies(AMDGPU_RESUME_MS));
3772 
3773 	return 0;
3774 
3775 release_ras_con:
3776 	amdgpu_release_ras_context(adev);
3777 
3778 failed:
3779 	amdgpu_vf_error_trans_all(adev);
3780 
3781 	return r;
3782 }
3783 
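/**
 * amdgpu_device_unmap_mmio - unmap all device MMIO mappings
 *
 * @adev: amdgpu_device pointer
 *
 * Clears all CPU mappings pointing to this device and unmaps the
 * doorbell, register and VRAM aperture BARs, releasing the associated
 * MTRR/write-combine settings where applicable.
 */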
3784 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3785 {
3786 	/* Clear all CPU mappings pointing to this device */
3787 	unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3788 
3789 	/* Unmap all mapped bars - Doorbell, registers and VRAM */
3790 	amdgpu_device_doorbell_fini(adev);
3791 
3792 	iounmap(adev->rmmio);
3793 	adev->rmmio = NULL;
3794 	if (adev->mman.aper_base_kaddr)
3795 		iounmap(adev->mman.aper_base_kaddr);
3796 	adev->mman.aper_base_kaddr = NULL;
3797 
3798 	/* Memory manager related */
3799 	if (!adev->gmc.xgmi.connected_to_cpu) {
3800 		arch_phys_wc_del(adev->gmc.vram_mtrr);
3801 		arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3802 	}
3803 }
3804 
3805 /**
 * amdgpu_device_fini_hw - tear down the driver (hardware teardown)
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hardware side of the driver info (all asics);
 * the software side is torn down in amdgpu_device_fini_sw().
 * Called at driver shutdown.
3812  */
3813 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3814 {
3815 	dev_info(adev->dev, "amdgpu: finishing device.\n");
3816 	flush_delayed_work(&adev->delayed_init_work);
3817 	if (adev->mman.initialized) {
3818 		flush_delayed_work(&adev->mman.bdev.wq);
3819 		ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
3820 	}
3821 	adev->shutdown = true;
3822 
	/* make sure IB tests are finished before entering exclusive mode
	 * to avoid preempting the IB tests
	 */
3826 	if (amdgpu_sriov_vf(adev)) {
3827 		amdgpu_virt_request_full_gpu(adev, false);
3828 		amdgpu_virt_fini_data_exchange(adev);
3829 	}
3830 
3831 	/* disable all interrupts */
3832 	amdgpu_irq_disable_all(adev);
	if (adev->mode_info.mode_config_initialized) {
3834 		if (!amdgpu_device_has_dc_support(adev))
3835 			drm_helper_force_disable_all(adev_to_drm(adev));
3836 		else
3837 			drm_atomic_helper_shutdown(adev_to_drm(adev));
3838 	}
3839 	amdgpu_fence_driver_hw_fini(adev);
3840 
3841 	if (adev->pm_sysfs_en)
3842 		amdgpu_pm_sysfs_fini(adev);
3843 	if (adev->ucode_sysfs_en)
3844 		amdgpu_ucode_sysfs_fini(adev);
3845 	sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3846 
3847 	amdgpu_device_ip_fini_early(adev);
3848 
3849 	amdgpu_irq_fini_hw(adev);
3850 
3851 	ttm_device_clear_dma_mappings(&adev->mman.bdev);
3852 
3853 	amdgpu_gart_dummy_page_fini(adev);
3854 
3855 	amdgpu_device_unmap_mmio(adev);
3856 }
3857 
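/**
 * amdgpu_device_fini_sw - tear down the driver (software teardown)
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the remaining software state after amdgpu_device_fini_hw():
 * fence driver, IP blocks, firmware, i2c buses, atombios data, vga
 * clients and the cached PCI state.
 */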
3858 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
3859 {
3860 	amdgpu_fence_driver_sw_fini(adev);
3861 	amdgpu_device_ip_fini(adev);
3862 	release_firmware(adev->firmware.gpu_info_fw);
3863 	adev->firmware.gpu_info_fw = NULL;
3864 	adev->accel_working = false;
3865 
3866 	amdgpu_reset_fini(adev);
3867 
3868 	/* free i2c buses */
3869 	if (!amdgpu_device_has_dc_support(adev))
3870 		amdgpu_i2c_fini(adev);
3871 
3872 	if (amdgpu_emu_mode != 1)
3873 		amdgpu_atombios_fini(adev);
3874 
3875 	kfree(adev->bios);
3876 	adev->bios = NULL;
3877 	if (amdgpu_device_supports_px(adev_to_drm(adev))) {
3878 		vga_switcheroo_unregister_client(adev->pdev);
3879 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
3880 	}
3881 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3882 		vga_client_unregister(adev->pdev);
3883 
3884 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
3885 		amdgpu_pmu_fini(adev);
3886 	if (adev->mman.discovery_bin)
3887 		amdgpu_discovery_fini(adev);
3888 
	kfree(adev->pci_state);
}
3892 
3893 /**
3894  * amdgpu_device_evict_resources - evict device resources
3895  * @adev: amdgpu device object
3896  *
 * Evicts all ttm device resources (vram BOs, gart table) from the lru list
3898  * of the vram memory type. Mainly used for evicting device resources
3899  * at suspend time.
3900  *
3901  */
3902 static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
3903 {
3904 	/* No need to evict vram on APUs for suspend to ram */
3905 	if (adev->in_s3 && (adev->flags & AMD_IS_APU))
3906 		return;
3907 
3908 	if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM))
3909 		DRM_WARN("evicting device resources failed\n");
}
3912 
3913 /*
3914  * Suspend & resume.
3915  */
3916 /**
3917  * amdgpu_device_suspend - initiate device suspend
3918  *
3919  * @dev: drm dev pointer
 * @fbcon: notify the fbdev of suspend
3921  *
3922  * Puts the hw in the suspend state (all asics).
3923  * Returns 0 for success or an error on failure.
3924  * Called at driver suspend.
3925  */
3926 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
3927 {
3928 	struct amdgpu_device *adev = drm_to_adev(dev);
3929 
3930 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3931 		return 0;
3932 
3933 	adev->in_suspend = true;
3934 
3935 	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
3936 		DRM_WARN("smart shift update failed\n");
3937 
3938 	drm_kms_helper_poll_disable(dev);
3939 
3940 	if (fbcon)
3941 		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
3942 
3943 	cancel_delayed_work_sync(&adev->delayed_init_work);
3944 
3945 	amdgpu_ras_suspend(adev);
3946 
3947 	amdgpu_device_ip_suspend_phase1(adev);
3948 
3949 	if (!adev->in_s0ix)
3950 		amdgpu_amdkfd_suspend(adev, adev->in_runpm);
3951 
3952 	/* First evict vram memory */
3953 	amdgpu_device_evict_resources(adev);
3954 
3955 	amdgpu_fence_driver_hw_fini(adev);
3956 
3957 	amdgpu_device_ip_suspend_phase2(adev);
3958 	/* This second call to evict device resources is to evict
3959 	 * the gart page table using the CPU.
3960 	 */
3961 	amdgpu_device_evict_resources(adev);
3962 
3963 	return 0;
3964 }
3965 
3966 /**
3967  * amdgpu_device_resume - initiate device resume
3968  *
3969  * @dev: drm dev pointer
 * @fbcon: notify the fbdev of resume
3971  *
3972  * Bring the hw back to operating state (all asics).
3973  * Returns 0 for success or an error on failure.
3974  * Called at driver resume.
3975  */
3976 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
3977 {
3978 	struct amdgpu_device *adev = drm_to_adev(dev);
3979 	int r = 0;
3980 
3981 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
3982 		return 0;
3983 
3984 	if (adev->in_s0ix)
3985 		amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);
3986 
3987 	/* post card */
3988 	if (amdgpu_device_need_post(adev)) {
3989 		r = amdgpu_device_asic_init(adev);
3990 		if (r)
3991 			dev_err(adev->dev, "amdgpu asic init failed\n");
3992 	}
3993 
3994 	r = amdgpu_device_ip_resume(adev);
3995 	if (r) {
3996 		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
3997 		return r;
3998 	}
3999 	amdgpu_fence_driver_hw_init(adev);
4000 
4001 	r = amdgpu_device_ip_late_init(adev);
4002 	if (r)
4003 		return r;
4004 
4005 	queue_delayed_work(system_wq, &adev->delayed_init_work,
4006 			   msecs_to_jiffies(AMDGPU_RESUME_MS));
4007 
4008 	if (!adev->in_s0ix) {
4009 		r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4010 		if (r)
4011 			return r;
4012 	}
4013 
4014 	/* Make sure IB tests flushed */
4015 	flush_delayed_work(&adev->delayed_init_work);
4016 
4017 	if (fbcon)
4018 		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4019 
4020 	drm_kms_helper_poll_enable(dev);
4021 
4022 	amdgpu_ras_resume(adev);
4023 
4024 	/*
4025 	 * Most of the connector probing functions try to acquire runtime pm
4026 	 * refs to ensure that the GPU is powered on when connector polling is
4027 	 * performed. Since we're calling this from a runtime PM callback,
4028 	 * trying to acquire rpm refs will cause us to deadlock.
4029 	 *
4030 	 * Since we're guaranteed to be holding the rpm lock, it's safe to
4031 	 * temporarily disable the rpm helpers so this doesn't deadlock us.
4032 	 */
4033 #ifdef CONFIG_PM
4034 	dev->dev->power.disable_depth++;
4035 #endif
4036 	if (!amdgpu_device_has_dc_support(adev))
4037 		drm_helper_hpd_irq_event(dev);
4038 	else
4039 		drm_kms_helper_hotplug_event(dev);
4040 #ifdef CONFIG_PM
4041 	dev->dev->power.disable_depth--;
4042 #endif
4043 	adev->in_suspend = false;
4044 
4045 	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4046 		DRM_WARN("smart shift update failed\n");
4047 
4048 	return 0;
4049 }
4050 
4051 /**
4052  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4053  *
4054  * @adev: amdgpu_device pointer
4055  *
4056  * The list of all the hardware IPs that make up the asic is walked and
4057  * the check_soft_reset callbacks are run.  check_soft_reset determines
4058  * if the asic is still hung or not.
4059  * Returns true if any of the IPs are still in a hung state, false if not.
4060  */
4061 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4062 {
4063 	int i;
4064 	bool asic_hang = false;
4065 
4066 	if (amdgpu_sriov_vf(adev))
4067 		return true;
4068 
4069 	if (amdgpu_asic_need_full_reset(adev))
4070 		return true;
4071 
4072 	for (i = 0; i < adev->num_ip_blocks; i++) {
4073 		if (!adev->ip_blocks[i].status.valid)
4074 			continue;
4075 		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4076 			adev->ip_blocks[i].status.hang =
4077 				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4078 		if (adev->ip_blocks[i].status.hang) {
4079 			dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4080 			asic_hang = true;
4081 		}
4082 	}
4083 	return asic_hang;
4084 }
4085 
4086 /**
4087  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4088  *
4089  * @adev: amdgpu_device pointer
4090  *
4091  * The list of all the hardware IPs that make up the asic is walked and the
4092  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4093  * handles any IP specific hardware or software state changes that are
4094  * necessary for a soft reset to succeed.
4095  * Returns 0 on success, negative error code on failure.
4096  */
4097 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4098 {
4099 	int i, r = 0;
4100 
4101 	for (i = 0; i < adev->num_ip_blocks; i++) {
4102 		if (!adev->ip_blocks[i].status.valid)
4103 			continue;
4104 		if (adev->ip_blocks[i].status.hang &&
4105 		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4106 			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4107 			if (r)
4108 				return r;
4109 		}
4110 	}
4111 
4112 	return 0;
4113 }
4114 
4115 /**
4116  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4117  *
4118  * @adev: amdgpu_device pointer
4119  *
4120  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4121  * reset is necessary to recover.
4122  * Returns true if a full asic reset is required, false if not.
4123  */
4124 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4125 {
4126 	int i;
4127 
4128 	if (amdgpu_asic_need_full_reset(adev))
4129 		return true;
4130 
4131 	for (i = 0; i < adev->num_ip_blocks; i++) {
4132 		if (!adev->ip_blocks[i].status.valid)
4133 			continue;
4134 		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4135 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4136 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4137 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4138 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4139 			if (adev->ip_blocks[i].status.hang) {
4140 				dev_info(adev->dev, "Some block need full reset!\n");
4141 				return true;
4142 			}
4143 		}
4144 	}
4145 	return false;
4146 }
4147 
4148 /**
4149  * amdgpu_device_ip_soft_reset - do a soft reset
4150  *
4151  * @adev: amdgpu_device pointer
4152  *
4153  * The list of all the hardware IPs that make up the asic is walked and the
4154  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4155  * IP specific hardware or software state changes that are necessary to soft
4156  * reset the IP.
4157  * Returns 0 on success, negative error code on failure.
4158  */
4159 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4160 {
4161 	int i, r = 0;
4162 
4163 	for (i = 0; i < adev->num_ip_blocks; i++) {
4164 		if (!adev->ip_blocks[i].status.valid)
4165 			continue;
4166 		if (adev->ip_blocks[i].status.hang &&
4167 		    adev->ip_blocks[i].version->funcs->soft_reset) {
4168 			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4169 			if (r)
4170 				return r;
4171 		}
4172 	}
4173 
4174 	return 0;
4175 }
4176 
4177 /**
4178  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4179  *
4180  * @adev: amdgpu_device pointer
4181  *
4182  * The list of all the hardware IPs that make up the asic is walked and the
4183  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4184  * handles any IP specific hardware or software state changes that are
4185  * necessary after the IP has been soft reset.
4186  * Returns 0 on success, negative error code on failure.
4187  */
4188 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4189 {
4190 	int i, r = 0;
4191 
4192 	for (i = 0; i < adev->num_ip_blocks; i++) {
4193 		if (!adev->ip_blocks[i].status.valid)
4194 			continue;
4195 		if (adev->ip_blocks[i].status.hang &&
4196 		    adev->ip_blocks[i].version->funcs->post_soft_reset)
4197 			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4198 		if (r)
4199 			return r;
4200 	}
4201 
4202 	return 0;
4203 }
4204 
4205 /**
4206  * amdgpu_device_recover_vram - Recover some VRAM contents
4207  *
4208  * @adev: amdgpu_device pointer
4209  *
4210  * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
4211  * restore things like GPUVM page tables after a GPU reset where
4212  * the contents of VRAM might be lost.
4213  *
4214  * Returns:
4215  * 0 on success, negative error code on failure.
4216  */
4217 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4218 {
4219 	struct dma_fence *fence = NULL, *next = NULL;
4220 	struct amdgpu_bo *shadow;
4221 	struct amdgpu_bo_vm *vmbo;
4222 	long r = 1, tmo;
4223 
4224 	if (amdgpu_sriov_runtime(adev))
4225 		tmo = msecs_to_jiffies(8000);
4226 	else
4227 		tmo = msecs_to_jiffies(100);
4228 
4229 	dev_info(adev->dev, "recover vram bo from shadow start\n");
4230 	mutex_lock(&adev->shadow_list_lock);
4231 	list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4232 		shadow = &vmbo->bo;
4233 		/* No need to recover an evicted BO */
4234 		if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4235 		    shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4236 		    shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4237 			continue;
4238 
4239 		r = amdgpu_bo_restore_shadow(shadow, &next);
4240 		if (r)
4241 			break;
4242 
4243 		if (fence) {
4244 			tmo = dma_fence_wait_timeout(fence, false, tmo);
4245 			dma_fence_put(fence);
4246 			fence = next;
4247 			if (tmo == 0) {
4248 				r = -ETIMEDOUT;
4249 				break;
4250 			} else if (tmo < 0) {
4251 				r = tmo;
4252 				break;
4253 			}
4254 		} else {
4255 			fence = next;
4256 		}
4257 	}
4258 	mutex_unlock(&adev->shadow_list_lock);
4259 
4260 	if (fence)
4261 		tmo = dma_fence_wait_timeout(fence, false, tmo);
4262 	dma_fence_put(fence);
4263 
4264 	if (r < 0 || tmo <= 0) {
4265 		dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4266 		return -EIO;
4267 	}
4268 
4269 	dev_info(adev->dev, "recover vram bo from shadow done\n");
4270 	return 0;
4271 }
4272 
4273 
4274 /**
4275  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4276  *
4277  * @adev: amdgpu_device pointer
4278  * @from_hypervisor: request from hypervisor
4279  *
 * Do a VF FLR and reinitialize the ASIC.
 * Returns 0 on success, an error code otherwise.
4282  */
4283 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4284 				     bool from_hypervisor)
4285 {
4286 	int r;
4287 
4288 	if (from_hypervisor)
4289 		r = amdgpu_virt_request_full_gpu(adev, true);
4290 	else
4291 		r = amdgpu_virt_reset_gpu(adev);
4292 	if (r)
4293 		return r;
4294 
4295 	/* Resume IP prior to SMC */
4296 	r = amdgpu_device_ip_reinit_early_sriov(adev);
4297 	if (r)
4298 		goto error;
4299 
4300 	amdgpu_virt_init_data_exchange(adev);
	/* we need to recover the gart prior to running SMC/CP/SDMA resume */
4302 	amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
4303 
4304 	r = amdgpu_device_fw_loading(adev);
4305 	if (r)
4306 		return r;
4307 
4308 	/* now we are okay to resume SMC/CP/SDMA */
4309 	r = amdgpu_device_ip_reinit_late_sriov(adev);
4310 	if (r)
4311 		goto error;
4312 
4313 	amdgpu_irq_gpu_reset_resume_helper(adev);
4314 	r = amdgpu_ib_ring_tests(adev);
4315 	amdgpu_amdkfd_post_reset(adev);
4316 
4317 error:
4318 	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4319 		amdgpu_inc_vram_lost(adev);
4320 		r = amdgpu_device_recover_vram(adev);
4321 	}
4322 	amdgpu_virt_release_full_gpu(adev, true);
4323 
4324 	return r;
4325 }
4326 
4327 /**
 * amdgpu_device_has_job_running - check if there is any job in the pending list
4329  *
4330  * @adev: amdgpu_device pointer
4331  *
 * Check if there is any job in the pending list.
4333  */
4334 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4335 {
4336 	int i;
4337 	struct drm_sched_job *job;
4338 
4339 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4340 		struct amdgpu_ring *ring = adev->rings[i];
4341 
4342 		if (!ring || !ring->sched.thread)
4343 			continue;
4344 
4345 		spin_lock(&ring->sched.job_list_lock);
4346 		job = list_first_entry_or_null(&ring->sched.pending_list,
4347 					       struct drm_sched_job, list);
4348 		spin_unlock(&ring->sched.job_list_lock);
4349 		if (job)
4350 			return true;
4351 	}
4352 	return false;
4353 }
4354 
4355 /**
4356  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4357  *
4358  * @adev: amdgpu_device pointer
4359  *
4360  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4361  * a hung GPU.
4362  */
4363 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4364 {
4365 	if (!amdgpu_device_ip_check_soft_reset(adev)) {
4366 		dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
4367 		return false;
4368 	}
4369 
4370 	if (amdgpu_gpu_recovery == 0)
4371 		goto disabled;
4372 
4373 	if (amdgpu_sriov_vf(adev))
4374 		return true;
4375 
4376 	if (amdgpu_gpu_recovery == -1) {
4377 		switch (adev->asic_type) {
4378 		case CHIP_BONAIRE:
4379 		case CHIP_HAWAII:
4380 		case CHIP_TOPAZ:
4381 		case CHIP_TONGA:
4382 		case CHIP_FIJI:
4383 		case CHIP_POLARIS10:
4384 		case CHIP_POLARIS11:
4385 		case CHIP_POLARIS12:
4386 		case CHIP_VEGAM:
4387 		case CHIP_VEGA20:
4388 		case CHIP_VEGA10:
4389 		case CHIP_VEGA12:
4390 		case CHIP_RAVEN:
4391 		case CHIP_ARCTURUS:
4392 		case CHIP_RENOIR:
4393 		case CHIP_NAVI10:
4394 		case CHIP_NAVI14:
4395 		case CHIP_NAVI12:
4396 		case CHIP_SIENNA_CICHLID:
4397 		case CHIP_NAVY_FLOUNDER:
4398 		case CHIP_DIMGREY_CAVEFISH:
4399 		case CHIP_BEIGE_GOBY:
4400 		case CHIP_VANGOGH:
4401 		case CHIP_ALDEBARAN:
4402 			break;
4403 		default:
4404 			goto disabled;
4405 		}
4406 	}
4407 
4408 	return true;
4409 
4410 disabled:
	dev_info(adev->dev, "GPU recovery disabled.\n");
	return false;
4413 }
4414 
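/**
 * amdgpu_device_mode1_reset - perform a mode1 (full chip) reset
 *
 * @adev: amdgpu_device pointer
 *
 * Disables bus mastering, caches the PCI state, triggers a mode1 reset
 * through the SMU when supported (otherwise through the PSP), then
 * restores the PCI state and waits for the asic to come back out of
 * reset.
 * Returns 0 on success, negative error code on failure.
 */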
4415 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4416 {
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU mode1 reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	amdgpu_device_cache_pci_state(adev->pdev);

	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
		dev_info(adev->dev, "GPU smu mode1 reset\n");
		ret = amdgpu_dpm_mode1_reset(adev);
	} else {
		dev_info(adev->dev, "GPU psp mode1 reset\n");
		ret = psp_gpu_reset(adev);
	}

	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");

	amdgpu_device_load_pci_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
	return ret;
4453 }
4454 
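/**
 * amdgpu_device_pre_asic_reset - prepare a device for asic reset
 *
 * @adev: amdgpu_device pointer
 * @reset_context: amdgpu reset context pointer
 *
 * Stops the SRIOV data exchange thread, clears the job fences of all
 * rings and force-completes them, and increases the karma of the guilty
 * job. If no dedicated reset handler is implemented, it then decides on
 * bare metal whether a soft reset is sufficient or a full reset is
 * needed, suspending the IP blocks in the latter case.
 * Returns 0 on success, negative error code on failure.
 */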
4455 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4456 				 struct amdgpu_reset_context *reset_context)
4457 {
4458 	int i, j, r = 0;
4459 	struct amdgpu_job *job = NULL;
4460 	bool need_full_reset =
4461 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4462 
4463 	if (reset_context->reset_req_dev == adev)
4464 		job = reset_context->job;
4465 
4466 	if (amdgpu_sriov_vf(adev)) {
4467 		/* stop the data exchange thread */
4468 		amdgpu_virt_fini_data_exchange(adev);
4469 	}
4470 
4471 	/* block all schedulers and reset given job's ring */
4472 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4473 		struct amdgpu_ring *ring = adev->rings[i];
4474 
4475 		if (!ring || !ring->sched.thread)
4476 			continue;
4477 
		/* Clear the job fences from the fence driver to avoid
		 * force_completion; leave only NULL and vm flush fences
		 * in the fence driver.
		 */
4480 		for (j = 0; j <= ring->fence_drv.num_fences_mask; j++) {
4481 			struct dma_fence *old, **ptr;
4482 
4483 			ptr = &ring->fence_drv.fences[j];
4484 			old = rcu_dereference_protected(*ptr, 1);
4485 			if (old && test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &old->flags)) {
4486 				RCU_INIT_POINTER(*ptr, NULL);
4487 			}
4488 		}
4489 		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4490 		amdgpu_fence_driver_force_completion(ring);
4491 	}
4492 
4493 	if (job && job->vm)
4494 		drm_sched_increase_karma(&job->base);
4495 
4496 	r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4497 	/* If reset handler not implemented, continue; otherwise return */
4498 	if (r == -ENOSYS)
4499 		r = 0;
4500 	else
4501 		return r;
4502 
4503 	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4504 	if (!amdgpu_sriov_vf(adev)) {
4505 
4506 		if (!need_full_reset)
4507 			need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4508 
4509 		if (!need_full_reset) {
4510 			amdgpu_device_ip_pre_soft_reset(adev);
4511 			r = amdgpu_device_ip_soft_reset(adev);
4512 			amdgpu_device_ip_post_soft_reset(adev);
4513 			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4514 				dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4515 				need_full_reset = true;
4516 			}
4517 		}
4518 
4519 		if (need_full_reset)
4520 			r = amdgpu_device_ip_suspend(adev);
4521 		if (need_full_reset)
4522 			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4523 		else
4524 			clear_bit(AMDGPU_NEED_FULL_RESET,
4525 				  &reset_context->flags);
4526 	}
4527 
4528 	return r;
4529 }
4530 
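/**
 * amdgpu_do_asic_reset - perform the actual asic reset for a list of devices
 *
 * @device_list_handle: list of devices (single device or XGMI hive) to reset
 * @reset_context: amdgpu reset context pointer
 *
 * Tries the dedicated reset handler first. Otherwise performs a full asic
 * reset on all devices (in parallel for XGMI hives), re-posts the cards,
 * resumes the IP blocks, runs the IB ring tests and recovers VRAM
 * contents from their shadows when needed.
 * Returns 0 on success, negative error code on failure.
 */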
4531 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4532 			 struct amdgpu_reset_context *reset_context)
4533 {
4534 	struct amdgpu_device *tmp_adev = NULL;
4535 	bool need_full_reset, skip_hw_reset, vram_lost = false;
4536 	int r = 0;
4537 
4538 	/* Try reset handler method first */
4539 	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4540 				    reset_list);
4541 	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4542 	/* If reset handler not implemented, continue; otherwise return */
4543 	if (r == -ENOSYS)
4544 		r = 0;
4545 	else
4546 		return r;
4547 
4548 	/* Reset handler not implemented, use the default method */
4549 	need_full_reset =
4550 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4551 	skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4552 
4553 	/*
4554 	 * ASIC reset has to be done on all XGMI hive nodes ASAP
	 * to allow proper link negotiation in FW (within 1 sec)
4556 	 */
4557 	if (!skip_hw_reset && need_full_reset) {
4558 		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4559 			/* For XGMI run all resets in parallel to speed up the process */
4560 			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4561 				tmp_adev->gmc.xgmi.pending_reset = false;
4562 				if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4563 					r = -EALREADY;
4564 			} else
4565 				r = amdgpu_asic_reset(tmp_adev);
4566 
4567 			if (r) {
4568 				dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4569 					 r, adev_to_drm(tmp_adev)->unique);
4570 				break;
4571 			}
4572 		}
4573 
4574 		/* For XGMI wait for all resets to complete before proceed */
4575 		if (!r) {
4576 			list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4577 				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4578 					flush_work(&tmp_adev->xgmi_reset_work);
4579 					r = tmp_adev->asic_reset_res;
4580 					if (r)
4581 						break;
4582 				}
4583 			}
4584 		}
4585 	}
4586 
4587 	if (!r && amdgpu_ras_intr_triggered()) {
4588 		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4589 			if (tmp_adev->mmhub.ras_funcs &&
4590 			    tmp_adev->mmhub.ras_funcs->reset_ras_error_count)
4591 				tmp_adev->mmhub.ras_funcs->reset_ras_error_count(tmp_adev);
4592 		}
4593 
4594 		amdgpu_ras_intr_cleared();
4595 	}
4596 
4597 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4598 		if (need_full_reset) {
4599 			/* post card */
4600 			r = amdgpu_device_asic_init(tmp_adev);
4601 			if (r) {
4602 				dev_warn(tmp_adev->dev, "asic atom init failed!");
4603 			} else {
4604 				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4605 				r = amdgpu_amdkfd_resume_iommu(tmp_adev);
4606 				if (r)
4607 					goto out;
4608 
4609 				r = amdgpu_device_ip_resume_phase1(tmp_adev);
4610 				if (r)
4611 					goto out;
4612 
4613 				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4614 				if (vram_lost) {
4615 					DRM_INFO("VRAM is lost due to GPU reset!\n");
4616 					amdgpu_inc_vram_lost(tmp_adev);
4617 				}
4618 
4619 				r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT));
4620 				if (r)
4621 					goto out;
4622 
4623 				r = amdgpu_device_fw_loading(tmp_adev);
4624 				if (r)
4625 					return r;
4626 
4627 				r = amdgpu_device_ip_resume_phase2(tmp_adev);
4628 				if (r)
4629 					goto out;
4630 
4631 				if (vram_lost)
4632 					amdgpu_device_fill_reset_magic(tmp_adev);
4633 
4634 				/*
				 * Add this ASIC back as tracked since the reset
				 * has already completed successfully.
4637 				 */
4638 				amdgpu_register_gpu_instance(tmp_adev);
4639 
4640 				if (!reset_context->hive &&
4641 				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4642 					amdgpu_xgmi_add_device(tmp_adev);
4643 
4644 				r = amdgpu_device_ip_late_init(tmp_adev);
4645 				if (r)
4646 					goto out;
4647 
4648 				drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
4649 
4650 				/*
				 * The GPU enters a bad state once the number of
				 * faulty pages caught by ECC reaches the threshold,
				 * and RAS recovery is scheduled next. So add a check
				 * here to break recovery if the bad page threshold
				 * has indeed been exceeded, and remind the user to
				 * retire this GPU or set a bigger bad_page_threshold
				 * value before probing the driver again.
4659 				 */
4660 				if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4661 					/* must succeed. */
4662 					amdgpu_ras_resume(tmp_adev);
4663 				} else {
4664 					r = -EINVAL;
4665 					goto out;
4666 				}
4667 
4668 				/* Update PSP FW topology after reset */
4669 				if (reset_context->hive &&
4670 				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4671 					r = amdgpu_xgmi_update_topology(
4672 						reset_context->hive, tmp_adev);
4673 			}
4674 		}
4675 
4676 out:
4677 		if (!r) {
4678 			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
4679 			r = amdgpu_ib_ring_tests(tmp_adev);
4680 			if (r) {
4681 				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
4682 				need_full_reset = true;
4683 				r = -EAGAIN;
4684 				goto end;
4685 			}
4686 		}
4687 
4688 		if (!r)
4689 			r = amdgpu_device_recover_vram(tmp_adev);
4690 		else
4691 			tmp_adev->asic_reset_res = r;
4692 	}
4693 
4694 end:
4695 	if (need_full_reset)
4696 		set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4697 	else
4698 		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4699 	return r;
4700 }
4701 
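/*
 * Take the per-device reset lock (nested under the hive lock when the
 * device is part of a hive) and set the MP1 state according to the reset
 * method in use. Returns false if a reset is already in progress on this
 * device.
 */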
4702 static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
4703 				struct amdgpu_hive_info *hive)
4704 {
4705 	if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
4706 		return false;
4707 
4708 	if (hive) {
4709 		down_write_nest_lock(&adev->reset_sem, &hive->hive_lock);
4710 	} else {
4711 		down_write(&adev->reset_sem);
4712 	}
4713 
4714 	switch (amdgpu_asic_reset_method(adev)) {
4715 	case AMD_RESET_METHOD_MODE1:
4716 		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
4717 		break;
4718 	case AMD_RESET_METHOD_MODE2:
4719 		adev->mp1_state = PP_MP1_STATE_RESET;
4720 		break;
4721 	default:
4722 		adev->mp1_state = PP_MP1_STATE_NONE;
4723 		break;
4724 	}
4725 
4726 	return true;
4727 }
4728 
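/*
 * Counterpart of amdgpu_device_lock_adev(): flush pending VF errors,
 * clear the MP1 state and release the reset lock.
 */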
4729 static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
4730 {
4731 	amdgpu_vf_error_trans_all(adev);
4732 	adev->mp1_state = PP_MP1_STATE_NONE;
4733 	atomic_set(&adev->in_gpu_reset, 0);
4734 	up_write(&adev->reset_sem);
4735 }
4736 
4737 /*
 * To lock a list of amdgpu devices in a hive safely. If the device is not
 * part of a hive with multiple nodes, this behaves the same as
 * amdgpu_device_lock_adev.
 *
 * Unlock won't require rollback.
4742  */
4743 static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgpu_hive_info *hive)
4744 {
4745 	struct amdgpu_device *tmp_adev = NULL;
4746 
4747 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
4748 		if (!hive) {
4749 			dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes");
4750 			return -ENODEV;
4751 		}
4752 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4753 			if (!amdgpu_device_lock_adev(tmp_adev, hive))
4754 				goto roll_back;
4755 		}
4756 	} else if (!amdgpu_device_lock_adev(adev, hive))
4757 		return -EAGAIN;
4758 
4759 	return 0;
4760 roll_back:
4761 	if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) {
4762 		/*
		 * If the lock iteration broke in the middle of a hive,
		 * it may mean there is a race issue, or that a hive device
		 * locked up independently. We may or may not be in trouble,
		 * so try to roll back the locks and give out a warning.
4768 		 */
4769 		dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle. Rolling back to unlock");
4770 		list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head) {
4771 			amdgpu_device_unlock_adev(tmp_adev);
4772 		}
4773 	}
4774 	return -EAGAIN;
4775 }
4776 
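/*
 * Re-enable runtime PM of the display audio function (function 1 on the
 * same bus) that was disabled by amdgpu_device_suspend_display_audio()
 * and resume it now that the reset has completed.
 */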
4777 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
4778 {
4779 	struct pci_dev *p = NULL;
4780 
4781 	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4782 			adev->pdev->bus->number, 1);
4783 	if (p) {
4784 		pm_runtime_enable(&(p->dev));
4785 		pm_runtime_resume(&(p->dev));
4786 	}
4787 }
4788 
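/*
 * Put the display audio function (function 1 on the same bus) into
 * runtime suspend before a BACO or mode1 reset, waiting up to its
 * autosuspend expiration (or a fixed 4 second budget) for the suspend
 * to happen, and keep its runtime PM disabled until the reset is done.
 */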
4789 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
4790 {
4791 	enum amd_reset_method reset_method;
4792 	struct pci_dev *p = NULL;
4793 	u64 expires;
4794 
4795 	/*
	 * For now, only BACO and mode1 reset are confirmed
	 * to suffer from the audio issue if the audio device
	 * is not properly suspended.
4798 	 */
4799 	reset_method = amdgpu_asic_reset_method(adev);
4800 	if ((reset_method != AMD_RESET_METHOD_BACO) &&
4801 	     (reset_method != AMD_RESET_METHOD_MODE1))
4802 		return -EINVAL;
4803 
4804 	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
4805 			adev->pdev->bus->number, 1);
4806 	if (!p)
4807 		return -ENODEV;
4808 
4809 	expires = pm_runtime_autosuspend_expiration(&(p->dev));
4810 	if (!expires)
4811 		/*
		 * If we cannot get the audio device autosuspend delay,
		 * a fixed 4s interval will be used. Considering that 3s is
		 * the audio controller's default autosuspend delay setting,
		 * the 4s used here is guaranteed to cover it.
4816 		 */
4817 		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
4818 
4819 	while (!pm_runtime_status_suspended(&(p->dev))) {
4820 		if (!pm_runtime_suspend(&(p->dev)))
4821 			break;
4822 
4823 		if (expires < ktime_get_mono_fast_ns()) {
4824 			dev_warn(adev->dev, "failed to suspend display audio\n");
4825 			/* TODO: abort the succeeding gpu reset? */
4826 			return -ETIMEDOUT;
4827 		}
4828 	}
4829 
4830 	pm_runtime_disable(&(p->dev));
4831 
4832 	return 0;
4833 }
4834 
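/*
 * Resubmit the first pending job of each ring one ring at a time and wait
 * for its hardware fence in order to identify the job that really hangs
 * the hardware. A job that times out gets its karma increased and triggers
 * another hardware reset before moving on; otherwise its finished fence is
 * signaled and the job is freed.
 */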
4835 static void amdgpu_device_recheck_guilty_jobs(
4836 	struct amdgpu_device *adev, struct list_head *device_list_handle,
4837 	struct amdgpu_reset_context *reset_context)
4838 {
4839 	int i, r = 0;
4840 
4841 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4842 		struct amdgpu_ring *ring = adev->rings[i];
4843 		int ret = 0;
4844 		struct drm_sched_job *s_job;
4845 
4846 		if (!ring || !ring->sched.thread)
4847 			continue;
4848 
4849 		s_job = list_first_entry_or_null(&ring->sched.pending_list,
4850 				struct drm_sched_job, list);
4851 		if (s_job == NULL)
4852 			continue;
4853 
		/* clear the job's guilty status and rely on the following step to decide the real one */
4855 		drm_sched_reset_karma(s_job);
		/* the real bad job will be resubmitted twice, so take an extra
		 * dma_fence_get here to keep the fence refcount balanced */
4858 		dma_fence_get(s_job->s_fence->parent);
4859 		drm_sched_resubmit_jobs_ext(&ring->sched, 1);
4860 
4861 		ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
4862 		if (ret == 0) { /* timeout */
4863 			DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
4864 						ring->sched.name, s_job->id);
4865 
4866 			/* set guilty */
4867 			drm_sched_increase_karma(s_job);
4868 retry:
4869 			/* do hw reset */
4870 			if (amdgpu_sriov_vf(adev)) {
4871 				amdgpu_virt_fini_data_exchange(adev);
4872 				r = amdgpu_device_reset_sriov(adev, false);
4873 				if (r)
4874 					adev->asic_reset_res = r;
4875 			} else {
4876 				clear_bit(AMDGPU_SKIP_HW_RESET,
4877 					  &reset_context->flags);
4878 				r = amdgpu_do_asic_reset(device_list_handle,
4879 							 reset_context);
4880 				if (r && r == -EAGAIN)
4881 					goto retry;
4882 			}
4883 
4884 			/*
			 * increment the reset counter so that the following
			 * resubmitted job can flush the vmid
4887 			 */
4888 			atomic_inc(&adev->gpu_reset_counter);
4889 			continue;
4890 		}
4891 
4892 		/* got the hw fence, signal finished fence */
4893 		atomic_dec(ring->sched.score);
4894 		dma_fence_put(s_job->s_fence->parent);
4895 		dma_fence_get(&s_job->s_fence->finished);
4896 		dma_fence_signal(&s_job->s_fence->finished);
4897 		dma_fence_put(&s_job->s_fence->finished);
4898 
4899 		/* remove node from list and free the job */
4900 		spin_lock(&ring->sched.job_list_lock);
4901 		list_del_init(&s_job->list);
4902 		spin_unlock(&ring->sched.job_list_lock);
4903 		ring->sched.ops->free_job(s_job);
4904 	}
4905 }
4906 
4907 /**
4908  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
4909  *
4910  * @adev: amdgpu_device pointer
 * @job: which job triggered the hang
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Attempt to do a soft reset or full reset and reinitialize the ASIC.
4915  * Returns 0 for success or an error on failure.
4916  */
4917 
4918 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
4919 			      struct amdgpu_job *job)
4920 {
4921 	struct list_head device_list, *device_list_handle =  NULL;
4922 	bool job_signaled = false;
4923 	struct amdgpu_hive_info *hive = NULL;
4924 	struct amdgpu_device *tmp_adev = NULL;
4925 	int i, r = 0;
4926 	bool need_emergency_restart = false;
4927 	bool audio_suspended = false;
4928 	int tmp_vram_lost_counter;
4929 	struct amdgpu_reset_context reset_context;
4930 
4931 	memset(&reset_context, 0, sizeof(reset_context));
4932 
4933 	/*
4934 	 * Special case: RAS triggered and full reset isn't supported
4935 	 */
4936 	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
4937 
4938 	/*
4939 	 * Flush RAM to disk so that after reboot
4940 	 * the user can read log and see why the system rebooted.
4941 	 */
4942 	if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
4943 		DRM_WARN("Emergency reboot.");
4944 
4945 		ksys_sync_helper();
4946 		emergency_restart();
4947 	}
4948 
4949 	dev_info(adev->dev, "GPU %s begin!\n",
4950 		need_emergency_restart ? "jobs stop":"reset");
4951 
4952 	/*
	 * Here we trylock to avoid a chain of resets executing, triggered
	 * either by jobs on different adevs in an XGMI hive or by jobs on
	 * different schedulers for the same device, while this TO handler
	 * is running. We always reset all schedulers for a device and all
	 * devices for an XGMI hive, so that should take care of them too.
4958 	 */
4959 	hive = amdgpu_get_xgmi_hive(adev);
4960 	if (hive) {
4961 		if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
4962 			DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
4963 				job ? job->base.id : -1, hive->hive_id);
4964 			amdgpu_put_xgmi_hive(hive);
4965 			if (job && job->vm)
4966 				drm_sched_increase_karma(&job->base);
4967 			return 0;
4968 		}
4969 		mutex_lock(&hive->hive_lock);
4970 	}
4971 
4972 	reset_context.method = AMD_RESET_METHOD_NONE;
4973 	reset_context.reset_req_dev = adev;
4974 	reset_context.job = job;
4975 	reset_context.hive = hive;
4976 	clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
4977 
4978 	/*
	 * Lock the device before we try to operate on the linked list.
	 * If we didn't get the device lock, don't touch the linked list
	 * since others may be iterating over it.
4982 	 */
4983 	r = amdgpu_device_lock_hive_adev(adev, hive);
4984 	if (r) {
4985 		dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
4986 					job ? job->base.id : -1);
4987 
		/* even though we skipped this reset, we still need to mark the job as guilty */
4989 		if (job && job->vm)
4990 			drm_sched_increase_karma(&job->base);
4991 		goto skip_recovery;
4992 	}
4993 
4994 	/*
4995 	 * Build list of devices to reset.
4996 	 * In case we are in XGMI hive mode, resort the device list
4997 	 * to put adev in the 1st position.
4998 	 */
4999 	INIT_LIST_HEAD(&device_list);
5000 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
5001 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
5002 			list_add_tail(&tmp_adev->reset_list, &device_list);
5003 		if (!list_is_first(&adev->reset_list, &device_list))
5004 			list_rotate_to_front(&adev->reset_list, &device_list);
5005 		device_list_handle = &device_list;
5006 	} else {
5007 		list_add_tail(&adev->reset_list, &device_list);
5008 		device_list_handle = &device_list;
5009 	}
5010 
5011 	/* block all schedulers and reset given job's ring */
5012 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5013 		/*
		 * Try to put the audio codec into suspend state
		 * before the gpu reset starts.
		 *
		 * The power domain of the graphics device is shared
		 * with the AZ power domain. Without this, we may
		 * change the audio hardware behind the audio driver's
		 * back, which will trigger audio codec errors.
5022 		 */
5023 		if (!amdgpu_device_suspend_display_audio(tmp_adev))
5024 			audio_suspended = true;
5025 
5026 		amdgpu_ras_set_error_query_ready(tmp_adev, false);
5027 
5028 		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5029 
5030 		amdgpu_amdkfd_pre_reset(tmp_adev);
5031 
5032 		/*
		 * Mark these ASICs to be reset as untracked first,
		 * and add them back after the reset has completed.
5035 		 */
5036 		amdgpu_unregister_gpu_instance(tmp_adev);
5037 
5038 		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
5039 
5040 		/* disable ras on ALL IPs */
5041 		if (!need_emergency_restart &&
5042 		      amdgpu_device_ip_need_full_reset(tmp_adev))
5043 			amdgpu_ras_suspend(tmp_adev);
5044 
5045 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5046 			struct amdgpu_ring *ring = tmp_adev->rings[i];
5047 
5048 			if (!ring || !ring->sched.thread)
5049 				continue;
5050 
5051 			drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5052 
5053 			if (need_emergency_restart)
5054 				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5055 		}
5056 		atomic_inc(&tmp_adev->gpu_reset_counter);
5057 	}
5058 
5059 	if (need_emergency_restart)
5060 		goto skip_sched_resume;
5061 
5062 	/*
5063 	 * Must check guilty signal here since after this point all old
5064 	 * HW fences are force signaled.
5065 	 *
5066 	 * job->base holds a reference to parent fence
5067 	 */
5068 	if (job && job->base.s_fence->parent &&
5069 	    dma_fence_is_signaled(job->base.s_fence->parent)) {
5070 		job_signaled = true;
5071 		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5072 		goto skip_hw_reset;
5073 	}
5074 
5075 retry:	/* Rest of adevs pre asic reset from XGMI hive. */
5076 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5077 		r = amdgpu_device_pre_asic_reset(tmp_adev, &reset_context);
		/* TODO: Should we stop? */
5079 		if (r) {
5080 			dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5081 				  r, adev_to_drm(tmp_adev)->unique);
5082 			tmp_adev->asic_reset_res = r;
5083 		}
5084 	}
5085 
5086 	tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
5087 	/* Actual ASIC resets if needed.*/
5088 	/* TODO Implement XGMI hive reset logic for SRIOV */
5089 	if (amdgpu_sriov_vf(adev)) {
5090 		r = amdgpu_device_reset_sriov(adev, job ? false : true);
5091 		if (r)
5092 			adev->asic_reset_res = r;
5093 	} else {
5094 		r = amdgpu_do_asic_reset(device_list_handle, &reset_context);
5095 		if (r && r == -EAGAIN)
5096 			goto retry;
5097 	}
5098 
5099 skip_hw_reset:
5100 
	/* Post ASIC reset for all devs. */
5102 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5103 
5104 		/*
		 * Sometimes a later bad compute job can block a good gfx job, since
		 * the gfx and compute rings share internal GC HW. We add an additional
		 * guilty-job recheck step to find the real guilty job: it synchronously
		 * resubmits the first pending job and waits for it to be signaled. If
		 * that wait times out, we identify it as the real guilty job.
5110 		 */
5111 		if (amdgpu_gpu_recovery == 2 &&
5112 			!(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
5113 			amdgpu_device_recheck_guilty_jobs(
5114 				tmp_adev, device_list_handle, &reset_context);
5115 
5116 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5117 			struct amdgpu_ring *ring = tmp_adev->rings[i];
5118 
5119 			if (!ring || !ring->sched.thread)
5120 				continue;
5121 
			/* No point in resubmitting jobs if we didn't HW reset */
5123 			if (!tmp_adev->asic_reset_res && !job_signaled)
5124 				drm_sched_resubmit_jobs(&ring->sched);
5125 
5126 			drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
5127 		}
5128 
5129 		if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
5130 			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5131 		}
5132 
5133 		tmp_adev->asic_reset_res = 0;
5134 
5135 		if (r) {
			/* bad news, how do we tell it to userspace? */
5137 			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5138 			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5139 		} else {
5140 			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5141 			if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5142 				DRM_WARN("smart shift update failed\n");
5143 		}
5144 	}
5145 
5146 skip_sched_resume:
5147 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5148 		/* unlock kfd: SRIOV would do it separately */
5149 		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
			amdgpu_amdkfd_post_reset(tmp_adev);
5151 
		/* kfd_post_reset will do nothing if the kfd device is not initialized,
		 * so bring up kfd here if it has not been initialized before
5154 		 */
5155 		if (!adev->kfd.init_complete)
5156 			amdgpu_amdkfd_device_init(adev);
5157 
5158 		if (audio_suspended)
5159 			amdgpu_device_resume_display_audio(tmp_adev);
5160 		amdgpu_device_unlock_adev(tmp_adev);
5161 	}
5162 
5163 skip_recovery:
5164 	if (hive) {
5165 		atomic_set(&hive->in_reset, 0);
5166 		mutex_unlock(&hive->hive_lock);
5167 		amdgpu_put_xgmi_hive(hive);
5168 	}
5169 
5170 	if (r && r != -EAGAIN)
5171 		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5172 	return r;
5173 }
5174 
5175 /**
 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
5177  *
5178  * @adev: amdgpu_device pointer
5179  *
 * Fetches and stores in the driver the PCIE capabilities (gen speed
5181  * and lanes) of the slot the device is in. Handles APUs and
5182  * virtualized environments where PCIE config space may not be available.
5183  */
5184 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5185 {
5186 	struct pci_dev *pdev;
5187 	enum pci_bus_speed speed_cap, platform_speed_cap;
5188 	enum pcie_link_width platform_link_width;
5189 
5190 	if (amdgpu_pcie_gen_cap)
5191 		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5192 
5193 	if (amdgpu_pcie_lane_cap)
5194 		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5195 
5196 	/* covers APUs as well */
5197 	if (pci_is_root_bus(adev->pdev->bus)) {
5198 		if (adev->pm.pcie_gen_mask == 0)
5199 			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5200 		if (adev->pm.pcie_mlw_mask == 0)
5201 			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5202 		return;
5203 	}
5204 
5205 	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5206 		return;
5207 
5208 	pcie_bandwidth_available(adev->pdev, NULL,
5209 				 &platform_speed_cap, &platform_link_width);
5210 
5211 	if (adev->pm.pcie_gen_mask == 0) {
5212 		/* asic caps */
5213 		pdev = adev->pdev;
5214 		speed_cap = pcie_get_speed_cap(pdev);
5215 		if (speed_cap == PCI_SPEED_UNKNOWN) {
5216 			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5217 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5218 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5219 		} else {
5220 			if (speed_cap == PCIE_SPEED_32_0GT)
5221 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5222 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5223 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5224 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5225 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5226 			else if (speed_cap == PCIE_SPEED_16_0GT)
5227 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5228 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5229 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5230 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5231 			else if (speed_cap == PCIE_SPEED_8_0GT)
5232 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5233 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5234 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5235 			else if (speed_cap == PCIE_SPEED_5_0GT)
5236 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5237 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5238 			else
5239 				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5240 		}
5241 		/* platform caps */
5242 		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5243 			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5244 						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5245 		} else {
5246 			if (platform_speed_cap == PCIE_SPEED_32_0GT)
5247 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5248 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5249 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5250 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5251 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5252 			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5253 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5254 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5255 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5256 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5257 			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5258 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5259 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5260 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5261 			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5262 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5263 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5264 			else
5265 				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5267 		}
5268 	}
5269 	if (adev->pm.pcie_mlw_mask == 0) {
5270 		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5271 			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5272 		} else {
5273 			switch (platform_link_width) {
5274 			case PCIE_LNK_X32:
5275 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5276 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5277 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5278 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5279 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5280 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5281 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5282 				break;
5283 			case PCIE_LNK_X16:
5284 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5285 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5286 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5287 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5288 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5289 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5290 				break;
5291 			case PCIE_LNK_X12:
5292 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5293 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5294 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5295 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5296 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5297 				break;
5298 			case PCIE_LNK_X8:
5299 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5300 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5301 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5302 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5303 				break;
5304 			case PCIE_LNK_X4:
5305 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5306 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5307 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5308 				break;
5309 			case PCIE_LNK_X2:
5310 				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5311 							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5312 				break;
5313 			case PCIE_LNK_X1:
5314 				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5315 				break;
5316 			default:
5317 				break;
5318 			}
5319 		}
5320 	}
5321 }
5322 
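/**
 * amdgpu_device_baco_enter - enter BACO (Bus Active, Chip Off)
 * @dev: drm_device pointer
 *
 * Disables the RAS doorbell interrupt when RAS is enabled, then asks the
 * power-management firmware to put the ASIC into the BACO state.
 *
 * Return: 0 on success, -ENOTSUPP if the device does not support BACO,
 * or the error returned by amdgpu_dpm_baco_enter().
 */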
5323 int amdgpu_device_baco_enter(struct drm_device *dev)
5324 {
5325 	struct amdgpu_device *adev = drm_to_adev(dev);
5326 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5327 
5328 	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5329 		return -ENOTSUPP;
5330 
5331 	if (ras && adev->ras_enabled &&
5332 	    adev->nbio.funcs->enable_doorbell_interrupt)
5333 		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5334 
5335 	return amdgpu_dpm_baco_enter(adev);
5336 }
5337 
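/**
 * amdgpu_device_baco_exit - exit BACO (Bus Active, Chip Off)
 * @dev: drm_device pointer
 *
 * Brings the ASIC back out of BACO, re-enables the RAS doorbell interrupt
 * when RAS is enabled, and clears any pending doorbell interrupt when
 * running in passthrough mode.
 *
 * Return: 0 on success, -ENOTSUPP if the device does not support BACO,
 * or the error returned by amdgpu_dpm_baco_exit().
 */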
5338 int amdgpu_device_baco_exit(struct drm_device *dev)
5339 {
5340 	struct amdgpu_device *adev = drm_to_adev(dev);
5341 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5342 	int ret = 0;
5343 
5344 	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
5345 		return -ENOTSUPP;
5346 
5347 	ret = amdgpu_dpm_baco_exit(adev);
5348 	if (ret)
5349 		return ret;
5350 
5351 	if (ras && adev->ras_enabled &&
5352 	    adev->nbio.funcs->enable_doorbell_interrupt)
5353 		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5354 
5355 	if (amdgpu_passthrough(adev) &&
5356 	    adev->nbio.funcs->clear_doorbell_interrupt)
5357 		adev->nbio.funcs->clear_doorbell_interrupt(adev);
5358 
5359 	return 0;
5360 }
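
/*
 * Illustrative sketch (not taken from this file): a caller such as the
 * driver's runtime-PM code might use the BACO helpers above roughly like
 * this on boards that support BACO:
 *
 *	if (amdgpu_device_supports_baco(dev))
 *		ret = amdgpu_device_baco_enter(dev);
 *	...
 *	if (amdgpu_device_supports_baco(dev))
 *		ret = amdgpu_device_baco_exit(dev);
 */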
5361 
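/*
 * Cancel and wait for any pending timeout (TDR) work on every active
 * ring's scheduler.
 */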
5362 static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev)
5363 {
5364 	int i;
5365 
5366 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5367 		struct amdgpu_ring *ring = adev->rings[i];
5368 
5369 		if (!ring || !ring->sched.thread)
5370 			continue;
5371 
5372 		cancel_delayed_work_sync(&ring->sched.work_tdr);
5373 	}
5374 }
5375 
5376 /**
5377  * amdgpu_pci_error_detected - Called when a PCI error is detected.
5378  * @pdev: PCI device struct
5379  * @state: PCI channel state
5380  *
5381  * Description: Called when a PCI error is detected.
5382  *
5383  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5384  */
5385 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5386 {
5387 	struct drm_device *dev = pci_get_drvdata(pdev);
5388 	struct amdgpu_device *adev = drm_to_adev(dev);
5389 	int i;
5390 
5391 	DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5392 
5393 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
5394 		DRM_WARN("No support for XGMI hive yet...");
5395 		return PCI_ERS_RESULT_DISCONNECT;
5396 	}
5397 
5398 	adev->pci_channel_state = state;
5399 
5400 	switch (state) {
5401 	case pci_channel_io_normal:
5402 		return PCI_ERS_RESULT_CAN_RECOVER;
5403 	/* Fatal error, prepare for slot reset */
5404 	case pci_channel_io_frozen:
5405 		/*
5406 		 * Cancel and wait for all TDRs in progress if we fail to
5407 		 * set adev->in_gpu_reset in amdgpu_device_lock_adev.
5408 		 *
5409 		 * Locking adev->reset_sem prevents any external access to
5410 		 * the GPU for the duration of PCI error recovery.
5411 		 */
5412 		while (!amdgpu_device_lock_adev(adev, NULL))
5413 			amdgpu_cancel_all_tdr(adev);
5414 
5415 		/*
5416 		 * Block any work scheduling as we do for regular GPU reset
5417 		 * for the duration of the recovery
5418 		 */
5419 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5420 			struct amdgpu_ring *ring = adev->rings[i];
5421 
5422 			if (!ring || !ring->sched.thread)
5423 				continue;
5424 
5425 			drm_sched_stop(&ring->sched, NULL);
5426 		}
5427 		atomic_inc(&adev->gpu_reset_counter);
5428 		return PCI_ERS_RESULT_NEED_RESET;
5429 	case pci_channel_io_perm_failure:
5430 		/* Permanent error, prepare for device removal */
5431 		return PCI_ERS_RESULT_DISCONNECT;
5432 	}
5433 
5434 	return PCI_ERS_RESULT_NEED_RESET;
5435 }
5436 
5437 /**
5438  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5439  * @pdev: pointer to PCI device
 *
 * Return: PCI_ERS_RESULT_RECOVERED.
5440  */
5441 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5442 {
5444 	DRM_INFO("PCI error: mmio enabled callback!!\n");
5445 
5446 	/* TODO - dump whatever for debugging purposes */
5447 
5448 	/* This is called only if amdgpu_pci_error_detected returns
5449 	 * PCI_ERS_RESULT_CAN_RECOVER. Reads/writes to the device still
5450 	 * work, so there is no need to reset the slot.
5451 	 */
5452 
5453 	return PCI_ERS_RESULT_RECOVERED;
5454 }
5455 
5456 /**
5457  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5458  * @pdev: PCI device struct
5459  *
5460  * Description: This routine is called by the PCI error recovery
5461  * code after the PCI slot has been reset, just before normal
5462  * operation is resumed.
 *
 * Return: PCI_ERS_RESULT_RECOVERED on success, PCI_ERS_RESULT_DISCONNECT
 * if the ASIC could not be brought back up.
5463  */
5464 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5465 {
5466 	struct drm_device *dev = pci_get_drvdata(pdev);
5467 	struct amdgpu_device *adev = drm_to_adev(dev);
5468 	int r, i;
5469 	struct amdgpu_reset_context reset_context;
5470 	u32 memsize;
5471 	struct list_head device_list;
5472 
5473 	DRM_INFO("PCI error: slot reset callback!!\n");
5474 
5475 	memset(&reset_context, 0, sizeof(reset_context));
5476 
5477 	INIT_LIST_HEAD(&device_list);
5478 	list_add_tail(&adev->reset_list, &device_list);
5479 
5480 	/* wait for asic to come out of reset */
5481 	msleep(500);
5482 
5483 	/* Restore the PCI config space */
5484 	amdgpu_device_load_pci_state(pdev);
5485 
5486 	/* confirm the ASIC came out of reset */
5487 	for (i = 0; i < adev->usec_timeout; i++) {
5488 		memsize = amdgpu_asic_get_config_memsize(adev);
5489 
5490 		if (memsize != 0xffffffff)
5491 			break;
5492 		udelay(1);
5493 	}
5494 	if (memsize == 0xffffffff) {
5495 		r = -ETIME;
5496 		goto out;
5497 	}
5498 
5499 	reset_context.method = AMD_RESET_METHOD_NONE;
5500 	reset_context.reset_req_dev = adev;
5501 	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5502 	set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5503 
5504 	adev->no_hw_access = true;
5505 	r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5506 	adev->no_hw_access = false;
5507 	if (r)
5508 		goto out;
5509 
5510 	r = amdgpu_do_asic_reset(&device_list, &reset_context);
5511 
5512 out:
5513 	if (!r) {
5514 		if (amdgpu_device_cache_pci_state(adev->pdev))
5515 			pci_restore_state(adev->pdev);
5516 
5517 		DRM_INFO("PCIe error recovery succeeded\n");
5518 	} else {
5519 		DRM_ERROR("PCIe error recovery failed, err:%d\n", r);
5520 		amdgpu_device_unlock_adev(adev);
5521 	}
5522 
5523 	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5524 }
5525 
5526 /**
5527  * amdgpu_pci_resume() - resume normal ops after PCI reset
5528  * @pdev: pointer to PCI device
5529  *
5530  * Called when the error recovery driver tells us that it's OK to
5531  * resume normal operation.
5532  */
5533 void amdgpu_pci_resume(struct pci_dev *pdev)
5534 {
5535 	struct drm_device *dev = pci_get_drvdata(pdev);
5536 	struct amdgpu_device *adev = drm_to_adev(dev);
5537 	int i;
5538 
5540 	DRM_INFO("PCI error: resume callback!!\n");
5541 
5542 	/* Only continue execution for the case of pci_channel_io_frozen */
5543 	if (adev->pci_channel_state != pci_channel_io_frozen)
5544 		return;
5545 
5546 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5547 		struct amdgpu_ring *ring = adev->rings[i];
5548 
5549 		if (!ring || !ring->sched.thread)
5550 			continue;
5551 
5553 		drm_sched_resubmit_jobs(&ring->sched);
5554 		drm_sched_start(&ring->sched, true);
5555 	}
5556 
5557 	amdgpu_device_unlock_adev(adev);
5558 }
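
/*
 * Illustrative sketch (assumption, not copied from the driver): the four PCI
 * error callbacks above are normally wired into the PCI core through a
 * struct pci_error_handlers referenced from the driver's struct pci_driver,
 * e.g.:
 *
 *	static const struct pci_error_handlers example_err_handler = {
 *		.error_detected	= amdgpu_pci_error_detected,
 *		.mmio_enabled	= amdgpu_pci_mmio_enabled,
 *		.slot_reset	= amdgpu_pci_slot_reset,
 *		.resume		= amdgpu_pci_resume,
 *	};
 *
 *	static struct pci_driver example_pci_driver = {
 *		...
 *		.err_handler	= &example_err_handler,
 *	};
 */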
5559 
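/**
 * amdgpu_device_cache_pci_state - save and cache the PCI config space
 * @pdev: PCI device struct
 *
 * Saves the current PCI configuration space and stores a copy in
 * adev->pci_state (freeing any previously cached copy) so that it can be
 * restored later, e.g. after a slot reset.
 *
 * Return: true on success, false if saving or storing the state failed.
 */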
5560 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5561 {
5562 	struct drm_device *dev = pci_get_drvdata(pdev);
5563 	struct amdgpu_device *adev = drm_to_adev(dev);
5564 	int r;
5565 
5566 	r = pci_save_state(pdev);
5567 	if (!r) {
5568 		kfree(adev->pci_state);
5569 
5570 		adev->pci_state = pci_store_saved_state(pdev);
5571 
5572 		if (!adev->pci_state) {
5573 			DRM_ERROR("Failed to store PCI saved state\n");
5574 			return false;
5575 		}
5576 	} else {
5577 		DRM_WARN("Failed to save PCI state, err:%d\n", r);
5578 		return false;
5579 	}
5580 
5581 	return true;
5582 }
5583 
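/**
 * amdgpu_device_load_pci_state - restore the cached PCI config space
 * @pdev: PCI device struct
 *
 * Loads the configuration space previously cached by
 * amdgpu_device_cache_pci_state() and writes it back to the device.
 *
 * Return: true on success, false if nothing was cached or loading failed.
 */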
5584 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5585 {
5586 	struct drm_device *dev = pci_get_drvdata(pdev);
5587 	struct amdgpu_device *adev = drm_to_adev(dev);
5588 	int r;
5589 
5590 	if (!adev->pci_state)
5591 		return false;
5592 
5593 	r = pci_load_saved_state(pdev, adev->pci_state);
5594 
5595 	if (!r) {
5596 		pci_restore_state(pdev);
5597 	} else {
5598 		DRM_WARN("Failed to load PCI state, err:%d\n", r);
5599 		return false;
5600 	}
5601 
5602 	return true;
5603 }
5604 
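/**
 * amdgpu_device_flush_hdp - flush the HDP (Host Data Path) cache
 * @adev: amdgpu_device pointer
 * @ring: optional ring to emit the flush packet on (falls back to the
 *        ASIC-level flush when NULL or unsupported)
 *
 * Skipped on APUs (on x86-64) and on devices connected to the CPU via
 * XGMI, where no HDP flush is needed.
 */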
5605 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5606 		struct amdgpu_ring *ring)
5607 {
5608 #ifdef CONFIG_X86_64
5609 	if (adev->flags & AMD_IS_APU)
5610 		return;
5611 #endif
5612 	if (adev->gmc.xgmi.connected_to_cpu)
5613 		return;
5614 
5615 	if (ring && ring->funcs->emit_hdp_flush)
5616 		amdgpu_ring_emit_hdp_flush(ring);
5617 	else
5618 		amdgpu_asic_flush_hdp(adev, ring);
5619 }
5620 
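/**
 * amdgpu_device_invalidate_hdp - invalidate the HDP (Host Data Path) cache
 * @adev: amdgpu_device pointer
 * @ring: ring the invalidation is associated with (may be NULL)
 *
 * Skipped on APUs (on x86-64) and on devices connected to the CPU via
 * XGMI, mirroring amdgpu_device_flush_hdp().
 */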
5621 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5622 		struct amdgpu_ring *ring)
5623 {
5624 #ifdef CONFIG_X86_64
5625 	if (adev->flags & AMD_IS_APU)
5626 		return;
5627 #endif
5628 	if (adev->gmc.xgmi.connected_to_cpu)
5629 		return;
5630 
5631 	amdgpu_asic_invalidate_hdp(adev, ring);
5632 }
5633