xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c (revision 2c5f15ee2c760514c5be0f02cf9c9f1ff68b9ac8)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 
29 #include <linux/aperture.h>
30 #include <linux/power_supply.h>
31 #include <linux/kthread.h>
32 #include <linux/module.h>
33 #include <linux/console.h>
34 #include <linux/slab.h>
35 #include <linux/iommu.h>
36 #include <linux/pci.h>
37 #include <linux/pci-p2pdma.h>
38 #include <linux/apple-gmux.h>
39 #include <linux/nospec.h>
40 
41 #include <drm/drm_atomic_helper.h>
42 #include <drm/drm_client_event.h>
43 #include <drm/drm_crtc_helper.h>
44 #include <drm/drm_probe_helper.h>
45 #include <drm/amdgpu_drm.h>
46 #include <linux/device.h>
47 #include <linux/vgaarb.h>
48 #include <linux/vga_switcheroo.h>
49 #include <linux/efi.h>
50 #include "amdgpu.h"
51 #include "amdgpu_trace.h"
52 #include "amdgpu_i2c.h"
53 #include "atom.h"
54 #include "amdgpu_atombios.h"
55 #include "amdgpu_atomfirmware.h"
56 #include "amd_pcie.h"
57 #ifdef CONFIG_DRM_AMDGPU_SI
58 #include "si.h"
59 #endif
60 #ifdef CONFIG_DRM_AMDGPU_CIK
61 #include "cik.h"
62 #endif
63 #include "vi.h"
64 #include "soc15.h"
65 #include "nv.h"
66 #include "bif/bif_4_1_d.h"
67 #include <linux/firmware.h>
68 #include "amdgpu_vf_error.h"
69 
70 #include "amdgpu_amdkfd.h"
71 #include "amdgpu_pm.h"
72 
73 #include "amdgpu_xgmi.h"
74 #include "amdgpu_ras.h"
75 #include "amdgpu_ras_mgr.h"
76 #include "amdgpu_pmu.h"
77 #include "amdgpu_fru_eeprom.h"
78 #include "amdgpu_reset.h"
79 #include "amdgpu_virt.h"
80 #include "amdgpu_dev_coredump.h"
81 
82 #include <linux/suspend.h>
83 #include <drm/task_barrier.h>
84 #include <linux/pm_runtime.h>
85 
86 #include <drm/drm_drv.h>
87 
88 #if IS_ENABLED(CONFIG_X86)
89 #include <asm/intel-family.h>
90 #include <asm/cpu_device_id.h>
91 #endif
92 
93 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
94 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
95 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
96 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
97 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
98 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
99 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
100 MODULE_FIRMWARE("amdgpu/cyan_skillfish_gpu_info.bin");
101 
102 #define AMDGPU_RESUME_MS		2000
103 #define AMDGPU_MAX_RETRY_LIMIT		2
104 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
105 #define AMDGPU_PCIE_INDEX_FALLBACK (0x38 >> 2)
106 #define AMDGPU_PCIE_INDEX_HI_FALLBACK (0x44 >> 2)
107 #define AMDGPU_PCIE_DATA_FALLBACK (0x3C >> 2)
108 
109 #define AMDGPU_VBIOS_SKIP (1U << 0)
110 #define AMDGPU_VBIOS_OPTIONAL (1U << 1)
111 
112 static const struct drm_driver amdgpu_kms_driver;
113 
114 const char *amdgpu_asic_name[] = {
115 	"TAHITI",
116 	"PITCAIRN",
117 	"VERDE",
118 	"OLAND",
119 	"HAINAN",
120 	"BONAIRE",
121 	"KAVERI",
122 	"KABINI",
123 	"HAWAII",
124 	"MULLINS",
125 	"TOPAZ",
126 	"TONGA",
127 	"FIJI",
128 	"CARRIZO",
129 	"STONEY",
130 	"POLARIS10",
131 	"POLARIS11",
132 	"POLARIS12",
133 	"VEGAM",
134 	"VEGA10",
135 	"VEGA12",
136 	"VEGA20",
137 	"RAVEN",
138 	"ARCTURUS",
139 	"RENOIR",
140 	"ALDEBARAN",
141 	"NAVI10",
142 	"CYAN_SKILLFISH",
143 	"NAVI14",
144 	"NAVI12",
145 	"SIENNA_CICHLID",
146 	"NAVY_FLOUNDER",
147 	"VANGOGH",
148 	"DIMGREY_CAVEFISH",
149 	"BEIGE_GOBY",
150 	"YELLOW_CARP",
151 	"IP DISCOVERY",
152 	"LAST",
153 };
154 
155 #define AMDGPU_IP_BLK_MASK_ALL GENMASK(AMD_IP_BLOCK_TYPE_NUM  - 1, 0)
156 /*
157  * Default init level where all blocks are expected to be initialized. This is
158  * the level of initialization expected by default and also after a full reset
159  * of the device.
160  */
161 struct amdgpu_init_level amdgpu_init_default = {
162 	.level = AMDGPU_INIT_LEVEL_DEFAULT,
163 	.hwini_ip_block_mask = AMDGPU_IP_BLK_MASK_ALL,
164 };
165 
166 struct amdgpu_init_level amdgpu_init_recovery = {
167 	.level = AMDGPU_INIT_LEVEL_RESET_RECOVERY,
168 	.hwini_ip_block_mask = AMDGPU_IP_BLK_MASK_ALL,
169 };
170 
171 /*
172  * Minimal blocks needed to be initialized before a XGMI hive can be reset. This
173  * is used for cases like reset on initialization where the entire hive needs to
174  * be reset before first use.
175  */
176 struct amdgpu_init_level amdgpu_init_minimal_xgmi = {
177 	.level = AMDGPU_INIT_LEVEL_MINIMAL_XGMI,
178 	.hwini_ip_block_mask =
179 		BIT(AMD_IP_BLOCK_TYPE_GMC) | BIT(AMD_IP_BLOCK_TYPE_SMC) |
180 		BIT(AMD_IP_BLOCK_TYPE_COMMON) | BIT(AMD_IP_BLOCK_TYPE_IH) |
181 		BIT(AMD_IP_BLOCK_TYPE_PSP)
182 };
183 
184 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev);
185 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev);
186 static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev);
187 
188 static void amdgpu_device_load_switch_state(struct amdgpu_device *adev);
189 
190 static inline bool amdgpu_ip_member_of_hwini(struct amdgpu_device *adev,
191 					     enum amd_ip_block_type block)
192 {
193 	return (adev->init_lvl->hwini_ip_block_mask & (1U << block)) != 0;
194 }
195 
196 void amdgpu_set_init_level(struct amdgpu_device *adev,
197 			   enum amdgpu_init_lvl_id lvl)
198 {
199 	switch (lvl) {
200 	case AMDGPU_INIT_LEVEL_MINIMAL_XGMI:
201 		adev->init_lvl = &amdgpu_init_minimal_xgmi;
202 		break;
203 	case AMDGPU_INIT_LEVEL_RESET_RECOVERY:
204 		adev->init_lvl = &amdgpu_init_recovery;
205 		break;
206 	case AMDGPU_INIT_LEVEL_DEFAULT:
207 		fallthrough;
208 	default:
209 		adev->init_lvl = &amdgpu_init_default;
210 		break;
211 	}
212 }
213 
214 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev);
215 static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
216 				     void *data);
217 
218 /**
219  * DOC: pcie_replay_count
220  *
221  * The amdgpu driver provides a sysfs API for reporting the total number
222  * of PCIe replays (NAKs).
223  * The file pcie_replay_count is used for this and returns the total
224  * number of replays as a sum of the NAKs generated and NAKs received.
225  */
226 
227 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
228 		struct device_attribute *attr, char *buf)
229 {
230 	struct drm_device *ddev = dev_get_drvdata(dev);
231 	struct amdgpu_device *adev = drm_to_adev(ddev);
232 	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
233 
234 	return sysfs_emit(buf, "%llu\n", cnt);
235 }
236 
237 static DEVICE_ATTR(pcie_replay_count, 0444,
238 		amdgpu_device_get_pcie_replay_count, NULL);
239 
240 static int amdgpu_device_attr_sysfs_init(struct amdgpu_device *adev)
241 {
242 	int ret = 0;
243 
244 	if (amdgpu_nbio_is_replay_cnt_supported(adev))
245 		ret = sysfs_create_file(&adev->dev->kobj,
246 					&dev_attr_pcie_replay_count.attr);
247 
248 	return ret;
249 }
250 
251 static void amdgpu_device_attr_sysfs_fini(struct amdgpu_device *adev)
252 {
253 	if (amdgpu_nbio_is_replay_cnt_supported(adev))
254 		sysfs_remove_file(&adev->dev->kobj,
255 				  &dev_attr_pcie_replay_count.attr);
256 }
257 
258 static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj,
259 					  const struct bin_attribute *attr, char *buf,
260 					  loff_t ppos, size_t count)
261 {
262 	struct device *dev = kobj_to_dev(kobj);
263 	struct drm_device *ddev = dev_get_drvdata(dev);
264 	struct amdgpu_device *adev = drm_to_adev(ddev);
265 	ssize_t bytes_read;
266 
267 	switch (ppos) {
268 	case AMDGPU_SYS_REG_STATE_XGMI:
269 		bytes_read = amdgpu_asic_get_reg_state(
270 			adev, AMDGPU_REG_STATE_TYPE_XGMI, buf, count);
271 		break;
272 	case AMDGPU_SYS_REG_STATE_WAFL:
273 		bytes_read = amdgpu_asic_get_reg_state(
274 			adev, AMDGPU_REG_STATE_TYPE_WAFL, buf, count);
275 		break;
276 	case AMDGPU_SYS_REG_STATE_PCIE:
277 		bytes_read = amdgpu_asic_get_reg_state(
278 			adev, AMDGPU_REG_STATE_TYPE_PCIE, buf, count);
279 		break;
280 	case AMDGPU_SYS_REG_STATE_USR:
281 		bytes_read = amdgpu_asic_get_reg_state(
282 			adev, AMDGPU_REG_STATE_TYPE_USR, buf, count);
283 		break;
284 	case AMDGPU_SYS_REG_STATE_USR_1:
285 		bytes_read = amdgpu_asic_get_reg_state(
286 			adev, AMDGPU_REG_STATE_TYPE_USR_1, buf, count);
287 		break;
288 	default:
289 		return -EINVAL;
290 	}
291 
292 	return bytes_read;
293 }
294 
295 static const BIN_ATTR(reg_state, 0444, amdgpu_sysfs_reg_state_get, NULL,
296 		      AMDGPU_SYS_REG_STATE_END);
297 
298 int amdgpu_reg_state_sysfs_init(struct amdgpu_device *adev)
299 {
300 	int ret;
301 
302 	if (!amdgpu_asic_get_reg_state_supported(adev))
303 		return 0;
304 
305 	ret = sysfs_create_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
306 
307 	return ret;
308 }
309 
310 void amdgpu_reg_state_sysfs_fini(struct amdgpu_device *adev)
311 {
312 	if (!amdgpu_asic_get_reg_state_supported(adev))
313 		return;
314 	sysfs_remove_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
315 }
316 
317 /**
318  * DOC: board_info
319  *
320  * The amdgpu driver provides a sysfs API for giving board related information.
321  * It provides the form factor information in the format
322  *
323  *   type : form factor
324  *
325  * Possible form factor values
326  *
327  * - "cem"		- PCIE CEM card
328  * - "oam"		- Open Compute Accelerator Module
329  * - "unknown"	- Not known
330  *
331  */
332 
333 static ssize_t amdgpu_device_get_board_info(struct device *dev,
334 					    struct device_attribute *attr,
335 					    char *buf)
336 {
337 	struct drm_device *ddev = dev_get_drvdata(dev);
338 	struct amdgpu_device *adev = drm_to_adev(ddev);
339 	enum amdgpu_pkg_type pkg_type = AMDGPU_PKG_TYPE_CEM;
340 	const char *pkg;
341 
342 	if (adev->smuio.funcs && adev->smuio.funcs->get_pkg_type)
343 		pkg_type = adev->smuio.funcs->get_pkg_type(adev);
344 
345 	switch (pkg_type) {
346 	case AMDGPU_PKG_TYPE_CEM:
347 		pkg = "cem";
348 		break;
349 	case AMDGPU_PKG_TYPE_OAM:
350 		pkg = "oam";
351 		break;
352 	default:
353 		pkg = "unknown";
354 		break;
355 	}
356 
357 	return sysfs_emit(buf, "%s : %s\n", "type", pkg);
358 }
359 
360 static DEVICE_ATTR(board_info, 0444, amdgpu_device_get_board_info, NULL);
361 
362 static struct attribute *amdgpu_board_attrs[] = {
363 	&dev_attr_board_info.attr,
364 	NULL,
365 };
366 
367 static umode_t amdgpu_board_attrs_is_visible(struct kobject *kobj,
368 					     struct attribute *attr, int n)
369 {
370 	struct device *dev = kobj_to_dev(kobj);
371 	struct drm_device *ddev = dev_get_drvdata(dev);
372 	struct amdgpu_device *adev = drm_to_adev(ddev);
373 
374 	if (adev->flags & AMD_IS_APU)
375 		return 0;
376 
377 	return attr->mode;
378 }
379 
380 static const struct attribute_group amdgpu_board_attrs_group = {
381 	.attrs = amdgpu_board_attrs,
382 	.is_visible = amdgpu_board_attrs_is_visible
383 };
384 
385 /**
386  * DOC: uma/carveout_options
387  *
388  * This is a read-only file that lists all available UMA allocation
389  * options and their corresponding indices. Example output::
390  *
391  *     $ cat uma/carveout_options
392  *     0: Minimum (512 MB)
393  *     1:  (1 GB)
394  *     2:  (2 GB)
395  *     3:  (4 GB)
396  *     4:  (6 GB)
397  *     5:  (8 GB)
398  *     6:  (12 GB)
399  *     7: Medium (16 GB)
400  *     8:  (24 GB)
401  *     9: High (32 GB)
402  */
403 static ssize_t carveout_options_show(struct device *dev,
404 				     struct device_attribute *attr,
405 				     char *buf)
406 {
407 	struct drm_device *ddev = dev_get_drvdata(dev);
408 	struct amdgpu_device *adev = drm_to_adev(ddev);
409 	struct amdgpu_uma_carveout_info *uma_info = &adev->uma_info;
410 	uint32_t memory_carved;
411 	ssize_t size = 0;
412 
413 	if (!uma_info || !uma_info->num_entries)
414 		return -ENODEV;
415 
416 	for (int i = 0; i < uma_info->num_entries; i++) {
417 		memory_carved = uma_info->entries[i].memory_carved_mb;
418 		if (memory_carved >= SZ_1G/SZ_1M) {
419 			size += sysfs_emit_at(buf, size, "%d: %s (%u GB)\n",
420 					      i,
421 					      uma_info->entries[i].name,
422 					      memory_carved >> 10);
423 		} else {
424 			size += sysfs_emit_at(buf, size, "%d: %s (%u MB)\n",
425 					      i,
426 					      uma_info->entries[i].name,
427 					      memory_carved);
428 		}
429 	}
430 
431 	return size;
432 }
433 static DEVICE_ATTR_RO(carveout_options);
434 
435 /**
436  * DOC: uma/carveout
437  *
438  * This file is both readable and writable. When read, it shows the
439  * index of the current setting. Writing a valid index to this file
440  * allows users to change the UMA carveout size to the selected option
441  * on the next boot.
442  *
443  * The available options and their corresponding indices can be read
444  * from the uma/carveout_options file.
445  */
446 static ssize_t carveout_show(struct device *dev,
447 			     struct device_attribute *attr,
448 			     char *buf)
449 {
450 	struct drm_device *ddev = dev_get_drvdata(dev);
451 	struct amdgpu_device *adev = drm_to_adev(ddev);
452 
453 	return sysfs_emit(buf, "%u\n", adev->uma_info.uma_option_index);
454 }
455 
456 static ssize_t carveout_store(struct device *dev,
457 			      struct device_attribute *attr,
458 			      const char *buf, size_t count)
459 {
460 	struct drm_device *ddev = dev_get_drvdata(dev);
461 	struct amdgpu_device *adev = drm_to_adev(ddev);
462 	struct amdgpu_uma_carveout_info *uma_info = &adev->uma_info;
463 	struct amdgpu_uma_carveout_option *opt;
464 	unsigned long val;
465 	uint8_t flags;
466 	int r;
467 
468 	r = kstrtoul(buf, 10, &val);
469 	if (r)
470 		return r;
471 
472 	if (val >= uma_info->num_entries)
473 		return -EINVAL;
474 
475 	val = array_index_nospec(val, uma_info->num_entries);
476 	opt = &uma_info->entries[val];
477 
478 	if (!(opt->flags & AMDGPU_UMA_FLAG_AUTO) &&
479 	    !(opt->flags & AMDGPU_UMA_FLAG_CUSTOM)) {
480 		drm_err_once(ddev, "Option %lu not supported due to lack of Custom/Auto flag", val);
481 		return -EINVAL;
482 	}
483 
484 	flags = opt->flags;
485 	flags &= ~((flags & AMDGPU_UMA_FLAG_AUTO) >> 1);
486 
487 	guard(mutex)(&uma_info->update_lock);
488 
489 	r = amdgpu_acpi_set_uma_allocation_size(adev, val, flags);
490 	if (r)
491 		return r;
492 
493 	uma_info->uma_option_index = val;
494 
495 	return count;
496 }
497 static DEVICE_ATTR_RW(carveout);
498 
499 static struct attribute *amdgpu_uma_attrs[] = {
500 	&dev_attr_carveout.attr,
501 	&dev_attr_carveout_options.attr,
502 	NULL
503 };
504 
505 const struct attribute_group amdgpu_uma_attr_group = {
506 	.name = "uma",
507 	.attrs = amdgpu_uma_attrs
508 };
509 
510 static void amdgpu_uma_sysfs_init(struct amdgpu_device *adev)
511 {
512 	int rc;
513 
514 	if (!(adev->flags & AMD_IS_APU))
515 		return;
516 
517 	if (!amdgpu_acpi_is_set_uma_allocation_size_supported())
518 		return;
519 
520 	rc = amdgpu_atomfirmware_get_uma_carveout_info(adev, &adev->uma_info);
521 	if (rc) {
522 		drm_dbg(adev_to_drm(adev),
523 			"Failed to parse UMA carveout info from VBIOS: %d\n", rc);
524 		goto out_info;
525 	}
526 
527 	mutex_init(&adev->uma_info.update_lock);
528 
529 	rc = devm_device_add_group(adev->dev, &amdgpu_uma_attr_group);
530 	if (rc) {
531 		drm_dbg(adev_to_drm(adev), "Failed to add UMA carveout sysfs interfaces %d\n", rc);
532 		goto out_attr;
533 	}
534 
535 	return;
536 
537 out_attr:
538 	mutex_destroy(&adev->uma_info.update_lock);
539 out_info:
540 	return;
541 }
542 
543 static void amdgpu_uma_sysfs_fini(struct amdgpu_device *adev)
544 {
545 	struct amdgpu_uma_carveout_info *uma_info = &adev->uma_info;
546 
547 	if (!amdgpu_acpi_is_set_uma_allocation_size_supported())
548 		return;
549 
550 	mutex_destroy(&uma_info->update_lock);
551 	uma_info->num_entries = 0;
552 }
553 
554 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
555 
556 /**
557  * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
558  *
559  * @adev: amdgpu device pointer
560  *
561  * Returns true if the device is a dGPU with ATPX power control,
562  * otherwise return false.
563  */
564 bool amdgpu_device_supports_px(struct amdgpu_device *adev)
565 {
566 	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
567 		return true;
568 	return false;
569 }
570 
571 /**
572  * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
573  *
574  * @adev: amdgpu device pointer
575  *
576  * Returns true if the device is a dGPU with ACPI power control,
577  * otherwise return false.
578  */
579 bool amdgpu_device_supports_boco(struct amdgpu_device *adev)
580 {
581 	if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
582 		return false;
583 
584 	if (adev->has_pr3 ||
585 	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
586 		return true;
587 	return false;
588 }
589 
590 /**
591  * amdgpu_device_supports_baco - Does the device support BACO
592  *
593  * @adev: amdgpu device pointer
594  *
595  * Return:
596  * 1 if the device supports BACO;
597  * 3 if the device supports MACO (only works if BACO is supported)
598  * otherwise return 0.
599  */
600 int amdgpu_device_supports_baco(struct amdgpu_device *adev)
601 {
602 	return amdgpu_asic_supports_baco(adev);
603 }
604 
605 void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
606 {
607 	int bamaco_support;
608 
609 	adev->pm.rpm_mode = AMDGPU_RUNPM_NONE;
610 	bamaco_support = amdgpu_device_supports_baco(adev);
611 
612 	switch (amdgpu_runtime_pm) {
613 	case 2:
614 		if (bamaco_support & MACO_SUPPORT) {
615 			adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO;
616 			dev_info(adev->dev, "Forcing BAMACO for runtime pm\n");
617 		} else if (bamaco_support == BACO_SUPPORT) {
618 			adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
619 			dev_info(adev->dev, "Requested mode BAMACO not available,fallback to use BACO\n");
620 		}
621 		break;
622 	case 1:
623 		if (bamaco_support & BACO_SUPPORT) {
624 			adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
625 			dev_info(adev->dev, "Forcing BACO for runtime pm\n");
626 		}
627 		break;
628 	case -1:
629 	case -2:
630 		if (amdgpu_device_supports_px(adev)) {
631 			/* enable PX as runtime mode */
632 			adev->pm.rpm_mode = AMDGPU_RUNPM_PX;
633 			dev_info(adev->dev, "Using ATPX for runtime pm\n");
634 		} else if (amdgpu_device_supports_boco(adev)) {
635 			/* enable boco as runtime mode */
636 			adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO;
637 			dev_info(adev->dev, "Using BOCO for runtime pm\n");
638 		} else {
639 			if (!bamaco_support)
640 				goto no_runtime_pm;
641 
642 			switch (adev->asic_type) {
643 			case CHIP_VEGA20:
644 			case CHIP_ARCTURUS:
645 				/* BACO are not supported on vega20 and arctrus */
646 				break;
647 			case CHIP_VEGA10:
648 				/* enable BACO as runpm mode if noretry=0 */
649 				if (!adev->gmc.noretry && !amdgpu_passthrough(adev))
650 					adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
651 				break;
652 			default:
653 				/* enable BACO as runpm mode on CI+ */
654 				if (!amdgpu_passthrough(adev))
655 					adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
656 				break;
657 			}
658 
659 			if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) {
660 				if (bamaco_support & MACO_SUPPORT) {
661 					adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO;
662 					dev_info(adev->dev, "Using BAMACO for runtime pm\n");
663 				} else {
664 					dev_info(adev->dev, "Using BACO for runtime pm\n");
665 				}
666 			}
667 		}
668 		break;
669 	case 0:
670 		dev_info(adev->dev, "runtime pm is manually disabled\n");
671 		break;
672 	default:
673 		break;
674 	}
675 
676 no_runtime_pm:
677 	if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE)
678 		dev_info(adev->dev, "Runtime PM not available\n");
679 }
680 /**
681  * amdgpu_device_supports_smart_shift - Is the device dGPU with
682  * smart shift support
683  *
684  * @adev: amdgpu device pointer
685  *
686  * Returns true if the device is a dGPU with Smart Shift support,
687  * otherwise returns false.
688  */
689 bool amdgpu_device_supports_smart_shift(struct amdgpu_device *adev)
690 {
691 	return (amdgpu_device_supports_boco(adev) &&
692 		amdgpu_acpi_is_power_shift_control_supported());
693 }
694 
695 /*
696  * VRAM access helper functions
697  */
698 
699 /**
700  * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
701  *
702  * @adev: amdgpu_device pointer
703  * @pos: offset of the buffer in vram
704  * @buf: virtual address of the buffer in system memory
705  * @size: read/write size, sizeof(@buf) must > @size
706  * @write: true - write to vram, otherwise - read from vram
707  */
708 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
709 			     void *buf, size_t size, bool write)
710 {
711 	unsigned long flags;
712 	uint32_t hi = ~0, tmp = 0;
713 	uint32_t *data = buf;
714 	uint64_t last;
715 	int idx;
716 
717 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
718 		return;
719 
720 	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
721 
722 	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
723 	for (last = pos + size; pos < last; pos += 4) {
724 		tmp = pos >> 31;
725 
726 		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
727 		if (tmp != hi) {
728 			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
729 			hi = tmp;
730 		}
731 		if (write)
732 			WREG32_NO_KIQ(mmMM_DATA, *data++);
733 		else
734 			*data++ = RREG32_NO_KIQ(mmMM_DATA);
735 	}
736 
737 	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
738 	drm_dev_exit(idx);
739 }
740 
741 /**
742  * amdgpu_device_aper_access - access vram by vram aperture
743  *
744  * @adev: amdgpu_device pointer
745  * @pos: offset of the buffer in vram
746  * @buf: virtual address of the buffer in system memory
747  * @size: read/write size, sizeof(@buf) must > @size
748  * @write: true - write to vram, otherwise - read from vram
749  *
750  * The return value means how many bytes have been transferred.
751  */
752 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
753 				 void *buf, size_t size, bool write)
754 {
755 #ifdef CONFIG_64BIT
756 	void __iomem *addr;
757 	size_t count = 0;
758 	uint64_t last;
759 
760 	if (!adev->mman.aper_base_kaddr)
761 		return 0;
762 
763 	last = min(pos + size, adev->gmc.visible_vram_size);
764 	if (last > pos) {
765 		addr = adev->mman.aper_base_kaddr + pos;
766 		count = last - pos;
767 
768 		if (write) {
769 			memcpy_toio(addr, buf, count);
770 			/* Make sure HDP write cache flush happens without any reordering
771 			 * after the system memory contents are sent over PCIe device
772 			 */
773 			mb();
774 			amdgpu_device_flush_hdp(adev, NULL);
775 		} else {
776 			amdgpu_device_invalidate_hdp(adev, NULL);
777 			/* Make sure HDP read cache is invalidated before issuing a read
778 			 * to the PCIe device
779 			 */
780 			mb();
781 			memcpy_fromio(buf, addr, count);
782 		}
783 
784 	}
785 
786 	return count;
787 #else
788 	return 0;
789 #endif
790 }
791 
792 /**
793  * amdgpu_device_vram_access - read/write a buffer in vram
794  *
795  * @adev: amdgpu_device pointer
796  * @pos: offset of the buffer in vram
797  * @buf: virtual address of the buffer in system memory
798  * @size: read/write size, sizeof(@buf) must > @size
799  * @write: true - write to vram, otherwise - read from vram
800  */
801 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
802 			       void *buf, size_t size, bool write)
803 {
804 	size_t count;
805 
806 	/* try to using vram apreature to access vram first */
807 	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
808 	size -= count;
809 	if (size) {
810 		/* using MM to access rest vram */
811 		pos += count;
812 		buf += count;
813 		amdgpu_device_mm_access(adev, pos, buf, size, write);
814 	}
815 }
816 
817 /*
818  * register access helper functions.
819  */
820 
821 /* Check if hw access should be skipped because of hotplug or device error */
822 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
823 {
824 	if (adev->no_hw_access)
825 		return true;
826 
827 #ifdef CONFIG_LOCKDEP
828 	/*
829 	 * This is a bit complicated to understand, so worth a comment. What we assert
830 	 * here is that the GPU reset is not running on another thread in parallel.
831 	 *
832 	 * For this we trylock the read side of the reset semaphore, if that succeeds
833 	 * we know that the reset is not running in parallel.
834 	 *
835 	 * If the trylock fails we assert that we are either already holding the read
836 	 * side of the lock or are the reset thread itself and hold the write side of
837 	 * the lock.
838 	 */
839 	if (in_task()) {
840 		if (down_read_trylock(&adev->reset_domain->sem))
841 			up_read(&adev->reset_domain->sem);
842 		else
843 			lockdep_assert_held(&adev->reset_domain->sem);
844 	}
845 #endif
846 	return false;
847 }
848 
849 /**
850  * amdgpu_device_get_rev_id - query device rev_id
851  *
852  * @adev: amdgpu_device pointer
853  *
854  * Return device rev_id
855  */
856 u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
857 {
858 	return adev->nbio.funcs->get_rev_id(adev);
859 }
860 
861 static uint32_t amdgpu_device_get_vbios_flags(struct amdgpu_device *adev)
862 {
863 	if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
864 		return AMDGPU_VBIOS_SKIP;
865 
866 	if (hweight32(adev->aid_mask) && amdgpu_passthrough(adev))
867 		return AMDGPU_VBIOS_OPTIONAL;
868 
869 	return 0;
870 }
871 
872 /**
873  * amdgpu_device_asic_init - Wrapper for atom asic_init
874  *
875  * @adev: amdgpu_device pointer
876  *
877  * Does any asic specific work and then calls atom asic init.
878  */
879 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
880 {
881 	uint32_t flags;
882 	bool optional;
883 	int ret;
884 
885 	amdgpu_asic_pre_asic_init(adev);
886 	flags = amdgpu_device_get_vbios_flags(adev);
887 	optional = !!(flags & (AMDGPU_VBIOS_OPTIONAL | AMDGPU_VBIOS_SKIP));
888 
889 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
890 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
891 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0) ||
892 	    amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
893 		amdgpu_psp_wait_for_bootloader(adev);
894 		if (optional && !adev->bios)
895 			return 0;
896 
897 		ret = amdgpu_atomfirmware_asic_init(adev, true);
898 		return ret;
899 	} else {
900 		if (optional && !adev->bios)
901 			return 0;
902 
903 		return amdgpu_atom_asic_init(adev->mode_info.atom_context);
904 	}
905 
906 	return 0;
907 }
908 
909 /**
910  * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
911  *
912  * @adev: amdgpu_device pointer
913  *
914  * Allocates a scratch page of VRAM for use by various things in the
915  * driver.
916  */
917 static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
918 {
919 	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
920 				       AMDGPU_GEM_DOMAIN_VRAM |
921 				       AMDGPU_GEM_DOMAIN_GTT,
922 				       &adev->mem_scratch.robj,
923 				       &adev->mem_scratch.gpu_addr,
924 				       (void **)&adev->mem_scratch.ptr);
925 }
926 
927 /**
928  * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
929  *
930  * @adev: amdgpu_device pointer
931  *
932  * Frees the VRAM scratch page.
933  */
934 static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
935 {
936 	amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
937 }
938 
939 /**
940  * amdgpu_device_program_register_sequence - program an array of registers.
941  *
942  * @adev: amdgpu_device pointer
943  * @registers: pointer to the register array
944  * @array_size: size of the register array
945  *
946  * Programs an array or registers with and or masks.
947  * This is a helper for setting golden registers.
948  */
949 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
950 					     const u32 *registers,
951 					     const u32 array_size)
952 {
953 	u32 tmp, reg, and_mask, or_mask;
954 	int i;
955 
956 	if (array_size % 3)
957 		return;
958 
959 	for (i = 0; i < array_size; i += 3) {
960 		reg = registers[i + 0];
961 		and_mask = registers[i + 1];
962 		or_mask = registers[i + 2];
963 
964 		if (and_mask == 0xffffffff) {
965 			tmp = or_mask;
966 		} else {
967 			tmp = RREG32(reg);
968 			tmp &= ~and_mask;
969 			if (adev->family >= AMDGPU_FAMILY_AI)
970 				tmp |= (or_mask & and_mask);
971 			else
972 				tmp |= or_mask;
973 		}
974 		WREG32(reg, tmp);
975 	}
976 }
977 
978 /**
979  * amdgpu_device_pci_config_reset - reset the GPU
980  *
981  * @adev: amdgpu_device pointer
982  *
983  * Resets the GPU using the pci config reset sequence.
984  * Only applicable to asics prior to vega10.
985  */
986 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
987 {
988 	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
989 }
990 
991 /**
992  * amdgpu_device_pci_reset - reset the GPU using generic PCI means
993  *
994  * @adev: amdgpu_device pointer
995  *
996  * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
997  */
998 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
999 {
1000 	return pci_reset_function(adev->pdev);
1001 }
1002 
1003 /*
1004  * amdgpu_device_wb_*()
1005  * Writeback is the method by which the GPU updates special pages in memory
1006  * with the status of certain GPU events (fences, ring pointers,etc.).
1007  */
1008 
1009 /**
1010  * amdgpu_device_wb_fini - Disable Writeback and free memory
1011  *
1012  * @adev: amdgpu_device pointer
1013  *
1014  * Disables Writeback and frees the Writeback memory (all asics).
1015  * Used at driver shutdown.
1016  */
1017 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1018 {
1019 	if (adev->wb.wb_obj) {
1020 		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1021 				      &adev->wb.gpu_addr,
1022 				      (void **)&adev->wb.wb);
1023 		adev->wb.wb_obj = NULL;
1024 	}
1025 }
1026 
1027 /**
1028  * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1029  *
1030  * @adev: amdgpu_device pointer
1031  *
1032  * Initializes writeback and allocates writeback memory (all asics).
1033  * Used at driver startup.
1034  * Returns 0 on success or an -error on failure.
1035  */
1036 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1037 {
1038 	int r;
1039 
1040 	if (adev->wb.wb_obj == NULL) {
1041 		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1042 		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1043 					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1044 					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
1045 					    (void **)&adev->wb.wb);
1046 		if (r) {
1047 			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1048 			return r;
1049 		}
1050 
1051 		adev->wb.num_wb = AMDGPU_MAX_WB;
1052 		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1053 
1054 		/* clear wb memory */
1055 		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1056 	}
1057 
1058 	return 0;
1059 }
1060 
1061 /**
1062  * amdgpu_device_wb_get - Allocate a wb entry
1063  *
1064  * @adev: amdgpu_device pointer
1065  * @wb: wb index
1066  *
1067  * Allocate a wb slot for use by the driver (all asics).
1068  * Returns 0 on success or -EINVAL on failure.
1069  */
1070 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1071 {
1072 	unsigned long flags, offset;
1073 
1074 	spin_lock_irqsave(&adev->wb.lock, flags);
1075 	offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1076 	if (offset < adev->wb.num_wb) {
1077 		__set_bit(offset, adev->wb.used);
1078 		spin_unlock_irqrestore(&adev->wb.lock, flags);
1079 		*wb = offset << 3; /* convert to dw offset */
1080 		return 0;
1081 	} else {
1082 		spin_unlock_irqrestore(&adev->wb.lock, flags);
1083 		return -EINVAL;
1084 	}
1085 }
1086 
1087 /**
1088  * amdgpu_device_wb_free - Free a wb entry
1089  *
1090  * @adev: amdgpu_device pointer
1091  * @wb: wb index
1092  *
1093  * Free a wb slot allocated for use by the driver (all asics)
1094  */
1095 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1096 {
1097 	unsigned long flags;
1098 
1099 	wb >>= 3;
1100 	spin_lock_irqsave(&adev->wb.lock, flags);
1101 	if (wb < adev->wb.num_wb)
1102 		__clear_bit(wb, adev->wb.used);
1103 	spin_unlock_irqrestore(&adev->wb.lock, flags);
1104 }
1105 
1106 /**
1107  * amdgpu_device_resize_fb_bar - try to resize FB BAR
1108  *
1109  * @adev: amdgpu_device pointer
1110  *
1111  * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1112  * to fail, but if any of the BARs is not accessible after the size we abort
1113  * driver loading by returning -ENODEV.
1114  */
1115 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1116 {
1117 	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1118 	struct pci_bus *root;
1119 	struct resource *res;
1120 	int max_size, r;
1121 	unsigned int i;
1122 	u16 cmd;
1123 
1124 	if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
1125 		return 0;
1126 
1127 	/* Bypass for VF */
1128 	if (amdgpu_sriov_vf(adev))
1129 		return 0;
1130 
1131 	if (!amdgpu_rebar)
1132 		return 0;
1133 
1134 	/* resizing on Dell G5 SE platforms causes problems with runtime pm */
1135 	if ((amdgpu_runtime_pm != 0) &&
1136 	    adev->pdev->vendor == PCI_VENDOR_ID_ATI &&
1137 	    adev->pdev->device == 0x731f &&
1138 	    adev->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
1139 		return 0;
1140 
1141 	/* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
1142 	if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
1143 		dev_warn(
1144 			adev->dev,
1145 			"System can't access extended configuration space, please check!!\n");
1146 
1147 	/* skip if the bios has already enabled large BAR */
1148 	if (adev->gmc.real_vram_size &&
1149 	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1150 		return 0;
1151 
1152 	/* Check if the root BUS has 64bit memory resources */
1153 	root = adev->pdev->bus;
1154 	while (root->parent)
1155 		root = root->parent;
1156 
1157 	pci_bus_for_each_resource(root, res, i) {
1158 		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1159 		    res->start > 0x100000000ull)
1160 			break;
1161 	}
1162 
1163 	/* Trying to resize is pointless without a root hub window above 4GB */
1164 	if (!res)
1165 		return 0;
1166 
1167 	/* Limit the BAR size to what is available */
1168 	max_size = pci_rebar_get_max_size(adev->pdev, 0);
1169 	if (max_size < 0)
1170 		return 0;
1171 	rbar_size = min(max_size, rbar_size);
1172 
1173 	/* Disable memory decoding while we change the BAR addresses and size */
1174 	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1175 	pci_write_config_word(adev->pdev, PCI_COMMAND,
1176 			      cmd & ~PCI_COMMAND_MEMORY);
1177 
1178 	/* Tear down doorbell as resizing will release BARs */
1179 	amdgpu_doorbell_fini(adev);
1180 
1181 	r = pci_resize_resource(adev->pdev, 0, rbar_size,
1182 				(adev->asic_type >= CHIP_BONAIRE) ? 1 << 5
1183 								  : 1 << 2);
1184 	if (r == -ENOSPC)
1185 		dev_info(adev->dev,
1186 			 "Not enough PCI address space for a large BAR.");
1187 	else if (r && r != -ENOTSUPP)
1188 		dev_err(adev->dev, "Problem resizing BAR0 (%d).", r);
1189 
1190 	/* When the doorbell or fb BAR isn't available we have no chance of
1191 	 * using the device.
1192 	 */
1193 	r = amdgpu_doorbell_init(adev);
1194 	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1195 		return -ENODEV;
1196 
1197 	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1198 
1199 	return 0;
1200 }
1201 
1202 /*
1203  * GPU helpers function.
1204  */
1205 /**
1206  * amdgpu_device_need_post - check if the hw need post or not
1207  *
1208  * @adev: amdgpu_device pointer
1209  *
1210  * Check if the asic has been initialized (all asics) at driver startup
1211  * or post is needed if  hw reset is performed.
1212  * Returns true if need or false if not.
1213  */
1214 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1215 {
1216 	uint32_t reg, flags;
1217 
1218 	if (amdgpu_sriov_vf(adev))
1219 		return false;
1220 
1221 	flags = amdgpu_device_get_vbios_flags(adev);
1222 	if (flags & AMDGPU_VBIOS_SKIP)
1223 		return false;
1224 	if ((flags & AMDGPU_VBIOS_OPTIONAL) && !adev->bios)
1225 		return false;
1226 
1227 	if (amdgpu_passthrough(adev)) {
1228 		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
1229 		 * some old smc fw still need driver do vPost otherwise gpu hang, while
1230 		 * those smc fw version above 22.15 doesn't have this flaw, so we force
1231 		 * vpost executed for smc version below 22.15
1232 		 */
1233 		if (adev->asic_type == CHIP_FIJI) {
1234 			int err;
1235 			uint32_t fw_ver;
1236 
1237 			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1238 			/* force vPost if error occurred */
1239 			if (err)
1240 				return true;
1241 
1242 			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1243 			release_firmware(adev->pm.fw);
1244 			if (fw_ver < 0x00160e00)
1245 				return true;
1246 		}
1247 	}
1248 
1249 	/* Don't post if we need to reset whole hive on init */
1250 	if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
1251 		return false;
1252 
1253 	if (adev->has_hw_reset) {
1254 		adev->has_hw_reset = false;
1255 		return true;
1256 	}
1257 
1258 	/* bios scratch used on CIK+ */
1259 	if (adev->asic_type >= CHIP_BONAIRE)
1260 		return amdgpu_atombios_scratch_need_asic_init(adev);
1261 
1262 	/* check MEM_SIZE for older asics */
1263 	reg = amdgpu_asic_get_config_memsize(adev);
1264 
1265 	if ((reg != 0) && (reg != 0xffffffff))
1266 		return false;
1267 
1268 	return true;
1269 }
1270 
1271 /*
1272  * Check whether seamless boot is supported.
1273  *
1274  * So far we only support seamless boot on DCE 3.0 or later.
1275  * If users report that it works on older ASICS as well, we may
1276  * loosen this.
1277  */
1278 bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
1279 {
1280 	switch (amdgpu_seamless) {
1281 	case -1:
1282 		break;
1283 	case 1:
1284 		return true;
1285 	case 0:
1286 		return false;
1287 	default:
1288 		dev_err(adev->dev, "Invalid value for amdgpu.seamless: %d\n",
1289 			amdgpu_seamless);
1290 		return false;
1291 	}
1292 
1293 	if (!(adev->flags & AMD_IS_APU))
1294 		return false;
1295 
1296 	if (adev->mman.keep_stolen_vga_memory)
1297 		return false;
1298 
1299 	return amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0);
1300 }
1301 
1302 /*
1303  * Intel hosts such as Rocket Lake, Alder Lake, Raptor Lake and Sapphire Rapids
1304  * don't support dynamic speed switching. Until we have confirmation from Intel
1305  * that a specific host supports it, it's safer that we keep it disabled for all.
1306  *
1307  * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
1308  * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
1309  */
1310 static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device *adev)
1311 {
1312 #if IS_ENABLED(CONFIG_X86)
1313 	struct cpuinfo_x86 *c = &cpu_data(0);
1314 
1315 	/* eGPU change speeds based on USB4 fabric conditions */
1316 	if (dev_is_removable(adev->dev))
1317 		return true;
1318 
1319 	if (c->x86_vendor == X86_VENDOR_INTEL)
1320 		return false;
1321 #endif
1322 	return true;
1323 }
1324 
1325 static bool amdgpu_device_aspm_support_quirk(struct amdgpu_device *adev)
1326 {
1327 	/* Enabling ASPM causes randoms hangs on Tahiti and Oland on Zen4.
1328 	 * It's unclear if this is a platform-specific or GPU-specific issue.
1329 	 * Disable ASPM on SI for the time being.
1330 	 */
1331 	if (adev->family == AMDGPU_FAMILY_SI)
1332 		return true;
1333 
1334 #if IS_ENABLED(CONFIG_X86)
1335 	struct cpuinfo_x86 *c = &cpu_data(0);
1336 
1337 	if (!(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 0, 0) ||
1338 		  amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 0, 1)))
1339 		return false;
1340 
1341 	if (c->x86 == 6 &&
1342 		adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5) {
1343 		switch (c->x86_model) {
1344 		case VFM_MODEL(INTEL_ALDERLAKE):
1345 		case VFM_MODEL(INTEL_ALDERLAKE_L):
1346 		case VFM_MODEL(INTEL_RAPTORLAKE):
1347 		case VFM_MODEL(INTEL_RAPTORLAKE_P):
1348 		case VFM_MODEL(INTEL_RAPTORLAKE_S):
1349 			return true;
1350 		default:
1351 			return false;
1352 		}
1353 	} else {
1354 		return false;
1355 	}
1356 #else
1357 	return false;
1358 #endif
1359 }
1360 
1361 /**
1362  * amdgpu_device_should_use_aspm - check if the device should program ASPM
1363  *
1364  * @adev: amdgpu_device pointer
1365  *
1366  * Confirm whether the module parameter and pcie bridge agree that ASPM should
1367  * be set for this device.
1368  *
1369  * Returns true if it should be used or false if not.
1370  */
1371 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
1372 {
1373 	switch (amdgpu_aspm) {
1374 	case -1:
1375 		break;
1376 	case 0:
1377 		return false;
1378 	case 1:
1379 		return true;
1380 	default:
1381 		return false;
1382 	}
1383 	if (adev->flags & AMD_IS_APU)
1384 		return false;
1385 	if (amdgpu_device_aspm_support_quirk(adev))
1386 		return false;
1387 	return pcie_aspm_enabled(adev->pdev);
1388 }
1389 
1390 /* if we get transitioned to only one device, take VGA back */
1391 /**
1392  * amdgpu_device_vga_set_decode - enable/disable vga decode
1393  *
1394  * @pdev: PCI device pointer
1395  * @state: enable/disable vga decode
1396  *
1397  * Enable/disable vga decode (all asics).
1398  * Returns VGA resource flags.
1399  */
1400 static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1401 		bool state)
1402 {
1403 	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1404 
1405 	amdgpu_asic_set_vga_state(adev, state);
1406 	if (state)
1407 		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1408 		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1409 	else
1410 		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1411 }
1412 
1413 /**
1414  * amdgpu_device_check_block_size - validate the vm block size
1415  *
1416  * @adev: amdgpu_device pointer
1417  *
1418  * Validates the vm block size specified via module parameter.
1419  * The vm block size defines number of bits in page table versus page directory,
1420  * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1421  * page table and the remaining bits are in the page directory.
1422  */
1423 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1424 {
1425 	/* defines number of bits in page table versus page directory,
1426 	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1427 	 * page table and the remaining bits are in the page directory
1428 	 */
1429 	if (amdgpu_vm_block_size == -1)
1430 		return;
1431 
1432 	if (amdgpu_vm_block_size < 9) {
1433 		dev_warn(adev->dev, "VM page table size (%d) too small\n",
1434 			 amdgpu_vm_block_size);
1435 		amdgpu_vm_block_size = -1;
1436 	}
1437 }
1438 
1439 /**
1440  * amdgpu_device_check_vm_size - validate the vm size
1441  *
1442  * @adev: amdgpu_device pointer
1443  *
1444  * Validates the vm size in GB specified via module parameter.
1445  * The VM size is the size of the GPU virtual memory space in GB.
1446  */
1447 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1448 {
1449 	/* no need to check the default value */
1450 	if (amdgpu_vm_size == -1)
1451 		return;
1452 
1453 	if (amdgpu_vm_size < 1) {
1454 		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1455 			 amdgpu_vm_size);
1456 		amdgpu_vm_size = -1;
1457 	}
1458 }
1459 
1460 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1461 {
1462 	struct sysinfo si;
1463 	bool is_os_64 = (sizeof(void *) == 8);
1464 	uint64_t total_memory;
1465 	uint64_t dram_size_seven_GB = 0x1B8000000;
1466 	uint64_t dram_size_three_GB = 0xB8000000;
1467 
1468 	if (amdgpu_smu_memory_pool_size == 0)
1469 		return;
1470 
1471 	if (!is_os_64) {
1472 		dev_warn(adev->dev, "Not 64-bit OS, feature not supported\n");
1473 		goto def_value;
1474 	}
1475 	si_meminfo(&si);
1476 	total_memory = (uint64_t)si.totalram * si.mem_unit;
1477 
1478 	if ((amdgpu_smu_memory_pool_size == 1) ||
1479 		(amdgpu_smu_memory_pool_size == 2)) {
1480 		if (total_memory < dram_size_three_GB)
1481 			goto def_value1;
1482 	} else if ((amdgpu_smu_memory_pool_size == 4) ||
1483 		(amdgpu_smu_memory_pool_size == 8)) {
1484 		if (total_memory < dram_size_seven_GB)
1485 			goto def_value1;
1486 	} else {
1487 		dev_warn(adev->dev, "Smu memory pool size not supported\n");
1488 		goto def_value;
1489 	}
1490 	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1491 
1492 	return;
1493 
1494 def_value1:
1495 	dev_warn(adev->dev, "No enough system memory\n");
1496 def_value:
1497 	adev->pm.smu_prv_buffer_size = 0;
1498 }
1499 
1500 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1501 {
1502 	if (!(adev->flags & AMD_IS_APU) ||
1503 	    adev->asic_type < CHIP_RAVEN)
1504 		return 0;
1505 
1506 	switch (adev->asic_type) {
1507 	case CHIP_RAVEN:
1508 		if (adev->pdev->device == 0x15dd)
1509 			adev->apu_flags |= AMD_APU_IS_RAVEN;
1510 		if (adev->pdev->device == 0x15d8)
1511 			adev->apu_flags |= AMD_APU_IS_PICASSO;
1512 		break;
1513 	case CHIP_RENOIR:
1514 		if ((adev->pdev->device == 0x1636) ||
1515 		    (adev->pdev->device == 0x164c))
1516 			adev->apu_flags |= AMD_APU_IS_RENOIR;
1517 		else
1518 			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1519 		break;
1520 	case CHIP_VANGOGH:
1521 		adev->apu_flags |= AMD_APU_IS_VANGOGH;
1522 		break;
1523 	case CHIP_YELLOW_CARP:
1524 		break;
1525 	case CHIP_CYAN_SKILLFISH:
1526 		if ((adev->pdev->device == 0x13FE) ||
1527 		    (adev->pdev->device == 0x143F))
1528 			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1529 		break;
1530 	default:
1531 		break;
1532 	}
1533 
1534 	return 0;
1535 }
1536 
1537 /**
1538  * amdgpu_device_check_arguments - validate module params
1539  *
1540  * @adev: amdgpu_device pointer
1541  *
1542  * Validates certain module parameters and updates
1543  * the associated values used by the driver (all asics).
1544  */
1545 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1546 {
1547 	int i;
1548 
1549 	if (amdgpu_sched_jobs < 4) {
1550 		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1551 			 amdgpu_sched_jobs);
1552 		amdgpu_sched_jobs = 4;
1553 	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
1554 		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1555 			 amdgpu_sched_jobs);
1556 		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1557 	}
1558 
1559 	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1560 		/* gart size must be greater or equal to 32M */
1561 		dev_warn(adev->dev, "gart size (%d) too small\n",
1562 			 amdgpu_gart_size);
1563 		amdgpu_gart_size = -1;
1564 	}
1565 
1566 	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1567 		/* gtt size must be greater or equal to 32M */
1568 		dev_warn(adev->dev, "gtt size (%d) too small\n",
1569 				 amdgpu_gtt_size);
1570 		amdgpu_gtt_size = -1;
1571 	}
1572 
1573 	/* valid range is between 4 and 9 inclusive */
1574 	if (amdgpu_vm_fragment_size != -1 &&
1575 	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1576 		dev_warn(adev->dev, "valid range is between 4 and 9\n");
1577 		amdgpu_vm_fragment_size = -1;
1578 	}
1579 
1580 	if (amdgpu_sched_hw_submission < 2) {
1581 		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1582 			 amdgpu_sched_hw_submission);
1583 		amdgpu_sched_hw_submission = 2;
1584 	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1585 		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1586 			 amdgpu_sched_hw_submission);
1587 		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1588 	}
1589 
1590 	if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
1591 		dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
1592 		amdgpu_reset_method = -1;
1593 	}
1594 
1595 	amdgpu_device_check_smu_prv_buffer_size(adev);
1596 
1597 	amdgpu_device_check_vm_size(adev);
1598 
1599 	amdgpu_device_check_block_size(adev);
1600 
1601 	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1602 
1603 	for (i = 0; i < MAX_XCP; i++) {
1604 		switch (amdgpu_enforce_isolation) {
1605 		case -1:
1606 		case 0:
1607 		default:
1608 			/* disable */
1609 			adev->enforce_isolation[i] = AMDGPU_ENFORCE_ISOLATION_DISABLE;
1610 			break;
1611 		case 1:
1612 			/* enable */
1613 			adev->enforce_isolation[i] =
1614 				AMDGPU_ENFORCE_ISOLATION_ENABLE;
1615 			break;
1616 		case 2:
1617 			/* enable legacy mode */
1618 			adev->enforce_isolation[i] =
1619 				AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY;
1620 			break;
1621 		case 3:
1622 			/* enable only process isolation without submitting cleaner shader */
1623 			adev->enforce_isolation[i] =
1624 				AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER;
1625 			break;
1626 		}
1627 	}
1628 
1629 	return 0;
1630 }
1631 
1632 /**
1633  * amdgpu_switcheroo_set_state - set switcheroo state
1634  *
1635  * @pdev: pci dev pointer
1636  * @state: vga_switcheroo state
1637  *
1638  * Callback for the switcheroo driver.  Suspends or resumes
1639  * the asics before or after it is powered up using ACPI methods.
1640  */
1641 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1642 					enum vga_switcheroo_state state)
1643 {
1644 	struct drm_device *dev = pci_get_drvdata(pdev);
1645 	int r;
1646 
1647 	if (amdgpu_device_supports_px(drm_to_adev(dev)) &&
1648 	    state == VGA_SWITCHEROO_OFF)
1649 		return;
1650 
1651 	if (state == VGA_SWITCHEROO_ON) {
1652 		pr_info("switched on\n");
1653 		/* don't suspend or resume card normally */
1654 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1655 
1656 		pci_set_power_state(pdev, PCI_D0);
1657 		amdgpu_device_load_pci_state(pdev);
1658 		r = pci_enable_device(pdev);
1659 		if (r)
1660 			dev_warn(&pdev->dev, "pci_enable_device failed (%d)\n",
1661 				 r);
1662 		amdgpu_device_resume(dev, true);
1663 
1664 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
1665 	} else {
1666 		dev_info(&pdev->dev, "switched off\n");
1667 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1668 		amdgpu_device_prepare(dev);
1669 		amdgpu_device_suspend(dev, true);
1670 		amdgpu_device_cache_pci_state(pdev);
1671 		/* Shut down the device */
1672 		pci_disable_device(pdev);
1673 		pci_set_power_state(pdev, PCI_D3cold);
1674 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1675 	}
1676 }
1677 
1678 /**
1679  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1680  *
1681  * @pdev: pci dev pointer
1682  *
1683  * Callback for the switcheroo driver.  Check of the switcheroo
1684  * state can be changed.
1685  * Returns true if the state can be changed, false if not.
1686  */
1687 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1688 {
1689 	struct drm_device *dev = pci_get_drvdata(pdev);
1690 
1691        /*
1692 	* FIXME: open_count is protected by drm_global_mutex but that would lead to
1693 	* locking inversion with the driver load path. And the access here is
1694 	* completely racy anyway. So don't bother with locking for now.
1695 	*/
1696 	return atomic_read(&dev->open_count) == 0;
1697 }
1698 
1699 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1700 	.set_gpu_state = amdgpu_switcheroo_set_state,
1701 	.reprobe = NULL,
1702 	.can_switch = amdgpu_switcheroo_can_switch,
1703 };
1704 
1705 /**
1706  * amdgpu_device_enable_virtual_display - enable virtual display feature
1707  *
1708  * @adev: amdgpu_device pointer
1709  *
1710  * Enabled the virtual display feature if the user has enabled it via
1711  * the module parameter virtual_display.  This feature provides a virtual
1712  * display hardware on headless boards or in virtualized environments.
1713  * This function parses and validates the configuration string specified by
1714  * the user and configures the virtual display configuration (number of
1715  * virtual connectors, crtcs, etc.) specified.
1716  */
1717 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1718 {
1719 	adev->enable_virtual_display = false;
1720 
1721 	if (amdgpu_virtual_display) {
1722 		const char *pci_address_name = pci_name(adev->pdev);
1723 		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1724 
1725 		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1726 		pciaddstr_tmp = pciaddstr;
1727 		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1728 			pciaddname = strsep(&pciaddname_tmp, ",");
1729 			if (!strcmp("all", pciaddname)
1730 			    || !strcmp(pci_address_name, pciaddname)) {
1731 				long num_crtc;
1732 				int res = -1;
1733 
1734 				adev->enable_virtual_display = true;
1735 
1736 				if (pciaddname_tmp)
1737 					res = kstrtol(pciaddname_tmp, 10,
1738 						      &num_crtc);
1739 
1740 				if (!res) {
1741 					if (num_crtc < 1)
1742 						num_crtc = 1;
1743 					if (num_crtc > 6)
1744 						num_crtc = 6;
1745 					adev->mode_info.num_crtc = num_crtc;
1746 				} else {
1747 					adev->mode_info.num_crtc = 1;
1748 				}
1749 				break;
1750 			}
1751 		}
1752 
1753 		dev_info(
1754 			adev->dev,
1755 			"virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1756 			amdgpu_virtual_display, pci_address_name,
1757 			adev->enable_virtual_display, adev->mode_info.num_crtc);
1758 
1759 		kfree(pciaddstr);
1760 	}
1761 }
1762 
1763 void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
1764 {
1765 	if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
1766 		adev->mode_info.num_crtc = 1;
1767 		adev->enable_virtual_display = true;
1768 		dev_info(adev->dev, "virtual_display:%d, num_crtc:%d\n",
1769 			 adev->enable_virtual_display,
1770 			 adev->mode_info.num_crtc);
1771 	}
1772 }
1773 
1774 /**
1775  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1776  *
1777  * @adev: amdgpu_device pointer
1778  *
1779  * Parses the asic configuration parameters specified in the gpu info
1780  * firmware and makes them available to the driver for use in configuring
1781  * the asic.
1782  * Returns 0 on success, -EINVAL on failure.
1783  */
1784 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1785 {
1786 	const char *chip_name;
1787 	int err;
1788 	const struct gpu_info_firmware_header_v1_0 *hdr;
1789 
1790 	adev->firmware.gpu_info_fw = NULL;
1791 
1792 	switch (adev->asic_type) {
1793 	default:
1794 		return 0;
1795 	case CHIP_VEGA10:
1796 		chip_name = "vega10";
1797 		break;
1798 	case CHIP_VEGA12:
1799 		chip_name = "vega12";
1800 		break;
1801 	case CHIP_RAVEN:
1802 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1803 			chip_name = "raven2";
1804 		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1805 			chip_name = "picasso";
1806 		else
1807 			chip_name = "raven";
1808 		break;
1809 	case CHIP_ARCTURUS:
1810 		chip_name = "arcturus";
1811 		break;
1812 	case CHIP_NAVI12:
1813 		if (adev->discovery.bin)
1814 			return 0;
1815 		chip_name = "navi12";
1816 		break;
1817 	case CHIP_CYAN_SKILLFISH:
1818 		if (adev->discovery.bin)
1819 			return 0;
1820 		chip_name = "cyan_skillfish";
1821 		break;
1822 	}
1823 
1824 	err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw,
1825 				   AMDGPU_UCODE_OPTIONAL,
1826 				   "amdgpu/%s_gpu_info.bin", chip_name);
1827 	if (err) {
1828 		dev_err(adev->dev,
1829 			"Failed to get gpu_info firmware \"%s_gpu_info.bin\"\n",
1830 			chip_name);
1831 		goto out;
1832 	}
1833 
1834 	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1835 	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1836 
1837 	switch (hdr->version_major) {
1838 	case 1:
1839 	{
1840 		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1841 			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1842 								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1843 
1844 		/*
1845 		 * Should be dropped when DAL no longer needs it.
1846 		 */
1847 		if (adev->asic_type == CHIP_NAVI12)
1848 			goto parse_soc_bounding_box;
1849 
1850 		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1851 		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1852 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1853 		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
1854 		adev->gfx.config.max_texture_channel_caches =
1855 			le32_to_cpu(gpu_info_fw->gc_num_tccs);
1856 		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1857 		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1858 		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1859 		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
1860 		adev->gfx.config.double_offchip_lds_buf =
1861 			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1862 		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1863 		adev->gfx.cu_info.max_waves_per_simd =
1864 			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1865 		adev->gfx.cu_info.max_scratch_slots_per_cu =
1866 			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1867 		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1868 		if (hdr->version_minor >= 1) {
1869 			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
1870 				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
1871 									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1872 			adev->gfx.config.num_sc_per_sh =
1873 				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
1874 			adev->gfx.config.num_packer_per_sc =
1875 				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
1876 		}
1877 
1878 parse_soc_bounding_box:
1879 		/*
1880 		 * soc bounding box info is not integrated in disocovery table,
1881 		 * we always need to parse it from gpu info firmware if needed.
1882 		 */
1883 		if (hdr->version_minor == 2) {
1884 			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
1885 				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
1886 									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1887 			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
1888 		}
1889 		break;
1890 	}
1891 	default:
1892 		dev_err(adev->dev,
1893 			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1894 		err = -EINVAL;
1895 		goto out;
1896 	}
1897 out:
1898 	return err;
1899 }
1900 
1901 static void amdgpu_uid_init(struct amdgpu_device *adev)
1902 {
1903 	/* Initialize the UID for the device */
1904 	adev->uid_info = kzalloc_obj(struct amdgpu_uid);
1905 	if (!adev->uid_info) {
1906 		dev_warn(adev->dev, "Failed to allocate memory for UID\n");
1907 		return;
1908 	}
1909 	adev->uid_info->adev = adev;
1910 }
1911 
1912 static void amdgpu_uid_fini(struct amdgpu_device *adev)
1913 {
1914 	/* Free the UID memory */
1915 	kfree(adev->uid_info);
1916 	adev->uid_info = NULL;
1917 }
1918 
1919 /**
1920  * amdgpu_device_ip_early_init - run early init for hardware IPs
1921  *
1922  * @adev: amdgpu_device pointer
1923  *
1924  * Early initialization pass for hardware IPs.  The hardware IPs that make
1925  * up each asic are discovered each IP's early_init callback is run.  This
1926  * is the first stage in initializing the asic.
1927  * Returns 0 on success, negative error code on failure.
1928  */
1929 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
1930 {
1931 	struct amdgpu_ip_block *ip_block;
1932 	struct pci_dev *parent;
1933 	bool total, skip_bios;
1934 	uint32_t bios_flags;
1935 	int i, r;
1936 
1937 	amdgpu_device_enable_virtual_display(adev);
1938 
1939 	if (amdgpu_sriov_vf(adev)) {
1940 		r = amdgpu_virt_request_full_gpu(adev, true);
1941 		if (r)
1942 			return r;
1943 
1944 		r = amdgpu_virt_init_critical_region(adev);
1945 		if (r)
1946 			return r;
1947 	}
1948 
1949 	switch (adev->asic_type) {
1950 #ifdef CONFIG_DRM_AMDGPU_SI
1951 	case CHIP_VERDE:
1952 	case CHIP_TAHITI:
1953 	case CHIP_PITCAIRN:
1954 	case CHIP_OLAND:
1955 	case CHIP_HAINAN:
1956 		adev->family = AMDGPU_FAMILY_SI;
1957 		r = si_set_ip_blocks(adev);
1958 		if (r)
1959 			return r;
1960 		break;
1961 #endif
1962 #ifdef CONFIG_DRM_AMDGPU_CIK
1963 	case CHIP_BONAIRE:
1964 	case CHIP_HAWAII:
1965 	case CHIP_KAVERI:
1966 	case CHIP_KABINI:
1967 	case CHIP_MULLINS:
1968 		if (adev->flags & AMD_IS_APU)
1969 			adev->family = AMDGPU_FAMILY_KV;
1970 		else
1971 			adev->family = AMDGPU_FAMILY_CI;
1972 
1973 		r = cik_set_ip_blocks(adev);
1974 		if (r)
1975 			return r;
1976 		break;
1977 #endif
1978 	case CHIP_TOPAZ:
1979 	case CHIP_TONGA:
1980 	case CHIP_FIJI:
1981 	case CHIP_POLARIS10:
1982 	case CHIP_POLARIS11:
1983 	case CHIP_POLARIS12:
1984 	case CHIP_VEGAM:
1985 	case CHIP_CARRIZO:
1986 	case CHIP_STONEY:
1987 		if (adev->flags & AMD_IS_APU)
1988 			adev->family = AMDGPU_FAMILY_CZ;
1989 		else
1990 			adev->family = AMDGPU_FAMILY_VI;
1991 
1992 		r = vi_set_ip_blocks(adev);
1993 		if (r)
1994 			return r;
1995 		break;
1996 	default:
1997 		r = amdgpu_discovery_set_ip_blocks(adev);
1998 		if (r) {
1999 			adev->num_ip_blocks = 0;
2000 			return r;
2001 		}
2002 		break;
2003 	}
2004 
2005 	/* Check for IP version 9.4.3 with A0 hardware */
2006 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
2007 	    !amdgpu_device_get_rev_id(adev)) {
2008 		dev_err(adev->dev, "Unsupported A0 hardware\n");
2009 		return -ENODEV;	/* device unsupported - no device error */
2010 	}
2011 
2012 	if (amdgpu_has_atpx() &&
2013 	    (amdgpu_is_atpx_hybrid() ||
2014 	     amdgpu_has_atpx_dgpu_power_cntl()) &&
2015 	    ((adev->flags & AMD_IS_APU) == 0) &&
2016 	    !dev_is_removable(&adev->pdev->dev))
2017 		adev->flags |= AMD_IS_PX;
2018 
2019 	if (!(adev->flags & AMD_IS_APU)) {
2020 		parent = pcie_find_root_port(adev->pdev);
2021 		adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2022 	}
2023 
2024 	adev->pm.pp_feature = amdgpu_pp_feature_mask;
2025 	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2026 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2027 	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2028 		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2029 	if (!amdgpu_device_pcie_dynamic_switching_supported(adev))
2030 		adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
2031 
2032 	adev->virt.is_xgmi_node_migrate_enabled = false;
2033 	if (amdgpu_sriov_vf(adev)) {
2034 		adev->virt.is_xgmi_node_migrate_enabled =
2035 			amdgpu_ip_version((adev), GC_HWIP, 0) == IP_VERSION(9, 4, 4);
2036 	}
2037 
2038 	total = true;
2039 	for (i = 0; i < adev->num_ip_blocks; i++) {
2040 		ip_block = &adev->ip_blocks[i];
2041 
2042 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2043 			dev_warn(adev->dev, "disabled ip block: %d <%s>\n", i,
2044 				 adev->ip_blocks[i].version->funcs->name);
2045 			adev->ip_blocks[i].status.valid = false;
2046 		} else if (ip_block->version->funcs->early_init) {
2047 			r = ip_block->version->funcs->early_init(ip_block);
2048 			if (r == -ENOENT) {
2049 				adev->ip_blocks[i].status.valid = false;
2050 			} else if (r) {
2051 				dev_err(adev->dev,
2052 					"early_init of IP block <%s> failed %d\n",
2053 					adev->ip_blocks[i].version->funcs->name,
2054 					r);
2055 				total = false;
2056 			} else {
2057 				adev->ip_blocks[i].status.valid = true;
2058 			}
2059 		} else {
2060 			adev->ip_blocks[i].status.valid = true;
2061 		}
2062 		/* get the vbios after the asic_funcs are set up */
2063 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2064 			r = amdgpu_device_parse_gpu_info_fw(adev);
2065 			if (r)
2066 				return r;
2067 
2068 			bios_flags = amdgpu_device_get_vbios_flags(adev);
2069 			skip_bios = !!(bios_flags & AMDGPU_VBIOS_SKIP);
2070 			/* Read BIOS */
2071 			if (!skip_bios) {
2072 				bool optional =
2073 					!!(bios_flags & AMDGPU_VBIOS_OPTIONAL);
2074 				if (!amdgpu_get_bios(adev) && !optional)
2075 					return -EINVAL;
2076 
2077 				if (optional && !adev->bios)
2078 					dev_info(
2079 						adev->dev,
2080 						"VBIOS image optional, proceeding without VBIOS image");
2081 
2082 				if (adev->bios) {
2083 					r = amdgpu_atombios_init(adev);
2084 					if (r) {
2085 						dev_err(adev->dev,
2086 							"amdgpu_atombios_init failed\n");
2087 						amdgpu_vf_error_put(
2088 							adev,
2089 							AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL,
2090 							0, 0);
2091 						return r;
2092 					}
2093 				}
2094 			}
2095 
2096 			/*get pf2vf msg info at it's earliest time*/
2097 			if (amdgpu_sriov_vf(adev))
2098 				amdgpu_virt_init_data_exchange(adev);
2099 
2100 		}
2101 	}
2102 	if (!total)
2103 		return -ENODEV;
2104 
2105 	if (adev->gmc.xgmi.supported)
2106 		amdgpu_xgmi_early_init(adev);
2107 
2108 	if (amdgpu_is_multi_aid(adev))
2109 		amdgpu_uid_init(adev);
2110 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
2111 	if (ip_block->status.valid != false)
2112 		amdgpu_amdkfd_device_probe(adev);
2113 
2114 	adev->cg_flags &= amdgpu_cg_mask;
2115 	adev->pg_flags &= amdgpu_pg_mask;
2116 
2117 	return 0;
2118 }
2119 
2120 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2121 {
2122 	int i, r;
2123 
2124 	for (i = 0; i < adev->num_ip_blocks; i++) {
2125 		if (!adev->ip_blocks[i].status.sw)
2126 			continue;
2127 		if (adev->ip_blocks[i].status.hw)
2128 			continue;
2129 		if (!amdgpu_ip_member_of_hwini(
2130 			    adev, adev->ip_blocks[i].version->type))
2131 			continue;
2132 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2133 		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2134 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2135 			r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
2136 			if (r) {
2137 				dev_err(adev->dev,
2138 					"hw_init of IP block <%s> failed %d\n",
2139 					adev->ip_blocks[i].version->funcs->name,
2140 					r);
2141 				return r;
2142 			}
2143 			adev->ip_blocks[i].status.hw = true;
2144 		}
2145 	}
2146 
2147 	return 0;
2148 }
2149 
2150 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2151 {
2152 	int i, r;
2153 
2154 	for (i = 0; i < adev->num_ip_blocks; i++) {
2155 		if (!adev->ip_blocks[i].status.sw)
2156 			continue;
2157 		if (adev->ip_blocks[i].status.hw)
2158 			continue;
2159 		if (!amdgpu_ip_member_of_hwini(
2160 			    adev, adev->ip_blocks[i].version->type))
2161 			continue;
2162 		r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
2163 		if (r) {
2164 			dev_err(adev->dev,
2165 				"hw_init of IP block <%s> failed %d\n",
2166 				adev->ip_blocks[i].version->funcs->name, r);
2167 			return r;
2168 		}
2169 		adev->ip_blocks[i].status.hw = true;
2170 	}
2171 
2172 	return 0;
2173 }
2174 
2175 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2176 {
2177 	int r = 0;
2178 	int i;
2179 	uint32_t smu_version;
2180 
2181 	if (adev->asic_type >= CHIP_VEGA10) {
2182 		for (i = 0; i < adev->num_ip_blocks; i++) {
2183 			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2184 				continue;
2185 
2186 			if (!amdgpu_ip_member_of_hwini(adev,
2187 						       AMD_IP_BLOCK_TYPE_PSP))
2188 				break;
2189 
2190 			if (!adev->ip_blocks[i].status.sw)
2191 				continue;
2192 
2193 			/* no need to do the fw loading again if already done*/
2194 			if (adev->ip_blocks[i].status.hw == true)
2195 				break;
2196 
2197 			if (amdgpu_in_reset(adev) || adev->in_suspend) {
2198 				r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
2199 				if (r)
2200 					return r;
2201 			} else {
2202 				r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
2203 				if (r) {
2204 					dev_err(adev->dev,
2205 						"hw_init of IP block <%s> failed %d\n",
2206 						adev->ip_blocks[i]
2207 							.version->funcs->name,
2208 						r);
2209 					return r;
2210 				}
2211 				adev->ip_blocks[i].status.hw = true;
2212 			}
2213 			break;
2214 		}
2215 	}
2216 
2217 	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2218 		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2219 
2220 	return r;
2221 }
2222 
2223 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2224 {
2225 	struct drm_sched_init_args args = {
2226 		.ops = &amdgpu_sched_ops,
2227 		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
2228 		.timeout_wq = adev->reset_domain->wq,
2229 		.dev = adev->dev,
2230 	};
2231 	long timeout;
2232 	int r, i;
2233 
2234 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2235 		struct amdgpu_ring *ring = adev->rings[i];
2236 
2237 		/* No need to setup the GPU scheduler for rings that don't need it */
2238 		if (!ring || ring->no_scheduler)
2239 			continue;
2240 
2241 		switch (ring->funcs->type) {
2242 		case AMDGPU_RING_TYPE_GFX:
2243 			timeout = adev->gfx_timeout;
2244 			break;
2245 		case AMDGPU_RING_TYPE_COMPUTE:
2246 			timeout = adev->compute_timeout;
2247 			break;
2248 		case AMDGPU_RING_TYPE_SDMA:
2249 			timeout = adev->sdma_timeout;
2250 			break;
2251 		default:
2252 			timeout = adev->video_timeout;
2253 			break;
2254 		}
2255 
2256 		args.timeout = timeout;
2257 		args.credit_limit = ring->num_hw_submission;
2258 		args.score = ring->sched_score;
2259 		args.name = ring->name;
2260 
2261 		r = drm_sched_init(&ring->sched, &args);
2262 		if (r) {
2263 			dev_err(adev->dev,
2264 				"Failed to create scheduler on ring %s.\n",
2265 				ring->name);
2266 			return r;
2267 		}
2268 		r = amdgpu_uvd_entity_init(adev, ring);
2269 		if (r) {
2270 			dev_err(adev->dev,
2271 				"Failed to create UVD scheduling entity on ring %s.\n",
2272 				ring->name);
2273 			return r;
2274 		}
2275 		r = amdgpu_vce_entity_init(adev, ring);
2276 		if (r) {
2277 			dev_err(adev->dev,
2278 				"Failed to create VCE scheduling entity on ring %s.\n",
2279 				ring->name);
2280 			return r;
2281 		}
2282 	}
2283 
2284 	if (adev->xcp_mgr)
2285 		amdgpu_xcp_update_partition_sched_list(adev);
2286 
2287 	return 0;
2288 }
2289 
2290 
2291 /**
2292  * amdgpu_device_ip_init - run init for hardware IPs
2293  *
2294  * @adev: amdgpu_device pointer
2295  *
2296  * Main initialization pass for hardware IPs.  The list of all the hardware
2297  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2298  * are run.  sw_init initializes the software state associated with each IP
2299  * and hw_init initializes the hardware associated with each IP.
2300  * Returns 0 on success, negative error code on failure.
2301  */
2302 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2303 {
2304 	bool init_badpage;
2305 	int i, r;
2306 
2307 	r = amdgpu_ras_init(adev);
2308 	if (r)
2309 		return r;
2310 
2311 	for (i = 0; i < adev->num_ip_blocks; i++) {
2312 		if (!adev->ip_blocks[i].status.valid)
2313 			continue;
2314 		if (adev->ip_blocks[i].version->funcs->sw_init) {
2315 			r = adev->ip_blocks[i].version->funcs->sw_init(&adev->ip_blocks[i]);
2316 			if (r) {
2317 				dev_err(adev->dev,
2318 					"sw_init of IP block <%s> failed %d\n",
2319 					adev->ip_blocks[i].version->funcs->name,
2320 					r);
2321 				goto init_failed;
2322 			}
2323 		}
2324 		adev->ip_blocks[i].status.sw = true;
2325 
2326 		if (!amdgpu_ip_member_of_hwini(
2327 			    adev, adev->ip_blocks[i].version->type))
2328 			continue;
2329 
2330 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2331 			/* need to do common hw init early so everything is set up for gmc */
2332 			r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
2333 			if (r) {
2334 				dev_err(adev->dev, "hw_init %d failed %d\n", i,
2335 					r);
2336 				goto init_failed;
2337 			}
2338 			adev->ip_blocks[i].status.hw = true;
2339 		} else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2340 			/* need to do gmc hw init early so we can allocate gpu mem */
2341 			/* Try to reserve bad pages early */
2342 			if (amdgpu_sriov_vf(adev))
2343 				amdgpu_virt_exchange_data(adev);
2344 
2345 			r = amdgpu_device_mem_scratch_init(adev);
2346 			if (r) {
2347 				dev_err(adev->dev,
2348 					"amdgpu_mem_scratch_init failed %d\n",
2349 					r);
2350 				goto init_failed;
2351 			}
2352 			r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
2353 			if (r) {
2354 				dev_err(adev->dev, "hw_init %d failed %d\n", i,
2355 					r);
2356 				goto init_failed;
2357 			}
2358 			r = amdgpu_device_wb_init(adev);
2359 			if (r) {
2360 				dev_err(adev->dev,
2361 					"amdgpu_device_wb_init failed %d\n", r);
2362 				goto init_failed;
2363 			}
2364 			adev->ip_blocks[i].status.hw = true;
2365 
2366 			/* right after GMC hw init, we create CSA */
2367 			if (adev->gfx.mcbp) {
2368 				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2369 							       AMDGPU_GEM_DOMAIN_VRAM |
2370 							       AMDGPU_GEM_DOMAIN_GTT,
2371 							       AMDGPU_CSA_SIZE);
2372 				if (r) {
2373 					dev_err(adev->dev,
2374 						"allocate CSA failed %d\n", r);
2375 					goto init_failed;
2376 				}
2377 			}
2378 
2379 			r = amdgpu_seq64_init(adev);
2380 			if (r) {
2381 				dev_err(adev->dev, "allocate seq64 failed %d\n",
2382 					r);
2383 				goto init_failed;
2384 			}
2385 		}
2386 	}
2387 
2388 	if (amdgpu_sriov_vf(adev))
2389 		amdgpu_virt_init_data_exchange(adev);
2390 
2391 	r = amdgpu_ib_pool_init(adev);
2392 	if (r) {
2393 		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2394 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2395 		goto init_failed;
2396 	}
2397 
2398 	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2399 	if (r)
2400 		goto init_failed;
2401 
2402 	r = amdgpu_device_ip_hw_init_phase1(adev);
2403 	if (r)
2404 		goto init_failed;
2405 
2406 	r = amdgpu_device_fw_loading(adev);
2407 	if (r)
2408 		goto init_failed;
2409 
2410 	r = amdgpu_device_ip_hw_init_phase2(adev);
2411 	if (r)
2412 		goto init_failed;
2413 
2414 	/*
2415 	 * retired pages will be loaded from eeprom and reserved here,
2416 	 * it should be called after amdgpu_device_ip_hw_init_phase2  since
2417 	 * for some ASICs the RAS EEPROM code relies on SMU fully functioning
2418 	 * for I2C communication which only true at this point.
2419 	 *
2420 	 * amdgpu_ras_recovery_init may fail, but the upper only cares the
2421 	 * failure from bad gpu situation and stop amdgpu init process
2422 	 * accordingly. For other failed cases, it will still release all
2423 	 * the resource and print error message, rather than returning one
2424 	 * negative value to upper level.
2425 	 *
2426 	 * Note: theoretically, this should be called before all vram allocations
2427 	 * to protect retired page from abusing
2428 	 */
2429 	init_badpage = (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI);
2430 	r = amdgpu_ras_recovery_init(adev, init_badpage);
2431 	if (r)
2432 		goto init_failed;
2433 
2434 	/**
2435 	 * In case of XGMI grab extra reference for reset domain for this device
2436 	 */
2437 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
2438 		if (amdgpu_xgmi_add_device(adev) == 0) {
2439 			if (!amdgpu_sriov_vf(adev)) {
2440 				struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2441 
2442 				if (WARN_ON(!hive)) {
2443 					r = -ENOENT;
2444 					goto init_failed;
2445 				}
2446 
2447 				if (!hive->reset_domain ||
2448 				    !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2449 					r = -ENOENT;
2450 					amdgpu_put_xgmi_hive(hive);
2451 					goto init_failed;
2452 				}
2453 
2454 				/* Drop the early temporary reset domain we created for device */
2455 				amdgpu_reset_put_reset_domain(adev->reset_domain);
2456 				adev->reset_domain = hive->reset_domain;
2457 				amdgpu_put_xgmi_hive(hive);
2458 			}
2459 		}
2460 	}
2461 
2462 	r = amdgpu_device_init_schedulers(adev);
2463 	if (r)
2464 		goto init_failed;
2465 
2466 	amdgpu_ttm_set_buffer_funcs_status(adev, true);
2467 
2468 	/* Don't init kfd if whole hive need to be reset during init */
2469 	if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
2470 		amdgpu_amdkfd_device_init(adev);
2471 	}
2472 
2473 	amdgpu_fru_get_product_info(adev);
2474 
2475 	r = amdgpu_cper_init(adev);
2476 
2477 init_failed:
2478 
2479 	return r;
2480 }
2481 
2482 /**
2483  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2484  *
2485  * @adev: amdgpu_device pointer
2486  *
2487  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2488  * this function before a GPU reset.  If the value is retained after a
2489  * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
2490  */
2491 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2492 {
2493 	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2494 }
2495 
2496 /**
2497  * amdgpu_device_check_vram_lost - check if vram is valid
2498  *
2499  * @adev: amdgpu_device pointer
2500  *
2501  * Checks the reset magic value written to the gart pointer in VRAM.
2502  * The driver calls this after a GPU reset to see if the contents of
2503  * VRAM is lost or now.
2504  * returns true if vram is lost, false if not.
2505  */
2506 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2507 {
2508 	if (memcmp(adev->gart.ptr, adev->reset_magic,
2509 			AMDGPU_RESET_MAGIC_NUM))
2510 		return true;
2511 
2512 	if (!amdgpu_in_reset(adev))
2513 		return false;
2514 
2515 	/*
2516 	 * For all ASICs with baco/mode1 reset, the VRAM is
2517 	 * always assumed to be lost.
2518 	 */
2519 	switch (amdgpu_asic_reset_method(adev)) {
2520 	case AMD_RESET_METHOD_LEGACY:
2521 	case AMD_RESET_METHOD_LINK:
2522 	case AMD_RESET_METHOD_BACO:
2523 	case AMD_RESET_METHOD_MODE1:
2524 		return true;
2525 	default:
2526 		return false;
2527 	}
2528 }
2529 
2530 /**
2531  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2532  *
2533  * @adev: amdgpu_device pointer
2534  * @state: clockgating state (gate or ungate)
2535  *
2536  * The list of all the hardware IPs that make up the asic is walked and the
2537  * set_clockgating_state callbacks are run.
2538  * Late initialization pass enabling clockgating for hardware IPs.
2539  * Fini or suspend, pass disabling clockgating for hardware IPs.
2540  * Returns 0 on success, negative error code on failure.
2541  */
2542 
2543 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2544 			       enum amd_clockgating_state state)
2545 {
2546 	int i, j, r;
2547 
2548 	if (amdgpu_emu_mode == 1)
2549 		return 0;
2550 
2551 	for (j = 0; j < adev->num_ip_blocks; j++) {
2552 		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2553 		if (!adev->ip_blocks[i].status.late_initialized)
2554 			continue;
2555 		if (!adev->ip_blocks[i].version)
2556 			continue;
2557 		/* skip CG for GFX, SDMA on S0ix */
2558 		if (adev->in_s0ix &&
2559 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2560 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2561 			continue;
2562 		/* skip CG for VCE/UVD, it's handled specially */
2563 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2564 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2565 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2566 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2567 		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2568 			/* enable clockgating to save power */
2569 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state(&adev->ip_blocks[i],
2570 										     state);
2571 			if (r) {
2572 				dev_err(adev->dev,
2573 					"set_clockgating_state(gate) of IP block <%s> failed %d\n",
2574 					adev->ip_blocks[i].version->funcs->name,
2575 					r);
2576 				return r;
2577 			}
2578 		}
2579 	}
2580 
2581 	return 0;
2582 }
2583 
2584 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2585 			       enum amd_powergating_state state)
2586 {
2587 	int i, j, r;
2588 
2589 	if (amdgpu_emu_mode == 1)
2590 		return 0;
2591 
2592 	for (j = 0; j < adev->num_ip_blocks; j++) {
2593 		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2594 		if (!adev->ip_blocks[i].status.late_initialized)
2595 			continue;
2596 		if (!adev->ip_blocks[i].version)
2597 			continue;
2598 		/* skip PG for GFX, SDMA on S0ix */
2599 		if (adev->in_s0ix &&
2600 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2601 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2602 			continue;
2603 		/* skip CG for VCE/UVD, it's handled specially */
2604 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2605 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2606 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2607 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2608 		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
2609 			/* enable powergating to save power */
2610 			r = adev->ip_blocks[i].version->funcs->set_powergating_state(&adev->ip_blocks[i],
2611 											state);
2612 			if (r) {
2613 				dev_err(adev->dev,
2614 					"set_powergating_state(gate) of IP block <%s> failed %d\n",
2615 					adev->ip_blocks[i].version->funcs->name,
2616 					r);
2617 				return r;
2618 			}
2619 		}
2620 	}
2621 	return 0;
2622 }
2623 
2624 static int amdgpu_device_enable_mgpu_fan_boost(void)
2625 {
2626 	struct amdgpu_gpu_instance *gpu_ins;
2627 	struct amdgpu_device *adev;
2628 	int i, ret = 0;
2629 
2630 	mutex_lock(&mgpu_info.mutex);
2631 
2632 	/*
2633 	 * MGPU fan boost feature should be enabled
2634 	 * only when there are two or more dGPUs in
2635 	 * the system
2636 	 */
2637 	if (mgpu_info.num_dgpu < 2)
2638 		goto out;
2639 
2640 	for (i = 0; i < mgpu_info.num_dgpu; i++) {
2641 		gpu_ins = &(mgpu_info.gpu_ins[i]);
2642 		adev = gpu_ins->adev;
2643 		if (!(adev->flags & AMD_IS_APU || amdgpu_sriov_multi_vf_mode(adev)) &&
2644 		    !gpu_ins->mgpu_fan_enabled) {
2645 			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2646 			if (ret)
2647 				break;
2648 
2649 			gpu_ins->mgpu_fan_enabled = 1;
2650 		}
2651 	}
2652 
2653 out:
2654 	mutex_unlock(&mgpu_info.mutex);
2655 
2656 	return ret;
2657 }
2658 
2659 /**
2660  * amdgpu_device_ip_late_init - run late init for hardware IPs
2661  *
2662  * @adev: amdgpu_device pointer
2663  *
2664  * Late initialization pass for hardware IPs.  The list of all the hardware
2665  * IPs that make up the asic is walked and the late_init callbacks are run.
2666  * late_init covers any special initialization that an IP requires
2667  * after all of the have been initialized or something that needs to happen
2668  * late in the init process.
2669  * Returns 0 on success, negative error code on failure.
2670  */
2671 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2672 {
2673 	struct amdgpu_gpu_instance *gpu_instance;
2674 	int i = 0, r;
2675 
2676 	for (i = 0; i < adev->num_ip_blocks; i++) {
2677 		if (!adev->ip_blocks[i].status.hw)
2678 			continue;
2679 		if (adev->ip_blocks[i].version->funcs->late_init) {
2680 			r = adev->ip_blocks[i].version->funcs->late_init(&adev->ip_blocks[i]);
2681 			if (r) {
2682 				dev_err(adev->dev,
2683 					"late_init of IP block <%s> failed %d\n",
2684 					adev->ip_blocks[i].version->funcs->name,
2685 					r);
2686 				return r;
2687 			}
2688 		}
2689 		adev->ip_blocks[i].status.late_initialized = true;
2690 	}
2691 
2692 	r = amdgpu_ras_late_init(adev);
2693 	if (r) {
2694 		dev_err(adev->dev, "amdgpu_ras_late_init failed %d", r);
2695 		return r;
2696 	}
2697 
2698 	if (!amdgpu_reset_in_recovery(adev))
2699 		amdgpu_ras_set_error_query_ready(adev, true);
2700 
2701 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2702 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2703 
2704 	amdgpu_device_fill_reset_magic(adev);
2705 
2706 	r = amdgpu_device_enable_mgpu_fan_boost();
2707 	if (r)
2708 		dev_err(adev->dev, "enable mgpu fan boost failed (%d).\n", r);
2709 
2710 	/* For passthrough configuration on arcturus and aldebaran, enable special handling SBR */
2711 	if (amdgpu_passthrough(adev) &&
2712 	    ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
2713 	     adev->asic_type == CHIP_ALDEBARAN))
2714 		amdgpu_dpm_handle_passthrough_sbr(adev, true);
2715 
2716 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
2717 		mutex_lock(&mgpu_info.mutex);
2718 
2719 		/*
2720 		 * Reset device p-state to low as this was booted with high.
2721 		 *
2722 		 * This should be performed only after all devices from the same
2723 		 * hive get initialized.
2724 		 *
2725 		 * However, it's unknown how many device in the hive in advance.
2726 		 * As this is counted one by one during devices initializations.
2727 		 *
2728 		 * So, we wait for all XGMI interlinked devices initialized.
2729 		 * This may bring some delays as those devices may come from
2730 		 * different hives. But that should be OK.
2731 		 */
2732 		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2733 			for (i = 0; i < mgpu_info.num_gpu; i++) {
2734 				gpu_instance = &(mgpu_info.gpu_ins[i]);
2735 				if (gpu_instance->adev->flags & AMD_IS_APU)
2736 					continue;
2737 
2738 				r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2739 						AMDGPU_XGMI_PSTATE_MIN);
2740 				if (r) {
2741 					dev_err(adev->dev,
2742 						"pstate setting failed (%d).\n",
2743 						r);
2744 					break;
2745 				}
2746 			}
2747 		}
2748 
2749 		mutex_unlock(&mgpu_info.mutex);
2750 	}
2751 
2752 	return 0;
2753 }
2754 
2755 static void amdgpu_ip_block_hw_fini(struct amdgpu_ip_block *ip_block)
2756 {
2757 	struct amdgpu_device *adev = ip_block->adev;
2758 	int r;
2759 
2760 	if (!ip_block->version->funcs->hw_fini) {
2761 		dev_err(adev->dev, "hw_fini of IP block <%s> not defined\n",
2762 			ip_block->version->funcs->name);
2763 	} else {
2764 		r = ip_block->version->funcs->hw_fini(ip_block);
2765 		/* XXX handle errors */
2766 		if (r) {
2767 			dev_dbg(adev->dev,
2768 				"hw_fini of IP block <%s> failed %d\n",
2769 				ip_block->version->funcs->name, r);
2770 		}
2771 	}
2772 
2773 	ip_block->status.hw = false;
2774 }
2775 
2776 /**
2777  * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2778  *
2779  * @adev: amdgpu_device pointer
2780  *
2781  * For ASICs need to disable SMC first
2782  */
2783 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2784 {
2785 	int i;
2786 
2787 	if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
2788 		return;
2789 
2790 	for (i = 0; i < adev->num_ip_blocks; i++) {
2791 		if (!adev->ip_blocks[i].status.hw)
2792 			continue;
2793 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2794 			amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]);
2795 			break;
2796 		}
2797 	}
2798 }
2799 
2800 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2801 {
2802 	int i, r;
2803 
2804 	for (i = 0; i < adev->num_ip_blocks; i++) {
2805 		if (!adev->ip_blocks[i].version)
2806 			continue;
2807 		if (!adev->ip_blocks[i].version->funcs->early_fini)
2808 			continue;
2809 
2810 		r = adev->ip_blocks[i].version->funcs->early_fini(&adev->ip_blocks[i]);
2811 		if (r) {
2812 			dev_dbg(adev->dev,
2813 				"early_fini of IP block <%s> failed %d\n",
2814 				adev->ip_blocks[i].version->funcs->name, r);
2815 		}
2816 	}
2817 
2818 	amdgpu_amdkfd_suspend(adev, true);
2819 	amdgpu_amdkfd_teardown_processes(adev);
2820 	amdgpu_userq_suspend(adev);
2821 
2822 	/* Workaround for ASICs need to disable SMC first */
2823 	amdgpu_device_smu_fini_early(adev);
2824 
2825 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2826 		if (!adev->ip_blocks[i].status.hw)
2827 			continue;
2828 
2829 		amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]);
2830 	}
2831 
2832 	if (amdgpu_sriov_vf(adev)) {
2833 		if (amdgpu_virt_release_full_gpu(adev, false))
2834 			dev_err(adev->dev,
2835 				"failed to release exclusive mode on fini\n");
2836 	}
2837 
2838 	/*
2839 	 * Driver reload on the APU can fail due to firmware validation because
2840 	 * the PSP is always running, as it is shared across the whole SoC.
2841 	 * This same issue does not occur on dGPU because it has a mechanism
2842 	 * that checks whether the PSP is running. A solution for those issues
2843 	 * in the APU is to trigger a GPU reset, but this should be done during
2844 	 * the unload phase to avoid adding boot latency and screen flicker.
2845 	 */
2846 	if ((adev->flags & AMD_IS_APU) && !adev->gmc.is_app_apu) {
2847 		r = amdgpu_asic_reset(adev);
2848 		if (r)
2849 			dev_err(adev->dev, "asic reset on %s failed\n", __func__);
2850 	}
2851 
2852 	return 0;
2853 }
2854 
2855 /**
2856  * amdgpu_device_ip_fini - run fini for hardware IPs
2857  *
2858  * @adev: amdgpu_device pointer
2859  *
2860  * Main teardown pass for hardware IPs.  The list of all the hardware
2861  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2862  * are run.  hw_fini tears down the hardware associated with each IP
2863  * and sw_fini tears down any software state associated with each IP.
2864  * Returns 0 on success, negative error code on failure.
2865  */
2866 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2867 {
2868 	int i, r;
2869 
2870 	amdgpu_cper_fini(adev);
2871 
2872 	if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2873 		amdgpu_virt_release_ras_err_handler_data(adev);
2874 
2875 	if (adev->gmc.xgmi.num_physical_nodes > 1)
2876 		amdgpu_xgmi_remove_device(adev);
2877 
2878 	amdgpu_amdkfd_device_fini_sw(adev);
2879 
2880 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2881 		if (!adev->ip_blocks[i].status.sw)
2882 			continue;
2883 
2884 		if (!adev->ip_blocks[i].version)
2885 			continue;
2886 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2887 			amdgpu_ucode_free_bo(adev);
2888 			amdgpu_free_static_csa(&adev->virt.csa_obj);
2889 			amdgpu_device_wb_fini(adev);
2890 			amdgpu_device_mem_scratch_fini(adev);
2891 			amdgpu_ib_pool_fini(adev);
2892 			amdgpu_seq64_fini(adev);
2893 			amdgpu_doorbell_fini(adev);
2894 		}
2895 		if (adev->ip_blocks[i].version->funcs->sw_fini) {
2896 			r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]);
2897 			/* XXX handle errors */
2898 			if (r) {
2899 				dev_dbg(adev->dev,
2900 					"sw_fini of IP block <%s> failed %d\n",
2901 					adev->ip_blocks[i].version->funcs->name,
2902 					r);
2903 			}
2904 		}
2905 		adev->ip_blocks[i].status.sw = false;
2906 		adev->ip_blocks[i].status.valid = false;
2907 	}
2908 
2909 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2910 		if (!adev->ip_blocks[i].status.late_initialized)
2911 			continue;
2912 		if (!adev->ip_blocks[i].version)
2913 			continue;
2914 		if (adev->ip_blocks[i].version->funcs->late_fini)
2915 			adev->ip_blocks[i].version->funcs->late_fini(&adev->ip_blocks[i]);
2916 		adev->ip_blocks[i].status.late_initialized = false;
2917 	}
2918 
2919 	amdgpu_ras_fini(adev);
2920 	amdgpu_uid_fini(adev);
2921 
2922 	return 0;
2923 }
2924 
2925 /**
2926  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2927  *
2928  * @work: work_struct.
2929  */
2930 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2931 {
2932 	struct amdgpu_device *adev =
2933 		container_of(work, struct amdgpu_device, delayed_init_work.work);
2934 	int r;
2935 
2936 	r = amdgpu_ib_ring_tests(adev);
2937 	if (r)
2938 		dev_err(adev->dev, "ib ring test failed (%d).\n", r);
2939 }
2940 
2941 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2942 {
2943 	struct amdgpu_device *adev =
2944 		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2945 
2946 	WARN_ON_ONCE(adev->gfx.gfx_off_state);
2947 	WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2948 
2949 	if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true, 0))
2950 		adev->gfx.gfx_off_state = true;
2951 }
2952 
2953 /**
2954  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2955  *
2956  * @adev: amdgpu_device pointer
2957  *
2958  * Main suspend function for hardware IPs.  The list of all the hardware
2959  * IPs that make up the asic is walked, clockgating is disabled and the
2960  * suspend callbacks are run.  suspend puts the hardware and software state
2961  * in each IP into a state suitable for suspend.
2962  * Returns 0 on success, negative error code on failure.
2963  */
2964 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2965 {
2966 	int i, r, rec;
2967 
2968 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2969 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2970 
2971 	/*
2972 	 * Per PMFW team's suggestion, driver needs to handle gfxoff
2973 	 * and df cstate features disablement for gpu reset(e.g. Mode1Reset)
2974 	 * scenario. Add the missing df cstate disablement here.
2975 	 */
2976 	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
2977 		dev_warn(adev->dev, "Failed to disallow df cstate");
2978 
2979 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2980 		if (!adev->ip_blocks[i].status.valid)
2981 			continue;
2982 
2983 		/* displays are handled separately */
2984 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2985 			continue;
2986 
2987 		r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
2988 		if (r)
2989 			goto unwind;
2990 	}
2991 
2992 	return 0;
2993 unwind:
2994 	rec = amdgpu_device_ip_resume_phase3(adev);
2995 	if (rec)
2996 		dev_err(adev->dev,
2997 			"amdgpu_device_ip_resume_phase3 failed during unwind: %d\n",
2998 			rec);
2999 
3000 	amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW);
3001 
3002 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
3003 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
3004 
3005 	return r;
3006 }
3007 
3008 /**
3009  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
3010  *
3011  * @adev: amdgpu_device pointer
3012  *
3013  * Main suspend function for hardware IPs.  The list of all the hardware
3014  * IPs that make up the asic is walked, clockgating is disabled and the
3015  * suspend callbacks are run.  suspend puts the hardware and software state
3016  * in each IP into a state suitable for suspend.
3017  * Returns 0 on success, negative error code on failure.
3018  */
3019 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
3020 {
3021 	int i, r, rec;
3022 
3023 	if (adev->in_s0ix)
3024 		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
3025 
3026 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3027 		if (!adev->ip_blocks[i].status.valid)
3028 			continue;
3029 		/* displays are handled in phase1 */
3030 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
3031 			continue;
3032 		/* PSP lost connection when err_event_athub occurs */
3033 		if (amdgpu_ras_intr_triggered() &&
3034 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3035 			adev->ip_blocks[i].status.hw = false;
3036 			continue;
3037 		}
3038 
3039 		/* skip unnecessary suspend if we do not initialize them yet */
3040 		if (!amdgpu_ip_member_of_hwini(
3041 			    adev, adev->ip_blocks[i].version->type))
3042 			continue;
3043 
3044 		/* Since we skip suspend for S0i3, we need to cancel the delayed
3045 		 * idle work here as the suspend callback never gets called.
3046 		 */
3047 		if (adev->in_s0ix &&
3048 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX &&
3049 		    amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0))
3050 			cancel_delayed_work_sync(&adev->gfx.idle_work);
3051 		/* skip suspend of gfx/mes and psp for S0ix
3052 		 * gfx is in gfxoff state, so on resume it will exit gfxoff just
3053 		 * like at runtime. PSP is also part of the always on hardware
3054 		 * so no need to suspend it.
3055 		 */
3056 		if (adev->in_s0ix &&
3057 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3058 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3059 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
3060 			continue;
3061 
3062 		/* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
3063 		if (adev->in_s0ix &&
3064 		    (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=
3065 		     IP_VERSION(5, 0, 0)) &&
3066 		    (adev->ip_blocks[i].version->type ==
3067 		     AMD_IP_BLOCK_TYPE_SDMA))
3068 			continue;
3069 
3070 		/* Once swPSP provides the IMU, RLC FW binaries to TOS during cold-boot.
3071 		 * These are in TMR, hence are expected to be reused by PSP-TOS to reload
3072 		 * from this location and RLC Autoload automatically also gets loaded
3073 		 * from here based on PMFW -> PSP message during re-init sequence.
3074 		 * Therefore, the psp suspend & resume should be skipped to avoid destroy
3075 		 * the TMR and reload FWs again for IMU enabled APU ASICs.
3076 		 */
3077 		if (amdgpu_in_reset(adev) &&
3078 		    (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
3079 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3080 			continue;
3081 
3082 		r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
3083 		if (r)
3084 			goto unwind;
3085 
3086 		/* handle putting the SMC in the appropriate state */
3087 		if (!amdgpu_sriov_vf(adev)) {
3088 			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3089 				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3090 				if (r) {
3091 					dev_err(adev->dev,
3092 						"SMC failed to set mp1 state %d, %d\n",
3093 						adev->mp1_state, r);
3094 					goto unwind;
3095 				}
3096 			}
3097 		}
3098 	}
3099 
3100 	return 0;
3101 unwind:
3102 	/* suspend phase 2 = resume phase 1 + resume phase 2 */
3103 	rec = amdgpu_device_ip_resume_phase1(adev);
3104 	if (rec) {
3105 		dev_err(adev->dev,
3106 			"amdgpu_device_ip_resume_phase1 failed during unwind: %d\n",
3107 			rec);
3108 		return r;
3109 	}
3110 
3111 	rec = amdgpu_device_fw_loading(adev);
3112 	if (rec) {
3113 		dev_err(adev->dev,
3114 			"amdgpu_device_fw_loading failed during unwind: %d\n",
3115 			rec);
3116 		return r;
3117 	}
3118 
3119 	rec = amdgpu_device_ip_resume_phase2(adev);
3120 	if (rec) {
3121 		dev_err(adev->dev,
3122 			"amdgpu_device_ip_resume_phase2 failed during unwind: %d\n",
3123 			rec);
3124 		return r;
3125 	}
3126 
3127 	return r;
3128 }
3129 
3130 /**
3131  * amdgpu_device_ip_suspend - run suspend for hardware IPs
3132  *
3133  * @adev: amdgpu_device pointer
3134  *
3135  * Main suspend function for hardware IPs.  The list of all the hardware
3136  * IPs that make up the asic is walked, clockgating is disabled and the
3137  * suspend callbacks are run.  suspend puts the hardware and software state
3138  * in each IP into a state suitable for suspend.
3139  * Returns 0 on success, negative error code on failure.
3140  */
3141 static int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3142 {
3143 	int r;
3144 
3145 	if (amdgpu_sriov_vf(adev)) {
3146 		amdgpu_virt_fini_data_exchange(adev);
3147 		amdgpu_virt_request_full_gpu(adev, false);
3148 	}
3149 
3150 	amdgpu_ttm_set_buffer_funcs_status(adev, false);
3151 
3152 	r = amdgpu_device_ip_suspend_phase1(adev);
3153 	if (r)
3154 		return r;
3155 	r = amdgpu_device_ip_suspend_phase2(adev);
3156 
3157 	if (amdgpu_sriov_vf(adev))
3158 		amdgpu_virt_release_full_gpu(adev, false);
3159 
3160 	return r;
3161 }
3162 
3163 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3164 {
3165 	int i, r;
3166 
3167 	static enum amd_ip_block_type ip_order[] = {
3168 		AMD_IP_BLOCK_TYPE_COMMON,
3169 		AMD_IP_BLOCK_TYPE_GMC,
3170 		AMD_IP_BLOCK_TYPE_PSP,
3171 		AMD_IP_BLOCK_TYPE_IH,
3172 	};
3173 
3174 	for (i = 0; i < adev->num_ip_blocks; i++) {
3175 		int j;
3176 		struct amdgpu_ip_block *block;
3177 
3178 		block = &adev->ip_blocks[i];
3179 		block->status.hw = false;
3180 
3181 		for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3182 
3183 			if (block->version->type != ip_order[j] ||
3184 				!block->status.valid)
3185 				continue;
3186 
3187 			r = block->version->funcs->hw_init(&adev->ip_blocks[i]);
3188 			if (r) {
3189 				dev_err(adev->dev, "RE-INIT-early: %s failed\n",
3190 					 block->version->funcs->name);
3191 				return r;
3192 			}
3193 			block->status.hw = true;
3194 		}
3195 	}
3196 
3197 	return 0;
3198 }
3199 
3200 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3201 {
3202 	struct amdgpu_ip_block *block;
3203 	int i, r = 0;
3204 
3205 	static enum amd_ip_block_type ip_order[] = {
3206 		AMD_IP_BLOCK_TYPE_SMC,
3207 		AMD_IP_BLOCK_TYPE_DCE,
3208 		AMD_IP_BLOCK_TYPE_GFX,
3209 		AMD_IP_BLOCK_TYPE_SDMA,
3210 		AMD_IP_BLOCK_TYPE_MES,
3211 		AMD_IP_BLOCK_TYPE_UVD,
3212 		AMD_IP_BLOCK_TYPE_VCE,
3213 		AMD_IP_BLOCK_TYPE_VCN,
3214 		AMD_IP_BLOCK_TYPE_JPEG
3215 	};
3216 
3217 	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3218 		block = amdgpu_device_ip_get_ip_block(adev, ip_order[i]);
3219 
3220 		if (!block)
3221 			continue;
3222 
3223 		if (block->status.valid && !block->status.hw) {
3224 			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC) {
3225 				r = amdgpu_ip_block_resume(block);
3226 			} else {
3227 				r = block->version->funcs->hw_init(block);
3228 			}
3229 
3230 			if (r) {
3231 				dev_err(adev->dev, "RE-INIT-late: %s failed\n",
3232 					 block->version->funcs->name);
3233 				break;
3234 			}
3235 			block->status.hw = true;
3236 		}
3237 	}
3238 
3239 	return r;
3240 }
3241 
3242 /**
3243  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3244  *
3245  * @adev: amdgpu_device pointer
3246  *
3247  * First resume function for hardware IPs.  The list of all the hardware
3248  * IPs that make up the asic is walked and the resume callbacks are run for
3249  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3250  * after a suspend and updates the software state as necessary.  This
3251  * function is also used for restoring the GPU after a GPU reset.
3252  * Returns 0 on success, negative error code on failure.
3253  */
3254 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3255 {
3256 	int i, r;
3257 
3258 	for (i = 0; i < adev->num_ip_blocks; i++) {
3259 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3260 			continue;
3261 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3262 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3263 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3264 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3265 
3266 			r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
3267 			if (r)
3268 				return r;
3269 		}
3270 	}
3271 
3272 	return 0;
3273 }
3274 
3275 /**
3276  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3277  *
3278  * @adev: amdgpu_device pointer
3279  *
3280  * Second resume function for hardware IPs.  The list of all the hardware
3281  * IPs that make up the asic is walked and the resume callbacks are run for
3282  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
3283  * functional state after a suspend and updates the software state as
3284  * necessary.  This function is also used for restoring the GPU after a GPU
3285  * reset.
3286  * Returns 0 on success, negative error code on failure.
3287  */
3288 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3289 {
3290 	int i, r;
3291 
3292 	for (i = 0; i < adev->num_ip_blocks; i++) {
3293 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3294 			continue;
3295 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3296 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3297 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3298 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE ||
3299 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3300 			continue;
3301 		r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
3302 		if (r)
3303 			return r;
3304 	}
3305 
3306 	return 0;
3307 }
3308 
3309 /**
3310  * amdgpu_device_ip_resume_phase3 - run resume for hardware IPs
3311  *
3312  * @adev: amdgpu_device pointer
3313  *
3314  * Third resume function for hardware IPs.  The list of all the hardware
3315  * IPs that make up the asic is walked and the resume callbacks are run for
3316  * all DCE.  resume puts the hardware into a functional state after a suspend
3317  * and updates the software state as necessary.  This function is also used
3318  * for restoring the GPU after a GPU reset.
3319  *
3320  * Returns 0 on success, negative error code on failure.
3321  */
3322 static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev)
3323 {
3324 	int i, r;
3325 
3326 	for (i = 0; i < adev->num_ip_blocks; i++) {
3327 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3328 			continue;
3329 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
3330 			r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
3331 			if (r)
3332 				return r;
3333 		}
3334 	}
3335 
3336 	return 0;
3337 }
3338 
3339 /**
3340  * amdgpu_device_ip_resume - run resume for hardware IPs
3341  *
3342  * @adev: amdgpu_device pointer
3343  *
3344  * Main resume function for hardware IPs.  The hardware IPs
3345  * are split into two resume functions because they are
3346  * also used in recovering from a GPU reset and some additional
3347  * steps need to be take between them.  In this case (S3/S4) they are
3348  * run sequentially.
3349  * Returns 0 on success, negative error code on failure.
3350  */
3351 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3352 {
3353 	int r;
3354 
3355 	r = amdgpu_device_ip_resume_phase1(adev);
3356 	if (r)
3357 		return r;
3358 
3359 	r = amdgpu_device_fw_loading(adev);
3360 	if (r)
3361 		return r;
3362 
3363 	r = amdgpu_device_ip_resume_phase2(adev);
3364 
3365 	amdgpu_ttm_set_buffer_funcs_status(adev, true);
3366 
3367 	if (r)
3368 		return r;
3369 
3370 	amdgpu_fence_driver_hw_init(adev);
3371 
3372 	r = amdgpu_device_ip_resume_phase3(adev);
3373 
3374 	return r;
3375 }
3376 
3377 /**
3378  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3379  *
3380  * @adev: amdgpu_device pointer
3381  *
3382  * Query the VBIOS data tables to determine if the board supports SR-IOV.
3383  */
3384 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3385 {
3386 	if (amdgpu_sriov_vf(adev)) {
3387 		if (adev->is_atom_fw) {
3388 			if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3389 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3390 		} else {
3391 			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3392 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3393 		}
3394 
3395 		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3396 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3397 	}
3398 }
3399 
3400 /**
3401  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3402  *
3403  * @pdev : pci device context
3404  * @asic_type: AMD asic type
3405  *
3406  * Check if there is DC (new modesetting infrastructre) support for an asic.
3407  * returns true if DC has support, false if not.
3408  */
3409 bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev,
3410 				       enum amd_asic_type asic_type)
3411 {
3412 	switch (asic_type) {
3413 #ifdef CONFIG_DRM_AMDGPU_SI
3414 	case CHIP_HAINAN:
3415 #endif
3416 	case CHIP_TOPAZ:
3417 		/* chips with no display hardware */
3418 		return false;
3419 #if defined(CONFIG_DRM_AMD_DC)
3420 	case CHIP_TAHITI:
3421 	case CHIP_PITCAIRN:
3422 	case CHIP_VERDE:
3423 	case CHIP_OLAND:
3424 		return amdgpu_dc != 0 && IS_ENABLED(CONFIG_DRM_AMD_DC_SI);
3425 	default:
3426 		return amdgpu_dc != 0;
3427 #else
3428 	default:
3429 		if (amdgpu_dc > 0)
3430 			dev_info_once(
3431 				&pdev->dev,
3432 				"Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
3433 		return false;
3434 #endif
3435 	}
3436 }
3437 
3438 /**
3439  * amdgpu_device_has_dc_support - check if dc is supported
3440  *
3441  * @adev: amdgpu_device pointer
3442  *
3443  * Returns true for supported, false for not supported
3444  */
3445 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3446 {
3447 	if (adev->enable_virtual_display ||
3448 	    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3449 		return false;
3450 
3451 	return amdgpu_device_asic_has_dc_support(adev->pdev, adev->asic_type);
3452 }
3453 
3454 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3455 {
3456 	struct amdgpu_device *adev =
3457 		container_of(__work, struct amdgpu_device, xgmi_reset_work);
3458 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3459 
3460 	/* It's a bug to not have a hive within this function */
3461 	if (WARN_ON(!hive))
3462 		return;
3463 
3464 	/*
3465 	 * Use task barrier to synchronize all xgmi reset works across the
3466 	 * hive. task_barrier_enter and task_barrier_exit will block
3467 	 * until all the threads running the xgmi reset works reach
3468 	 * those points. task_barrier_full will do both blocks.
3469 	 */
3470 	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3471 
3472 		task_barrier_enter(&hive->tb);
3473 		adev->asic_reset_res = amdgpu_device_baco_enter(adev);
3474 
3475 		if (adev->asic_reset_res)
3476 			goto fail;
3477 
3478 		task_barrier_exit(&hive->tb);
3479 		adev->asic_reset_res = amdgpu_device_baco_exit(adev);
3480 
3481 		if (adev->asic_reset_res)
3482 			goto fail;
3483 
3484 		amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
3485 	} else {
3486 
3487 		task_barrier_full(&hive->tb);
3488 		adev->asic_reset_res =  amdgpu_asic_reset(adev);
3489 	}
3490 
3491 fail:
3492 	if (adev->asic_reset_res)
3493 		dev_warn(adev->dev,
3494 			 "ASIC reset failed with error, %d for drm dev, %s",
3495 			 adev->asic_reset_res, adev_to_drm(adev)->unique);
3496 	amdgpu_put_xgmi_hive(hive);
3497 }
3498 
3499 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3500 {
3501 	char *input = amdgpu_lockup_timeout;
3502 	char *timeout_setting = NULL;
3503 	int index = 0;
3504 	long timeout;
3505 	int ret = 0;
3506 
3507 	/* By default timeout for all queues is 2 sec */
3508 	adev->gfx_timeout = adev->compute_timeout = adev->sdma_timeout =
3509 		adev->video_timeout = msecs_to_jiffies(2000);
3510 
3511 	if (!strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH))
3512 		return 0;
3513 
3514 	while ((timeout_setting = strsep(&input, ",")) &&
3515 	       strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3516 		ret = kstrtol(timeout_setting, 0, &timeout);
3517 		if (ret)
3518 			return ret;
3519 
3520 		if (timeout == 0) {
3521 			index++;
3522 			continue;
3523 		} else if (timeout < 0) {
3524 			timeout = MAX_SCHEDULE_TIMEOUT;
3525 			dev_warn(adev->dev, "lockup timeout disabled");
3526 			add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3527 		} else {
3528 			timeout = msecs_to_jiffies(timeout);
3529 		}
3530 
3531 		switch (index++) {
3532 		case 0:
3533 			adev->gfx_timeout = timeout;
3534 			break;
3535 		case 1:
3536 			adev->compute_timeout = timeout;
3537 			break;
3538 		case 2:
3539 			adev->sdma_timeout = timeout;
3540 			break;
3541 		case 3:
3542 			adev->video_timeout = timeout;
3543 			break;
3544 		default:
3545 			break;
3546 		}
3547 	}
3548 
3549 	/* When only one value specified apply it to all queues. */
3550 	if (index == 1)
3551 		adev->gfx_timeout = adev->compute_timeout = adev->sdma_timeout =
3552 			adev->video_timeout = timeout;
3553 
3554 	return ret;
3555 }
3556 
3557 /**
3558  * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3559  *
3560  * @adev: amdgpu_device pointer
3561  *
3562  * RAM direct mapped to GPU if IOMMU is not enabled or is pass through mode
3563  */
3564 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3565 {
3566 	struct iommu_domain *domain;
3567 
3568 	domain = iommu_get_domain_for_dev(adev->dev);
3569 	if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3570 		adev->ram_is_direct_mapped = true;
3571 }
3572 
3573 #if defined(CONFIG_HSA_AMD_P2P)
3574 /**
3575  * amdgpu_device_check_iommu_remap - Check if DMA remapping is enabled.
3576  *
3577  * @adev: amdgpu_device pointer
3578  *
3579  * return if IOMMU remapping bar address
3580  */
3581 static bool amdgpu_device_check_iommu_remap(struct amdgpu_device *adev)
3582 {
3583 	struct iommu_domain *domain;
3584 
3585 	domain = iommu_get_domain_for_dev(adev->dev);
3586 	if (domain && (domain->type == IOMMU_DOMAIN_DMA ||
3587 		domain->type ==	IOMMU_DOMAIN_DMA_FQ))
3588 		return true;
3589 
3590 	return false;
3591 }
3592 #endif
3593 
3594 static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
3595 {
3596 	if (amdgpu_mcbp == 1)
3597 		adev->gfx.mcbp = true;
3598 	else if (amdgpu_mcbp == 0)
3599 		adev->gfx.mcbp = false;
3600 
3601 	if (amdgpu_sriov_vf(adev))
3602 		adev->gfx.mcbp = true;
3603 
3604 	if (adev->gfx.mcbp)
3605 		dev_info(adev->dev, "MCBP is enabled\n");
3606 }
3607 
3608 static int amdgpu_device_sys_interface_init(struct amdgpu_device *adev)
3609 {
3610 	int r;
3611 
3612 	r = amdgpu_atombios_sysfs_init(adev);
3613 	if (r)
3614 		drm_err(&adev->ddev,
3615 			"registering atombios sysfs failed (%d).\n", r);
3616 
3617 	r = amdgpu_pm_sysfs_init(adev);
3618 	if (r)
3619 		dev_err(adev->dev, "registering pm sysfs failed (%d).\n", r);
3620 
3621 	r = amdgpu_ucode_sysfs_init(adev);
3622 	if (r) {
3623 		adev->ucode_sysfs_en = false;
3624 		dev_err(adev->dev, "Creating firmware sysfs failed (%d).\n", r);
3625 	} else
3626 		adev->ucode_sysfs_en = true;
3627 
3628 	r = amdgpu_device_attr_sysfs_init(adev);
3629 	if (r)
3630 		dev_err(adev->dev, "Could not create amdgpu device attr\n");
3631 
3632 	r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group);
3633 	if (r)
3634 		dev_err(adev->dev,
3635 			"Could not create amdgpu board attributes\n");
3636 
3637 	amdgpu_fru_sysfs_init(adev);
3638 	amdgpu_reg_state_sysfs_init(adev);
3639 	amdgpu_xcp_sysfs_init(adev);
3640 	amdgpu_uma_sysfs_init(adev);
3641 
3642 	return r;
3643 }
3644 
3645 static void amdgpu_device_sys_interface_fini(struct amdgpu_device *adev)
3646 {
3647 	if (adev->pm.sysfs_initialized)
3648 		amdgpu_pm_sysfs_fini(adev);
3649 	if (adev->ucode_sysfs_en)
3650 		amdgpu_ucode_sysfs_fini(adev);
3651 	amdgpu_device_attr_sysfs_fini(adev);
3652 	amdgpu_fru_sysfs_fini(adev);
3653 
3654 	amdgpu_reg_state_sysfs_fini(adev);
3655 	amdgpu_xcp_sysfs_fini(adev);
3656 	amdgpu_uma_sysfs_fini(adev);
3657 }
3658 
3659 /**
3660  * amdgpu_device_init - initialize the driver
3661  *
3662  * @adev: amdgpu_device pointer
3663  * @flags: driver flags
3664  *
3665  * Initializes the driver info and hw (all asics).
3666  * Returns 0 for success or an error on failure.
3667  * Called at driver startup.
3668  */
3669 int amdgpu_device_init(struct amdgpu_device *adev,
3670 		       uint32_t flags)
3671 {
3672 	struct pci_dev *pdev = adev->pdev;
3673 	int r, i;
3674 	bool px = false;
3675 	u32 max_MBps;
3676 	int tmp;
3677 
3678 	adev->shutdown = false;
3679 	adev->flags = flags;
3680 
3681 	if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3682 		adev->asic_type = amdgpu_force_asic_type;
3683 	else
3684 		adev->asic_type = flags & AMD_ASIC_MASK;
3685 
3686 	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3687 	if (amdgpu_emu_mode == 1)
3688 		adev->usec_timeout *= 10;
3689 	adev->gmc.gart_size = 512 * 1024 * 1024;
3690 	adev->accel_working = false;
3691 	adev->num_rings = 0;
3692 	RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3693 	adev->mman.buffer_funcs = NULL;
3694 	adev->mman.buffer_funcs_ring = NULL;
3695 	adev->vm_manager.vm_pte_funcs = NULL;
3696 	adev->vm_manager.vm_pte_num_scheds = 0;
3697 	adev->gmc.gmc_funcs = NULL;
3698 	adev->harvest_ip_mask = 0x0;
3699 	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3700 	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3701 
3702 	amdgpu_reg_access_init(adev);
3703 
3704 	dev_info(
3705 		adev->dev,
3706 		"initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3707 		amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3708 		pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3709 
3710 	/* mutex initialization are all done here so we
3711 	 * can recall function without having locking issues
3712 	 */
3713 	mutex_init(&adev->firmware.mutex);
3714 	mutex_init(&adev->pm.mutex);
3715 	mutex_init(&adev->gfx.gpu_clock_mutex);
3716 	mutex_init(&adev->srbm_mutex);
3717 	mutex_init(&adev->gfx.pipe_reserve_mutex);
3718 	mutex_init(&adev->gfx.gfx_off_mutex);
3719 	mutex_init(&adev->gfx.partition_mutex);
3720 	mutex_init(&adev->grbm_idx_mutex);
3721 	mutex_init(&adev->mn_lock);
3722 	mutex_init(&adev->virt.vf_errors.lock);
3723 	hash_init(adev->mn_hash);
3724 	mutex_init(&adev->psp.mutex);
3725 	mutex_init(&adev->notifier_lock);
3726 	mutex_init(&adev->pm.stable_pstate_ctx_lock);
3727 	mutex_init(&adev->benchmark_mutex);
3728 	mutex_init(&adev->gfx.reset_sem_mutex);
3729 	/* Initialize the mutex for cleaner shader isolation between GFX and compute processes */
3730 	mutex_init(&adev->enforce_isolation_mutex);
3731 	for (i = 0; i < MAX_XCP; ++i) {
3732 		adev->isolation[i].spearhead = dma_fence_get_stub();
3733 		amdgpu_sync_create(&adev->isolation[i].active);
3734 		amdgpu_sync_create(&adev->isolation[i].prev);
3735 	}
3736 	mutex_init(&adev->gfx.userq_sch_mutex);
3737 	mutex_init(&adev->gfx.workload_profile_mutex);
3738 	mutex_init(&adev->vcn.workload_profile_mutex);
3739 
3740 	amdgpu_device_init_apu_flags(adev);
3741 
3742 	r = amdgpu_device_check_arguments(adev);
3743 	if (r)
3744 		return r;
3745 
3746 	spin_lock_init(&adev->mmio_idx_lock);
3747 	spin_lock_init(&adev->mm_stats.lock);
3748 	spin_lock_init(&adev->virt.rlcg_reg_lock);
3749 	spin_lock_init(&adev->wb.lock);
3750 
3751 	xa_init_flags(&adev->userq_xa, XA_FLAGS_LOCK_IRQ);
3752 
3753 	INIT_LIST_HEAD(&adev->reset_list);
3754 
3755 	INIT_LIST_HEAD(&adev->ras_list);
3756 
3757 	INIT_LIST_HEAD(&adev->pm.od_kobj_list);
3758 
3759 	xa_init(&adev->userq_doorbell_xa);
3760 
3761 	INIT_DELAYED_WORK(&adev->delayed_init_work,
3762 			  amdgpu_device_delayed_init_work_handler);
3763 	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3764 			  amdgpu_device_delay_enable_gfx_off);
3765 	/*
3766 	 * Initialize the enforce_isolation work structures for each XCP
3767 	 * partition.  This work handler is responsible for enforcing shader
3768 	 * isolation on AMD GPUs.  It counts the number of emitted fences for
3769 	 * each GFX and compute ring.  If there are any fences, it schedules
3770 	 * the `enforce_isolation_work` to be run after a delay.  If there are
3771 	 * no fences, it signals the Kernel Fusion Driver (KFD) to resume the
3772 	 * runqueue.
3773 	 */
3774 	for (i = 0; i < MAX_XCP; i++) {
3775 		INIT_DELAYED_WORK(&adev->gfx.enforce_isolation[i].work,
3776 				  amdgpu_gfx_enforce_isolation_handler);
3777 		adev->gfx.enforce_isolation[i].adev = adev;
3778 		adev->gfx.enforce_isolation[i].xcp_id = i;
3779 	}
3780 
3781 	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3782 	INIT_WORK(&adev->userq_reset_work, amdgpu_userq_reset_work);
3783 
3784 	adev->gfx.gfx_off_req_count = 1;
3785 	adev->gfx.gfx_off_residency = 0;
3786 	adev->gfx.gfx_off_entrycount = 0;
3787 	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3788 
3789 	atomic_set(&adev->throttling_logging_enabled, 1);
3790 	/*
3791 	 * If throttling continues, logging will be performed every minute
3792 	 * to avoid log flooding. "-1" is subtracted since the thermal
3793 	 * throttling interrupt comes every second. Thus, the total logging
3794 	 * interval is 59 seconds(retelimited printk interval) + 1(waiting
3795 	 * for throttling interrupt) = 60 seconds.
3796 	 */
3797 	ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3798 
3799 	ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3800 
3801 	/* Registers mapping */
3802 	/* TODO: block userspace mapping of io register */
3803 	if (adev->asic_type >= CHIP_BONAIRE) {
3804 		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3805 		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3806 	} else {
3807 		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3808 		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3809 	}
3810 
3811 	for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3812 		atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3813 
3814 	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3815 	if (!adev->rmmio)
3816 		return -ENOMEM;
3817 
3818 	dev_info(adev->dev, "register mmio base: 0x%08X\n",
3819 		 (uint32_t)adev->rmmio_base);
3820 	dev_info(adev->dev, "register mmio size: %u\n",
3821 		 (unsigned int)adev->rmmio_size);
3822 
3823 	/*
3824 	 * Reset domain needs to be present early, before XGMI hive discovered
3825 	 * (if any) and initialized to use reset sem and in_gpu reset flag
3826 	 * early on during init and before calling to RREG32.
3827 	 */
3828 	adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3829 	if (!adev->reset_domain)
3830 		return -ENOMEM;
3831 
3832 	/* detect hw virtualization here */
3833 	amdgpu_virt_init(adev);
3834 
3835 	amdgpu_device_get_pcie_info(adev);
3836 
3837 	r = amdgpu_device_get_job_timeout_settings(adev);
3838 	if (r) {
3839 		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3840 		return r;
3841 	}
3842 
3843 	amdgpu_device_set_mcbp(adev);
3844 
3845 	/*
3846 	 * By default, use default mode where all blocks are expected to be
3847 	 * initialized. At present a 'swinit' of blocks is required to be
3848 	 * completed before the need for a different level is detected.
3849 	 */
3850 	amdgpu_set_init_level(adev, AMDGPU_INIT_LEVEL_DEFAULT);
3851 	/* early init functions */
3852 	r = amdgpu_device_ip_early_init(adev);
3853 	if (r)
3854 		return r;
3855 
3856 	/*
3857 	 * No need to remove conflicting FBs for non-display class devices.
3858 	 * This prevents the sysfb from being freed accidently.
3859 	 */
3860 	if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA ||
3861 	    (pdev->class >> 8) == PCI_CLASS_DISPLAY_OTHER) {
3862 		/* Get rid of things like offb */
3863 		r = aperture_remove_conflicting_pci_devices(adev->pdev, amdgpu_kms_driver.name);
3864 		if (r)
3865 			return r;
3866 	}
3867 
3868 	/* Enable TMZ based on IP_VERSION */
3869 	amdgpu_gmc_tmz_set(adev);
3870 
3871 	if (amdgpu_sriov_vf(adev) &&
3872 	    amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))
3873 		/* VF MMIO access (except mailbox range) from CPU
3874 		 * will be blocked during sriov runtime
3875 		 */
3876 		adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT;
3877 
3878 	amdgpu_gmc_noretry_set(adev);
3879 	/* Need to get xgmi info early to decide the reset behavior*/
3880 	if (adev->gmc.xgmi.supported) {
3881 		if (adev->gfxhub.funcs &&
3882 		    adev->gfxhub.funcs->get_xgmi_info) {
3883 			r = adev->gfxhub.funcs->get_xgmi_info(adev);
3884 			if (r)
3885 				return r;
3886 		}
3887 	}
3888 
3889 	if (adev->gmc.xgmi.connected_to_cpu) {
3890 		if (adev->mmhub.funcs &&
3891 		    adev->mmhub.funcs->get_xgmi_info) {
3892 			r = adev->mmhub.funcs->get_xgmi_info(adev);
3893 			if (r)
3894 				return r;
3895 		}
3896 	}
3897 
3898 	/* enable PCIE atomic ops */
3899 	if (amdgpu_sriov_vf(adev)) {
3900 		if (adev->virt.fw_reserve.p_pf2vf)
3901 			adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3902 						      adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
3903 				(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3904 	/* APUs w/ gfx9 onwards doesn't reply on PCIe atomics, rather it is a
3905 	 * internal path natively support atomics, set have_atomics_support to true.
3906 	 */
3907 	} else if ((adev->flags & AMD_IS_APU &&
3908 		   amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0)) ||
3909 		   (adev->gmc.xgmi.connected_to_cpu &&
3910 		   amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 1, 0))) {
3911 		adev->have_atomics_support = true;
3912 	} else {
3913 		adev->have_atomics_support =
3914 			!pci_enable_atomic_ops_to_root(adev->pdev,
3915 					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3916 					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3917 	}
3918 
3919 	if (!adev->have_atomics_support)
3920 		dev_info(adev->dev, "PCIE atomic ops is not supported\n");
3921 
3922 	/* doorbell bar mapping and doorbell index init*/
3923 	amdgpu_doorbell_init(adev);
3924 
3925 	if (amdgpu_emu_mode == 1) {
3926 		/* post the asic on emulation mode */
3927 		emu_soc_asic_init(adev);
3928 		goto fence_driver_init;
3929 	}
3930 
3931 	amdgpu_reset_init(adev);
3932 
3933 	/* detect if we are with an SRIOV vbios */
3934 	if (adev->bios)
3935 		amdgpu_device_detect_sriov_bios(adev);
3936 
3937 	/* check if we need to reset the asic
3938 	 *  E.g., driver was not cleanly unloaded previously, etc.
3939 	 */
3940 	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3941 		if (adev->gmc.xgmi.num_physical_nodes) {
3942 			dev_info(adev->dev, "Pending hive reset.\n");
3943 			amdgpu_set_init_level(adev,
3944 					      AMDGPU_INIT_LEVEL_MINIMAL_XGMI);
3945 		} else {
3946 				tmp = amdgpu_reset_method;
3947 				/* It should do a default reset when loading or reloading the driver,
3948 				 * regardless of the module parameter reset_method.
3949 				 */
3950 				amdgpu_reset_method = AMD_RESET_METHOD_NONE;
3951 				r = amdgpu_asic_reset(adev);
3952 				amdgpu_reset_method = tmp;
3953 		}
3954 
3955 		if (r) {
3956 		  dev_err(adev->dev, "asic reset on init failed\n");
3957 		  goto failed;
3958 		}
3959 	}
3960 
3961 	/* Post card if necessary */
3962 	if (amdgpu_device_need_post(adev)) {
3963 		if (!adev->bios) {
3964 			dev_err(adev->dev, "no vBIOS found\n");
3965 			r = -EINVAL;
3966 			goto failed;
3967 		}
3968 		dev_info(adev->dev, "GPU posting now...\n");
3969 		r = amdgpu_device_asic_init(adev);
3970 		if (r) {
3971 			dev_err(adev->dev, "gpu post error!\n");
3972 			goto failed;
3973 		}
3974 	}
3975 
3976 	if (adev->bios) {
3977 		if (adev->is_atom_fw) {
3978 			/* Initialize clocks */
3979 			r = amdgpu_atomfirmware_get_clock_info(adev);
3980 			if (r) {
3981 				dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3982 				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3983 				goto failed;
3984 			}
3985 		} else {
3986 			/* Initialize clocks */
3987 			r = amdgpu_atombios_get_clock_info(adev);
3988 			if (r) {
3989 				dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3990 				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3991 				goto failed;
3992 			}
3993 			/* init i2c buses */
3994 			amdgpu_i2c_init(adev);
3995 		}
3996 	}
3997 
3998 fence_driver_init:
3999 	/* Fence driver */
4000 	r = amdgpu_fence_driver_sw_init(adev);
4001 	if (r) {
4002 		dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
4003 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
4004 		goto failed;
4005 	}
4006 
4007 	/* init the mode config */
4008 	drm_mode_config_init(adev_to_drm(adev));
4009 
4010 	r = amdgpu_device_ip_init(adev);
4011 	if (r) {
4012 		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
4013 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
4014 		goto release_ras_con;
4015 	}
4016 
4017 	amdgpu_fence_driver_hw_init(adev);
4018 
4019 	dev_info(adev->dev,
4020 		"SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
4021 			adev->gfx.config.max_shader_engines,
4022 			adev->gfx.config.max_sh_per_se,
4023 			adev->gfx.config.max_cu_per_sh,
4024 			adev->gfx.cu_info.number);
4025 
4026 	adev->accel_working = true;
4027 
4028 	amdgpu_vm_check_compute_bug(adev);
4029 
4030 	/* Initialize the buffer migration limit. */
4031 	if (amdgpu_moverate >= 0)
4032 		max_MBps = amdgpu_moverate;
4033 	else
4034 		max_MBps = 8; /* Allow 8 MB/s. */
4035 	/* Get a log2 for easy divisions. */
4036 	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
4037 
4038 	/*
4039 	 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
4040 	 * Otherwise the mgpu fan boost feature will be skipped due to the
4041 	 * gpu instance is counted less.
4042 	 */
4043 	amdgpu_register_gpu_instance(adev);
4044 
4045 	/* enable clockgating, etc. after ib tests, etc. since some blocks require
4046 	 * explicit gating rather than handling it automatically.
4047 	 */
4048 	if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
4049 		r = amdgpu_device_ip_late_init(adev);
4050 		if (r) {
4051 			dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
4052 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
4053 			goto release_ras_con;
4054 		}
4055 		/* must succeed. */
4056 		amdgpu_ras_resume(adev);
4057 		queue_delayed_work(system_wq, &adev->delayed_init_work,
4058 				   msecs_to_jiffies(AMDGPU_RESUME_MS));
4059 	}
4060 
4061 	if (amdgpu_sriov_vf(adev)) {
4062 		amdgpu_virt_release_full_gpu(adev, true);
4063 		flush_delayed_work(&adev->delayed_init_work);
4064 	}
4065 
4066 	/* Don't init kfd if whole hive need to be reset during init */
4067 	if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
4068 		kgd2kfd_init_zone_device(adev);
4069 		kfd_update_svm_support_properties(adev);
4070 	}
4071 
4072 	if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
4073 		amdgpu_xgmi_reset_on_init(adev);
4074 
4075 	/*
4076 	 * Place those sysfs registering after `late_init`. As some of those
4077 	 * operations performed in `late_init` might affect the sysfs
4078 	 * interfaces creating.
4079 	 */
4080 	r = amdgpu_device_sys_interface_init(adev);
4081 
4082 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
4083 		r = amdgpu_pmu_init(adev);
4084 	if (r)
4085 		dev_err(adev->dev, "amdgpu_pmu_init failed\n");
4086 
4087 	/* Have stored pci confspace at hand for restore in sudden PCI error */
4088 	if (amdgpu_device_cache_pci_state(adev->pdev))
4089 		pci_restore_state(pdev);
4090 
4091 	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
4092 	/* this will fail for cards that aren't VGA class devices, just
4093 	 * ignore it
4094 	 */
4095 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4096 		vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
4097 
4098 	px = amdgpu_device_supports_px(adev);
4099 
4100 	if (px || (!dev_is_removable(&adev->pdev->dev) &&
4101 				apple_gmux_detect(NULL, NULL)))
4102 		vga_switcheroo_register_client(adev->pdev,
4103 					       &amdgpu_switcheroo_ops, px);
4104 
4105 	if (px)
4106 		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
4107 
4108 	amdgpu_device_check_iommu_direct_map(adev);
4109 
4110 	adev->pm_nb.notifier_call = amdgpu_device_pm_notifier;
4111 	r = register_pm_notifier(&adev->pm_nb);
4112 	if (r)
4113 		goto failed;
4114 
4115 	return 0;
4116 
4117 release_ras_con:
4118 	if (amdgpu_sriov_vf(adev))
4119 		amdgpu_virt_release_full_gpu(adev, true);
4120 
4121 	/* failed in exclusive mode due to timeout */
4122 	if (amdgpu_sriov_vf(adev) &&
4123 		!amdgpu_sriov_runtime(adev) &&
4124 		amdgpu_virt_mmio_blocked(adev) &&
4125 		!amdgpu_virt_wait_reset(adev)) {
4126 		dev_err(adev->dev, "VF exclusive mode timeout\n");
4127 		/* Don't send request since VF is inactive. */
4128 		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
4129 		adev->virt.ops = NULL;
4130 		r = -EAGAIN;
4131 	}
4132 	amdgpu_release_ras_context(adev);
4133 
4134 failed:
4135 	amdgpu_vf_error_trans_all(adev);
4136 
4137 	return r;
4138 }
4139 
4140 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
4141 {
4142 
4143 	/* Clear all CPU mappings pointing to this device */
4144 	unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
4145 
4146 	/* Unmap all mapped bars - Doorbell, registers and VRAM */
4147 	amdgpu_doorbell_fini(adev);
4148 
4149 	iounmap(adev->rmmio);
4150 	adev->rmmio = NULL;
4151 	if (adev->mman.aper_base_kaddr)
4152 		iounmap(adev->mman.aper_base_kaddr);
4153 	adev->mman.aper_base_kaddr = NULL;
4154 
4155 	/* Memory manager related */
4156 	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
4157 		arch_phys_wc_del(adev->gmc.vram_mtrr);
4158 		arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
4159 	}
4160 }
4161 
4162 /**
4163  * amdgpu_device_fini_hw - tear down the driver
4164  *
4165  * @adev: amdgpu_device pointer
4166  *
4167  * Tear down the driver info (all asics).
4168  * Called at driver shutdown.
4169  */
4170 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
4171 {
4172 	dev_info(adev->dev, "finishing device.\n");
4173 	flush_delayed_work(&adev->delayed_init_work);
4174 
4175 	if (adev->mman.initialized)
4176 		drain_workqueue(adev->mman.bdev.wq);
4177 	adev->shutdown = true;
4178 
4179 	unregister_pm_notifier(&adev->pm_nb);
4180 
4181 	/* make sure IB test finished before entering exclusive mode
4182 	 * to avoid preemption on IB test
4183 	 */
4184 	if (amdgpu_sriov_vf(adev)) {
4185 		amdgpu_virt_request_full_gpu(adev, false);
4186 		amdgpu_virt_fini_data_exchange(adev);
4187 	}
4188 
4189 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
4190 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
4191 
4192 	/* disable all interrupts */
4193 	amdgpu_irq_disable_all(adev);
4194 	if (adev->mode_info.mode_config_initialized) {
4195 		if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
4196 			drm_helper_force_disable_all(adev_to_drm(adev));
4197 		else
4198 			drm_atomic_helper_shutdown(adev_to_drm(adev));
4199 	}
4200 	amdgpu_fence_driver_hw_fini(adev);
4201 
4202 	amdgpu_device_sys_interface_fini(adev);
4203 
4204 	/* disable ras feature must before hw fini */
4205 	amdgpu_ras_pre_fini(adev);
4206 
4207 	amdgpu_ttm_set_buffer_funcs_status(adev, false);
4208 
4209 	/*
4210 	 * device went through surprise hotplug; we need to destroy topology
4211 	 * before ip_fini_early to prevent kfd locking refcount issues by calling
4212 	 * amdgpu_amdkfd_suspend()
4213 	 */
4214 	if (pci_dev_is_disconnected(adev->pdev))
4215 		amdgpu_amdkfd_device_fini_sw(adev);
4216 
4217 	amdgpu_device_ip_fini_early(adev);
4218 
4219 	amdgpu_irq_fini_hw(adev);
4220 
4221 	if (adev->mman.initialized)
4222 		ttm_device_clear_dma_mappings(&adev->mman.bdev);
4223 
4224 	amdgpu_gart_dummy_page_fini(adev);
4225 
4226 	if (pci_dev_is_disconnected(adev->pdev))
4227 		amdgpu_device_unmap_mmio(adev);
4228 
4229 }
4230 
4231 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4232 {
4233 	int i, idx;
4234 	bool px;
4235 
4236 	amdgpu_device_ip_fini(adev);
4237 	amdgpu_fence_driver_sw_fini(adev);
4238 	amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
4239 	adev->accel_working = false;
4240 	dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4241 	for (i = 0; i < MAX_XCP; ++i) {
4242 		dma_fence_put(adev->isolation[i].spearhead);
4243 		amdgpu_sync_free(&adev->isolation[i].active);
4244 		amdgpu_sync_free(&adev->isolation[i].prev);
4245 	}
4246 
4247 	amdgpu_reset_fini(adev);
4248 
4249 	/* free i2c buses */
4250 	amdgpu_i2c_fini(adev);
4251 
4252 	if (adev->bios) {
4253 		if (amdgpu_emu_mode != 1)
4254 			amdgpu_atombios_fini(adev);
4255 		amdgpu_bios_release(adev);
4256 	}
4257 
4258 	kfree(adev->fru_info);
4259 	adev->fru_info = NULL;
4260 
4261 	kfree(adev->xcp_mgr);
4262 	adev->xcp_mgr = NULL;
4263 
4264 	px = amdgpu_device_supports_px(adev);
4265 
4266 	if (px || (!dev_is_removable(&adev->pdev->dev) &&
4267 				apple_gmux_detect(NULL, NULL)))
4268 		vga_switcheroo_unregister_client(adev->pdev);
4269 
4270 	if (px)
4271 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
4272 
4273 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4274 		vga_client_unregister(adev->pdev);
4275 
4276 	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4277 
4278 		iounmap(adev->rmmio);
4279 		adev->rmmio = NULL;
4280 		drm_dev_exit(idx);
4281 	}
4282 
4283 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
4284 		amdgpu_pmu_fini(adev);
4285 	if (adev->discovery.bin)
4286 		amdgpu_discovery_fini(adev);
4287 
4288 	amdgpu_reset_put_reset_domain(adev->reset_domain);
4289 	adev->reset_domain = NULL;
4290 
4291 	kfree(adev->pci_state);
4292 	kfree(adev->pcie_reset_ctx.swds_pcistate);
4293 	kfree(adev->pcie_reset_ctx.swus_pcistate);
4294 }
4295 
4296 /**
4297  * amdgpu_device_evict_resources - evict device resources
4298  * @adev: amdgpu device object
4299  *
4300  * Evicts all ttm device resources(vram BOs, gart table) from the lru list
4301  * of the vram memory type. Mainly used for evicting device resources
4302  * at suspend time.
4303  *
4304  */
4305 static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
4306 {
4307 	int ret;
4308 
4309 	/* No need to evict vram on APUs unless going to S4 */
4310 	if (!adev->in_s4 && (adev->flags & AMD_IS_APU))
4311 		return 0;
4312 
4313 	/* No need to evict when going to S5 through S4 callbacks */
4314 	if (system_state == SYSTEM_POWER_OFF)
4315 		return 0;
4316 
4317 	ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4318 	if (ret) {
4319 		dev_warn(adev->dev, "evicting device resources failed\n");
4320 		return ret;
4321 	}
4322 
4323 	if (adev->in_s4) {
4324 		ret = ttm_device_prepare_hibernation(&adev->mman.bdev);
4325 		if (ret)
4326 			dev_err(adev->dev, "prepare hibernation failed, %d\n", ret);
4327 	}
4328 	return ret;
4329 }
4330 
4331 /*
4332  * Suspend & resume.
4333  */
4334 /**
4335  * amdgpu_device_pm_notifier - Notification block for Suspend/Hibernate events
4336  * @nb: notifier block
4337  * @mode: suspend mode
4338  * @data: data
4339  *
4340  * This function is called when the system is about to suspend or hibernate.
4341  * It is used to set the appropriate flags so that eviction can be optimized
4342  * in the pm prepare callback.
4343  */
4344 static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
4345 				     void *data)
4346 {
4347 	struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, pm_nb);
4348 
4349 	switch (mode) {
4350 	case PM_HIBERNATION_PREPARE:
4351 		adev->in_s4 = true;
4352 		break;
4353 	case PM_POST_HIBERNATION:
4354 		adev->in_s4 = false;
4355 		break;
4356 	}
4357 
4358 	return NOTIFY_DONE;
4359 }
4360 
4361 /**
4362  * amdgpu_device_prepare - prepare for device suspend
4363  *
4364  * @dev: drm dev pointer
4365  *
4366  * Prepare to put the hw in the suspend state (all asics).
4367  * Returns 0 for success or an error on failure.
4368  * Called at driver suspend.
4369  */
4370 int amdgpu_device_prepare(struct drm_device *dev)
4371 {
4372 	struct amdgpu_device *adev = drm_to_adev(dev);
4373 	int i, r;
4374 
4375 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4376 		return 0;
4377 
4378 	/* Evict the majority of BOs before starting suspend sequence */
4379 	r = amdgpu_device_evict_resources(adev);
4380 	if (r)
4381 		return r;
4382 
4383 	flush_delayed_work(&adev->gfx.gfx_off_delay_work);
4384 
4385 	for (i = 0; i < adev->num_ip_blocks; i++) {
4386 		if (!adev->ip_blocks[i].status.valid)
4387 			continue;
4388 		if (!adev->ip_blocks[i].version->funcs->prepare_suspend)
4389 			continue;
4390 		r = adev->ip_blocks[i].version->funcs->prepare_suspend(&adev->ip_blocks[i]);
4391 		if (r)
4392 			return r;
4393 	}
4394 
4395 	return 0;
4396 }
4397 
4398 /**
4399  * amdgpu_device_complete - complete power state transition
4400  *
4401  * @dev: drm dev pointer
4402  *
4403  * Undo the changes from amdgpu_device_prepare. This will be
4404  * called on all resume transitions, including those that failed.
4405  */
4406 void amdgpu_device_complete(struct drm_device *dev)
4407 {
4408 	struct amdgpu_device *adev = drm_to_adev(dev);
4409 	int i;
4410 
4411 	for (i = 0; i < adev->num_ip_blocks; i++) {
4412 		if (!adev->ip_blocks[i].status.valid)
4413 			continue;
4414 		if (!adev->ip_blocks[i].version->funcs->complete)
4415 			continue;
4416 		adev->ip_blocks[i].version->funcs->complete(&adev->ip_blocks[i]);
4417 	}
4418 }
4419 
4420 /**
4421  * amdgpu_device_suspend - initiate device suspend
4422  *
4423  * @dev: drm dev pointer
4424  * @notify_clients: notify in-kernel DRM clients
4425  *
4426  * Puts the hw in the suspend state (all asics).
4427  * Returns 0 for success or an error on failure.
4428  * Called at driver suspend.
4429  */
4430 int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
4431 {
4432 	struct amdgpu_device *adev = drm_to_adev(dev);
4433 	int r, rec;
4434 
4435 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4436 		return 0;
4437 
4438 	adev->in_suspend = true;
4439 
4440 	if (amdgpu_sriov_vf(adev)) {
4441 		if (!adev->in_runpm)
4442 			amdgpu_amdkfd_suspend_process(adev);
4443 		amdgpu_virt_fini_data_exchange(adev);
4444 		r = amdgpu_virt_request_full_gpu(adev, false);
4445 		if (r)
4446 			return r;
4447 	}
4448 
4449 	r = amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D3);
4450 	if (r)
4451 		goto unwind_sriov;
4452 
4453 	if (notify_clients)
4454 		drm_client_dev_suspend(adev_to_drm(adev));
4455 
4456 	cancel_delayed_work_sync(&adev->delayed_init_work);
4457 
4458 	amdgpu_ras_suspend(adev);
4459 
4460 	r = amdgpu_device_ip_suspend_phase1(adev);
4461 	if (r)
4462 		goto unwind_smartshift;
4463 
4464 	amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
4465 	r = amdgpu_userq_suspend(adev);
4466 	if (r)
4467 		goto unwind_ip_phase1;
4468 
4469 	r = amdgpu_device_evict_resources(adev);
4470 	if (r)
4471 		goto unwind_userq;
4472 
4473 	amdgpu_ttm_set_buffer_funcs_status(adev, false);
4474 
4475 	amdgpu_fence_driver_hw_fini(adev);
4476 
4477 	r = amdgpu_device_ip_suspend_phase2(adev);
4478 	if (r)
4479 		goto unwind_evict;
4480 
4481 	if (amdgpu_sriov_vf(adev))
4482 		amdgpu_virt_release_full_gpu(adev, false);
4483 
4484 	return 0;
4485 
4486 unwind_evict:
4487 	amdgpu_ttm_set_buffer_funcs_status(adev, true);
4488 	amdgpu_fence_driver_hw_init(adev);
4489 
4490 unwind_userq:
4491 	rec = amdgpu_userq_resume(adev);
4492 	if (rec) {
4493 		dev_warn(adev->dev, "failed to re-initialize user queues: %d\n", rec);
4494 		return r;
4495 	}
4496 	rec = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
4497 	if (rec) {
4498 		dev_warn(adev->dev, "failed to re-initialize kfd: %d\n", rec);
4499 		return r;
4500 	}
4501 
4502 unwind_ip_phase1:
4503 	/* suspend phase 1 = resume phase 3 */
4504 	rec = amdgpu_device_ip_resume_phase3(adev);
4505 	if (rec) {
4506 		dev_warn(adev->dev, "failed to re-initialize IPs phase1: %d\n", rec);
4507 		return r;
4508 	}
4509 
4510 unwind_smartshift:
4511 	rec = amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D0);
4512 	if (rec) {
4513 		dev_warn(adev->dev, "failed to re-update smart shift: %d\n", rec);
4514 		return r;
4515 	}
4516 
4517 	if (notify_clients)
4518 		drm_client_dev_resume(adev_to_drm(adev));
4519 
4520 	amdgpu_ras_resume(adev);
4521 
4522 unwind_sriov:
4523 	if (amdgpu_sriov_vf(adev)) {
4524 		rec = amdgpu_virt_request_full_gpu(adev, true);
4525 		if (rec) {
4526 			dev_warn(adev->dev, "failed to reinitialize sriov: %d\n", rec);
4527 			return r;
4528 		}
4529 	}
4530 
4531 	adev->in_suspend = adev->in_s0ix = adev->in_s3 = false;
4532 
4533 	return r;
4534 }
4535 
4536 static inline int amdgpu_virt_resume(struct amdgpu_device *adev)
4537 {
4538 	int r;
4539 	unsigned int prev_physical_node_id = adev->gmc.xgmi.physical_node_id;
4540 
4541 	/* During VM resume, QEMU programming of VF MSIX table (register GFXMSIX_VECT0_ADDR_LO)
4542 	 * may not work. The access could be blocked by nBIF protection as VF isn't in
4543 	 * exclusive access mode. Exclusive access is enabled now, disable/enable MSIX
4544 	 * so that QEMU reprograms MSIX table.
4545 	 */
4546 	amdgpu_restore_msix(adev);
4547 
4548 	r = adev->gfxhub.funcs->get_xgmi_info(adev);
4549 	if (r)
4550 		return r;
4551 
4552 	dev_info(adev->dev, "xgmi node, old id %d, new id %d\n",
4553 		prev_physical_node_id, adev->gmc.xgmi.physical_node_id);
4554 
4555 	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
4556 	adev->vm_manager.vram_base_offset +=
4557 		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
4558 
4559 	return 0;
4560 }
4561 
4562 /**
4563  * amdgpu_device_resume - initiate device resume
4564  *
4565  * @dev: drm dev pointer
4566  * @notify_clients: notify in-kernel DRM clients
4567  *
4568  * Bring the hw back to operating state (all asics).
4569  * Returns 0 for success or an error on failure.
4570  * Called at driver resume.
4571  */
4572 int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
4573 {
4574 	struct amdgpu_device *adev = drm_to_adev(dev);
4575 	int r = 0;
4576 
4577 	if (amdgpu_sriov_vf(adev)) {
4578 		r = amdgpu_virt_request_full_gpu(adev, true);
4579 		if (r)
4580 			return r;
4581 	}
4582 
4583 	if (amdgpu_virt_xgmi_migrate_enabled(adev)) {
4584 		r = amdgpu_virt_resume(adev);
4585 		if (r)
4586 			goto exit;
4587 	}
4588 
4589 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4590 		return 0;
4591 
4592 	if (adev->in_s0ix)
4593 		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4594 
4595 	/* post card */
4596 	if (amdgpu_device_need_post(adev)) {
4597 		r = amdgpu_device_asic_init(adev);
4598 		if (r)
4599 			dev_err(adev->dev, "amdgpu asic init failed\n");
4600 	}
4601 
4602 	r = amdgpu_device_ip_resume(adev);
4603 
4604 	if (r) {
4605 		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4606 		goto exit;
4607 	}
4608 
4609 	r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
4610 	if (r)
4611 		goto exit;
4612 
4613 	r = amdgpu_userq_resume(adev);
4614 	if (r)
4615 		goto exit;
4616 
4617 	r = amdgpu_device_ip_late_init(adev);
4618 	if (r)
4619 		goto exit;
4620 
4621 	queue_delayed_work(system_wq, &adev->delayed_init_work,
4622 			   msecs_to_jiffies(AMDGPU_RESUME_MS));
4623 exit:
4624 	if (amdgpu_sriov_vf(adev)) {
4625 		amdgpu_virt_init_data_exchange(adev);
4626 		amdgpu_virt_release_full_gpu(adev, true);
4627 
4628 		if (!r && !adev->in_runpm)
4629 			r = amdgpu_amdkfd_resume_process(adev);
4630 	}
4631 
4632 	if (r)
4633 		return r;
4634 
4635 	/* Make sure IB tests flushed */
4636 	flush_delayed_work(&adev->delayed_init_work);
4637 
4638 	if (notify_clients)
4639 		drm_client_dev_resume(adev_to_drm(adev));
4640 
4641 	amdgpu_ras_resume(adev);
4642 
4643 	if (adev->mode_info.num_crtc) {
4644 		/*
4645 		 * Most of the connector probing functions try to acquire runtime pm
4646 		 * refs to ensure that the GPU is powered on when connector polling is
4647 		 * performed. Since we're calling this from a runtime PM callback,
4648 		 * trying to acquire rpm refs will cause us to deadlock.
4649 		 *
4650 		 * Since we're guaranteed to be holding the rpm lock, it's safe to
4651 		 * temporarily disable the rpm helpers so this doesn't deadlock us.
4652 		 */
4653 #ifdef CONFIG_PM
4654 		dev->dev->power.disable_depth++;
4655 #endif
4656 		if (!adev->dc_enabled)
4657 			drm_helper_hpd_irq_event(dev);
4658 		else
4659 			drm_kms_helper_hotplug_event(dev);
4660 #ifdef CONFIG_PM
4661 		dev->dev->power.disable_depth--;
4662 #endif
4663 	}
4664 
4665 	amdgpu_vram_mgr_clear_reset_blocks(adev);
4666 	adev->in_suspend = false;
4667 
4668 	if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D0))
4669 		dev_warn(adev->dev, "smart shift update failed\n");
4670 
4671 	return 0;
4672 }
4673 
4674 /**
4675  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4676  *
4677  * @adev: amdgpu_device pointer
4678  *
4679  * The list of all the hardware IPs that make up the asic is walked and
4680  * the check_soft_reset callbacks are run.  check_soft_reset determines
4681  * if the asic is still hung or not.
4682  * Returns true if any of the IPs are still in a hung state, false if not.
4683  */
4684 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4685 {
4686 	int i;
4687 	bool asic_hang = false;
4688 
4689 	if (amdgpu_sriov_vf(adev))
4690 		return true;
4691 
4692 	if (amdgpu_asic_need_full_reset(adev))
4693 		return true;
4694 
4695 	for (i = 0; i < adev->num_ip_blocks; i++) {
4696 		if (!adev->ip_blocks[i].status.valid)
4697 			continue;
4698 		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4699 			adev->ip_blocks[i].status.hang =
4700 				adev->ip_blocks[i].version->funcs->check_soft_reset(
4701 					&adev->ip_blocks[i]);
4702 		if (adev->ip_blocks[i].status.hang) {
4703 			dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4704 			asic_hang = true;
4705 		}
4706 	}
4707 	return asic_hang;
4708 }
4709 
4710 /**
4711  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4712  *
4713  * @adev: amdgpu_device pointer
4714  *
4715  * The list of all the hardware IPs that make up the asic is walked and the
4716  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4717  * handles any IP specific hardware or software state changes that are
4718  * necessary for a soft reset to succeed.
4719  * Returns 0 on success, negative error code on failure.
4720  */
4721 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4722 {
4723 	int i, r = 0;
4724 
4725 	for (i = 0; i < adev->num_ip_blocks; i++) {
4726 		if (!adev->ip_blocks[i].status.valid)
4727 			continue;
4728 		if (adev->ip_blocks[i].status.hang &&
4729 		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4730 			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(&adev->ip_blocks[i]);
4731 			if (r)
4732 				return r;
4733 		}
4734 	}
4735 
4736 	return 0;
4737 }
4738 
4739 /**
4740  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4741  *
4742  * @adev: amdgpu_device pointer
4743  *
4744  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4745  * reset is necessary to recover.
4746  * Returns true if a full asic reset is required, false if not.
4747  */
4748 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4749 {
4750 	int i;
4751 
4752 	if (amdgpu_asic_need_full_reset(adev))
4753 		return true;
4754 
4755 	for (i = 0; i < adev->num_ip_blocks; i++) {
4756 		if (!adev->ip_blocks[i].status.valid)
4757 			continue;
4758 		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4759 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4760 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4761 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4762 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4763 			if (adev->ip_blocks[i].status.hang) {
4764 				dev_info(adev->dev, "Some block need full reset!\n");
4765 				return true;
4766 			}
4767 		}
4768 	}
4769 	return false;
4770 }
4771 
4772 /**
4773  * amdgpu_device_ip_soft_reset - do a soft reset
4774  *
4775  * @adev: amdgpu_device pointer
4776  *
4777  * The list of all the hardware IPs that make up the asic is walked and the
4778  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4779  * IP specific hardware or software state changes that are necessary to soft
4780  * reset the IP.
4781  * Returns 0 on success, negative error code on failure.
4782  */
4783 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4784 {
4785 	int i, r = 0;
4786 
4787 	for (i = 0; i < adev->num_ip_blocks; i++) {
4788 		if (!adev->ip_blocks[i].status.valid)
4789 			continue;
4790 		if (adev->ip_blocks[i].status.hang &&
4791 		    adev->ip_blocks[i].version->funcs->soft_reset) {
4792 			r = adev->ip_blocks[i].version->funcs->soft_reset(&adev->ip_blocks[i]);
4793 			if (r)
4794 				return r;
4795 		}
4796 	}
4797 
4798 	return 0;
4799 }
4800 
4801 /**
4802  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4803  *
4804  * @adev: amdgpu_device pointer
4805  *
4806  * The list of all the hardware IPs that make up the asic is walked and the
4807  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4808  * handles any IP specific hardware or software state changes that are
4809  * necessary after the IP has been soft reset.
4810  * Returns 0 on success, negative error code on failure.
4811  */
4812 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4813 {
4814 	int i, r = 0;
4815 
4816 	for (i = 0; i < adev->num_ip_blocks; i++) {
4817 		if (!adev->ip_blocks[i].status.valid)
4818 			continue;
4819 		if (adev->ip_blocks[i].status.hang &&
4820 		    adev->ip_blocks[i].version->funcs->post_soft_reset)
4821 			r = adev->ip_blocks[i].version->funcs->post_soft_reset(&adev->ip_blocks[i]);
4822 		if (r)
4823 			return r;
4824 	}
4825 
4826 	return 0;
4827 }
4828 
4829 /**
4830  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4831  *
4832  * @adev: amdgpu_device pointer
4833  * @reset_context: amdgpu reset context pointer
4834  *
4835  * do VF FLR and reinitialize Asic
4836  * return 0 means succeeded otherwise failed
4837  */
4838 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4839 				     struct amdgpu_reset_context *reset_context)
4840 {
4841 	int r;
4842 	struct amdgpu_hive_info *hive = NULL;
4843 
4844 	if (test_bit(AMDGPU_HOST_FLR, &reset_context->flags)) {
4845 		if (!amdgpu_ras_get_fed_status(adev))
4846 			amdgpu_virt_ready_to_reset(adev);
4847 		amdgpu_virt_wait_reset(adev);
4848 		clear_bit(AMDGPU_HOST_FLR, &reset_context->flags);
4849 		r = amdgpu_virt_request_full_gpu(adev, true);
4850 	} else {
4851 		r = amdgpu_virt_reset_gpu(adev);
4852 	}
4853 	if (r)
4854 		return r;
4855 
4856 	amdgpu_ras_clear_err_state(adev);
4857 	amdgpu_irq_gpu_reset_resume_helper(adev);
4858 
4859 	/* some sw clean up VF needs to do before recover */
4860 	amdgpu_virt_post_reset(adev);
4861 
4862 	/* Resume IP prior to SMC */
4863 	r = amdgpu_device_ip_reinit_early_sriov(adev);
4864 	if (r)
4865 		return r;
4866 
4867 	amdgpu_virt_init_data_exchange(adev);
4868 
4869 	r = amdgpu_device_fw_loading(adev);
4870 	if (r)
4871 		return r;
4872 
4873 	/* now we are okay to resume SMC/CP/SDMA */
4874 	r = amdgpu_device_ip_reinit_late_sriov(adev);
4875 	if (r)
4876 		return r;
4877 
4878 	hive = amdgpu_get_xgmi_hive(adev);
4879 	/* Update PSP FW topology after reset */
4880 	if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4881 		r = amdgpu_xgmi_update_topology(hive, adev);
4882 	if (hive)
4883 		amdgpu_put_xgmi_hive(hive);
4884 	if (r)
4885 		return r;
4886 
4887 	r = amdgpu_ib_ring_tests(adev);
4888 	if (r)
4889 		return r;
4890 
4891 	if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST)
4892 		amdgpu_inc_vram_lost(adev);
4893 
4894 	/* need to be called during full access so we can't do it later like
4895 	 * bare-metal does.
4896 	 */
4897 	amdgpu_amdkfd_post_reset(adev);
4898 	amdgpu_virt_release_full_gpu(adev, true);
4899 
4900 	/* Aldebaran and gfx_11_0_3 support ras in SRIOV, so need resume ras during reset */
4901 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
4902 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
4903 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
4904 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0) ||
4905 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3))
4906 		amdgpu_ras_resume(adev);
4907 
4908 	amdgpu_virt_ras_telemetry_post_reset(adev);
4909 
4910 	return 0;
4911 }
4912 
4913 /**
4914  * amdgpu_device_has_job_running - check if there is any unfinished job
4915  *
4916  * @adev: amdgpu_device pointer
4917  *
4918  * check if there is any job running on the device when guest driver receives
4919  * FLR notification from host driver. If there are still jobs running, then
4920  * the guest driver will not respond the FLR reset. Instead, let the job hit
4921  * the timeout and guest driver then issue the reset request.
4922  */
4923 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4924 {
4925 	int i;
4926 
4927 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4928 		struct amdgpu_ring *ring = adev->rings[i];
4929 
4930 		if (!amdgpu_ring_sched_ready(ring))
4931 			continue;
4932 
4933 		if (amdgpu_fence_count_emitted(ring))
4934 			return true;
4935 	}
4936 	return false;
4937 }
4938 
4939 /**
4940  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4941  *
4942  * @adev: amdgpu_device pointer
4943  *
4944  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4945  * a hung GPU.
4946  */
4947 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4948 {
4949 
4950 	if (amdgpu_gpu_recovery == 0)
4951 		goto disabled;
4952 
4953 	/* Skip soft reset check in fatal error mode */
4954 	if (!amdgpu_ras_is_poison_mode_supported(adev))
4955 		return true;
4956 
4957 	if (amdgpu_sriov_vf(adev))
4958 		return true;
4959 
4960 	if (amdgpu_gpu_recovery == -1) {
4961 		switch (adev->asic_type) {
4962 #ifdef CONFIG_DRM_AMDGPU_SI
4963 		case CHIP_VERDE:
4964 		case CHIP_TAHITI:
4965 		case CHIP_PITCAIRN:
4966 		case CHIP_OLAND:
4967 		case CHIP_HAINAN:
4968 #endif
4969 #ifdef CONFIG_DRM_AMDGPU_CIK
4970 		case CHIP_KAVERI:
4971 		case CHIP_KABINI:
4972 		case CHIP_MULLINS:
4973 #endif
4974 		case CHIP_CARRIZO:
4975 		case CHIP_STONEY:
4976 		case CHIP_CYAN_SKILLFISH:
4977 			goto disabled;
4978 		default:
4979 			break;
4980 		}
4981 	}
4982 
4983 	return true;
4984 
4985 disabled:
4986 		dev_info(adev->dev, "GPU recovery disabled.\n");
4987 		return false;
4988 }
4989 
4990 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4991 {
4992 	u32 i;
4993 	int ret = 0;
4994 
4995 	if (adev->bios)
4996 		amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4997 
4998 	dev_info(adev->dev, "GPU mode1 reset\n");
4999 
5000 	/* Cache the state before bus master disable. The saved config space
5001 	 * values are used in other cases like restore after mode-2 reset.
5002 	 */
5003 	amdgpu_device_cache_pci_state(adev->pdev);
5004 
5005 	/* disable BM */
5006 	pci_clear_master(adev->pdev);
5007 
5008 	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
5009 		dev_info(adev->dev, "GPU smu mode1 reset\n");
5010 		ret = amdgpu_dpm_mode1_reset(adev);
5011 	} else {
5012 		dev_info(adev->dev, "GPU psp mode1 reset\n");
5013 		ret = psp_gpu_reset(adev);
5014 	}
5015 
5016 	if (ret)
5017 		goto mode1_reset_failed;
5018 
5019 	/* enable mmio access after mode 1 reset completed */
5020 	adev->no_hw_access = false;
5021 
5022 	/* ensure no_hw_access is updated before we access hw */
5023 	smp_mb();
5024 
5025 	amdgpu_device_load_pci_state(adev->pdev);
5026 	ret = amdgpu_psp_wait_for_bootloader(adev);
5027 	if (ret)
5028 		goto mode1_reset_failed;
5029 
5030 	/* wait for asic to come out of reset */
5031 	for (i = 0; i < adev->usec_timeout; i++) {
5032 		u32 memsize = adev->nbio.funcs->get_memsize(adev);
5033 
5034 		if (memsize != 0xffffffff)
5035 			break;
5036 		udelay(1);
5037 	}
5038 
5039 	if (i >= adev->usec_timeout) {
5040 		ret = -ETIMEDOUT;
5041 		goto mode1_reset_failed;
5042 	}
5043 
5044 	if (adev->bios)
5045 		amdgpu_atombios_scratch_regs_engine_hung(adev, false);
5046 
5047 	return 0;
5048 
5049 mode1_reset_failed:
5050 	dev_err(adev->dev, "GPU mode1 reset failed\n");
5051 	return ret;
5052 }
5053 
5054 int amdgpu_device_link_reset(struct amdgpu_device *adev)
5055 {
5056 	int ret = 0;
5057 
5058 	dev_info(adev->dev, "GPU link reset\n");
5059 
5060 	if (!amdgpu_reset_in_dpc(adev))
5061 		ret = amdgpu_dpm_link_reset(adev);
5062 
5063 	if (ret)
5064 		goto link_reset_failed;
5065 
5066 	ret = amdgpu_psp_wait_for_bootloader(adev);
5067 	if (ret)
5068 		goto link_reset_failed;
5069 
5070 	return 0;
5071 
5072 link_reset_failed:
5073 	dev_err(adev->dev, "GPU link reset failed\n");
5074 	return ret;
5075 }
5076 
5077 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
5078 				 struct amdgpu_reset_context *reset_context)
5079 {
5080 	int i, r = 0;
5081 	struct amdgpu_job *job = NULL;
5082 	struct amdgpu_device *tmp_adev = reset_context->reset_req_dev;
5083 	bool need_full_reset =
5084 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5085 
5086 	if (reset_context->reset_req_dev == adev)
5087 		job = reset_context->job;
5088 
5089 	if (amdgpu_sriov_vf(adev))
5090 		amdgpu_virt_pre_reset(adev);
5091 
5092 	amdgpu_fence_driver_isr_toggle(adev, true);
5093 
5094 	/* block all schedulers and reset given job's ring */
5095 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5096 		struct amdgpu_ring *ring = adev->rings[i];
5097 
5098 		if (!amdgpu_ring_sched_ready(ring))
5099 			continue;
5100 
5101 		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
5102 		amdgpu_fence_driver_force_completion(ring);
5103 	}
5104 
5105 	amdgpu_fence_driver_isr_toggle(adev, false);
5106 
5107 	if (job && job->vm)
5108 		drm_sched_increase_karma(&job->base);
5109 
5110 	r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
5111 	/* If reset handler not implemented, continue; otherwise return */
5112 	if (r == -EOPNOTSUPP)
5113 		r = 0;
5114 	else
5115 		return r;
5116 
5117 	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
5118 	if (!amdgpu_sriov_vf(adev)) {
5119 
5120 		if (!need_full_reset)
5121 			need_full_reset = amdgpu_device_ip_need_full_reset(adev);
5122 
5123 		if (!need_full_reset && amdgpu_gpu_recovery &&
5124 		    amdgpu_device_ip_check_soft_reset(adev)) {
5125 			amdgpu_device_ip_pre_soft_reset(adev);
5126 			r = amdgpu_device_ip_soft_reset(adev);
5127 			amdgpu_device_ip_post_soft_reset(adev);
5128 			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
5129 				dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
5130 				need_full_reset = true;
5131 			}
5132 		}
5133 
5134 		if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) {
5135 			dev_info(tmp_adev->dev, "Dumping IP State\n");
5136 			/* Trigger ip dump before we reset the asic */
5137 			for (i = 0; i < tmp_adev->num_ip_blocks; i++)
5138 				if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state)
5139 					tmp_adev->ip_blocks[i].version->funcs
5140 						->dump_ip_state((void *)&tmp_adev->ip_blocks[i]);
5141 			dev_info(tmp_adev->dev, "Dumping IP State Completed\n");
5142 		}
5143 
5144 		if (need_full_reset)
5145 			r = amdgpu_device_ip_suspend(adev);
5146 		if (need_full_reset)
5147 			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5148 		else
5149 			clear_bit(AMDGPU_NEED_FULL_RESET,
5150 				  &reset_context->flags);
5151 	}
5152 
5153 	return r;
5154 }
5155 
5156 int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
5157 {
5158 	struct list_head *device_list_handle;
5159 	bool full_reset, vram_lost = false;
5160 	struct amdgpu_device *tmp_adev;
5161 	int r, init_level;
5162 
5163 	device_list_handle = reset_context->reset_device_list;
5164 
5165 	if (!device_list_handle)
5166 		return -EINVAL;
5167 
5168 	full_reset = test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5169 
5170 	/**
5171 	 * If it's reset on init, it's default init level, otherwise keep level
5172 	 * as recovery level.
5173 	 */
5174 	if (reset_context->method == AMD_RESET_METHOD_ON_INIT)
5175 			init_level = AMDGPU_INIT_LEVEL_DEFAULT;
5176 	else
5177 			init_level = AMDGPU_INIT_LEVEL_RESET_RECOVERY;
5178 
5179 	r = 0;
5180 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5181 		amdgpu_set_init_level(tmp_adev, init_level);
5182 		if (full_reset) {
5183 			/* post card */
5184 			amdgpu_reset_set_dpc_status(tmp_adev, false);
5185 			amdgpu_ras_clear_err_state(tmp_adev);
5186 			r = amdgpu_device_asic_init(tmp_adev);
5187 			if (r) {
5188 				dev_warn(tmp_adev->dev, "asic atom init failed!");
5189 			} else {
5190 				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
5191 
5192 				r = amdgpu_device_ip_resume_phase1(tmp_adev);
5193 				if (r)
5194 					goto out;
5195 
5196 				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
5197 
5198 				if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags))
5199 					amdgpu_coredump(tmp_adev, false, vram_lost, reset_context->job);
5200 
5201 				if (vram_lost) {
5202 					dev_info(
5203 						tmp_adev->dev,
5204 						"VRAM is lost due to GPU reset!\n");
5205 					amdgpu_inc_vram_lost(tmp_adev);
5206 				}
5207 
5208 				r = amdgpu_device_fw_loading(tmp_adev);
5209 				if (r)
5210 					return r;
5211 
5212 				r = amdgpu_xcp_restore_partition_mode(
5213 					tmp_adev->xcp_mgr);
5214 				if (r)
5215 					goto out;
5216 
5217 				r = amdgpu_device_ip_resume_phase2(tmp_adev);
5218 				if (r)
5219 					goto out;
5220 
5221 				amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true);
5222 
5223 				r = amdgpu_device_ip_resume_phase3(tmp_adev);
5224 				if (r)
5225 					goto out;
5226 
5227 				if (vram_lost)
5228 					amdgpu_device_fill_reset_magic(tmp_adev);
5229 
5230 				/*
5231 				 * Add this ASIC as tracked as reset was already
5232 				 * complete successfully.
5233 				 */
5234 				amdgpu_register_gpu_instance(tmp_adev);
5235 
5236 				if (!reset_context->hive &&
5237 				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5238 					amdgpu_xgmi_add_device(tmp_adev);
5239 
5240 				r = amdgpu_device_ip_late_init(tmp_adev);
5241 				if (r)
5242 					goto out;
5243 
5244 				r = amdgpu_userq_post_reset(tmp_adev, vram_lost);
5245 				if (r)
5246 					goto out;
5247 
5248 				drm_client_dev_resume(adev_to_drm(tmp_adev));
5249 
5250 				/*
5251 				 * The GPU enters bad state once faulty pages
5252 				 * by ECC has reached the threshold, and ras
5253 				 * recovery is scheduled next. So add one check
5254 				 * here to break recovery if it indeed exceeds
5255 				 * bad page threshold, and remind user to
5256 				 * retire this GPU or setting one bigger
5257 				 * bad_page_threshold value to fix this once
5258 				 * probing driver again.
5259 				 */
5260 				if (!amdgpu_ras_is_rma(tmp_adev)) {
5261 					/* must succeed. */
5262 					amdgpu_ras_resume(tmp_adev);
5263 				} else {
5264 					r = -EINVAL;
5265 					goto out;
5266 				}
5267 
5268 				/* Update PSP FW topology after reset */
5269 				if (reset_context->hive &&
5270 				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5271 					r = amdgpu_xgmi_update_topology(
5272 						reset_context->hive, tmp_adev);
5273 			}
5274 		}
5275 
5276 out:
5277 		if (!r) {
5278 			/* IP init is complete now, set level as default */
5279 			amdgpu_set_init_level(tmp_adev,
5280 					      AMDGPU_INIT_LEVEL_DEFAULT);
5281 			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
5282 			r = amdgpu_ib_ring_tests(tmp_adev);
5283 			if (r) {
5284 				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
5285 				r = -EAGAIN;
5286 				goto end;
5287 			}
5288 		}
5289 
5290 		if (r)
5291 			tmp_adev->asic_reset_res = r;
5292 	}
5293 
5294 end:
5295 	return r;
5296 }
5297 
5298 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
5299 			 struct amdgpu_reset_context *reset_context)
5300 {
5301 	struct amdgpu_device *tmp_adev = NULL;
5302 	bool need_full_reset, skip_hw_reset;
5303 	int r = 0;
5304 
5305 	/* Try reset handler method first */
5306 	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5307 				    reset_list);
5308 
5309 	reset_context->reset_device_list = device_list_handle;
5310 	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
5311 	/* If reset handler not implemented, continue; otherwise return */
5312 	if (r == -EOPNOTSUPP)
5313 		r = 0;
5314 	else
5315 		return r;
5316 
5317 	/* Reset handler not implemented, use the default method */
5318 	need_full_reset =
5319 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5320 	skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
5321 
5322 	/*
5323 	 * ASIC reset has to be done on all XGMI hive nodes ASAP
5324 	 * to allow proper links negotiation in FW (within 1 sec)
5325 	 */
5326 	if (!skip_hw_reset && need_full_reset) {
5327 		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5328 			/* For XGMI run all resets in parallel to speed up the process */
5329 			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5330 				if (!queue_work(system_unbound_wq,
5331 						&tmp_adev->xgmi_reset_work))
5332 					r = -EALREADY;
5333 			} else
5334 				r = amdgpu_asic_reset(tmp_adev);
5335 
5336 			if (r) {
5337 				dev_err(tmp_adev->dev,
5338 					"ASIC reset failed with error, %d for drm dev, %s",
5339 					r, adev_to_drm(tmp_adev)->unique);
5340 				goto out;
5341 			}
5342 		}
5343 
5344 		/* For XGMI wait for all resets to complete before proceed */
5345 		if (!r) {
5346 			list_for_each_entry(tmp_adev, device_list_handle,
5347 					    reset_list) {
5348 				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5349 					flush_work(&tmp_adev->xgmi_reset_work);
5350 					r = tmp_adev->asic_reset_res;
5351 					if (r)
5352 						break;
5353 				}
5354 			}
5355 		}
5356 	}
5357 
5358 	if (!r && amdgpu_ras_intr_triggered()) {
5359 		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5360 			amdgpu_ras_reset_error_count(tmp_adev,
5361 						     AMDGPU_RAS_BLOCK__MMHUB);
5362 		}
5363 
5364 		amdgpu_ras_intr_cleared();
5365 	}
5366 
5367 	r = amdgpu_device_reinit_after_reset(reset_context);
5368 	if (r == -EAGAIN)
5369 		set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5370 	else
5371 		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5372 
5373 out:
5374 	return r;
5375 }
5376 
5377 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
5378 {
5379 
5380 	switch (amdgpu_asic_reset_method(adev)) {
5381 	case AMD_RESET_METHOD_MODE1:
5382 	case AMD_RESET_METHOD_LINK:
5383 		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
5384 		break;
5385 	case AMD_RESET_METHOD_MODE2:
5386 		adev->mp1_state = PP_MP1_STATE_RESET;
5387 		break;
5388 	default:
5389 		adev->mp1_state = PP_MP1_STATE_NONE;
5390 		break;
5391 	}
5392 }
5393 
5394 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
5395 {
5396 	amdgpu_vf_error_trans_all(adev);
5397 	adev->mp1_state = PP_MP1_STATE_NONE;
5398 }
5399 
5400 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5401 {
5402 	struct pci_dev *p = NULL;
5403 
5404 	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5405 			adev->pdev->bus->number, 1);
5406 	if (p) {
5407 		pm_runtime_enable(&(p->dev));
5408 		pm_runtime_resume(&(p->dev));
5409 	}
5410 
5411 	pci_dev_put(p);
5412 }
5413 
5414 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5415 {
5416 	enum amd_reset_method reset_method;
5417 	struct pci_dev *p = NULL;
5418 	u64 expires;
5419 
5420 	/*
5421 	 * For now, only BACO and mode1 reset are confirmed
5422 	 * to suffer the audio issue without proper suspended.
5423 	 */
5424 	reset_method = amdgpu_asic_reset_method(adev);
5425 	if ((reset_method != AMD_RESET_METHOD_BACO) &&
5426 	     (reset_method != AMD_RESET_METHOD_MODE1))
5427 		return -EINVAL;
5428 
5429 	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5430 			adev->pdev->bus->number, 1);
5431 	if (!p)
5432 		return -ENODEV;
5433 
5434 	expires = pm_runtime_autosuspend_expiration(&(p->dev));
5435 	if (!expires)
5436 		/*
5437 		 * If we cannot get the audio device autosuspend delay,
5438 		 * a fixed 4S interval will be used. Considering 3S is
5439 		 * the audio controller default autosuspend delay setting.
5440 		 * 4S used here is guaranteed to cover that.
5441 		 */
5442 		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5443 
5444 	while (!pm_runtime_status_suspended(&(p->dev))) {
5445 		if (!pm_runtime_suspend(&(p->dev)))
5446 			break;
5447 
5448 		if (expires < ktime_get_mono_fast_ns()) {
5449 			dev_warn(adev->dev, "failed to suspend display audio\n");
5450 			pci_dev_put(p);
5451 			/* TODO: abort the succeeding gpu reset? */
5452 			return -ETIMEDOUT;
5453 		}
5454 	}
5455 
5456 	pm_runtime_disable(&(p->dev));
5457 
5458 	pci_dev_put(p);
5459 	return 0;
5460 }
5461 
5462 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5463 {
5464 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5465 
5466 #if defined(CONFIG_DEBUG_FS)
5467 	if (!amdgpu_sriov_vf(adev))
5468 		cancel_work(&adev->reset_work);
5469 #endif
5470 	cancel_work(&adev->userq_reset_work);
5471 
5472 	if (adev->kfd.dev)
5473 		cancel_work(&adev->kfd.reset_work);
5474 
5475 	if (amdgpu_sriov_vf(adev))
5476 		cancel_work(&adev->virt.flr_work);
5477 
5478 	if (con && adev->ras_enabled)
5479 		cancel_work(&con->recovery_work);
5480 
5481 }
5482 
5483 static int amdgpu_device_health_check(struct list_head *device_list_handle)
5484 {
5485 	struct amdgpu_device *tmp_adev;
5486 	int ret = 0;
5487 
5488 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5489 		ret |= amdgpu_device_bus_status_check(tmp_adev);
5490 	}
5491 
5492 	return ret;
5493 }
5494 
5495 static void amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
5496 					  struct list_head *device_list,
5497 					  struct amdgpu_hive_info *hive)
5498 {
5499 	struct amdgpu_device *tmp_adev = NULL;
5500 
5501 	/*
5502 	 * Build list of devices to reset.
5503 	 * In case we are in XGMI hive mode, resort the device list
5504 	 * to put adev in the 1st position.
5505 	 */
5506 	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
5507 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5508 			list_add_tail(&tmp_adev->reset_list, device_list);
5509 			if (adev->shutdown)
5510 				tmp_adev->shutdown = true;
5511 			if (amdgpu_reset_in_dpc(adev))
5512 				tmp_adev->pcie_reset_ctx.in_link_reset = true;
5513 		}
5514 		if (!list_is_first(&adev->reset_list, device_list))
5515 			list_rotate_to_front(&adev->reset_list, device_list);
5516 	} else {
5517 		list_add_tail(&adev->reset_list, device_list);
5518 	}
5519 }
5520 
5521 static void amdgpu_device_recovery_get_reset_lock(struct amdgpu_device *adev,
5522 						  struct list_head *device_list)
5523 {
5524 	struct amdgpu_device *tmp_adev = NULL;
5525 
5526 	if (list_empty(device_list))
5527 		return;
5528 	tmp_adev =
5529 		list_first_entry(device_list, struct amdgpu_device, reset_list);
5530 	amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5531 }
5532 
5533 static void amdgpu_device_recovery_put_reset_lock(struct amdgpu_device *adev,
5534 						  struct list_head *device_list)
5535 {
5536 	struct amdgpu_device *tmp_adev = NULL;
5537 
5538 	if (list_empty(device_list))
5539 		return;
5540 	tmp_adev =
5541 		list_first_entry(device_list, struct amdgpu_device, reset_list);
5542 	amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5543 }
5544 
5545 static void amdgpu_device_halt_activities(struct amdgpu_device *adev,
5546 					  struct amdgpu_job *job,
5547 					  struct amdgpu_reset_context *reset_context,
5548 					  struct list_head *device_list,
5549 					  struct amdgpu_hive_info *hive,
5550 					  bool need_emergency_restart)
5551 {
5552 	struct amdgpu_device *tmp_adev = NULL;
5553 	int i;
5554 
5555 	/* block all schedulers and reset given job's ring */
5556 	list_for_each_entry(tmp_adev, device_list, reset_list) {
5557 		amdgpu_device_set_mp1_state(tmp_adev);
5558 
5559 		/*
5560 		 * Try to put the audio codec into suspend state
5561 		 * before gpu reset started.
5562 		 *
5563 		 * Due to the power domain of the graphics device
5564 		 * is shared with AZ power domain. Without this,
5565 		 * we may change the audio hardware from behind
5566 		 * the audio driver's back. That will trigger
5567 		 * some audio codec errors.
5568 		 */
5569 		if (!amdgpu_device_suspend_display_audio(tmp_adev))
5570 			tmp_adev->pcie_reset_ctx.audio_suspended = true;
5571 
5572 		amdgpu_ras_set_error_query_ready(tmp_adev, false);
5573 
5574 		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5575 
5576 		amdgpu_amdkfd_pre_reset(tmp_adev, reset_context);
5577 
5578 		/*
5579 		 * Mark these ASICs to be reset as untracked first
5580 		 * And add them back after reset completed
5581 		 */
5582 		amdgpu_unregister_gpu_instance(tmp_adev);
5583 
5584 		drm_client_dev_suspend(adev_to_drm(tmp_adev));
5585 
5586 		/* disable ras on ALL IPs */
5587 		if (!need_emergency_restart && !amdgpu_reset_in_dpc(adev) &&
5588 		    amdgpu_device_ip_need_full_reset(tmp_adev))
5589 			amdgpu_ras_suspend(tmp_adev);
5590 
5591 		amdgpu_userq_pre_reset(tmp_adev);
5592 
5593 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5594 			struct amdgpu_ring *ring = tmp_adev->rings[i];
5595 
5596 			if (!amdgpu_ring_sched_ready(ring))
5597 				continue;
5598 
5599 			drm_sched_wqueue_stop(&ring->sched);
5600 
5601 			if (need_emergency_restart)
5602 				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5603 		}
5604 		atomic_inc(&tmp_adev->gpu_reset_counter);
5605 	}
5606 }
5607 
5608 static int amdgpu_device_asic_reset(struct amdgpu_device *adev,
5609 			      struct list_head *device_list,
5610 			      struct amdgpu_reset_context *reset_context)
5611 {
5612 	struct amdgpu_device *tmp_adev = NULL;
5613 	int retry_limit = AMDGPU_MAX_RETRY_LIMIT;
5614 	int r = 0;
5615 
5616 retry:	/* Rest of adevs pre asic reset from XGMI hive. */
5617 	list_for_each_entry(tmp_adev, device_list, reset_list) {
5618 		r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5619 		/*TODO Should we stop ?*/
5620 		if (r) {
5621 			dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5622 				  r, adev_to_drm(tmp_adev)->unique);
5623 			tmp_adev->asic_reset_res = r;
5624 		}
5625 	}
5626 
5627 	/* Actual ASIC resets if needed.*/
5628 	/* Host driver will handle XGMI hive reset for SRIOV */
5629 	if (amdgpu_sriov_vf(adev)) {
5630 
5631 		/* Bail out of reset early */
5632 		if (amdgpu_ras_is_rma(adev))
5633 			return -ENODEV;
5634 
5635 		if (amdgpu_ras_get_fed_status(adev) || amdgpu_virt_rcvd_ras_interrupt(adev)) {
5636 			dev_dbg(adev->dev, "Detected RAS error, wait for FLR completion\n");
5637 			amdgpu_ras_set_fed(adev, true);
5638 			set_bit(AMDGPU_HOST_FLR, &reset_context->flags);
5639 		}
5640 
5641 		r = amdgpu_device_reset_sriov(adev, reset_context);
5642 		if (AMDGPU_RETRY_SRIOV_RESET(r) && (retry_limit--) > 0) {
5643 			amdgpu_virt_release_full_gpu(adev, true);
5644 			goto retry;
5645 		}
5646 		if (r)
5647 			adev->asic_reset_res = r;
5648 	} else {
5649 		r = amdgpu_do_asic_reset(device_list, reset_context);
5650 		if (r && r == -EAGAIN)
5651 			goto retry;
5652 	}
5653 
5654 	list_for_each_entry(tmp_adev, device_list, reset_list) {
5655 		/*
5656 		 * Drop any pending non scheduler resets queued before reset is done.
5657 		 * Any reset scheduled after this point would be valid. Scheduler resets
5658 		 * were already dropped during drm_sched_stop and no new ones can come
5659 		 * in before drm_sched_start.
5660 		 */
5661 		amdgpu_device_stop_pending_resets(tmp_adev);
5662 	}
5663 
5664 	return r;
5665 }
5666 
5667 static int amdgpu_device_sched_resume(struct list_head *device_list,
5668 			      struct amdgpu_reset_context *reset_context,
5669 			      bool   job_signaled)
5670 {
5671 	struct amdgpu_device *tmp_adev = NULL;
5672 	int i, r = 0;
5673 
5674 	/* Post ASIC reset for all devs .*/
5675 	list_for_each_entry(tmp_adev, device_list, reset_list) {
5676 
5677 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5678 			struct amdgpu_ring *ring = tmp_adev->rings[i];
5679 
5680 			if (!amdgpu_ring_sched_ready(ring))
5681 				continue;
5682 
5683 			drm_sched_wqueue_start(&ring->sched);
5684 		}
5685 
5686 		if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
5687 			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5688 
5689 		if (tmp_adev->asic_reset_res) {
5690 			/* bad news, how to tell it to userspace ?
5691 			 * for ras error, we should report GPU bad status instead of
5692 			 * reset failure
5693 			 */
5694 			if (reset_context->src != AMDGPU_RESET_SRC_RAS ||
5695 			    !amdgpu_ras_eeprom_check_err_threshold(tmp_adev))
5696 				dev_info(
5697 					tmp_adev->dev,
5698 					"GPU reset(%d) failed with error %d\n",
5699 					atomic_read(
5700 						&tmp_adev->gpu_reset_counter),
5701 					tmp_adev->asic_reset_res);
5702 			amdgpu_vf_error_put(tmp_adev,
5703 					    AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0,
5704 					    tmp_adev->asic_reset_res);
5705 			if (!r)
5706 				r = tmp_adev->asic_reset_res;
5707 			tmp_adev->asic_reset_res = 0;
5708 		} else {
5709 			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n",
5710 				 atomic_read(&tmp_adev->gpu_reset_counter));
5711 			if (amdgpu_acpi_smart_shift_update(tmp_adev,
5712 							   AMDGPU_SS_DEV_D0))
5713 				dev_warn(tmp_adev->dev,
5714 					 "smart shift update failed\n");
5715 		}
5716 	}
5717 
5718 	return r;
5719 }
5720 
5721 static void amdgpu_device_gpu_resume(struct amdgpu_device *adev,
5722 			      struct list_head *device_list,
5723 			      bool   need_emergency_restart)
5724 {
5725 	struct amdgpu_device *tmp_adev = NULL;
5726 
5727 	list_for_each_entry(tmp_adev, device_list, reset_list) {
5728 		/* unlock kfd: SRIOV would do it separately */
5729 		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5730 			amdgpu_amdkfd_post_reset(tmp_adev);
5731 
5732 		/* kfd_post_reset will do nothing if kfd device is not initialized,
5733 		 * need to bring up kfd here if it's not be initialized before
5734 		 */
5735 		if (!adev->kfd.init_complete)
5736 			amdgpu_amdkfd_device_init(adev);
5737 
5738 		if (tmp_adev->pcie_reset_ctx.audio_suspended)
5739 			amdgpu_device_resume_display_audio(tmp_adev);
5740 
5741 		amdgpu_device_unset_mp1_state(tmp_adev);
5742 
5743 		amdgpu_ras_set_error_query_ready(tmp_adev, true);
5744 
5745 	}
5746 }
5747 
5748 
5749 /**
5750  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5751  *
5752  * @adev: amdgpu_device pointer
5753  * @job: which job trigger hang
5754  * @reset_context: amdgpu reset context pointer
5755  *
5756  * Attempt to reset the GPU if it has hung (all asics).
5757  * Attempt to do soft-reset or full-reset and reinitialize Asic
5758  * Returns 0 for success or an error on failure.
5759  */
5760 
5761 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5762 			      struct amdgpu_job *job,
5763 			      struct amdgpu_reset_context *reset_context)
5764 {
5765 	struct list_head device_list;
5766 	bool job_signaled = false;
5767 	struct amdgpu_hive_info *hive = NULL;
5768 	int r = 0;
5769 	bool need_emergency_restart = false;
5770 	/* save the pasid here as the job may be freed before the end of the reset */
5771 	int pasid = job ? job->pasid : -EINVAL;
5772 
5773 	/*
5774 	 * If it reaches here because of hang/timeout and a RAS error is
5775 	 * detected at the same time, let RAS recovery take care of it.
5776 	 */
5777 	if (amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY) &&
5778 	    !amdgpu_sriov_vf(adev) &&
5779 	    reset_context->src != AMDGPU_RESET_SRC_RAS) {
5780 		dev_dbg(adev->dev,
5781 			"Gpu recovery from source: %d yielding to RAS error recovery handling",
5782 			reset_context->src);
5783 		return 0;
5784 	}
5785 
5786 	/*
5787 	 * Special case: RAS triggered and full reset isn't supported
5788 	 */
5789 	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5790 
5791 	/*
5792 	 * Flush RAM to disk so that after reboot
5793 	 * the user can read log and see why the system rebooted.
5794 	 */
5795 	if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
5796 		amdgpu_ras_get_context(adev)->reboot) {
5797 		dev_warn(adev->dev, "Emergency reboot.");
5798 
5799 		ksys_sync_helper();
5800 		emergency_restart();
5801 	}
5802 
5803 	dev_info(adev->dev, "GPU %s begin!. Source:  %d\n",
5804 		 need_emergency_restart ? "jobs stop" : "reset",
5805 		 reset_context->src);
5806 
5807 	if (!amdgpu_sriov_vf(adev))
5808 		hive = amdgpu_get_xgmi_hive(adev);
5809 	if (hive)
5810 		mutex_lock(&hive->hive_lock);
5811 
5812 	reset_context->job = job;
5813 	reset_context->hive = hive;
5814 	INIT_LIST_HEAD(&device_list);
5815 
5816 	amdgpu_device_recovery_prepare(adev, &device_list, hive);
5817 
5818 	if (!amdgpu_sriov_vf(adev)) {
5819 		r = amdgpu_device_health_check(&device_list);
5820 		if (r)
5821 			goto end_reset;
5822 	}
5823 
5824 	/* Cannot be called after locking reset domain */
5825 	amdgpu_ras_pre_reset(adev, &device_list);
5826 
5827 	/* We need to lock reset domain only once both for XGMI and single device */
5828 	amdgpu_device_recovery_get_reset_lock(adev, &device_list);
5829 
5830 	amdgpu_device_halt_activities(adev, job, reset_context, &device_list,
5831 				      hive, need_emergency_restart);
5832 	if (need_emergency_restart)
5833 		goto skip_sched_resume;
5834 	/*
5835 	 * Must check guilty signal here since after this point all old
5836 	 * HW fences are force signaled.
5837 	 *
5838 	 * job->base holds a reference to parent fence
5839 	 */
5840 	if (job && (dma_fence_get_status(&job->hw_fence->base) > 0)) {
5841 		job_signaled = true;
5842 		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5843 		goto skip_hw_reset;
5844 	}
5845 
5846 	r = amdgpu_device_asic_reset(adev, &device_list, reset_context);
5847 	if (r)
5848 		goto reset_unlock;
5849 skip_hw_reset:
5850 	r = amdgpu_device_sched_resume(&device_list, reset_context, job_signaled);
5851 	if (r)
5852 		goto reset_unlock;
5853 skip_sched_resume:
5854 	amdgpu_device_gpu_resume(adev, &device_list, need_emergency_restart);
5855 reset_unlock:
5856 	amdgpu_device_recovery_put_reset_lock(adev, &device_list);
5857 	amdgpu_ras_post_reset(adev, &device_list);
5858 end_reset:
5859 	if (hive) {
5860 		mutex_unlock(&hive->hive_lock);
5861 		amdgpu_put_xgmi_hive(hive);
5862 	}
5863 
5864 	if (r)
5865 		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5866 
5867 	atomic_set(&adev->reset_domain->reset_res, r);
5868 
5869 	if (!r) {
5870 		struct amdgpu_task_info *ti = NULL;
5871 
5872 		/*
5873 		 * The job may already be freed at this point via the sched tdr workqueue so
5874 		 * use the cached pasid.
5875 		 */
5876 		if (pasid >= 0)
5877 			ti = amdgpu_vm_get_task_info_pasid(adev, pasid);
5878 
5879 		drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE,
5880 				     ti ? &ti->task : NULL);
5881 
5882 		amdgpu_vm_put_task_info(ti);
5883 	}
5884 
5885 	return r;
5886 }
5887 
5888 /**
5889  * amdgpu_device_partner_bandwidth - find the bandwidth of appropriate partner
5890  *
5891  * @adev: amdgpu_device pointer
5892  * @speed: pointer to the speed of the link
5893  * @width: pointer to the width of the link
5894  *
5895  * Evaluate the hierarchy to find the speed and bandwidth capabilities of the
5896  * first physical partner to an AMD dGPU.
5897  * This will exclude any virtual switches and links.
5898  */
5899 static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
5900 					    enum pci_bus_speed *speed,
5901 					    enum pcie_link_width *width)
5902 {
5903 	struct pci_dev *parent = adev->pdev;
5904 
5905 	if (!speed || !width)
5906 		return;
5907 
5908 	*speed = PCI_SPEED_UNKNOWN;
5909 	*width = PCIE_LNK_WIDTH_UNKNOWN;
5910 
5911 	if (amdgpu_device_pcie_dynamic_switching_supported(adev)) {
5912 		while ((parent = pci_upstream_bridge(parent))) {
5913 			/* skip upstream/downstream switches internal to dGPU*/
5914 			if (parent->vendor == PCI_VENDOR_ID_ATI)
5915 				continue;
5916 			*speed = pcie_get_speed_cap(parent);
5917 			*width = pcie_get_width_cap(parent);
5918 			break;
5919 		}
5920 	} else {
5921 		/* use the current speeds rather than max if switching is not supported */
5922 		pcie_bandwidth_available(adev->pdev, NULL, speed, width);
5923 	}
5924 }
5925 
5926 /**
5927  * amdgpu_device_gpu_bandwidth - find the bandwidth of the GPU
5928  *
5929  * @adev: amdgpu_device pointer
5930  * @speed: pointer to the speed of the link
5931  * @width: pointer to the width of the link
5932  *
5933  * Evaluate the hierarchy to find the speed and bandwidth capabilities of the
5934  * AMD dGPU which may be a virtual upstream bridge.
5935  */
5936 static void amdgpu_device_gpu_bandwidth(struct amdgpu_device *adev,
5937 					enum pci_bus_speed *speed,
5938 					enum pcie_link_width *width)
5939 {
5940 	struct pci_dev *parent = adev->pdev;
5941 
5942 	if (!speed || !width)
5943 		return;
5944 
5945 	parent = pci_upstream_bridge(parent);
5946 	if (parent && parent->vendor == PCI_VENDOR_ID_ATI) {
5947 		/* use the upstream/downstream switches internal to dGPU */
5948 		*speed = pcie_get_speed_cap(parent);
5949 		*width = pcie_get_width_cap(parent);
5950 		while ((parent = pci_upstream_bridge(parent))) {
5951 			if (parent->vendor == PCI_VENDOR_ID_ATI) {
5952 				/* use the upstream/downstream switches internal to dGPU */
5953 				*speed = pcie_get_speed_cap(parent);
5954 				*width = pcie_get_width_cap(parent);
5955 			}
5956 		}
5957 	} else {
5958 		/* use the device itself */
5959 		*speed = pcie_get_speed_cap(adev->pdev);
5960 		*width = pcie_get_width_cap(adev->pdev);
5961 	}
5962 }
5963 
5964 /**
5965  * amdgpu_device_get_pcie_info - fence pcie info about the PCIE slot
5966  *
5967  * @adev: amdgpu_device pointer
5968  *
5969  * Fetches and stores in the driver the PCIE capabilities (gen speed
5970  * and lanes) of the slot the device is in. Handles APUs and
5971  * virtualized environments where PCIE config space may not be available.
5972  */
5973 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5974 {
5975 	enum pci_bus_speed speed_cap, platform_speed_cap;
5976 	enum pcie_link_width platform_link_width, link_width;
5977 
5978 	if (amdgpu_pcie_gen_cap)
5979 		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5980 
5981 	if (amdgpu_pcie_lane_cap)
5982 		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5983 
5984 	/* covers APUs as well */
5985 	if (pci_is_root_bus(adev->pdev->bus) && !amdgpu_passthrough(adev)) {
5986 		if (adev->pm.pcie_gen_mask == 0)
5987 			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5988 		if (adev->pm.pcie_mlw_mask == 0)
5989 			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5990 		return;
5991 	}
5992 
5993 	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5994 		return;
5995 
5996 	amdgpu_device_partner_bandwidth(adev, &platform_speed_cap,
5997 					&platform_link_width);
5998 	amdgpu_device_gpu_bandwidth(adev, &speed_cap, &link_width);
5999 
6000 	if (adev->pm.pcie_gen_mask == 0) {
6001 		/* asic caps */
6002 		if (speed_cap == PCI_SPEED_UNKNOWN) {
6003 			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6004 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6005 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
6006 		} else {
6007 			if (speed_cap == PCIE_SPEED_32_0GT)
6008 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6009 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6010 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
6011 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
6012 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
6013 			else if (speed_cap == PCIE_SPEED_16_0GT)
6014 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6015 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6016 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
6017 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
6018 			else if (speed_cap == PCIE_SPEED_8_0GT)
6019 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6020 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6021 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
6022 			else if (speed_cap == PCIE_SPEED_5_0GT)
6023 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6024 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
6025 			else
6026 				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
6027 		}
6028 		/* platform caps */
6029 		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
6030 			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6031 						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
6032 		} else {
6033 			if (platform_speed_cap == PCIE_SPEED_32_0GT)
6034 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6035 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6036 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
6037 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
6038 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
6039 			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
6040 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6041 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6042 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
6043 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
6044 			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
6045 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6046 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6047 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
6048 			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
6049 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6050 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
6051 			else
6052 				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
6053 
6054 		}
6055 	}
6056 	if (adev->pm.pcie_mlw_mask == 0) {
6057 		/* asic caps */
6058 		if (link_width == PCIE_LNK_WIDTH_UNKNOWN) {
6059 			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_ASIC_PCIE_MLW_MASK;
6060 		} else {
6061 			switch (link_width) {
6062 			case PCIE_LNK_X32:
6063 				adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X32 |
6064 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 |
6065 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
6066 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
6067 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
6068 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
6069 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
6070 				break;
6071 			case PCIE_LNK_X16:
6072 				adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 |
6073 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
6074 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
6075 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
6076 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
6077 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
6078 				break;
6079 			case PCIE_LNK_X12:
6080 				adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
6081 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
6082 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
6083 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
6084 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
6085 				break;
6086 			case PCIE_LNK_X8:
6087 				adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
6088 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
6089 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
6090 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
6091 				break;
6092 			case PCIE_LNK_X4:
6093 				adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
6094 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
6095 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
6096 				break;
6097 			case PCIE_LNK_X2:
6098 				adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
6099 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
6100 				break;
6101 			case PCIE_LNK_X1:
6102 				adev->pm.pcie_mlw_mask |= CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1;
6103 				break;
6104 			default:
6105 				break;
6106 			}
6107 		}
6108 		/* platform caps */
6109 		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
6110 			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
6111 		} else {
6112 			switch (platform_link_width) {
6113 			case PCIE_LNK_X32:
6114 				adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
6115 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
6116 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
6117 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
6118 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
6119 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6120 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6121 				break;
6122 			case PCIE_LNK_X16:
6123 				adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
6124 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
6125 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
6126 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
6127 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6128 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6129 				break;
6130 			case PCIE_LNK_X12:
6131 				adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
6132 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
6133 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
6134 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6135 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6136 				break;
6137 			case PCIE_LNK_X8:
6138 				adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
6139 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
6140 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6141 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6142 				break;
6143 			case PCIE_LNK_X4:
6144 				adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
6145 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6146 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6147 				break;
6148 			case PCIE_LNK_X2:
6149 				adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6150 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6151 				break;
6152 			case PCIE_LNK_X1:
6153 				adev->pm.pcie_mlw_mask |= CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
6154 				break;
6155 			default:
6156 				break;
6157 			}
6158 		}
6159 	}
6160 }
6161 
6162 /**
6163  * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
6164  *
6165  * @adev: amdgpu_device pointer
6166  * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
6167  *
6168  * Return true if @peer_adev can access (DMA) @adev through the PCIe
6169  * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
6170  * @peer_adev.
6171  */
6172 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
6173 				      struct amdgpu_device *peer_adev)
6174 {
6175 #ifdef CONFIG_HSA_AMD_P2P
6176 	bool p2p_access =
6177 		!adev->gmc.xgmi.connected_to_cpu &&
6178 		!(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
6179 	if (!p2p_access)
6180 		dev_info(adev->dev, "PCIe P2P access from peer device %s is not supported by the chipset\n",
6181 			pci_name(peer_adev->pdev));
6182 
6183 	bool is_large_bar = adev->gmc.visible_vram_size &&
6184 		adev->gmc.real_vram_size == adev->gmc.visible_vram_size;
6185 	bool p2p_addressable = amdgpu_device_check_iommu_remap(peer_adev);
6186 
6187 	if (!p2p_addressable) {
6188 		uint64_t address_mask = peer_adev->dev->dma_mask ?
6189 			~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
6190 		resource_size_t aper_limit =
6191 			adev->gmc.aper_base + adev->gmc.aper_size - 1;
6192 
6193 		p2p_addressable = !(adev->gmc.aper_base & address_mask ||
6194 				     aper_limit & address_mask);
6195 	}
6196 	return pcie_p2p && is_large_bar && p2p_access && p2p_addressable;
6197 #else
6198 	return false;
6199 #endif
6200 }
6201 
6202 int amdgpu_device_baco_enter(struct amdgpu_device *adev)
6203 {
6204 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
6205 
6206 	if (!amdgpu_device_supports_baco(adev))
6207 		return -ENOTSUPP;
6208 
6209 	if (ras && adev->ras_enabled &&
6210 	    adev->nbio.funcs->enable_doorbell_interrupt)
6211 		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
6212 
6213 	return amdgpu_dpm_baco_enter(adev);
6214 }
6215 
6216 int amdgpu_device_baco_exit(struct amdgpu_device *adev)
6217 {
6218 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
6219 	int ret = 0;
6220 
6221 	if (!amdgpu_device_supports_baco(adev))
6222 		return -ENOTSUPP;
6223 
6224 	ret = amdgpu_dpm_baco_exit(adev);
6225 	if (ret)
6226 		return ret;
6227 
6228 	if (ras && adev->ras_enabled &&
6229 	    adev->nbio.funcs->enable_doorbell_interrupt)
6230 		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
6231 
6232 	if (amdgpu_passthrough(adev) && adev->nbio.funcs &&
6233 	    adev->nbio.funcs->clear_doorbell_interrupt)
6234 		adev->nbio.funcs->clear_doorbell_interrupt(adev);
6235 
6236 	return 0;
6237 }
6238 
6239 /**
6240  * amdgpu_pci_error_detected - Called when a PCI error is detected.
6241  * @pdev: PCI device struct
6242  * @state: PCI channel state
6243  *
6244  * Description: Called when a PCI error is detected.
6245  *
6246  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
6247  */
6248 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
6249 {
6250 	struct drm_device *dev = pci_get_drvdata(pdev);
6251 	struct amdgpu_device *adev = drm_to_adev(dev);
6252 	struct amdgpu_hive_info *hive __free(xgmi_put_hive) =
6253 		amdgpu_get_xgmi_hive(adev);
6254 	struct amdgpu_reset_context reset_context;
6255 	struct list_head device_list;
6256 
6257 	dev_info(adev->dev, "PCI error: detected callback!!\n");
6258 
6259 	adev->pci_channel_state = state;
6260 
6261 	switch (state) {
6262 	case pci_channel_io_normal:
6263 		dev_info(adev->dev, "pci_channel_io_normal: state(%d)!!\n", state);
6264 		return PCI_ERS_RESULT_CAN_RECOVER;
6265 	case pci_channel_io_frozen:
6266 		/* Fatal error, prepare for slot reset */
6267 		dev_info(adev->dev, "pci_channel_io_frozen: state(%d)!!\n", state);
6268 		if (hive) {
6269 			/* Hive devices should be able to support FW based
6270 			 * link reset on other devices, if not return.
6271 			 */
6272 			if (!amdgpu_dpm_is_link_reset_supported(adev)) {
6273 				dev_warn(adev->dev,
6274 					 "No support for XGMI hive yet...\n");
6275 				return PCI_ERS_RESULT_DISCONNECT;
6276 			}
6277 			/* Set dpc status only if device is part of hive
6278 			 * Non-hive devices should be able to recover after
6279 			 * link reset.
6280 			 */
6281 			amdgpu_reset_set_dpc_status(adev, true);
6282 
6283 			mutex_lock(&hive->hive_lock);
6284 		}
6285 		memset(&reset_context, 0, sizeof(reset_context));
6286 		INIT_LIST_HEAD(&device_list);
6287 
6288 		amdgpu_device_recovery_prepare(adev, &device_list, hive);
6289 		amdgpu_device_recovery_get_reset_lock(adev, &device_list);
6290 		amdgpu_device_halt_activities(adev, NULL, &reset_context, &device_list,
6291 					      hive, false);
6292 		if (hive)
6293 			mutex_unlock(&hive->hive_lock);
6294 		return PCI_ERS_RESULT_NEED_RESET;
6295 	case pci_channel_io_perm_failure:
6296 		/* Permanent error, prepare for device removal */
6297 		dev_info(adev->dev, "pci_channel_io_perm_failure: state(%d)!!\n", state);
6298 		return PCI_ERS_RESULT_DISCONNECT;
6299 	}
6300 
6301 	return PCI_ERS_RESULT_NEED_RESET;
6302 }
6303 
6304 /**
6305  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
6306  * @pdev: pointer to PCI device
6307  */
6308 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
6309 {
6310 	struct drm_device *dev = pci_get_drvdata(pdev);
6311 	struct amdgpu_device *adev = drm_to_adev(dev);
6312 
6313 	dev_info(adev->dev, "PCI error: mmio enabled callback!!\n");
6314 
6315 	/* TODO - dump whatever for debugging purposes */
6316 
6317 	/* This called only if amdgpu_pci_error_detected returns
6318 	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
6319 	 * works, no need to reset slot.
6320 	 */
6321 
6322 	return PCI_ERS_RESULT_RECOVERED;
6323 }
6324 
6325 /**
6326  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
6327  * @pdev: PCI device struct
6328  *
6329  * Description: This routine is called by the pci error recovery
6330  * code after the PCI slot has been reset, just before we
6331  * should resume normal operations.
6332  */
6333 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
6334 {
6335 	struct drm_device *dev = pci_get_drvdata(pdev);
6336 	struct amdgpu_device *adev = drm_to_adev(dev);
6337 	struct amdgpu_reset_context reset_context;
6338 	struct amdgpu_device *tmp_adev;
6339 	struct amdgpu_hive_info *hive;
6340 	struct list_head device_list;
6341 	struct pci_dev *link_dev;
6342 	int r = 0, i, timeout;
6343 	u32 memsize;
6344 	u16 status;
6345 
6346 	dev_info(adev->dev, "PCI error: slot reset callback!!\n");
6347 
6348 	memset(&reset_context, 0, sizeof(reset_context));
6349 	INIT_LIST_HEAD(&device_list);
6350 	hive = amdgpu_get_xgmi_hive(adev);
6351 	if (hive) {
6352 		mutex_lock(&hive->hive_lock);
6353 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
6354 			list_add_tail(&tmp_adev->reset_list, &device_list);
6355 	} else {
6356 		list_add_tail(&adev->reset_list, &device_list);
6357 	}
6358 
6359 	if (adev->pcie_reset_ctx.swus)
6360 		link_dev = adev->pcie_reset_ctx.swus;
6361 	else
6362 		link_dev = adev->pdev;
6363 	/* wait for asic to come out of reset, timeout = 10s */
6364 	timeout = 10000;
6365 	do {
6366 		usleep_range(10000, 10500);
6367 		r = pci_read_config_word(link_dev, PCI_VENDOR_ID, &status);
6368 		timeout -= 10;
6369 	} while (timeout > 0 && (status != PCI_VENDOR_ID_ATI) &&
6370 		 (status != PCI_VENDOR_ID_AMD));
6371 
6372 	if ((status != PCI_VENDOR_ID_ATI) && (status != PCI_VENDOR_ID_AMD)) {
6373 		r = -ETIME;
6374 		goto out;
6375 	}
6376 
6377 	amdgpu_device_load_switch_state(adev);
6378 	/* Restore PCI confspace */
6379 	amdgpu_device_load_pci_state(pdev);
6380 
6381 	/* confirm  ASIC came out of reset */
6382 	for (i = 0; i < adev->usec_timeout; i++) {
6383 		memsize = amdgpu_asic_get_config_memsize(adev);
6384 
6385 		if (memsize != 0xffffffff)
6386 			break;
6387 		udelay(1);
6388 	}
6389 	if (memsize == 0xffffffff) {
6390 		r = -ETIME;
6391 		goto out;
6392 	}
6393 
6394 	reset_context.method = AMD_RESET_METHOD_NONE;
6395 	reset_context.reset_req_dev = adev;
6396 	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
6397 	set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
6398 
6399 	if (hive) {
6400 		reset_context.hive = hive;
6401 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
6402 			tmp_adev->pcie_reset_ctx.in_link_reset = true;
6403 	} else {
6404 		set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
6405 	}
6406 
6407 	r = amdgpu_device_asic_reset(adev, &device_list, &reset_context);
6408 out:
6409 	if (!r) {
6410 		if (amdgpu_device_cache_pci_state(adev->pdev))
6411 			pci_restore_state(adev->pdev);
6412 		dev_info(adev->dev, "PCIe error recovery succeeded\n");
6413 	} else {
6414 		dev_err(adev->dev, "PCIe error recovery failed, err:%d\n", r);
6415 		if (hive) {
6416 			list_for_each_entry(tmp_adev, &device_list, reset_list)
6417 				amdgpu_device_unset_mp1_state(tmp_adev);
6418 		}
6419 		amdgpu_device_recovery_put_reset_lock(adev, &device_list);
6420 	}
6421 
6422 	if (hive) {
6423 		mutex_unlock(&hive->hive_lock);
6424 		amdgpu_put_xgmi_hive(hive);
6425 	}
6426 
6427 	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
6428 }
6429 
6430 /**
6431  * amdgpu_pci_resume() - resume normal ops after PCI reset
6432  * @pdev: pointer to PCI device
6433  *
6434  * Called when the error recovery driver tells us that its
6435  * OK to resume normal operation.
6436  */
6437 void amdgpu_pci_resume(struct pci_dev *pdev)
6438 {
6439 	struct drm_device *dev = pci_get_drvdata(pdev);
6440 	struct amdgpu_device *adev = drm_to_adev(dev);
6441 	struct list_head device_list;
6442 	struct amdgpu_hive_info *hive = NULL;
6443 	struct amdgpu_device *tmp_adev = NULL;
6444 
6445 	dev_info(adev->dev, "PCI error: resume callback!!\n");
6446 
6447 	/* Only continue execution for the case of pci_channel_io_frozen */
6448 	if (adev->pci_channel_state != pci_channel_io_frozen)
6449 		return;
6450 
6451 	INIT_LIST_HEAD(&device_list);
6452 
6453 	hive = amdgpu_get_xgmi_hive(adev);
6454 	if (hive) {
6455 		mutex_lock(&hive->hive_lock);
6456 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
6457 			tmp_adev->pcie_reset_ctx.in_link_reset = false;
6458 			list_add_tail(&tmp_adev->reset_list, &device_list);
6459 		}
6460 	} else
6461 		list_add_tail(&adev->reset_list, &device_list);
6462 
6463 	amdgpu_device_sched_resume(&device_list, NULL, NULL);
6464 	amdgpu_device_gpu_resume(adev, &device_list, false);
6465 	amdgpu_device_recovery_put_reset_lock(adev, &device_list);
6466 
6467 	if (hive) {
6468 		mutex_unlock(&hive->hive_lock);
6469 		amdgpu_put_xgmi_hive(hive);
6470 	}
6471 }
6472 
6473 static void amdgpu_device_cache_switch_state(struct amdgpu_device *adev)
6474 {
6475 	struct pci_dev *swus, *swds;
6476 	int r;
6477 
6478 	swds = pci_upstream_bridge(adev->pdev);
6479 	if (!swds || swds->vendor != PCI_VENDOR_ID_ATI ||
6480 	    pci_pcie_type(swds) != PCI_EXP_TYPE_DOWNSTREAM)
6481 		return;
6482 	swus = pci_upstream_bridge(swds);
6483 	if (!swus ||
6484 	    (swus->vendor != PCI_VENDOR_ID_ATI &&
6485 	     swus->vendor != PCI_VENDOR_ID_AMD) ||
6486 	    pci_pcie_type(swus) != PCI_EXP_TYPE_UPSTREAM)
6487 		return;
6488 
6489 	/* If already saved, return */
6490 	if (adev->pcie_reset_ctx.swus)
6491 		return;
6492 	/* Upstream bridge is ATI, assume it's SWUS/DS architecture */
6493 	r = pci_save_state(swds);
6494 	if (r)
6495 		return;
6496 	adev->pcie_reset_ctx.swds_pcistate = pci_store_saved_state(swds);
6497 
6498 	r = pci_save_state(swus);
6499 	if (r)
6500 		return;
6501 	adev->pcie_reset_ctx.swus_pcistate = pci_store_saved_state(swus);
6502 
6503 	adev->pcie_reset_ctx.swus = swus;
6504 }
6505 
6506 static void amdgpu_device_load_switch_state(struct amdgpu_device *adev)
6507 {
6508 	struct pci_dev *pdev;
6509 	int r;
6510 
6511 	if (!adev->pcie_reset_ctx.swds_pcistate ||
6512 	    !adev->pcie_reset_ctx.swus_pcistate)
6513 		return;
6514 
6515 	pdev = adev->pcie_reset_ctx.swus;
6516 	r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swus_pcistate);
6517 	if (!r) {
6518 		pci_restore_state(pdev);
6519 	} else {
6520 		dev_warn(adev->dev, "Failed to load SWUS state, err:%d\n", r);
6521 		return;
6522 	}
6523 
6524 	pdev = pci_upstream_bridge(adev->pdev);
6525 	r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swds_pcistate);
6526 	if (!r)
6527 		pci_restore_state(pdev);
6528 	else
6529 		dev_warn(adev->dev, "Failed to load SWDS state, err:%d\n", r);
6530 }
6531 
6532 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
6533 {
6534 	struct drm_device *dev = pci_get_drvdata(pdev);
6535 	struct amdgpu_device *adev = drm_to_adev(dev);
6536 	int r;
6537 
6538 	if (amdgpu_sriov_vf(adev))
6539 		return false;
6540 
6541 	r = pci_save_state(pdev);
6542 	if (!r) {
6543 		kfree(adev->pci_state);
6544 
6545 		adev->pci_state = pci_store_saved_state(pdev);
6546 
6547 		if (!adev->pci_state) {
6548 			dev_err(adev->dev, "Failed to store PCI saved state");
6549 			return false;
6550 		}
6551 	} else {
6552 		dev_warn(adev->dev, "Failed to save PCI state, err:%d\n", r);
6553 		return false;
6554 	}
6555 
6556 	amdgpu_device_cache_switch_state(adev);
6557 
6558 	return true;
6559 }
6560 
6561 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
6562 {
6563 	struct drm_device *dev = pci_get_drvdata(pdev);
6564 	struct amdgpu_device *adev = drm_to_adev(dev);
6565 	int r;
6566 
6567 	if (!adev->pci_state)
6568 		return false;
6569 
6570 	r = pci_load_saved_state(pdev, adev->pci_state);
6571 
6572 	if (!r) {
6573 		pci_restore_state(pdev);
6574 	} else {
6575 		dev_warn(adev->dev, "Failed to load PCI state, err:%d\n", r);
6576 		return false;
6577 	}
6578 
6579 	return true;
6580 }
6581 
6582 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
6583 		struct amdgpu_ring *ring)
6584 {
6585 #ifdef CONFIG_X86_64
6586 	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6587 		return;
6588 #endif
6589 	if (adev->gmc.xgmi.connected_to_cpu)
6590 		return;
6591 
6592 	if (ring && ring->funcs->emit_hdp_flush) {
6593 		amdgpu_ring_emit_hdp_flush(ring);
6594 		return;
6595 	}
6596 
6597 	if (!ring && amdgpu_sriov_runtime(adev)) {
6598 		if (!amdgpu_kiq_hdp_flush(adev))
6599 			return;
6600 	}
6601 
6602 	amdgpu_hdp_flush(adev, ring);
6603 }
6604 
6605 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
6606 		struct amdgpu_ring *ring)
6607 {
6608 #ifdef CONFIG_X86_64
6609 	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6610 		return;
6611 #endif
6612 	if (adev->gmc.xgmi.connected_to_cpu)
6613 		return;
6614 
6615 	amdgpu_hdp_invalidate(adev, ring);
6616 }
6617 
6618 int amdgpu_in_reset(struct amdgpu_device *adev)
6619 {
6620 	return atomic_read(&adev->reset_domain->in_gpu_reset);
6621 }
6622 
6623 /**
6624  * amdgpu_device_halt() - bring hardware to some kind of halt state
6625  *
6626  * @adev: amdgpu_device pointer
6627  *
6628  * Bring hardware to some kind of halt state so that no one can touch it
6629  * any more. It will help to maintain error context when error occurred.
6630  * Compare to a simple hang, the system will keep stable at least for SSH
6631  * access. Then it should be trivial to inspect the hardware state and
6632  * see what's going on. Implemented as following:
6633  *
6634  * 1. drm_dev_unplug() makes device inaccessible to user space(IOCTLs, etc),
6635  *    clears all CPU mappings to device, disallows remappings through page faults
6636  * 2. amdgpu_irq_disable_all() disables all interrupts
6637  * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
6638  * 4. set adev->no_hw_access to avoid potential crashes after setp 5
6639  * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
6640  * 6. pci_disable_device() and pci_wait_for_pending_transaction()
6641  *    flush any in flight DMA operations
6642  */
6643 void amdgpu_device_halt(struct amdgpu_device *adev)
6644 {
6645 	struct pci_dev *pdev = adev->pdev;
6646 	struct drm_device *ddev = adev_to_drm(adev);
6647 
6648 	amdgpu_xcp_dev_unplug(adev);
6649 	drm_dev_unplug(ddev);
6650 
6651 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
6652 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
6653 
6654 	amdgpu_irq_disable_all(adev);
6655 
6656 	amdgpu_fence_driver_hw_fini(adev);
6657 
6658 	adev->no_hw_access = true;
6659 
6660 	amdgpu_device_unmap_mmio(adev);
6661 
6662 	pci_disable_device(pdev);
6663 	pci_wait_for_pending_transaction(pdev);
6664 }
6665 
6666 /**
6667  * amdgpu_device_get_gang - return a reference to the current gang
6668  * @adev: amdgpu_device pointer
6669  *
6670  * Returns: A new reference to the current gang leader.
6671  */
6672 struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev)
6673 {
6674 	struct dma_fence *fence;
6675 
6676 	rcu_read_lock();
6677 	fence = dma_fence_get_rcu_safe(&adev->gang_submit);
6678 	rcu_read_unlock();
6679 	return fence;
6680 }
6681 
6682 /**
6683  * amdgpu_device_switch_gang - switch to a new gang
6684  * @adev: amdgpu_device pointer
6685  * @gang: the gang to switch to
6686  *
6687  * Try to switch to a new gang.
6688  * Returns: NULL if we switched to the new gang or a reference to the current
6689  * gang leader.
6690  */
6691 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
6692 					    struct dma_fence *gang)
6693 {
6694 	struct dma_fence *old = NULL;
6695 
6696 	dma_fence_get(gang);
6697 	do {
6698 		dma_fence_put(old);
6699 		old = amdgpu_device_get_gang(adev);
6700 		if (old == gang)
6701 			break;
6702 
6703 		if (!dma_fence_is_signaled(old)) {
6704 			dma_fence_put(gang);
6705 			return old;
6706 		}
6707 
6708 	} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
6709 			 old, gang) != old);
6710 
6711 	/*
6712 	 * Drop it once for the exchanged reference in adev and once for the
6713 	 * thread local reference acquired in amdgpu_device_get_gang().
6714 	 */
6715 	dma_fence_put(old);
6716 	dma_fence_put(old);
6717 	return NULL;
6718 }
6719 
6720 /**
6721  * amdgpu_device_enforce_isolation - enforce HW isolation
6722  * @adev: the amdgpu device pointer
6723  * @ring: the HW ring the job is supposed to run on
6724  * @job: the job which is about to be pushed to the HW ring
6725  *
6726  * Makes sure that only one client at a time can use the GFX block.
6727  * Returns: The dependency to wait on before the job can be pushed to the HW.
6728  * The function is called multiple times until NULL is returned.
6729  */
6730 struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev,
6731 						  struct amdgpu_ring *ring,
6732 						  struct amdgpu_job *job)
6733 {
6734 	struct amdgpu_isolation *isolation = &adev->isolation[ring->xcp_id];
6735 	struct drm_sched_fence *f = job->base.s_fence;
6736 	struct dma_fence *dep;
6737 	void *owner;
6738 	int r;
6739 
6740 	/*
6741 	 * For now enforce isolation only for the GFX block since we only need
6742 	 * the cleaner shader on those rings.
6743 	 */
6744 	if (ring->funcs->type != AMDGPU_RING_TYPE_GFX &&
6745 	    ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
6746 		return NULL;
6747 
6748 	/*
6749 	 * All submissions where enforce isolation is false are handled as if
6750 	 * they come from a single client. Use ~0l as the owner to distinct it
6751 	 * from kernel submissions where the owner is NULL.
6752 	 */
6753 	owner = job->enforce_isolation ? f->owner : (void *)~0l;
6754 
6755 	mutex_lock(&adev->enforce_isolation_mutex);
6756 
6757 	/*
6758 	 * The "spearhead" submission is the first one which changes the
6759 	 * ownership to its client. We always need to wait for it to be
6760 	 * pushed to the HW before proceeding with anything.
6761 	 */
6762 	if (&f->scheduled != isolation->spearhead &&
6763 	    !dma_fence_is_signaled(isolation->spearhead)) {
6764 		dep = isolation->spearhead;
6765 		goto out_grab_ref;
6766 	}
6767 
6768 	if (isolation->owner != owner) {
6769 
6770 		/*
6771 		 * Wait for any gang to be assembled before switching to a
6772 		 * different owner or otherwise we could deadlock the
6773 		 * submissions.
6774 		 */
6775 		if (!job->gang_submit) {
6776 			dep = amdgpu_device_get_gang(adev);
6777 			if (!dma_fence_is_signaled(dep))
6778 				goto out_return_dep;
6779 			dma_fence_put(dep);
6780 		}
6781 
6782 		dma_fence_put(isolation->spearhead);
6783 		isolation->spearhead = dma_fence_get(&f->scheduled);
6784 		amdgpu_sync_move(&isolation->active, &isolation->prev);
6785 		trace_amdgpu_isolation(isolation->owner, owner);
6786 		isolation->owner = owner;
6787 	}
6788 
6789 	/*
6790 	 * Specifying the ring here helps to pipeline submissions even when
6791 	 * isolation is enabled. If that is not desired for testing NULL can be
6792 	 * used instead of the ring to enforce a CPU round trip while switching
6793 	 * between clients.
6794 	 */
6795 	dep = amdgpu_sync_peek_fence(&isolation->prev, ring);
6796 	r = amdgpu_sync_fence(&isolation->active, &f->finished, GFP_NOWAIT);
6797 	if (r)
6798 		dev_warn(adev->dev, "OOM tracking isolation\n");
6799 
6800 out_grab_ref:
6801 	dma_fence_get(dep);
6802 out_return_dep:
6803 	mutex_unlock(&adev->enforce_isolation_mutex);
6804 	return dep;
6805 }
6806 
6807 bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
6808 {
6809 	switch (adev->asic_type) {
6810 #ifdef CONFIG_DRM_AMDGPU_SI
6811 	case CHIP_HAINAN:
6812 #endif
6813 	case CHIP_TOPAZ:
6814 		/* chips with no display hardware */
6815 		return false;
6816 #ifdef CONFIG_DRM_AMDGPU_SI
6817 	case CHIP_TAHITI:
6818 	case CHIP_PITCAIRN:
6819 	case CHIP_VERDE:
6820 	case CHIP_OLAND:
6821 #endif
6822 #ifdef CONFIG_DRM_AMDGPU_CIK
6823 	case CHIP_BONAIRE:
6824 	case CHIP_HAWAII:
6825 	case CHIP_KAVERI:
6826 	case CHIP_KABINI:
6827 	case CHIP_MULLINS:
6828 #endif
6829 	case CHIP_TONGA:
6830 	case CHIP_FIJI:
6831 	case CHIP_POLARIS10:
6832 	case CHIP_POLARIS11:
6833 	case CHIP_POLARIS12:
6834 	case CHIP_VEGAM:
6835 	case CHIP_CARRIZO:
6836 	case CHIP_STONEY:
6837 		/* chips with display hardware */
6838 		return true;
6839 	default:
6840 		/* IP discovery */
6841 		if (!amdgpu_ip_version(adev, DCE_HWIP, 0) ||
6842 		    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
6843 			return false;
6844 		return true;
6845 	}
6846 }
6847 
6848 ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring)
6849 {
6850 	ssize_t size = 0;
6851 
6852 	if (!ring || !ring->adev)
6853 		return size;
6854 
6855 	if (amdgpu_device_should_recover_gpu(ring->adev))
6856 		size |= AMDGPU_RESET_TYPE_FULL;
6857 
6858 	if (unlikely(!ring->adev->debug_disable_soft_recovery) &&
6859 	    !amdgpu_sriov_vf(ring->adev) && ring->funcs->soft_recovery)
6860 		size |= AMDGPU_RESET_TYPE_SOFT_RESET;
6861 
6862 	return size;
6863 }
6864 
6865 ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset)
6866 {
6867 	ssize_t size = 0;
6868 
6869 	if (supported_reset == 0) {
6870 		size += sysfs_emit_at(buf, size, "unsupported");
6871 		size += sysfs_emit_at(buf, size, "\n");
6872 		return size;
6873 
6874 	}
6875 
6876 	if (supported_reset & AMDGPU_RESET_TYPE_SOFT_RESET)
6877 		size += sysfs_emit_at(buf, size, "soft ");
6878 
6879 	if (supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)
6880 		size += sysfs_emit_at(buf, size, "queue ");
6881 
6882 	if (supported_reset & AMDGPU_RESET_TYPE_PER_PIPE)
6883 		size += sysfs_emit_at(buf, size, "pipe ");
6884 
6885 	if (supported_reset & AMDGPU_RESET_TYPE_FULL)
6886 		size += sysfs_emit_at(buf, size, "full ");
6887 
6888 	size += sysfs_emit_at(buf, size, "\n");
6889 	return size;
6890 }
6891 
6892 void amdgpu_device_set_uid(struct amdgpu_uid *uid_info,
6893 			   enum amdgpu_uid_type type, uint8_t inst,
6894 			   uint64_t uid)
6895 {
6896 	if (!uid_info)
6897 		return;
6898 
6899 	if (type >= AMDGPU_UID_TYPE_MAX) {
6900 		dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n",
6901 			     type);
6902 		return;
6903 	}
6904 
6905 	if (inst >= AMDGPU_UID_INST_MAX) {
6906 		dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n",
6907 			     inst);
6908 		return;
6909 	}
6910 
6911 	if (uid_info->uid[type][inst] != 0) {
6912 		dev_warn_once(
6913 			uid_info->adev->dev,
6914 			"Overwriting existing UID %llu for type %d instance %d\n",
6915 			uid_info->uid[type][inst], type, inst);
6916 	}
6917 
6918 	uid_info->uid[type][inst] = uid;
6919 }
6920 
6921 u64 amdgpu_device_get_uid(struct amdgpu_uid *uid_info,
6922 			  enum amdgpu_uid_type type, uint8_t inst)
6923 {
6924 	if (!uid_info)
6925 		return 0;
6926 
6927 	if (type >= AMDGPU_UID_TYPE_MAX) {
6928 		dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n",
6929 			     type);
6930 		return 0;
6931 	}
6932 
6933 	if (inst >= AMDGPU_UID_INST_MAX) {
6934 		dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n",
6935 			     inst);
6936 		return 0;
6937 	}
6938 
6939 	return uid_info->uid[type][inst];
6940 }
6941