xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c (revision bd096a56da7cad1c93c0138a64478b43f5a94736)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 
29 #include <linux/aperture.h>
30 #include <linux/power_supply.h>
31 #include <linux/kthread.h>
32 #include <linux/module.h>
33 #include <linux/console.h>
34 #include <linux/slab.h>
35 #include <linux/iommu.h>
36 #include <linux/pci.h>
37 #include <linux/pci-p2pdma.h>
38 #include <linux/apple-gmux.h>
39 #include <linux/nospec.h>
40 
41 #include <drm/drm_atomic_helper.h>
42 #include <drm/drm_client_event.h>
43 #include <drm/drm_crtc_helper.h>
44 #include <drm/drm_probe_helper.h>
45 #include <drm/amdgpu_drm.h>
46 #include <linux/device.h>
47 #include <linux/vgaarb.h>
48 #include <linux/vga_switcheroo.h>
49 #include <linux/efi.h>
50 #include "amdgpu.h"
51 #include "amdgpu_trace.h"
52 #include "amdgpu_i2c.h"
53 #include "atom.h"
54 #include "amdgpu_atombios.h"
55 #include "amdgpu_atomfirmware.h"
56 #include "amd_pcie.h"
57 #ifdef CONFIG_DRM_AMDGPU_SI
58 #include "si.h"
59 #endif
60 #ifdef CONFIG_DRM_AMDGPU_CIK
61 #include "cik.h"
62 #endif
63 #include "vi.h"
64 #include "soc15.h"
65 #include "nv.h"
66 #include "bif/bif_4_1_d.h"
67 #include <linux/firmware.h>
68 #include "amdgpu_vf_error.h"
69 
70 #include "amdgpu_amdkfd.h"
71 #include "amdgpu_pm.h"
72 
73 #include "amdgpu_xgmi.h"
74 #include "amdgpu_ras.h"
75 #include "amdgpu_ras_mgr.h"
76 #include "amdgpu_pmu.h"
77 #include "amdgpu_fru_eeprom.h"
78 #include "amdgpu_reset.h"
79 #include "amdgpu_virt.h"
80 #include "amdgpu_dev_coredump.h"
81 
82 #include <linux/suspend.h>
83 #include <drm/task_barrier.h>
84 #include <linux/pm_runtime.h>
85 
86 #include <drm/drm_drv.h>
87 
88 #if IS_ENABLED(CONFIG_X86)
89 #include <asm/intel-family.h>
90 #include <asm/cpu_device_id.h>
91 #endif
92 
93 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
94 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
95 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
96 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
97 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
98 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
99 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
100 MODULE_FIRMWARE("amdgpu/cyan_skillfish_gpu_info.bin");
101 
102 #define AMDGPU_RESUME_MS		2000
103 #define AMDGPU_MAX_RETRY_LIMIT		2
104 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
105 #define AMDGPU_PCIE_INDEX_FALLBACK (0x38 >> 2)
106 #define AMDGPU_PCIE_INDEX_HI_FALLBACK (0x44 >> 2)
107 #define AMDGPU_PCIE_DATA_FALLBACK (0x3C >> 2)
108 
109 #define AMDGPU_VBIOS_SKIP (1U << 0)
110 #define AMDGPU_VBIOS_OPTIONAL (1U << 1)
111 
112 static const struct drm_driver amdgpu_kms_driver;
113 
114 const char *amdgpu_asic_name[] = {
115 	"TAHITI",
116 	"PITCAIRN",
117 	"VERDE",
118 	"OLAND",
119 	"HAINAN",
120 	"BONAIRE",
121 	"KAVERI",
122 	"KABINI",
123 	"HAWAII",
124 	"MULLINS",
125 	"TOPAZ",
126 	"TONGA",
127 	"FIJI",
128 	"CARRIZO",
129 	"STONEY",
130 	"POLARIS10",
131 	"POLARIS11",
132 	"POLARIS12",
133 	"VEGAM",
134 	"VEGA10",
135 	"VEGA12",
136 	"VEGA20",
137 	"RAVEN",
138 	"ARCTURUS",
139 	"RENOIR",
140 	"ALDEBARAN",
141 	"NAVI10",
142 	"CYAN_SKILLFISH",
143 	"NAVI14",
144 	"NAVI12",
145 	"SIENNA_CICHLID",
146 	"NAVY_FLOUNDER",
147 	"VANGOGH",
148 	"DIMGREY_CAVEFISH",
149 	"BEIGE_GOBY",
150 	"YELLOW_CARP",
151 	"IP DISCOVERY",
152 	"LAST",
153 };
154 
155 #define AMDGPU_IP_BLK_MASK_ALL GENMASK(AMD_IP_BLOCK_TYPE_NUM - 1, 0)
156 /*
157  * Default init level where all blocks are expected to be initialized. This is
158  * the level of initialization expected by default and also after a full reset
159  * of the device.
160  */
161 struct amdgpu_init_level amdgpu_init_default = {
162 	.level = AMDGPU_INIT_LEVEL_DEFAULT,
163 	.hwini_ip_block_mask = AMDGPU_IP_BLK_MASK_ALL,
164 };
165 
166 struct amdgpu_init_level amdgpu_init_recovery = {
167 	.level = AMDGPU_INIT_LEVEL_RESET_RECOVERY,
168 	.hwini_ip_block_mask = AMDGPU_IP_BLK_MASK_ALL,
169 };
170 
171 /*
172  * Minimal blocks needed to be initialized before an XGMI hive can be reset. This
173  * is used for cases like reset on initialization where the entire hive needs to
174  * be reset before first use.
175  */
176 struct amdgpu_init_level amdgpu_init_minimal_xgmi = {
177 	.level = AMDGPU_INIT_LEVEL_MINIMAL_XGMI,
178 	.hwini_ip_block_mask =
179 		BIT(AMD_IP_BLOCK_TYPE_GMC) | BIT(AMD_IP_BLOCK_TYPE_SMC) |
180 		BIT(AMD_IP_BLOCK_TYPE_COMMON) | BIT(AMD_IP_BLOCK_TYPE_IH) |
181 		BIT(AMD_IP_BLOCK_TYPE_PSP)
182 };
183 
184 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev);
185 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev);
186 static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev);
187 
188 static void amdgpu_device_load_switch_state(struct amdgpu_device *adev);
189 
190 static inline bool amdgpu_ip_member_of_hwini(struct amdgpu_device *adev,
191 					     enum amd_ip_block_type block)
192 {
193 	return (adev->init_lvl->hwini_ip_block_mask & (1U << block)) != 0;
194 }
195 
196 void amdgpu_set_init_level(struct amdgpu_device *adev,
197 			   enum amdgpu_init_lvl_id lvl)
198 {
199 	switch (lvl) {
200 	case AMDGPU_INIT_LEVEL_MINIMAL_XGMI:
201 		adev->init_lvl = &amdgpu_init_minimal_xgmi;
202 		break;
203 	case AMDGPU_INIT_LEVEL_RESET_RECOVERY:
204 		adev->init_lvl = &amdgpu_init_recovery;
205 		break;
206 	case AMDGPU_INIT_LEVEL_DEFAULT:
207 		fallthrough;
208 	default:
209 		adev->init_lvl = &amdgpu_init_default;
210 		break;
211 	}
212 }
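
/*
 * Illustrative usage sketch (not part of the driver flow itself): before an
 * XGMI hive is reset on initialization, only the minimal block set is armed
 * for hardware init:
 *
 *	amdgpu_set_init_level(adev, AMDGPU_INIT_LEVEL_MINIMAL_XGMI);
 *	// amdgpu_ip_member_of_hwini(adev, AMD_IP_BLOCK_TYPE_PSP) -> true
 *	// amdgpu_ip_member_of_hwini(adev, AMD_IP_BLOCK_TYPE_GFX) -> false
 */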
213 
214 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev);
215 static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
216 				     void *data);
217 
218 /**
219  * DOC: pcie_replay_count
220  *
221  * The amdgpu driver provides a sysfs API for reporting the total number
222  * of PCIe replays (NAKs).
223  * The file pcie_replay_count is used for this and returns the total
224  * number of replays as a sum of the NAKs generated and NAKs received.
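 *
 * Reading it is a plain sysfs access, e.g. (the path below is the typical
 * one and may differ per system):
 *
 *     $ cat /sys/class/drm/card0/device/pcie_replay_count
 *     0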
225  */
226 
227 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
228 		struct device_attribute *attr, char *buf)
229 {
230 	struct drm_device *ddev = dev_get_drvdata(dev);
231 	struct amdgpu_device *adev = drm_to_adev(ddev);
232 	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
233 
234 	return sysfs_emit(buf, "%llu\n", cnt);
235 }
236 
237 static DEVICE_ATTR(pcie_replay_count, 0444,
238 		amdgpu_device_get_pcie_replay_count, NULL);
239 
240 static int amdgpu_device_attr_sysfs_init(struct amdgpu_device *adev)
241 {
242 	int ret = 0;
243 
244 	if (amdgpu_nbio_is_replay_cnt_supported(adev))
245 		ret = sysfs_create_file(&adev->dev->kobj,
246 					&dev_attr_pcie_replay_count.attr);
247 
248 	return ret;
249 }
250 
251 static void amdgpu_device_attr_sysfs_fini(struct amdgpu_device *adev)
252 {
253 	if (amdgpu_nbio_is_replay_cnt_supported(adev))
254 		sysfs_remove_file(&adev->dev->kobj,
255 				  &dev_attr_pcie_replay_count.attr);
256 }
257 
258 static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj,
259 					  const struct bin_attribute *attr, char *buf,
260 					  loff_t ppos, size_t count)
261 {
262 	struct device *dev = kobj_to_dev(kobj);
263 	struct drm_device *ddev = dev_get_drvdata(dev);
264 	struct amdgpu_device *adev = drm_to_adev(ddev);
265 	ssize_t bytes_read;
266 
267 	switch (ppos) {
268 	case AMDGPU_SYS_REG_STATE_XGMI:
269 		bytes_read = amdgpu_asic_get_reg_state(
270 			adev, AMDGPU_REG_STATE_TYPE_XGMI, buf, count);
271 		break;
272 	case AMDGPU_SYS_REG_STATE_WAFL:
273 		bytes_read = amdgpu_asic_get_reg_state(
274 			adev, AMDGPU_REG_STATE_TYPE_WAFL, buf, count);
275 		break;
276 	case AMDGPU_SYS_REG_STATE_PCIE:
277 		bytes_read = amdgpu_asic_get_reg_state(
278 			adev, AMDGPU_REG_STATE_TYPE_PCIE, buf, count);
279 		break;
280 	case AMDGPU_SYS_REG_STATE_USR:
281 		bytes_read = amdgpu_asic_get_reg_state(
282 			adev, AMDGPU_REG_STATE_TYPE_USR, buf, count);
283 		break;
284 	case AMDGPU_SYS_REG_STATE_USR_1:
285 		bytes_read = amdgpu_asic_get_reg_state(
286 			adev, AMDGPU_REG_STATE_TYPE_USR_1, buf, count);
287 		break;
288 	default:
289 		return -EINVAL;
290 	}
291 
292 	return bytes_read;
293 }
294 
295 static const BIN_ATTR(reg_state, 0444, amdgpu_sysfs_reg_state_get, NULL,
296 		      AMDGPU_SYS_REG_STATE_END);
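
/*
 * Illustrative userspace sketch (not part of the driver): the file offset of
 * the read selects which register-state type is returned, so a reader seeks
 * to the desired AMDGPU_SYS_REG_STATE_* value first, e.g. with pread(2):
 *
 *	nread = pread(fd, buf, sizeof(buf), AMDGPU_SYS_REG_STATE_XGMI);
 */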
297 
298 int amdgpu_reg_state_sysfs_init(struct amdgpu_device *adev)
299 {
300 	int ret;
301 
302 	if (!amdgpu_asic_get_reg_state_supported(adev))
303 		return 0;
304 
305 	ret = sysfs_create_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
306 
307 	return ret;
308 }
309 
310 void amdgpu_reg_state_sysfs_fini(struct amdgpu_device *adev)
311 {
312 	if (!amdgpu_asic_get_reg_state_supported(adev))
313 		return;
314 	sysfs_remove_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
315 }
316 
317 /**
318  * DOC: board_info
319  *
320  * The amdgpu driver provides a sysfs API for reporting board related information.
321  * It provides the form factor information in the format
322  *
323  *   type : form factor
324  *
325  * Possible form factor values
326  *
327  * - "cem"		- PCIE CEM card
328  * - "oam"		- Open Compute Accelerator Module
329  * - "unknown"	- Not known
330  *
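 * Example read (illustrative path and output):
 *
 *     $ cat /sys/class/drm/card0/device/board_info
 *     type : oam
 *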
331  */
332 
333 static ssize_t amdgpu_device_get_board_info(struct device *dev,
334 					    struct device_attribute *attr,
335 					    char *buf)
336 {
337 	struct drm_device *ddev = dev_get_drvdata(dev);
338 	struct amdgpu_device *adev = drm_to_adev(ddev);
339 	enum amdgpu_pkg_type pkg_type = AMDGPU_PKG_TYPE_CEM;
340 	const char *pkg;
341 
342 	if (adev->smuio.funcs && adev->smuio.funcs->get_pkg_type)
343 		pkg_type = adev->smuio.funcs->get_pkg_type(adev);
344 
345 	switch (pkg_type) {
346 	case AMDGPU_PKG_TYPE_CEM:
347 		pkg = "cem";
348 		break;
349 	case AMDGPU_PKG_TYPE_OAM:
350 		pkg = "oam";
351 		break;
352 	default:
353 		pkg = "unknown";
354 		break;
355 	}
356 
357 	return sysfs_emit(buf, "%s : %s\n", "type", pkg);
358 }
359 
360 static DEVICE_ATTR(board_info, 0444, amdgpu_device_get_board_info, NULL);
361 
362 static struct attribute *amdgpu_board_attrs[] = {
363 	&dev_attr_board_info.attr,
364 	NULL,
365 };
366 
367 static umode_t amdgpu_board_attrs_is_visible(struct kobject *kobj,
368 					     struct attribute *attr, int n)
369 {
370 	struct device *dev = kobj_to_dev(kobj);
371 	struct drm_device *ddev = dev_get_drvdata(dev);
372 	struct amdgpu_device *adev = drm_to_adev(ddev);
373 
374 	if (adev->flags & AMD_IS_APU)
375 		return 0;
376 
377 	return attr->mode;
378 }
379 
380 static const struct attribute_group amdgpu_board_attrs_group = {
381 	.attrs = amdgpu_board_attrs,
382 	.is_visible = amdgpu_board_attrs_is_visible
383 };
384 
385 /**
386  * DOC: uma/carveout_options
387  *
388  * This is a read-only file that lists all available UMA allocation
389  * options and their corresponding indices. Example output::
390  *
391  *     $ cat uma/carveout_options
392  *     0: Minimum (512 MB)
393  *     1:  (1 GB)
394  *     2:  (2 GB)
395  *     3:  (4 GB)
396  *     4:  (6 GB)
397  *     5:  (8 GB)
398  *     6:  (12 GB)
399  *     7: Medium (16 GB)
400  *     8:  (24 GB)
401  *     9: High (32 GB)
402  */
403 static ssize_t carveout_options_show(struct device *dev,
404 				     struct device_attribute *attr,
405 				     char *buf)
406 {
407 	struct drm_device *ddev = dev_get_drvdata(dev);
408 	struct amdgpu_device *adev = drm_to_adev(ddev);
409 	struct amdgpu_uma_carveout_info *uma_info = &adev->uma_info;
410 	uint32_t memory_carved;
411 	ssize_t size = 0;
412 
413 	if (!uma_info || !uma_info->num_entries)
414 		return -ENODEV;
415 
416 	for (int i = 0; i < uma_info->num_entries; i++) {
417 		memory_carved = uma_info->entries[i].memory_carved_mb;
418 		if (memory_carved >= SZ_1G/SZ_1M) {
419 			size += sysfs_emit_at(buf, size, "%d: %s (%u GB)\n",
420 					      i,
421 					      uma_info->entries[i].name,
422 					      memory_carved >> 10);
423 		} else {
424 			size += sysfs_emit_at(buf, size, "%d: %s (%u MB)\n",
425 					      i,
426 					      uma_info->entries[i].name,
427 					      memory_carved);
428 		}
429 	}
430 
431 	return size;
432 }
433 static DEVICE_ATTR_RO(carveout_options);
434 
435 /**
436  * DOC: uma/carveout
437  *
438  * This file is both readable and writable. When read, it shows the
439  * index of the current setting. Writing a valid index to this file
440  * allows users to change the UMA carveout size to the selected option
441  * on the next boot.
442  *
443  * The available options and their corresponding indices can be read
444  * from the uma/carveout_options file.
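 *
 * Example (illustrative; the index written must exist in carveout_options):
 *
 *     $ echo 7 > uma/carveout
 *     $ cat uma/carveout
 *     7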
445  */
446 static ssize_t carveout_show(struct device *dev,
447 			     struct device_attribute *attr,
448 			     char *buf)
449 {
450 	struct drm_device *ddev = dev_get_drvdata(dev);
451 	struct amdgpu_device *adev = drm_to_adev(ddev);
452 
453 	return sysfs_emit(buf, "%u\n", adev->uma_info.uma_option_index);
454 }
455 
456 static ssize_t carveout_store(struct device *dev,
457 			      struct device_attribute *attr,
458 			      const char *buf, size_t count)
459 {
460 	struct drm_device *ddev = dev_get_drvdata(dev);
461 	struct amdgpu_device *adev = drm_to_adev(ddev);
462 	struct amdgpu_uma_carveout_info *uma_info = &adev->uma_info;
463 	struct amdgpu_uma_carveout_option *opt;
464 	unsigned long val;
465 	uint8_t flags;
466 	int r;
467 
468 	r = kstrtoul(buf, 10, &val);
469 	if (r)
470 		return r;
471 
472 	if (val >= uma_info->num_entries)
473 		return -EINVAL;
474 
475 	val = array_index_nospec(val, uma_info->num_entries);
476 	opt = &uma_info->entries[val];
477 
478 	if (!(opt->flags & AMDGPU_UMA_FLAG_AUTO) &&
479 	    !(opt->flags & AMDGPU_UMA_FLAG_CUSTOM)) {
480 		drm_err_once(ddev, "Option %lu not supported due to lack of Custom/Auto flag\n", val);
481 		return -EINVAL;
482 	}
483 
484 	flags = opt->flags;
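	/*
	 * If AUTO is set, clear the flag bit one position below it
	 * (presumably CUSTOM, given the flag layout) so that only one
	 * selection mode is passed down to the ACPI call.
	 */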
485 	flags &= ~((flags & AMDGPU_UMA_FLAG_AUTO) >> 1);
486 
487 	guard(mutex)(&uma_info->update_lock);
488 
489 	r = amdgpu_acpi_set_uma_allocation_size(adev, val, flags);
490 	if (r)
491 		return r;
492 
493 	uma_info->uma_option_index = val;
494 
495 	return count;
496 }
497 static DEVICE_ATTR_RW(carveout);
498 
499 static struct attribute *amdgpu_uma_attrs[] = {
500 	&dev_attr_carveout.attr,
501 	&dev_attr_carveout_options.attr,
502 	NULL
503 };
504 
505 const struct attribute_group amdgpu_uma_attr_group = {
506 	.name = "uma",
507 	.attrs = amdgpu_uma_attrs
508 };
509 
510 static void amdgpu_uma_sysfs_init(struct amdgpu_device *adev)
511 {
512 	int rc;
513 
514 	if (!(adev->flags & AMD_IS_APU))
515 		return;
516 
517 	if (!amdgpu_acpi_is_set_uma_allocation_size_supported())
518 		return;
519 
520 	rc = amdgpu_atomfirmware_get_uma_carveout_info(adev, &adev->uma_info);
521 	if (rc) {
522 		drm_dbg(adev_to_drm(adev),
523 			"Failed to parse UMA carveout info from VBIOS: %d\n", rc);
524 		goto out_info;
525 	}
526 
527 	mutex_init(&adev->uma_info.update_lock);
528 
529 	rc = devm_device_add_group(adev->dev, &amdgpu_uma_attr_group);
530 	if (rc) {
531 		drm_dbg(adev_to_drm(adev), "Failed to add UMA carveout sysfs interfaces %d\n", rc);
532 		goto out_attr;
533 	}
534 
535 	return;
536 
537 out_attr:
538 	mutex_destroy(&adev->uma_info.update_lock);
539 out_info:
540 	return;
541 }
542 
543 static void amdgpu_uma_sysfs_fini(struct amdgpu_device *adev)
544 {
545 	struct amdgpu_uma_carveout_info *uma_info = &adev->uma_info;
546 
547 	if (!amdgpu_acpi_is_set_uma_allocation_size_supported())
548 		return;
549 
550 	mutex_destroy(&uma_info->update_lock);
551 	uma_info->num_entries = 0;
552 }
553 
554 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
555 
556 /**
557  * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
558  *
559  * @adev: amdgpu device pointer
560  *
561  * Returns true if the device is a dGPU with ATPX power control,
562  * otherwise returns false.
563  */
564 bool amdgpu_device_supports_px(struct amdgpu_device *adev)
565 {
566 	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
567 		return true;
568 	return false;
569 }
570 
571 /**
572  * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
573  *
574  * @adev: amdgpu device pointer
575  *
576  * Returns true if the device is a dGPU with ACPI power control,
577  * otherwise returns false.
578  */
579 bool amdgpu_device_supports_boco(struct amdgpu_device *adev)
580 {
581 	if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
582 		return false;
583 
584 	if (adev->has_pr3 ||
585 	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
586 		return true;
587 	return false;
588 }
589 
590 /**
591  * amdgpu_device_supports_baco - Does the device support BACO
592  *
593  * @adev: amdgpu device pointer
594  *
595  * Return:
596  * 1 if the device supports BACO;
597  * 3 if the device supports MACO (only works if BACO is supported)
598  * otherwise returns 0.
599  */
600 int amdgpu_device_supports_baco(struct amdgpu_device *adev)
601 {
602 	return amdgpu_asic_supports_baco(adev);
603 }
604 
605 void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
606 {
607 	int bamaco_support;
608 
609 	adev->pm.rpm_mode = AMDGPU_RUNPM_NONE;
610 	bamaco_support = amdgpu_device_supports_baco(adev);
611 
612 	switch (amdgpu_runtime_pm) {
613 	case 2:
614 		if (bamaco_support & MACO_SUPPORT) {
615 			adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO;
616 			dev_info(adev->dev, "Forcing BAMACO for runtime pm\n");
617 		} else if (bamaco_support == BACO_SUPPORT) {
618 			adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
619 			dev_info(adev->dev, "Requested mode BAMACO not available, falling back to BACO\n");
620 		}
621 		break;
622 	case 1:
623 		if (bamaco_support & BACO_SUPPORT) {
624 			adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
625 			dev_info(adev->dev, "Forcing BACO for runtime pm\n");
626 		}
627 		break;
628 	case -1:
629 	case -2:
630 		if (amdgpu_device_supports_px(adev)) {
631 			/* enable PX as runtime mode */
632 			adev->pm.rpm_mode = AMDGPU_RUNPM_PX;
633 			dev_info(adev->dev, "Using ATPX for runtime pm\n");
634 		} else if (amdgpu_device_supports_boco(adev)) {
635 			/* enable boco as runtime mode */
636 			adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO;
637 			dev_info(adev->dev, "Using BOCO for runtime pm\n");
638 		} else {
639 			if (!bamaco_support)
640 				goto no_runtime_pm;
641 
642 			switch (adev->asic_type) {
643 			case CHIP_VEGA20:
644 			case CHIP_ARCTURUS:
645 				/* BACO is not supported on vega20 and arcturus */
646 				break;
647 			case CHIP_VEGA10:
648 				/* enable BACO as runpm mode if noretry=0 */
649 				if (!adev->gmc.noretry && !amdgpu_passthrough(adev))
650 					adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
651 				break;
652 			default:
653 				/* enable BACO as runpm mode on CI+ */
654 				if (!amdgpu_passthrough(adev))
655 					adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
656 				break;
657 			}
658 
659 			if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) {
660 				if (bamaco_support & MACO_SUPPORT) {
661 					adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO;
662 					dev_info(adev->dev, "Using BAMACO for runtime pm\n");
663 				} else {
664 					dev_info(adev->dev, "Using BACO for runtime pm\n");
665 				}
666 			}
667 		}
668 		break;
669 	case 0:
670 		dev_info(adev->dev, "runtime pm is manually disabled\n");
671 		break;
672 	default:
673 		break;
674 	}
675 
676 no_runtime_pm:
677 	if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE)
678 		dev_info(adev->dev, "Runtime PM not available\n");
679 }
680 /**
681  * amdgpu_device_supports_smart_shift - Is the device a dGPU with
682  * Smart Shift support
683  *
684  * @adev: amdgpu device pointer
685  *
686  * Returns true if the device is a dGPU with Smart Shift support,
687  * otherwise returns false.
688  */
689 bool amdgpu_device_supports_smart_shift(struct amdgpu_device *adev)
690 {
691 	return (amdgpu_device_supports_boco(adev) &&
692 		amdgpu_acpi_is_power_shift_control_supported());
693 }
694 
695 /*
696  * VRAM access helper functions
697  */
698 
699 /**
700  * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
701  *
702  * @adev: amdgpu_device pointer
703  * @pos: offset of the buffer in vram
704  * @buf: virtual address of the buffer in system memory
705  * @size: read/write size; the buffer at @buf must be at least @size bytes
706  * @write: true - write to vram, otherwise - read from vram
707  */
708 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
709 			     void *buf, size_t size, bool write)
710 {
711 	unsigned long flags;
712 	uint32_t hi = ~0, tmp = 0;
713 	uint32_t *data = buf;
714 	uint64_t last;
715 	int idx;
716 
717 	if (!drm_dev_enter(adev_to_drm(adev), &idx))
718 		return;
719 
720 	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
721 
722 	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
723 	for (last = pos + size; pos < last; pos += 4) {
724 		tmp = pos >> 31;
725 
726 		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
727 		if (tmp != hi) {
728 			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
729 			hi = tmp;
730 		}
731 		if (write)
732 			WREG32_NO_KIQ(mmMM_DATA, *data++);
733 		else
734 			*data++ = RREG32_NO_KIQ(mmMM_DATA);
735 	}
736 
737 	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
738 	drm_dev_exit(idx);
739 }
740 
741 /**
742  * amdgpu_device_aper_access - access vram by vram aperture
743  *
744  * @adev: amdgpu_device pointer
745  * @pos: offset of the buffer in vram
746  * @buf: virtual address of the buffer in system memory
747  * @size: read/write size; the buffer at @buf must be at least @size bytes
748  * @write: true - write to vram, otherwise - read from vram
749  *
750  * Returns the number of bytes transferred.
751  */
752 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
753 				 void *buf, size_t size, bool write)
754 {
755 #ifdef CONFIG_64BIT
756 	void __iomem *addr;
757 	size_t count = 0;
758 	uint64_t last;
759 
760 	if (!adev->mman.aper_base_kaddr)
761 		return 0;
762 
763 	last = min(pos + size, adev->gmc.visible_vram_size);
764 	if (last > pos) {
765 		addr = adev->mman.aper_base_kaddr + pos;
766 		count = last - pos;
767 
768 		if (write) {
769 			memcpy_toio(addr, buf, count);
770 			/* Make sure HDP write cache flush happens without any reordering
771 			 * after the system memory contents are sent over PCIe device
772 			 */
773 			mb();
774 			amdgpu_device_flush_hdp(adev, NULL);
775 		} else {
776 			amdgpu_device_invalidate_hdp(adev, NULL);
777 			/* Make sure HDP read cache is invalidated before issuing a read
778 			 * to the PCIe device
779 			 */
780 			mb();
781 			memcpy_fromio(buf, addr, count);
782 		}
783 
784 	}
785 
786 	return count;
787 #else
788 	return 0;
789 #endif
790 }
791 
792 /**
793  * amdgpu_device_vram_access - read/write a buffer in vram
794  *
795  * @adev: amdgpu_device pointer
796  * @pos: offset of the buffer in vram
797  * @buf: virtual address of the buffer in system memory
798  * @size: read/write size; the buffer at @buf must be at least @size bytes
799  * @write: true - write to vram, otherwise - read from vram
800  */
801 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
802 			       void *buf, size_t size, bool write)
803 {
804 	size_t count;
805 
806 	/* try using the vram aperture to access vram first */
807 	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
808 	size -= count;
809 	if (size) {
810 		/* use MM to access the rest of vram */
811 		pos += count;
812 		buf += count;
813 		amdgpu_device_mm_access(adev, pos, buf, size, write);
814 	}
815 }
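
/*
 * Illustrative usage sketch: read the first dword of VRAM into a local
 * buffer. The aperture path is tried first and the MM_INDEX/MM_DATA
 * fallback transparently covers whatever the aperture could not reach:
 *
 *	uint32_t val;
 *
 *	amdgpu_device_vram_access(adev, 0, &val, sizeof(val), false);
 */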
816 
817 /*
818  * register access helper functions.
819  */
820 
821 /* Check if hw access should be skipped because of hotplug or device error */
822 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
823 {
824 	if (adev->no_hw_access)
825 		return true;
826 
827 #ifdef CONFIG_LOCKDEP
828 	/*
829 	 * This is a bit complicated to understand, so worth a comment. What we assert
830 	 * here is that the GPU reset is not running on another thread in parallel.
831 	 *
832 	 * For this we trylock the read side of the reset semaphore, if that succeeds
833 	 * we know that the reset is not running in parallel.
834 	 *
835 	 * If the trylock fails we assert that we are either already holding the read
836 	 * side of the lock or are the reset thread itself and hold the write side of
837 	 * the lock.
838 	 */
839 	if (in_task()) {
840 		if (down_read_trylock(&adev->reset_domain->sem))
841 			up_read(&adev->reset_domain->sem);
842 		else
843 			lockdep_assert_held(&adev->reset_domain->sem);
844 	}
845 #endif
846 	return false;
847 }
848 
849 /**
850  * amdgpu_device_rreg - read a memory mapped IO or indirect register
851  *
852  * @adev: amdgpu_device pointer
853  * @reg: dword aligned register offset
854  * @acc_flags: access flags which require special behavior
855  *
856  * Returns the 32 bit value from the offset specified.
857  */
858 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
859 			    uint32_t reg, uint32_t acc_flags)
860 {
861 	uint32_t ret;
862 
863 	if (amdgpu_device_skip_hw_access(adev))
864 		return 0;
865 
866 	if ((reg * 4) < adev->rmmio_size) {
867 		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
868 		    amdgpu_sriov_runtime(adev) &&
869 		    down_read_trylock(&adev->reset_domain->sem)) {
870 			ret = amdgpu_kiq_rreg(adev, reg, 0);
871 			up_read(&adev->reset_domain->sem);
872 		} else {
873 			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
874 		}
875 	} else {
876 		ret = adev->pcie_rreg(adev, reg * 4);
877 	}
878 
879 	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
880 
881 	return ret;
882 }
883 
884 /*
885  * Byte-wide MMIO register read helper functions
886  * @offset: byte offset from MMIO start
887  */
888 
889 /**
890  * amdgpu_mm_rreg8 - read a memory mapped IO register
891  *
892  * @adev: amdgpu_device pointer
893  * @offset: byte aligned register offset
894  *
895  * Returns the 8 bit value from the offset specified.
896  */
897 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
898 {
899 	if (amdgpu_device_skip_hw_access(adev))
900 		return 0;
901 
902 	if (offset < adev->rmmio_size)
903 		return (readb(adev->rmmio + offset));
904 	BUG();
905 }
906 
907 
908 /**
909  * amdgpu_device_xcc_rreg - read a memory mapped IO or indirect register with specific XCC
910  *
911  * @adev: amdgpu_device pointer
912  * @reg: dword aligned register offset
913  * @acc_flags: access flags which require special behavior
914  * @xcc_id: xcc accelerated compute core id
915  *
916  * Returns the 32 bit value from the offset specified.
917  */
918 uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev,
919 				uint32_t reg, uint32_t acc_flags,
920 				uint32_t xcc_id)
921 {
922 	uint32_t ret, rlcg_flag;
923 
924 	if (amdgpu_device_skip_hw_access(adev))
925 		return 0;
926 
927 	if ((reg * 4) < adev->rmmio_size) {
928 		if (amdgpu_sriov_vf(adev) &&
929 		    !amdgpu_sriov_runtime(adev) &&
930 		    adev->gfx.rlc.rlcg_reg_access_supported &&
931 		    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
932 							 GC_HWIP, false,
933 							 &rlcg_flag)) {
934 			ret = amdgpu_virt_rlcg_reg_rw(adev, reg, 0, rlcg_flag, GET_INST(GC, xcc_id));
935 		} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
936 		    amdgpu_sriov_runtime(adev) &&
937 		    down_read_trylock(&adev->reset_domain->sem)) {
938 			ret = amdgpu_kiq_rreg(adev, reg, xcc_id);
939 			up_read(&adev->reset_domain->sem);
940 		} else {
941 			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
942 		}
943 	} else {
944 		ret = adev->pcie_rreg(adev, reg * 4);
945 	}
946 
947 	return ret;
948 }
949 
950 /*
951  * Byte-wide MMIO register write helper functions
952  * @offset: byte offset from MMIO start
953  * @value: the value to be written to the register
954  */
955 
956 /**
957  * amdgpu_mm_wreg8 - write a memory mapped IO register
958  *
959  * @adev: amdgpu_device pointer
960  * @offset: byte aligned register offset
961  * @value: 8 bit value to write
962  *
963  * Writes the value specified to the offset specified.
964  */
965 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
966 {
967 	if (amdgpu_device_skip_hw_access(adev))
968 		return;
969 
970 	if (offset < adev->rmmio_size)
971 		writeb(value, adev->rmmio + offset);
972 	else
973 		BUG();
974 }
975 
976 /**
977  * amdgpu_device_wreg - write to a memory mapped IO or indirect register
978  *
979  * @adev: amdgpu_device pointer
980  * @reg: dword aligned register offset
981  * @v: 32 bit value to write to the register
982  * @acc_flags: access flags which require special behavior
983  *
984  * Writes the value specified to the offset specified.
985  */
986 void amdgpu_device_wreg(struct amdgpu_device *adev,
987 			uint32_t reg, uint32_t v,
988 			uint32_t acc_flags)
989 {
990 	if (amdgpu_device_skip_hw_access(adev))
991 		return;
992 
993 	if ((reg * 4) < adev->rmmio_size) {
994 		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
995 		    amdgpu_sriov_runtime(adev) &&
996 		    down_read_trylock(&adev->reset_domain->sem)) {
997 			amdgpu_kiq_wreg(adev, reg, v, 0);
998 			up_read(&adev->reset_domain->sem);
999 		} else {
1000 			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
1001 		}
1002 	} else {
1003 		adev->pcie_wreg(adev, reg * 4, v);
1004 	}
1005 
1006 	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
1007 }
1008 
1009 /**
1010  * amdgpu_mm_wreg_mmio_rlc - write a register with direct/indirect mmio or via the RLC path if in range
1011  *
1012  * @adev: amdgpu_device pointer
1013  * @reg: mmio/rlc register
1014  * @v: value to write
1015  * @xcc_id: xcc accelerated compute core id
1016  *
1017  * this function is invoked only for the debugfs register access
1018  */
1019 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
1020 			     uint32_t reg, uint32_t v,
1021 			     uint32_t xcc_id)
1022 {
1023 	if (amdgpu_device_skip_hw_access(adev))
1024 		return;
1025 
1026 	if (amdgpu_sriov_fullaccess(adev) &&
1027 	    adev->gfx.rlc.funcs &&
1028 	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
1029 		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
1030 			return amdgpu_sriov_wreg(adev, reg, v, 0, 0, xcc_id);
1031 	} else if ((reg * 4) >= adev->rmmio_size) {
1032 		adev->pcie_wreg(adev, reg * 4, v);
1033 	} else {
1034 		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
1035 	}
1036 }
1037 
1038 /**
1039  * amdgpu_device_xcc_wreg - write to a memory mapped IO or indirect register with specific XCC
1040  *
1041  * @adev: amdgpu_device pointer
1042  * @reg: dword aligned register offset
1043  * @v: 32 bit value to write to the register
1044  * @acc_flags: access flags which require special behavior
1045  * @xcc_id: xcc accelerated compute core id
1046  *
1047  * Writes the value specified to the offset specified.
1048  */
1049 void amdgpu_device_xcc_wreg(struct amdgpu_device *adev,
1050 			uint32_t reg, uint32_t v,
1051 			uint32_t acc_flags, uint32_t xcc_id)
1052 {
1053 	uint32_t rlcg_flag;
1054 
1055 	if (amdgpu_device_skip_hw_access(adev))
1056 		return;
1057 
1058 	if ((reg * 4) < adev->rmmio_size) {
1059 		if (amdgpu_sriov_vf(adev) &&
1060 		    !amdgpu_sriov_runtime(adev) &&
1061 		    adev->gfx.rlc.rlcg_reg_access_supported &&
1062 		    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
1063 							 GC_HWIP, true,
1064 							 &rlcg_flag)) {
1065 			amdgpu_virt_rlcg_reg_rw(adev, reg, v, rlcg_flag, GET_INST(GC, xcc_id));
1066 		} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
1067 		    amdgpu_sriov_runtime(adev) &&
1068 		    down_read_trylock(&adev->reset_domain->sem)) {
1069 			amdgpu_kiq_wreg(adev, reg, v, xcc_id);
1070 			up_read(&adev->reset_domain->sem);
1071 		} else {
1072 			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
1073 		}
1074 	} else {
1075 		adev->pcie_wreg(adev, reg * 4, v);
1076 	}
1077 }
1078 
1079 /**
1080  * amdgpu_device_indirect_rreg - read an indirect register
1081  *
1082  * @adev: amdgpu_device pointer
1083  * @reg_addr: indirect register address to read from
1084  *
1085  * Returns the value of indirect register @reg_addr
1086  */
1087 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
1088 				u32 reg_addr)
1089 {
1090 	unsigned long flags, pcie_index, pcie_data;
1091 	void __iomem *pcie_index_offset;
1092 	void __iomem *pcie_data_offset;
1093 	u32 r;
1094 
1095 	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
1096 	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
1097 
1098 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
1099 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
1100 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
1101 
1102 	writel(reg_addr, pcie_index_offset);
1103 	readl(pcie_index_offset);
1104 	r = readl(pcie_data_offset);
1105 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
1106 
1107 	return r;
1108 }
1109 
1110 u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
1111 				    u64 reg_addr)
1112 {
1113 	unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
1114 	u32 r;
1115 	void __iomem *pcie_index_offset;
1116 	void __iomem *pcie_index_hi_offset;
1117 	void __iomem *pcie_data_offset;
1118 
1119 	if (unlikely(!adev->nbio.funcs)) {
1120 		pcie_index = AMDGPU_PCIE_INDEX_FALLBACK;
1121 		pcie_data = AMDGPU_PCIE_DATA_FALLBACK;
1122 	} else {
1123 		pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
1124 		pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
1125 	}
1126 
1127 	if (reg_addr >> 32) {
1128 		if (unlikely(!adev->nbio.funcs))
1129 			pcie_index_hi = AMDGPU_PCIE_INDEX_HI_FALLBACK;
1130 		else
1131 			pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
1132 	} else {
1133 		pcie_index_hi = 0;
1134 	}
1135 
1136 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
1137 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
1138 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
1139 	if (pcie_index_hi != 0)
1140 		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
1141 				pcie_index_hi * 4;
1142 
1143 	writel(reg_addr, pcie_index_offset);
1144 	readl(pcie_index_offset);
1145 	if (pcie_index_hi != 0) {
1146 		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
1147 		readl(pcie_index_hi_offset);
1148 	}
1149 	r = readl(pcie_data_offset);
1150 
1151 	/* clear the high bits */
1152 	if (pcie_index_hi != 0) {
1153 		writel(0, pcie_index_hi_offset);
1154 		readl(pcie_index_hi_offset);
1155 	}
1156 
1157 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
1158 
1159 	return r;
1160 }
1161 
1162 /**
1163  * amdgpu_device_indirect_rreg64 - read a 64 bit indirect register
1164  *
1165  * @adev: amdgpu_device pointer
1166  * @reg_addr: indirect register address to read from
1167  *
1168  * Returns the value of indirect register @reg_addr
1169  */
1170 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
1171 				  u32 reg_addr)
1172 {
1173 	unsigned long flags, pcie_index, pcie_data;
1174 	void __iomem *pcie_index_offset;
1175 	void __iomem *pcie_data_offset;
1176 	u64 r;
1177 
1178 	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
1179 	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
1180 
1181 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
1182 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
1183 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
1184 
1185 	/* read low 32 bits */
1186 	writel(reg_addr, pcie_index_offset);
1187 	readl(pcie_index_offset);
1188 	r = readl(pcie_data_offset);
1189 	/* read high 32 bits */
1190 	writel(reg_addr + 4, pcie_index_offset);
1191 	readl(pcie_index_offset);
1192 	r |= ((u64)readl(pcie_data_offset) << 32);
1193 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
1194 
1195 	return r;
1196 }
1197 
1198 u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev,
1199 				  u64 reg_addr)
1200 {
1201 	unsigned long flags, pcie_index, pcie_data;
1202 	unsigned long pcie_index_hi = 0;
1203 	void __iomem *pcie_index_offset;
1204 	void __iomem *pcie_index_hi_offset;
1205 	void __iomem *pcie_data_offset;
1206 	u64 r;
1207 
1208 	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
1209 	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
1210 	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
1211 		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
1212 
1213 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
1214 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
1215 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
1216 	if (pcie_index_hi != 0)
1217 		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
1218 			pcie_index_hi * 4;
1219 
1220 	/* read low 32 bits */
1221 	writel(reg_addr, pcie_index_offset);
1222 	readl(pcie_index_offset);
1223 	if (pcie_index_hi != 0) {
1224 		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
1225 		readl(pcie_index_hi_offset);
1226 	}
1227 	r = readl(pcie_data_offset);
1228 	/* read high 32 bits */
1229 	writel(reg_addr + 4, pcie_index_offset);
1230 	readl(pcie_index_offset);
1231 	if (pcie_index_hi != 0) {
1232 		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
1233 		readl(pcie_index_hi_offset);
1234 	}
1235 	r |= ((u64)readl(pcie_data_offset) << 32);
1236 
1237 	/* clear the high bits */
1238 	if (pcie_index_hi != 0) {
1239 		writel(0, pcie_index_hi_offset);
1240 		readl(pcie_index_hi_offset);
1241 	}
1242 
1243 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
1244 
1245 	return r;
1246 }
1247 
1248 /**
1249  * amdgpu_device_indirect_wreg - write an indirect register
1250  *
1251  * @adev: amdgpu_device pointer
1252  * @reg_addr: indirect register offset
1253  * @reg_data: indirect register data
1254  *
1255  */
1256 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
1257 				 u32 reg_addr, u32 reg_data)
1258 {
1259 	unsigned long flags, pcie_index, pcie_data;
1260 	void __iomem *pcie_index_offset;
1261 	void __iomem *pcie_data_offset;
1262 
1263 	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
1264 	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
1265 
1266 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
1267 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
1268 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
1269 
1270 	writel(reg_addr, pcie_index_offset);
1271 	readl(pcie_index_offset);
1272 	writel(reg_data, pcie_data_offset);
1273 	readl(pcie_data_offset);
1274 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
1275 }
1276 
1277 void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
1278 				     u64 reg_addr, u32 reg_data)
1279 {
1280 	unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
1281 	void __iomem *pcie_index_offset;
1282 	void __iomem *pcie_index_hi_offset;
1283 	void __iomem *pcie_data_offset;
1284 
1285 	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
1286 	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
1287 	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
1288 		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
1289 	else
1290 		pcie_index_hi = 0;
1291 
1292 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
1293 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
1294 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
1295 	if (pcie_index_hi != 0)
1296 		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
1297 				pcie_index_hi * 4;
1298 
1299 	writel(reg_addr, pcie_index_offset);
1300 	readl(pcie_index_offset);
1301 	if (pcie_index_hi != 0) {
1302 		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
1303 		readl(pcie_index_hi_offset);
1304 	}
1305 	writel(reg_data, pcie_data_offset);
1306 	readl(pcie_data_offset);
1307 
1308 	/* clear the high bits */
1309 	if (pcie_index_hi != 0) {
1310 		writel(0, pcie_index_hi_offset);
1311 		readl(pcie_index_hi_offset);
1312 	}
1313 
1314 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
1315 }
1316 
1317 /**
1318  * amdgpu_device_indirect_wreg64 - write a 64 bit indirect register
1319  *
1320  * @adev: amdgpu_device pointer
1321  * @reg_addr: indirect register offset
1322  * @reg_data: indirect register data
1323  *
1324  */
1325 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
1326 				   u32 reg_addr, u64 reg_data)
1327 {
1328 	unsigned long flags, pcie_index, pcie_data;
1329 	void __iomem *pcie_index_offset;
1330 	void __iomem *pcie_data_offset;
1331 
1332 	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
1333 	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
1334 
1335 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
1336 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
1337 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
1338 
1339 	/* write low 32 bits */
1340 	writel(reg_addr, pcie_index_offset);
1341 	readl(pcie_index_offset);
1342 	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
1343 	readl(pcie_data_offset);
1344 	/* write high 32 bits */
1345 	writel(reg_addr + 4, pcie_index_offset);
1346 	readl(pcie_index_offset);
1347 	writel((u32)(reg_data >> 32), pcie_data_offset);
1348 	readl(pcie_data_offset);
1349 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
1350 }
1351 
1352 void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
1353 				   u64 reg_addr, u64 reg_data)
1354 {
1355 	unsigned long flags, pcie_index, pcie_data;
1356 	unsigned long pcie_index_hi = 0;
1357 	void __iomem *pcie_index_offset;
1358 	void __iomem *pcie_index_hi_offset;
1359 	void __iomem *pcie_data_offset;
1360 
1361 	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
1362 	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
1363 	if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
1364 		pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
1365 
1366 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
1367 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
1368 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
1369 	if (pcie_index_hi != 0)
1370 		pcie_index_hi_offset = (void __iomem *)adev->rmmio +
1371 				pcie_index_hi * 4;
1372 
1373 	/* write low 32 bits */
1374 	writel(reg_addr, pcie_index_offset);
1375 	readl(pcie_index_offset);
1376 	if (pcie_index_hi != 0) {
1377 		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
1378 		readl(pcie_index_hi_offset);
1379 	}
1380 	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
1381 	readl(pcie_data_offset);
1382 	/* write high 32 bits */
1383 	writel(reg_addr + 4, pcie_index_offset);
1384 	readl(pcie_index_offset);
1385 	if (pcie_index_hi != 0) {
1386 		writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
1387 		readl(pcie_index_hi_offset);
1388 	}
1389 	writel((u32)(reg_data >> 32), pcie_data_offset);
1390 	readl(pcie_data_offset);
1391 
1392 	/* clear the high bits */
1393 	if (pcie_index_hi != 0) {
1394 		writel(0, pcie_index_hi_offset);
1395 		readl(pcie_index_hi_offset);
1396 	}
1397 
1398 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
1399 }
1400 
1401 /**
1402  * amdgpu_device_get_rev_id - query device rev_id
1403  *
1404  * @adev: amdgpu_device pointer
1405  *
1406  * Returns the device rev_id.
1407  */
1408 u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
1409 {
1410 	return adev->nbio.funcs->get_rev_id(adev);
1411 }
1412 
1413 /**
1414  * amdgpu_invalid_rreg - dummy reg read function
1415  *
1416  * @adev: amdgpu_device pointer
1417  * @reg: offset of register
1418  *
1419  * Dummy register read function.  Used for register blocks
1420  * that certain asics don't have (all asics).
1421  * Returns the value in the register.
1422  */
1423 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
1424 {
1425 	dev_err(adev->dev, "Invalid callback to read register 0x%04X\n", reg);
1426 	BUG();
1427 	return 0;
1428 }
1429 
1430 static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg)
1431 {
1432 	dev_err(adev->dev, "Invalid callback to read register 0x%llX\n", reg);
1433 	BUG();
1434 	return 0;
1435 }
1436 
1437 /**
1438  * amdgpu_invalid_wreg - dummy reg write function
1439  *
1440  * @adev: amdgpu_device pointer
1441  * @reg: offset of register
1442  * @v: value to write to the register
1443  *
1444  * Dummy register write function.  Used for register blocks
1445  * that certain asics don't have (all asics).
1446  */
1447 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
1448 {
1449 	dev_err(adev->dev,
1450 		"Invalid callback to write register 0x%04X with 0x%08X\n", reg,
1451 		v);
1452 	BUG();
1453 }
1454 
1455 static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v)
1456 {
1457 	dev_err(adev->dev,
1458 		"Invalid callback to write register 0x%llX with 0x%08X\n", reg,
1459 		v);
1460 	BUG();
1461 }
1462 
1463 /**
1464  * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
1465  *
1466  * @adev: amdgpu_device pointer
1467  * @reg: offset of register
1468  *
1469  * Dummy register read function.  Used for register blocks
1470  * that certain asics don't have (all asics).
1471  * Returns the value in the register.
1472  */
1473 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
1474 {
1475 	dev_err(adev->dev, "Invalid callback to read 64 bit register 0x%04X\n",
1476 		reg);
1477 	BUG();
1478 	return 0;
1479 }
1480 
1481 static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t reg)
1482 {
1483 	dev_err(adev->dev, "Invalid callback to read register 0x%llX\n", reg);
1484 	BUG();
1485 	return 0;
1486 }
1487 
1488 /**
1489  * amdgpu_invalid_wreg64 - dummy reg write function
1490  *
1491  * @adev: amdgpu_device pointer
1492  * @reg: offset of register
1493  * @v: value to write to the register
1494  *
1495  * Dummy register write function.  Used for register blocks
1496  * that certain asics don't have (all asics).
1497  */
1498 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
1499 {
1500 	dev_err(adev->dev,
1501 		"Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
1502 		reg, v);
1503 	BUG();
1504 }
1505 
1506 static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg, uint64_t v)
1507 {
1508 	dev_err(adev->dev,
1509 		"Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
1510 		reg, v);
1511 	BUG();
1512 }
1513 
1514 /**
1515  * amdgpu_block_invalid_rreg - dummy reg read function
1516  *
1517  * @adev: amdgpu_device pointer
1518  * @block: offset of instance
1519  * @reg: offset of register
1520  *
1521  * Dummy register read function.  Used for register blocks
1522  * that certain asics don't have (all asics).
1523  * Returns the value in the register.
1524  */
1525 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
1526 					  uint32_t block, uint32_t reg)
1527 {
1528 	dev_err(adev->dev,
1529 		"Invalid callback to read register 0x%04X in block 0x%04X\n",
1530 		reg, block);
1531 	BUG();
1532 	return 0;
1533 }
1534 
1535 /**
1536  * amdgpu_block_invalid_wreg - dummy reg write function
1537  *
1538  * @adev: amdgpu_device pointer
1539  * @block: offset of instance
1540  * @reg: offset of register
1541  * @v: value to write to the register
1542  *
1543  * Dummy register write function.  Used for register blocks
1544  * that certain asics don't have (all asics).
1545  */
1546 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
1547 				      uint32_t block,
1548 				      uint32_t reg, uint32_t v)
1549 {
1550 	dev_err(adev->dev,
1551 		"Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
1552 		reg, block, v);
1553 	BUG();
1554 }
1555 
1556 static uint32_t amdgpu_device_get_vbios_flags(struct amdgpu_device *adev)
1557 {
1558 	if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
1559 		return AMDGPU_VBIOS_SKIP;
1560 
1561 	if (hweight32(adev->aid_mask) && amdgpu_passthrough(adev))
1562 		return AMDGPU_VBIOS_OPTIONAL;
1563 
1564 	return 0;
1565 }
1566 
1567 /**
1568  * amdgpu_device_asic_init - Wrapper for atom asic_init
1569  *
1570  * @adev: amdgpu_device pointer
1571  *
1572  * Does any asic specific work and then calls atom asic init.
1573  */
1574 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
1575 {
1576 	uint32_t flags;
1577 	bool optional;
1578 	int ret;
1579 
1580 	amdgpu_asic_pre_asic_init(adev);
1581 	flags = amdgpu_device_get_vbios_flags(adev);
1582 	optional = !!(flags & (AMDGPU_VBIOS_OPTIONAL | AMDGPU_VBIOS_SKIP));
1583 
1584 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
1585 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
1586 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0) ||
1587 	    amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
1588 		amdgpu_psp_wait_for_bootloader(adev);
1589 		if (optional && !adev->bios)
1590 			return 0;
1591 
1592 		ret = amdgpu_atomfirmware_asic_init(adev, true);
1593 		return ret;
1594 	} else {
1595 		if (optional && !adev->bios)
1596 			return 0;
1597 
1598 		return amdgpu_atom_asic_init(adev->mode_info.atom_context);
1599 	}
1600 
1601 	return 0;
1602 }
1603 
1604 /**
1605  * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
1606  *
1607  * @adev: amdgpu_device pointer
1608  *
1609  * Allocates a scratch page of VRAM for use by various things in the
1610  * driver.
1611  */
1612 static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
1613 {
1614 	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
1615 				       AMDGPU_GEM_DOMAIN_VRAM |
1616 				       AMDGPU_GEM_DOMAIN_GTT,
1617 				       &adev->mem_scratch.robj,
1618 				       &adev->mem_scratch.gpu_addr,
1619 				       (void **)&adev->mem_scratch.ptr);
1620 }
1621 
1622 /**
1623  * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
1624  *
1625  * @adev: amdgpu_device pointer
1626  *
1627  * Frees the VRAM scratch page.
1628  */
1629 static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
1630 {
1631 	amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
1632 }
1633 
1634 /**
1635  * amdgpu_device_program_register_sequence - program an array of registers.
1636  *
1637  * @adev: amdgpu_device pointer
1638  * @registers: pointer to the register array
1639  * @array_size: size of the register array
1640  *
1641  * Programs an array of registers with and/or masks.
1642  * This is a helper for setting golden registers.
1643  */
1644 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
1645 					     const u32 *registers,
1646 					     const u32 array_size)
1647 {
1648 	u32 tmp, reg, and_mask, or_mask;
1649 	int i;
1650 
1651 	if (array_size % 3)
1652 		return;
1653 
1654 	for (i = 0; i < array_size; i += 3) {
1655 		reg = registers[i + 0];
1656 		and_mask = registers[i + 1];
1657 		or_mask = registers[i + 2];
1658 
1659 		if (and_mask == 0xffffffff) {
1660 			tmp = or_mask;
1661 		} else {
1662 			tmp = RREG32(reg);
1663 			tmp &= ~and_mask;
1664 			if (adev->family >= AMDGPU_FAMILY_AI)
1665 				tmp |= (or_mask & and_mask);
1666 			else
1667 				tmp |= or_mask;
1668 		}
1669 		WREG32(reg, tmp);
1670 	}
1671 }
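
/*
 * Illustrative sketch of a register array (the register names below are
 * placeholders, not real offsets): each triplet is {offset, and_mask,
 * or_mask}; an and_mask of 0xffffffff writes or_mask verbatim, anything
 * else performs a read-modify-write:
 *
 *	static const u32 example_golden_settings[] = {
 *		mmREG_A, 0xffffffff, 0x00000100,
 *		mmREG_B, 0x0000ff00, 0x00003000,
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, example_golden_settings,
 *						ARRAY_SIZE(example_golden_settings));
 */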
1672 
1673 /**
1674  * amdgpu_device_pci_config_reset - reset the GPU
1675  *
1676  * @adev: amdgpu_device pointer
1677  *
1678  * Resets the GPU using the pci config reset sequence.
1679  * Only applicable to asics prior to vega10.
1680  */
1681 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
1682 {
1683 	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
1684 }
1685 
1686 /**
1687  * amdgpu_device_pci_reset - reset the GPU using generic PCI means
1688  *
1689  * @adev: amdgpu_device pointer
1690  *
1691  * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
1692  */
1693 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
1694 {
1695 	return pci_reset_function(adev->pdev);
1696 }
1697 
1698 /*
1699  * amdgpu_device_wb_*()
1700  * Writeback is the method by which the GPU updates special pages in memory
1701  * with the status of certain GPU events (fences, ring pointers, etc.).
1702  */
1703 
1704 /**
1705  * amdgpu_device_wb_fini - Disable Writeback and free memory
1706  *
1707  * @adev: amdgpu_device pointer
1708  *
1709  * Disables Writeback and frees the Writeback memory (all asics).
1710  * Used at driver shutdown.
1711  */
1712 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1713 {
1714 	if (adev->wb.wb_obj) {
1715 		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1716 				      &adev->wb.gpu_addr,
1717 				      (void **)&adev->wb.wb);
1718 		adev->wb.wb_obj = NULL;
1719 	}
1720 }
1721 
1722 /**
1723  * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1724  *
1725  * @adev: amdgpu_device pointer
1726  *
1727  * Initializes writeback and allocates writeback memory (all asics).
1728  * Used at driver startup.
1729  * Returns 0 on success or a negative error code on failure.
1730  */
1731 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1732 {
1733 	int r;
1734 
1735 	if (adev->wb.wb_obj == NULL) {
1736 		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1737 		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1738 					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1739 					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
1740 					    (void **)&adev->wb.wb);
1741 		if (r) {
1742 			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1743 			return r;
1744 		}
1745 
1746 		adev->wb.num_wb = AMDGPU_MAX_WB;
1747 		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1748 
1749 		/* clear wb memory */
1750 		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1751 	}
1752 
1753 	return 0;
1754 }
1755 
1756 /**
1757  * amdgpu_device_wb_get - Allocate a wb entry
1758  *
1759  * @adev: amdgpu_device pointer
1760  * @wb: wb index
1761  *
1762  * Allocate a wb slot for use by the driver (all asics).
1763  * Returns 0 on success or -EINVAL on failure.
1764  */
1765 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1766 {
1767 	unsigned long flags, offset;
1768 
1769 	spin_lock_irqsave(&adev->wb.lock, flags);
1770 	offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1771 	if (offset < adev->wb.num_wb) {
1772 		__set_bit(offset, adev->wb.used);
1773 		spin_unlock_irqrestore(&adev->wb.lock, flags);
1774 		*wb = offset << 3; /* convert to dw offset */
1775 		return 0;
1776 	} else {
1777 		spin_unlock_irqrestore(&adev->wb.lock, flags);
1778 		return -EINVAL;
1779 	}
1780 }
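
/*
 * Illustrative usage sketch: the returned index is a dword offset into the
 * writeback page, so the CPU and GPU views of the slot are derived as:
 *
 *	u32 wb;
 *
 *	if (!amdgpu_device_wb_get(adev, &wb)) {
 *		volatile u32 *cpu_addr = &adev->wb.wb[wb];
 *		u64 gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *		...
 *		amdgpu_device_wb_free(adev, wb);
 *	}
 */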
1781 
1782 /**
1783  * amdgpu_device_wb_free - Free a wb entry
1784  *
1785  * @adev: amdgpu_device pointer
1786  * @wb: wb index
1787  *
1788  * Free a wb slot allocated for use by the driver (all asics)
1789  */
1790 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1791 {
1792 	unsigned long flags;
1793 
1794 	wb >>= 3;
1795 	spin_lock_irqsave(&adev->wb.lock, flags);
1796 	if (wb < adev->wb.num_wb)
1797 		__clear_bit(wb, adev->wb.used);
1798 	spin_unlock_irqrestore(&adev->wb.lock, flags);
1799 }
1800 
1801 /**
1802  * amdgpu_device_resize_fb_bar - try to resize FB BAR
1803  *
1804  * @adev: amdgpu_device pointer
1805  *
1806  * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1807  * to fail, but if any of the BARs is not accessible after the resize we abort
1808  * driver loading by returning -ENODEV.
1809  */
1810 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1811 {
1812 	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1813 	struct pci_bus *root;
1814 	struct resource *res;
1815 	int max_size, r;
1816 	unsigned int i;
1817 	u16 cmd;
1818 
1819 	if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
1820 		return 0;
1821 
1822 	/* Bypass for VF */
1823 	if (amdgpu_sriov_vf(adev))
1824 		return 0;
1825 
1826 	if (!amdgpu_rebar)
1827 		return 0;
1828 
1829 	/* resizing on Dell G5 SE platforms causes problems with runtime pm */
1830 	if ((amdgpu_runtime_pm != 0) &&
1831 	    adev->pdev->vendor == PCI_VENDOR_ID_ATI &&
1832 	    adev->pdev->device == 0x731f &&
1833 	    adev->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
1834 		return 0;
1835 
1836 	/* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
1837 	if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
1838 		dev_warn(
1839 			adev->dev,
1840 			"System can't access extended configuration space, please check!!\n");
1841 
1842 	/* skip if the bios has already enabled large BAR */
1843 	if (adev->gmc.real_vram_size &&
1844 	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1845 		return 0;
1846 
1847 	/* Check if the root BUS has 64bit memory resources */
1848 	root = adev->pdev->bus;
1849 	while (root->parent)
1850 		root = root->parent;
1851 
1852 	pci_bus_for_each_resource(root, res, i) {
1853 		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1854 		    res->start > 0x100000000ull)
1855 			break;
1856 	}
1857 
1858 	/* Trying to resize is pointless without a root hub window above 4GB */
1859 	if (!res)
1860 		return 0;
1861 
1862 	/* Limit the BAR size to what is available */
1863 	max_size = pci_rebar_get_max_size(adev->pdev, 0);
1864 	if (max_size < 0)
1865 		return 0;
1866 	rbar_size = min(max_size, rbar_size);
1867 
1868 	/* Disable memory decoding while we change the BAR addresses and size */
1869 	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1870 	pci_write_config_word(adev->pdev, PCI_COMMAND,
1871 			      cmd & ~PCI_COMMAND_MEMORY);
1872 
1873 	/* Tear down doorbell as resizing will release BARs */
1874 	amdgpu_doorbell_fini(adev);
1875 
1876 	r = pci_resize_resource(adev->pdev, 0, rbar_size,
1877 				(adev->asic_type >= CHIP_BONAIRE) ? 1 << 5
1878 								  : 1 << 2);
1879 	if (r == -ENOSPC)
1880 		dev_info(adev->dev,
1881 			 "Not enough PCI address space for a large BAR.");
1882 	else if (r && r != -ENOTSUPP)
1883 		dev_err(adev->dev, "Problem resizing BAR0 (%d).", r);
1884 
1885 	/* When the doorbell or fb BAR isn't available we have no chance of
1886 	 * using the device.
1887 	 */
1888 	r = amdgpu_doorbell_init(adev);
1889 	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1890 		return -ENODEV;
1891 
1892 	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1893 
1894 	return 0;
1895 }
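
/*
 * Worked example of the encoding used by pci_rebar_bytes_to_size() above:
 * resizable BAR sizes are encoded as log2(bytes) - 20 per the PCIe spec,
 * so 0 = 1 MB, 8 = 256 MB and 13 = 8 GB.  A board with 8 GB of VRAM thus
 * requests size 13, clamped against what pci_rebar_get_max_size() reports
 * for BAR0.
 */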
1896 
1897 /*
1898  * GPU helpers function.
1899  */
1900 /**
1901  * amdgpu_device_need_post - check if the hw need post or not
1902  *
1903  * @adev: amdgpu_device pointer
1904  *
1905  * Check whether the asic needs to be posted at driver startup (all asics),
1906  * or whether a post is needed because a hw reset was performed.
1907  * Returns true if post is needed, false if not.
1908  */
1909 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1910 {
1911 	uint32_t reg, flags;
1912 
1913 	if (amdgpu_sriov_vf(adev))
1914 		return false;
1915 
1916 	flags = amdgpu_device_get_vbios_flags(adev);
1917 	if (flags & AMDGPU_VBIOS_SKIP)
1918 		return false;
1919 	if ((flags & AMDGPU_VBIOS_OPTIONAL) && !adev->bios)
1920 		return false;
1921 
1922 	if (amdgpu_passthrough(adev)) {
1923 		/* for FIJI: in the whole-GPU pass-through virtualization case, after a VM
1924 		 * reboot some old smc fw still needs the driver to do a vPost, otherwise
1925 		 * the gpu hangs. smc fw versions above 22.15 don't have this flaw, so
1926 		 * force vPost for smc versions below 22.15
1927 		 */
1928 		if (adev->asic_type == CHIP_FIJI) {
1929 			int err;
1930 			uint32_t fw_ver;
1931 
1932 			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1933 			/* force vPost if error occurred */
1934 			if (err)
1935 				return true;
1936 
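			/* the SMC firmware version is stored at dword offset 69 of the image */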
1937 			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1938 			release_firmware(adev->pm.fw);
1939 			if (fw_ver < 0x00160e00)
1940 				return true;
1941 		}
1942 	}
1943 
1944 	/* Don't post if we need to reset whole hive on init */
1945 	if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
1946 		return false;
1947 
1948 	if (adev->has_hw_reset) {
1949 		adev->has_hw_reset = false;
1950 		return true;
1951 	}
1952 
1953 	/* bios scratch used on CIK+ */
1954 	if (adev->asic_type >= CHIP_BONAIRE)
1955 		return amdgpu_atombios_scratch_need_asic_init(adev);
1956 
1957 	/* check MEM_SIZE for older asics */
1958 	reg = amdgpu_asic_get_config_memsize(adev);
1959 
1960 	if ((reg != 0) && (reg != 0xffffffff))
1961 		return false;
1962 
1963 	return true;
1964 }
1965 
1966 /*
1967  * Check whether seamless boot is supported.
1968  *
1969  * So far we only support seamless boot on DCE 3.0 or later.
1970  * If users report that it works on older ASICs as well, we may
1971  * loosen this.
1972  */
1973 bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
1974 {
1975 	switch (amdgpu_seamless) {
1976 	case -1:
1977 		break;
1978 	case 1:
1979 		return true;
1980 	case 0:
1981 		return false;
1982 	default:
1983 		dev_err(adev->dev, "Invalid value for amdgpu.seamless: %d\n",
1984 			amdgpu_seamless);
1985 		return false;
1986 	}
1987 
1988 	if (!(adev->flags & AMD_IS_APU))
1989 		return false;
1990 
1991 	if (adev->mman.keep_stolen_vga_memory)
1992 		return false;
1993 
1994 	return amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0);
1995 }
1996 
1997 /*
1998  * Intel hosts such as Rocket Lake, Alder Lake, Raptor Lake and Sapphire Rapids
1999  * don't support dynamic speed switching. Until we have confirmation from Intel
2000  * that a specific host supports it, it's safer that we keep it disabled for all.
2001  *
2002  * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
2003  * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
2004  */
2005 static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device *adev)
2006 {
2007 #if IS_ENABLED(CONFIG_X86)
2008 	struct cpuinfo_x86 *c = &cpu_data(0);
2009 
2010 	/* eGPUs change speeds based on USB4 fabric conditions */
2011 	if (dev_is_removable(adev->dev))
2012 		return true;
2013 
2014 	if (c->x86_vendor == X86_VENDOR_INTEL)
2015 		return false;
2016 #endif
2017 	return true;
2018 }
2019 
2020 static bool amdgpu_device_aspm_support_quirk(struct amdgpu_device *adev)
2021 {
2022 	/* Enabling ASPM causes random hangs on Tahiti and Oland on Zen4.
2023 	 * It's unclear if this is a platform-specific or GPU-specific issue.
2024 	 * Disable ASPM on SI for the time being.
2025 	 */
2026 	if (adev->family == AMDGPU_FAMILY_SI)
2027 		return true;
2028 
2029 #if IS_ENABLED(CONFIG_X86)
2030 	struct cpuinfo_x86 *c = &cpu_data(0);
2031 
2032 	if (!(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 0, 0) ||
2033 		  amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 0, 1)))
2034 		return false;
2035 
2036 	if (c->x86 == 6 &&
2037 		adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5) {
2038 		switch (c->x86_model) {
2039 		case VFM_MODEL(INTEL_ALDERLAKE):
2040 		case VFM_MODEL(INTEL_ALDERLAKE_L):
2041 		case VFM_MODEL(INTEL_RAPTORLAKE):
2042 		case VFM_MODEL(INTEL_RAPTORLAKE_P):
2043 		case VFM_MODEL(INTEL_RAPTORLAKE_S):
2044 			return true;
2045 		default:
2046 			return false;
2047 		}
2048 	} else {
2049 		return false;
2050 	}
2051 #else
2052 	return false;
2053 #endif
2054 }
2055 
2056 /**
2057  * amdgpu_device_should_use_aspm - check if the device should program ASPM
2058  *
2059  * @adev: amdgpu_device pointer
2060  *
2061  * Confirm whether the module parameter and pcie bridge agree that ASPM should
2062  * be set for this device.
2063  *
2064  * Returns true if it should be used or false if not.
2065  */
2066 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
2067 {
2068 	switch (amdgpu_aspm) {
2069 	case -1:
2070 		break;
2071 	case 0:
2072 		return false;
2073 	case 1:
2074 		return true;
2075 	default:
2076 		return false;
2077 	}
2078 	if (adev->flags & AMD_IS_APU)
2079 		return false;
2080 	if (amdgpu_device_aspm_support_quirk(adev))
2081 		return false;
2082 	return pcie_aspm_enabled(adev->pdev);
2083 }
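
/*
 * Example: booting with amdgpu.aspm=0 forces ASPM off and amdgpu.aspm=1
 * forces it on; the default of -1 defers to the quirk above and to whether
 * the platform already enabled ASPM on the device's upstream link.
 */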
2084 
2085 /* if we get transitioned to only one device, take VGA back */
2086 /**
2087  * amdgpu_device_vga_set_decode - enable/disable vga decode
2088  *
2089  * @pdev: PCI device pointer
2090  * @state: enable/disable vga decode
2091  *
2092  * Enable/disable vga decode (all asics).
2093  * Returns VGA resource flags.
2094  */
2095 static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
2096 		bool state)
2097 {
2098 	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
2099 
2100 	amdgpu_asic_set_vga_state(adev, state);
2101 	if (state)
2102 		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
2103 		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
2104 	else
2105 		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
2106 }
2107 
2108 /**
2109  * amdgpu_device_check_block_size - validate the vm block size
2110  *
2111  * @adev: amdgpu_device pointer
2112  *
2113  * Validates the vm block size specified via module parameter.
2114  * The vm block size defines the number of bits in the page table versus the
2115  * page directory; a page is 4KB, so we have 12 bits of offset, a minimum of
2116  * 9 bits in the page table, and the remaining bits in the page directory.
2117  */
2118 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
2119 {
2120 	/* defines the number of bits in the page table versus the page directory;
2121 	 * a page is 4KB, so we have 12 bits of offset, a minimum of 9 bits in the
2122 	 * page table, and the remaining bits in the page directory
2123 	 */
2124 	if (amdgpu_vm_block_size == -1)
2125 		return;
2126 
2127 	if (amdgpu_vm_block_size < 9) {
2128 		dev_warn(adev->dev, "VM page table size (%d) too small\n",
2129 			 amdgpu_vm_block_size);
2130 		amdgpu_vm_block_size = -1;
2131 	}
2132 }
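
/*
 * Worked example: with the minimum block size of 9 and 4KB pages, one
 * page-table block covers 2^(9 + 12) bytes = 2MB of address space; each
 * additional bit moves one level of translation out of the page directory
 * and doubles that span.
 */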
2133 
2134 /**
2135  * amdgpu_device_check_vm_size - validate the vm size
2136  *
2137  * @adev: amdgpu_device pointer
2138  *
2139  * Validates the vm size in GB specified via module parameter.
2140  * The VM size is the size of the GPU virtual memory space in GB.
2141  */
2142 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
2143 {
2144 	/* no need to check the default value */
2145 	if (amdgpu_vm_size == -1)
2146 		return;
2147 
2148 	if (amdgpu_vm_size < 1) {
2149 		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
2150 			 amdgpu_vm_size);
2151 		amdgpu_vm_size = -1;
2152 	}
2153 }
2154 
2155 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
2156 {
2157 	struct sysinfo si;
2158 	bool is_os_64 = (sizeof(void *) == 8);
2159 	uint64_t total_memory;
2160 	uint64_t dram_size_seven_GB = 0x1B8000000;
2161 	uint64_t dram_size_three_GB = 0xB8000000;
2162 
2163 	if (amdgpu_smu_memory_pool_size == 0)
2164 		return;
2165 
2166 	if (!is_os_64) {
2167 		dev_warn(adev->dev, "Not 64-bit OS, feature not supported\n");
2168 		goto def_value;
2169 	}
2170 	si_meminfo(&si);
2171 	total_memory = (uint64_t)si.totalram * si.mem_unit;
2172 
2173 	if ((amdgpu_smu_memory_pool_size == 1) ||
2174 		(amdgpu_smu_memory_pool_size == 2)) {
2175 		if (total_memory < dram_size_three_GB)
2176 			goto def_value1;
2177 	} else if ((amdgpu_smu_memory_pool_size == 4) ||
2178 		(amdgpu_smu_memory_pool_size == 8)) {
2179 		if (total_memory < dram_size_seven_GB)
2180 			goto def_value1;
2181 	} else {
2182 		dev_warn(adev->dev, "Smu memory pool size not supported\n");
2183 		goto def_value;
2184 	}
2185 	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
2186 
2187 	return;
2188 
2189 def_value1:
2190 	dev_warn(adev->dev, "Not enough system memory\n");
2191 def_value:
2192 	adev->pm.smu_prv_buffer_size = 0;
2193 }
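
/*
 * For reference, the "<< 28" above converts the module parameter from
 * 256MB units into bytes, so the supported values map to:
 * 1 -> 256MB and 2 -> 512MB (requiring ~3GB of system RAM),
 * 4 -> 1GB and 8 -> 2GB (requiring ~7GB of system RAM).
 */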
2194 
2195 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
2196 {
2197 	if (!(adev->flags & AMD_IS_APU) ||
2198 	    adev->asic_type < CHIP_RAVEN)
2199 		return 0;
2200 
2201 	switch (adev->asic_type) {
2202 	case CHIP_RAVEN:
2203 		if (adev->pdev->device == 0x15dd)
2204 			adev->apu_flags |= AMD_APU_IS_RAVEN;
2205 		if (adev->pdev->device == 0x15d8)
2206 			adev->apu_flags |= AMD_APU_IS_PICASSO;
2207 		break;
2208 	case CHIP_RENOIR:
2209 		if ((adev->pdev->device == 0x1636) ||
2210 		    (adev->pdev->device == 0x164c))
2211 			adev->apu_flags |= AMD_APU_IS_RENOIR;
2212 		else
2213 			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
2214 		break;
2215 	case CHIP_VANGOGH:
2216 		adev->apu_flags |= AMD_APU_IS_VANGOGH;
2217 		break;
2218 	case CHIP_YELLOW_CARP:
2219 		break;
2220 	case CHIP_CYAN_SKILLFISH:
2221 		if ((adev->pdev->device == 0x13FE) ||
2222 		    (adev->pdev->device == 0x143F))
2223 			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
2224 		break;
2225 	default:
2226 		break;
2227 	}
2228 
2229 	return 0;
2230 }
2231 
2232 /**
2233  * amdgpu_device_check_arguments - validate module params
2234  *
2235  * @adev: amdgpu_device pointer
2236  *
2237  * Validates certain module parameters and updates
2238  * the associated values used by the driver (all asics).
2239  */
2240 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
2241 {
2242 	int i;
2243 
2244 	if (amdgpu_sched_jobs < 4) {
2245 		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
2246 			 amdgpu_sched_jobs);
2247 		amdgpu_sched_jobs = 4;
2248 	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
2249 		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
2250 			 amdgpu_sched_jobs);
2251 		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
2252 	}
2253 
2254 	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
2255 		/* gart size must be greater or equal to 32M */
2256 		dev_warn(adev->dev, "gart size (%d) too small\n",
2257 			 amdgpu_gart_size);
2258 		amdgpu_gart_size = -1;
2259 	}
2260 
2261 	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
2262 		/* gtt size must be greater or equal to 32M */
2263 		dev_warn(adev->dev, "gtt size (%d) too small\n",
2264 				 amdgpu_gtt_size);
2265 		amdgpu_gtt_size = -1;
2266 	}
2267 
2268 	/* valid range is between 4 and 9 inclusive */
2269 	if (amdgpu_vm_fragment_size != -1 &&
2270 	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
2271 		dev_warn(adev->dev, "vm fragment size (%d) out of range, valid range is between 4 and 9\n",
2272 			 amdgpu_vm_fragment_size);
2272 		amdgpu_vm_fragment_size = -1;
2273 	}
2274 
2275 	if (amdgpu_sched_hw_submission < 2) {
2276 		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
2277 			 amdgpu_sched_hw_submission);
2278 		amdgpu_sched_hw_submission = 2;
2279 	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
2280 		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
2281 			 amdgpu_sched_hw_submission);
2282 		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
2283 	}
2284 
2285 	if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
2286 		dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
2287 		amdgpu_reset_method = -1;
2288 	}
2289 
2290 	amdgpu_device_check_smu_prv_buffer_size(adev);
2291 
2292 	amdgpu_device_check_vm_size(adev);
2293 
2294 	amdgpu_device_check_block_size(adev);
2295 
2296 	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
2297 
2298 	for (i = 0; i < MAX_XCP; i++) {
2299 		switch (amdgpu_enforce_isolation) {
2300 		case -1:
2301 		case 0:
2302 		default:
2303 			/* disable */
2304 			adev->enforce_isolation[i] = AMDGPU_ENFORCE_ISOLATION_DISABLE;
2305 			break;
2306 		case 1:
2307 			/* enable */
2308 			adev->enforce_isolation[i] =
2309 				AMDGPU_ENFORCE_ISOLATION_ENABLE;
2310 			break;
2311 		case 2:
2312 			/* enable legacy mode */
2313 			adev->enforce_isolation[i] =
2314 				AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY;
2315 			break;
2316 		case 3:
2317 			/* enable only process isolation without submitting cleaner shader */
2318 			adev->enforce_isolation[i] =
2319 				AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER;
2320 			break;
2321 		}
2322 	}
2323 
2324 	return 0;
2325 }
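
/*
 * Example of the rounding applied above: amdgpu.sched_jobs=5 is not a
 * power of two and is rounded up to 8, while amdgpu.sched_jobs=2 is below
 * the minimum and is raised to 4.  amdgpu.sched_hw_submission is adjusted
 * the same way with a minimum of 2.
 */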
2326 
2327 /**
2328  * amdgpu_switcheroo_set_state - set switcheroo state
2329  *
2330  * @pdev: pci dev pointer
2331  * @state: vga_switcheroo state
2332  *
2333  * Callback for the switcheroo driver.  Suspends or resumes
2334  * the asics before or after it is powered up using ACPI methods.
2335  */
2336 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
2337 					enum vga_switcheroo_state state)
2338 {
2339 	struct drm_device *dev = pci_get_drvdata(pdev);
2340 	int r;
2341 
2342 	if (amdgpu_device_supports_px(drm_to_adev(dev)) &&
2343 	    state == VGA_SWITCHEROO_OFF)
2344 		return;
2345 
2346 	if (state == VGA_SWITCHEROO_ON) {
2347 		dev_info(&pdev->dev, "switched on\n");
2348 		/* don't suspend or resume card normally */
2349 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
2350 
2351 		pci_set_power_state(pdev, PCI_D0);
2352 		amdgpu_device_load_pci_state(pdev);
2353 		r = pci_enable_device(pdev);
2354 		if (r)
2355 			dev_warn(&pdev->dev, "pci_enable_device failed (%d)\n",
2356 				 r);
2357 		amdgpu_device_resume(dev, true);
2358 
2359 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
2360 	} else {
2361 		dev_info(&pdev->dev, "switched off\n");
2362 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
2363 		amdgpu_device_prepare(dev);
2364 		amdgpu_device_suspend(dev, true);
2365 		amdgpu_device_cache_pci_state(pdev);
2366 		/* Shut down the device */
2367 		pci_disable_device(pdev);
2368 		pci_set_power_state(pdev, PCI_D3cold);
2369 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
2370 	}
2371 }
2372 
2373 /**
2374  * amdgpu_switcheroo_can_switch - see if switcheroo state can change
2375  *
2376  * @pdev: pci dev pointer
2377  *
2378  * Callback for the switcheroo driver.  Check if the switcheroo
2379  * state can be changed.
2380  * Returns true if the state can be changed, false if not.
2381  */
2382 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
2383 {
2384 	struct drm_device *dev = pci_get_drvdata(pdev);
2385 
2386 	/*
2387 	* FIXME: open_count is protected by drm_global_mutex but that would lead to
2388 	* locking inversion with the driver load path. And the access here is
2389 	* completely racy anyway. So don't bother with locking for now.
2390 	*/
2391 	return atomic_read(&dev->open_count) == 0;
2392 }
2393 
2394 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
2395 	.set_gpu_state = amdgpu_switcheroo_set_state,
2396 	.reprobe = NULL,
2397 	.can_switch = amdgpu_switcheroo_can_switch,
2398 };
2399 
2400 /**
2401  * amdgpu_device_enable_virtual_display - enable virtual display feature
2402  *
2403  * @adev: amdgpu_device pointer
2404  *
2405  * Enables the virtual display feature if the user has enabled it via
2406  * the module parameter virtual_display.  This feature provides virtual
2407  * display hardware on headless boards or in virtualized environments.
2408  * This function parses and validates the configuration string specified by
2409  * the user and configures the virtual display configuration (number of
2410  * virtual connectors, crtcs, etc.) specified.
2411  */
2412 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
2413 {
2414 	adev->enable_virtual_display = false;
2415 
2416 	if (amdgpu_virtual_display) {
2417 		const char *pci_address_name = pci_name(adev->pdev);
2418 		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
2419 
2420 		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
2421 		pciaddstr_tmp = pciaddstr;
2422 		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
2423 			pciaddname = strsep(&pciaddname_tmp, ",");
2424 			if (!strcmp("all", pciaddname)
2425 			    || !strcmp(pci_address_name, pciaddname)) {
2426 				long num_crtc;
2427 				int res = -1;
2428 
2429 				adev->enable_virtual_display = true;
2430 
2431 				if (pciaddname_tmp)
2432 					res = kstrtol(pciaddname_tmp, 10,
2433 						      &num_crtc);
2434 
2435 				if (!res) {
2436 					if (num_crtc < 1)
2437 						num_crtc = 1;
2438 					if (num_crtc > 6)
2439 						num_crtc = 6;
2440 					adev->mode_info.num_crtc = num_crtc;
2441 				} else {
2442 					adev->mode_info.num_crtc = 1;
2443 				}
2444 				break;
2445 			}
2446 		}
2447 
2448 		dev_info(
2449 			adev->dev,
2450 			"virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
2451 			amdgpu_virtual_display, pci_address_name,
2452 			adev->enable_virtual_display, adev->mode_info.num_crtc);
2453 
2454 		kfree(pciaddstr);
2455 	}
2456 }
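
/*
 * Example (illustrative addresses): amdgpu.virtual_display=0000:01:00.0,2
 * enables two virtual crtcs on the GPU at PCI address 0000:01:00.0, and
 * amdgpu.virtual_display=all,1 enables one virtual crtc on every GPU.
 * Entries are <pci address>,<number of crtcs> pairs separated by ';', as
 * parsed above; the crtc count is clamped to the range 1-6.
 */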
2457 
2458 void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
2459 {
2460 	if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
2461 		adev->mode_info.num_crtc = 1;
2462 		adev->enable_virtual_display = true;
2463 		dev_info(adev->dev, "virtual_display:%d, num_crtc:%d\n",
2464 			 adev->enable_virtual_display,
2465 			 adev->mode_info.num_crtc);
2466 	}
2467 }
2468 
2469 /**
2470  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
2471  *
2472  * @adev: amdgpu_device pointer
2473  *
2474  * Parses the asic configuration parameters specified in the gpu info
2475  * firmware and makes them available to the driver for use in configuring
2476  * the asic.
2477  * Returns 0 on success, -EINVAL on failure.
2478  */
2479 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
2480 {
2481 	const char *chip_name;
2482 	int err;
2483 	const struct gpu_info_firmware_header_v1_0 *hdr;
2484 
2485 	adev->firmware.gpu_info_fw = NULL;
2486 
2487 	switch (adev->asic_type) {
2488 	default:
2489 		return 0;
2490 	case CHIP_VEGA10:
2491 		chip_name = "vega10";
2492 		break;
2493 	case CHIP_VEGA12:
2494 		chip_name = "vega12";
2495 		break;
2496 	case CHIP_RAVEN:
2497 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
2498 			chip_name = "raven2";
2499 		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
2500 			chip_name = "picasso";
2501 		else
2502 			chip_name = "raven";
2503 		break;
2504 	case CHIP_ARCTURUS:
2505 		chip_name = "arcturus";
2506 		break;
2507 	case CHIP_NAVI12:
2508 		if (adev->discovery.bin)
2509 			return 0;
2510 		chip_name = "navi12";
2511 		break;
2512 	case CHIP_CYAN_SKILLFISH:
2513 		if (adev->discovery.bin)
2514 			return 0;
2515 		chip_name = "cyan_skillfish";
2516 		break;
2517 	}
2518 
2519 	err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw,
2520 				   AMDGPU_UCODE_OPTIONAL,
2521 				   "amdgpu/%s_gpu_info.bin", chip_name);
2522 	if (err) {
2523 		dev_err(adev->dev,
2524 			"Failed to get gpu_info firmware \"%s_gpu_info.bin\"\n",
2525 			chip_name);
2526 		goto out;
2527 	}
2528 
2529 	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
2530 	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
2531 
2532 	switch (hdr->version_major) {
2533 	case 1:
2534 	{
2535 		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
2536 			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
2537 								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2538 
2539 		/*
2540 		 * Should be dropped when DAL no longer needs it.
2541 		 */
2542 		if (adev->asic_type == CHIP_NAVI12)
2543 			goto parse_soc_bounding_box;
2544 
2545 		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
2546 		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
2547 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
2548 		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
2549 		adev->gfx.config.max_texture_channel_caches =
2550 			le32_to_cpu(gpu_info_fw->gc_num_tccs);
2551 		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
2552 		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
2553 		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
2554 		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
2555 		adev->gfx.config.double_offchip_lds_buf =
2556 			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
2557 		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
2558 		adev->gfx.cu_info.max_waves_per_simd =
2559 			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
2560 		adev->gfx.cu_info.max_scratch_slots_per_cu =
2561 			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
2562 		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
2563 		if (hdr->version_minor >= 1) {
2564 			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
2565 				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2566 									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2567 			adev->gfx.config.num_sc_per_sh =
2568 				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
2569 			adev->gfx.config.num_packer_per_sc =
2570 				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
2571 		}
2572 
2573 parse_soc_bounding_box:
2574 		/*
2575 		 * soc bounding box info is not integrated in the discovery table,
2576 		 * so when needed it always has to be parsed from the gpu info firmware.
2577 		 */
2578 		if (hdr->version_minor == 2) {
2579 			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
2580 				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2581 									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2582 			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2583 		}
2584 		break;
2585 	}
2586 	default:
2587 		dev_err(adev->dev,
2588 			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
2589 		err = -EINVAL;
2590 		goto out;
2591 	}
2592 out:
2593 	return err;
2594 }
2595 
2596 static void amdgpu_uid_init(struct amdgpu_device *adev)
2597 {
2598 	/* Initialize the UID for the device */
2599 	adev->uid_info = kzalloc(sizeof(struct amdgpu_uid), GFP_KERNEL);
2600 	if (!adev->uid_info) {
2601 		dev_warn(adev->dev, "Failed to allocate memory for UID\n");
2602 		return;
2603 	}
2604 	adev->uid_info->adev = adev;
2605 }
2606 
2607 static void amdgpu_uid_fini(struct amdgpu_device *adev)
2608 {
2609 	/* Free the UID memory */
2610 	kfree(adev->uid_info);
2611 	adev->uid_info = NULL;
2612 }
2613 
2614 /**
2615  * amdgpu_device_ip_early_init - run early init for hardware IPs
2616  *
2617  * @adev: amdgpu_device pointer
2618  *
2619  * Early initialization pass for hardware IPs.  The hardware IPs that make
2620  * up each asic are discovered and each IP's early_init callback is run.  This
2621  * is the first stage in initializing the asic.
2622  * Returns 0 on success, negative error code on failure.
2623  */
2624 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2625 {
2626 	struct amdgpu_ip_block *ip_block;
2627 	struct pci_dev *parent;
2628 	bool total, skip_bios;
2629 	uint32_t bios_flags;
2630 	int i, r;
2631 
2632 	amdgpu_device_enable_virtual_display(adev);
2633 
2634 	if (amdgpu_sriov_vf(adev)) {
2635 		r = amdgpu_virt_request_full_gpu(adev, true);
2636 		if (r)
2637 			return r;
2638 
2639 		r = amdgpu_virt_init_critical_region(adev);
2640 		if (r)
2641 			return r;
2642 	}
2643 
2644 	switch (adev->asic_type) {
2645 #ifdef CONFIG_DRM_AMDGPU_SI
2646 	case CHIP_VERDE:
2647 	case CHIP_TAHITI:
2648 	case CHIP_PITCAIRN:
2649 	case CHIP_OLAND:
2650 	case CHIP_HAINAN:
2651 		adev->family = AMDGPU_FAMILY_SI;
2652 		r = si_set_ip_blocks(adev);
2653 		if (r)
2654 			return r;
2655 		break;
2656 #endif
2657 #ifdef CONFIG_DRM_AMDGPU_CIK
2658 	case CHIP_BONAIRE:
2659 	case CHIP_HAWAII:
2660 	case CHIP_KAVERI:
2661 	case CHIP_KABINI:
2662 	case CHIP_MULLINS:
2663 		if (adev->flags & AMD_IS_APU)
2664 			adev->family = AMDGPU_FAMILY_KV;
2665 		else
2666 			adev->family = AMDGPU_FAMILY_CI;
2667 
2668 		r = cik_set_ip_blocks(adev);
2669 		if (r)
2670 			return r;
2671 		break;
2672 #endif
2673 	case CHIP_TOPAZ:
2674 	case CHIP_TONGA:
2675 	case CHIP_FIJI:
2676 	case CHIP_POLARIS10:
2677 	case CHIP_POLARIS11:
2678 	case CHIP_POLARIS12:
2679 	case CHIP_VEGAM:
2680 	case CHIP_CARRIZO:
2681 	case CHIP_STONEY:
2682 		if (adev->flags & AMD_IS_APU)
2683 			adev->family = AMDGPU_FAMILY_CZ;
2684 		else
2685 			adev->family = AMDGPU_FAMILY_VI;
2686 
2687 		r = vi_set_ip_blocks(adev);
2688 		if (r)
2689 			return r;
2690 		break;
2691 	default:
2692 		r = amdgpu_discovery_set_ip_blocks(adev);
2693 		if (r)
2694 			return r;
2695 		break;
2696 	}
2697 
2698 	/* Check for IP version 9.4.3 with A0 hardware */
2699 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
2700 	    !amdgpu_device_get_rev_id(adev)) {
2701 		dev_err(adev->dev, "Unsupported A0 hardware\n");
2702 		return -ENODEV;	/* device unsupported - no device error */
2703 	}
2704 
2705 	if (amdgpu_has_atpx() &&
2706 	    (amdgpu_is_atpx_hybrid() ||
2707 	     amdgpu_has_atpx_dgpu_power_cntl()) &&
2708 	    ((adev->flags & AMD_IS_APU) == 0) &&
2709 	    !dev_is_removable(&adev->pdev->dev))
2710 		adev->flags |= AMD_IS_PX;
2711 
2712 	if (!(adev->flags & AMD_IS_APU)) {
2713 		parent = pcie_find_root_port(adev->pdev);
2714 		adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2715 	}
2716 
2717 	adev->pm.pp_feature = amdgpu_pp_feature_mask;
2718 	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2719 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2720 	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2721 		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2722 	if (!amdgpu_device_pcie_dynamic_switching_supported(adev))
2723 		adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
2724 
2725 	adev->virt.is_xgmi_node_migrate_enabled = false;
2726 	if (amdgpu_sriov_vf(adev)) {
2727 		adev->virt.is_xgmi_node_migrate_enabled =
2728 			amdgpu_ip_version((adev), GC_HWIP, 0) == IP_VERSION(9, 4, 4);
2729 	}
2730 
2731 	total = true;
2732 	for (i = 0; i < adev->num_ip_blocks; i++) {
2733 		ip_block = &adev->ip_blocks[i];
2734 
2735 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2736 			dev_warn(adev->dev, "disabled ip block: %d <%s>\n", i,
2737 				 adev->ip_blocks[i].version->funcs->name);
2738 			adev->ip_blocks[i].status.valid = false;
2739 		} else if (ip_block->version->funcs->early_init) {
2740 			r = ip_block->version->funcs->early_init(ip_block);
2741 			if (r == -ENOENT) {
2742 				adev->ip_blocks[i].status.valid = false;
2743 			} else if (r) {
2744 				dev_err(adev->dev,
2745 					"early_init of IP block <%s> failed %d\n",
2746 					adev->ip_blocks[i].version->funcs->name,
2747 					r);
2748 				total = false;
2749 			} else {
2750 				adev->ip_blocks[i].status.valid = true;
2751 			}
2752 		} else {
2753 			adev->ip_blocks[i].status.valid = true;
2754 		}
2755 		/* get the vbios after the asic_funcs are set up */
2756 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2757 			r = amdgpu_device_parse_gpu_info_fw(adev);
2758 			if (r)
2759 				return r;
2760 
2761 			bios_flags = amdgpu_device_get_vbios_flags(adev);
2762 			skip_bios = !!(bios_flags & AMDGPU_VBIOS_SKIP);
2763 			/* Read BIOS */
2764 			if (!skip_bios) {
2765 				bool optional =
2766 					!!(bios_flags & AMDGPU_VBIOS_OPTIONAL);
2767 				if (!amdgpu_get_bios(adev) && !optional)
2768 					return -EINVAL;
2769 
2770 				if (optional && !adev->bios)
2771 					dev_info(
2772 						adev->dev,
2773 						"VBIOS image optional, proceeding without VBIOS image");
2774 
2775 				if (adev->bios) {
2776 					r = amdgpu_atombios_init(adev);
2777 					if (r) {
2778 						dev_err(adev->dev,
2779 							"amdgpu_atombios_init failed\n");
2780 						amdgpu_vf_error_put(
2781 							adev,
2782 							AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL,
2783 							0, 0);
2784 						return r;
2785 					}
2786 				}
2787 			}
2788 
2789 			/* get pf2vf msg info at its earliest time */
2790 			if (amdgpu_sriov_vf(adev))
2791 				amdgpu_virt_init_data_exchange(adev);
2792 
2793 		}
2794 	}
2795 	if (!total)
2796 		return -ENODEV;
2797 
2798 	if (adev->gmc.xgmi.supported)
2799 		amdgpu_xgmi_early_init(adev);
2800 
2801 	if (amdgpu_is_multi_aid(adev))
2802 		amdgpu_uid_init(adev);
2803 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
2804 	if (ip_block && ip_block->status.valid)
2805 		amdgpu_amdkfd_device_probe(adev);
2806 
2807 	adev->cg_flags &= amdgpu_cg_mask;
2808 	adev->pg_flags &= amdgpu_pg_mask;
2809 
2810 	return 0;
2811 }
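
/*
 * Example of the amdgpu.ip_block_mask test above: the mask carries one bit
 * per enumerated IP block, so a value of 0xfffffffd (bit 1 cleared) would
 * disable whichever block is enumerated second on a given asic.
 * Illustrative only; the enumeration order differs per asic.
 */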
2812 
2813 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2814 {
2815 	int i, r;
2816 
2817 	for (i = 0; i < adev->num_ip_blocks; i++) {
2818 		if (!adev->ip_blocks[i].status.sw)
2819 			continue;
2820 		if (adev->ip_blocks[i].status.hw)
2821 			continue;
2822 		if (!amdgpu_ip_member_of_hwini(
2823 			    adev, adev->ip_blocks[i].version->type))
2824 			continue;
2825 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2826 		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2827 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2828 			r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
2829 			if (r) {
2830 				dev_err(adev->dev,
2831 					"hw_init of IP block <%s> failed %d\n",
2832 					adev->ip_blocks[i].version->funcs->name,
2833 					r);
2834 				return r;
2835 			}
2836 			adev->ip_blocks[i].status.hw = true;
2837 		}
2838 	}
2839 
2840 	return 0;
2841 }
2842 
2843 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2844 {
2845 	int i, r;
2846 
2847 	for (i = 0; i < adev->num_ip_blocks; i++) {
2848 		if (!adev->ip_blocks[i].status.sw)
2849 			continue;
2850 		if (adev->ip_blocks[i].status.hw)
2851 			continue;
2852 		if (!amdgpu_ip_member_of_hwini(
2853 			    adev, adev->ip_blocks[i].version->type))
2854 			continue;
2855 		r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
2856 		if (r) {
2857 			dev_err(adev->dev,
2858 				"hw_init of IP block <%s> failed %d\n",
2859 				adev->ip_blocks[i].version->funcs->name, r);
2860 			return r;
2861 		}
2862 		adev->ip_blocks[i].status.hw = true;
2863 	}
2864 
2865 	return 0;
2866 }
2867 
2868 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2869 {
2870 	int r = 0;
2871 	int i;
2872 	uint32_t smu_version;
2873 
2874 	if (adev->asic_type >= CHIP_VEGA10) {
2875 		for (i = 0; i < adev->num_ip_blocks; i++) {
2876 			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2877 				continue;
2878 
2879 			if (!amdgpu_ip_member_of_hwini(adev,
2880 						       AMD_IP_BLOCK_TYPE_PSP))
2881 				break;
2882 
2883 			if (!adev->ip_blocks[i].status.sw)
2884 				continue;
2885 
2886 			/* no need to do the fw loading again if already done */
2887 			if (adev->ip_blocks[i].status.hw)
2888 				break;
2889 
2890 			if (amdgpu_in_reset(adev) || adev->in_suspend) {
2891 				r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
2892 				if (r)
2893 					return r;
2894 			} else {
2895 				r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
2896 				if (r) {
2897 					dev_err(adev->dev,
2898 						"hw_init of IP block <%s> failed %d\n",
2899 						adev->ip_blocks[i]
2900 							.version->funcs->name,
2901 						r);
2902 					return r;
2903 				}
2904 				adev->ip_blocks[i].status.hw = true;
2905 			}
2906 			break;
2907 		}
2908 	}
2909 
2910 	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2911 		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2912 
2913 	return r;
2914 }
2915 
2916 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2917 {
2918 	struct drm_sched_init_args args = {
2919 		.ops = &amdgpu_sched_ops,
2920 		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
2921 		.timeout_wq = adev->reset_domain->wq,
2922 		.dev = adev->dev,
2923 	};
2924 	long timeout;
2925 	int r, i;
2926 
2927 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2928 		struct amdgpu_ring *ring = adev->rings[i];
2929 
2930 		/* No need to set up the GPU scheduler for rings that don't need it */
2931 		if (!ring || ring->no_scheduler)
2932 			continue;
2933 
2934 		switch (ring->funcs->type) {
2935 		case AMDGPU_RING_TYPE_GFX:
2936 			timeout = adev->gfx_timeout;
2937 			break;
2938 		case AMDGPU_RING_TYPE_COMPUTE:
2939 			timeout = adev->compute_timeout;
2940 			break;
2941 		case AMDGPU_RING_TYPE_SDMA:
2942 			timeout = adev->sdma_timeout;
2943 			break;
2944 		default:
2945 			timeout = adev->video_timeout;
2946 			break;
2947 		}
2948 
2949 		args.timeout = timeout;
2950 		args.credit_limit = ring->num_hw_submission;
2951 		args.score = ring->sched_score;
2952 		args.name = ring->name;
2953 
2954 		r = drm_sched_init(&ring->sched, &args);
2955 		if (r) {
2956 			dev_err(adev->dev,
2957 				"Failed to create scheduler on ring %s.\n",
2958 				ring->name);
2959 			return r;
2960 		}
2961 		r = amdgpu_uvd_entity_init(adev, ring);
2962 		if (r) {
2963 			dev_err(adev->dev,
2964 				"Failed to create UVD scheduling entity on ring %s.\n",
2965 				ring->name);
2966 			return r;
2967 		}
2968 		r = amdgpu_vce_entity_init(adev, ring);
2969 		if (r) {
2970 			dev_err(adev->dev,
2971 				"Failed to create VCE scheduling entity on ring %s.\n",
2972 				ring->name);
2973 			return r;
2974 		}
2975 	}
2976 
2977 	if (adev->xcp_mgr)
2978 		amdgpu_xcp_update_partition_sched_list(adev);
2979 
2980 	return 0;
2981 }
2982 
2983 
2984 /**
2985  * amdgpu_device_ip_init - run init for hardware IPs
2986  *
2987  * @adev: amdgpu_device pointer
2988  *
2989  * Main initialization pass for hardware IPs.  The list of all the hardware
2990  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2991  * are run.  sw_init initializes the software state associated with each IP
2992  * and hw_init initializes the hardware associated with each IP.
2993  * Returns 0 on success, negative error code on failure.
2994  */
2995 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2996 {
2997 	bool init_badpage;
2998 	int i, r;
2999 
3000 	r = amdgpu_ras_init(adev);
3001 	if (r)
3002 		return r;
3003 
3004 	for (i = 0; i < adev->num_ip_blocks; i++) {
3005 		if (!adev->ip_blocks[i].status.valid)
3006 			continue;
3007 		if (adev->ip_blocks[i].version->funcs->sw_init) {
3008 			r = adev->ip_blocks[i].version->funcs->sw_init(&adev->ip_blocks[i]);
3009 			if (r) {
3010 				dev_err(adev->dev,
3011 					"sw_init of IP block <%s> failed %d\n",
3012 					adev->ip_blocks[i].version->funcs->name,
3013 					r);
3014 				goto init_failed;
3015 			}
3016 		}
3017 		adev->ip_blocks[i].status.sw = true;
3018 
3019 		if (!amdgpu_ip_member_of_hwini(
3020 			    adev, adev->ip_blocks[i].version->type))
3021 			continue;
3022 
3023 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
3024 			/* need to do common hw init early so everything is set up for gmc */
3025 			r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
3026 			if (r) {
3027 				dev_err(adev->dev, "hw_init %d failed %d\n", i,
3028 					r);
3029 				goto init_failed;
3030 			}
3031 			adev->ip_blocks[i].status.hw = true;
3032 		} else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
3033 			/* need to do gmc hw init early so we can allocate gpu mem */
3034 			/* Try to reserve bad pages early */
3035 			if (amdgpu_sriov_vf(adev))
3036 				amdgpu_virt_exchange_data(adev);
3037 
3038 			r = amdgpu_device_mem_scratch_init(adev);
3039 			if (r) {
3040 				dev_err(adev->dev,
3041 					"amdgpu_mem_scratch_init failed %d\n",
3042 					r);
3043 				goto init_failed;
3044 			}
3045 			r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
3046 			if (r) {
3047 				dev_err(adev->dev, "hw_init %d failed %d\n", i,
3048 					r);
3049 				goto init_failed;
3050 			}
3051 			r = amdgpu_device_wb_init(adev);
3052 			if (r) {
3053 				dev_err(adev->dev,
3054 					"amdgpu_device_wb_init failed %d\n", r);
3055 				goto init_failed;
3056 			}
3057 			adev->ip_blocks[i].status.hw = true;
3058 
3059 			/* right after GMC hw init, we create CSA */
3060 			if (adev->gfx.mcbp) {
3061 				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
3062 							       AMDGPU_GEM_DOMAIN_VRAM |
3063 							       AMDGPU_GEM_DOMAIN_GTT,
3064 							       AMDGPU_CSA_SIZE);
3065 				if (r) {
3066 					dev_err(adev->dev,
3067 						"allocate CSA failed %d\n", r);
3068 					goto init_failed;
3069 				}
3070 			}
3071 
3072 			r = amdgpu_seq64_init(adev);
3073 			if (r) {
3074 				dev_err(adev->dev, "allocate seq64 failed %d\n",
3075 					r);
3076 				goto init_failed;
3077 			}
3078 		}
3079 	}
3080 
3081 	if (amdgpu_sriov_vf(adev))
3082 		amdgpu_virt_init_data_exchange(adev);
3083 
3084 	r = amdgpu_ib_pool_init(adev);
3085 	if (r) {
3086 		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
3087 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
3088 		goto init_failed;
3089 	}
3090 
3091 	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
3092 	if (r)
3093 		goto init_failed;
3094 
3095 	r = amdgpu_device_ip_hw_init_phase1(adev);
3096 	if (r)
3097 		goto init_failed;
3098 
3099 	r = amdgpu_device_fw_loading(adev);
3100 	if (r)
3101 		goto init_failed;
3102 
3103 	r = amdgpu_device_ip_hw_init_phase2(adev);
3104 	if (r)
3105 		goto init_failed;
3106 
3107 	/*
3108 	 * retired pages will be loaded from eeprom and reserved here,
3109 	 * it should be called after amdgpu_device_ip_hw_init_phase2 since
3110 	 * for some ASICs the RAS EEPROM code relies on the SMU being fully
3111 	 * functional for I2C communication, which is only true at this point.
3112 	 *
3113 	 * amdgpu_ras_recovery_init may fail, but the upper layers only care
3114 	 * about failures caused by a bad gpu situation and stop the amdgpu
3115 	 * init process accordingly. For other failure cases, it still releases
3116 	 * all the resources and prints an error message, rather than returning
3117 	 * a negative value to the upper level.
3118 	 *
3119 	 * Note: theoretically, this should be called before all vram allocations
3120 	 * to protect retired pages from being abused.
3121 	 */
3122 	init_badpage = (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI);
3123 	r = amdgpu_ras_recovery_init(adev, init_badpage);
3124 	if (r)
3125 		goto init_failed;
3126 
3127 	/*
3128 	 * In case of XGMI grab extra reference for reset domain for this device
3129 	 */
3130 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
3131 		if (amdgpu_xgmi_add_device(adev) == 0) {
3132 			if (!amdgpu_sriov_vf(adev)) {
3133 				struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3134 
3135 				if (WARN_ON(!hive)) {
3136 					r = -ENOENT;
3137 					goto init_failed;
3138 				}
3139 
3140 				if (!hive->reset_domain ||
3141 				    !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
3142 					r = -ENOENT;
3143 					amdgpu_put_xgmi_hive(hive);
3144 					goto init_failed;
3145 				}
3146 
3147 				/* Drop the early temporary reset domain we created for device */
3148 				amdgpu_reset_put_reset_domain(adev->reset_domain);
3149 				adev->reset_domain = hive->reset_domain;
3150 				amdgpu_put_xgmi_hive(hive);
3151 			}
3152 		}
3153 	}
3154 
3155 	r = amdgpu_device_init_schedulers(adev);
3156 	if (r)
3157 		goto init_failed;
3158 
3159 	amdgpu_ttm_set_buffer_funcs_status(adev, true);
3160 
3161 	/* Don't init kfd if the whole hive needs to be reset during init */
3162 	if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
3163 		amdgpu_amdkfd_device_init(adev);
3164 	}
3165 
3166 	amdgpu_fru_get_product_info(adev);
3167 
3168 	r = amdgpu_cper_init(adev);
3169 
3170 init_failed:
3171 
3172 	return r;
3173 }
3174 
3175 /**
3176  * amdgpu_device_fill_reset_magic - save the reset magic from the gart pointer
3177  *
3178  * @adev: amdgpu_device pointer
3179  *
3180  * Saves a copy of the current GART contents (the "reset magic") so the driver
3181  * can compare them after a GPU reset.  If the value is retained after the
3182  * reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
3183  */
3184 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
3185 {
3186 	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
3187 }
3188 
3189 /**
3190  * amdgpu_device_check_vram_lost - check if vram is valid
3191  *
3192  * @adev: amdgpu_device pointer
3193  *
3194  * Checks the reset magic value written to the gart pointer in VRAM.
3195  * Compares the reset magic value saved from the gart pointer in VRAM.
3196  * The driver calls this after a GPU reset to see if the contents of
3197  * VRAM are lost or not.
3198  */
3199 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
3200 {
3201 	if (memcmp(adev->gart.ptr, adev->reset_magic,
3202 			AMDGPU_RESET_MAGIC_NUM))
3203 		return true;
3204 
3205 	if (!amdgpu_in_reset(adev))
3206 		return false;
3207 
3208 	/*
3209 	 * For all ASICs with baco/mode1 reset, the VRAM is
3210 	 * always assumed to be lost.
3211 	 */
3212 	switch (amdgpu_asic_reset_method(adev)) {
3213 	case AMD_RESET_METHOD_LEGACY:
3214 	case AMD_RESET_METHOD_LINK:
3215 	case AMD_RESET_METHOD_BACO:
3216 	case AMD_RESET_METHOD_MODE1:
3217 		return true;
3218 	default:
3219 		return false;
3220 	}
3221 }
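
/*
 * Sketch of the intended call sequence around a GPU reset (illustrative,
 * not literal driver code):
 *
 *	amdgpu_device_fill_reset_magic(adev);	  <- save magic before reset
 *	... perform the GPU reset ...
 *	if (amdgpu_device_check_vram_lost(adev))
 *		... re-upload VRAM contents (ucode, page tables, etc.) ...
 */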
3222 
3223 /**
3224  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
3225  *
3226  * @adev: amdgpu_device pointer
3227  * @state: clockgating state (gate or ungate)
3228  *
3229  * The list of all the hardware IPs that make up the asic is walked and the
3230  * set_clockgating_state callbacks are run.
3231  * The late initialization pass enables clockgating for hardware IPs;
3232  * the fini or suspend pass disables clockgating for hardware IPs.
3233  * Returns 0 on success, negative error code on failure.
3234  */
3235 
3236 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
3237 			       enum amd_clockgating_state state)
3238 {
3239 	int i, j, r;
3240 
3241 	if (amdgpu_emu_mode == 1)
3242 		return 0;
3243 
3244 	for (j = 0; j < adev->num_ip_blocks; j++) {
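		/* gate in discovery order, ungate in reverse discovery order */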
3245 		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
3246 		if (!adev->ip_blocks[i].status.late_initialized)
3247 			continue;
3248 		/* skip CG for GFX, SDMA on S0ix */
3249 		if (adev->in_s0ix &&
3250 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3251 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
3252 			continue;
3253 		/* skip CG for VCE/UVD, it's handled specially */
3254 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
3255 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
3256 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
3257 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
3258 		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
3259 			/* enable clockgating to save power */
3260 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state(&adev->ip_blocks[i],
3261 										     state);
3262 			if (r) {
3263 				dev_err(adev->dev,
3264 					"set_clockgating_state(gate) of IP block <%s> failed %d\n",
3265 					adev->ip_blocks[i].version->funcs->name,
3266 					r);
3267 				return r;
3268 			}
3269 		}
3270 	}
3271 
3272 	return 0;
3273 }
3274 
3275 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
3276 			       enum amd_powergating_state state)
3277 {
3278 	int i, j, r;
3279 
3280 	if (amdgpu_emu_mode == 1)
3281 		return 0;
3282 
3283 	for (j = 0; j < adev->num_ip_blocks; j++) {
3284 		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
3285 		if (!adev->ip_blocks[i].status.late_initialized)
3286 			continue;
3287 		/* skip PG for GFX, SDMA on S0ix */
3288 		if (adev->in_s0ix &&
3289 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3290 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
3291 			continue;
3292 		/* skip PG for VCE/UVD, it's handled specially */
3293 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
3294 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
3295 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
3296 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
3297 		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
3298 			/* enable powergating to save power */
3299 			r = adev->ip_blocks[i].version->funcs->set_powergating_state(&adev->ip_blocks[i],
3300 											state);
3301 			if (r) {
3302 				dev_err(adev->dev,
3303 					"set_powergating_state(gate) of IP block <%s> failed %d\n",
3304 					adev->ip_blocks[i].version->funcs->name,
3305 					r);
3306 				return r;
3307 			}
3308 		}
3309 	}
3310 	return 0;
3311 }
3312 
3313 static int amdgpu_device_enable_mgpu_fan_boost(void)
3314 {
3315 	struct amdgpu_gpu_instance *gpu_ins;
3316 	struct amdgpu_device *adev;
3317 	int i, ret = 0;
3318 
3319 	mutex_lock(&mgpu_info.mutex);
3320 
3321 	/*
3322 	 * MGPU fan boost feature should be enabled
3323 	 * only when there are two or more dGPUs in
3324 	 * the system
3325 	 */
3326 	if (mgpu_info.num_dgpu < 2)
3327 		goto out;
3328 
3329 	for (i = 0; i < mgpu_info.num_dgpu; i++) {
3330 		gpu_ins = &(mgpu_info.gpu_ins[i]);
3331 		adev = gpu_ins->adev;
3332 		if (!(adev->flags & AMD_IS_APU || amdgpu_sriov_multi_vf_mode(adev)) &&
3333 		    !gpu_ins->mgpu_fan_enabled) {
3334 			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
3335 			if (ret)
3336 				break;
3337 
3338 			gpu_ins->mgpu_fan_enabled = 1;
3339 		}
3340 	}
3341 
3342 out:
3343 	mutex_unlock(&mgpu_info.mutex);
3344 
3345 	return ret;
3346 }
3347 
3348 /**
3349  * amdgpu_device_ip_late_init - run late init for hardware IPs
3350  *
3351  * @adev: amdgpu_device pointer
3352  *
3353  * Late initialization pass for hardware IPs.  The list of all the hardware
3354  * IPs that make up the asic is walked and the late_init callbacks are run.
3355  * late_init covers any special initialization that an IP requires
3356  * after all of the IPs have been initialized or something that needs to happen
3357  * late in the init process.
3358  * Returns 0 on success, negative error code on failure.
3359  */
3360 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
3361 {
3362 	struct amdgpu_gpu_instance *gpu_instance;
3363 	int i = 0, r;
3364 
3365 	for (i = 0; i < adev->num_ip_blocks; i++) {
3366 		if (!adev->ip_blocks[i].status.hw)
3367 			continue;
3368 		if (adev->ip_blocks[i].version->funcs->late_init) {
3369 			r = adev->ip_blocks[i].version->funcs->late_init(&adev->ip_blocks[i]);
3370 			if (r) {
3371 				dev_err(adev->dev,
3372 					"late_init of IP block <%s> failed %d\n",
3373 					adev->ip_blocks[i].version->funcs->name,
3374 					r);
3375 				return r;
3376 			}
3377 		}
3378 		adev->ip_blocks[i].status.late_initialized = true;
3379 	}
3380 
3381 	r = amdgpu_ras_late_init(adev);
3382 	if (r) {
3383 		dev_err(adev->dev, "amdgpu_ras_late_init failed %d", r);
3384 		return r;
3385 	}
3386 
3387 	if (!amdgpu_reset_in_recovery(adev))
3388 		amdgpu_ras_set_error_query_ready(adev, true);
3389 
3390 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
3391 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
3392 
3393 	amdgpu_device_fill_reset_magic(adev);
3394 
3395 	r = amdgpu_device_enable_mgpu_fan_boost();
3396 	if (r)
3397 		dev_err(adev->dev, "enable mgpu fan boost failed (%d).\n", r);
3398 
3399 	/* For passthrough configurations on arcturus and aldebaran, enable special SBR handling */
3400 	if (amdgpu_passthrough(adev) &&
3401 	    ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
3402 	     adev->asic_type == CHIP_ALDEBARAN))
3403 		amdgpu_dpm_handle_passthrough_sbr(adev, true);
3404 
3405 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
3406 		mutex_lock(&mgpu_info.mutex);
3407 
3408 		/*
3409 		 * Reset the device p-state to low, as it was booted with high.
3410 		 *
3411 		 * This should be performed only after all devices from the same
3412 		 * hive have been initialized.
3413 		 *
3414 		 * However, the number of devices in a hive is not known in
3415 		 * advance; it is counted one by one as devices initialize.
3416 		 *
3417 		 * So we wait until all XGMI interlinked devices are initialized.
3418 		 * This may add some delay, as those devices may come from
3419 		 * different hives. But that should be OK.
3420 		 */
3421 		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
3422 			for (i = 0; i < mgpu_info.num_gpu; i++) {
3423 				gpu_instance = &(mgpu_info.gpu_ins[i]);
3424 				if (gpu_instance->adev->flags & AMD_IS_APU)
3425 					continue;
3426 
3427 				r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
3428 						AMDGPU_XGMI_PSTATE_MIN);
3429 				if (r) {
3430 					dev_err(adev->dev,
3431 						"pstate setting failed (%d).\n",
3432 						r);
3433 					break;
3434 				}
3435 			}
3436 		}
3437 
3438 		mutex_unlock(&mgpu_info.mutex);
3439 	}
3440 
3441 	return 0;
3442 }
3443 
3444 static void amdgpu_ip_block_hw_fini(struct amdgpu_ip_block *ip_block)
3445 {
3446 	struct amdgpu_device *adev = ip_block->adev;
3447 	int r;
3448 
3449 	if (!ip_block->version->funcs->hw_fini) {
3450 		dev_err(adev->dev, "hw_fini of IP block <%s> not defined\n",
3451 			ip_block->version->funcs->name);
3452 	} else {
3453 		r = ip_block->version->funcs->hw_fini(ip_block);
3454 		/* XXX handle errors */
3455 		if (r) {
3456 			dev_dbg(adev->dev,
3457 				"hw_fini of IP block <%s> failed %d\n",
3458 				ip_block->version->funcs->name, r);
3459 		}
3460 	}
3461 
3462 	ip_block->status.hw = false;
3463 }
3464 
3465 /**
3466  * amdgpu_device_smu_fini_early - smu hw_fini wrapper
3467  *
3468  * @adev: amdgpu_device pointer
3469  *
3470  * For ASICs that need to disable the SMC first
3471  */
3472 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
3473 {
3474 	int i;
3475 
3476 	if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
3477 		return;
3478 
3479 	for (i = 0; i < adev->num_ip_blocks; i++) {
3480 		if (!adev->ip_blocks[i].status.hw)
3481 			continue;
3482 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3483 			amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]);
3484 			break;
3485 		}
3486 	}
3487 }
3488 
3489 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
3490 {
3491 	int i, r;
3492 
3493 	for (i = 0; i < adev->num_ip_blocks; i++) {
3494 		if (!adev->ip_blocks[i].version->funcs->early_fini)
3495 			continue;
3496 
3497 		r = adev->ip_blocks[i].version->funcs->early_fini(&adev->ip_blocks[i]);
3498 		if (r) {
3499 			dev_dbg(adev->dev,
3500 				"early_fini of IP block <%s> failed %d\n",
3501 				adev->ip_blocks[i].version->funcs->name, r);
3502 		}
3503 	}
3504 
3505 	amdgpu_amdkfd_suspend(adev, true);
3506 	amdgpu_amdkfd_teardown_processes(adev);
3507 	amdgpu_userq_suspend(adev);
3508 
3509 	/* Workaround for ASICs that need to disable the SMC first */
3510 	amdgpu_device_smu_fini_early(adev);
3511 
3512 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3513 		if (!adev->ip_blocks[i].status.hw)
3514 			continue;
3515 
3516 		amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]);
3517 	}
3518 
3519 	if (amdgpu_sriov_vf(adev)) {
3520 		if (amdgpu_virt_release_full_gpu(adev, false))
3521 			dev_err(adev->dev,
3522 				"failed to release exclusive mode on fini\n");
3523 	}
3524 
3525 	/*
3526 	 * Driver reload on the APU can fail due to firmware validation because
3527 	 * the PSP is always running, as it is shared across the whole SoC.
3528 	 * This same issue does not occur on dGPU because it has a mechanism
3529 	 * that checks whether the PSP is running. A solution for those issues
3530 	 * in the APU is to trigger a GPU reset, but this should be done during
3531 	 * the unload phase to avoid adding boot latency and screen flicker.
3532 	 */
3533 	if ((adev->flags & AMD_IS_APU) && !adev->gmc.is_app_apu) {
3534 		r = amdgpu_asic_reset(adev);
3535 		if (r)
3536 			dev_err(adev->dev, "asic reset on %s failed\n", __func__);
3537 	}
3538 
3539 	return 0;
3540 }
3541 
3542 /**
3543  * amdgpu_device_ip_fini - run fini for hardware IPs
3544  *
3545  * @adev: amdgpu_device pointer
3546  *
3547  * Main teardown pass for hardware IPs.  The list of all the hardware
3548  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
3549  * are run.  hw_fini tears down the hardware associated with each IP
3550  * and sw_fini tears down any software state associated with each IP.
3551  * Returns 0 on success, negative error code on failure.
3552  */
3553 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
3554 {
3555 	int i, r;
3556 
3557 	amdgpu_cper_fini(adev);
3558 
3559 	if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
3560 		amdgpu_virt_release_ras_err_handler_data(adev);
3561 
3562 	if (adev->gmc.xgmi.num_physical_nodes > 1)
3563 		amdgpu_xgmi_remove_device(adev);
3564 
3565 	amdgpu_amdkfd_device_fini_sw(adev);
3566 
3567 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3568 		if (!adev->ip_blocks[i].status.sw)
3569 			continue;
3570 
3571 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
3572 			amdgpu_ucode_free_bo(adev);
3573 			amdgpu_free_static_csa(&adev->virt.csa_obj);
3574 			amdgpu_device_wb_fini(adev);
3575 			amdgpu_device_mem_scratch_fini(adev);
3576 			amdgpu_ib_pool_fini(adev);
3577 			amdgpu_seq64_fini(adev);
3578 			amdgpu_doorbell_fini(adev);
3579 		}
3580 		if (adev->ip_blocks[i].version->funcs->sw_fini) {
3581 			r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]);
3582 			/* XXX handle errors */
3583 			if (r) {
3584 				dev_dbg(adev->dev,
3585 					"sw_fini of IP block <%s> failed %d\n",
3586 					adev->ip_blocks[i].version->funcs->name,
3587 					r);
3588 			}
3589 		}
3590 		adev->ip_blocks[i].status.sw = false;
3591 		adev->ip_blocks[i].status.valid = false;
3592 	}
3593 
3594 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3595 		if (!adev->ip_blocks[i].status.late_initialized)
3596 			continue;
3597 		if (adev->ip_blocks[i].version->funcs->late_fini)
3598 			adev->ip_blocks[i].version->funcs->late_fini(&adev->ip_blocks[i]);
3599 		adev->ip_blocks[i].status.late_initialized = false;
3600 	}
3601 
3602 	amdgpu_ras_fini(adev);
3603 	amdgpu_uid_fini(adev);
3604 
3605 	return 0;
3606 }
3607 
3608 /**
3609  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
3610  *
3611  * @work: work_struct.
3612  */
3613 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
3614 {
3615 	struct amdgpu_device *adev =
3616 		container_of(work, struct amdgpu_device, delayed_init_work.work);
3617 	int r;
3618 
3619 	r = amdgpu_ib_ring_tests(adev);
3620 	if (r)
3621 		dev_err(adev->dev, "ib ring test failed (%d).\n", r);
3622 }
3623 
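/**
 * amdgpu_device_delay_enable_gfx_off - delayed work handler for GFXOFF
 *
 * @work: work_struct
 *
 * Asks the SMU to actually enter the GFXOFF state once the delayed work
 * fires, i.e. once no GFXOFF disable requests have come in for the whole
 * delay period.
 */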
3624 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
3625 {
3626 	struct amdgpu_device *adev =
3627 		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
3628 
3629 	WARN_ON_ONCE(adev->gfx.gfx_off_state);
3630 	WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
3631 
3632 	if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true, 0))
3633 		adev->gfx.gfx_off_state = true;
3634 }
3635 
3636 /**
3637  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
3638  *
3639  * @adev: amdgpu_device pointer
3640  *
3641  * First suspend phase for hardware IPs.  Clockgating and powergating are
3642  * ungated, df cstate is disallowed, and the suspend callbacks are run for
3643  * the display (DCE) blocks only; all other blocks are handled in phase 2.
3644  * On failure the displays are resumed and the gating state is restored.
3645  * Returns 0 on success, negative error code on failure.
3646  */
3647 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
3648 {
3649 	int i, r, rec;
3650 
3651 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
3652 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
3653 
3654 	/*
3655 	 * Per the PMFW team's suggestion, the driver needs to disable the
3656 	 * gfxoff and df cstate features for gpu reset (e.g. Mode1Reset)
3657 	 * scenarios. Add the missing df cstate disablement here.
3658 	 */
3659 	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
3660 		dev_warn(adev->dev, "Failed to disallow df cstate");
3661 
3662 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3663 		if (!adev->ip_blocks[i].status.valid)
3664 			continue;
3665 
3666 		/* displays are handled separately */
3667 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
3668 			continue;
3669 
3670 		r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
3671 		if (r)
3672 			goto unwind;
3673 	}
3674 
3675 	return 0;
3676 unwind:
3677 	rec = amdgpu_device_ip_resume_phase3(adev);
3678 	if (rec)
3679 		dev_err(adev->dev,
3680 			"amdgpu_device_ip_resume_phase3 failed during unwind: %d\n",
3681 			rec);
3682 
3683 	amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW);
3684 
3685 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
3686 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
3687 
3688 	return r;
3689 }
3690 
3691 /**
3692  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
3693  *
3694  * @adev: amdgpu_device pointer
3695  *
3696  * Second suspend phase for hardware IPs.  The suspend callbacks are run
3697  * for all blocks except the displays (handled in phase 1), skipping the
3698  * S0ix, SR-IOV and reset special cases.  suspend puts the hardware and
3699  * software state in each IP into a state suitable for suspend.
3700  * Returns 0 on success, negative error code on failure.
3701  */
3702 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
3703 {
3704 	int i, r, rec;
3705 
3706 	if (adev->in_s0ix)
3707 		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
3708 
3709 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3710 		if (!adev->ip_blocks[i].status.valid)
3711 			continue;
3712 		/* displays are handled in phase1 */
3713 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
3714 			continue;
3715 		/* PSP lost connection when err_event_athub occurs */
3716 		if (amdgpu_ras_intr_triggered() &&
3717 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3718 			adev->ip_blocks[i].status.hw = false;
3719 			continue;
3720 		}
3721 
3722 		/* skip unnecessary suspend if we have not initialized them yet */
3723 		if (!amdgpu_ip_member_of_hwini(
3724 			    adev, adev->ip_blocks[i].version->type))
3725 			continue;
3726 
3727 		/* Since we skip suspend for S0i3, we need to cancel the delayed
3728 		 * idle work here as the suspend callback never gets called.
3729 		 */
3730 		if (adev->in_s0ix &&
3731 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX &&
3732 		    amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0))
3733 			cancel_delayed_work_sync(&adev->gfx.idle_work);
3734 		/* skip suspend of gfx/mes and psp for S0ix:
3735 		 * gfx is in the gfxoff state, so on resume it will exit gfxoff just
3736 		 * like at runtime. PSP is also part of the always-on hardware,
3737 		 * so there is no need to suspend it.
3738 		 */
3739 		if (adev->in_s0ix &&
3740 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3741 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3742 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
3743 			continue;
3744 
3745 		/* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
3746 		if (adev->in_s0ix &&
3747 		    (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=
3748 		     IP_VERSION(5, 0, 0)) &&
3749 		    (adev->ip_blocks[i].version->type ==
3750 		     AMD_IP_BLOCK_TYPE_SDMA))
3751 			continue;
3752 
3753 		/* swPSP provides the IMU and RLC FW binaries to the TOS during cold boot.
3754 		 * These live in the TMR and are expected to be reused by PSP-TOS, which
3755 		 * reloads them from that location; RLC autoload is likewise triggered
3756 		 * from there by the PMFW -> PSP message during the re-init sequence.
3757 		 * Therefore, psp suspend & resume should be skipped on IMU enabled APU
3758 		 * ASICs to avoid destroying the TMR and reloading the FWs again.
3759 		 */
3760 		if (amdgpu_in_reset(adev) &&
3761 		    (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
3762 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3763 			continue;
3764 
3765 		r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
3766 		if (r)
3767 			goto unwind;
3768 
3769 		/* handle putting the SMC in the appropriate state */
3770 		if (!amdgpu_sriov_vf(adev)) {
3771 			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3772 				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3773 				if (r) {
3774 					dev_err(adev->dev,
3775 						"SMC failed to set mp1 state %d, %d\n",
3776 						adev->mp1_state, r);
3777 					goto unwind;
3778 				}
3779 			}
3780 		}
3781 	}
3782 
3783 	return 0;
3784 unwind:
3785 	/* suspend phase 2 = resume phase 1 + resume phase 2 */
3786 	rec = amdgpu_device_ip_resume_phase1(adev);
3787 	if (rec) {
3788 		dev_err(adev->dev,
3789 			"amdgpu_device_ip_resume_phase1 failed during unwind: %d\n",
3790 			rec);
3791 		return r;
3792 	}
3793 
3794 	rec = amdgpu_device_fw_loading(adev);
3795 	if (rec) {
3796 		dev_err(adev->dev,
3797 			"amdgpu_device_fw_loading failed during unwind: %d\n",
3798 			rec);
3799 		return r;
3800 	}
3801 
3802 	rec = amdgpu_device_ip_resume_phase2(adev);
3803 	if (rec) {
3804 		dev_err(adev->dev,
3805 			"amdgpu_device_ip_resume_phase2 failed during unwind: %d\n",
3806 			rec);
3807 		return r;
3808 	}
3809 
3810 	return r;
3811 }
3812 
3813 /**
3814  * amdgpu_device_ip_suspend - run suspend for hardware IPs
3815  *
3816  * @adev: amdgpu_device pointer
3817  *
3818  * Main suspend function for hardware IPs.  The list of all the hardware
3819  * IPs that make up the asic is walked, clockgating is disabled and the
3820  * suspend callbacks are run.  suspend puts the hardware and software state
3821  * in each IP into a state suitable for suspend.
3822  * Returns 0 on success, negative error code on failure.
3823  */
3824 static int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3825 {
3826 	int r;
3827 
3828 	if (amdgpu_sriov_vf(adev)) {
3829 		amdgpu_virt_fini_data_exchange(adev);
3830 		amdgpu_virt_request_full_gpu(adev, false);
3831 	}
3832 
3833 	amdgpu_ttm_set_buffer_funcs_status(adev, false);
3834 
3835 	r = amdgpu_device_ip_suspend_phase1(adev);
3836 	if (r)
3837 		return r;
3838 	r = amdgpu_device_ip_suspend_phase2(adev);
3839 
3840 	if (amdgpu_sriov_vf(adev))
3841 		amdgpu_virt_release_full_gpu(adev, false);
3842 
3843 	return r;
3844 }
3845 
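/**
 * amdgpu_device_ip_reinit_early_sriov - re-init early IPs for SR-IOV reset
 *
 * @adev: amdgpu_device pointer
 *
 * Used in the SR-IOV reset path: re-runs hw_init for the COMMON, GMC, PSP
 * and IH blocks in that fixed order; the remaining blocks are handled
 * later by the late variant below.
 * Returns 0 on success, negative error code on failure.
 */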
3846 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3847 {
3848 	int i, r;
3849 
3850 	static enum amd_ip_block_type ip_order[] = {
3851 		AMD_IP_BLOCK_TYPE_COMMON,
3852 		AMD_IP_BLOCK_TYPE_GMC,
3853 		AMD_IP_BLOCK_TYPE_PSP,
3854 		AMD_IP_BLOCK_TYPE_IH,
3855 	};
3856 
3857 	for (i = 0; i < adev->num_ip_blocks; i++) {
3858 		int j;
3859 		struct amdgpu_ip_block *block;
3860 
3861 		block = &adev->ip_blocks[i];
3862 		block->status.hw = false;
3863 
3864 		for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3865 
3866 			if (block->version->type != ip_order[j] ||
3867 				!block->status.valid)
3868 				continue;
3869 
3870 			r = block->version->funcs->hw_init(&adev->ip_blocks[i]);
3871 			if (r) {
3872 				dev_err(adev->dev, "RE-INIT-early: %s failed\n",
3873 					 block->version->funcs->name);
3874 				return r;
3875 			}
3876 			block->status.hw = true;
3877 		}
3878 	}
3879 
3880 	return 0;
3881 }
3882 
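/**
 * amdgpu_device_ip_reinit_late_sriov - re-init remaining IPs for SR-IOV reset
 *
 * @adev: amdgpu_device pointer
 *
 * Re-initializes the remaining blocks (SMC, display, GFX, SDMA, MES and
 * the multimedia blocks) in a fixed order.  The SMC is resumed rather
 * than hw_init'ed so that its saved state is restored.
 * Returns 0 on success, negative error code on failure.
 */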
3883 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3884 {
3885 	struct amdgpu_ip_block *block;
3886 	int i, r = 0;
3887 
3888 	static enum amd_ip_block_type ip_order[] = {
3889 		AMD_IP_BLOCK_TYPE_SMC,
3890 		AMD_IP_BLOCK_TYPE_DCE,
3891 		AMD_IP_BLOCK_TYPE_GFX,
3892 		AMD_IP_BLOCK_TYPE_SDMA,
3893 		AMD_IP_BLOCK_TYPE_MES,
3894 		AMD_IP_BLOCK_TYPE_UVD,
3895 		AMD_IP_BLOCK_TYPE_VCE,
3896 		AMD_IP_BLOCK_TYPE_VCN,
3897 		AMD_IP_BLOCK_TYPE_JPEG
3898 	};
3899 
3900 	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3901 		block = amdgpu_device_ip_get_ip_block(adev, ip_order[i]);
3902 
3903 		if (!block)
3904 			continue;
3905 
3906 		if (block->status.valid && !block->status.hw) {
3907 			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC) {
3908 				r = amdgpu_ip_block_resume(block);
3909 			} else {
3910 				r = block->version->funcs->hw_init(block);
3911 			}
3912 
3913 			if (r) {
3914 				dev_err(adev->dev, "RE-INIT-late: %s failed\n",
3915 					 block->version->funcs->name);
3916 				break;
3917 			}
3918 			block->status.hw = true;
3919 		}
3920 	}
3921 
3922 	return r;
3923 }
3924 
3925 /**
3926  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3927  *
3928  * @adev: amdgpu_device pointer
3929  *
3930  * First resume function for hardware IPs.  The list of all the hardware
3931  * IPs that make up the asic is walked and the resume callbacks are run for
3932  * COMMON, GMC, IH and (for SR-IOV) PSP.  resume puts the hardware into a functional state
3933  * after a suspend and updates the software state as necessary.  This
3934  * function is also used for restoring the GPU after a GPU reset.
3935  * Returns 0 on success, negative error code on failure.
3936  */
3937 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3938 {
3939 	int i, r;
3940 
3941 	for (i = 0; i < adev->num_ip_blocks; i++) {
3942 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3943 			continue;
3944 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3945 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3946 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3947 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3948 
3949 			r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
3950 			if (r)
3951 				return r;
3952 		}
3953 	}
3954 
3955 	return 0;
3956 }
3957 
3958 /**
3959  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3960  *
3961  * @adev: amdgpu_device pointer
3962  *
3963  * Second resume function for hardware IPs.  The list of all the hardware
3964  * IPs that make up the asic is walked and the resume callbacks are run for
3965  * all blocks except COMMON, GMC, IH, DCE and PSP.  resume puts the hardware into a
3966  * functional state after a suspend and updates the software state as
3967  * necessary.  This function is also used for restoring the GPU after a GPU
3968  * reset.
3969  * Returns 0 on success, negative error code on failure.
3970  */
3971 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3972 {
3973 	int i, r;
3974 
3975 	for (i = 0; i < adev->num_ip_blocks; i++) {
3976 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3977 			continue;
3978 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3979 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3980 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3981 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE ||
3982 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3983 			continue;
3984 		r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
3985 		if (r)
3986 			return r;
3987 	}
3988 
3989 	return 0;
3990 }
3991 
3992 /**
3993  * amdgpu_device_ip_resume_phase3 - run resume for hardware IPs
3994  *
3995  * @adev: amdgpu_device pointer
3996  *
3997  * Third resume function for hardware IPs.  The list of all the hardware
3998  * IPs that make up the asic is walked and the resume callbacks are run for
3999  * the DCE (display) blocks.  resume puts the hardware into a functional state after a suspend
4000  * and updates the software state as necessary.  This function is also used
4001  * for restoring the GPU after a GPU reset.
4002  *
4003  * Returns 0 on success, negative error code on failure.
4004  */
4005 static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev)
4006 {
4007 	int i, r;
4008 
4009 	for (i = 0; i < adev->num_ip_blocks; i++) {
4010 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
4011 			continue;
4012 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
4013 			r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
4014 			if (r)
4015 				return r;
4016 		}
4017 	}
4018 
4019 	return 0;
4020 }
4021 
4022 /**
4023  * amdgpu_device_ip_resume - run resume for hardware IPs
4024  *
4025  * @adev: amdgpu_device pointer
4026  *
4027  * Main resume function for hardware IPs.  The hardware IPs
4028  * are split into several resume phases because they are
4029  * also used in recovering from a GPU reset and some additional
4030  * steps need to be taken between them.  In this case (S3/S4) they are
4031  * run sequentially.
4032  * Returns 0 on success, negative error code on failure.
4033  */
4034 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
4035 {
4036 	int r;
4037 
4038 	r = amdgpu_device_ip_resume_phase1(adev);
4039 	if (r)
4040 		return r;
4041 
4042 	r = amdgpu_device_fw_loading(adev);
4043 	if (r)
4044 		return r;
4045 
4046 	r = amdgpu_device_ip_resume_phase2(adev);
4047 
4048 	amdgpu_ttm_set_buffer_funcs_status(adev, true);
4049 
4050 	if (r)
4051 		return r;
4052 
4053 	amdgpu_fence_driver_hw_init(adev);
4054 
4055 	r = amdgpu_device_ip_resume_phase3(adev);
4056 
4057 	return r;
4058 }
4059 
4060 /**
4061  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
4062  *
4063  * @adev: amdgpu_device pointer
4064  *
4065  * Query the VBIOS data tables to determine if the board supports SR-IOV.
4066  */
4067 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
4068 {
4069 	if (amdgpu_sriov_vf(adev)) {
4070 		if (adev->is_atom_fw) {
4071 			if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
4072 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
4073 		} else {
4074 			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
4075 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
4076 		}
4077 
4078 		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
4079 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
4080 	}
4081 }
4082 
4083 /**
4084  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
4085  *
4086  * @pdev: pci device context
4087  * @asic_type: AMD asic type
4088  *
4089  * Check if there is DC (new modesetting infrastructure) support for an asic.
4090  * Returns true if DC has support, false if not.
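 * The amdgpu_dc module parameter checked below can override the result
 * (0 forces DC off, a positive value requests DC where the ASIC supports
 * it).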
4091  */
4092 bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev,
4093 				       enum amd_asic_type asic_type)
4094 {
4095 	switch (asic_type) {
4096 #ifdef CONFIG_DRM_AMDGPU_SI
4097 	case CHIP_HAINAN:
4098 #endif
4099 	case CHIP_TOPAZ:
4100 		/* chips with no display hardware */
4101 		return false;
4102 #if defined(CONFIG_DRM_AMD_DC)
4103 	case CHIP_TAHITI:
4104 	case CHIP_PITCAIRN:
4105 	case CHIP_VERDE:
4106 	case CHIP_OLAND:
4107 		return amdgpu_dc != 0 && IS_ENABLED(CONFIG_DRM_AMD_DC_SI);
4108 	default:
4109 		return amdgpu_dc != 0;
4110 #else
4111 	default:
4112 		if (amdgpu_dc > 0)
4113 			dev_info_once(
4114 				&pdev->dev,
4115 				"Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
4116 		return false;
4117 #endif
4118 	}
4119 }
4120 
4121 /**
4122  * amdgpu_device_has_dc_support - check if dc is supported
4123  *
4124  * @adev: amdgpu_device pointer
4125  *
4126  * Returns true for supported, false for not supported
4127  */
4128 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
4129 {
4130 	if (adev->enable_virtual_display ||
4131 	    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
4132 		return false;
4133 
4134 	return amdgpu_device_asic_has_dc_support(adev->pdev, adev->asic_type);
4135 }
4136 
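/**
 * amdgpu_device_xgmi_reset_func - per-device XGMI reset work handler
 *
 * @__work: work_struct
 *
 * Resets one device of an XGMI hive.  A task barrier is used so that, for
 * BACO, all devices in the hive enter and exit BACO together; any failure
 * is recorded in adev->asic_reset_res for the caller to inspect.
 */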
4137 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
4138 {
4139 	struct amdgpu_device *adev =
4140 		container_of(__work, struct amdgpu_device, xgmi_reset_work);
4141 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
4142 
4143 	/* It's a bug to not have a hive within this function */
4144 	if (WARN_ON(!hive))
4145 		return;
4146 
4147 	/*
4148 	 * Use task barrier to synchronize all xgmi reset works across the
4149 	 * hive. task_barrier_enter and task_barrier_exit will block
4150 	 * until all the threads running the xgmi reset works reach
4151 	 * those points. task_barrier_full will do both blocks.
4152 	 */
4153 	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
4154 
4155 		task_barrier_enter(&hive->tb);
4156 		adev->asic_reset_res = amdgpu_device_baco_enter(adev);
4157 
4158 		if (adev->asic_reset_res)
4159 			goto fail;
4160 
4161 		task_barrier_exit(&hive->tb);
4162 		adev->asic_reset_res = amdgpu_device_baco_exit(adev);
4163 
4164 		if (adev->asic_reset_res)
4165 			goto fail;
4166 
4167 		amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
4168 	} else {
4169 
4170 		task_barrier_full(&hive->tb);
4171 		adev->asic_reset_res =  amdgpu_asic_reset(adev);
4172 	}
4173 
4174 fail:
4175 	if (adev->asic_reset_res)
4176 		dev_warn(adev->dev,
4177 			 "ASIC reset failed with error, %d for drm dev, %s",
4178 			 adev->asic_reset_res, adev_to_drm(adev)->unique);
4179 	amdgpu_put_xgmi_hive(hive);
4180 }
4181 
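/**
 * amdgpu_device_get_job_timeout_settings - parse the lockup_timeout parameter
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the amdgpu.lockup_timeout module parameter as a comma separated
 * list of per-queue timeouts in milliseconds, in the order
 * gfx,compute,sdma,video.  A value of 0 leaves that queue at the default
 * and a negative value disables the timeout.  For example (values are
 * illustrative only):
 *
 *   amdgpu.lockup_timeout=10000,60000,-1,10000
 *
 * sets 10 second gfx and video timeouts, a 60 second compute timeout and
 * disables the sdma timeout.  A single value is applied to all queues.
 * Returns 0 on success, negative error code on a parse failure.
 */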
4182 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
4183 {
4184 	char *input = amdgpu_lockup_timeout;
4185 	char *timeout_setting = NULL;
4186 	int index = 0;
4187 	long timeout;
4188 	int ret = 0;
4189 
4190 	/* By default the timeout for all queues is 2 sec */
4191 	adev->gfx_timeout = adev->compute_timeout = adev->sdma_timeout =
4192 		adev->video_timeout = msecs_to_jiffies(2000);
4193 
4194 	if (!strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH))
4195 		return 0;
4196 
4197 	while ((timeout_setting = strsep(&input, ",")) &&
4198 	       strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
4199 		ret = kstrtol(timeout_setting, 0, &timeout);
4200 		if (ret)
4201 			return ret;
4202 
4203 		if (timeout == 0) {
4204 			index++;
4205 			continue;
4206 		} else if (timeout < 0) {
4207 			timeout = MAX_SCHEDULE_TIMEOUT;
4208 			dev_warn(adev->dev, "lockup timeout disabled");
4209 			add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
4210 		} else {
4211 			timeout = msecs_to_jiffies(timeout);
4212 		}
4213 
4214 		switch (index++) {
4215 		case 0:
4216 			adev->gfx_timeout = timeout;
4217 			break;
4218 		case 1:
4219 			adev->compute_timeout = timeout;
4220 			break;
4221 		case 2:
4222 			adev->sdma_timeout = timeout;
4223 			break;
4224 		case 3:
4225 			adev->video_timeout = timeout;
4226 			break;
4227 		default:
4228 			break;
4229 		}
4230 	}
4231 
4232 	/* When only one value is specified, apply it to all queues. */
4233 	if (index == 1)
4234 		adev->gfx_timeout = adev->compute_timeout = adev->sdma_timeout =
4235 			adev->video_timeout = timeout;
4236 
4237 	return ret;
4238 }
4239 
4240 /**
4241  * amdgpu_device_check_iommu_direct_map - check if RAM is direct mapped to the GPU
4242  *
4243  * @adev: amdgpu_device pointer
4244  *
4245  * RAM is direct mapped to the GPU if the IOMMU is disabled or in passthrough mode
4246  */
4247 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
4248 {
4249 	struct iommu_domain *domain;
4250 
4251 	domain = iommu_get_domain_for_dev(adev->dev);
4252 	if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
4253 		adev->ram_is_direct_mapped = true;
4254 }
4255 
4256 #if defined(CONFIG_HSA_AMD_P2P)
4257 /**
4258  * amdgpu_device_check_iommu_remap - Check if DMA remapping is enabled.
4259  *
4260  * @adev: amdgpu_device pointer
4261  *
4262  * Returns true if the IOMMU is remapping BAR addresses, false otherwise
4263  */
4264 static bool amdgpu_device_check_iommu_remap(struct amdgpu_device *adev)
4265 {
4266 	struct iommu_domain *domain;
4267 
4268 	domain = iommu_get_domain_for_dev(adev->dev);
4269 	if (domain && (domain->type == IOMMU_DOMAIN_DMA ||
4270 		domain->type ==	IOMMU_DOMAIN_DMA_FQ))
4271 		return true;
4272 
4273 	return false;
4274 }
4275 #endif
4276 
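/**
 * amdgpu_device_set_mcbp - apply the Mid Command Buffer Preemption policy
 *
 * @adev: amdgpu_device pointer
 *
 * The amdgpu.mcbp module parameter forces MCBP on (1) or off (0);
 * otherwise the ASIC default is kept.  SR-IOV VFs always run with MCBP
 * enabled.
 */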
4277 static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
4278 {
4279 	if (amdgpu_mcbp == 1)
4280 		adev->gfx.mcbp = true;
4281 	else if (amdgpu_mcbp == 0)
4282 		adev->gfx.mcbp = false;
4283 
4284 	if (amdgpu_sriov_vf(adev))
4285 		adev->gfx.mcbp = true;
4286 
4287 	if (adev->gfx.mcbp)
4288 		dev_info(adev->dev, "MCBP is enabled\n");
4289 }
4290 
4291 static int amdgpu_device_sys_interface_init(struct amdgpu_device *adev)
4292 {
4293 	int r;
4294 
4295 	r = amdgpu_atombios_sysfs_init(adev);
4296 	if (r)
4297 		drm_err(&adev->ddev,
4298 			"registering atombios sysfs failed (%d).\n", r);
4299 
4300 	r = amdgpu_pm_sysfs_init(adev);
4301 	if (r)
4302 		dev_err(adev->dev, "registering pm sysfs failed (%d).\n", r);
4303 
4304 	r = amdgpu_ucode_sysfs_init(adev);
4305 	if (r) {
4306 		adev->ucode_sysfs_en = false;
4307 		dev_err(adev->dev, "Creating firmware sysfs failed (%d).\n", r);
4308 	} else
4309 		adev->ucode_sysfs_en = true;
4310 
4311 	r = amdgpu_device_attr_sysfs_init(adev);
4312 	if (r)
4313 		dev_err(adev->dev, "Could not create amdgpu device attr\n");
4314 
4315 	r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group);
4316 	if (r)
4317 		dev_err(adev->dev,
4318 			"Could not create amdgpu board attributes\n");
4319 
4320 	amdgpu_fru_sysfs_init(adev);
4321 	amdgpu_reg_state_sysfs_init(adev);
4322 	amdgpu_xcp_sysfs_init(adev);
4323 	amdgpu_uma_sysfs_init(adev);
4324 
4325 	return r;
4326 }
4327 
4328 static void amdgpu_device_sys_interface_fini(struct amdgpu_device *adev)
4329 {
4330 	if (adev->pm.sysfs_initialized)
4331 		amdgpu_pm_sysfs_fini(adev);
4332 	if (adev->ucode_sysfs_en)
4333 		amdgpu_ucode_sysfs_fini(adev);
4334 	amdgpu_device_attr_sysfs_fini(adev);
4335 	amdgpu_fru_sysfs_fini(adev);
4336 
4337 	amdgpu_reg_state_sysfs_fini(adev);
4338 	amdgpu_xcp_sysfs_fini(adev);
4339 	amdgpu_uma_sysfs_fini(adev);
4340 }
4341 
4342 /**
4343  * amdgpu_device_init - initialize the driver
4344  *
4345  * @adev: amdgpu_device pointer
4346  * @flags: driver flags
4347  *
4348  * Initializes the driver info and hw (all asics).
4349  * Returns 0 for success or an error on failure.
4350  * Called at driver startup.
4351  */
4352 int amdgpu_device_init(struct amdgpu_device *adev,
4353 		       uint32_t flags)
4354 {
4355 	struct pci_dev *pdev = adev->pdev;
4356 	int r, i;
4357 	bool px = false;
4358 	u32 max_MBps;
4359 	int tmp;
4360 
4361 	adev->shutdown = false;
4362 	adev->flags = flags;
4363 
4364 	if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
4365 		adev->asic_type = amdgpu_force_asic_type;
4366 	else
4367 		adev->asic_type = flags & AMD_ASIC_MASK;
4368 
4369 	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
4370 	if (amdgpu_emu_mode == 1)
4371 		adev->usec_timeout *= 10;
4372 	adev->gmc.gart_size = 512 * 1024 * 1024;
4373 	adev->accel_working = false;
4374 	adev->num_rings = 0;
4375 	RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
4376 	adev->mman.buffer_funcs = NULL;
4377 	adev->mman.buffer_funcs_ring = NULL;
4378 	adev->vm_manager.vm_pte_funcs = NULL;
4379 	adev->vm_manager.vm_pte_num_scheds = 0;
4380 	adev->gmc.gmc_funcs = NULL;
4381 	adev->harvest_ip_mask = 0x0;
4382 	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
4383 	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
4384 
4385 	adev->smc_rreg = &amdgpu_invalid_rreg;
4386 	adev->smc_wreg = &amdgpu_invalid_wreg;
4387 	adev->pcie_rreg = &amdgpu_invalid_rreg;
4388 	adev->pcie_wreg = &amdgpu_invalid_wreg;
4389 	adev->pcie_rreg_ext = &amdgpu_invalid_rreg_ext;
4390 	adev->pcie_wreg_ext = &amdgpu_invalid_wreg_ext;
4391 	adev->pciep_rreg = &amdgpu_invalid_rreg;
4392 	adev->pciep_wreg = &amdgpu_invalid_wreg;
4393 	adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
4394 	adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
4395 	adev->pcie_rreg64_ext = &amdgpu_invalid_rreg64_ext;
4396 	adev->pcie_wreg64_ext = &amdgpu_invalid_wreg64_ext;
4397 	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
4398 	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
4399 	adev->didt_rreg = &amdgpu_invalid_rreg;
4400 	adev->didt_wreg = &amdgpu_invalid_wreg;
4401 	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
4402 	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
4403 	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
4404 	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
4405 
4406 	dev_info(
4407 		adev->dev,
4408 		"initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
4409 		amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
4410 		pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
4411 
4412 	/* mutex initializations are all done here so we
4413 	 * can recall functions without locking issues
4414 	 */
4415 	mutex_init(&adev->firmware.mutex);
4416 	mutex_init(&adev->pm.mutex);
4417 	mutex_init(&adev->gfx.gpu_clock_mutex);
4418 	mutex_init(&adev->srbm_mutex);
4419 	mutex_init(&adev->gfx.pipe_reserve_mutex);
4420 	mutex_init(&adev->gfx.gfx_off_mutex);
4421 	mutex_init(&adev->gfx.partition_mutex);
4422 	mutex_init(&adev->grbm_idx_mutex);
4423 	mutex_init(&adev->mn_lock);
4424 	mutex_init(&adev->virt.vf_errors.lock);
4425 	hash_init(adev->mn_hash);
4426 	mutex_init(&adev->psp.mutex);
4427 	mutex_init(&adev->notifier_lock);
4428 	mutex_init(&adev->pm.stable_pstate_ctx_lock);
4429 	mutex_init(&adev->benchmark_mutex);
4430 	mutex_init(&adev->gfx.reset_sem_mutex);
4431 	/* Initialize the mutex for cleaner shader isolation between GFX and compute processes */
4432 	mutex_init(&adev->enforce_isolation_mutex);
4433 	for (i = 0; i < MAX_XCP; ++i) {
4434 		adev->isolation[i].spearhead = dma_fence_get_stub();
4435 		amdgpu_sync_create(&adev->isolation[i].active);
4436 		amdgpu_sync_create(&adev->isolation[i].prev);
4437 	}
4438 	mutex_init(&adev->gfx.userq_sch_mutex);
4439 	mutex_init(&adev->gfx.workload_profile_mutex);
4440 	mutex_init(&adev->vcn.workload_profile_mutex);
4441 
4442 	amdgpu_device_init_apu_flags(adev);
4443 
4444 	r = amdgpu_device_check_arguments(adev);
4445 	if (r)
4446 		return r;
4447 
4448 	spin_lock_init(&adev->mmio_idx_lock);
4449 	spin_lock_init(&adev->smc_idx_lock);
4450 	spin_lock_init(&adev->pcie_idx_lock);
4451 	spin_lock_init(&adev->uvd_ctx_idx_lock);
4452 	spin_lock_init(&adev->didt_idx_lock);
4453 	spin_lock_init(&adev->gc_cac_idx_lock);
4454 	spin_lock_init(&adev->se_cac_idx_lock);
4455 	spin_lock_init(&adev->audio_endpt_idx_lock);
4456 	spin_lock_init(&adev->mm_stats.lock);
4457 	spin_lock_init(&adev->virt.rlcg_reg_lock);
4458 	spin_lock_init(&adev->wb.lock);
4459 
4460 	xa_init_flags(&adev->userq_xa, XA_FLAGS_LOCK_IRQ);
4461 
4462 	INIT_LIST_HEAD(&adev->reset_list);
4463 
4464 	INIT_LIST_HEAD(&adev->ras_list);
4465 
4466 	INIT_LIST_HEAD(&adev->pm.od_kobj_list);
4467 
4468 	xa_init(&adev->userq_doorbell_xa);
4469 
4470 	INIT_DELAYED_WORK(&adev->delayed_init_work,
4471 			  amdgpu_device_delayed_init_work_handler);
4472 	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
4473 			  amdgpu_device_delay_enable_gfx_off);
4474 	/*
4475 	 * Initialize the enforce_isolation work structures for each XCP
4476 	 * partition.  This work handler is responsible for enforcing shader
4477 	 * isolation on AMD GPUs.  It counts the number of emitted fences for
4478 	 * each GFX and compute ring.  If there are any fences, it schedules
4479 	 * the `enforce_isolation_work` to be run after a delay.  If there are
4480 	 * no fences, it signals the Kernel Fusion Driver (KFD) to resume the
4481 	 * runqueue.
4482 	 */
4483 	for (i = 0; i < MAX_XCP; i++) {
4484 		INIT_DELAYED_WORK(&adev->gfx.enforce_isolation[i].work,
4485 				  amdgpu_gfx_enforce_isolation_handler);
4486 		adev->gfx.enforce_isolation[i].adev = adev;
4487 		adev->gfx.enforce_isolation[i].xcp_id = i;
4488 	}
4489 
4490 	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
4491 	INIT_WORK(&adev->userq_reset_work, amdgpu_userq_reset_work);
4492 
4493 	adev->gfx.gfx_off_req_count = 1;
4494 	adev->gfx.gfx_off_residency = 0;
4495 	adev->gfx.gfx_off_entrycount = 0;
4496 	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
4497 
4498 	atomic_set(&adev->throttling_logging_enabled, 1);
4499 	/*
4500 	 * If throttling continues, logging will be performed every minute
4501 	 * to avoid log flooding. "-1" is subtracted since the thermal
4502 	 * throttling interrupt comes every second. Thus, the total logging
4503 	 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
4504 	 * for the throttling interrupt) = 60 seconds.
4505 	 */
4506 	ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
4507 
4508 	ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
4509 
4510 	/* Registers mapping */
4511 	/* TODO: block userspace mapping of io register */
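	/* BONAIRE and newer ASICs expose the register aperture in BAR 5;
	 * older (SI) parts use BAR 2.
	 */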
4512 	if (adev->asic_type >= CHIP_BONAIRE) {
4513 		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
4514 		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
4515 	} else {
4516 		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
4517 		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
4518 	}
4519 
4520 	for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
4521 		atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
4522 
4523 	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
4524 	if (!adev->rmmio)
4525 		return -ENOMEM;
4526 
4527 	dev_info(adev->dev, "register mmio base: 0x%08X\n",
4528 		 (uint32_t)adev->rmmio_base);
4529 	dev_info(adev->dev, "register mmio size: %u\n",
4530 		 (unsigned int)adev->rmmio_size);
4531 
4532 	/*
4533 	 * The reset domain needs to be present early, before any XGMI hive is
4534 	 * discovered and initialized, so that the reset sem and in_gpu_reset flag
4535 	 * can be used early on during init, before the first call to RREG32.
4536 	 */
4537 	adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
4538 	if (!adev->reset_domain)
4539 		return -ENOMEM;
4540 
4541 	/* detect hw virtualization here */
4542 	amdgpu_virt_init(adev);
4543 
4544 	amdgpu_device_get_pcie_info(adev);
4545 
4546 	r = amdgpu_device_get_job_timeout_settings(adev);
4547 	if (r) {
4548 		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
4549 		return r;
4550 	}
4551 
4552 	amdgpu_device_set_mcbp(adev);
4553 
4554 	/*
4555 	 * Start in the default mode, where all blocks are expected to be
4556 	 * initialized. At present the 'swinit' of the blocks must complete
4557 	 * before the need for a different level can be detected.
4558 	 */
4559 	amdgpu_set_init_level(adev, AMDGPU_INIT_LEVEL_DEFAULT);
4560 	/* early init functions */
4561 	r = amdgpu_device_ip_early_init(adev);
4562 	if (r)
4563 		return r;
4564 
4565 	/*
4566 	 * No need to remove conflicting FBs for non-display class devices.
4567 	 * This prevents the sysfb from being freed accidentally.
4568 	 */
4569 	if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA ||
4570 	    (pdev->class >> 8) == PCI_CLASS_DISPLAY_OTHER) {
4571 		/* Get rid of things like offb */
4572 		r = aperture_remove_conflicting_pci_devices(adev->pdev, amdgpu_kms_driver.name);
4573 		if (r)
4574 			return r;
4575 	}
4576 
4577 	/* Enable TMZ based on IP_VERSION */
4578 	amdgpu_gmc_tmz_set(adev);
4579 
4580 	if (amdgpu_sriov_vf(adev) &&
4581 	    amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))
4582 		/* VF MMIO access (except mailbox range) from CPU
4583 		 * will be blocked during sriov runtime
4584 		 */
4585 		adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT;
4586 
4587 	amdgpu_gmc_noretry_set(adev);
4588 	/* Need to get xgmi info early to decide the reset behavior */
4589 	if (adev->gmc.xgmi.supported) {
4590 		r = adev->gfxhub.funcs->get_xgmi_info(adev);
4591 		if (r)
4592 			return r;
4593 	}
4594 
4595 	/* enable PCIE atomic ops */
4596 	if (amdgpu_sriov_vf(adev)) {
4597 		if (adev->virt.fw_reserve.p_pf2vf)
4598 			adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
4599 						      adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
4600 				(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
4601 	/* APUs with gfx9 onwards don't rely on PCIe atomics; rather, an
4602 	 * internal path natively supports atomics, so set have_atomics_support to true.
4603 	 */
4604 	} else if ((adev->flags & AMD_IS_APU &&
4605 		   amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0)) ||
4606 		   (adev->gmc.xgmi.connected_to_cpu &&
4607 		   amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 1, 0))) {
4608 		adev->have_atomics_support = true;
4609 	} else {
4610 		adev->have_atomics_support =
4611 			!pci_enable_atomic_ops_to_root(adev->pdev,
4612 					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
4613 					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
4614 	}
4615 
4616 	if (!adev->have_atomics_support)
4617 		dev_info(adev->dev, "PCIe atomic ops are not supported\n");
4618 
4619 	/* doorbell bar mapping and doorbell index init */
4620 	amdgpu_doorbell_init(adev);
4621 
4622 	if (amdgpu_emu_mode == 1) {
4623 		/* post the asic on emulation mode */
4624 		emu_soc_asic_init(adev);
4625 		goto fence_driver_init;
4626 	}
4627 
4628 	amdgpu_reset_init(adev);
4629 
4630 	/* detect if we are with an SRIOV vbios */
4631 	if (adev->bios)
4632 		amdgpu_device_detect_sriov_bios(adev);
4633 
4634 	/* check if we need to reset the asic
4635 	 *  E.g., driver was not cleanly unloaded previously, etc.
4636 	 */
4637 	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
4638 		if (adev->gmc.xgmi.num_physical_nodes) {
4639 			dev_info(adev->dev, "Pending hive reset.\n");
4640 			amdgpu_set_init_level(adev,
4641 					      AMDGPU_INIT_LEVEL_MINIMAL_XGMI);
4642 		} else {
4643 			tmp = amdgpu_reset_method;
4644 			/* It should do a default reset when loading or reloading the driver,
4645 			 * regardless of the module parameter reset_method.
4646 			 */
4647 			amdgpu_reset_method = AMD_RESET_METHOD_NONE;
4648 			r = amdgpu_asic_reset(adev);
4649 			amdgpu_reset_method = tmp;
4650 		}
4651 
4652 		if (r) {
4653 			dev_err(adev->dev, "asic reset on init failed\n");
4654 			goto failed;
4655 		}
4656 	}
4657 
4658 	/* Post card if necessary */
4659 	if (amdgpu_device_need_post(adev)) {
4660 		if (!adev->bios) {
4661 			dev_err(adev->dev, "no vBIOS found\n");
4662 			r = -EINVAL;
4663 			goto failed;
4664 		}
4665 		dev_info(adev->dev, "GPU posting now...\n");
4666 		r = amdgpu_device_asic_init(adev);
4667 		if (r) {
4668 			dev_err(adev->dev, "gpu post error!\n");
4669 			goto failed;
4670 		}
4671 	}
4672 
4673 	if (adev->bios) {
4674 		if (adev->is_atom_fw) {
4675 			/* Initialize clocks */
4676 			r = amdgpu_atomfirmware_get_clock_info(adev);
4677 			if (r) {
4678 				dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
4679 				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
4680 				goto failed;
4681 			}
4682 		} else {
4683 			/* Initialize clocks */
4684 			r = amdgpu_atombios_get_clock_info(adev);
4685 			if (r) {
4686 				dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
4687 				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
4688 				goto failed;
4689 			}
4690 			/* init i2c buses */
4691 			amdgpu_i2c_init(adev);
4692 		}
4693 	}
4694 
4695 fence_driver_init:
4696 	/* Fence driver */
4697 	r = amdgpu_fence_driver_sw_init(adev);
4698 	if (r) {
4699 		dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
4700 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
4701 		goto failed;
4702 	}
4703 
4704 	/* init the mode config */
4705 	drm_mode_config_init(adev_to_drm(adev));
4706 
4707 	r = amdgpu_device_ip_init(adev);
4708 	if (r) {
4709 		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
4710 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
4711 		goto release_ras_con;
4712 	}
4713 
4714 	amdgpu_fence_driver_hw_init(adev);
4715 
4716 	dev_info(adev->dev,
4717 		"SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
4718 			adev->gfx.config.max_shader_engines,
4719 			adev->gfx.config.max_sh_per_se,
4720 			adev->gfx.config.max_cu_per_sh,
4721 			adev->gfx.cu_info.number);
4722 
4723 	adev->accel_working = true;
4724 
4725 	amdgpu_vm_check_compute_bug(adev);
4726 
4727 	/* Initialize the buffer migration limit. */
4728 	if (amdgpu_moverate >= 0)
4729 		max_MBps = amdgpu_moverate;
4730 	else
4731 		max_MBps = 8; /* Allow 8 MB/s. */
4732 	/* Get a log2 for easy divisions. */
4733 	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
4734 
4735 	/*
4736 	 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
4737 	 * Otherwise the mgpu fan boost feature will be skipped because the
4738 	 * gpu instance has not been counted yet.
4739 	 */
4740 	amdgpu_register_gpu_instance(adev);
4741 
4742 	/* enable clockgating, etc. after ib tests, etc. since some blocks require
4743 	 * explicit gating rather than handling it automatically.
4744 	 */
4745 	if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
4746 		r = amdgpu_device_ip_late_init(adev);
4747 		if (r) {
4748 			dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
4749 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
4750 			goto release_ras_con;
4751 		}
4752 		/* must succeed. */
4753 		amdgpu_ras_resume(adev);
4754 		queue_delayed_work(system_wq, &adev->delayed_init_work,
4755 				   msecs_to_jiffies(AMDGPU_RESUME_MS));
4756 	}
4757 
4758 	if (amdgpu_sriov_vf(adev)) {
4759 		amdgpu_virt_release_full_gpu(adev, true);
4760 		flush_delayed_work(&adev->delayed_init_work);
4761 	}
4762 
4763 	/* Don't init kfd if whole hive need to be reset during init */
4764 	if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
4765 		kgd2kfd_init_zone_device(adev);
4766 		kfd_update_svm_support_properties(adev);
4767 	}
4768 
4769 	if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
4770 		amdgpu_xgmi_reset_on_init(adev);
4771 
4772 	/*
4773 	 * Place the sysfs registration after `late_init`, since some of the
4774 	 * operations performed in `late_init` might affect the creation of
4775 	 * the sysfs interfaces.
4776 	 */
4777 	r = amdgpu_device_sys_interface_init(adev);
4778 
4779 	if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
4780 		r = amdgpu_pmu_init(adev);
4781 		if (r)
4782 			dev_err(adev->dev, "amdgpu_pmu_init failed\n");
	}
4783 
4784 	/* Keep the stored PCI config space at hand for restore on a sudden PCI error */
4785 	if (amdgpu_device_cache_pci_state(adev->pdev))
4786 		pci_restore_state(pdev);
4787 
4788 	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
4789 	/* this will fail for cards that aren't VGA class devices, just
4790 	 * ignore it
4791 	 */
4792 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4793 		vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
4794 
4795 	px = amdgpu_device_supports_px(adev);
4796 
4797 	if (px || (!dev_is_removable(&adev->pdev->dev) &&
4798 				apple_gmux_detect(NULL, NULL)))
4799 		vga_switcheroo_register_client(adev->pdev,
4800 					       &amdgpu_switcheroo_ops, px);
4801 
4802 	if (px)
4803 		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
4804 
4805 	amdgpu_device_check_iommu_direct_map(adev);
4806 
4807 	adev->pm_nb.notifier_call = amdgpu_device_pm_notifier;
4808 	r = register_pm_notifier(&adev->pm_nb);
4809 	if (r)
4810 		goto failed;
4811 
4812 	return 0;
4813 
4814 release_ras_con:
4815 	if (amdgpu_sriov_vf(adev))
4816 		amdgpu_virt_release_full_gpu(adev, true);
4817 
4818 	/* failed in exclusive mode due to timeout */
4819 	if (amdgpu_sriov_vf(adev) &&
4820 		!amdgpu_sriov_runtime(adev) &&
4821 		amdgpu_virt_mmio_blocked(adev) &&
4822 		!amdgpu_virt_wait_reset(adev)) {
4823 		dev_err(adev->dev, "VF exclusive mode timeout\n");
4824 		/* Don't send request since VF is inactive. */
4825 		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
4826 		adev->virt.ops = NULL;
4827 		r = -EAGAIN;
4828 	}
4829 	amdgpu_release_ras_context(adev);
4830 
4831 failed:
4832 	amdgpu_vf_error_trans_all(adev);
4833 
4834 	return r;
4835 }
4836 
4837 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
4838 {
4840 	/* Clear all CPU mappings pointing to this device */
4841 	unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
4842 
4843 	/* Unmap all mapped bars - Doorbell, registers and VRAM */
4844 	amdgpu_doorbell_fini(adev);
4845 
4846 	iounmap(adev->rmmio);
4847 	adev->rmmio = NULL;
4848 	if (adev->mman.aper_base_kaddr)
4849 		iounmap(adev->mman.aper_base_kaddr);
4850 	adev->mman.aper_base_kaddr = NULL;
4851 
4852 	/* Memory manager related */
4853 	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
4854 		arch_phys_wc_del(adev->gmc.vram_mtrr);
4855 		arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
4856 	}
4857 }
4858 
4859 /**
4860  * amdgpu_device_fini_hw - tear down the driver
4861  *
4862  * @adev: amdgpu_device pointer
4863  *
4864  * Tear down the driver info (all asics).
4865  * Called at driver shutdown.
4866  */
4867 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
4868 {
4869 	dev_info(adev->dev, "finishing device.\n");
4870 	flush_delayed_work(&adev->delayed_init_work);
4871 
4872 	if (adev->mman.initialized)
4873 		drain_workqueue(adev->mman.bdev.wq);
4874 	adev->shutdown = true;
4875 
4876 	unregister_pm_notifier(&adev->pm_nb);
4877 
4878 	/* make sure IB test finished before entering exclusive mode
4879 	 * to avoid preemption on IB test
4880 	 */
4881 	if (amdgpu_sriov_vf(adev)) {
4882 		amdgpu_virt_request_full_gpu(adev, false);
4883 		amdgpu_virt_fini_data_exchange(adev);
4884 	}
4885 
4886 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
4887 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
4888 
4889 	/* disable all interrupts */
4890 	amdgpu_irq_disable_all(adev);
4891 	if (adev->mode_info.mode_config_initialized) {
4892 		if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
4893 			drm_helper_force_disable_all(adev_to_drm(adev));
4894 		else
4895 			drm_atomic_helper_shutdown(adev_to_drm(adev));
4896 	}
4897 	amdgpu_fence_driver_hw_fini(adev);
4898 
4899 	amdgpu_device_sys_interface_fini(adev);
4900 
4901 	/* disable ras feature must before hw fini */
4902 	amdgpu_ras_pre_fini(adev);
4903 
4904 	amdgpu_ttm_set_buffer_funcs_status(adev, false);
4905 
4906 	/*
4907 	 * The device went through a surprise hotplug; we need to destroy the
4908 	 * topology before ip_fini_early to prevent kfd locking/refcount issues
4909 	 * caused by a later amdgpu_amdkfd_suspend() call.
4910 	 */
4911 	if (pci_dev_is_disconnected(adev->pdev))
4912 		amdgpu_amdkfd_device_fini_sw(adev);
4913 
4914 	amdgpu_device_ip_fini_early(adev);
4915 
4916 	amdgpu_irq_fini_hw(adev);
4917 
4918 	if (adev->mman.initialized)
4919 		ttm_device_clear_dma_mappings(&adev->mman.bdev);
4920 
4921 	amdgpu_gart_dummy_page_fini(adev);
4922 
4923 	if (pci_dev_is_disconnected(adev->pdev))
4924 		amdgpu_device_unmap_mmio(adev);
4926 }
4927 
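/**
 * amdgpu_device_fini_sw - tear down driver software state
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the driver software state (all asics).
 * Called at driver shutdown, after amdgpu_device_fini_hw.
 */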
4928 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4929 {
4930 	int i, idx;
4931 	bool px;
4932 
4933 	amdgpu_device_ip_fini(adev);
4934 	amdgpu_fence_driver_sw_fini(adev);
4935 	amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
4936 	adev->accel_working = false;
4937 	dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4938 	for (i = 0; i < MAX_XCP; ++i) {
4939 		dma_fence_put(adev->isolation[i].spearhead);
4940 		amdgpu_sync_free(&adev->isolation[i].active);
4941 		amdgpu_sync_free(&adev->isolation[i].prev);
4942 	}
4943 
4944 	amdgpu_reset_fini(adev);
4945 
4946 	/* free i2c buses */
4947 	amdgpu_i2c_fini(adev);
4948 
4949 	if (adev->bios) {
4950 		if (amdgpu_emu_mode != 1)
4951 			amdgpu_atombios_fini(adev);
4952 		amdgpu_bios_release(adev);
4953 	}
4954 
4955 	kfree(adev->fru_info);
4956 	adev->fru_info = NULL;
4957 
4958 	kfree(adev->xcp_mgr);
4959 	adev->xcp_mgr = NULL;
4960 
4961 	px = amdgpu_device_supports_px(adev);
4962 
4963 	if (px || (!dev_is_removable(&adev->pdev->dev) &&
4964 				apple_gmux_detect(NULL, NULL)))
4965 		vga_switcheroo_unregister_client(adev->pdev);
4966 
4967 	if (px)
4968 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
4969 
4970 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4971 		vga_client_unregister(adev->pdev);
4972 
4973 	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4975 		iounmap(adev->rmmio);
4976 		adev->rmmio = NULL;
4977 		drm_dev_exit(idx);
4978 	}
4979 
4980 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
4981 		amdgpu_pmu_fini(adev);
4982 	if (adev->discovery.bin)
4983 		amdgpu_discovery_fini(adev);
4984 
4985 	amdgpu_reset_put_reset_domain(adev->reset_domain);
4986 	adev->reset_domain = NULL;
4987 
4988 	kfree(adev->pci_state);
4989 	kfree(adev->pcie_reset_ctx.swds_pcistate);
4990 	kfree(adev->pcie_reset_ctx.swus_pcistate);
4991 }
4992 
4993 /**
4994  * amdgpu_device_evict_resources - evict device resources
4995  * @adev: amdgpu device object
4996  *
4997  * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4998  * of the vram memory type. Mainly used for evicting device resources
4999  * at suspend time.
5000  *
5001  */
5002 static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
5003 {
5004 	int ret;
5005 
5006 	/* No need to evict vram on APUs unless going to S4 */
5007 	if (!adev->in_s4 && (adev->flags & AMD_IS_APU))
5008 		return 0;
5009 
5010 	/* No need to evict when going to S5 through S4 callbacks */
5011 	if (system_state == SYSTEM_POWER_OFF)
5012 		return 0;
5013 
5014 	ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
5015 	if (ret) {
5016 		dev_warn(adev->dev, "evicting device resources failed\n");
5017 		return ret;
5018 	}
5019 
5020 	if (adev->in_s4) {
5021 		ret = ttm_device_prepare_hibernation(&adev->mman.bdev);
5022 		if (ret)
5023 			dev_err(adev->dev, "prepare hibernation failed, %d\n", ret);
5024 	}
5025 	return ret;
5026 }
5027 
5028 /*
5029  * Suspend & resume.
5030  */
5031 /**
5032  * amdgpu_device_pm_notifier - Notification block for Suspend/Hibernate events
5033  * @nb: notifier block
5034  * @mode: suspend mode
5035  * @data: data
5036  *
5037  * This function is called before the system hibernates and after it
5038  * resumes (or hibernation is aborted).  It sets the appropriate flags so
5039  * that eviction can be optimized in the pm prepare callback.
5040  */
5041 static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
5042 				     void *data)
5043 {
5044 	struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, pm_nb);
5045 
5046 	switch (mode) {
5047 	case PM_HIBERNATION_PREPARE:
5048 		adev->in_s4 = true;
5049 		break;
5050 	case PM_POST_HIBERNATION:
5051 		adev->in_s4 = false;
5052 		break;
5053 	}
5054 
5055 	return NOTIFY_DONE;
5056 }
5057 
5058 /**
5059  * amdgpu_device_prepare - prepare for device suspend
5060  *
5061  * @dev: drm dev pointer
5062  *
5063  * Prepare to put the hw in the suspend state (all asics).
5064  * Returns 0 for success or an error on failure.
5065  * Called at driver suspend.
5066  */
5067 int amdgpu_device_prepare(struct drm_device *dev)
5068 {
5069 	struct amdgpu_device *adev = drm_to_adev(dev);
5070 	int i, r;
5071 
5072 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
5073 		return 0;
5074 
5075 	/* Evict the majority of BOs before starting suspend sequence */
5076 	r = amdgpu_device_evict_resources(adev);
5077 	if (r)
5078 		return r;
5079 
5080 	flush_delayed_work(&adev->gfx.gfx_off_delay_work);
5081 
5082 	for (i = 0; i < adev->num_ip_blocks; i++) {
5083 		if (!adev->ip_blocks[i].status.valid)
5084 			continue;
5085 		if (!adev->ip_blocks[i].version->funcs->prepare_suspend)
5086 			continue;
5087 		r = adev->ip_blocks[i].version->funcs->prepare_suspend(&adev->ip_blocks[i]);
5088 		if (r)
5089 			return r;
5090 	}
5091 
5092 	return 0;
5093 }
5094 
5095 /**
5096  * amdgpu_device_complete - complete power state transition
5097  *
5098  * @dev: drm dev pointer
5099  *
5100  * Undo the changes from amdgpu_device_prepare. This will be
5101  * called on all resume transitions, including those that failed.
5102  */
5103 void amdgpu_device_complete(struct drm_device *dev)
5104 {
5105 	struct amdgpu_device *adev = drm_to_adev(dev);
5106 	int i;
5107 
5108 	for (i = 0; i < adev->num_ip_blocks; i++) {
5109 		if (!adev->ip_blocks[i].status.valid)
5110 			continue;
5111 		if (!adev->ip_blocks[i].version->funcs->complete)
5112 			continue;
5113 		adev->ip_blocks[i].version->funcs->complete(&adev->ip_blocks[i]);
5114 	}
5115 }
5116 
5117 /**
5118  * amdgpu_device_suspend - initiate device suspend
5119  *
5120  * @dev: drm dev pointer
5121  * @notify_clients: notify in-kernel DRM clients
5122  *
5123  * Puts the hw in the suspend state (all asics).
5124  * Returns 0 for success or an error on failure.
5125  * Called at driver suspend.
5126  */
5127 int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
5128 {
5129 	struct amdgpu_device *adev = drm_to_adev(dev);
5130 	int r, rec;
5131 
5132 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
5133 		return 0;
5134 
5135 	adev->in_suspend = true;
5136 
5137 	if (amdgpu_sriov_vf(adev)) {
5138 		if (!adev->in_runpm)
5139 			amdgpu_amdkfd_suspend_process(adev);
5140 		amdgpu_virt_fini_data_exchange(adev);
5141 		r = amdgpu_virt_request_full_gpu(adev, false);
5142 		if (r)
5143 			return r;
5144 	}
5145 
5146 	r = amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D3);
5147 	if (r)
5148 		goto unwind_sriov;
5149 
5150 	if (notify_clients)
5151 		drm_client_dev_suspend(adev_to_drm(adev));
5152 
5153 	cancel_delayed_work_sync(&adev->delayed_init_work);
5154 
5155 	amdgpu_ras_suspend(adev);
5156 
5157 	r = amdgpu_device_ip_suspend_phase1(adev);
5158 	if (r)
5159 		goto unwind_smartshift;
5160 
5161 	amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
5162 	r = amdgpu_userq_suspend(adev);
5163 	if (r)
5164 		goto unwind_ip_phase1;
5165 
5166 	r = amdgpu_device_evict_resources(adev);
5167 	if (r)
5168 		goto unwind_userq;
5169 
5170 	amdgpu_ttm_set_buffer_funcs_status(adev, false);
5171 
5172 	amdgpu_fence_driver_hw_fini(adev);
5173 
5174 	r = amdgpu_device_ip_suspend_phase2(adev);
5175 	if (r)
5176 		goto unwind_evict;
5177 
5178 	if (amdgpu_sriov_vf(adev))
5179 		amdgpu_virt_release_full_gpu(adev, false);
5180 
5181 	return 0;
5182 
5183 unwind_evict:
5184 	amdgpu_ttm_set_buffer_funcs_status(adev, true);
5185 	amdgpu_fence_driver_hw_init(adev);
5186 
5187 unwind_userq:
5188 	rec = amdgpu_userq_resume(adev);
5189 	if (rec) {
5190 		dev_warn(adev->dev, "failed to re-initialize user queues: %d\n", rec);
5191 		return r;
5192 	}
5193 	rec = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
5194 	if (rec) {
5195 		dev_warn(adev->dev, "failed to re-initialize kfd: %d\n", rec);
5196 		return r;
5197 	}
5198 
5199 unwind_ip_phase1:
5200 	/* suspend phase 1 = resume phase 3 */
5201 	rec = amdgpu_device_ip_resume_phase3(adev);
5202 	if (rec) {
5203 		dev_warn(adev->dev, "failed to re-initialize IPs phase1: %d\n", rec);
5204 		return r;
5205 	}
5206 
5207 unwind_smartshift:
5208 	rec = amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D0);
5209 	if (rec) {
5210 		dev_warn(adev->dev, "failed to re-update smart shift: %d\n", rec);
5211 		return r;
5212 	}
5213 
5214 	if (notify_clients)
5215 		drm_client_dev_resume(adev_to_drm(adev));
5216 
5217 	amdgpu_ras_resume(adev);
5218 
5219 unwind_sriov:
5220 	if (amdgpu_sriov_vf(adev)) {
5221 		rec = amdgpu_virt_request_full_gpu(adev, true);
5222 		if (rec) {
5223 			dev_warn(adev->dev, "failed to reinitialize sriov: %d\n", rec);
5224 			return r;
5225 		}
5226 	}
5227 
5228 	adev->in_suspend = adev->in_s0ix = adev->in_s3 = false;
5229 
5230 	return r;
5231 }
5232 
5233 static inline int amdgpu_virt_resume(struct amdgpu_device *adev)
5234 {
5235 	int r;
5236 	unsigned int prev_physical_node_id = adev->gmc.xgmi.physical_node_id;
5237 
5238 	/* During VM resume, QEMU programming of VF MSIX table (register GFXMSIX_VECT0_ADDR_LO)
5239 	 * may not work. The access could be blocked by nBIF protection as VF isn't in
5240 	 * exclusive access mode. Exclusive access is enabled now, disable/enable MSIX
5241 	 * so that QEMU reprograms MSIX table.
5242 	 */
5243 	amdgpu_restore_msix(adev);
5244 
5245 	r = adev->gfxhub.funcs->get_xgmi_info(adev);
5246 	if (r)
5247 		return r;
5248 
5249 	dev_info(adev->dev, "xgmi node, old id %d, new id %d\n",
5250 		prev_physical_node_id, adev->gmc.xgmi.physical_node_id);
5251 
5252 	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
5253 	adev->vm_manager.vram_base_offset +=
5254 		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
5255 
5256 	return 0;
5257 }
5258 
5259 /**
5260  * amdgpu_device_resume - initiate device resume
5261  *
5262  * @dev: drm dev pointer
5263  * @notify_clients: notify in-kernel DRM clients
5264  *
5265  * Bring the hw back to operating state (all asics).
5266  * Returns 0 for success or an error on failure.
5267  * Called at driver resume.
5268  */
5269 int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
5270 {
5271 	struct amdgpu_device *adev = drm_to_adev(dev);
5272 	int r = 0;
5273 
5274 	if (amdgpu_sriov_vf(adev)) {
5275 		r = amdgpu_virt_request_full_gpu(adev, true);
5276 		if (r)
5277 			return r;
5278 	}
5279 
5280 	if (amdgpu_virt_xgmi_migrate_enabled(adev)) {
5281 		r = amdgpu_virt_resume(adev);
5282 		if (r)
5283 			goto exit;
5284 	}
5285 
5286 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
5287 		return 0;
5288 
5289 	if (adev->in_s0ix)
5290 		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
5291 
5292 	/* post card */
5293 	if (amdgpu_device_need_post(adev)) {
5294 		r = amdgpu_device_asic_init(adev);
5295 		if (r)
5296 			dev_err(adev->dev, "amdgpu asic init failed\n");
5297 	}
5298 
5299 	r = amdgpu_device_ip_resume(adev);
5300 
5301 	if (r) {
5302 		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
5303 		goto exit;
5304 	}
5305 
5306 	r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
5307 	if (r)
5308 		goto exit;
5309 
5310 	r = amdgpu_userq_resume(adev);
5311 	if (r)
5312 		goto exit;
5313 
5314 	r = amdgpu_device_ip_late_init(adev);
5315 	if (r)
5316 		goto exit;
5317 
5318 	queue_delayed_work(system_wq, &adev->delayed_init_work,
5319 			   msecs_to_jiffies(AMDGPU_RESUME_MS));
5320 exit:
5321 	if (amdgpu_sriov_vf(adev)) {
5322 		amdgpu_virt_init_data_exchange(adev);
5323 		amdgpu_virt_release_full_gpu(adev, true);
5324 
5325 		if (!r && !adev->in_runpm)
5326 			r = amdgpu_amdkfd_resume_process(adev);
5327 	}
5328 
5329 	if (r)
5330 		return r;
5331 
5332 	/* Make sure IB tests flushed */
5333 	flush_delayed_work(&adev->delayed_init_work);
5334 
5335 	if (notify_clients)
5336 		drm_client_dev_resume(adev_to_drm(adev));
5337 
5338 	amdgpu_ras_resume(adev);
5339 
5340 	if (adev->mode_info.num_crtc) {
5341 		/*
5342 		 * Most of the connector probing functions try to acquire runtime pm
5343 		 * refs to ensure that the GPU is powered on when connector polling is
5344 		 * performed. Since we're calling this from a runtime PM callback,
5345 		 * trying to acquire rpm refs will cause us to deadlock.
5346 		 *
5347 		 * Since we're guaranteed to be holding the rpm lock, it's safe to
5348 		 * temporarily disable the rpm helpers so this doesn't deadlock us.
5349 		 */
5350 #ifdef CONFIG_PM
5351 		dev->dev->power.disable_depth++;
5352 #endif
5353 		if (!adev->dc_enabled)
5354 			drm_helper_hpd_irq_event(dev);
5355 		else
5356 			drm_kms_helper_hotplug_event(dev);
5357 #ifdef CONFIG_PM
5358 		dev->dev->power.disable_depth--;
5359 #endif
5360 	}
5361 
5362 	amdgpu_vram_mgr_clear_reset_blocks(adev);
5363 	adev->in_suspend = false;
5364 
5365 	if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D0))
5366 		dev_warn(adev->dev, "smart shift update failed\n");
5367 
5368 	return 0;
5369 }
5370 
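/*
 * Minimal sketch of how a system PM callback is expected to drive the entry
 * point above (hedged; the real callbacks live in amdgpu_drv.c and carry
 * extra logic for s0ix and runtime PM):
 *
 *   static int example_pmops_resume(struct device *dev)
 *   {
 *           struct drm_device *drm_dev = dev_get_drvdata(dev);
 *
 *           return amdgpu_device_resume(drm_dev, true);
 *   }
 *
 * notify_clients is true here so that in-kernel DRM clients (e.g. the fbdev
 * emulation) are resumed along with the hardware.
 */
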
5371 /**
5372  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
5373  *
5374  * @adev: amdgpu_device pointer
5375  *
5376  * The list of all the hardware IPs that make up the asic is walked and
5377  * the check_soft_reset callbacks are run.  check_soft_reset determines
5378  * if the asic is still hung or not.
5379  * Returns true if any of the IPs are still in a hung state, false if not.
5380  */
5381 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
5382 {
5383 	int i;
5384 	bool asic_hang = false;
5385 
5386 	if (amdgpu_sriov_vf(adev))
5387 		return true;
5388 
5389 	if (amdgpu_asic_need_full_reset(adev))
5390 		return true;
5391 
5392 	for (i = 0; i < adev->num_ip_blocks; i++) {
5393 		if (!adev->ip_blocks[i].status.valid)
5394 			continue;
5395 		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
5396 			adev->ip_blocks[i].status.hang =
5397 				adev->ip_blocks[i].version->funcs->check_soft_reset(
5398 					&adev->ip_blocks[i]);
5399 		if (adev->ip_blocks[i].status.hang) {
5400 			dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
5401 			asic_hang = true;
5402 		}
5403 	}
5404 	return asic_hang;
5405 }
5406 
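/*
 * Sketch of what an IP block's check_soft_reset callback typically looks
 * like (hypothetical register and IP names, for illustration only): it
 * samples the block's status registers and reports whether it is still hung.
 *
 *   static bool foo_check_soft_reset(struct amdgpu_ip_block *ip_block)
 *   {
 *           struct amdgpu_device *adev = ip_block->adev;
 *
 *           // non-zero busy bits mean the block is still hung
 *           return !!(RREG32(mmFOO_STATUS) & FOO_STATUS__BUSY_MASK);
 *   }
 *
 * The result is cached in ip_block->status.hang by the walker above.
 */
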
5407 /**
5408  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
5409  *
5410  * @adev: amdgpu_device pointer
5411  *
5412  * The list of all the hardware IPs that make up the asic is walked and the
5413  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
5414  * handles any IP specific hardware or software state changes that are
5415  * necessary for a soft reset to succeed.
5416  * Returns 0 on success, negative error code on failure.
5417  */
5418 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
5419 {
5420 	int i, r = 0;
5421 
5422 	for (i = 0; i < adev->num_ip_blocks; i++) {
5423 		if (!adev->ip_blocks[i].status.valid)
5424 			continue;
5425 		if (adev->ip_blocks[i].status.hang &&
5426 		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
5427 			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(&adev->ip_blocks[i]);
5428 			if (r)
5429 				return r;
5430 		}
5431 	}
5432 
5433 	return 0;
5434 }
5435 
5436 /**
5437  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
5438  *
5439  * @adev: amdgpu_device pointer
5440  *
5441  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
5442  * reset is necessary to recover.
5443  * Returns true if a full asic reset is required, false if not.
5444  */
5445 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
5446 {
5447 	int i;
5448 
5449 	if (amdgpu_asic_need_full_reset(adev))
5450 		return true;
5451 
5452 	for (i = 0; i < adev->num_ip_blocks; i++) {
5453 		if (!adev->ip_blocks[i].status.valid)
5454 			continue;
5455 		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
5456 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
5457 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
5458 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
5459 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
5460 			if (adev->ip_blocks[i].status.hang) {
5461 				dev_info(adev->dev, "Some blocks need a full reset!\n");
5462 				return true;
5463 			}
5464 		}
5465 	}
5466 	return false;
5467 }
5468 
5469 /**
5470  * amdgpu_device_ip_soft_reset - do a soft reset
5471  *
5472  * @adev: amdgpu_device pointer
5473  *
5474  * The list of all the hardware IPs that make up the asic is walked and the
5475  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
5476  * IP specific hardware or software state changes that are necessary to soft
5477  * reset the IP.
5478  * Returns 0 on success, negative error code on failure.
5479  */
5480 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
5481 {
5482 	int i, r = 0;
5483 
5484 	for (i = 0; i < adev->num_ip_blocks; i++) {
5485 		if (!adev->ip_blocks[i].status.valid)
5486 			continue;
5487 		if (adev->ip_blocks[i].status.hang &&
5488 		    adev->ip_blocks[i].version->funcs->soft_reset) {
5489 			r = adev->ip_blocks[i].version->funcs->soft_reset(&adev->ip_blocks[i]);
5490 			if (r)
5491 				return r;
5492 		}
5493 	}
5494 
5495 	return 0;
5496 }
5497 
5498 /**
5499  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
5500  *
5501  * @adev: amdgpu_device pointer
5502  *
5503  * The list of all the hardware IPs that make up the asic is walked and the
5504  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
5505  * handles any IP specific hardware or software state changes that are
5506  * necessary after the IP has been soft reset.
5507  * Returns 0 on success, negative error code on failure.
5508  */
5509 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
5510 {
5511 	int i, r = 0;
5512 
5513 	for (i = 0; i < adev->num_ip_blocks; i++) {
5514 		if (!adev->ip_blocks[i].status.valid)
5515 			continue;
5516 		if (adev->ip_blocks[i].status.hang &&
5517 		    adev->ip_blocks[i].version->funcs->post_soft_reset) {
5518 			r = adev->ip_blocks[i].version->funcs->post_soft_reset(&adev->ip_blocks[i]);
5519 			if (r)
5520 				return r;
5521 		}
5521 	}
5522 
5523 	return 0;
5524 }
5525 
5526 /**
5527  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
5528  *
5529  * @adev: amdgpu_device pointer
5530  * @reset_context: amdgpu reset context pointer
5531  *
5532  * Do a VF FLR and reinitialize the ASIC.
5533  * Returns 0 on success, negative error code on failure.
5534  */
5535 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
5536 				     struct amdgpu_reset_context *reset_context)
5537 {
5538 	int r;
5539 	struct amdgpu_hive_info *hive = NULL;
5540 
5541 	if (test_bit(AMDGPU_HOST_FLR, &reset_context->flags)) {
5542 		if (!amdgpu_ras_get_fed_status(adev))
5543 			amdgpu_virt_ready_to_reset(adev);
5544 		amdgpu_virt_wait_reset(adev);
5545 		clear_bit(AMDGPU_HOST_FLR, &reset_context->flags);
5546 		r = amdgpu_virt_request_full_gpu(adev, true);
5547 	} else {
5548 		r = amdgpu_virt_reset_gpu(adev);
5549 	}
5550 	if (r)
5551 		return r;
5552 
5553 	amdgpu_ras_clear_err_state(adev);
5554 	amdgpu_irq_gpu_reset_resume_helper(adev);
5555 
5556 	/* some SW cleanup the VF needs to do before recovery */
5557 	amdgpu_virt_post_reset(adev);
5558 
5559 	/* Resume IP prior to SMC */
5560 	r = amdgpu_device_ip_reinit_early_sriov(adev);
5561 	if (r)
5562 		return r;
5563 
5564 	amdgpu_virt_init_data_exchange(adev);
5565 
5566 	r = amdgpu_device_fw_loading(adev);
5567 	if (r)
5568 		return r;
5569 
5570 	/* now we are okay to resume SMC/CP/SDMA */
5571 	r = amdgpu_device_ip_reinit_late_sriov(adev);
5572 	if (r)
5573 		return r;
5574 
5575 	hive = amdgpu_get_xgmi_hive(adev);
5576 	/* Update PSP FW topology after reset */
5577 	if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
5578 		r = amdgpu_xgmi_update_topology(hive, adev);
5579 	if (hive)
5580 		amdgpu_put_xgmi_hive(hive);
5581 	if (r)
5582 		return r;
5583 
5584 	r = amdgpu_ib_ring_tests(adev);
5585 	if (r)
5586 		return r;
5587 
5588 	if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST)
5589 		amdgpu_inc_vram_lost(adev);
5590 
5591 	/* This needs to be called during full access, so we can't do it later
5592 	 * like bare metal does.
5593 	 */
5594 	amdgpu_amdkfd_post_reset(adev);
5595 	amdgpu_virt_release_full_gpu(adev, true);
5596 
5597 	/* Aldebaran and gfx_11_0_3 support RAS in SR-IOV, so RAS needs to be resumed during reset */
5598 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
5599 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
5600 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
5601 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0) ||
5602 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3))
5603 		amdgpu_ras_resume(adev);
5604 
5605 	amdgpu_virt_ras_telemetry_post_reset(adev);
5606 
5607 	return 0;
5608 }
5609 
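/*
 * SR-IOV recovery ordering, summarized from amdgpu_device_reset_sriov: the
 * VF either waits out a host-initiated FLR or requests a reset itself, then
 * re-inits the pre-SMC IPs, reloads firmware, re-inits SMC/CP/SDMA, updates
 * the XGMI topology and runs IB tests. KFD post-reset has to run while the
 * VF still holds full GPU access, which is why it cannot be deferred the
 * way bare-metal recovery defers it.
 */
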
5610 /**
5611  * amdgpu_device_has_job_running - check if there is any unfinished job
5612  *
5613  * @adev: amdgpu_device pointer
5614  *
5615  * Check if there is any job running on the device when the guest driver
5616  * receives an FLR notification from the host driver. If there are still jobs
5617  * running, the guest driver will not respond to the FLR reset. Instead, it
5618  * lets the job hit the timeout, and then issues the reset request itself.
5619  */
5620 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
5621 {
5622 	int i;
5623 
5624 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5625 		struct amdgpu_ring *ring = adev->rings[i];
5626 
5627 		if (!amdgpu_ring_sched_ready(ring))
5628 			continue;
5629 
5630 		if (amdgpu_fence_count_emitted(ring))
5631 			return true;
5632 	}
5633 	return false;
5634 }
5635 
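/*
 * Illustrative use from a guest FLR path (hedged sketch; the real handlers
 * live in the per-ASIC virt code, e.g. the xgpu_*_mailbox_flr_work workers):
 *
 *   if (amdgpu_device_has_job_running(adev))
 *           return;    // let the job timeout drive recovery instead
 *   // otherwise respond to the host FLR notification right away
 */
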
5636 /**
5637  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
5638  *
5639  * @adev: amdgpu_device pointer
5640  *
5641  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
5642  * a hung GPU.
5643  */
5644 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
5645 {
5646 
5647 	if (amdgpu_gpu_recovery == 0)
5648 		goto disabled;
5649 
5650 	/* Skip soft reset check in fatal error mode */
5651 	if (!amdgpu_ras_is_poison_mode_supported(adev))
5652 		return true;
5653 
5654 	if (amdgpu_sriov_vf(adev))
5655 		return true;
5656 
5657 	if (amdgpu_gpu_recovery == -1) {
5658 		switch (adev->asic_type) {
5659 #ifdef CONFIG_DRM_AMDGPU_SI
5660 		case CHIP_VERDE:
5661 		case CHIP_TAHITI:
5662 		case CHIP_PITCAIRN:
5663 		case CHIP_OLAND:
5664 		case CHIP_HAINAN:
5665 #endif
5666 #ifdef CONFIG_DRM_AMDGPU_CIK
5667 		case CHIP_KAVERI:
5668 		case CHIP_KABINI:
5669 		case CHIP_MULLINS:
5670 #endif
5671 		case CHIP_CARRIZO:
5672 		case CHIP_STONEY:
5673 		case CHIP_CYAN_SKILLFISH:
5674 			goto disabled;
5675 		default:
5676 			break;
5677 		}
5678 	}
5679 
5680 	return true;
5681 
5682 disabled:
5683 	dev_info(adev->dev, "GPU recovery disabled.\n");
5684 	return false;
5685 }
5686 
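/*
 * The check above is keyed off the amdgpu.gpu_recovery module parameter:
 * 0 disables recovery, 1 forces it on, and -1 (auto, the default) enables
 * it except on the ASICs listed in the switch. For example, booting with
 *
 *   amdgpu.gpu_recovery=1
 *
 * forces recovery attempts even on those parts.
 */
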
5687 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
5688 {
5689 	u32 i;
5690 	int ret = 0;
5691 
5692 	if (adev->bios)
5693 		amdgpu_atombios_scratch_regs_engine_hung(adev, true);
5694 
5695 	dev_info(adev->dev, "GPU mode1 reset\n");
5696 
5697 	/* Cache the state before bus master disable. The saved config space
5698 	 * values are used in other cases like restore after mode-2 reset.
5699 	 */
5700 	amdgpu_device_cache_pci_state(adev->pdev);
5701 
5702 	/* disable BM */
5703 	pci_clear_master(adev->pdev);
5704 
5705 	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
5706 		dev_info(adev->dev, "GPU smu mode1 reset\n");
5707 		ret = amdgpu_dpm_mode1_reset(adev);
5708 	} else {
5709 		dev_info(adev->dev, "GPU psp mode1 reset\n");
5710 		ret = psp_gpu_reset(adev);
5711 	}
5712 
5713 	if (ret)
5714 		goto mode1_reset_failed;
5715 
5716 	/* enable mmio access after mode 1 reset completed */
5717 	adev->no_hw_access = false;
5718 
5719 	/* ensure no_hw_access is updated before we access hw */
5720 	smp_mb();
5721 
5722 	amdgpu_device_load_pci_state(adev->pdev);
5723 	ret = amdgpu_psp_wait_for_bootloader(adev);
5724 	if (ret)
5725 		goto mode1_reset_failed;
5726 
5727 	/* wait for asic to come out of reset */
5728 	for (i = 0; i < adev->usec_timeout; i++) {
5729 		u32 memsize = adev->nbio.funcs->get_memsize(adev);
5730 
5731 		if (memsize != 0xffffffff)
5732 			break;
5733 		udelay(1);
5734 	}
5735 
5736 	if (i >= adev->usec_timeout) {
5737 		ret = -ETIMEDOUT;
5738 		goto mode1_reset_failed;
5739 	}
5740 
5741 	if (adev->bios)
5742 		amdgpu_atombios_scratch_regs_engine_hung(adev, false);
5743 
5744 	return 0;
5745 
5746 mode1_reset_failed:
5747 	dev_err(adev->dev, "GPU mode1 reset failed\n");
5748 	return ret;
5749 }
5750 
5751 int amdgpu_device_link_reset(struct amdgpu_device *adev)
5752 {
5753 	int ret = 0;
5754 
5755 	dev_info(adev->dev, "GPU link reset\n");
5756 
5757 	if (!amdgpu_reset_in_dpc(adev))
5758 		ret = amdgpu_dpm_link_reset(adev);
5759 
5760 	if (ret)
5761 		goto link_reset_failed;
5762 
5763 	ret = amdgpu_psp_wait_for_bootloader(adev);
5764 	if (ret)
5765 		goto link_reset_failed;
5766 
5767 	return 0;
5768 
5769 link_reset_failed:
5770 	dev_err(adev->dev, "GPU link reset failed\n");
5771 	return ret;
5772 }
5773 
5774 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
5775 				 struct amdgpu_reset_context *reset_context)
5776 {
5777 	int i, r = 0;
5778 	struct amdgpu_job *job = NULL;
5779 	struct amdgpu_device *tmp_adev = reset_context->reset_req_dev;
5780 	bool need_full_reset =
5781 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5782 
5783 	if (reset_context->reset_req_dev == adev)
5784 		job = reset_context->job;
5785 
5786 	if (amdgpu_sriov_vf(adev))
5787 		amdgpu_virt_pre_reset(adev);
5788 
5789 	amdgpu_fence_driver_isr_toggle(adev, true);
5790 
5791 	/* block all schedulers and reset given job's ring */
5792 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5793 		struct amdgpu_ring *ring = adev->rings[i];
5794 
5795 		if (!amdgpu_ring_sched_ready(ring))
5796 			continue;
5797 
5798 		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
5799 		amdgpu_fence_driver_force_completion(ring);
5800 	}
5801 
5802 	amdgpu_fence_driver_isr_toggle(adev, false);
5803 
5804 	if (job && job->vm)
5805 		drm_sched_increase_karma(&job->base);
5806 
5807 	r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
5808 	/* If reset handler not implemented, continue; otherwise return */
5809 	if (r == -EOPNOTSUPP)
5810 		r = 0;
5811 	else
5812 		return r;
5813 
5814 	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
5815 	if (!amdgpu_sriov_vf(adev)) {
5816 
5817 		if (!need_full_reset)
5818 			need_full_reset = amdgpu_device_ip_need_full_reset(adev);
5819 
5820 		if (!need_full_reset && amdgpu_gpu_recovery &&
5821 		    amdgpu_device_ip_check_soft_reset(adev)) {
5822 			amdgpu_device_ip_pre_soft_reset(adev);
5823 			r = amdgpu_device_ip_soft_reset(adev);
5824 			amdgpu_device_ip_post_soft_reset(adev);
5825 			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
5826 				dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
5827 				need_full_reset = true;
5828 			}
5829 		}
5830 
5831 		if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) {
5832 			dev_info(tmp_adev->dev, "Dumping IP State\n");
5833 			/* Trigger ip dump before we reset the asic */
5834 			for (i = 0; i < tmp_adev->num_ip_blocks; i++)
5835 				if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state)
5836 					tmp_adev->ip_blocks[i].version->funcs
5837 						->dump_ip_state((void *)&tmp_adev->ip_blocks[i]);
5838 			dev_info(tmp_adev->dev, "Dumping IP State Completed\n");
5839 		}
5840 
5841 		if (need_full_reset) {
5842 			r = amdgpu_device_ip_suspend(adev);
5843 			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5844 		} else {
5845 			clear_bit(AMDGPU_NEED_FULL_RESET,
5846 				  &reset_context->flags);
5847 		}
5848 	}
5849 
5850 	return r;
5851 }
5852 
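/*
 * Decision flow of amdgpu_device_pre_asic_reset on bare metal, in shorthand:
 *
 *   need_full_reset |= amdgpu_device_ip_need_full_reset()
 *   if (!need_full_reset && recovery enabled && some IP reports a hang)
 *           pre_soft_reset -> soft_reset -> post_soft_reset
 *           if (soft reset failed or blocks still hang)
 *                   need_full_reset = true
 *   if (need_full_reset)
 *           suspend all IPs and set AMDGPU_NEED_FULL_RESET for the caller
 */
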
5853 int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
5854 {
5855 	struct list_head *device_list_handle;
5856 	bool full_reset, vram_lost = false;
5857 	struct amdgpu_device *tmp_adev;
5858 	int r, init_level;
5859 
5860 	device_list_handle = reset_context->reset_device_list;
5861 
5862 	if (!device_list_handle)
5863 		return -EINVAL;
5864 
5865 	full_reset = test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5866 
5867 	/*
5868 	 * For reset on init, use the default init level; otherwise keep the
5869 	 * level at the recovery level.
5870 	 */
5871 	if (reset_context->method == AMD_RESET_METHOD_ON_INIT)
5872 		init_level = AMDGPU_INIT_LEVEL_DEFAULT;
5873 	else
5874 		init_level = AMDGPU_INIT_LEVEL_RESET_RECOVERY;
5875 
5876 	r = 0;
5877 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5878 		amdgpu_set_init_level(tmp_adev, init_level);
5879 		if (full_reset) {
5880 			/* post card */
5881 			amdgpu_reset_set_dpc_status(tmp_adev, false);
5882 			amdgpu_ras_clear_err_state(tmp_adev);
5883 			r = amdgpu_device_asic_init(tmp_adev);
5884 			if (r) {
5885 				dev_warn(tmp_adev->dev, "asic atom init failed!");
5886 			} else {
5887 				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
5888 
5889 				r = amdgpu_device_ip_resume_phase1(tmp_adev);
5890 				if (r)
5891 					goto out;
5892 
5893 				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
5894 
5895 				if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags))
5896 					amdgpu_coredump(tmp_adev, false, vram_lost, reset_context->job);
5897 
5898 				if (vram_lost) {
5899 					dev_info(
5900 						tmp_adev->dev,
5901 						"VRAM is lost due to GPU reset!\n");
5902 					amdgpu_inc_vram_lost(tmp_adev);
5903 				}
5904 
5905 				r = amdgpu_device_fw_loading(tmp_adev);
5906 				if (r)
5907 					return r;
5908 
5909 				r = amdgpu_xcp_restore_partition_mode(
5910 					tmp_adev->xcp_mgr);
5911 				if (r)
5912 					goto out;
5913 
5914 				r = amdgpu_device_ip_resume_phase2(tmp_adev);
5915 				if (r)
5916 					goto out;
5917 
5918 				amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true);
5919 
5920 				r = amdgpu_device_ip_resume_phase3(tmp_adev);
5921 				if (r)
5922 					goto out;
5923 
5924 				if (vram_lost)
5925 					amdgpu_device_fill_reset_magic(tmp_adev);
5926 
5927 				/*
5928 				 * Add this ASIC back as tracked, as the reset
5929 				 * has already completed successfully.
5930 				 */
5931 				amdgpu_register_gpu_instance(tmp_adev);
5932 
5933 				if (!reset_context->hive &&
5934 				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5935 					amdgpu_xgmi_add_device(tmp_adev);
5936 
5937 				r = amdgpu_device_ip_late_init(tmp_adev);
5938 				if (r)
5939 					goto out;
5940 
5941 				r = amdgpu_userq_post_reset(tmp_adev, vram_lost);
5942 				if (r)
5943 					goto out;
5944 
5945 				drm_client_dev_resume(adev_to_drm(tmp_adev));
5946 
5947 				/*
5948 				 * The GPU enters a bad state once the number of
5949 				 * faulty pages flagged by ECC reaches the
5950 				 * threshold, and RAS recovery is scheduled next.
5951 				 * Check here whether the bad page threshold has
5952 				 * indeed been exceeded, and if so abort recovery
5953 				 * and remind the user to either retire this GPU
5954 				 * or set a bigger bad_page_threshold value the
5955 				 * next time the driver is probed.
5956 				 */
5957 				if (!amdgpu_ras_is_rma(tmp_adev)) {
5958 					/* must succeed. */
5959 					amdgpu_ras_resume(tmp_adev);
5960 				} else {
5961 					r = -EINVAL;
5962 					goto out;
5963 				}
5964 
5965 				/* Update PSP FW topology after reset */
5966 				if (reset_context->hive &&
5967 				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5968 					r = amdgpu_xgmi_update_topology(
5969 						reset_context->hive, tmp_adev);
5970 			}
5971 		}
5972 
5973 out:
5974 		if (!r) {
5975 			/* IP init is complete now, set level as default */
5976 			amdgpu_set_init_level(tmp_adev,
5977 					      AMDGPU_INIT_LEVEL_DEFAULT);
5978 			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
5979 			r = amdgpu_ib_ring_tests(tmp_adev);
5980 			if (r) {
5981 				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
5982 				r = -EAGAIN;
5983 				goto end;
5984 			}
5985 		}
5986 
5987 		if (r)
5988 			tmp_adev->asic_reset_res = r;
5989 	}
5990 
5991 end:
5992 	return r;
5993 }
5994 
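/*
 * Re-init ordering enforced above, for reference: atom post -> IP resume
 * phase 1 -> VRAM-lost check (and optional coredump) -> firmware loading ->
 * partition mode restore -> IP resume phase 2 -> buffer funcs -> IP resume
 * phase 3 -> late init -> user queues -> RAS resume. Any failure drops to
 * the shared "out" path so the init level and IB ring tests are still
 * handled consistently.
 */
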
5995 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
5996 			 struct amdgpu_reset_context *reset_context)
5997 {
5998 	struct amdgpu_device *tmp_adev = NULL;
5999 	bool need_full_reset, skip_hw_reset;
6000 	int r = 0;
6001 
6002 	/* Try reset handler method first */
6003 	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
6004 				    reset_list);
6005 
6006 	reset_context->reset_device_list = device_list_handle;
6007 	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
6008 	/* If reset handler not implemented, continue; otherwise return */
6009 	if (r == -EOPNOTSUPP)
6010 		r = 0;
6011 	else
6012 		return r;
6013 
6014 	/* Reset handler not implemented, use the default method */
6015 	need_full_reset =
6016 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
6017 	skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
6018 
6019 	/*
6020 	 * ASIC reset has to be done on all XGMI hive nodes ASAP
6021 	 * to allow proper link negotiation in FW (within 1 sec)
6022 	 */
6023 	if (!skip_hw_reset && need_full_reset) {
6024 		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
6025 			/* For XGMI run all resets in parallel to speed up the process */
6026 			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
6027 				if (!queue_work(system_unbound_wq,
6028 						&tmp_adev->xgmi_reset_work))
6029 					r = -EALREADY;
6030 			} else
6031 				r = amdgpu_asic_reset(tmp_adev);
6032 
6033 			if (r) {
6034 				dev_err(tmp_adev->dev,
6035 				"ASIC reset failed with error %d for drm dev %s",
6036 					r, adev_to_drm(tmp_adev)->unique);
6037 				goto out;
6038 			}
6039 		}
6040 
6041 		/* For XGMI wait for all resets to complete before proceed */
6042 		if (!r) {
6043 			list_for_each_entry(tmp_adev, device_list_handle,
6044 					    reset_list) {
6045 				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
6046 					flush_work(&tmp_adev->xgmi_reset_work);
6047 					r = tmp_adev->asic_reset_res;
6048 					if (r)
6049 						break;
6050 				}
6051 			}
6052 		}
6053 	}
6054 
6055 	if (!r && amdgpu_ras_intr_triggered()) {
6056 		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
6057 			amdgpu_ras_reset_error_count(tmp_adev,
6058 						     AMDGPU_RAS_BLOCK__MMHUB);
6059 		}
6060 
6061 		amdgpu_ras_intr_cleared();
6062 	}
6063 
6064 	r = amdgpu_device_reinit_after_reset(reset_context);
6065 	if (r == -EAGAIN)
6066 		set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
6067 	else
6068 		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
6069 
6070 out:
6071 	return r;
6072 }
6073 
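/*
 * For XGMI hives the per-device resets above are fanned out through
 * queue_work(system_unbound_wq, &adev->xgmi_reset_work) and joined with
 * flush_work() so that every node resets within the firmware's link
 * negotiation window. The pattern, reduced to its core:
 *
 *   list_for_each_entry(tmp_adev, device_list_handle, reset_list)
 *           queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work);
 *   list_for_each_entry(tmp_adev, device_list_handle, reset_list)
 *           flush_work(&tmp_adev->xgmi_reset_work);
 */
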
6074 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
6075 {
6076 
6077 	switch (amdgpu_asic_reset_method(adev)) {
6078 	case AMD_RESET_METHOD_MODE1:
6079 	case AMD_RESET_METHOD_LINK:
6080 		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
6081 		break;
6082 	case AMD_RESET_METHOD_MODE2:
6083 		adev->mp1_state = PP_MP1_STATE_RESET;
6084 		break;
6085 	default:
6086 		adev->mp1_state = PP_MP1_STATE_NONE;
6087 		break;
6088 	}
6089 }
6090 
6091 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
6092 {
6093 	amdgpu_vf_error_trans_all(adev);
6094 	adev->mp1_state = PP_MP1_STATE_NONE;
6095 }
6096 
6097 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
6098 {
6099 	struct pci_dev *p = NULL;
6100 
6101 	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
6102 			adev->pdev->bus->number, 1);
6103 	if (p) {
6104 		pm_runtime_enable(&(p->dev));
6105 		pm_runtime_resume(&(p->dev));
6106 	}
6107 
6108 	pci_dev_put(p);
6109 }
6110 
6111 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
6112 {
6113 	enum amd_reset_method reset_method;
6114 	struct pci_dev *p = NULL;
6115 	u64 expires;
6116 
6117 	/*
6118 	 * For now, only BACO and mode1 reset are confirmed to
6119 	 * suffer from the audio issue if not properly suspended.
6120 	 */
6121 	reset_method = amdgpu_asic_reset_method(adev);
6122 	if ((reset_method != AMD_RESET_METHOD_BACO) &&
6123 	     (reset_method != AMD_RESET_METHOD_MODE1))
6124 		return -EINVAL;
6125 
6126 	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
6127 			adev->pdev->bus->number, 1);
6128 	if (!p)
6129 		return -ENODEV;
6130 
6131 	expires = pm_runtime_autosuspend_expiration(&(p->dev));
6132 	if (!expires)
6133 		/*
6134 		 * If we cannot get the audio device's autosuspend delay,
6135 		 * a fixed 4s interval is used. Since 3s is the audio
6136 		 * controller's default autosuspend delay setting, the 4s
6137 		 * used here is guaranteed to cover it.
6138 		 */
6139 		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
6140 
6141 	while (!pm_runtime_status_suspended(&(p->dev))) {
6142 		if (!pm_runtime_suspend(&(p->dev)))
6143 			break;
6144 
6145 		if (expires < ktime_get_mono_fast_ns()) {
6146 			dev_warn(adev->dev, "failed to suspend display audio\n");
6147 			pci_dev_put(p);
6148 			/* TODO: abort the succeeding gpu reset? */
6149 			return -ETIMEDOUT;
6150 		}
6151 	}
6152 
6153 	pm_runtime_disable(&(p->dev));
6154 
6155 	pci_dev_put(p);
6156 	return 0;
6157 }
6158 
6159 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
6160 {
6161 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
6162 
6163 #if defined(CONFIG_DEBUG_FS)
6164 	if (!amdgpu_sriov_vf(adev))
6165 		cancel_work(&adev->reset_work);
6166 #endif
6167 	cancel_work(&adev->userq_reset_work);
6168 
6169 	if (adev->kfd.dev)
6170 		cancel_work(&adev->kfd.reset_work);
6171 
6172 	if (amdgpu_sriov_vf(adev))
6173 		cancel_work(&adev->virt.flr_work);
6174 
6175 	if (con && adev->ras_enabled)
6176 		cancel_work(&con->recovery_work);
6177 
6178 }
6179 
6180 static int amdgpu_device_health_check(struct list_head *device_list_handle)
6181 {
6182 	struct amdgpu_device *tmp_adev;
6183 	int ret = 0;
6184 
6185 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
6186 		ret |= amdgpu_device_bus_status_check(tmp_adev);
6187 	}
6188 
6189 	return ret;
6190 }
6191 
6192 static void amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
6193 					  struct list_head *device_list,
6194 					  struct amdgpu_hive_info *hive)
6195 {
6196 	struct amdgpu_device *tmp_adev = NULL;
6197 
6198 	/*
6199 	 * Build list of devices to reset.
6200 	 * In case we are in XGMI hive mode, resort the device list
6201 	 * to put adev in the 1st position.
6202 	 */
6203 	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
6204 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
6205 			list_add_tail(&tmp_adev->reset_list, device_list);
6206 			if (adev->shutdown)
6207 				tmp_adev->shutdown = true;
6208 			if (amdgpu_reset_in_dpc(adev))
6209 				tmp_adev->pcie_reset_ctx.in_link_reset = true;
6210 		}
6211 		if (!list_is_first(&adev->reset_list, device_list))
6212 			list_rotate_to_front(&adev->reset_list, device_list);
6213 	} else {
6214 		list_add_tail(&adev->reset_list, device_list);
6215 	}
6216 }
6217 
6218 static void amdgpu_device_recovery_get_reset_lock(struct amdgpu_device *adev,
6219 						  struct list_head *device_list)
6220 {
6221 	struct amdgpu_device *tmp_adev = NULL;
6222 
6223 	if (list_empty(device_list))
6224 		return;
6225 	tmp_adev =
6226 		list_first_entry(device_list, struct amdgpu_device, reset_list);
6227 	amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
6228 }
6229 
6230 static void amdgpu_device_recovery_put_reset_lock(struct amdgpu_device *adev,
6231 						  struct list_head *device_list)
6232 {
6233 	struct amdgpu_device *tmp_adev = NULL;
6234 
6235 	if (list_empty(device_list))
6236 		return;
6237 	tmp_adev =
6238 		list_first_entry(device_list, struct amdgpu_device, reset_list);
6239 	amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
6240 }
6241 
6242 static void amdgpu_device_halt_activities(struct amdgpu_device *adev,
6243 					  struct amdgpu_job *job,
6244 					  struct amdgpu_reset_context *reset_context,
6245 					  struct list_head *device_list,
6246 					  struct amdgpu_hive_info *hive,
6247 					  bool need_emergency_restart)
6248 {
6249 	struct amdgpu_device *tmp_adev = NULL;
6250 	int i;
6251 
6252 	/* block all schedulers and reset given job's ring */
6253 	list_for_each_entry(tmp_adev, device_list, reset_list) {
6254 		amdgpu_device_set_mp1_state(tmp_adev);
6255 
6256 		 * Try to put the audio codec into suspend state
6257 		 * before the gpu reset starts.
6258 		 *
6259 		 * The power domain of the graphics device is shared
6260 		 * with the AZ power domain. Without this, we may
6261 		 * change the audio hardware from behind the audio
6262 		 * driver's back, which will trigger audio codec
6263 		 * errors.
6264 		 */
6265 		 */
6266 		if (!amdgpu_device_suspend_display_audio(tmp_adev))
6267 			tmp_adev->pcie_reset_ctx.audio_suspended = true;
6268 
6269 		amdgpu_ras_set_error_query_ready(tmp_adev, false);
6270 
6271 		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
6272 
6273 		amdgpu_amdkfd_pre_reset(tmp_adev, reset_context);
6274 
6275 		/*
6276 		 * Mark these ASICs to be reset as untracked first
6277 		 * And add them back after reset completed
6278 		 */
6279 		amdgpu_unregister_gpu_instance(tmp_adev);
6280 
6281 		drm_client_dev_suspend(adev_to_drm(tmp_adev));
6282 
6283 		/* disable ras on ALL IPs */
6284 		if (!need_emergency_restart && !amdgpu_reset_in_dpc(adev) &&
6285 		    amdgpu_device_ip_need_full_reset(tmp_adev))
6286 			amdgpu_ras_suspend(tmp_adev);
6287 
6288 		amdgpu_userq_pre_reset(tmp_adev);
6289 
6290 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
6291 			struct amdgpu_ring *ring = tmp_adev->rings[i];
6292 
6293 			if (!amdgpu_ring_sched_ready(ring))
6294 				continue;
6295 
6296 			drm_sched_wqueue_stop(&ring->sched);
6297 
6298 			if (need_emergency_restart)
6299 				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
6300 		}
6301 		atomic_inc(&tmp_adev->gpu_reset_counter);
6302 	}
6303 }
6304 
6305 static int amdgpu_device_asic_reset(struct amdgpu_device *adev,
6306 			      struct list_head *device_list,
6307 			      struct amdgpu_reset_context *reset_context)
6308 {
6309 	struct amdgpu_device *tmp_adev = NULL;
6310 	int retry_limit = AMDGPU_MAX_RETRY_LIMIT;
6311 	int r = 0;
6312 
6313 retry:	/* Rest of adevs pre asic reset from XGMI hive. */
6314 	list_for_each_entry(tmp_adev, device_list, reset_list) {
6315 		r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
6316 		/* TODO: should we stop? */
6317 		if (r) {
6318 			dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
6319 				  r, adev_to_drm(tmp_adev)->unique);
6320 			tmp_adev->asic_reset_res = r;
6321 		}
6322 	}
6323 
6324 	/* Actual ASIC resets if needed.*/
6325 	/* Host driver will handle XGMI hive reset for SRIOV */
6326 	if (amdgpu_sriov_vf(adev)) {
6327 
6328 		/* Bail out of reset early */
6329 		if (amdgpu_ras_is_rma(adev))
6330 			return -ENODEV;
6331 
6332 		if (amdgpu_ras_get_fed_status(adev) || amdgpu_virt_rcvd_ras_interrupt(adev)) {
6333 			dev_dbg(adev->dev, "Detected RAS error, wait for FLR completion\n");
6334 			amdgpu_ras_set_fed(adev, true);
6335 			set_bit(AMDGPU_HOST_FLR, &reset_context->flags);
6336 		}
6337 
6338 		r = amdgpu_device_reset_sriov(adev, reset_context);
6339 		if (AMDGPU_RETRY_SRIOV_RESET(r) && (retry_limit--) > 0) {
6340 			amdgpu_virt_release_full_gpu(adev, true);
6341 			goto retry;
6342 		}
6343 		if (r)
6344 			adev->asic_reset_res = r;
6345 	} else {
6346 		r = amdgpu_do_asic_reset(device_list, reset_context);
6347 		if (r && r == -EAGAIN)
6348 			goto retry;
6349 	}
6350 
6351 	list_for_each_entry(tmp_adev, device_list, reset_list) {
6352 		/*
6353 		 * Drop any pending non scheduler resets queued before reset is done.
6354 		 * Any reset scheduled after this point would be valid. Scheduler resets
6355 		 * were already dropped during drm_sched_stop and no new ones can come
6356 		 * in before drm_sched_start.
6357 		 */
6358 		amdgpu_device_stop_pending_resets(tmp_adev);
6359 	}
6360 
6361 	return r;
6362 }
6363 
6364 static int amdgpu_device_sched_resume(struct list_head *device_list,
6365 			      struct amdgpu_reset_context *reset_context,
6366 			      bool   job_signaled)
6367 {
6368 	struct amdgpu_device *tmp_adev = NULL;
6369 	int i, r = 0;
6370 
6371 	/* Post ASIC reset for all devs. */
6372 	list_for_each_entry(tmp_adev, device_list, reset_list) {
6373 
6374 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
6375 			struct amdgpu_ring *ring = tmp_adev->rings[i];
6376 
6377 			if (!amdgpu_ring_sched_ready(ring))
6378 				continue;
6379 
6380 			drm_sched_wqueue_start(&ring->sched);
6381 		}
6382 
6383 		if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
6384 			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
6385 
6386 		if (tmp_adev->asic_reset_res) {
6387 			/* Bad news, how do we tell it to userspace?
6388 			 * For a RAS error, we should report the GPU as bad
6389 			 * instead of reporting a reset failure.
6390 			 */
6391 			if (reset_context->src != AMDGPU_RESET_SRC_RAS ||
6392 			    !amdgpu_ras_eeprom_check_err_threshold(tmp_adev))
6393 				dev_info(
6394 					tmp_adev->dev,
6395 					"GPU reset(%d) failed with error %d\n",
6396 					atomic_read(
6397 						&tmp_adev->gpu_reset_counter),
6398 					tmp_adev->asic_reset_res);
6399 			amdgpu_vf_error_put(tmp_adev,
6400 					    AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0,
6401 					    tmp_adev->asic_reset_res);
6402 			if (!r)
6403 				r = tmp_adev->asic_reset_res;
6404 			tmp_adev->asic_reset_res = 0;
6405 		} else {
6406 			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n",
6407 				 atomic_read(&tmp_adev->gpu_reset_counter));
6408 			if (amdgpu_acpi_smart_shift_update(tmp_adev,
6409 							   AMDGPU_SS_DEV_D0))
6410 				dev_warn(tmp_adev->dev,
6411 					 "smart shift update failed\n");
6412 		}
6413 	}
6414 
6415 	return r;
6416 }
6417 
6418 static void amdgpu_device_gpu_resume(struct amdgpu_device *adev,
6419 			      struct list_head *device_list,
6420 			      bool   need_emergency_restart)
6421 {
6422 	struct amdgpu_device *tmp_adev = NULL;
6423 
6424 	list_for_each_entry(tmp_adev, device_list, reset_list) {
6425 		/* unlock kfd: SRIOV would do it separately */
6426 		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
6427 			amdgpu_amdkfd_post_reset(tmp_adev);
6428 
6429 		/* kfd_post_reset will do nothing if the kfd device is not
6430 		 * initialized, so bring up kfd here if it wasn't initialized before
6431 		 */
6432 		if (!adev->kfd.init_complete)
6433 			amdgpu_amdkfd_device_init(adev);
6434 
6435 		if (tmp_adev->pcie_reset_ctx.audio_suspended)
6436 			amdgpu_device_resume_display_audio(tmp_adev);
6437 
6438 		amdgpu_device_unset_mp1_state(tmp_adev);
6439 
6440 		amdgpu_ras_set_error_query_ready(tmp_adev, true);
6441 
6442 	}
6443 }
6444 
6445 
6446 /**
6447  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
6448  *
6449  * @adev: amdgpu_device pointer
6450  * @job: which job trigger hang
6451  * @reset_context: amdgpu reset context pointer
6452  *
6453  * Attempt to reset the GPU if it has hung (all asics).
6454  * Attempts a soft reset or a full reset and reinitializes the ASIC.
6455  * Returns 0 for success or an error on failure.
6456  */
6457 
6458 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
6459 			      struct amdgpu_job *job,
6460 			      struct amdgpu_reset_context *reset_context)
6461 {
6462 	struct list_head device_list;
6463 	bool job_signaled = false;
6464 	struct amdgpu_hive_info *hive = NULL;
6465 	int r = 0;
6466 	bool need_emergency_restart = false;
6467 	/* save the pasid here as the job may be freed before the end of the reset */
6468 	int pasid = job ? job->pasid : -EINVAL;
6469 
6470 	/*
6471 	 * If it reaches here because of hang/timeout and a RAS error is
6472 	 * detected at the same time, let RAS recovery take care of it.
6473 	 */
6474 	if (amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY) &&
6475 	    !amdgpu_sriov_vf(adev) &&
6476 	    reset_context->src != AMDGPU_RESET_SRC_RAS) {
6477 		dev_dbg(adev->dev,
6478 			"Gpu recovery from source: %d yielding to RAS error recovery handling",
6479 			reset_context->src);
6480 		return 0;
6481 	}
6482 
6483 	/*
6484 	 * Special case: RAS triggered and full reset isn't supported
6485 	 */
6486 	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
6487 
6488 	/*
6489 	 * Flush RAM to disk so that after reboot
6490 	 * the user can read log and see why the system rebooted.
6491 	 */
6492 	if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
6493 		amdgpu_ras_get_context(adev)->reboot) {
6494 		dev_warn(adev->dev, "Emergency reboot.");
6495 
6496 		ksys_sync_helper();
6497 		emergency_restart();
6498 	}
6499 
6500 	dev_info(adev->dev, "GPU %s begin! Source: %d\n",
6501 		 need_emergency_restart ? "jobs stop" : "reset",
6502 		 reset_context->src);
6503 
6504 	if (!amdgpu_sriov_vf(adev))
6505 		hive = amdgpu_get_xgmi_hive(adev);
6506 	if (hive)
6507 		mutex_lock(&hive->hive_lock);
6508 
6509 	reset_context->job = job;
6510 	reset_context->hive = hive;
6511 	INIT_LIST_HEAD(&device_list);
6512 
6513 	amdgpu_device_recovery_prepare(adev, &device_list, hive);
6514 
6515 	if (!amdgpu_sriov_vf(adev)) {
6516 		r = amdgpu_device_health_check(&device_list);
6517 		if (r)
6518 			goto end_reset;
6519 	}
6520 
6521 	/* Cannot be called after locking reset domain */
6522 	amdgpu_ras_pre_reset(adev, &device_list);
6523 
6524 	/* We need to lock reset domain only once both for XGMI and single device */
6525 	amdgpu_device_recovery_get_reset_lock(adev, &device_list);
6526 
6527 	amdgpu_device_halt_activities(adev, job, reset_context, &device_list,
6528 				      hive, need_emergency_restart);
6529 	if (need_emergency_restart)
6530 		goto skip_sched_resume;
6531 	/*
6532 	 * Must check guilty signal here since after this point all old
6533 	 * HW fences are force signaled.
6534 	 *
6535 	 * job->base holds a reference to parent fence
6536 	 */
6537 	if (job && (dma_fence_get_status(&job->hw_fence->base) > 0)) {
6538 		job_signaled = true;
6539 		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
6540 		goto skip_hw_reset;
6541 	}
6542 
6543 	r = amdgpu_device_asic_reset(adev, &device_list, reset_context);
6544 	if (r)
6545 		goto reset_unlock;
6546 skip_hw_reset:
6547 	r = amdgpu_device_sched_resume(&device_list, reset_context, job_signaled);
6548 	if (r)
6549 		goto reset_unlock;
6550 skip_sched_resume:
6551 	amdgpu_device_gpu_resume(adev, &device_list, need_emergency_restart);
6552 reset_unlock:
6553 	amdgpu_device_recovery_put_reset_lock(adev, &device_list);
6554 	amdgpu_ras_post_reset(adev, &device_list);
6555 end_reset:
6556 	if (hive) {
6557 		mutex_unlock(&hive->hive_lock);
6558 		amdgpu_put_xgmi_hive(hive);
6559 	}
6560 
6561 	if (r)
6562 		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
6563 
6564 	atomic_set(&adev->reset_domain->reset_res, r);
6565 
6566 	if (!r) {
6567 		struct amdgpu_task_info *ti = NULL;
6568 
6569 		/*
6570 		 * The job may already be freed at this point via the sched tdr workqueue so
6571 		 * use the cached pasid.
6572 		 */
6573 		if (pasid >= 0)
6574 			ti = amdgpu_vm_get_task_info_pasid(adev, pasid);
6575 
6576 		drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE,
6577 				     ti ? &ti->task : NULL);
6578 
6579 		amdgpu_vm_put_task_info(ti);
6580 	}
6581 
6582 	return r;
6583 }
6584 
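/*
 * End-to-end recovery pipeline driven by amdgpu_device_gpu_recover, in
 * order: build the device list -> bus health check -> RAS pre-reset ->
 * lock the reset domain -> halt activities (audio, KFD, schedulers) ->
 * skip the HW reset if the guilty fence already signaled -> ASIC reset ->
 * restart the schedulers -> resume (KFD, audio, MP1 state) -> unlock ->
 * RAS post-reset -> emit the DRM wedged event on success.
 */
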
6585 /**
6586  * amdgpu_device_partner_bandwidth - find the bandwidth of appropriate partner
6587  *
6588  * @adev: amdgpu_device pointer
6589  * @speed: pointer to the speed of the link
6590  * @width: pointer to the width of the link
6591  *
6592  * Evaluate the hierarchy to find the speed and bandwidth capabilities of the
6593  * first physical partner to an AMD dGPU.
6594  * This will exclude any virtual switches and links.
6595  */
6596 static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
6597 					    enum pci_bus_speed *speed,
6598 					    enum pcie_link_width *width)
6599 {
6600 	struct pci_dev *parent = adev->pdev;
6601 
6602 	if (!speed || !width)
6603 		return;
6604 
6605 	*speed = PCI_SPEED_UNKNOWN;
6606 	*width = PCIE_LNK_WIDTH_UNKNOWN;
6607 
6608 	if (amdgpu_device_pcie_dynamic_switching_supported(adev)) {
6609 		while ((parent = pci_upstream_bridge(parent))) {
6610 			/* skip upstream/downstream switches internal to dGPU */
6611 			if (parent->vendor == PCI_VENDOR_ID_ATI)
6612 				continue;
6613 			*speed = pcie_get_speed_cap(parent);
6614 			*width = pcie_get_width_cap(parent);
6615 			break;
6616 		}
6617 	} else {
6618 		/* use the current speeds rather than max if switching is not supported */
6619 		pcie_bandwidth_available(adev->pdev, NULL, speed, width);
6620 	}
6621 }
6622 
6623 /**
6624  * amdgpu_device_gpu_bandwidth - find the bandwidth of the GPU
6625  *
6626  * @adev: amdgpu_device pointer
6627  * @speed: pointer to the speed of the link
6628  * @width: pointer to the width of the link
6629  *
6630  * Evaluate the hierarchy to find the speed and bandwidth capabilities of the
6631  * AMD dGPU which may be a virtual upstream bridge.
6632  */
6633 static void amdgpu_device_gpu_bandwidth(struct amdgpu_device *adev,
6634 					enum pci_bus_speed *speed,
6635 					enum pcie_link_width *width)
6636 {
6637 	struct pci_dev *parent = adev->pdev;
6638 
6639 	if (!speed || !width)
6640 		return;
6641 
6642 	parent = pci_upstream_bridge(parent);
6643 	if (parent && parent->vendor == PCI_VENDOR_ID_ATI) {
6644 		/* use the upstream/downstream switches internal to dGPU */
6645 		*speed = pcie_get_speed_cap(parent);
6646 		*width = pcie_get_width_cap(parent);
6647 		while ((parent = pci_upstream_bridge(parent))) {
6648 			if (parent->vendor == PCI_VENDOR_ID_ATI) {
6649 				/* use the upstream/downstream switches internal to dGPU */
6650 				*speed = pcie_get_speed_cap(parent);
6651 				*width = pcie_get_width_cap(parent);
6652 			}
6653 		}
6654 	} else {
6655 		/* use the device itself */
6656 		*speed = pcie_get_speed_cap(adev->pdev);
6657 		*width = pcie_get_width_cap(adev->pdev);
6658 	}
6659 }
6660 
6661 /**
6662  * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
6663  *
6664  * @adev: amdgpu_device pointer
6665  *
6666  * Fetches and stores in the driver the PCIE capabilities (gen speed
6667  * and lanes) of the slot the device is in. Handles APUs and
6668  * virtualized environments where PCIE config space may not be available.
6669  */
6670 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
6671 {
6672 	enum pci_bus_speed speed_cap, platform_speed_cap;
6673 	enum pcie_link_width platform_link_width, link_width;
6674 
6675 	if (amdgpu_pcie_gen_cap)
6676 		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
6677 
6678 	if (amdgpu_pcie_lane_cap)
6679 		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
6680 
6681 	/* covers APUs as well */
6682 	if (pci_is_root_bus(adev->pdev->bus) && !amdgpu_passthrough(adev)) {
6683 		if (adev->pm.pcie_gen_mask == 0)
6684 			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
6685 		if (adev->pm.pcie_mlw_mask == 0)
6686 			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
6687 		return;
6688 	}
6689 
6690 	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
6691 		return;
6692 
6693 	amdgpu_device_partner_bandwidth(adev, &platform_speed_cap,
6694 					&platform_link_width);
6695 	amdgpu_device_gpu_bandwidth(adev, &speed_cap, &link_width);
6696 
6697 	if (adev->pm.pcie_gen_mask == 0) {
6698 		/* asic caps */
6699 		if (speed_cap == PCI_SPEED_UNKNOWN) {
6700 			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6701 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6702 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
6703 		} else {
6704 			if (speed_cap == PCIE_SPEED_32_0GT)
6705 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6706 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6707 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
6708 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
6709 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
6710 			else if (speed_cap == PCIE_SPEED_16_0GT)
6711 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6712 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6713 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
6714 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
6715 			else if (speed_cap == PCIE_SPEED_8_0GT)
6716 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6717 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6718 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
6719 			else if (speed_cap == PCIE_SPEED_5_0GT)
6720 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6721 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
6722 			else
6723 				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
6724 		}
6725 		/* platform caps */
6726 		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
6727 			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6728 						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
6729 		} else {
6730 			if (platform_speed_cap == PCIE_SPEED_32_0GT)
6731 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6732 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6733 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
6734 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
6735 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
6736 			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
6737 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6738 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6739 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
6740 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
6741 			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
6742 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6743 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6744 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
6745 			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
6746 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6747 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
6748 			else
6749 				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
6750 
6751 		}
6752 	}
6753 	if (adev->pm.pcie_mlw_mask == 0) {
6754 		/* asic caps */
6755 		if (link_width == PCIE_LNK_WIDTH_UNKNOWN) {
6756 			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_ASIC_PCIE_MLW_MASK;
6757 		} else {
6758 			switch (link_width) {
6759 			case PCIE_LNK_X32:
6760 				adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X32 |
6761 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 |
6762 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
6763 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
6764 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
6765 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
6766 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
6767 				break;
6768 			case PCIE_LNK_X16:
6769 				adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 |
6770 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
6771 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
6772 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
6773 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
6774 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
6775 				break;
6776 			case PCIE_LNK_X12:
6777 				adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
6778 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
6779 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
6780 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
6781 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
6782 				break;
6783 			case PCIE_LNK_X8:
6784 				adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
6785 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
6786 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
6787 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
6788 				break;
6789 			case PCIE_LNK_X4:
6790 				adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
6791 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
6792 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
6793 				break;
6794 			case PCIE_LNK_X2:
6795 				adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
6796 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
6797 				break;
6798 			case PCIE_LNK_X1:
6799 				adev->pm.pcie_mlw_mask |= CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1;
6800 				break;
6801 			default:
6802 				break;
6803 			}
6804 		}
6805 		/* platform caps */
6806 		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
6807 			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
6808 		} else {
6809 			switch (platform_link_width) {
6810 			case PCIE_LNK_X32:
6811 				adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
6812 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
6813 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
6814 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
6815 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
6816 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6817 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6818 				break;
6819 			case PCIE_LNK_X16:
6820 				adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
6821 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
6822 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
6823 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
6824 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6825 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6826 				break;
6827 			case PCIE_LNK_X12:
6828 				adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
6829 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
6830 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
6831 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6832 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6833 				break;
6834 			case PCIE_LNK_X8:
6835 				adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
6836 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
6837 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6838 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6839 				break;
6840 			case PCIE_LNK_X4:
6841 				adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
6842 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6843 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6844 				break;
6845 			case PCIE_LNK_X2:
6846 				adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6847 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6848 				break;
6849 			case PCIE_LNK_X1:
6850 				adev->pm.pcie_mlw_mask |= CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
6851 				break;
6852 			default:
6853 				break;
6854 			}
6855 		}
6856 	}
6857 }
6858 
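/*
 * Both masks can be preset from the kernel command line, which makes the
 * probe-time detection above a fallback. For example (the mask values
 * follow the CAIL_*_LINK_* defines; the ones shown are illustrative, not
 * recommendations):
 *
 *   amdgpu.pcie_gen_cap=0x70007 amdgpu.pcie_lane_cap=0xf000f
 *
 * would constrain both the platform and ASIC halves of the gen and lane
 * masks without consulting the PCIe hierarchy.
 */
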
6859 /**
6860  * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
6861  *
6862  * @adev: amdgpu_device pointer
6863  * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
6864  *
6865  * Return true if @peer_adev can access (DMA) @adev through the PCIe
6866  * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
6867  * @peer_adev.
6868  */
6869 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
6870 				      struct amdgpu_device *peer_adev)
6871 {
6872 #ifdef CONFIG_HSA_AMD_P2P
6873 	bool p2p_access =
6874 		!adev->gmc.xgmi.connected_to_cpu &&
6875 		!(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
6876 	if (!p2p_access)
6877 		dev_info(adev->dev, "PCIe P2P access from peer device %s is not supported by the chipset\n",
6878 			pci_name(peer_adev->pdev));
6879 
6880 	bool is_large_bar = adev->gmc.visible_vram_size &&
6881 		adev->gmc.real_vram_size == adev->gmc.visible_vram_size;
6882 	bool p2p_addressable = amdgpu_device_check_iommu_remap(peer_adev);
6883 
6884 	if (!p2p_addressable) {
6885 		uint64_t address_mask = peer_adev->dev->dma_mask ?
6886 			~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
6887 		resource_size_t aper_limit =
6888 			adev->gmc.aper_base + adev->gmc.aper_size - 1;
6889 
6890 		p2p_addressable = !(adev->gmc.aper_base & address_mask ||
6891 				     aper_limit & address_mask);
6892 	}
6893 	return pcie_p2p && is_large_bar && p2p_access && p2p_addressable;
6894 #else
6895 	return false;
6896 #endif
6897 }
6898 
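/*
 * Worked example for the addressability fallback above (made-up values):
 * with a peer whose dma_mask is DMA_BIT_MASK(44), address_mask becomes
 * ~0xfffffffffff, i.e. bits 44 and up. A BAR at aper_base 0x380000000000
 * has bits 44 and 45 set, so aper_base & address_mask is non-zero and P2P
 * is rejected unless amdgpu_device_check_iommu_remap() showed that the
 * IOMMU remaps the BAR into the peer's reachable range.
 */
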
6899 int amdgpu_device_baco_enter(struct amdgpu_device *adev)
6900 {
6901 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
6902 
6903 	if (!amdgpu_device_supports_baco(adev))
6904 		return -ENOTSUPP;
6905 
6906 	if (ras && adev->ras_enabled &&
6907 	    adev->nbio.funcs->enable_doorbell_interrupt)
6908 		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
6909 
6910 	return amdgpu_dpm_baco_enter(adev);
6911 }
6912 
6913 int amdgpu_device_baco_exit(struct amdgpu_device *adev)
6914 {
6915 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
6916 	int ret = 0;
6917 
6918 	if (!amdgpu_device_supports_baco(adev))
6919 		return -ENOTSUPP;
6920 
6921 	ret = amdgpu_dpm_baco_exit(adev);
6922 	if (ret)
6923 		return ret;
6924 
6925 	if (ras && adev->ras_enabled &&
6926 	    adev->nbio.funcs->enable_doorbell_interrupt)
6927 		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
6928 
6929 	if (amdgpu_passthrough(adev) && adev->nbio.funcs &&
6930 	    adev->nbio.funcs->clear_doorbell_interrupt)
6931 		adev->nbio.funcs->clear_doorbell_interrupt(adev);
6932 
6933 	return 0;
6934 }
6935 
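/*
 * These helpers are intended to bracket a low-power window, e.g. from the
 * runtime PM path (hedged sketch):
 *
 *   r = amdgpu_device_baco_enter(adev);
 *   if (r)
 *           return r;
 *   // device now sits in BACO: bus alive, chip powered off
 *   r = amdgpu_device_baco_exit(adev);
 *
 * Doorbell interrupts are masked across the window when RAS is enabled so
 * that stray doorbells cannot fire while the chip is down.
 */
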
6936 /**
6937  * amdgpu_pci_error_detected - Called when a PCI error is detected.
6938  * @pdev: PCI device struct
6939  * @state: PCI channel state
6940  *
6941  * Description: Called when a PCI error is detected.
6942  *
6943  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
6944  */
6945 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
6946 {
6947 	struct drm_device *dev = pci_get_drvdata(pdev);
6948 	struct amdgpu_device *adev = drm_to_adev(dev);
6949 	struct amdgpu_hive_info *hive __free(xgmi_put_hive) =
6950 		amdgpu_get_xgmi_hive(adev);
6951 	struct amdgpu_reset_context reset_context;
6952 	struct list_head device_list;
6953 
6954 	dev_info(adev->dev, "PCI error: detected callback!!\n");
6955 
6956 	adev->pci_channel_state = state;
6957 
6958 	switch (state) {
6959 	case pci_channel_io_normal:
6960 		dev_info(adev->dev, "pci_channel_io_normal: state(%d)!!\n", state);
6961 		return PCI_ERS_RESULT_CAN_RECOVER;
6962 	case pci_channel_io_frozen:
6963 		/* Fatal error, prepare for slot reset */
6964 		dev_info(adev->dev, "pci_channel_io_frozen: state(%d)!!\n", state);
6965 		if (hive) {
6966 			/* Hive devices should be able to support FW-based
6967 			 * link reset on other devices; if not, return.
6968 			 */
6969 			if (!amdgpu_dpm_is_link_reset_supported(adev)) {
6970 				dev_warn(adev->dev,
6971 					 "No support for XGMI hive yet...\n");
6972 				return PCI_ERS_RESULT_DISCONNECT;
6973 			}
6974 			/* Set dpc status only if the device is part of a hive.
6975 			 * Non-hive devices should be able to recover after a
6976 			 * link reset.
6977 			 */
6978 			amdgpu_reset_set_dpc_status(adev, true);
6979 
6980 			mutex_lock(&hive->hive_lock);
6981 		}
6982 		memset(&reset_context, 0, sizeof(reset_context));
6983 		INIT_LIST_HEAD(&device_list);
6984 
6985 		amdgpu_device_recovery_prepare(adev, &device_list, hive);
6986 		amdgpu_device_recovery_get_reset_lock(adev, &device_list);
6987 		amdgpu_device_halt_activities(adev, NULL, &reset_context, &device_list,
6988 					      hive, false);
6989 		if (hive)
6990 			mutex_unlock(&hive->hive_lock);
6991 		return PCI_ERS_RESULT_NEED_RESET;
6992 	case pci_channel_io_perm_failure:
6993 		/* Permanent error, prepare for device removal */
6994 		dev_info(adev->dev, "pci_channel_io_perm_failure: state(%d)!!\n", state);
6995 		return PCI_ERS_RESULT_DISCONNECT;
6996 	}
6997 
6998 	return PCI_ERS_RESULT_NEED_RESET;
6999 }
7000 
7001 /**
7002  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
7003  * @pdev: pointer to PCI device
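 *
 * Return: PCI_ERS_RESULT_RECOVERED, since register access already works
 * again when this callback runs.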
7004  */
7005 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
7006 {
7007 	struct drm_device *dev = pci_get_drvdata(pdev);
7008 	struct amdgpu_device *adev = drm_to_adev(dev);
7009 
7010 	dev_info(adev->dev, "PCI error: mmio enabled callback!!\n");
7011 
7012 	/* TODO - dump whatever for debugging purposes */
7013 
	/* This is called only if amdgpu_pci_error_detected() returns
	 * PCI_ERS_RESULT_CAN_RECOVER. Reads/writes to the device still
	 * work, so there is no need to reset the slot.
7017 	 */
7018 
7019 	return PCI_ERS_RESULT_RECOVERED;
7020 }
7021 
7022 /**
7023  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
7024  * @pdev: PCI device struct
7025  *
 * Description: This routine is called by the PCI error recovery
 * code after the PCI slot has been reset, just before normal
 * operation should resume.
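 *
 * Return: PCI_ERS_RESULT_RECOVERED if recovery succeeded,
 * PCI_ERS_RESULT_DISCONNECT otherwise.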
7029  */
7030 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
7031 {
7032 	struct drm_device *dev = pci_get_drvdata(pdev);
7033 	struct amdgpu_device *adev = drm_to_adev(dev);
7034 	struct amdgpu_reset_context reset_context;
7035 	struct amdgpu_device *tmp_adev;
7036 	struct amdgpu_hive_info *hive;
7037 	struct list_head device_list;
7038 	struct pci_dev *link_dev;
7039 	int r = 0, i, timeout;
7040 	u32 memsize;
7041 	u16 status;
7042 
7043 	dev_info(adev->dev, "PCI error: slot reset callback!!\n");
7044 
7045 	memset(&reset_context, 0, sizeof(reset_context));
7046 
7047 	if (adev->pcie_reset_ctx.swus)
7048 		link_dev = adev->pcie_reset_ctx.swus;
7049 	else
7050 		link_dev = adev->pdev;
	/* Wait for the ASIC to come out of reset, timeout = 10s */
7052 	timeout = 10000;
7053 	do {
7054 		usleep_range(10000, 10500);
7055 		r = pci_read_config_word(link_dev, PCI_VENDOR_ID, &status);
7056 		timeout -= 10;
7057 	} while (timeout > 0 && (status != PCI_VENDOR_ID_ATI) &&
7058 		 (status != PCI_VENDOR_ID_AMD));
7059 
7060 	if ((status != PCI_VENDOR_ID_ATI) && (status != PCI_VENDOR_ID_AMD)) {
7061 		r = -ETIME;
7062 		goto out;
7063 	}
7064 
7065 	amdgpu_device_load_switch_state(adev);
7066 	/* Restore PCI confspace */
7067 	amdgpu_device_load_pci_state(pdev);
7068 
	/* confirm the ASIC came out of reset */
7070 	for (i = 0; i < adev->usec_timeout; i++) {
7071 		memsize = amdgpu_asic_get_config_memsize(adev);
7072 
7073 		if (memsize != 0xffffffff)
7074 			break;
7075 		udelay(1);
7076 	}
7077 	if (memsize == 0xffffffff) {
7078 		r = -ETIME;
7079 		goto out;
7080 	}
7081 
7082 	reset_context.method = AMD_RESET_METHOD_NONE;
7083 	reset_context.reset_req_dev = adev;
7084 	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
7085 	set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
7086 	INIT_LIST_HEAD(&device_list);
7087 
7088 	hive = amdgpu_get_xgmi_hive(adev);
7089 	if (hive) {
7090 		mutex_lock(&hive->hive_lock);
7091 		reset_context.hive = hive;
7092 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
7093 			tmp_adev->pcie_reset_ctx.in_link_reset = true;
7094 			list_add_tail(&tmp_adev->reset_list, &device_list);
7095 		}
7096 	} else {
7097 		set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
7098 		list_add_tail(&adev->reset_list, &device_list);
7099 	}
7100 
7101 	r = amdgpu_device_asic_reset(adev, &device_list, &reset_context);
7102 out:
7103 	if (!r) {
7104 		if (amdgpu_device_cache_pci_state(adev->pdev))
7105 			pci_restore_state(adev->pdev);
7106 		dev_info(adev->dev, "PCIe error recovery succeeded\n");
7107 	} else {
7108 		dev_err(adev->dev, "PCIe error recovery failed, err:%d\n", r);
7109 		if (hive) {
7110 			list_for_each_entry(tmp_adev, &device_list, reset_list)
7111 				amdgpu_device_unset_mp1_state(tmp_adev);
7112 		}
7113 		amdgpu_device_recovery_put_reset_lock(adev, &device_list);
7114 	}
7115 
7116 	if (hive) {
7117 		mutex_unlock(&hive->hive_lock);
7118 		amdgpu_put_xgmi_hive(hive);
7119 	}
7120 
7121 	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
7122 }
7123 
7124 /**
7125  * amdgpu_pci_resume() - resume normal ops after PCI reset
7126  * @pdev: pointer to PCI device
7127  *
 * Called when the error recovery driver tells us that it's
 * OK to resume normal operation.
7130  */
7131 void amdgpu_pci_resume(struct pci_dev *pdev)
7132 {
7133 	struct drm_device *dev = pci_get_drvdata(pdev);
7134 	struct amdgpu_device *adev = drm_to_adev(dev);
7135 	struct list_head device_list;
7136 	struct amdgpu_hive_info *hive = NULL;
7137 	struct amdgpu_device *tmp_adev = NULL;
7138 
7139 	dev_info(adev->dev, "PCI error: resume callback!!\n");
7140 
7141 	/* Only continue execution for the case of pci_channel_io_frozen */
7142 	if (adev->pci_channel_state != pci_channel_io_frozen)
7143 		return;
7144 
7145 	INIT_LIST_HEAD(&device_list);
7146 
7147 	hive = amdgpu_get_xgmi_hive(adev);
7148 	if (hive) {
7149 		mutex_lock(&hive->hive_lock);
7150 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
7151 			tmp_adev->pcie_reset_ctx.in_link_reset = false;
7152 			list_add_tail(&tmp_adev->reset_list, &device_list);
7153 		}
	} else {
		list_add_tail(&adev->reset_list, &device_list);
	}
7156 
7157 	amdgpu_device_sched_resume(&device_list, NULL, NULL);
7158 	amdgpu_device_gpu_resume(adev, &device_list, false);
7159 	amdgpu_device_recovery_put_reset_lock(adev, &device_list);
7160 
7161 	if (hive) {
7162 		mutex_unlock(&hive->hive_lock);
7163 		amdgpu_put_xgmi_hive(hive);
7164 	}
7165 }
7166 
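/**
 * amdgpu_device_cache_switch_state - cache the PCIe switch config space
 * @adev: amdgpu_device pointer
 *
 * If the GPU sits behind an ATI/AMD PCIe switch, save the config space of
 * the switch upstream (SWUS) and downstream (SWDS) ports so they can be
 * restored after a link reset.
 */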
7167 static void amdgpu_device_cache_switch_state(struct amdgpu_device *adev)
7168 {
7169 	struct pci_dev *swus, *swds;
7170 	int r;
7171 
7172 	swds = pci_upstream_bridge(adev->pdev);
7173 	if (!swds || swds->vendor != PCI_VENDOR_ID_ATI ||
7174 	    pci_pcie_type(swds) != PCI_EXP_TYPE_DOWNSTREAM)
7175 		return;
7176 	swus = pci_upstream_bridge(swds);
7177 	if (!swus ||
7178 	    (swus->vendor != PCI_VENDOR_ID_ATI &&
7179 	     swus->vendor != PCI_VENDOR_ID_AMD) ||
7180 	    pci_pcie_type(swus) != PCI_EXP_TYPE_UPSTREAM)
7181 		return;
7182 
7183 	/* If already saved, return */
7184 	if (adev->pcie_reset_ctx.swus)
7185 		return;
	/* Both bridges are ATI/AMD, assume a SWUS/SWDS topology */
7187 	r = pci_save_state(swds);
7188 	if (r)
7189 		return;
7190 	adev->pcie_reset_ctx.swds_pcistate = pci_store_saved_state(swds);
7191 
7192 	r = pci_save_state(swus);
7193 	if (r)
7194 		return;
7195 	adev->pcie_reset_ctx.swus_pcistate = pci_store_saved_state(swus);
7196 
7197 	adev->pcie_reset_ctx.swus = swus;
7198 }
7199 
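/**
 * amdgpu_device_load_switch_state - restore the PCIe switch config space
 * @adev: amdgpu_device pointer
 *
 * Restore the config space of the switch upstream (SWUS) and downstream
 * (SWDS) ports cached by amdgpu_device_cache_switch_state(); no-op when
 * no state was cached.
 */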
7200 static void amdgpu_device_load_switch_state(struct amdgpu_device *adev)
7201 {
7202 	struct pci_dev *pdev;
7203 	int r;
7204 
7205 	if (!adev->pcie_reset_ctx.swds_pcistate ||
7206 	    !adev->pcie_reset_ctx.swus_pcistate)
7207 		return;
7208 
7209 	pdev = adev->pcie_reset_ctx.swus;
7210 	r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swus_pcistate);
7211 	if (!r) {
7212 		pci_restore_state(pdev);
7213 	} else {
7214 		dev_warn(adev->dev, "Failed to load SWUS state, err:%d\n", r);
7215 		return;
7216 	}
7217 
7218 	pdev = pci_upstream_bridge(adev->pdev);
7219 	r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swds_pcistate);
7220 	if (!r)
7221 		pci_restore_state(pdev);
7222 	else
7223 		dev_warn(adev->dev, "Failed to load SWDS state, err:%d\n", r);
7224 }
7225 
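/**
 * amdgpu_device_cache_pci_state - cache the PCI config space of the device
 * @pdev: PCI device struct
 *
 * Save and store the PCI config space so it can be restored after a
 * reset, and cache the state of any ATI/AMD PCIe switch above the device
 * as well.
 *
 * Return: true on success, false on failure or when running as an SR-IOV
 * VF.
 */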
7226 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
7227 {
7228 	struct drm_device *dev = pci_get_drvdata(pdev);
7229 	struct amdgpu_device *adev = drm_to_adev(dev);
7230 	int r;
7231 
7232 	if (amdgpu_sriov_vf(adev))
7233 		return false;
7234 
7235 	r = pci_save_state(pdev);
7236 	if (!r) {
7237 		kfree(adev->pci_state);
7238 
7239 		adev->pci_state = pci_store_saved_state(pdev);
7240 
7241 		if (!adev->pci_state) {
7242 			dev_err(adev->dev, "Failed to store PCI saved state");
7243 			return false;
7244 		}
7245 	} else {
7246 		dev_warn(adev->dev, "Failed to save PCI state, err:%d\n", r);
7247 		return false;
7248 	}
7249 
7250 	amdgpu_device_cache_switch_state(adev);
7251 
7252 	return true;
7253 }
7254 
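/**
 * amdgpu_device_load_pci_state - restore the cached PCI config space
 * @pdev: PCI device struct
 *
 * Return: true if the state cached by amdgpu_device_cache_pci_state() was
 * loaded and restored, false otherwise.
 */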
7255 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
7256 {
7257 	struct drm_device *dev = pci_get_drvdata(pdev);
7258 	struct amdgpu_device *adev = drm_to_adev(dev);
7259 	int r;
7260 
7261 	if (!adev->pci_state)
7262 		return false;
7263 
7264 	r = pci_load_saved_state(pdev, adev->pci_state);
7265 
7266 	if (!r) {
7267 		pci_restore_state(pdev);
7268 	} else {
7269 		dev_warn(adev->dev, "Failed to load PCI state, err:%d\n", r);
7270 		return false;
7271 	}
7272 
7273 	return true;
7274 }
7275 
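/**
 * amdgpu_device_flush_hdp - flush the HDP (Host Data Path) cache
 * @adev: amdgpu_device pointer
 * @ring: ring on which to emit the flush, or NULL for a direct flush
 *
 * Make CPU writes through the BAR visible to the GPU. Not needed on APUs
 * (unless running in passthrough) or when the VRAM is directly connected
 * to the CPU via XGMI. If @ring supports it, the flush is emitted on the
 * ring; without a ring under SR-IOV at runtime the KIQ is tried first;
 * otherwise the HDP registers are written directly.
 */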
7276 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
7277 		struct amdgpu_ring *ring)
7278 {
7279 #ifdef CONFIG_X86_64
7280 	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
7281 		return;
7282 #endif
7283 	if (adev->gmc.xgmi.connected_to_cpu)
7284 		return;
7285 
7286 	if (ring && ring->funcs->emit_hdp_flush) {
7287 		amdgpu_ring_emit_hdp_flush(ring);
7288 		return;
7289 	}
7290 
7291 	if (!ring && amdgpu_sriov_runtime(adev)) {
7292 		if (!amdgpu_kiq_hdp_flush(adev))
7293 			return;
7294 	}
7295 
7296 	amdgpu_hdp_flush(adev, ring);
7297 }
7298 
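/**
 * amdgpu_device_invalidate_hdp - invalidate the HDP (Host Data Path) cache
 * @adev: amdgpu_device pointer
 * @ring: ring on which to emit the invalidation, or NULL
 *
 * Make GPU writes to VRAM visible to subsequent CPU reads through the
 * BAR. Not needed on APUs (unless running in passthrough) or when the
 * VRAM is directly connected to the CPU via XGMI.
 */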
7299 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
7300 		struct amdgpu_ring *ring)
7301 {
7302 #ifdef CONFIG_X86_64
7303 	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
7304 		return;
7305 #endif
7306 	if (adev->gmc.xgmi.connected_to_cpu)
7307 		return;
7308 
7309 	amdgpu_hdp_invalidate(adev, ring);
7310 }
7311 
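/**
 * amdgpu_in_reset - check whether a GPU reset is in progress
 * @adev: amdgpu_device pointer
 *
 * Return: non-zero while the reset domain of the device is in a GPU
 * reset, zero otherwise.
 */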
7312 int amdgpu_in_reset(struct amdgpu_device *adev)
7313 {
7314 	return atomic_read(&adev->reset_domain->in_gpu_reset);
7315 }
7316 
7317 /**
7318  * amdgpu_device_halt() - bring hardware to some kind of halt state
7319  *
7320  * @adev: amdgpu_device pointer
7321  *
 * Bring the hardware to some kind of halt state so that no one can touch
 * it any more. This helps to preserve the error context after an error
 * has occurred. Compared to a simple hang, the system will stay stable at
 * least for SSH access, so it should then be trivial to inspect the
 * hardware state and see what is going on. Implemented as follows:
 *
 * 1. drm_dev_unplug() makes the device inaccessible to user space
 *    (IOCTLs, etc.), clears all CPU mappings to the device and disallows
 *    remappings through page faults
 * 2. amdgpu_irq_disable_all() disables all interrupts
 * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
 * 4. set adev->no_hw_access to avoid potential crashes after step 5
 * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
 * 6. pci_disable_device() and pci_wait_for_pending_transaction()
 *    flush any in-flight DMA operations
7336  */
7337 void amdgpu_device_halt(struct amdgpu_device *adev)
7338 {
7339 	struct pci_dev *pdev = adev->pdev;
7340 	struct drm_device *ddev = adev_to_drm(adev);
7341 
7342 	amdgpu_xcp_dev_unplug(adev);
7343 	drm_dev_unplug(ddev);
7344 
7345 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
7346 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
7347 
7348 	amdgpu_irq_disable_all(adev);
7349 
7350 	amdgpu_fence_driver_hw_fini(adev);
7351 
7352 	adev->no_hw_access = true;
7353 
7354 	amdgpu_device_unmap_mmio(adev);
7355 
7356 	pci_disable_device(pdev);
7357 	pci_wait_for_pending_transaction(pdev);
7358 }
7359 
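/**
 * amdgpu_device_pcie_port_rreg - read a PCIe port register
 * @adev: amdgpu_device pointer
 * @reg: dword offset of the register
 *
 * Read a register in the PCIe port space through the NBIO index/data
 * pair, serialized by the PCIe index lock.
 *
 * Return: the register value.
 */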
7360 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
7361 				u32 reg)
7362 {
7363 	unsigned long flags, address, data;
7364 	u32 r;
7365 
7366 	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
7367 	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
7368 
7369 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
7370 	WREG32(address, reg * 4);
7371 	(void)RREG32(address);
7372 	r = RREG32(data);
7373 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
7374 	return r;
7375 }
7376 
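/**
 * amdgpu_device_pcie_port_wreg - write a PCIe port register
 * @adev: amdgpu_device pointer
 * @reg: dword offset of the register
 * @v: value to write
 *
 * Write a register in the PCIe port space through the NBIO index/data
 * pair, serialized by the PCIe index lock. The read-backs make sure the
 * writes have reached the bus before the lock is dropped.
 */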
7377 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
7378 				u32 reg, u32 v)
7379 {
7380 	unsigned long flags, address, data;
7381 
7382 	address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
7383 	data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
7384 
7385 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
7386 	WREG32(address, reg * 4);
7387 	(void)RREG32(address);
7388 	WREG32(data, v);
7389 	(void)RREG32(data);
7390 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
7391 }
7392 
7393 /**
7394  * amdgpu_device_get_gang - return a reference to the current gang
7395  * @adev: amdgpu_device pointer
7396  *
7397  * Returns: A new reference to the current gang leader.
7398  */
7399 struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev)
7400 {
7401 	struct dma_fence *fence;
7402 
7403 	rcu_read_lock();
7404 	fence = dma_fence_get_rcu_safe(&adev->gang_submit);
7405 	rcu_read_unlock();
7406 	return fence;
7407 }
7408 
7409 /**
7410  * amdgpu_device_switch_gang - switch to a new gang
7411  * @adev: amdgpu_device pointer
7412  * @gang: the gang to switch to
7413  *
7414  * Try to switch to a new gang.
7415  * Returns: NULL if we switched to the new gang or a reference to the current
7416  * gang leader.
7417  */
7418 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
7419 					    struct dma_fence *gang)
7420 {
7421 	struct dma_fence *old = NULL;
7422 
7423 	dma_fence_get(gang);
7424 	do {
7425 		dma_fence_put(old);
7426 		old = amdgpu_device_get_gang(adev);
7427 		if (old == gang)
7428 			break;
7429 
7430 		if (!dma_fence_is_signaled(old)) {
7431 			dma_fence_put(gang);
7432 			return old;
7433 		}
7434 
7435 	} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
7436 			 old, gang) != old);
7437 
7438 	/*
7439 	 * Drop it once for the exchanged reference in adev and once for the
7440 	 * thread local reference acquired in amdgpu_device_get_gang().
7441 	 */
7442 	dma_fence_put(old);
7443 	dma_fence_put(old);
7444 	return NULL;
7445 }
7446 
7447 /**
7448  * amdgpu_device_enforce_isolation - enforce HW isolation
7449  * @adev: the amdgpu device pointer
7450  * @ring: the HW ring the job is supposed to run on
7451  * @job: the job which is about to be pushed to the HW ring
7452  *
7453  * Makes sure that only one client at a time can use the GFX block.
7454  * Returns: The dependency to wait on before the job can be pushed to the HW.
7455  * The function is called multiple times until NULL is returned.
7456  */
7457 struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev,
7458 						  struct amdgpu_ring *ring,
7459 						  struct amdgpu_job *job)
7460 {
7461 	struct amdgpu_isolation *isolation = &adev->isolation[ring->xcp_id];
7462 	struct drm_sched_fence *f = job->base.s_fence;
7463 	struct dma_fence *dep;
7464 	void *owner;
7465 	int r;
7466 
7467 	/*
	 * For now, enforce isolation only for the GFX and compute rings,
	 * since we only need the cleaner shader on those rings.
7470 	 */
7471 	if (ring->funcs->type != AMDGPU_RING_TYPE_GFX &&
7472 	    ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
7473 		return NULL;
7474 
7475 	/*
7476 	 * All submissions where enforce isolation is false are handled as if
	 * they come from a single client. Use ~0l as the owner to distinguish
	 * them from kernel submissions, where the owner is NULL.
7479 	 */
7480 	owner = job->enforce_isolation ? f->owner : (void *)~0l;
7481 
7482 	mutex_lock(&adev->enforce_isolation_mutex);
7483 
7484 	/*
	 * The "spearhead" submission is the first one that changes the
7486 	 * ownership to its client. We always need to wait for it to be
7487 	 * pushed to the HW before proceeding with anything.
7488 	 */
7489 	if (&f->scheduled != isolation->spearhead &&
7490 	    !dma_fence_is_signaled(isolation->spearhead)) {
7491 		dep = isolation->spearhead;
7492 		goto out_grab_ref;
7493 	}
7494 
7495 	if (isolation->owner != owner) {
7497 		/*
7498 		 * Wait for any gang to be assembled before switching to a
7499 		 * different owner or otherwise we could deadlock the
7500 		 * submissions.
7501 		 */
7502 		if (!job->gang_submit) {
7503 			dep = amdgpu_device_get_gang(adev);
7504 			if (!dma_fence_is_signaled(dep))
7505 				goto out_return_dep;
7506 			dma_fence_put(dep);
7507 		}
7508 
7509 		dma_fence_put(isolation->spearhead);
7510 		isolation->spearhead = dma_fence_get(&f->scheduled);
7511 		amdgpu_sync_move(&isolation->active, &isolation->prev);
7512 		trace_amdgpu_isolation(isolation->owner, owner);
7513 		isolation->owner = owner;
7514 	}
7515 
7516 	/*
7517 	 * Specifying the ring here helps to pipeline submissions even when
	 * isolation is enabled. If that is not desired for testing, NULL can be
7519 	 * used instead of the ring to enforce a CPU round trip while switching
7520 	 * between clients.
7521 	 */
7522 	dep = amdgpu_sync_peek_fence(&isolation->prev, ring);
7523 	r = amdgpu_sync_fence(&isolation->active, &f->finished, GFP_NOWAIT);
7524 	if (r)
7525 		dev_warn(adev->dev, "OOM tracking isolation\n");
7526 
7527 out_grab_ref:
7528 	dma_fence_get(dep);
7529 out_return_dep:
7530 	mutex_unlock(&adev->enforce_isolation_mutex);
7531 	return dep;
7532 }
7533 
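/**
 * amdgpu_device_has_display_hardware - check for display hardware
 * @adev: amdgpu_device pointer
 *
 * Older ASICs are matched against fixed lists; everything else relies on
 * IP discovery and the DMU harvest mask.
 *
 * Return: true if the chip has display hardware, false otherwise.
 */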
7534 bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
7535 {
7536 	switch (adev->asic_type) {
7537 #ifdef CONFIG_DRM_AMDGPU_SI
7538 	case CHIP_HAINAN:
7539 #endif
7540 	case CHIP_TOPAZ:
7541 		/* chips with no display hardware */
7542 		return false;
7543 #ifdef CONFIG_DRM_AMDGPU_SI
7544 	case CHIP_TAHITI:
7545 	case CHIP_PITCAIRN:
7546 	case CHIP_VERDE:
7547 	case CHIP_OLAND:
7548 #endif
7549 #ifdef CONFIG_DRM_AMDGPU_CIK
7550 	case CHIP_BONAIRE:
7551 	case CHIP_HAWAII:
7552 	case CHIP_KAVERI:
7553 	case CHIP_KABINI:
7554 	case CHIP_MULLINS:
7555 #endif
7556 	case CHIP_TONGA:
7557 	case CHIP_FIJI:
7558 	case CHIP_POLARIS10:
7559 	case CHIP_POLARIS11:
7560 	case CHIP_POLARIS12:
7561 	case CHIP_VEGAM:
7562 	case CHIP_CARRIZO:
7563 	case CHIP_STONEY:
7564 		/* chips with display hardware */
7565 		return true;
7566 	default:
7567 		/* IP discovery */
7568 		if (!amdgpu_ip_version(adev, DCE_HWIP, 0) ||
7569 		    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
7570 			return false;
7571 		return true;
7572 	}
7573 }
7574 
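/**
 * amdgpu_device_wait_on_rreg - poll a register until it reaches a value
 * @adev: amdgpu_device pointer
 * @inst: instance number, only used in the warning message
 * @reg_addr: offset of the register to poll
 * @reg_name: name of the register, only used in the warning message
 * @expected_value: value to wait for
 * @mask: mask applied to the register value before the comparison
 *
 * Poll @reg_addr until (value & @mask) == @expected_value. The timeout is
 * restarted whenever the register value changes, so only a register that
 * has gone completely quiet can time out. A typical call might look like
 * this (register name and values are purely illustrative):
 *
 *	if (amdgpu_device_wait_on_rreg(adev, 0, reg_offset, "SOME_STATUS",
 *				       SOME_READY_VALUE, SOME_READY_MASK))
 *		return -ETIMEDOUT;
 *
 * Return: 0 on success, -ETIMEDOUT if the value was not reached in time.
 */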
7575 uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
7576 		uint32_t inst, uint32_t reg_addr, char reg_name[],
7577 		uint32_t expected_value, uint32_t mask)
7578 {
7579 	uint32_t ret = 0;
7580 	uint32_t old_ = 0;
7581 	uint32_t tmp_ = RREG32(reg_addr);
7582 	uint32_t loop = adev->usec_timeout;
7583 
7584 	while ((tmp_ & (mask)) != (expected_value)) {
7585 		if (old_ != tmp_) {
7586 			loop = adev->usec_timeout;
7587 			old_ = tmp_;
		} else {
			udelay(1);
		}
7590 		tmp_ = RREG32(reg_addr);
7591 		loop--;
7592 		if (!loop) {
7593 			dev_warn(
7594 				adev->dev,
				"Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n",
7596 				inst, reg_name, (uint32_t)expected_value,
7597 				(uint32_t)(tmp_ & (mask)));
7598 			ret = -ETIMEDOUT;
7599 			break;
7600 		}
7601 	}
7602 	return ret;
7603 }
7604 
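/**
 * amdgpu_get_soft_full_reset_mask - query the reset types a ring supports
 * @ring: the ring to check
 *
 * Return: a bitmask of AMDGPU_RESET_TYPE_FULL and/or
 * AMDGPU_RESET_TYPE_SOFT_RESET, depending on whether GPU recovery is
 * enabled and the ring implements soft recovery.
 */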
7605 ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring)
7606 {
7607 	ssize_t size = 0;
7608 
7609 	if (!ring || !ring->adev)
7610 		return size;
7611 
7612 	if (amdgpu_device_should_recover_gpu(ring->adev))
7613 		size |= AMDGPU_RESET_TYPE_FULL;
7614 
7615 	if (unlikely(!ring->adev->debug_disable_soft_recovery) &&
7616 	    !amdgpu_sriov_vf(ring->adev) && ring->funcs->soft_recovery)
7617 		size |= AMDGPU_RESET_TYPE_SOFT_RESET;
7618 
7619 	return size;
7620 }
7621 
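/**
 * amdgpu_show_reset_mask - format a reset mask for sysfs
 * @buf: sysfs buffer to emit into
 * @supported_reset: bitmask of supported AMDGPU_RESET_TYPE_* values
 *
 * Emit a space-separated list such as "soft queue pipe full", or
 * "unsupported" when the mask is empty.
 *
 * Return: number of bytes written to @buf.
 */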
7622 ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset)
7623 {
7624 	ssize_t size = 0;
7625 
7626 	if (supported_reset == 0) {
7627 		size += sysfs_emit_at(buf, size, "unsupported");
7628 		size += sysfs_emit_at(buf, size, "\n");
		return size;
	}
7632 
7633 	if (supported_reset & AMDGPU_RESET_TYPE_SOFT_RESET)
7634 		size += sysfs_emit_at(buf, size, "soft ");
7635 
7636 	if (supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)
7637 		size += sysfs_emit_at(buf, size, "queue ");
7638 
7639 	if (supported_reset & AMDGPU_RESET_TYPE_PER_PIPE)
7640 		size += sysfs_emit_at(buf, size, "pipe ");
7641 
7642 	if (supported_reset & AMDGPU_RESET_TYPE_FULL)
7643 		size += sysfs_emit_at(buf, size, "full ");
7644 
7645 	size += sysfs_emit_at(buf, size, "\n");
7646 	return size;
7647 }
7648 
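/**
 * amdgpu_device_set_uid - store a unique identifier
 * @uid_info: UID table of the device
 * @type: UID type
 * @inst: instance number
 * @uid: the UID value to store
 *
 * Validate @type and @inst and record @uid, warning once if an existing
 * non-zero UID is overwritten.
 */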
7649 void amdgpu_device_set_uid(struct amdgpu_uid *uid_info,
7650 			   enum amdgpu_uid_type type, uint8_t inst,
7651 			   uint64_t uid)
7652 {
7653 	if (!uid_info)
7654 		return;
7655 
7656 	if (type >= AMDGPU_UID_TYPE_MAX) {
7657 		dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n",
7658 			     type);
7659 		return;
7660 	}
7661 
7662 	if (inst >= AMDGPU_UID_INST_MAX) {
7663 		dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n",
7664 			     inst);
7665 		return;
7666 	}
7667 
7668 	if (uid_info->uid[type][inst] != 0) {
7669 		dev_warn_once(
7670 			uid_info->adev->dev,
7671 			"Overwriting existing UID %llu for type %d instance %d\n",
7672 			uid_info->uid[type][inst], type, inst);
7673 	}
7674 
7675 	uid_info->uid[type][inst] = uid;
7676 }
7677 
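/**
 * amdgpu_device_get_uid - look up a unique identifier
 * @uid_info: UID table of the device
 * @type: UID type
 * @inst: instance number
 *
 * Return: the stored UID, or 0 if @uid_info is NULL or @type/@inst are out
 * of range.
 */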
7678 u64 amdgpu_device_get_uid(struct amdgpu_uid *uid_info,
7679 			  enum amdgpu_uid_type type, uint8_t inst)
7680 {
7681 	if (!uid_info)
7682 		return 0;
7683 
7684 	if (type >= AMDGPU_UID_TYPE_MAX) {
7685 		dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n",
7686 			     type);
7687 		return 0;
7688 	}
7689 
7690 	if (inst >= AMDGPU_UID_INST_MAX) {
7691 		dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n",
7692 			     inst);
7693 		return 0;
7694 	}
7695 
7696 	return uid_info->uid[type][inst];
7697 }
7698