xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c (revision 47776ac1e3f4a2aefcf7fe7c7e4a11151b676222)
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/aperture.h>
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/pci-p2pdma.h>
#include <linux/apple-gmux.h>
#include <linux/nospec.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_client_event.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/device.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "nv.h"
#include "bif/bif_4_1_d.h"
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_ras_mgr.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"
#include "amdgpu_virt.h"
#include "amdgpu_dev_coredump.h"

#include <linux/suspend.h>
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>

#if IS_ENABLED(CONFIG_X86)
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
#endif

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/cyan_skillfish_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000
#define AMDGPU_MAX_RETRY_LIMIT		2
#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
#define AMDGPU_PCIE_INDEX_FALLBACK (0x38 >> 2)
#define AMDGPU_PCIE_INDEX_HI_FALLBACK (0x44 >> 2)
#define AMDGPU_PCIE_DATA_FALLBACK (0x3C >> 2)

#define AMDGPU_VBIOS_SKIP (1U << 0)
#define AMDGPU_VBIOS_OPTIONAL (1U << 1)

static const struct drm_driver amdgpu_kms_driver;

const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGAM",
	"VEGA10",
	"VEGA12",
	"VEGA20",
	"RAVEN",
	"ARCTURUS",
	"RENOIR",
	"ALDEBARAN",
	"NAVI10",
	"CYAN_SKILLFISH",
	"NAVI14",
	"NAVI12",
	"SIENNA_CICHLID",
	"NAVY_FLOUNDER",
	"VANGOGH",
	"DIMGREY_CAVEFISH",
	"BEIGE_GOBY",
	"YELLOW_CARP",
	"IP DISCOVERY",
	"LAST",
};

#define AMDGPU_IP_BLK_MASK_ALL GENMASK(AMD_IP_BLOCK_TYPE_NUM - 1, 0)
/*
 * Default init level where all blocks are expected to be initialized. This is
 * the level of initialization expected by default and also after a full reset
 * of the device.
 */
struct amdgpu_init_level amdgpu_init_default = {
	.level = AMDGPU_INIT_LEVEL_DEFAULT,
	.hwini_ip_block_mask = AMDGPU_IP_BLK_MASK_ALL,
};

struct amdgpu_init_level amdgpu_init_recovery = {
	.level = AMDGPU_INIT_LEVEL_RESET_RECOVERY,
	.hwini_ip_block_mask = AMDGPU_IP_BLK_MASK_ALL,
};

/*
 * Minimal blocks needed to be initialized before an XGMI hive can be reset.
 * This is used for cases like reset on initialization where the entire hive
 * needs to be reset before first use.
 */
struct amdgpu_init_level amdgpu_init_minimal_xgmi = {
	.level = AMDGPU_INIT_LEVEL_MINIMAL_XGMI,
	.hwini_ip_block_mask =
		BIT(AMD_IP_BLOCK_TYPE_GMC) | BIT(AMD_IP_BLOCK_TYPE_SMC) |
		BIT(AMD_IP_BLOCK_TYPE_COMMON) | BIT(AMD_IP_BLOCK_TYPE_IH) |
		BIT(AMD_IP_BLOCK_TYPE_PSP)
};

static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev);
static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev);
static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev);

static void amdgpu_device_load_switch_state(struct amdgpu_device *adev);

static inline bool amdgpu_ip_member_of_hwini(struct amdgpu_device *adev,
					     enum amd_ip_block_type block)
{
	return (adev->init_lvl->hwini_ip_block_mask & (1U << block)) != 0;
}

void amdgpu_set_init_level(struct amdgpu_device *adev,
			   enum amdgpu_init_lvl_id lvl)
{
	switch (lvl) {
	case AMDGPU_INIT_LEVEL_MINIMAL_XGMI:
		adev->init_lvl = &amdgpu_init_minimal_xgmi;
		break;
	case AMDGPU_INIT_LEVEL_RESET_RECOVERY:
		adev->init_lvl = &amdgpu_init_recovery;
		break;
	case AMDGPU_INIT_LEVEL_DEFAULT:
		fallthrough;
	default:
		adev->init_lvl = &amdgpu_init_default;
		break;
	}
}
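
/*
 * Illustrative example (not from the original source): under the minimal
 * XGMI init level only the blocks named in amdgpu_init_minimal_xgmi's mask
 * pass the hwini check, so hw init runs for e.g. PSP but is skipped for GFX:
 *
 *	amdgpu_set_init_level(adev, AMDGPU_INIT_LEVEL_MINIMAL_XGMI);
 *	amdgpu_ip_member_of_hwini(adev, AMD_IP_BLOCK_TYPE_PSP); // true
 *	amdgpu_ip_member_of_hwini(adev, AMD_IP_BLOCK_TYPE_GFX); // false
 */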

static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev);
static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
				     void *data);

/**
 * DOC: pcie_replay_count
 *
 * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs).
 * The file pcie_replay_count is used for this and returns the total
 * number of replays as a sum of the NAKs generated and NAKs received.
 */
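
/*
 * Example (card index is hypothetical; the attribute is created in the
 * device's sysfs directory when the NBIO block supports replay counting):
 *
 *     $ cat /sys/class/drm/card0/device/pcie_replay_count
 *     0
 */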

static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);

	return sysfs_emit(buf, "%llu\n", cnt);
}

static DEVICE_ATTR(pcie_replay_count, 0444,
		amdgpu_device_get_pcie_replay_count, NULL);

static int amdgpu_device_attr_sysfs_init(struct amdgpu_device *adev)
{
	int ret = 0;

	if (amdgpu_nbio_is_replay_cnt_supported(adev))
		ret = sysfs_create_file(&adev->dev->kobj,
					&dev_attr_pcie_replay_count.attr);

	return ret;
}

static void amdgpu_device_attr_sysfs_fini(struct amdgpu_device *adev)
{
	if (amdgpu_nbio_is_replay_cnt_supported(adev))
		sysfs_remove_file(&adev->dev->kobj,
				  &dev_attr_pcie_replay_count.attr);
}

static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj,
					  const struct bin_attribute *attr, char *buf,
					  loff_t ppos, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t bytes_read;

	switch (ppos) {
	case AMDGPU_SYS_REG_STATE_XGMI:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_XGMI, buf, count);
		break;
	case AMDGPU_SYS_REG_STATE_WAFL:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_WAFL, buf, count);
		break;
	case AMDGPU_SYS_REG_STATE_PCIE:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_PCIE, buf, count);
		break;
	case AMDGPU_SYS_REG_STATE_USR:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_USR, buf, count);
		break;
	case AMDGPU_SYS_REG_STATE_USR_1:
		bytes_read = amdgpu_asic_get_reg_state(
			adev, AMDGPU_REG_STATE_TYPE_USR_1, buf, count);
		break;
	default:
		return -EINVAL;
	}

	return bytes_read;
}

static const BIN_ATTR(reg_state, 0444, amdgpu_sysfs_reg_state_get, NULL,
		      AMDGPU_SYS_REG_STATE_END);

int amdgpu_reg_state_sysfs_init(struct amdgpu_device *adev)
{
	int ret;

	if (!amdgpu_asic_get_reg_state_supported(adev))
		return 0;

	ret = sysfs_create_bin_file(&adev->dev->kobj, &bin_attr_reg_state);

	return ret;
}

void amdgpu_reg_state_sysfs_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_asic_get_reg_state_supported(adev))
		return;
	sysfs_remove_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
}

/**
 * DOC: board_info
 *
 * The amdgpu driver provides a sysfs API for giving board related information.
 * It provides the form factor information in the format
 *
 *   type : form factor
 *
 * Possible form factor values
 *
 * - "cem"		- PCIE CEM card
 * - "oam"		- Open Compute Accelerator Module
 * - "unknown"	- Not known
 *
 */
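
/*
 * Example (hypothetical output; the attribute is hidden on APUs, see
 * amdgpu_board_attrs_is_visible() below):
 *
 *     $ cat /sys/class/drm/card0/device/board_info
 *     type : oam
 */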

static ssize_t amdgpu_device_get_board_info(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amdgpu_pkg_type pkg_type = AMDGPU_PKG_TYPE_CEM;
	const char *pkg;

	if (adev->smuio.funcs && adev->smuio.funcs->get_pkg_type)
		pkg_type = adev->smuio.funcs->get_pkg_type(adev);

	switch (pkg_type) {
	case AMDGPU_PKG_TYPE_CEM:
		pkg = "cem";
		break;
	case AMDGPU_PKG_TYPE_OAM:
		pkg = "oam";
		break;
	default:
		pkg = "unknown";
		break;
	}

	return sysfs_emit(buf, "%s : %s\n", "type", pkg);
}

static DEVICE_ATTR(board_info, 0444, amdgpu_device_get_board_info, NULL);

static struct attribute *amdgpu_board_attrs[] = {
	&dev_attr_board_info.attr,
	NULL,
};

static umode_t amdgpu_board_attrs_is_visible(struct kobject *kobj,
					     struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (adev->flags & AMD_IS_APU)
		return 0;

	return attr->mode;
}

static const struct attribute_group amdgpu_board_attrs_group = {
	.attrs = amdgpu_board_attrs,
	.is_visible = amdgpu_board_attrs_is_visible
};

/**
 * DOC: uma/carveout_options
 *
 * This is a read-only file that lists all available UMA allocation
 * options and their corresponding indices. Example output::
 *
 *     $ cat uma/carveout_options
 *     0: Minimum (512 MB)
 *     1:  (1 GB)
 *     2:  (2 GB)
 *     3:  (4 GB)
 *     4:  (6 GB)
 *     5:  (8 GB)
 *     6:  (12 GB)
 *     7: Medium (16 GB)
 *     8:  (24 GB)
 *     9: High (32 GB)
 */
static ssize_t carveout_options_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct amdgpu_uma_carveout_info *uma_info = &adev->uma_info;
	uint32_t memory_carved;
	ssize_t size = 0;

	if (!uma_info || !uma_info->num_entries)
		return -ENODEV;

	for (int i = 0; i < uma_info->num_entries; i++) {
		memory_carved = uma_info->entries[i].memory_carved_mb;
		if (memory_carved >= SZ_1G/SZ_1M) {
			size += sysfs_emit_at(buf, size, "%d: %s (%u GB)\n",
					      i,
					      uma_info->entries[i].name,
					      memory_carved >> 10);
		} else {
			size += sysfs_emit_at(buf, size, "%d: %s (%u MB)\n",
					      i,
					      uma_info->entries[i].name,
					      memory_carved);
		}
	}

	return size;
}
static DEVICE_ATTR_RO(carveout_options);

/**
 * DOC: uma/carveout
 *
 * This file is both readable and writable. When read, it shows the
 * index of the current setting. Writing a valid index to this file
 * allows users to change the UMA carveout size to the selected option
 * on the next boot.
 *
 * The available options and their corresponding indices can be read
 * from the uma/carveout_options file.
 */
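
/*
 * Example (hypothetical values; the write only succeeds for an option
 * carrying the Custom or Auto flag, and takes effect on the next boot):
 *
 *     $ cat uma/carveout
 *     0
 *     $ echo 7 > uma/carveout
 */
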
static ssize_t carveout_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%u\n", adev->uma_info.uma_option_index);
}

static ssize_t carveout_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct amdgpu_uma_carveout_info *uma_info = &adev->uma_info;
	struct amdgpu_uma_carveout_option *opt;
	unsigned long val;
	uint8_t flags;
	int r;

	r = kstrtoul(buf, 10, &val);
	if (r)
		return r;

	if (val >= uma_info->num_entries)
		return -EINVAL;

	val = array_index_nospec(val, uma_info->num_entries);
	opt = &uma_info->entries[val];

	if (!(opt->flags & AMDGPU_UMA_FLAG_AUTO) &&
	    !(opt->flags & AMDGPU_UMA_FLAG_CUSTOM)) {
		drm_err_once(ddev, "Option %lu not supported due to lack of Custom/Auto flag", val);
		return -EINVAL;
	}

	flags = opt->flags;
	flags &= ~((flags & AMDGPU_UMA_FLAG_AUTO) >> 1);

	guard(mutex)(&uma_info->update_lock);

	r = amdgpu_acpi_set_uma_allocation_size(adev, val, flags);
	if (r)
		return r;

	uma_info->uma_option_index = val;

	return count;
}
static DEVICE_ATTR_RW(carveout);

static struct attribute *amdgpu_uma_attrs[] = {
	&dev_attr_carveout.attr,
	&dev_attr_carveout_options.attr,
	NULL
};

const struct attribute_group amdgpu_uma_attr_group = {
	.name = "uma",
	.attrs = amdgpu_uma_attrs
};

static void amdgpu_uma_sysfs_init(struct amdgpu_device *adev)
{
	int rc;

	if (!(adev->flags & AMD_IS_APU))
		return;

	if (!amdgpu_acpi_is_set_uma_allocation_size_supported())
		return;

	rc = amdgpu_atomfirmware_get_uma_carveout_info(adev, &adev->uma_info);
	if (rc) {
		drm_dbg(adev_to_drm(adev),
			"Failed to parse UMA carveout info from VBIOS: %d\n", rc);
		goto out_info;
	}

	mutex_init(&adev->uma_info.update_lock);

	rc = devm_device_add_group(adev->dev, &amdgpu_uma_attr_group);
	if (rc) {
		drm_dbg(adev_to_drm(adev), "Failed to add UMA carveout sysfs interfaces %d\n", rc);
		goto out_attr;
	}

	return;

out_attr:
	mutex_destroy(&adev->uma_info.update_lock);
out_info:
	return;
}

static void amdgpu_uma_sysfs_fini(struct amdgpu_device *adev)
{
	struct amdgpu_uma_carveout_info *uma_info = &adev->uma_info;

	if (!amdgpu_acpi_is_set_uma_allocation_size_supported())
		return;

	mutex_destroy(&uma_info->update_lock);
	uma_info->num_entries = 0;
}

static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);

/**
 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 *
 * @adev: amdgpu device pointer
 *
 * Returns true if the device is a dGPU with ATPX power control,
 * otherwise return false.
 */
bool amdgpu_device_supports_px(struct amdgpu_device *adev)
{
	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
		return true;
	return false;
}

/**
 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 *
 * @adev: amdgpu device pointer
 *
 * Returns true if the device is a dGPU with ACPI power control,
 * otherwise return false.
 */
bool amdgpu_device_supports_boco(struct amdgpu_device *adev)
{
	if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
		return false;

	if (adev->has_pr3 ||
	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
		return true;
	return false;
}

/**
 * amdgpu_device_supports_baco - Does the device support BACO
 *
 * @adev: amdgpu device pointer
 *
 * Return:
 * 1 if the device supports BACO;
 * 3 if the device supports MACO (only works if BACO is supported);
 * otherwise return 0.
 */
int amdgpu_device_supports_baco(struct amdgpu_device *adev)
{
	return amdgpu_asic_supports_baco(adev);
}

void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
{
	int bamaco_support;

	adev->pm.rpm_mode = AMDGPU_RUNPM_NONE;
	bamaco_support = amdgpu_device_supports_baco(adev);

	switch (amdgpu_runtime_pm) {
	case 2:
		if (bamaco_support & MACO_SUPPORT) {
			adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO;
			dev_info(adev->dev, "Forcing BAMACO for runtime pm\n");
		} else if (bamaco_support == BACO_SUPPORT) {
			adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
			dev_info(adev->dev, "Requested mode BAMACO not available, falling back to BACO\n");
		}
		break;
	case 1:
		if (bamaco_support & BACO_SUPPORT) {
			adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
			dev_info(adev->dev, "Forcing BACO for runtime pm\n");
		}
		break;
	case -1:
	case -2:
		if (amdgpu_device_supports_px(adev)) {
			/* enable PX as runtime mode */
			adev->pm.rpm_mode = AMDGPU_RUNPM_PX;
			dev_info(adev->dev, "Using ATPX for runtime pm\n");
		} else if (amdgpu_device_supports_boco(adev)) {
			/* enable boco as runtime mode */
			adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO;
			dev_info(adev->dev, "Using BOCO for runtime pm\n");
		} else {
			if (!bamaco_support)
				goto no_runtime_pm;

			switch (adev->asic_type) {
			case CHIP_VEGA20:
			case CHIP_ARCTURUS:
				/* BACO is not supported on vega20 and arcturus */
				break;
			case CHIP_VEGA10:
				/* enable BACO as runpm mode if noretry=0 */
				if (!adev->gmc.noretry && !amdgpu_passthrough(adev))
					adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
				break;
			default:
				/* enable BACO as runpm mode on CI+ */
				if (!amdgpu_passthrough(adev))
					adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
				break;
			}

			if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) {
				if (bamaco_support & MACO_SUPPORT) {
					adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO;
					dev_info(adev->dev, "Using BAMACO for runtime pm\n");
				} else {
					dev_info(adev->dev, "Using BACO for runtime pm\n");
				}
			}
		}
		break;
	case 0:
		dev_info(adev->dev, "runtime pm is manually disabled\n");
		break;
	default:
		break;
	}

no_runtime_pm:
	if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE)
		dev_info(adev->dev, "Runtime PM not available\n");
}
/**
 * amdgpu_device_supports_smart_shift - Is the device dGPU with
 * smart shift support
 *
 * @adev: amdgpu device pointer
 *
 * Returns true if the device is a dGPU with Smart Shift support,
 * otherwise returns false.
 */
bool amdgpu_device_supports_smart_shift(struct amdgpu_device *adev)
{
	return (amdgpu_device_supports_boco(adev) &&
		amdgpu_acpi_is_power_shift_control_supported());
}

/*
 * VRAM access helper functions
 */

/**
 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
			     void *buf, size_t size, bool write)
{
	unsigned long flags;
	uint32_t hi = ~0, tmp = 0;
	uint32_t *data = buf;
	uint64_t last;
	int idx;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return;

	BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));

	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
	for (last = pos + size; pos < last; pos += 4) {
		tmp = pos >> 31;

		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
		if (tmp != hi) {
			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
			hi = tmp;
		}
		if (write)
			WREG32_NO_KIQ(mmMM_DATA, *data++);
		else
			*data++ = RREG32_NO_KIQ(mmMM_DATA);
	}

	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	drm_dev_exit(idx);
}

/**
 * amdgpu_device_aper_access - access vram by vram aperture
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 *
 * The return value means how many bytes have been transferred.
 */
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
				 void *buf, size_t size, bool write)
{
#ifdef CONFIG_64BIT
	void __iomem *addr;
	size_t count = 0;
	uint64_t last;

	if (!adev->mman.aper_base_kaddr)
		return 0;

	last = min(pos + size, adev->gmc.visible_vram_size);
	if (last > pos) {
		addr = adev->mman.aper_base_kaddr + pos;
		count = last - pos;

		if (write) {
			memcpy_toio(addr, buf, count);
			/* Make sure HDP write cache flush happens without any reordering
			 * after the system memory contents are sent over PCIe device
			 */
			mb();
			amdgpu_device_flush_hdp(adev, NULL);
		} else {
			amdgpu_device_invalidate_hdp(adev, NULL);
			/* Make sure HDP read cache is invalidated before issuing a read
			 * to the PCIe device
			 */
			mb();
			memcpy_fromio(buf, addr, count);
		}

	}

	return count;
#else
	return 0;
#endif
}

/**
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size; @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
			       void *buf, size_t size, bool write)
{
	size_t count;

	/* try using the VRAM aperture to access VRAM first */
	count = amdgpu_device_aper_access(adev, pos, buf, size, write);
	size -= count;
	if (size) {
		/* use MM access for the rest of VRAM */
		pos += count;
		buf += count;
		amdgpu_device_mm_access(adev, pos, buf, size, write);
	}
}
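
/*
 * Usage sketch (illustrative only): read the first 16 bytes of VRAM into a
 * stack buffer. @pos and @size must be 4-byte aligned, as enforced by
 * amdgpu_device_mm_access() above.
 *
 *	u32 data[4];
 *
 *	amdgpu_device_vram_access(adev, 0, data, sizeof(data), false);
 */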

/*
 * register access helper functions.
 */

/* Check if hw access should be skipped because of hotplug or device error */
bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
{
	if (adev->no_hw_access)
		return true;

#ifdef CONFIG_LOCKDEP
	/*
	 * This is a bit complicated to understand, so worth a comment. What we assert
	 * here is that the GPU reset is not running on another thread in parallel.
	 *
	 * For this we trylock the read side of the reset semaphore, if that succeeds
	 * we know that the reset is not running in parallel.
	 *
	 * If the trylock fails we assert that we are either already holding the read
	 * side of the lock or are the reset thread itself and hold the write side of
	 * the lock.
	 */
	if (in_task()) {
		if (down_read_trylock(&adev->reset_domain->sem))
			up_read(&adev->reset_domain->sem);
		else
			lockdep_assert_held(&adev->reset_domain->sem);
	}
#endif
	return false;
}

/**
 * amdgpu_device_get_rev_id - query device rev_id
 *
 * @adev: amdgpu_device pointer
 *
 * Return device rev_id
 */
u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

static uint32_t amdgpu_device_get_vbios_flags(struct amdgpu_device *adev)
{
	if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
		return AMDGPU_VBIOS_SKIP;

	if (hweight32(adev->aid_mask) && amdgpu_passthrough(adev))
		return AMDGPU_VBIOS_OPTIONAL;

	return 0;
}

/**
 * amdgpu_device_asic_init - Wrapper for atom asic_init
 *
 * @adev: amdgpu_device pointer
 *
 * Does any asic specific work and then calls atom asic init.
 */
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
	uint32_t flags;
	bool optional;
	int ret;

	amdgpu_asic_pre_asic_init(adev);
	flags = amdgpu_device_get_vbios_flags(adev);
	optional = !!(flags & (AMDGPU_VBIOS_OPTIONAL | AMDGPU_VBIOS_SKIP));

	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
		amdgpu_psp_wait_for_bootloader(adev);
		if (optional && !adev->bios)
			return 0;

		ret = amdgpu_atomfirmware_asic_init(adev, true);
		return ret;
	} else {
		if (optional && !adev->bios)
			return 0;

		return amdgpu_atom_asic_init(adev->mode_info.atom_context);
	}

	return 0;
}

/**
 * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
				       AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT,
				       &adev->mem_scratch.robj,
				       &adev->mem_scratch.gpu_addr,
				       (void **)&adev->mem_scratch.ptr);
}

/**
 * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			if (adev->family >= AMDGPU_FAMILY_AI)
				tmp |= (or_mask & and_mask);
			else
				tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
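
/*
 * Usage sketch (illustrative only; register offsets and masks are made up):
 * entries are {reg, and_mask, or_mask} triples. An and_mask of 0xffffffff
 * writes or_mask directly, anything else does a read-modify-write of the
 * masked bits.
 *
 *	static const u32 fake_golden_regs[] = {
 *		0x1234, 0xffffffff, 0x00000001,
 *		0x5678, 0x0000000f, 0x00000002,
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, fake_golden_regs,
 *						ARRAY_SIZE(fake_golden_regs));
 */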

/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/**
 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
 */
int amdgpu_device_pci_reset(struct amdgpu_device *adev)
{
	return pci_reset_function(adev->pdev);
}

/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long flags, offset;

	spin_lock_irqsave(&adev->wb.lock, flags);
	offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		spin_unlock_irqrestore(&adev->wb.lock, flags);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		spin_unlock_irqrestore(&adev->wb.lock, flags);
		return -EINVAL;
	}
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	unsigned long flags;

	wb >>= 3;
	spin_lock_irqsave(&adev->wb.lock, flags);
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
	spin_unlock_irqrestore(&adev->wb.lock, flags);
}
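
/*
 * Usage sketch (illustrative only): a caller allocates a 256-bit slot,
 * derives the CPU and GPU addresses from the returned dword offset, and
 * frees the slot when done.
 *
 *	u32 wb;
 *
 *	if (!amdgpu_device_wb_get(adev, &wb)) {
 *		volatile u32 *cpu_addr = &adev->wb.wb[wb];
 *		u64 gpu_addr = adev->wb.gpu_addr + wb * 4;
 *
 *		// hand gpu_addr to the engine, poll *cpu_addr for updates
 *		amdgpu_device_wb_free(adev, wb);
 *	}
 */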

/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
	struct pci_bus *root;
	struct resource *res;
	int max_size, r;
	unsigned int i;
	u16 cmd;

	if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
		return 0;

	/* Bypass for VF */
	if (amdgpu_sriov_vf(adev))
		return 0;

	if (!amdgpu_rebar)
		return 0;

	/* resizing on Dell G5 SE platforms causes problems with runtime pm */
	if ((amdgpu_runtime_pm != 0) &&
	    adev->pdev->vendor == PCI_VENDOR_ID_ATI &&
	    adev->pdev->device == 0x731f &&
	    adev->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
		return 0;

	/* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
	if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
		dev_warn(
			adev->dev,
			"System can't access extended configuration space, please check!!\n");

	/* skip if the bios has already enabled large BAR */
	if (adev->gmc.real_vram_size &&
	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
		return 0;

	/* Check if the root BUS has 64bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
			break;
	}

	/* Trying to resize is pointless without a root hub window above 4GB */
	if (!res)
		return 0;

	/* Limit the BAR size to what is available */
	max_size = pci_rebar_get_max_size(adev->pdev, 0);
	if (max_size < 0)
		return 0;
	rbar_size = min(max_size, rbar_size);

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Tear down doorbell as resizing will release BARs */
	amdgpu_doorbell_fini(adev);

	r = pci_resize_resource(adev->pdev, 0, rbar_size,
				(adev->asic_type >= CHIP_BONAIRE) ? 1 << 5
								  : 1 << 2);
	if (r == -ENOSPC)
		dev_info(adev->dev,
			 "Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		dev_err(adev->dev, "Problem resizing BAR0 (%d).", r);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
}

/*
 * GPU helpers function.
 */
/**
 * amdgpu_device_need_post - check if the hw need post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or if post is needed because a hw reset was performed.
 * Returns true if post is needed or false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
	uint32_t reg, flags;

	if (amdgpu_sriov_vf(adev))
		return false;

	flags = amdgpu_device_get_vbios_flags(adev);
	if (flags & AMDGPU_VBIOS_SKIP)
		return false;
	if ((flags & AMDGPU_VBIOS_OPTIONAL) && !adev->bios)
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
		 * some old smc fw still needs the driver to do vPost, otherwise the gpu hangs.
		 * smc fw versions 22.15 and above don't have this flaw, so we force
		 * vPost to be executed for smc versions below 22.15
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;

			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			release_firmware(adev->pm.fw);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	/* Don't post if we need to reset whole hive on init */
	if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
		return false;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}

/*
 * Check whether seamless boot is supported.
 *
 * So far we only support seamless boot on DCE 3.0 or later.
 * If users report that it works on older ASICs as well, we may
 * loosen this.
 */
bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
{
	switch (amdgpu_seamless) {
	case -1:
		break;
	case 1:
		return true;
	case 0:
		return false;
	default:
		dev_err(adev->dev, "Invalid value for amdgpu.seamless: %d\n",
			amdgpu_seamless);
		return false;
	}

	if (!(adev->flags & AMD_IS_APU))
		return false;

	if (adev->mman.keep_stolen_vga_memory)
		return false;

	return amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0);
}

/*
 * Intel hosts such as Rocket Lake, Alder Lake, Raptor Lake and Sapphire Rapids
 * don't support dynamic speed switching. Until we have confirmation from Intel
 * that a specific host supports it, it's safer that we keep it disabled for all.
 *
 * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
 * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
 */
static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device *adev)
{
#if IS_ENABLED(CONFIG_X86)
	struct cpuinfo_x86 *c = &cpu_data(0);

	/* eGPUs change speeds based on USB4 fabric conditions */
	if (dev_is_removable(adev->dev))
		return true;

	if (c->x86_vendor == X86_VENDOR_INTEL)
		return false;
#endif
	return true;
}

static bool amdgpu_device_aspm_support_quirk(struct amdgpu_device *adev)
{
	/* Enabling ASPM causes random hangs on Tahiti and Oland on Zen4.
	 * It's unclear if this is a platform-specific or GPU-specific issue.
	 * Disable ASPM on SI for the time being.
	 */
	if (adev->family == AMDGPU_FAMILY_SI)
		return true;

#if IS_ENABLED(CONFIG_X86)
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (c->x86_vendor == X86_VENDOR_INTEL) {
		switch (c->x86_model) {
		case VFM_MODEL(INTEL_ALDERLAKE):
		case VFM_MODEL(INTEL_ALDERLAKE_L):
		case VFM_MODEL(INTEL_RAPTORLAKE):
		case VFM_MODEL(INTEL_RAPTORLAKE_P):
		case VFM_MODEL(INTEL_RAPTORLAKE_S):
		case VFM_MODEL(INTEL_TIGERLAKE):
		case VFM_MODEL(INTEL_TIGERLAKE_L):
			return true;
		default:
			return false;
		}
	} else {
		return false;
	}
#else
	return false;
#endif
}

/**
 * amdgpu_device_should_use_aspm - check if the device should program ASPM
 *
 * @adev: amdgpu_device pointer
 *
 * Confirm whether the module parameter and pcie bridge agree that ASPM should
 * be set for this device.
 *
 * Returns true if it should be used or false if not.
 */
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
{
	switch (amdgpu_aspm) {
	case -1:
		break;
	case 0:
		return false;
	case 1:
		return true;
	default:
		return false;
	}
	if (adev->flags & AMD_IS_APU)
		return false;
	if (amdgpu_device_aspm_support_quirk(adev))
		return false;
	return pcie_aspm_enabled(adev->pdev);
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @pdev: PCI device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
		bool state)
{
	struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));

	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines the number of bits in the page table versus the
 * page directory; a page is 4KB so we have 12 bits offset, minimum 9 bits in
 * the page table and the remaining bits are in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory
	 */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = -1;
	}
}

/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = -1;
	}
}

static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
	struct sysinfo si;
	bool is_os_64 = (sizeof(void *) == 8);
	uint64_t total_memory;
	uint64_t dram_size_seven_GB = 0x1B8000000;
	uint64_t dram_size_three_GB = 0xB8000000;

	if (amdgpu_smu_memory_pool_size == 0)
		return;

	if (!is_os_64) {
		dev_warn(adev->dev, "Not 64-bit OS, feature not supported\n");
		goto def_value;
	}
	si_meminfo(&si);
	total_memory = (uint64_t)si.totalram * si.mem_unit;

	if ((amdgpu_smu_memory_pool_size == 1) ||
		(amdgpu_smu_memory_pool_size == 2)) {
		if (total_memory < dram_size_three_GB)
			goto def_value1;
	} else if ((amdgpu_smu_memory_pool_size == 4) ||
		(amdgpu_smu_memory_pool_size == 8)) {
		if (total_memory < dram_size_seven_GB)
			goto def_value1;
	} else {
		dev_warn(adev->dev, "Smu memory pool size not supported\n");
		goto def_value;
	}
	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

	return;

def_value1:
	dev_warn(adev->dev, "Not enough system memory\n");
def_value:
	adev->pm.smu_prv_buffer_size = 0;
}

static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
{
	if (!(adev->flags & AMD_IS_APU) ||
	    adev->asic_type < CHIP_RAVEN)
		return 0;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->pdev->device == 0x15dd)
			adev->apu_flags |= AMD_APU_IS_RAVEN;
		if (adev->pdev->device == 0x15d8)
			adev->apu_flags |= AMD_APU_IS_PICASSO;
		break;
	case CHIP_RENOIR:
		if ((adev->pdev->device == 0x1636) ||
		    (adev->pdev->device == 0x164c))
			adev->apu_flags |= AMD_APU_IS_RENOIR;
		else
			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
		break;
	case CHIP_VANGOGH:
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		break;
	case CHIP_YELLOW_CARP:
		break;
	case CHIP_CYAN_SKILLFISH:
		if ((adev->pdev->device == 0x13FE) ||
		    (adev->pdev->device == 0x143F))
			adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
		break;
	default:
		break;
	}

	return 0;
}

/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
	int i;

	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater than or equal to 32M */
		dev_warn(adev->dev, "gart size (%d) too small\n",
			 amdgpu_gart_size);
		amdgpu_gart_size = -1;
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater than or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
				 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	/* valid range is between 4 and 9 inclusive */
	if (amdgpu_vm_fragment_size != -1 &&
	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
		dev_warn(adev->dev, "valid range is between 4 and 9\n");
		amdgpu_vm_fragment_size = -1;
	}

	if (amdgpu_sched_hw_submission < 2) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = 2;
	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
	}

	if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
		dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
		amdgpu_reset_method = -1;
	}

	amdgpu_device_check_smu_prv_buffer_size(adev);

	amdgpu_device_check_vm_size(adev);

	amdgpu_device_check_block_size(adev);

	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

	for (i = 0; i < MAX_XCP; i++) {
		switch (amdgpu_enforce_isolation) {
		case -1:
		case 0:
		default:
			/* disable */
			adev->enforce_isolation[i] = AMDGPU_ENFORCE_ISOLATION_DISABLE;
			break;
		case 1:
			/* enable */
			adev->enforce_isolation[i] =
				AMDGPU_ENFORCE_ISOLATION_ENABLE;
			break;
		case 2:
			/* enable legacy mode */
			adev->enforce_isolation[i] =
				AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY;
			break;
		case 3:
			/* enable only process isolation without submitting cleaner shader */
			adev->enforce_isolation[i] =
				AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER;
			break;
		}
	}

	return 0;
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver.  Suspends or resumes
 * the asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
					enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	int r;

	if (amdgpu_device_supports_px(drm_to_adev(dev)) &&
	    state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		pci_set_power_state(pdev, PCI_D0);
		amdgpu_device_load_pci_state(pdev);
		r = pci_enable_device(pdev);
		if (r)
			dev_warn(&pdev->dev, "pci_enable_device failed (%d)\n",
				 r);
		amdgpu_device_resume(dev, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		dev_info(&pdev->dev, "switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_prepare(dev);
		amdgpu_device_suspend(dev, true);
		amdgpu_device_cache_pci_state(pdev);
		/* Shut down the device */
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver.  Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return atomic_read(&dev->open_count) == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

/**
 * amdgpu_device_enable_virtual_display - enable virtual display feature
 *
 * @adev: amdgpu_device pointer
 *
 * Enables the virtual display feature if the user has enabled it via
 * the module parameter virtual_display.  This feature provides a virtual
 * display hardware on headless boards or in virtualized environments.
 * This function parses and validates the configuration string specified by
 * the user and configures the virtual display configuration (number of
 * virtual connectors, crtcs, etc.) specified.
 */
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		const char *pci_address_name = pci_name(adev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		dev_info(
			adev->dev,
			"virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			amdgpu_virtual_display, pci_address_name,
			adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}

void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
		adev->mode_info.num_crtc = 1;
		adev->enable_virtual_display = true;
		dev_info(adev->dev, "virtual_display:%d, num_crtc:%d\n",
			 adev->enable_virtual_display,
			 adev->mode_info.num_crtc);
	}
}
1770 
1771 /**
1772  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1773  *
1774  * @adev: amdgpu_device pointer
1775  *
1776  * Parses the asic configuration parameters specified in the gpu info
1777  * firmware and makes them available to the driver for use in configuring
1778  * the asic.
1779  * Returns 0 on success, -EINVAL on failure.
1780  */
1781 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1782 {
1783 	const char *chip_name;
1784 	int err;
1785 	const struct gpu_info_firmware_header_v1_0 *hdr;
1786 
1787 	adev->firmware.gpu_info_fw = NULL;
1788 
1789 	switch (adev->asic_type) {
1790 	default:
1791 		return 0;
1792 	case CHIP_VEGA10:
1793 		chip_name = "vega10";
1794 		break;
1795 	case CHIP_VEGA12:
1796 		chip_name = "vega12";
1797 		break;
1798 	case CHIP_RAVEN:
1799 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1800 			chip_name = "raven2";
1801 		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1802 			chip_name = "picasso";
1803 		else
1804 			chip_name = "raven";
1805 		break;
1806 	case CHIP_ARCTURUS:
1807 		chip_name = "arcturus";
1808 		break;
1809 	case CHIP_NAVI12:
1810 		if (adev->discovery.bin)
1811 			return 0;
1812 		chip_name = "navi12";
1813 		break;
1814 	case CHIP_CYAN_SKILLFISH:
1815 		if (adev->discovery.bin)
1816 			return 0;
1817 		chip_name = "cyan_skillfish";
1818 		break;
1819 	}
1820 
1821 	err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw,
1822 				   AMDGPU_UCODE_OPTIONAL,
1823 				   "amdgpu/%s_gpu_info.bin", chip_name);
1824 	if (err) {
1825 		dev_err(adev->dev,
1826 			"Failed to get gpu_info firmware \"%s_gpu_info.bin\"\n",
1827 			chip_name);
1828 		goto out;
1829 	}
1830 
1831 	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1832 	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1833 
1834 	switch (hdr->version_major) {
1835 	case 1:
1836 	{
1837 		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1838 			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1839 								le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1840 
1841 		/*
1842 		 * Should be dropped when DAL no longer needs it.
1843 		 */
1844 		if (adev->asic_type == CHIP_NAVI12)
1845 			goto parse_soc_bounding_box;
1846 
1847 		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1848 		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1849 		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1850 		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
1851 		adev->gfx.config.max_texture_channel_caches =
1852 			le32_to_cpu(gpu_info_fw->gc_num_tccs);
1853 		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1854 		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1855 		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1856 		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
1857 		adev->gfx.config.double_offchip_lds_buf =
1858 			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1859 		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1860 		adev->gfx.cu_info.max_waves_per_simd =
1861 			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1862 		adev->gfx.cu_info.max_scratch_slots_per_cu =
1863 			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1864 		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1865 		if (hdr->version_minor >= 1) {
1866 			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
1867 				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
1868 									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1869 			adev->gfx.config.num_sc_per_sh =
1870 				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
1871 			adev->gfx.config.num_packer_per_sc =
1872 				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
1873 		}
1874 
1875 parse_soc_bounding_box:
1876 		/*
1877 		 * SoC bounding box info is not integrated into the discovery
1878 		 * table, so we always need to parse it from the gpu info firmware when needed.
1879 		 */
1880 		if (hdr->version_minor == 2) {
1881 			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
1882 				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
1883 									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1884 			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
1885 		}
1886 		break;
1887 	}
1888 	default:
1889 		dev_err(adev->dev,
1890 			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1891 		err = -EINVAL;
1892 		goto out;
1893 	}
1894 out:
1895 	return err;
1896 }
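
/*
 * Rough layout of the gpu_info firmware as consumed above (a sketch
 * derived from the parsing code, not a formal spec; all fields are
 * little-endian in the binary):
 *
 *   offset 0 ............................ gpu_info_firmware_header_v1_0
 *   header.ucode_array_offset_bytes ..... gpu_info_firmware_v1_x payload
 *     v1_0: gc_* gfx configuration fields
 *     v1_1: adds num_sc_per_sh / num_packer_per_sc
 *     v1_2: adds the soc_bounding_box used by DC
 */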
1897 
1898 static void amdgpu_uid_init(struct amdgpu_device *adev)
1899 {
1900 	/* Initialize the UID for the device */
1901 	adev->uid_info = kzalloc_obj(struct amdgpu_uid);
1902 	if (!adev->uid_info) {
1903 		dev_warn(adev->dev, "Failed to allocate memory for UID\n");
1904 		return;
1905 	}
1906 	adev->uid_info->adev = adev;
1907 }
1908 
1909 static void amdgpu_uid_fini(struct amdgpu_device *adev)
1910 {
1911 	/* Free the UID memory */
1912 	kfree(adev->uid_info);
1913 	adev->uid_info = NULL;
1914 }
1915 
1916 /**
1917  * amdgpu_device_ip_early_init - run early init for hardware IPs
1918  *
1919  * @adev: amdgpu_device pointer
1920  *
1921  * Early initialization pass for hardware IPs.  The hardware IPs that make
1922  * up each asic are discovered and each IP's early_init callback is run.  This
1923  * is the first stage in initializing the asic.
1924  * Returns 0 on success, negative error code on failure.
1925  */
1926 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
1927 {
1928 	struct amdgpu_ip_block *ip_block;
1929 	struct pci_dev *parent;
1930 	bool total, skip_bios;
1931 	uint32_t bios_flags;
1932 	int i, r;
1933 
1934 	amdgpu_device_enable_virtual_display(adev);
1935 
1936 	if (amdgpu_sriov_vf(adev)) {
1937 		r = amdgpu_virt_request_full_gpu(adev, true);
1938 		if (r)
1939 			return r;
1940 
1941 		r = amdgpu_virt_init_critical_region(adev);
1942 		if (r)
1943 			return r;
1944 	}
1945 
1946 	switch (adev->asic_type) {
1947 #ifdef CONFIG_DRM_AMDGPU_SI
1948 	case CHIP_VERDE:
1949 	case CHIP_TAHITI:
1950 	case CHIP_PITCAIRN:
1951 	case CHIP_OLAND:
1952 	case CHIP_HAINAN:
1953 		adev->family = AMDGPU_FAMILY_SI;
1954 		r = si_set_ip_blocks(adev);
1955 		if (r)
1956 			return r;
1957 		break;
1958 #endif
1959 #ifdef CONFIG_DRM_AMDGPU_CIK
1960 	case CHIP_BONAIRE:
1961 	case CHIP_HAWAII:
1962 	case CHIP_KAVERI:
1963 	case CHIP_KABINI:
1964 	case CHIP_MULLINS:
1965 		if (adev->flags & AMD_IS_APU)
1966 			adev->family = AMDGPU_FAMILY_KV;
1967 		else
1968 			adev->family = AMDGPU_FAMILY_CI;
1969 
1970 		r = cik_set_ip_blocks(adev);
1971 		if (r)
1972 			return r;
1973 		break;
1974 #endif
1975 	case CHIP_TOPAZ:
1976 	case CHIP_TONGA:
1977 	case CHIP_FIJI:
1978 	case CHIP_POLARIS10:
1979 	case CHIP_POLARIS11:
1980 	case CHIP_POLARIS12:
1981 	case CHIP_VEGAM:
1982 	case CHIP_CARRIZO:
1983 	case CHIP_STONEY:
1984 		if (adev->flags & AMD_IS_APU)
1985 			adev->family = AMDGPU_FAMILY_CZ;
1986 		else
1987 			adev->family = AMDGPU_FAMILY_VI;
1988 
1989 		r = vi_set_ip_blocks(adev);
1990 		if (r)
1991 			return r;
1992 		break;
1993 	default:
1994 		r = amdgpu_discovery_set_ip_blocks(adev);
1995 		if (r) {
1996 			adev->num_ip_blocks = 0;
1997 			return r;
1998 		}
1999 		break;
2000 	}
2001 
2002 	/* Check for IP version 9.4.3 with A0 hardware */
2003 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
2004 	    !amdgpu_device_get_rev_id(adev)) {
2005 		dev_err(adev->dev, "Unsupported A0 hardware\n");
2006 		return -ENODEV;	/* device unsupported - return "no device" error */
2007 	}
2008 
2009 	if (amdgpu_has_atpx() &&
2010 	    (amdgpu_is_atpx_hybrid() ||
2011 	     amdgpu_has_atpx_dgpu_power_cntl()) &&
2012 	    ((adev->flags & AMD_IS_APU) == 0) &&
2013 	    !dev_is_removable(&adev->pdev->dev))
2014 		adev->flags |= AMD_IS_PX;
2015 
2016 	if (!(adev->flags & AMD_IS_APU)) {
2017 		parent = pcie_find_root_port(adev->pdev);
2018 		adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2019 	}
2020 
2021 	adev->pm.pp_feature = amdgpu_pp_feature_mask;
2022 	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2023 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2024 	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2025 		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2026 	if (!amdgpu_device_pcie_dynamic_switching_supported(adev))
2027 		adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
2028 
2029 	adev->virt.is_xgmi_node_migrate_enabled = false;
2030 	if (amdgpu_sriov_vf(adev)) {
2031 		adev->virt.is_xgmi_node_migrate_enabled =
2032 			amdgpu_ip_version((adev), GC_HWIP, 0) == IP_VERSION(9, 4, 4);
2033 	}
2034 
2035 	total = true;
2036 	for (i = 0; i < adev->num_ip_blocks; i++) {
2037 		ip_block = &adev->ip_blocks[i];
2038 
2039 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2040 			dev_warn(adev->dev, "disabled ip block: %d <%s>\n", i,
2041 				 adev->ip_blocks[i].version->funcs->name);
2042 			adev->ip_blocks[i].status.valid = false;
2043 		} else if (ip_block->version->funcs->early_init) {
2044 			r = ip_block->version->funcs->early_init(ip_block);
2045 			if (r == -ENOENT) {
2046 				adev->ip_blocks[i].status.valid = false;
2047 			} else if (r) {
2048 				dev_err(adev->dev,
2049 					"early_init of IP block <%s> failed %d\n",
2050 					adev->ip_blocks[i].version->funcs->name,
2051 					r);
2052 				total = false;
2053 			} else {
2054 				adev->ip_blocks[i].status.valid = true;
2055 			}
2056 		} else {
2057 			adev->ip_blocks[i].status.valid = true;
2058 		}
2059 		/* get the vbios after the asic_funcs are set up */
2060 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2061 			r = amdgpu_device_parse_gpu_info_fw(adev);
2062 			if (r)
2063 				return r;
2064 
2065 			bios_flags = amdgpu_device_get_vbios_flags(adev);
2066 			skip_bios = !!(bios_flags & AMDGPU_VBIOS_SKIP);
2067 			/* Read BIOS */
2068 			if (!skip_bios) {
2069 				bool optional =
2070 					!!(bios_flags & AMDGPU_VBIOS_OPTIONAL);
2071 				if (!amdgpu_get_bios(adev) && !optional)
2072 					return -EINVAL;
2073 
2074 				if (optional && !adev->bios)
2075 					dev_info(
2076 						adev->dev,
2077 						"VBIOS image optional, proceeding without VBIOS image");
2078 
2079 				if (adev->bios) {
2080 					r = amdgpu_atombios_init(adev);
2081 					if (r) {
2082 						dev_err(adev->dev,
2083 							"amdgpu_atombios_init failed\n");
2084 						amdgpu_vf_error_put(
2085 							adev,
2086 							AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL,
2087 							0, 0);
2088 						return r;
2089 					}
2090 				}
2091 			}
2092 
2093 			/* get pf2vf msg info at its earliest time */
2094 			if (amdgpu_sriov_vf(adev))
2095 				amdgpu_virt_init_data_exchange(adev);
2096 
2097 		}
2098 	}
2099 	if (!total)
2100 		return -ENODEV;
2101 
2102 	if (adev->gmc.xgmi.supported)
2103 		amdgpu_xgmi_early_init(adev);
2104 
2105 	if (amdgpu_is_multi_aid(adev))
2106 		amdgpu_uid_init(adev);
2107 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
2108 	if (ip_block && ip_block->status.valid)
2109 		amdgpu_amdkfd_device_probe(adev);
2110 
2111 	adev->cg_flags &= amdgpu_cg_mask;
2112 	adev->pg_flags &= amdgpu_pg_mask;
2113 
2114 	return 0;
2115 }
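
/*
 * Note: amdgpu_ip_block_mask above comes from the amdgpu.ip_block_mask
 * module parameter; bit i enables IP block i in discovery order. For
 * example (illustrative only), booting with
 *
 *   amdgpu.ip_block_mask=0xffffffdf
 *
 * clears bit 5 and leaves the sixth discovered IP block disabled.
 */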
2116 
2117 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2118 {
2119 	int i, r;
2120 
2121 	for (i = 0; i < adev->num_ip_blocks; i++) {
2122 		if (!adev->ip_blocks[i].status.sw)
2123 			continue;
2124 		if (adev->ip_blocks[i].status.hw)
2125 			continue;
2126 		if (!amdgpu_ip_member_of_hwini(
2127 			    adev, adev->ip_blocks[i].version->type))
2128 			continue;
2129 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2130 		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2131 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2132 			r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
2133 			if (r) {
2134 				dev_err(adev->dev,
2135 					"hw_init of IP block <%s> failed %d\n",
2136 					adev->ip_blocks[i].version->funcs->name,
2137 					r);
2138 				return r;
2139 			}
2140 			adev->ip_blocks[i].status.hw = true;
2141 		}
2142 	}
2143 
2144 	return 0;
2145 }
2146 
2147 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2148 {
2149 	int i, r;
2150 
2151 	for (i = 0; i < adev->num_ip_blocks; i++) {
2152 		if (!adev->ip_blocks[i].status.sw)
2153 			continue;
2154 		if (adev->ip_blocks[i].status.hw)
2155 			continue;
2156 		if (!amdgpu_ip_member_of_hwini(
2157 			    adev, adev->ip_blocks[i].version->type))
2158 			continue;
2159 		r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
2160 		if (r) {
2161 			dev_err(adev->dev,
2162 				"hw_init of IP block <%s> failed %d\n",
2163 				adev->ip_blocks[i].version->funcs->name, r);
2164 			return r;
2165 		}
2166 		adev->ip_blocks[i].status.hw = true;
2167 	}
2168 
2169 	return 0;
2170 }
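
/*
 * The two hw_init phases above split bring-up around firmware loading:
 * phase 1 initializes COMMON and IH (plus PSP when running as an SR-IOV
 * VF), amdgpu_device_fw_loading() then loads the microcode, and phase 2
 * initializes the remaining blocks that depend on that firmware.
 */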
2171 
2172 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2173 {
2174 	int r = 0;
2175 	int i;
2176 	uint32_t smu_version;
2177 
2178 	if (adev->asic_type >= CHIP_VEGA10) {
2179 		for (i = 0; i < adev->num_ip_blocks; i++) {
2180 			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2181 				continue;
2182 
2183 			if (!amdgpu_ip_member_of_hwini(adev,
2184 						       AMD_IP_BLOCK_TYPE_PSP))
2185 				break;
2186 
2187 			if (!adev->ip_blocks[i].status.sw)
2188 				continue;
2189 
2190 			/* no need to do the fw loading again if already done */
2191 			if (adev->ip_blocks[i].status.hw)
2192 				break;
2193 
2194 			if (amdgpu_in_reset(adev) || adev->in_suspend) {
2195 				r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
2196 				if (r)
2197 					return r;
2198 			} else {
2199 				r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
2200 				if (r) {
2201 					dev_err(adev->dev,
2202 						"hw_init of IP block <%s> failed %d\n",
2203 						adev->ip_blocks[i]
2204 							.version->funcs->name,
2205 						r);
2206 					return r;
2207 				}
2208 				adev->ip_blocks[i].status.hw = true;
2209 			}
2210 			break;
2211 		}
2212 	}
2213 
2214 	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2215 		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2216 
2217 	return r;
2218 }
2219 
2220 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2221 {
2222 	struct drm_sched_init_args args = {
2223 		.ops = &amdgpu_sched_ops,
2224 		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
2225 		.timeout_wq = adev->reset_domain->wq,
2226 		.dev = adev->dev,
2227 	};
2228 	long timeout;
2229 	int r, i;
2230 
2231 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2232 		struct amdgpu_ring *ring = adev->rings[i];
2233 
2234 		/* No need to set up the GPU scheduler for rings that don't need it */
2235 		if (!ring || ring->no_scheduler)
2236 			continue;
2237 
2238 		switch (ring->funcs->type) {
2239 		case AMDGPU_RING_TYPE_GFX:
2240 			timeout = adev->gfx_timeout;
2241 			break;
2242 		case AMDGPU_RING_TYPE_COMPUTE:
2243 			timeout = adev->compute_timeout;
2244 			break;
2245 		case AMDGPU_RING_TYPE_SDMA:
2246 			timeout = adev->sdma_timeout;
2247 			break;
2248 		default:
2249 			timeout = adev->video_timeout;
2250 			break;
2251 		}
2252 
2253 		args.timeout = timeout;
2254 		args.credit_limit = ring->num_hw_submission;
2255 		args.score = ring->sched_score;
2256 		args.name = ring->name;
2257 
2258 		r = drm_sched_init(&ring->sched, &args);
2259 		if (r) {
2260 			dev_err(adev->dev,
2261 				"Failed to create scheduler on ring %s.\n",
2262 				ring->name);
2263 			return r;
2264 		}
2265 		r = amdgpu_uvd_entity_init(adev, ring);
2266 		if (r) {
2267 			dev_err(adev->dev,
2268 				"Failed to create UVD scheduling entity on ring %s.\n",
2269 				ring->name);
2270 			return r;
2271 		}
2272 		r = amdgpu_vce_entity_init(adev, ring);
2273 		if (r) {
2274 			dev_err(adev->dev,
2275 				"Failed to create VCE scheduling entity on ring %s.\n",
2276 				ring->name);
2277 			return r;
2278 		}
2279 	}
2280 
2281 	if (adev->xcp_mgr)
2282 		amdgpu_xcp_update_partition_sched_list(adev);
2283 
2284 	return 0;
2285 }
2286 
2287 
2288 /**
2289  * amdgpu_device_ip_init - run init for hardware IPs
2290  *
2291  * @adev: amdgpu_device pointer
2292  *
2293  * Main initialization pass for hardware IPs.  The list of all the hardware
2294  * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2295  * are run.  sw_init initializes the software state associated with each IP
2296  * and hw_init initializes the hardware associated with each IP.
2297  * Returns 0 on success, negative error code on failure.
2298  */
2299 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2300 {
2301 	bool init_badpage;
2302 	int i, r;
2303 
2304 	r = amdgpu_ras_init(adev);
2305 	if (r)
2306 		return r;
2307 
2308 	for (i = 0; i < adev->num_ip_blocks; i++) {
2309 		if (!adev->ip_blocks[i].status.valid)
2310 			continue;
2311 		if (adev->ip_blocks[i].version->funcs->sw_init) {
2312 			r = adev->ip_blocks[i].version->funcs->sw_init(&adev->ip_blocks[i]);
2313 			if (r) {
2314 				dev_err(adev->dev,
2315 					"sw_init of IP block <%s> failed %d\n",
2316 					adev->ip_blocks[i].version->funcs->name,
2317 					r);
2318 				goto init_failed;
2319 			}
2320 		}
2321 		adev->ip_blocks[i].status.sw = true;
2322 
2323 		if (!amdgpu_ip_member_of_hwini(
2324 			    adev, adev->ip_blocks[i].version->type))
2325 			continue;
2326 
2327 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2328 			/* need to do common hw init early so everything is set up for gmc */
2329 			r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
2330 			if (r) {
2331 				dev_err(adev->dev, "hw_init %d failed %d\n", i,
2332 					r);
2333 				goto init_failed;
2334 			}
2335 			adev->ip_blocks[i].status.hw = true;
2336 		} else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2337 			/* need to do gmc hw init early so we can allocate gpu mem */
2338 			/* Try to reserve bad pages early */
2339 			if (amdgpu_sriov_vf(adev))
2340 				amdgpu_virt_exchange_data(adev);
2341 
2342 			r = amdgpu_device_mem_scratch_init(adev);
2343 			if (r) {
2344 				dev_err(adev->dev,
2345 					"amdgpu_mem_scratch_init failed %d\n",
2346 					r);
2347 				goto init_failed;
2348 			}
2349 			r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
2350 			if (r) {
2351 				dev_err(adev->dev, "hw_init %d failed %d\n", i,
2352 					r);
2353 				goto init_failed;
2354 			}
2355 			r = amdgpu_device_wb_init(adev);
2356 			if (r) {
2357 				dev_err(adev->dev,
2358 					"amdgpu_device_wb_init failed %d\n", r);
2359 				goto init_failed;
2360 			}
2361 			adev->ip_blocks[i].status.hw = true;
2362 
2363 			/* right after GMC hw init, we create CSA */
2364 			if (adev->gfx.mcbp) {
2365 				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2366 							       AMDGPU_GEM_DOMAIN_VRAM |
2367 							       AMDGPU_GEM_DOMAIN_GTT,
2368 							       AMDGPU_CSA_SIZE);
2369 				if (r) {
2370 					dev_err(adev->dev,
2371 						"allocate CSA failed %d\n", r);
2372 					goto init_failed;
2373 				}
2374 			}
2375 
2376 			r = amdgpu_seq64_init(adev);
2377 			if (r) {
2378 				dev_err(adev->dev, "allocate seq64 failed %d\n",
2379 					r);
2380 				goto init_failed;
2381 			}
2382 		}
2383 	}
2384 
2385 	if (amdgpu_sriov_vf(adev))
2386 		amdgpu_virt_init_data_exchange(adev);
2387 
2388 	r = amdgpu_ib_pool_init(adev);
2389 	if (r) {
2390 		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2391 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2392 		goto init_failed;
2393 	}
2394 
2395 	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2396 	if (r)
2397 		goto init_failed;
2398 
2399 	r = amdgpu_device_ip_hw_init_phase1(adev);
2400 	if (r)
2401 		goto init_failed;
2402 
2403 	r = amdgpu_device_fw_loading(adev);
2404 	if (r)
2405 		goto init_failed;
2406 
2407 	r = amdgpu_device_ip_hw_init_phase2(adev);
2408 	if (r)
2409 		goto init_failed;
2410 
2411 	/*
2412 	 * Retired pages will be loaded from eeprom and reserved here; this
2413 	 * must be called after amdgpu_device_ip_hw_init_phase2, since for
2414 	 * some ASICs the RAS EEPROM code relies on the SMU being fully
2415 	 * functional for I2C communication, which is only true at this point.
2416 	 *
2417 	 * amdgpu_ras_recovery_init may fail, but the caller only cares about
2418 	 * failures caused by a bad gpu situation, which stop the amdgpu init
2419 	 * process accordingly. For other failures it still releases all the
2420 	 * resources and prints an error message rather than returning a
2421 	 * negative value to the upper level.
2422 	 *
2423 	 * Note: theoretically, this should be called before all vram
2424 	 * allocations to protect retired pages from abuse.
2425 	 */
2426 	init_badpage = (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI);
2427 	r = amdgpu_ras_recovery_init(adev, init_badpage);
2428 	if (r)
2429 		goto init_failed;
2430 
2431 	/*
2432 	 * In case of XGMI, grab an extra reset domain reference for this device.
2433 	 */
2434 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
2435 		if (amdgpu_xgmi_add_device(adev) == 0) {
2436 			if (!amdgpu_sriov_vf(adev)) {
2437 				struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2438 
2439 				if (WARN_ON(!hive)) {
2440 					r = -ENOENT;
2441 					goto init_failed;
2442 				}
2443 
2444 				if (!hive->reset_domain ||
2445 				    !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2446 					r = -ENOENT;
2447 					amdgpu_put_xgmi_hive(hive);
2448 					goto init_failed;
2449 				}
2450 
2451 				/* Drop the early temporary reset domain we created for device */
2452 				amdgpu_reset_put_reset_domain(adev->reset_domain);
2453 				adev->reset_domain = hive->reset_domain;
2454 				amdgpu_put_xgmi_hive(hive);
2455 			}
2456 		}
2457 	}
2458 
2459 	r = amdgpu_device_init_schedulers(adev);
2460 	if (r)
2461 		goto init_failed;
2462 
2463 	amdgpu_ttm_set_buffer_funcs_status(adev, true);
2464 
2465 	/* Don't init kfd if whole hive need to be reset during init */
2466 	if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
2467 		amdgpu_amdkfd_device_init(adev);
2468 	}
2469 
2470 	amdgpu_fru_get_product_info(adev);
2471 
2472 	r = amdgpu_cper_init(adev);
2473 
2474 init_failed:
2475 
2476 	return r;
2477 }
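
/*
 * Rough ordering enforced by amdgpu_device_ip_init() above: RAS init,
 * sw_init for all blocks (with COMMON and GMC hw_init pulled forward so
 * GPU memory works), IB pool and ucode BO creation, hw_init phase 1,
 * firmware loading, hw_init phase 2, RAS recovery/bad-page handling,
 * XGMI reset domain setup, then scheduler and KFD initialization.
 */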
2478 
2479 /**
2480  * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2481  *
2482  * @adev: amdgpu_device pointer
2483  *
2484  * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
2485  * this function before a GPU reset.  If the value is retained after a
2486  * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
2487  */
2488 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2489 {
2490 	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2491 }
2492 
2493 /**
2494  * amdgpu_device_check_vram_lost - check if vram is valid
2495  *
2496  * @adev: amdgpu_device pointer
2497  *
2498  * Checks the reset magic value written to the gart pointer in VRAM.
2499  * The driver calls this after a GPU reset to see if the contents of
2500  * VRAM is lost or now.
2501  * VRAM are lost or not.
2502  * Returns true if vram is lost, false if not.
2503 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2504 {
2505 	if (memcmp(adev->gart.ptr, adev->reset_magic,
2506 			AMDGPU_RESET_MAGIC_NUM))
2507 		return true;
2508 
2509 	if (!amdgpu_in_reset(adev))
2510 		return false;
2511 
2512 	/*
2513 	 * For all ASICs with baco/mode1 reset, the VRAM is
2514 	 * always assumed to be lost.
2515 	 */
2516 	switch (amdgpu_asic_reset_method(adev)) {
2517 	case AMD_RESET_METHOD_LEGACY:
2518 	case AMD_RESET_METHOD_LINK:
2519 	case AMD_RESET_METHOD_BACO:
2520 	case AMD_RESET_METHOD_MODE1:
2521 		return true;
2522 	default:
2523 		return false;
2524 	}
2525 }
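
/*
 * The two helpers above are used as a pair around a reset. A minimal
 * usage sketch (hypothetical caller, for illustration only):
 *
 *   amdgpu_device_fill_reset_magic(adev);     // before the reset
 *   ...perform the ASIC reset...
 *   if (amdgpu_device_check_vram_lost(adev))
 *           ...VRAM contents must be restored...
 */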
2526 
2527 /**
2528  * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2529  *
2530  * @adev: amdgpu_device pointer
2531  * @state: clockgating state (gate or ungate)
2532  *
2533  * The list of all the hardware IPs that make up the asic is walked and the
2534  * set_clockgating_state callbacks are run.
2535  * The late initialization pass enables clockgating for hardware IPs;
2536  * the fini or suspend pass disables clockgating for hardware IPs.
2537  * Returns 0 on success, negative error code on failure.
2538  */
2539 
2540 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2541 			       enum amd_clockgating_state state)
2542 {
2543 	int i, j, r;
2544 
2545 	if (amdgpu_emu_mode == 1)
2546 		return 0;
2547 
2548 	for (j = 0; j < adev->num_ip_blocks; j++) {
2549 		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2550 		if (!adev->ip_blocks[i].status.late_initialized)
2551 			continue;
2552 		if (!adev->ip_blocks[i].version)
2553 			continue;
2554 		/* skip CG for GFX, SDMA on S0ix */
2555 		if (adev->in_s0ix &&
2556 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2557 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2558 			continue;
2559 		/* skip CG for VCE/UVD, it's handled specially */
2560 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2561 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2562 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2563 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2564 		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2565 			/* enable clockgating to save power */
2566 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state(&adev->ip_blocks[i],
2567 										     state);
2568 			if (r) {
2569 				dev_err(adev->dev,
2570 					"set_clockgating_state(gate) of IP block <%s> failed %d\n",
2571 					adev->ip_blocks[i].version->funcs->name,
2572 					r);
2573 				return r;
2574 			}
2575 		}
2576 	}
2577 
2578 	return 0;
2579 }
2580 
2581 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2582 			       enum amd_powergating_state state)
2583 {
2584 	int i, j, r;
2585 
2586 	if (amdgpu_emu_mode == 1)
2587 		return 0;
2588 
2589 	for (j = 0; j < adev->num_ip_blocks; j++) {
2590 		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2591 		if (!adev->ip_blocks[i].status.late_initialized)
2592 			continue;
2593 		if (!adev->ip_blocks[i].version)
2594 			continue;
2595 		/* skip PG for GFX, SDMA on S0ix */
2596 		if (adev->in_s0ix &&
2597 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2598 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2599 			continue;
2600 		/* skip PG for VCE/UVD, it's handled specially */
2601 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2602 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2603 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2604 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2605 		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
2606 			/* enable powergating to save power */
2607 			r = adev->ip_blocks[i].version->funcs->set_powergating_state(&adev->ip_blocks[i],
2608 											state);
2609 			if (r) {
2610 				dev_err(adev->dev,
2611 					"set_powergating_state(gate) of IP block <%s> failed %d\n",
2612 					adev->ip_blocks[i].version->funcs->name,
2613 					r);
2614 				return r;
2615 			}
2616 		}
2617 	}
2618 	return 0;
2619 }
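
/*
 * Note on ordering in the two helpers above: gating walks the IP list
 * front to back while ungating walks it back to front, mirroring the
 * usual init/fini ordering of the IP blocks.
 */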
2620 
2621 static int amdgpu_device_enable_mgpu_fan_boost(void)
2622 {
2623 	struct amdgpu_gpu_instance *gpu_ins;
2624 	struct amdgpu_device *adev;
2625 	int i, ret = 0;
2626 
2627 	mutex_lock(&mgpu_info.mutex);
2628 
2629 	/*
2630 	 * MGPU fan boost feature should be enabled
2631 	 * only when there are two or more dGPUs in
2632 	 * the system
2633 	 */
2634 	if (mgpu_info.num_dgpu < 2)
2635 		goto out;
2636 
2637 	for (i = 0; i < mgpu_info.num_dgpu; i++) {
2638 		gpu_ins = &(mgpu_info.gpu_ins[i]);
2639 		adev = gpu_ins->adev;
2640 		if (!(adev->flags & AMD_IS_APU || amdgpu_sriov_multi_vf_mode(adev)) &&
2641 		    !gpu_ins->mgpu_fan_enabled) {
2642 			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2643 			if (ret)
2644 				break;
2645 
2646 			gpu_ins->mgpu_fan_enabled = 1;
2647 		}
2648 	}
2649 
2650 out:
2651 	mutex_unlock(&mgpu_info.mutex);
2652 
2653 	return ret;
2654 }
2655 
2656 /**
2657  * amdgpu_device_ip_late_init - run late init for hardware IPs
2658  *
2659  * @adev: amdgpu_device pointer
2660  *
2661  * Late initialization pass for hardware IPs.  The list of all the hardware
2662  * IPs that make up the asic is walked and the late_init callbacks are run.
2663  * late_init covers any special initialization that an IP requires
2664  * after all of them have been initialized or something that needs to happen
2665  * late in the init process.
2666  * Returns 0 on success, negative error code on failure.
2667  */
2668 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2669 {
2670 	struct amdgpu_gpu_instance *gpu_instance;
2671 	int i = 0, r;
2672 
2673 	for (i = 0; i < adev->num_ip_blocks; i++) {
2674 		if (!adev->ip_blocks[i].status.hw)
2675 			continue;
2676 		if (adev->ip_blocks[i].version->funcs->late_init) {
2677 			r = adev->ip_blocks[i].version->funcs->late_init(&adev->ip_blocks[i]);
2678 			if (r) {
2679 				dev_err(adev->dev,
2680 					"late_init of IP block <%s> failed %d\n",
2681 					adev->ip_blocks[i].version->funcs->name,
2682 					r);
2683 				return r;
2684 			}
2685 		}
2686 		adev->ip_blocks[i].status.late_initialized = true;
2687 	}
2688 
2689 	r = amdgpu_ras_late_init(adev);
2690 	if (r) {
2691 		dev_err(adev->dev, "amdgpu_ras_late_init failed %d", r);
2692 		return r;
2693 	}
2694 
2695 	if (!amdgpu_reset_in_recovery(adev))
2696 		amdgpu_ras_set_error_query_ready(adev, true);
2697 
2698 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2699 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2700 
2701 	amdgpu_device_fill_reset_magic(adev);
2702 
2703 	r = amdgpu_device_enable_mgpu_fan_boost();
2704 	if (r)
2705 		dev_err(adev->dev, "enable mgpu fan boost failed (%d).\n", r);
2706 
2707 	/* For passthrough configuration on arcturus and aldebaran, enable special SBR handling */
2708 	if (amdgpu_passthrough(adev) &&
2709 	    ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
2710 	     adev->asic_type == CHIP_ALDEBARAN))
2711 		amdgpu_dpm_handle_passthrough_sbr(adev, true);
2712 
2713 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
2714 		mutex_lock(&mgpu_info.mutex);
2715 
2716 		/*
2717 		 * Reset the device p-state to low, as it was booted with high.
2718 		 *
2719 		 * This should be performed only after all devices from the same
2720 		 * hive have been initialized.
2721 		 *
2722 		 * However, the number of devices in the hive is not known in
2723 		 * advance; it is counted one by one as the devices initialize.
2724 		 *
2725 		 * So we wait until all XGMI interlinked devices are initialized.
2726 		 * This may add some delay, as those devices may come from
2727 		 * different hives. But that should be OK.
2728 		 */
2729 		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2730 			for (i = 0; i < mgpu_info.num_gpu; i++) {
2731 				gpu_instance = &(mgpu_info.gpu_ins[i]);
2732 				if (gpu_instance->adev->flags & AMD_IS_APU)
2733 					continue;
2734 
2735 				r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2736 						AMDGPU_XGMI_PSTATE_MIN);
2737 				if (r) {
2738 					dev_err(adev->dev,
2739 						"pstate setting failed (%d).\n",
2740 						r);
2741 					break;
2742 				}
2743 			}
2744 		}
2745 
2746 		mutex_unlock(&mgpu_info.mutex);
2747 	}
2748 
2749 	return 0;
2750 }
2751 
2752 static void amdgpu_ip_block_hw_fini(struct amdgpu_ip_block *ip_block)
2753 {
2754 	struct amdgpu_device *adev = ip_block->adev;
2755 	int r;
2756 
2757 	if (!ip_block->version->funcs->hw_fini) {
2758 		dev_err(adev->dev, "hw_fini of IP block <%s> not defined\n",
2759 			ip_block->version->funcs->name);
2760 	} else {
2761 		r = ip_block->version->funcs->hw_fini(ip_block);
2762 		/* XXX handle errors */
2763 		if (r) {
2764 			dev_dbg(adev->dev,
2765 				"hw_fini of IP block <%s> failed %d\n",
2766 				ip_block->version->funcs->name, r);
2767 		}
2768 	}
2769 
2770 	ip_block->status.hw = false;
2771 }
2772 
2773 /**
2774  * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2775  *
2776  * @adev: amdgpu_device pointer
2777  *
2778  * For ASICs that need to disable the SMC first.
2779  */
2780 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2781 {
2782 	int i;
2783 
2784 	if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
2785 		return;
2786 
2787 	for (i = 0; i < adev->num_ip_blocks; i++) {
2788 		if (!adev->ip_blocks[i].status.hw)
2789 			continue;
2790 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2791 			amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]);
2792 			break;
2793 		}
2794 	}
2795 }
2796 
2797 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2798 {
2799 	int i, r;
2800 
2801 	for (i = 0; i < adev->num_ip_blocks; i++) {
2802 		if (!adev->ip_blocks[i].version)
2803 			continue;
2804 		if (!adev->ip_blocks[i].version->funcs->early_fini)
2805 			continue;
2806 
2807 		r = adev->ip_blocks[i].version->funcs->early_fini(&adev->ip_blocks[i]);
2808 		if (r) {
2809 			dev_dbg(adev->dev,
2810 				"early_fini of IP block <%s> failed %d\n",
2811 				adev->ip_blocks[i].version->funcs->name, r);
2812 		}
2813 	}
2814 
2815 	amdgpu_amdkfd_suspend(adev, true);
2816 	amdgpu_amdkfd_teardown_processes(adev);
2817 	amdgpu_userq_suspend(adev);
2818 
2819 	/* Workaround for ASICs need to disable SMC first */
2820 	amdgpu_device_smu_fini_early(adev);
2821 
2822 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2823 		if (!adev->ip_blocks[i].status.hw)
2824 			continue;
2825 
2826 		amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]);
2827 	}
2828 
2829 	if (amdgpu_sriov_vf(adev)) {
2830 		if (amdgpu_virt_release_full_gpu(adev, false))
2831 			dev_err(adev->dev,
2832 				"failed to release exclusive mode on fini\n");
2833 	}
2834 
2835 	/*
2836 	 * Driver reload on the APU can fail due to firmware validation because
2837 	 * the PSP is always running, as it is shared across the whole SoC.
2838 	 * This same issue does not occur on dGPU because it has a mechanism
2839 	 * that checks whether the PSP is running. A solution for those issues
2840 	 * in the APU is to trigger a GPU reset, but this should be done during
2841 	 * the unload phase to avoid adding boot latency and screen flicker.
2842 	 * GFX v11 has the GC block as a default-off IP. Every time the AMDGPU
2843 	 * driver asks PMFW to unload MP1, PMFW puts GC in reset and powers
2844 	 * down the voltage. Hence, skip the reset for APUs with GFX v11 or later.
2845 	 */
2846 	if ((adev->flags & AMD_IS_APU) && !adev->gmc.is_app_apu &&
2847 		amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(11, 0, 0)) {
2848 		r = amdgpu_asic_reset(adev);
2849 		if (r)
2850 			dev_err(adev->dev, "asic reset on %s failed\n", __func__);
2851 	}
2852 
2853 	return 0;
2854 }
2855 
2856 /**
2857  * amdgpu_device_ip_fini - run fini for hardware IPs
2858  *
2859  * @adev: amdgpu_device pointer
2860  *
2861  * Main teardown pass for hardware IPs.  The list of all the hardware
2862  * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2863  * are run.  hw_fini tears down the hardware associated with each IP
2864  * and sw_fini tears down any software state associated with each IP.
2865  * Returns 0 on success, negative error code on failure.
2866  */
2867 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2868 {
2869 	int i, r;
2870 
2871 	amdgpu_cper_fini(adev);
2872 
2873 	if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2874 		amdgpu_virt_release_ras_err_handler_data(adev);
2875 
2876 	if (adev->gmc.xgmi.num_physical_nodes > 1)
2877 		amdgpu_xgmi_remove_device(adev);
2878 
2879 	amdgpu_amdkfd_device_fini_sw(adev);
2880 
2881 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2882 		if (!adev->ip_blocks[i].status.sw)
2883 			continue;
2884 
2885 		if (!adev->ip_blocks[i].version)
2886 			continue;
2887 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2888 			amdgpu_ucode_free_bo(adev);
2889 			amdgpu_free_static_csa(&adev->virt.csa_obj);
2890 			amdgpu_device_wb_fini(adev);
2891 			amdgpu_device_mem_scratch_fini(adev);
2892 			amdgpu_ib_pool_fini(adev);
2893 			amdgpu_seq64_fini(adev);
2894 			amdgpu_doorbell_fini(adev);
2895 		}
2896 		if (adev->ip_blocks[i].version->funcs->sw_fini) {
2897 			r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]);
2898 			/* XXX handle errors */
2899 			if (r) {
2900 				dev_dbg(adev->dev,
2901 					"sw_fini of IP block <%s> failed %d\n",
2902 					adev->ip_blocks[i].version->funcs->name,
2903 					r);
2904 			}
2905 		}
2906 		adev->ip_blocks[i].status.sw = false;
2907 		adev->ip_blocks[i].status.valid = false;
2908 	}
2909 
2910 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2911 		if (!adev->ip_blocks[i].status.late_initialized)
2912 			continue;
2913 		if (!adev->ip_blocks[i].version)
2914 			continue;
2915 		if (adev->ip_blocks[i].version->funcs->late_fini)
2916 			adev->ip_blocks[i].version->funcs->late_fini(&adev->ip_blocks[i]);
2917 		adev->ip_blocks[i].status.late_initialized = false;
2918 	}
2919 
2920 	amdgpu_ras_fini(adev);
2921 	amdgpu_uid_fini(adev);
2922 
2923 	return 0;
2924 }
2925 
2926 /**
2927  * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2928  *
2929  * @work: work_struct.
2930  */
2931 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2932 {
2933 	struct amdgpu_device *adev =
2934 		container_of(work, struct amdgpu_device, delayed_init_work.work);
2935 	int r;
2936 
2937 	r = amdgpu_ib_ring_tests(adev);
2938 	if (r)
2939 		dev_err(adev->dev, "ib ring test failed (%d).\n", r);
2940 }
2941 
2942 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2943 {
2944 	struct amdgpu_device *adev =
2945 		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2946 
2947 	WARN_ON_ONCE(adev->gfx.gfx_off_state);
2948 	WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2949 
2950 	if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true, 0))
2951 		adev->gfx.gfx_off_state = true;
2952 }
2953 
2954 /**
2955  * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2956  *
2957  * @adev: amdgpu_device pointer
2958  *
2959  * Main suspend function for hardware IPs.  The list of all the hardware
2960  * IPs that make up the asic is walked, clockgating is disabled and the
2961  * suspend callbacks are run.  suspend puts the hardware and software state
2962  * in each IP into a state suitable for suspend.
2963  * Returns 0 on success, negative error code on failure.
2964  */
2965 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2966 {
2967 	int i, r, rec;
2968 
2969 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2970 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2971 
2972 	/*
2973 	 * Per the PMFW team's suggestion, the driver needs to disable gfxoff
2974 	 * and the df cstate feature for gpu reset (e.g. Mode1Reset)
2975 	 * scenarios. Add the missing df cstate disablement here.
2976 	 */
2977 	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
2978 		dev_warn(adev->dev, "Failed to disallow df cstate");
2979 
2980 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2981 		if (!adev->ip_blocks[i].status.valid)
2982 			continue;
2983 
2984 		/* displays are handled separately */
2985 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2986 			continue;
2987 
2988 		r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
2989 		if (r)
2990 			goto unwind;
2991 	}
2992 
2993 	return 0;
2994 unwind:
2995 	rec = amdgpu_device_ip_resume_phase3(adev);
2996 	if (rec)
2997 		dev_err(adev->dev,
2998 			"amdgpu_device_ip_resume_phase3 failed during unwind: %d\n",
2999 			rec);
3000 
3001 	amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW);
3002 
3003 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
3004 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
3005 
3006 	return r;
3007 }
3008 
3009 /**
3010  * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
3011  *
3012  * @adev: amdgpu_device pointer
3013  *
3014  * Main suspend function for hardware IPs.  The list of all the hardware
3015  * IPs that make up the asic is walked, clockgating is disabled and the
3016  * suspend callbacks are run.  suspend puts the hardware and software state
3017  * in each IP into a state suitable for suspend.
3018  * Returns 0 on success, negative error code on failure.
3019  */
3020 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
3021 {
3022 	int i, r, rec;
3023 
3024 	if (adev->in_s0ix)
3025 		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
3026 
3027 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
3028 		if (!adev->ip_blocks[i].status.valid)
3029 			continue;
3030 		/* displays are handled in phase1 */
3031 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
3032 			continue;
3033 		/* PSP lost connection when err_event_athub occurs */
3034 		if (amdgpu_ras_intr_triggered() &&
3035 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
3036 			adev->ip_blocks[i].status.hw = false;
3037 			continue;
3038 		}
3039 
3040 		/* skip unnecessary suspend if we have not initialized them yet */
3041 		if (!amdgpu_ip_member_of_hwini(
3042 			    adev, adev->ip_blocks[i].version->type))
3043 			continue;
3044 
3045 		/* Since we skip suspend for S0i3, we need to cancel the delayed
3046 		 * idle work here as the suspend callback never gets called.
3047 		 */
3048 		if (adev->in_s0ix &&
3049 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX &&
3050 		    amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0))
3051 			cancel_delayed_work_sync(&adev->gfx.idle_work);
3052 		/* skip suspend of gfx/mes and psp for S0ix
3053 		 * gfx is in gfxoff state, so on resume it will exit gfxoff just
3054 		 * like at runtime. PSP is also part of the always-on hardware,
3055 		 * so no need to suspend it.
3056 		 */
3057 		if (adev->in_s0ix &&
3058 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3059 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3060 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
3061 			continue;
3062 
3063 		/* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
3064 		if (adev->in_s0ix &&
3065 		    (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=
3066 		     IP_VERSION(5, 0, 0)) &&
3067 		    (adev->ip_blocks[i].version->type ==
3068 		     AMD_IP_BLOCK_TYPE_SDMA))
3069 			continue;
3070 
3071 		/* PSP provides the IMU and RLC FW binaries to TOS during cold boot.
3072 		 * These live in the TMR and are expected to be reused by PSP-TOS on
3073 		 * reload; RLC autoload is likewise driven from there based on the
3074 		 * PMFW -> PSP message during the re-init sequence.
3075 		 * Therefore, skip psp suspend & resume to avoid destroying the TMR
3076 		 * and reloading the FWs again on IMU-enabled APU ASICs.
3077 		 */
3078 		if (amdgpu_in_reset(adev) &&
3079 		    (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
3080 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3081 			continue;
3082 
3083 		r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
3084 		if (r)
3085 			goto unwind;
3086 
3087 		/* handle putting the SMC in the appropriate state */
3088 		if (!amdgpu_sriov_vf(adev)) {
3089 			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3090 				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3091 				if (r) {
3092 					dev_err(adev->dev,
3093 						"SMC failed to set mp1 state %d, %d\n",
3094 						adev->mp1_state, r);
3095 					goto unwind;
3096 				}
3097 			}
3098 		}
3099 	}
3100 
3101 	return 0;
3102 unwind:
3103 	/* unwinding suspend phase 2 requires resume phase 1 + resume phase 2 */
3104 	rec = amdgpu_device_ip_resume_phase1(adev);
3105 	if (rec) {
3106 		dev_err(adev->dev,
3107 			"amdgpu_device_ip_resume_phase1 failed during unwind: %d\n",
3108 			rec);
3109 		return r;
3110 	}
3111 
3112 	rec = amdgpu_device_fw_loading(adev);
3113 	if (rec) {
3114 		dev_err(adev->dev,
3115 			"amdgpu_device_fw_loading failed during unwind: %d\n",
3116 			rec);
3117 		return r;
3118 	}
3119 
3120 	rec = amdgpu_device_ip_resume_phase2(adev);
3121 	if (rec) {
3122 		dev_err(adev->dev,
3123 			"amdgpu_device_ip_resume_phase2 failed during unwind: %d\n",
3124 			rec);
3125 		return r;
3126 	}
3127 
3128 	return r;
3129 }
3130 
3131 /**
3132  * amdgpu_device_ip_suspend - run suspend for hardware IPs
3133  *
3134  * @adev: amdgpu_device pointer
3135  *
3136  * Main suspend function for hardware IPs.  The list of all the hardware
3137  * IPs that make up the asic is walked, clockgating is disabled and the
3138  * suspend callbacks are run.  suspend puts the hardware and software state
3139  * in each IP into a state suitable for suspend.
3140  * Returns 0 on success, negative error code on failure.
3141  */
3142 static int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3143 {
3144 	int r;
3145 
3146 	if (amdgpu_sriov_vf(adev)) {
3147 		amdgpu_virt_fini_data_exchange(adev);
3148 		amdgpu_virt_request_full_gpu(adev, false);
3149 	}
3150 
3151 	amdgpu_ttm_set_buffer_funcs_status(adev, false);
3152 
3153 	r = amdgpu_device_ip_suspend_phase1(adev);
3154 	if (r)
3155 		return r;
3156 	r = amdgpu_device_ip_suspend_phase2(adev);
3157 
3158 	if (amdgpu_sriov_vf(adev))
3159 		amdgpu_virt_release_full_gpu(adev, false);
3160 
3161 	return r;
3162 }
3163 
3164 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3165 {
3166 	int i, r;
3167 
3168 	static enum amd_ip_block_type ip_order[] = {
3169 		AMD_IP_BLOCK_TYPE_COMMON,
3170 		AMD_IP_BLOCK_TYPE_GMC,
3171 		AMD_IP_BLOCK_TYPE_PSP,
3172 		AMD_IP_BLOCK_TYPE_IH,
3173 	};
3174 
3175 	for (i = 0; i < adev->num_ip_blocks; i++) {
3176 		int j;
3177 		struct amdgpu_ip_block *block;
3178 
3179 		block = &adev->ip_blocks[i];
3180 		block->status.hw = false;
3181 
3182 		for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3183 
3184 			if (block->version->type != ip_order[j] ||
3185 				!block->status.valid)
3186 				continue;
3187 
3188 			r = block->version->funcs->hw_init(&adev->ip_blocks[i]);
3189 			if (r) {
3190 				dev_err(adev->dev, "RE-INIT-early: %s failed\n",
3191 					 block->version->funcs->name);
3192 				return r;
3193 			}
3194 			block->status.hw = true;
3195 		}
3196 	}
3197 
3198 	return 0;
3199 }
3200 
3201 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3202 {
3203 	struct amdgpu_ip_block *block;
3204 	int i, r = 0;
3205 
3206 	static enum amd_ip_block_type ip_order[] = {
3207 		AMD_IP_BLOCK_TYPE_SMC,
3208 		AMD_IP_BLOCK_TYPE_DCE,
3209 		AMD_IP_BLOCK_TYPE_GFX,
3210 		AMD_IP_BLOCK_TYPE_SDMA,
3211 		AMD_IP_BLOCK_TYPE_MES,
3212 		AMD_IP_BLOCK_TYPE_UVD,
3213 		AMD_IP_BLOCK_TYPE_VCE,
3214 		AMD_IP_BLOCK_TYPE_VCN,
3215 		AMD_IP_BLOCK_TYPE_JPEG
3216 	};
3217 
3218 	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3219 		block = amdgpu_device_ip_get_ip_block(adev, ip_order[i]);
3220 
3221 		if (!block)
3222 			continue;
3223 
3224 		if (block->status.valid && !block->status.hw) {
3225 			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC) {
3226 				r = amdgpu_ip_block_resume(block);
3227 			} else {
3228 				r = block->version->funcs->hw_init(block);
3229 			}
3230 
3231 			if (r) {
3232 				dev_err(adev->dev, "RE-INIT-late: %s failed\n",
3233 					 block->version->funcs->name);
3234 				break;
3235 			}
3236 			block->status.hw = true;
3237 		}
3238 	}
3239 
3240 	return r;
3241 }
3242 
3243 /**
3244  * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3245  *
3246  * @adev: amdgpu_device pointer
3247  *
3248  * First resume function for hardware IPs.  The list of all the hardware
3249  * IPs that make up the asic is walked and the resume callbacks are run for
3250  * COMMON, GMC, and IH.  resume puts the hardware into a functional state
3251  * after a suspend and updates the software state as necessary.  This
3252  * function is also used for restoring the GPU after a GPU reset.
3253  * Returns 0 on success, negative error code on failure.
3254  */
3255 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3256 {
3257 	int i, r;
3258 
3259 	for (i = 0; i < adev->num_ip_blocks; i++) {
3260 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3261 			continue;
3262 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3263 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3264 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3265 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3266 
3267 			r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
3268 			if (r)
3269 				return r;
3270 		}
3271 	}
3272 
3273 	return 0;
3274 }
3275 
3276 /**
3277  * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3278  *
3279  * @adev: amdgpu_device pointer
3280  *
3281  * Second resume function for hardware IPs.  The list of all the hardware
3282  * IPs that make up the asic is walked and the resume callbacks are run for
3283  * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
3284  * functional state after a suspend and updates the software state as
3285  * necessary.  This function is also used for restoring the GPU after a GPU
3286  * reset.
3287  * Returns 0 on success, negative error code on failure.
3288  */
3289 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3290 {
3291 	int i, r;
3292 
3293 	for (i = 0; i < adev->num_ip_blocks; i++) {
3294 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3295 			continue;
3296 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3297 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3298 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3299 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE ||
3300 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3301 			continue;
3302 		r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
3303 		if (r)
3304 			return r;
3305 	}
3306 
3307 	return 0;
3308 }
3309 
3310 /**
3311  * amdgpu_device_ip_resume_phase3 - run resume for hardware IPs
3312  *
3313  * @adev: amdgpu_device pointer
3314  *
3315  * Third resume function for hardware IPs.  The list of all the hardware
3316  * IPs that make up the asic is walked and the resume callbacks are run for
3317  * all DCE.  resume puts the hardware into a functional state after a suspend
3318  * and updates the software state as necessary.  This function is also used
3319  * for restoring the GPU after a GPU reset.
3320  *
3321  * Returns 0 on success, negative error code on failure.
3322  */
3323 static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev)
3324 {
3325 	int i, r;
3326 
3327 	for (i = 0; i < adev->num_ip_blocks; i++) {
3328 		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3329 			continue;
3330 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
3331 			r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
3332 			if (r)
3333 				return r;
3334 		}
3335 	}
3336 
3337 	return 0;
3338 }
3339 
3340 /**
3341  * amdgpu_device_ip_resume - run resume for hardware IPs
3342  *
3343  * @adev: amdgpu_device pointer
3344  *
3345  * Main resume function for hardware IPs.  The hardware IPs
3346  * are split into several resume functions because they are
3347  * also used in recovering from a GPU reset and some additional
3348  * steps need to be taken between them.  In this case (S3/S4) they are
3349  * run sequentially.
3350  * Returns 0 on success, negative error code on failure.
3351  */
3352 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3353 {
3354 	int r;
3355 
3356 	r = amdgpu_device_ip_resume_phase1(adev);
3357 	if (r)
3358 		return r;
3359 
3360 	r = amdgpu_device_fw_loading(adev);
3361 	if (r)
3362 		return r;
3363 
3364 	r = amdgpu_device_ip_resume_phase2(adev);
3365 
3366 	amdgpu_ttm_set_buffer_funcs_status(adev, true);
3367 
3368 	if (r)
3369 		return r;
3370 
3371 	amdgpu_fence_driver_hw_init(adev);
3372 
3373 	r = amdgpu_device_ip_resume_phase3(adev);
3374 
3375 	return r;
3376 }
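
/*
 * Resume is split across the three phases above: phase 1 brings back
 * COMMON, GMC and IH (plus PSP for SR-IOV), amdgpu_device_fw_loading()
 * reloads the microcode (resuming PSP itself on bare metal), phase 2
 * restores the remaining blocks, and phase 3 restores DCE last so
 * displays only light up once the rest of the ASIC is functional.
 */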
3377 
3378 /**
3379  * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3380  *
3381  * @adev: amdgpu_device pointer
3382  *
3383  * Query the VBIOS data tables to determine if the board supports SR-IOV.
3384  */
3385 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3386 {
3387 	if (amdgpu_sriov_vf(adev)) {
3388 		if (adev->is_atom_fw) {
3389 			if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3390 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3391 		} else {
3392 			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3393 				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3394 		}
3395 
3396 		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3397 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3398 	}
3399 }
3400 
3401 /**
3402  * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3403  *
3404  * @pdev : pci device context
3405  * @asic_type: AMD asic type
3406  *
3407  * Check if there is DC (new modesetting infrastructure) support for an asic.
3408  * Returns true if DC has support, false if not.
3409  */
3410 bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev,
3411 				       enum amd_asic_type asic_type)
3412 {
3413 	switch (asic_type) {
3414 #ifdef CONFIG_DRM_AMDGPU_SI
3415 	case CHIP_HAINAN:
3416 #endif
3417 	case CHIP_TOPAZ:
3418 		/* chips with no display hardware */
3419 		return false;
3420 #if defined(CONFIG_DRM_AMD_DC)
3421 	case CHIP_TAHITI:
3422 	case CHIP_PITCAIRN:
3423 	case CHIP_VERDE:
3424 	case CHIP_OLAND:
3425 		return amdgpu_dc != 0 && IS_ENABLED(CONFIG_DRM_AMD_DC_SI);
3426 	default:
3427 		return amdgpu_dc != 0;
3428 #else
3429 	default:
3430 		if (amdgpu_dc > 0)
3431 			dev_info_once(
3432 				&pdev->dev,
3433 				"Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
3434 		return false;
3435 #endif
3436 	}
3437 }
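
/*
 * The amdgpu_dc checks above reflect the amdgpu.dc module parameter:
 * -1 (the default) selects DC automatically where supported, 0 forces
 * the legacy display path, and 1 requests DC (ignored, with the message
 * above, when the kernel was built without CONFIG_DRM_AMD_DC).
 */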
3438 
3439 /**
3440  * amdgpu_device_has_dc_support - check if dc is supported
3441  *
3442  * @adev: amdgpu_device pointer
3443  *
3444  * Returns true for supported, false for not supported
3445  */
3446 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3447 {
3448 	if (adev->enable_virtual_display ||
3449 	    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3450 		return false;
3451 
3452 	return amdgpu_device_asic_has_dc_support(adev->pdev, adev->asic_type);
3453 }
3454 
3455 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3456 {
3457 	struct amdgpu_device *adev =
3458 		container_of(__work, struct amdgpu_device, xgmi_reset_work);
3459 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3460 
3461 	/* It's a bug to not have a hive within this function */
3462 	if (WARN_ON(!hive))
3463 		return;
3464 
3465 	/*
3466 	 * Use task barrier to synchronize all xgmi reset works across the
3467 	 * hive. task_barrier_enter and task_barrier_exit will block
3468 	 * until all the threads running the xgmi reset works reach
3469 	 * those points. task_barrier_full will do both blocks.
3470 	 */
3471 	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3472 
3473 		task_barrier_enter(&hive->tb);
3474 		adev->asic_reset_res = amdgpu_device_baco_enter(adev);
3475 
3476 		if (adev->asic_reset_res)
3477 			goto fail;
3478 
3479 		task_barrier_exit(&hive->tb);
3480 		adev->asic_reset_res = amdgpu_device_baco_exit(adev);
3481 
3482 		if (adev->asic_reset_res)
3483 			goto fail;
3484 
3485 		amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
3486 	} else {
3487 
3488 		task_barrier_full(&hive->tb);
3489 		adev->asic_reset_res =  amdgpu_asic_reset(adev);
3490 	}
3491 
3492 fail:
3493 	if (adev->asic_reset_res)
3494 		dev_warn(adev->dev,
3495 			 "ASIC reset failed with error, %d for drm dev, %s",
3496 			 adev->asic_reset_res, adev_to_drm(adev)->unique);
3497 	amdgpu_put_xgmi_hive(hive);
3498 }
3499 
3500 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3501 {
3502 	char buf[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
3503 	char *input = buf;
3504 	char *timeout_setting = NULL;
3505 	int index = 0;
3506 	long timeout;
3507 	int ret = 0;
3508 
3509 	/* By default timeout for all queues is 2 sec */
3510 	adev->gfx_timeout = adev->compute_timeout = adev->sdma_timeout =
3511 		adev->video_timeout = msecs_to_jiffies(2000);
3512 
3513 	if (!strnlen(amdgpu_lockup_timeout, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH))
3514 		return 0;
3515 
3516 	/*
3517 	 * strsep() destructively modifies its input by replacing delimiters
3518 	 * with '\0'. Use a stack copy so the global module parameter buffer
3519 	 * remains intact for multi-GPU systems where this function is called
3520 	 * once per device.
3521 	 */
3522 	strscpy(buf, amdgpu_lockup_timeout, sizeof(buf));
3523 
3524 	while ((timeout_setting = strsep(&input, ",")) &&
3525 	       strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3526 		ret = kstrtol(timeout_setting, 0, &timeout);
3527 		if (ret)
3528 			return ret;
3529 
3530 		if (timeout == 0) {
3531 			index++;
3532 			continue;
3533 		} else if (timeout < 0) {
3534 			timeout = MAX_SCHEDULE_TIMEOUT;
3535 			dev_warn(adev->dev, "lockup timeout disabled");
3536 			add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3537 		} else {
3538 			timeout = msecs_to_jiffies(timeout);
3539 		}
3540 
3541 		switch (index++) {
3542 		case 0:
3543 			adev->gfx_timeout = timeout;
3544 			break;
3545 		case 1:
3546 			adev->compute_timeout = timeout;
3547 			break;
3548 		case 2:
3549 			adev->sdma_timeout = timeout;
3550 			break;
3551 		case 3:
3552 			adev->video_timeout = timeout;
3553 			break;
3554 		default:
3555 			break;
3556 		}
3557 	}
3558 
3559 	/* When only one value is specified, apply it to all queues. */
3560 	if (index == 1)
3561 		adev->gfx_timeout = adev->compute_timeout = adev->sdma_timeout =
3562 			adev->video_timeout = timeout;
3563 
3564 	return ret;
3565 }
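
/*
 * For reference, the amdgpu.lockup_timeout string parsed above is a
 * comma-separated list of up to four millisecond values in the order
 * GFX, compute, SDMA, video; 0 keeps the default for that queue and a
 * negative value disables the timeout. E.g. (illustrative only):
 *
 *   amdgpu.lockup_timeout=10000,60000,10000,10000
 *
 * A single value applies to all four queue types.
 */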
3566 
3567 /**
3568  * amdgpu_device_check_iommu_direct_map - check if RAM is direct mapped to the GPU
3569  *
3570  * @adev: amdgpu_device pointer
3571  *
3572  * RAM is direct mapped to the GPU if the IOMMU is disabled or in passthrough mode.
3573  */
3574 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3575 {
3576 	struct iommu_domain *domain;
3577 
3578 	domain = iommu_get_domain_for_dev(adev->dev);
3579 	if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3580 		adev->ram_is_direct_mapped = true;
3581 }
3582 
3583 #if defined(CONFIG_HSA_AMD_P2P)
3584 /**
3585  * amdgpu_device_check_iommu_remap - Check if DMA remapping is enabled.
3586  *
3587  * @adev: amdgpu_device pointer
3588  *
3589  * Returns true if the IOMMU is remapping (translating) the BAR address.
3590  */
3591 static bool amdgpu_device_check_iommu_remap(struct amdgpu_device *adev)
3592 {
3593 	struct iommu_domain *domain;
3594 
3595 	domain = iommu_get_domain_for_dev(adev->dev);
3596 	if (domain && (domain->type == IOMMU_DOMAIN_DMA ||
3597 		domain->type ==	IOMMU_DOMAIN_DMA_FQ))
3598 		return true;
3599 
3600 	return false;
3601 }
3602 #endif
3603 
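/*
 * Configure Mid-Command Buffer Preemption (MCBP) from the amdgpu.mcbp
 * module parameter: 1 forces it on, 0 forces it off, any other value keeps
 * the per-ASIC default. MCBP is always enabled for SR-IOV VFs.
 */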
3604 static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
3605 {
3606 	if (amdgpu_mcbp == 1)
3607 		adev->gfx.mcbp = true;
3608 	else if (amdgpu_mcbp == 0)
3609 		adev->gfx.mcbp = false;
3610 
3611 	if (amdgpu_sriov_vf(adev))
3612 		adev->gfx.mcbp = true;
3613 
3614 	if (adev->gfx.mcbp)
3615 		dev_info(adev->dev, "MCBP is enabled\n");
3616 }
3617 
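/*
 * Register the various sysfs interfaces for the device. Failures of the
 * individual registrations are logged but are not treated as fatal, so
 * initialization continues.
 */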
3618 static int amdgpu_device_sys_interface_init(struct amdgpu_device *adev)
3619 {
3620 	int r;
3621 
3622 	r = amdgpu_atombios_sysfs_init(adev);
3623 	if (r)
3624 		drm_err(&adev->ddev,
3625 			"registering atombios sysfs failed (%d).\n", r);
3626 
3627 	r = amdgpu_pm_sysfs_init(adev);
3628 	if (r)
3629 		dev_err(adev->dev, "registering pm sysfs failed (%d).\n", r);
3630 
3631 	r = amdgpu_ucode_sysfs_init(adev);
3632 	if (r) {
3633 		adev->ucode_sysfs_en = false;
3634 		dev_err(adev->dev, "Creating firmware sysfs failed (%d).\n", r);
3635 	} else
3636 		adev->ucode_sysfs_en = true;
3637 
3638 	r = amdgpu_device_attr_sysfs_init(adev);
3639 	if (r)
3640 		dev_err(adev->dev, "Could not create amdgpu device attr\n");
3641 
3642 	r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group);
3643 	if (r)
3644 		dev_err(adev->dev,
3645 			"Could not create amdgpu board attributes\n");
3646 
3647 	amdgpu_fru_sysfs_init(adev);
3648 	amdgpu_reg_state_sysfs_init(adev);
3649 	amdgpu_xcp_sysfs_init(adev);
3650 	amdgpu_uma_sysfs_init(adev);
3651 
3652 	return r;
3653 }
3654 
3655 static void amdgpu_device_sys_interface_fini(struct amdgpu_device *adev)
3656 {
3657 	if (adev->pm.sysfs_initialized)
3658 		amdgpu_pm_sysfs_fini(adev);
3659 	if (adev->ucode_sysfs_en)
3660 		amdgpu_ucode_sysfs_fini(adev);
3661 	amdgpu_device_attr_sysfs_fini(adev);
3662 	amdgpu_fru_sysfs_fini(adev);
3663 
3664 	amdgpu_reg_state_sysfs_fini(adev);
3665 	amdgpu_xcp_sysfs_fini(adev);
3666 	amdgpu_uma_sysfs_fini(adev);
3667 }
3668 
3669 /**
3670  * amdgpu_device_init - initialize the driver
3671  *
3672  * @adev: amdgpu_device pointer
3673  * @flags: driver flags
3674  *
3675  * Initializes the driver info and hw (all asics).
3676  * Returns 0 for success or an error on failure.
3677  * Called at driver startup.
3678  */
3679 int amdgpu_device_init(struct amdgpu_device *adev,
3680 		       uint32_t flags)
3681 {
3682 	struct pci_dev *pdev = adev->pdev;
3683 	int r, i;
3684 	bool px = false;
3685 	u32 max_MBps;
3686 	int tmp;
3687 
3688 	adev->shutdown = false;
3689 	adev->flags = flags;
3690 
3691 	if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3692 		adev->asic_type = amdgpu_force_asic_type;
3693 	else
3694 		adev->asic_type = flags & AMD_ASIC_MASK;
3695 
3696 	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3697 	if (amdgpu_emu_mode == 1)
3698 		adev->usec_timeout *= 10;
3699 	adev->gmc.gart_size = 512 * 1024 * 1024;
3700 	adev->accel_working = false;
3701 	adev->num_rings = 0;
3702 	RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3703 	adev->mman.buffer_funcs = NULL;
3704 	adev->mman.buffer_funcs_ring = NULL;
3705 	adev->vm_manager.vm_pte_funcs = NULL;
3706 	adev->vm_manager.vm_pte_num_scheds = 0;
3707 	adev->gmc.gmc_funcs = NULL;
3708 	adev->harvest_ip_mask = 0x0;
3709 	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3710 	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3711 
3712 	amdgpu_reg_access_init(adev);
3713 
3714 	dev_info(
3715 		adev->dev,
3716 		"initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3717 		amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3718 		pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3719 
3720 	/* Mutex initialization is all done here so we
3721 	 * can call these functions again without locking issues.
3722 	 */
3723 	mutex_init(&adev->firmware.mutex);
3724 	mutex_init(&adev->pm.mutex);
3725 	mutex_init(&adev->gfx.gpu_clock_mutex);
3726 	mutex_init(&adev->srbm_mutex);
3727 	mutex_init(&adev->gfx.pipe_reserve_mutex);
3728 	mutex_init(&adev->gfx.gfx_off_mutex);
3729 	mutex_init(&adev->gfx.partition_mutex);
3730 	mutex_init(&adev->grbm_idx_mutex);
3731 	mutex_init(&adev->mn_lock);
3732 	mutex_init(&adev->virt.vf_errors.lock);
3733 	hash_init(adev->mn_hash);
3734 	mutex_init(&adev->psp.mutex);
3735 	mutex_init(&adev->notifier_lock);
3736 	mutex_init(&adev->pm.stable_pstate_ctx_lock);
3737 	mutex_init(&adev->benchmark_mutex);
3738 	mutex_init(&adev->gfx.reset_sem_mutex);
3739 	/* Initialize the mutex for cleaner shader isolation between GFX and compute processes */
3740 	mutex_init(&adev->enforce_isolation_mutex);
3741 	for (i = 0; i < MAX_XCP; ++i) {
3742 		adev->isolation[i].spearhead = dma_fence_get_stub();
3743 		amdgpu_sync_create(&adev->isolation[i].active);
3744 		amdgpu_sync_create(&adev->isolation[i].prev);
3745 	}
3746 	mutex_init(&adev->gfx.userq_sch_mutex);
3747 	mutex_init(&adev->gfx.workload_profile_mutex);
3748 	mutex_init(&adev->vcn.workload_profile_mutex);
3749 
3750 	amdgpu_device_init_apu_flags(adev);
3751 
3752 	r = amdgpu_device_check_arguments(adev);
3753 	if (r)
3754 		return r;
3755 
3756 	spin_lock_init(&adev->mmio_idx_lock);
3757 	spin_lock_init(&adev->mm_stats.lock);
3758 	spin_lock_init(&adev->virt.rlcg_reg_lock);
3759 	spin_lock_init(&adev->wb.lock);
3760 
3761 	INIT_LIST_HEAD(&adev->reset_list);
3762 
3763 	INIT_LIST_HEAD(&adev->ras_list);
3764 
3765 	INIT_LIST_HEAD(&adev->pm.od_kobj_list);
3766 
3767 	xa_init_flags(&adev->userq_doorbell_xa, XA_FLAGS_LOCK_IRQ);
3768 
3769 	INIT_DELAYED_WORK(&adev->delayed_init_work,
3770 			  amdgpu_device_delayed_init_work_handler);
3771 	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3772 			  amdgpu_device_delay_enable_gfx_off);
3773 	/*
3774 	 * Initialize the enforce_isolation work structures for each XCP
3775 	 * partition.  This work handler is responsible for enforcing shader
3776 	 * isolation on AMD GPUs.  It counts the number of emitted fences for
3777 	 * each GFX and compute ring.  If there are any fences, it schedules
3778 	 * the `enforce_isolation_work` to be run after a delay.  If there are
3779 	 * no fences, it signals the Kernel Fusion Driver (KFD) to resume the
3780 	 * runqueue.
3781 	 */
3782 	for (i = 0; i < MAX_XCP; i++) {
3783 		INIT_DELAYED_WORK(&adev->gfx.enforce_isolation[i].work,
3784 				  amdgpu_gfx_enforce_isolation_handler);
3785 		adev->gfx.enforce_isolation[i].adev = adev;
3786 		adev->gfx.enforce_isolation[i].xcp_id = i;
3787 	}
3788 
3789 	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3790 	INIT_WORK(&adev->userq_reset_work, amdgpu_userq_reset_work);
3791 
3792 	amdgpu_coredump_init(adev);
3793 
3794 	adev->gfx.gfx_off_req_count = 1;
3795 	adev->gfx.gfx_off_residency = 0;
3796 	adev->gfx.gfx_off_entrycount = 0;
3797 	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3798 
3799 	atomic_set(&adev->throttling_logging_enabled, 1);
3800 	/*
3801 	 * If throttling continues, logging will be performed every minute
3802 	 * to avoid log flooding. "-1" is subtracted since the thermal
3803 	 * throttling interrupt comes every second. Thus, the total logging
3804 	 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3805 	 * for the throttling interrupt) = 60 seconds.
3806 	 */
3807 	ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3808 
3809 	ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3810 
3811 	/* Registers mapping */
3812 	/* TODO: block userspace mapping of io register */
3813 	if (adev->asic_type >= CHIP_BONAIRE) {
3814 		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3815 		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3816 	} else {
3817 		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3818 		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3819 	}
3820 
3821 	for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3822 		atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3823 
3824 	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3825 	if (!adev->rmmio)
3826 		return -ENOMEM;
3827 
3828 	dev_info(adev->dev, "register mmio base: 0x%08X\n",
3829 		 (uint32_t)adev->rmmio_base);
3830 	dev_info(adev->dev, "register mmio size: %u\n",
3831 		 (unsigned int)adev->rmmio_size);
3832 
3833 	/*
3834 	 * The reset domain needs to be present early, before any XGMI hive is
3835 	 * discovered and initialized, so that the reset semaphore and in_gpu
3836 	 * reset flag can be used during early init, before any RREG32 call.
3837 	 */
3838 	adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3839 	if (!adev->reset_domain)
3840 		return -ENOMEM;
3841 
3842 	/* detect hw virtualization here */
3843 	amdgpu_virt_init(adev);
3844 
3845 	amdgpu_device_get_pcie_info(adev);
3846 
3847 	r = amdgpu_device_get_job_timeout_settings(adev);
3848 	if (r) {
3849 		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3850 		return r;
3851 	}
3852 
3853 	amdgpu_device_set_mcbp(adev);
3854 
3855 	/*
3856 	 * By default, use default mode where all blocks are expected to be
3857 	 * initialized. At present a 'swinit' of blocks is required to be
3858 	 * completed before the need for a different level is detected.
3859 	 */
3860 	amdgpu_set_init_level(adev, AMDGPU_INIT_LEVEL_DEFAULT);
3861 	/* early init functions */
3862 	r = amdgpu_device_ip_early_init(adev);
3863 	if (r)
3864 		return r;
3865 
3866 	/*
3867 	 * No need to remove conflicting FBs for non-display class devices.
3868 	 * This prevents the sysfb from being freed accidentally.
3869 	 */
3870 	if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA ||
3871 	    (pdev->class >> 8) == PCI_CLASS_DISPLAY_OTHER) {
3872 		/* Get rid of things like offb */
3873 		r = aperture_remove_conflicting_pci_devices(adev->pdev, amdgpu_kms_driver.name);
3874 		if (r)
3875 			return r;
3876 	}
3877 
3878 	/* Enable TMZ based on IP_VERSION */
3879 	amdgpu_gmc_tmz_set(adev);
3880 
3881 	if (amdgpu_sriov_vf(adev) &&
3882 	    amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))
3883 		/* VF MMIO access (except mailbox range) from CPU
3884 		 * will be blocked during SR-IOV runtime
3885 		 */
3886 		adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT;
3887 
3888 	amdgpu_gmc_noretry_set(adev);
3889 	/* Need to get xgmi info early to decide the reset behavior */
3890 	if (adev->gmc.xgmi.supported) {
3891 		if (adev->gfxhub.funcs &&
3892 		    adev->gfxhub.funcs->get_xgmi_info) {
3893 			r = adev->gfxhub.funcs->get_xgmi_info(adev);
3894 			if (r)
3895 				return r;
3896 		}
3897 	}
3898 
3899 	if (adev->gmc.xgmi.connected_to_cpu) {
3900 		if (adev->mmhub.funcs &&
3901 		    adev->mmhub.funcs->get_xgmi_info) {
3902 			r = adev->mmhub.funcs->get_xgmi_info(adev);
3903 			if (r)
3904 				return r;
3905 		}
3906 	}
3907 
3908 	/* enable PCIE atomic ops */
3909 	if (amdgpu_sriov_vf(adev)) {
3910 		if (adev->virt.fw_reserve.p_pf2vf)
3911 			adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3912 						      adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
3913 				(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3914 	/* APUs with gfx9 onwards don't rely on PCIe atomics; their internal
3915 	 * path natively supports atomics, so set have_atomics_support to true.
3916 	 */
3917 	} else if ((adev->flags & AMD_IS_APU &&
3918 		   amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0)) ||
3919 		   (adev->gmc.xgmi.connected_to_cpu &&
3920 		   amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 1, 0))) {
3921 		adev->have_atomics_support = true;
3922 	} else {
3923 		adev->have_atomics_support =
3924 			!pci_enable_atomic_ops_to_root(adev->pdev,
3925 					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3926 					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3927 	}
3928 
3929 	if (!adev->have_atomics_support)
3930 		dev_info(adev->dev, "PCIe atomic ops are not supported\n");
3931 
3932 	/* doorbell bar mapping and doorbell index init */
3933 	amdgpu_doorbell_init(adev);
3934 
3935 	if (amdgpu_emu_mode == 1) {
3936 		/* post the asic on emulation mode */
3937 		emu_soc_asic_init(adev);
3938 		goto fence_driver_init;
3939 	}
3940 
3941 	amdgpu_reset_init(adev);
3942 
3943 	/* detect if we are with an SRIOV vbios */
3944 	if (adev->bios)
3945 		amdgpu_device_detect_sriov_bios(adev);
3946 
3947 	/* check if we need to reset the asic
3948 	 *  E.g., driver was not cleanly unloaded previously, etc.
3949 	 */
3950 	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3951 		if (adev->gmc.xgmi.num_physical_nodes) {
3952 			dev_info(adev->dev, "Pending hive reset.\n");
3953 			amdgpu_set_init_level(adev,
3954 					      AMDGPU_INIT_LEVEL_MINIMAL_XGMI);
3955 		} else {
3956 			tmp = amdgpu_reset_method;
3957 			/* It should do a default reset when loading or reloading the driver,
3958 			 * regardless of the module parameter reset_method.
3959 			 */
3960 			amdgpu_reset_method = AMD_RESET_METHOD_NONE;
3961 			r = amdgpu_asic_reset(adev);
3962 			amdgpu_reset_method = tmp;
3963 		}
3964 
3965 		if (r) {
3966 			dev_err(adev->dev, "asic reset on init failed\n");
3967 			goto failed;
3968 		}
3969 	}
3970 
3971 	/* Post card if necessary */
3972 	if (amdgpu_device_need_post(adev)) {
3973 		if (!adev->bios) {
3974 			dev_err(adev->dev, "no vBIOS found\n");
3975 			r = -EINVAL;
3976 			goto failed;
3977 		}
3978 		dev_info(adev->dev, "GPU posting now...\n");
3979 		r = amdgpu_device_asic_init(adev);
3980 		if (r) {
3981 			dev_err(adev->dev, "gpu post error!\n");
3982 			goto failed;
3983 		}
3984 	}
3985 
3986 	if (adev->bios) {
3987 		if (adev->is_atom_fw) {
3988 			/* Initialize clocks */
3989 			r = amdgpu_atomfirmware_get_clock_info(adev);
3990 			if (r) {
3991 				dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3992 				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3993 				goto failed;
3994 			}
3995 		} else {
3996 			/* Initialize clocks */
3997 			r = amdgpu_atombios_get_clock_info(adev);
3998 			if (r) {
3999 				dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
4000 				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
4001 				goto failed;
4002 			}
4003 			/* init i2c buses */
4004 			amdgpu_i2c_init(adev);
4005 		}
4006 	}
4007 
4008 fence_driver_init:
4009 	/* Fence driver */
4010 	r = amdgpu_fence_driver_sw_init(adev);
4011 	if (r) {
4012 		dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
4013 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
4014 		goto failed;
4015 	}
4016 
4017 	/* init the mode config */
4018 	drm_mode_config_init(adev_to_drm(adev));
4019 
4020 	r = amdgpu_device_ip_init(adev);
4021 	if (r) {
4022 		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
4023 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
4024 		goto release_ras_con;
4025 	}
4026 
4027 	amdgpu_fence_driver_hw_init(adev);
4028 
4029 	dev_info(adev->dev,
4030 		"SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
4031 			adev->gfx.config.max_shader_engines,
4032 			adev->gfx.config.max_sh_per_se,
4033 			adev->gfx.config.max_cu_per_sh,
4034 			adev->gfx.cu_info.number);
4035 
4036 	adev->accel_working = true;
4037 
4038 	amdgpu_vm_check_compute_bug(adev);
4039 
4040 	/* Initialize the buffer migration limit. */
4041 	if (amdgpu_moverate >= 0)
4042 		max_MBps = amdgpu_moverate;
4043 	else
4044 		max_MBps = 8; /* Allow 8 MB/s. */
4045 	/* Get a log2 for easy divisions. */
4046 	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
4047 
4048 	/*
4049 	 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
4050 	 * Otherwise the mgpu fan boost feature will be skipped because the
4051 	 * gpu instance count would be too low.
4052 	 */
4053 	amdgpu_register_gpu_instance(adev);
4054 
4055 	/* enable clockgating, etc. after ib tests, etc. since some blocks require
4056 	 * explicit gating rather than handling it automatically.
4057 	 */
4058 	if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
4059 		r = amdgpu_device_ip_late_init(adev);
4060 		if (r) {
4061 			dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
4062 			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
4063 			goto release_ras_con;
4064 		}
4065 		/* must succeed. */
4066 		amdgpu_ras_resume(adev);
4067 		queue_delayed_work(system_dfl_wq, &adev->delayed_init_work,
4068 				   msecs_to_jiffies(AMDGPU_RESUME_MS));
4069 	}
4070 
4071 	if (amdgpu_sriov_vf(adev)) {
4072 		amdgpu_virt_release_full_gpu(adev, true);
4073 		flush_delayed_work(&adev->delayed_init_work);
4074 	}
4075 
4076 	/* Don't init kfd if whole hive need to be reset during init */
4077 	if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
4078 		kgd2kfd_init_zone_device(adev);
4079 		kfd_update_svm_support_properties(adev);
4080 	}
4081 
4082 	if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
4083 		amdgpu_xgmi_reset_on_init(adev);
4084 
4085 	/*
4086 	 * Register these sysfs interfaces after `late_init`, since some of
4087 	 * the operations performed in `late_init` might affect their
4088 	 * creation.
4089 	 */
4090 	r = amdgpu_device_sys_interface_init(adev);
4091 
4092 	if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
4093 		r = amdgpu_pmu_init(adev);
4094 		if (r)
4095 			dev_err(adev->dev, "amdgpu_pmu_init failed\n");
4096 	}
4096 
4097 	/* Keep the stored PCI config space at hand for restore after a sudden PCI error */
4098 	if (amdgpu_device_cache_pci_state(adev->pdev))
4099 		pci_restore_state(pdev);
4100 
4101 	/* if we have more than one VGA card, disable the amdgpu VGA resources */
4102 	/* this will fail for cards that aren't VGA class devices, just
4103 	 * ignore it
4104 	 */
4105 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4106 		vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
4107 
4108 	px = amdgpu_device_supports_px(adev);
4109 
4110 	if (px || (!dev_is_removable(&adev->pdev->dev) &&
4111 				apple_gmux_detect(NULL, NULL)))
4112 		vga_switcheroo_register_client(adev->pdev,
4113 					       &amdgpu_switcheroo_ops, px);
4114 
4115 	if (px)
4116 		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
4117 
4118 	amdgpu_device_check_iommu_direct_map(adev);
4119 
4120 	adev->pm_nb.notifier_call = amdgpu_device_pm_notifier;
4121 	r = register_pm_notifier(&adev->pm_nb);
4122 	if (r)
4123 		goto failed;
4124 
4125 	return 0;
4126 
4127 release_ras_con:
4128 	if (amdgpu_sriov_vf(adev))
4129 		amdgpu_virt_release_full_gpu(adev, true);
4130 
4131 	/* failed in exclusive mode due to timeout */
4132 	if (amdgpu_sriov_vf(adev) &&
4133 		!amdgpu_sriov_runtime(adev) &&
4134 		amdgpu_virt_mmio_blocked(adev) &&
4135 		!amdgpu_virt_wait_reset(adev)) {
4136 		dev_err(adev->dev, "VF exclusive mode timeout\n");
4137 		/* Don't send request since VF is inactive. */
4138 		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
4139 		adev->virt.ops = NULL;
4140 		r = -EAGAIN;
4141 	}
4142 	amdgpu_release_ras_context(adev);
4143 
4144 failed:
4145 	amdgpu_vf_error_trans_all(adev);
4146 
4147 	return r;
4148 }
4149 
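/*
 * Tear down all CPU mappings of the device: user-space mmaps, the doorbell
 * BAR, the register BAR and the visible VRAM aperture. Used when the
 * device has been physically removed.
 */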
4150 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
4151 {
4153 	/* Clear all CPU mappings pointing to this device */
4154 	unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
4155 
4156 	/* Unmap all mapped bars - Doorbell, registers and VRAM */
4157 	amdgpu_doorbell_fini(adev);
4158 
4159 	iounmap(adev->rmmio);
4160 	adev->rmmio = NULL;
4161 	if (adev->mman.aper_base_kaddr)
4162 		iounmap(adev->mman.aper_base_kaddr);
4163 	adev->mman.aper_base_kaddr = NULL;
4164 
4165 	/* Memory manager related */
4166 	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
4167 		arch_phys_wc_del(adev->gmc.vram_mtrr);
4168 		arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
4169 	}
4170 }
4171 
4172 /**
4173  * amdgpu_device_fini_hw - tear down the driver
4174  *
4175  * @adev: amdgpu_device pointer
4176  *
4177  * Tear down the driver info (all asics).
4178  * Called at driver shutdown.
4179  */
4180 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
4181 {
4182 	dev_info(adev->dev, "finishing device.\n");
4183 	flush_delayed_work(&adev->delayed_init_work);
4184 
4185 	if (adev->mman.initialized)
4186 		drain_workqueue(adev->mman.bdev.wq);
4187 	adev->shutdown = true;
4188 
4189 	unregister_pm_notifier(&adev->pm_nb);
4190 
4191 	/* make sure the IB test has finished before entering exclusive mode
4192 	 * to avoid preemption during the IB test
4193 	 */
4194 	if (amdgpu_sriov_vf(adev)) {
4195 		amdgpu_virt_request_full_gpu(adev, false);
4196 		amdgpu_virt_fini_data_exchange(adev);
4197 	}
4198 
4199 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
4200 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
4201 
4202 	/* disable all interrupts */
4203 	amdgpu_irq_disable_all(adev);
4204 	if (adev->mode_info.mode_config_initialized) {
4205 		if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
4206 			drm_helper_force_disable_all(adev_to_drm(adev));
4207 		else
4208 			drm_atomic_helper_shutdown(adev_to_drm(adev));
4209 	}
4210 	amdgpu_fence_driver_hw_fini(adev);
4211 
4212 	amdgpu_device_sys_interface_fini(adev);
4213 
4214 	/* disable ras feature must before hw fini */
4215 	amdgpu_ras_pre_fini(adev);
4216 
4217 	amdgpu_ttm_set_buffer_funcs_status(adev, false);
4218 
4219 	/*
4220 	 * If the device went through surprise hotplug, we need to destroy the
4221 	 * KFD topology before ip_fini_early to prevent kfd locking refcount
4222 	 * issues caused by calling amdgpu_amdkfd_suspend().
4223 	 */
4224 	if (pci_dev_is_disconnected(adev->pdev))
4225 		amdgpu_amdkfd_device_fini_sw(adev);
4226 
4227 	amdgpu_coredump_fini(adev);
4228 	amdgpu_device_ip_fini_early(adev);
4229 
4230 	amdgpu_irq_fini_hw(adev);
4231 
4232 	if (adev->mman.initialized)
4233 		ttm_device_clear_dma_mappings(&adev->mman.bdev);
4234 
4235 	amdgpu_gart_dummy_page_fini(adev);
4236 
4237 	if (pci_dev_is_disconnected(adev->pdev))
4238 		amdgpu_device_unmap_mmio(adev);
4239 
4240 }
4241 
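/**
 * amdgpu_device_fini_sw - tear down the driver software state
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the software state of the driver (all asics).
 * Called at driver shutdown, after amdgpu_device_fini_hw.
 */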
4242 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4243 {
4244 	int i, idx;
4245 	bool px;
4246 
4247 	amdgpu_device_ip_fini(adev);
4248 	amdgpu_fence_driver_sw_fini(adev);
4249 	amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
4250 	adev->accel_working = false;
4251 	dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4252 	for (i = 0; i < MAX_XCP; ++i) {
4253 		dma_fence_put(adev->isolation[i].spearhead);
4254 		amdgpu_sync_free(&adev->isolation[i].active);
4255 		amdgpu_sync_free(&adev->isolation[i].prev);
4256 	}
4257 
4258 	amdgpu_reset_fini(adev);
4259 
4260 	/* free i2c buses */
4261 	amdgpu_i2c_fini(adev);
4262 
4263 	if (adev->bios) {
4264 		if (amdgpu_emu_mode != 1)
4265 			amdgpu_atombios_fini(adev);
4266 		amdgpu_bios_release(adev);
4267 	}
4268 
4269 	kfree(adev->fru_info);
4270 	adev->fru_info = NULL;
4271 
4272 	kfree(adev->xcp_mgr);
4273 	adev->xcp_mgr = NULL;
4274 
4275 	px = amdgpu_device_supports_px(adev);
4276 
4277 	if (px || (!dev_is_removable(&adev->pdev->dev) &&
4278 				apple_gmux_detect(NULL, NULL)))
4279 		vga_switcheroo_unregister_client(adev->pdev);
4280 
4281 	if (px)
4282 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
4283 
4284 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4285 		vga_client_unregister(adev->pdev);
4286 
4287 	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4288 
4289 		iounmap(adev->rmmio);
4290 		adev->rmmio = NULL;
4291 		drm_dev_exit(idx);
4292 	}
4293 
4294 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
4295 		amdgpu_pmu_fini(adev);
4296 	if (adev->discovery.bin)
4297 		amdgpu_discovery_fini(adev);
4298 
4299 	amdgpu_reset_put_reset_domain(adev->reset_domain);
4300 	adev->reset_domain = NULL;
4301 
4302 	kfree(adev->pci_state);
4303 	kfree(adev->pcie_reset_ctx.swds_pcistate);
4304 	kfree(adev->pcie_reset_ctx.swus_pcistate);
4305 }
4306 
4307 /**
4308  * amdgpu_device_evict_resources - evict device resources
4309  * @adev: amdgpu device object
4310  *
4311  * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4312  * of the vram memory type. Mainly used for evicting device resources
4313  * at suspend time.
4314  *
4315  */
4316 static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
4317 {
4318 	int ret;
4319 
4320 	/* No need to evict vram on APUs unless going to S4 */
4321 	if (!adev->in_s4 && (adev->flags & AMD_IS_APU))
4322 		return 0;
4323 
4324 	/* No need to evict when going to S5 through S4 callbacks */
4325 	if (system_state == SYSTEM_POWER_OFF)
4326 		return 0;
4327 
4328 	ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4329 	if (ret) {
4330 		dev_warn(adev->dev, "evicting device resources failed\n");
4331 		return ret;
4332 	}
4333 
4334 	if (adev->in_s4) {
4335 		ret = ttm_device_prepare_hibernation(&adev->mman.bdev);
4336 		if (ret)
4337 			dev_err(adev->dev, "prepare hibernation failed, %d\n", ret);
4338 	}
4339 	return ret;
4340 }
4341 
4342 /*
4343  * Suspend & resume.
4344  */
4345 /**
4346  * amdgpu_device_pm_notifier - Notification block for Suspend/Hibernate events
4347  * @nb: notifier block
4348  * @mode: suspend mode
4349  * @data: unused
4350  *
4351  * This function is called when the system is about to suspend or hibernate.
4352  * It is used to set the appropriate flags so that eviction can be optimized
4353  * in the pm prepare callback.
4354  */
4355 static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
4356 				     void *data)
4357 {
4358 	struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, pm_nb);
4359 
4360 	switch (mode) {
4361 	case PM_HIBERNATION_PREPARE:
4362 		adev->in_s4 = true;
4363 		break;
4364 	case PM_POST_HIBERNATION:
4365 		adev->in_s4 = false;
4366 		break;
4367 	}
4368 
4369 	return NOTIFY_DONE;
4370 }
4371 
4372 /**
4373  * amdgpu_device_prepare - prepare for device suspend
4374  *
4375  * @dev: drm dev pointer
4376  *
4377  * Prepare to put the hw in the suspend state (all asics).
4378  * Returns 0 for success or an error on failure.
4379  * Called at driver suspend.
4380  */
4381 int amdgpu_device_prepare(struct drm_device *dev)
4382 {
4383 	struct amdgpu_device *adev = drm_to_adev(dev);
4384 	int i, r;
4385 
4386 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4387 		return 0;
4388 
4389 	/* Evict the majority of BOs before starting suspend sequence */
4390 	r = amdgpu_device_evict_resources(adev);
4391 	if (r)
4392 		return r;
4393 
4394 	flush_delayed_work(&adev->gfx.gfx_off_delay_work);
4395 
4396 	for (i = 0; i < adev->num_ip_blocks; i++) {
4397 		if (!adev->ip_blocks[i].status.valid)
4398 			continue;
4399 		if (!adev->ip_blocks[i].version->funcs->prepare_suspend)
4400 			continue;
4401 		r = adev->ip_blocks[i].version->funcs->prepare_suspend(&adev->ip_blocks[i]);
4402 		if (r)
4403 			return r;
4404 	}
4405 
4406 	return 0;
4407 }
4408 
4409 /**
4410  * amdgpu_device_complete - complete power state transition
4411  *
4412  * @dev: drm dev pointer
4413  *
4414  * Undo the changes from amdgpu_device_prepare. This will be
4415  * called on all resume transitions, including those that failed.
4416  */
4417 void amdgpu_device_complete(struct drm_device *dev)
4418 {
4419 	struct amdgpu_device *adev = drm_to_adev(dev);
4420 	int i;
4421 
4422 	for (i = 0; i < adev->num_ip_blocks; i++) {
4423 		if (!adev->ip_blocks[i].status.valid)
4424 			continue;
4425 		if (!adev->ip_blocks[i].version->funcs->complete)
4426 			continue;
4427 		adev->ip_blocks[i].version->funcs->complete(&adev->ip_blocks[i]);
4428 	}
4429 }
4430 
4431 /**
4432  * amdgpu_device_suspend - initiate device suspend
4433  *
4434  * @dev: drm dev pointer
4435  * @notify_clients: notify in-kernel DRM clients
4436  *
4437  * Puts the hw in the suspend state (all asics).
4438  * Returns 0 for success or an error on failure.
4439  * Called at driver suspend.
4440  */
4441 int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
4442 {
4443 	struct amdgpu_device *adev = drm_to_adev(dev);
4444 	int r, rec;
4445 
4446 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4447 		return 0;
4448 
4449 	adev->in_suspend = true;
4450 
4451 	if (amdgpu_sriov_vf(adev)) {
4452 		if (!adev->in_runpm)
4453 			amdgpu_amdkfd_suspend_process(adev);
4454 		amdgpu_virt_fini_data_exchange(adev);
4455 		r = amdgpu_virt_request_full_gpu(adev, false);
4456 		if (r)
4457 			return r;
4458 	}
4459 
4460 	r = amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D3);
4461 	if (r)
4462 		goto unwind_sriov;
4463 
4464 	if (notify_clients)
4465 		drm_client_dev_suspend(adev_to_drm(adev));
4466 
4467 	cancel_delayed_work_sync(&adev->delayed_init_work);
4468 
4469 	amdgpu_ras_suspend(adev);
4470 
4471 	r = amdgpu_device_ip_suspend_phase1(adev);
4472 	if (r)
4473 		goto unwind_smartshift;
4474 
4475 	amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
4476 	r = amdgpu_userq_suspend(adev);
4477 	if (r)
4478 		goto unwind_ip_phase1;
4479 
4480 	r = amdgpu_device_evict_resources(adev);
4481 	if (r)
4482 		goto unwind_userq;
4483 
4484 	amdgpu_ttm_set_buffer_funcs_status(adev, false);
4485 
4486 	amdgpu_fence_driver_hw_fini(adev);
4487 
4488 	r = amdgpu_device_ip_suspend_phase2(adev);
4489 	if (r)
4490 		goto unwind_evict;
4491 
4492 	if (amdgpu_sriov_vf(adev))
4493 		amdgpu_virt_release_full_gpu(adev, false);
4494 
4495 	return 0;
4496 
4497 unwind_evict:
4498 	amdgpu_ttm_set_buffer_funcs_status(adev, true);
4499 	amdgpu_fence_driver_hw_init(adev);
4500 
4501 unwind_userq:
4502 	rec = amdgpu_userq_resume(adev);
4503 	if (rec) {
4504 		dev_warn(adev->dev, "failed to re-initialize user queues: %d\n", rec);
4505 		return r;
4506 	}
4507 	rec = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
4508 	if (rec) {
4509 		dev_warn(adev->dev, "failed to re-initialize kfd: %d\n", rec);
4510 		return r;
4511 	}
4512 
4513 unwind_ip_phase1:
4514 	/* suspend phase 1 = resume phase 3 */
4515 	rec = amdgpu_device_ip_resume_phase3(adev);
4516 	if (rec) {
4517 		dev_warn(adev->dev, "failed to re-initialize IPs phase1: %d\n", rec);
4518 		return r;
4519 	}
4520 
4521 unwind_smartshift:
4522 	rec = amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D0);
4523 	if (rec) {
4524 		dev_warn(adev->dev, "failed to re-update smart shift: %d\n", rec);
4525 		return r;
4526 	}
4527 
4528 	if (notify_clients)
4529 		drm_client_dev_resume(adev_to_drm(adev));
4530 
4531 	amdgpu_ras_resume(adev);
4532 
4533 unwind_sriov:
4534 	if (amdgpu_sriov_vf(adev)) {
4535 		rec = amdgpu_virt_request_full_gpu(adev, true);
4536 		if (rec) {
4537 			dev_warn(adev->dev, "failed to reinitialize sriov: %d\n", rec);
4538 			return r;
4539 		}
4540 	}
4541 
4542 	adev->in_suspend = adev->in_s0ix = adev->in_s3 = false;
4543 
4544 	return r;
4545 }
4546 
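/*
 * Restore VF state after the VM hosting this virtual function has resumed,
 * possibly on a different physical node: restore MSIX, re-query the XGMI
 * physical node id and recompute the VRAM base offset.
 */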
4547 static inline int amdgpu_virt_resume(struct amdgpu_device *adev)
4548 {
4549 	int r;
4550 	unsigned int prev_physical_node_id = adev->gmc.xgmi.physical_node_id;
4551 
4552 	/* During VM resume, QEMU programming of VF MSIX table (register GFXMSIX_VECT0_ADDR_LO)
4553 	 * may not work. The access could be blocked by nBIF protection as VF isn't in
4554 	 * exclusive access mode. Exclusive access is enabled now, disable/enable MSIX
4555 	 * so that QEMU reprograms MSIX table.
4556 	 */
4557 	amdgpu_restore_msix(adev);
4558 
4559 	r = adev->gfxhub.funcs->get_xgmi_info(adev);
4560 	if (r)
4561 		return r;
4562 
4563 	dev_info(adev->dev, "xgmi node, old id %d, new id %d\n",
4564 		prev_physical_node_id, adev->gmc.xgmi.physical_node_id);
4565 
4566 	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
4567 	adev->vm_manager.vram_base_offset +=
4568 		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
4569 
4570 	return 0;
4571 }
4572 
4573 /**
4574  * amdgpu_device_resume - initiate device resume
4575  *
4576  * @dev: drm dev pointer
4577  * @notify_clients: notify in-kernel DRM clients
4578  *
4579  * Bring the hw back to operating state (all asics).
4580  * Returns 0 for success or an error on failure.
4581  * Called at driver resume.
4582  */
4583 int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
4584 {
4585 	struct amdgpu_device *adev = drm_to_adev(dev);
4586 	int r = 0;
4587 
4588 	if (amdgpu_sriov_vf(adev)) {
4589 		r = amdgpu_virt_request_full_gpu(adev, true);
4590 		if (r)
4591 			return r;
4592 	}
4593 
4594 	if (amdgpu_virt_xgmi_migrate_enabled(adev)) {
4595 		r = amdgpu_virt_resume(adev);
4596 		if (r)
4597 			goto exit;
4598 	}
4599 
4600 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4601 		return 0;
4602 
4603 	if (adev->in_s0ix)
4604 		amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4605 
4606 	/* post card */
4607 	if (amdgpu_device_need_post(adev)) {
4608 		r = amdgpu_device_asic_init(adev);
4609 		if (r)
4610 			dev_err(adev->dev, "amdgpu asic init failed\n");
4611 	}
4612 
4613 	r = amdgpu_device_ip_resume(adev);
4614 
4615 	if (r) {
4616 		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4617 		goto exit;
4618 	}
4619 
4620 	r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
4621 	if (r)
4622 		goto exit;
4623 
4624 	r = amdgpu_userq_resume(adev);
4625 	if (r)
4626 		goto exit;
4627 
4628 	r = amdgpu_device_ip_late_init(adev);
4629 	if (r)
4630 		goto exit;
4631 
4632 	queue_delayed_work(system_dfl_wq, &adev->delayed_init_work,
4633 			   msecs_to_jiffies(AMDGPU_RESUME_MS));
4634 exit:
4635 	if (amdgpu_sriov_vf(adev)) {
4636 		amdgpu_virt_init_data_exchange(adev);
4637 		amdgpu_virt_release_full_gpu(adev, true);
4638 
4639 		if (!r && !adev->in_runpm)
4640 			r = amdgpu_amdkfd_resume_process(adev);
4641 	}
4642 
4643 	if (r)
4644 		return r;
4645 
4646 	/* Make sure IB tests flushed */
4647 	flush_delayed_work(&adev->delayed_init_work);
4648 
4649 	if (notify_clients)
4650 		drm_client_dev_resume(adev_to_drm(adev));
4651 
4652 	amdgpu_ras_resume(adev);
4653 
4654 	if (adev->mode_info.num_crtc) {
4655 		/*
4656 		 * Most of the connector probing functions try to acquire runtime pm
4657 		 * refs to ensure that the GPU is powered on when connector polling is
4658 		 * performed. Since we're calling this from a runtime PM callback,
4659 		 * trying to acquire rpm refs will cause us to deadlock.
4660 		 *
4661 		 * Since we're guaranteed to be holding the rpm lock, it's safe to
4662 		 * temporarily disable the rpm helpers so this doesn't deadlock us.
4663 		 */
4664 #ifdef CONFIG_PM
4665 		dev->dev->power.disable_depth++;
4666 #endif
4667 		if (!adev->dc_enabled)
4668 			drm_helper_hpd_irq_event(dev);
4669 		else
4670 			drm_kms_helper_hotplug_event(dev);
4671 #ifdef CONFIG_PM
4672 		dev->dev->power.disable_depth--;
4673 #endif
4674 	}
4675 
4676 	amdgpu_vram_mgr_clear_reset_blocks(adev);
4677 	adev->in_suspend = false;
4678 
4679 	if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D0))
4680 		dev_warn(adev->dev, "smart shift update failed\n");
4681 
4682 	return 0;
4683 }
4684 
4685 /**
4686  * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4687  *
4688  * @adev: amdgpu_device pointer
4689  *
4690  * The list of all the hardware IPs that make up the asic is walked and
4691  * the check_soft_reset callbacks are run.  check_soft_reset determines
4692  * if the asic is still hung or not.
4693  * Returns true if any of the IPs are still in a hung state, false if not.
4694  */
4695 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4696 {
4697 	int i;
4698 	bool asic_hang = false;
4699 
4700 	if (amdgpu_sriov_vf(adev))
4701 		return true;
4702 
4703 	if (amdgpu_asic_need_full_reset(adev))
4704 		return true;
4705 
4706 	for (i = 0; i < adev->num_ip_blocks; i++) {
4707 		if (!adev->ip_blocks[i].status.valid)
4708 			continue;
4709 		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4710 			adev->ip_blocks[i].status.hang =
4711 				adev->ip_blocks[i].version->funcs->check_soft_reset(
4712 					&adev->ip_blocks[i]);
4713 		if (adev->ip_blocks[i].status.hang) {
4714 			dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4715 			asic_hang = true;
4716 		}
4717 	}
4718 	return asic_hang;
4719 }
4720 
4721 /**
4722  * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4723  *
4724  * @adev: amdgpu_device pointer
4725  *
4726  * The list of all the hardware IPs that make up the asic is walked and the
4727  * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
4728  * handles any IP specific hardware or software state changes that are
4729  * necessary for a soft reset to succeed.
4730  * Returns 0 on success, negative error code on failure.
4731  */
4732 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4733 {
4734 	int i, r = 0;
4735 
4736 	for (i = 0; i < adev->num_ip_blocks; i++) {
4737 		if (!adev->ip_blocks[i].status.valid)
4738 			continue;
4739 		if (adev->ip_blocks[i].status.hang &&
4740 		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4741 			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(&adev->ip_blocks[i]);
4742 			if (r)
4743 				return r;
4744 		}
4745 	}
4746 
4747 	return 0;
4748 }
4749 
4750 /**
4751  * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4752  *
4753  * @adev: amdgpu_device pointer
4754  *
4755  * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
4756  * reset is necessary to recover.
4757  * Returns true if a full asic reset is required, false if not.
4758  */
4759 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4760 {
4761 	int i;
4762 
4763 	if (amdgpu_asic_need_full_reset(adev))
4764 		return true;
4765 
4766 	for (i = 0; i < adev->num_ip_blocks; i++) {
4767 		if (!adev->ip_blocks[i].status.valid)
4768 			continue;
4769 		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4770 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4771 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4772 		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4773 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4774 			if (adev->ip_blocks[i].status.hang) {
4775 				dev_info(adev->dev, "Some blocks need a full reset!\n");
4776 				return true;
4777 			}
4778 		}
4779 	}
4780 	return false;
4781 }
4782 
4783 /**
4784  * amdgpu_device_ip_soft_reset - do a soft reset
4785  *
4786  * @adev: amdgpu_device pointer
4787  *
4788  * The list of all the hardware IPs that make up the asic is walked and the
4789  * soft_reset callbacks are run if the block is hung.  soft_reset handles any
4790  * IP specific hardware or software state changes that are necessary to soft
4791  * reset the IP.
4792  * Returns 0 on success, negative error code on failure.
4793  */
4794 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4795 {
4796 	int i, r = 0;
4797 
4798 	for (i = 0; i < adev->num_ip_blocks; i++) {
4799 		if (!adev->ip_blocks[i].status.valid)
4800 			continue;
4801 		if (adev->ip_blocks[i].status.hang &&
4802 		    adev->ip_blocks[i].version->funcs->soft_reset) {
4803 			r = adev->ip_blocks[i].version->funcs->soft_reset(&adev->ip_blocks[i]);
4804 			if (r)
4805 				return r;
4806 		}
4807 	}
4808 
4809 	return 0;
4810 }
4811 
4812 /**
4813  * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4814  *
4815  * @adev: amdgpu_device pointer
4816  *
4817  * The list of all the hardware IPs that make up the asic is walked and the
4818  * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
4819  * handles any IP specific hardware or software state changes that are
4820  * necessary after the IP has been soft reset.
4821  * Returns 0 on success, negative error code on failure.
4822  */
4823 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4824 {
4825 	int i, r = 0;
4826 
4827 	for (i = 0; i < adev->num_ip_blocks; i++) {
4828 		if (!adev->ip_blocks[i].status.valid)
4829 			continue;
4830 		if (adev->ip_blocks[i].status.hang &&
4831 		    adev->ip_blocks[i].version->funcs->post_soft_reset)
4832 			r = adev->ip_blocks[i].version->funcs->post_soft_reset(&adev->ip_blocks[i]);
4833 		if (r)
4834 			return r;
4835 	}
4836 
4837 	return 0;
4838 }
4839 
4840 /**
4841  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4842  *
4843  * @adev: amdgpu_device pointer
4844  * @reset_context: amdgpu reset context pointer
4845  *
4846  * Do a VF FLR and reinitialize the ASIC.
4847  * Returns 0 on success, otherwise failure.
4848  */
4849 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4850 				     struct amdgpu_reset_context *reset_context)
4851 {
4852 	int r;
4853 	struct amdgpu_hive_info *hive = NULL;
4854 
4855 	if (test_bit(AMDGPU_HOST_FLR, &reset_context->flags)) {
4856 		if (!amdgpu_ras_get_fed_status(adev))
4857 			amdgpu_virt_ready_to_reset(adev);
4858 		amdgpu_virt_wait_reset(adev);
4859 		clear_bit(AMDGPU_HOST_FLR, &reset_context->flags);
4860 		r = amdgpu_virt_request_full_gpu(adev, true);
4861 	} else {
4862 		r = amdgpu_virt_reset_gpu(adev);
4863 	}
4864 	if (r)
4865 		return r;
4866 
4867 	amdgpu_ras_clear_err_state(adev);
4868 	amdgpu_irq_gpu_reset_resume_helper(adev);
4869 
4870 	/* some SW cleanup the VF needs to do before recovery */
4871 	amdgpu_virt_post_reset(adev);
4872 
4873 	/* Resume IP prior to SMC */
4874 	r = amdgpu_device_ip_reinit_early_sriov(adev);
4875 	if (r)
4876 		return r;
4877 
4878 	amdgpu_virt_init_data_exchange(adev);
4879 
4880 	r = amdgpu_device_fw_loading(adev);
4881 	if (r)
4882 		return r;
4883 
4884 	/* now we are okay to resume SMC/CP/SDMA */
4885 	r = amdgpu_device_ip_reinit_late_sriov(adev);
4886 	if (r)
4887 		return r;
4888 
4889 	hive = amdgpu_get_xgmi_hive(adev);
4890 	/* Update PSP FW topology after reset */
4891 	if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4892 		r = amdgpu_xgmi_update_topology(hive, adev);
4893 	if (hive)
4894 		amdgpu_put_xgmi_hive(hive);
4895 	if (r)
4896 		return r;
4897 
4898 	r = amdgpu_ib_ring_tests(adev);
4899 	if (r)
4900 		return r;
4901 
4902 	if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST)
4903 		amdgpu_inc_vram_lost(adev);
4904 
4905 	/* needs to be called during full access, so we can't do it later like
4906 	 * bare-metal does.
4907 	 */
4908 	amdgpu_amdkfd_post_reset(adev);
4909 	amdgpu_virt_release_full_gpu(adev, true);
4910 
4911 	/* Aldebaran and gfx_11_0_3 support RAS in SR-IOV, so RAS needs to be resumed during reset */
4912 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
4913 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
4914 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
4915 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0) ||
4916 	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3))
4917 		amdgpu_ras_resume(adev);
4918 
4919 	amdgpu_virt_ras_telemetry_post_reset(adev);
4920 
4921 	return 0;
4922 }
4923 
4924 /**
4925  * amdgpu_device_has_job_running - check if there is any unfinished job
4926  *
4927  * @adev: amdgpu_device pointer
4928  *
4929  * Check if there is any job running on the device when the guest driver
4930  * receives an FLR notification from the host driver. If jobs are still
4931  * running, the guest driver will not respond to the FLR reset. Instead, it
4932  * lets the job hit the timeout and then issues the reset request.
4933  */
4934 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4935 {
4936 	int i;
4937 
4938 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4939 		struct amdgpu_ring *ring = adev->rings[i];
4940 
4941 		if (!amdgpu_ring_sched_ready(ring))
4942 			continue;
4943 
4944 		if (amdgpu_fence_count_emitted(ring))
4945 			return true;
4946 	}
4947 	return false;
4948 }
4949 
4950 /**
4951  * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4952  *
4953  * @adev: amdgpu_device pointer
4954  *
4955  * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4956  * a hung GPU.
4957  */
4958 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4959 {
4961 	if (amdgpu_gpu_recovery == 0)
4962 		goto disabled;
4963 
4964 	/* Skip soft reset check in fatal error mode */
4965 	if (!amdgpu_ras_is_poison_mode_supported(adev))
4966 		return true;
4967 
4968 	if (amdgpu_sriov_vf(adev))
4969 		return true;
4970 
4971 	if (amdgpu_gpu_recovery == -1) {
4972 		switch (adev->asic_type) {
4973 #ifdef CONFIG_DRM_AMDGPU_SI
4974 		case CHIP_VERDE:
4975 		case CHIP_TAHITI:
4976 		case CHIP_PITCAIRN:
4977 		case CHIP_OLAND:
4978 		case CHIP_HAINAN:
4979 #endif
4980 #ifdef CONFIG_DRM_AMDGPU_CIK
4981 		case CHIP_KAVERI:
4982 		case CHIP_KABINI:
4983 		case CHIP_MULLINS:
4984 #endif
4985 		case CHIP_CARRIZO:
4986 		case CHIP_STONEY:
4987 		case CHIP_CYAN_SKILLFISH:
4988 			goto disabled;
4989 		default:
4990 			break;
4991 		}
4992 	}
4993 
4994 	return true;
4995 
4996 disabled:
4997 	dev_info(adev->dev, "GPU recovery disabled.\n");
4998 	return false;
4999 }
5000 
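/**
 * amdgpu_device_mode1_reset - perform a full ASIC (mode1) reset
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the whole ASIC through either the SMU or the PSP, then restores
 * the cached PCI config space and waits for the ASIC to come back.
 * Returns 0 for success or an error on failure.
 */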
5001 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
5002 {
5003 	u32 i;
5004 	int ret = 0;
5005 
5006 	if (adev->bios)
5007 		amdgpu_atombios_scratch_regs_engine_hung(adev, true);
5008 
5009 	dev_info(adev->dev, "GPU mode1 reset\n");
5010 
5011 	/* Cache the state before bus master disable. The saved config space
5012 	 * values are used in other cases like restore after mode-2 reset.
5013 	 */
5014 	amdgpu_device_cache_pci_state(adev->pdev);
5015 
5016 	/* disable BM */
5017 	pci_clear_master(adev->pdev);
5018 
5019 	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
5020 		dev_info(adev->dev, "GPU smu mode1 reset\n");
5021 		ret = amdgpu_dpm_mode1_reset(adev);
5022 	} else {
5023 		dev_info(adev->dev, "GPU psp mode1 reset\n");
5024 		ret = psp_gpu_reset(adev);
5025 	}
5026 
5027 	if (ret)
5028 		goto mode1_reset_failed;
5029 
5030 	/* enable mmio access after mode 1 reset completed */
5031 	adev->no_hw_access = false;
5032 
5033 	/* ensure no_hw_access is updated before we access hw */
5034 	smp_mb();
5035 
5036 	amdgpu_device_load_pci_state(adev->pdev);
5037 	ret = amdgpu_psp_wait_for_bootloader(adev);
5038 	if (ret)
5039 		goto mode1_reset_failed;
5040 
5041 	/* wait for asic to come out of reset */
5042 	for (i = 0; i < adev->usec_timeout; i++) {
5043 		u32 memsize = adev->nbio.funcs->get_memsize(adev);
5044 
5045 		if (memsize != 0xffffffff)
5046 			break;
5047 		udelay(1);
5048 	}
5049 
5050 	if (i >= adev->usec_timeout) {
5051 		ret = -ETIMEDOUT;
5052 		goto mode1_reset_failed;
5053 	}
5054 
5055 	if (adev->bios)
5056 		amdgpu_atombios_scratch_regs_engine_hung(adev, false);
5057 
5058 	return 0;
5059 
5060 mode1_reset_failed:
5061 	dev_err(adev->dev, "GPU mode1 reset failed\n");
5062 	return ret;
5063 }
5064 
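/**
 * amdgpu_device_link_reset - perform a link reset
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the link through the SMU, unless a DPC-triggered reset is already
 * in progress, then waits for the PSP bootloader.
 * Returns 0 for success or an error on failure.
 */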
5065 int amdgpu_device_link_reset(struct amdgpu_device *adev)
5066 {
5067 	int ret = 0;
5068 
5069 	dev_info(adev->dev, "GPU link reset\n");
5070 
5071 	if (!amdgpu_reset_in_dpc(adev))
5072 		ret = amdgpu_dpm_link_reset(adev);
5073 
5074 	if (ret)
5075 		goto link_reset_failed;
5076 
5077 	ret = amdgpu_psp_wait_for_bootloader(adev);
5078 	if (ret)
5079 		goto link_reset_failed;
5080 
5081 	return 0;
5082 
5083 link_reset_failed:
5084 	dev_err(adev->dev, "GPU link reset failed\n");
5085 	return ret;
5086 }
5087 
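/**
 * amdgpu_device_pre_asic_reset - prepare the device for an ASIC reset
 *
 * @adev: amdgpu_device pointer
 * @reset_context: amdgpu reset context pointer
 *
 * Quiesces the device before a reset: forces completion of in-flight
 * fences, attempts a soft reset of hung IP blocks where possible, and
 * suspends the IP blocks if a full reset turns out to be needed.
 * Returns 0 for success or an error on failure.
 */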
5088 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
5089 				 struct amdgpu_reset_context *reset_context)
5090 {
5091 	int i, r = 0;
5092 	struct amdgpu_job *job = NULL;
5093 	struct amdgpu_device *tmp_adev = reset_context->reset_req_dev;
5094 	bool need_full_reset =
5095 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5096 
5097 	if (reset_context->reset_req_dev == adev)
5098 		job = reset_context->job;
5099 
5100 	if (amdgpu_sriov_vf(adev))
5101 		amdgpu_virt_pre_reset(adev);
5102 
5103 	amdgpu_fence_driver_isr_toggle(adev, true);
5104 
5105 	/* block all schedulers and reset given job's ring */
5106 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5107 		struct amdgpu_ring *ring = adev->rings[i];
5108 
5109 		if (!amdgpu_ring_sched_ready(ring))
5110 			continue;
5111 
5112 		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
5113 		amdgpu_fence_driver_force_completion(ring);
5114 	}
5115 
5116 	amdgpu_fence_driver_isr_toggle(adev, false);
5117 
5118 	if (job && job->vm)
5119 		drm_sched_increase_karma(&job->base);
5120 
5121 	r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
5122 	/* If reset handler not implemented, continue; otherwise return */
5123 	if (r == -EOPNOTSUPP)
5124 		r = 0;
5125 	else
5126 		return r;
5127 
5128 	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
5129 	if (!amdgpu_sriov_vf(adev)) {
5130 
5131 		if (!need_full_reset)
5132 			need_full_reset = amdgpu_device_ip_need_full_reset(adev);
5133 
5134 		if (!need_full_reset && amdgpu_gpu_recovery &&
5135 		    amdgpu_device_ip_check_soft_reset(adev)) {
5136 			amdgpu_device_ip_pre_soft_reset(adev);
5137 			r = amdgpu_device_ip_soft_reset(adev);
5138 			amdgpu_device_ip_post_soft_reset(adev);
5139 			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
5140 				dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
5141 				need_full_reset = true;
5142 			}
5143 		}
5144 
5145 		if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) {
5146 			dev_info(tmp_adev->dev, "Dumping IP State\n");
5147 			/* Trigger ip dump before we reset the asic */
5148 			for (i = 0; i < tmp_adev->num_ip_blocks; i++)
5149 				if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state)
5150 					tmp_adev->ip_blocks[i].version->funcs
5151 						->dump_ip_state((void *)&tmp_adev->ip_blocks[i]);
5152 			dev_info(tmp_adev->dev, "Dumping IP State Completed\n");
5153 		}
5154 
5155 		if (need_full_reset)
5156 			r = amdgpu_device_ip_suspend(adev);
5157 		if (need_full_reset)
5158 			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5159 		else
5160 			clear_bit(AMDGPU_NEED_FULL_RESET,
5161 				  &reset_context->flags);
5162 	}
5163 
5164 	return r;
5165 }
5166 
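/**
 * amdgpu_device_reinit_after_reset - reinitialize devices after a reset
 *
 * @reset_context: amdgpu reset context pointer
 *
 * Walks the list of devices attached to the reset context and brings each
 * one back up: re-posts the card, resumes the IP blocks in phases, reloads
 * firmware and runs the IB ring tests.
 * Returns 0 for success or an error on failure.
 */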
5167 int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
5168 {
5169 	struct list_head *device_list_handle;
5170 	bool full_reset, vram_lost = false;
5171 	struct amdgpu_device *tmp_adev;
5172 	int r, init_level;
5173 
5174 	device_list_handle = reset_context->reset_device_list;
5175 
5176 	if (!device_list_handle)
5177 		return -EINVAL;
5178 
5179 	full_reset = test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5180 
5181 	/*
5182 	 * If this is a reset on init, use the default init level; otherwise
5183 	 * keep the level at recovery level.
5184 	 */
5185 	if (reset_context->method == AMD_RESET_METHOD_ON_INIT)
5186 		init_level = AMDGPU_INIT_LEVEL_DEFAULT;
5187 	else
5188 		init_level = AMDGPU_INIT_LEVEL_RESET_RECOVERY;
5189 
5190 	r = 0;
5191 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5192 		amdgpu_set_init_level(tmp_adev, init_level);
5193 		if (full_reset) {
5194 			/* post card */
5195 			amdgpu_reset_set_dpc_status(tmp_adev, false);
5196 			amdgpu_ras_clear_err_state(tmp_adev);
5197 			r = amdgpu_device_asic_init(tmp_adev);
5198 			if (r) {
5199 				dev_warn(tmp_adev->dev, "asic atom init failed!\n");
5200 			} else {
5201 				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
5202 
5203 				r = amdgpu_device_ip_resume_phase1(tmp_adev);
5204 				if (r)
5205 					goto out;
5206 
5207 				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
5208 
5209 				if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags))
5210 					amdgpu_coredump(tmp_adev, false, vram_lost, reset_context->job);
5211 
5212 				if (vram_lost) {
5213 					dev_info(
5214 						tmp_adev->dev,
5215 						"VRAM is lost due to GPU reset!\n");
5216 					amdgpu_inc_vram_lost(tmp_adev);
5217 				}
5218 
5219 				r = amdgpu_device_fw_loading(tmp_adev);
5220 				if (r)
5221 					return r;
5222 
5223 				r = amdgpu_xcp_restore_partition_mode(
5224 					tmp_adev->xcp_mgr);
5225 				if (r)
5226 					goto out;
5227 
5228 				r = amdgpu_device_ip_resume_phase2(tmp_adev);
5229 				if (r)
5230 					goto out;
5231 
5232 				amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true);
5233 
5234 				r = amdgpu_device_ip_resume_phase3(tmp_adev);
5235 				if (r)
5236 					goto out;
5237 
5238 				if (vram_lost)
5239 					amdgpu_device_fill_reset_magic(tmp_adev);
5240 
5241 				/*
5242 				 * Add this ASIC as tracked, as the reset has
5243 				 * already completed successfully.
5244 				 */
5245 				amdgpu_register_gpu_instance(tmp_adev);
5246 
5247 				if (!reset_context->hive &&
5248 				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5249 					amdgpu_xgmi_add_device(tmp_adev);
5250 
5251 				r = amdgpu_device_ip_late_init(tmp_adev);
5252 				if (r)
5253 					goto out;
5254 
5255 				r = amdgpu_userq_post_reset(tmp_adev, vram_lost);
5256 				if (r)
5257 					goto out;
5258 
5259 				drm_client_dev_resume(adev_to_drm(tmp_adev));
5260 
5261 				/*
5262 				 * The GPU enters a bad state once the number
5263 				 * of faulty pages detected by ECC reaches the
5264 				 * threshold, and RAS recovery is scheduled
5265 				 * next. Add a check here to break recovery if
5266 				 * the bad page threshold has indeed been
5267 				 * exceeded, reminding the user to retire this
5268 				 * GPU or set a bigger bad_page_threshold value
5269 				 * when probing the driver again.
5270 				 */
5271 				if (!amdgpu_ras_is_rma(tmp_adev)) {
5272 					/* must succeed. */
5273 					amdgpu_ras_resume(tmp_adev);
5274 				} else {
5275 					r = -EINVAL;
5276 					goto out;
5277 				}
5278 
5279 				/* Update PSP FW topology after reset */
5280 				if (reset_context->hive &&
5281 				    tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5282 					r = amdgpu_xgmi_update_topology(
5283 						reset_context->hive, tmp_adev);
5284 			}
5285 		}
5286 
5287 out:
5288 		if (!r) {
5289 			/* IP init is complete now, set level as default */
5290 			amdgpu_set_init_level(tmp_adev,
5291 					      AMDGPU_INIT_LEVEL_DEFAULT);
5292 			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
5293 			r = amdgpu_ib_ring_tests(tmp_adev);
5294 			if (r) {
5295 				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
5296 				r = -EAGAIN;
5297 				goto end;
5298 			}
5299 		}
5300 
5301 		if (r)
5302 			tmp_adev->asic_reset_res = r;
5303 	}
5304 
5305 end:
5306 	return r;
5307 }
5308 
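/**
 * amdgpu_do_asic_reset - reset one or more ASICs
 *
 * @device_list_handle: list of devices to reset
 * @reset_context: amdgpu reset context pointer
 *
 * Tries the ASIC-specific reset handler first; if none is implemented,
 * falls back to the default method. XGMI hive members are reset in
 * parallel via their xgmi_reset_work items so that FW link negotiation
 * can complete in time.
 * Returns 0 for success or an error on failure.
 */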
5309 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
5310 			 struct amdgpu_reset_context *reset_context)
5311 {
5312 	struct amdgpu_device *tmp_adev = NULL;
5313 	bool need_full_reset, skip_hw_reset;
5314 	int r = 0;
5315 
5316 	/* Try reset handler method first */
5317 	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5318 				    reset_list);
5319 
5320 	reset_context->reset_device_list = device_list_handle;
5321 	r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
5322 	/* If reset handler not implemented, continue; otherwise return */
5323 	if (r == -EOPNOTSUPP)
5324 		r = 0;
5325 	else
5326 		return r;
5327 
5328 	/* Reset handler not implemented, use the default method */
5329 	need_full_reset =
5330 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5331 	skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
5332 
5333 	/*
5334 	 * ASIC reset has to be done on all XGMI hive nodes ASAP
5335 	 * to allow proper link negotiation in FW (within 1 sec)
5336 	 */
5337 	if (!skip_hw_reset && need_full_reset) {
5338 		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5339 			/* For XGMI run all resets in parallel to speed up the process */
5340 			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5341 				if (!queue_work(system_dfl_wq,
5342 						&tmp_adev->xgmi_reset_work))
5343 					r = -EALREADY;
5344 			} else
5345 				r = amdgpu_asic_reset(tmp_adev);
5346 
5347 			if (r) {
5348 				dev_err(tmp_adev->dev,
5349 					"ASIC reset failed with error %d for drm dev %s\n",
5350 					r, adev_to_drm(tmp_adev)->unique);
5351 				goto out;
5352 			}
5353 		}
5354 
5355 		/* For XGMI wait for all resets to complete before proceeding */
5356 		if (!r) {
5357 			list_for_each_entry(tmp_adev, device_list_handle,
5358 					    reset_list) {
5359 				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
5360 					flush_work(&tmp_adev->xgmi_reset_work);
5361 					r = tmp_adev->asic_reset_res;
5362 					if (r)
5363 						break;
5364 				}
5365 			}
5366 		}
5367 	}
5368 
5369 	if (!r && amdgpu_ras_intr_triggered()) {
5370 		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5371 			amdgpu_ras_reset_error_count(tmp_adev,
5372 						     AMDGPU_RAS_BLOCK__MMHUB);
5373 		}
5374 
5375 		amdgpu_ras_intr_cleared();
5376 	}
5377 
5378 	r = amdgpu_device_reinit_after_reset(reset_context);
5379 	if (r == -EAGAIN)
5380 		set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5381 	else
5382 		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5383 
5384 out:
5385 	return r;
5386 }
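
/*
 * Usage sketch (illustrative only, not part of the driver): callers are
 * expected to populate an amdgpu_reset_context and a device list before
 * invoking amdgpu_do_asic_reset(). The flag and field names below mirror
 * their use elsewhere in this file; the reset-domain locking shown in
 * amdgpu_device_gpu_recover() is omitted for brevity.
 *
 *	struct amdgpu_reset_context reset_context;
 *	struct list_head device_list;
 *	int r;
 *
 *	memset(&reset_context, 0, sizeof(reset_context));
 *	INIT_LIST_HEAD(&device_list);
 *	list_add_tail(&adev->reset_list, &device_list);
 *
 *	reset_context.method = AMD_RESET_METHOD_NONE;
 *	reset_context.reset_req_dev = adev;
 *	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
 *
 *	r = amdgpu_do_asic_reset(&device_list, &reset_context);
 */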
5387 
5388 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
5389 {
5390 
5391 	switch (amdgpu_asic_reset_method(adev)) {
5392 	case AMD_RESET_METHOD_MODE1:
5393 	case AMD_RESET_METHOD_LINK:
5394 		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
5395 		break;
5396 	case AMD_RESET_METHOD_MODE2:
5397 		adev->mp1_state = PP_MP1_STATE_RESET;
5398 		break;
5399 	default:
5400 		adev->mp1_state = PP_MP1_STATE_NONE;
5401 		break;
5402 	}
5403 }
5404 
5405 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
5406 {
5407 	amdgpu_vf_error_trans_all(adev);
5408 	adev->mp1_state = PP_MP1_STATE_NONE;
5409 }
5410 
5411 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5412 {
5413 	struct pci_dev *p = NULL;
5414 
5415 	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5416 			adev->pdev->bus->number, 1);
5417 	if (p) {
5418 		pm_runtime_enable(&(p->dev));
5419 		pm_runtime_resume(&(p->dev));
5420 	}
5421 
5422 	pci_dev_put(p);
5423 }
5424 
5425 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5426 {
5427 	enum amd_reset_method reset_method;
5428 	struct pci_dev *p = NULL;
5429 	u64 expires;
5430 
5431 	/*
5432 	 * For now, only BACO and mode1 reset are confirmed
5433 	 * to suffer the audio issue if not properly suspended.
5434 	 */
5435 	reset_method = amdgpu_asic_reset_method(adev);
5436 	if ((reset_method != AMD_RESET_METHOD_BACO) &&
5437 	     (reset_method != AMD_RESET_METHOD_MODE1))
5438 		return -EINVAL;
5439 
5440 	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5441 			adev->pdev->bus->number, 1);
5442 	if (!p)
5443 		return -ENODEV;
5444 
5445 	expires = pm_runtime_autosuspend_expiration(&(p->dev));
5446 	if (!expires)
5447 		/*
5448 		 * If we cannot get the audio device autosuspend delay,
5449 		 * a fixed 4s interval is used. Since 3s is the audio
5450 		 * controller's default autosuspend delay, the 4s used
5451 		 * here is guaranteed to cover it.
5452 		 */
5453 		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5454 
5455 	while (!pm_runtime_status_suspended(&(p->dev))) {
5456 		if (!pm_runtime_suspend(&(p->dev)))
5457 			break;
5458 
5459 		if (expires < ktime_get_mono_fast_ns()) {
5460 			dev_warn(adev->dev, "failed to suspend display audio\n");
5461 			pci_dev_put(p);
5462 			/* TODO: abort the succeeding gpu reset? */
5463 			return -ETIMEDOUT;
5464 		}
5465 	}
5466 
5467 	pm_runtime_disable(&(p->dev));
5468 
5469 	pci_dev_put(p);
5470 	return 0;
5471 }
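
/*
 * Illustrative pairing, mirroring amdgpu_device_halt_activities() and
 * amdgpu_device_gpu_resume() below: the audio function is suspended before
 * the reset and resumed afterwards only if the suspend succeeded.
 *
 *	if (!amdgpu_device_suspend_display_audio(tmp_adev))
 *		tmp_adev->pcie_reset_ctx.audio_suspended = true;
 *	...perform the ASIC reset...
 *	if (tmp_adev->pcie_reset_ctx.audio_suspended)
 *		amdgpu_device_resume_display_audio(tmp_adev);
 */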
5472 
5473 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5474 {
5475 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5476 
5477 #if defined(CONFIG_DEBUG_FS)
5478 	if (!amdgpu_sriov_vf(adev))
5479 		cancel_work(&adev->reset_work);
5480 #endif
5481 	cancel_work(&adev->userq_reset_work);
5482 
5483 	if (adev->kfd.dev)
5484 		cancel_work(&adev->kfd.reset_work);
5485 
5486 	if (amdgpu_sriov_vf(adev))
5487 		cancel_work(&adev->virt.flr_work);
5488 
5489 	if (con && adev->ras_enabled)
5490 		cancel_work(&con->recovery_work);
5491 
5492 }
5493 
5494 static int amdgpu_device_health_check(struct list_head *device_list_handle)
5495 {
5496 	struct amdgpu_device *tmp_adev;
5497 	int ret = 0;
5498 
5499 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5500 		ret |= amdgpu_device_bus_status_check(tmp_adev);
5501 	}
5502 
5503 	return ret;
5504 }
5505 
5506 static void amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
5507 					  struct list_head *device_list,
5508 					  struct amdgpu_hive_info *hive)
5509 {
5510 	struct amdgpu_device *tmp_adev = NULL;
5511 
5512 	/*
5513 	 * Build list of devices to reset.
5514 	 * In case we are in XGMI hive mode, reorder the device list
5515 	 * to put adev in the first position.
5516 	 */
5517 	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
5518 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5519 			list_add_tail(&tmp_adev->reset_list, device_list);
5520 			if (adev->shutdown)
5521 				tmp_adev->shutdown = true;
5522 		}
5523 		if (!list_is_first(&adev->reset_list, device_list))
5524 			list_rotate_to_front(&adev->reset_list, device_list);
5525 	} else {
5526 		list_add_tail(&adev->reset_list, device_list);
5527 	}
5528 }
5529 
5530 static void amdgpu_device_recovery_get_reset_lock(struct amdgpu_device *adev,
5531 						  struct list_head *device_list)
5532 {
5533 	struct amdgpu_device *tmp_adev = NULL;
5534 
5535 	if (list_empty(device_list))
5536 		return;
5537 	tmp_adev =
5538 		list_first_entry(device_list, struct amdgpu_device, reset_list);
5539 	amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5540 }
5541 
5542 static void amdgpu_device_recovery_put_reset_lock(struct amdgpu_device *adev,
5543 						  struct list_head *device_list)
5544 {
5545 	struct amdgpu_device *tmp_adev = NULL;
5546 
5547 	if (list_empty(device_list))
5548 		return;
5549 	tmp_adev =
5550 		list_first_entry(device_list, struct amdgpu_device, reset_list);
5551 	amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5552 }
5553 
5554 static void amdgpu_device_halt_activities(struct amdgpu_device *adev,
5555 					  struct amdgpu_job *job,
5556 					  struct amdgpu_reset_context *reset_context,
5557 					  struct list_head *device_list,
5558 					  struct amdgpu_hive_info *hive,
5559 					  bool need_emergency_restart)
5560 {
5561 	struct amdgpu_device *tmp_adev = NULL;
5562 	int i;
5563 
5564 	/* block all schedulers and reset given job's ring */
5565 	list_for_each_entry(tmp_adev, device_list, reset_list) {
5566 		amdgpu_device_set_mp1_state(tmp_adev);
5567 
5568 		/*
5569 		 * Try to put the audio codec into suspend state
5570 		 * before the gpu reset starts.
5571 		 *
5572 		 * The power domain of the graphics device is shared
5573 		 * with the AZ (audio) power domain, so without this
5574 		 * we may change the audio hardware from behind the
5575 		 * audio driver's back, which triggers audio codec
5576 		 * errors.
5577 		 */
5578 		if (!amdgpu_device_suspend_display_audio(tmp_adev))
5579 			tmp_adev->pcie_reset_ctx.audio_suspended = true;
5580 
5581 		amdgpu_ras_set_error_query_ready(tmp_adev, false);
5582 
5583 		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5584 
5585 		amdgpu_amdkfd_pre_reset(tmp_adev, reset_context);
5586 
5587 		/*
5588 		 * Mark these ASICs to be reset as untracked first,
5589 		 * and add them back after the reset completes.
5590 		 */
5591 		amdgpu_unregister_gpu_instance(tmp_adev);
5592 
5593 		drm_client_dev_suspend(adev_to_drm(tmp_adev));
5594 
5595 		/* disable ras on ALL IPs */
5596 		if (!need_emergency_restart && !amdgpu_reset_in_dpc(adev) &&
5597 		    amdgpu_device_ip_need_full_reset(tmp_adev))
5598 			amdgpu_ras_suspend(tmp_adev);
5599 
5600 		amdgpu_userq_pre_reset(tmp_adev);
5601 
5602 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5603 			struct amdgpu_ring *ring = tmp_adev->rings[i];
5604 
5605 			if (!amdgpu_ring_sched_ready(ring))
5606 				continue;
5607 
5608 			drm_sched_wqueue_stop(&ring->sched);
5609 
5610 			if (need_emergency_restart)
5611 				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5612 		}
5613 		atomic_inc(&tmp_adev->gpu_reset_counter);
5614 	}
5615 }
5616 
5617 static int amdgpu_device_asic_reset(struct amdgpu_device *adev,
5618 			      struct list_head *device_list,
5619 			      struct amdgpu_reset_context *reset_context)
5620 {
5621 	struct amdgpu_device *tmp_adev = NULL;
5622 	int retry_limit = AMDGPU_MAX_RETRY_LIMIT;
5623 	int r = 0;
5624 
5625 retry:	/* Rest of adevs pre asic reset from XGMI hive. */
5626 	list_for_each_entry(tmp_adev, device_list, reset_list) {
5627 		r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5628 		/* TODO: should we stop? */
5629 		if (r) {
5630 			dev_err(tmp_adev->dev, "GPU pre asic reset failed with err %d for drm dev %s\n",
5631 				  r, adev_to_drm(tmp_adev)->unique);
5632 			tmp_adev->asic_reset_res = r;
5633 		}
5634 	}
5635 
5636 	/* Actual ASIC resets if needed. */
5637 	/* Host driver will handle XGMI hive reset for SRIOV */
5638 	if (amdgpu_sriov_vf(adev)) {
5639 
5640 		/* Bail out of reset early */
5641 		if (amdgpu_ras_is_rma(adev))
5642 			return -ENODEV;
5643 
5644 		if (amdgpu_ras_get_fed_status(adev) || amdgpu_virt_rcvd_ras_interrupt(adev)) {
5645 			dev_dbg(adev->dev, "Detected RAS error, wait for FLR completion\n");
5646 			amdgpu_ras_set_fed(adev, true);
5647 			set_bit(AMDGPU_HOST_FLR, &reset_context->flags);
5648 		}
5649 
5650 		r = amdgpu_device_reset_sriov(adev, reset_context);
5651 		if (AMDGPU_RETRY_SRIOV_RESET(r) && (retry_limit--) > 0) {
5652 			amdgpu_virt_release_full_gpu(adev, true);
5653 			goto retry;
5654 		}
5655 		if (r)
5656 			adev->asic_reset_res = r;
5657 	} else {
5658 		r = amdgpu_do_asic_reset(device_list, reset_context);
5659 		if (r == -EAGAIN)
5660 			goto retry;
5661 	}
5662 
5663 	list_for_each_entry(tmp_adev, device_list, reset_list) {
5664 		/*
5665 		 * Drop any pending non-scheduler resets queued before reset is done.
5666 		 * Any reset scheduled after this point would be valid. Scheduler resets
5667 		 * were already dropped during drm_sched_stop and no new ones can come
5668 		 * in before drm_sched_start.
5669 		 */
5670 		amdgpu_device_stop_pending_resets(tmp_adev);
5671 	}
5672 
5673 	return r;
5674 }
5675 
5676 static int amdgpu_device_sched_resume(struct list_head *device_list,
5677 			      struct amdgpu_reset_context *reset_context,
5678 			      bool   job_signaled)
5679 {
5680 	struct amdgpu_device *tmp_adev = NULL;
5681 	int i, r = 0;
5682 
5683 	/* Post ASIC reset for all devs. */
5684 	list_for_each_entry(tmp_adev, device_list, reset_list) {
5685 
5686 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5687 			struct amdgpu_ring *ring = tmp_adev->rings[i];
5688 
5689 			if (!amdgpu_ring_sched_ready(ring))
5690 				continue;
5691 
5692 			drm_sched_wqueue_start(&ring->sched);
5693 		}
5694 
5695 		if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
5696 			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5697 
5698 		if (tmp_adev->asic_reset_res) {
5699 			/* bad news, how do we tell it to userspace?
5700 			 * For a RAS error, we should report GPU bad status
5701 			 * instead of reset failure.
5702 			 */
5703 			if (reset_context->src != AMDGPU_RESET_SRC_RAS ||
5704 			    !amdgpu_ras_eeprom_check_err_threshold(tmp_adev))
5705 				dev_info(
5706 					tmp_adev->dev,
5707 					"GPU reset(%d) failed with error %d\n",
5708 					atomic_read(
5709 						&tmp_adev->gpu_reset_counter),
5710 					tmp_adev->asic_reset_res);
5711 			amdgpu_vf_error_put(tmp_adev,
5712 					    AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0,
5713 					    tmp_adev->asic_reset_res);
5714 			if (!r)
5715 				r = tmp_adev->asic_reset_res;
5716 			tmp_adev->asic_reset_res = 0;
5717 		} else {
5718 			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n",
5719 				 atomic_read(&tmp_adev->gpu_reset_counter));
5720 			if (amdgpu_acpi_smart_shift_update(tmp_adev,
5721 							   AMDGPU_SS_DEV_D0))
5722 				dev_warn(tmp_adev->dev,
5723 					 "smart shift update failed\n");
5724 		}
5725 	}
5726 
5727 	return r;
5728 }
5729 
5730 static void amdgpu_device_gpu_resume(struct amdgpu_device *adev,
5731 			      struct list_head *device_list,
5732 			      bool   need_emergency_restart)
5733 {
5734 	struct amdgpu_device *tmp_adev = NULL;
5735 
5736 	list_for_each_entry(tmp_adev, device_list, reset_list) {
5737 		/* unlock kfd: SRIOV would do it separately */
5738 		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5739 			amdgpu_amdkfd_post_reset(tmp_adev);
5740 
5741 		/* kfd_post_reset will do nothing if the kfd device is not
5742 		 * initialized, so bring up kfd here if it was not initialized before
5743 		 */
5744 		if (!tmp_adev->kfd.init_complete)
5745 			amdgpu_amdkfd_device_init(tmp_adev);
5746 
5747 		if (tmp_adev->pcie_reset_ctx.audio_suspended)
5748 			amdgpu_device_resume_display_audio(tmp_adev);
5749 
5750 		amdgpu_device_unset_mp1_state(tmp_adev);
5751 
5752 		amdgpu_ras_set_error_query_ready(tmp_adev, true);
5753 
5754 	}
5755 }
5756 
5757 
5758 /**
5759  * amdgpu_device_gpu_recover - reset the ASIC and recover the scheduler
5760  *
5761  * @adev: amdgpu_device pointer
5762  * @job: the job which triggered the hang
5763  * @reset_context: amdgpu reset context pointer
5764  *
5765  * Attempt to reset the GPU if it has hung (all ASICs), doing either a
5766  * soft reset or a full reset, and reinitialize the ASIC.
5767  * Returns 0 for success or an error on failure.
5768  */
5769 
5770 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5771 			      struct amdgpu_job *job,
5772 			      struct amdgpu_reset_context *reset_context)
5773 {
5774 	struct list_head device_list;
5775 	bool job_signaled = false;
5776 	struct amdgpu_hive_info *hive = NULL;
5777 	int r = 0;
5778 	bool need_emergency_restart = false;
5779 	/* save the pasid here as the job may be freed before the end of the reset */
5780 	int pasid = job ? job->pasid : -EINVAL;
5781 
5782 	/*
5783 	 * If it reaches here because of hang/timeout and a RAS error is
5784 	 * detected at the same time, let RAS recovery take care of it.
5785 	 */
5786 	if (amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY) &&
5787 	    !amdgpu_sriov_vf(adev) &&
5788 	    reset_context->src != AMDGPU_RESET_SRC_RAS) {
5789 		dev_dbg(adev->dev,
5790 			"GPU recovery from source %d yielding to RAS error recovery handling\n",
5791 			reset_context->src);
5792 		return 0;
5793 	}
5794 
5795 	/*
5796 	 * Special case: RAS triggered and full reset isn't supported
5797 	 */
5798 	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5799 
5800 	/*
5801 	 * Flush RAM to disk so that after reboot
5802 	 * the user can read log and see why the system rebooted.
5803 	 */
5804 	if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
5805 		amdgpu_ras_get_context(adev)->reboot) {
5806 		dev_warn(adev->dev, "Emergency reboot.");
5807 
5808 		ksys_sync_helper();
5809 		emergency_restart();
5810 	}
5811 
5812 	dev_info(adev->dev, "GPU %s begin! Source: %d\n",
5813 		 need_emergency_restart ? "jobs stop" : "reset",
5814 		 reset_context->src);
5815 
5816 	if (!amdgpu_sriov_vf(adev))
5817 		hive = amdgpu_get_xgmi_hive(adev);
5818 	if (hive)
5819 		mutex_lock(&hive->hive_lock);
5820 
5821 	reset_context->job = job;
5822 	reset_context->hive = hive;
5823 	INIT_LIST_HEAD(&device_list);
5824 
5825 	amdgpu_device_recovery_prepare(adev, &device_list, hive);
5826 
5827 	if (!amdgpu_sriov_vf(adev)) {
5828 		r = amdgpu_device_health_check(&device_list);
5829 		if (r)
5830 			goto end_reset;
5831 	}
5832 
5833 	/* Cannot be called after locking reset domain */
5834 	amdgpu_ras_pre_reset(adev, &device_list);
5835 
5836 	/* We need to lock reset domain only once both for XGMI and single device */
5837 	amdgpu_device_recovery_get_reset_lock(adev, &device_list);
5838 
5839 	amdgpu_device_halt_activities(adev, job, reset_context, &device_list,
5840 				      hive, need_emergency_restart);
5841 	if (need_emergency_restart)
5842 		goto skip_sched_resume;
5843 	/*
5844 	 * Must check guilty signal here since after this point all old
5845 	 * HW fences are force signaled.
5846 	 *
5847 	 * job->base holds a reference to parent fence
5848 	 */
5849 	if (job && (dma_fence_get_status(&job->hw_fence->base) > 0)) {
5850 		job_signaled = true;
5851 		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5852 		goto skip_hw_reset;
5853 	}
5854 
5855 	r = amdgpu_device_asic_reset(adev, &device_list, reset_context);
5856 	if (r)
5857 		goto reset_unlock;
5858 skip_hw_reset:
5859 	r = amdgpu_device_sched_resume(&device_list, reset_context, job_signaled);
5860 	if (r)
5861 		goto reset_unlock;
5862 skip_sched_resume:
5863 	amdgpu_device_gpu_resume(adev, &device_list, need_emergency_restart);
5864 reset_unlock:
5865 	amdgpu_device_recovery_put_reset_lock(adev, &device_list);
5866 	amdgpu_ras_post_reset(adev, &device_list);
5867 end_reset:
5868 	if (hive) {
5869 		mutex_unlock(&hive->hive_lock);
5870 		amdgpu_put_xgmi_hive(hive);
5871 	}
5872 
5873 	if (r)
5874 		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5875 
5876 	atomic_set(&adev->reset_domain->reset_res, r);
5877 
5878 	if (!r) {
5879 		struct amdgpu_task_info *ti = NULL;
5880 
5881 		/*
5882 		 * The job may already be freed at this point via the sched tdr workqueue so
5883 		 * use the cached pasid.
5884 		 */
5885 		if (pasid >= 0)
5886 			ti = amdgpu_vm_get_task_info_pasid(adev, pasid);
5887 
5888 		drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE,
5889 				     ti ? &ti->task : NULL);
5890 
5891 		amdgpu_vm_put_task_info(ti);
5892 	}
5893 
5894 	return r;
5895 }
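
/*
 * Caller sketch (illustrative): this is roughly how a job timeout handler
 * hands a hung job to the recovery path. AMDGPU_RESET_SRC_JOB is an assumed
 * source value; the other names mirror their use elsewhere in this file.
 *
 *	struct amdgpu_reset_context reset_context;
 *	int r;
 *
 *	memset(&reset_context, 0, sizeof(reset_context));
 *	reset_context.method = AMD_RESET_METHOD_NONE;
 *	reset_context.reset_req_dev = adev;
 *	reset_context.src = AMDGPU_RESET_SRC_JOB;
 *	clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
 *
 *	r = amdgpu_device_gpu_recover(adev, job, &reset_context);
 */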
5896 
5897 /**
5898  * amdgpu_device_partner_bandwidth - find the bandwidth of the appropriate partner
5899  *
5900  * @adev: amdgpu_device pointer
5901  * @speed: pointer to the speed of the link
5902  * @width: pointer to the width of the link
5903  *
5904  * Evaluate the hierarchy to find the speed and bandwidth capabilities of the
5905  * first physical partner to an AMD dGPU.
5906  * This will exclude any virtual switches and links.
5907  */
5908 static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
5909 					    enum pci_bus_speed *speed,
5910 					    enum pcie_link_width *width)
5911 {
5912 	struct pci_dev *parent = adev->pdev;
5913 
5914 	if (!speed || !width)
5915 		return;
5916 
5917 	*speed = PCI_SPEED_UNKNOWN;
5918 	*width = PCIE_LNK_WIDTH_UNKNOWN;
5919 
5920 	if (amdgpu_device_pcie_dynamic_switching_supported(adev)) {
5921 		while ((parent = pci_upstream_bridge(parent))) {
5922 			/* skip upstream/downstream switches internal to dGPU */
5923 			if (parent->vendor == PCI_VENDOR_ID_ATI)
5924 				continue;
5925 			*speed = pcie_get_speed_cap(parent);
5926 			*width = pcie_get_width_cap(parent);
5927 			break;
5928 		}
5929 	} else {
5930 		/* use the current speeds rather than max if switching is not supported */
5931 		pcie_bandwidth_available(adev->pdev, NULL, speed, width);
5932 	}
5933 }
5934 
5935 /**
5936  * amdgpu_device_gpu_bandwidth - find the bandwidth of the GPU
5937  *
5938  * @adev: amdgpu_device pointer
5939  * @speed: pointer to the speed of the link
5940  * @width: pointer to the width of the link
5941  *
5942  * Evaluate the hierarchy to find the speed and bandwidth capabilities of the
5943  * AMD dGPU, which may sit behind a virtual upstream bridge.
5944  */
5945 static void amdgpu_device_gpu_bandwidth(struct amdgpu_device *adev,
5946 					enum pci_bus_speed *speed,
5947 					enum pcie_link_width *width)
5948 {
5949 	struct pci_dev *parent = adev->pdev;
5950 
5951 	if (!speed || !width)
5952 		return;
5953 
5954 	parent = pci_upstream_bridge(parent);
5955 	if (parent && parent->vendor == PCI_VENDOR_ID_ATI) {
5956 		/* use the upstream/downstream switches internal to dGPU */
5957 		*speed = pcie_get_speed_cap(parent);
5958 		*width = pcie_get_width_cap(parent);
5959 		while ((parent = pci_upstream_bridge(parent))) {
5960 			if (parent->vendor == PCI_VENDOR_ID_ATI) {
5961 				/* use the upstream/downstream switches internal to dGPU */
5962 				*speed = pcie_get_speed_cap(parent);
5963 				*width = pcie_get_width_cap(parent);
5964 			}
5965 		}
5966 	} else {
5967 		/* use the device itself */
5968 		*speed = pcie_get_speed_cap(adev->pdev);
5969 		*width = pcie_get_width_cap(adev->pdev);
5970 	}
5971 }
5972 
5973 /**
5974  * amdgpu_device_get_pcie_info - fetch PCIe info about the PCIe slot
5975  *
5976  * @adev: amdgpu_device pointer
5977  *
5978  * Fetches and stores in the driver the PCIE capabilities (gen speed
5979  * and lanes) of the slot the device is in. Handles APUs and
5980  * virtualized environments where PCIE config space may not be available.
5981  */
5982 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5983 {
5984 	enum pci_bus_speed speed_cap, platform_speed_cap;
5985 	enum pcie_link_width platform_link_width, link_width;
5986 
5987 	if (amdgpu_pcie_gen_cap)
5988 		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5989 
5990 	if (amdgpu_pcie_lane_cap)
5991 		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5992 
5993 	/* covers APUs as well */
5994 	if (pci_is_root_bus(adev->pdev->bus) && !amdgpu_passthrough(adev)) {
5995 		if (adev->pm.pcie_gen_mask == 0)
5996 			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5997 		if (adev->pm.pcie_mlw_mask == 0)
5998 			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5999 		return;
6000 	}
6001 
6002 	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
6003 		return;
6004 
6005 	amdgpu_device_partner_bandwidth(adev, &platform_speed_cap,
6006 					&platform_link_width);
6007 	amdgpu_device_gpu_bandwidth(adev, &speed_cap, &link_width);
6008 
6009 	if (adev->pm.pcie_gen_mask == 0) {
6010 		/* asic caps */
6011 		if (speed_cap == PCI_SPEED_UNKNOWN) {
6012 			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6013 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6014 						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
6015 		} else {
6016 			if (speed_cap == PCIE_SPEED_32_0GT)
6017 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6018 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6019 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
6020 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
6021 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
6022 			else if (speed_cap == PCIE_SPEED_16_0GT)
6023 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6024 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6025 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
6026 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
6027 			else if (speed_cap == PCIE_SPEED_8_0GT)
6028 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6029 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6030 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
6031 			else if (speed_cap == PCIE_SPEED_5_0GT)
6032 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6033 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
6034 			else
6035 				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
6036 		}
6037 		/* platform caps */
6038 		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
6039 			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6040 						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
6041 		} else {
6042 			if (platform_speed_cap == PCIE_SPEED_32_0GT)
6043 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6044 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6045 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
6046 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
6047 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
6048 			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
6049 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6050 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6051 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
6052 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
6053 			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
6054 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6055 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
6056 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
6057 			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
6058 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
6059 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
6060 			else
6061 				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
6062 
6063 		}
6064 	}
6065 	if (adev->pm.pcie_mlw_mask == 0) {
6066 		/* asic caps */
6067 		if (link_width == PCIE_LNK_WIDTH_UNKNOWN) {
6068 			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_ASIC_PCIE_MLW_MASK;
6069 		} else {
6070 			switch (link_width) {
6071 			case PCIE_LNK_X32:
6072 				adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X32 |
6073 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 |
6074 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
6075 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
6076 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
6077 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
6078 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
6079 				break;
6080 			case PCIE_LNK_X16:
6081 				adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 |
6082 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
6083 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
6084 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
6085 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
6086 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
6087 				break;
6088 			case PCIE_LNK_X12:
6089 				adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
6090 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
6091 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
6092 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
6093 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
6094 				break;
6095 			case PCIE_LNK_X8:
6096 				adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
6097 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
6098 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
6099 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
6100 				break;
6101 			case PCIE_LNK_X4:
6102 				adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
6103 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
6104 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
6105 				break;
6106 			case PCIE_LNK_X2:
6107 				adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
6108 							   CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
6109 				break;
6110 			case PCIE_LNK_X1:
6111 				adev->pm.pcie_mlw_mask |= CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1;
6112 				break;
6113 			default:
6114 				break;
6115 			}
6116 		}
6117 		/* platform caps */
6118 		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
6119 			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
6120 		} else {
6121 			switch (platform_link_width) {
6122 			case PCIE_LNK_X32:
6123 				adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
6124 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
6125 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
6126 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
6127 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
6128 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6129 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6130 				break;
6131 			case PCIE_LNK_X16:
6132 				adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
6133 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
6134 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
6135 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
6136 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6137 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6138 				break;
6139 			case PCIE_LNK_X12:
6140 				adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
6141 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
6142 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
6143 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6144 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6145 				break;
6146 			case PCIE_LNK_X8:
6147 				adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
6148 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
6149 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6150 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6151 				break;
6152 			case PCIE_LNK_X4:
6153 				adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
6154 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6155 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6156 				break;
6157 			case PCIE_LNK_X2:
6158 				adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
6159 							   CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
6160 				break;
6161 			case PCIE_LNK_X1:
6162 				adev->pm.pcie_mlw_mask |= CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
6163 				break;
6164 			default:
6165 				break;
6166 			}
6167 		}
6168 	}
6169 }
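
/*
 * Consumer sketch (illustrative): power management code tests the cached
 * masks instead of re-querying PCI config space, e.g. to decide whether a
 * gen3 link speed may be requested:
 *
 *	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
 *		...allow gen3 DPM levels...
 */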
6170 
6171 /**
6172  * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
6173  *
6174  * @adev: amdgpu_device pointer
6175  * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
6176  *
6177  * Return true if @peer_adev can access (DMA) @adev through the PCIe
6178  * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
6179  * @peer_adev.
6180  */
6181 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
6182 				      struct amdgpu_device *peer_adev)
6183 {
6184 #ifdef CONFIG_HSA_AMD_P2P
6185 	bool p2p_access =
6186 		!adev->gmc.xgmi.connected_to_cpu &&
6187 		!(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
6188 	if (!p2p_access)
6189 		dev_info(adev->dev, "PCIe P2P access from peer device %s is not supported by the chipset\n",
6190 			pci_name(peer_adev->pdev));
6191 
6192 	bool is_large_bar = adev->gmc.visible_vram_size &&
6193 		adev->gmc.real_vram_size == adev->gmc.visible_vram_size;
6194 	bool p2p_addressable = amdgpu_device_check_iommu_remap(peer_adev);
6195 
6196 	if (!p2p_addressable) {
6197 		uint64_t address_mask = peer_adev->dev->dma_mask ?
6198 			~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
6199 		resource_size_t aper_limit =
6200 			adev->gmc.aper_base + adev->gmc.aper_size - 1;
6201 
6202 		p2p_addressable = !(adev->gmc.aper_base & address_mask ||
6203 				     aper_limit & address_mask);
6204 	}
6205 	return pcie_p2p && is_large_bar && p2p_access && p2p_addressable;
6206 #else
6207 	return false;
6208 #endif
6209 }
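
/*
 * Illustrative use: callers gate peer-to-peer DMA setup on this check,
 * falling back to system-memory staging when direct access is impossible:
 *
 *	if (amdgpu_device_is_peer_accessible(adev, peer_adev))
 *		...map @adev's VRAM for DMA by @peer_adev...
 *	else
 *		...bounce through system memory...
 */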
6210 
6211 int amdgpu_device_baco_enter(struct amdgpu_device *adev)
6212 {
6213 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
6214 
6215 	if (!amdgpu_device_supports_baco(adev))
6216 		return -ENOTSUPP;
6217 
6218 	if (ras && adev->ras_enabled &&
6219 	    adev->nbio.funcs->enable_doorbell_interrupt)
6220 		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
6221 
6222 	return amdgpu_dpm_baco_enter(adev);
6223 }
6224 
6225 int amdgpu_device_baco_exit(struct amdgpu_device *adev)
6226 {
6227 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
6228 	int ret = 0;
6229 
6230 	if (!amdgpu_device_supports_baco(adev))
6231 		return -ENOTSUPP;
6232 
6233 	ret = amdgpu_dpm_baco_exit(adev);
6234 	if (ret)
6235 		return ret;
6236 
6237 	if (ras && adev->ras_enabled &&
6238 	    adev->nbio.funcs->enable_doorbell_interrupt)
6239 		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
6240 
6241 	if (amdgpu_passthrough(adev) && adev->nbio.funcs &&
6242 	    adev->nbio.funcs->clear_doorbell_interrupt)
6243 		adev->nbio.funcs->clear_doorbell_interrupt(adev);
6244 
6245 	return 0;
6246 }
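
/*
 * Illustrative pairing: BACO (Bus Active, Chip Off) is entered and exited
 * around a period where the chip may be powered down while the bus stays
 * active, e.g. for runtime power management:
 *
 *	r = amdgpu_device_baco_enter(adev);
 *	if (r)
 *		return r;
 *	...chip is off, PCI bus remains active...
 *	r = amdgpu_device_baco_exit(adev);
 */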
6247 
6248 /**
6249  * amdgpu_pci_error_detected - Called when a PCI error is detected.
6250  * @pdev: PCI device struct
6251  * @state: PCI channel state
6252  *
6253  * Description: Called when a PCI error is detected.
6254  *
6255  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
6256  * Return: PCI_ERS_RESULT_CAN_RECOVER, PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
6257 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
6258 {
6259 	struct drm_device *dev = pci_get_drvdata(pdev);
6260 	struct amdgpu_device *adev = drm_to_adev(dev);
6261 	struct amdgpu_hive_info *hive __free(xgmi_put_hive) =
6262 		amdgpu_get_xgmi_hive(adev);
6263 	struct amdgpu_reset_context reset_context;
6264 	struct list_head device_list;
6265 
6266 	dev_info(adev->dev, "PCI error: detected callback!!\n");
6267 
6268 	adev->pci_channel_state = state;
6269 
6270 	switch (state) {
6271 	case pci_channel_io_normal:
6272 		dev_info(adev->dev, "pci_channel_io_normal: state(%d)!!\n", state);
6273 		return PCI_ERS_RESULT_CAN_RECOVER;
6274 	case pci_channel_io_frozen:
6275 		/* Fatal error, prepare for slot reset */
6276 		dev_info(adev->dev, "pci_channel_io_frozen: state(%d)!!\n", state);
6277 		if (hive) {
6278 			/* Hive devices should be able to support FW-based
6279 			 * link reset on other devices; if not, return.
6280 			 */
6281 			if (!amdgpu_dpm_is_link_reset_supported(adev)) {
6282 				dev_warn(adev->dev,
6283 					 "No support for XGMI hive yet...\n");
6284 				return PCI_ERS_RESULT_DISCONNECT;
6285 			}
6286 			/* Set dpc status only if device is part of hive
6287 			 * Non-hive devices should be able to recover after
6288 			 * link reset.
6289 			 */
6290 			amdgpu_reset_set_dpc_status(adev, true);
6291 
6292 			mutex_lock(&hive->hive_lock);
6293 		} else {
6294 			if (amdgpu_device_bus_status_check(adev))
6295 				amdgpu_reset_set_dpc_status(adev, true);
6296 		}
6297 		memset(&reset_context, 0, sizeof(reset_context));
6298 		INIT_LIST_HEAD(&device_list);
6299 
6300 		amdgpu_device_recovery_prepare(adev, &device_list, hive);
6301 		amdgpu_device_recovery_get_reset_lock(adev, &device_list);
6302 		amdgpu_device_halt_activities(adev, NULL, &reset_context, &device_list,
6303 					      hive, false);
6304 		if (hive)
6305 			mutex_unlock(&hive->hive_lock);
6306 		return PCI_ERS_RESULT_NEED_RESET;
6307 	case pci_channel_io_perm_failure:
6308 		/* Permanent error, prepare for device removal */
6309 		dev_info(adev->dev, "pci_channel_io_perm_failure: state(%d)!!\n", state);
6310 		return PCI_ERS_RESULT_DISCONNECT;
6311 	}
6312 
6313 	return PCI_ERS_RESULT_NEED_RESET;
6314 }
6315 
6316 /**
6317  * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
6318  * @pdev: pointer to PCI device
6319  */
6320 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
6321 {
6322 	struct drm_device *dev = pci_get_drvdata(pdev);
6323 	struct amdgpu_device *adev = drm_to_adev(dev);
6324 
6325 	dev_info(adev->dev, "PCI error: mmio enabled callback!!\n");
6326 
6327 	/* TODO - dump whatever for debugging purposes */
6328 
6329 	/* This is called only if amdgpu_pci_error_detected returns
6330 	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
6331 	 * works, so there is no need to reset the slot.
6332 	 */
6333 
6334 	return PCI_ERS_RESULT_RECOVERED;
6335 }
6336 
6337 /**
6338  * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
6339  * @pdev: PCI device struct
6340  *
6341  * Description: This routine is called by the pci error recovery
6342  * code after the PCI slot has been reset, just before we
6343  * should resume normal operations.
6344  */
6345 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
6346 {
6347 	struct drm_device *dev = pci_get_drvdata(pdev);
6348 	struct amdgpu_device *adev = drm_to_adev(dev);
6349 	struct amdgpu_reset_context reset_context;
6350 	struct amdgpu_device *tmp_adev;
6351 	struct amdgpu_hive_info *hive;
6352 	struct list_head device_list;
6353 	struct pci_dev *link_dev;
6354 	int r = 0, i, timeout;
6355 	u32 memsize;
6356 	u16 status;
6357 
6358 	dev_info(adev->dev, "PCI error: slot reset callback!!\n");
6359 
6360 	memset(&reset_context, 0, sizeof(reset_context));
6361 	INIT_LIST_HEAD(&device_list);
6362 	hive = amdgpu_get_xgmi_hive(adev);
6363 	if (hive) {
6364 		mutex_lock(&hive->hive_lock);
6365 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
6366 			list_add_tail(&tmp_adev->reset_list, &device_list);
6367 	} else {
6368 		list_add_tail(&adev->reset_list, &device_list);
6369 	}
6370 
6371 	if (adev->pcie_reset_ctx.swus)
6372 		link_dev = adev->pcie_reset_ctx.swus;
6373 	else
6374 		link_dev = adev->pdev;
6375 	/* wait for asic to come out of reset, timeout = 10s */
6376 	timeout = 10000;
6377 	do {
6378 		usleep_range(10000, 10500);
6379 		r = pci_read_config_word(link_dev, PCI_VENDOR_ID, &status);
6380 		timeout -= 10;
6381 	} while (timeout > 0 && (status != PCI_VENDOR_ID_ATI) &&
6382 		 (status != PCI_VENDOR_ID_AMD));
6383 
6384 	if ((status != PCI_VENDOR_ID_ATI) && (status != PCI_VENDOR_ID_AMD)) {
6385 		r = -ETIME;
6386 		goto out;
6387 	}
6388 
6389 	amdgpu_device_load_switch_state(adev);
6390 	/* Restore PCI config space */
6391 	amdgpu_device_load_pci_state(pdev);
6392 
6393 	/* confirm ASIC came out of reset */
6394 	for (i = 0; i < adev->usec_timeout; i++) {
6395 		memsize = amdgpu_asic_get_config_memsize(adev);
6396 
6397 		if (memsize != 0xffffffff)
6398 			break;
6399 		udelay(1);
6400 	}
6401 	if (memsize == 0xffffffff) {
6402 		r = -ETIME;
6403 		goto out;
6404 	}
6405 
6406 	reset_context.method = AMD_RESET_METHOD_NONE;
6407 	reset_context.reset_req_dev = adev;
6408 	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
6409 	set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
6410 
6411 	if (hive) {
6412 		reset_context.hive = hive;
6413 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
6414 			tmp_adev->pcie_reset_ctx.in_link_reset = true;
6415 	} else {
6416 		adev->pcie_reset_ctx.in_link_reset = true;
6417 		set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
6418 	}
6419 
6420 	r = amdgpu_device_asic_reset(adev, &device_list, &reset_context);
6421 out:
6422 	if (!r) {
6423 		if (amdgpu_device_cache_pci_state(adev->pdev))
6424 			pci_restore_state(adev->pdev);
6425 		dev_info(adev->dev, "PCIe error recovery succeeded\n");
6426 	} else {
6427 		dev_err(adev->dev, "PCIe error recovery failed, err:%d\n", r);
6428 		if (hive) {
6429 			list_for_each_entry(tmp_adev, &device_list, reset_list)
6430 				amdgpu_device_unset_mp1_state(tmp_adev);
6431 		}
6432 		amdgpu_device_recovery_put_reset_lock(adev, &device_list);
6433 	}
6434 
6435 	if (hive) {
6436 		mutex_unlock(&hive->hive_lock);
6437 		amdgpu_put_xgmi_hive(hive);
6438 	}
6439 
6440 	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
6441 }
6442 
6443 /**
6444  * amdgpu_pci_resume() - resume normal ops after PCI reset
6445  * @pdev: pointer to PCI device
6446  *
6447  * Called when the error recovery driver tells us that it's
6448  * OK to resume normal operation.
6449  */
6450 void amdgpu_pci_resume(struct pci_dev *pdev)
6451 {
6452 	struct drm_device *dev = pci_get_drvdata(pdev);
6453 	struct amdgpu_device *adev = drm_to_adev(dev);
6454 	struct list_head device_list;
6455 	struct amdgpu_hive_info *hive = NULL;
6456 	struct amdgpu_device *tmp_adev = NULL;
6457 
6458 	dev_info(adev->dev, "PCI error: resume callback!!\n");
6459 
6460 	/* Only continue execution for the case of pci_channel_io_frozen */
6461 	if (adev->pci_channel_state != pci_channel_io_frozen)
6462 		return;
6463 
6464 	INIT_LIST_HEAD(&device_list);
6465 
6466 	hive = amdgpu_get_xgmi_hive(adev);
6467 	if (hive) {
6468 		mutex_lock(&hive->hive_lock);
6469 		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
6470 			tmp_adev->pcie_reset_ctx.in_link_reset = false;
6471 			list_add_tail(&tmp_adev->reset_list, &device_list);
6472 		}
6473 	} else {
6474 		adev->pcie_reset_ctx.in_link_reset = false;
6475 		list_add_tail(&adev->reset_list, &device_list);
6476 	}
6477 	amdgpu_device_sched_resume(&device_list, NULL, false);
6478 	amdgpu_device_gpu_resume(adev, &device_list, false);
6479 	amdgpu_device_recovery_put_reset_lock(adev, &device_list);
6480 
6481 	if (hive) {
6482 		mutex_unlock(&hive->hive_lock);
6483 		amdgpu_put_xgmi_hive(hive);
6484 	}
6485 }
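
/*
 * The four callbacks above form the PCI error recovery state machine. They
 * are wired into the PCI core through a struct pci_error_handlers at driver
 * registration time (cf. amdgpu_drv.c); sketched here for reference:
 *
 *	static const struct pci_error_handlers amdgpu_pci_err_handler = {
 *		.error_detected	= amdgpu_pci_error_detected,
 *		.mmio_enabled	= amdgpu_pci_mmio_enabled,
 *		.slot_reset	= amdgpu_pci_slot_reset,
 *		.resume		= amdgpu_pci_resume,
 *	};
 */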
6486 
6487 static void amdgpu_device_cache_switch_state(struct amdgpu_device *adev)
6488 {
6489 	struct pci_dev *swus, *swds;
6490 	int r;
6491 
6492 	swds = pci_upstream_bridge(adev->pdev);
6493 	if (!swds || swds->vendor != PCI_VENDOR_ID_ATI ||
6494 	    pci_pcie_type(swds) != PCI_EXP_TYPE_DOWNSTREAM)
6495 		return;
6496 	swus = pci_upstream_bridge(swds);
6497 	if (!swus ||
6498 	    (swus->vendor != PCI_VENDOR_ID_ATI &&
6499 	     swus->vendor != PCI_VENDOR_ID_AMD) ||
6500 	    pci_pcie_type(swus) != PCI_EXP_TYPE_UPSTREAM)
6501 		return;
6502 
6503 	/* If already saved, return */
6504 	if (adev->pcie_reset_ctx.swus)
6505 		return;
6506 	/* Upstream bridge is ATI, assume it's SWUS/DS architecture */
6507 	r = pci_save_state(swds);
6508 	if (r)
6509 		return;
6510 	adev->pcie_reset_ctx.swds_pcistate = pci_store_saved_state(swds);
6511 
6512 	r = pci_save_state(swus);
6513 	if (r)
6514 		return;
6515 	adev->pcie_reset_ctx.swus_pcistate = pci_store_saved_state(swus);
6516 
6517 	adev->pcie_reset_ctx.swus = swus;
6518 }
6519 
6520 static void amdgpu_device_load_switch_state(struct amdgpu_device *adev)
6521 {
6522 	struct pci_dev *pdev;
6523 	int r;
6524 
6525 	if (!adev->pcie_reset_ctx.swds_pcistate ||
6526 	    !adev->pcie_reset_ctx.swus_pcistate)
6527 		return;
6528 
6529 	pdev = adev->pcie_reset_ctx.swus;
6530 	r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swus_pcistate);
6531 	if (!r) {
6532 		pci_restore_state(pdev);
6533 	} else {
6534 		dev_warn(adev->dev, "Failed to load SWUS state, err:%d\n", r);
6535 		return;
6536 	}
6537 
6538 	pdev = pci_upstream_bridge(adev->pdev);
6539 	r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swds_pcistate);
6540 	if (!r)
6541 		pci_restore_state(pdev);
6542 	else
6543 		dev_warn(adev->dev, "Failed to load SWDS state, err:%d\n", r);
6544 }
6545 
6546 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
6547 {
6548 	struct drm_device *dev = pci_get_drvdata(pdev);
6549 	struct amdgpu_device *adev = drm_to_adev(dev);
6550 	int r;
6551 
6552 	if (amdgpu_sriov_vf(adev))
6553 		return false;
6554 
6555 	r = pci_save_state(pdev);
6556 	if (!r) {
6557 		kfree(adev->pci_state);
6558 
6559 		adev->pci_state = pci_store_saved_state(pdev);
6560 
6561 		if (!adev->pci_state) {
6562 			dev_err(adev->dev, "Failed to store PCI saved state");
6563 			return false;
6564 		}
6565 	} else {
6566 		dev_warn(adev->dev, "Failed to save PCI state, err:%d\n", r);
6567 		return false;
6568 	}
6569 
6570 	amdgpu_device_cache_switch_state(adev);
6571 
6572 	return true;
6573 }
6574 
6575 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
6576 {
6577 	struct drm_device *dev = pci_get_drvdata(pdev);
6578 	struct amdgpu_device *adev = drm_to_adev(dev);
6579 	int r;
6580 
6581 	if (!adev->pci_state)
6582 		return false;
6583 
6584 	r = pci_load_saved_state(pdev, adev->pci_state);
6585 
6586 	if (!r) {
6587 		pci_restore_state(pdev);
6588 	} else {
6589 		dev_warn(adev->dev, "Failed to load PCI state, err:%d\n", r);
6590 		return false;
6591 	}
6592 
6593 	return true;
6594 }
6595 
6596 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
6597 		struct amdgpu_ring *ring)
6598 {
6599 #ifdef CONFIG_X86_64
6600 	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6601 		return;
6602 #endif
6603 	if (adev->gmc.xgmi.connected_to_cpu)
6604 		return;
6605 
6606 	if (ring && ring->funcs->emit_hdp_flush) {
6607 		amdgpu_ring_emit_hdp_flush(ring);
6608 		return;
6609 	}
6610 
6611 	if (!ring && amdgpu_sriov_runtime(adev)) {
6612 		if (!amdgpu_kiq_hdp_flush(adev))
6613 			return;
6614 	}
6615 
6616 	amdgpu_hdp_flush(adev, ring);
6617 }
6618 
6619 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
6620 		struct amdgpu_ring *ring)
6621 {
6622 #ifdef CONFIG_X86_64
6623 	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6624 		return;
6625 #endif
6626 	if (adev->gmc.xgmi.connected_to_cpu)
6627 		return;
6628 
6629 	amdgpu_hdp_invalidate(adev, ring);
6630 }
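
/*
 * Typical use (illustrative): after the CPU writes to VRAM through the BAR,
 * flush the HDP write cache so the GPU observes the data; conversely,
 * invalidate the HDP read cache before the CPU reads data the GPU wrote.
 * The aper_base_kaddr mapping below is an assumption for the sketch.
 *
 *	memcpy_toio(adev->mman.aper_base_kaddr + offset, buf, size);
 *	amdgpu_device_flush_hdp(adev, NULL);
 */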
6631 
6632 int amdgpu_in_reset(struct amdgpu_device *adev)
6633 {
6634 	return atomic_read(&adev->reset_domain->in_gpu_reset);
6635 }
6636 
6637 /**
6638  * amdgpu_device_halt() - bring hardware to some kind of halt state
6639  *
6640  * @adev: amdgpu_device pointer
6641  *
6642  * Bring hardware to some kind of halt state so that no one can touch it
6643  * any more. It helps to maintain error context when an error occurs.
6644  * Compared to a simple hang, the system stays stable at least for SSH
6645  * access. Then it should be trivial to inspect the hardware state and
6646  * see what's going on. Implemented as follows:
6647  *
6648  * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs,
6649  *    etc), clears CPU mappings to the device, disallows remaps via page faults
6650  * 2. amdgpu_irq_disable_all() disables all interrupts
6651  * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
6652  * 4. set adev->no_hw_access to avoid potential crashes after step 5
6653  * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
6654  * 6. pci_disable_device() and pci_wait_for_pending_transaction()
6655  *    flush any in-flight DMA operations
6656  */
6657 void amdgpu_device_halt(struct amdgpu_device *adev)
6658 {
6659 	struct pci_dev *pdev = adev->pdev;
6660 	struct drm_device *ddev = adev_to_drm(adev);
6661 
6662 	amdgpu_xcp_dev_unplug(adev);
6663 	drm_dev_unplug(ddev);
6664 
6665 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
6666 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
6667 
6668 	amdgpu_irq_disable_all(adev);
6669 
6670 	amdgpu_fence_driver_hw_fini(adev);
6671 
6672 	adev->no_hw_access = true;
6673 
6674 	amdgpu_device_unmap_mmio(adev);
6675 
6676 	pci_disable_device(pdev);
6677 	pci_wait_for_pending_transaction(pdev);
6678 }
6679 
6680 /**
6681  * amdgpu_device_get_gang - return a reference to the current gang
6682  * @adev: amdgpu_device pointer
6683  *
6684  * Returns: A new reference to the current gang leader.
6685  */
6686 struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev)
6687 {
6688 	struct dma_fence *fence;
6689 
6690 	rcu_read_lock();
6691 	fence = dma_fence_get_rcu_safe(&adev->gang_submit);
6692 	rcu_read_unlock();
6693 	return fence;
6694 }
6695 
6696 /**
6697  * amdgpu_device_switch_gang - switch to a new gang
6698  * @adev: amdgpu_device pointer
6699  * @gang: the gang to switch to
6700  *
6701  * Try to switch to a new gang.
6702  * Returns: NULL if we switched to the new gang or a reference to the current
6703  * gang leader.
6704  */
6705 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
6706 					    struct dma_fence *gang)
6707 {
6708 	struct dma_fence *old = NULL;
6709 
6710 	dma_fence_get(gang);
6711 	do {
6712 		dma_fence_put(old);
6713 		old = amdgpu_device_get_gang(adev);
6714 		if (old == gang)
6715 			break;
6716 
6717 		if (!dma_fence_is_signaled(old)) {
6718 			dma_fence_put(gang);
6719 			return old;
6720 		}
6721 
6722 	} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
6723 			 old, gang) != old);
6724 
6725 	/*
6726 	 * Drop it once for the exchanged reference in adev and once for the
6727 	 * thread local reference acquired in amdgpu_device_get_gang().
6728 	 */
6729 	dma_fence_put(old);
6730 	dma_fence_put(old);
6731 	return NULL;
6732 }
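
/*
 * Caller sketch (illustrative): one possible pattern is to wait for the
 * still-running previous gang before retrying the switch; real callers may
 * instead treat the returned fence as a scheduler dependency.
 *
 *	struct dma_fence *old = amdgpu_device_switch_gang(adev, gang);
 *
 *	if (old) {
 *		dma_fence_wait(old, false);
 *		dma_fence_put(old);
 *		...retry the switch...
 *	}
 */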
6733 
6734 /**
6735  * amdgpu_device_enforce_isolation - enforce HW isolation
6736  * @adev: the amdgpu device pointer
6737  * @ring: the HW ring the job is supposed to run on
6738  * @job: the job which is about to be pushed to the HW ring
6739  *
6740  * Makes sure that only one client at a time can use the GFX block.
6741  * Returns: The dependency to wait on before the job can be pushed to the HW.
6742  * The function is called multiple times until NULL is returned.
6743  */
6744 struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev,
6745 						  struct amdgpu_ring *ring,
6746 						  struct amdgpu_job *job)
6747 {
6748 	struct amdgpu_isolation *isolation = &adev->isolation[ring->xcp_id];
6749 	struct drm_sched_fence *f = job->base.s_fence;
6750 	struct dma_fence *dep;
6751 	void *owner;
6752 	int r;
6753 
6754 	/*
6755 	 * For now enforce isolation only for the GFX block since we only need
6756 	 * the cleaner shader on those rings.
6757 	 */
6758 	if (ring->funcs->type != AMDGPU_RING_TYPE_GFX &&
6759 	    ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
6760 		return NULL;
6761 
6762 	/*
6763 	 * All submissions where enforce isolation is false are handled as if
6764 	 * they come from a single client. Use ~0l as the owner to distinguish it
6765 	 * from kernel submissions where the owner is NULL.
6766 	 */
6767 	owner = job->enforce_isolation ? f->owner : (void *)~0l;
6768 
6769 	mutex_lock(&adev->enforce_isolation_mutex);
6770 
6771 	/*
6772 	 * The "spearhead" submission is the first one which changes the
6773 	 * ownership to its client. We always need to wait for it to be
6774 	 * pushed to the HW before proceeding with anything.
6775 	 */
6776 	if (&f->scheduled != isolation->spearhead &&
6777 	    !dma_fence_is_signaled(isolation->spearhead)) {
6778 		dep = isolation->spearhead;
6779 		goto out_grab_ref;
6780 	}
6781 
6782 	if (isolation->owner != owner) {
6783 
6784 		/*
6785 		 * Wait for any gang to be assembled before switching to a
6786 		 * different owner or otherwise we could deadlock the
6787 		 * submissions.
6788 		 */
6789 		if (!job->gang_submit) {
6790 			dep = amdgpu_device_get_gang(adev);
6791 			if (!dma_fence_is_signaled(dep))
6792 				goto out_return_dep;
6793 			dma_fence_put(dep);
6794 		}
6795 
6796 		dma_fence_put(isolation->spearhead);
6797 		isolation->spearhead = dma_fence_get(&f->scheduled);
6798 		amdgpu_sync_move(&isolation->active, &isolation->prev);
6799 		trace_amdgpu_isolation(isolation->owner, owner);
6800 		isolation->owner = owner;
6801 	}
6802 
6803 	/*
6804 	 * Specifying the ring here helps to pipeline submissions even when
6805 	 * isolation is enabled. If that is not desired for testing, NULL can be
6806 	 * used instead of the ring to enforce a CPU round trip while switching
6807 	 * between clients.
6808 	 */
6809 	dep = amdgpu_sync_peek_fence(&isolation->prev, ring);
6810 	r = amdgpu_sync_fence(&isolation->active, &f->finished, GFP_NOWAIT);
6811 	if (r)
6812 		dev_warn(adev->dev, "OOM tracking isolation\n");
6813 
6814 out_grab_ref:
6815 	dma_fence_get(dep);
6816 out_return_dep:
6817 	mutex_unlock(&adev->enforce_isolation_mutex);
6818 	return dep;
6819 }
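
/*
 * Caller sketch (illustrative): as the kernel-doc above notes, the job
 * dependency code keeps asking for isolation dependencies until none are
 * left, treating each returned fence as something to wait on first.
 *
 *	struct dma_fence *dep;
 *
 *	while ((dep = amdgpu_device_enforce_isolation(adev, ring, job))) {
 *		...make the job wait on @dep...
 *		dma_fence_put(dep);
 *	}
 */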
6820 
6821 bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
6822 {
6823 	switch (adev->asic_type) {
6824 #ifdef CONFIG_DRM_AMDGPU_SI
6825 	case CHIP_HAINAN:
6826 #endif
6827 	case CHIP_TOPAZ:
6828 		/* chips with no display hardware */
6829 		return false;
6830 #ifdef CONFIG_DRM_AMDGPU_SI
6831 	case CHIP_TAHITI:
6832 	case CHIP_PITCAIRN:
6833 	case CHIP_VERDE:
6834 	case CHIP_OLAND:
6835 #endif
6836 #ifdef CONFIG_DRM_AMDGPU_CIK
6837 	case CHIP_BONAIRE:
6838 	case CHIP_HAWAII:
6839 	case CHIP_KAVERI:
6840 	case CHIP_KABINI:
6841 	case CHIP_MULLINS:
6842 #endif
6843 	case CHIP_TONGA:
6844 	case CHIP_FIJI:
6845 	case CHIP_POLARIS10:
6846 	case CHIP_POLARIS11:
6847 	case CHIP_POLARIS12:
6848 	case CHIP_VEGAM:
6849 	case CHIP_CARRIZO:
6850 	case CHIP_STONEY:
6851 		/* chips with display hardware */
6852 		return true;
6853 	default:
6854 		/* IP discovery */
6855 		if (!amdgpu_ip_version(adev, DCE_HWIP, 0) ||
6856 		    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
6857 			return false;
6858 		return true;
6859 	}
6860 }
6861 
6862 ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring)
6863 {
6864 	ssize_t size = 0;
6865 
6866 	if (!ring || !ring->adev)
6867 		return size;
6868 
6869 	if (amdgpu_device_should_recover_gpu(ring->adev))
6870 		size |= AMDGPU_RESET_TYPE_FULL;
6871 
6872 	if (unlikely(!ring->adev->debug_disable_soft_recovery) &&
6873 	    !amdgpu_sriov_vf(ring->adev) && ring->funcs->soft_recovery)
6874 		size |= AMDGPU_RESET_TYPE_SOFT_RESET;
6875 
6876 	return size;
6877 }
6878 
6879 ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset)
6880 {
6881 	ssize_t size = 0;
6882 
6883 	if (supported_reset == 0) {
6884 		size += sysfs_emit_at(buf, size, "unsupported");
6885 		size += sysfs_emit_at(buf, size, "\n");
6886 		return size;
6887 
6888 	}
6889 
6890 	if (supported_reset & AMDGPU_RESET_TYPE_SOFT_RESET)
6891 		size += sysfs_emit_at(buf, size, "soft ");
6892 
6893 	if (supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)
6894 		size += sysfs_emit_at(buf, size, "queue ");
6895 
6896 	if (supported_reset & AMDGPU_RESET_TYPE_PER_PIPE)
6897 		size += sysfs_emit_at(buf, size, "pipe ");
6898 
6899 	if (supported_reset & AMDGPU_RESET_TYPE_FULL)
6900 		size += sysfs_emit_at(buf, size, "full ");
6901 
6902 	size += sysfs_emit_at(buf, size, "\n");
6903 	return size;
6904 }
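
/*
 * Sysfs sketch (illustrative; the attribute and mask field names are
 * hypothetical): an IP block exposes its supported reset types through a
 * show() callback built on this helper.
 *
 *	static ssize_t gfx_reset_mask_show(struct device *dev,
 *					   struct device_attribute *attr,
 *					   char *buf)
 *	{
 *		struct drm_device *ddev = dev_get_drvdata(dev);
 *		struct amdgpu_device *adev = drm_to_adev(ddev);
 *
 *		return amdgpu_show_reset_mask(buf, adev->gfx.gfx_supported_reset);
 *	}
 */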
6905 
6906 void amdgpu_device_set_uid(struct amdgpu_uid *uid_info,
6907 			   enum amdgpu_uid_type type, uint8_t inst,
6908 			   uint64_t uid)
6909 {
6910 	if (!uid_info)
6911 		return;
6912 
6913 	if (type >= AMDGPU_UID_TYPE_MAX) {
6914 		dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n",
6915 			     type);
6916 		return;
6917 	}
6918 
6919 	if (inst >= AMDGPU_UID_INST_MAX) {
6920 		dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n",
6921 			     inst);
6922 		return;
6923 	}
6924 
6925 	if (uid_info->uid[type][inst] != 0) {
6926 		dev_warn_once(
6927 			uid_info->adev->dev,
6928 			"Overwriting existing UID %llu for type %d instance %d\n",
6929 			uid_info->uid[type][inst], type, inst);
6930 	}
6931 
6932 	uid_info->uid[type][inst] = uid;
6933 }
6934 
6935 u64 amdgpu_device_get_uid(struct amdgpu_uid *uid_info,
6936 			  enum amdgpu_uid_type type, uint8_t inst)
6937 {
6938 	if (!uid_info)
6939 		return 0;
6940 
6941 	if (type >= AMDGPU_UID_TYPE_MAX) {
6942 		dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n",
6943 			     type);
6944 		return 0;
6945 	}
6946 
6947 	if (inst >= AMDGPU_UID_INST_MAX) {
6948 		dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n",
6949 			     inst);
6950 		return 0;
6951 	}
6952 
6953 	return uid_info->uid[type][inst];
6954 }
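
/*
 * Set/get pairing sketch (illustrative; AMDGPU_UID_TYPE_XCC is an assumed
 * enum value): discovery code records a unique ID per type and instance,
 * which consumers can later query back.
 *
 *	amdgpu_device_set_uid(uid_info, AMDGPU_UID_TYPE_XCC, inst, uid);
 *	...
 *	uid = amdgpu_device_get_uid(uid_info, AMDGPU_UID_TYPE_XCC, inst);
 */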
6955